Update Linux to v5.4.2

Change-Id: Idf6911045d9d382da2cfe01b1edff026404ac8fd
diff --git a/drivers/net/ethernet/marvell/Kconfig b/drivers/net/ethernet/marvell/Kconfig
index f33fd22..fb94216 100644
--- a/drivers/net/ethernet/marvell/Kconfig
+++ b/drivers/net/ethernet/marvell/Kconfig
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
 #
 # Marvell device configuration
 #
@@ -167,4 +168,7 @@
 
 	  If unsure, say N.
 
+
+source "drivers/net/ethernet/marvell/octeontx2/Kconfig"
+
 endif # NET_VENDOR_MARVELL
diff --git a/drivers/net/ethernet/marvell/Makefile b/drivers/net/ethernet/marvell/Makefile
index 55d4d10..89dea72 100644
--- a/drivers/net/ethernet/marvell/Makefile
+++ b/drivers/net/ethernet/marvell/Makefile
@@ -11,3 +11,4 @@
 obj-$(CONFIG_PXA168_ETH) += pxa168_eth.o
 obj-$(CONFIG_SKGE) += skge.o
 obj-$(CONFIG_SKY2) += sky2.o
+obj-y		+= octeontx2/
diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c
index 62f204f..82ea55a 100644
--- a/drivers/net/ethernet/marvell/mv643xx_eth.c
+++ b/drivers/net/ethernet/marvell/mv643xx_eth.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
 /*
  * Driver for Marvell Discovery (MV643XX) and Marvell Orion ethernet ports
  * Copyright (C) 2002 Matthew Dharm <mdharm@momenco.com>
@@ -21,19 +22,6 @@
  *			   Lennert Buytenhek <buytenh@marvell.com>
  *
  * Copyright (C) 2013 Michael Stapelberg <michael@stapelberg.de>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version 2
- * of the License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, see <http://www.gnu.org/licenses/>.
  */
 
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
@@ -671,7 +659,7 @@
 	for (frag = 0; frag < skb_shinfo(skb)->nr_frags; frag++) {
 		const skb_frag_t *fragp = &skb_shinfo(skb)->frags[frag];
 
-		if (skb_frag_size(fragp) <= 8 && fragp->page_offset & 7)
+		if (skb_frag_size(fragp) <= 8 && skb_frag_off(fragp) & 7)
 			return 1;
 	}
 
@@ -1499,23 +1487,16 @@
 				   struct ethtool_link_ksettings *cmd)
 {
 	struct net_device *dev = mp->dev;
-	u32 supported, advertising;
 
 	phy_ethtool_ksettings_get(dev->phydev, cmd);
 
 	/*
 	 * The MAC does not support 1000baseT_Half.
 	 */
-	ethtool_convert_link_mode_to_legacy_u32(&supported,
-						cmd->link_modes.supported);
-	ethtool_convert_link_mode_to_legacy_u32(&advertising,
-						cmd->link_modes.advertising);
-	supported &= ~SUPPORTED_1000baseT_Half;
-	advertising &= ~ADVERTISED_1000baseT_Half;
-	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
-						supported);
-	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
-						advertising);
+	linkmode_clear_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT,
+			   cmd->link_modes.supported);
+	linkmode_clear_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT,
+			   cmd->link_modes.advertising);
 
 	return 0;
 }
@@ -2733,17 +2714,17 @@
 
 	memset(&res, 0, sizeof(res));
 	if (of_irq_to_resource(pnp, 0, &res) <= 0) {
-		dev_err(&pdev->dev, "missing interrupt on %s\n", pnp->name);
+		dev_err(&pdev->dev, "missing interrupt on %pOFn\n", pnp);
 		return -EINVAL;
 	}
 
 	if (of_property_read_u32(pnp, "reg", &ppd.port_number)) {
-		dev_err(&pdev->dev, "missing reg property on %s\n", pnp->name);
+		dev_err(&pdev->dev, "missing reg property on %pOFn\n", pnp);
 		return -EINVAL;
 	}
 
 	if (ppd.port_number >= 3) {
-		dev_err(&pdev->dev, "invalid reg property on %s\n", pnp->name);
+		dev_err(&pdev->dev, "invalid reg property on %pOFn\n", pnp);
 		return -EINVAL;
 	}
 
@@ -2756,8 +2737,8 @@
 	}
 
 	mac_addr = of_get_mac_address(pnp);
-	if (mac_addr)
-		memcpy(ppd.mac_addr, mac_addr, ETH_ALEN);
+	if (!IS_ERR(mac_addr))
+		ether_addr_copy(ppd.mac_addr, mac_addr);
 
 	mv643xx_eth_property(pnp, "tx-queue-size", ppd.tx_queue_size);
 	mv643xx_eth_property(pnp, "tx-sram-addr", ppd.tx_sram_addr);
@@ -2886,7 +2867,7 @@
 
 	ret = mv643xx_eth_shared_of_probe(pdev);
 	if (ret)
-		return ret;
+		goto err_put_clk;
 	pd = dev_get_platdata(&pdev->dev);
 
 	msp->tx_csum_limit = (pd != NULL && pd->tx_csum_limit) ?
@@ -2894,6 +2875,11 @@
 	infer_hw_params(msp);
 
 	return 0;
+
+err_put_clk:
+	if (!IS_ERR(msp->clk))
+		clk_disable_unprepare(msp->clk);
+	return ret;
 }
 
 static int mv643xx_eth_shared_remove(struct platform_device *pdev)
@@ -3031,10 +3017,12 @@
 		phy->autoneg = AUTONEG_ENABLE;
 		phy->speed = 0;
 		phy->duplex = 0;
-		phy->advertising = phy->supported | ADVERTISED_Autoneg;
+		linkmode_copy(phy->advertising, phy->supported);
+		linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
+				 phy->advertising);
 	} else {
 		phy->autoneg = AUTONEG_DISABLE;
-		phy->advertising = 0;
+		linkmode_zero(phy->advertising);
 		phy->speed = speed;
 		phy->duplex = duplex;
 	}
diff --git a/drivers/net/ethernet/marvell/mvmdio.c b/drivers/net/ethernet/marvell/mvmdio.c
index c5dac6b..0b9e851 100644
--- a/drivers/net/ethernet/marvell/mvmdio.c
+++ b/drivers/net/ethernet/marvell/mvmdio.c
@@ -64,7 +64,7 @@
 
 struct orion_mdio_dev {
 	void __iomem *regs;
-	struct clk *clk[3];
+	struct clk *clk[4];
 	/*
 	 * If we have access to the error interrupt pin (which is
 	 * somewhat misnamed as it not only reflects internal errors
@@ -319,13 +319,34 @@
 
 	init_waitqueue_head(&dev->smi_busy_wait);
 
-	for (i = 0; i < ARRAY_SIZE(dev->clk); i++) {
-		dev->clk[i] = of_clk_get(pdev->dev.of_node, i);
-		if (IS_ERR(dev->clk[i]))
-			break;
-		clk_prepare_enable(dev->clk[i]);
+	if (pdev->dev.of_node) {
+		for (i = 0; i < ARRAY_SIZE(dev->clk); i++) {
+			dev->clk[i] = of_clk_get(pdev->dev.of_node, i);
+			if (PTR_ERR(dev->clk[i]) == -EPROBE_DEFER) {
+				ret = -EPROBE_DEFER;
+				goto out_clk;
+			}
+			if (IS_ERR(dev->clk[i]))
+				break;
+			clk_prepare_enable(dev->clk[i]);
+		}
+
+		if (!IS_ERR(of_clk_get(pdev->dev.of_node,
+				       ARRAY_SIZE(dev->clk))))
+			dev_warn(&pdev->dev,
+				 "unsupported number of clocks, limiting to the first "
+				 __stringify(ARRAY_SIZE(dev->clk)) "\n");
+	} else {
+		dev->clk[0] = clk_get(&pdev->dev, NULL);
+		if (PTR_ERR(dev->clk[0]) == -EPROBE_DEFER) {
+			ret = -EPROBE_DEFER;
+			goto out_clk;
+		}
+		if (!IS_ERR(dev->clk[0]))
+			clk_prepare_enable(dev->clk[0]);
 	}
 
+
 	dev->err_interrupt = platform_get_irq(pdev, 0);
 	if (dev->err_interrupt > 0 &&
 	    resource_size(r) < MVMDIO_ERR_INT_MASK + 4) {
@@ -362,6 +383,7 @@
 	if (dev->err_interrupt > 0)
 		writel(0, dev->regs + MVMDIO_ERR_INT_MASK);
 
+out_clk:
 	for (i = 0; i < ARRAY_SIZE(dev->clk); i++) {
 		if (IS_ERR(dev->clk[i]))
 			break;
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index b4ed7d3..e498206 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -27,6 +27,7 @@
 #include <linux/of_irq.h>
 #include <linux/of_mdio.h>
 #include <linux/of_net.h>
+#include <linux/phy/phy.h>
 #include <linux/phy.h>
 #include <linux/phylink.h>
 #include <linux/platform_device.h>
@@ -221,6 +222,8 @@
 #define      MVNETA_GMAC_AN_FLOW_CTRL_EN         BIT(11)
 #define      MVNETA_GMAC_CONFIG_FULL_DUPLEX      BIT(12)
 #define      MVNETA_GMAC_AN_DUPLEX_EN            BIT(13)
+#define MVNETA_GMAC_CTRL_4                       0x2c90
+#define      MVNETA_GMAC4_SHORT_PREAMBLE_ENABLE  BIT(1)
 #define MVNETA_MIB_COUNTERS_BASE                 0x3000
 #define      MVNETA_MIB_LATE_COLLISION           0x7c
 #define MVNETA_DA_FILT_SPEC_MCAST                0x3400
@@ -406,7 +409,6 @@
 	struct mvneta_pcpu_stats __percpu	*stats;
 
 	int pkt_size;
-	unsigned int frag_size;
 	void __iomem *base;
 	struct mvneta_rx_queue *rxqs;
 	struct mvneta_tx_queue *txqs;
@@ -435,6 +437,8 @@
 	struct device_node *dn;
 	unsigned int tx_csum_limit;
 	struct phylink *phylink;
+	struct phylink_config phylink_config;
+	struct phy *comphy;
 
 	struct mvneta_bm *bm_priv;
 	struct mvneta_bm_pool *pool_long;
@@ -492,7 +496,7 @@
 #if defined(__LITTLE_ENDIAN)
 struct mvneta_tx_desc {
 	u32  command;		/* Options used by HW for packet transmitting.*/
-	u16  reserverd1;	/* csum_l4 (for future use)		*/
+	u16  reserved1;		/* csum_l4 (for future use)		*/
 	u16  data_size;		/* Data size of transmitted packet in bytes */
 	u32  buf_phys_addr;	/* Physical addr of transmitted buffer	*/
 	u32  reserved2;		/* hw_cmd - (for future use, PMT)	*/
@@ -517,7 +521,7 @@
 #else
 struct mvneta_tx_desc {
 	u16  data_size;		/* Data size of transmitted packet in bytes */
-	u16  reserverd1;	/* csum_l4 (for future use)		*/
+	u16  reserved1;		/* csum_l4 (for future use)		*/
 	u32  command;		/* Options used by HW for packet transmitting.*/
 	u32  reserved2;		/* hw_cmd - (for future use, PMT)	*/
 	u32  buf_phys_addr;	/* Physical addr of transmitted buffer	*/
@@ -1115,7 +1119,7 @@
 			SKB_DATA_ALIGN(MVNETA_RX_BUF_SIZE(bm_pool->pkt_size));
 
 	/* Fill entire long pool */
-	num = hwbm_pool_add(hwbm_pool, hwbm_pool->size, GFP_ATOMIC);
+	num = hwbm_pool_add(hwbm_pool, hwbm_pool->size);
 	if (num != hwbm_pool->size) {
 		WARN(1, "pool %d: %d of %d allocated\n",
 		     bm_pool->id, num, hwbm_pool->size);
@@ -2064,10 +2068,7 @@
 		/* Linux processing */
 		rxq->skb->protocol = eth_type_trans(rxq->skb, dev);
 
-		if (dev->features & NETIF_F_GRO)
-			napi_gro_receive(napi, rxq->skb);
-		else
-			netif_receive_skb(rxq->skb);
+		napi_gro_receive(napi, rxq->skb);
 
 		/* clean uncomplete skb pointer in queue */
 		rxq->skb = NULL;
@@ -2148,7 +2149,7 @@
 			if (unlikely(!skb))
 				goto err_drop_frame_ret_pool;
 
-			dma_sync_single_range_for_cpu(dev->dev.parent,
+			dma_sync_single_range_for_cpu(&pp->bm_priv->pdev->dev,
 			                              rx_desc->buf_phys_addr,
 			                              MVNETA_MH_SIZE + NET_SKB_PAD,
 			                              rx_bytes,
@@ -2349,10 +2350,10 @@
 
 	for (i = 0; i < nr_frags; i++) {
 		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
-		void *addr = page_address(frag->page.p) + frag->page_offset;
+		void *addr = skb_frag_address(frag);
 
 		tx_desc = mvneta_txq_next_desc_get(txq);
-		tx_desc->data_size = frag->size;
+		tx_desc->data_size = skb_frag_size(frag);
 
 		tx_desc->buf_phys_addr =
 			dma_map_single(pp->dev->dev.parent, addr,
@@ -2395,7 +2396,7 @@
 }
 
 /* Main tx processing */
-static int mvneta_tx(struct sk_buff *skb, struct net_device *dev)
+static netdev_tx_t mvneta_tx(struct sk_buff *skb, struct net_device *dev)
 {
 	struct mvneta_port *pp = netdev_priv(dev);
 	u16 txq_id = skb_get_queue_mapping(skb);
@@ -2467,7 +2468,7 @@
 		if (txq->count >= txq->tx_stop_threshold)
 			netif_tx_stop_queue(nq);
 
-		if (!skb->xmit_more || netif_xmit_stopped(nq) ||
+		if (!netdev_xmit_more() || netif_xmit_stopped(nq) ||
 		    txq->pending + frags > MVNETA_TXQ_DEC_SENT_MASK)
 			mvneta_txq_pend_desc_add(pp, txq, frags);
 		else
@@ -2509,12 +2510,13 @@
 {
 	struct mvneta_tx_queue *txq;
 	struct netdev_queue *nq;
+	int cpu = smp_processor_id();
 
 	while (cause_tx_done) {
 		txq = mvneta_tx_done_policy(pp, cause_tx_done);
 
 		nq = netdev_get_tx_queue(pp->dev, txq->id);
-		__netif_tx_lock(nq, smp_processor_id());
+		__netif_tx_lock(nq, cpu);
 
 		if (txq->count)
 			mvneta_txq_done(pp, txq);
@@ -2905,7 +2907,9 @@
 	if (!pp->bm_priv) {
 		/* Set Offset */
 		mvneta_rxq_offset_set(pp, rxq, 0);
-		mvneta_rxq_buf_size_set(pp, rxq, pp->frag_size);
+		mvneta_rxq_buf_size_set(pp, rxq, PAGE_SIZE < SZ_64K ?
+					PAGE_SIZE :
+					MVNETA_RX_BUF_SIZE(pp->pkt_size));
 		mvneta_rxq_bm_disable(pp, rxq);
 		mvneta_rxq_fill(pp, rxq, rxq->size);
 	} else {
@@ -3146,10 +3150,27 @@
 	return 0;
 }
 
+static int mvneta_comphy_init(struct mvneta_port *pp)
+{
+	int ret;
+
+	if (!pp->comphy)
+		return 0;
+
+	ret = phy_set_mode_ext(pp->comphy, PHY_MODE_ETHERNET,
+			       pp->phy_interface);
+	if (ret)
+		return ret;
+
+	return phy_power_on(pp->comphy);
+}
+
 static void mvneta_start_dev(struct mvneta_port *pp)
 {
 	int cpu;
 
+	WARN_ON(mvneta_comphy_init(pp));
+
 	mvneta_max_rx_size_set(pp, pp->pkt_size);
 	mvneta_txq_max_tx_size_set(pp, pp->pkt_size);
 
@@ -3212,6 +3233,8 @@
 
 	mvneta_tx_reset(pp);
 	mvneta_rx_reset(pp);
+
+	WARN_ON(phy_power_off(pp->comphy));
 }
 
 static void mvneta_percpu_enable(void *arg)
@@ -3334,9 +3357,12 @@
 	return 0;
 }
 
-static void mvneta_validate(struct net_device *ndev, unsigned long *supported,
+static void mvneta_validate(struct phylink_config *config,
+			    unsigned long *supported,
 			    struct phylink_link_state *state)
 {
+	struct net_device *ndev = to_net_dev(config->dev);
+	struct mvneta_port *pp = netdev_priv(ndev);
 	__ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
 
 	/* We only support QSGMII, SGMII, 802.3z and RGMII modes */
@@ -3355,9 +3381,16 @@
 
 	/* Asymmetric pause is unsupported */
 	phylink_set(mask, Pause);
+
 	/* Half-duplex at speeds higher than 100Mbit is unsupported */
-	phylink_set(mask, 1000baseT_Full);
-	phylink_set(mask, 1000baseX_Full);
+	if (pp->comphy || state->interface != PHY_INTERFACE_MODE_2500BASEX) {
+		phylink_set(mask, 1000baseT_Full);
+		phylink_set(mask, 1000baseX_Full);
+	}
+	if (pp->comphy || state->interface == PHY_INTERFACE_MODE_2500BASEX) {
+		phylink_set(mask, 2500baseT_Full);
+		phylink_set(mask, 2500baseX_Full);
+	}
 
 	if (!phy_interface_mode_is_8023z(state->interface)) {
 		/* 10M and 100M are only supported in non-802.3z mode */
@@ -3371,18 +3404,26 @@
 		   __ETHTOOL_LINK_MODE_MASK_NBITS);
 	bitmap_and(state->advertising, state->advertising, mask,
 		   __ETHTOOL_LINK_MODE_MASK_NBITS);
+
+	/* We can only operate at 2500BaseX or 1000BaseX.  If requested
+	 * to advertise both, only report advertising at 2500BaseX.
+	 */
+	phylink_helper_basex_speed(state);
 }
 
-static int mvneta_mac_link_state(struct net_device *ndev,
+static int mvneta_mac_link_state(struct phylink_config *config,
 				 struct phylink_link_state *state)
 {
+	struct net_device *ndev = to_net_dev(config->dev);
 	struct mvneta_port *pp = netdev_priv(ndev);
 	u32 gmac_stat;
 
 	gmac_stat = mvreg_read(pp, MVNETA_GMAC_STATUS);
 
 	if (gmac_stat & MVNETA_GMAC_SPEED_1000)
-		state->speed = SPEED_1000;
+		state->speed =
+			state->interface == PHY_INTERFACE_MODE_2500BASEX ?
+			SPEED_2500 : SPEED_1000;
 	else if (gmac_stat & MVNETA_GMAC_SPEED_100)
 		state->speed = SPEED_100;
 	else
@@ -3401,8 +3442,9 @@
 	return 1;
 }
 
-static void mvneta_mac_an_restart(struct net_device *ndev)
+static void mvneta_mac_an_restart(struct phylink_config *config)
 {
+	struct net_device *ndev = to_net_dev(config->dev);
 	struct mvneta_port *pp = netdev_priv(ndev);
 	u32 gmac_an = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
 
@@ -3412,18 +3454,21 @@
 		    gmac_an & ~MVNETA_GMAC_INBAND_RESTART_AN);
 }
 
-static void mvneta_mac_config(struct net_device *ndev, unsigned int mode,
-	const struct phylink_link_state *state)
+static void mvneta_mac_config(struct phylink_config *config, unsigned int mode,
+			      const struct phylink_link_state *state)
 {
+	struct net_device *ndev = to_net_dev(config->dev);
 	struct mvneta_port *pp = netdev_priv(ndev);
 	u32 new_ctrl0, gmac_ctrl0 = mvreg_read(pp, MVNETA_GMAC_CTRL_0);
 	u32 new_ctrl2, gmac_ctrl2 = mvreg_read(pp, MVNETA_GMAC_CTRL_2);
+	u32 new_ctrl4, gmac_ctrl4 = mvreg_read(pp, MVNETA_GMAC_CTRL_4);
 	u32 new_clk, gmac_clk = mvreg_read(pp, MVNETA_GMAC_CLOCK_DIVIDER);
 	u32 new_an, gmac_an = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
 
 	new_ctrl0 = gmac_ctrl0 & ~MVNETA_GMAC0_PORT_1000BASE_X;
 	new_ctrl2 = gmac_ctrl2 & ~(MVNETA_GMAC2_INBAND_AN_ENABLE |
 				   MVNETA_GMAC2_PORT_RESET);
+	new_ctrl4 = gmac_ctrl4 & ~(MVNETA_GMAC4_SHORT_PREAMBLE_ENABLE);
 	new_clk = gmac_clk & ~MVNETA_GMAC_1MS_CLOCK_ENABLE;
 	new_an = gmac_an & ~(MVNETA_GMAC_INBAND_AN_ENABLE |
 			     MVNETA_GMAC_INBAND_RESTART_AN |
@@ -3456,7 +3501,7 @@
 		if (state->duplex)
 			new_an |= MVNETA_GMAC_CONFIG_FULL_DUPLEX;
 
-		if (state->speed == SPEED_1000)
+		if (state->speed == SPEED_1000 || state->speed == SPEED_2500)
 			new_an |= MVNETA_GMAC_CONFIG_GMII_SPEED;
 		else if (state->speed == SPEED_100)
 			new_an |= MVNETA_GMAC_CONFIG_MII_SPEED;
@@ -3495,10 +3540,29 @@
 			    MVNETA_GMAC_FORCE_LINK_DOWN);
 	}
 
+
+	/* When at 2.5G, the link partner can send frames with shortened
+	 * preambles.
+	 */
+	if (state->speed == SPEED_2500)
+		new_ctrl4 |= MVNETA_GMAC4_SHORT_PREAMBLE_ENABLE;
+
+	if (pp->comphy && pp->phy_interface != state->interface &&
+	    (state->interface == PHY_INTERFACE_MODE_SGMII ||
+	     state->interface == PHY_INTERFACE_MODE_1000BASEX ||
+	     state->interface == PHY_INTERFACE_MODE_2500BASEX)) {
+		pp->phy_interface = state->interface;
+
+		WARN_ON(phy_power_off(pp->comphy));
+		WARN_ON(mvneta_comphy_init(pp));
+	}
+
 	if (new_ctrl0 != gmac_ctrl0)
 		mvreg_write(pp, MVNETA_GMAC_CTRL_0, new_ctrl0);
 	if (new_ctrl2 != gmac_ctrl2)
 		mvreg_write(pp, MVNETA_GMAC_CTRL_2, new_ctrl2);
+	if (new_ctrl4 != gmac_ctrl4)
+		mvreg_write(pp, MVNETA_GMAC_CTRL_4, new_ctrl4);
 	if (new_clk != gmac_clk)
 		mvreg_write(pp, MVNETA_GMAC_CLOCK_DIVIDER, new_clk);
 	if (new_an != gmac_an)
@@ -3523,9 +3587,10 @@
 	mvreg_write(pp, MVNETA_LPI_CTRL_1, lpi_ctl1);
 }
 
-static void mvneta_mac_link_down(struct net_device *ndev, unsigned int mode,
-				 phy_interface_t interface)
+static void mvneta_mac_link_down(struct phylink_config *config,
+				 unsigned int mode, phy_interface_t interface)
 {
+	struct net_device *ndev = to_net_dev(config->dev);
 	struct mvneta_port *pp = netdev_priv(ndev);
 	u32 val;
 
@@ -3542,10 +3607,11 @@
 	mvneta_set_eee(pp, false);
 }
 
-static void mvneta_mac_link_up(struct net_device *ndev, unsigned int mode,
+static void mvneta_mac_link_up(struct phylink_config *config, unsigned int mode,
 			       phy_interface_t interface,
 			       struct phy_device *phy)
 {
+	struct net_device *ndev = to_net_dev(config->dev);
 	struct mvneta_port *pp = netdev_priv(ndev);
 	u32 val;
 
@@ -3749,7 +3815,6 @@
 	int ret;
 
 	pp->pkt_size = MVNETA_RX_PKT_SIZE(pp->dev->mtu);
-	pp->frag_size = PAGE_SIZE;
 
 	ret = mvneta_setup_rxqs(pp);
 	if (ret)
@@ -3792,9 +3857,6 @@
 			goto err_free_online_hp;
 	}
 
-	/* In default link is down */
-	netif_carrier_off(pp->dev);
-
 	ret = mvneta_mdio_probe(pp);
 	if (ret < 0) {
 		netdev_err(dev, "cannot probe MDIO bus\n");
@@ -4240,8 +4302,7 @@
 
 	/* The Armada 37x documents do not give limits for this other than
 	 * it being an 8-bit register. */
-	if (eee->tx_lpi_enabled &&
-	    (eee->tx_lpi_timer < 0 || eee->tx_lpi_timer > 255))
+	if (eee->tx_lpi_enabled && eee->tx_lpi_timer > 255)
 		return -EINVAL;
 
 	lpi_ctl0 = mvreg_read(pp, MVNETA_LPI_CTRL_0);
@@ -4397,7 +4458,7 @@
 	if (phy_mode == PHY_INTERFACE_MODE_QSGMII)
 		mvreg_write(pp, MVNETA_SERDES_CFG, MVNETA_QSGMII_SERDES_PROTO);
 	else if (phy_mode == PHY_INTERFACE_MODE_SGMII ||
-		 phy_mode == PHY_INTERFACE_MODE_1000BASEX)
+		 phy_interface_mode_is_8023z(phy_mode))
 		mvreg_write(pp, MVNETA_SERDES_CFG, MVNETA_SGMII_SERDES_PROTO);
 	else if (!phy_interface_mode_is_rgmii(phy_mode))
 		return -EINVAL;
@@ -4408,12 +4469,12 @@
 /* Device initialization routine */
 static int mvneta_probe(struct platform_device *pdev)
 {
-	struct resource *res;
 	struct device_node *dn = pdev->dev.of_node;
 	struct device_node *bm_node;
 	struct mvneta_port *pp;
 	struct net_device *dev;
 	struct phylink *phylink;
+	struct phy *comphy;
 	const char *dt_mac_addr;
 	char hw_mac_addr[ETH_ALEN];
 	const char *mac_from;
@@ -4422,15 +4483,14 @@
 	int err;
 	int cpu;
 
-	dev = alloc_etherdev_mqs(sizeof(struct mvneta_port), txq_number, rxq_number);
+	dev = devm_alloc_etherdev_mqs(&pdev->dev, sizeof(struct mvneta_port),
+				      txq_number, rxq_number);
 	if (!dev)
 		return -ENOMEM;
 
 	dev->irq = irq_of_parse_and_map(dn, 0);
-	if (dev->irq == 0) {
-		err = -EINVAL;
-		goto err_free_netdev;
-	}
+	if (dev->irq == 0)
+		return -EINVAL;
 
 	phy_mode = of_get_phy_mode(dn);
 	if (phy_mode < 0) {
@@ -4439,8 +4499,22 @@
 		goto err_free_irq;
 	}
 
-	phylink = phylink_create(dev, pdev->dev.fwnode, phy_mode,
-				 &mvneta_phylink_ops);
+	comphy = devm_of_phy_get(&pdev->dev, dn, NULL);
+	if (comphy == ERR_PTR(-EPROBE_DEFER)) {
+		err = -EPROBE_DEFER;
+		goto err_free_irq;
+	} else if (IS_ERR(comphy)) {
+		comphy = NULL;
+	}
+
+	pp = netdev_priv(dev);
+	spin_lock_init(&pp->lock);
+
+	pp->phylink_config.dev = &dev->dev;
+	pp->phylink_config.type = PHYLINK_NETDEV;
+
+	phylink = phylink_create(&pp->phylink_config, pdev->dev.fwnode,
+				 phy_mode, &mvneta_phylink_ops);
 	if (IS_ERR(phylink)) {
 		err = PTR_ERR(phylink);
 		goto err_free_irq;
@@ -4452,9 +4526,8 @@
 
 	dev->ethtool_ops = &mvneta_eth_tool_ops;
 
-	pp = netdev_priv(dev);
-	spin_lock_init(&pp->lock);
 	pp->phylink = phylink;
+	pp->comphy = comphy;
 	pp->phy_interface = phy_mode;
 	pp->dn = dn;
 
@@ -4479,8 +4552,7 @@
 	if (!IS_ERR(pp->clk_bus))
 		clk_prepare_enable(pp->clk_bus);
 
-	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	pp->base = devm_ioremap_resource(&pdev->dev, res);
+	pp->base = devm_platform_ioremap_resource(pdev, 0);
 	if (IS_ERR(pp->base)) {
 		err = PTR_ERR(pp->base);
 		goto err_clk;
@@ -4501,9 +4573,9 @@
 	}
 
 	dt_mac_addr = of_get_mac_address(dn);
-	if (dt_mac_addr) {
+	if (!IS_ERR(dt_mac_addr)) {
 		mac_from = "device tree";
-		memcpy(dev->dev_addr, dt_mac_addr, ETH_ALEN);
+		ether_addr_copy(dev->dev_addr, dt_mac_addr);
 	} else {
 		mvneta_get_mac_addr(pp, hw_mac_addr);
 		if (is_valid_ether_addr(hw_mac_addr)) {
@@ -4597,7 +4669,8 @@
 		}
 	}
 
-	dev->features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_TSO;
+	dev->features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
+			NETIF_F_TSO | NETIF_F_RXCSUM;
 	dev->hw_features |= dev->features;
 	dev->vlan_features |= dev->features;
 	dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
@@ -4611,7 +4684,7 @@
 	err = register_netdev(dev);
 	if (err < 0) {
 		dev_err(&pdev->dev, "failed to register\n");
-		goto err_free_stats;
+		goto err_netdev;
 	}
 
 	netdev_info(dev, "Using %s mac address %pM\n", mac_from,
@@ -4622,14 +4695,12 @@
 	return 0;
 
 err_netdev:
-	unregister_netdev(dev);
 	if (pp->bm_priv) {
 		mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_long, 1 << pp->id);
 		mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_short,
 				       1 << pp->id);
 		mvneta_bm_put(pp->bm_priv);
 	}
-err_free_stats:
 	free_percpu(pp->stats);
 err_free_ports:
 	free_percpu(pp->ports);
@@ -4641,8 +4712,6 @@
 		phylink_destroy(pp->phylink);
 err_free_irq:
 	irq_dispose_mapping(dev->irq);
-err_free_netdev:
-	free_netdev(dev);
 	return err;
 }
 
@@ -4659,7 +4728,6 @@
 	free_percpu(pp->stats);
 	irq_dispose_mapping(dev->irq);
 	phylink_destroy(pp->phylink);
-	free_netdev(dev);
 
 	if (pp->bm_priv) {
 		mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_long, 1 << pp->id);
diff --git a/drivers/net/ethernet/marvell/mvneta_bm.c b/drivers/net/ethernet/marvell/mvneta_bm.c
index de468e1..46c942e 100644
--- a/drivers/net/ethernet/marvell/mvneta_bm.c
+++ b/drivers/net/ethernet/marvell/mvneta_bm.c
@@ -190,7 +190,7 @@
 			SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
 		hwbm_pool->construct = mvneta_bm_construct;
 		hwbm_pool->priv = new_pool;
-		spin_lock_init(&hwbm_pool->lock);
+		mutex_init(&hwbm_pool->buf_lock);
 
 		/* Create new pool */
 		err = mvneta_bm_pool_create(priv, new_pool);
@@ -201,7 +201,7 @@
 		}
 
 		/* Allocate buffers for this pool */
-		num = hwbm_pool_add(hwbm_pool, hwbm_pool->size, GFP_ATOMIC);
+		num = hwbm_pool_add(hwbm_pool, hwbm_pool->size);
 		if (num != hwbm_pool->size) {
 			WARN(1, "pool %d: %d of %d allocated\n",
 			     new_pool->id, num, hwbm_pool->size);
@@ -411,15 +411,13 @@
 {
 	struct device_node *dn = pdev->dev.of_node;
 	struct mvneta_bm *priv;
-	struct resource *res;
 	int err;
 
 	priv = devm_kzalloc(&pdev->dev, sizeof(struct mvneta_bm), GFP_KERNEL);
 	if (!priv)
 		return -ENOMEM;
 
-	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	priv->reg_base = devm_ioremap_resource(&pdev->dev, res);
+	priv->reg_base = devm_platform_ioremap_resource(pdev, 0);
 	if (IS_ERR(priv->reg_base))
 		return PTR_ERR(priv->reg_base);
 
diff --git a/drivers/net/ethernet/marvell/mvneta_bm.h b/drivers/net/ethernet/marvell/mvneta_bm.h
index c8425d3..e47783c 100644
--- a/drivers/net/ethernet/marvell/mvneta_bm.h
+++ b/drivers/net/ethernet/marvell/mvneta_bm.h
@@ -160,16 +160,23 @@
 			     (bm_pool->id << MVNETA_BM_POOL_ACCESS_OFFS));
 }
 #else
-void mvneta_bm_pool_destroy(struct mvneta_bm *priv,
-			    struct mvneta_bm_pool *bm_pool, u8 port_map) {}
-void mvneta_bm_bufs_free(struct mvneta_bm *priv, struct mvneta_bm_pool *bm_pool,
-			 u8 port_map) {}
-int mvneta_bm_construct(struct hwbm_pool *hwbm_pool, void *buf) { return 0; }
-int mvneta_bm_pool_refill(struct mvneta_bm *priv,
-			  struct mvneta_bm_pool *bm_pool) {return 0; }
-struct mvneta_bm_pool *mvneta_bm_pool_use(struct mvneta_bm *priv, u8 pool_id,
-					  enum mvneta_bm_type type, u8 port_id,
-					  int pkt_size) { return NULL; }
+static inline void mvneta_bm_pool_destroy(struct mvneta_bm *priv,
+					  struct mvneta_bm_pool *bm_pool,
+					  u8 port_map) {}
+static inline void mvneta_bm_bufs_free(struct mvneta_bm *priv,
+				       struct mvneta_bm_pool *bm_pool,
+				       u8 port_map) {}
+static inline int mvneta_bm_construct(struct hwbm_pool *hwbm_pool, void *buf)
+{ return 0; }
+static inline int mvneta_bm_pool_refill(struct mvneta_bm *priv,
+					struct mvneta_bm_pool *bm_pool)
+{ return 0; }
+static inline struct mvneta_bm_pool *mvneta_bm_pool_use(struct mvneta_bm *priv,
+							u8 pool_id,
+							enum mvneta_bm_type type,
+							u8 port_id,
+							int pkt_size)
+{ return NULL; }
 
 static inline void mvneta_bm_pool_put_bp(struct mvneta_bm *priv,
 					 struct mvneta_bm_pool *bm_pool,
@@ -178,7 +185,8 @@
 static inline u32 mvneta_bm_pool_get_bp(struct mvneta_bm *priv,
 					struct mvneta_bm_pool *bm_pool)
 { return 0; }
-struct mvneta_bm *mvneta_bm_get(struct device_node *node) { return NULL; }
-void mvneta_bm_put(struct mvneta_bm *priv) {}
+static inline struct mvneta_bm *mvneta_bm_get(struct device_node *node)
+{ return NULL; }
+static inline void mvneta_bm_put(struct mvneta_bm *priv) {}
 #endif /* CONFIG_MVNETA_BM */
 #endif
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2.h b/drivers/net/ethernet/marvell/mvpp2/mvpp2.h
index 67b9e81..543a310 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2.h
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2.h
@@ -14,6 +14,7 @@
 #include <linux/netdevice.h>
 #include <linux/phy.h>
 #include <linux/phylink.h>
+#include <net/flow_offload.h>
 
 /* Fifo Registers */
 #define MVPP2_RX_DATA_FIFO_SIZE_REG(port)	(0x00 + 4 * (port))
@@ -101,6 +102,7 @@
 #define MVPP2_CLS_FLOW_TBL1_REG			0x1828
 #define     MVPP2_CLS_FLOW_TBL1_N_FIELDS_MASK	0x7
 #define     MVPP2_CLS_FLOW_TBL1_N_FIELDS(x)	(x)
+#define     MVPP2_CLS_FLOW_TBL1_LU_TYPE(lu)	(((lu) & 0x3f) << 3)
 #define     MVPP2_CLS_FLOW_TBL1_PRIO_MASK	0x3f
 #define     MVPP2_CLS_FLOW_TBL1_PRIO(x)		((x) << 9)
 #define     MVPP2_CLS_FLOW_TBL1_SEQ_MASK	0x7
@@ -123,13 +125,18 @@
 #define MVPP22_CLS_C2_TCAM_DATA2		0x1b18
 #define MVPP22_CLS_C2_TCAM_DATA3		0x1b1c
 #define MVPP22_CLS_C2_TCAM_DATA4		0x1b20
+#define     MVPP22_CLS_C2_LU_TYPE(lu)		((lu) & 0x3f)
 #define     MVPP22_CLS_C2_PORT_ID(port)		((port) << 8)
+#define     MVPP22_CLS_C2_PORT_MASK		(0xff << 8)
+#define MVPP22_CLS_C2_TCAM_INV			0x1b24
+#define     MVPP22_CLS_C2_TCAM_INV_BIT		BIT(31)
 #define MVPP22_CLS_C2_HIT_CTR			0x1b50
 #define MVPP22_CLS_C2_ACT			0x1b60
 #define     MVPP22_CLS_C2_ACT_RSS_EN(act)	(((act) & 0x3) << 19)
 #define     MVPP22_CLS_C2_ACT_FWD(act)		(((act) & 0x7) << 13)
 #define     MVPP22_CLS_C2_ACT_QHIGH(act)	(((act) & 0x3) << 11)
 #define     MVPP22_CLS_C2_ACT_QLOW(act)		(((act) & 0x3) << 9)
+#define     MVPP22_CLS_C2_ACT_COLOR(act)	((act) & 0x7)
 #define MVPP22_CLS_C2_ATTR0			0x1b64
 #define     MVPP22_CLS_C2_ATTR0_QHIGH(qh)	(((qh) & 0x1f) << 24)
 #define     MVPP22_CLS_C2_ATTR0_QHIGH_MASK	0x1f
@@ -141,6 +148,8 @@
 #define MVPP22_CLS_C2_ATTR2			0x1b6c
 #define     MVPP22_CLS_C2_ATTR2_RSS_EN		BIT(30)
 #define MVPP22_CLS_C2_ATTR3			0x1b70
+#define MVPP22_CLS_C2_TCAM_CTRL			0x1b90
+#define     MVPP22_CLS_C2_TCAM_BYPASS_FIFO	BIT(0)
 
 /* Descriptor Manager Top Registers */
 #define MVPP2_RXQ_NUM_REG			0x2040
@@ -253,7 +262,8 @@
 #define     MVPP2_ISR_ENABLE_INTERRUPT(mask)	((mask) & 0xffff)
 #define     MVPP2_ISR_DISABLE_INTERRUPT(mask)	(((mask) << 16) & 0xffff0000)
 #define MVPP2_ISR_RX_TX_CAUSE_REG(port)		(0x5480 + 4 * (port))
-#define     MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK	0xffff
+#define     MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK(version) \
+					((version) == MVPP21 ? 0xffff : 0xff)
 #define     MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK	0xff0000
 #define     MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_OFFSET	16
 #define     MVPP2_CAUSE_RX_FIFO_OVERRUN_MASK	BIT(24)
@@ -319,8 +329,26 @@
 #define     MVPP22_BM_ADDR_HIGH_VIRT_RLS_MASK	0xff00
 #define     MVPP22_BM_ADDR_HIGH_VIRT_RLS_SHIFT	8
 
+/* Packet Processor per-port counters */
+#define MVPP2_OVERRUN_ETH_DROP			0x7000
+#define MVPP2_CLS_ETH_DROP			0x7020
+
 /* Hit counters registers */
 #define MVPP2_CTRS_IDX				0x7040
+#define     MVPP22_CTRS_TX_CTR(port, txq)	((txq) | ((port) << 3) | BIT(7))
+#define MVPP2_TX_DESC_ENQ_CTR			0x7100
+#define MVPP2_TX_DESC_ENQ_TO_DDR_CTR		0x7104
+#define MVPP2_TX_BUFF_ENQ_TO_DDR_CTR		0x7108
+#define MVPP2_TX_DESC_ENQ_HW_FWD_CTR		0x710c
+#define MVPP2_RX_DESC_ENQ_CTR			0x7120
+#define MVPP2_TX_PKTS_DEQ_CTR			0x7130
+#define MVPP2_TX_PKTS_FULL_QUEUE_DROP_CTR	0x7200
+#define MVPP2_TX_PKTS_EARLY_DROP_CTR		0x7204
+#define MVPP2_TX_PKTS_BM_DROP_CTR		0x7208
+#define MVPP2_TX_PKTS_BM_MC_DROP_CTR		0x720c
+#define MVPP2_RX_PKTS_FULL_QUEUE_DROP_CTR	0x7220
+#define MVPP2_RX_PKTS_EARLY_DROP_CTR		0x7224
+#define MVPP2_RX_PKTS_BM_DROP_CTR		0x7228
 #define MVPP2_CLS_DEC_TBL_HIT_CTR		0x7700
 #define MVPP2_CLS_FLOW_TBL_HIT_CTR		0x7704
 
@@ -330,6 +358,7 @@
 #define     MVPP2_TXP_SCHED_ENQ_MASK		0xff
 #define     MVPP2_TXP_SCHED_DISQ_OFFSET		8
 #define MVPP2_TXP_SCHED_CMD_1_REG		0x8010
+#define MVPP2_TXP_SCHED_FIXED_PRIO_REG		0x8014
 #define MVPP2_TXP_SCHED_PERIOD_REG		0x8018
 #define MVPP2_TXP_SCHED_MTU_REG			0x801c
 #define     MVPP2_TXP_MTU_MAX			0x7FFFF
@@ -387,7 +416,7 @@
 #define     MVPP2_GMAC_IN_BAND_AUTONEG		BIT(2)
 #define     MVPP2_GMAC_IN_BAND_AUTONEG_BYPASS	BIT(3)
 #define     MVPP2_GMAC_IN_BAND_RESTART_AN	BIT(4)
-#define     MVPP2_GMAC_CONFIG_MII_SPEED	BIT(5)
+#define     MVPP2_GMAC_CONFIG_MII_SPEED		BIT(5)
 #define     MVPP2_GMAC_CONFIG_GMII_SPEED	BIT(6)
 #define     MVPP2_GMAC_AN_SPEED_EN		BIT(7)
 #define     MVPP2_GMAC_FC_ADV_EN		BIT(9)
@@ -400,8 +429,8 @@
 #define     MVPP2_GMAC_STATUS0_GMII_SPEED	BIT(1)
 #define     MVPP2_GMAC_STATUS0_MII_SPEED	BIT(2)
 #define     MVPP2_GMAC_STATUS0_FULL_DUPLEX	BIT(3)
-#define     MVPP2_GMAC_STATUS0_RX_PAUSE		BIT(6)
-#define     MVPP2_GMAC_STATUS0_TX_PAUSE		BIT(7)
+#define     MVPP2_GMAC_STATUS0_RX_PAUSE		BIT(4)
+#define     MVPP2_GMAC_STATUS0_TX_PAUSE		BIT(5)
 #define     MVPP2_GMAC_STATUS0_AN_COMPLETE	BIT(11)
 #define MVPP2_GMAC_PORT_FIFO_CFG_1_REG		0x1c
 #define     MVPP2_GMAC_TX_FIFO_MIN_TH_OFFS	6
@@ -428,6 +457,8 @@
 #define MVPP22_XLG_CTRL0_REG			0x100
 #define     MVPP22_XLG_CTRL0_PORT_EN		BIT(0)
 #define     MVPP22_XLG_CTRL0_MAC_RESET_DIS	BIT(1)
+#define     MVPP22_XLG_CTRL0_FORCE_LINK_DOWN	BIT(2)
+#define     MVPP22_XLG_CTRL0_FORCE_LINK_PASS	BIT(3)
 #define     MVPP22_XLG_CTRL0_RX_FLOW_CTRL_EN	BIT(7)
 #define     MVPP22_XLG_CTRL0_TX_FLOW_CTRL_EN	BIT(8)
 #define     MVPP22_XLG_CTRL0_MIB_CNT_DIS	BIT(14)
@@ -479,6 +510,7 @@
 /* XPCS registers. PPv2.2 only */
 #define MVPP22_XPCS_BASE(port)			(0x7400 + (port) * 0x1000)
 #define MVPP22_XPCS_CFG0			0x0
+#define     MVPP22_XPCS_CFG0_RESET_DIS		BIT(0)
 #define     MVPP22_XPCS_CFG0_PCS_MODE(n)	((n) << 3)
 #define     MVPP22_XPCS_CFG0_ACTIVE_LANE(n)	((n) << 5)
 
@@ -547,8 +579,8 @@
 #define MVPP2_MAX_TSO_SEGS		300
 #define MVPP2_MAX_SKB_DESCS		(MVPP2_MAX_TSO_SEGS * 2 + MAX_SKB_FRAGS)
 
-/* Dfault number of RXQs in use */
-#define MVPP2_DEFAULT_RXQ		1
+/* Max number of RXQs per port */
+#define MVPP2_PORT_MAX_RXQ		32
 
 /* Max number of Rx descriptors */
 #define MVPP2_MAX_RXD_MAX		1024
@@ -605,7 +637,14 @@
 #define MVPP2_BIT_TO_WORD(bit)		((bit) / 32)
 #define MVPP2_BIT_IN_WORD(bit)		((bit) % 32)
 
+#define MVPP2_N_PRS_FLOWS		52
+#define MVPP2_N_RFS_ENTRIES_PER_FLOW	4
+
+/* There are 7 supported high-level flows */
+#define MVPP2_N_RFS_RULES		(MVPP2_N_RFS_ENTRIES_PER_FLOW * 7)
+
 /* RSS constants */
+#define MVPP22_N_RSS_TABLES		8
 #define MVPP22_RSS_TABLE_ENTRIES	32
 
 /* IPv6 max L3 address size */
@@ -613,6 +652,7 @@
 
 /* Port flags */
 #define MVPP2_F_LOOPBACK		BIT(0)
+#define MVPP2_F_DT_COMPAT		BIT(1)
 
 /* Marvell tag types */
 enum mvpp2_tag_type {
@@ -643,6 +683,7 @@
 #define MVPP2_BM_SHORT_BUF_NUM		2048
 #define MVPP2_BM_POOL_SIZE_MAX		(16*1024 - MVPP2_BM_POOL_PTR_ALIGN/4)
 #define MVPP2_BM_POOL_PTR_ALIGN		128
+#define MVPP2_BM_MAX_POOLS		8
 
 /* BM cookie (32 bits) definition */
 #define MVPP2_BM_COOKIE_POOL_OFFS	8
@@ -662,7 +703,7 @@
 #define MVPP21_ADDR_SPACE_SZ		0
 #define MVPP22_ADDR_SPACE_SZ		SZ_64K
 
-#define MVPP2_MAX_THREADS		8
+#define MVPP2_MAX_THREADS		9
 #define MVPP2_MAX_QVECS			MVPP2_MAX_THREADS
 
 /* GMAC MIB Counters register definitions */
@@ -704,6 +745,11 @@
 #define MVPP2_DESC_DMA_MASK	DMA_BIT_MASK(40)
 
 /* Definitions */
+struct mvpp2_dbgfs_entries;
+
+struct mvpp2_rss_table {
+	u32 indir[MVPP22_RSS_TABLE_ENTRIES];
+};
 
 /* Shared Packet Processor resources */
 struct mvpp2 {
@@ -734,9 +780,17 @@
 	int port_count;
 	struct mvpp2_port *port_list[MVPP2_MAX_PORTS];
 
+	/* Number of Tx threads used */
+	unsigned int nthreads;
+	/* Map of threads needing locking */
+	unsigned long lock_map;
+
 	/* Aggregated TXQs */
 	struct mvpp2_tx_queue *aggr_txqs;
 
+	/* Are we using page_pool with per-cpu pools? */
+	int percpu_pools;
+
 	/* BM pools */
 	struct mvpp2_bm_pool *bm_pools;
 
@@ -760,6 +814,12 @@
 
 	/* Debugfs root entry */
 	struct dentry *dbgfs_dir;
+
+	/* Debugfs entries private data */
+	struct mvpp2_dbgfs_entries *dbgfs_entries;
+
+	/* RSS Indirection tables */
+	struct mvpp2_rss_table *rss_tables[MVPP22_N_RSS_TABLES];
 };
 
 struct mvpp2_pcpu_stats {
@@ -773,9 +833,8 @@
 /* Per-CPU port control */
 struct mvpp2_port_pcpu {
 	struct hrtimer tx_done_timer;
+	struct net_device *dev;
 	bool timer_scheduled;
-	/* Tasklet for egress finalization */
-	struct tasklet_struct tx_done_tasklet;
 };
 
 struct mvpp2_queue_vector {
@@ -788,13 +847,45 @@
 	int nrxqs;
 	u32 pending_cause_rx;
 	struct mvpp2_port *port;
+	struct cpumask *mask;
+};
+
+/* Internal representation of a Flow Steering rule */
+struct mvpp2_rfs_rule {
+	/* Rule location inside the flow*/
+	int loc;
+
+	/* Flow type, such as TCP_V4_FLOW, IP6_FLOW, etc. */
+	int flow_type;
+
+	/* Index of the C2 TCAM entry handling this rule */
+	int c2_index;
+
+	/* Header fields that need to be extracted to match this flow */
+	u16 hek_fields;
+
+	/* CLS engine : only c2 is supported for now. */
+	u8 engine;
+
+	/* TCAM key and mask for C2-based steering. These fields should be
+	 * encapsulated in a union should we add more engines.
+	 */
+	u64 c2_tcam;
+	u64 c2_tcam_mask;
+
+	struct flow_rule *flow;
+};
+
+struct mvpp2_ethtool_fs {
+	struct mvpp2_rfs_rule rule;
+	struct ethtool_rxnfc rxnfc;
 };
 
 struct mvpp2_port {
 	u8 id;
 
 	/* Index of the port from the "group of ports" complex point
-	 * of view
+	 * of view. This is specific to PPv2.2.
 	 */
 	int gop_id;
 
@@ -823,6 +914,12 @@
 	/* Per-CPU port control */
 	struct mvpp2_port_pcpu __percpu *pcpu;
 
+	/* Protect the BM refills and the Tx paths when a thread is used on more
+	 * than a single CPU.
+	 */
+	spinlock_t bm_lock[MVPP2_MAX_THREADS];
+	spinlock_t tx_lock[MVPP2_MAX_THREADS];
+
 	/* Flags */
 	unsigned long flags;
 
@@ -839,6 +936,7 @@
 
 	phy_interface_t phy_interface;
 	struct phylink *phylink;
+	struct phylink_config phylink_config;
 	struct phy *comphy;
 
 	struct mvpp2_bm_pool *pool_long;
@@ -853,8 +951,14 @@
 
 	u32 tx_time_coal;
 
-	/* RSS indirection table */
-	u32 indir[MVPP22_RSS_TABLE_ENTRIES];
+	/* List of steering rules active on that port */
+	struct mvpp2_ethtool_fs *rfs_rules[MVPP2_N_RFS_ENTRIES_PER_FLOW];
+	int n_rfs_rules;
+
+	/* Each port has its own view of the rss contexts, so that it can number
+	 * them from 0
+	 */
+	int rss_ctx[MVPP22_N_RSS_TABLES];
 };
 
 /* The mvpp2_tx_desc and mvpp2_rx_desc structures describe the
@@ -969,7 +1073,7 @@
 
 /* Per-CPU Tx queue control */
 struct mvpp2_txq_pcpu {
-	int cpu;
+	unsigned int thread;
 
 	/* Number of Tx DMA descriptors in the descriptor ring */
 	int size;
@@ -1095,14 +1199,6 @@
 void mvpp2_write(struct mvpp2 *priv, u32 offset, u32 data);
 u32 mvpp2_read(struct mvpp2 *priv, u32 offset);
 
-u32 mvpp2_read_relaxed(struct mvpp2 *priv, u32 offset);
-
-void mvpp2_percpu_write(struct mvpp2 *priv, int cpu, u32 offset, u32 data);
-u32 mvpp2_percpu_read(struct mvpp2 *priv, int cpu, u32 offset);
-
-void mvpp2_percpu_write_relaxed(struct mvpp2 *priv, int cpu, u32 offset,
-				u32 data);
-
 void mvpp2_dbgfs_init(struct mvpp2 *priv, const char *name);
 
 void mvpp2_dbgfs_cleanup(struct mvpp2 *priv);
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c
index efdb7a6..35478cb 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c
@@ -22,302 +22,302 @@
 	}							\
 }
 
-static struct mvpp2_cls_flow cls_flows[MVPP2_N_FLOWS] = {
+static const struct mvpp2_cls_flow cls_flows[MVPP2_N_PRS_FLOWS] = {
 	/* TCP over IPv4 flows, Not fragmented, no vlan tag */
-	MVPP2_DEF_FLOW(TCP_V4_FLOW, MVPP2_FL_IP4_TCP_NF_UNTAG,
+	MVPP2_DEF_FLOW(MVPP22_FLOW_TCP4, MVPP2_FL_IP4_TCP_NF_UNTAG,
 		       MVPP22_CLS_HEK_IP4_5T,
 		       MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4 |
 		       MVPP2_PRS_RI_L4_TCP,
 		       MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
 
-	MVPP2_DEF_FLOW(TCP_V4_FLOW, MVPP2_FL_IP4_TCP_NF_UNTAG,
+	MVPP2_DEF_FLOW(MVPP22_FLOW_TCP4, MVPP2_FL_IP4_TCP_NF_UNTAG,
 		       MVPP22_CLS_HEK_IP4_5T,
 		       MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4_OPT |
 		       MVPP2_PRS_RI_L4_TCP,
 		       MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
 
-	MVPP2_DEF_FLOW(TCP_V4_FLOW, MVPP2_FL_IP4_TCP_NF_UNTAG,
+	MVPP2_DEF_FLOW(MVPP22_FLOW_TCP4, MVPP2_FL_IP4_TCP_NF_UNTAG,
 		       MVPP22_CLS_HEK_IP4_5T,
 		       MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4_OTHER |
 		       MVPP2_PRS_RI_L4_TCP,
 		       MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
 
 	/* TCP over IPv4 flows, Not fragmented, with vlan tag */
-	MVPP2_DEF_FLOW(TCP_V4_FLOW, MVPP2_FL_IP4_TCP_NF_TAG,
-		       MVPP22_CLS_HEK_IP4_5T | MVPP22_CLS_HEK_OPT_VLAN,
+	MVPP2_DEF_FLOW(MVPP22_FLOW_TCP4, MVPP2_FL_IP4_TCP_NF_TAG,
+		       MVPP22_CLS_HEK_IP4_5T | MVPP22_CLS_HEK_TAGGED,
 		       MVPP2_PRS_RI_L3_IP4 | MVPP2_PRS_RI_L4_TCP,
 		       MVPP2_PRS_IP_MASK),
 
-	MVPP2_DEF_FLOW(TCP_V4_FLOW, MVPP2_FL_IP4_TCP_NF_TAG,
-		       MVPP22_CLS_HEK_IP4_5T | MVPP22_CLS_HEK_OPT_VLAN,
+	MVPP2_DEF_FLOW(MVPP22_FLOW_TCP4, MVPP2_FL_IP4_TCP_NF_TAG,
+		       MVPP22_CLS_HEK_IP4_5T | MVPP22_CLS_HEK_TAGGED,
 		       MVPP2_PRS_RI_L3_IP4_OPT | MVPP2_PRS_RI_L4_TCP,
 		       MVPP2_PRS_IP_MASK),
 
-	MVPP2_DEF_FLOW(TCP_V4_FLOW, MVPP2_FL_IP4_TCP_NF_TAG,
-		       MVPP22_CLS_HEK_IP4_5T | MVPP22_CLS_HEK_OPT_VLAN,
+	MVPP2_DEF_FLOW(MVPP22_FLOW_TCP4, MVPP2_FL_IP4_TCP_NF_TAG,
+		       MVPP22_CLS_HEK_IP4_5T | MVPP22_CLS_HEK_TAGGED,
 		       MVPP2_PRS_RI_L3_IP4_OTHER | MVPP2_PRS_RI_L4_TCP,
 		       MVPP2_PRS_IP_MASK),
 
 	/* TCP over IPv4 flows, fragmented, no vlan tag */
-	MVPP2_DEF_FLOW(TCP_V4_FLOW, MVPP2_FL_IP4_TCP_FRAG_UNTAG,
+	MVPP2_DEF_FLOW(MVPP22_FLOW_TCP4, MVPP2_FL_IP4_TCP_FRAG_UNTAG,
 		       MVPP22_CLS_HEK_IP4_2T,
 		       MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4 |
 		       MVPP2_PRS_RI_L4_TCP,
 		       MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
 
-	MVPP2_DEF_FLOW(TCP_V4_FLOW, MVPP2_FL_IP4_TCP_FRAG_UNTAG,
+	MVPP2_DEF_FLOW(MVPP22_FLOW_TCP4, MVPP2_FL_IP4_TCP_FRAG_UNTAG,
 		       MVPP22_CLS_HEK_IP4_2T,
 		       MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4_OPT |
 		       MVPP2_PRS_RI_L4_TCP,
 		       MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
 
-	MVPP2_DEF_FLOW(TCP_V4_FLOW, MVPP2_FL_IP4_TCP_FRAG_UNTAG,
+	MVPP2_DEF_FLOW(MVPP22_FLOW_TCP4, MVPP2_FL_IP4_TCP_FRAG_UNTAG,
 		       MVPP22_CLS_HEK_IP4_2T,
 		       MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4_OTHER |
 		       MVPP2_PRS_RI_L4_TCP,
 		       MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
 
 	/* TCP over IPv4 flows, fragmented, with vlan tag */
-	MVPP2_DEF_FLOW(TCP_V4_FLOW, MVPP2_FL_IP4_TCP_FRAG_TAG,
-		       MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_OPT_VLAN,
+	MVPP2_DEF_FLOW(MVPP22_FLOW_TCP4, MVPP2_FL_IP4_TCP_FRAG_TAG,
+		       MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_TAGGED,
 		       MVPP2_PRS_RI_L3_IP4 | MVPP2_PRS_RI_L4_TCP,
 		       MVPP2_PRS_IP_MASK),
 
-	MVPP2_DEF_FLOW(TCP_V4_FLOW, MVPP2_FL_IP4_TCP_FRAG_TAG,
-		       MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_OPT_VLAN,
+	MVPP2_DEF_FLOW(MVPP22_FLOW_TCP4, MVPP2_FL_IP4_TCP_FRAG_TAG,
+		       MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_TAGGED,
 		       MVPP2_PRS_RI_L3_IP4_OPT | MVPP2_PRS_RI_L4_TCP,
 		       MVPP2_PRS_IP_MASK),
 
-	MVPP2_DEF_FLOW(TCP_V4_FLOW, MVPP2_FL_IP4_TCP_FRAG_TAG,
-		       MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_OPT_VLAN,
+	MVPP2_DEF_FLOW(MVPP22_FLOW_TCP4, MVPP2_FL_IP4_TCP_FRAG_TAG,
+		       MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_TAGGED,
 		       MVPP2_PRS_RI_L3_IP4_OTHER | MVPP2_PRS_RI_L4_TCP,
 		       MVPP2_PRS_IP_MASK),
 
 	/* UDP over IPv4 flows, Not fragmented, no vlan tag */
-	MVPP2_DEF_FLOW(UDP_V4_FLOW, MVPP2_FL_IP4_UDP_NF_UNTAG,
+	MVPP2_DEF_FLOW(MVPP22_FLOW_UDP4, MVPP2_FL_IP4_UDP_NF_UNTAG,
 		       MVPP22_CLS_HEK_IP4_5T,
 		       MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4 |
 		       MVPP2_PRS_RI_L4_UDP,
 		       MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
 
-	MVPP2_DEF_FLOW(UDP_V4_FLOW, MVPP2_FL_IP4_UDP_NF_UNTAG,
+	MVPP2_DEF_FLOW(MVPP22_FLOW_UDP4, MVPP2_FL_IP4_UDP_NF_UNTAG,
 		       MVPP22_CLS_HEK_IP4_5T,
 		       MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4_OPT |
 		       MVPP2_PRS_RI_L4_UDP,
 		       MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
 
-	MVPP2_DEF_FLOW(UDP_V4_FLOW, MVPP2_FL_IP4_UDP_NF_UNTAG,
+	MVPP2_DEF_FLOW(MVPP22_FLOW_UDP4, MVPP2_FL_IP4_UDP_NF_UNTAG,
 		       MVPP22_CLS_HEK_IP4_5T,
 		       MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4_OTHER |
 		       MVPP2_PRS_RI_L4_UDP,
 		       MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
 
 	/* UDP over IPv4 flows, Not fragmented, with vlan tag */
-	MVPP2_DEF_FLOW(UDP_V4_FLOW, MVPP2_FL_IP4_UDP_NF_TAG,
-		       MVPP22_CLS_HEK_IP4_5T | MVPP22_CLS_HEK_OPT_VLAN,
+	MVPP2_DEF_FLOW(MVPP22_FLOW_UDP4, MVPP2_FL_IP4_UDP_NF_TAG,
+		       MVPP22_CLS_HEK_IP4_5T | MVPP22_CLS_HEK_TAGGED,
 		       MVPP2_PRS_RI_L3_IP4 | MVPP2_PRS_RI_L4_UDP,
 		       MVPP2_PRS_IP_MASK),
 
-	MVPP2_DEF_FLOW(UDP_V4_FLOW, MVPP2_FL_IP4_UDP_NF_TAG,
-		       MVPP22_CLS_HEK_IP4_5T | MVPP22_CLS_HEK_OPT_VLAN,
+	MVPP2_DEF_FLOW(MVPP22_FLOW_UDP4, MVPP2_FL_IP4_UDP_NF_TAG,
+		       MVPP22_CLS_HEK_IP4_5T | MVPP22_CLS_HEK_TAGGED,
 		       MVPP2_PRS_RI_L3_IP4_OPT | MVPP2_PRS_RI_L4_UDP,
 		       MVPP2_PRS_IP_MASK),
 
-	MVPP2_DEF_FLOW(UDP_V4_FLOW, MVPP2_FL_IP4_UDP_NF_TAG,
-		       MVPP22_CLS_HEK_IP4_5T | MVPP22_CLS_HEK_OPT_VLAN,
+	MVPP2_DEF_FLOW(MVPP22_FLOW_UDP4, MVPP2_FL_IP4_UDP_NF_TAG,
+		       MVPP22_CLS_HEK_IP4_5T | MVPP22_CLS_HEK_TAGGED,
 		       MVPP2_PRS_RI_L3_IP4_OTHER | MVPP2_PRS_RI_L4_UDP,
 		       MVPP2_PRS_IP_MASK),
 
 	/* UDP over IPv4 flows, fragmented, no vlan tag */
-	MVPP2_DEF_FLOW(UDP_V4_FLOW, MVPP2_FL_IP4_UDP_FRAG_UNTAG,
+	MVPP2_DEF_FLOW(MVPP22_FLOW_UDP4, MVPP2_FL_IP4_UDP_FRAG_UNTAG,
 		       MVPP22_CLS_HEK_IP4_2T,
 		       MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4 |
 		       MVPP2_PRS_RI_L4_UDP,
 		       MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
 
-	MVPP2_DEF_FLOW(UDP_V4_FLOW, MVPP2_FL_IP4_UDP_FRAG_UNTAG,
+	MVPP2_DEF_FLOW(MVPP22_FLOW_UDP4, MVPP2_FL_IP4_UDP_FRAG_UNTAG,
 		       MVPP22_CLS_HEK_IP4_2T,
 		       MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4_OPT |
 		       MVPP2_PRS_RI_L4_UDP,
 		       MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
 
-	MVPP2_DEF_FLOW(UDP_V4_FLOW, MVPP2_FL_IP4_UDP_FRAG_UNTAG,
+	MVPP2_DEF_FLOW(MVPP22_FLOW_UDP4, MVPP2_FL_IP4_UDP_FRAG_UNTAG,
 		       MVPP22_CLS_HEK_IP4_2T,
 		       MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4_OTHER |
 		       MVPP2_PRS_RI_L4_UDP,
 		       MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
 
 	/* UDP over IPv4 flows, fragmented, with vlan tag */
-	MVPP2_DEF_FLOW(UDP_V4_FLOW, MVPP2_FL_IP4_UDP_FRAG_TAG,
-		       MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_OPT_VLAN,
+	MVPP2_DEF_FLOW(MVPP22_FLOW_UDP4, MVPP2_FL_IP4_UDP_FRAG_TAG,
+		       MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_TAGGED,
 		       MVPP2_PRS_RI_L3_IP4 | MVPP2_PRS_RI_L4_UDP,
 		       MVPP2_PRS_IP_MASK),
 
-	MVPP2_DEF_FLOW(UDP_V4_FLOW, MVPP2_FL_IP4_UDP_FRAG_TAG,
-		       MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_OPT_VLAN,
+	MVPP2_DEF_FLOW(MVPP22_FLOW_UDP4, MVPP2_FL_IP4_UDP_FRAG_TAG,
+		       MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_TAGGED,
 		       MVPP2_PRS_RI_L3_IP4_OPT | MVPP2_PRS_RI_L4_UDP,
 		       MVPP2_PRS_IP_MASK),
 
-	MVPP2_DEF_FLOW(UDP_V4_FLOW, MVPP2_FL_IP4_UDP_FRAG_TAG,
-		       MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_OPT_VLAN,
+	MVPP2_DEF_FLOW(MVPP22_FLOW_UDP4, MVPP2_FL_IP4_UDP_FRAG_TAG,
+		       MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_TAGGED,
 		       MVPP2_PRS_RI_L3_IP4_OTHER | MVPP2_PRS_RI_L4_UDP,
 		       MVPP2_PRS_IP_MASK),
 
 	/* TCP over IPv6 flows, not fragmented, no vlan tag */
-	MVPP2_DEF_FLOW(TCP_V6_FLOW, MVPP2_FL_IP6_TCP_NF_UNTAG,
+	MVPP2_DEF_FLOW(MVPP22_FLOW_TCP6, MVPP2_FL_IP6_TCP_NF_UNTAG,
 		       MVPP22_CLS_HEK_IP6_5T,
 		       MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP6 |
 		       MVPP2_PRS_RI_L4_TCP,
 		       MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
 
-	MVPP2_DEF_FLOW(TCP_V6_FLOW, MVPP2_FL_IP6_TCP_NF_UNTAG,
+	MVPP2_DEF_FLOW(MVPP22_FLOW_TCP6, MVPP2_FL_IP6_TCP_NF_UNTAG,
 		       MVPP22_CLS_HEK_IP6_5T,
 		       MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP6_EXT |
 		       MVPP2_PRS_RI_L4_TCP,
 		       MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
 
 	/* TCP over IPv6 flows, not fragmented, with vlan tag */
-	MVPP2_DEF_FLOW(TCP_V6_FLOW, MVPP2_FL_IP6_TCP_NF_TAG,
-		       MVPP22_CLS_HEK_IP6_5T | MVPP22_CLS_HEK_OPT_VLAN,
+	MVPP2_DEF_FLOW(MVPP22_FLOW_TCP6, MVPP2_FL_IP6_TCP_NF_TAG,
+		       MVPP22_CLS_HEK_IP6_5T | MVPP22_CLS_HEK_TAGGED,
 		       MVPP2_PRS_RI_L3_IP6 | MVPP2_PRS_RI_L4_TCP,
 		       MVPP2_PRS_IP_MASK),
 
-	MVPP2_DEF_FLOW(TCP_V6_FLOW, MVPP2_FL_IP6_TCP_NF_TAG,
-		       MVPP22_CLS_HEK_IP6_5T | MVPP22_CLS_HEK_OPT_VLAN,
+	MVPP2_DEF_FLOW(MVPP22_FLOW_TCP6, MVPP2_FL_IP6_TCP_NF_TAG,
+		       MVPP22_CLS_HEK_IP6_5T | MVPP22_CLS_HEK_TAGGED,
 		       MVPP2_PRS_RI_L3_IP6_EXT | MVPP2_PRS_RI_L4_TCP,
 		       MVPP2_PRS_IP_MASK),
 
 	/* TCP over IPv6 flows, fragmented, no vlan tag */
-	MVPP2_DEF_FLOW(TCP_V6_FLOW, MVPP2_FL_IP6_TCP_FRAG_UNTAG,
+	MVPP2_DEF_FLOW(MVPP22_FLOW_TCP6, MVPP2_FL_IP6_TCP_FRAG_UNTAG,
 		       MVPP22_CLS_HEK_IP6_2T,
 		       MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP6 |
 		       MVPP2_PRS_RI_IP_FRAG_TRUE | MVPP2_PRS_RI_L4_TCP,
 		       MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
 
-	MVPP2_DEF_FLOW(TCP_V6_FLOW, MVPP2_FL_IP6_TCP_FRAG_UNTAG,
+	MVPP2_DEF_FLOW(MVPP22_FLOW_TCP6, MVPP2_FL_IP6_TCP_FRAG_UNTAG,
 		       MVPP22_CLS_HEK_IP6_2T,
 		       MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP6_EXT |
 		       MVPP2_PRS_RI_IP_FRAG_TRUE | MVPP2_PRS_RI_L4_TCP,
 		       MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
 
 	/* TCP over IPv6 flows, fragmented, with vlan tag */
-	MVPP2_DEF_FLOW(TCP_V6_FLOW, MVPP2_FL_IP6_TCP_FRAG_TAG,
-		       MVPP22_CLS_HEK_IP6_2T | MVPP22_CLS_HEK_OPT_VLAN,
+	MVPP2_DEF_FLOW(MVPP22_FLOW_TCP6, MVPP2_FL_IP6_TCP_FRAG_TAG,
+		       MVPP22_CLS_HEK_IP6_2T | MVPP22_CLS_HEK_TAGGED,
 		       MVPP2_PRS_RI_L3_IP6 | MVPP2_PRS_RI_IP_FRAG_TRUE |
 		       MVPP2_PRS_RI_L4_TCP,
 		       MVPP2_PRS_IP_MASK),
 
-	MVPP2_DEF_FLOW(TCP_V6_FLOW, MVPP2_FL_IP6_TCP_FRAG_TAG,
-		       MVPP22_CLS_HEK_IP6_2T | MVPP22_CLS_HEK_OPT_VLAN,
+	MVPP2_DEF_FLOW(MVPP22_FLOW_TCP6, MVPP2_FL_IP6_TCP_FRAG_TAG,
+		       MVPP22_CLS_HEK_IP6_2T | MVPP22_CLS_HEK_TAGGED,
 		       MVPP2_PRS_RI_L3_IP6_EXT | MVPP2_PRS_RI_IP_FRAG_TRUE |
 		       MVPP2_PRS_RI_L4_TCP,
 		       MVPP2_PRS_IP_MASK),
 
 	/* UDP over IPv6 flows, not fragmented, no vlan tag */
-	MVPP2_DEF_FLOW(UDP_V6_FLOW, MVPP2_FL_IP6_UDP_NF_UNTAG,
+	MVPP2_DEF_FLOW(MVPP22_FLOW_UDP6, MVPP2_FL_IP6_UDP_NF_UNTAG,
 		       MVPP22_CLS_HEK_IP6_5T,
 		       MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP6 |
 		       MVPP2_PRS_RI_L4_UDP,
 		       MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
 
-	MVPP2_DEF_FLOW(UDP_V6_FLOW, MVPP2_FL_IP6_UDP_NF_UNTAG,
+	MVPP2_DEF_FLOW(MVPP22_FLOW_UDP6, MVPP2_FL_IP6_UDP_NF_UNTAG,
 		       MVPP22_CLS_HEK_IP6_5T,
 		       MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP6_EXT |
 		       MVPP2_PRS_RI_L4_UDP,
 		       MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
 
 	/* UDP over IPv6 flows, not fragmented, with vlan tag */
-	MVPP2_DEF_FLOW(UDP_V6_FLOW, MVPP2_FL_IP6_UDP_NF_TAG,
-		       MVPP22_CLS_HEK_IP6_5T | MVPP22_CLS_HEK_OPT_VLAN,
+	MVPP2_DEF_FLOW(MVPP22_FLOW_UDP6, MVPP2_FL_IP6_UDP_NF_TAG,
+		       MVPP22_CLS_HEK_IP6_5T | MVPP22_CLS_HEK_TAGGED,
 		       MVPP2_PRS_RI_L3_IP6 | MVPP2_PRS_RI_L4_UDP,
 		       MVPP2_PRS_IP_MASK),
 
-	MVPP2_DEF_FLOW(UDP_V6_FLOW, MVPP2_FL_IP6_UDP_NF_TAG,
-		       MVPP22_CLS_HEK_IP6_5T | MVPP22_CLS_HEK_OPT_VLAN,
+	MVPP2_DEF_FLOW(MVPP22_FLOW_UDP6, MVPP2_FL_IP6_UDP_NF_TAG,
+		       MVPP22_CLS_HEK_IP6_5T | MVPP22_CLS_HEK_TAGGED,
 		       MVPP2_PRS_RI_L3_IP6_EXT | MVPP2_PRS_RI_L4_UDP,
 		       MVPP2_PRS_IP_MASK),
 
 	/* UDP over IPv6 flows, fragmented, no vlan tag */
-	MVPP2_DEF_FLOW(UDP_V6_FLOW, MVPP2_FL_IP6_UDP_FRAG_UNTAG,
+	MVPP2_DEF_FLOW(MVPP22_FLOW_UDP6, MVPP2_FL_IP6_UDP_FRAG_UNTAG,
 		       MVPP22_CLS_HEK_IP6_2T,
 		       MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP6 |
 		       MVPP2_PRS_RI_IP_FRAG_TRUE | MVPP2_PRS_RI_L4_UDP,
 		       MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
 
-	MVPP2_DEF_FLOW(UDP_V6_FLOW, MVPP2_FL_IP6_UDP_FRAG_UNTAG,
+	MVPP2_DEF_FLOW(MVPP22_FLOW_UDP6, MVPP2_FL_IP6_UDP_FRAG_UNTAG,
 		       MVPP22_CLS_HEK_IP6_2T,
 		       MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP6_EXT |
 		       MVPP2_PRS_RI_IP_FRAG_TRUE | MVPP2_PRS_RI_L4_UDP,
 		       MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
 
 	/* UDP over IPv6 flows, fragmented, with vlan tag */
-	MVPP2_DEF_FLOW(UDP_V6_FLOW, MVPP2_FL_IP6_UDP_FRAG_TAG,
-		       MVPP22_CLS_HEK_IP6_2T | MVPP22_CLS_HEK_OPT_VLAN,
+	MVPP2_DEF_FLOW(MVPP22_FLOW_UDP6, MVPP2_FL_IP6_UDP_FRAG_TAG,
+		       MVPP22_CLS_HEK_IP6_2T | MVPP22_CLS_HEK_TAGGED,
 		       MVPP2_PRS_RI_L3_IP6 | MVPP2_PRS_RI_IP_FRAG_TRUE |
 		       MVPP2_PRS_RI_L4_UDP,
 		       MVPP2_PRS_IP_MASK),
 
-	MVPP2_DEF_FLOW(UDP_V6_FLOW, MVPP2_FL_IP6_UDP_FRAG_TAG,
-		       MVPP22_CLS_HEK_IP6_2T | MVPP22_CLS_HEK_OPT_VLAN,
+	MVPP2_DEF_FLOW(MVPP22_FLOW_UDP6, MVPP2_FL_IP6_UDP_FRAG_TAG,
+		       MVPP22_CLS_HEK_IP6_2T | MVPP22_CLS_HEK_TAGGED,
 		       MVPP2_PRS_RI_L3_IP6_EXT | MVPP2_PRS_RI_IP_FRAG_TRUE |
 		       MVPP2_PRS_RI_L4_UDP,
 		       MVPP2_PRS_IP_MASK),
 
 	/* IPv4 flows, no vlan tag */
-	MVPP2_DEF_FLOW(IPV4_FLOW, MVPP2_FL_IP4_UNTAG,
+	MVPP2_DEF_FLOW(MVPP22_FLOW_IP4, MVPP2_FL_IP4_UNTAG,
 		       MVPP22_CLS_HEK_IP4_2T,
 		       MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4,
 		       MVPP2_PRS_RI_VLAN_MASK | MVPP2_PRS_RI_L3_PROTO_MASK),
-	MVPP2_DEF_FLOW(IPV4_FLOW, MVPP2_FL_IP4_UNTAG,
+	MVPP2_DEF_FLOW(MVPP22_FLOW_IP4, MVPP2_FL_IP4_UNTAG,
 		       MVPP22_CLS_HEK_IP4_2T,
 		       MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4_OPT,
 		       MVPP2_PRS_RI_VLAN_MASK | MVPP2_PRS_RI_L3_PROTO_MASK),
-	MVPP2_DEF_FLOW(IPV4_FLOW, MVPP2_FL_IP4_UNTAG,
+	MVPP2_DEF_FLOW(MVPP22_FLOW_IP4, MVPP2_FL_IP4_UNTAG,
 		       MVPP22_CLS_HEK_IP4_2T,
 		       MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4_OTHER,
 		       MVPP2_PRS_RI_VLAN_MASK | MVPP2_PRS_RI_L3_PROTO_MASK),
 
 	/* IPv4 flows, with vlan tag */
-	MVPP2_DEF_FLOW(IPV4_FLOW, MVPP2_FL_IP4_TAG,
-		       MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_OPT_VLAN,
+	MVPP2_DEF_FLOW(MVPP22_FLOW_IP4, MVPP2_FL_IP4_TAG,
+		       MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_TAGGED,
 		       MVPP2_PRS_RI_L3_IP4,
 		       MVPP2_PRS_RI_L3_PROTO_MASK),
-	MVPP2_DEF_FLOW(IPV4_FLOW, MVPP2_FL_IP4_TAG,
-		       MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_OPT_VLAN,
+	MVPP2_DEF_FLOW(MVPP22_FLOW_IP4, MVPP2_FL_IP4_TAG,
+		       MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_TAGGED,
 		       MVPP2_PRS_RI_L3_IP4_OPT,
 		       MVPP2_PRS_RI_L3_PROTO_MASK),
-	MVPP2_DEF_FLOW(IPV4_FLOW, MVPP2_FL_IP4_TAG,
-		       MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_OPT_VLAN,
+	MVPP2_DEF_FLOW(MVPP22_FLOW_IP4, MVPP2_FL_IP4_TAG,
+		       MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_TAGGED,
 		       MVPP2_PRS_RI_L3_IP4_OTHER,
 		       MVPP2_PRS_RI_L3_PROTO_MASK),
 
 	/* IPv6 flows, no vlan tag */
-	MVPP2_DEF_FLOW(IPV6_FLOW, MVPP2_FL_IP6_UNTAG,
+	MVPP2_DEF_FLOW(MVPP22_FLOW_IP6, MVPP2_FL_IP6_UNTAG,
 		       MVPP22_CLS_HEK_IP6_2T,
 		       MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP6,
 		       MVPP2_PRS_RI_VLAN_MASK | MVPP2_PRS_RI_L3_PROTO_MASK),
-	MVPP2_DEF_FLOW(IPV6_FLOW, MVPP2_FL_IP6_UNTAG,
+	MVPP2_DEF_FLOW(MVPP22_FLOW_IP6, MVPP2_FL_IP6_UNTAG,
 		       MVPP22_CLS_HEK_IP6_2T,
 		       MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP6,
 		       MVPP2_PRS_RI_VLAN_MASK | MVPP2_PRS_RI_L3_PROTO_MASK),
 
 	/* IPv6 flows, with vlan tag */
-	MVPP2_DEF_FLOW(IPV6_FLOW, MVPP2_FL_IP6_TAG,
-		       MVPP22_CLS_HEK_IP6_2T | MVPP22_CLS_HEK_OPT_VLAN,
+	MVPP2_DEF_FLOW(MVPP22_FLOW_IP6, MVPP2_FL_IP6_TAG,
+		       MVPP22_CLS_HEK_IP6_2T | MVPP22_CLS_HEK_TAGGED,
 		       MVPP2_PRS_RI_L3_IP6,
 		       MVPP2_PRS_RI_L3_PROTO_MASK),
-	MVPP2_DEF_FLOW(IPV6_FLOW, MVPP2_FL_IP6_TAG,
-		       MVPP22_CLS_HEK_IP6_2T | MVPP22_CLS_HEK_OPT_VLAN,
+	MVPP2_DEF_FLOW(MVPP22_FLOW_IP6, MVPP2_FL_IP6_TAG,
+		       MVPP22_CLS_HEK_IP6_2T | MVPP22_CLS_HEK_TAGGED,
 		       MVPP2_PRS_RI_L3_IP6,
 		       MVPP2_PRS_RI_L3_PROTO_MASK),
 
 	/* Non IP flow, no vlan tag */
-	MVPP2_DEF_FLOW(ETHER_FLOW, MVPP2_FL_NON_IP_UNTAG,
+	MVPP2_DEF_FLOW(MVPP22_FLOW_ETHERNET, MVPP2_FL_NON_IP_UNTAG,
 		       0,
 		       MVPP2_PRS_RI_VLAN_NONE,
 		       MVPP2_PRS_RI_VLAN_MASK),
 	/* Non IP flow, with vlan tag */
-	MVPP2_DEF_FLOW(ETHER_FLOW, MVPP2_FL_NON_IP_TAG,
+	MVPP2_DEF_FLOW(MVPP22_FLOW_ETHERNET, MVPP2_FL_NON_IP_TAG,
 		       MVPP22_CLS_HEK_OPT_VLAN,
 		       0, 0),
 };
@@ -344,9 +344,9 @@
 				 struct mvpp2_cls_flow_entry *fe)
 {
 	mvpp2_write(priv, MVPP2_CLS_FLOW_INDEX_REG, fe->index);
-	mvpp2_write(priv, MVPP2_CLS_FLOW_TBL0_REG,  fe->data[0]);
-	mvpp2_write(priv, MVPP2_CLS_FLOW_TBL1_REG,  fe->data[1]);
-	mvpp2_write(priv, MVPP2_CLS_FLOW_TBL2_REG,  fe->data[2]);
+	mvpp2_write(priv, MVPP2_CLS_FLOW_TBL0_REG, fe->data[0]);
+	mvpp2_write(priv, MVPP2_CLS_FLOW_TBL1_REG, fe->data[1]);
+	mvpp2_write(priv, MVPP2_CLS_FLOW_TBL2_REG, fe->data[2]);
 }
 
 u32 mvpp2_cls_lookup_hits(struct mvpp2 *priv, int index)
@@ -429,12 +429,6 @@
 		fe->data[0] &= ~MVPP2_CLS_FLOW_TBL0_PORT_ID_SEL;
 }
 
-static void mvpp2_cls_flow_seq_set(struct mvpp2_cls_flow_entry *fe, u32 seq)
-{
-	fe->data[1] &= ~MVPP2_CLS_FLOW_TBL1_SEQ(MVPP2_CLS_FLOW_TBL1_SEQ_MASK);
-	fe->data[1] |= MVPP2_CLS_FLOW_TBL1_SEQ(seq);
-}
-
 static void mvpp2_cls_flow_last_set(struct mvpp2_cls_flow_entry *fe,
 				    bool is_last)
 {
@@ -454,9 +448,22 @@
 	fe->data[0] |= MVPP2_CLS_FLOW_TBL0_PORT_ID(port);
 }
 
+static void mvpp2_cls_flow_port_remove(struct mvpp2_cls_flow_entry *fe,
+				       u32 port)
+{
+	fe->data[0] &= ~MVPP2_CLS_FLOW_TBL0_PORT_ID(port);
+}
+
+static void mvpp2_cls_flow_lu_type_set(struct mvpp2_cls_flow_entry *fe,
+				       u8 lu_type)
+{
+	fe->data[1] &= ~MVPP2_CLS_FLOW_TBL1_LU_TYPE(MVPP2_CLS_LU_TYPE_MASK);
+	fe->data[1] |= MVPP2_CLS_FLOW_TBL1_LU_TYPE(lu_type);
+}
+
 /* Initialize the parser entry for the given flow */
 static void mvpp2_cls_flow_prs_init(struct mvpp2 *priv,
-				    struct mvpp2_cls_flow *flow)
+				    const struct mvpp2_cls_flow *flow)
 {
 	mvpp2_prs_add_flow(priv, flow->flow_id, flow->prs_ri.ri,
 			   flow->prs_ri.ri_mask);
@@ -464,7 +471,7 @@
 
 /* Initialize the Lookup Id table entry for the given flow */
 static void mvpp2_cls_flow_lkp_init(struct mvpp2 *priv,
-				    struct mvpp2_cls_flow *flow)
+				    const struct mvpp2_cls_flow *flow)
 {
 	struct mvpp2_cls_lookup_entry le;
 
@@ -477,7 +484,7 @@
 	/* We point on the first lookup in the sequence for the flow, that is
 	 * the C2 lookup.
 	 */
-	le.data |= MVPP2_CLS_LKP_FLOW_PTR(MVPP2_FLOW_C2_ENTRY(flow->flow_id));
+	le.data |= MVPP2_CLS_LKP_FLOW_PTR(MVPP2_CLS_FLT_FIRST(flow->flow_id));
 
 	/* CLS is always enabled, RSS is enabled/disabled in C2 lookup */
 	le.data |= MVPP2_CLS_LKP_TBL_LOOKUP_EN_MASK;
@@ -485,21 +492,113 @@
 	mvpp2_cls_lookup_write(priv, &le);
 }
 
+static void mvpp2_cls_c2_write(struct mvpp2 *priv,
+			       struct mvpp2_cls_c2_entry *c2)
+{
+	u32 val;
+
+	mvpp2_write(priv, MVPP22_CLS_C2_TCAM_IDX, c2->index);
+
+	val = mvpp2_read(priv, MVPP22_CLS_C2_TCAM_INV);
+	if (c2->valid)
+		val &= ~MVPP22_CLS_C2_TCAM_INV_BIT;
+	else
+		val |= MVPP22_CLS_C2_TCAM_INV_BIT;
+	mvpp2_write(priv, MVPP22_CLS_C2_TCAM_INV, val);
+
+	mvpp2_write(priv, MVPP22_CLS_C2_ACT, c2->act);
+
+	mvpp2_write(priv, MVPP22_CLS_C2_ATTR0, c2->attr[0]);
+	mvpp2_write(priv, MVPP22_CLS_C2_ATTR1, c2->attr[1]);
+	mvpp2_write(priv, MVPP22_CLS_C2_ATTR2, c2->attr[2]);
+	mvpp2_write(priv, MVPP22_CLS_C2_ATTR3, c2->attr[3]);
+
+	mvpp2_write(priv, MVPP22_CLS_C2_TCAM_DATA0, c2->tcam[0]);
+	mvpp2_write(priv, MVPP22_CLS_C2_TCAM_DATA1, c2->tcam[1]);
+	mvpp2_write(priv, MVPP22_CLS_C2_TCAM_DATA2, c2->tcam[2]);
+	mvpp2_write(priv, MVPP22_CLS_C2_TCAM_DATA3, c2->tcam[3]);
+	/* Writing TCAM_DATA4 flushes writes to TCAM_DATA0-4 and INV to HW */
+	mvpp2_write(priv, MVPP22_CLS_C2_TCAM_DATA4, c2->tcam[4]);
+}
+
+void mvpp2_cls_c2_read(struct mvpp2 *priv, int index,
+		       struct mvpp2_cls_c2_entry *c2)
+{
+	u32 val;
+
+	mvpp2_write(priv, MVPP22_CLS_C2_TCAM_IDX, index);
+
+	c2->index = index;
+
+	c2->tcam[0] = mvpp2_read(priv, MVPP22_CLS_C2_TCAM_DATA0);
+	c2->tcam[1] = mvpp2_read(priv, MVPP22_CLS_C2_TCAM_DATA1);
+	c2->tcam[2] = mvpp2_read(priv, MVPP22_CLS_C2_TCAM_DATA2);
+	c2->tcam[3] = mvpp2_read(priv, MVPP22_CLS_C2_TCAM_DATA3);
+	c2->tcam[4] = mvpp2_read(priv, MVPP22_CLS_C2_TCAM_DATA4);
+
+	c2->act = mvpp2_read(priv, MVPP22_CLS_C2_ACT);
+
+	c2->attr[0] = mvpp2_read(priv, MVPP22_CLS_C2_ATTR0);
+	c2->attr[1] = mvpp2_read(priv, MVPP22_CLS_C2_ATTR1);
+	c2->attr[2] = mvpp2_read(priv, MVPP22_CLS_C2_ATTR2);
+	c2->attr[3] = mvpp2_read(priv, MVPP22_CLS_C2_ATTR3);
+
+	val = mvpp2_read(priv, MVPP22_CLS_C2_TCAM_INV);
+	c2->valid = !(val & MVPP22_CLS_C2_TCAM_INV_BIT);
+}
+
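+/* Map an ethtool flow type to the driver's internal flow type bitmask,
+ * ignoring the FLOW_EXT, FLOW_MAC_EXT and FLOW_RSS flag bits.
+ */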
+static int mvpp2_cls_ethtool_flow_to_type(int flow_type)
+{
+	switch (flow_type & ~(FLOW_EXT | FLOW_MAC_EXT | FLOW_RSS)) {
+	case ETHER_FLOW:
+		return MVPP22_FLOW_ETHERNET;
+	case TCP_V4_FLOW:
+		return MVPP22_FLOW_TCP4;
+	case TCP_V6_FLOW:
+		return MVPP22_FLOW_TCP6;
+	case UDP_V4_FLOW:
+		return MVPP22_FLOW_UDP4;
+	case UDP_V6_FLOW:
+		return MVPP22_FLOW_UDP6;
+	case IPV4_FLOW:
+		return MVPP22_FLOW_IP4;
+	case IPV6_FLOW:
+		return MVPP22_FLOW_IP6;
+	default:
+		return -EOPNOTSUPP;
+	}
+}
+
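+/* Index of the C2 TCAM entry dedicated to the rule at location 'loc' on this
+ * port. Each port owns a contiguous range of C2 entries for ethtool rules.
+ */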
+static int mvpp2_cls_c2_port_flow_index(struct mvpp2_port *port, int loc)
+{
+	return MVPP22_CLS_C2_RFS_LOC(port->id, loc);
+}
+
 /* Initialize the flow table entries for the given flow */
-static void mvpp2_cls_flow_init(struct mvpp2 *priv, struct mvpp2_cls_flow *flow)
+static void mvpp2_cls_flow_init(struct mvpp2 *priv,
+				const struct mvpp2_cls_flow *flow)
 {
 	struct mvpp2_cls_flow_entry fe;
-	int i;
+	int i, pri = 0;
 
-	/* C2 lookup */
-	memset(&fe, 0, sizeof(fe));
-	fe.index = MVPP2_FLOW_C2_ENTRY(flow->flow_id);
+	/* Assign default values to all entries in the flow */
+	for (i = MVPP2_CLS_FLT_FIRST(flow->flow_id);
+	     i <= MVPP2_CLS_FLT_LAST(flow->flow_id); i++) {
+		memset(&fe, 0, sizeof(fe));
+		fe.index = i;
+		mvpp2_cls_flow_pri_set(&fe, pri++);
+
+		if (i == MVPP2_CLS_FLT_LAST(flow->flow_id))
+			mvpp2_cls_flow_last_set(&fe, 1);
+
+		mvpp2_cls_flow_write(priv, &fe);
+	}
+
+	/* RSS config C2 lookup */
+	mvpp2_cls_flow_read(priv, MVPP2_CLS_FLT_C2_RSS_ENTRY(flow->flow_id),
+			    &fe);
 
 	mvpp2_cls_flow_eng_set(&fe, MVPP22_CLS_ENGINE_C2);
 	mvpp2_cls_flow_port_id_sel(&fe, true);
-	mvpp2_cls_flow_last_set(&fe, 0);
-	mvpp2_cls_flow_pri_set(&fe, 0);
-	mvpp2_cls_flow_seq_set(&fe, MVPP2_CLS_FLOW_SEQ_FIRST1);
+	mvpp2_cls_flow_lu_type_set(&fe, MVPP22_CLS_LU_TYPE_ALL);
 
 	/* Add all ports */
 	for (i = 0; i < MVPP2_MAX_PORTS; i++)
@@ -509,22 +608,19 @@
 
 	/* C3Hx lookups */
 	for (i = 0; i < MVPP2_MAX_PORTS; i++) {
-		memset(&fe, 0, sizeof(fe));
-		fe.index = MVPP2_PORT_FLOW_HASH_ENTRY(i, flow->flow_id);
+		mvpp2_cls_flow_read(priv,
+				    MVPP2_CLS_FLT_HASH_ENTRY(i, flow->flow_id),
+				    &fe);
 
+		/* Set a default engine. Will be overwritten when setting the
+		 * real HEK parameters
+		 */
+		mvpp2_cls_flow_eng_set(&fe, MVPP22_CLS_ENGINE_C3HA);
 		mvpp2_cls_flow_port_id_sel(&fe, true);
-		mvpp2_cls_flow_pri_set(&fe, i + 1);
-		mvpp2_cls_flow_seq_set(&fe, MVPP2_CLS_FLOW_SEQ_MIDDLE);
 		mvpp2_cls_flow_port_add(&fe, BIT(i));
 
 		mvpp2_cls_flow_write(priv, &fe);
 	}
-
-	/* Update the last entry */
-	mvpp2_cls_flow_last_set(&fe, 1);
-	mvpp2_cls_flow_seq_set(&fe, MVPP2_CLS_FLOW_SEQ_LAST);
-
-	mvpp2_cls_flow_write(priv, &fe);
 }
 
 /* Adds a field to the Header Extracted Key generation parameters*/
@@ -555,9 +651,15 @@
 
 	for_each_set_bit(i, &hash_opts, MVPP22_CLS_HEK_N_FIELDS) {
 		switch (BIT(i)) {
+		case MVPP22_CLS_HEK_OPT_MAC_DA:
+			field_id = MVPP22_CLS_FIELD_MAC_DA;
+			break;
 		case MVPP22_CLS_HEK_OPT_VLAN:
 			field_id = MVPP22_CLS_FIELD_VLAN;
 			break;
+		case MVPP22_CLS_HEK_OPT_VLAN_PRI:
+			field_id = MVPP22_CLS_FIELD_VLAN_PRI;
+			break;
 		case MVPP22_CLS_HEK_OPT_IP4SA:
 			field_id = MVPP22_CLS_FIELD_IP4SA;
 			break;
@@ -586,9 +688,33 @@
 	return 0;
 }
 
-struct mvpp2_cls_flow *mvpp2_cls_flow_get(int flow)
+/* Returns the size, in bits, of the corresponding HEK field */
+static int mvpp2_cls_hek_field_size(u32 field)
 {
-	if (flow >= MVPP2_N_FLOWS)
+	switch (field) {
+	case MVPP22_CLS_HEK_OPT_MAC_DA:
+		return 48;
+	case MVPP22_CLS_HEK_OPT_VLAN:
+		return 12;
+	case MVPP22_CLS_HEK_OPT_VLAN_PRI:
+		return 3;
+	case MVPP22_CLS_HEK_OPT_IP4SA:
+	case MVPP22_CLS_HEK_OPT_IP4DA:
+		return 32;
+	case MVPP22_CLS_HEK_OPT_IP6SA:
+	case MVPP22_CLS_HEK_OPT_IP6DA:
+		return 128;
+	case MVPP22_CLS_HEK_OPT_L4SIP:
+	case MVPP22_CLS_HEK_OPT_L4DIP:
+		return 16;
+	default:
+		return -1;
+	}
+}
+
+const struct mvpp2_cls_flow *mvpp2_cls_flow_get(int flow)
+{
+	if (flow >= MVPP2_N_PRS_FLOWS)
 		return NULL;
 
 	return &cls_flows[flow];
@@ -608,21 +734,17 @@
 static int mvpp2_port_rss_hash_opts_set(struct mvpp2_port *port, int flow_type,
 					u16 requested_opts)
 {
+	const struct mvpp2_cls_flow *flow;
 	struct mvpp2_cls_flow_entry fe;
-	struct mvpp2_cls_flow *flow;
 	int i, engine, flow_index;
 	u16 hash_opts;
 
-	for (i = 0; i < MVPP2_N_FLOWS; i++) {
+	for_each_cls_flow_id_with_type(i, flow_type) {
 		flow = mvpp2_cls_flow_get(i);
 		if (!flow)
 			return -EINVAL;
 
-		if (flow->flow_type != flow_type)
-			continue;
-
-		flow_index = MVPP2_PORT_FLOW_HASH_ENTRY(port->id,
-							flow->flow_id);
+		flow_index = MVPP2_CLS_FLT_HASH_ENTRY(port->id, flow->flow_id);
 
 		mvpp2_cls_flow_read(port->priv, flow_index, &fe);
 
@@ -664,6 +786,9 @@
 		case MVPP22_CLS_FIELD_VLAN:
 			hash_opts |= MVPP22_CLS_HEK_OPT_VLAN;
 			break;
+		case MVPP22_CLS_FIELD_VLAN_PRI:
+			hash_opts |= MVPP22_CLS_HEK_OPT_VLAN_PRI;
+			break;
 		case MVPP22_CLS_FIELD_L3_PROTO:
 			hash_opts |= MVPP22_CLS_HEK_OPT_L3_PROTO;
 			break;
@@ -697,21 +822,17 @@
  */
 static u16 mvpp2_port_rss_hash_opts_get(struct mvpp2_port *port, int flow_type)
 {
+	const struct mvpp2_cls_flow *flow;
 	struct mvpp2_cls_flow_entry fe;
-	struct mvpp2_cls_flow *flow;
 	int i, flow_index;
 	u16 hash_opts = 0;
 
-	for (i = 0; i < MVPP2_N_FLOWS; i++) {
+	for_each_cls_flow_id_with_type(i, flow_type) {
 		flow = mvpp2_cls_flow_get(i);
 		if (!flow)
 			return 0;
 
-		if (flow->flow_type != flow_type)
-			continue;
-
-		flow_index = MVPP2_PORT_FLOW_HASH_ENTRY(port->id,
-							flow->flow_id);
+		flow_index = MVPP2_CLS_FLT_HASH_ENTRY(port->id, flow->flow_id);
 
 		mvpp2_cls_flow_read(port->priv, flow_index, &fe);
 
@@ -723,10 +844,10 @@
 
 static void mvpp2_cls_port_init_flows(struct mvpp2 *priv)
 {
-	struct mvpp2_cls_flow *flow;
+	const struct mvpp2_cls_flow *flow;
 	int i;
 
-	for (i = 0; i < MVPP2_N_FLOWS; i++) {
+	for (i = 0; i < MVPP2_N_PRS_FLOWS; i++) {
 		flow = mvpp2_cls_flow_get(i);
 		if (!flow)
 			break;
@@ -737,47 +858,6 @@
 	}
 }
 
-static void mvpp2_cls_c2_write(struct mvpp2 *priv,
-			       struct mvpp2_cls_c2_entry *c2)
-{
-	mvpp2_write(priv, MVPP22_CLS_C2_TCAM_IDX, c2->index);
-
-	/* Write TCAM */
-	mvpp2_write(priv, MVPP22_CLS_C2_TCAM_DATA0, c2->tcam[0]);
-	mvpp2_write(priv, MVPP22_CLS_C2_TCAM_DATA1, c2->tcam[1]);
-	mvpp2_write(priv, MVPP22_CLS_C2_TCAM_DATA2, c2->tcam[2]);
-	mvpp2_write(priv, MVPP22_CLS_C2_TCAM_DATA3, c2->tcam[3]);
-	mvpp2_write(priv, MVPP22_CLS_C2_TCAM_DATA4, c2->tcam[4]);
-
-	mvpp2_write(priv, MVPP22_CLS_C2_ACT, c2->act);
-
-	mvpp2_write(priv, MVPP22_CLS_C2_ATTR0, c2->attr[0]);
-	mvpp2_write(priv, MVPP22_CLS_C2_ATTR1, c2->attr[1]);
-	mvpp2_write(priv, MVPP22_CLS_C2_ATTR2, c2->attr[2]);
-	mvpp2_write(priv, MVPP22_CLS_C2_ATTR3, c2->attr[3]);
-}
-
-void mvpp2_cls_c2_read(struct mvpp2 *priv, int index,
-		       struct mvpp2_cls_c2_entry *c2)
-{
-	mvpp2_write(priv, MVPP22_CLS_C2_TCAM_IDX, index);
-
-	c2->index = index;
-
-	c2->tcam[0] = mvpp2_read(priv, MVPP22_CLS_C2_TCAM_DATA0);
-	c2->tcam[1] = mvpp2_read(priv, MVPP22_CLS_C2_TCAM_DATA1);
-	c2->tcam[2] = mvpp2_read(priv, MVPP22_CLS_C2_TCAM_DATA2);
-	c2->tcam[3] = mvpp2_read(priv, MVPP22_CLS_C2_TCAM_DATA3);
-	c2->tcam[4] = mvpp2_read(priv, MVPP22_CLS_C2_TCAM_DATA4);
-
-	c2->act = mvpp2_read(priv, MVPP22_CLS_C2_ACT);
-
-	c2->attr[0] = mvpp2_read(priv, MVPP22_CLS_C2_ATTR0);
-	c2->attr[1] = mvpp2_read(priv, MVPP22_CLS_C2_ATTR1);
-	c2->attr[2] = mvpp2_read(priv, MVPP22_CLS_C2_ATTR2);
-	c2->attr[3] = mvpp2_read(priv, MVPP22_CLS_C2_ATTR3);
-}
-
 static void mvpp2_port_c2_cls_init(struct mvpp2_port *port)
 {
 	struct mvpp2_cls_c2_entry c2;
@@ -791,6 +871,10 @@
 	c2.tcam[4] = MVPP22_CLS_C2_PORT_ID(pmap);
 	c2.tcam[4] |= MVPP22_CLS_C2_TCAM_EN(MVPP22_CLS_C2_PORT_ID(pmap));
 
+	/* Match on Lookup Type */
+	c2.tcam[4] |= MVPP22_CLS_C2_TCAM_EN(MVPP22_CLS_C2_LU_TYPE(MVPP2_CLS_LU_TYPE_MASK));
+	c2.tcam[4] |= MVPP22_CLS_C2_LU_TYPE(MVPP22_CLS_LU_TYPE_ALL);
+
 	/* Update RSS status after matching this entry */
 	c2.act = MVPP22_CLS_C2_ACT_RSS_EN(MVPP22_C2_UPD_LOCK);
 
@@ -809,6 +893,8 @@
 	c2.attr[0] = MVPP22_CLS_C2_ATTR0_QHIGH(qh) |
 		      MVPP22_CLS_C2_ATTR0_QLOW(ql);
 
+	c2.valid = true;
+
 	mvpp2_cls_c2_write(port->priv, &c2);
 }
 
@@ -817,6 +903,7 @@
 {
 	struct mvpp2_cls_lookup_entry le;
 	struct mvpp2_cls_flow_entry fe;
+	struct mvpp2_cls_c2_entry c2;
 	int index;
 
 	/* Enable classifier */
@@ -840,6 +927,20 @@
 		mvpp2_cls_lookup_write(priv, &le);
 	}
 
+	/* Clear C2 TCAM engine table */
+	memset(&c2, 0, sizeof(c2));
+	c2.valid = false;
+	for (index = 0; index < MVPP22_CLS_C2_N_ENTRIES; index++) {
+		c2.index = index;
+		mvpp2_cls_c2_write(priv, &c2);
+	}
+
+	/* Disable the FIFO stages in C2 engine, which are only used in BIST
+	 * mode
+	 */
+	mvpp2_write(priv, MVPP22_CLS_C2_TCAM_CTRL,
+		    MVPP22_CLS_C2_TCAM_BYPASS_FIFO);
+
 	mvpp2_cls_port_init_flows(priv);
 }
 
@@ -880,12 +981,22 @@
 	return mvpp2_read(priv, MVPP22_CLS_C2_HIT_CTR);
 }
 
-static void mvpp2_rss_port_c2_enable(struct mvpp2_port *port)
+static void mvpp2_rss_port_c2_enable(struct mvpp2_port *port, u32 ctx)
 {
 	struct mvpp2_cls_c2_entry c2;
+	u8 qh, ql;
 
 	mvpp2_cls_c2_read(port->priv, MVPP22_CLS_C2_RSS_ENTRY(port->id), &c2);
 
+	/* The RxQ number is used to select the RSS table. In that case, we set
+	 * it to be the ctx number.
+	 */
+	qh = (ctx >> 3) & MVPP22_CLS_C2_ATTR0_QHIGH_MASK;
+	ql = ctx & MVPP22_CLS_C2_ATTR0_QLOW_MASK;
+
+	c2.attr[0] = MVPP22_CLS_C2_ATTR0_QHIGH(qh) |
+		     MVPP22_CLS_C2_ATTR0_QLOW(ql);
+
 	c2.attr[2] |= MVPP22_CLS_C2_ATTR2_RSS_EN;
 
 	mvpp2_cls_c2_write(port->priv, &c2);
@@ -894,22 +1005,57 @@
 static void mvpp2_rss_port_c2_disable(struct mvpp2_port *port)
 {
 	struct mvpp2_cls_c2_entry c2;
+	u8 qh, ql;
 
 	mvpp2_cls_c2_read(port->priv, MVPP22_CLS_C2_RSS_ENTRY(port->id), &c2);
 
+	/* Reset the default destination RxQ to the port's first rx queue. */
+	qh = (port->first_rxq >> 3) & MVPP22_CLS_C2_ATTR0_QHIGH_MASK;
+	ql = port->first_rxq & MVPP22_CLS_C2_ATTR0_QLOW_MASK;
+
+	c2.attr[0] = MVPP22_CLS_C2_ATTR0_QHIGH(qh) |
+		      MVPP22_CLS_C2_ATTR0_QLOW(ql);
+
 	c2.attr[2] &= ~MVPP22_CLS_C2_ATTR2_RSS_EN;
 
 	mvpp2_cls_c2_write(port->priv, &c2);
 }
 
-void mvpp22_rss_enable(struct mvpp2_port *port)
+static inline int mvpp22_rss_ctx(struct mvpp2_port *port, int port_rss_ctx)
 {
-	mvpp2_rss_port_c2_enable(port);
+	return port->rss_ctx[port_rss_ctx];
 }
 
-void mvpp22_rss_disable(struct mvpp2_port *port)
+int mvpp22_port_rss_enable(struct mvpp2_port *port)
 {
+	if (mvpp22_rss_ctx(port, 0) < 0)
+		return -EINVAL;
+
+	mvpp2_rss_port_c2_enable(port, mvpp22_rss_ctx(port, 0));
+
+	return 0;
+}
+
+int mvpp22_port_rss_disable(struct mvpp2_port *port)
+{
+	if (mvpp22_rss_ctx(port, 0) < 0)
+		return -EINVAL;
+
 	mvpp2_rss_port_c2_disable(port);
+
+	return 0;
+}
+
+static void mvpp22_port_c2_lookup_disable(struct mvpp2_port *port, int entry)
+{
+	struct mvpp2_cls_c2_entry c2;
+
+	mvpp2_cls_c2_read(port->priv, entry, &c2);
+
+	/* Clear the port map so that the entry doesn't match anymore */
+	c2.tcam[4] &= ~(MVPP22_CLS_C2_PORT_ID(BIT(port->id)));
+
+	mvpp2_cls_c2_write(port->priv, &c2);
 }
 
 /* Set CPU queue number for oversize packets */
@@ -928,6 +1074,370 @@
 	mvpp2_write(port->priv, MVPP2_CLS_SWFWD_PCTRL_REG, val);
 }
 
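+/* Build and write the C2 TCAM entry implementing an ethtool classification
+ * rule: match on the port, the rule's lookup type and the packed match key,
+ * then either drop the packet or steer it to the requested queue or RSS
+ * context.
+ */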
+static int mvpp2_port_c2_tcam_rule_add(struct mvpp2_port *port,
+				       struct mvpp2_rfs_rule *rule)
+{
+	struct flow_action_entry *act;
+	struct mvpp2_cls_c2_entry c2;
+	u8 qh, ql, pmap;
+	int index, ctx;
+
+	memset(&c2, 0, sizeof(c2));
+
+	index = mvpp2_cls_c2_port_flow_index(port, rule->loc);
+	if (index < 0)
+		return -EINVAL;
+	c2.index = index;
+
+	act = &rule->flow->action.entries[0];
+
+	rule->c2_index = c2.index;
+
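+	/* Each TCAM data word holds 16 bits of the 64-bit match key in its
+	 * low half and the corresponding mask bits in its high half, with
+	 * the least-significant key bits stored in tcam[3].
+	 */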
+	c2.tcam[3] = (rule->c2_tcam & 0xffff) |
+		     ((rule->c2_tcam_mask & 0xffff) << 16);
+	c2.tcam[2] = ((rule->c2_tcam >> 16) & 0xffff) |
+		     (((rule->c2_tcam_mask >> 16) & 0xffff) << 16);
+	c2.tcam[1] = ((rule->c2_tcam >> 32) & 0xffff) |
+		     (((rule->c2_tcam_mask >> 32) & 0xffff) << 16);
+	c2.tcam[0] = ((rule->c2_tcam >> 48) & 0xffff) |
+		     (((rule->c2_tcam_mask >> 48) & 0xffff) << 16);
+
+	pmap = BIT(port->id);
+	c2.tcam[4] = MVPP22_CLS_C2_PORT_ID(pmap);
+	c2.tcam[4] |= MVPP22_CLS_C2_TCAM_EN(MVPP22_CLS_C2_PORT_ID(pmap));
+
+	/* Match on Lookup Type */
+	c2.tcam[4] |= MVPP22_CLS_C2_TCAM_EN(MVPP22_CLS_C2_LU_TYPE(MVPP2_CLS_LU_TYPE_MASK));
+	c2.tcam[4] |= MVPP22_CLS_C2_LU_TYPE(rule->loc);
+
+	if (act->id == FLOW_ACTION_DROP) {
+		c2.act = MVPP22_CLS_C2_ACT_COLOR(MVPP22_C2_COL_RED_LOCK);
+	} else {
+		/* We want to keep the default color derived from the Header
+		 * Parser drop entries, for VLAN and MAC filtering. This will
+		 * assign a default color of Green or Red, and we want matches
+		 * with a non-drop action to keep that color.
+		 */
+		c2.act = MVPP22_CLS_C2_ACT_COLOR(MVPP22_C2_COL_NO_UPD_LOCK);
+
+		/* Update RSS status after matching this entry */
+		if (act->queue.ctx)
+			c2.attr[2] |= MVPP22_CLS_C2_ATTR2_RSS_EN;
+
+		/* Always lock the RSS_EN decision. We might have high prio
+		 * rules steering to an RXQ, and a lower one steering to RSS;
+		 * we don't want the low prio RSS rule overwriting this flag.
+		 */
+		c2.act = MVPP22_CLS_C2_ACT_RSS_EN(MVPP22_C2_UPD_LOCK);
+
+		/* Mark packet as "forwarded to software", needed for RSS */
+		c2.act |= MVPP22_CLS_C2_ACT_FWD(MVPP22_C2_FWD_SW_LOCK);
+
+		c2.act |= MVPP22_CLS_C2_ACT_QHIGH(MVPP22_C2_UPD_LOCK) |
+			   MVPP22_CLS_C2_ACT_QLOW(MVPP22_C2_UPD_LOCK);
+
+		if (act->queue.ctx) {
+			/* Get the global ctx number */
+			ctx = mvpp22_rss_ctx(port, act->queue.ctx);
+			if (ctx < 0)
+				return -EINVAL;
+
+			qh = (ctx >> 3) & MVPP22_CLS_C2_ATTR0_QHIGH_MASK;
+			ql = ctx & MVPP22_CLS_C2_ATTR0_QLOW_MASK;
+		} else {
+			qh = ((act->queue.index + port->first_rxq) >> 3) &
+			      MVPP22_CLS_C2_ATTR0_QHIGH_MASK;
+			ql = (act->queue.index + port->first_rxq) &
+			      MVPP22_CLS_C2_ATTR0_QLOW_MASK;
+		}
+
+		c2.attr[0] = MVPP22_CLS_C2_ATTR0_QHIGH(qh) |
+			      MVPP22_CLS_C2_ATTR0_QLOW(ql);
+	}
+
+	c2.valid = true;
+
+	mvpp2_cls_c2_write(port->priv, &c2);
+
+	return 0;
+}
+
+static int mvpp2_port_c2_rfs_rule_insert(struct mvpp2_port *port,
+					 struct mvpp2_rfs_rule *rule)
+{
+	return mvpp2_port_c2_tcam_rule_add(port, rule);
+}
+
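+/* Remove this port from every flow table entry referencing the rule, then
+ * invalidate the C2 TCAM entry backing it.
+ */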
+static int mvpp2_port_cls_rfs_rule_remove(struct mvpp2_port *port,
+					  struct mvpp2_rfs_rule *rule)
+{
+	const struct mvpp2_cls_flow *flow;
+	struct mvpp2_cls_flow_entry fe;
+	int index, i;
+
+	for_each_cls_flow_id_containing_type(i, rule->flow_type) {
+		flow = mvpp2_cls_flow_get(i);
+		if (!flow)
+			return 0;
+
+		index = MVPP2_CLS_FLT_C2_RFS(port->id, flow->flow_id, rule->loc);
+
+		mvpp2_cls_flow_read(port->priv, index, &fe);
+		mvpp2_cls_flow_port_remove(&fe, BIT(port->id));
+		mvpp2_cls_flow_write(port->priv, &fe);
+	}
+
+	if (rule->c2_index >= 0)
+		mvpp22_port_c2_lookup_disable(port, rule->c2_index);
+
+	return 0;
+}
+
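+/* Program the rule into the C2 TCAM, then enable the per-port RFS flow table
+ * entries for every flow that contains this rule's traffic type and supports
+ * the requested HEK fields.
+ */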
+static int mvpp2_port_flt_rfs_rule_insert(struct mvpp2_port *port,
+					  struct mvpp2_rfs_rule *rule)
+{
+	const struct mvpp2_cls_flow *flow;
+	struct mvpp2 *priv = port->priv;
+	struct mvpp2_cls_flow_entry fe;
+	int index, ret, i;
+
+	if (rule->engine != MVPP22_CLS_ENGINE_C2)
+		return -EOPNOTSUPP;
+
+	ret = mvpp2_port_c2_rfs_rule_insert(port, rule);
+	if (ret)
+		return ret;
+
+	for_each_cls_flow_id_containing_type(i, rule->flow_type) {
+		flow = mvpp2_cls_flow_get(i);
+		if (!flow)
+			return 0;
+
+		if ((rule->hek_fields & flow->supported_hash_opts) != rule->hek_fields)
+			continue;
+
+		index = MVPP2_CLS_FLT_C2_RFS(port->id, flow->flow_id, rule->loc);
+
+		mvpp2_cls_flow_read(priv, index, &fe);
+		mvpp2_cls_flow_eng_set(&fe, rule->engine);
+		mvpp2_cls_flow_port_id_sel(&fe, true);
+		mvpp2_flow_set_hek_fields(&fe, rule->hek_fields);
+		mvpp2_cls_flow_lu_type_set(&fe, rule->loc);
+		mvpp2_cls_flow_port_add(&fe, 0xf);
+
+		mvpp2_cls_flow_write(priv, &fe);
+	}
+
+	return 0;
+}
+
+static int mvpp2_cls_c2_build_match(struct mvpp2_rfs_rule *rule)
+{
+	struct flow_rule *flow = rule->flow;
+	int offs = 0;
+
+	/* The order of insertion in C2 tcam must match the order in which
+	 * the fields are found in the header
+	 */
+	if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_VLAN)) {
+		struct flow_match_vlan match;
+
+		flow_rule_match_vlan(flow, &match);
+		if (match.mask->vlan_id) {
+			rule->hek_fields |= MVPP22_CLS_HEK_OPT_VLAN;
+
+			rule->c2_tcam |= ((u64)match.key->vlan_id) << offs;
+			rule->c2_tcam_mask |= ((u64)match.mask->vlan_id) << offs;
+
+			/* Don't update the offset yet */
+		}
+
+		if (match.mask->vlan_priority) {
+			rule->hek_fields |= MVPP22_CLS_HEK_OPT_VLAN_PRI;
+
+			/* VLAN pri is always at offset 13 relative to the
+			 * current offset
+			 */
+			rule->c2_tcam |= ((u64)match.key->vlan_priority) <<
+				(offs + 13);
+			rule->c2_tcam_mask |= ((u64)match.mask->vlan_priority) <<
+				(offs + 13);
+		}
+
+		if (match.mask->vlan_dei)
+			return -EOPNOTSUPP;
+
+		/* vlan id and prio always seem to take a full 16-bit slot in
+		 * the Header Extracted Key.
+		 */
+		offs += 16;
+	}
+
+	if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_PORTS)) {
+		struct flow_match_ports match;
+
+		flow_rule_match_ports(flow, &match);
+		if (match.mask->src) {
+			rule->hek_fields |= MVPP22_CLS_HEK_OPT_L4SIP;
+
+			rule->c2_tcam |= ((u64)ntohs(match.key->src)) << offs;
+			rule->c2_tcam_mask |= ((u64)ntohs(match.mask->src)) << offs;
+			offs += mvpp2_cls_hek_field_size(MVPP22_CLS_HEK_OPT_L4SIP);
+		}
+
+		if (match.mask->dst) {
+			rule->hek_fields |= MVPP22_CLS_HEK_OPT_L4DIP;
+
+			rule->c2_tcam |= ((u64)ntohs(match.key->dst)) << offs;
+			rule->c2_tcam_mask |= ((u64)ntohs(match.mask->dst)) << offs;
+			offs += mvpp2_cls_hek_field_size(MVPP22_CLS_HEK_OPT_L4DIP);
+		}
+	}
+
+	if (hweight16(rule->hek_fields) > MVPP2_FLOW_N_FIELDS)
+		return -EOPNOTSUPP;
+
+	return 0;
+}
+
+static int mvpp2_cls_rfs_parse_rule(struct mvpp2_rfs_rule *rule)
+{
+	struct flow_rule *flow = rule->flow;
+	struct flow_action_entry *act;
+
+	act = &flow->action.entries[0];
+	if (act->id != FLOW_ACTION_QUEUE && act->id != FLOW_ACTION_DROP)
+		return -EOPNOTSUPP;
+
+	/* When both an RSS context and a queue index are set, the index
+	 * is considered as an offset to be added to the indirection table
+	 * entries. We don't support this, so reject this rule.
+	 */
+	if (act->queue.ctx && act->queue.index)
+		return -EOPNOTSUPP;
+
+	/* For now, only use the C2 engine which has a HEK size limited to 64
+	 * bits for TCAM matching.
+	 */
+	rule->engine = MVPP22_CLS_ENGINE_C2;
+
+	if (mvpp2_cls_c2_build_match(rule))
+		return -EINVAL;
+
+	return 0;
+}
+
+int mvpp2_ethtool_cls_rule_get(struct mvpp2_port *port,
+			       struct ethtool_rxnfc *rxnfc)
+{
+	struct mvpp2_ethtool_fs *efs;
+
+	if (rxnfc->fs.location >= MVPP2_N_RFS_ENTRIES_PER_FLOW)
+		return -EINVAL;
+
+	efs = port->rfs_rules[rxnfc->fs.location];
+	if (!efs)
+		return -ENOENT;
+
+	memcpy(rxnfc, &efs->rxnfc, sizeof(efs->rxnfc));
+
+	return 0;
+}
+
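+/* Add or replace an ethtool classification rule: translate the ethtool spec
+ * into a flow_rule, parse it into a C2 match, then program the flow table
+ * and the C2 TCAM for this port.
+ */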
+int mvpp2_ethtool_cls_rule_ins(struct mvpp2_port *port,
+			       struct ethtool_rxnfc *info)
+{
+	struct ethtool_rx_flow_spec_input input = {};
+	struct ethtool_rx_flow_rule *ethtool_rule;
+	struct mvpp2_ethtool_fs *efs, *old_efs;
+	int ret = 0;
+
+	if (info->fs.location >= MVPP2_N_RFS_ENTRIES_PER_FLOW)
+		return -EINVAL;
+
+	efs = kzalloc(sizeof(*efs), GFP_KERNEL);
+	if (!efs)
+		return -ENOMEM;
+
+	input.fs = &info->fs;
+
+	/* We need to manually set the rss_ctx, since this info isn't present
+	 * in info->fs
+	 */
+	if (info->fs.flow_type & FLOW_RSS)
+		input.rss_ctx = info->rss_context;
+
+	ethtool_rule = ethtool_rx_flow_rule_create(&input);
+	if (IS_ERR(ethtool_rule)) {
+		ret = PTR_ERR(ethtool_rule);
+		goto clean_rule;
+	}
+
+	efs->rule.flow = ethtool_rule->rule;
+	efs->rule.flow_type = mvpp2_cls_ethtool_flow_to_type(info->fs.flow_type);
+	if (efs->rule.flow_type < 0) {
+		ret = efs->rule.flow_type;
+		goto clean_rule;
+	}
+
+	ret = mvpp2_cls_rfs_parse_rule(&efs->rule);
+	if (ret)
+		goto clean_eth_rule;
+
+	efs->rule.loc = info->fs.location;
+
+	/* Replace an already existing rule */
+	if (port->rfs_rules[efs->rule.loc]) {
+		old_efs = port->rfs_rules[efs->rule.loc];
+		ret = mvpp2_port_cls_rfs_rule_remove(port, &old_efs->rule);
+		if (ret)
+			goto clean_eth_rule;
+		kfree(old_efs);
+		port->n_rfs_rules--;
+	}
+
+	ret = mvpp2_port_flt_rfs_rule_insert(port, &efs->rule);
+	if (ret)
+		goto clean_eth_rule;
+
+	ethtool_rx_flow_rule_destroy(ethtool_rule);
+	efs->rule.flow = NULL;
+
+	memcpy(&efs->rxnfc, info, sizeof(*info));
+	port->rfs_rules[efs->rule.loc] = efs;
+	port->n_rfs_rules++;
+
+	return ret;
+
+clean_eth_rule:
+	ethtool_rx_flow_rule_destroy(ethtool_rule);
+clean_rule:
+	kfree(efs);
+	return ret;
+}
+
+int mvpp2_ethtool_cls_rule_del(struct mvpp2_port *port,
+			       struct ethtool_rxnfc *info)
+{
+	struct mvpp2_ethtool_fs *efs;
+	int ret;
+
+	efs = port->rfs_rules[info->fs.location];
+	if (!efs)
+		return -EINVAL;
+
+	/* Remove the rule from the engines. */
+	ret = mvpp2_port_cls_rfs_rule_remove(port, &efs->rule);
+	if (ret)
+		return ret;
+
+	port->n_rfs_rules--;
+	port->rfs_rules[info->fs.location] = NULL;
+	kfree(efs);
+
+	return 0;
+}
+
 static inline u32 mvpp22_rxfh_indir(struct mvpp2_port *port, u32 rxq)
 {
 	int nrxqs, cpu, cpus = num_possible_cpus();
@@ -947,37 +1457,181 @@
 	return port->first_rxq + ((rxq * nrxqs + rxq / cpus) % port->nrxqs);
 }
 
-void mvpp22_rss_fill_table(struct mvpp2_port *port, u32 table)
+static void mvpp22_rss_fill_table(struct mvpp2_port *port,
+				  struct mvpp2_rss_table *table,
+				  u32 rss_ctx)
 {
 	struct mvpp2 *priv = port->priv;
 	int i;
 
 	for (i = 0; i < MVPP22_RSS_TABLE_ENTRIES; i++) {
-		u32 sel = MVPP22_RSS_INDEX_TABLE(table) |
+		u32 sel = MVPP22_RSS_INDEX_TABLE(rss_ctx) |
 			  MVPP22_RSS_INDEX_TABLE_ENTRY(i);
 		mvpp2_write(priv, MVPP22_RSS_INDEX, sel);
 
 		mvpp2_write(priv, MVPP22_RSS_TABLE_ENTRY,
-			    mvpp22_rxfh_indir(port, port->indir[i]));
+			    mvpp22_rxfh_indir(port, table->indir[i]));
 	}
 }
 
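+/* Allocate the first free hardware RSS table and configure its width so that
+ * the table entries fully replace the classifier Rx queue number.
+ */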
+static int mvpp22_rss_context_create(struct mvpp2_port *port, u32 *rss_ctx)
+{
+	struct mvpp2 *priv = port->priv;
+	u32 ctx;
+
+	/* Find the first free RSS table */
+	for (ctx = 0; ctx < MVPP22_N_RSS_TABLES; ctx++) {
+		if (!priv->rss_tables[ctx])
+			break;
+	}
+
+	if (ctx == MVPP22_N_RSS_TABLES)
+		return -EINVAL;
+
+	priv->rss_tables[ctx] = kzalloc(sizeof(*priv->rss_tables[ctx]),
+					GFP_KERNEL);
+	if (!priv->rss_tables[ctx])
+		return -ENOMEM;
+
+	*rss_ctx = ctx;
+
+	/* Set the table width: replace the whole classifier Rx queue number
+	 * with the ones configured in RSS table entries.
+	 */
+	mvpp2_write(priv, MVPP22_RSS_INDEX, MVPP22_RSS_INDEX_TABLE(ctx));
+	mvpp2_write(priv, MVPP22_RSS_WIDTH, 8);
+
+	mvpp2_write(priv, MVPP22_RSS_INDEX, MVPP22_RSS_INDEX_QUEUE(ctx));
+	mvpp2_write(priv, MVPP22_RXQ2RSS_TABLE, MVPP22_RSS_TABLE_POINTER(ctx));
+
+	return 0;
+}
+
+int mvpp22_port_rss_ctx_create(struct mvpp2_port *port, u32 *port_ctx)
+{
+	u32 rss_ctx;
+	int ret, i;
+
+	ret = mvpp22_rss_context_create(port, &rss_ctx);
+	if (ret)
+		return ret;
+
+	/* Find the first available context number in the port, starting from 1.
+	 * Context 0 on each port is reserved for the default context.
+	 */
+	for (i = 1; i < MVPP22_N_RSS_TABLES; i++) {
+		if (port->rss_ctx[i] < 0)
+			break;
+	}
+
+	if (i == MVPP22_N_RSS_TABLES)
+		return -EINVAL;
+
+	port->rss_ctx[i] = rss_ctx;
+	*port_ctx = i;
+
+	return 0;
+}
+
+static struct mvpp2_rss_table *mvpp22_rss_table_get(struct mvpp2 *priv,
+						    int rss_ctx)
+{
+	if (rss_ctx < 0 || rss_ctx >= MVPP22_N_RSS_TABLES)
+		return NULL;
+
+	return priv->rss_tables[rss_ctx];
+}
+
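+/* Delete a per-port RSS context: remove any classification rule steering to
+ * it, then release the underlying hardware RSS table.
+ */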
+int mvpp22_port_rss_ctx_delete(struct mvpp2_port *port, u32 port_ctx)
+{
+	struct mvpp2 *priv = port->priv;
+	struct ethtool_rxnfc *rxnfc;
+	int i, rss_ctx, ret;
+
+	rss_ctx = mvpp22_rss_ctx(port, port_ctx);
+
+	if (rss_ctx < 0 || rss_ctx >= MVPP22_N_RSS_TABLES)
+		return -EINVAL;
+
+	/* Invalidate any active classification rules that use this context */
+	for (i = 0; i < MVPP2_N_RFS_ENTRIES_PER_FLOW; i++) {
+		if (!port->rfs_rules[i])
+			continue;
+
+		rxnfc = &port->rfs_rules[i]->rxnfc;
+		if (!(rxnfc->fs.flow_type & FLOW_RSS) ||
+		    rxnfc->rss_context != port_ctx)
+			continue;
+
+		ret = mvpp2_ethtool_cls_rule_del(port, rxnfc);
+		if (ret) {
+			netdev_warn(port->dev,
+				    "couldn't remove classification rule %d associated to this context",
+				    rxnfc->fs.location);
+		}
+	}
+
+	kfree(priv->rss_tables[rss_ctx]);
+
+	priv->rss_tables[rss_ctx] = NULL;
+	port->rss_ctx[port_ctx] = -1;
+
+	return 0;
+}
+
+int mvpp22_port_rss_ctx_indir_set(struct mvpp2_port *port, u32 port_ctx,
+				  const u32 *indir)
+{
+	int rss_ctx = mvpp22_rss_ctx(port, port_ctx);
+	struct mvpp2_rss_table *rss_table = mvpp22_rss_table_get(port->priv,
+								 rss_ctx);
+
+	if (!rss_table)
+		return -EINVAL;
+
+	memcpy(rss_table->indir, indir,
+	       MVPP22_RSS_TABLE_ENTRIES * sizeof(rss_table->indir[0]));
+
+	mvpp22_rss_fill_table(port, rss_table, rss_ctx);
+
+	return 0;
+}
+
+int mvpp22_port_rss_ctx_indir_get(struct mvpp2_port *port, u32 port_ctx,
+				  u32 *indir)
+{
+	int rss_ctx = mvpp22_rss_ctx(port, port_ctx);
+	struct mvpp2_rss_table *rss_table = mvpp22_rss_table_get(port->priv,
+								 rss_ctx);
+
+	if (!rss_table)
+		return -EINVAL;
+
+	memcpy(indir, rss_table->indir,
+	       MVPP22_RSS_TABLE_ENTRIES * sizeof(rss_table->indir[0]));
+
+	return 0;
+}
+
 int mvpp2_ethtool_rxfh_set(struct mvpp2_port *port, struct ethtool_rxnfc *info)
 {
 	u16 hash_opts = 0;
+	u32 flow_type;
 
-	switch (info->flow_type) {
-	case TCP_V4_FLOW:
-	case UDP_V4_FLOW:
-	case TCP_V6_FLOW:
-	case UDP_V6_FLOW:
+	flow_type = mvpp2_cls_ethtool_flow_to_type(info->flow_type);
+
+	switch (flow_type) {
+	case MVPP22_FLOW_TCP4:
+	case MVPP22_FLOW_UDP4:
+	case MVPP22_FLOW_TCP6:
+	case MVPP22_FLOW_UDP6:
 		if (info->data & RXH_L4_B_0_1)
 			hash_opts |= MVPP22_CLS_HEK_OPT_L4SIP;
 		if (info->data & RXH_L4_B_2_3)
 			hash_opts |= MVPP22_CLS_HEK_OPT_L4DIP;
 		/* Fallthrough */
-	case IPV4_FLOW:
-	case IPV6_FLOW:
+	case MVPP22_FLOW_IP4:
+	case MVPP22_FLOW_IP6:
 		if (info->data & RXH_L2DA)
 			hash_opts |= MVPP22_CLS_HEK_OPT_MAC_DA;
 		if (info->data & RXH_VLAN)
@@ -994,15 +1648,18 @@
 	default: return -EOPNOTSUPP;
 	}
 
-	return mvpp2_port_rss_hash_opts_set(port, info->flow_type, hash_opts);
+	return mvpp2_port_rss_hash_opts_set(port, flow_type, hash_opts);
 }
 
 int mvpp2_ethtool_rxfh_get(struct mvpp2_port *port, struct ethtool_rxnfc *info)
 {
 	unsigned long hash_opts;
+	u32 flow_type;
 	int i;
 
-	hash_opts = mvpp2_port_rss_hash_opts_get(port, info->flow_type);
+	flow_type = mvpp2_cls_ethtool_flow_to_type(info->flow_type);
+
+	hash_opts = mvpp2_port_rss_hash_opts_get(port, flow_type);
 	info->data = 0;
 
 	for_each_set_bit(i, &hash_opts, MVPP22_CLS_HEK_N_FIELDS) {
@@ -1037,38 +1694,40 @@
 	return 0;
 }
 
-void mvpp22_rss_port_init(struct mvpp2_port *port)
+int mvpp22_port_rss_init(struct mvpp2_port *port)
 {
-	struct mvpp2 *priv = port->priv;
-	int i;
+	struct mvpp2_rss_table *table;
+	u32 context = 0;
+	int i, ret;
 
-	/* Set the table width: replace the whole classifier Rx queue number
-	 * with the ones configured in RSS table entries.
-	 */
-	mvpp2_write(priv, MVPP22_RSS_INDEX, MVPP22_RSS_INDEX_TABLE(port->id));
-	mvpp2_write(priv, MVPP22_RSS_WIDTH, 8);
+	for (i = 0; i < MVPP22_N_RSS_TABLES; i++)
+		port->rss_ctx[i] = -1;
 
-	/* The default RxQ is used as a key to select the RSS table to use.
-	 * We use one RSS table per port.
-	 */
-	mvpp2_write(priv, MVPP22_RSS_INDEX,
-		    MVPP22_RSS_INDEX_QUEUE(port->first_rxq));
-	mvpp2_write(priv, MVPP22_RXQ2RSS_TABLE,
-		    MVPP22_RSS_TABLE_POINTER(port->id));
+	ret = mvpp22_rss_context_create(port, &context);
+	if (ret)
+		return ret;
+
+	table = mvpp22_rss_table_get(port->priv, context);
+	if (!table)
+		return -EINVAL;
+
+	port->rss_ctx[0] = context;
 
 	/* Configure the first table to evenly distribute the packets across
 	 * real Rx Queues. The table entries map a hash to a port Rx Queue.
 	 */
 	for (i = 0; i < MVPP22_RSS_TABLE_ENTRIES; i++)
-		port->indir[i] = ethtool_rxfh_indir_default(i, port->nrxqs);
+		table->indir[i] = ethtool_rxfh_indir_default(i, port->nrxqs);
 
-	mvpp22_rss_fill_table(port, port->id);
+	mvpp22_rss_fill_table(port, table, mvpp22_rss_ctx(port, 0));
 
 	/* Configure default flows */
-	mvpp2_port_rss_hash_opts_set(port, IPV4_FLOW, MVPP22_CLS_HEK_IP4_2T);
-	mvpp2_port_rss_hash_opts_set(port, IPV6_FLOW, MVPP22_CLS_HEK_IP6_2T);
-	mvpp2_port_rss_hash_opts_set(port, TCP_V4_FLOW, MVPP22_CLS_HEK_IP4_5T);
-	mvpp2_port_rss_hash_opts_set(port, TCP_V6_FLOW, MVPP22_CLS_HEK_IP6_5T);
-	mvpp2_port_rss_hash_opts_set(port, UDP_V4_FLOW, MVPP22_CLS_HEK_IP4_5T);
-	mvpp2_port_rss_hash_opts_set(port, UDP_V6_FLOW, MVPP22_CLS_HEK_IP6_5T);
+	mvpp2_port_rss_hash_opts_set(port, MVPP22_FLOW_IP4, MVPP22_CLS_HEK_IP4_2T);
+	mvpp2_port_rss_hash_opts_set(port, MVPP22_FLOW_IP6, MVPP22_CLS_HEK_IP6_2T);
+	mvpp2_port_rss_hash_opts_set(port, MVPP22_FLOW_TCP4, MVPP22_CLS_HEK_IP4_5T);
+	mvpp2_port_rss_hash_opts_set(port, MVPP22_FLOW_TCP6, MVPP22_CLS_HEK_IP6_5T);
+	mvpp2_port_rss_hash_opts_set(port, MVPP22_FLOW_UDP4, MVPP22_CLS_HEK_IP4_5T);
+	mvpp2_port_rss_hash_opts_set(port, MVPP22_FLOW_UDP6, MVPP22_CLS_HEK_IP6_5T);
+
+	return 0;
 }
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.h b/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.h
index 089f05f..8867f25 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.h
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.h
@@ -33,15 +33,16 @@
 };
 
 #define MVPP22_CLS_HEK_OPT_MAC_DA	BIT(0)
-#define MVPP22_CLS_HEK_OPT_VLAN		BIT(1)
-#define MVPP22_CLS_HEK_OPT_L3_PROTO	BIT(2)
-#define MVPP22_CLS_HEK_OPT_IP4SA	BIT(3)
-#define MVPP22_CLS_HEK_OPT_IP4DA	BIT(4)
-#define MVPP22_CLS_HEK_OPT_IP6SA	BIT(5)
-#define MVPP22_CLS_HEK_OPT_IP6DA	BIT(6)
-#define MVPP22_CLS_HEK_OPT_L4SIP	BIT(7)
-#define MVPP22_CLS_HEK_OPT_L4DIP	BIT(8)
-#define MVPP22_CLS_HEK_N_FIELDS		9
+#define MVPP22_CLS_HEK_OPT_VLAN_PRI	BIT(1)
+#define MVPP22_CLS_HEK_OPT_VLAN		BIT(2)
+#define MVPP22_CLS_HEK_OPT_L3_PROTO	BIT(3)
+#define MVPP22_CLS_HEK_OPT_IP4SA	BIT(4)
+#define MVPP22_CLS_HEK_OPT_IP4DA	BIT(5)
+#define MVPP22_CLS_HEK_OPT_IP6SA	BIT(6)
+#define MVPP22_CLS_HEK_OPT_IP6DA	BIT(7)
+#define MVPP22_CLS_HEK_OPT_L4SIP	BIT(8)
+#define MVPP22_CLS_HEK_OPT_L4DIP	BIT(9)
+#define MVPP22_CLS_HEK_N_FIELDS		10
 
 #define MVPP22_CLS_HEK_L4_OPTS	(MVPP22_CLS_HEK_OPT_L4SIP | \
 				 MVPP22_CLS_HEK_OPT_L4DIP)
@@ -59,8 +60,12 @@
 #define MVPP22_CLS_HEK_IP6_5T	(MVPP22_CLS_HEK_IP6_2T | \
 				 MVPP22_CLS_HEK_L4_OPTS)
 
+#define MVPP22_CLS_HEK_TAGGED	(MVPP22_CLS_HEK_OPT_VLAN | \
+				 MVPP22_CLS_HEK_OPT_VLAN_PRI)
+
 enum mvpp2_cls_field_id {
 	MVPP22_CLS_FIELD_MAC_DA = 0x03,
+	MVPP22_CLS_FIELD_VLAN_PRI = 0x05,
 	MVPP22_CLS_FIELD_VLAN = 0x06,
 	MVPP22_CLS_FIELD_L3_PROTO = 0x0f,
 	MVPP22_CLS_FIELD_IP4SA = 0x10,
@@ -71,14 +76,6 @@
 	MVPP22_CLS_FIELD_L4DIP = 0x1e,
 };
 
-enum mvpp2_cls_flow_seq {
-	MVPP2_CLS_FLOW_SEQ_NORMAL = 0,
-	MVPP2_CLS_FLOW_SEQ_FIRST1,
-	MVPP2_CLS_FLOW_SEQ_FIRST2,
-	MVPP2_CLS_FLOW_SEQ_LAST,
-	MVPP2_CLS_FLOW_SEQ_MIDDLE
-};
-
 /* Classifier C2 engine constants */
 #define MVPP22_CLS_C2_TCAM_EN(data)		((data) << 16)
 
@@ -100,39 +97,62 @@
 	MVPP22_C2_FWD_HW_LOW_LAT_LOCK,
 };
 
+enum mvpp22_cls_c2_color_action {
+	MVPP22_C2_COL_NO_UPD = 0,
+	MVPP22_C2_COL_NO_UPD_LOCK,
+	MVPP22_C2_COL_GREEN,
+	MVPP22_C2_COL_GREEN_LOCK,
+	MVPP22_C2_COL_YELLOW,
+	MVPP22_C2_COL_YELLOW_LOCK,
+	MVPP22_C2_COL_RED,		/* Drop */
+	MVPP22_C2_COL_RED_LOCK,		/* Drop */
+};
+
 #define MVPP2_CLS_C2_TCAM_WORDS			5
 #define MVPP2_CLS_C2_ATTR_WORDS			5
 
 struct mvpp2_cls_c2_entry {
 	u32 index;
+	/* TCAM lookup key */
 	u32 tcam[MVPP2_CLS_C2_TCAM_WORDS];
+	/* Actions to perform upon TCAM match */
 	u32 act;
+	/* Attributes relative to the actions to perform */
 	u32 attr[MVPP2_CLS_C2_ATTR_WORDS];
+	/* Entry validity */
+	u8 valid;
 };
 
+#define MVPP22_FLOW_ETHER_BIT	BIT(0)
+#define MVPP22_FLOW_IP4_BIT	BIT(1)
+#define MVPP22_FLOW_IP6_BIT	BIT(2)
+#define MVPP22_FLOW_TCP_BIT	BIT(3)
+#define MVPP22_FLOW_UDP_BIT	BIT(4)
+
+#define MVPP22_FLOW_TCP4	(MVPP22_FLOW_ETHER_BIT | MVPP22_FLOW_IP4_BIT | MVPP22_FLOW_TCP_BIT)
+#define MVPP22_FLOW_TCP6	(MVPP22_FLOW_ETHER_BIT | MVPP22_FLOW_IP6_BIT | MVPP22_FLOW_TCP_BIT)
+#define MVPP22_FLOW_UDP4	(MVPP22_FLOW_ETHER_BIT | MVPP22_FLOW_IP4_BIT | MVPP22_FLOW_UDP_BIT)
+#define MVPP22_FLOW_UDP6	(MVPP22_FLOW_ETHER_BIT | MVPP22_FLOW_IP6_BIT | MVPP22_FLOW_UDP_BIT)
+#define MVPP22_FLOW_IP4		(MVPP22_FLOW_ETHER_BIT | MVPP22_FLOW_IP4_BIT)
+#define MVPP22_FLOW_IP6		(MVPP22_FLOW_ETHER_BIT | MVPP22_FLOW_IP6_BIT)
+#define MVPP22_FLOW_ETHERNET	(MVPP22_FLOW_ETHER_BIT)
+
 /* Classifier C2 engine entries */
-#define MVPP22_CLS_C2_RSS_ENTRY(port)	(port)
-#define MVPP22_CLS_C2_N_ENTRIES		MVPP2_MAX_PORTS
+#define MVPP22_CLS_C2_N_ENTRIES		256
 
-/* RSS flow entries in the flow table. We have 2 entries per port for RSS.
- *
- * The first performs a lookup using the C2 TCAM engine, to tag the
- * packet for software forwarding (needed for RSS), enable or disable RSS, and
- * assign the default rx queue.
- *
- * The second configures the hash generation, by specifying which fields of the
- * packet header are used to generate the hash, and specifies the relevant hash
- * engine to use.
+/* Number of per-port dedicated entries in the C2 TCAM */
+#define MVPP22_CLS_C2_PORT_N_FLOWS	MVPP2_N_RFS_ENTRIES_PER_FLOW
+
+/* Each port has one range per flow type + one entry controlling the global RSS
+ * setting and the default rx queue
  */
-#define MVPP22_RSS_FLOW_C2_OFFS		0
-#define MVPP22_RSS_FLOW_HASH_OFFS	1
-#define MVPP22_RSS_FLOW_SIZE		(MVPP22_RSS_FLOW_HASH_OFFS + 1)
+#define MVPP22_CLS_C2_PORT_RANGE	(MVPP22_CLS_C2_PORT_N_FLOWS + 1)
+#define MVPP22_CLS_C2_PORT_FIRST(p)	((p) * MVPP22_CLS_C2_PORT_RANGE)
+#define MVPP22_CLS_C2_RSS_ENTRY(p)	(MVPP22_CLS_C2_PORT_FIRST((p) + 1) - 1)
 
-#define MVPP22_RSS_FLOW_C2(port)	((port) * MVPP22_RSS_FLOW_SIZE + \
-					 MVPP22_RSS_FLOW_C2_OFFS)
-#define MVPP22_RSS_FLOW_HASH(port)	((port) * MVPP22_RSS_FLOW_SIZE + \
-					 MVPP22_RSS_FLOW_HASH_OFFS)
-#define MVPP22_RSS_FLOW_FIRST(port)	MVPP22_RSS_FLOW_C2(port)
+#define MVPP22_CLS_C2_PORT_FLOW_FIRST(p)	(MVPP22_CLS_C2_PORT_FIRST(p))
+
+#define MVPP22_CLS_C2_RFS_LOC(p, loc)	(MVPP22_CLS_C2_PORT_FLOW_FIRST(p) + (loc))
 
 /* Packet flow ID */
 enum mvpp2_prs_flow {
@@ -162,6 +182,16 @@
 	MVPP2_FL_LAST,
 };
 
+/* LU Type defined for all engines, and specified in the flow table */
+#define MVPP2_CLS_LU_TYPE_MASK			0x3f
+
+enum mvpp2_cls_lu_type {
+	/* rule->loc is used as a lu-type for the entries 0 - 62. */
+	MVPP22_CLS_LU_TYPE_ALL = 63,
+};
+
+#define MVPP2_N_FLOWS		(MVPP2_FL_LAST - MVPP2_FL_START)
+
 struct mvpp2_cls_flow {
 	/* The L2-L4 traffic flow type */
 	int flow_type;
@@ -176,12 +206,48 @@
 	struct mvpp2_prs_result_info prs_ri;
 };
 
-#define MVPP2_N_FLOWS	52
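+/* Flow table layout for each flow_id: a block of per-port C2 entries for
+ * ethtool RFS rules, then one C2 RSS entry shared by all ports, then one
+ * hash (C3Hx) entry per port.
+ */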
+#define MVPP2_CLS_FLT_ENTRIES_PER_FLOW		(MVPP2_MAX_PORTS + 1 + 16)
+#define MVPP2_CLS_FLT_FIRST(id)			(((id) - MVPP2_FL_START) * \
+						 MVPP2_CLS_FLT_ENTRIES_PER_FLOW)
 
-#define MVPP2_ENTRIES_PER_FLOW			(MVPP2_MAX_PORTS + 1)
-#define MVPP2_FLOW_C2_ENTRY(id)			((id) * MVPP2_ENTRIES_PER_FLOW)
-#define MVPP2_PORT_FLOW_HASH_ENTRY(port, id)	((id) * MVPP2_ENTRIES_PER_FLOW + \
-						(port) + 1)
+#define MVPP2_CLS_FLT_C2_RFS(port, id, rfs_n)	(MVPP2_CLS_FLT_FIRST(id) + \
+						 ((port) * MVPP2_MAX_PORTS) + \
+						 (rfs_n))
+
+#define MVPP2_CLS_FLT_C2_RSS_ENTRY(id)		(MVPP2_CLS_FLT_C2_RFS(MVPP2_MAX_PORTS, id, 0))
+#define MVPP2_CLS_FLT_HASH_ENTRY(port, id)	(MVPP2_CLS_FLT_C2_RSS_ENTRY(id) + 1 + (port))
+#define MVPP2_CLS_FLT_LAST(id)			(MVPP2_CLS_FLT_FIRST(id) + \
+						 MVPP2_CLS_FLT_ENTRIES_PER_FLOW - 1)
+
+/* Iterate on each classifier flow id. Sets 'i' to be the index of the first
+ * entry in the cls_flows table for each different flow_id.
+ * This relies on entries having the same flow_id in the cls_flows table being
+ * contiguous.
+ */
+#define for_each_cls_flow_id(i)						      \
+	for ((i) = 0; (i) < MVPP2_N_PRS_FLOWS; (i)++)			      \
+		if ((i) > 0 &&						      \
+		    cls_flows[(i)].flow_id == cls_flows[(i) - 1].flow_id)       \
+			continue;					      \
+		else
+
+/* Iterate on each classifier flow that has a given flow_type. Sets 'i' to be
+ * the index of the first entry in the cls_flows table for each different
+ * flow_id that has the given flow_type. This allows operating on all flows
+ * that match a given ethtool flow type.
+ */
+#define for_each_cls_flow_id_with_type(i, type)				      \
+	for_each_cls_flow_id((i))					      \
+		if (cls_flows[(i)].flow_type != (type))			      \
+			continue;					      \
+		else
+
+#define for_each_cls_flow_id_containing_type(i, type)			      \
+	for_each_cls_flow_id((i))					      \
+		if ((cls_flows[(i)].flow_type & (type)) != (type))	      \
+			continue;					      \
+		else
+
 struct mvpp2_cls_flow_entry {
 	u32 index;
 	u32 data[MVPP2_CLS_FLOWS_TBL_DATA_WORDS];
@@ -193,12 +259,18 @@
 	u32 data;
 };
 
-void mvpp22_rss_fill_table(struct mvpp2_port *port, u32 table);
+int mvpp22_port_rss_init(struct mvpp2_port *port);
 
-void mvpp22_rss_port_init(struct mvpp2_port *port);
+int mvpp22_port_rss_enable(struct mvpp2_port *port);
+int mvpp22_port_rss_disable(struct mvpp2_port *port);
 
-void mvpp22_rss_enable(struct mvpp2_port *port);
-void mvpp22_rss_disable(struct mvpp2_port *port);
+int mvpp22_port_rss_ctx_create(struct mvpp2_port *port, u32 *rss_ctx);
+int mvpp22_port_rss_ctx_delete(struct mvpp2_port *port, u32 rss_ctx);
+
+int mvpp22_port_rss_ctx_indir_set(struct mvpp2_port *port, u32 rss_ctx,
+				  const u32 *indir);
+int mvpp22_port_rss_ctx_indir_get(struct mvpp2_port *port, u32 rss_ctx,
+				  u32 *indir);
 
 int mvpp2_ethtool_rxfh_get(struct mvpp2_port *port, struct ethtool_rxnfc *info);
 int mvpp2_ethtool_rxfh_set(struct mvpp2_port *port, struct ethtool_rxnfc *info);
@@ -213,7 +285,7 @@
 
 u16 mvpp2_flow_get_hek_fields(struct mvpp2_cls_flow_entry *fe);
 
-struct mvpp2_cls_flow *mvpp2_cls_flow_get(int flow);
+const struct mvpp2_cls_flow *mvpp2_cls_flow_get(int flow);
 
 u32 mvpp2_cls_flow_hits(struct mvpp2 *priv, int index);
 
@@ -230,4 +302,13 @@
 void mvpp2_cls_c2_read(struct mvpp2 *priv, int index,
 		       struct mvpp2_cls_c2_entry *c2);
 
+int mvpp2_ethtool_cls_rule_get(struct mvpp2_port *port,
+			       struct ethtool_rxnfc *rxnfc);
+
+int mvpp2_ethtool_cls_rule_ins(struct mvpp2_port *port,
+			       struct ethtool_rxnfc *info);
+
+int mvpp2_ethtool_cls_rule_del(struct mvpp2_port *port,
+			       struct ethtool_rxnfc *info);
+
 #endif
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_debugfs.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_debugfs.c
index f9744a6..4a3baa7 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_debugfs.c
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_debugfs.c
@@ -18,22 +18,48 @@
 	struct mvpp2 *priv;
 };
 
+struct mvpp2_dbgfs_c2_entry {
+	int id;
+	struct mvpp2 *priv;
+};
+
 struct mvpp2_dbgfs_flow_entry {
 	int flow;
 	struct mvpp2 *priv;
 };
 
+struct mvpp2_dbgfs_flow_tbl_entry {
+	int id;
+	struct mvpp2 *priv;
+};
+
 struct mvpp2_dbgfs_port_flow_entry {
 	struct mvpp2_port *port;
 	struct mvpp2_dbgfs_flow_entry *dbg_fe;
 };
 
+struct mvpp2_dbgfs_entries {
+	/* Entries for Header Parser debug info */
+	struct mvpp2_dbgfs_prs_entry prs_entries[MVPP2_PRS_TCAM_SRAM_SIZE];
+
+	/* Entries for Classifier C2 engine debug info */
+	struct mvpp2_dbgfs_c2_entry c2_entries[MVPP22_CLS_C2_N_ENTRIES];
+
+	/* Entries for Classifier Flow Table debug info */
+	struct mvpp2_dbgfs_flow_tbl_entry flt_entries[MVPP2_CLS_FLOWS_TBL_SIZE];
+
+	/* Entries for Classifier flows debug info */
+	struct mvpp2_dbgfs_flow_entry flow_entries[MVPP2_N_PRS_FLOWS];
+
+	/* Entries for per-port flows debug info */
+	struct mvpp2_dbgfs_port_flow_entry port_flow_entries[MVPP2_MAX_PORTS];
+};
+
 static int mvpp2_dbgfs_flow_flt_hits_show(struct seq_file *s, void *unused)
 {
-	struct mvpp2_dbgfs_flow_entry *entry = s->private;
-	int id = MVPP2_FLOW_C2_ENTRY(entry->flow);
+	struct mvpp2_dbgfs_flow_tbl_entry *entry = s->private;
 
-	u32 hits = mvpp2_cls_flow_hits(entry->priv, id);
+	u32 hits = mvpp2_cls_flow_hits(entry->priv, entry->id);
 
 	seq_printf(s, "%u\n", hits);
 
@@ -58,7 +84,7 @@
 static int mvpp2_dbgfs_flow_type_show(struct seq_file *s, void *unused)
 {
 	struct mvpp2_dbgfs_flow_entry *entry = s->private;
-	struct mvpp2_cls_flow *f;
+	const struct mvpp2_cls_flow *f;
 	const char *flow_name;
 
 	f = mvpp2_cls_flow_get(entry->flow);
@@ -93,30 +119,12 @@
 	return 0;
 }
 
-static int mvpp2_dbgfs_flow_type_open(struct inode *inode, struct file *file)
-{
-	return single_open(file, mvpp2_dbgfs_flow_type_show, inode->i_private);
-}
-
-static int mvpp2_dbgfs_flow_type_release(struct inode *inode, struct file *file)
-{
-	struct seq_file *seq = file->private_data;
-	struct mvpp2_dbgfs_flow_entry *flow_entry = seq->private;
-
-	kfree(flow_entry);
-	return single_release(inode, file);
-}
-
-static const struct file_operations mvpp2_dbgfs_flow_type_fops = {
-	.open = mvpp2_dbgfs_flow_type_open,
-	.read = seq_read,
-	.release = mvpp2_dbgfs_flow_type_release,
-};
+DEFINE_SHOW_ATTRIBUTE(mvpp2_dbgfs_flow_type);
 
 static int mvpp2_dbgfs_flow_id_show(struct seq_file *s, void *unused)
 {
-	struct mvpp2_dbgfs_flow_entry *entry = s->private;
-	struct mvpp2_cls_flow *f;
+	const struct mvpp2_dbgfs_flow_entry *entry = s->private;
+	const struct mvpp2_cls_flow *f;
 
 	f = mvpp2_cls_flow_get(entry->flow);
 	if (!f)
@@ -134,7 +142,7 @@
 	struct mvpp2_dbgfs_port_flow_entry *entry = s->private;
 	struct mvpp2_port *port = entry->port;
 	struct mvpp2_cls_flow_entry fe;
-	struct mvpp2_cls_flow *f;
+	const struct mvpp2_cls_flow *f;
 	int flow_index;
 	u16 hash_opts;
 
@@ -142,7 +150,7 @@
 	if (!f)
 		return -EINVAL;
 
-	flow_index = MVPP2_PORT_FLOW_HASH_ENTRY(entry->port->id, f->flow_id);
+	flow_index = MVPP2_CLS_FLT_HASH_ENTRY(entry->port->id, f->flow_id);
 
 	mvpp2_cls_flow_read(port->priv, flow_index, &fe);
 
@@ -153,42 +161,21 @@
 	return 0;
 }
 
-static int mvpp2_dbgfs_port_flow_hash_opt_open(struct inode *inode,
-					       struct file *file)
-{
-	return single_open(file, mvpp2_dbgfs_port_flow_hash_opt_show,
-			   inode->i_private);
-}
-
-static int mvpp2_dbgfs_port_flow_hash_opt_release(struct inode *inode,
-						  struct file *file)
-{
-	struct seq_file *seq = file->private_data;
-	struct mvpp2_dbgfs_port_flow_entry *flow_entry = seq->private;
-
-	kfree(flow_entry);
-	return single_release(inode, file);
-}
-
-static const struct file_operations mvpp2_dbgfs_port_flow_hash_opt_fops = {
-	.open = mvpp2_dbgfs_port_flow_hash_opt_open,
-	.read = seq_read,
-	.release = mvpp2_dbgfs_port_flow_hash_opt_release,
-};
+DEFINE_SHOW_ATTRIBUTE(mvpp2_dbgfs_port_flow_hash_opt);
 
 static int mvpp2_dbgfs_port_flow_engine_show(struct seq_file *s, void *unused)
 {
 	struct mvpp2_dbgfs_port_flow_entry *entry = s->private;
 	struct mvpp2_port *port = entry->port;
 	struct mvpp2_cls_flow_entry fe;
-	struct mvpp2_cls_flow *f;
+	const struct mvpp2_cls_flow *f;
 	int flow_index, engine;
 
 	f = mvpp2_cls_flow_get(entry->dbg_fe->flow);
 	if (!f)
 		return -EINVAL;
 
-	flow_index = MVPP2_PORT_FLOW_HASH_ENTRY(entry->port->id, f->flow_id);
+	flow_index = MVPP2_CLS_FLT_HASH_ENTRY(entry->port->id, f->flow_id);
 
 	mvpp2_cls_flow_read(port->priv, flow_index, &fe);
 
@@ -203,11 +190,10 @@
 
 static int mvpp2_dbgfs_flow_c2_hits_show(struct seq_file *s, void *unused)
 {
-	struct mvpp2_port *port = s->private;
+	struct mvpp2_dbgfs_c2_entry *entry = s->private;
 	u32 hits;
 
-	hits = mvpp2_cls_c2_hit_count(port->priv,
-				      MVPP22_CLS_C2_RSS_ENTRY(port->id));
+	hits = mvpp2_cls_c2_hit_count(entry->priv, entry->id);
 
 	seq_printf(s, "%u\n", hits);
 
@@ -218,11 +204,11 @@
 
 static int mvpp2_dbgfs_flow_c2_rxq_show(struct seq_file *s, void *unused)
 {
-	struct mvpp2_port *port = s->private;
+	struct mvpp2_dbgfs_c2_entry *entry = s->private;
 	struct mvpp2_cls_c2_entry c2;
 	u8 qh, ql;
 
-	mvpp2_cls_c2_read(port->priv, MVPP22_CLS_C2_RSS_ENTRY(port->id), &c2);
+	mvpp2_cls_c2_read(entry->priv, entry->id, &c2);
 
 	qh = (c2.attr[0] >> MVPP22_CLS_C2_ATTR0_QHIGH_OFFS) &
 	     MVPP22_CLS_C2_ATTR0_QHIGH_MASK;
@@ -239,11 +225,11 @@
 
 static int mvpp2_dbgfs_flow_c2_enable_show(struct seq_file *s, void *unused)
 {
-	struct mvpp2_port *port = s->private;
+	struct mvpp2_dbgfs_c2_entry *entry = s->private;
 	struct mvpp2_cls_c2_entry c2;
 	int enabled;
 
-	mvpp2_cls_c2_read(port->priv, MVPP22_CLS_C2_RSS_ENTRY(port->id), &c2);
+	mvpp2_cls_c2_read(entry->priv, entry->id, &c2);
 
 	enabled = !!(c2.attr[2] & MVPP22_CLS_C2_ATTR2_RSS_EN);
 
@@ -456,25 +442,7 @@
 	return 0;
 }
 
-static int mvpp2_dbgfs_prs_valid_open(struct inode *inode, struct file *file)
-{
-	return single_open(file, mvpp2_dbgfs_prs_valid_show, inode->i_private);
-}
-
-static int mvpp2_dbgfs_prs_valid_release(struct inode *inode, struct file *file)
-{
-	struct seq_file *seq = file->private_data;
-	struct mvpp2_dbgfs_prs_entry *entry = seq->private;
-
-	kfree(entry);
-	return single_release(inode, file);
-}
-
-static const struct file_operations mvpp2_dbgfs_prs_valid_fops = {
-	.open = mvpp2_dbgfs_prs_valid_open,
-	.read = seq_read,
-	.release = mvpp2_dbgfs_prs_valid_release,
-};
+DEFINE_SHOW_ATTRIBUTE(mvpp2_dbgfs_prs_valid);
 
 static int mvpp2_dbgfs_flow_port_init(struct dentry *parent,
 				      struct mvpp2_port *port,
@@ -484,13 +452,8 @@
 	struct dentry *port_dir;
 
 	port_dir = debugfs_create_dir(port->dev->name, parent);
-	if (IS_ERR(port_dir))
-		return PTR_ERR(port_dir);
 
-	/* This will be freed by 'hash_opts' release op */
-	port_entry = kmalloc(sizeof(*port_entry), GFP_KERNEL);
-	if (!port_entry)
-		return -ENOMEM;
+	port_entry = &port->priv->dbgfs_entries->port_flow_entries[port->id];
 
 	port_entry->port = port;
 	port_entry->dbg_fe = entry;
@@ -515,20 +478,12 @@
 	sprintf(flow_entry_name, "%02d", flow);
 
 	flow_entry_dir = debugfs_create_dir(flow_entry_name, parent);
-	if (!flow_entry_dir)
-		return -ENOMEM;
 
-	/* This will be freed by 'type' release op */
-	entry = kmalloc(sizeof(*entry), GFP_KERNEL);
-	if (!entry)
-		return -ENOMEM;
+	entry = &priv->dbgfs_entries->flow_entries[flow];
 
 	entry->flow = flow;
 	entry->priv = priv;
 
-	debugfs_create_file("flow_hits", 0444, flow_entry_dir, entry,
-			    &mvpp2_dbgfs_flow_flt_hits_fops);
-
 	debugfs_create_file("dec_hits", 0444, flow_entry_dir, entry,
 			    &mvpp2_dbgfs_flow_dec_hits_fops);
 
@@ -545,6 +500,7 @@
 		if (ret)
 			return ret;
 	}
+
 	return 0;
 }
 
@@ -554,10 +510,8 @@
 	int i, ret;
 
 	flow_dir = debugfs_create_dir("flows", parent);
-	if (!flow_dir)
-		return -ENOMEM;
 
-	for (i = 0; i < MVPP2_N_FLOWS; i++) {
+	for (i = 0; i < MVPP2_N_PRS_FLOWS; i++) {
 		ret = mvpp2_dbgfs_flow_entry_init(flow_dir, priv, i);
 		if (ret)
 			return ret;
@@ -579,13 +533,8 @@
 	sprintf(prs_entry_name, "%03d", tid);
 
 	prs_entry_dir = debugfs_create_dir(prs_entry_name, parent);
-	if (!prs_entry_dir)
-		return -ENOMEM;
 
-	/* The 'valid' entry's ops will free that */
-	entry = kmalloc(sizeof(*entry), GFP_KERNEL);
-	if (!entry)
-		return -ENOMEM;
+	entry = &priv->dbgfs_entries->prs_entries[tid];
 
 	entry->tid = tid;
 	entry->priv = priv;
@@ -609,6 +558,9 @@
 	debugfs_create_file("hits", 0444, prs_entry_dir, entry,
 			    &mvpp2_dbgfs_prs_hits_fops);
 
+	debugfs_create_file("pmap", 0444, prs_entry_dir, entry,
+			     &mvpp2_dbgfs_prs_pmap_fops);
+
 	return 0;
 }
 
@@ -618,8 +570,6 @@
 	int i, ret;
 
 	prs_dir = debugfs_create_dir("parser", parent);
-	if (!prs_dir)
-		return -ENOMEM;
 
 	for (i = 0; i < MVPP2_PRS_TCAM_SRAM_SIZE; i++) {
 		ret = mvpp2_dbgfs_prs_entry_init(prs_dir, priv, i);
@@ -630,14 +580,104 @@
 	return 0;
 }
 
+static int mvpp2_dbgfs_c2_entry_init(struct dentry *parent,
+				     struct mvpp2 *priv, int id)
+{
+	struct mvpp2_dbgfs_c2_entry *entry;
+	struct dentry *c2_entry_dir;
+	char c2_entry_name[10];
+
+	if (id >= MVPP22_CLS_C2_N_ENTRIES)
+		return -EINVAL;
+
+	sprintf(c2_entry_name, "%03d", id);
+
+	c2_entry_dir = debugfs_create_dir(c2_entry_name, parent);
+	if (!c2_entry_dir)
+		return -ENOMEM;
+
+	entry = &priv->dbgfs_entries->c2_entries[id];
+
+	entry->id = id;
+	entry->priv = priv;
+
+	debugfs_create_file("hits", 0444, c2_entry_dir, entry,
+			    &mvpp2_dbgfs_flow_c2_hits_fops);
+
+	debugfs_create_file("default_rxq", 0444, c2_entry_dir, entry,
+			    &mvpp2_dbgfs_flow_c2_rxq_fops);
+
+	debugfs_create_file("rss_enable", 0444, c2_entry_dir, entry,
+			    &mvpp2_dbgfs_flow_c2_enable_fops);
+
+	return 0;
+}
+
+static int mvpp2_dbgfs_flow_tbl_entry_init(struct dentry *parent,
+					   struct mvpp2 *priv, int id)
+{
+	struct mvpp2_dbgfs_flow_tbl_entry *entry;
+	struct dentry *flow_tbl_entry_dir;
+	char flow_tbl_entry_name[10];
+
+	if (id >= MVPP2_CLS_FLOWS_TBL_SIZE)
+		return -EINVAL;
+
+	sprintf(flow_tbl_entry_name, "%03d", id);
+
+	flow_tbl_entry_dir = debugfs_create_dir(flow_tbl_entry_name, parent);
+	if (!flow_tbl_entry_dir)
+		return -ENOMEM;
+
+	entry = &priv->dbgfs_entries->flt_entries[id];
+
+	entry->id = id;
+	entry->priv = priv;
+
+	debugfs_create_file("hits", 0444, flow_tbl_entry_dir, entry,
+			    &mvpp2_dbgfs_flow_flt_hits_fops);
+
+	return 0;
+}
+
+static int mvpp2_dbgfs_cls_init(struct dentry *parent, struct mvpp2 *priv)
+{
+	struct dentry *cls_dir, *c2_dir, *flow_tbl_dir;
+	int i, ret;
+
+	cls_dir = debugfs_create_dir("classifier", parent);
+	if (!cls_dir)
+		return -ENOMEM;
+
+	c2_dir = debugfs_create_dir("c2", cls_dir);
+	if (!c2_dir)
+		return -ENOMEM;
+
+	for (i = 0; i < MVPP22_CLS_C2_N_ENTRIES; i++) {
+		ret = mvpp2_dbgfs_c2_entry_init(c2_dir, priv, i);
+		if (ret)
+			return ret;
+	}
+
+	flow_tbl_dir = debugfs_create_dir("flow_table", cls_dir);
+	if (!flow_tbl_dir)
+		return -ENOMEM;
+
+	for (i = 0; i < MVPP2_CLS_FLOWS_TBL_SIZE; i++) {
+		ret = mvpp2_dbgfs_flow_tbl_entry_init(flow_tbl_dir, priv, i);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
+
 static int mvpp2_dbgfs_port_init(struct dentry *parent,
 				 struct mvpp2_port *port)
 {
 	struct dentry *port_dir;
 
 	port_dir = debugfs_create_dir(port->dev->name, parent);
-	if (IS_ERR(port_dir))
-		return PTR_ERR(port_dir);
 
 	debugfs_create_file("parser_entries", 0444, port_dir, port,
 			    &mvpp2_dbgfs_port_parser_fops);
@@ -648,21 +688,14 @@
 	debugfs_create_file("vid_filter", 0444, port_dir, port,
 			    &mvpp2_dbgfs_port_vid_fops);
 
-	debugfs_create_file("c2_hits", 0444, port_dir, port,
-			    &mvpp2_dbgfs_flow_c2_hits_fops);
-
-	debugfs_create_file("default_rxq", 0444, port_dir, port,
-			    &mvpp2_dbgfs_flow_c2_rxq_fops);
-
-	debugfs_create_file("rss_enable", 0444, port_dir, port,
-			    &mvpp2_dbgfs_flow_c2_enable_fops);
-
 	return 0;
 }
 
 void mvpp2_dbgfs_cleanup(struct mvpp2 *priv)
 {
 	debugfs_remove_recursive(priv->dbgfs_dir);
+
+	kfree(priv->dbgfs_entries);
 }
 
 void mvpp2_dbgfs_init(struct mvpp2 *priv, const char *name)
@@ -671,22 +704,24 @@
 	int ret, i;
 
 	mvpp2_root = debugfs_lookup(MVPP2_DRIVER_NAME, NULL);
-	if (!mvpp2_root) {
+	if (!mvpp2_root)
 		mvpp2_root = debugfs_create_dir(MVPP2_DRIVER_NAME, NULL);
-		if (IS_ERR(mvpp2_root))
-			return;
-	}
 
 	mvpp2_dir = debugfs_create_dir(name, mvpp2_root);
-	if (IS_ERR(mvpp2_dir))
-		return;
 
 	priv->dbgfs_dir = mvpp2_dir;
+	priv->dbgfs_entries = kzalloc(sizeof(*priv->dbgfs_entries), GFP_KERNEL);
+	if (!priv->dbgfs_entries)
+		goto err;
 
 	ret = mvpp2_dbgfs_prs_init(mvpp2_dir, priv);
 	if (ret)
 		goto err;
 
+	ret = mvpp2_dbgfs_cls_init(mvpp2_dir, priv);
+	if (ret)
+		goto err;
+
 	for (i = 0; i < priv->port_count; i++) {
 		ret = mvpp2_dbgfs_port_init(mvpp2_dir, priv->port_list[i]);
 		if (ret)
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
index 6320e08..111b3b8 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
@@ -56,9 +56,9 @@
 /* The prototype is added here to be used in start_dev when using ACPI. This
  * will be removed once phylink is used for all modes (dt+ACPI).
  */
-static void mvpp2_mac_config(struct net_device *dev, unsigned int mode,
+static void mvpp2_mac_config(struct phylink_config *config, unsigned int mode,
 			     const struct phylink_link_state *state);
-static void mvpp2_mac_link_up(struct net_device *dev, unsigned int mode,
+static void mvpp2_mac_link_up(struct phylink_config *config, unsigned int mode,
 			      phy_interface_t interface, struct phy_device *phy);
 
 /* Queue modes */
@@ -82,13 +82,19 @@
 	return readl(priv->swth_base[0] + offset);
 }
 
-u32 mvpp2_read_relaxed(struct mvpp2 *priv, u32 offset)
+static u32 mvpp2_read_relaxed(struct mvpp2 *priv, u32 offset)
 {
 	return readl_relaxed(priv->swth_base[0] + offset);
 }
+
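+/* Map a CPU number to the software thread used for per-thread register
+ * accesses.
+ */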
+static inline u32 mvpp2_cpu_to_thread(struct mvpp2 *priv, int cpu)
+{
+	return cpu % priv->nthreads;
+}
+
 /* These accessors should be used to access:
  *
- * - per-CPU registers, where each CPU has its own copy of the
+ * - per-thread registers, where each thread has its own copy of the
  *   register.
  *
  *   MVPP2_BM_VIRT_ALLOC_REG
@@ -104,8 +110,8 @@
  *   MVPP2_TXQ_SENT_REG
  *   MVPP2_RXQ_NUM_REG
  *
- * - global registers that must be accessed through a specific CPU
- *   window, because they are related to an access to a per-CPU
+ * - global registers that must be accessed through a specific thread
+ *   window, because they are related to an access to a per-thread
  *   register
  *
  *   MVPP2_BM_PHY_ALLOC_REG    (related to MVPP2_BM_VIRT_ALLOC_REG)
@@ -122,28 +128,28 @@
  *   MVPP2_TXQ_PREF_BUF_REG    (related to MVPP2_TXQ_NUM_REG)
  *   MVPP2_TXQ_PREF_BUF_REG    (related to MVPP2_TXQ_NUM_REG)
  */
-void mvpp2_percpu_write(struct mvpp2 *priv, int cpu,
+static void mvpp2_thread_write(struct mvpp2 *priv, unsigned int thread,
 			       u32 offset, u32 data)
 {
-	writel(data, priv->swth_base[cpu] + offset);
+	writel(data, priv->swth_base[thread] + offset);
 }
 
-u32 mvpp2_percpu_read(struct mvpp2 *priv, int cpu,
+static u32 mvpp2_thread_read(struct mvpp2 *priv, unsigned int thread,
 			     u32 offset)
 {
-	return readl(priv->swth_base[cpu] + offset);
+	return readl(priv->swth_base[thread] + offset);
 }
 
-void mvpp2_percpu_write_relaxed(struct mvpp2 *priv, int cpu,
+static void mvpp2_thread_write_relaxed(struct mvpp2 *priv, unsigned int thread,
 				       u32 offset, u32 data)
 {
-	writel_relaxed(data, priv->swth_base[cpu] + offset);
+	writel_relaxed(data, priv->swth_base[thread] + offset);
 }
 
-static u32 mvpp2_percpu_read_relaxed(struct mvpp2 *priv, int cpu,
+static u32 mvpp2_thread_read_relaxed(struct mvpp2 *priv, unsigned int thread,
 				     u32 offset)
 {
-	return readl_relaxed(priv->swth_base[cpu] + offset);
+	return readl_relaxed(priv->swth_base[thread] + offset);
 }
 
 static dma_addr_t mvpp2_txdesc_dma_addr_get(struct mvpp2_port *port,
@@ -286,6 +292,26 @@
 		txq_pcpu->txq_put_index = 0;
 }
 
+/* Get number of maximum RXQ */
+static int mvpp2_get_nrxqs(struct mvpp2 *priv)
+{
+	unsigned int nrxqs;
+
+	if (priv->hw_version == MVPP22 && queue_mode == MVPP2_QDIST_SINGLE_MODE)
+		return 1;
+
+	/* According to the PPv2.2 datasheet and our experiments on
+	 * PPv2.1, RX queues have an allocation granularity of 4 (when
+	 * more than a single one on PPv2.2).
+	 * Round up to nearest multiple of 4.
+	 */
+	nrxqs = (num_possible_cpus() + 3) & ~0x3;
+	if (nrxqs > MVPP2_PORT_MAX_RXQ)
+		nrxqs = MVPP2_PORT_MAX_RXQ;
+
+	return nrxqs;
+}
+
 /* Get number of physical egress port */
 static inline int mvpp2_egress_port(struct mvpp2_port *port)
 {
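The rounding in mvpp2_get_nrxqs() above relies on the usual add-then-mask idiom: (n + 3) & ~0x3 rounds n up to the next multiple of 4, which is then clamped to MVPP2_PORT_MAX_RXQ. A minimal sketch of that arithmetic (plain C, with made-up CPU counts):

#include <stdio.h>

/* (n + 3) & ~0x3 rounds n up to the next multiple of 4, matching the RX
 * queue allocation granularity described in mvpp2_get_nrxqs().
 */
static unsigned int round_up_4(unsigned int n)
{
	return (n + 3) & ~0x3u;
}

int main(void)
{
	unsigned int cpus[] = { 1, 2, 4, 5, 8 };	/* assumed CPU counts */
	unsigned int i;

	for (i = 0; i < sizeof(cpus) / sizeof(cpus[0]); i++)
		printf("%u cpus -> %u rxqs\n", cpus[i], round_up_4(cpus[i]));
	return 0;
}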
@@ -317,8 +343,7 @@
 /* Buffer Manager configuration routines */
 
 /* Create pool */
-static int mvpp2_bm_pool_create(struct platform_device *pdev,
-				struct mvpp2 *priv,
+static int mvpp2_bm_pool_create(struct device *dev, struct mvpp2 *priv,
 				struct mvpp2_bm_pool *bm_pool, int size)
 {
 	u32 val;
@@ -337,7 +362,7 @@
 	else
 		bm_pool->size_bytes = 2 * sizeof(u64) * size;
 
-	bm_pool->virt_addr = dma_alloc_coherent(&pdev->dev, bm_pool->size_bytes,
+	bm_pool->virt_addr = dma_alloc_coherent(dev, bm_pool->size_bytes,
 						&bm_pool->dma_addr,
 						GFP_KERNEL);
 	if (!bm_pool->virt_addr)
@@ -345,9 +370,9 @@
 
 	if (!IS_ALIGNED((unsigned long)bm_pool->virt_addr,
 			MVPP2_BM_POOL_PTR_ALIGN)) {
-		dma_free_coherent(&pdev->dev, bm_pool->size_bytes,
+		dma_free_coherent(dev, bm_pool->size_bytes,
 				  bm_pool->virt_addr, bm_pool->dma_addr);
-		dev_err(&pdev->dev, "BM pool %d is not %d bytes aligned\n",
+		dev_err(dev, "BM pool %d is not %d bytes aligned\n",
 			bm_pool->id, MVPP2_BM_POOL_PTR_ALIGN);
 		return -ENOMEM;
 	}
@@ -385,17 +410,17 @@
 				    dma_addr_t *dma_addr,
 				    phys_addr_t *phys_addr)
 {
-	int cpu = get_cpu();
+	unsigned int thread = mvpp2_cpu_to_thread(priv, get_cpu());
 
-	*dma_addr = mvpp2_percpu_read(priv, cpu,
+	*dma_addr = mvpp2_thread_read(priv, thread,
 				      MVPP2_BM_PHY_ALLOC_REG(bm_pool->id));
-	*phys_addr = mvpp2_percpu_read(priv, cpu, MVPP2_BM_VIRT_ALLOC_REG);
+	*phys_addr = mvpp2_thread_read(priv, thread, MVPP2_BM_VIRT_ALLOC_REG);
 
 	if (priv->hw_version == MVPP22) {
 		u32 val;
 		u32 dma_addr_highbits, phys_addr_highbits;
 
-		val = mvpp2_percpu_read(priv, cpu, MVPP22_BM_ADDR_HIGH_ALLOC);
+		val = mvpp2_thread_read(priv, thread, MVPP22_BM_ADDR_HIGH_ALLOC);
 		dma_addr_highbits = (val & MVPP22_BM_ADDR_HIGH_PHYS_MASK);
 		phys_addr_highbits = (val & MVPP22_BM_ADDR_HIGH_VIRT_MASK) >>
 			MVPP22_BM_ADDR_HIGH_VIRT_SHIFT;
@@ -462,15 +487,14 @@
 }
 
 /* Cleanup pool */
-static int mvpp2_bm_pool_destroy(struct platform_device *pdev,
-				 struct mvpp2 *priv,
+static int mvpp2_bm_pool_destroy(struct device *dev, struct mvpp2 *priv,
 				 struct mvpp2_bm_pool *bm_pool)
 {
 	int buf_num;
 	u32 val;
 
 	buf_num = mvpp2_check_hw_buf_num(priv, bm_pool);
-	mvpp2_bm_bufs_free(&pdev->dev, priv, bm_pool, buf_num);
+	mvpp2_bm_bufs_free(dev, priv, bm_pool, buf_num);
 
 	/* Check buffer counters after free */
 	buf_num = mvpp2_check_hw_buf_num(priv, bm_pool);
@@ -484,24 +508,26 @@
 	val |= MVPP2_BM_STOP_MASK;
 	mvpp2_write(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id), val);
 
-	dma_free_coherent(&pdev->dev, bm_pool->size_bytes,
+	dma_free_coherent(dev, bm_pool->size_bytes,
 			  bm_pool->virt_addr,
 			  bm_pool->dma_addr);
 	return 0;
 }
 
-static int mvpp2_bm_pools_init(struct platform_device *pdev,
-			       struct mvpp2 *priv)
+static int mvpp2_bm_pools_init(struct device *dev, struct mvpp2 *priv)
 {
-	int i, err, size;
+	int i, err, size, poolnum = MVPP2_BM_POOLS_NUM;
 	struct mvpp2_bm_pool *bm_pool;
 
+	if (priv->percpu_pools)
+		poolnum = mvpp2_get_nrxqs(priv) * 2;
+
 	/* Create all pools with maximum size */
 	size = MVPP2_BM_POOL_SIZE_MAX;
-	for (i = 0; i < MVPP2_BM_POOLS_NUM; i++) {
+	for (i = 0; i < poolnum; i++) {
 		bm_pool = &priv->bm_pools[i];
 		bm_pool->id = i;
-		err = mvpp2_bm_pool_create(pdev, priv, bm_pool, size);
+		err = mvpp2_bm_pool_create(dev, priv, bm_pool, size);
 		if (err)
 			goto err_unroll_pools;
 		mvpp2_bm_pool_bufsize_set(priv, bm_pool, 0);
@@ -509,17 +535,23 @@
 	return 0;
 
 err_unroll_pools:
-	dev_err(&pdev->dev, "failed to create BM pool %d, size %d\n", i, size);
+	dev_err(dev, "failed to create BM pool %d, size %d\n", i, size);
 	for (i = i - 1; i >= 0; i--)
-		mvpp2_bm_pool_destroy(pdev, priv, &priv->bm_pools[i]);
+		mvpp2_bm_pool_destroy(dev, priv, &priv->bm_pools[i]);
 	return err;
 }
 
-static int mvpp2_bm_init(struct platform_device *pdev, struct mvpp2 *priv)
+static int mvpp2_bm_init(struct device *dev, struct mvpp2 *priv)
 {
-	int i, err;
+	int i, err, poolnum = MVPP2_BM_POOLS_NUM;
 
-	for (i = 0; i < MVPP2_BM_POOLS_NUM; i++) {
+	if (priv->percpu_pools)
+		poolnum = mvpp2_get_nrxqs(priv) * 2;
+
+	dev_info(dev, "using %d %s buffers\n", poolnum,
+		 priv->percpu_pools ? "per-cpu" : "shared");
+
+	for (i = 0; i < poolnum; i++) {
 		/* Mask BM all interrupts */
 		mvpp2_write(priv, MVPP2_BM_INTR_MASK_REG(i), 0);
 		/* Clear BM cause register */
@@ -527,12 +559,12 @@
 	}
 
 	/* Allocate and initialize BM pools */
-	priv->bm_pools = devm_kcalloc(&pdev->dev, MVPP2_BM_POOLS_NUM,
+	priv->bm_pools = devm_kcalloc(dev, poolnum,
 				      sizeof(*priv->bm_pools), GFP_KERNEL);
 	if (!priv->bm_pools)
 		return -ENOMEM;
 
-	err = mvpp2_bm_pools_init(pdev, priv);
+	err = mvpp2_bm_pools_init(dev, priv);
 	if (err < 0)
 		return err;
 	return 0;
@@ -626,7 +658,11 @@
 				     dma_addr_t buf_dma_addr,
 				     phys_addr_t buf_phys_addr)
 {
-	int cpu = get_cpu();
+	unsigned int thread = mvpp2_cpu_to_thread(port->priv, get_cpu());
+	unsigned long flags = 0;
+
+	if (test_bit(thread, &port->priv->lock_map))
+		spin_lock_irqsave(&port->bm_lock[thread], flags);
 
 	if (port->priv->hw_version == MVPP22) {
 		u32 val = 0;
@@ -640,7 +676,7 @@
 				<< MVPP22_BM_ADDR_HIGH_VIRT_RLS_SHIFT) &
 				MVPP22_BM_ADDR_HIGH_VIRT_RLS_MASK;
 
-		mvpp2_percpu_write_relaxed(port->priv, cpu,
+		mvpp2_thread_write_relaxed(port->priv, thread,
 					   MVPP22_BM_ADDR_HIGH_RLS_REG, val);
 	}
 
@@ -649,11 +685,14 @@
 	 * descriptor. Instead of storing the virtual address, we
 	 * store the physical address
 	 */
-	mvpp2_percpu_write_relaxed(port->priv, cpu,
+	mvpp2_thread_write_relaxed(port->priv, thread,
 				   MVPP2_BM_VIRT_RLS_REG, buf_phys_addr);
-	mvpp2_percpu_write_relaxed(port->priv, cpu,
+	mvpp2_thread_write_relaxed(port->priv, thread,
 				   MVPP2_BM_PHY_RLS_REG(pool), buf_dma_addr);
 
+	if (test_bit(thread, &port->priv->lock_map))
+		spin_unlock_irqrestore(&port->bm_lock[thread], flags);
+
 	put_cpu();
 }
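The test_bit(thread, &port->priv->lock_map) guards added above exist because the CPU-to-window mapping is not one-to-one: once two CPUs land on the same register window, the multi-register BM release sequence has to be serialised with the per-thread spinlock. How lock_map gets populated is outside this hunk (presumably in the probe path when more CPUs than windows are present, which is an assumption here); the sketch below only illustrates how such sharing arises (plain C, assumed topology):

#include <stdio.h>

/* Counts how many CPUs fold onto each register window; any window shared
 * by more than one CPU would need the per-thread spinlock around the
 * indirect register sequences (conceptual sketch only, not driver code).
 */
int main(void)
{
	unsigned int nthreads = 4, ncpus = 8;	/* assumed topology */
	unsigned int per_window[4] = { 0 };
	unsigned int cpu, t;

	for (cpu = 0; cpu < ncpus; cpu++)
		per_window[cpu % nthreads]++;

	for (t = 0; t < nthreads; t++)
		printf("window %u: %u CPUs -> %s\n", t, per_window[t],
		       per_window[t] > 1 ? "needs locking" : "lock-free");
	return 0;
}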
 
@@ -666,6 +705,13 @@
 	phys_addr_t phys_addr;
 	void *buf;
 
+	if (port->priv->percpu_pools &&
+	    bm_pool->pkt_size > MVPP2_BM_LONG_PKT_SIZE) {
+		netdev_err(port->dev,
+			   "attempted to use jumbo frames with per-cpu pools");
+		return 0;
+	}
+
 	buf_size = MVPP2_RX_BUF_SIZE(bm_pool->pkt_size);
 	total_size = MVPP2_RX_TOTAL_SIZE(buf_size);
 
@@ -709,7 +755,64 @@
 	struct mvpp2_bm_pool *new_pool = &port->priv->bm_pools[pool];
 	int num;
 
-	if (pool >= MVPP2_BM_POOLS_NUM) {
+	if ((port->priv->percpu_pools && pool > mvpp2_get_nrxqs(port->priv) * 2) ||
+	    (!port->priv->percpu_pools && pool >= MVPP2_BM_POOLS_NUM)) {
+		netdev_err(port->dev, "Invalid pool %d\n", pool);
+		return NULL;
+	}
+
+	/* Allocate buffers in case BM pool is used as long pool, but packet
+	 * size doesn't match MTU or BM pool hasn't been used yet
+	 */
+	if (new_pool->pkt_size == 0) {
+		int pkts_num;
+
+		/* Set default buffer number or free all the buffers in case
+		 * the pool is not empty
+		 */
+		pkts_num = new_pool->buf_num;
+		if (pkts_num == 0) {
+			if (port->priv->percpu_pools) {
+				if (pool < port->nrxqs)
+					pkts_num = mvpp2_pools[MVPP2_BM_SHORT].buf_num;
+				else
+					pkts_num = mvpp2_pools[MVPP2_BM_LONG].buf_num;
+			} else {
+				pkts_num = mvpp2_pools[pool].buf_num;
+			}
+		} else {
+			mvpp2_bm_bufs_free(port->dev->dev.parent,
+					   port->priv, new_pool, pkts_num);
+		}
+
+		new_pool->pkt_size = pkt_size;
+		new_pool->frag_size =
+			SKB_DATA_ALIGN(MVPP2_RX_BUF_SIZE(pkt_size)) +
+			MVPP2_SKB_SHINFO_SIZE;
+
+		/* Allocate buffers for this pool */
+		num = mvpp2_bm_bufs_add(port, new_pool, pkts_num);
+		if (num != pkts_num) {
+			WARN(1, "pool %d: %d of %d allocated\n",
+			     new_pool->id, num, pkts_num);
+			return NULL;
+		}
+	}
+
+	mvpp2_bm_pool_bufsize_set(port->priv, new_pool,
+				  MVPP2_RX_BUF_SIZE(new_pool->pkt_size));
+
+	return new_pool;
+}
+
+static struct mvpp2_bm_pool *
+mvpp2_bm_pool_use_percpu(struct mvpp2_port *port, int type,
+			 unsigned int pool, int pkt_size)
+{
+	struct mvpp2_bm_pool *new_pool = &port->priv->bm_pools[pool];
+	int num;
+
+	if (pool > port->nrxqs * 2) {
 		netdev_err(port->dev, "Invalid pool %d\n", pool);
 		return NULL;
 	}
@@ -725,7 +828,7 @@
 		 */
 		pkts_num = new_pool->buf_num;
 		if (pkts_num == 0)
-			pkts_num = mvpp2_pools[pool].buf_num;
+			pkts_num = mvpp2_pools[type].buf_num;
 		else
 			mvpp2_bm_bufs_free(port->dev->dev.parent,
 					   port->priv, new_pool, pkts_num);
@@ -750,11 +853,11 @@
 	return new_pool;
 }
 
-/* Initialize pools for swf */
-static int mvpp2_swf_bm_pool_init(struct mvpp2_port *port)
+/* Initialize pools for swf, shared buffers variant */
+static int mvpp2_swf_bm_pool_init_shared(struct mvpp2_port *port)
 {
-	int rxq;
 	enum mvpp2_bm_pool_log_num long_log_pool, short_log_pool;
+	int rxq;
 
 	/* If port pkt_size is higher than 1518B:
 	 * HW Long pool - SW Jumbo pool, HW Short pool - SW Long pool
@@ -798,12 +901,76 @@
 	return 0;
 }
 
+/* Initialize pools for swf, percpu buffers variant */
+static int mvpp2_swf_bm_pool_init_percpu(struct mvpp2_port *port)
+{
+	struct mvpp2_bm_pool *p;
+	int i;
+
+	for (i = 0; i < port->nrxqs; i++) {
+		p = mvpp2_bm_pool_use_percpu(port, MVPP2_BM_SHORT, i,
+					     mvpp2_pools[MVPP2_BM_SHORT].pkt_size);
+		if (!p)
+			return -ENOMEM;
+
+		port->priv->bm_pools[i].port_map |= BIT(port->id);
+		mvpp2_rxq_short_pool_set(port, i, port->priv->bm_pools[i].id);
+	}
+
+	for (i = 0; i < port->nrxqs; i++) {
+		p = mvpp2_bm_pool_use_percpu(port, MVPP2_BM_LONG, i + port->nrxqs,
+					     mvpp2_pools[MVPP2_BM_LONG].pkt_size);
+		if (!p)
+			return -ENOMEM;
+
+		port->priv->bm_pools[i + port->nrxqs].port_map |= BIT(port->id);
+		mvpp2_rxq_long_pool_set(port, i,
+					port->priv->bm_pools[i + port->nrxqs].id);
+	}
+
+	port->pool_long = NULL;
+	port->pool_short = NULL;
+
+	return 0;
+}
+
+static int mvpp2_swf_bm_pool_init(struct mvpp2_port *port)
+{
+	if (port->priv->percpu_pools)
+		return mvpp2_swf_bm_pool_init_percpu(port);
+	else
+		return mvpp2_swf_bm_pool_init_shared(port);
+}
+
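In the per-cpu variant above, pool ids are laid out as one short pool per rxq followed by one long pool per rxq, so rxq i is served by pools i and i + nrxqs. A quick sketch of that id layout (plain C, assuming four RX queues):

#include <stdio.h>

int main(void)
{
	unsigned int nrxqs = 4;	/* assumption: 4 RX queues on the port */
	unsigned int i;

	/* Mirrors mvpp2_swf_bm_pool_init_percpu(): short pools come first,
	 * long pools follow, both indexed by rxq.
	 */
	for (i = 0; i < nrxqs; i++)
		printf("rxq %u: short pool %u, long pool %u\n",
		       i, i, i + nrxqs);
	return 0;
}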
+static void mvpp2_set_hw_csum(struct mvpp2_port *port,
+			      enum mvpp2_bm_pool_log_num new_long_pool)
+{
+	const netdev_features_t csums = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
+
+	/* Update L4 checksum when jumbo enable/disable on port.
+	 * Only port 0 supports hardware checksum offload due to
+	 * the Tx FIFO size limitation.
+	 * Also, don't set NETIF_F_HW_CSUM because L3_offset in TX descriptor
+	 * has 7 bits, so the maximum L3 offset is 128.
+	 */
+	if (new_long_pool == MVPP2_BM_JUMBO && port->id != 0) {
+		port->dev->features &= ~csums;
+		port->dev->hw_features &= ~csums;
+	} else {
+		port->dev->features |= csums;
+		port->dev->hw_features |= csums;
+	}
+}
+
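mvpp2_set_hw_csum() above encodes the two constraints spelled out in its comment: only GOP port 0 keeps L4 checksum offload once the jumbo long pool is selected, and full NETIF_F_HW_CSUM is avoided because the descriptor's 7-bit L3 offset field tops out at 1 << 7 = 128 bytes. A tiny model of that decision (plain C; the boolean stands in for the real netdev feature bits):

#include <stdio.h>
#include <stdbool.h>

#define L3_OFFSET_BITS	7	/* per the comment above: 7-bit L3 offset field */

/* Stand-in for the driver's rule: keep IP/IPv6 checksum offload unless a
 * port other than 0 is moved onto the jumbo long pool.
 */
static bool keep_csum_offload(int port_id, bool jumbo_long_pool)
{
	return !(jumbo_long_pool && port_id != 0);
}

int main(void)
{
	printf("max L3 offset: %d bytes\n", 1 << L3_OFFSET_BITS);
	printf("port 0, jumbo pool:  csum %s\n",
	       keep_csum_offload(0, true) ? "kept" : "dropped");
	printf("port 1, jumbo pool:  csum %s\n",
	       keep_csum_offload(1, true) ? "kept" : "dropped");
	printf("port 1, normal pool: csum %s\n",
	       keep_csum_offload(1, false) ? "kept" : "dropped");
	return 0;
}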
 static int mvpp2_bm_update_mtu(struct net_device *dev, int mtu)
 {
 	struct mvpp2_port *port = netdev_priv(dev);
 	enum mvpp2_bm_pool_log_num new_long_pool;
 	int pkt_size = MVPP2_RX_PKT_SIZE(mtu);
 
+	if (port->priv->percpu_pools)
+		goto out_set;
+
 	/* If port MTU is higher than 1518B:
 	 * HW Long pool - SW Jumbo pool, HW Short pool - SW Long pool
 	 * else: HW Long pool - SW Long pool, HW Short pool - SW Short pool
@@ -830,17 +997,10 @@
 		/* Add port to new short & long pool */
 		mvpp2_swf_bm_pool_init(port);
 
-		/* Update L4 checksum when jumbo enable/disable on port */
-		if (new_long_pool == MVPP2_BM_JUMBO && port->id != 0) {
-			dev->features &= ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
-			dev->hw_features &= ~(NETIF_F_IP_CSUM |
-					      NETIF_F_IPV6_CSUM);
-		} else {
-			dev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
-			dev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
-		}
+		mvpp2_set_hw_csum(port, new_long_pool);
 	}
 
+out_set:
 	dev->mtu = mtu;
 	dev->wanted_features = dev->features;
 
@@ -886,7 +1046,7 @@
 		    MVPP2_ISR_DISABLE_INTERRUPT(qvec->sw_thread_mask));
 }
 
-/* Mask the current CPU's Rx/Tx interrupts
+/* Mask the current thread's Rx/Tx interrupts
  * Called by on_each_cpu(), guaranteed to run with migration disabled,
  * using smp_processor_id() is OK.
  */
@@ -894,11 +1054,16 @@
 {
 	struct mvpp2_port *port = arg;
 
-	mvpp2_percpu_write(port->priv, smp_processor_id(),
+	/* If the thread isn't used, don't do anything */
+	if (smp_processor_id() > port->priv->nthreads)
+		return;
+
+	mvpp2_thread_write(port->priv,
+			   mvpp2_cpu_to_thread(port->priv, smp_processor_id()),
 			   MVPP2_ISR_RX_TX_MASK_REG(port->id), 0);
 }
 
-/* Unmask the current CPU's Rx/Tx interrupts.
+/* Unmask the current thread's Rx/Tx interrupts.
  * Called by on_each_cpu(), guaranteed to run with migration disabled,
  * using smp_processor_id() is OK.
  */
@@ -907,12 +1072,17 @@
 	struct mvpp2_port *port = arg;
 	u32 val;
 
+	/* If the thread isn't used, don't do anything */
+	if (smp_processor_id() > port->priv->nthreads)
+		return;
+
 	val = MVPP2_CAUSE_MISC_SUM_MASK |
-		MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK;
+		MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK(port->priv->hw_version);
 	if (port->has_tx_irqs)
 		val |= MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK;
 
-	mvpp2_percpu_write(port->priv, smp_processor_id(),
+	mvpp2_thread_write(port->priv,
+			   mvpp2_cpu_to_thread(port->priv, smp_processor_id()),
 			   MVPP2_ISR_RX_TX_MASK_REG(port->id), val);
 }
 
@@ -928,7 +1098,7 @@
 	if (mask)
 		val = 0;
 	else
-		val = MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK;
+		val = MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK(MVPP22);
 
 	for (i = 0; i < port->nqvecs; i++) {
 		struct mvpp2_queue_vector *v = port->qvecs + i;
@@ -936,12 +1106,17 @@
 		if (v->type != MVPP2_QUEUE_VECTOR_SHARED)
 			continue;
 
-		mvpp2_percpu_write(port->priv, v->sw_thread_id,
+		mvpp2_thread_write(port->priv, v->sw_thread_id,
 				   MVPP2_ISR_RX_TX_MASK_REG(port->id), val);
 	}
 }
 
 /* Port configuration routines */
+static bool mvpp2_is_xlg(phy_interface_t interface)
+{
+	return interface == PHY_INTERFACE_MODE_10GKR ||
+	       interface == PHY_INTERFACE_MODE_XAUI;
+}
 
 static void mvpp22_gop_init_rgmii(struct mvpp2_port *port)
 {
@@ -987,27 +1162,20 @@
 	void __iomem *xpcs = priv->iface_base + MVPP22_XPCS_BASE(port->gop_id);
 	u32 val;
 
-	/* XPCS */
 	val = readl(xpcs + MVPP22_XPCS_CFG0);
 	val &= ~(MVPP22_XPCS_CFG0_PCS_MODE(0x3) |
 		 MVPP22_XPCS_CFG0_ACTIVE_LANE(0x3));
 	val |= MVPP22_XPCS_CFG0_ACTIVE_LANE(2);
 	writel(val, xpcs + MVPP22_XPCS_CFG0);
 
-	/* MPCS */
 	val = readl(mpcs + MVPP22_MPCS_CTRL);
 	val &= ~MVPP22_MPCS_CTRL_FWD_ERR_CONN;
 	writel(val, mpcs + MVPP22_MPCS_CTRL);
 
 	val = readl(mpcs + MVPP22_MPCS_CLK_RESET);
-	val &= ~(MVPP22_MPCS_CLK_RESET_DIV_RATIO(0x7) | MAC_CLK_RESET_MAC |
-		 MAC_CLK_RESET_SD_RX | MAC_CLK_RESET_SD_TX);
+	val &= ~MVPP22_MPCS_CLK_RESET_DIV_RATIO(0x7);
 	val |= MVPP22_MPCS_CLK_RESET_DIV_RATIO(1);
 	writel(val, mpcs + MVPP22_MPCS_CLK_RESET);
-
-	val &= ~MVPP22_MPCS_CLK_RESET_DIV_SET;
-	val |= MAC_CLK_RESET_MAC | MAC_CLK_RESET_SD_RX | MAC_CLK_RESET_SD_TX;
-	writel(val, mpcs + MVPP22_MPCS_CLK_RESET);
 }
 
 static int mvpp22_gop_init(struct mvpp2_port *port)
@@ -1067,9 +1235,8 @@
 	u32 val;
 
 	if (phy_interface_mode_is_rgmii(port->phy_interface) ||
-	    port->phy_interface == PHY_INTERFACE_MODE_SGMII ||
-	    port->phy_interface == PHY_INTERFACE_MODE_1000BASEX ||
-	    port->phy_interface == PHY_INTERFACE_MODE_2500BASEX) {
+	    phy_interface_mode_is_8023z(port->phy_interface) ||
+	    port->phy_interface == PHY_INTERFACE_MODE_SGMII) {
 		/* Enable the GMAC link status irq for this port */
 		val = readl(port->base + MVPP22_GMAC_INT_SUM_MASK);
 		val |= MVPP22_GMAC_INT_SUM_MASK_LINK_STAT;
@@ -1079,7 +1246,7 @@
 	if (port->gop_id == 0) {
 		/* Enable the XLG/GIG irqs for this port */
 		val = readl(port->base + MVPP22_XLG_EXT_INT_MASK);
-		if (port->phy_interface == PHY_INTERFACE_MODE_10GKR)
+		if (mvpp2_is_xlg(port->phy_interface))
 			val |= MVPP22_XLG_EXT_INT_MASK_XLG;
 		else
 			val |= MVPP22_XLG_EXT_INT_MASK_GIG;
@@ -1099,9 +1266,8 @@
 	}
 
 	if (phy_interface_mode_is_rgmii(port->phy_interface) ||
-	    port->phy_interface == PHY_INTERFACE_MODE_SGMII ||
-	    port->phy_interface == PHY_INTERFACE_MODE_1000BASEX ||
-	    port->phy_interface == PHY_INTERFACE_MODE_2500BASEX) {
+	    phy_interface_mode_is_8023z(port->phy_interface) ||
+	    port->phy_interface == PHY_INTERFACE_MODE_SGMII) {
 		val = readl(port->base + MVPP22_GMAC_INT_SUM_MASK);
 		val &= ~MVPP22_GMAC_INT_SUM_MASK_LINK_STAT;
 		writel(val, port->base + MVPP22_GMAC_INT_SUM_MASK);
@@ -1112,10 +1278,10 @@
 {
 	u32 val;
 
-	if (phy_interface_mode_is_rgmii(port->phy_interface) ||
-	    port->phy_interface == PHY_INTERFACE_MODE_SGMII ||
-	    port->phy_interface == PHY_INTERFACE_MODE_1000BASEX ||
-	    port->phy_interface == PHY_INTERFACE_MODE_2500BASEX) {
+	if (port->phylink ||
+	    phy_interface_mode_is_rgmii(port->phy_interface) ||
+	    phy_interface_mode_is_8023z(port->phy_interface) ||
+	    port->phy_interface == PHY_INTERFACE_MODE_SGMII) {
 		val = readl(port->base + MVPP22_GMAC_INT_MASK);
 		val |= MVPP22_GMAC_INT_MASK_LINK_STAT;
 		writel(val, port->base + MVPP22_GMAC_INT_MASK);
@@ -1142,28 +1308,13 @@
  */
 static int mvpp22_comphy_init(struct mvpp2_port *port)
 {
-	enum phy_mode mode;
 	int ret;
 
 	if (!port->comphy)
 		return 0;
 
-	switch (port->phy_interface) {
-	case PHY_INTERFACE_MODE_SGMII:
-	case PHY_INTERFACE_MODE_1000BASEX:
-		mode = PHY_MODE_SGMII;
-		break;
-	case PHY_INTERFACE_MODE_2500BASEX:
-		mode = PHY_MODE_2500SGMII;
-		break;
-	case PHY_INTERFACE_MODE_10GKR:
-		mode = PHY_MODE_10GKR;
-		break;
-	default:
-		return -EINVAL;
-	}
-
-	ret = phy_set_mode(port->comphy, mode);
+	ret = phy_set_mode_ext(port->comphy, PHY_MODE_ETHERNET,
+			       port->phy_interface);
 	if (ret)
 		return ret;
 
@@ -1175,12 +1326,9 @@
 	u32 val;
 
 	/* Only GOP port 0 has an XLG MAC */
-	if (port->gop_id == 0 &&
-	    (port->phy_interface == PHY_INTERFACE_MODE_XAUI ||
-	     port->phy_interface == PHY_INTERFACE_MODE_10GKR)) {
+	if (port->gop_id == 0 && mvpp2_is_xlg(port->phy_interface)) {
 		val = readl(port->base + MVPP22_XLG_CTRL0_REG);
-		val |= MVPP22_XLG_CTRL0_PORT_EN |
-		       MVPP22_XLG_CTRL0_MAC_RESET_DIS;
+		val |= MVPP22_XLG_CTRL0_PORT_EN;
 		val &= ~MVPP22_XLG_CTRL0_MIB_CNT_DIS;
 		writel(val, port->base + MVPP22_XLG_CTRL0_REG);
 	} else {
@@ -1196,21 +1344,15 @@
 	u32 val;
 
 	/* Only GOP port 0 has an XLG MAC */
-	if (port->gop_id == 0 &&
-	    (port->phy_interface == PHY_INTERFACE_MODE_XAUI ||
-	     port->phy_interface == PHY_INTERFACE_MODE_10GKR)) {
+	if (port->gop_id == 0 && mvpp2_is_xlg(port->phy_interface)) {
 		val = readl(port->base + MVPP22_XLG_CTRL0_REG);
 		val &= ~MVPP22_XLG_CTRL0_PORT_EN;
 		writel(val, port->base + MVPP22_XLG_CTRL0_REG);
-
-		/* Disable & reset should be done separately */
-		val &= ~MVPP22_XLG_CTRL0_MAC_RESET_DIS;
-		writel(val, port->base + MVPP22_XLG_CTRL0_REG);
-	} else {
-		val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
-		val &= ~(MVPP2_GMAC_PORT_EN_MASK);
-		writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);
 	}
+
+	val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
+	val &= ~(MVPP2_GMAC_PORT_EN_MASK);
+	writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);
 }
 
 /* Set IEEE 802.3x Flow Control Xon Packet Transmission Mode */
@@ -1236,9 +1378,8 @@
 	else
 		val &= ~MVPP2_GMAC_GMII_LB_EN_MASK;
 
-	if (port->phy_interface == PHY_INTERFACE_MODE_SGMII ||
-	    port->phy_interface == PHY_INTERFACE_MODE_1000BASEX ||
-	    port->phy_interface == PHY_INTERFACE_MODE_2500BASEX)
+	if (phy_interface_mode_is_8023z(port->phy_interface) ||
+	    port->phy_interface == PHY_INTERFACE_MODE_SGMII)
 		val |= MVPP2_GMAC_PCS_LB_EN_MASK;
 	else
 		val &= ~MVPP2_GMAC_PCS_LB_EN_MASK;
@@ -1264,6 +1405,17 @@
 	return val;
 }
 
+/* Some counters are accessed indirectly by first writing an index to
+ * MVPP2_CTRS_IDX. The index can represent various resources depending on the
+ * register we access: it can be a hit counter for some classification tables,
+ * a counter specific to a rxq, a txq or a buffer pool.
+ */
+static u32 mvpp2_read_index(struct mvpp2 *priv, u32 index, u32 reg)
+{
+	mvpp2_write(priv, MVPP2_CTRS_IDX, index);
+	return mvpp2_read(priv, reg);
+}
+
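mvpp2_read_index() above is an index/data register pair: the caller latches a selector into MVPP2_CTRS_IDX and the following read returns whichever counter that selector points at. A toy model of the same access pattern (plain userspace C over a fake register file; the helper names are invented for illustration):

#include <stdio.h>
#include <stdint.h>

/* Fake "hardware": an index register selects which counter the data read
 * returns, mimicking MVPP2_CTRS_IDX followed by a counter register read.
 */
static uint32_t counters[16];
static uint32_t idx_reg;

static void write_idx(uint32_t index)	{ idx_reg = index; }
static uint32_t read_ctr(void)		{ return counters[idx_reg]; }

/* Same shape as mvpp2_read_index(): select first, then read. */
static uint32_t read_index(uint32_t index)
{
	write_idx(index);
	return read_ctr();
}

int main(void)
{
	counters[3] = 42;	/* pretend the hardware counted 42 events on queue 3 */
	printf("counter[3] = %u\n", (unsigned int)read_index(3));
	return 0;
}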
 /* Due to the fact that software statistics and hardware statistics are, by
  * design, incremented at different moments in the chain of packet processing,
  * it is very likely that incoming packets could have been dropped after being
@@ -1273,7 +1425,7 @@
  * Hence, statistics gathered from userspace with ifconfig (software) and
  * ethtool (hardware) cannot be compared.
  */
-static const struct mvpp2_ethtool_counter mvpp2_ethtool_regs[] = {
+static const struct mvpp2_ethtool_counter mvpp2_ethtool_mib_regs[] = {
 	{ MVPP2_MIB_GOOD_OCTETS_RCVD, "good_octets_received", true },
 	{ MVPP2_MIB_BAD_OCTETS_RCVD, "bad_octets_received" },
 	{ MVPP2_MIB_CRC_ERRORS_SENT, "crc_errors_sent" },
@@ -1303,16 +1455,103 @@
 	{ MVPP2_MIB_LATE_COLLISION, "late_collision" },
 };
 
+static const struct mvpp2_ethtool_counter mvpp2_ethtool_port_regs[] = {
+	{ MVPP2_OVERRUN_ETH_DROP, "rx_fifo_or_parser_overrun_drops" },
+	{ MVPP2_CLS_ETH_DROP, "rx_classifier_drops" },
+};
+
+static const struct mvpp2_ethtool_counter mvpp2_ethtool_txq_regs[] = {
+	{ MVPP2_TX_DESC_ENQ_CTR, "txq_%d_desc_enqueue" },
+	{ MVPP2_TX_DESC_ENQ_TO_DDR_CTR, "txq_%d_desc_enqueue_to_ddr" },
+	{ MVPP2_TX_BUFF_ENQ_TO_DDR_CTR, "txq_%d_buff_euqueue_to_ddr" },
+	{ MVPP2_TX_DESC_ENQ_HW_FWD_CTR, "txq_%d_desc_hardware_forwarded" },
+	{ MVPP2_TX_PKTS_DEQ_CTR, "txq_%d_packets_dequeued" },
+	{ MVPP2_TX_PKTS_FULL_QUEUE_DROP_CTR, "txq_%d_queue_full_drops" },
+	{ MVPP2_TX_PKTS_EARLY_DROP_CTR, "txq_%d_packets_early_drops" },
+	{ MVPP2_TX_PKTS_BM_DROP_CTR, "txq_%d_packets_bm_drops" },
+	{ MVPP2_TX_PKTS_BM_MC_DROP_CTR, "txq_%d_packets_rep_bm_drops" },
+};
+
+static const struct mvpp2_ethtool_counter mvpp2_ethtool_rxq_regs[] = {
+	{ MVPP2_RX_DESC_ENQ_CTR, "rxq_%d_desc_enqueue" },
+	{ MVPP2_RX_PKTS_FULL_QUEUE_DROP_CTR, "rxq_%d_queue_full_drops" },
+	{ MVPP2_RX_PKTS_EARLY_DROP_CTR, "rxq_%d_packets_early_drops" },
+	{ MVPP2_RX_PKTS_BM_DROP_CTR, "rxq_%d_packets_bm_drops" },
+};
+
+#define MVPP2_N_ETHTOOL_STATS(ntxqs, nrxqs)	(ARRAY_SIZE(mvpp2_ethtool_mib_regs) + \
+						 ARRAY_SIZE(mvpp2_ethtool_port_regs) + \
+						 (ARRAY_SIZE(mvpp2_ethtool_txq_regs) * (ntxqs)) + \
+						 (ARRAY_SIZE(mvpp2_ethtool_rxq_regs) * (nrxqs)))
+
 static void mvpp2_ethtool_get_strings(struct net_device *netdev, u32 sset,
 				      u8 *data)
 {
-	if (sset == ETH_SS_STATS) {
-		int i;
+	struct mvpp2_port *port = netdev_priv(netdev);
+	int i, q;
 
-		for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_regs); i++)
-			memcpy(data + i * ETH_GSTRING_LEN,
-			       &mvpp2_ethtool_regs[i].string, ETH_GSTRING_LEN);
+	if (sset != ETH_SS_STATS)
+		return;
+
+	for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_mib_regs); i++) {
+		strscpy(data, mvpp2_ethtool_mib_regs[i].string,
+			ETH_GSTRING_LEN);
+		data += ETH_GSTRING_LEN;
 	}
+
+	for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_port_regs); i++) {
+		strscpy(data, mvpp2_ethtool_port_regs[i].string,
+			ETH_GSTRING_LEN);
+		data += ETH_GSTRING_LEN;
+	}
+
+	for (q = 0; q < port->ntxqs; q++) {
+		for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_txq_regs); i++) {
+			snprintf(data, ETH_GSTRING_LEN,
+				 mvpp2_ethtool_txq_regs[i].string, q);
+			data += ETH_GSTRING_LEN;
+		}
+	}
+
+	for (q = 0; q < port->nrxqs; q++) {
+		for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_rxq_regs); i++) {
+			snprintf(data, ETH_GSTRING_LEN,
+				 mvpp2_ethtool_rxq_regs[i].string,
+				 q);
+			data += ETH_GSTRING_LEN;
+		}
+	}
+}
+
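The new mvpp2_ethtool_get_strings() above mixes fixed names (MIB and per-port counters) with printf-style templates expanded once per txq/rxq. A short sketch of that expansion, assuming ETH_GSTRING_LEN is 32 and reusing two of the template names from the tables above:

#include <stdio.h>

#define GSTRING_LEN 32	/* assumed value of ETH_GSTRING_LEN */

static const char * const txq_tmpl[] = {
	"txq_%d_desc_enqueue",
	"txq_%d_packets_dequeued",
};

int main(void)
{
	char name[GSTRING_LEN];
	size_t i;
	int q, ntxqs = 2;	/* assumption: two TX queues */

	for (q = 0; q < ntxqs; q++)
		for (i = 0; i < sizeof(txq_tmpl) / sizeof(txq_tmpl[0]); i++) {
			/* same per-queue snprintf() idea as the driver */
			snprintf(name, sizeof(name), txq_tmpl[i], q);
			printf("%s\n", name);
		}
	return 0;
}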
+static void mvpp2_read_stats(struct mvpp2_port *port)
+{
+	u64 *pstats;
+	int i, q;
+
+	pstats = port->ethtool_stats;
+
+	for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_mib_regs); i++)
+		*pstats++ += mvpp2_read_count(port, &mvpp2_ethtool_mib_regs[i]);
+
+	for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_port_regs); i++)
+		*pstats++ += mvpp2_read(port->priv,
+					mvpp2_ethtool_port_regs[i].offset +
+					4 * port->id);
+
+	for (q = 0; q < port->ntxqs; q++)
+		for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_txq_regs); i++)
+			*pstats++ += mvpp2_read_index(port->priv,
+						      MVPP22_CTRS_TX_CTR(port->id, i),
+						      mvpp2_ethtool_txq_regs[i].offset);
+
+	/* Rxqs are numbered from 0 from the user's standpoint, but not from the
+	 * driver's. We need to add the port->first_rxq offset.
+	 */
+	for (q = 0; q < port->nrxqs; q++)
+		for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_rxq_regs); i++)
+			*pstats++ += mvpp2_read_index(port->priv,
+						      port->first_rxq + i,
+						      mvpp2_ethtool_rxq_regs[i].offset);
 }
 
 static void mvpp2_gather_hw_statistics(struct work_struct *work)
@@ -1320,14 +1559,10 @@
 	struct delayed_work *del_work = to_delayed_work(work);
 	struct mvpp2_port *port = container_of(del_work, struct mvpp2_port,
 					       stats_work);
-	u64 *pstats;
-	int i;
 
 	mutex_lock(&port->gather_stats_lock);
 
-	pstats = port->ethtool_stats;
-	for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_regs); i++)
-		*pstats++ += mvpp2_read_count(port, &mvpp2_ethtool_regs[i]);
+	mvpp2_read_stats(port);
 
 	/* No need to read again the counters right after this function if it
 	 * was called asynchronously by the user (ie. use of ethtool).
@@ -1351,34 +1586,84 @@
 
 	mutex_lock(&port->gather_stats_lock);
 	memcpy(data, port->ethtool_stats,
-	       sizeof(u64) * ARRAY_SIZE(mvpp2_ethtool_regs));
+	       sizeof(u64) * MVPP2_N_ETHTOOL_STATS(port->ntxqs, port->nrxqs));
 	mutex_unlock(&port->gather_stats_lock);
 }
 
 static int mvpp2_ethtool_get_sset_count(struct net_device *dev, int sset)
 {
+	struct mvpp2_port *port = netdev_priv(dev);
+
 	if (sset == ETH_SS_STATS)
-		return ARRAY_SIZE(mvpp2_ethtool_regs);
+		return MVPP2_N_ETHTOOL_STATS(port->ntxqs, port->nrxqs);
 
 	return -EOPNOTSUPP;
 }
 
-static void mvpp2_port_reset(struct mvpp2_port *port)
+static void mvpp2_mac_reset_assert(struct mvpp2_port *port)
 {
 	u32 val;
-	unsigned int i;
 
-	/* Read the GOP statistics to reset the hardware counters */
-	for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_regs); i++)
-		mvpp2_read_count(port, &mvpp2_ethtool_regs[i]);
-
-	val = readl(port->base + MVPP2_GMAC_CTRL_2_REG) &
-		    ~MVPP2_GMAC_PORT_RESET_MASK;
+	val = readl(port->base + MVPP2_GMAC_CTRL_2_REG) |
+	      MVPP2_GMAC_PORT_RESET_MASK;
 	writel(val, port->base + MVPP2_GMAC_CTRL_2_REG);
 
-	while (readl(port->base + MVPP2_GMAC_CTRL_2_REG) &
-	       MVPP2_GMAC_PORT_RESET_MASK)
-		continue;
+	if (port->priv->hw_version == MVPP22 && port->gop_id == 0) {
+		val = readl(port->base + MVPP22_XLG_CTRL0_REG) &
+		      ~MVPP22_XLG_CTRL0_MAC_RESET_DIS;
+		writel(val, port->base + MVPP22_XLG_CTRL0_REG);
+	}
+}
+
+static void mvpp22_pcs_reset_assert(struct mvpp2_port *port)
+{
+	struct mvpp2 *priv = port->priv;
+	void __iomem *mpcs, *xpcs;
+	u32 val;
+
+	if (port->priv->hw_version != MVPP22 || port->gop_id != 0)
+		return;
+
+	mpcs = priv->iface_base + MVPP22_MPCS_BASE(port->gop_id);
+	xpcs = priv->iface_base + MVPP22_XPCS_BASE(port->gop_id);
+
+	val = readl(mpcs + MVPP22_MPCS_CLK_RESET);
+	val &= ~(MAC_CLK_RESET_MAC | MAC_CLK_RESET_SD_RX | MAC_CLK_RESET_SD_TX);
+	val |= MVPP22_MPCS_CLK_RESET_DIV_SET;
+	writel(val, mpcs + MVPP22_MPCS_CLK_RESET);
+
+	val = readl(xpcs + MVPP22_XPCS_CFG0);
+	writel(val & ~MVPP22_XPCS_CFG0_RESET_DIS, xpcs + MVPP22_XPCS_CFG0);
+}
+
+static void mvpp22_pcs_reset_deassert(struct mvpp2_port *port)
+{
+	struct mvpp2 *priv = port->priv;
+	void __iomem *mpcs, *xpcs;
+	u32 val;
+
+	if (port->priv->hw_version != MVPP22 || port->gop_id != 0)
+		return;
+
+	mpcs = priv->iface_base + MVPP22_MPCS_BASE(port->gop_id);
+	xpcs = priv->iface_base + MVPP22_XPCS_BASE(port->gop_id);
+
+	switch (port->phy_interface) {
+	case PHY_INTERFACE_MODE_10GKR:
+		val = readl(mpcs + MVPP22_MPCS_CLK_RESET);
+		val |= MAC_CLK_RESET_MAC | MAC_CLK_RESET_SD_RX |
+		       MAC_CLK_RESET_SD_TX;
+		val &= ~MVPP22_MPCS_CLK_RESET_DIV_SET;
+		writel(val, mpcs + MVPP22_MPCS_CLK_RESET);
+		break;
+	case PHY_INTERFACE_MODE_XAUI:
+	case PHY_INTERFACE_MODE_RXAUI:
+		val = readl(xpcs + MVPP22_XPCS_CFG0);
+		writel(val | MVPP22_XPCS_CFG0_RESET_DIS, xpcs + MVPP22_XPCS_CFG0);
+		break;
+	default:
+		break;
+	}
 }
 
 /* Change maximum receive size of the port */
@@ -1408,7 +1693,7 @@
 /* Set defaults to the MVPP2 port */
 static void mvpp2_defaults_set(struct mvpp2_port *port)
 {
-	int tx_port_num, val, queue, ptxq, lrxq;
+	int tx_port_num, val, queue, lrxq;
 
 	if (port->priv->hw_version == MVPP21) {
 		/* Update TX FIFO MIN Threshold */
@@ -1425,12 +1710,13 @@
 		    tx_port_num);
 	mvpp2_write(port->priv, MVPP2_TXP_SCHED_CMD_1_REG, 0);
 
+	/* Set TXQ scheduling to Round-Robin */
+	mvpp2_write(port->priv, MVPP2_TXP_SCHED_FIXED_PRIO_REG, 0);
+
 	/* Close bandwidth for all queues */
-	for (queue = 0; queue < MVPP2_MAX_TXQ; queue++) {
-		ptxq = mvpp2_txq_phys(port->id, queue);
+	for (queue = 0; queue < MVPP2_MAX_TXQ; queue++)
 		mvpp2_write(port->priv,
-			    MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(ptxq), 0);
-	}
+			    MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(queue), 0);
 
 	/* Set refill period to 1 usec, refill tokens
 	 * and bucket size to maximum
@@ -1624,7 +1910,8 @@
 static void mvpp2_aggr_txq_pend_desc_add(struct mvpp2_port *port, int pending)
 {
 	/* aggregated access - relevant TXQ number is written in TX desc */
-	mvpp2_percpu_write(port->priv, smp_processor_id(),
+	mvpp2_thread_write(port->priv,
+			   mvpp2_cpu_to_thread(port->priv, smp_processor_id()),
 			   MVPP2_AGGR_TXQ_UPDATE_REG, pending);
 }
 
@@ -1634,14 +1921,15 @@
  * Called only from mvpp2_tx(), so migration is disabled, using
  * smp_processor_id() is OK.
  */
-static int mvpp2_aggr_desc_num_check(struct mvpp2 *priv,
+static int mvpp2_aggr_desc_num_check(struct mvpp2_port *port,
 				     struct mvpp2_tx_queue *aggr_txq, int num)
 {
 	if ((aggr_txq->count + num) > MVPP2_AGGR_TXQ_SIZE) {
 		/* Update number of occupied aggregated Tx descriptors */
-		int cpu = smp_processor_id();
-		u32 val = mvpp2_read_relaxed(priv,
-					     MVPP2_AGGR_TXQ_STATUS_REG(cpu));
+		unsigned int thread =
+			mvpp2_cpu_to_thread(port->priv, smp_processor_id());
+		u32 val = mvpp2_read_relaxed(port->priv,
+					     MVPP2_AGGR_TXQ_STATUS_REG(thread));
 
 		aggr_txq->count = val & MVPP2_AGGR_TXQ_PENDING_MASK;
 
@@ -1657,16 +1945,17 @@
  * only by mvpp2_tx(), so migration is disabled, using
  * smp_processor_id() is OK.
  */
-static int mvpp2_txq_alloc_reserved_desc(struct mvpp2 *priv,
+static int mvpp2_txq_alloc_reserved_desc(struct mvpp2_port *port,
 					 struct mvpp2_tx_queue *txq, int num)
 {
+	unsigned int thread = mvpp2_cpu_to_thread(port->priv, smp_processor_id());
+	struct mvpp2 *priv = port->priv;
 	u32 val;
-	int cpu = smp_processor_id();
 
 	val = (txq->id << MVPP2_TXQ_RSVD_REQ_Q_OFFSET) | num;
-	mvpp2_percpu_write_relaxed(priv, cpu, MVPP2_TXQ_RSVD_REQ_REG, val);
+	mvpp2_thread_write_relaxed(priv, thread, MVPP2_TXQ_RSVD_REQ_REG, val);
 
-	val = mvpp2_percpu_read_relaxed(priv, cpu, MVPP2_TXQ_RSVD_RSLT_REG);
+	val = mvpp2_thread_read_relaxed(priv, thread, MVPP2_TXQ_RSVD_RSLT_REG);
 
 	return val & MVPP2_TXQ_RSVD_RSLT_MASK;
 }
@@ -1674,12 +1963,13 @@
 /* Check if there are enough reserved descriptors for transmission.
  * If not, request chunk of reserved descriptors and check again.
  */
-static int mvpp2_txq_reserved_desc_num_proc(struct mvpp2 *priv,
+static int mvpp2_txq_reserved_desc_num_proc(struct mvpp2_port *port,
 					    struct mvpp2_tx_queue *txq,
 					    struct mvpp2_txq_pcpu *txq_pcpu,
 					    int num)
 {
-	int req, cpu, desc_count;
+	int req, desc_count;
+	unsigned int thread;
 
 	if (txq_pcpu->reserved_num >= num)
 		return 0;
@@ -1690,10 +1980,10 @@
 
 	desc_count = 0;
 	/* Compute total of used descriptors */
-	for_each_present_cpu(cpu) {
+	for (thread = 0; thread < port->priv->nthreads; thread++) {
 		struct mvpp2_txq_pcpu *txq_pcpu_aux;
 
-		txq_pcpu_aux = per_cpu_ptr(txq->pcpu, cpu);
+		txq_pcpu_aux = per_cpu_ptr(txq->pcpu, thread);
 		desc_count += txq_pcpu_aux->count;
 		desc_count += txq_pcpu_aux->reserved_num;
 	}
@@ -1702,10 +1992,10 @@
 	desc_count += req;
 
 	if (desc_count >
-	   (txq->size - (num_present_cpus() * MVPP2_CPU_DESC_CHUNK)))
+	   (txq->size - (MVPP2_MAX_THREADS * MVPP2_CPU_DESC_CHUNK)))
 		return -ENOMEM;
 
-	txq_pcpu->reserved_num += mvpp2_txq_alloc_reserved_desc(priv, txq, req);
+	txq_pcpu->reserved_num += mvpp2_txq_alloc_reserved_desc(port, txq, req);
 
 	/* OK, the descriptor could have been updated: check again. */
 	if (txq_pcpu->reserved_num < num)
@@ -1759,7 +2049,7 @@
 
 /* Get number of sent descriptors and decrement counter.
  * The number of sent descriptors is returned.
- * Per-CPU access
+ * Per-thread access
  *
  * Called only from mvpp2_txq_done(), called from mvpp2_tx()
  * (migration disabled) and from the TX completion tasklet (migration
@@ -1771,7 +2061,8 @@
 	u32 val;
 
 	/* Reading status reg resets transmitted descriptor counter */
-	val = mvpp2_percpu_read_relaxed(port->priv, smp_processor_id(),
+	val = mvpp2_thread_read_relaxed(port->priv,
+					mvpp2_cpu_to_thread(port->priv, smp_processor_id()),
 					MVPP2_TXQ_SENT_REG(txq->id));
 
 	return (val & MVPP2_TRANSMITTED_COUNT_MASK) >>
@@ -1786,10 +2077,15 @@
 	struct mvpp2_port *port = arg;
 	int queue;
 
+	/* If the thread isn't used, don't do anything */
+	if (smp_processor_id() > port->priv->nthreads)
+		return;
+
 	for (queue = 0; queue < port->ntxqs; queue++) {
 		int id = port->txqs[queue]->id;
 
-		mvpp2_percpu_read(port->priv, smp_processor_id(),
+		mvpp2_thread_read(port->priv,
+				  mvpp2_cpu_to_thread(port->priv, smp_processor_id()),
 				  MVPP2_TXQ_SENT_REG(id));
 	}
 }
@@ -1849,13 +2145,13 @@
 static void mvpp2_rx_pkts_coal_set(struct mvpp2_port *port,
 				   struct mvpp2_rx_queue *rxq)
 {
-	int cpu = get_cpu();
+	unsigned int thread = mvpp2_cpu_to_thread(port->priv, get_cpu());
 
 	if (rxq->pkts_coal > MVPP2_OCCUPIED_THRESH_MASK)
 		rxq->pkts_coal = MVPP2_OCCUPIED_THRESH_MASK;
 
-	mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_NUM_REG, rxq->id);
-	mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_THRESH_REG,
+	mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_NUM_REG, rxq->id);
+	mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_THRESH_REG,
 			   rxq->pkts_coal);
 
 	put_cpu();
@@ -1865,15 +2161,15 @@
 static void mvpp2_tx_pkts_coal_set(struct mvpp2_port *port,
 				   struct mvpp2_tx_queue *txq)
 {
-	int cpu = get_cpu();
+	unsigned int thread = mvpp2_cpu_to_thread(port->priv, get_cpu());
 	u32 val;
 
 	if (txq->done_pkts_coal > MVPP2_TXQ_THRESH_MASK)
 		txq->done_pkts_coal = MVPP2_TXQ_THRESH_MASK;
 
 	val = (txq->done_pkts_coal << MVPP2_TXQ_THRESH_OFFSET);
-	mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_NUM_REG, txq->id);
-	mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_THRESH_REG, val);
+	mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_NUM_REG, txq->id);
+	mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_THRESH_REG, val);
 
 	put_cpu();
 }
@@ -1974,7 +2270,7 @@
 	struct netdev_queue *nq = netdev_get_tx_queue(port->dev, txq->log_id);
 	int tx_done;
 
-	if (txq_pcpu->cpu != smp_processor_id())
+	if (txq_pcpu->thread != mvpp2_cpu_to_thread(port->priv, smp_processor_id()))
 		netdev_err(port->dev, "wrong cpu on the end of Tx processing\n");
 
 	tx_done = mvpp2_txq_sent_desc_proc(port, txq);
@@ -1990,7 +2286,7 @@
 }
 
 static unsigned int mvpp2_tx_done(struct mvpp2_port *port, u32 cause,
-				  int cpu)
+				  unsigned int thread)
 {
 	struct mvpp2_tx_queue *txq;
 	struct mvpp2_txq_pcpu *txq_pcpu;
@@ -2001,7 +2297,7 @@
 		if (!txq)
 			break;
 
-		txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
+		txq_pcpu = per_cpu_ptr(txq->pcpu, thread);
 
 		if (txq_pcpu->count) {
 			mvpp2_txq_done(port, txq, txq_pcpu);
@@ -2017,15 +2313,15 @@
 
 /* Allocate and initialize descriptors for aggr TXQ */
 static int mvpp2_aggr_txq_init(struct platform_device *pdev,
-			       struct mvpp2_tx_queue *aggr_txq, int cpu,
-			       struct mvpp2 *priv)
+			       struct mvpp2_tx_queue *aggr_txq,
+			       unsigned int thread, struct mvpp2 *priv)
 {
 	u32 txq_dma;
 
 	/* Allocate memory for TX descriptors */
-	aggr_txq->descs = dma_zalloc_coherent(&pdev->dev,
-				MVPP2_AGGR_TXQ_SIZE * MVPP2_DESC_ALIGNED_SIZE,
-				&aggr_txq->descs_dma, GFP_KERNEL);
+	aggr_txq->descs = dma_alloc_coherent(&pdev->dev,
+					     MVPP2_AGGR_TXQ_SIZE * MVPP2_DESC_ALIGNED_SIZE,
+					     &aggr_txq->descs_dma, GFP_KERNEL);
 	if (!aggr_txq->descs)
 		return -ENOMEM;
 
@@ -2033,7 +2329,7 @@
 
 	/* Aggr TXQ no reset WA */
 	aggr_txq->next_desc_to_proc = mvpp2_read(priv,
-						 MVPP2_AGGR_TXQ_INDEX_REG(cpu));
+						 MVPP2_AGGR_TXQ_INDEX_REG(thread));
 
 	/* Set Tx descriptors queue starting address indirect
 	 * access
@@ -2044,8 +2340,8 @@
 		txq_dma = aggr_txq->descs_dma >>
 			MVPP22_AGGR_TXQ_DESC_ADDR_OFFS;
 
-	mvpp2_write(priv, MVPP2_AGGR_TXQ_DESC_ADDR_REG(cpu), txq_dma);
-	mvpp2_write(priv, MVPP2_AGGR_TXQ_DESC_SIZE_REG(cpu),
+	mvpp2_write(priv, MVPP2_AGGR_TXQ_DESC_ADDR_REG(thread), txq_dma);
+	mvpp2_write(priv, MVPP2_AGGR_TXQ_DESC_SIZE_REG(thread),
 		    MVPP2_AGGR_TXQ_SIZE);
 
 	return 0;
@@ -2056,8 +2352,8 @@
 			  struct mvpp2_rx_queue *rxq)
 
 {
+	unsigned int thread;
 	u32 rxq_dma;
-	int cpu;
 
 	rxq->size = port->rx_ring_size;
 
@@ -2074,15 +2370,15 @@
 	mvpp2_write(port->priv, MVPP2_RXQ_STATUS_REG(rxq->id), 0);
 
 	/* Set Rx descriptors queue starting address - indirect access */
-	cpu = get_cpu();
-	mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_NUM_REG, rxq->id);
+	thread = mvpp2_cpu_to_thread(port->priv, get_cpu());
+	mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_NUM_REG, rxq->id);
 	if (port->priv->hw_version == MVPP21)
 		rxq_dma = rxq->descs_dma;
 	else
 		rxq_dma = rxq->descs_dma >> MVPP22_DESC_ADDR_OFFS;
-	mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_DESC_ADDR_REG, rxq_dma);
-	mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_DESC_SIZE_REG, rxq->size);
-	mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_INDEX_REG, 0);
+	mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_DESC_ADDR_REG, rxq_dma);
+	mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_DESC_SIZE_REG, rxq->size);
+	mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_INDEX_REG, 0);
 	put_cpu();
 
 	/* Set Offset */
@@ -2127,7 +2423,7 @@
 static void mvpp2_rxq_deinit(struct mvpp2_port *port,
 			     struct mvpp2_rx_queue *rxq)
 {
-	int cpu;
+	unsigned int thread;
 
 	mvpp2_rxq_drop_pkts(port, rxq);
 
@@ -2146,10 +2442,10 @@
 	 * free descriptor number
 	 */
 	mvpp2_write(port->priv, MVPP2_RXQ_STATUS_REG(rxq->id), 0);
-	cpu = get_cpu();
-	mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_NUM_REG, rxq->id);
-	mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_DESC_ADDR_REG, 0);
-	mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_DESC_SIZE_REG, 0);
+	thread = mvpp2_cpu_to_thread(port->priv, get_cpu());
+	mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_NUM_REG, rxq->id);
+	mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_DESC_ADDR_REG, 0);
+	mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_DESC_SIZE_REG, 0);
 	put_cpu();
 }
 
@@ -2158,7 +2454,8 @@
 			  struct mvpp2_tx_queue *txq)
 {
 	u32 val;
-	int cpu, desc, desc_per_txq, tx_port_num;
+	unsigned int thread;
+	int desc, desc_per_txq, tx_port_num;
 	struct mvpp2_txq_pcpu *txq_pcpu;
 
 	txq->size = port->tx_ring_size;
@@ -2173,18 +2470,18 @@
 	txq->last_desc = txq->size - 1;
 
 	/* Set Tx descriptors queue starting address - indirect access */
-	cpu = get_cpu();
-	mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_NUM_REG, txq->id);
-	mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_DESC_ADDR_REG,
+	thread = mvpp2_cpu_to_thread(port->priv, get_cpu());
+	mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_NUM_REG, txq->id);
+	mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_DESC_ADDR_REG,
 			   txq->descs_dma);
-	mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_DESC_SIZE_REG,
+	mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_DESC_SIZE_REG,
 			   txq->size & MVPP2_TXQ_DESC_SIZE_MASK);
-	mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_INDEX_REG, 0);
-	mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_RSVD_CLR_REG,
+	mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_INDEX_REG, 0);
+	mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_RSVD_CLR_REG,
 			   txq->id << MVPP2_TXQ_RSVD_CLR_OFFSET);
-	val = mvpp2_percpu_read(port->priv, cpu, MVPP2_TXQ_PENDING_REG);
+	val = mvpp2_thread_read(port->priv, thread, MVPP2_TXQ_PENDING_REG);
 	val &= ~MVPP2_TXQ_PENDING_MASK;
-	mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_PENDING_REG, val);
+	mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_PENDING_REG, val);
 
 	/* Calculate base address in prefetch buffer. We reserve 16 descriptors
 	 * for each existing TXQ.
@@ -2195,7 +2492,7 @@
 	desc = (port->id * MVPP2_MAX_TXQ * desc_per_txq) +
 	       (txq->log_id * desc_per_txq);
 
-	mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_PREF_BUF_REG,
+	mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_PREF_BUF_REG,
 			   MVPP2_PREF_BUF_PTR(desc) | MVPP2_PREF_BUF_SIZE_16 |
 			   MVPP2_PREF_BUF_THRESH(desc_per_txq / 2));
 	put_cpu();
@@ -2214,8 +2511,8 @@
 	mvpp2_write(port->priv, MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq->log_id),
 		    val);
 
-	for_each_present_cpu(cpu) {
-		txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
+	for (thread = 0; thread < port->priv->nthreads; thread++) {
+		txq_pcpu = per_cpu_ptr(txq->pcpu, thread);
 		txq_pcpu->size = txq->size;
 		txq_pcpu->buffs = kmalloc_array(txq_pcpu->size,
 						sizeof(*txq_pcpu->buffs),
@@ -2249,10 +2546,10 @@
 			     struct mvpp2_tx_queue *txq)
 {
 	struct mvpp2_txq_pcpu *txq_pcpu;
-	int cpu;
+	unsigned int thread;
 
-	for_each_present_cpu(cpu) {
-		txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
+	for (thread = 0; thread < port->priv->nthreads; thread++) {
+		txq_pcpu = per_cpu_ptr(txq->pcpu, thread);
 		kfree(txq_pcpu->buffs);
 
 		if (txq_pcpu->tso_headers)
@@ -2275,13 +2572,13 @@
 	txq->descs_dma         = 0;
 
 	/* Set minimum bandwidth for disabled TXQs */
-	mvpp2_write(port->priv, MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(txq->id), 0);
+	mvpp2_write(port->priv, MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(txq->log_id), 0);
 
 	/* Set Tx descriptors queue starting address and size */
-	cpu = get_cpu();
-	mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_NUM_REG, txq->id);
-	mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_DESC_ADDR_REG, 0);
-	mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_DESC_SIZE_REG, 0);
+	thread = mvpp2_cpu_to_thread(port->priv, get_cpu());
+	mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_NUM_REG, txq->id);
+	mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_DESC_ADDR_REG, 0);
+	mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_DESC_SIZE_REG, 0);
 	put_cpu();
 }
 
@@ -2289,14 +2586,14 @@
 static void mvpp2_txq_clean(struct mvpp2_port *port, struct mvpp2_tx_queue *txq)
 {
 	struct mvpp2_txq_pcpu *txq_pcpu;
-	int delay, pending, cpu;
+	int delay, pending;
+	unsigned int thread = mvpp2_cpu_to_thread(port->priv, get_cpu());
 	u32 val;
 
-	cpu = get_cpu();
-	mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_NUM_REG, txq->id);
-	val = mvpp2_percpu_read(port->priv, cpu, MVPP2_TXQ_PREF_BUF_REG);
+	mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_NUM_REG, txq->id);
+	val = mvpp2_thread_read(port->priv, thread, MVPP2_TXQ_PREF_BUF_REG);
 	val |= MVPP2_TXQ_DRAIN_EN_MASK;
-	mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_PREF_BUF_REG, val);
+	mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_PREF_BUF_REG, val);
 
 	/* The napi queue has been stopped so wait for all packets
 	 * to be transmitted.
@@ -2312,17 +2609,17 @@
 		mdelay(1);
 		delay++;
 
-		pending = mvpp2_percpu_read(port->priv, cpu,
+		pending = mvpp2_thread_read(port->priv, thread,
 					    MVPP2_TXQ_PENDING_REG);
 		pending &= MVPP2_TXQ_PENDING_MASK;
 	} while (pending);
 
 	val &= ~MVPP2_TXQ_DRAIN_EN_MASK;
-	mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_PREF_BUF_REG, val);
+	mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_PREF_BUF_REG, val);
 	put_cpu();
 
-	for_each_present_cpu(cpu) {
-		txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
+	for (thread = 0; thread < port->priv->nthreads; thread++) {
+		txq_pcpu = per_cpu_ptr(txq->pcpu, thread);
 
 		/* Release all packets */
 		mvpp2_txq_bufs_free(port, txq, txq_pcpu, txq_pcpu->count);
@@ -2389,13 +2686,17 @@
 static int mvpp2_setup_txqs(struct mvpp2_port *port)
 {
 	struct mvpp2_tx_queue *txq;
-	int queue, err;
+	int queue, err, cpu;
 
 	for (queue = 0; queue < port->ntxqs; queue++) {
 		txq = port->txqs[queue];
 		err = mvpp2_txq_init(port, txq);
 		if (err)
 			goto err_cleanup;
+
+		/* Assign this queue to a CPU */
+		cpu = queue % num_present_cpus();
+		netif_set_xps_queue(port->dev, cpumask_of(cpu), queue);
 	}
 
 	if (port->has_tx_irqs) {
@@ -2436,8 +2737,7 @@
 
 	mvpp22_gop_mask_irq(port);
 
-	if (port->gop_id == 0 &&
-	    port->phy_interface == PHY_INTERFACE_MODE_10GKR) {
+	if (port->gop_id == 0 && mvpp2_is_xlg(port->phy_interface)) {
 		val = readl(port->base + MVPP22_XLG_INT_STAT);
 		if (val & MVPP22_XLG_INT_STAT_LINK) {
 			event = true;
@@ -2446,9 +2746,8 @@
 				link = true;
 		}
 	} else if (phy_interface_mode_is_rgmii(port->phy_interface) ||
-		   port->phy_interface == PHY_INTERFACE_MODE_SGMII ||
-		   port->phy_interface == PHY_INTERFACE_MODE_1000BASEX ||
-		   port->phy_interface == PHY_INTERFACE_MODE_2500BASEX) {
+		   phy_interface_mode_is_8023z(port->phy_interface) ||
+		   port->phy_interface == PHY_INTERFACE_MODE_SGMII) {
 		val = readl(port->base + MVPP22_GMAC_INT_STAT);
 		if (val & MVPP22_GMAC_INT_STAT_LINK) {
 			event = true;
@@ -2487,46 +2786,35 @@
 	return IRQ_HANDLED;
 }
 
-static void mvpp2_timer_set(struct mvpp2_port_pcpu *port_pcpu)
+static enum hrtimer_restart mvpp2_hr_timer_cb(struct hrtimer *timer)
 {
-	ktime_t interval;
-
-	if (!port_pcpu->timer_scheduled) {
-		port_pcpu->timer_scheduled = true;
-		interval = MVPP2_TXDONE_HRTIMER_PERIOD_NS;
-		hrtimer_start(&port_pcpu->tx_done_timer, interval,
-			      HRTIMER_MODE_REL_PINNED);
-	}
-}
-
-static void mvpp2_tx_proc_cb(unsigned long data)
-{
-	struct net_device *dev = (struct net_device *)data;
-	struct mvpp2_port *port = netdev_priv(dev);
-	struct mvpp2_port_pcpu *port_pcpu = this_cpu_ptr(port->pcpu);
+	struct net_device *dev;
+	struct mvpp2_port *port;
+	struct mvpp2_port_pcpu *port_pcpu;
 	unsigned int tx_todo, cause;
 
+	port_pcpu = container_of(timer, struct mvpp2_port_pcpu, tx_done_timer);
+	dev = port_pcpu->dev;
+
 	if (!netif_running(dev))
-		return;
+		return HRTIMER_NORESTART;
+
 	port_pcpu->timer_scheduled = false;
+	port = netdev_priv(dev);
 
 	/* Process all the Tx queues */
 	cause = (1 << port->ntxqs) - 1;
-	tx_todo = mvpp2_tx_done(port, cause, smp_processor_id());
+	tx_todo = mvpp2_tx_done(port, cause,
+				mvpp2_cpu_to_thread(port->priv, smp_processor_id()));
 
 	/* Set the timer in case not all the packets were processed */
-	if (tx_todo)
-		mvpp2_timer_set(port_pcpu);
-}
+	if (tx_todo && !port_pcpu->timer_scheduled) {
+		port_pcpu->timer_scheduled = true;
+		hrtimer_forward_now(&port_pcpu->tx_done_timer,
+				    MVPP2_TXDONE_HRTIMER_PERIOD_NS);
 
-static enum hrtimer_restart mvpp2_hr_timer_cb(struct hrtimer *timer)
-{
-	struct mvpp2_port_pcpu *port_pcpu = container_of(timer,
-							 struct mvpp2_port_pcpu,
-							 tx_done_timer);
-
-	tasklet_schedule(&port_pcpu->tx_done_tasklet);
-
+		return HRTIMER_RESTART;
+	}
 	return HRTIMER_NORESTART;
 }
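The hunk above folds the old tx-done tasklet body into the hrtimer callback itself: while descriptors remain outstanding the timer is forwarded and HRTIMER_RESTART is returned, otherwise it simply stops. A standalone simulation of that rearm decision (plain C with a stubbed tx_done(); the real callback of course uses hrtimer_forward_now() and the kernel's enum hrtimer_restart):

#include <stdio.h>

enum restart { NORESTART, RESTART };

static unsigned int pending = 5;	/* assumption: descriptors left to reclaim */

/* Stub for the tx-done work: reclaim up to two descriptors per expiry. */
static unsigned int tx_done(void)
{
	unsigned int done = pending > 2 ? 2 : pending;

	pending -= done;
	return pending;		/* "tx_todo": what is still outstanding */
}

/* Mirrors the shape of mvpp2_hr_timer_cb(): rearm only while work remains. */
static enum restart timer_cb(void)
{
	return tx_done() ? RESTART : NORESTART;
}

int main(void)
{
	unsigned int expiries = 1;

	while (timer_cb() == RESTART)
		expiries++;
	printf("timer fired %u times before all TX was done\n", expiries);
	return 0;
}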
 
@@ -2729,7 +3017,8 @@
 tx_desc_unmap_put(struct mvpp2_port *port, struct mvpp2_tx_queue *txq,
 		  struct mvpp2_tx_desc *desc)
 {
-	struct mvpp2_txq_pcpu *txq_pcpu = this_cpu_ptr(txq->pcpu);
+	unsigned int thread = mvpp2_cpu_to_thread(port->priv, smp_processor_id());
+	struct mvpp2_txq_pcpu *txq_pcpu = per_cpu_ptr(txq->pcpu, thread);
 
 	dma_addr_t buf_dma_addr =
 		mvpp2_txdesc_dma_addr_get(port, desc);
@@ -2746,21 +3035,23 @@
 				 struct mvpp2_tx_queue *aggr_txq,
 				 struct mvpp2_tx_queue *txq)
 {
-	struct mvpp2_txq_pcpu *txq_pcpu = this_cpu_ptr(txq->pcpu);
+	unsigned int thread = mvpp2_cpu_to_thread(port->priv, smp_processor_id());
+	struct mvpp2_txq_pcpu *txq_pcpu = per_cpu_ptr(txq->pcpu, thread);
 	struct mvpp2_tx_desc *tx_desc;
 	int i;
 	dma_addr_t buf_dma_addr;
 
 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
 		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
-		void *addr = page_address(frag->page.p) + frag->page_offset;
+		void *addr = skb_frag_address(frag);
 
 		tx_desc = mvpp2_txq_next_desc_get(aggr_txq);
 		mvpp2_txdesc_txq_set(port, tx_desc, txq->id);
-		mvpp2_txdesc_size_set(port, tx_desc, frag->size);
+		mvpp2_txdesc_size_set(port, tx_desc, skb_frag_size(frag));
 
 		buf_dma_addr = dma_map_single(port->dev->dev.parent, addr,
-					      frag->size, DMA_TO_DEVICE);
+					      skb_frag_size(frag),
+					      DMA_TO_DEVICE);
 		if (dma_mapping_error(port->dev->dev.parent, buf_dma_addr)) {
 			mvpp2_txq_desc_put(txq);
 			goto cleanup;
@@ -2865,9 +3156,8 @@
 	int i, len, descs = 0;
 
 	/* Check number of available descriptors */
-	if (mvpp2_aggr_desc_num_check(port->priv, aggr_txq,
-				      tso_count_descs(skb)) ||
-	    mvpp2_txq_reserved_desc_num_proc(port->priv, txq, txq_pcpu,
+	if (mvpp2_aggr_desc_num_check(port, aggr_txq, tso_count_descs(skb)) ||
+	    mvpp2_txq_reserved_desc_num_proc(port, txq, txq_pcpu,
 					     tso_count_descs(skb)))
 		return 0;
 
@@ -2907,21 +3197,28 @@
 }
 
 /* Main tx processing */
-static int mvpp2_tx(struct sk_buff *skb, struct net_device *dev)
+static netdev_tx_t mvpp2_tx(struct sk_buff *skb, struct net_device *dev)
 {
 	struct mvpp2_port *port = netdev_priv(dev);
 	struct mvpp2_tx_queue *txq, *aggr_txq;
 	struct mvpp2_txq_pcpu *txq_pcpu;
 	struct mvpp2_tx_desc *tx_desc;
 	dma_addr_t buf_dma_addr;
+	unsigned long flags = 0;
+	unsigned int thread;
 	int frags = 0;
 	u16 txq_id;
 	u32 tx_cmd;
 
+	thread = mvpp2_cpu_to_thread(port->priv, smp_processor_id());
+
 	txq_id = skb_get_queue_mapping(skb);
 	txq = port->txqs[txq_id];
-	txq_pcpu = this_cpu_ptr(txq->pcpu);
-	aggr_txq = &port->priv->aggr_txqs[smp_processor_id()];
+	txq_pcpu = per_cpu_ptr(txq->pcpu, thread);
+	aggr_txq = &port->priv->aggr_txqs[thread];
+
+	if (test_bit(thread, &port->priv->lock_map))
+		spin_lock_irqsave(&port->tx_lock[thread], flags);
 
 	if (skb_is_gso(skb)) {
 		frags = mvpp2_tx_tso(skb, dev, txq, aggr_txq, txq_pcpu);
@@ -2930,9 +3227,8 @@
 	frags = skb_shinfo(skb)->nr_frags + 1;
 
 	/* Check number of available descriptors */
-	if (mvpp2_aggr_desc_num_check(port->priv, aggr_txq, frags) ||
-	    mvpp2_txq_reserved_desc_num_proc(port->priv, txq,
-					     txq_pcpu, frags)) {
+	if (mvpp2_aggr_desc_num_check(port, aggr_txq, frags) ||
+	    mvpp2_txq_reserved_desc_num_proc(port, txq, txq_pcpu, frags)) {
 		frags = 0;
 		goto out;
 	}
@@ -2974,7 +3270,7 @@
 
 out:
 	if (frags > 0) {
-		struct mvpp2_pcpu_stats *stats = this_cpu_ptr(port->stats);
+		struct mvpp2_pcpu_stats *stats = per_cpu_ptr(port->stats, thread);
 		struct netdev_queue *nq = netdev_get_tx_queue(dev, txq_id);
 
 		txq_pcpu->reserved_num -= frags;
@@ -3004,11 +3300,19 @@
 	/* Set the timer in case not all frags were processed */
 	if (!port->has_tx_irqs && txq_pcpu->count <= frags &&
 	    txq_pcpu->count > 0) {
-		struct mvpp2_port_pcpu *port_pcpu = this_cpu_ptr(port->pcpu);
+		struct mvpp2_port_pcpu *port_pcpu = per_cpu_ptr(port->pcpu, thread);
 
-		mvpp2_timer_set(port_pcpu);
+		if (!port_pcpu->timer_scheduled) {
+			port_pcpu->timer_scheduled = true;
+			hrtimer_start(&port_pcpu->tx_done_timer,
+				      MVPP2_TXDONE_HRTIMER_PERIOD_NS,
+				      HRTIMER_MODE_REL_PINNED_SOFT);
+		}
 	}
 
+	if (test_bit(thread, &port->priv->lock_map))
+		spin_unlock_irqrestore(&port->tx_lock[thread], flags);
+
 	return NETDEV_TX_OK;
 }
 
@@ -3028,7 +3332,7 @@
 	int rx_done = 0;
 	struct mvpp2_port *port = netdev_priv(napi->dev);
 	struct mvpp2_queue_vector *qv;
-	int cpu = smp_processor_id();
+	unsigned int thread = mvpp2_cpu_to_thread(port->priv, smp_processor_id());
 
 	qv = container_of(napi, struct mvpp2_queue_vector, napi);
 
@@ -3042,7 +3346,7 @@
 	 *
 	 * Each CPU has its own Rx/Tx cause register
 	 */
-	cause_rx_tx = mvpp2_percpu_read_relaxed(port->priv, qv->sw_thread_id,
+	cause_rx_tx = mvpp2_thread_read_relaxed(port->priv, qv->sw_thread_id,
 						MVPP2_ISR_RX_TX_CAUSE_REG(port->id));
 
 	cause_misc = cause_rx_tx & MVPP2_CAUSE_MISC_SUM_MASK;
@@ -3051,7 +3355,7 @@
 
 		/* Clear the cause register */
 		mvpp2_write(port->priv, MVPP2_ISR_MISC_CAUSE_REG, 0);
-		mvpp2_percpu_write(port->priv, cpu,
+		mvpp2_thread_write(port->priv, thread,
 				   MVPP2_ISR_RX_TX_CAUSE_REG(port->id),
 				   cause_rx_tx & ~MVPP2_CAUSE_MISC_SUM_MASK);
 	}
@@ -3065,7 +3369,8 @@
 	}
 
 	/* Process RX packets */
-	cause_rx = cause_rx_tx & MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK;
+	cause_rx = cause_rx_tx &
+		   MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK(port->priv->hw_version);
 	cause_rx <<= qv->first_rxq;
 	cause_rx |= qv->pending_cause_rx;
 	while (cause_rx && budget > 0) {
@@ -3102,19 +3407,26 @@
 {
 	u32 ctrl3;
 
+	/* Set the GMAC & XLG MAC in reset */
+	mvpp2_mac_reset_assert(port);
+
+	/* Set the MPCS and XPCS in reset */
+	mvpp22_pcs_reset_assert(port);
+
 	/* comphy reconfiguration */
 	mvpp22_comphy_init(port);
 
 	/* gop reconfiguration */
 	mvpp22_gop_init(port);
 
+	mvpp22_pcs_reset_deassert(port);
+
 	/* Only GOP port 0 has an XLG MAC */
 	if (port->gop_id == 0) {
 		ctrl3 = readl(port->base + MVPP22_XLG_CTRL3_REG);
 		ctrl3 &= ~MVPP22_XLG_CTRL3_MACMODESELECT_MASK;
 
-		if (port->phy_interface == PHY_INTERFACE_MODE_XAUI ||
-		    port->phy_interface == PHY_INTERFACE_MODE_10GKR)
+		if (mvpp2_is_xlg(port->phy_interface))
 			ctrl3 |= MVPP22_XLG_CTRL3_MACMODESELECT_10G;
 		else
 			ctrl3 |= MVPP22_XLG_CTRL3_MACMODESELECT_GMAC;
@@ -3122,9 +3434,7 @@
 		writel(ctrl3, port->base + MVPP22_XLG_CTRL3_REG);
 	}
 
-	if (port->gop_id == 0 &&
-	    (port->phy_interface == PHY_INTERFACE_MODE_XAUI ||
-	     port->phy_interface == PHY_INTERFACE_MODE_10GKR))
+	if (port->gop_id == 0 && mvpp2_is_xlg(port->phy_interface))
 		mvpp2_xlg_max_rx_size_set(port);
 	else
 		mvpp2_gmac_max_rx_size_set(port);
@@ -3140,14 +3450,13 @@
 	for (i = 0; i < port->nqvecs; i++)
 		napi_enable(&port->qvecs[i].napi);
 
-	/* Enable interrupts on all CPUs */
+	/* Enable interrupts on all threads */
 	mvpp2_interrupts_enable(port);
 
 	if (port->priv->hw_version == MVPP22)
 		mvpp22_mode_reconfigure(port);
 
 	if (port->phylink) {
-		netif_carrier_off(port->dev);
 		phylink_start(port->phylink);
 	} else {
 		/* Phylink isn't used as of now for ACPI, so the MAC has to be
@@ -3157,9 +3466,9 @@
 		struct phylink_link_state state = {
 			.interface = port->phy_interface,
 		};
-		mvpp2_mac_config(port->dev, MLO_AN_INBAND, &state);
-		mvpp2_mac_link_up(port->dev, MLO_AN_INBAND, port->phy_interface,
-				  NULL);
+		mvpp2_mac_config(&port->phylink_config, MLO_AN_INBAND, &state);
+		mvpp2_mac_link_up(&port->phylink_config, MLO_AN_INBAND,
+				  port->phy_interface, NULL);
 	}
 
 	netif_tx_start_all_queues(port->dev);
@@ -3170,7 +3479,7 @@
 {
 	int i;
 
-	/* Disable interrupts on all CPUs */
+	/* Disable interrupts on all threads */
 	mvpp2_interrupts_disable(port);
 
 	for (i = 0; i < port->nqvecs; i++)
@@ -3243,16 +3552,31 @@
 	for (i = 0; i < port->nqvecs; i++) {
 		struct mvpp2_queue_vector *qv = port->qvecs + i;
 
-		if (qv->type == MVPP2_QUEUE_VECTOR_PRIVATE)
+		if (qv->type == MVPP2_QUEUE_VECTOR_PRIVATE) {
+			qv->mask = kzalloc(cpumask_size(), GFP_KERNEL);
+			if (!qv->mask) {
+				err = -ENOMEM;
+				goto err;
+			}
+
 			irq_set_status_flags(qv->irq, IRQ_NO_BALANCING);
+		}
 
 		err = request_irq(qv->irq, mvpp2_isr, 0, port->dev->name, qv);
 		if (err)
 			goto err;
 
-		if (qv->type == MVPP2_QUEUE_VECTOR_PRIVATE)
-			irq_set_affinity_hint(qv->irq,
-					      cpumask_of(qv->sw_thread_id));
+		if (qv->type == MVPP2_QUEUE_VECTOR_PRIVATE) {
+			unsigned int cpu;
+
+			for_each_present_cpu(cpu) {
+				if (mvpp2_cpu_to_thread(port->priv, cpu) ==
+				    qv->sw_thread_id)
+					cpumask_set_cpu(cpu, qv->mask);
+			}
+
+			irq_set_affinity_hint(qv->irq, qv->mask);
+		}
 	}
 
 	return 0;
@@ -3261,6 +3585,8 @@
 		struct mvpp2_queue_vector *qv = port->qvecs + i;
 
 		irq_set_affinity_hint(qv->irq, NULL);
+		kfree(qv->mask);
+		qv->mask = NULL;
 		free_irq(qv->irq, qv);
 	}
 
@@ -3275,6 +3601,8 @@
 		struct mvpp2_queue_vector *qv = port->qvecs + i;
 
 		irq_set_affinity_hint(qv->irq, NULL);
+		kfree(qv->mask);
+		qv->mask = NULL;
 		irq_clear_status_flags(qv->irq, IRQ_NO_BALANCING);
 		free_irq(qv->irq, qv);
 	}
@@ -3396,11 +3724,11 @@
 {
 	struct mvpp2_port *port = netdev_priv(dev);
 	struct mvpp2_port_pcpu *port_pcpu;
-	int cpu;
+	unsigned int thread;
 
 	mvpp2_stop_dev(port);
 
-	/* Mask interrupts on all CPUs */
+	/* Mask interrupts on all threads */
 	on_each_cpu(mvpp2_interrupts_mask, port, 1);
 	mvpp2_shared_interrupt_mask_unmask(port, true);
 
@@ -3411,12 +3739,11 @@
 
 	mvpp2_irqs_deinit(port);
 	if (!port->has_tx_irqs) {
-		for_each_present_cpu(cpu) {
-			port_pcpu = per_cpu_ptr(port->pcpu, cpu);
+		for (thread = 0; thread < port->priv->nthreads; thread++) {
+			port_pcpu = per_cpu_ptr(port->pcpu, thread);
 
 			hrtimer_cancel(&port_pcpu->tx_done_timer);
 			port_pcpu->timer_scheduled = false;
-			tasklet_kill(&port_pcpu->tx_done_tasklet);
 		}
 	}
 	mvpp2_cleanup_rxqs(port);
@@ -3424,6 +3751,9 @@
 
 	cancel_delayed_work_sync(&port->stats_work);
 
+	mvpp2_mac_reset_assert(port);
+	mvpp22_pcs_reset_assert(port);
+
 	return 0;
 }
 
@@ -3504,9 +3834,48 @@
 	return err;
 }
 
+/* Shut down all the ports, reconfigure the pools as per-CPU or shared,
+ * then bring all the ports back up.
+ */
+static int mvpp2_bm_switch_buffers(struct mvpp2 *priv, bool percpu)
+{
+	int numbufs = MVPP2_BM_POOLS_NUM, i;
+	struct mvpp2_port *port = NULL;
+	bool status[MVPP2_MAX_PORTS];
+
+	for (i = 0; i < priv->port_count; i++) {
+		port = priv->port_list[i];
+		status[i] = netif_running(port->dev);
+		if (status[i])
+			mvpp2_stop(port->dev);
+	}
+
+	/* nrxqs is the same for all ports */
+	if (priv->percpu_pools)
+		numbufs = port->nrxqs * 2;
+
+	for (i = 0; i < numbufs; i++)
+		mvpp2_bm_pool_destroy(port->dev->dev.parent, priv, &priv->bm_pools[i]);
+
+	devm_kfree(port->dev->dev.parent, priv->bm_pools);
+	priv->percpu_pools = percpu;
+	mvpp2_bm_init(port->dev->dev.parent, priv);
+
+	for (i = 0; i < priv->port_count; i++) {
+		port = priv->port_list[i];
+		mvpp2_swf_bm_pool_init(port);
+		if (status[i])
+			mvpp2_open(port->dev);
+	}
+
+	return 0;
+}
+
 static int mvpp2_change_mtu(struct net_device *dev, int mtu)
 {
 	struct mvpp2_port *port = netdev_priv(dev);
+	bool running = netif_running(dev);
+	struct mvpp2 *priv = port->priv;
 	int err;
 
 	if (!IS_ALIGNED(MVPP2_RX_PKT_SIZE(mtu), 8)) {
@@ -3515,40 +3884,49 @@
 		mtu = ALIGN(MVPP2_RX_PKT_SIZE(mtu), 8);
 	}
 
-	if (!netif_running(dev)) {
-		err = mvpp2_bm_update_mtu(dev, mtu);
-		if (!err) {
-			port->pkt_size =  MVPP2_RX_PKT_SIZE(mtu);
-			return 0;
+	if (MVPP2_RX_PKT_SIZE(mtu) > MVPP2_BM_LONG_PKT_SIZE) {
+		if (priv->percpu_pools) {
+			netdev_warn(dev, "mtu %d too high, switching to shared buffers", mtu);
+			mvpp2_bm_switch_buffers(priv, false);
 		}
+	} else {
+		bool jumbo = false;
+		int i;
 
-		/* Reconfigure BM to the original MTU */
-		err = mvpp2_bm_update_mtu(dev, dev->mtu);
-		if (err)
-			goto log_error;
+		for (i = 0; i < priv->port_count; i++)
+			if (priv->port_list[i] != port &&
+			    MVPP2_RX_PKT_SIZE(priv->port_list[i]->dev->mtu) >
+			    MVPP2_BM_LONG_PKT_SIZE) {
+				jumbo = true;
+				break;
+			}
+
+		/* No port is using jumbo frames */
+		if (!jumbo) {
+			dev_info(port->dev->dev.parent,
+				 "all ports have a low MTU, switching to per-cpu buffers");
+			mvpp2_bm_switch_buffers(priv, true);
+		}
 	}
 
-	mvpp2_stop_dev(port);
+	if (running)
+		mvpp2_stop_dev(port);
 
 	err = mvpp2_bm_update_mtu(dev, mtu);
-	if (!err) {
+	if (err) {
+		netdev_err(dev, "failed to change MTU\n");
+		/* Reconfigure BM to the original MTU */
+		mvpp2_bm_update_mtu(dev, dev->mtu);
+	} else {
 		port->pkt_size =  MVPP2_RX_PKT_SIZE(mtu);
-		goto out_start;
 	}
 
-	/* Reconfigure BM to the original MTU */
-	err = mvpp2_bm_update_mtu(dev, dev->mtu);
-	if (err)
-		goto log_error;
+	if (running) {
+		mvpp2_start_dev(port);
+		mvpp2_egress_enable(port);
+		mvpp2_ingress_enable(port);
+	}
 
-out_start:
-	mvpp2_start_dev(port);
-	mvpp2_egress_enable(port);
-	mvpp2_ingress_enable(port);
-
-	return 0;
-log_error:
-	netdev_err(dev, "failed to change MTU\n");
 	return err;
 }
 
@@ -3557,7 +3935,7 @@
 {
 	struct mvpp2_port *port = netdev_priv(dev);
 	unsigned int start;
-	int cpu;
+	unsigned int cpu;
 
 	for_each_possible_cpu(cpu) {
 		struct mvpp2_pcpu_stats *cpu_stats;
@@ -3637,9 +4015,9 @@
 
 	if (changed & NETIF_F_RXHASH) {
 		if (features & NETIF_F_RXHASH)
-			mvpp22_rss_enable(port);
+			mvpp22_port_rss_enable(port);
 		else
-			mvpp22_rss_disable(port);
+			mvpp22_port_rss_disable(port);
 	}
 
 	return 0;
@@ -3833,7 +4211,7 @@
 				   struct ethtool_rxnfc *info, u32 *rules)
 {
 	struct mvpp2_port *port = netdev_priv(dev);
-	int ret = 0;
+	int ret = 0, i, loc = 0;
 
 	if (!mvpp22_rss_is_supported())
 		return -EOPNOTSUPP;
@@ -3845,6 +4223,18 @@
 	case ETHTOOL_GRXRINGS:
 		info->data = port->nrxqs;
 		break;
+	case ETHTOOL_GRXCLSRLCNT:
+		info->rule_cnt = port->n_rfs_rules;
+		break;
+	case ETHTOOL_GRXCLSRULE:
+		ret = mvpp2_ethtool_cls_rule_get(port, info);
+		break;
+	case ETHTOOL_GRXCLSRLALL:
+		for (i = 0; i < MVPP2_N_RFS_ENTRIES_PER_FLOW; i++) {
+			if (port->rfs_rules[i])
+				rules[loc++] = i;
+		}
+		break;
 	default:
 		return -ENOTSUPP;
 	}
@@ -3865,6 +4255,12 @@
 	case ETHTOOL_SRXFH:
 		ret = mvpp2_ethtool_rxfh_set(port, info);
 		break;
+	case ETHTOOL_SRXCLSRLINS:
+		ret = mvpp2_ethtool_cls_rule_ins(port, info);
+		break;
+	case ETHTOOL_SRXCLSRLDEL:
+		ret = mvpp2_ethtool_cls_rule_del(port, info);
+		break;
 	default:
 		return -EOPNOTSUPP;
 	}
@@ -3880,24 +4276,25 @@
 				  u8 *hfunc)
 {
 	struct mvpp2_port *port = netdev_priv(dev);
+	int ret = 0;
 
 	if (!mvpp22_rss_is_supported())
 		return -EOPNOTSUPP;
 
 	if (indir)
-		memcpy(indir, port->indir,
-		       ARRAY_SIZE(port->indir) * sizeof(port->indir[0]));
+		ret = mvpp22_port_rss_ctx_indir_get(port, 0, indir);
 
 	if (hfunc)
 		*hfunc = ETH_RSS_HASH_CRC32;
 
-	return 0;
+	return ret;
 }
 
 static int mvpp2_ethtool_set_rxfh(struct net_device *dev, const u32 *indir,
 				  const u8 *key, const u8 hfunc)
 {
 	struct mvpp2_port *port = netdev_priv(dev);
+	int ret = 0;
 
 	if (!mvpp22_rss_is_supported())
 		return -EOPNOTSUPP;
@@ -3908,15 +4305,58 @@
 	if (key)
 		return -EOPNOTSUPP;
 
-	if (indir) {
-		memcpy(port->indir, indir,
-		       ARRAY_SIZE(port->indir) * sizeof(port->indir[0]));
-		mvpp22_rss_fill_table(port, port->id);
-	}
+	if (indir)
+		ret = mvpp22_port_rss_ctx_indir_set(port, 0, indir);
 
-	return 0;
+	return ret;
 }
 
+static int mvpp2_ethtool_get_rxfh_context(struct net_device *dev, u32 *indir,
+					  u8 *key, u8 *hfunc, u32 rss_context)
+{
+	struct mvpp2_port *port = netdev_priv(dev);
+	int ret = 0;
+
+	if (!mvpp22_rss_is_supported())
+		return -EOPNOTSUPP;
+
+	if (hfunc)
+		*hfunc = ETH_RSS_HASH_CRC32;
+
+	if (indir)
+		ret = mvpp22_port_rss_ctx_indir_get(port, rss_context, indir);
+
+	return ret;
+}
+
+static int mvpp2_ethtool_set_rxfh_context(struct net_device *dev,
+					  const u32 *indir, const u8 *key,
+					  const u8 hfunc, u32 *rss_context,
+					  bool delete)
+{
+	struct mvpp2_port *port = netdev_priv(dev);
+	int ret;
+
+	if (!mvpp22_rss_is_supported())
+		return -EOPNOTSUPP;
+
+	if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_CRC32)
+		return -EOPNOTSUPP;
+
+	if (key)
+		return -EOPNOTSUPP;
+
+	if (delete)
+		return mvpp22_port_rss_ctx_delete(port, *rss_context);
+
+	if (*rss_context == ETH_RXFH_CONTEXT_ALLOC) {
+		ret = mvpp22_port_rss_ctx_create(port, rss_context);
+		if (ret)
+			return ret;
+	}
+
+	return mvpp22_port_rss_ctx_indir_set(port, *rss_context, indir);
+}
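For reference, a rough sketch (not part of the patch) of how the two context hooks above are expected to be driven: a new context is requested with *rss_context set to ETH_RXFH_CONTEXT_ALLOC and the driver writes the allocated id back, while delete=true tears the context down again.

/* Illustration only: round-trip through the context hooks added above. */
static int example_rss_ctx_roundtrip(struct net_device *dev, const u32 *indir)
{
	u32 ctx = ETH_RXFH_CONTEXT_ALLOC;	/* ask the driver for a new context */
	int err;

	err = mvpp2_ethtool_set_rxfh_context(dev, indir, NULL,
					     ETH_RSS_HASH_NO_CHANGE, &ctx, false);
	if (err)
		return err;

	/* ... 'ctx' now holds the driver-allocated context id ... */

	return mvpp2_ethtool_set_rxfh_context(dev, NULL, NULL,
					      ETH_RSS_HASH_NO_CHANGE, &ctx, true);
}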
 /* Device ops */
 
 static const struct net_device_ops mvpp2_netdev_ops = {
@@ -3953,7 +4393,8 @@
 	.get_rxfh_indir_size	= mvpp2_ethtool_get_rxfh_indir_size,
 	.get_rxfh		= mvpp2_ethtool_get_rxfh,
 	.set_rxfh		= mvpp2_ethtool_set_rxfh,
-
+	.get_rxfh_context	= mvpp2_ethtool_get_rxfh_context,
+	.set_rxfh_context	= mvpp2_ethtool_set_rxfh_context,
 };
 
 /* Used for PPv2.1, or PPv2.2 with the old Device Tree binding that
@@ -3984,12 +4425,18 @@
 static int mvpp2_multi_queue_vectors_init(struct mvpp2_port *port,
 					  struct device_node *port_node)
 {
+	struct mvpp2 *priv = port->priv;
 	struct mvpp2_queue_vector *v;
 	int i, ret;
 
-	port->nqvecs = num_possible_cpus();
-	if (queue_mode == MVPP2_QDIST_SINGLE_MODE)
-		port->nqvecs += 1;
+	switch (queue_mode) {
+	case MVPP2_QDIST_SINGLE_MODE:
+		port->nqvecs = priv->nthreads + 1;
+		break;
+	case MVPP2_QDIST_MULTI_MODE:
+		port->nqvecs = priv->nthreads;
+		break;
+	}
 
 	for (i = 0; i < port->nqvecs; i++) {
 		char irqname[16];
@@ -4001,17 +4448,22 @@
 		v->sw_thread_id = i;
 		v->sw_thread_mask = BIT(i);
 
-		snprintf(irqname, sizeof(irqname), "tx-cpu%d", i);
+		if (port->flags & MVPP2_F_DT_COMPAT)
+			snprintf(irqname, sizeof(irqname), "tx-cpu%d", i);
+		else
+			snprintf(irqname, sizeof(irqname), "hif%d", i);
 
 		if (queue_mode == MVPP2_QDIST_MULTI_MODE) {
-			v->first_rxq = i * MVPP2_DEFAULT_RXQ;
-			v->nrxqs = MVPP2_DEFAULT_RXQ;
+			v->first_rxq = i;
+			v->nrxqs = 1;
 		} else if (queue_mode == MVPP2_QDIST_SINGLE_MODE &&
 			   i == (port->nqvecs - 1)) {
 			v->first_rxq = 0;
 			v->nrxqs = port->nrxqs;
 			v->type = MVPP2_QUEUE_VECTOR_SHARED;
-			strncpy(irqname, "rx-shared", sizeof(irqname));
+
+			if (port->flags & MVPP2_F_DT_COMPAT)
+				strncpy(irqname, "rx-shared", sizeof(irqname));
 		}
 
 		if (port_node)
@@ -4088,15 +4540,15 @@
 	struct device *dev = port->dev->dev.parent;
 	struct mvpp2 *priv = port->priv;
 	struct mvpp2_txq_pcpu *txq_pcpu;
-	int queue, cpu, err;
+	unsigned int thread;
+	int queue, err;
 
 	/* Checks for hardware constraints */
 	if (port->first_rxq + port->nrxqs >
 	    MVPP2_MAX_PORTS * priv->max_port_rxqs)
 		return -EINVAL;
 
-	if (port->nrxqs % MVPP2_DEFAULT_RXQ ||
-	    port->nrxqs > priv->max_port_rxqs || port->ntxqs > MVPP2_MAX_TXQ)
+	if (port->nrxqs > priv->max_port_rxqs || port->ntxqs > MVPP2_MAX_TXQ)
 		return -EINVAL;
 
 	/* Disable port */
@@ -4132,9 +4584,9 @@
 		txq->id = queue_phy_id;
 		txq->log_id = queue;
 		txq->done_pkts_coal = MVPP2_TXDONE_COAL_PKTS_THRESH;
-		for_each_present_cpu(cpu) {
-			txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
-			txq_pcpu->cpu = cpu;
+		for (thread = 0; thread < priv->nthreads; thread++) {
+			txq_pcpu = per_cpu_ptr(txq->pcpu, thread);
+			txq_pcpu->thread = thread;
 		}
 
 		port->txqs[queue] = txq;
@@ -4186,7 +4638,7 @@
 	mvpp2_cls_port_config(port);
 
 	if (mvpp22_rss_is_supported())
-		mvpp22_rss_port_init(port);
+		mvpp22_port_rss_init(port);
 
 	/* Provide an initial Rx packet size */
 	port->pkt_size = MVPP2_RX_PKT_SIZE(port->dev->mtu);
@@ -4196,6 +4648,11 @@
 	if (err)
 		goto err_free_percpu;
 
+	/* Clear all port stats */
+	mvpp2_read_stats(port);
+	memset(port->ethtool_stats, 0,
+	       MVPP2_N_ETHTOOL_STATS(port->ntxqs, port->nrxqs) * sizeof(u64));
+
 	return 0;
 
 err_free_percpu:
@@ -4207,24 +4664,51 @@
 	return err;
 }
 
-/* Checks if the port DT description has the TX interrupts
- * described. On PPv2.1, there are no such interrupts. On PPv2.2,
- * there are available, but we need to keep support for old DTs.
- */
-static bool mvpp2_port_has_tx_irqs(struct mvpp2 *priv,
-				   struct device_node *port_node)
+static bool mvpp22_port_has_legacy_tx_irqs(struct device_node *port_node,
+					   unsigned long *flags)
 {
-	char *irqs[5] = { "rx-shared", "tx-cpu0", "tx-cpu1",
-			  "tx-cpu2", "tx-cpu3" };
-	int ret, i;
+	char *irqs[5] = { "rx-shared", "tx-cpu0", "tx-cpu1", "tx-cpu2",
+			  "tx-cpu3" };
+	int i;
+
+	for (i = 0; i < 5; i++)
+		if (of_property_match_string(port_node, "interrupt-names",
+					     irqs[i]) < 0)
+			return false;
+
+	*flags |= MVPP2_F_DT_COMPAT;
+	return true;
+}
+
+/* Checks if the port DT description has the required Tx interrupts:
+ * - PPv2.1: there are no such interrupts.
+ * - PPv2.2:
+ *   - The old DTs have: "rx-shared", "tx-cpuX" with X in [0..3]
+ *   - The new ones have: "hifX" with X in [0..8]
+ *
+ * All these variants are supported to preserve backward compatibility.
+ */
+static bool mvpp2_port_has_irqs(struct mvpp2 *priv,
+				struct device_node *port_node,
+				unsigned long *flags)
+{
+	char name[5];
+	int i;
+
+	/* ACPI */
+	if (!port_node)
+		return true;
 
 	if (priv->hw_version == MVPP21)
 		return false;
 
-	for (i = 0; i < 5; i++) {
-		ret = of_property_match_string(port_node, "interrupt-names",
-					       irqs[i]);
-		if (ret < 0)
+	if (mvpp22_port_has_legacy_tx_irqs(port_node, flags))
+		return true;
+
+	for (i = 0; i < MVPP2_MAX_THREADS; i++) {
+		snprintf(name, 5, "hif%d", i);
+		if (of_property_match_string(port_node, "interrupt-names",
+					     name) < 0)
 			return false;
 	}
 
@@ -4258,11 +4742,12 @@
 	eth_hw_addr_random(dev);
 }
 
-static void mvpp2_phylink_validate(struct net_device *dev,
+static void mvpp2_phylink_validate(struct phylink_config *config,
 				   unsigned long *supported,
 				   struct phylink_link_state *state)
 {
-	struct mvpp2_port *port = netdev_priv(dev);
+	struct mvpp2_port *port = container_of(config, struct mvpp2_port,
+					       phylink_config);
 	__ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
 
 	/* Invalid combinations */
@@ -4276,7 +4761,7 @@
 	case PHY_INTERFACE_MODE_RGMII_ID:
 	case PHY_INTERFACE_MODE_RGMII_RXID:
 	case PHY_INTERFACE_MODE_RGMII_TXID:
-		if (port->gop_id == 0)
+		if (port->priv->hw_version == MVPP22 && port->gop_id == 0)
 			goto empty_set;
 		break;
 	default:
@@ -4292,12 +4777,15 @@
 	case PHY_INTERFACE_MODE_10GKR:
 	case PHY_INTERFACE_MODE_XAUI:
 	case PHY_INTERFACE_MODE_NA:
-		phylink_set(mask, 10000baseCR_Full);
-		phylink_set(mask, 10000baseSR_Full);
-		phylink_set(mask, 10000baseLR_Full);
-		phylink_set(mask, 10000baseLRM_Full);
-		phylink_set(mask, 10000baseER_Full);
-		phylink_set(mask, 10000baseKR_Full);
+		if (port->gop_id == 0) {
+			phylink_set(mask, 10000baseT_Full);
+			phylink_set(mask, 10000baseCR_Full);
+			phylink_set(mask, 10000baseSR_Full);
+			phylink_set(mask, 10000baseLR_Full);
+			phylink_set(mask, 10000baseLRM_Full);
+			phylink_set(mask, 10000baseER_Full);
+			phylink_set(mask, 10000baseKR_Full);
+		}
 		/* Fall-through */
 	case PHY_INTERFACE_MODE_RGMII:
 	case PHY_INTERFACE_MODE_RGMII_ID:
@@ -4308,12 +4796,12 @@
 		phylink_set(mask, 10baseT_Full);
 		phylink_set(mask, 100baseT_Half);
 		phylink_set(mask, 100baseT_Full);
-		phylink_set(mask, 10000baseT_Full);
 		/* Fall-through */
 	case PHY_INTERFACE_MODE_1000BASEX:
 	case PHY_INTERFACE_MODE_2500BASEX:
 		phylink_set(mask, 1000baseT_Full);
 		phylink_set(mask, 1000baseX_Full);
+		phylink_set(mask, 2500baseT_Full);
 		phylink_set(mask, 2500baseX_Full);
 		break;
 	default:
@@ -4383,10 +4871,11 @@
 		state->pause |= MLO_PAUSE_TX;
 }
 
-static int mvpp2_phylink_mac_link_state(struct net_device *dev,
+static int mvpp2_phylink_mac_link_state(struct phylink_config *config,
 					struct phylink_link_state *state)
 {
-	struct mvpp2_port *port = netdev_priv(dev);
+	struct mvpp2_port *port = container_of(config, struct mvpp2_port,
+					       phylink_config);
 
 	if (port->priv->hw_version == MVPP22 && port->gop_id == 0) {
 		u32 mode = readl(port->base + MVPP22_XLG_CTRL3_REG);
@@ -4402,134 +4891,203 @@
 	return 1;
 }
 
-static void mvpp2_mac_an_restart(struct net_device *dev)
+static void mvpp2_mac_an_restart(struct phylink_config *config)
 {
-	struct mvpp2_port *port = netdev_priv(dev);
-	u32 val;
+	struct mvpp2_port *port = container_of(config, struct mvpp2_port,
+					       phylink_config);
+	u32 val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
 
-	if (port->phy_interface != PHY_INTERFACE_MODE_SGMII)
-		return;
-
-	val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
-	/* The RESTART_AN bit is cleared by the h/w after restarting the AN
-	 * process.
-	 */
-	val |= MVPP2_GMAC_IN_BAND_RESTART_AN | MVPP2_GMAC_IN_BAND_AUTONEG;
-	writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
+	writel(val | MVPP2_GMAC_IN_BAND_RESTART_AN,
+	       port->base + MVPP2_GMAC_AUTONEG_CONFIG);
+	writel(val & ~MVPP2_GMAC_IN_BAND_RESTART_AN,
+	       port->base + MVPP2_GMAC_AUTONEG_CONFIG);
 }
 
 static void mvpp2_xlg_config(struct mvpp2_port *port, unsigned int mode,
 			     const struct phylink_link_state *state)
 {
-	u32 ctrl0, ctrl4;
+	u32 old_ctrl0, ctrl0;
+	u32 old_ctrl4, ctrl4;
 
-	ctrl0 = readl(port->base + MVPP22_XLG_CTRL0_REG);
-	ctrl4 = readl(port->base + MVPP22_XLG_CTRL4_REG);
+	old_ctrl0 = ctrl0 = readl(port->base + MVPP22_XLG_CTRL0_REG);
+	old_ctrl4 = ctrl4 = readl(port->base + MVPP22_XLG_CTRL4_REG);
+
+	ctrl0 |= MVPP22_XLG_CTRL0_MAC_RESET_DIS;
 
 	if (state->pause & MLO_PAUSE_TX)
 		ctrl0 |= MVPP22_XLG_CTRL0_TX_FLOW_CTRL_EN;
+	else
+		ctrl0 &= ~MVPP22_XLG_CTRL0_TX_FLOW_CTRL_EN;
+
 	if (state->pause & MLO_PAUSE_RX)
 		ctrl0 |= MVPP22_XLG_CTRL0_RX_FLOW_CTRL_EN;
+	else
+		ctrl0 &= ~MVPP22_XLG_CTRL0_RX_FLOW_CTRL_EN;
 
-	ctrl4 &= ~MVPP22_XLG_CTRL4_MACMODSELECT_GMAC;
-	ctrl4 |= MVPP22_XLG_CTRL4_FWD_FC | MVPP22_XLG_CTRL4_FWD_PFC |
-		 MVPP22_XLG_CTRL4_EN_IDLE_CHECK;
+	ctrl4 &= ~(MVPP22_XLG_CTRL4_MACMODSELECT_GMAC |
+		   MVPP22_XLG_CTRL4_EN_IDLE_CHECK);
+	ctrl4 |= MVPP22_XLG_CTRL4_FWD_FC | MVPP22_XLG_CTRL4_FWD_PFC;
 
-	writel(ctrl0, port->base + MVPP22_XLG_CTRL0_REG);
-	writel(ctrl4, port->base + MVPP22_XLG_CTRL4_REG);
+	if (old_ctrl0 != ctrl0)
+		writel(ctrl0, port->base + MVPP22_XLG_CTRL0_REG);
+	if (old_ctrl4 != ctrl4)
+		writel(ctrl4, port->base + MVPP22_XLG_CTRL4_REG);
+
+	if (!(old_ctrl0 & MVPP22_XLG_CTRL0_MAC_RESET_DIS)) {
+		while (!(readl(port->base + MVPP22_XLG_CTRL0_REG) &
+			 MVPP22_XLG_CTRL0_MAC_RESET_DIS))
+			continue;
+	}
 }
 
 static void mvpp2_gmac_config(struct mvpp2_port *port, unsigned int mode,
 			      const struct phylink_link_state *state)
 {
-	u32 an, ctrl0, ctrl2, ctrl4;
+	u32 old_an, an;
+	u32 old_ctrl0, ctrl0;
+	u32 old_ctrl2, ctrl2;
+	u32 old_ctrl4, ctrl4;
 
-	an = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
-	ctrl0 = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
-	ctrl2 = readl(port->base + MVPP2_GMAC_CTRL_2_REG);
-	ctrl4 = readl(port->base + MVPP22_GMAC_CTRL_4_REG);
-
-	/* Force link down */
-	an &= ~MVPP2_GMAC_FORCE_LINK_PASS;
-	an |= MVPP2_GMAC_FORCE_LINK_DOWN;
-	writel(an, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
-
-	/* Set the GMAC in a reset state */
-	ctrl2 |= MVPP2_GMAC_PORT_RESET_MASK;
-	writel(ctrl2, port->base + MVPP2_GMAC_CTRL_2_REG);
+	old_an = an = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
+	old_ctrl0 = ctrl0 = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
+	old_ctrl2 = ctrl2 = readl(port->base + MVPP2_GMAC_CTRL_2_REG);
+	old_ctrl4 = ctrl4 = readl(port->base + MVPP22_GMAC_CTRL_4_REG);
 
 	an &= ~(MVPP2_GMAC_CONFIG_MII_SPEED | MVPP2_GMAC_CONFIG_GMII_SPEED |
 		MVPP2_GMAC_AN_SPEED_EN | MVPP2_GMAC_FC_ADV_EN |
 		MVPP2_GMAC_FC_ADV_ASM_EN | MVPP2_GMAC_FLOW_CTRL_AUTONEG |
 		MVPP2_GMAC_CONFIG_FULL_DUPLEX | MVPP2_GMAC_AN_DUPLEX_EN |
-		MVPP2_GMAC_FORCE_LINK_DOWN);
+		MVPP2_GMAC_IN_BAND_AUTONEG | MVPP2_GMAC_IN_BAND_AUTONEG_BYPASS);
 	ctrl0 &= ~MVPP2_GMAC_PORT_TYPE_MASK;
-	ctrl2 &= ~(MVPP2_GMAC_PORT_RESET_MASK | MVPP2_GMAC_PCS_ENABLE_MASK);
+	ctrl2 &= ~(MVPP2_GMAC_INBAND_AN_MASK | MVPP2_GMAC_PORT_RESET_MASK |
+		   MVPP2_GMAC_PCS_ENABLE_MASK);
+	ctrl4 &= ~(MVPP22_CTRL4_RX_FC_EN | MVPP22_CTRL4_TX_FC_EN);
 
-	if (state->interface == PHY_INTERFACE_MODE_1000BASEX ||
-	    state->interface == PHY_INTERFACE_MODE_2500BASEX) {
-		/* 1000BaseX and 2500BaseX ports cannot negotiate speed nor can
-		 * they negotiate duplex: they are always operating with a fixed
-		 * speed of 1000/2500Mbps in full duplex, so force 1000/2500
-		 * speed and full duplex here.
-		 */
-		ctrl0 |= MVPP2_GMAC_PORT_TYPE_MASK;
-		an |= MVPP2_GMAC_CONFIG_GMII_SPEED |
-		      MVPP2_GMAC_CONFIG_FULL_DUPLEX;
-	} else if (!phy_interface_mode_is_rgmii(state->interface)) {
-		an |= MVPP2_GMAC_AN_SPEED_EN | MVPP2_GMAC_FLOW_CTRL_AUTONEG;
-	}
-
-	if (state->duplex)
-		an |= MVPP2_GMAC_CONFIG_FULL_DUPLEX;
-	if (phylink_test(state->advertising, Pause))
-		an |= MVPP2_GMAC_FC_ADV_EN;
-	if (phylink_test(state->advertising, Asym_Pause))
-		an |= MVPP2_GMAC_FC_ADV_ASM_EN;
-
-	if (state->interface == PHY_INTERFACE_MODE_SGMII ||
-	    state->interface == PHY_INTERFACE_MODE_1000BASEX ||
-	    state->interface == PHY_INTERFACE_MODE_2500BASEX) {
-		an |= MVPP2_GMAC_IN_BAND_AUTONEG;
-		ctrl2 |= MVPP2_GMAC_INBAND_AN_MASK | MVPP2_GMAC_PCS_ENABLE_MASK;
-
-		ctrl4 &= ~(MVPP22_CTRL4_EXT_PIN_GMII_SEL |
-			   MVPP22_CTRL4_RX_FC_EN | MVPP22_CTRL4_TX_FC_EN);
+	/* Configure port type */
+	if (phy_interface_mode_is_8023z(state->interface)) {
+		ctrl2 |= MVPP2_GMAC_PCS_ENABLE_MASK;
+		ctrl4 &= ~MVPP22_CTRL4_EXT_PIN_GMII_SEL;
 		ctrl4 |= MVPP22_CTRL4_SYNC_BYPASS_DIS |
 			 MVPP22_CTRL4_DP_CLK_SEL |
 			 MVPP22_CTRL4_QSGMII_BYPASS_ACTIVE;
-
-		if (state->pause & MLO_PAUSE_TX)
-			ctrl4 |= MVPP22_CTRL4_TX_FC_EN;
-		if (state->pause & MLO_PAUSE_RX)
-			ctrl4 |= MVPP22_CTRL4_RX_FC_EN;
+	} else if (state->interface == PHY_INTERFACE_MODE_SGMII) {
+		ctrl2 |= MVPP2_GMAC_PCS_ENABLE_MASK | MVPP2_GMAC_INBAND_AN_MASK;
+		ctrl4 &= ~MVPP22_CTRL4_EXT_PIN_GMII_SEL;
+		ctrl4 |= MVPP22_CTRL4_SYNC_BYPASS_DIS |
+			 MVPP22_CTRL4_DP_CLK_SEL |
+			 MVPP22_CTRL4_QSGMII_BYPASS_ACTIVE;
 	} else if (phy_interface_mode_is_rgmii(state->interface)) {
-		an |= MVPP2_GMAC_IN_BAND_AUTONEG_BYPASS;
-
-		if (state->speed == SPEED_1000)
-			an |= MVPP2_GMAC_CONFIG_GMII_SPEED;
-		else if (state->speed == SPEED_100)
-			an |= MVPP2_GMAC_CONFIG_MII_SPEED;
-
 		ctrl4 &= ~MVPP22_CTRL4_DP_CLK_SEL;
 		ctrl4 |= MVPP22_CTRL4_EXT_PIN_GMII_SEL |
 			 MVPP22_CTRL4_SYNC_BYPASS_DIS |
 			 MVPP22_CTRL4_QSGMII_BYPASS_ACTIVE;
 	}
 
-	writel(ctrl0, port->base + MVPP2_GMAC_CTRL_0_REG);
-	writel(ctrl2, port->base + MVPP2_GMAC_CTRL_2_REG);
-	writel(ctrl4, port->base + MVPP22_GMAC_CTRL_4_REG);
-	writel(an, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
+	/* Configure advertisement bits */
+	if (phylink_test(state->advertising, Pause))
+		an |= MVPP2_GMAC_FC_ADV_EN;
+	if (phylink_test(state->advertising, Asym_Pause))
+		an |= MVPP2_GMAC_FC_ADV_ASM_EN;
+
+	/* Configure negotiation style */
+	if (!phylink_autoneg_inband(mode)) {
+		/* Phy or fixed speed - no in-band AN */
+		if (state->duplex)
+			an |= MVPP2_GMAC_CONFIG_FULL_DUPLEX;
+
+		if (state->speed == SPEED_1000 || state->speed == SPEED_2500)
+			an |= MVPP2_GMAC_CONFIG_GMII_SPEED;
+		else if (state->speed == SPEED_100)
+			an |= MVPP2_GMAC_CONFIG_MII_SPEED;
+
+		if (state->pause & MLO_PAUSE_TX)
+			ctrl4 |= MVPP22_CTRL4_TX_FC_EN;
+		if (state->pause & MLO_PAUSE_RX)
+			ctrl4 |= MVPP22_CTRL4_RX_FC_EN;
+	} else if (state->interface == PHY_INTERFACE_MODE_SGMII) {
+		/* SGMII in-band mode receives the speed and duplex from
+		 * the PHY. Flow control information is not received. */
+		an &= ~(MVPP2_GMAC_FORCE_LINK_DOWN | MVPP2_GMAC_FORCE_LINK_PASS);
+		an |= MVPP2_GMAC_IN_BAND_AUTONEG |
+		      MVPP2_GMAC_AN_SPEED_EN |
+		      MVPP2_GMAC_AN_DUPLEX_EN;
+
+		if (state->pause & MLO_PAUSE_TX)
+			ctrl4 |= MVPP22_CTRL4_TX_FC_EN;
+		if (state->pause & MLO_PAUSE_RX)
+			ctrl4 |= MVPP22_CTRL4_RX_FC_EN;
+	} else if (phy_interface_mode_is_8023z(state->interface)) {
+		/* 1000BaseX and 2500BaseX ports cannot negotiate speed nor can
+		 * they negotiate duplex: they are always operating with a fixed
+		 * speed of 1000/2500Mbps in full duplex, so force 1000/2500
+		 * speed and full duplex here.
+		 */
+		ctrl0 |= MVPP2_GMAC_PORT_TYPE_MASK;
+		an &= ~(MVPP2_GMAC_FORCE_LINK_DOWN | MVPP2_GMAC_FORCE_LINK_PASS);
+		an |= MVPP2_GMAC_IN_BAND_AUTONEG |
+		      MVPP2_GMAC_CONFIG_GMII_SPEED |
+		      MVPP2_GMAC_CONFIG_FULL_DUPLEX;
+
+		if (state->pause & MLO_PAUSE_AN && state->an_enabled) {
+			an |= MVPP2_GMAC_FLOW_CTRL_AUTONEG;
+		} else {
+			if (state->pause & MLO_PAUSE_TX)
+				ctrl4 |= MVPP22_CTRL4_TX_FC_EN;
+			if (state->pause & MLO_PAUSE_RX)
+				ctrl4 |= MVPP22_CTRL4_RX_FC_EN;
+		}
+	}
+
+/* Some fields of the auto-negotiation register require the port to be down when
+ * their value is updated.
+ */
+#define MVPP2_GMAC_AN_PORT_DOWN_MASK	\
+		(MVPP2_GMAC_IN_BAND_AUTONEG | \
+		 MVPP2_GMAC_IN_BAND_AUTONEG_BYPASS | \
+		 MVPP2_GMAC_CONFIG_MII_SPEED | MVPP2_GMAC_CONFIG_GMII_SPEED | \
+		 MVPP2_GMAC_AN_SPEED_EN | MVPP2_GMAC_CONFIG_FULL_DUPLEX | \
+		 MVPP2_GMAC_AN_DUPLEX_EN)
+
+	if ((old_ctrl0 ^ ctrl0) & MVPP2_GMAC_PORT_TYPE_MASK ||
+	    (old_ctrl2 ^ ctrl2) & MVPP2_GMAC_INBAND_AN_MASK ||
+	    (old_an ^ an) & MVPP2_GMAC_AN_PORT_DOWN_MASK) {
+		/* Force link down */
+		old_an &= ~MVPP2_GMAC_FORCE_LINK_PASS;
+		old_an |= MVPP2_GMAC_FORCE_LINK_DOWN;
+		writel(old_an, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
+
+		/* Set the GMAC in a reset state - do this in a way that
+		 * ensures we clear it below.
+		 */
+		old_ctrl2 |= MVPP2_GMAC_PORT_RESET_MASK;
+		writel(old_ctrl2, port->base + MVPP2_GMAC_CTRL_2_REG);
+	}
+
+	if (old_ctrl0 != ctrl0)
+		writel(ctrl0, port->base + MVPP2_GMAC_CTRL_0_REG);
+	if (old_ctrl2 != ctrl2)
+		writel(ctrl2, port->base + MVPP2_GMAC_CTRL_2_REG);
+	if (old_ctrl4 != ctrl4)
+		writel(ctrl4, port->base + MVPP22_GMAC_CTRL_4_REG);
+	if (old_an != an)
+		writel(an, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
+
+	if (old_ctrl2 & MVPP2_GMAC_PORT_RESET_MASK) {
+		while (readl(port->base + MVPP2_GMAC_CTRL_2_REG) &
+		       MVPP2_GMAC_PORT_RESET_MASK)
+			continue;
+	}
 }
 
-static void mvpp2_mac_config(struct net_device *dev, unsigned int mode,
+static void mvpp2_mac_config(struct phylink_config *config, unsigned int mode,
 			     const struct phylink_link_state *state)
 {
+	struct net_device *dev = to_net_dev(config->dev);
 	struct mvpp2_port *port = netdev_priv(dev);
+	bool change_interface = port->phy_interface != state->interface;
 
 	/* Check for invalid configuration */
-	if (state->interface == PHY_INTERFACE_MODE_10GKR && port->gop_id != 0) {
+	if (mvpp2_is_xlg(state->interface) && port->gop_id != 0) {
 		netdev_err(dev, "Invalid mode on %s\n", dev->name);
 		return;
 	}
@@ -4537,8 +5095,9 @@
 	/* Make sure the port is disabled when reconfiguring the mode */
 	mvpp2_port_disable(port);
 
-	if (port->priv->hw_version == MVPP22 &&
-	    port->phy_interface != state->interface) {
+	if (port->priv->hw_version == MVPP22 && change_interface) {
+		mvpp22_gop_mask_irq(port);
+
 		port->phy_interface = state->interface;
 
 		/* Reconfigure the serdes lanes */
@@ -4547,33 +5106,41 @@
 	}
 
 	/* mac (re)configuration */
-	if (state->interface == PHY_INTERFACE_MODE_10GKR)
+	if (mvpp2_is_xlg(state->interface))
 		mvpp2_xlg_config(port, mode, state);
 	else if (phy_interface_mode_is_rgmii(state->interface) ||
-		 state->interface == PHY_INTERFACE_MODE_SGMII ||
-		 state->interface == PHY_INTERFACE_MODE_1000BASEX ||
-		 state->interface == PHY_INTERFACE_MODE_2500BASEX)
+		 phy_interface_mode_is_8023z(state->interface) ||
+		 state->interface == PHY_INTERFACE_MODE_SGMII)
 		mvpp2_gmac_config(port, mode, state);
 
 	if (port->priv->hw_version == MVPP21 && port->flags & MVPP2_F_LOOPBACK)
 		mvpp2_port_loopback_set(port, state);
 
+	if (port->priv->hw_version == MVPP22 && change_interface)
+		mvpp22_gop_unmask_irq(port);
+
 	mvpp2_port_enable(port);
 }
 
-static void mvpp2_mac_link_up(struct net_device *dev, unsigned int mode,
+static void mvpp2_mac_link_up(struct phylink_config *config, unsigned int mode,
 			      phy_interface_t interface, struct phy_device *phy)
 {
+	struct net_device *dev = to_net_dev(config->dev);
 	struct mvpp2_port *port = netdev_priv(dev);
 	u32 val;
 
-	if (!phylink_autoneg_inband(mode) &&
-	    interface != PHY_INTERFACE_MODE_10GKR) {
-		val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
-		val &= ~MVPP2_GMAC_FORCE_LINK_DOWN;
-		if (phy_interface_mode_is_rgmii(interface))
+	if (!phylink_autoneg_inband(mode)) {
+		if (mvpp2_is_xlg(interface)) {
+			val = readl(port->base + MVPP22_XLG_CTRL0_REG);
+			val &= ~MVPP22_XLG_CTRL0_FORCE_LINK_DOWN;
+			val |= MVPP22_XLG_CTRL0_FORCE_LINK_PASS;
+			writel(val, port->base + MVPP22_XLG_CTRL0_REG);
+		} else {
+			val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
+			val &= ~MVPP2_GMAC_FORCE_LINK_DOWN;
 			val |= MVPP2_GMAC_FORCE_LINK_PASS;
-		writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
+			writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
+		}
 	}
 
 	mvpp2_port_enable(port);
@@ -4583,31 +5150,31 @@
 	netif_tx_wake_all_queues(dev);
 }
 
-static void mvpp2_mac_link_down(struct net_device *dev, unsigned int mode,
-				phy_interface_t interface)
+static void mvpp2_mac_link_down(struct phylink_config *config,
+				unsigned int mode, phy_interface_t interface)
 {
+	struct net_device *dev = to_net_dev(config->dev);
 	struct mvpp2_port *port = netdev_priv(dev);
 	u32 val;
 
-	if (!phylink_autoneg_inband(mode) &&
-	    interface != PHY_INTERFACE_MODE_10GKR) {
-		val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
-		val &= ~MVPP2_GMAC_FORCE_LINK_PASS;
-		val |= MVPP2_GMAC_FORCE_LINK_DOWN;
-		writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
+	if (!phylink_autoneg_inband(mode)) {
+		if (mvpp2_is_xlg(interface)) {
+			val = readl(port->base + MVPP22_XLG_CTRL0_REG);
+			val &= ~MVPP22_XLG_CTRL0_FORCE_LINK_PASS;
+			val |= MVPP22_XLG_CTRL0_FORCE_LINK_DOWN;
+			writel(val, port->base + MVPP22_XLG_CTRL0_REG);
+		} else {
+			val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
+			val &= ~MVPP2_GMAC_FORCE_LINK_PASS;
+			val |= MVPP2_GMAC_FORCE_LINK_DOWN;
+			writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
+		}
 	}
 
 	netif_tx_stop_all_queues(dev);
 	mvpp2_egress_disable(port);
 	mvpp2_ingress_disable(port);
 
-	/* When using link interrupts to notify phylink of a MAC state change,
-	 * we do not want the port to be disabled (we want to receive further
-	 * interrupts, to be notified when the port will have a link later).
-	 */
-	if (!port->has_phy)
-		return;
-
 	mvpp2_port_disable(port);
 }
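Several of the converted phylink callbacks now repeat the same container_of() dance to get from the phylink_config back to the owning mvpp2_port. A small helper along these lines (a sketch only, not part of this patch) would keep that pattern in one place:

/* Sketch: recover the mvpp2_port from its embedded phylink_config, as done
 * inline in mvpp2_phylink_validate() and mvpp2_phylink_mac_link_state().
 */
static inline struct mvpp2_port *mvpp2_phylink_to_port(struct phylink_config *config)
{
	return container_of(config, struct mvpp2_port, phylink_config);
}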
 
@@ -4629,32 +5196,26 @@
 	struct mvpp2_port *port;
 	struct mvpp2_port_pcpu *port_pcpu;
 	struct device_node *port_node = to_of_node(port_fwnode);
+	netdev_features_t features;
 	struct net_device *dev;
-	struct resource *res;
 	struct phylink *phylink;
 	char *mac_from = "";
-	unsigned int ntxqs, nrxqs;
+	unsigned int ntxqs, nrxqs, thread;
+	unsigned long flags = 0;
 	bool has_tx_irqs;
 	u32 id;
-	int features;
 	int phy_mode;
-	int err, i, cpu;
+	int err, i;
 
-	if (port_node) {
-		has_tx_irqs = mvpp2_port_has_tx_irqs(priv, port_node);
-	} else {
-		has_tx_irqs = true;
-		queue_mode = MVPP2_QDIST_MULTI_MODE;
+	has_tx_irqs = mvpp2_port_has_irqs(priv, port_node, &flags);
+	if (!has_tx_irqs && queue_mode == MVPP2_QDIST_MULTI_MODE) {
+		dev_err(&pdev->dev,
+			"not enough IRQs to support multi queue mode\n");
+		return -EINVAL;
 	}
 
-	if (!has_tx_irqs)
-		queue_mode = MVPP2_QDIST_SINGLE_MODE;
-
 	ntxqs = MVPP2_MAX_TXQ;
-	if (priv->hw_version == MVPP22 && queue_mode == MVPP2_QDIST_MULTI_MODE)
-		nrxqs = MVPP2_DEFAULT_RXQ * num_possible_cpus();
-	else
-		nrxqs = MVPP2_DEFAULT_RXQ;
+	nrxqs = mvpp2_get_nrxqs(priv);
 
 	dev = alloc_etherdev_mqs(sizeof(*port), ntxqs, nrxqs);
 	if (!dev)
@@ -4697,6 +5258,7 @@
 	port->nrxqs = nrxqs;
 	port->priv = priv;
 	port->has_tx_irqs = has_tx_irqs;
+	port->flags = flags;
 
 	err = mvpp2_queue_vectors_init(port, port_node);
 	if (err)
@@ -4728,8 +5290,7 @@
 	port->comphy = comphy;
 
 	if (priv->hw_version == MVPP21) {
-		res = platform_get_resource(pdev, IORESOURCE_MEM, 2 + id);
-		port->base = devm_ioremap_resource(&pdev->dev, res);
+		port->base = devm_platform_ioremap_resource(pdev, 2 + id);
 		if (IS_ERR(port->base)) {
 			err = PTR_ERR(port->base);
 			goto err_free_irq;
@@ -4760,7 +5321,7 @@
 	}
 
 	port->ethtool_stats = devm_kcalloc(&pdev->dev,
-					   ARRAY_SIZE(mvpp2_ethtool_regs),
+					   MVPP2_N_ETHTOOL_STATS(ntxqs, nrxqs),
 					   sizeof(u64), GFP_KERNEL);
 	if (!port->ethtool_stats) {
 		err = -ENOMEM;
@@ -4784,7 +5345,8 @@
 
 	mvpp2_port_periodic_xon_disable(port);
 
-	mvpp2_port_reset(port);
+	mvpp2_mac_reset_assert(port);
+	mvpp22_pcs_reset_assert(port);
 
 	port->pcpu = alloc_percpu(struct mvpp2_port_pcpu);
 	if (!port->pcpu) {
@@ -4793,17 +5355,14 @@
 	}
 
 	if (!port->has_tx_irqs) {
-		for_each_present_cpu(cpu) {
-			port_pcpu = per_cpu_ptr(port->pcpu, cpu);
+		for (thread = 0; thread < priv->nthreads; thread++) {
+			port_pcpu = per_cpu_ptr(port->pcpu, thread);
 
 			hrtimer_init(&port_pcpu->tx_done_timer, CLOCK_MONOTONIC,
-				     HRTIMER_MODE_REL_PINNED);
+				     HRTIMER_MODE_REL_PINNED_SOFT);
 			port_pcpu->tx_done_timer.function = mvpp2_hr_timer_cb;
 			port_pcpu->timer_scheduled = false;
-
-			tasklet_init(&port_pcpu->tx_done_tasklet,
-				     mvpp2_tx_proc_cb,
-				     (unsigned long)dev);
+			port_pcpu->dev = dev;
 		}
 	}
 
@@ -4813,14 +5372,14 @@
 	dev->hw_features |= features | NETIF_F_RXCSUM | NETIF_F_GRO |
 			    NETIF_F_HW_VLAN_CTAG_FILTER;
 
-	if (mvpp22_rss_is_supported())
+	if (mvpp22_rss_is_supported()) {
 		dev->hw_features |= NETIF_F_RXHASH;
-
-	if (port->pool_long->id == MVPP2_BM_JUMBO && port->id != 0) {
-		dev->features &= ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
-		dev->hw_features &= ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
+		dev->features |= NETIF_F_NTUPLE;
 	}
 
+	if (!port->priv->percpu_pools)
+		mvpp2_set_hw_csum(port, port->pool_long->id);
+
 	dev->vlan_features |= features;
 	dev->gso_max_segs = MVPP2_MAX_TSO_SEGS;
 	dev->priv_flags |= IFF_UNICAST_FLT;
@@ -4833,8 +5392,11 @@
 
 	/* Phylink isn't used w/ ACPI as of now */
 	if (port_node) {
-		phylink = phylink_create(dev, port_fwnode, phy_mode,
-					 &mvpp2_phylink_ops);
+		port->phylink_config.dev = &dev->dev;
+		port->phylink_config.type = PHYLINK_NETDEV;
+
+		phylink = phylink_create(&port->phylink_config, port_fwnode,
+					 phy_mode, &mvpp2_phylink_ops);
 		if (IS_ERR(phylink)) {
 			err = PTR_ERR(phylink);
 			goto err_free_port_pcpu;
@@ -5078,13 +5640,13 @@
 	}
 
 	/* Allocate and initialize aggregated TXQs */
-	priv->aggr_txqs = devm_kcalloc(&pdev->dev, num_present_cpus(),
+	priv->aggr_txqs = devm_kcalloc(&pdev->dev, MVPP2_MAX_THREADS,
 				       sizeof(*priv->aggr_txqs),
 				       GFP_KERNEL);
 	if (!priv->aggr_txqs)
 		return -ENOMEM;
 
-	for_each_present_cpu(i) {
+	for (i = 0; i < MVPP2_MAX_THREADS; i++) {
 		priv->aggr_txqs[i].id = i;
 		priv->aggr_txqs[i].size = MVPP2_AGGR_TXQ_SIZE;
 		err = mvpp2_aggr_txq_init(pdev, &priv->aggr_txqs[i], i, priv);
@@ -5108,7 +5670,7 @@
 	mvpp2_write(priv, MVPP2_TX_SNOOP_REG, 0x1);
 
 	/* Buffer Manager initialization */
-	err = mvpp2_bm_init(pdev, priv);
+	err = mvpp2_bm_init(&pdev->dev, priv);
 	if (err < 0)
 		return err;
 
@@ -5131,7 +5693,7 @@
 	struct mvpp2 *priv;
 	struct resource *res;
 	void __iomem *base;
-	int i;
+	int i, shared;
 	int err;
 
 	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
@@ -5141,6 +5703,8 @@
 	if (has_acpi_companion(&pdev->dev)) {
 		acpi_id = acpi_match_device(pdev->dev.driver->acpi_match_table,
 					    &pdev->dev);
+		if (!acpi_id)
+			return -EINVAL;
 		priv->hw_version = (unsigned long)acpi_id->driver_data;
 	} else {
 		priv->hw_version =
@@ -5153,14 +5717,12 @@
 	if (priv->hw_version == MVPP21)
 		queue_mode = MVPP2_QDIST_SINGLE_MODE;
 
-	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	base = devm_ioremap_resource(&pdev->dev, res);
+	base = devm_platform_ioremap_resource(pdev, 0);
 	if (IS_ERR(base))
 		return PTR_ERR(base);
 
 	if (priv->hw_version == MVPP21) {
-		res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
-		priv->lms_base = devm_ioremap_resource(&pdev->dev, res);
+		priv->lms_base = devm_platform_ioremap_resource(pdev, 1);
 		if (IS_ERR(priv->lms_base))
 			return PTR_ERR(priv->lms_base);
 	} else {
@@ -5194,8 +5756,21 @@
 			priv->sysctrl_base = NULL;
 	}
 
+	if (priv->hw_version == MVPP22 &&
+	    mvpp2_get_nrxqs(priv) * 2 <= MVPP2_BM_MAX_POOLS)
+		priv->percpu_pools = 1;
+
 	mvpp2_setup_bm_pool();
 
+
+	priv->nthreads = min_t(unsigned int, num_present_cpus(),
+			       MVPP2_MAX_THREADS);
+
+	shared = num_present_cpus() - priv->nthreads;
+	if (shared > 0)
+		bitmap_fill(&priv->lock_map,
+			    min_t(int, shared, MVPP2_MAX_THREADS));
+
 	for (i = 0; i < MVPP2_MAX_THREADS; i++) {
 		u32 addr_space_sz;
 
@@ -5353,9 +5928,6 @@
 
 	mvpp2_dbgfs_cleanup(priv);
 
-	flush_workqueue(priv->stats_queue);
-	destroy_workqueue(priv->stats_queue);
-
 	fwnode_for_each_available_child_node(fwnode, port_fwnode) {
 		if (priv->port_list[i]) {
 			mutex_destroy(&priv->port_list[i]->gather_stats_lock);
@@ -5364,13 +5936,15 @@
 		i++;
 	}
 
+	destroy_workqueue(priv->stats_queue);
+
 	for (i = 0; i < MVPP2_BM_POOLS_NUM; i++) {
 		struct mvpp2_bm_pool *bm_pool = &priv->bm_pools[i];
 
-		mvpp2_bm_pool_destroy(pdev, priv, bm_pool);
+		mvpp2_bm_pool_destroy(&pdev->dev, priv, bm_pool);
 	}
 
-	for_each_present_cpu(i) {
+	for (i = 0; i < MVPP2_MAX_THREADS; i++) {
 		struct mvpp2_tx_queue *aggr_txq = &priv->aggr_txqs[i];
 
 		dma_free_coherent(&pdev->dev,
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c
index 392fd89..5692c60 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c
@@ -312,7 +312,8 @@
 	}
 
 	/* Set value */
-	pe->sram[MVPP2_BIT_TO_WORD(MVPP2_PRS_SRAM_SHIFT_OFFS)] = shift & MVPP2_PRS_SRAM_SHIFT_MASK;
+	pe->sram[MVPP2_BIT_TO_WORD(MVPP2_PRS_SRAM_SHIFT_OFFS)] |=
+		shift & MVPP2_PRS_SRAM_SHIFT_MASK;
 
 	/* Reset and set operation */
 	mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_SHIFT_OFFS,
@@ -1905,8 +1906,7 @@
 }
 
 /* Find tcam entry with matched pair <vid,port> */
-static int mvpp2_prs_vid_range_find(struct mvpp2 *priv, int pmap, u16 vid,
-				    u16 mask)
+static int mvpp2_prs_vid_range_find(struct mvpp2_port *port, u16 vid, u16 mask)
 {
 	unsigned char byte[2], enable[2];
 	struct mvpp2_prs_entry pe;
@@ -1914,13 +1914,13 @@
 	int tid;
 
 	/* Go through the all entries with MVPP2_PRS_LU_VID */
-	for (tid = MVPP2_PE_VID_FILT_RANGE_START;
-	     tid <= MVPP2_PE_VID_FILT_RANGE_END; tid++) {
-		if (!priv->prs_shadow[tid].valid ||
-		    priv->prs_shadow[tid].lu != MVPP2_PRS_LU_VID)
+	for (tid = MVPP2_PRS_VID_PORT_FIRST(port->id);
+	     tid <= MVPP2_PRS_VID_PORT_LAST(port->id); tid++) {
+		if (!port->priv->prs_shadow[tid].valid ||
+		    port->priv->prs_shadow[tid].lu != MVPP2_PRS_LU_VID)
 			continue;
 
-		mvpp2_prs_init_from_hw(priv, &pe, tid);
+		mvpp2_prs_init_from_hw(port->priv, &pe, tid);
 
 		mvpp2_prs_tcam_data_byte_get(&pe, 2, &byte[0], &enable[0]);
 		mvpp2_prs_tcam_data_byte_get(&pe, 3, &byte[1], &enable[1]);
@@ -1950,7 +1950,7 @@
 	memset(&pe, 0, sizeof(pe));
 
 	/* Scan TCAM and see if entry with this <vid,port> already exist */
-	tid = mvpp2_prs_vid_range_find(priv, (1 << port->id), vid, mask);
+	tid = mvpp2_prs_vid_range_find(port, vid, mask);
 
 	reg_val = mvpp2_read(priv, MVPP2_MH_REG(port->id));
 	if (reg_val & MVPP2_DSA_EXTENDED)
@@ -2008,7 +2008,7 @@
 	int tid;
 
 	/* Scan TCAM and see if entry with this <vid,port> already exist */
-	tid = mvpp2_prs_vid_range_find(priv, (1 << port->id), vid, 0xfff);
+	tid = mvpp2_prs_vid_range_find(port, vid, 0xfff);
 
 	/* No such entry */
 	if (tid < 0)
@@ -2026,8 +2026,10 @@
 
 	for (tid = MVPP2_PRS_VID_PORT_FIRST(port->id);
 	     tid <= MVPP2_PRS_VID_PORT_LAST(port->id); tid++) {
-		if (priv->prs_shadow[tid].valid)
-			mvpp2_prs_vid_entry_remove(port, tid);
+		if (priv->prs_shadow[tid].valid) {
+			mvpp2_prs_hw_inv(priv, tid);
+			priv->prs_shadow[tid].valid = false;
+		}
 	}
 }
 
diff --git a/drivers/net/ethernet/marvell/octeontx2/Kconfig b/drivers/net/ethernet/marvell/octeontx2/Kconfig
new file mode 100644
index 0000000..711ada7
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/Kconfig
@@ -0,0 +1,18 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Marvell OcteonTX2 drivers configuration
+#
+
+config OCTEONTX2_MBOX
+	tristate
+
+config OCTEONTX2_AF
+	tristate "Marvell OcteonTX2 RVU Admin Function driver"
+	select OCTEONTX2_MBOX
+	depends on (64BIT && COMPILE_TEST) || ARM64
+	depends on PCI
+	help
+	  This driver supports Marvell's OcteonTX2 Resource Virtualization
+	  Unit's admin function manager, which manages all RVU HW resources
+	  and provides a channel for other PFs/VFs to configure the HW. It
+	  must be enabled for the other RVU device drivers to work.
diff --git a/drivers/net/ethernet/marvell/octeontx2/Makefile b/drivers/net/ethernet/marvell/octeontx2/Makefile
new file mode 100644
index 0000000..e579dcd
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/Makefile
@@ -0,0 +1,6 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for Marvell OcteonTX2 device drivers.
+#
+
+obj-$(CONFIG_OCTEONTX2_AF) += af/
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/Makefile b/drivers/net/ethernet/marvell/octeontx2/af/Makefile
new file mode 100644
index 0000000..06329ac
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/af/Makefile
@@ -0,0 +1,11 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for Marvell's OcteonTX2 RVU Admin Function driver
+#
+
+obj-$(CONFIG_OCTEONTX2_MBOX) += octeontx2_mbox.o
+obj-$(CONFIG_OCTEONTX2_AF) += octeontx2_af.o
+
+octeontx2_mbox-y := mbox.o
+octeontx2_af-y := cgx.o rvu.o rvu_cgx.o rvu_npa.o rvu_nix.o \
+		  rvu_reg.o rvu_npc.o
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
new file mode 100644
index 0000000..6d55e3d
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
@@ -0,0 +1,871 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell OcteonTx2 CGX driver
+ *
+ * Copyright (C) 2018 Marvell International Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/acpi.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/phy.h>
+#include <linux/of.h>
+#include <linux/of_mdio.h>
+#include <linux/of_net.h>
+
+#include "cgx.h"
+
+#define DRV_NAME	"octeontx2-cgx"
+#define DRV_STRING      "Marvell OcteonTX2 CGX/MAC Driver"
+
+/**
+ * struct lmac
+ * @wq_cmd_cmplt:	waitq to keep the process blocked until cmd completion
+ * @cmd_lock:		Lock to serialize the command interface
+ * @resp:		command response
+ * @link_info:		link related information
+ * @event_cb:		callback for linkchange events
+ * @event_cb_lock:	lock for serializing callback with unregister
+ * @cmd_pend:		flag set before new command is started
+ *			flag cleared after command response is received
+ * @cgx:		parent cgx port
+ * @lmac_id:		lmac port id
+ * @name:		lmac port name
+ */
+struct lmac {
+	wait_queue_head_t wq_cmd_cmplt;
+	struct mutex cmd_lock;
+	u64 resp;
+	struct cgx_link_user_info link_info;
+	struct cgx_event_cb event_cb;
+	spinlock_t event_cb_lock;
+	bool cmd_pend;
+	struct cgx *cgx;
+	u8 lmac_id;
+	char *name;
+};
+
+struct cgx {
+	void __iomem		*reg_base;
+	struct pci_dev		*pdev;
+	u8			cgx_id;
+	u8			lmac_count;
+	struct lmac		*lmac_idmap[MAX_LMAC_PER_CGX];
+	struct			work_struct cgx_cmd_work;
+	struct			workqueue_struct *cgx_cmd_workq;
+	struct list_head	cgx_list;
+};
+
+static LIST_HEAD(cgx_list);
+
+/* Convert firmware speed encoding to user format (Mbps) */
+static u32 cgx_speed_mbps[CGX_LINK_SPEED_MAX];
+
+/* Convert firmware lmac type encoding to string */
+static char *cgx_lmactype_string[LMAC_MODE_MAX];
+
+/* CGX PHY management internal APIs */
+static int cgx_fwi_link_change(struct cgx *cgx, int lmac_id, bool en);
+
+/* Supported devices */
+static const struct pci_device_id cgx_id_table[] = {
+	{ PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_CGX) },
+	{ 0, }  /* end of table */
+};
+
+MODULE_DEVICE_TABLE(pci, cgx_id_table);
+
+static void cgx_write(struct cgx *cgx, u64 lmac, u64 offset, u64 val)
+{
+	writeq(val, cgx->reg_base + (lmac << 18) + offset);
+}
+
+static u64 cgx_read(struct cgx *cgx, u64 lmac, u64 offset)
+{
+	return readq(cgx->reg_base + (lmac << 18) + offset);
+}
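A note on the addressing used by cgx_read()/cgx_write(): shifting the LMAC id left by 18 gives every LMAC its own 1 << 18 = 256 KiB CSR window, so the effective address is reg_base + lmac * 256 KiB + offset. A minimal sketch of that arithmetic (the sample values are illustrative only):

/* Illustration of the per-LMAC CSR addressing: lmac 2 with offset 0x10
 * resolves to byte offset 0x80010 from reg_base.
 */
static inline u64 cgx_csr_offset(u64 lmac, u64 offset)
{
	return (lmac << 18) + offset;
}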
+
+static inline struct lmac *lmac_pdata(u8 lmac_id, struct cgx *cgx)
+{
+	if (!cgx || lmac_id >= MAX_LMAC_PER_CGX)
+		return NULL;
+
+	return cgx->lmac_idmap[lmac_id];
+}
+
+int cgx_get_cgxcnt_max(void)
+{
+	struct cgx *cgx_dev;
+	int idmax = -ENODEV;
+
+	list_for_each_entry(cgx_dev, &cgx_list, cgx_list)
+		if (cgx_dev->cgx_id > idmax)
+			idmax = cgx_dev->cgx_id;
+
+	if (idmax < 0)
+		return 0;
+
+	return idmax + 1;
+}
+EXPORT_SYMBOL(cgx_get_cgxcnt_max);
+
+int cgx_get_lmac_cnt(void *cgxd)
+{
+	struct cgx *cgx = cgxd;
+
+	if (!cgx)
+		return -ENODEV;
+
+	return cgx->lmac_count;
+}
+EXPORT_SYMBOL(cgx_get_lmac_cnt);
+
+void *cgx_get_pdata(int cgx_id)
+{
+	struct cgx *cgx_dev;
+
+	list_for_each_entry(cgx_dev, &cgx_list, cgx_list) {
+		if (cgx_dev->cgx_id == cgx_id)
+			return cgx_dev;
+	}
+	return NULL;
+}
+EXPORT_SYMBOL(cgx_get_pdata);
+
+/* Ensure the required lock for the event queue (where asynchronous events
+ * are posted) is held before calling this API. Otherwise an asynchronous
+ * event (with the latest link status) can reach the destination before this
+ * function returns and make the link status appear wrong.
+ */
+int cgx_get_link_info(void *cgxd, int lmac_id,
+		      struct cgx_link_user_info *linfo)
+{
+	struct lmac *lmac = lmac_pdata(lmac_id, cgxd);
+
+	if (!lmac)
+		return -ENODEV;
+
+	*linfo = lmac->link_info;
+	return 0;
+}
+EXPORT_SYMBOL(cgx_get_link_info);
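Following the locking comment above cgx_get_link_info(), a consumer is expected to serialize against its own event path before sampling the link state. A hedged sketch of such a caller; the lock and function names here are hypothetical, not from this patch:

/* Hypothetical consumer: sample the link state while holding the same lock
 * its link-change notification path takes.
 */
static int example_sample_link(void *cgxd, int lmac_id, spinlock_t *evt_lock,
			       struct cgx_link_user_info *linfo)
{
	int err;

	spin_lock(evt_lock);		/* serialize with the event handler */
	err = cgx_get_link_info(cgxd, lmac_id, linfo);
	spin_unlock(evt_lock);

	return err;
}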
+
+static u64 mac2u64(u8 *mac_addr)
+{
+	u64 mac = 0;
+	int index;
+
+	for (index = ETH_ALEN - 1; index >= 0; index--)
+		mac |= ((u64)*mac_addr++) << (8 * index);
+	return mac;
+}
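mac2u64() packs the six address bytes into the low 48 bits of a u64 with the first byte highest: aa:bb:cc:dd:ee:ff becomes 0x0000aabbccddeeff, which the caller below ORs with the CAM control bits. A tiny self-check, illustrative only:

/* Worked example for mac2u64(): the first address byte lands in bits 47:40,
 * the last in bits 7:0.
 */
static void example_mac2u64_check(void)
{
	u8 mac[ETH_ALEN] = { 0xaa, 0xbb, 0xcc, 0xdd, 0xee, 0xff };

	WARN_ON(mac2u64(mac) != 0x0000aabbccddeeffULL);
}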
+
+int cgx_lmac_addr_set(u8 cgx_id, u8 lmac_id, u8 *mac_addr)
+{
+	struct cgx *cgx_dev = cgx_get_pdata(cgx_id);
+	u64 cfg;
+
+	/* Pack the 6 bytes of the MAC address into the u64 layout
+	 * expected by the DMAC CAM register below.
+	 */
+	cfg = mac2u64(mac_addr);
+
+	cgx_write(cgx_dev, 0, (CGXX_CMRX_RX_DMAC_CAM0 + (lmac_id * 0x8)),
+		  cfg | CGX_DMAC_CAM_ADDR_ENABLE | ((u64)lmac_id << 49));
+
+	cfg = cgx_read(cgx_dev, lmac_id, CGXX_CMRX_RX_DMAC_CTL0);
+	cfg |= CGX_DMAC_CTL0_CAM_ENABLE;
+	cgx_write(cgx_dev, lmac_id, CGXX_CMRX_RX_DMAC_CTL0, cfg);
+
+	return 0;
+}
+EXPORT_SYMBOL(cgx_lmac_addr_set);
+
+u64 cgx_lmac_addr_get(u8 cgx_id, u8 lmac_id)
+{
+	struct cgx *cgx_dev = cgx_get_pdata(cgx_id);
+	u64 cfg;
+
+	cfg = cgx_read(cgx_dev, 0, CGXX_CMRX_RX_DMAC_CAM0 + lmac_id * 0x8);
+	return cfg & CGX_RX_DMAC_ADR_MASK;
+}
+EXPORT_SYMBOL(cgx_lmac_addr_get);
+
+int cgx_set_pkind(void *cgxd, u8 lmac_id, int pkind)
+{
+	struct cgx *cgx = cgxd;
+
+	if (!cgx || lmac_id >= cgx->lmac_count)
+		return -ENODEV;
+
+	cgx_write(cgx, lmac_id, CGXX_CMRX_RX_ID_MAP, (pkind & 0x3F));
+	return 0;
+}
+EXPORT_SYMBOL(cgx_set_pkind);
+
+static inline u8 cgx_get_lmac_type(struct cgx *cgx, int lmac_id)
+{
+	u64 cfg;
+
+	cfg = cgx_read(cgx, lmac_id, CGXX_CMRX_CFG);
+	return (cfg >> CGX_LMAC_TYPE_SHIFT) & CGX_LMAC_TYPE_MASK;
+}
+
+/* Configure CGX LMAC in internal loopback mode */
+int cgx_lmac_internal_loopback(void *cgxd, int lmac_id, bool enable)
+{
+	struct cgx *cgx = cgxd;
+	u8 lmac_type;
+	u64 cfg;
+
+	if (!cgx || lmac_id >= cgx->lmac_count)
+		return -ENODEV;
+
+	lmac_type = cgx_get_lmac_type(cgx, lmac_id);
+	if (lmac_type == LMAC_MODE_SGMII || lmac_type == LMAC_MODE_QSGMII) {
+		cfg = cgx_read(cgx, lmac_id, CGXX_GMP_PCS_MRX_CTL);
+		if (enable)
+			cfg |= CGXX_GMP_PCS_MRX_CTL_LBK;
+		else
+			cfg &= ~CGXX_GMP_PCS_MRX_CTL_LBK;
+		cgx_write(cgx, lmac_id, CGXX_GMP_PCS_MRX_CTL, cfg);
+	} else {
+		cfg = cgx_read(cgx, lmac_id, CGXX_SPUX_CONTROL1);
+		if (enable)
+			cfg |= CGXX_SPUX_CONTROL1_LBK;
+		else
+			cfg &= ~CGXX_SPUX_CONTROL1_LBK;
+		cgx_write(cgx, lmac_id, CGXX_SPUX_CONTROL1, cfg);
+	}
+	return 0;
+}
+EXPORT_SYMBOL(cgx_lmac_internal_loopback);
+
+void cgx_lmac_promisc_config(int cgx_id, int lmac_id, bool enable)
+{
+	struct cgx *cgx = cgx_get_pdata(cgx_id);
+	u64 cfg = 0;
+
+	if (!cgx)
+		return;
+
+	if (enable) {
+		/* Enable promiscuous mode on LMAC */
+		cfg = cgx_read(cgx, lmac_id, CGXX_CMRX_RX_DMAC_CTL0);
+		cfg &= ~(CGX_DMAC_CAM_ACCEPT | CGX_DMAC_MCAST_MODE);
+		cfg |= CGX_DMAC_BCAST_MODE;
+		cgx_write(cgx, lmac_id, CGXX_CMRX_RX_DMAC_CTL0, cfg);
+
+		cfg = cgx_read(cgx, 0,
+			       (CGXX_CMRX_RX_DMAC_CAM0 + lmac_id * 0x8));
+		cfg &= ~CGX_DMAC_CAM_ADDR_ENABLE;
+		cgx_write(cgx, 0,
+			  (CGXX_CMRX_RX_DMAC_CAM0 + lmac_id * 0x8), cfg);
+	} else {
+		/* Disable promiscuous mode */
+		cfg = cgx_read(cgx, lmac_id, CGXX_CMRX_RX_DMAC_CTL0);
+		cfg |= CGX_DMAC_CAM_ACCEPT | CGX_DMAC_MCAST_MODE;
+		cgx_write(cgx, lmac_id, CGXX_CMRX_RX_DMAC_CTL0, cfg);
+		cfg = cgx_read(cgx, 0,
+			       (CGXX_CMRX_RX_DMAC_CAM0 + lmac_id * 0x8));
+		cfg |= CGX_DMAC_CAM_ADDR_ENABLE;
+		cgx_write(cgx, 0,
+			  (CGXX_CMRX_RX_DMAC_CAM0 + lmac_id * 0x8), cfg);
+	}
+}
+EXPORT_SYMBOL(cgx_lmac_promisc_config);
+
+int cgx_get_rx_stats(void *cgxd, int lmac_id, int idx, u64 *rx_stat)
+{
+	struct cgx *cgx = cgxd;
+
+	if (!cgx || lmac_id >= cgx->lmac_count)
+		return -ENODEV;
+	*rx_stat = cgx_read(cgx, lmac_id, CGXX_CMRX_RX_STAT0 + (idx * 8));
+	return 0;
+}
+EXPORT_SYMBOL(cgx_get_rx_stats);
+
+int cgx_get_tx_stats(void *cgxd, int lmac_id, int idx, u64 *tx_stat)
+{
+	struct cgx *cgx = cgxd;
+
+	if (!cgx || lmac_id >= cgx->lmac_count)
+		return -ENODEV;
+	*tx_stat = cgx_read(cgx, lmac_id, CGXX_CMRX_TX_STAT0 + (idx * 8));
+	return 0;
+}
+EXPORT_SYMBOL(cgx_get_tx_stats);
+
+int cgx_lmac_rx_tx_enable(void *cgxd, int lmac_id, bool enable)
+{
+	struct cgx *cgx = cgxd;
+	u64 cfg;
+
+	if (!cgx || lmac_id >= cgx->lmac_count)
+		return -ENODEV;
+
+	cfg = cgx_read(cgx, lmac_id, CGXX_CMRX_CFG);
+	if (enable)
+		cfg |= CMR_EN | DATA_PKT_RX_EN | DATA_PKT_TX_EN;
+	else
+		cfg &= ~(CMR_EN | DATA_PKT_RX_EN | DATA_PKT_TX_EN);
+	cgx_write(cgx, lmac_id, CGXX_CMRX_CFG, cfg);
+	return 0;
+}
+EXPORT_SYMBOL(cgx_lmac_rx_tx_enable);
+
+/* CGX Firmware interface low level support */
+static int cgx_fwi_cmd_send(u64 req, u64 *resp, struct lmac *lmac)
+{
+	struct cgx *cgx = lmac->cgx;
+	struct device *dev;
+	int err = 0;
+	u64 cmd;
+
+	/* Ensure no other command is in progress */
+	err = mutex_lock_interruptible(&lmac->cmd_lock);
+	if (err)
+		return err;
+
+	/* Ensure command register is free */
+	cmd = cgx_read(cgx, lmac->lmac_id, CGX_COMMAND_REG);
+	if (FIELD_GET(CMDREG_OWN, cmd) != CGX_CMD_OWN_NS) {
+		err = -EBUSY;
+		goto unlock;
+	}
+
+	/* Update ownership in command request */
+	req = FIELD_SET(CMDREG_OWN, CGX_CMD_OWN_FIRMWARE, req);
+
+	/* Mark this lmac as pending, before we start */
+	lmac->cmd_pend = true;
+
+	/* Start command in hardware */
+	cgx_write(cgx, lmac->lmac_id, CGX_COMMAND_REG, req);
+
+	/* Ensure command is completed without errors */
+	if (!wait_event_timeout(lmac->wq_cmd_cmplt, !lmac->cmd_pend,
+				msecs_to_jiffies(CGX_CMD_TIMEOUT))) {
+		dev = &cgx->pdev->dev;
+		dev_err(dev, "cgx port %d:%d cmd timeout\n",
+			cgx->cgx_id, lmac->lmac_id);
+		err = -EIO;
+		goto unlock;
+	}
+
+	/* we have a valid command response */
+	smp_rmb(); /* Ensure the latest updates are visible */
+	*resp = lmac->resp;
+
+unlock:
+	mutex_unlock(&lmac->cmd_lock);
+
+	return err;
+}
+
+static inline int cgx_fwi_cmd_generic(u64 req, u64 *resp,
+				      struct cgx *cgx, int lmac_id)
+{
+	struct lmac *lmac;
+	int err;
+
+	lmac = lmac_pdata(lmac_id, cgx);
+	if (!lmac)
+		return -ENODEV;
+
+	err = cgx_fwi_cmd_send(req, resp, lmac);
+
+	/* Check for valid response */
+	if (!err) {
+		if (FIELD_GET(EVTREG_STAT, *resp) == CGX_STAT_FAIL)
+			return -EIO;
+		else
+			return 0;
+	}
+
+	return err;
+}
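cgx_fwi_cmd_generic() is the workhorse behind every firmware request in this file: callers build the request word with FIELD_SET() on CMDREG_ID (plus any command-specific fields) and decode the raw response with FIELD_GET(). A minimal sketch of that pattern, mirroring cgx_fwi_read_version() further down; CGX_CMD_GET_FW_VER is reused here purely as an example of an existing command id:

/* Sketch of the firmware command pattern used throughout this file. */
static int example_fwi_query(struct cgx *cgx, int lmac_id, u64 *out)
{
	u64 req = 0, resp;
	int err;

	req = FIELD_SET(CMDREG_ID, CGX_CMD_GET_FW_VER, req);
	err = cgx_fwi_cmd_generic(req, &resp, cgx, lmac_id);
	if (!err)
		*out = resp;

	return err;
}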
+
+static inline void cgx_link_usertable_init(void)
+{
+	cgx_speed_mbps[CGX_LINK_NONE] = 0;
+	cgx_speed_mbps[CGX_LINK_10M] = 10;
+	cgx_speed_mbps[CGX_LINK_100M] = 100;
+	cgx_speed_mbps[CGX_LINK_1G] = 1000;
+	cgx_speed_mbps[CGX_LINK_2HG] = 2500;
+	cgx_speed_mbps[CGX_LINK_5G] = 5000;
+	cgx_speed_mbps[CGX_LINK_10G] = 10000;
+	cgx_speed_mbps[CGX_LINK_20G] = 20000;
+	cgx_speed_mbps[CGX_LINK_25G] = 25000;
+	cgx_speed_mbps[CGX_LINK_40G] = 40000;
+	cgx_speed_mbps[CGX_LINK_50G] = 50000;
+	cgx_speed_mbps[CGX_LINK_100G] = 100000;
+
+	cgx_lmactype_string[LMAC_MODE_SGMII] = "SGMII";
+	cgx_lmactype_string[LMAC_MODE_XAUI] = "XAUI";
+	cgx_lmactype_string[LMAC_MODE_RXAUI] = "RXAUI";
+	cgx_lmactype_string[LMAC_MODE_10G_R] = "10G_R";
+	cgx_lmactype_string[LMAC_MODE_40G_R] = "40G_R";
+	cgx_lmactype_string[LMAC_MODE_QSGMII] = "QSGMII";
+	cgx_lmactype_string[LMAC_MODE_25G_R] = "25G_R";
+	cgx_lmactype_string[LMAC_MODE_50G_R] = "50G_R";
+	cgx_lmactype_string[LMAC_MODE_100G_R] = "100G_R";
+	cgx_lmactype_string[LMAC_MODE_USXGMII] = "USXGMII";
+}
+
+static inline void link_status_user_format(u64 lstat,
+					   struct cgx_link_user_info *linfo,
+					   struct cgx *cgx, u8 lmac_id)
+{
+	char *lmac_string;
+
+	linfo->link_up = FIELD_GET(RESP_LINKSTAT_UP, lstat);
+	linfo->full_duplex = FIELD_GET(RESP_LINKSTAT_FDUPLEX, lstat);
+	linfo->speed = cgx_speed_mbps[FIELD_GET(RESP_LINKSTAT_SPEED, lstat)];
+	linfo->lmac_type_id = cgx_get_lmac_type(cgx, lmac_id);
+	lmac_string = cgx_lmactype_string[linfo->lmac_type_id];
+	strncpy(linfo->lmac_type, lmac_string, LMACTYPE_STR_LEN - 1);
+}
+
+/* Hardware event handlers */
+static inline void cgx_link_change_handler(u64 lstat,
+					   struct lmac *lmac)
+{
+	struct cgx_link_user_info *linfo;
+	struct cgx *cgx = lmac->cgx;
+	struct cgx_link_event event;
+	struct device *dev;
+	int err_type;
+
+	dev = &cgx->pdev->dev;
+
+	link_status_user_format(lstat, &event.link_uinfo, cgx, lmac->lmac_id);
+	err_type = FIELD_GET(RESP_LINKSTAT_ERRTYPE, lstat);
+
+	event.cgx_id = cgx->cgx_id;
+	event.lmac_id = lmac->lmac_id;
+
+	/* update the local copy of link status */
+	lmac->link_info = event.link_uinfo;
+	linfo = &lmac->link_info;
+
+	/* Ensure callback doesn't get unregistered until we finish it */
+	spin_lock(&lmac->event_cb_lock);
+
+	if (!lmac->event_cb.notify_link_chg) {
+		dev_dbg(dev, "cgx port %d:%d Link change handler null",
+			cgx->cgx_id, lmac->lmac_id);
+		if (err_type != CGX_ERR_NONE) {
+			dev_err(dev, "cgx port %d:%d Link error %d\n",
+				cgx->cgx_id, lmac->lmac_id, err_type);
+		}
+		dev_info(dev, "cgx port %d:%d Link is %s %d Mbps\n",
+			 cgx->cgx_id, lmac->lmac_id,
+			 linfo->link_up ? "UP" : "DOWN", linfo->speed);
+		goto err;
+	}
+
+	if (lmac->event_cb.notify_link_chg(&event, lmac->event_cb.data))
+		dev_err(dev, "event notification failure\n");
+err:
+	spin_unlock(&lmac->event_cb_lock);
+}
+
+static inline bool cgx_cmdresp_is_linkevent(u64 event)
+{
+	u8 id;
+
+	id = FIELD_GET(EVTREG_ID, event);
+	if (id == CGX_CMD_LINK_BRING_UP ||
+	    id == CGX_CMD_LINK_BRING_DOWN)
+		return true;
+	else
+		return false;
+}
+
+static inline bool cgx_event_is_linkevent(u64 event)
+{
+	if (FIELD_GET(EVTREG_ID, event) == CGX_EVT_LINK_CHANGE)
+		return true;
+	else
+		return false;
+}
+
+static inline int cgx_fwi_get_mkex_prfl_sz(u64 *prfl_sz,
+					   struct cgx *cgx)
+{
+	u64 req = 0;
+	u64 resp;
+	int err;
+
+	req = FIELD_SET(CMDREG_ID, CGX_CMD_GET_MKEX_PRFL_SIZE, req);
+	err = cgx_fwi_cmd_generic(req, &resp, cgx, 0);
+	if (!err)
+		*prfl_sz = FIELD_GET(RESP_MKEX_PRFL_SIZE, resp);
+
+	return err;
+}
+
+static inline int cgx_fwi_get_mkex_prfl_addr(u64 *prfl_addr,
+					     struct cgx *cgx)
+{
+	u64 req = 0;
+	u64 resp;
+	int err;
+
+	req = FIELD_SET(CMDREG_ID, CGX_CMD_GET_MKEX_PRFL_ADDR, req);
+	err = cgx_fwi_cmd_generic(req, &resp, cgx, 0);
+	if (!err)
+		*prfl_addr = FIELD_GET(RESP_MKEX_PRFL_ADDR, resp);
+
+	return err;
+}
+
+int cgx_get_mkex_prfl_info(u64 *addr, u64 *size)
+{
+	struct cgx *cgx_dev;
+	int err;
+
+	if (!addr || !size)
+		return -EINVAL;
+
+	cgx_dev = list_first_entry(&cgx_list, struct cgx, cgx_list);
+	if (!cgx_dev)
+		return -ENXIO;
+
+	err = cgx_fwi_get_mkex_prfl_sz(size, cgx_dev);
+	if (err)
+		return -EIO;
+
+	err = cgx_fwi_get_mkex_prfl_addr(addr, cgx_dev);
+	if (err)
+		return -EIO;
+
+	return 0;
+}
+EXPORT_SYMBOL(cgx_get_mkex_prfl_info);
+
+static irqreturn_t cgx_fwi_event_handler(int irq, void *data)
+{
+	struct lmac *lmac = data;
+	struct cgx *cgx;
+	u64 event;
+
+	cgx = lmac->cgx;
+
+	event = cgx_read(cgx, lmac->lmac_id, CGX_EVENT_REG);
+
+	if (!FIELD_GET(EVTREG_ACK, event))
+		return IRQ_NONE;
+
+	switch (FIELD_GET(EVTREG_EVT_TYPE, event)) {
+	case CGX_EVT_CMD_RESP:
+		/* Copy the response. Since only one command is active at a
+		 * time, there is no way a response can get overwritten
+		 */
+		lmac->resp = event;
+		/* Ensure response is updated before thread context starts */
+		smp_wmb();
+
+		/* There won't be separate events for link changes initiated
+		 * from software; hence report the command responses as events.
+		 */
+		if (cgx_cmdresp_is_linkevent(event))
+			cgx_link_change_handler(event, lmac);
+
+		/* Release thread waiting for completion  */
+		lmac->cmd_pend = false;
+		wake_up_interruptible(&lmac->wq_cmd_cmplt);
+		break;
+	case CGX_EVT_ASYNC:
+		if (cgx_event_is_linkevent(event))
+			cgx_link_change_handler(event, lmac);
+		break;
+	}
+
+	/* Any new event or command response will be posted by firmware
+	 * only after the current status is acked.
+	 * Ack the interrupt register as well.
+	 */
+	cgx_write(lmac->cgx, lmac->lmac_id, CGX_EVENT_REG, 0);
+	cgx_write(lmac->cgx, lmac->lmac_id, CGXX_CMRX_INT, FW_CGX_INT);
+
+	return IRQ_HANDLED;
+}
+
+/* APIs for PHY management using CGX firmware interface */
+
+/* callback registration for hardware events like link change */
+int cgx_lmac_evh_register(struct cgx_event_cb *cb, void *cgxd, int lmac_id)
+{
+	struct cgx *cgx = cgxd;
+	struct lmac *lmac;
+
+	lmac = lmac_pdata(lmac_id, cgx);
+	if (!lmac)
+		return -ENODEV;
+
+	lmac->event_cb = *cb;
+
+	return 0;
+}
+EXPORT_SYMBOL(cgx_lmac_evh_register);
+
+int cgx_lmac_evh_unregister(void *cgxd, int lmac_id)
+{
+	struct lmac *lmac;
+	unsigned long flags;
+	struct cgx *cgx = cgxd;
+
+	lmac = lmac_pdata(lmac_id, cgx);
+	if (!lmac)
+		return -ENODEV;
+
+	spin_lock_irqsave(&lmac->event_cb_lock, flags);
+	lmac->event_cb.notify_link_chg = NULL;
+	lmac->event_cb.data = NULL;
+	spin_unlock_irqrestore(&lmac->event_cb_lock, flags);
+
+	return 0;
+}
+EXPORT_SYMBOL(cgx_lmac_evh_unregister);
+
+static int cgx_fwi_link_change(struct cgx *cgx, int lmac_id, bool enable)
+{
+	u64 req = 0;
+	u64 resp;
+
+	if (enable)
+		req = FIELD_SET(CMDREG_ID, CGX_CMD_LINK_BRING_UP, req);
+	else
+		req = FIELD_SET(CMDREG_ID, CGX_CMD_LINK_BRING_DOWN, req);
+
+	return cgx_fwi_cmd_generic(req, &resp, cgx, lmac_id);
+}
+
+static inline int cgx_fwi_read_version(u64 *resp, struct cgx *cgx)
+{
+	u64 req = 0;
+
+	req = FIELD_SET(CMDREG_ID, CGX_CMD_GET_FW_VER, req);
+	return cgx_fwi_cmd_generic(req, resp, cgx, 0);
+}
+
+static int cgx_lmac_verify_fwi_version(struct cgx *cgx)
+{
+	struct device *dev = &cgx->pdev->dev;
+	int major_ver, minor_ver;
+	u64 resp;
+	int err;
+
+	if (!cgx->lmac_count)
+		return 0;
+
+	err = cgx_fwi_read_version(&resp, cgx);
+	if (err)
+		return err;
+
+	major_ver = FIELD_GET(RESP_MAJOR_VER, resp);
+	minor_ver = FIELD_GET(RESP_MINOR_VER, resp);
+	dev_dbg(dev, "Firmware command interface version = %d.%d\n",
+		major_ver, minor_ver);
+	if (major_ver != CGX_FIRMWARE_MAJOR_VER ||
+	    minor_ver != CGX_FIRMWARE_MINOR_VER)
+		return -EIO;
+	else
+		return 0;
+}
+
+static void cgx_lmac_linkup_work(struct work_struct *work)
+{
+	struct cgx *cgx = container_of(work, struct cgx, cgx_cmd_work);
+	struct device *dev = &cgx->pdev->dev;
+	int i, err;
+
+	/* Bring up the link on all LMACs */
+	for (i = 0; i < cgx->lmac_count; i++) {
+		err = cgx_fwi_link_change(cgx, i, true);
+		if (err)
+			dev_info(dev, "cgx port %d:%d Link up command failed\n",
+				 cgx->cgx_id, i);
+	}
+}
+
+int cgx_lmac_linkup_start(void *cgxd)
+{
+	struct cgx *cgx = cgxd;
+
+	if (!cgx)
+		return -ENODEV;
+
+	queue_work(cgx->cgx_cmd_workq, &cgx->cgx_cmd_work);
+
+	return 0;
+}
+EXPORT_SYMBOL(cgx_lmac_linkup_start);
+
+static int cgx_lmac_init(struct cgx *cgx)
+{
+	struct lmac *lmac;
+	int i, err;
+
+	cgx->lmac_count = cgx_read(cgx, 0, CGXX_CMRX_RX_LMACS) & 0x7;
+	if (cgx->lmac_count > MAX_LMAC_PER_CGX)
+		cgx->lmac_count = MAX_LMAC_PER_CGX;
+
+	for (i = 0; i < cgx->lmac_count; i++) {
+		lmac = kcalloc(1, sizeof(struct lmac), GFP_KERNEL);
+		if (!lmac)
+			return -ENOMEM;
+		lmac->name = kcalloc(1, sizeof("cgx_fwi_xxx_yyy"), GFP_KERNEL);
+		if (!lmac->name)
+			return -ENOMEM;
+		sprintf(lmac->name, "cgx_fwi_%d_%d", cgx->cgx_id, i);
+		lmac->lmac_id = i;
+		lmac->cgx = cgx;
+		init_waitqueue_head(&lmac->wq_cmd_cmplt);
+		mutex_init(&lmac->cmd_lock);
+		spin_lock_init(&lmac->event_cb_lock);
+		err = request_irq(pci_irq_vector(cgx->pdev,
+						 CGX_LMAC_FWI + i * 9),
+				   cgx_fwi_event_handler, 0, lmac->name, lmac);
+		if (err)
+			return err;
+
+		/* Enable interrupt */
+		cgx_write(cgx, lmac->lmac_id, CGXX_CMRX_INT_ENA_W1S,
+			  FW_CGX_INT);
+
+		/* Add reference */
+		cgx->lmac_idmap[i] = lmac;
+	}
+
+	return cgx_lmac_verify_fwi_version(cgx);
+}
+
+static int cgx_lmac_exit(struct cgx *cgx)
+{
+	struct lmac *lmac;
+	int i;
+
+	if (cgx->cgx_cmd_workq) {
+		flush_workqueue(cgx->cgx_cmd_workq);
+		destroy_workqueue(cgx->cgx_cmd_workq);
+		cgx->cgx_cmd_workq = NULL;
+	}
+
+	/* Free all lmac related resources */
+	for (i = 0; i < cgx->lmac_count; i++) {
+		lmac = cgx->lmac_idmap[i];
+		if (!lmac)
+			continue;
+		free_irq(pci_irq_vector(cgx->pdev, CGX_LMAC_FWI + i * 9), lmac);
+		kfree(lmac->name);
+		kfree(lmac);
+	}
+
+	return 0;
+}
+
+static int cgx_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+	struct device *dev = &pdev->dev;
+	struct cgx *cgx;
+	int err, nvec;
+
+	cgx = devm_kzalloc(dev, sizeof(*cgx), GFP_KERNEL);
+	if (!cgx)
+		return -ENOMEM;
+	cgx->pdev = pdev;
+
+	pci_set_drvdata(pdev, cgx);
+
+	err = pci_enable_device(pdev);
+	if (err) {
+		dev_err(dev, "Failed to enable PCI device\n");
+		pci_set_drvdata(pdev, NULL);
+		return err;
+	}
+
+	err = pci_request_regions(pdev, DRV_NAME);
+	if (err) {
+		dev_err(dev, "PCI request regions failed 0x%x\n", err);
+		goto err_disable_device;
+	}
+
+	/* MAP configuration registers */
+	cgx->reg_base = pcim_iomap(pdev, PCI_CFG_REG_BAR_NUM, 0);
+	if (!cgx->reg_base) {
+		dev_err(dev, "CGX: Cannot map CSR memory space, aborting\n");
+		err = -ENOMEM;
+		goto err_release_regions;
+	}
+
+	nvec = CGX_NVEC;
+	err = pci_alloc_irq_vectors(pdev, nvec, nvec, PCI_IRQ_MSIX);
+	if (err < 0 || err != nvec) {
+		dev_err(dev, "Request for %d msix vectors failed, err %d\n",
+			nvec, err);
+		goto err_release_regions;
+	}
+
+	cgx->cgx_id = (pci_resource_start(pdev, PCI_CFG_REG_BAR_NUM) >> 24)
+		& CGX_ID_MASK;
+
+	/* init wq for processing linkup requests */
+	INIT_WORK(&cgx->cgx_cmd_work, cgx_lmac_linkup_work);
+	cgx->cgx_cmd_workq = alloc_workqueue("cgx_cmd_workq", 0, 0);
+	if (!cgx->cgx_cmd_workq) {
+		dev_err(dev, "alloc workqueue failed for cgx cmd");
+		err = -ENOMEM;
+		goto err_free_irq_vectors;
+	}
+
+	list_add(&cgx->cgx_list, &cgx_list);
+
+	cgx_link_usertable_init();
+
+	err = cgx_lmac_init(cgx);
+	if (err)
+		goto err_release_lmac;
+
+	return 0;
+
+err_release_lmac:
+	cgx_lmac_exit(cgx);
+	list_del(&cgx->cgx_list);
+err_free_irq_vectors:
+	pci_free_irq_vectors(pdev);
+err_release_regions:
+	pci_release_regions(pdev);
+err_disable_device:
+	pci_disable_device(pdev);
+	pci_set_drvdata(pdev, NULL);
+	return err;
+}
+
+static void cgx_remove(struct pci_dev *pdev)
+{
+	struct cgx *cgx = pci_get_drvdata(pdev);
+
+	cgx_lmac_exit(cgx);
+	list_del(&cgx->cgx_list);
+	pci_free_irq_vectors(pdev);
+	pci_release_regions(pdev);
+	pci_disable_device(pdev);
+	pci_set_drvdata(pdev, NULL);
+}
+
+struct pci_driver cgx_driver = {
+	.name = DRV_NAME,
+	.id_table = cgx_id_table,
+	.probe = cgx_probe,
+	.remove = cgx_remove,
+};
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cgx.h b/drivers/net/ethernet/marvell/octeontx2/af/cgx.h
new file mode 100644
index 0000000..5c1f389
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/af/cgx.h
@@ -0,0 +1,115 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*  Marvell OcteonTx2 CGX driver
+ *
+ * Copyright (C) 2018 Marvell International Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef CGX_H
+#define CGX_H
+
+#include "mbox.h"
+#include "cgx_fw_if.h"
+
+ /* PCI device IDs */
+#define	PCI_DEVID_OCTEONTX2_CGX		0xA059
+
+/* PCI BAR nos */
+#define PCI_CFG_REG_BAR_NUM		0
+
+#define CGX_ID_MASK			0x7
+#define MAX_LMAC_PER_CGX		4
+#define CGX_FIFO_LEN			65536 /* 64K for both Rx & Tx */
+#define CGX_OFFSET(x)			((x) * MAX_LMAC_PER_CGX)
+
+/* Registers */
+#define CGXX_CMRX_CFG			0x00
+#define CMR_EN				BIT_ULL(55)
+#define DATA_PKT_TX_EN			BIT_ULL(53)
+#define DATA_PKT_RX_EN			BIT_ULL(54)
+#define CGX_LMAC_TYPE_SHIFT		40
+#define CGX_LMAC_TYPE_MASK		0xF
+#define CGXX_CMRX_INT			0x040
+#define FW_CGX_INT			BIT_ULL(1)
+#define CGXX_CMRX_INT_ENA_W1S		0x058
+#define CGXX_CMRX_RX_ID_MAP		0x060
+#define CGXX_CMRX_RX_STAT0		0x070
+#define CGXX_CMRX_RX_LMACS		0x128
+#define CGXX_CMRX_RX_DMAC_CTL0		0x1F8
+#define CGX_DMAC_CTL0_CAM_ENABLE	BIT_ULL(3)
+#define CGX_DMAC_CAM_ACCEPT		BIT_ULL(3)
+#define CGX_DMAC_MCAST_MODE		BIT_ULL(1)
+#define CGX_DMAC_BCAST_MODE		BIT_ULL(0)
+#define CGXX_CMRX_RX_DMAC_CAM0		0x200
+#define CGX_DMAC_CAM_ADDR_ENABLE	BIT_ULL(48)
+#define CGXX_CMRX_RX_DMAC_CAM1		0x400
+#define CGX_RX_DMAC_ADR_MASK		GENMASK_ULL(47, 0)
+#define CGXX_CMRX_TX_STAT0		0x700
+#define CGXX_SCRATCH0_REG		0x1050
+#define CGXX_SCRATCH1_REG		0x1058
+#define CGX_CONST			0x2000
+#define CGXX_SPUX_CONTROL1		0x10000
+#define CGXX_SPUX_CONTROL1_LBK		BIT_ULL(14)
+#define CGXX_GMP_PCS_MRX_CTL		0x30000
+#define CGXX_GMP_PCS_MRX_CTL_LBK	BIT_ULL(14)
+
+#define CGX_COMMAND_REG			CGXX_SCRATCH1_REG
+#define CGX_EVENT_REG			CGXX_SCRATCH0_REG
+#define CGX_CMD_TIMEOUT			2200 /* msecs */
+
+#define CGX_NVEC			37
+#define CGX_LMAC_FWI			0
+
+enum LMAC_TYPE {
+	LMAC_MODE_SGMII		= 0,
+	LMAC_MODE_XAUI		= 1,
+	LMAC_MODE_RXAUI		= 2,
+	LMAC_MODE_10G_R		= 3,
+	LMAC_MODE_40G_R		= 4,
+	LMAC_MODE_QSGMII	= 6,
+	LMAC_MODE_25G_R		= 7,
+	LMAC_MODE_50G_R		= 8,
+	LMAC_MODE_100G_R	= 9,
+	LMAC_MODE_USXGMII	= 10,
+	LMAC_MODE_MAX,
+};
+
+struct cgx_link_event {
+	struct cgx_link_user_info link_uinfo;
+	u8 cgx_id;
+	u8 lmac_id;
+};
+
+/**
+ * struct cgx_event_cb
+ * @notify_link_chg:	callback for link change notification
+ * @data:	data passed to callback function
+ */
+struct cgx_event_cb {
+	int (*notify_link_chg)(struct cgx_link_event *event, void *data);
+	void *data;
+};
+
+extern struct pci_driver cgx_driver;
+
+int cgx_get_cgxcnt_max(void);
+int cgx_get_lmac_cnt(void *cgxd);
+void *cgx_get_pdata(int cgx_id);
+int cgx_set_pkind(void *cgxd, u8 lmac_id, int pkind);
+int cgx_lmac_evh_register(struct cgx_event_cb *cb, void *cgxd, int lmac_id);
+int cgx_lmac_evh_unregister(void *cgxd, int lmac_id);
+int cgx_get_tx_stats(void *cgxd, int lmac_id, int idx, u64 *tx_stat);
+int cgx_get_rx_stats(void *cgxd, int lmac_id, int idx, u64 *rx_stat);
+int cgx_lmac_rx_tx_enable(void *cgxd, int lmac_id, bool enable);
+int cgx_lmac_addr_set(u8 cgx_id, u8 lmac_id, u8 *mac_addr);
+u64 cgx_lmac_addr_get(u8 cgx_id, u8 lmac_id);
+void cgx_lmac_promisc_config(int cgx_id, int lmac_id, bool enable);
+int cgx_lmac_internal_loopback(void *cgxd, int lmac_id, bool enable);
+int cgx_get_link_info(void *cgxd, int lmac_id,
+		      struct cgx_link_user_info *linfo);
+int cgx_lmac_linkup_start(void *cgxd);
+int cgx_get_mkex_prfl_info(u64 *addr, u64 *size);
+#endif /* CGX_H */
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cgx_fw_if.h b/drivers/net/ethernet/marvell/octeontx2/af/cgx_fw_if.h
new file mode 100644
index 0000000..473d975
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/af/cgx_fw_if.h
@@ -0,0 +1,196 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*  Marvell OcteonTx2 CGX driver
+ *
+ * Copyright (C) 2018 Marvell International Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __CGX_FW_INTF_H__
+#define __CGX_FW_INTF_H__
+
+#include <linux/bitops.h>
+#include <linux/bitfield.h>
+
+#define CGX_FIRMWARE_MAJOR_VER		1
+#define CGX_FIRMWARE_MINOR_VER		0
+
+#define CGX_EVENT_ACK                   1UL
+
+/* CGX error types. Set when the cmd response status is CGX_STAT_FAIL */
+enum cgx_error_type {
+	CGX_ERR_NONE,
+	CGX_ERR_LMAC_NOT_ENABLED,
+	CGX_ERR_LMAC_MODE_INVALID,
+	CGX_ERR_REQUEST_ID_INVALID,
+	CGX_ERR_PREV_ACK_NOT_CLEAR,
+	CGX_ERR_PHY_LINK_DOWN,
+	CGX_ERR_PCS_RESET_FAIL,
+	CGX_ERR_AN_CPT_FAIL,
+	CGX_ERR_TX_NOT_IDLE,
+	CGX_ERR_RX_NOT_IDLE,
+	CGX_ERR_SPUX_BR_BLKLOCK_FAIL,
+	CGX_ERR_SPUX_RX_ALIGN_FAIL,
+	CGX_ERR_SPUX_TX_FAULT,
+	CGX_ERR_SPUX_RX_FAULT,
+	CGX_ERR_SPUX_RESET_FAIL,
+	CGX_ERR_SPUX_AN_RESET_FAIL,
+	CGX_ERR_SPUX_USX_AN_RESET_FAIL,
+	CGX_ERR_SMUX_RX_LINK_NOT_OK,
+	CGX_ERR_PCS_RECV_LINK_FAIL,
+	CGX_ERR_TRAINING_FAIL,
+	CGX_ERR_RX_EQU_FAIL,
+	CGX_ERR_SPUX_BER_FAIL,
+	CGX_ERR_SPUX_RSFEC_ALGN_FAIL,   /* = 22 */
+};
+
+/* LINK speed types */
+enum cgx_link_speed {
+	CGX_LINK_NONE,
+	CGX_LINK_10M,
+	CGX_LINK_100M,
+	CGX_LINK_1G,
+	CGX_LINK_2HG,
+	CGX_LINK_5G,
+	CGX_LINK_10G,
+	CGX_LINK_20G,
+	CGX_LINK_25G,
+	CGX_LINK_40G,
+	CGX_LINK_50G,
+	CGX_LINK_100G,
+	CGX_LINK_SPEED_MAX,
+};
+
+/* REQUEST ID types. Input to firmware */
+enum cgx_cmd_id {
+	CGX_CMD_NONE,
+	CGX_CMD_GET_FW_VER,
+	CGX_CMD_GET_MAC_ADDR,
+	CGX_CMD_SET_MTU,
+	CGX_CMD_GET_LINK_STS,		/* optional to user */
+	CGX_CMD_LINK_BRING_UP,
+	CGX_CMD_LINK_BRING_DOWN,
+	CGX_CMD_INTERNAL_LBK,
+	CGX_CMD_EXTERNAL_LBK,
+	CGX_CMD_HIGIG,
+	CGX_CMD_LINK_STATE_CHANGE,
+	CGX_CMD_MODE_CHANGE,		/* hot plug support */
+	CGX_CMD_INTF_SHUTDOWN,
+	CGX_CMD_GET_MKEX_PRFL_SIZE,
+	CGX_CMD_GET_MKEX_PRFL_ADDR
+};
+
+/* async event ids */
+enum cgx_evt_id {
+	CGX_EVT_NONE,
+	CGX_EVT_LINK_CHANGE,
+};
+
+/* event types - cause of interrupt */
+enum cgx_evt_type {
+	CGX_EVT_ASYNC,
+	CGX_EVT_CMD_RESP
+};
+
+enum cgx_stat {
+	CGX_STAT_SUCCESS,
+	CGX_STAT_FAIL
+};
+
+enum cgx_cmd_own {
+	CGX_CMD_OWN_NS,
+	CGX_CMD_OWN_FIRMWARE,
+};
+
+/* m - bit mask
+ * y - value to be written in the bit range
+ * x - input value whose bit range is to be modified
+ */
+#define FIELD_SET(m, y, x)		\
+	(((x) & ~(m)) |			\
+	FIELD_PREP((m), (y)))
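+
+/* Example usage, mirroring cgx_fwi_read_version() in cgx.c:
+ *	u64 req = 0;
+ *	req = FIELD_SET(CMDREG_ID, CGX_CMD_GET_FW_VER, req);
+ * clears the CMDREG_ID bit range in 'req' and writes the command ID into it.
+ */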
+
+/* scratchx(0) CSR used for ATF->non-secure SW communication.
+ * It acts as the status register and provides details on command ack/status,
+ * command response and error details.
+ */
+#define EVTREG_ACK		BIT_ULL(0)
+#define EVTREG_EVT_TYPE		BIT_ULL(1)
+#define EVTREG_STAT		BIT_ULL(2)
+#define EVTREG_ID		GENMASK_ULL(8, 3)
+
+/* Response to command IDs with command status as CGX_STAT_FAIL
+ *
+ * Not applicable for commands :
+ * CGX_CMD_LINK_BRING_UP/DOWN/CGX_EVT_LINK_CHANGE
+ */
+#define EVTREG_ERRTYPE		GENMASK_ULL(18, 9)
+
+/* Response to cmd ID as CGX_CMD_GET_FW_VER with cmd status as
+ * CGX_STAT_SUCCESS
+ */
+#define RESP_MAJOR_VER		GENMASK_ULL(12, 9)
+#define RESP_MINOR_VER		GENMASK_ULL(16, 13)
+
+/* Response to cmd ID as CGX_CMD_GET_MAC_ADDR with cmd status as
+ * CGX_STAT_SUCCESS
+ */
+#define RESP_MAC_ADDR		GENMASK_ULL(56, 9)
+
+/* Response to cmd ID as CGX_CMD_GET_MKEX_PRFL_SIZE with cmd status as
+ * CGX_STAT_SUCCESS
+ */
+#define RESP_MKEX_PRFL_SIZE		GENMASK_ULL(63, 9)
+
+/* Response to cmd ID as CGX_CMD_GET_MKEX_PRFL_ADDR with cmd status as
+ * CGX_STAT_SUCCESS
+ */
+#define RESP_MKEX_PRFL_ADDR		GENMASK_ULL(63, 9)
+
+/* Response to cmd ID - CGX_CMD_LINK_BRING_UP/DOWN, event ID CGX_EVT_LINK_CHANGE
+ * status can be either CGX_STAT_FAIL or CGX_STAT_SUCCESS
+ *
+ * In case of CGX_STAT_FAIL, it indicates CGX configuration failed
+ * when processing link up/down/change command.
+ * Both err_type and current link status will be updated
+ *
+ * In case of CGX_STAT_SUCCESS, err_type will be CGX_ERR_NONE and current
+ * link status will be updated
+ */
+struct cgx_lnk_sts {
+	uint64_t reserved1:9;
+	uint64_t link_up:1;
+	uint64_t full_duplex:1;
+	uint64_t speed:4;		/* cgx_link_speed */
+	uint64_t err_type:10;
+	uint64_t reserved2:39;
+};
+
+#define RESP_LINKSTAT_UP		GENMASK_ULL(9, 9)
+#define RESP_LINKSTAT_FDUPLEX		GENMASK_ULL(10, 10)
+#define RESP_LINKSTAT_SPEED		GENMASK_ULL(14, 11)
+#define RESP_LINKSTAT_ERRTYPE		GENMASK_ULL(24, 15)
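+
+/* The RESP_LINKSTAT_* masks above mirror the bit positions of the fields in
+ * struct cgx_lnk_sts, e.g. 'speed' occupies bits 14:11 in both views, so
+ *	FIELD_GET(RESP_LINKSTAT_SPEED, resp)
+ * yields the cgx_link_speed value reported by firmware.
+ */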
+
+/* scratchx(1) CSR used for non-secure SW->ATF communication
+ * This CSR acts as a command register
+ */
+#define CMDREG_OWN	BIT_ULL(0)
+#define CMDREG_ID	GENMASK_ULL(7, 2)
+
+/* Any command using enable/disable as an argument needs
+ * to set this bitfield.
+ * Ex: Loopback, HiGig...
+ */
+#define CMDREG_ENABLE	BIT_ULL(8)
+
+/* command argument to be passed for cmd ID - CGX_CMD_SET_MTU */
+#define CMDMTU_SIZE	GENMASK_ULL(23, 8)
+
+/* command argument to be passed for cmd ID - CGX_CMD_LINK_CHANGE */
+#define CMDLINKCHANGE_LINKUP	BIT_ULL(8)
+#define CMDLINKCHANGE_FULLDPLX	BIT_ULL(9)
+#define CMDLINKCHANGE_SPEED	GENMASK_ULL(13, 10)
+
+#endif /* __CGX_FW_INTF_H__ */
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/common.h b/drivers/net/ethernet/marvell/octeontx2/af/common.h
new file mode 100644
index 0000000..413c3f2
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/af/common.h
@@ -0,0 +1,199 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*  Marvell OcteonTx2 RVU Admin Function driver
+ *
+ * Copyright (C) 2018 Marvell International Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef COMMON_H
+#define COMMON_H
+
+#include "rvu_struct.h"
+
+#define OTX2_ALIGN			128  /* Align to cacheline */
+
+#define Q_SIZE_16		0ULL /* 16 entries */
+#define Q_SIZE_64		1ULL /* 64 entries */
+#define Q_SIZE_256		2ULL
+#define Q_SIZE_1K		3ULL
+#define Q_SIZE_4K		4ULL
+#define Q_SIZE_16K		5ULL
+#define Q_SIZE_64K		6ULL
+#define Q_SIZE_256K		7ULL
+#define Q_SIZE_1M		8ULL /* Million entries */
+#define Q_SIZE_MIN		Q_SIZE_16
+#define Q_SIZE_MAX		Q_SIZE_1M
+
+#define Q_COUNT(x)		(16ULL << (2 * x))
+#define Q_SIZE(x, n)		((ilog2(x) - (n)) / 2)
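+
+/* For example, Q_COUNT(Q_SIZE_64) = 16ULL << (2 * 1) = 64 entries, and
+ * Q_SIZE(64, 4) = (ilog2(64) - 4) / 2 = 1 = Q_SIZE_64, i.e. the two macros
+ * invert each other when 'n' is 4 (ilog2 of the 16-entry minimum).
+ */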
+
+/* Admin queue info */
+
+/* Since we intend to add only one instruction at a time,
+ * keep the queue size at its minimum.
+ */
+#define AQ_SIZE			Q_SIZE_16
+/* HW head & tail pointer mask */
+#define AQ_PTR_MASK		0xFFFFF
+
+struct qmem {
+	void            *base;
+	dma_addr_t	iova;
+	int		alloc_sz;
+	u8		entry_sz;
+	u8		align;
+	u32		qsize;
+};
+
+static inline int qmem_alloc(struct device *dev, struct qmem **q,
+			     int qsize, int entry_sz)
+{
+	struct qmem *qmem;
+	int aligned_addr;
+
+	if (!qsize)
+		return -EINVAL;
+
+	*q = devm_kzalloc(dev, sizeof(*qmem), GFP_KERNEL);
+	if (!*q)
+		return -ENOMEM;
+	qmem = *q;
+
+	qmem->entry_sz = entry_sz;
+	qmem->alloc_sz = (qsize * entry_sz) + OTX2_ALIGN;
+	qmem->base = dma_alloc_coherent(dev, qmem->alloc_sz,
+					 &qmem->iova, GFP_KERNEL);
+	if (!qmem->base)
+		return -ENOMEM;
+
+	qmem->qsize = qsize;
+
+	aligned_addr = ALIGN((u64)qmem->iova, OTX2_ALIGN);
+	qmem->align = (aligned_addr - qmem->iova);
+	qmem->base += qmem->align;
+	qmem->iova += qmem->align;
+	return 0;
+}
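+
+/* Example (a sketch; the 128-byte entry size is only an illustration):
+ *	struct qmem *inst;
+ *	int err = qmem_alloc(dev, &inst, Q_COUNT(AQ_SIZE), 128);
+ * allocates 16 * 128 + OTX2_ALIGN bytes and leaves inst->iova aligned
+ * to the 128-byte cacheline.
+ */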
+
+static inline void qmem_free(struct device *dev, struct qmem *qmem)
+{
+	if (!qmem)
+		return;
+
+	if (qmem->base)
+		dma_free_coherent(dev, qmem->alloc_sz,
+				  qmem->base - qmem->align,
+				  qmem->iova - qmem->align);
+	devm_kfree(dev, qmem);
+}
+
+struct admin_queue {
+	struct qmem	*inst;
+	struct qmem	*res;
+	spinlock_t	lock; /* Serialize inst enqueue from PFs */
+};
+
+/* NPA aura count */
+enum npa_aura_sz {
+	NPA_AURA_SZ_0,
+	NPA_AURA_SZ_128,
+	NPA_AURA_SZ_256,
+	NPA_AURA_SZ_512,
+	NPA_AURA_SZ_1K,
+	NPA_AURA_SZ_2K,
+	NPA_AURA_SZ_4K,
+	NPA_AURA_SZ_8K,
+	NPA_AURA_SZ_16K,
+	NPA_AURA_SZ_32K,
+	NPA_AURA_SZ_64K,
+	NPA_AURA_SZ_128K,
+	NPA_AURA_SZ_256K,
+	NPA_AURA_SZ_512K,
+	NPA_AURA_SZ_1M,
+	NPA_AURA_SZ_MAX,
+};
+
+#define NPA_AURA_COUNT(x)	(1ULL << ((x) + 6))
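+
+/* For example, NPA_AURA_COUNT(NPA_AURA_SZ_128) = 1ULL << (1 + 6) = 128 and
+ * NPA_AURA_COUNT(NPA_AURA_SZ_1M) = 1ULL << (14 + 6) = 1M.
+ */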
+
+/* NPA AQ result structure for init/read/write of aura HW contexts */
+struct npa_aq_aura_res {
+	struct	npa_aq_res_s	res;
+	struct	npa_aura_s	aura_ctx;
+	struct	npa_aura_s	ctx_mask;
+};
+
+/* NPA AQ result structure for init/read/write of pool HW contexts */
+struct npa_aq_pool_res {
+	struct	npa_aq_res_s	res;
+	struct	npa_pool_s	pool_ctx;
+	struct	npa_pool_s	ctx_mask;
+};
+
+/* NIX Transmit schedulers */
+enum nix_scheduler {
+	NIX_TXSCH_LVL_SMQ = 0x0,
+	NIX_TXSCH_LVL_MDQ = 0x0,
+	NIX_TXSCH_LVL_TL4 = 0x1,
+	NIX_TXSCH_LVL_TL3 = 0x2,
+	NIX_TXSCH_LVL_TL2 = 0x3,
+	NIX_TXSCH_LVL_TL1 = 0x4,
+	NIX_TXSCH_LVL_CNT = 0x5,
+};
+
+#define TXSCH_TL1_DFLT_RR_QTM      ((1 << 24) - 1)
+#define TXSCH_TL1_DFLT_RR_PRIO     (0x1ull)
+
+/* Min/Max packet sizes, excluding FCS */
+#define	NIC_HW_MIN_FRS			40
+#define	NIC_HW_MAX_FRS			9212
+#define	SDP_HW_MAX_FRS			65535
+
+/* NIX RX action operation */
+#define NIX_RX_ACTIONOP_DROP		(0x0ull)
+#define NIX_RX_ACTIONOP_UCAST		(0x1ull)
+#define NIX_RX_ACTIONOP_UCAST_IPSEC	(0x2ull)
+#define NIX_RX_ACTIONOP_MCAST		(0x3ull)
+#define NIX_RX_ACTIONOP_RSS		(0x4ull)
+
+/* NIX TX action operation */
+#define NIX_TX_ACTIONOP_DROP		(0x0ull)
+#define NIX_TX_ACTIONOP_UCAST_DEFAULT	(0x1ull)
+#define NIX_TX_ACTIONOP_UCAST_CHAN	(0x2ull)
+#define NIX_TX_ACTIONOP_MCAST		(0x3ull)
+#define NIX_TX_ACTIONOP_DROP_VIOL	(0x5ull)
+
+#define NPC_MCAM_KEY_X1			0
+#define NPC_MCAM_KEY_X2			1
+#define NPC_MCAM_KEY_X4			2
+
+#define NIX_INTF_RX			0
+#define NIX_INTF_TX			1
+
+#define NIX_INTF_TYPE_CGX		0
+#define NIX_INTF_TYPE_LBK		1
+
+#define MAX_LMAC_PKIND			12
+#define NIX_LINK_CGX_LMAC(a, b)		(0 + 4 * (a) + (b))
+#define NIX_LINK_LBK(a)			(12 + (a))
+#define NIX_CHAN_CGX_LMAC_CHX(a, b, c)	(0x800 + 0x100 * (a) + 0x10 * (b) + (c))
+#define NIX_CHAN_LBK_CHX(a, b)		(0 + 0x100 * (a) + (b))
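+
+/* For example, CGX1/LMAC2 maps to NIX link NIX_LINK_CGX_LMAC(1, 2) = 6 and
+ * its first channel to NIX_CHAN_CGX_LMAC_CHX(1, 2, 0) = 0x920.
+ */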
+
+/* NIX LSO format indices.
+ * As of now TSO is the only user, so indices are statically assigned.
+ */
+#define NIX_LSO_FORMAT_IDX_TSOV4	0
+#define NIX_LSO_FORMAT_IDX_TSOV6	1
+
+/* RSS info */
+#define MAX_RSS_GROUPS			8
+/* Group 0 has to be used in default pkt forwarding MCAM entries
+ * reserved for NIXLFs. Groups 1-7 can be used for RSS for ntuple
+ * filters.
+ */
+#define DEFAULT_RSS_CONTEXT_GROUP	0
+#define MAX_RSS_INDIR_TBL_SIZE		256 /* 1 << Max adder bits */
+
+#endif /* COMMON_H */
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mbox.c b/drivers/net/ethernet/marvell/octeontx2/af/mbox.c
new file mode 100644
index 0000000..d6f9ed8
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/af/mbox.c
@@ -0,0 +1,303 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell OcteonTx2 RVU Admin Function driver
+ *
+ * Copyright (C) 2018 Marvell International Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+
+#include "rvu_reg.h"
+#include "mbox.h"
+
+static const u16 msgs_offset = ALIGN(sizeof(struct mbox_hdr), MBOX_MSG_ALIGN);
+
+void otx2_mbox_reset(struct otx2_mbox *mbox, int devid)
+{
+	struct otx2_mbox_dev *mdev = &mbox->dev[devid];
+	struct mbox_hdr *tx_hdr, *rx_hdr;
+
+	tx_hdr = mdev->mbase + mbox->tx_start;
+	rx_hdr = mdev->mbase + mbox->rx_start;
+
+	spin_lock(&mdev->mbox_lock);
+	mdev->msg_size = 0;
+	mdev->rsp_size = 0;
+	tx_hdr->num_msgs = 0;
+	rx_hdr->num_msgs = 0;
+	spin_unlock(&mdev->mbox_lock);
+}
+EXPORT_SYMBOL(otx2_mbox_reset);
+
+void otx2_mbox_destroy(struct otx2_mbox *mbox)
+{
+	mbox->reg_base = NULL;
+	mbox->hwbase = NULL;
+
+	kfree(mbox->dev);
+	mbox->dev = NULL;
+}
+EXPORT_SYMBOL(otx2_mbox_destroy);
+
+int otx2_mbox_init(struct otx2_mbox *mbox, void *hwbase, struct pci_dev *pdev,
+		   void *reg_base, int direction, int ndevs)
+{
+	struct otx2_mbox_dev *mdev;
+	int devid;
+
+	switch (direction) {
+	case MBOX_DIR_AFPF:
+	case MBOX_DIR_PFVF:
+		mbox->tx_start = MBOX_DOWN_TX_START;
+		mbox->rx_start = MBOX_DOWN_RX_START;
+		mbox->tx_size  = MBOX_DOWN_TX_SIZE;
+		mbox->rx_size  = MBOX_DOWN_RX_SIZE;
+		break;
+	case MBOX_DIR_PFAF:
+	case MBOX_DIR_VFPF:
+		mbox->tx_start = MBOX_DOWN_RX_START;
+		mbox->rx_start = MBOX_DOWN_TX_START;
+		mbox->tx_size  = MBOX_DOWN_RX_SIZE;
+		mbox->rx_size  = MBOX_DOWN_TX_SIZE;
+		break;
+	case MBOX_DIR_AFPF_UP:
+	case MBOX_DIR_PFVF_UP:
+		mbox->tx_start = MBOX_UP_TX_START;
+		mbox->rx_start = MBOX_UP_RX_START;
+		mbox->tx_size  = MBOX_UP_TX_SIZE;
+		mbox->rx_size  = MBOX_UP_RX_SIZE;
+		break;
+	case MBOX_DIR_PFAF_UP:
+	case MBOX_DIR_VFPF_UP:
+		mbox->tx_start = MBOX_UP_RX_START;
+		mbox->rx_start = MBOX_UP_TX_START;
+		mbox->tx_size  = MBOX_UP_RX_SIZE;
+		mbox->rx_size  = MBOX_UP_TX_SIZE;
+		break;
+	default:
+		return -ENODEV;
+	}
+
+	switch (direction) {
+	case MBOX_DIR_AFPF:
+	case MBOX_DIR_AFPF_UP:
+		mbox->trigger = RVU_AF_AFPF_MBOX0;
+		mbox->tr_shift = 4;
+		break;
+	case MBOX_DIR_PFAF:
+	case MBOX_DIR_PFAF_UP:
+		mbox->trigger = RVU_PF_PFAF_MBOX1;
+		mbox->tr_shift = 0;
+		break;
+	case MBOX_DIR_PFVF:
+	case MBOX_DIR_PFVF_UP:
+		mbox->trigger = RVU_PF_VFX_PFVF_MBOX0;
+		mbox->tr_shift = 12;
+		break;
+	case MBOX_DIR_VFPF:
+	case MBOX_DIR_VFPF_UP:
+		mbox->trigger = RVU_VF_VFPF_MBOX1;
+		mbox->tr_shift = 0;
+		break;
+	default:
+		return -ENODEV;
+	}
+
+	mbox->reg_base = reg_base;
+	mbox->hwbase = hwbase;
+	mbox->pdev = pdev;
+
+	mbox->dev = kcalloc(ndevs, sizeof(struct otx2_mbox_dev), GFP_KERNEL);
+	if (!mbox->dev) {
+		otx2_mbox_destroy(mbox);
+		return -ENOMEM;
+	}
+
+	mbox->ndevs = ndevs;
+	for (devid = 0; devid < ndevs; devid++) {
+		mdev = &mbox->dev[devid];
+		mdev->mbase = mbox->hwbase + (devid * MBOX_SIZE);
+		spin_lock_init(&mdev->mbox_lock);
+		/* Init header to reset value */
+		otx2_mbox_reset(mbox, devid);
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL(otx2_mbox_init);
+
+int otx2_mbox_wait_for_rsp(struct otx2_mbox *mbox, int devid)
+{
+	struct otx2_mbox_dev *mdev = &mbox->dev[devid];
+	int timeout = 0, sleep = 1;
+
+	while (mdev->num_msgs != mdev->msgs_acked) {
+		msleep(sleep);
+		timeout += sleep;
+		if (timeout >= MBOX_RSP_TIMEOUT)
+			return -EIO;
+	}
+	return 0;
+}
+EXPORT_SYMBOL(otx2_mbox_wait_for_rsp);
+
+int otx2_mbox_busy_poll_for_rsp(struct otx2_mbox *mbox, int devid)
+{
+	struct otx2_mbox_dev *mdev = &mbox->dev[devid];
+	unsigned long timeout = jiffies + 1 * HZ;
+
+	while (!time_after(jiffies, timeout)) {
+		if (mdev->num_msgs == mdev->msgs_acked)
+			return 0;
+		cpu_relax();
+	}
+	return -EIO;
+}
+EXPORT_SYMBOL(otx2_mbox_busy_poll_for_rsp);
+
+void otx2_mbox_msg_send(struct otx2_mbox *mbox, int devid)
+{
+	struct otx2_mbox_dev *mdev = &mbox->dev[devid];
+	struct mbox_hdr *tx_hdr, *rx_hdr;
+
+	tx_hdr = mdev->mbase + mbox->tx_start;
+	rx_hdr = mdev->mbase + mbox->rx_start;
+
+	spin_lock(&mdev->mbox_lock);
+	/* Reset header for next messages */
+	mdev->msg_size = 0;
+	mdev->rsp_size = 0;
+	mdev->msgs_acked = 0;
+
+	/* Sync mbox data into memory */
+	smp_wmb();
+
+	/* num_msgs != 0 signals to the peer that the buffer has a number of
+	 * messages.  So this should be written after writing all the messages
+	 * to the shared memory.
+	 */
+	tx_hdr->num_msgs = mdev->num_msgs;
+	rx_hdr->num_msgs = 0;
+	spin_unlock(&mdev->mbox_lock);
+
+	/* The interrupt should be fired after num_msgs is written
+	 * to the shared memory
+	 */
+	writeq(1, (void __iomem *)mbox->reg_base +
+	       (mbox->trigger | (devid << mbox->tr_shift)));
+}
+EXPORT_SYMBOL(otx2_mbox_msg_send);
+
+struct mbox_msghdr *otx2_mbox_alloc_msg_rsp(struct otx2_mbox *mbox, int devid,
+					    int size, int size_rsp)
+{
+	struct otx2_mbox_dev *mdev = &mbox->dev[devid];
+	struct mbox_msghdr *msghdr = NULL;
+
+	spin_lock(&mdev->mbox_lock);
+	size = ALIGN(size, MBOX_MSG_ALIGN);
+	size_rsp = ALIGN(size_rsp, MBOX_MSG_ALIGN);
+	/* Check if there is space in mailbox */
+	if ((mdev->msg_size + size) > mbox->tx_size - msgs_offset)
+		goto exit;
+	if ((mdev->rsp_size + size_rsp) > mbox->rx_size - msgs_offset)
+		goto exit;
+
+	if (mdev->msg_size == 0)
+		mdev->num_msgs = 0;
+	mdev->num_msgs++;
+
+	msghdr = mdev->mbase + mbox->tx_start + msgs_offset + mdev->msg_size;
+
+	/* Clear the whole msg region */
+	memset(msghdr, 0, sizeof(*msghdr) + size);
+	/* Init message header with reset values */
+	msghdr->ver = OTX2_MBOX_VERSION;
+	mdev->msg_size += size;
+	mdev->rsp_size += size_rsp;
+	msghdr->next_msgoff = mdev->msg_size + msgs_offset;
+exit:
+	spin_unlock(&mdev->mbox_lock);
+
+	return msghdr;
+}
+EXPORT_SYMBOL(otx2_mbox_alloc_msg_rsp);
+
+struct mbox_msghdr *otx2_mbox_get_rsp(struct otx2_mbox *mbox, int devid,
+				      struct mbox_msghdr *msg)
+{
+	unsigned long imsg = mbox->tx_start + msgs_offset;
+	unsigned long irsp = mbox->rx_start + msgs_offset;
+	struct otx2_mbox_dev *mdev = &mbox->dev[devid];
+	u16 msgs;
+
+	if (mdev->num_msgs != mdev->msgs_acked)
+		return ERR_PTR(-ENODEV);
+
+	for (msgs = 0; msgs < mdev->msgs_acked; msgs++) {
+		struct mbox_msghdr *pmsg = mdev->mbase + imsg;
+		struct mbox_msghdr *prsp = mdev->mbase + irsp;
+
+		if (msg == pmsg) {
+			if (pmsg->id != prsp->id)
+				return ERR_PTR(-ENODEV);
+			return prsp;
+		}
+
+		imsg = pmsg->next_msgoff;
+		irsp = prsp->next_msgoff;
+	}
+
+	return ERR_PTR(-ENODEV);
+}
+EXPORT_SYMBOL(otx2_mbox_get_rsp);
+
+int
+otx2_reply_invalid_msg(struct otx2_mbox *mbox, int devid, u16 pcifunc, u16 id)
+{
+	struct msg_rsp *rsp;
+
+	rsp = (struct msg_rsp *)
+	       otx2_mbox_alloc_msg(mbox, devid, sizeof(*rsp));
+	if (!rsp)
+		return -ENOMEM;
+	rsp->hdr.id = id;
+	rsp->hdr.sig = OTX2_MBOX_RSP_SIG;
+	rsp->hdr.rc = MBOX_MSG_INVALID;
+	rsp->hdr.pcifunc = pcifunc;
+	return 0;
+}
+EXPORT_SYMBOL(otx2_reply_invalid_msg);
+
+bool otx2_mbox_nonempty(struct otx2_mbox *mbox, int devid)
+{
+	struct otx2_mbox_dev *mdev = &mbox->dev[devid];
+	bool ret;
+
+	spin_lock(&mdev->mbox_lock);
+	ret = mdev->num_msgs != 0;
+	spin_unlock(&mdev->mbox_lock);
+
+	return ret;
+}
+EXPORT_SYMBOL(otx2_mbox_nonempty);
+
+const char *otx2_mbox_id2name(u16 id)
+{
+	switch (id) {
+#define M(_name, _id, _1, _2, _3) case _id: return # _name;
+	MBOX_MESSAGES
+#undef M
+	default:
+		return "INVALID ID";
+	}
+}
+EXPORT_SYMBOL(otx2_mbox_id2name);
+
+MODULE_AUTHOR("Marvell International Ltd.");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
new file mode 100644
index 0000000..75439fc
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
@@ -0,0 +1,795 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*  Marvell OcteonTx2 RVU Admin Function driver
+ *
+ * Copyright (C) 2018 Marvell International Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef MBOX_H
+#define MBOX_H
+
+#include <linux/etherdevice.h>
+#include <linux/sizes.h>
+
+#include "rvu_struct.h"
+#include "common.h"
+
+#define MBOX_SIZE		SZ_64K
+
+/* AF/PF mbox: PF initiated, PF/VF mbox: VF initiated */
+#define MBOX_DOWN_RX_START	0
+#define MBOX_DOWN_RX_SIZE	(46 * SZ_1K)
+#define MBOX_DOWN_TX_START	(MBOX_DOWN_RX_START + MBOX_DOWN_RX_SIZE)
+#define MBOX_DOWN_TX_SIZE	(16 * SZ_1K)
+/* AF/PF mbox: AF initiated, PF/VF mbox: PF initiated */
+#define MBOX_UP_RX_START	(MBOX_DOWN_TX_START + MBOX_DOWN_TX_SIZE)
+#define MBOX_UP_RX_SIZE		SZ_1K
+#define MBOX_UP_TX_START	(MBOX_UP_RX_START + MBOX_UP_RX_SIZE)
+#define MBOX_UP_TX_SIZE		SZ_1K
+
+#if MBOX_UP_TX_SIZE + MBOX_UP_TX_START != MBOX_SIZE
+# error "incorrect mailbox area sizes"
+#endif
+
+#define INTR_MASK(pfvfs) ((pfvfs < 64) ? (BIT_ULL(pfvfs) - 1) : (~0ull))
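+/* e.g. INTR_MASK(3) = BIT_ULL(3) - 1 = 0x7, i.e. one bit set per PF/VF */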
+
+#define MBOX_RSP_TIMEOUT	1000 /* in ms, Time to wait for mbox response */
+
+#define MBOX_MSG_ALIGN		16  /* Align mbox msg start to 16bytes */
+
+/* Mailbox directions */
+#define MBOX_DIR_AFPF		0  /* AF replies to PF */
+#define MBOX_DIR_PFAF		1  /* PF sends messages to AF */
+#define MBOX_DIR_PFVF		2  /* PF replies to VF */
+#define MBOX_DIR_VFPF		3  /* VF sends messages to PF */
+#define MBOX_DIR_AFPF_UP	4  /* AF sends messages to PF */
+#define MBOX_DIR_PFAF_UP	5  /* PF replies to AF */
+#define MBOX_DIR_PFVF_UP	6  /* PF sends messages to VF */
+#define MBOX_DIR_VFPF_UP	7  /* VF replies to PF */
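+
+/* Each pair above describes the two ends of the same shared region:
+ * otx2_mbox_init() simply swaps the Tx/Rx offsets and sizes between, e.g.,
+ * MBOX_DIR_AFPF and MBOX_DIR_PFAF, so one side's Tx area is the other
+ * side's Rx area.
+ */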
+
+struct otx2_mbox_dev {
+	void	    *mbase;   /* This dev's mbox region */
+	spinlock_t  mbox_lock;
+	u16         msg_size; /* Total msg size to be sent */
+	u16         rsp_size; /* Total expected rsp size, to check replies fit */
+	u16         num_msgs; /* No of msgs sent or waiting for response */
+	u16         msgs_acked; /* No of msgs for which response is received */
+};
+
+struct otx2_mbox {
+	struct pci_dev *pdev;
+	void   *hwbase;  /* Mbox region advertised by HW */
+	void   *reg_base;/* CSR base for this dev */
+	u64    trigger;  /* Trigger mbox notification */
+	u16    tr_shift; /* Mbox trigger shift */
+	u64    rx_start; /* Offset of Rx region in mbox memory */
+	u64    tx_start; /* Offset of Tx region in mbox memory */
+	u16    rx_size;  /* Size of Rx region */
+	u16    tx_size;  /* Size of Tx region */
+	u16    ndevs;    /* The number of peers */
+	struct otx2_mbox_dev *dev;
+};
+
+/* Header which precedes all mbox messages */
+struct mbox_hdr {
+	u16  num_msgs;   /* No of msgs embedded */
+};
+
+/* Header which precedes every msg and is also part of it */
+struct mbox_msghdr {
+	u16 pcifunc;     /* Who's sending this msg */
+	u16 id;          /* Mbox message ID */
+#define OTX2_MBOX_REQ_SIG (0xdead)
+#define OTX2_MBOX_RSP_SIG (0xbeef)
+	u16 sig;         /* Signature, for validating corrupted msgs */
+#define OTX2_MBOX_VERSION (0x0001)
+	u16 ver;         /* Version of msg's structure for this ID */
+	u16 next_msgoff; /* Offset of next msg within mailbox region */
+	int rc;          /* Response code from processing the msg */
+};
+
+void otx2_mbox_reset(struct otx2_mbox *mbox, int devid);
+void otx2_mbox_destroy(struct otx2_mbox *mbox);
+int otx2_mbox_init(struct otx2_mbox *mbox, void __force *hwbase,
+		   struct pci_dev *pdev, void __force *reg_base,
+		   int direction, int ndevs);
+void otx2_mbox_msg_send(struct otx2_mbox *mbox, int devid);
+int otx2_mbox_wait_for_rsp(struct otx2_mbox *mbox, int devid);
+int otx2_mbox_busy_poll_for_rsp(struct otx2_mbox *mbox, int devid);
+struct mbox_msghdr *otx2_mbox_alloc_msg_rsp(struct otx2_mbox *mbox, int devid,
+					    int size, int size_rsp);
+struct mbox_msghdr *otx2_mbox_get_rsp(struct otx2_mbox *mbox, int devid,
+				      struct mbox_msghdr *msg);
+int otx2_reply_invalid_msg(struct otx2_mbox *mbox, int devid,
+			   u16 pcifunc, u16 id);
+bool otx2_mbox_nonempty(struct otx2_mbox *mbox, int devid);
+const char *otx2_mbox_id2name(u16 id);
+static inline struct mbox_msghdr *otx2_mbox_alloc_msg(struct otx2_mbox *mbox,
+						      int devid, int size)
+{
+	return otx2_mbox_alloc_msg_rsp(mbox, devid, size, 0);
+}
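+
+/* A typical sender-side sequence (sketch only; 'mbox' and 'devid' are
+ * assumed to be set up via otx2_mbox_init(), pcifunc/ver handling and
+ * error paths omitted):
+ *
+ *	struct mbox_msghdr *req, *rsp;
+ *
+ *	req = otx2_mbox_alloc_msg(mbox, devid, sizeof(struct msg_req));
+ *	req->id = MBOX_MSG_READY;
+ *	req->sig = OTX2_MBOX_REQ_SIG;
+ *	otx2_mbox_msg_send(mbox, devid);
+ *	if (!otx2_mbox_wait_for_rsp(mbox, devid))
+ *		rsp = otx2_mbox_get_rsp(mbox, devid, req);
+ */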
+
+/* Mailbox message types */
+#define MBOX_MSG_MASK				0xFFFF
+#define MBOX_MSG_INVALID			0xFFFE
+#define MBOX_MSG_MAX				0xFFFF
+
+#define MBOX_MESSAGES							\
+/* Generic mbox IDs (range 0x000 - 0x1FF) */				\
+M(READY,		0x001, ready, msg_req, ready_msg_rsp)		\
+M(ATTACH_RESOURCES,	0x002, attach_resources, rsrc_attach, msg_rsp)	\
+M(DETACH_RESOURCES,	0x003, detach_resources, rsrc_detach, msg_rsp)	\
+M(MSIX_OFFSET,		0x004, msix_offset, msg_req, msix_offset_rsp)	\
+M(VF_FLR,		0x006, vf_flr, msg_req, msg_rsp)		\
+/* CGX mbox IDs (range 0x200 - 0x3FF) */				\
+M(CGX_START_RXTX,	0x200, cgx_start_rxtx, msg_req, msg_rsp)	\
+M(CGX_STOP_RXTX,	0x201, cgx_stop_rxtx, msg_req, msg_rsp)		\
+M(CGX_STATS,		0x202, cgx_stats, msg_req, cgx_stats_rsp)	\
+M(CGX_MAC_ADDR_SET,	0x203, cgx_mac_addr_set, cgx_mac_addr_set_or_get,    \
+				cgx_mac_addr_set_or_get)		\
+M(CGX_MAC_ADDR_GET,	0x204, cgx_mac_addr_get, cgx_mac_addr_set_or_get,    \
+				cgx_mac_addr_set_or_get)		\
+M(CGX_PROMISC_ENABLE,	0x205, cgx_promisc_enable, msg_req, msg_rsp)	\
+M(CGX_PROMISC_DISABLE,	0x206, cgx_promisc_disable, msg_req, msg_rsp)	\
+M(CGX_START_LINKEVENTS, 0x207, cgx_start_linkevents, msg_req, msg_rsp)	\
+M(CGX_STOP_LINKEVENTS,	0x208, cgx_stop_linkevents, msg_req, msg_rsp)	\
+M(CGX_GET_LINKINFO,	0x209, cgx_get_linkinfo, msg_req, cgx_link_info_msg) \
+M(CGX_INTLBK_ENABLE,	0x20A, cgx_intlbk_enable, msg_req, msg_rsp)	\
+M(CGX_INTLBK_DISABLE,	0x20B, cgx_intlbk_disable, msg_req, msg_rsp)	\
+/* NPA mbox IDs (range 0x400 - 0x5FF) */				\
+M(NPA_LF_ALLOC,		0x400, npa_lf_alloc,				\
+				npa_lf_alloc_req, npa_lf_alloc_rsp)	\
+M(NPA_LF_FREE,		0x401, npa_lf_free, msg_req, msg_rsp)		\
+M(NPA_AQ_ENQ,		0x402, npa_aq_enq, npa_aq_enq_req, npa_aq_enq_rsp)   \
+M(NPA_HWCTX_DISABLE,	0x403, npa_hwctx_disable, hwctx_disable_req, msg_rsp)\
+/* SSO/SSOW mbox IDs (range 0x600 - 0x7FF) */				\
+/* TIM mbox IDs (range 0x800 - 0x9FF) */				\
+/* CPT mbox IDs (range 0xA00 - 0xBFF) */				\
+/* NPC mbox IDs (range 0x6000 - 0x7FFF) */				\
+M(NPC_MCAM_ALLOC_ENTRY,	0x6000, npc_mcam_alloc_entry, npc_mcam_alloc_entry_req,\
+				npc_mcam_alloc_entry_rsp)		\
+M(NPC_MCAM_FREE_ENTRY,	0x6001, npc_mcam_free_entry,			\
+				 npc_mcam_free_entry_req, msg_rsp)	\
+M(NPC_MCAM_WRITE_ENTRY,	0x6002, npc_mcam_write_entry,			\
+				 npc_mcam_write_entry_req, msg_rsp)	\
+M(NPC_MCAM_ENA_ENTRY,   0x6003, npc_mcam_ena_entry,			\
+				 npc_mcam_ena_dis_entry_req, msg_rsp)	\
+M(NPC_MCAM_DIS_ENTRY,   0x6004, npc_mcam_dis_entry,			\
+				 npc_mcam_ena_dis_entry_req, msg_rsp)	\
+M(NPC_MCAM_SHIFT_ENTRY, 0x6005, npc_mcam_shift_entry, npc_mcam_shift_entry_req,\
+				npc_mcam_shift_entry_rsp)		\
+M(NPC_MCAM_ALLOC_COUNTER, 0x6006, npc_mcam_alloc_counter,		\
+					npc_mcam_alloc_counter_req,	\
+					npc_mcam_alloc_counter_rsp)	\
+M(NPC_MCAM_FREE_COUNTER,  0x6007, npc_mcam_free_counter,		\
+				    npc_mcam_oper_counter_req, msg_rsp)	\
+M(NPC_MCAM_UNMAP_COUNTER, 0x6008, npc_mcam_unmap_counter,		\
+				   npc_mcam_unmap_counter_req, msg_rsp)	\
+M(NPC_MCAM_CLEAR_COUNTER, 0x6009, npc_mcam_clear_counter,		\
+				   npc_mcam_oper_counter_req, msg_rsp)	\
+M(NPC_MCAM_COUNTER_STATS, 0x600a, npc_mcam_counter_stats,		\
+				   npc_mcam_oper_counter_req,		\
+				   npc_mcam_oper_counter_rsp)		\
+M(NPC_MCAM_ALLOC_AND_WRITE_ENTRY, 0x600b, npc_mcam_alloc_and_write_entry,      \
+					  npc_mcam_alloc_and_write_entry_req,  \
+					  npc_mcam_alloc_and_write_entry_rsp)  \
+M(NPC_GET_KEX_CFG,	  0x600c, npc_get_kex_cfg,			\
+				   msg_req, npc_get_kex_cfg_rsp)	\
+/* NIX mbox IDs (range 0x8000 - 0xFFFF) */				\
+M(NIX_LF_ALLOC,		0x8000, nix_lf_alloc,				\
+				 nix_lf_alloc_req, nix_lf_alloc_rsp)	\
+M(NIX_LF_FREE,		0x8001, nix_lf_free, msg_req, msg_rsp)		\
+M(NIX_AQ_ENQ,		0x8002, nix_aq_enq, nix_aq_enq_req, nix_aq_enq_rsp)  \
+M(NIX_HWCTX_DISABLE,	0x8003, nix_hwctx_disable,			\
+				 hwctx_disable_req, msg_rsp)		\
+M(NIX_TXSCH_ALLOC,	0x8004, nix_txsch_alloc,			\
+				 nix_txsch_alloc_req, nix_txsch_alloc_rsp)   \
+M(NIX_TXSCH_FREE,	0x8005, nix_txsch_free, nix_txsch_free_req, msg_rsp) \
+M(NIX_TXSCHQ_CFG,	0x8006, nix_txschq_cfg, nix_txschq_config, msg_rsp)  \
+M(NIX_STATS_RST,	0x8007, nix_stats_rst, msg_req, msg_rsp)	\
+M(NIX_VTAG_CFG,		0x8008, nix_vtag_cfg, nix_vtag_config, msg_rsp)	\
+M(NIX_RSS_FLOWKEY_CFG,  0x8009, nix_rss_flowkey_cfg,			\
+				 nix_rss_flowkey_cfg,			\
+				 nix_rss_flowkey_cfg_rsp)		\
+M(NIX_SET_MAC_ADDR,	0x800a, nix_set_mac_addr, nix_set_mac_addr, msg_rsp) \
+M(NIX_SET_RX_MODE,	0x800b, nix_set_rx_mode, nix_rx_mode, msg_rsp)	\
+M(NIX_SET_HW_FRS,	0x800c, nix_set_hw_frs, nix_frs_cfg, msg_rsp)	\
+M(NIX_LF_START_RX,	0x800d, nix_lf_start_rx, msg_req, msg_rsp)	\
+M(NIX_LF_STOP_RX,	0x800e, nix_lf_stop_rx, msg_req, msg_rsp)	\
+M(NIX_MARK_FORMAT_CFG,	0x800f, nix_mark_format_cfg,			\
+				 nix_mark_format_cfg,			\
+				 nix_mark_format_cfg_rsp)		\
+M(NIX_SET_RX_CFG,	0x8010, nix_set_rx_cfg, nix_rx_cfg, msg_rsp)	\
+M(NIX_LSO_FORMAT_CFG,	0x8011, nix_lso_format_cfg,			\
+				 nix_lso_format_cfg,			\
+				 nix_lso_format_cfg_rsp)		\
+M(NIX_RXVLAN_ALLOC,	0x8012, nix_rxvlan_alloc, msg_req, msg_rsp)
+
+/* Messages initiated by AF (range 0xC00 - 0xDFF) */
+#define MBOX_UP_CGX_MESSAGES						\
+M(CGX_LINK_EVENT,	0xC00, cgx_link_event, cgx_link_info_msg, msg_rsp)
+
+enum {
+#define M(_name, _id, _1, _2, _3) MBOX_MSG_ ## _name = _id,
+MBOX_MESSAGES
+MBOX_UP_CGX_MESSAGES
+#undef M
+};
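+
+/* The M() x-macro above turns each MBOX_MESSAGES entry into an enum value,
+ * e.g. M(READY, 0x001, ...) becomes MBOX_MSG_READY = 0x001. The same lists
+ * are re-expanded with different M() definitions elsewhere, for instance in
+ * otx2_mbox_id2name().
+ */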
+
+/* Mailbox message formats */
+
+#define RVU_DEFAULT_PF_FUNC     0xFFFF
+
+/* Generic request msg used for those mbox messages which
+ * don't send any data in the request.
+ */
+struct msg_req {
+	struct mbox_msghdr hdr;
+};
+
+/* Generic response msg used as an ack or response for those mbox
+ * messages which don't have a specific rsp msg format.
+ */
+struct msg_rsp {
+	struct mbox_msghdr hdr;
+};
+
+/* RVU mailbox error codes
+ * Range 256 - 300.
+ */
+enum rvu_af_status {
+	RVU_INVALID_VF_ID           = -256,
+};
+
+struct ready_msg_rsp {
+	struct mbox_msghdr hdr;
+	u16    sclk_feq;	/* SCLK frequency */
+};
+
+/* Structure for requesting resource provisioning.
+ * The 'modify' flag is to be used when either requesting more
+ * of, or detaching part of, a certain resource type.
+ * The rest of the fields specify how many of which type are to
+ * be attached.
+ */
+struct rsrc_attach {
+	struct mbox_msghdr hdr;
+	u8   modify:1;
+	u8   npalf:1;
+	u8   nixlf:1;
+	u16  sso;
+	u16  ssow;
+	u16  timlfs;
+	u16  cptlfs;
+};
+
+/* Structure for relinquishing resources.
+ * The 'partial' flag is to be used when relinquishing all resources of
+ * only certain types. If not set, all resources of all
+ * types provisioned to the RVU function will be detached.
+ */
+struct rsrc_detach {
+	struct mbox_msghdr hdr;
+	u8 partial:1;
+	u8 npalf:1;
+	u8 nixlf:1;
+	u8 sso:1;
+	u8 ssow:1;
+	u8 timlfs:1;
+	u8 cptlfs:1;
+};
+
+#define MSIX_VECTOR_INVALID	0xFFFF
+#define MAX_RVU_BLKLF_CNT	256
+
+struct msix_offset_rsp {
+	struct mbox_msghdr hdr;
+	u16  npa_msixoff;
+	u16  nix_msixoff;
+	u8   sso;
+	u8   ssow;
+	u8   timlfs;
+	u8   cptlfs;
+	u16  sso_msixoff[MAX_RVU_BLKLF_CNT];
+	u16  ssow_msixoff[MAX_RVU_BLKLF_CNT];
+	u16  timlf_msixoff[MAX_RVU_BLKLF_CNT];
+	u16  cptlf_msixoff[MAX_RVU_BLKLF_CNT];
+};
+
+/* CGX mbox message formats */
+
+struct cgx_stats_rsp {
+	struct mbox_msghdr hdr;
+#define CGX_RX_STATS_COUNT	13
+#define CGX_TX_STATS_COUNT	18
+	u64 rx_stats[CGX_RX_STATS_COUNT];
+	u64 tx_stats[CGX_TX_STATS_COUNT];
+};
+
+/* Structure for requesting the setting/getting of the MAC
+ * address on a CGX interface
+ */
+struct cgx_mac_addr_set_or_get {
+	struct mbox_msghdr hdr;
+	u8 mac_addr[ETH_ALEN];
+};
+
+struct cgx_link_user_info {
+	uint64_t link_up:1;
+	uint64_t full_duplex:1;
+	uint64_t lmac_type_id:4;
+	uint64_t speed:20; /* speed in Mbps */
+#define LMACTYPE_STR_LEN 16
+	char lmac_type[LMACTYPE_STR_LEN];
+};
+
+struct cgx_link_info_msg {
+	struct mbox_msghdr hdr;
+	struct cgx_link_user_info link_info;
+};
+
+/* NPA mbox message formats */
+
+/* NPA mailbox error codes
+ * Range 301 - 400.
+ */
+enum npa_af_status {
+	NPA_AF_ERR_PARAM            = -301,
+	NPA_AF_ERR_AQ_FULL          = -302,
+	NPA_AF_ERR_AQ_ENQUEUE       = -303,
+	NPA_AF_ERR_AF_LF_INVALID    = -304,
+	NPA_AF_ERR_AF_LF_ALLOC      = -305,
+	NPA_AF_ERR_LF_RESET         = -306,
+};
+
+/* For NPA LF context alloc and init */
+struct npa_lf_alloc_req {
+	struct mbox_msghdr hdr;
+	int node;
+	int aura_sz;  /* No of auras */
+	u32 nr_pools; /* No of pools */
+};
+
+struct npa_lf_alloc_rsp {
+	struct mbox_msghdr hdr;
+	u32 stack_pg_ptrs;  /* No of ptrs per stack page */
+	u32 stack_pg_bytes; /* Size of stack page */
+	u16 qints; /* NPA_AF_CONST::QINTS */
+};
+
+/* NPA AQ enqueue msg */
+struct npa_aq_enq_req {
+	struct mbox_msghdr hdr;
+	u32 aura_id;
+	u8 ctype;
+	u8 op;
+	union {
+		/* Valid when op == WRITE/INIT and ctype == AURA.
+		 * LF fills the pool_id in aura.pool_addr. AF will translate
+		 * the pool_id to pool context pointer.
+		 */
+		struct npa_aura_s aura;
+		/* Valid when op == WRITE/INIT and ctype == POOL */
+		struct npa_pool_s pool;
+	};
+	/* Mask data when op == WRITE (1=write, 0=don't write) */
+	union {
+		/* Valid when op == WRITE and ctype == AURA */
+		struct npa_aura_s aura_mask;
+		/* Valid when op == WRITE and ctype == POOL */
+		struct npa_pool_s pool_mask;
+	};
+};
+
+struct npa_aq_enq_rsp {
+	struct mbox_msghdr hdr;
+	union {
+		/* Valid when op == READ and ctype == AURA */
+		struct npa_aura_s aura;
+		/* Valid when op == READ and ctype == POOL */
+		struct npa_pool_s pool;
+	};
+};
+
+/* Disable all contexts of type 'ctype' */
+struct hwctx_disable_req {
+	struct mbox_msghdr hdr;
+	u8 ctype;
+};
+
+/* NIX mbox message formats */
+
+/* NIX mailbox error codes
+ * Range 401 - 500.
+ */
+enum nix_af_status {
+	NIX_AF_ERR_PARAM            = -401,
+	NIX_AF_ERR_AQ_FULL          = -402,
+	NIX_AF_ERR_AQ_ENQUEUE       = -403,
+	NIX_AF_ERR_AF_LF_INVALID    = -404,
+	NIX_AF_ERR_AF_LF_ALLOC      = -405,
+	NIX_AF_ERR_TLX_ALLOC_FAIL   = -406,
+	NIX_AF_ERR_TLX_INVALID      = -407,
+	NIX_AF_ERR_RSS_SIZE_INVALID = -408,
+	NIX_AF_ERR_RSS_GRPS_INVALID = -409,
+	NIX_AF_ERR_FRS_INVALID      = -410,
+	NIX_AF_ERR_RX_LINK_INVALID  = -411,
+	NIX_AF_INVAL_TXSCHQ_CFG     = -412,
+	NIX_AF_SMQ_FLUSH_FAILED     = -413,
+	NIX_AF_ERR_LF_RESET         = -414,
+	NIX_AF_ERR_RSS_NOSPC_FIELD  = -415,
+	NIX_AF_ERR_RSS_NOSPC_ALGO   = -416,
+	NIX_AF_ERR_MARK_CFG_FAIL    = -417,
+	NIX_AF_ERR_LSO_CFG_FAIL     = -418,
+	NIX_AF_INVAL_NPA_PF_FUNC    = -419,
+	NIX_AF_INVAL_SSO_PF_FUNC    = -420,
+};
+
+/* For NIX LF context alloc and init */
+struct nix_lf_alloc_req {
+	struct mbox_msghdr hdr;
+	int node;
+	u32 rq_cnt;   /* No of receive queues */
+	u32 sq_cnt;   /* No of send queues */
+	u32 cq_cnt;   /* No of completion queues */
+	u8  xqe_sz;
+	u16 rss_sz;
+	u8  rss_grps;
+	u16 npa_func;
+	u16 sso_func;
+	u64 rx_cfg;   /* See NIX_AF_LF(0..127)_RX_CFG */
+};
+
+struct nix_lf_alloc_rsp {
+	struct mbox_msghdr hdr;
+	u16	sqb_size;
+	u16	rx_chan_base;
+	u16	tx_chan_base;
+	u8      rx_chan_cnt; /* total number of RX channels */
+	u8      tx_chan_cnt; /* total number of TX channels */
+	u8	lso_tsov4_idx;
+	u8	lso_tsov6_idx;
+	u8      mac_addr[ETH_ALEN];
+	u8	lf_rx_stats; /* NIX_AF_CONST1::LF_RX_STATS */
+	u8	lf_tx_stats; /* NIX_AF_CONST1::LF_TX_STATS */
+	u16	cints; /* NIX_AF_CONST2::CINTS */
+	u16	qints; /* NIX_AF_CONST2::QINTS */
+};
+
+/* NIX AQ enqueue msg */
+struct nix_aq_enq_req {
+	struct mbox_msghdr hdr;
+	u32  qidx;
+	u8 ctype;
+	u8 op;
+	union {
+		struct nix_rq_ctx_s rq;
+		struct nix_sq_ctx_s sq;
+		struct nix_cq_ctx_s cq;
+		struct nix_rsse_s   rss;
+		struct nix_rx_mce_s mce;
+	};
+	union {
+		struct nix_rq_ctx_s rq_mask;
+		struct nix_sq_ctx_s sq_mask;
+		struct nix_cq_ctx_s cq_mask;
+		struct nix_rsse_s   rss_mask;
+		struct nix_rx_mce_s mce_mask;
+	};
+};
+
+struct nix_aq_enq_rsp {
+	struct mbox_msghdr hdr;
+	union {
+		struct nix_rq_ctx_s rq;
+		struct nix_sq_ctx_s sq;
+		struct nix_cq_ctx_s cq;
+		struct nix_rsse_s   rss;
+		struct nix_rx_mce_s mce;
+	};
+};
+
+/* Tx scheduler/shaper mailbox messages */
+
+#define MAX_TXSCHQ_PER_FUNC		128
+
+struct nix_txsch_alloc_req {
+	struct mbox_msghdr hdr;
+	/* Scheduler queue count request at each level */
+	u16 schq_contig[NIX_TXSCH_LVL_CNT]; /* No of contiguous queues */
+	u16 schq[NIX_TXSCH_LVL_CNT]; /* No of non-contiguous queues */
+};
+
+struct nix_txsch_alloc_rsp {
+	struct mbox_msghdr hdr;
+	/* Scheduler queue count allocated at each level */
+	u16 schq_contig[NIX_TXSCH_LVL_CNT];
+	u16 schq[NIX_TXSCH_LVL_CNT];
+	/* Scheduler queue list allocated at each level */
+	u16 schq_contig_list[NIX_TXSCH_LVL_CNT][MAX_TXSCHQ_PER_FUNC];
+	u16 schq_list[NIX_TXSCH_LVL_CNT][MAX_TXSCHQ_PER_FUNC];
+};
+
+struct nix_txsch_free_req {
+	struct mbox_msghdr hdr;
+#define TXSCHQ_FREE_ALL BIT_ULL(0)
+	u16 flags;
+	/* Scheduler queue level to be freed */
+	u16 schq_lvl;
+	/* List of scheduler queues to be freed */
+	u16 schq;
+};
+
+struct nix_txschq_config {
+	struct mbox_msghdr hdr;
+	u8 lvl;	/* SMQ/MDQ/TL4/TL3/TL2/TL1 */
+#define TXSCHQ_IDX_SHIFT	16
+#define TXSCHQ_IDX_MASK		(BIT_ULL(10) - 1)
+#define TXSCHQ_IDX(reg, shift)	(((reg) >> (shift)) & TXSCHQ_IDX_MASK)
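+/* e.g. TXSCHQ_IDX(0x00A50000, TXSCHQ_IDX_SHIFT) = (0x00A50000 >> 16) & 0x3FF
+ * = 0xA5, i.e. the 10-bit index carried in bits 25:16 of 'reg'.
+ */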
+	u8 num_regs;
+#define MAX_REGS_PER_MBOX_MSG	20
+	u64 reg[MAX_REGS_PER_MBOX_MSG];
+	u64 regval[MAX_REGS_PER_MBOX_MSG];
+};
+
+struct nix_vtag_config {
+	struct mbox_msghdr hdr;
+	/* '0' for 4 octet VTAG, '1' for 8 octet VTAG */
+	u8 vtag_size;
+	/* cfg_type is '0' for tx vlan cfg
+	 * cfg_type is '1' for rx vlan cfg
+	 */
+	u8 cfg_type;
+	union {
+		/* valid when cfg_type is '0' */
+		struct {
+			/* tx vlan0 tag(C-VLAN) */
+			u64 vlan0;
+			/* tx vlan1 tag(S-VLAN) */
+			u64 vlan1;
+			/* insert tx vlan tag */
+			u8 insert_vlan :1;
+			/* insert tx double vlan tag */
+			u8 double_vlan :1;
+		} tx;
+
+		/* valid when cfg_type is '1' */
+		struct {
+			/* rx vtag type index, valid values are in 0..7 range */
+			u8 vtag_type;
+			/* rx vtag strip */
+			u8 strip_vtag :1;
+			/* rx vtag capture */
+			u8 capture_vtag :1;
+		} rx;
+	};
+};
+
+struct nix_rss_flowkey_cfg {
+	struct mbox_msghdr hdr;
+	int	mcam_index;  /* MCAM entry index to modify */
+#define NIX_FLOW_KEY_TYPE_PORT	BIT(0)
+#define NIX_FLOW_KEY_TYPE_IPV4	BIT(1)
+#define NIX_FLOW_KEY_TYPE_IPV6	BIT(2)
+#define NIX_FLOW_KEY_TYPE_TCP	BIT(3)
+#define NIX_FLOW_KEY_TYPE_UDP	BIT(4)
+#define NIX_FLOW_KEY_TYPE_SCTP	BIT(5)
+	u32	flowkey_cfg; /* Flowkey types selected */
+	u8	group;       /* RSS context or group */
+};
+
+struct nix_rss_flowkey_cfg_rsp {
+	struct mbox_msghdr hdr;
+	u8	alg_idx; /* Selected algo index */
+};
+
+struct nix_set_mac_addr {
+	struct mbox_msghdr hdr;
+	u8 mac_addr[ETH_ALEN]; /* MAC address to be set for this pcifunc */
+};
+
+struct nix_mark_format_cfg {
+	struct mbox_msghdr hdr;
+	u8 offset;
+	u8 y_mask;
+	u8 y_val;
+	u8 r_mask;
+	u8 r_val;
+};
+
+struct nix_mark_format_cfg_rsp {
+	struct mbox_msghdr hdr;
+	u8 mark_format_idx;
+};
+
+struct nix_rx_mode {
+	struct mbox_msghdr hdr;
+#define NIX_RX_MODE_UCAST	BIT(0)
+#define NIX_RX_MODE_PROMISC	BIT(1)
+#define NIX_RX_MODE_ALLMULTI	BIT(2)
+	u16	mode;
+};
+
+struct nix_rx_cfg {
+	struct mbox_msghdr hdr;
+#define NIX_RX_OL3_VERIFY   BIT(0)
+#define NIX_RX_OL4_VERIFY   BIT(1)
+	u8 len_verify; /* Outer L3/L4 len check */
+#define NIX_RX_CSUM_OL4_VERIFY  BIT(0)
+	u8 csum_verify; /* Outer L4 checksum verification */
+};
+
+struct nix_frs_cfg {
+	struct mbox_msghdr hdr;
+	u8	update_smq;    /* Update SMQ's min/max lens */
+	u8	update_minlen; /* Set minlen also */
+	u8	sdp_link;      /* Set SDP RX link */
+	u16	maxlen;
+	u16	minlen;
+};
+
+struct nix_lso_format_cfg {
+	struct mbox_msghdr hdr;
+	u64 field_mask;
+#define NIX_LSO_FIELD_MAX	8
+	u64 fields[NIX_LSO_FIELD_MAX];
+};
+
+struct nix_lso_format_cfg_rsp {
+	struct mbox_msghdr hdr;
+	u8 lso_format_idx;
+};
+
+/* NPC mbox message structs */
+
+#define NPC_MCAM_ENTRY_INVALID	0xFFFF
+#define NPC_MCAM_INVALID_MAP	0xFFFF
+
+/* NPC mailbox error codes
+ * Range 701 - 800.
+ */
+enum npc_af_status {
+	NPC_MCAM_INVALID_REQ	= -701,
+	NPC_MCAM_ALLOC_DENIED	= -702,
+	NPC_MCAM_ALLOC_FAILED	= -703,
+	NPC_MCAM_PERM_DENIED	= -704,
+};
+
+struct npc_mcam_alloc_entry_req {
+	struct mbox_msghdr hdr;
+#define NPC_MAX_NONCONTIG_ENTRIES	256
+	u8  contig;   /* Contiguous entries ? */
+#define NPC_MCAM_ANY_PRIO		0
+#define NPC_MCAM_LOWER_PRIO		1
+#define NPC_MCAM_HIGHER_PRIO		2
+	u8  priority; /* Lower or higher w.r.t ref_entry */
+	u16 ref_entry;
+	u16 count;    /* Number of entries requested */
+};
+
+struct npc_mcam_alloc_entry_rsp {
+	struct mbox_msghdr hdr;
+	u16 entry; /* Entry allocated or start index if contiguous.
+		    * Invalid in case of non-contiguous.
+		    */
+	u16 count; /* Number of entries allocated */
+	u16 free_count; /* Number of entries available */
+	u16 entry_list[NPC_MAX_NONCONTIG_ENTRIES];
+};
+
+struct npc_mcam_free_entry_req {
+	struct mbox_msghdr hdr;
+	u16 entry; /* Entry index to be freed */
+	u8  all;   /* If all entries allocated to this PFVF to be freed */
+};
+
+struct mcam_entry {
+#define NPC_MAX_KWS_IN_KEY	7 /* Number of keywords in max keywidth */
+	u64	kw[NPC_MAX_KWS_IN_KEY];
+	u64	kw_mask[NPC_MAX_KWS_IN_KEY];
+	u64	action;
+	u64	vtag_action;
+};
+
+struct npc_mcam_write_entry_req {
+	struct mbox_msghdr hdr;
+	struct mcam_entry entry_data;
+	u16 entry;	 /* MCAM entry to write this match key */
+	u16 cntr;	 /* Counter for this MCAM entry */
+	u8  intf;	 /* Rx or Tx interface */
+	u8  enable_entry;/* Enable this MCAM entry ? */
+	u8  set_cntr;    /* Set counter for this entry ? */
+};
+
+/* Enable/Disable a given entry */
+struct npc_mcam_ena_dis_entry_req {
+	struct mbox_msghdr hdr;
+	u16 entry;
+};
+
+struct npc_mcam_shift_entry_req {
+	struct mbox_msghdr hdr;
+#define NPC_MCAM_MAX_SHIFTS	64
+	u16 curr_entry[NPC_MCAM_MAX_SHIFTS];
+	u16 new_entry[NPC_MCAM_MAX_SHIFTS];
+	u16 shift_count; /* Number of entries to shift */
+};
+
+struct npc_mcam_shift_entry_rsp {
+	struct mbox_msghdr hdr;
+	u16 failed_entry_idx; /* Index in 'curr_entry', not entry itself */
+};
+
+struct npc_mcam_alloc_counter_req {
+	struct mbox_msghdr hdr;
+	u8  contig;	/* Contiguous counters ? */
+#define NPC_MAX_NONCONTIG_COUNTERS       64
+	u16 count;	/* Number of counters requested */
+};
+
+struct npc_mcam_alloc_counter_rsp {
+	struct mbox_msghdr hdr;
+	u16 cntr;   /* Counter allocated or start index if contiguous.
+		     * Invalid in case of non-contiguous.
+		     */
+	u16 count;  /* Number of counters allocated */
+	u16 cntr_list[NPC_MAX_NONCONTIG_COUNTERS];
+};
+
+struct npc_mcam_oper_counter_req {
+	struct mbox_msghdr hdr;
+	u16 cntr;   /* Free a counter or clear/fetch its stats */
+};
+
+struct npc_mcam_oper_counter_rsp {
+	struct mbox_msghdr hdr;
+	u64 stat;  /* valid only while fetching counter's stats */
+};
+
+struct npc_mcam_unmap_counter_req {
+	struct mbox_msghdr hdr;
+	u16 cntr;
+	u16 entry; /* Entry and counter to be unmapped */
+	u8  all;   /* Unmap all entries using this counter ? */
+};
+
+struct npc_mcam_alloc_and_write_entry_req {
+	struct mbox_msghdr hdr;
+	struct mcam_entry entry_data;
+	u16 ref_entry;
+	u8  priority;    /* Lower or higher w.r.t ref_entry */
+	u8  intf;	 /* Rx or Tx interface */
+	u8  enable_entry;/* Enable this MCAM entry ? */
+	u8  alloc_cntr;  /* Allocate counter and map ? */
+};
+
+struct npc_mcam_alloc_and_write_entry_rsp {
+	struct mbox_msghdr hdr;
+	u16 entry;
+	u16 cntr;
+};
+
+struct npc_get_kex_cfg_rsp {
+	struct mbox_msghdr hdr;
+	u64 rx_keyx_cfg;   /* NPC_AF_INTF(0)_KEX_CFG */
+	u64 tx_keyx_cfg;   /* NPC_AF_INTF(1)_KEX_CFG */
+#define NPC_MAX_INTF	2
+#define NPC_MAX_LID	8
+#define NPC_MAX_LT	16
+#define NPC_MAX_LD	2
+#define NPC_MAX_LFL	16
+	/* NPC_AF_KEX_LDATA(0..1)_FLAGS_CFG */
+	u64 kex_ld_flags[NPC_MAX_LD];
+	/* NPC_AF_INTF(0..1)_LID(0..7)_LT(0..15)_LD(0..1)_CFG */
+	u64 intf_lid_lt_ld[NPC_MAX_INTF][NPC_MAX_LID][NPC_MAX_LT][NPC_MAX_LD];
+	/* NPC_AF_INTF(0..1)_LDATA(0..1)_FLAGS(0..15)_CFG */
+	u64 intf_ld_flags[NPC_MAX_INTF][NPC_MAX_LD][NPC_MAX_LFL];
+#define MKEX_NAME_LEN 128
+	u8 mkex_pfl_name[MKEX_NAME_LEN];
+};
+
+#endif /* MBOX_H */
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/npc.h b/drivers/net/ethernet/marvell/octeontx2/af/npc.h
new file mode 100644
index 0000000..5d4df31
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/af/npc.h
@@ -0,0 +1,286 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*  Marvell OcteonTx2 RVU Admin Function driver
+ *
+ * Copyright (C) 2018 Marvell International Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef NPC_H
+#define NPC_H
+
+enum NPC_LID_E {
+	NPC_LID_LA = 0,
+	NPC_LID_LB,
+	NPC_LID_LC,
+	NPC_LID_LD,
+	NPC_LID_LE,
+	NPC_LID_LF,
+	NPC_LID_LG,
+	NPC_LID_LH,
+};
+
+#define NPC_LT_NA 0
+
+enum npc_kpu_la_ltype {
+	NPC_LT_LA_8023 = 1,
+	NPC_LT_LA_ETHER,
+};
+
+enum npc_kpu_lb_ltype {
+	NPC_LT_LB_ETAG = 1,
+	NPC_LT_LB_CTAG,
+	NPC_LT_LB_STAG,
+	NPC_LT_LB_BTAG,
+	NPC_LT_LB_QINQ,
+	NPC_LT_LB_ITAG,
+};
+
+enum npc_kpu_lc_ltype {
+	NPC_LT_LC_IP = 1,
+	NPC_LT_LC_IP6,
+	NPC_LT_LC_ARP,
+	NPC_LT_LC_RARP,
+	NPC_LT_LC_MPLS,
+	NPC_LT_LC_NSH,
+	NPC_LT_LC_PTP,
+	NPC_LT_LC_FCOE,
+};
+
+/* Don't modify Ltypes up to SCTP, otherwise it will
+ * affect flow tag calculation and thus RSS.
+ */
+enum npc_kpu_ld_ltype {
+	NPC_LT_LD_TCP = 1,
+	NPC_LT_LD_UDP,
+	NPC_LT_LD_ICMP,
+	NPC_LT_LD_SCTP,
+	NPC_LT_LD_IGMP,
+	NPC_LT_LD_ICMP6,
+	NPC_LT_LD_ESP,
+	NPC_LT_LD_AH,
+	NPC_LT_LD_GRE,
+	NPC_LT_LD_GRE_MPLS,
+	NPC_LT_LD_GRE_NSH,
+	NPC_LT_LD_TU_MPLS,
+};
+
+enum npc_kpu_le_ltype {
+	NPC_LT_LE_TU_ETHER = 1,
+	NPC_LT_LE_TU_PPP,
+	NPC_LT_LE_TU_MPLS_IN_NSH,
+	NPC_LT_LE_TU_3RD_NSH,
+};
+
+enum npc_kpu_lf_ltype {
+	NPC_LT_LF_TU_IP = 1,
+	NPC_LT_LF_TU_IP6,
+	NPC_LT_LF_TU_ARP,
+	NPC_LT_LF_TU_MPLS_IP,
+	NPC_LT_LF_TU_MPLS_IP6,
+	NPC_LT_LF_TU_MPLS_ETHER,
+};
+
+enum npc_kpu_lg_ltype {
+	NPC_LT_LG_TU_TCP = 1,
+	NPC_LT_LG_TU_UDP,
+	NPC_LT_LG_TU_SCTP,
+	NPC_LT_LG_TU_ICMP,
+	NPC_LT_LG_TU_IGMP,
+	NPC_LT_LG_TU_ICMP6,
+	NPC_LT_LG_TU_ESP,
+	NPC_LT_LG_TU_AH,
+};
+
+enum npc_kpu_lh_ltype {
+	NPC_LT_LH_TCP_DATA = 1,
+	NPC_LT_LH_HTTP_DATA,
+	NPC_LT_LH_HTTPS_DATA,
+	NPC_LT_LH_PPTP_DATA,
+	NPC_LT_LH_UDP_DATA,
+};
+
+struct npc_kpu_profile_cam {
+	u8 state;
+	u8 state_mask;
+	u16 dp0;
+	u16 dp0_mask;
+	u16 dp1;
+	u16 dp1_mask;
+	u16 dp2;
+	u16 dp2_mask;
+};
+
+struct npc_kpu_profile_action {
+	u8 errlev;
+	u8 errcode;
+	u8 dp0_offset;
+	u8 dp1_offset;
+	u8 dp2_offset;
+	u8 bypass_count;
+	u8 parse_done;
+	u8 next_state;
+	u8 ptr_advance;
+	u8 cap_ena;
+	u8 lid;
+	u8 ltype;
+	u8 flags;
+	u8 offset;
+	u8 mask;
+	u8 right;
+	u8 shift;
+};
+
+struct npc_kpu_profile {
+	int cam_entries;
+	int action_entries;
+	struct npc_kpu_profile_cam *cam;
+	struct npc_kpu_profile_action *action;
+};
+
+/* NPC KPU register formats */
+struct npc_kpu_cam {
+#if defined(__BIG_ENDIAN_BITFIELD)
+	u64 rsvd_63_56     : 8;
+	u64 state          : 8;
+	u64 dp2_data       : 16;
+	u64 dp1_data       : 16;
+	u64 dp0_data       : 16;
+#else
+	u64 dp0_data       : 16;
+	u64 dp1_data       : 16;
+	u64 dp2_data       : 16;
+	u64 state          : 8;
+	u64 rsvd_63_56     : 8;
+#endif
+};
+
+struct npc_kpu_action0 {
+#if defined(__BIG_ENDIAN_BITFIELD)
+	u64 rsvd_63_57     : 7;
+	u64 byp_count      : 3;
+	u64 capture_ena    : 1;
+	u64 parse_done     : 1;
+	u64 next_state     : 8;
+	u64 rsvd_43        : 1;
+	u64 capture_lid    : 3;
+	u64 capture_ltype  : 4;
+	u64 capture_flags  : 8;
+	u64 ptr_advance    : 8;
+	u64 var_len_offset : 8;
+	u64 var_len_mask   : 8;
+	u64 var_len_right  : 1;
+	u64 var_len_shift  : 3;
+#else
+	u64 var_len_shift  : 3;
+	u64 var_len_right  : 1;
+	u64 var_len_mask   : 8;
+	u64 var_len_offset : 8;
+	u64 ptr_advance    : 8;
+	u64 capture_flags  : 8;
+	u64 capture_ltype  : 4;
+	u64 capture_lid    : 3;
+	u64 rsvd_43        : 1;
+	u64 next_state     : 8;
+	u64 parse_done     : 1;
+	u64 capture_ena    : 1;
+	u64 byp_count      : 3;
+	u64 rsvd_63_57     : 7;
+#endif
+};
+
+struct npc_kpu_action1 {
+#if defined(__BIG_ENDIAN_BITFIELD)
+	u64 rsvd_63_36     : 28;
+	u64 errlev         : 4;
+	u64 errcode        : 8;
+	u64 dp2_offset     : 8;
+	u64 dp1_offset     : 8;
+	u64 dp0_offset     : 8;
+#else
+	u64 dp0_offset     : 8;
+	u64 dp1_offset     : 8;
+	u64 dp2_offset     : 8;
+	u64 errcode        : 8;
+	u64 errlev         : 4;
+	u64 rsvd_63_36     : 28;
+#endif
+};
+
+struct npc_kpu_pkind_cpi_def {
+#if defined(__BIG_ENDIAN_BITFIELD)
+	u64 ena            : 1;
+	u64 rsvd_62_59     : 4;
+	u64 lid            : 3;
+	u64 ltype_match    : 4;
+	u64 ltype_mask     : 4;
+	u64 flags_match    : 8;
+	u64 flags_mask     : 8;
+	u64 add_offset     : 8;
+	u64 add_mask       : 8;
+	u64 rsvd_15        : 1;
+	u64 add_shift      : 3;
+	u64 rsvd_11_10     : 2;
+	u64 cpi_base       : 10;
+#else
+	u64 cpi_base       : 10;
+	u64 rsvd_11_10     : 2;
+	u64 add_shift      : 3;
+	u64 rsvd_15        : 1;
+	u64 add_mask       : 8;
+	u64 add_offset     : 8;
+	u64 flags_mask     : 8;
+	u64 flags_match    : 8;
+	u64 ltype_mask     : 4;
+	u64 ltype_match    : 4;
+	u64 lid            : 3;
+	u64 rsvd_62_59     : 4;
+	u64 ena            : 1;
+#endif
+};
+
+struct nix_rx_action {
+#if defined(__BIG_ENDIAN_BITFIELD)
+	u64	rsvd_63_61	:3;
+	u64	flow_key_alg	:5;
+	u64	match_id	:16;
+	u64	index		:20;
+	u64	pf_func		:16;
+	u64	op		:4;
+#else
+	u64	op		:4;
+	u64	pf_func		:16;
+	u64	index		:20;
+	u64	match_id	:16;
+	u64	flow_key_alg	:5;
+	u64	rsvd_63_61	:3;
+#endif
+};
+
+/* NIX Receive Vtag Action Structure */
+#define VTAG0_VALID_BIT		BIT_ULL(15)
+#define VTAG0_TYPE_MASK		GENMASK_ULL(14, 12)
+#define VTAG0_LID_MASK		GENMASK_ULL(10, 8)
+#define VTAG0_RELPTR_MASK	GENMASK_ULL(7, 0)
+
+struct npc_mcam_kex {
+	/* MKEX Profile Header */
+	u64 mkex_sign; /* "mcam-kex-profile" (8 bytes/ASCII characters) */
+	u8 name[MKEX_NAME_LEN];   /* MKEX Profile name */
+	u64 cpu_model;   /* Format as profiled by CPU hardware */
+	u64 kpu_version; /* KPU firmware/profile version */
+	u64 reserved; /* Reserved for extension */
+
+	/* MKEX Profile Data */
+	u64 keyx_cfg[NPC_MAX_INTF]; /* NPC_AF_INTF(0..1)_KEX_CFG */
+	/* NPC_AF_KEX_LDATA(0..1)_FLAGS_CFG */
+	u64 kex_ld_flags[NPC_MAX_LD];
+	/* NPC_AF_INTF(0..1)_LID(0..7)_LT(0..15)_LD(0..1)_CFG */
+	u64 intf_lid_lt_ld[NPC_MAX_INTF][NPC_MAX_LID][NPC_MAX_LT][NPC_MAX_LD];
+	/* NPC_AF_INTF(0..1)_LDATA(0..1)_FLAGS(0..15)_CFG */
+	u64 intf_ld_flags[NPC_MAX_INTF][NPC_MAX_LD][NPC_MAX_LFL];
+} __packed;
+
+#endif /* NPC_H */
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/npc_profile.h b/drivers/net/ethernet/marvell/octeontx2/af/npc_profile.h
new file mode 100644
index 0000000..da649f6
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/af/npc_profile.h
@@ -0,0 +1,5709 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*  Marvell OcteonTx2 RVU Admin Function driver
+ *
+ * Copyright (C) 2018 Marvell International Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef NPC_PROFILE_H
+#define NPC_PROFILE_H
+
+#define NPC_ETYPE_IP		0x0800
+#define NPC_ETYPE_IP6		0x86dd
+#define NPC_ETYPE_ARP		0x0806
+#define NPC_ETYPE_RARP		0x8035
+#define NPC_ETYPE_MPLSU		0x8847
+#define NPC_ETYPE_MPLSM		0x8848
+#define NPC_ETYPE_ETAG		0x893f
+#define NPC_ETYPE_CTAG		0x8100
+#define NPC_ETYPE_SBTAG		0x88a8
+#define NPC_ETYPE_ITAG		0x88e7
+#define NPC_ETYPE_PTP		0x88f7
+#define NPC_ETYPE_FCOE		0x8906
+#define NPC_ETYPE_QINQ		0x9100
+#define NPC_ETYPE_TRANS_ETH_BR	0x6558
+#define NPC_ETYPE_PPP		0x880b
+#define NPC_ETYPE_NSH		0x894f
+
+#define NPC_IPNH_HOP		0
+#define NPC_IPNH_ICMP		1
+#define NPC_IPNH_IGMP		2
+#define NPC_IPNH_IP		4
+#define NPC_IPNH_TCP		6
+#define NPC_IPNH_UDP		17
+#define NPC_IPNH_IP6		41
+#define NPC_IPNH_ROUT		43
+#define NPC_IPNH_FRAG		44
+#define NPC_IPNH_GRE		47
+#define NPC_IPNH_ESP		50
+#define NPC_IPNH_AH		51
+#define NPC_IPNH_ICMP6		58
+#define NPC_IPNH_NONH		59
+#define NPC_IPNH_DEST		60
+#define NPC_IPNH_SCTP		132
+#define NPC_IPNH_MPLS		137
+
+#define NPC_UDP_PORT_GTPC	2123
+#define NPC_UDP_PORT_GTPU	2152
+#define NPC_UDP_PORT_VXLAN	4789
+#define NPC_UDP_PORT_VXLANGPE	4790
+#define NPC_UDP_PORT_GENEVE	6081
+
+#define NPC_VXLANGPE_NP_IP	0x1
+#define NPC_VXLANGPE_NP_IP6	0x2
+#define NPC_VXLANGPE_NP_ETH	0x3
+#define NPC_VXLANGPE_NP_NSH	0x4
+#define NPC_VXLANGPE_NP_MPLS	0x5
+#define NPC_VXLANGPE_NP_GBP	0x6
+#define NPC_VXLANGPE_NP_VBNG	0x7
+
+#define NPC_NSH_NP_IP		0x1
+#define NPC_NSH_NP_IP6		0x2
+#define NPC_NSH_NP_ETH		0x3
+#define NPC_NSH_NP_NSH		0x4
+#define NPC_NSH_NP_MPLS		0x5
+
+#define NPC_TCP_PORT_HTTP	80
+#define NPC_TCP_PORT_HTTPS	443
+#define NPC_TCP_PORT_PPTP	1723
+
+#define NPC_MPLS_S		0x0100
+
+#define NPC_IP_VER_4		0x4000
+#define NPC_IP_VER_6		0x6000
+#define NPC_IP_VER_MASK		0xf000
+#define NPC_IP_HDR_LEN_5	0x0500
+#define NPC_IP_HDR_LEN_MASK	0x0f00
+
+#define NPC_GRE_F_CSUM		(0x1 << 15)
+#define NPC_GRE_F_ROUTE		(0x1 << 14)
+#define NPC_GRE_F_KEY		(0x1 << 13)
+#define NPC_GRE_F_SEQ		(0x1 << 12)
+#define NPC_GRE_F_ACK		(0x1 << 7)
+#define NPC_GRE_FLAG_MASK	(NPC_GRE_F_CSUM | NPC_GRE_F_ROUTE | \
+				 NPC_GRE_F_KEY | NPC_GRE_F_SEQ | NPC_GRE_F_ACK)
+#define NPC_GRE_VER_MASK	0x0003
+#define NPC_GRE_VER_1		0x0001
+
+#define NPC_VXLAN_I		0x0800
+
+#define NPC_VXLANGPE_VER	(0x3 << 12)
+#define NPC_VXLANGPE_I		(0x1 << 11)
+#define NPC_VXLANGPE_P		(0x1 << 10)
+#define NPC_VXLANGPE_B		(0x1 << 9)
+#define NPC_VXLANGPE_NP_MASK	0x00ff
+
+#define NPC_NSH_NP_MASK		0x00ff
+
+#define NPC_GENEVE_F_OAM	(0x1 << 7)
+#define NPC_GENEVE_F_CRI_OPT	(0x1 << 6)
+
+#define NPC_GTP_PT_GTP		(0x1 << 12)
+#define NPC_GTP_PT_MASK		(0x1 << 12)
+#define NPC_GTP_VER1		(0x1 << 13)
+#define NPC_GTP_VER_MASK	(0x7 << 13)
+#define NPC_GTP_MT_G_PDU	0xff
+#define NPC_GTP_MT_MASK		0xff
+
+#define NPC_TCP_DATA_OFFSET_5		0x5000
+#define NPC_TCP_DATA_OFFSET_MASK	0xf000
+
+enum npc_kpu_parser_state {
+	NPC_S_NA = 0,
+	NPC_S_KPU1_ETHER,
+	NPC_S_KPU1_PKI,
+	NPC_S_KPU2_CTAG,
+	NPC_S_KPU2_SBTAG,
+	NPC_S_KPU2_QINQ,
+	NPC_S_KPU2_ETAG,
+	NPC_S_KPU2_ITAG,
+	NPC_S_KPU3_CTAG,
+	NPC_S_KPU3_STAG,
+	NPC_S_KPU3_QINQ,
+	NPC_S_KPU3_ITAG,
+	NPC_S_KPU4_MPLS,
+	NPC_S_KPU4_NSH,
+	NPC_S_KPU5_IP,
+	NPC_S_KPU5_IP6,
+	NPC_S_KPU5_ARP,
+	NPC_S_KPU5_RARP,
+	NPC_S_KPU5_PTP,
+	NPC_S_KPU5_FCOE,
+	NPC_S_KPU5_MPLS,
+	NPC_S_KPU5_MPLS_PL,
+	NPC_S_KPU5_NSH,
+	NPC_S_KPU6_IP6_EXT,
+	NPC_S_KPU7_IP6_EXT,
+	NPC_S_KPU8_TCP,
+	NPC_S_KPU8_UDP,
+	NPC_S_KPU8_SCTP,
+	NPC_S_KPU8_ICMP,
+	NPC_S_KPU8_IGMP,
+	NPC_S_KPU8_ICMP6,
+	NPC_S_KPU8_GRE,
+	NPC_S_KPU8_ESP,
+	NPC_S_KPU8_AH,
+	NPC_S_KPU9_TU_MPLS_IN_GRE_VXLAN,
+	NPC_S_KPU9_TU_MPLS,
+	NPC_S_KPU9_TU_NSH,
+	NPC_S_KPU10_TU_MPLS_PL,
+	NPC_S_KPU10_TU_MPLS,
+	NPC_S_KPU10_TU_NSH,
+	NPC_S_KPU11_TU_ETHER,
+	NPC_S_KPU11_TU_PPP,
+	NPC_S_KPU11_TU_MPLS_IN_NSH,
+	NPC_S_KPU11_TU_3RD_NSH,
+	NPC_S_KPU12_TU_IP,
+	NPC_S_KPU12_TU_IP6,
+	NPC_S_KPU12_TU_ARP,
+	NPC_S_KPU13_TU_IP6_EXT,
+	NPC_S_KPU14_TU_IP6_EXT,
+	NPC_S_KPU15_TU_TCP,
+	NPC_S_KPU15_TU_UDP,
+	NPC_S_KPU15_TU_SCTP,
+	NPC_S_KPU15_TU_ICMP,
+	NPC_S_KPU15_TU_IGMP,
+	NPC_S_KPU15_TU_ICMP6,
+	NPC_S_KPU15_TU_ESP,
+	NPC_S_KPU15_TU_AH,
+	NPC_S_KPU16_HTTP_DATA,
+	NPC_S_KPU16_HTTPS_DATA,
+	NPC_S_KPU16_PPTP_DATA,
+	NPC_S_KPU16_TCP_DATA,
+	NPC_S_KPU16_UDP_DATA,
+	NPC_S_LAST /* has to be the last item */
+};
+
+enum npc_kpu_parser_flag {
+	NPC_F_NA = 0,
+	NPC_F_PKI,
+	NPC_F_PKI_VLAN,
+	NPC_F_PKI_ETAG,
+	NPC_F_PKI_ITAG,
+	NPC_F_PKI_MPLS,
+	NPC_F_PKI_NSH,
+	NPC_F_ETYPE_UNK,
+	NPC_F_ETHER_VLAN,
+	NPC_F_ETHER_ETAG,
+	NPC_F_ETHER_ITAG,
+	NPC_F_ETHER_MPLS,
+	NPC_F_ETHER_NSH,
+	NPC_F_STAG_CTAG,
+	NPC_F_STAG_CTAG_UNK,
+	NPC_F_STAG_STAG_CTAG,
+	NPC_F_STAG_STAG_STAG,
+	NPC_F_QINQ_CTAG,
+	NPC_F_QINQ_CTAG_UNK,
+	NPC_F_QINQ_QINQ_CTAG,
+	NPC_F_QINQ_QINQ_QINQ,
+	NPC_F_BTAG_ITAG,
+	NPC_F_BTAG_ITAG_STAG,
+	NPC_F_BTAG_ITAG_CTAG,
+	NPC_F_BTAG_ITAG_UNK,
+	NPC_F_ETAG_CTAG,
+	NPC_F_ETAG_BTAG_ITAG,
+	NPC_F_ETAG_STAG,
+	NPC_F_ETAG_QINQ,
+	NPC_F_ETAG_ITAG,
+	NPC_F_ETAG_ITAG_STAG,
+	NPC_F_ETAG_ITAG_CTAG,
+	NPC_F_ETAG_ITAG_UNK,
+	NPC_F_ITAG_STAG_CTAG,
+	NPC_F_ITAG_STAG,
+	NPC_F_ITAG_CTAG,
+	NPC_F_MPLS_4_LABELS,
+	NPC_F_MPLS_3_LABELS,
+	NPC_F_MPLS_2_LABELS,
+	NPC_F_IP_HAS_OPTIONS,
+	NPC_F_IP_IP_IN_IP,
+	NPC_F_IP_6TO4,
+	NPC_F_IP_MPLS_IN_IP,
+	NPC_F_IP_UNK_PROTO,
+	NPC_F_IP_IP_IN_IP_HAS_OPTIONS,
+	NPC_F_IP_6TO4_HAS_OPTIONS,
+	NPC_F_IP_MPLS_IN_IP_HAS_OPTIONS,
+	NPC_F_IP_UNK_PROTO_HAS_OPTIONS,
+	NPC_F_IP6_HAS_EXT,
+	NPC_F_IP6_TUN_IP6,
+	NPC_F_IP6_MPLS_IN_IP,
+	NPC_F_TCP_HAS_OPTIONS,
+	NPC_F_TCP_HTTP,
+	NPC_F_TCP_HTTPS,
+	NPC_F_TCP_PPTP,
+	NPC_F_TCP_UNK_PORT,
+	NPC_F_TCP_HTTP_HAS_OPTIONS,
+	NPC_F_TCP_HTTPS_HAS_OPTIONS,
+	NPC_F_TCP_PPTP_HAS_OPTIONS,
+	NPC_F_TCP_UNK_PORT_HAS_OPTIONS,
+	NPC_F_UDP_VXLAN,
+	NPC_F_UDP_VXLAN_NOVNI,
+	NPC_F_UDP_VXLAN_NOVNI_NSH,
+	NPC_F_UDP_VXLANGPE,
+	NPC_F_UDP_VXLANGPE_NSH,
+	NPC_F_UDP_VXLANGPE_MPLS,
+	NPC_F_UDP_VXLANGPE_NOVNI,
+	NPC_F_UDP_VXLANGPE_NOVNI_NSH,
+	NPC_F_UDP_VXLANGPE_NOVNI_MPLS,
+	NPC_F_UDP_VXLANGPE_UNK,
+	NPC_F_UDP_VXLANGPE_NONP,
+	NPC_F_UDP_GTP_GTPC,
+	NPC_F_UDP_GTP_GTPU_G_PDU,
+	NPC_F_UDP_GTP_GTPU_UNK,
+	NPC_F_UDP_UNK_PORT,
+	NPC_F_UDP_GENEVE,
+	NPC_F_UDP_GENEVE_OAM,
+	NPC_F_UDP_GENEVE_CRI_OPT,
+	NPC_F_UDP_GENEVE_OAM_CRI_OPT,
+	NPC_F_GRE_NVGRE,
+	NPC_F_GRE_HAS_SRE,
+	NPC_F_GRE_HAS_CSUM,
+	NPC_F_GRE_HAS_KEY,
+	NPC_F_GRE_HAS_SEQ,
+	NPC_F_GRE_HAS_CSUM_KEY,
+	NPC_F_GRE_HAS_CSUM_SEQ,
+	NPC_F_GRE_HAS_KEY_SEQ,
+	NPC_F_GRE_HAS_CSUM_KEY_SEQ,
+	NPC_F_GRE_HAS_ROUTE,
+	NPC_F_GRE_UNK_PROTO,
+	NPC_F_GRE_VER1,
+	NPC_F_GRE_VER1_HAS_SEQ,
+	NPC_F_GRE_VER1_HAS_ACK,
+	NPC_F_GRE_VER1_HAS_SEQ_ACK,
+	NPC_F_GRE_VER1_UNK_PROTO,
+	NPC_F_TU_ETHER_UNK,
+	NPC_F_TU_ETHER_CTAG,
+	NPC_F_TU_ETHER_CTAG_UNK,
+	NPC_F_TU_ETHER_STAG_CTAG,
+	NPC_F_TU_ETHER_STAG_CTAG_UNK,
+	NPC_F_TU_ETHER_STAG,
+	NPC_F_TU_ETHER_STAG_UNK,
+	NPC_F_TU_ETHER_QINQ_CTAG,
+	NPC_F_TU_ETHER_QINQ_CTAG_UNK,
+	NPC_F_TU_ETHER_QINQ,
+	NPC_F_TU_ETHER_QINQ_UNK,
+	NPC_F_LAST /* has to be the last item */
+};
+
+enum npc_kpu_err_code {
+	NPC_EC_NOERR = 0, /* has to be zero */
+	NPC_EC_UNK,
+	NPC_EC_L2_K1,
+	NPC_EC_L2_K2,
+	NPC_EC_L2_K3,
+	NPC_EC_L2_K3_ETYPE_UNK,
+	NPC_EC_L2_MPLS_2MANY,
+	NPC_EC_L2_K4,
+	NPC_EC_IP_VER,
+	NPC_EC_IP6_VER,
+	NPC_EC_VXLAN,
+	NPC_EC_NVGRE,
+	NPC_EC_GRE,
+	NPC_EC_GRE_VER1,
+	NPC_EC_L4,
+	NPC_EC_LAST /* has to be the last item */
+};
+
+enum NPC_ERRLEV_E {
+	NPC_ERRLEV_RE = 0,
+	NPC_ERRLEV_LA = 1,
+	NPC_ERRLEV_LB = 2,
+	NPC_ERRLEV_LC = 3,
+	NPC_ERRLEV_LD = 4,
+	NPC_ERRLEV_LE = 5,
+	NPC_ERRLEV_LF = 6,
+	NPC_ERRLEV_LG = 7,
+	NPC_ERRLEV_LH = 8,
+	NPC_ERRLEV_R9 = 9,
+	NPC_ERRLEV_R10 = 10,
+	NPC_ERRLEV_R11 = 11,
+	NPC_ERRLEV_R12 = 12,
+	NPC_ERRLEV_R13 = 13,
+	NPC_ERRLEV_R14 = 14,
+	NPC_ERRLEV_NIX = 15,
+	NPC_ERRLEV_ENUM_LAST = 16,
+};
+
+static struct npc_kpu_profile_action ikpu_action_entries[] = {
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
+		0, 0, NPC_S_KPU1_ETHER, 0, 0,
+		NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
+		0, 0, NPC_S_KPU1_ETHER, 0, 0,
+		NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
+		0, 0, NPC_S_KPU1_ETHER, 0, 0,
+		NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
+		0, 0, NPC_S_KPU1_ETHER, 0, 0,
+		NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
+		0, 0, NPC_S_KPU1_ETHER, 0, 0,
+		NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
+		0, 0, NPC_S_KPU1_ETHER, 0, 0,
+		NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
+		0, 0, NPC_S_KPU1_ETHER, 0, 0,
+		NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
+		0, 0, NPC_S_KPU1_ETHER, 0, 0,
+		NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
+		0, 0, NPC_S_KPU1_ETHER, 0, 0,
+		NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
+		0, 0, NPC_S_KPU1_ETHER, 0, 0,
+		NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
+		0, 0, NPC_S_KPU1_ETHER, 0, 0,
+		NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
+		0, 0, NPC_S_KPU1_ETHER, 0, 0,
+		NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
+		0, 0, NPC_S_KPU1_ETHER, 0, 0,
+		NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
+		0, 0, NPC_S_KPU1_ETHER, 0, 0,
+		NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
+		0, 0, NPC_S_KPU1_ETHER, 0, 0,
+		NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
+		0, 0, NPC_S_KPU1_ETHER, 0, 0,
+		NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
+		0, 0, NPC_S_KPU1_ETHER, 0, 0,
+		NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
+		0, 0, NPC_S_KPU1_ETHER, 0, 0,
+		NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
+		0, 0, NPC_S_KPU1_ETHER, 0, 0,
+		NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
+		0, 0, NPC_S_KPU1_ETHER, 0, 0,
+		NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
+		0, 0, NPC_S_KPU1_ETHER, 0, 0,
+		NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
+		0, 0, NPC_S_KPU1_ETHER, 0, 0,
+		NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
+		0, 0, NPC_S_KPU1_ETHER, 0, 0,
+		NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
+		0, 0, NPC_S_KPU1_ETHER, 0, 0,
+		NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
+		0, 0, NPC_S_KPU1_ETHER, 0, 0,
+		NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
+		0, 0, NPC_S_KPU1_ETHER, 0, 0,
+		NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
+		0, 0, NPC_S_KPU1_ETHER, 0, 0,
+		NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
+		0, 0, NPC_S_KPU1_ETHER, 0, 0,
+		NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
+		0, 0, NPC_S_KPU1_ETHER, 0, 0,
+		NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
+		0, 0, NPC_S_KPU1_ETHER, 0, 0,
+		NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
+		0, 0, NPC_S_KPU1_ETHER, 0, 0,
+		NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
+		0, 0, NPC_S_KPU1_ETHER, 0, 0,
+		NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
+		0, 0, NPC_S_KPU1_ETHER, 0, 0,
+		NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
+		0, 0, NPC_S_KPU1_ETHER, 0, 0,
+		NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
+		0, 0, NPC_S_KPU1_ETHER, 0, 0,
+		NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
+		0, 0, NPC_S_KPU1_ETHER, 0, 0,
+		NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
+		0, 0, NPC_S_KPU1_ETHER, 0, 0,
+		NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
+		0, 0, NPC_S_KPU1_ETHER, 0, 0,
+		NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
+		0, 0, NPC_S_KPU1_ETHER, 0, 0,
+		NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
+		0, 0, NPC_S_KPU1_ETHER, 0, 0,
+		NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
+		0, 0, NPC_S_KPU1_ETHER, 0, 0,
+		NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
+		0, 0, NPC_S_KPU1_ETHER, 0, 0,
+		NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
+		0, 0, NPC_S_KPU1_ETHER, 0, 0,
+		NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
+		0, 0, NPC_S_KPU1_ETHER, 0, 0,
+		NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
+		0, 0, NPC_S_KPU1_ETHER, 0, 0,
+		NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
+		0, 0, NPC_S_KPU1_ETHER, 0, 0,
+		NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
+		0, 0, NPC_S_KPU1_ETHER, 0, 0,
+		NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
+		0, 0, NPC_S_KPU1_ETHER, 0, 0,
+		NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
+		0, 0, NPC_S_KPU1_ETHER, 0, 0,
+		NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
+		0, 0, NPC_S_KPU1_ETHER, 0, 0,
+		NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
+		0, 0, NPC_S_KPU1_ETHER, 0, 0,
+		NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
+		0, 0, NPC_S_KPU1_ETHER, 0, 0,
+		NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
+		0, 0, NPC_S_KPU1_ETHER, 0, 0,
+		NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
+		0, 0, NPC_S_KPU1_ETHER, 0, 0,
+		NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
+		0, 0, NPC_S_KPU1_ETHER, 0, 0,
+		NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
+		0, 0, NPC_S_KPU1_ETHER, 0, 0,
+		NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
+		0, 0, NPC_S_KPU1_ETHER, 0, 0,
+		NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
+		0, 0, NPC_S_KPU1_ETHER, 0, 0,
+		NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
+		0, 0, NPC_S_KPU1_ETHER, 0, 0,
+		NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
+		0, 0, NPC_S_KPU1_ETHER, 0, 0,
+		NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
+		0, 0, NPC_S_KPU1_ETHER, 0, 0,
+		NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
+		0, 0, NPC_S_KPU1_ETHER, 0, 0,
+		NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
+		0, 0, NPC_S_KPU1_ETHER, 0, 0,
+		NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
+		0, 0, NPC_S_KPU1_ETHER, 0, 0,
+		NPC_LID_LA, NPC_LT_NA, 0, 1, 0xff,
+		0, 0,
+	},
+};
+
+static struct npc_kpu_profile_cam kpu1_cam_entries[] = {
+	{
+		NPC_S_KPU1_ETHER, 0xff, NPC_ETYPE_IP, 0xffff,
+		0x0000, 0x0000, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU1_ETHER, 0xff, NPC_ETYPE_IP6, 0xffff,
+		0x0000, 0x0000, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU1_ETHER, 0xff, NPC_ETYPE_ARP, 0xffff,
+		0x0000, 0x0000, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU1_ETHER, 0xff, NPC_ETYPE_RARP, 0xffff,
+		0x0000, 0x0000, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU1_ETHER, 0xff, NPC_ETYPE_PTP, 0xffff,
+		0x0000, 0x0000, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU1_ETHER, 0xff, NPC_ETYPE_FCOE, 0xffff,
+		0x0000, 0x0000, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU1_ETHER, 0xff, NPC_ETYPE_CTAG, 0xffff,
+		0x0000, 0x0000, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU1_ETHER, 0xff, NPC_ETYPE_SBTAG, 0xffff,
+		0x0000, 0x0000, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU1_ETHER, 0xff, NPC_ETYPE_QINQ, 0xffff,
+		0x0000, 0x0000, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU1_ETHER, 0xff, NPC_ETYPE_ETAG, 0xffff,
+		0x0000, 0x0000, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU1_ETHER, 0xff, NPC_ETYPE_ITAG, 0xffff,
+		0x0000, 0x0000, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU1_ETHER, 0xff, NPC_ETYPE_MPLSU, 0xffff,
+		0x0000, 0x0000, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU1_ETHER, 0xff, NPC_ETYPE_MPLSM, 0xffff,
+		0x0000, 0x0000, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU1_ETHER, 0xff, NPC_ETYPE_NSH, 0xffff,
+		0x0000, 0x0000, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU1_ETHER, 0xff, 0x0000, 0xfc00,
+		0x0000, 0x0000, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU1_ETHER, 0xff, 0x0400, 0xfe00,
+		0x0000, 0x0000, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU1_ETHER, 0xff, 0x0000, 0x0000,
+		0x0000, 0x0000, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU1_PKI, 0xff, NPC_ETYPE_IP, 0xffff,
+		0x0000, 0x0000, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU1_PKI, 0xff, NPC_ETYPE_IP6, 0xffff,
+		0x0000, 0x0000, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU1_PKI, 0xff, NPC_ETYPE_ARP, 0xffff,
+		0x0000, 0x0000, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU1_PKI, 0xff, NPC_ETYPE_RARP, 0xffff,
+		0x0000, 0x0000, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU1_PKI, 0xff, NPC_ETYPE_PTP, 0xffff,
+		0x0000, 0x0000, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU1_PKI, 0xff, NPC_ETYPE_FCOE, 0xffff,
+		0x0000, 0x0000, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU1_PKI, 0xff, NPC_ETYPE_CTAG, 0xffff,
+		0x0000, 0x0000, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU1_PKI, 0xff, NPC_ETYPE_SBTAG, 0xffff,
+		0x0000, 0x0000, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU1_PKI, 0xff, NPC_ETYPE_QINQ, 0xffff,
+		0x0000, 0x0000, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU1_PKI, 0xff, NPC_ETYPE_ETAG, 0xffff,
+		0x0000, 0x0000, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU1_PKI, 0xff, NPC_ETYPE_ITAG, 0xffff,
+		0x0000, 0x0000, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU1_PKI, 0xff, NPC_ETYPE_MPLSU, 0xffff,
+		0x0010, 0x0010, 0x0000, 0xffff,
+	},
+	{
+		NPC_S_KPU1_PKI, 0xff, NPC_ETYPE_MPLSM, 0xffff,
+		0x0010, 0x0010, 0x0000, 0xffff,
+	},
+	{
+		NPC_S_KPU1_PKI, 0xff, NPC_ETYPE_NSH, 0xffff,
+		0x0000, 0x0000, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU1_PKI, 0xff, 0x0000, 0x0000,
+		0x0000, 0x0000, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_NA, 0x00, 0x0000, 0x0000,
+		0x0000, 0x0000, 0x0000, 0x0000,
+	},
+};
+
+static struct npc_kpu_profile_cam kpu2_cam_entries[] = {
+	{
+		NPC_S_KPU2_CTAG, 0xff, NPC_ETYPE_IP, 0xffff,
+		0x0000, 0x0000, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU2_CTAG, 0xff, NPC_ETYPE_IP6, 0xffff,
+		0x0000, 0x0000, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU2_CTAG, 0xff, NPC_ETYPE_ARP, 0xffff,
+		0x0000, 0x0000, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU2_CTAG, 0xff, NPC_ETYPE_RARP, 0xffff,
+		0x0000, 0x0000, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU2_CTAG, 0xff, NPC_ETYPE_PTP, 0xffff,
+		0x0000, 0x0000, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU2_CTAG, 0xff, NPC_ETYPE_FCOE, 0xffff,
+		0x0000, 0x0000, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU2_CTAG, 0xff, NPC_ETYPE_MPLSU, 0xffff,
+		0x0000, 0x0000, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU2_CTAG, 0xff, NPC_ETYPE_MPLSM, 0xffff,
+		0x0000, 0x0000, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU2_CTAG, 0xff, NPC_ETYPE_NSH, 0xffff,
+		0x0000, 0x0000, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU2_CTAG, 0xff, 0x0000, 0x0000,
+		0x0000, 0x0000, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU2_SBTAG, 0xff, NPC_ETYPE_CTAG, 0xffff,
+		NPC_ETYPE_IP, 0xffff, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU2_SBTAG, 0xff, NPC_ETYPE_CTAG, 0xffff,
+		NPC_ETYPE_IP6, 0xffff, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU2_SBTAG, 0xff, NPC_ETYPE_CTAG, 0xffff,
+		NPC_ETYPE_ARP, 0xffff, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU2_SBTAG, 0xff, NPC_ETYPE_CTAG, 0xffff,
+		NPC_ETYPE_RARP, 0xffff, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU2_SBTAG, 0xff, NPC_ETYPE_CTAG, 0xffff,
+		NPC_ETYPE_PTP, 0xffff, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU2_SBTAG, 0xff, NPC_ETYPE_CTAG, 0xffff,
+		NPC_ETYPE_FCOE, 0xffff, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU2_SBTAG, 0xff, NPC_ETYPE_CTAG, 0xffff,
+		NPC_ETYPE_MPLSU, 0xffff, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU2_SBTAG, 0xff, NPC_ETYPE_CTAG, 0xffff,
+		NPC_ETYPE_MPLSM, 0xffff, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU2_SBTAG, 0xff, NPC_ETYPE_CTAG, 0xffff,
+		NPC_ETYPE_NSH, 0xffff, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU2_SBTAG, 0xff, NPC_ETYPE_CTAG, 0xffff,
+		0x0000, 0x0000, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU2_SBTAG, 0xff, NPC_ETYPE_SBTAG, 0xffff,
+		NPC_ETYPE_CTAG, 0xffff, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU2_SBTAG, 0xff, NPC_ETYPE_SBTAG, 0xffff,
+		NPC_ETYPE_SBTAG, 0xffff, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU2_SBTAG, 0xff, NPC_ETYPE_ITAG, 0xffff,
+		0x0000, 0x0000, NPC_ETYPE_IP, 0xffff,
+	},
+	{
+		NPC_S_KPU2_SBTAG, 0xff, NPC_ETYPE_ITAG, 0xffff,
+		0x0000, 0x0000, NPC_ETYPE_IP6, 0xffff,
+	},
+	{
+		NPC_S_KPU2_SBTAG, 0xff, NPC_ETYPE_ITAG, 0xffff,
+		0x0000, 0x0000, NPC_ETYPE_ARP, 0xffff,
+	},
+	{
+		NPC_S_KPU2_SBTAG, 0xff, NPC_ETYPE_ITAG, 0xffff,
+		0x0000, 0x0000, NPC_ETYPE_RARP, 0xffff,
+	},
+	{
+		NPC_S_KPU2_SBTAG, 0xff, NPC_ETYPE_ITAG, 0xffff,
+		0x0000, 0x0000, NPC_ETYPE_PTP, 0xffff,
+	},
+	{
+		NPC_S_KPU2_SBTAG, 0xff, NPC_ETYPE_ITAG, 0xffff,
+		0x0000, 0x0000, NPC_ETYPE_FCOE, 0xffff,
+	},
+	{
+		NPC_S_KPU2_SBTAG, 0xff, NPC_ETYPE_ITAG, 0xffff,
+		0x0000, 0x0000, NPC_ETYPE_MPLSU, 0xffff,
+	},
+	{
+		NPC_S_KPU2_SBTAG, 0xff, NPC_ETYPE_ITAG, 0xffff,
+		0x0000, 0x0000, NPC_ETYPE_MPLSM, 0xffff,
+	},
+	{
+		NPC_S_KPU2_SBTAG, 0xff, NPC_ETYPE_ITAG, 0xffff,
+		0x0000, 0x0000, NPC_ETYPE_NSH, 0xffff,
+	},
+	{
+		NPC_S_KPU2_SBTAG, 0xff, NPC_ETYPE_ITAG, 0xffff,
+		0x0000, 0x0000, NPC_ETYPE_SBTAG, 0xffff,
+	},
+	{
+		NPC_S_KPU2_SBTAG, 0xff, NPC_ETYPE_ITAG, 0xffff,
+		0x0000, 0x0000, NPC_ETYPE_CTAG, 0xffff,
+	},
+	{
+		NPC_S_KPU2_SBTAG, 0xff, NPC_ETYPE_ITAG, 0xffff,
+		0x0000, 0x0000, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU2_SBTAG, 0xff, NPC_ETYPE_IP, 0xffff,
+		0x0000, 0x0000, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU2_SBTAG, 0xff, NPC_ETYPE_IP6, 0xffff,
+		0x0000, 0x0000, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU2_SBTAG, 0xff, NPC_ETYPE_ARP, 0xffff,
+		0x0000, 0x0000, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU2_SBTAG, 0xff, NPC_ETYPE_RARP, 0xffff,
+		0x0000, 0x0000, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU2_SBTAG, 0xff, NPC_ETYPE_PTP, 0xffff,
+		0x0000, 0x0000, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU2_SBTAG, 0xff, NPC_ETYPE_FCOE, 0xffff,
+		0x0000, 0x0000, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU2_SBTAG, 0xff, NPC_ETYPE_MPLSU, 0xffff,
+		0x0000, 0x0000, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU2_SBTAG, 0xff, NPC_ETYPE_MPLSM, 0xffff,
+		0x0000, 0x0000, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU2_SBTAG, 0xff, NPC_ETYPE_NSH, 0xffff,
+		0x0000, 0x0000, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU2_SBTAG, 0xff, 0x0000, 0x0000,
+		0x0000, 0x0000, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU2_QINQ, 0xff, NPC_ETYPE_CTAG, 0xffff,
+		NPC_ETYPE_IP, 0xffff, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU2_QINQ, 0xff, NPC_ETYPE_CTAG, 0xffff,
+		NPC_ETYPE_IP6, 0xffff, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU2_QINQ, 0xff, NPC_ETYPE_CTAG, 0xffff,
+		NPC_ETYPE_ARP, 0xffff, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU2_QINQ, 0xff, NPC_ETYPE_CTAG, 0xffff,
+		NPC_ETYPE_RARP, 0xffff, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU2_QINQ, 0xff, NPC_ETYPE_CTAG, 0xffff,
+		NPC_ETYPE_PTP, 0xffff, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU2_QINQ, 0xff, NPC_ETYPE_CTAG, 0xffff,
+		NPC_ETYPE_FCOE, 0xffff, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU2_QINQ, 0xff, NPC_ETYPE_CTAG, 0xffff,
+		NPC_ETYPE_MPLSU, 0xffff, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU2_QINQ, 0xff, NPC_ETYPE_CTAG, 0xffff,
+		NPC_ETYPE_MPLSM, 0xffff, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU2_QINQ, 0xff, NPC_ETYPE_CTAG, 0xffff,
+		NPC_ETYPE_NSH, 0xffff, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU2_QINQ, 0xff, NPC_ETYPE_CTAG, 0xffff,
+		0x0000, 0x0000, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU2_QINQ, 0xff, NPC_ETYPE_QINQ, 0xffff,
+		NPC_ETYPE_CTAG, 0xffff, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU2_QINQ, 0xff, NPC_ETYPE_QINQ, 0xffff,
+		NPC_ETYPE_QINQ, 0xffff, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU2_QINQ, 0xff, NPC_ETYPE_IP, 0xffff,
+		0x0000, 0x0000, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU2_QINQ, 0xff, NPC_ETYPE_IP6, 0xffff,
+		0x0000, 0x0000, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU2_QINQ, 0xff, NPC_ETYPE_ARP, 0xffff,
+		0x0000, 0x0000, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU2_QINQ, 0xff, NPC_ETYPE_RARP, 0xffff,
+		0x0000, 0x0000, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU2_QINQ, 0xff, NPC_ETYPE_PTP, 0xffff,
+		0x0000, 0x0000, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU2_QINQ, 0xff, NPC_ETYPE_FCOE, 0xffff,
+		0x0000, 0x0000, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU2_QINQ, 0xff, NPC_ETYPE_MPLSU, 0xffff,
+		0x0000, 0x0000, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU2_QINQ, 0xff, NPC_ETYPE_MPLSM, 0xffff,
+		0x0000, 0x0000, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU2_QINQ, 0xff, NPC_ETYPE_NSH, 0xffff,
+		0x0000, 0x0000, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU2_QINQ, 0xff, 0x0000, 0x0000,
+		0x0000, 0x0000, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU2_ETAG, 0xff, NPC_ETYPE_IP, 0xffff,
+		0x0000, 0x0000, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU2_ETAG, 0xff, NPC_ETYPE_IP6, 0xffff,
+		0x0000, 0x0000, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU2_ETAG, 0xff, NPC_ETYPE_ARP, 0xffff,
+		0x0000, 0x0000, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU2_ETAG, 0xff, NPC_ETYPE_RARP, 0xffff,
+		0x0000, 0x0000, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU2_ETAG, 0xff, NPC_ETYPE_PTP, 0xffff,
+		0x0000, 0x0000, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU2_ETAG, 0xff, NPC_ETYPE_FCOE, 0xffff,
+		0x0000, 0x0000, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU2_ETAG, 0xff, NPC_ETYPE_MPLSU, 0xffff,
+		0x0000, 0x0000, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU2_ETAG, 0xff, NPC_ETYPE_MPLSM, 0xffff,
+		0x0000, 0x0000, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU2_ETAG, 0xff, NPC_ETYPE_NSH, 0xffff,
+		0x0000, 0x0000, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU2_ETAG, 0xff, NPC_ETYPE_CTAG, 0xffff,
+		0x0000, 0x0000, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU2_ETAG, 0xff, NPC_ETYPE_SBTAG, 0xffff,
+		NPC_ETYPE_ITAG, 0xffff, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU2_ETAG, 0xff, NPC_ETYPE_SBTAG, 0xffff,
+		0x0000, 0x0000, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU2_ETAG, 0xff, NPC_ETYPE_QINQ, 0xffff,
+		0x0000, 0x0000, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU2_ETAG, 0xff, NPC_ETYPE_ITAG, 0xffff,
+		0x0000, 0x0000, NPC_ETYPE_IP, 0xffff,
+	},
+	{
+		NPC_S_KPU2_ETAG, 0xff, NPC_ETYPE_ITAG, 0xffff,
+		0x0000, 0x0000, NPC_ETYPE_IP6, 0xffff,
+	},
+	{
+		NPC_S_KPU2_ETAG, 0xff, NPC_ETYPE_ITAG, 0xffff,
+		0x0000, 0x0000, NPC_ETYPE_ARP, 0xffff,
+	},
+	{
+		NPC_S_KPU2_ETAG, 0xff, NPC_ETYPE_ITAG, 0xffff,
+		0x0000, 0x0000, NPC_ETYPE_SBTAG, 0xffff,
+	},
+	{
+		NPC_S_KPU2_ETAG, 0xff, NPC_ETYPE_ITAG, 0xffff,
+		0x0000, 0x0000, NPC_ETYPE_CTAG, 0xffff,
+	},
+	{
+		NPC_S_KPU2_ETAG, 0xff, NPC_ETYPE_ITAG, 0xffff,
+		0x0000, 0x0000, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU2_ETAG, 0xff, 0x0000, 0x0000,
+		0x0000, 0x0000, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU2_ITAG, 0xff, NPC_ETYPE_IP, 0xffff,
+		0x0000, 0x0000, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU2_ITAG, 0xff, NPC_ETYPE_IP6, 0xffff,
+		0x0000, 0x0000, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU2_ITAG, 0xff, NPC_ETYPE_ARP, 0xffff,
+		0x0000, 0x0000, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU2_ITAG, 0xff, NPC_ETYPE_RARP, 0xffff,
+		0x0000, 0x0000, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU2_ITAG, 0xff, NPC_ETYPE_SBTAG, 0xffff,
+		NPC_ETYPE_CTAG, 0xffff, NPC_ETYPE_IP, 0xffff,
+	},
+	{
+		NPC_S_KPU2_ITAG, 0xff, NPC_ETYPE_SBTAG, 0xffff,
+		NPC_ETYPE_CTAG, 0xffff, NPC_ETYPE_IP6, 0xffff,
+	},
+	{
+		NPC_S_KPU2_ITAG, 0xff, NPC_ETYPE_SBTAG, 0xffff,
+		NPC_ETYPE_CTAG, 0xffff, NPC_ETYPE_ARP, 0xffff,
+	},
+	{
+		NPC_S_KPU2_ITAG, 0xff, NPC_ETYPE_SBTAG, 0xffff,
+		NPC_ETYPE_CTAG, 0xffff, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU2_ITAG, 0xff, NPC_ETYPE_SBTAG, 0xffff,
+		NPC_ETYPE_IP, 0xffff, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU2_ITAG, 0xff, NPC_ETYPE_SBTAG, 0xffff,
+		NPC_ETYPE_IP6, 0xffff, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU2_ITAG, 0xff, NPC_ETYPE_SBTAG, 0xffff,
+		NPC_ETYPE_ARP, 0xffff, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU2_ITAG, 0xff, NPC_ETYPE_SBTAG, 0xffff,
+		0x0000, 0x0000, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU2_ITAG, 0xff, NPC_ETYPE_CTAG, 0xffff,
+		NPC_ETYPE_IP, 0xffff, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU2_ITAG, 0xff, NPC_ETYPE_CTAG, 0xffff,
+		NPC_ETYPE_IP6, 0xffff, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU2_ITAG, 0xff, NPC_ETYPE_CTAG, 0xffff,
+		NPC_ETYPE_ARP, 0xffff, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU2_ITAG, 0xff, NPC_ETYPE_CTAG, 0xffff,
+		0x0000, 0x0000, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU2_ITAG, 0xff, 0x0000, 0x0000,
+		0x0000, 0x0000, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_NA, 0x00, 0x0000, 0x0000,
+		0x0000, 0x0000, 0x0000, 0x0000,
+	},
+};
+
+static struct npc_kpu_profile_cam kpu3_cam_entries[] = {
+	{
+		NPC_S_KPU3_CTAG, 0xff, NPC_ETYPE_IP, 0xffff,
+		0x0000, 0x0000, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU3_CTAG, 0xff, NPC_ETYPE_IP6, 0xffff,
+		0x0000, 0x0000, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU3_CTAG, 0xff, NPC_ETYPE_ARP, 0xffff,
+		0x0000, 0x0000, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU3_CTAG, 0xff, NPC_ETYPE_RARP, 0xffff,
+		0x0000, 0x0000, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU3_CTAG, 0xff, NPC_ETYPE_PTP, 0xffff,
+		0x0000, 0x0000, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU3_CTAG, 0xff, NPC_ETYPE_FCOE, 0xffff,
+		0x0000, 0x0000, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU3_CTAG, 0xff, NPC_ETYPE_MPLSU, 0xffff,
+		0x0000, 0x0000, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU3_CTAG, 0xff, NPC_ETYPE_MPLSM, 0xffff,
+		0x0000, 0x0000, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU3_CTAG, 0xff, NPC_ETYPE_NSH, 0xffff,
+		0x0000, 0x0000, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU3_CTAG, 0xff, 0x0000, 0x0000,
+		0x0000, 0x0000, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU3_STAG, 0xff, NPC_ETYPE_CTAG, 0xffff,
+		NPC_ETYPE_IP, 0xffff, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU3_STAG, 0xff, NPC_ETYPE_CTAG, 0xffff,
+		NPC_ETYPE_IP6, 0xffff, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU3_STAG, 0xff, NPC_ETYPE_CTAG, 0xffff,
+		NPC_ETYPE_ARP, 0xffff, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU3_STAG, 0xff, NPC_ETYPE_CTAG, 0xffff,
+		NPC_ETYPE_RARP, 0xffff, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU3_STAG, 0xff, NPC_ETYPE_CTAG, 0xffff,
+		NPC_ETYPE_PTP, 0xffff, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU3_STAG, 0xff, NPC_ETYPE_CTAG, 0xffff,
+		NPC_ETYPE_FCOE, 0xffff, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU3_STAG, 0xff, NPC_ETYPE_CTAG, 0xffff,
+		NPC_ETYPE_MPLSU, 0xffff, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU3_STAG, 0xff, NPC_ETYPE_CTAG, 0xffff,
+		NPC_ETYPE_MPLSM, 0xffff, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU3_STAG, 0xff, NPC_ETYPE_CTAG, 0xffff,
+		NPC_ETYPE_NSH, 0xffff, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU3_STAG, 0xff, NPC_ETYPE_IP, 0xffff,
+		0x0000, 0x0000, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU3_STAG, 0xff, NPC_ETYPE_IP6, 0xffff,
+		0x0000, 0x0000, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU3_STAG, 0xff, NPC_ETYPE_ARP, 0xffff,
+		0x0000, 0x0000, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU3_STAG, 0xff, NPC_ETYPE_RARP, 0xffff,
+		0x0000, 0x0000, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU3_STAG, 0xff, NPC_ETYPE_MPLSU, 0xffff,
+		0x0000, 0x0000, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU3_STAG, 0xff, NPC_ETYPE_MPLSM, 0xffff,
+		0x0000, 0x0000, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU3_STAG, 0xff, NPC_ETYPE_NSH, 0xffff,
+		0x0000, 0x0000, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU3_STAG, 0xff, 0x0000, 0x0000,
+		0x0000, 0x0000, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU3_QINQ, 0xff, NPC_ETYPE_CTAG, 0xffff,
+		NPC_ETYPE_IP, 0xffff, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU3_QINQ, 0xff, NPC_ETYPE_CTAG, 0xffff,
+		NPC_ETYPE_IP6, 0xffff, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU3_QINQ, 0xff, NPC_ETYPE_CTAG, 0xffff,
+		NPC_ETYPE_ARP, 0xffff, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU3_QINQ, 0xff, NPC_ETYPE_CTAG, 0xffff,
+		NPC_ETYPE_RARP, 0xffff, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU3_QINQ, 0xff, NPC_ETYPE_CTAG, 0xffff,
+		NPC_ETYPE_PTP, 0xffff, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU3_QINQ, 0xff, NPC_ETYPE_CTAG, 0xffff,
+		NPC_ETYPE_FCOE, 0xffff, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU3_QINQ, 0xff, NPC_ETYPE_CTAG, 0xffff,
+		NPC_ETYPE_MPLSU, 0xffff, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU3_QINQ, 0xff, NPC_ETYPE_CTAG, 0xffff,
+		NPC_ETYPE_MPLSM, 0xffff, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU3_QINQ, 0xff, NPC_ETYPE_CTAG, 0xffff,
+		NPC_ETYPE_NSH, 0xffff, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU3_QINQ, 0xff, NPC_ETYPE_IP, 0xffff,
+		0x0000, 0x0000, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU3_QINQ, 0xff, NPC_ETYPE_IP6, 0xffff,
+		0x0000, 0x0000, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU3_QINQ, 0xff, NPC_ETYPE_ARP, 0xffff,
+		0x0000, 0x0000, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU3_QINQ, 0xff, NPC_ETYPE_RARP, 0xffff,
+		0x0000, 0x0000, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU3_QINQ, 0xff, NPC_ETYPE_PTP, 0xffff,
+		0x0000, 0x0000, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU3_QINQ, 0xff, NPC_ETYPE_FCOE, 0xffff,
+		0x0000, 0x0000, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU3_QINQ, 0xff, NPC_ETYPE_MPLSU, 0xffff,
+		0x0000, 0x0000, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU3_QINQ, 0xff, NPC_ETYPE_MPLSM, 0xffff,
+		0x0000, 0x0000, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU3_QINQ, 0xff, NPC_ETYPE_NSH, 0xffff,
+		0x0000, 0x0000, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU3_QINQ, 0xff, 0x0000, 0x0000,
+		0x0000, 0x0000, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU3_ITAG, 0xff, NPC_ETYPE_IP, 0xffff,
+		0x0000, 0x0000, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU3_ITAG, 0xff, NPC_ETYPE_IP6, 0xffff,
+		0x0000, 0x0000, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU3_ITAG, 0xff, NPC_ETYPE_ARP, 0xffff,
+		0x0000, 0x0000, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU3_ITAG, 0xff, NPC_ETYPE_RARP, 0xffff,
+		0x0000, 0x0000, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU3_ITAG, 0xff, NPC_ETYPE_SBTAG, 0xffff,
+		NPC_ETYPE_CTAG, 0xffff, NPC_ETYPE_IP, 0xffff,
+	},
+	{
+		NPC_S_KPU3_ITAG, 0xff, NPC_ETYPE_SBTAG, 0xffff,
+		NPC_ETYPE_CTAG, 0xffff, NPC_ETYPE_IP6, 0xffff,
+	},
+	{
+		NPC_S_KPU3_ITAG, 0xff, NPC_ETYPE_SBTAG, 0xffff,
+		NPC_ETYPE_CTAG, 0xffff, NPC_ETYPE_ARP, 0xffff,
+	},
+	{
+		NPC_S_KPU3_ITAG, 0xff, NPC_ETYPE_SBTAG, 0xffff,
+		NPC_ETYPE_IP, 0xffff, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU3_ITAG, 0xff, NPC_ETYPE_SBTAG, 0xffff,
+		NPC_ETYPE_IP6, 0xffff, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU3_ITAG, 0xff, NPC_ETYPE_SBTAG, 0xffff,
+		NPC_ETYPE_ARP, 0xffff, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU3_ITAG, 0xff, NPC_ETYPE_SBTAG, 0xffff,
+		NPC_ETYPE_CTAG, 0xffff, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU3_ITAG, 0xff, NPC_ETYPE_SBTAG, 0xffff,
+		0x0000, 0x0000, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU3_ITAG, 0xff, NPC_ETYPE_CTAG, 0xffff,
+		NPC_ETYPE_IP, 0xffff, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU3_ITAG, 0xff, NPC_ETYPE_CTAG, 0xffff,
+		NPC_ETYPE_IP6, 0xffff, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU3_ITAG, 0xff, NPC_ETYPE_CTAG, 0xffff,
+		NPC_ETYPE_ARP, 0xffff, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU3_ITAG, 0xff, NPC_ETYPE_CTAG, 0xffff,
+		0x0000, 0x0000, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU3_ITAG, 0xff, 0x0000, 0x0000,
+		0x0000, 0x0000, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_NA, 0x00, 0x0000, 0x0000,
+		0x0000, 0x0000, 0x0000, 0x0000,
+	},
+};
+
+static struct npc_kpu_profile_cam kpu4_cam_entries[] = {
+	{
+		NPC_S_KPU4_MPLS, 0xff, NPC_MPLS_S, NPC_MPLS_S,
+		0x0000, 0x0000, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU4_MPLS, 0xff, 0x0000, NPC_MPLS_S,
+		NPC_MPLS_S, NPC_MPLS_S, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU4_MPLS, 0xff, 0x0000, NPC_MPLS_S,
+		0x0000, NPC_MPLS_S, NPC_MPLS_S, NPC_MPLS_S,
+	},
+	{
+		NPC_S_KPU4_MPLS, 0xff, 0x0000, NPC_MPLS_S,
+		0x0000, NPC_MPLS_S, 0x0000, NPC_MPLS_S,
+	},
+	{
+		NPC_S_KPU4_NSH, 0xff, NPC_NSH_NP_IP, NPC_NSH_NP_MASK,
+		0x0000, 0x0000, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU4_NSH, 0xff, NPC_NSH_NP_IP6, NPC_NSH_NP_MASK,
+		0x0000, 0x0000, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU4_NSH, 0xff, NPC_NSH_NP_ETH, NPC_NSH_NP_MASK,
+		0x0000, 0x0000, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU4_NSH, 0xff, NPC_NSH_NP_NSH, NPC_NSH_NP_MASK,
+		0x0000, 0x0000, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU4_NSH, 0xff, NPC_NSH_NP_MPLS, NPC_NSH_NP_MASK,
+		0x0000, 0x0000, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_NA, 0x00, 0x0000, 0x0000,
+		0x0000, 0x0000, 0x0000, 0x0000,
+	},
+};
+
+static struct npc_kpu_profile_cam kpu5_cam_entries[] = {
+	{
+		NPC_S_KPU5_IP, 0xff, NPC_IPNH_TCP, 0x00ff,
+		NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
+		NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU5_IP, 0xff, NPC_IPNH_UDP, 0x00ff,
+		NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
+		NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU5_IP, 0xff, NPC_IPNH_SCTP, 0x00ff,
+		NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
+		NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU5_IP, 0xff, NPC_IPNH_ICMP, 0x00ff,
+		NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
+		NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU5_IP, 0xff, NPC_IPNH_IGMP, 0x00ff,
+		NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
+		NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU5_IP, 0xff, NPC_IPNH_ESP, 0x00ff,
+		NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
+		NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU5_IP, 0xff, NPC_IPNH_AH, 0x00ff,
+		NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
+		NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU5_IP, 0xff, NPC_IPNH_GRE, 0x00ff,
+		NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
+		NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU5_IP, 0xff, NPC_IPNH_IP, 0x00ff,
+		NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
+		NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU5_IP, 0xff, NPC_IPNH_IP6, 0x00ff,
+		NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
+		NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU5_IP, 0xff, NPC_IPNH_MPLS, 0x00ff,
+		NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
+		NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU5_IP, 0xff, 0x0000, 0x0000,
+		NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
+		NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU5_IP, 0xff, NPC_IPNH_TCP, 0x00ff,
+		NPC_IP_VER_4, NPC_IP_VER_MASK, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU5_IP, 0xff, NPC_IPNH_UDP, 0x00ff,
+		NPC_IP_VER_4, NPC_IP_VER_MASK, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU5_IP, 0xff, NPC_IPNH_SCTP, 0x00ff,
+		NPC_IP_VER_4, NPC_IP_VER_MASK, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU5_IP, 0xff, NPC_IPNH_ICMP, 0x00ff,
+		NPC_IP_VER_4, NPC_IP_VER_MASK, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU5_IP, 0xff, NPC_IPNH_IGMP, 0x00ff,
+		NPC_IP_VER_4, NPC_IP_VER_MASK, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU5_IP, 0xff, NPC_IPNH_ESP, 0x00ff,
+		NPC_IP_VER_4, NPC_IP_VER_MASK, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU5_IP, 0xff, NPC_IPNH_AH, 0x00ff,
+		NPC_IP_VER_4, NPC_IP_VER_MASK, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU5_IP, 0xff, NPC_IPNH_GRE, 0x00ff,
+		NPC_IP_VER_4, NPC_IP_VER_MASK, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU5_IP, 0xff, NPC_IPNH_IP, 0x00ff,
+		NPC_IP_VER_4, NPC_IP_VER_MASK, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU5_IP, 0xff, NPC_IPNH_IP6, 0x00ff,
+		NPC_IP_VER_4, NPC_IP_VER_MASK, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU5_IP, 0xff, NPC_IPNH_MPLS, 0x00ff,
+		NPC_IP_VER_4, NPC_IP_VER_MASK, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU5_IP, 0xff, 0x0000, 0x0000,
+		NPC_IP_VER_4, NPC_IP_VER_MASK, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU5_IP, 0xff, 0x0000, 0x0000,
+		0x0000, 0x0000, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU5_ARP, 0xff, 0x0000, 0x0000,
+		0x0000, 0x0000, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU5_RARP, 0xff, 0x0000, 0x0000,
+		0x0000, 0x0000, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU5_PTP, 0xff, 0x0000, 0x0000,
+		0x0000, 0x0000, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU5_FCOE, 0xff, 0x0000, 0x0000,
+		0x0000, 0x0000, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU5_IP6, 0xff, NPC_IPNH_TCP << 8, 0xff00,
+		NPC_IP_VER_6, NPC_IP_VER_MASK, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU5_IP6, 0xff, NPC_IPNH_UDP << 8, 0xff00,
+		NPC_IP_VER_6, NPC_IP_VER_MASK, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU5_IP6, 0xff, NPC_IPNH_SCTP << 8, 0xff00,
+		NPC_IP_VER_6, NPC_IP_VER_MASK, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU5_IP6, 0xff, NPC_IPNH_ICMP << 8, 0xff00,
+		NPC_IP_VER_6, NPC_IP_VER_MASK, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU5_IP6, 0xff, NPC_IPNH_ICMP6 << 8, 0xff00,
+		NPC_IP_VER_6, NPC_IP_VER_MASK, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU5_IP6, 0xff, NPC_IPNH_ESP << 8, 0xff00,
+		NPC_IP_VER_6, NPC_IP_VER_MASK, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU5_IP6, 0xff, NPC_IPNH_AH << 8, 0xff00,
+		NPC_IP_VER_6, NPC_IP_VER_MASK, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU5_IP6, 0xff, NPC_IPNH_GRE << 8, 0xff00,
+		NPC_IP_VER_6, NPC_IP_VER_MASK, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU5_IP6, 0xff, NPC_IPNH_IP6 << 8, 0xff00,
+		NPC_IP_VER_6, NPC_IP_VER_MASK, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU5_IP6, 0xff, NPC_IPNH_MPLS << 8, 0xff00,
+		NPC_IP_VER_6, NPC_IP_VER_MASK, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU5_IP6, 0xff, 0x0000, 0x0000,
+		NPC_IP_VER_6, NPC_IP_VER_MASK, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU5_IP6, 0xff, 0x0000, 0x0000,
+		0x0000, 0x0000, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU5_MPLS, 0xff, NPC_MPLS_S, NPC_MPLS_S,
+		NPC_IP_VER_4, NPC_IP_VER_MASK, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU5_MPLS, 0xff, NPC_MPLS_S, NPC_MPLS_S,
+		NPC_IP_VER_6, NPC_IP_VER_MASK, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU5_MPLS, 0xff, NPC_MPLS_S, NPC_MPLS_S,
+		0x0000, 0xffff, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU5_MPLS, 0xff, NPC_MPLS_S, NPC_MPLS_S,
+		0x0000, 0x0000, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU5_MPLS, 0xff, 0x0000, NPC_MPLS_S,
+		0x0000, 0x0000, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU5_MPLS_PL, 0xff, NPC_IP_VER_4, NPC_IP_VER_MASK,
+		0x0000, 0x0000, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU5_MPLS_PL, 0xff, NPC_IP_VER_6, NPC_IP_VER_MASK,
+		0x0000, 0x0000, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU5_MPLS_PL, 0xff, 0x0000, 0xffff,
+		0x0000, 0x0000, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU5_MPLS_PL, 0xff, 0x0000, 0x0000,
+		0x0000, 0x0000, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU5_NSH, 0xff, NPC_NSH_NP_IP, NPC_NSH_NP_MASK,
+		0x0000, 0x0000, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU5_NSH, 0xff, NPC_NSH_NP_IP6, NPC_NSH_NP_MASK,
+		0x0000, 0x0000, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU5_NSH, 0xff, NPC_NSH_NP_ETH, NPC_NSH_NP_MASK,
+		0x0000, 0x0000, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU5_NSH, 0xff, NPC_NSH_NP_NSH, NPC_NSH_NP_MASK,
+		0x0000, 0x0000, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU5_NSH, 0xff, NPC_NSH_NP_MPLS, NPC_NSH_NP_MASK,
+		0x0000, 0x0000, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_NA, 0x00, 0x0000, 0x0000,
+		0x0000, 0x0000, 0x0000, 0x0000,
+	},
+};
+
+static struct npc_kpu_profile_cam kpu6_cam_entries[] = {
+	{
+		NPC_S_KPU6_IP6_EXT, 0xff, 0x0000, 0x0000,
+		0x0000, 0x0000, 0x0000, 0x0000,
+	},
+};
+
+static struct npc_kpu_profile_cam kpu7_cam_entries[] = {
+	{
+		NPC_S_KPU7_IP6_EXT, 0xff, 0x0000, 0x0000,
+		0x0000, 0x0000, 0x0000, 0x0000,
+	},
+};
+
+static struct npc_kpu_profile_cam kpu8_cam_entries[] = {
+	{
+		NPC_S_KPU8_TCP, 0xff, NPC_TCP_PORT_HTTP, 0xffff,
+		NPC_TCP_DATA_OFFSET_5, NPC_TCP_DATA_OFFSET_MASK, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU8_TCP, 0xff, NPC_TCP_PORT_HTTPS, 0xffff,
+		NPC_TCP_DATA_OFFSET_5, NPC_TCP_DATA_OFFSET_MASK, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU8_TCP, 0xff, NPC_TCP_PORT_PPTP, 0xffff,
+		NPC_TCP_DATA_OFFSET_5, NPC_TCP_DATA_OFFSET_MASK, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU8_TCP, 0xff, 0x0000, 0x0000,
+		NPC_TCP_DATA_OFFSET_5, NPC_TCP_DATA_OFFSET_MASK, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU8_TCP, 0xff, NPC_TCP_PORT_HTTP, 0xffff,
+		0x0000, 0x0000, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU8_TCP, 0xff, NPC_TCP_PORT_HTTPS, 0xffff,
+		0x0000, 0x0000, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU8_TCP, 0xff, NPC_TCP_PORT_PPTP, 0xffff,
+		0x0000, 0x0000, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU8_TCP, 0xff, 0x0000, 0x0000,
+		0x0000, 0x0000, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU8_UDP, 0xff, NPC_UDP_PORT_VXLAN, 0xffff,
+		NPC_VXLAN_I, NPC_VXLAN_I, 0x0000, 0xffff,
+	},
+	{
+		NPC_S_KPU8_UDP, 0xff, NPC_UDP_PORT_VXLAN, 0xffff,
+		0x0000, 0xffff, 0x0000, 0xffff,
+	},
+	{
+		NPC_S_KPU8_UDP, 0xff, NPC_UDP_PORT_VXLAN, 0xffff,
+		0x0000, 0x0000, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU8_UDP, 0xff, NPC_UDP_PORT_VXLANGPE, 0xffff,
+		NPC_VXLANGPE_P | NPC_VXLANGPE_I,
+		NPC_VXLANGPE_P | NPC_VXLANGPE_I,
+		NPC_VXLANGPE_NP_IP, NPC_VXLANGPE_NP_MASK,
+	},
+	{
+		NPC_S_KPU8_UDP, 0xff, NPC_UDP_PORT_VXLANGPE, 0xffff,
+		NPC_VXLANGPE_P | NPC_VXLANGPE_I,
+		NPC_VXLANGPE_P | NPC_VXLANGPE_I,
+		NPC_VXLANGPE_NP_IP6, NPC_VXLANGPE_NP_MASK,
+	},
+	{
+		NPC_S_KPU8_UDP, 0xff, NPC_UDP_PORT_VXLANGPE, 0xffff,
+		NPC_VXLANGPE_P | NPC_VXLANGPE_I,
+		NPC_VXLANGPE_P | NPC_VXLANGPE_I,
+		NPC_VXLANGPE_NP_ETH, NPC_VXLANGPE_NP_MASK,
+	},
+	{
+		NPC_S_KPU8_UDP, 0xff, NPC_UDP_PORT_VXLANGPE, 0xffff,
+		NPC_VXLANGPE_P | NPC_VXLANGPE_I,
+		NPC_VXLANGPE_P | NPC_VXLANGPE_I,
+		NPC_VXLANGPE_NP_NSH, NPC_VXLANGPE_NP_MASK,
+	},
+	{
+		NPC_S_KPU8_UDP, 0xff, NPC_UDP_PORT_VXLANGPE, 0xffff,
+		NPC_VXLANGPE_P | NPC_VXLANGPE_I,
+		NPC_VXLANGPE_P | NPC_VXLANGPE_I,
+		NPC_VXLANGPE_NP_MPLS, NPC_VXLANGPE_NP_MASK,
+	},
+	{
+		NPC_S_KPU8_UDP, 0xff, NPC_UDP_PORT_VXLANGPE, 0xffff,
+		NPC_VXLANGPE_P, NPC_VXLANGPE_P | NPC_VXLANGPE_I,
+		NPC_VXLANGPE_NP_IP, NPC_VXLANGPE_NP_MASK,
+	},
+	{
+		NPC_S_KPU8_UDP, 0xff, NPC_UDP_PORT_VXLANGPE, 0xffff,
+		NPC_VXLANGPE_P, NPC_VXLANGPE_P | NPC_VXLANGPE_I,
+		NPC_VXLANGPE_NP_IP6, NPC_VXLANGPE_NP_MASK,
+	},
+	{
+		NPC_S_KPU8_UDP, 0xff, NPC_UDP_PORT_VXLANGPE, 0xffff,
+		NPC_VXLANGPE_P, NPC_VXLANGPE_P | NPC_VXLANGPE_I,
+		NPC_VXLANGPE_NP_ETH, NPC_VXLANGPE_NP_MASK,
+	},
+	{
+		NPC_S_KPU8_UDP, 0xff, NPC_UDP_PORT_VXLANGPE, 0xffff,
+		NPC_VXLANGPE_P, NPC_VXLANGPE_P | NPC_VXLANGPE_I,
+		NPC_VXLANGPE_NP_NSH, NPC_VXLANGPE_NP_MASK,
+	},
+	{
+		NPC_S_KPU8_UDP, 0xff, NPC_UDP_PORT_VXLANGPE, 0xffff,
+		NPC_VXLANGPE_P, NPC_VXLANGPE_P | NPC_VXLANGPE_I,
+		NPC_VXLANGPE_NP_MPLS, NPC_VXLANGPE_NP_MASK,
+	},
+	{
+		NPC_S_KPU8_UDP, 0xff, NPC_UDP_PORT_VXLANGPE, 0xffff,
+		NPC_VXLANGPE_P, NPC_VXLANGPE_P, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU8_UDP, 0xff, NPC_UDP_PORT_VXLANGPE, 0xffff,
+		0x0000, NPC_VXLANGPE_P, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU8_UDP, 0xff, NPC_UDP_PORT_GENEVE, 0xffff,
+		0x0000, NPC_GENEVE_F_OAM | NPC_GENEVE_F_CRI_OPT,
+		NPC_ETYPE_TRANS_ETH_BR, 0xffff,
+	},
+	{
+		NPC_S_KPU8_UDP, 0xff, NPC_UDP_PORT_GENEVE, 0xffff,
+		NPC_GENEVE_F_OAM, NPC_GENEVE_F_OAM | NPC_GENEVE_F_CRI_OPT,
+		NPC_ETYPE_TRANS_ETH_BR, 0xffff,
+	},
+	{
+		NPC_S_KPU8_UDP, 0xff, NPC_UDP_PORT_GENEVE, 0xffff,
+		NPC_GENEVE_F_CRI_OPT, NPC_GENEVE_F_OAM | NPC_GENEVE_F_CRI_OPT,
+		NPC_ETYPE_TRANS_ETH_BR, 0xffff,
+	},
+	{
+		NPC_S_KPU8_UDP, 0xff, NPC_UDP_PORT_GENEVE, 0xffff,
+		NPC_GENEVE_F_OAM | NPC_GENEVE_F_CRI_OPT,
+		NPC_GENEVE_F_OAM | NPC_GENEVE_F_CRI_OPT,
+		NPC_ETYPE_TRANS_ETH_BR, 0xffff,
+	},
+	{
+		NPC_S_KPU8_UDP, 0xff, NPC_UDP_PORT_GENEVE, 0xffff,
+		0x0000, NPC_GENEVE_F_OAM | NPC_GENEVE_F_CRI_OPT,
+		NPC_ETYPE_IP, 0xffff,
+	},
+	{
+		NPC_S_KPU8_UDP, 0xff, NPC_UDP_PORT_GENEVE, 0xffff,
+		NPC_GENEVE_F_OAM, NPC_GENEVE_F_OAM | NPC_GENEVE_F_CRI_OPT,
+		NPC_ETYPE_IP, 0xffff,
+	},
+	{
+		NPC_S_KPU8_UDP, 0xff, NPC_UDP_PORT_GENEVE, 0xffff,
+		NPC_GENEVE_F_CRI_OPT, NPC_GENEVE_F_OAM | NPC_GENEVE_F_CRI_OPT,
+		NPC_ETYPE_IP, 0xffff,
+	},
+	{
+		NPC_S_KPU8_UDP, 0xff, NPC_UDP_PORT_GENEVE, 0xffff,
+		NPC_GENEVE_F_OAM | NPC_GENEVE_F_CRI_OPT,
+		NPC_GENEVE_F_OAM | NPC_GENEVE_F_CRI_OPT, NPC_ETYPE_IP, 0xffff,
+	},
+	{
+		NPC_S_KPU8_UDP, 0xff, NPC_UDP_PORT_GENEVE, 0xffff,
+		0x0000, NPC_GENEVE_F_OAM | NPC_GENEVE_F_CRI_OPT,
+		NPC_ETYPE_IP6, 0xffff,
+	},
+	{
+		NPC_S_KPU8_UDP, 0xff, NPC_UDP_PORT_GENEVE, 0xffff,
+		NPC_GENEVE_F_OAM, NPC_GENEVE_F_OAM | NPC_GENEVE_F_CRI_OPT,
+		NPC_ETYPE_IP6, 0xffff,
+	},
+	{
+		NPC_S_KPU8_UDP, 0xff, NPC_UDP_PORT_GENEVE, 0xffff,
+		NPC_GENEVE_F_CRI_OPT,
+		NPC_GENEVE_F_OAM | NPC_GENEVE_F_CRI_OPT, NPC_ETYPE_IP6, 0xffff,
+	},
+	{
+		NPC_S_KPU8_UDP, 0xff, NPC_UDP_PORT_GENEVE, 0xffff,
+		NPC_GENEVE_F_OAM | NPC_GENEVE_F_CRI_OPT,
+		NPC_GENEVE_F_OAM | NPC_GENEVE_F_CRI_OPT, NPC_ETYPE_IP6, 0xffff,
+	},
+	{
+		NPC_S_KPU8_UDP, 0xff, NPC_UDP_PORT_GTPC, 0xffff,
+		0x0000, 0x0000, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU8_UDP, 0xff, NPC_UDP_PORT_GTPU, 0xffff,
+		NPC_GTP_PT_GTP | NPC_GTP_VER1 | NPC_GTP_MT_G_PDU,
+		NPC_GTP_PT_MASK | NPC_GTP_VER_MASK | NPC_GTP_MT_MASK,
+		0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU8_UDP, 0xff, NPC_UDP_PORT_GTPU, 0xffff,
+		0x0000, 0x0000, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU8_UDP, 0xff, 0x0000, 0x0000,
+		0x0000, 0x0000, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU8_SCTP, 0xff, 0x0000, 0x0000,
+		0x0000, 0x0000, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU8_ICMP, 0xff, 0x0000, 0x0000,
+		0x0000, 0x0000, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU8_IGMP, 0xff, 0x0000, 0x0000,
+		0x0000, 0x0000, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU8_ICMP6, 0xff, 0x0000, 0x0000,
+		0x0000, 0x0000, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU8_ESP, 0xff, 0x0000, 0x0000,
+		0x0000, 0x0000, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU8_AH, 0xff, 0x0000, 0x0000,
+		0x0000, 0x0000, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_TRANS_ETH_BR, 0xffff,
+		NPC_GRE_F_KEY, 0xffff, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_TRANS_ETH_BR, 0xffff,
+		0x0000, 0x0000, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_MPLSU, 0xffff,
+		0x0000, 0xffff, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_MPLSU, 0xffff,
+		NPC_GRE_F_CSUM, 0xffff, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_MPLSU, 0xffff,
+		NPC_GRE_F_KEY, 0xffff, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_MPLSU, 0xffff,
+		NPC_GRE_F_SEQ, 0xffff, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_MPLSU, 0xffff,
+		NPC_GRE_F_CSUM | NPC_GRE_F_KEY, 0xffff, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_MPLSU, 0xffff,
+		NPC_GRE_F_CSUM | NPC_GRE_F_SEQ, 0xffff, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_MPLSU, 0xffff,
+		NPC_GRE_F_KEY | NPC_GRE_F_SEQ, 0xffff, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_MPLSU, 0xffff,
+		NPC_GRE_F_CSUM | NPC_GRE_F_KEY | NPC_GRE_F_SEQ,
+		0xffff, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_MPLSM, 0xffff,
+		0x0000, 0xffff, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_MPLSM, 0xffff,
+		NPC_GRE_F_CSUM, 0xffff, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_MPLSM, 0xffff,
+		NPC_GRE_F_KEY, 0xffff, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_MPLSM, 0xffff,
+		NPC_GRE_F_SEQ, 0xffff, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_MPLSM, 0xffff,
+		NPC_GRE_F_CSUM | NPC_GRE_F_KEY, 0xffff, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_MPLSM, 0xffff,
+		NPC_GRE_F_CSUM | NPC_GRE_F_SEQ, 0xffff, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_MPLSM, 0xffff,
+		NPC_GRE_F_KEY | NPC_GRE_F_SEQ, 0xffff, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_MPLSM, 0xffff,
+		NPC_GRE_F_CSUM | NPC_GRE_F_KEY | NPC_GRE_F_SEQ,
+		0xffff, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_NSH, 0xffff,
+		0x0000, 0xffff, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_NSH, 0xffff,
+		NPC_GRE_F_CSUM, 0xffff, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_NSH, 0xffff,
+		NPC_GRE_F_KEY, 0xffff, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_NSH, 0xffff,
+		NPC_GRE_F_SEQ, 0xffff, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_NSH, 0xffff,
+		NPC_GRE_F_CSUM | NPC_GRE_F_KEY, 0xffff, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_NSH, 0xffff,
+		NPC_GRE_F_CSUM | NPC_GRE_F_SEQ, 0xffff, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_NSH, 0xffff,
+		NPC_GRE_F_KEY | NPC_GRE_F_SEQ, 0xffff, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_NSH, 0xffff,
+		NPC_GRE_F_CSUM | NPC_GRE_F_KEY | NPC_GRE_F_SEQ,
+		0xffff, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_IP, 0xffff,
+		0x0000, 0xffff, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_IP, 0xffff,
+		NPC_GRE_F_CSUM, 0xffff, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_IP, 0xffff,
+		NPC_GRE_F_KEY, 0xffff, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_IP, 0xffff,
+		NPC_GRE_F_SEQ, 0xffff, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_IP, 0xffff,
+		NPC_GRE_F_CSUM | NPC_GRE_F_KEY, 0xffff, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_IP, 0xffff,
+		NPC_GRE_F_CSUM | NPC_GRE_F_SEQ, 0xffff, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_IP, 0xffff,
+		NPC_GRE_F_KEY | NPC_GRE_F_SEQ, 0xffff, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_IP, 0xffff,
+		NPC_GRE_F_CSUM | NPC_GRE_F_KEY | NPC_GRE_F_SEQ,
+		0xffff, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_IP6, 0xffff,
+		0x0000, 0xffff, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_IP6, 0xffff,
+		NPC_GRE_F_CSUM, 0xffff, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_IP6, 0xffff,
+		NPC_GRE_F_KEY, 0xffff, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_IP6, 0xffff,
+		NPC_GRE_F_SEQ, 0xffff, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_IP6, 0xffff,
+		NPC_GRE_F_CSUM | NPC_GRE_F_KEY, 0xffff, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_IP6, 0xffff,
+		NPC_GRE_F_CSUM | NPC_GRE_F_SEQ, 0xffff, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_IP6, 0xffff,
+		NPC_GRE_F_KEY | NPC_GRE_F_SEQ, 0xffff, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_IP6, 0xffff,
+		NPC_GRE_F_CSUM | NPC_GRE_F_KEY | NPC_GRE_F_SEQ,
+		0xffff, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU8_GRE, 0xff, 0x0000, 0xffff,
+		NPC_GRE_F_ROUTE, 0x4fff, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU8_GRE, 0xff, 0x0000, 0xffff,
+		0x0000, 0x4fff, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU8_GRE, 0xff, 0x0000, 0xffff,
+		0x0000, 0x0003, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_PPP, 0xffff,
+		NPC_GRE_F_KEY | NPC_GRE_VER_1, 0xffff, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_PPP, 0xffff,
+		NPC_GRE_F_KEY | NPC_GRE_F_SEQ | NPC_GRE_VER_1,
+		0xffff, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_PPP, 0xffff,
+		NPC_GRE_F_KEY | NPC_GRE_F_ACK | NPC_GRE_VER_1,
+		0xffff, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_PPP, 0xffff,
+		NPC_GRE_F_KEY | NPC_GRE_F_SEQ | NPC_GRE_F_ACK | NPC_GRE_VER_1,
+		0xffff, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU8_GRE, 0xff, 0x0000, 0xffff,
+		0x2001, 0xef7f, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU8_GRE, 0xff, 0x0000, 0xffff,
+		0x0001, 0x0003, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_NA, 0x00, 0x0000, 0x0000,
+		0x0000, 0x0000, 0x0000, 0x0000,
+	},
+};
+
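+/* KPU9: tunnel MPLS label stack and NSH next-protocol match entries. */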
+static struct npc_kpu_profile_cam kpu9_cam_entries[] = {
+	{
+		NPC_S_KPU9_TU_MPLS_IN_GRE_VXLAN, 0xff, NPC_MPLS_S, NPC_MPLS_S,
+		0x0000, 0x0000, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU9_TU_MPLS_IN_GRE_VXLAN, 0xff, 0x0000, NPC_MPLS_S,
+		NPC_MPLS_S, NPC_MPLS_S, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU9_TU_MPLS_IN_GRE_VXLAN, 0xff, 0x0000, NPC_MPLS_S,
+		0x0000, NPC_MPLS_S, NPC_MPLS_S, NPC_MPLS_S,
+	},
+	{
+		NPC_S_KPU9_TU_MPLS_IN_GRE_VXLAN, 0xff, 0x0000, NPC_MPLS_S,
+		0x0000, NPC_MPLS_S, 0x0000, NPC_MPLS_S,
+	},
+	{
+		NPC_S_KPU9_TU_MPLS, 0xff, NPC_MPLS_S, NPC_MPLS_S,
+		0x0000, 0x0000, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU9_TU_MPLS, 0xff, 0x0000, NPC_MPLS_S,
+		NPC_MPLS_S, NPC_MPLS_S, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU9_TU_MPLS, 0xff, 0x0000, NPC_MPLS_S,
+		0x0000, NPC_MPLS_S, NPC_MPLS_S, NPC_MPLS_S,
+	},
+	{
+		NPC_S_KPU9_TU_MPLS, 0xff, 0x0000, NPC_MPLS_S,
+		0x0000, NPC_MPLS_S, 0x0000, NPC_MPLS_S,
+	},
+	{
+		NPC_S_KPU9_TU_NSH, 0xff, NPC_NSH_NP_IP, NPC_NSH_NP_MASK,
+		0x0000, 0x0000, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU9_TU_NSH, 0xff, NPC_NSH_NP_IP6, NPC_NSH_NP_MASK,
+		0x0000, 0x0000, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU9_TU_NSH, 0xff, NPC_NSH_NP_ETH, NPC_NSH_NP_MASK,
+		0x0000, 0x0000, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU9_TU_NSH, 0xff, NPC_NSH_NP_NSH, NPC_NSH_NP_MASK,
+		0x0000, 0x0000, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU9_TU_NSH, 0xff, NPC_NSH_NP_MPLS, NPC_NSH_NP_MASK,
+		0x0000, 0x0000, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_NA, 0x00, 0x0000, 0x0000,
+		0x0000, 0x0000, 0x0000, 0x0000,
+	},
+};
+
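+/* KPU10: inner MPLS label/payload and NSH next-protocol match entries. */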
+static struct npc_kpu_profile_cam kpu10_cam_entries[] = {
+	{
+		NPC_S_KPU10_TU_MPLS, 0xff, NPC_MPLS_S, NPC_MPLS_S,
+		NPC_IP_VER_4, NPC_IP_VER_MASK, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU10_TU_MPLS, 0xff, NPC_MPLS_S, NPC_MPLS_S,
+		NPC_IP_VER_6, NPC_IP_VER_MASK, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU10_TU_MPLS, 0xff, NPC_MPLS_S, NPC_MPLS_S,
+		0x0000, 0xffff, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU10_TU_MPLS, 0xff, NPC_MPLS_S, NPC_MPLS_S,
+		0x0000, 0x0000, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU10_TU_MPLS, 0xff, 0x0000, NPC_MPLS_S,
+		0x0000, 0x0000, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU10_TU_MPLS_PL, 0xff, NPC_IP_VER_4, NPC_IP_VER_MASK,
+		0x0000, 0x0000, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU10_TU_MPLS_PL, 0xff, NPC_IP_VER_6, NPC_IP_VER_MASK,
+		0x0000, 0x0000, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU10_TU_MPLS_PL, 0xff, 0x0000, 0xffff,
+		0x0000, 0x0000, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU10_TU_MPLS_PL, 0xff, 0x0000, 0x0000,
+		0x0000, 0x0000, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU10_TU_NSH, 0xff, NPC_NSH_NP_IP, NPC_NSH_NP_MASK,
+		0x0000, 0x0000, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU10_TU_NSH, 0xff, NPC_NSH_NP_IP6, NPC_NSH_NP_MASK,
+		0x0000, 0x0000, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU10_TU_NSH, 0xff, NPC_NSH_NP_ETH, NPC_NSH_NP_MASK,
+		0x0000, 0x0000, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU10_TU_NSH, 0xff, NPC_NSH_NP_NSH, NPC_NSH_NP_MASK,
+		0x0000, 0x0000, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU10_TU_NSH, 0xff, NPC_NSH_NP_MPLS, NPC_NSH_NP_MASK,
+		0x0000, 0x0000, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_NA, 0x00, 0x0000, 0x0000,
+		0x0000, 0x0000, 0x0000, 0x0000,
+	},
+};
+
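+/*
+ * KPU11: tunnel Ethernet (optionally CTAG/STAG/QinQ tagged), PPP and
+ * NSH match entries.
+ */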
+static struct npc_kpu_profile_cam kpu11_cam_entries[] = {
+	{
+		NPC_S_KPU11_TU_ETHER, 0xff, NPC_ETYPE_IP, 0xffff,
+		0x0000, 0x0000, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU11_TU_ETHER, 0xff, NPC_ETYPE_IP6, 0xffff,
+		0x0000, 0x0000, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU11_TU_ETHER, 0xff, NPC_ETYPE_ARP, 0xffff,
+		0x0000, 0x0000, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU11_TU_ETHER, 0xff, NPC_ETYPE_CTAG, 0xffff,
+		NPC_ETYPE_IP, 0xffff, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU11_TU_ETHER, 0xff, NPC_ETYPE_CTAG, 0xffff,
+		NPC_ETYPE_IP6, 0xffff, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU11_TU_ETHER, 0xff, NPC_ETYPE_CTAG, 0xffff,
+		NPC_ETYPE_ARP, 0xffff, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU11_TU_ETHER, 0xff, NPC_ETYPE_CTAG, 0xffff,
+		0x0000, 0x0000, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU11_TU_ETHER, 0xff, NPC_ETYPE_SBTAG, 0xffff,
+		NPC_ETYPE_CTAG, 0xffff, NPC_ETYPE_IP, 0xffff,
+	},
+	{
+		NPC_S_KPU11_TU_ETHER, 0xff, NPC_ETYPE_SBTAG, 0xffff,
+		NPC_ETYPE_CTAG, 0xffff, NPC_ETYPE_IP6, 0xffff,
+	},
+	{
+		NPC_S_KPU11_TU_ETHER, 0xff, NPC_ETYPE_SBTAG, 0xffff,
+		NPC_ETYPE_CTAG, 0xffff, NPC_ETYPE_ARP, 0xffff,
+	},
+	{
+		NPC_S_KPU11_TU_ETHER, 0xff, NPC_ETYPE_SBTAG, 0xffff,
+		NPC_ETYPE_CTAG, 0xffff, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU11_TU_ETHER, 0xff, NPC_ETYPE_SBTAG, 0xffff,
+		NPC_ETYPE_IP, 0xffff, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU11_TU_ETHER, 0xff, NPC_ETYPE_SBTAG, 0xffff,
+		NPC_ETYPE_IP6, 0xffff, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU11_TU_ETHER, 0xff, NPC_ETYPE_SBTAG, 0xffff,
+		NPC_ETYPE_ARP, 0xffff, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU11_TU_ETHER, 0xff, NPC_ETYPE_SBTAG, 0xffff,
+		0x0000, 0x0000, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU11_TU_ETHER, 0xff, NPC_ETYPE_QINQ, 0xffff,
+		NPC_ETYPE_CTAG, 0xffff, NPC_ETYPE_IP, 0xffff,
+	},
+	{
+		NPC_S_KPU11_TU_ETHER, 0xff, NPC_ETYPE_QINQ, 0xffff,
+		NPC_ETYPE_CTAG, 0xffff, NPC_ETYPE_IP6, 0xffff,
+	},
+	{
+		NPC_S_KPU11_TU_ETHER, 0xff, NPC_ETYPE_QINQ, 0xffff,
+		NPC_ETYPE_CTAG, 0xffff, NPC_ETYPE_ARP, 0xffff,
+	},
+	{
+		NPC_S_KPU11_TU_ETHER, 0xff, NPC_ETYPE_QINQ, 0xffff,
+		NPC_ETYPE_CTAG, 0xffff, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU11_TU_ETHER, 0xff, NPC_ETYPE_QINQ, 0xffff,
+		NPC_ETYPE_IP, 0xffff, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU11_TU_ETHER, 0xff, NPC_ETYPE_QINQ, 0xffff,
+		NPC_ETYPE_IP6, 0xffff, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU11_TU_ETHER, 0xff, NPC_ETYPE_QINQ, 0xffff,
+		NPC_ETYPE_ARP, 0xffff, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU11_TU_ETHER, 0xff, NPC_ETYPE_QINQ, 0xffff,
+		0x0000, 0x0000, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU11_TU_ETHER, 0xff, 0x0000, 0x0000,
+		0x0000, 0x0000, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU11_TU_PPP, 0xff, 0x0000, 0x0000,
+		0x0000, 0x0000, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU11_TU_MPLS_IN_NSH, 0xff, 0x0000, 0x0000,
+		0x0000, 0x0000, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU11_TU_3RD_NSH, 0xff, 0x0000, 0x0000,
+		0x0000, 0x0000, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_NA, 0x00, 0x0000, 0x0000,
+		0x0000, 0x0000, 0x0000, 0x0000,
+	},
+};
+
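+/* KPU12: tunnel IPv4/IPv6 (by next protocol) and tunnel ARP match entries. */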
+static struct npc_kpu_profile_cam kpu12_cam_entries[] = {
+	{
+		NPC_S_KPU12_TU_IP, 0xff, NPC_IPNH_TCP, 0x00ff,
+		NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
+		NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU12_TU_IP, 0xff, NPC_IPNH_UDP, 0x00ff,
+		NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
+		NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU12_TU_IP, 0xff, NPC_IPNH_SCTP, 0x00ff,
+		NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
+		NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU12_TU_IP, 0xff, NPC_IPNH_ICMP, 0x00ff,
+		NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
+		NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU12_TU_IP, 0xff, NPC_IPNH_IGMP, 0x00ff,
+		NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
+		NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU12_TU_IP, 0xff, NPC_IPNH_ESP, 0x00ff,
+		NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
+		NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU12_TU_IP, 0xff, NPC_IPNH_AH, 0x00ff,
+		NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
+		NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU12_TU_IP, 0xff, 0x0000, 0x0000,
+		NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
+		NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU12_TU_IP, 0xff, NPC_IPNH_TCP, 0x00ff,
+		NPC_IP_VER_4, NPC_IP_VER_MASK, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU12_TU_IP, 0xff, NPC_IPNH_UDP, 0x00ff,
+		NPC_IP_VER_4, NPC_IP_VER_MASK, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU12_TU_IP, 0xff, NPC_IPNH_SCTP, 0x00ff,
+		NPC_IP_VER_4, NPC_IP_VER_MASK, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU12_TU_IP, 0xff, NPC_IPNH_ICMP, 0x00ff,
+		NPC_IP_VER_4, NPC_IP_VER_MASK, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU12_TU_IP, 0xff, NPC_IPNH_IGMP, 0x00ff,
+		NPC_IP_VER_4, NPC_IP_VER_MASK, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU12_TU_IP, 0xff, NPC_IPNH_ESP, 0x00ff,
+		NPC_IP_VER_4, NPC_IP_VER_MASK, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU12_TU_IP, 0xff, NPC_IPNH_AH, 0x00ff,
+		NPC_IP_VER_4, NPC_IP_VER_MASK, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU12_TU_IP, 0xff, 0x0000, 0x0000,
+		NPC_IP_VER_4, NPC_IP_VER_MASK, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU12_TU_IP, 0xff, 0x0000, 0x0000,
+		0x0000, 0x0000, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU12_TU_ARP, 0xff, 0x0000, 0x0000,
+		0x0000, 0x0000, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU12_TU_IP6, 0xff, NPC_IPNH_TCP << 8, 0xff00,
+		NPC_IP_VER_6, NPC_IP_VER_MASK, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU12_TU_IP6, 0xff, NPC_IPNH_UDP << 8, 0xff00,
+		NPC_IP_VER_6, NPC_IP_VER_MASK, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU12_TU_IP6, 0xff, NPC_IPNH_SCTP << 8, 0xff00,
+		NPC_IP_VER_6, NPC_IP_VER_MASK, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU12_TU_IP6, 0xff, NPC_IPNH_ICMP << 8, 0xff00,
+		NPC_IP_VER_6, NPC_IP_VER_MASK, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU12_TU_IP6, 0xff, NPC_IPNH_ICMP6 << 8, 0xff00,
+		NPC_IP_VER_6, NPC_IP_VER_MASK, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU12_TU_IP6, 0xff, NPC_IPNH_ESP << 8, 0xff00,
+		NPC_IP_VER_6, NPC_IP_VER_MASK, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU12_TU_IP6, 0xff, NPC_IPNH_AH << 8, 0xff00,
+		NPC_IP_VER_6, NPC_IP_VER_MASK, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU12_TU_IP6, 0xff, 0x0000, 0x0000,
+		NPC_IP_VER_6, NPC_IP_VER_MASK, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU12_TU_IP6, 0xff, 0x0000, 0x0000,
+		0x0000, 0x0000, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_NA, 0x00, 0x0000, 0x0000,
+		0x0000, 0x0000, 0x0000, 0x0000,
+	},
+};
+
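+/* KPU13: catch-all entry for the tunnel IPv6 extension header state. */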
+static struct npc_kpu_profile_cam kpu13_cam_entries[] = {
+	{
+		NPC_S_KPU13_TU_IP6_EXT, 0xff, 0x0000, 0x0000,
+		0x0000, 0x0000, 0x0000, 0x0000,
+	},
+};
+
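+/* KPU14: catch-all entry for the tunnel IPv6 extension header state. */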
+static struct npc_kpu_profile_cam kpu14_cam_entries[] = {
+	{
+		NPC_S_KPU14_TU_IP6_EXT, 0xff, 0x0000, 0x0000,
+		0x0000, 0x0000, 0x0000, 0x0000,
+	},
+};
+
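+/*
+ * KPU15: tunnel L4 match entries (TCP by well-known port, UDP, SCTP,
+ * ICMP/ICMPv6, IGMP, ESP, AH).
+ */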
+static struct npc_kpu_profile_cam kpu15_cam_entries[] = {
+	{
+		NPC_S_KPU15_TU_TCP, 0xff, NPC_TCP_PORT_HTTP, 0xffff,
+		NPC_TCP_DATA_OFFSET_5, NPC_TCP_DATA_OFFSET_MASK, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU15_TU_TCP, 0xff, NPC_TCP_PORT_HTTPS, 0xffff,
+		NPC_TCP_DATA_OFFSET_5, NPC_TCP_DATA_OFFSET_MASK, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU15_TU_TCP, 0xff, NPC_TCP_PORT_PPTP, 0xffff,
+		NPC_TCP_DATA_OFFSET_5, NPC_TCP_DATA_OFFSET_MASK, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU15_TU_TCP, 0xff, 0x0000, 0x0000,
+		NPC_TCP_DATA_OFFSET_5, NPC_TCP_DATA_OFFSET_MASK, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU15_TU_TCP, 0xff, NPC_TCP_PORT_HTTP, 0xffff,
+		0x0000, 0x0000, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU15_TU_TCP, 0xff, NPC_TCP_PORT_HTTPS, 0xffff,
+		0x0000, 0x0000, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU15_TU_TCP, 0xff, NPC_TCP_PORT_PPTP, 0xffff,
+		0x0000, 0x0000, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU15_TU_TCP, 0xff, 0x0000, 0x0000,
+		0x0000, 0x0000, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU15_TU_UDP, 0xff, 0x0000, 0x0000,
+		0x0000, 0x0000, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU15_TU_SCTP, 0xff, 0x0000, 0x0000,
+		0x0000, 0x0000, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU15_TU_ICMP, 0xff, 0x0000, 0x0000,
+		0x0000, 0x0000, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU15_TU_IGMP, 0xff, 0x0000, 0x0000,
+		0x0000, 0x0000, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU15_TU_ICMP6, 0xff, 0x0000, 0x0000,
+		0x0000, 0x0000, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU15_TU_ESP, 0xff, 0x0000, 0x0000,
+		0x0000, 0x0000, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU15_TU_AH, 0xff, 0x0000, 0x0000,
+		0x0000, 0x0000, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_NA, 0x00, 0x0000, 0x0000,
+		0x0000, 0x0000, 0x0000, 0x0000,
+	},
+};
+
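+/* KPU16: payload states (TCP/HTTP/HTTPS/PPTP data and UDP data). */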
+static struct npc_kpu_profile_cam kpu16_cam_entries[] = {
+	{
+		NPC_S_KPU16_TCP_DATA, 0xff, 0x0000, 0x0000,
+		0x0000, 0x0000, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU16_HTTP_DATA, 0xff, 0x0000, 0x0000,
+		0x0000, 0x0000, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU16_HTTPS_DATA, 0xff, 0x0000, 0x0000,
+		0x0000, 0x0000, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU16_PPTP_DATA, 0xff, 0x0000, 0x0000,
+		0x0000, 0x0000, 0x0000, 0x0000,
+	},
+	{
+		NPC_S_KPU16_UDP_DATA, 0xff, 0x0000, 0x0000,
+		0x0000, 0x0000, 0x0000, 0x0000,
+	},
+};
+
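+/*
+ * KPU1 parse actions, one per kpu1_cam_entries match: record the LA
+ * layer type/flags and advance to the tag, MPLS/NSH or IP/ARP states.
+ */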
+static struct npc_kpu_profile_action kpu1_action_entries[] = {
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
+		3, 0, NPC_S_KPU5_IP, 14, 1,
+		NPC_LID_LA, NPC_LT_LA_ETHER, 0, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
+		3, 0, NPC_S_KPU5_IP6, 14, 1,
+		NPC_LID_LA, NPC_LT_LA_ETHER, 0, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+		3, 0, NPC_S_KPU5_ARP, 14, 1,
+		NPC_LID_LA, NPC_LT_LA_ETHER, 0, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+		3, 0, NPC_S_KPU5_RARP, 14, 1,
+		NPC_LID_LA, NPC_LT_LA_ETHER, 0, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+		3, 0, NPC_S_KPU5_PTP, 14, 1,
+		NPC_LID_LA, NPC_LT_LA_ETHER, 0, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+		3, 0, NPC_S_KPU5_FCOE, 14, 1,
+		NPC_LID_LA, NPC_LT_LA_ETHER, 0, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 0,
+		0, 0, NPC_S_KPU2_CTAG, 14, 1,
+		NPC_LID_LA, NPC_LT_LA_ETHER, NPC_F_ETHER_VLAN, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 20,
+		0, 0, NPC_S_KPU2_SBTAG, 14, 1,
+		NPC_LID_LA, NPC_LT_LA_ETHER, NPC_F_ETHER_VLAN, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 0,
+		0, 0, NPC_S_KPU2_QINQ, 14, 1,
+		NPC_LID_LA, NPC_LT_LA_ETHER, NPC_F_ETHER_VLAN, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 10, 24,
+		0, 0, NPC_S_KPU2_ETAG, 14, 1,
+		NPC_LID_LA, NPC_LT_LA_ETHER, NPC_F_ETHER_ETAG, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 16, 20, 24,
+		0, 0, NPC_S_KPU2_ITAG, 14, 1,
+		NPC_LID_LA, NPC_LT_LA_ETHER, NPC_F_ETHER_ITAG, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
+		2, 0, NPC_S_KPU4_MPLS, 14, 1,
+		NPC_LID_LA, NPC_LT_LA_ETHER, NPC_F_ETHER_MPLS, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
+		2, 0, NPC_S_KPU4_MPLS, 14, 1,
+		NPC_LID_LA, NPC_LT_LA_ETHER, NPC_F_ETHER_MPLS, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0,
+		2, 0, NPC_S_KPU4_NSH, 14, 1,
+		NPC_LID_LA, NPC_LT_LA_ETHER, NPC_F_ETHER_NSH, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+		0, 1, NPC_S_NA, 0, 1,
+		NPC_LID_LA, NPC_LT_LA_8023, 0, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+		0, 1, NPC_S_NA, 0, 1,
+		NPC_LID_LA, NPC_LT_LA_8023, 0, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+		0, 1, NPC_S_NA, 0, 1,
+		NPC_LID_LA, NPC_LT_LA_ETHER, NPC_F_ETYPE_UNK, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
+		3, 0, NPC_S_KPU5_IP, 14, 1,
+		NPC_LID_LA, NPC_LT_LA_ETHER, NPC_F_PKI, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
+		3, 0, NPC_S_KPU5_IP6, 14, 1,
+		NPC_LID_LA, NPC_LT_LA_ETHER, NPC_F_PKI, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+		3, 0, NPC_S_KPU5_ARP, 14, 1,
+		NPC_LID_LA, NPC_LT_LA_ETHER, NPC_F_PKI, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+		3, 0, NPC_S_KPU5_RARP, 14, 1,
+		NPC_LID_LA, NPC_LT_LA_ETHER, NPC_F_PKI, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+		3, 0, NPC_S_KPU5_PTP, 14, 1,
+		NPC_LID_LA, NPC_LT_LA_ETHER, NPC_F_PKI, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+		3, 0, NPC_S_KPU5_FCOE, 14, 1,
+		NPC_LID_LA, NPC_LT_LA_ETHER, NPC_F_PKI, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 0,
+		0, 0, NPC_S_KPU2_CTAG, 14, 1,
+		NPC_LID_LA, NPC_LT_LA_ETHER, NPC_F_PKI_VLAN, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 20,
+		0, 0, NPC_S_KPU2_SBTAG, 14, 1,
+		NPC_LID_LA, NPC_LT_LA_ETHER, NPC_F_PKI_VLAN, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 0,
+		0, 0, NPC_S_KPU2_QINQ, 14, 1,
+		NPC_LID_LA, NPC_LT_LA_ETHER, NPC_F_PKI_VLAN, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 10, 24,
+		0, 0, NPC_S_KPU2_ETAG, 14, 1,
+		NPC_LID_LA, NPC_LT_LA_ETHER, NPC_F_PKI_ETAG, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 16, 20, 24,
+		0, 0, NPC_S_KPU2_ITAG, 14, 1,
+		NPC_LID_LA, NPC_LT_LA_ETHER, NPC_F_PKI_ITAG, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
+		2, 0, NPC_S_KPU4_MPLS, 14, 1,
+		NPC_LID_LA, NPC_LT_LA_ETHER, NPC_F_PKI_MPLS, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
+		2, 0, NPC_S_KPU4_MPLS, 14, 1,
+		NPC_LID_LA, NPC_LT_LA_ETHER, NPC_F_PKI_MPLS, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
+		2, 0, NPC_S_KPU4_NSH, 14, 1,
+		NPC_LID_LA, NPC_LT_LA_ETHER, NPC_F_PKI_NSH, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+		0, 1, NPC_S_NA, 0, 1,
+		NPC_LID_LA, NPC_LT_LA_ETHER, NPC_F_ETYPE_UNK, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_LA, NPC_EC_L2_K1, 0, 0, 0,
+		0, 1, NPC_S_NA, 0, 0,
+		NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
+		0, 0,
+	},
+};
+
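+/*
+ * KPU2 parse actions for the CTAG/STAG/QinQ/ETAG/ITAG matches above:
+ * record the LB layer type/flags and continue parsing.
+ */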
+static struct npc_kpu_profile_action kpu2_action_entries[] = {
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
+		2, 0, NPC_S_KPU5_IP, 4, 1,
+		NPC_LID_LB, NPC_LT_LB_CTAG, 0, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
+		2, 0, NPC_S_KPU5_IP6, 4, 1,
+		NPC_LID_LB, NPC_LT_LB_CTAG, 0, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+		2, 0, NPC_S_KPU5_ARP, 4, 1,
+		NPC_LID_LB, NPC_LT_LB_CTAG, 0, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+		2, 0, NPC_S_KPU5_RARP, 4, 1,
+		NPC_LID_LB, NPC_LT_LB_CTAG, 0, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+		2, 0, NPC_S_KPU5_PTP, 4, 1,
+		NPC_LID_LB, NPC_LT_LB_CTAG, 0, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+		2, 0, NPC_S_KPU5_FCOE, 4, 1,
+		NPC_LID_LB, NPC_LT_LB_CTAG, 0, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
+		1, 0, NPC_S_KPU4_MPLS, 4, 1,
+		NPC_LID_LB, NPC_LT_LB_CTAG, 0, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
+		1, 0, NPC_S_KPU4_MPLS, 4, 1,
+		NPC_LID_LB, NPC_LT_LB_CTAG, 0, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0,
+		1, 0, NPC_S_KPU4_NSH, 4, 1,
+		NPC_LID_LB, NPC_LT_LB_CTAG, 0, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+		0, 1, NPC_S_NA, 0, 1,
+		NPC_LID_LB, NPC_LT_LB_CTAG, NPC_F_ETYPE_UNK, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
+		2, 0, NPC_S_KPU5_IP, 8, 1,
+		NPC_LID_LB, NPC_LT_LB_STAG, NPC_F_STAG_CTAG, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
+		2, 0, NPC_S_KPU5_IP6, 8, 1,
+		NPC_LID_LB, NPC_LT_LB_STAG, NPC_F_STAG_CTAG, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+		2, 0, NPC_S_KPU5_ARP, 8, 1,
+		NPC_LID_LB, NPC_LT_LB_STAG, NPC_F_STAG_CTAG, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+		2, 0, NPC_S_KPU5_RARP, 8, 1,
+		NPC_LID_LB, NPC_LT_LB_STAG, NPC_F_STAG_CTAG, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+		2, 0, NPC_S_KPU5_PTP, 8, 1,
+		NPC_LID_LB, NPC_LT_LB_STAG, NPC_F_STAG_CTAG, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+		2, 0, NPC_S_KPU5_FCOE, 8, 1,
+		NPC_LID_LB, NPC_LT_LB_STAG, NPC_F_STAG_CTAG, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
+		1, 0, NPC_S_KPU4_MPLS, 8, 1,
+		NPC_LID_LB, NPC_LT_LB_STAG, NPC_F_STAG_CTAG, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
+		1, 0, NPC_S_KPU4_MPLS, 8, 1,
+		NPC_LID_LB, NPC_LT_LB_STAG, NPC_F_STAG_CTAG, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0,
+		1, 0, NPC_S_KPU4_NSH, 8, 1,
+		NPC_LID_LB, NPC_LT_LB_STAG, NPC_F_STAG_CTAG, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+		0, 1, NPC_S_NA, 0, 1,
+		NPC_LID_LB, NPC_LT_LB_STAG, NPC_F_STAG_CTAG_UNK, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 0,
+		0, 0, NPC_S_KPU3_CTAG, 8, 1,
+		NPC_LID_LB, NPC_LT_LB_STAG, NPC_F_STAG_STAG_CTAG, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 0,
+		0, 0, NPC_S_KPU3_STAG, 8, 1,
+		NPC_LID_LB, NPC_LT_LB_STAG, NPC_F_STAG_STAG_STAG, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
+		2, 0, NPC_S_KPU5_IP, 22, 1,
+		NPC_LID_LB, NPC_LT_LB_BTAG, NPC_F_BTAG_ITAG, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
+		2, 0, NPC_S_KPU5_IP6, 22, 1,
+		NPC_LID_LB, NPC_LT_LB_BTAG, NPC_F_BTAG_ITAG, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+		2, 0, NPC_S_KPU5_ARP, 22, 1,
+		NPC_LID_LB, NPC_LT_LB_BTAG, NPC_F_BTAG_ITAG, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+		2, 0, NPC_S_KPU5_RARP, 22, 1,
+		NPC_LID_LB, NPC_LT_LB_BTAG, NPC_F_BTAG_ITAG, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+		2, 0, NPC_S_KPU5_PTP, 22, 1,
+		NPC_LID_LB, NPC_LT_LB_BTAG, NPC_F_BTAG_ITAG, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+		2, 0, NPC_S_KPU5_FCOE, 22, 1,
+		NPC_LID_LB, NPC_LT_LB_BTAG, NPC_F_BTAG_ITAG, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
+		1, 0, NPC_S_KPU4_MPLS, 22, 1,
+		NPC_LID_LB, NPC_LT_LB_BTAG, NPC_F_BTAG_ITAG, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
+		1, 0, NPC_S_KPU4_MPLS, 22, 1,
+		NPC_LID_LB, NPC_LT_LB_BTAG, NPC_F_BTAG_ITAG, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0,
+		1, 0, NPC_S_KPU4_NSH, 22, 1,
+		NPC_LID_LB, NPC_LT_LB_BTAG, NPC_F_BTAG_ITAG, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0,
+		0, 0, NPC_S_KPU3_STAG, 22, 1,
+		NPC_LID_LB, NPC_LT_LB_BTAG, NPC_F_BTAG_ITAG_STAG, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0,
+		0, 0, NPC_S_KPU3_CTAG, 22, 1,
+		NPC_LID_LB, NPC_LT_LB_BTAG, NPC_F_BTAG_ITAG_CTAG, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+		0, 1, NPC_S_NA, 0, 1,
+		NPC_LID_LB, NPC_LT_LB_BTAG, NPC_F_BTAG_ITAG_UNK, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
+		2, 0, NPC_S_KPU5_IP, 4, 1,
+		NPC_LID_LB, NPC_LT_LB_STAG, 0, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
+		2, 0, NPC_S_KPU5_IP6, 4, 1,
+		NPC_LID_LB, NPC_LT_LB_STAG, 0, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+		2, 0, NPC_S_KPU5_ARP, 4, 1,
+		NPC_LID_LB, NPC_LT_LB_STAG, 0, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+		2, 0, NPC_S_KPU5_RARP, 4, 1,
+		NPC_LID_LB, NPC_LT_LB_STAG, 0, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+		2, 0, NPC_S_KPU5_PTP, 4, 1,
+		NPC_LID_LB, NPC_LT_LB_STAG, 0, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+		2, 0, NPC_S_KPU5_FCOE, 4, 1,
+		NPC_LID_LB, NPC_LT_LB_STAG, 0, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
+		1, 0, NPC_S_KPU4_MPLS, 4, 1,
+		NPC_LID_LB, NPC_LT_LB_STAG, 0, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
+		1, 0, NPC_S_KPU4_MPLS, 4, 1,
+		NPC_LID_LB, NPC_LT_LB_STAG, 0, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0,
+		1, 0, NPC_S_KPU4_NSH, 4, 1,
+		NPC_LID_LB, NPC_LT_LB_STAG, 0, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+		0, 1, NPC_S_NA, 0, 1,
+		NPC_LID_LB, NPC_LT_LB_STAG, NPC_F_ETYPE_UNK, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
+		2, 0, NPC_S_KPU5_IP, 8, 1,
+		NPC_LID_LB, NPC_LT_LB_QINQ, NPC_F_QINQ_CTAG, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
+		2, 0, NPC_S_KPU5_IP6, 8, 1,
+		NPC_LID_LB, NPC_LT_LB_QINQ, NPC_F_QINQ_CTAG, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+		2, 0, NPC_S_KPU5_ARP, 8, 1,
+		NPC_LID_LB, NPC_LT_LB_QINQ, NPC_F_QINQ_CTAG, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+		2, 0, NPC_S_KPU5_RARP, 8, 1,
+		NPC_LID_LB, NPC_LT_LB_QINQ, NPC_F_QINQ_CTAG, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+		2, 0, NPC_S_KPU5_PTP, 8, 1,
+		NPC_LID_LB, NPC_LT_LB_QINQ, NPC_F_QINQ_CTAG, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+		2, 0, NPC_S_KPU5_FCOE, 8, 1,
+		NPC_LID_LB, NPC_LT_LB_QINQ, NPC_F_QINQ_CTAG, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
+		1, 0, NPC_S_KPU4_MPLS, 8, 1,
+		NPC_LID_LB, NPC_LT_LB_QINQ, NPC_F_QINQ_CTAG, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
+		1, 0, NPC_S_KPU4_MPLS, 8, 1,
+		NPC_LID_LB, NPC_LT_LB_QINQ, NPC_F_QINQ_CTAG, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0,
+		1, 0, NPC_S_KPU4_NSH, 8, 1,
+		NPC_LID_LB, NPC_LT_LB_QINQ, NPC_F_QINQ_CTAG, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+		0, 1, NPC_S_NA, 0, 1,
+		NPC_LID_LB, NPC_LT_LB_QINQ, NPC_F_QINQ_CTAG_UNK, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 0,
+		0, 0, NPC_S_KPU3_CTAG, 8, 1,
+		NPC_LID_LB, NPC_LT_LB_QINQ, NPC_F_QINQ_QINQ_CTAG, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 0,
+		0, 0, NPC_S_KPU3_QINQ, 8, 1,
+		NPC_LID_LB, NPC_LT_LB_QINQ, NPC_F_QINQ_QINQ_QINQ, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
+		2, 0, NPC_S_KPU5_IP, 4, 1,
+		NPC_LID_LB, NPC_LT_LB_QINQ, 0, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
+		2, 0, NPC_S_KPU5_IP6, 4, 1,
+		NPC_LID_LB, NPC_LT_LB_QINQ, 0, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+		2, 0, NPC_S_KPU5_ARP, 4, 1,
+		NPC_LID_LB, NPC_LT_LB_QINQ, 0, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+		2, 0, NPC_S_KPU5_RARP, 4, 1,
+		NPC_LID_LB, NPC_LT_LB_QINQ, 0, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+		2, 0, NPC_S_KPU5_PTP, 4, 1,
+		NPC_LID_LB, NPC_LT_LB_QINQ, 0, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+		2, 0, NPC_S_KPU5_FCOE, 4, 1,
+		NPC_LID_LB, NPC_LT_LB_QINQ, 0, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
+		1, 0, NPC_S_KPU4_MPLS, 4, 1,
+		NPC_LID_LB, NPC_LT_LB_QINQ, 0, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
+		1, 0, NPC_S_KPU4_MPLS, 4, 1,
+		NPC_LID_LB, NPC_LT_LB_QINQ, 0, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0,
+		1, 0, NPC_S_KPU4_NSH, 4, 1,
+		NPC_LID_LB, NPC_LT_LB_QINQ, 0, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+		0, 1, NPC_S_NA, 0, 1,
+		NPC_LID_LB, NPC_LT_LB_QINQ, NPC_F_ETYPE_UNK, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
+		2, 0, NPC_S_KPU5_IP, 8, 1,
+		NPC_LID_LB, NPC_LT_LB_ETAG, 0, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
+		2, 0, NPC_S_KPU5_IP6, 8, 1,
+		NPC_LID_LB, NPC_LT_LB_ETAG, 0, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+		2, 0, NPC_S_KPU5_ARP, 8, 1,
+		NPC_LID_LB, NPC_LT_LB_ETAG, 0, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+		2, 0, NPC_S_KPU5_RARP, 8, 1,
+		NPC_LID_LB, NPC_LT_LB_ETAG, 0, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+		2, 0, NPC_S_KPU5_PTP, 8, 1,
+		NPC_LID_LB, NPC_LT_LB_ETAG, 0, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+		2, 0, NPC_S_KPU5_FCOE, 8, 1,
+		NPC_LID_LB, NPC_LT_LB_ETAG, 0, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
+		1, 0, NPC_S_KPU4_MPLS, 8, 1,
+		NPC_LID_LB, NPC_LT_LB_ETAG, 1, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
+		1, 0, NPC_S_KPU4_MPLS, 8, 1,
+		NPC_LID_LB, NPC_LT_LB_ETAG, 2, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
+		1, 0, NPC_S_KPU4_NSH, 8, 1,
+		NPC_LID_LB, NPC_LT_LB_ETAG, 2, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0,
+		0, 0, NPC_S_KPU3_CTAG, 8, 1,
+		NPC_LID_LB, NPC_LT_LB_ETAG, NPC_F_ETAG_CTAG, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 16, 20, 24,
+		0, 0, NPC_S_KPU3_ITAG, 12, 1,
+		NPC_LID_LB, NPC_LT_LB_ETAG, NPC_F_ETAG_BTAG_ITAG, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 0,
+		0, 0, NPC_S_KPU3_STAG, 8, 1,
+		NPC_LID_LB, NPC_LT_LB_ETAG, NPC_F_ETAG_STAG, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 0,
+		0, 0, NPC_S_KPU3_QINQ, 8, 1,
+		NPC_LID_LB, NPC_LT_LB_ETAG, NPC_F_ETAG_QINQ, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
+		2, 0, NPC_S_KPU5_IP, 26, 1,
+		NPC_LID_LB, NPC_LT_LB_ETAG, NPC_F_ETAG_ITAG, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
+		2, 0, NPC_S_KPU5_IP6, 26, 1,
+		NPC_LID_LB, NPC_LT_LB_ETAG, NPC_F_ETAG_ITAG, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+		2, 0, NPC_S_KPU5_ARP, 26, 1,
+		NPC_LID_LB, NPC_LT_LB_ETAG, NPC_F_ETAG_ITAG, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0,
+		0, 0, NPC_S_KPU3_STAG, 26, 1,
+		NPC_LID_LB, NPC_LT_LB_ETAG, NPC_F_ETAG_ITAG_STAG, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0,
+		0, 0, NPC_S_KPU3_CTAG, 26, 1,
+		NPC_LID_LB, NPC_LT_LB_ETAG, NPC_F_ETAG_ITAG_CTAG, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+		0, 1, NPC_S_NA, 0, 1,
+		NPC_LID_LB, NPC_LT_LB_ETAG, NPC_F_ETAG_ITAG_UNK, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+		0, 1, NPC_S_NA, 0, 1,
+		NPC_LID_LB, NPC_LT_LB_ETAG, NPC_F_ETYPE_UNK, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
+		2, 0, NPC_S_KPU5_IP, 18, 1,
+		NPC_LID_LB, NPC_LT_LB_ITAG, 0, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
+		2, 0, NPC_S_KPU5_IP6, 18, 1,
+		NPC_LID_LB, NPC_LT_LB_ITAG, 0, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+		2, 0, NPC_S_KPU5_ARP, 18, 1,
+		NPC_LID_LB, NPC_LT_LB_ITAG, 0, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+		2, 0, NPC_S_KPU5_RARP, 18, 1,
+		NPC_LID_LB, NPC_LT_LB_ITAG, 0, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
+		2, 0, NPC_S_KPU5_IP, 26, 1,
+		NPC_LID_LB, NPC_LT_LB_ITAG, NPC_F_ITAG_STAG_CTAG, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
+		2, 0, NPC_S_KPU5_IP6, 26, 1,
+		NPC_LID_LB, NPC_LT_LB_ITAG, NPC_F_ITAG_STAG_CTAG, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+		2, 0, NPC_S_KPU5_ARP, 26, 1,
+		NPC_LID_LB, NPC_LT_LB_ITAG, NPC_F_ITAG_STAG_CTAG, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_LB, NPC_EC_L2_K3_ETYPE_UNK, 0, 0, 0,
+		0, 1, NPC_S_NA, 0, 0,
+		NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
+		2, 0, NPC_S_KPU5_IP, 22, 1,
+		NPC_LID_LB, NPC_LT_LB_ITAG, NPC_F_ITAG_STAG, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
+		2, 0, NPC_S_KPU5_IP6, 22, 1,
+		NPC_LID_LB, NPC_LT_LB_ITAG, NPC_F_ITAG_STAG, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+		2, 0, NPC_S_KPU5_ARP, 22, 1,
+		NPC_LID_LB, NPC_LT_LB_ITAG, NPC_F_ITAG_STAG, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_LB, NPC_EC_L2_K3_ETYPE_UNK, 0, 0, 0,
+		0, 1, NPC_S_NA, 0, 0,
+		NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
+		2, 0, NPC_S_KPU5_IP, 22, 1,
+		NPC_LID_LB, NPC_LT_LB_ITAG, NPC_F_ITAG_CTAG, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
+		2, 0, NPC_S_KPU5_IP6, 22, 1,
+		NPC_LID_LB, NPC_LT_LB_ITAG, NPC_F_ITAG_CTAG, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+		2, 0, NPC_S_KPU5_ARP, 22, 1,
+		NPC_LID_LB, NPC_LT_LB_ITAG, NPC_F_ITAG_CTAG, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_LB, NPC_EC_L2_K3_ETYPE_UNK, 0, 0, 0,
+		0, 1, NPC_S_NA, 0, 0,
+		NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_LB, NPC_EC_L2_K3_ETYPE_UNK, 0, 0, 0,
+		0, 1, NPC_S_NA, 0, 0,
+		NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_LB, NPC_EC_L2_K3, 0, 0, 0,
+		0, 1, NPC_S_NA, 0, 0,
+		NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
+		0, 0,
+	},
+};
+
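+/* KPU3 parse actions for the second-level tag matches above. */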
+static struct npc_kpu_profile_action kpu3_action_entries[] = {
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
+		1, 0, NPC_S_KPU5_IP, 4, 0,
+		NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
+		1, 0, NPC_S_KPU5_IP6, 4, 0,
+		NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+		1, 0, NPC_S_KPU5_ARP, 4, 0,
+		NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+		1, 0, NPC_S_KPU5_RARP, 4, 0,
+		NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+		1, 0, NPC_S_KPU5_PTP, 4, 0,
+		NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+		1, 0, NPC_S_KPU5_FCOE, 4, 0,
+		NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
+		0, 0, NPC_S_KPU4_MPLS, 4, 0,
+		NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
+		0, 0, NPC_S_KPU4_MPLS, 4, 0,
+		NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0,
+		0, 0, NPC_S_KPU4_NSH, 4, 0,
+		NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_LB, NPC_EC_L2_K3_ETYPE_UNK, 0, 0, 0,
+		0, 1, NPC_S_NA, 0, 0,
+		NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
+		1, 0, NPC_S_KPU5_IP, 8, 0,
+		NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
+		1, 0, NPC_S_KPU5_IP6, 8, 0,
+		NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+		1, 0, NPC_S_KPU5_ARP, 8, 0,
+		NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+		1, 0, NPC_S_KPU5_RARP, 8, 0,
+		NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+		1, 0, NPC_S_KPU5_PTP, 8, 0,
+		NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+		1, 0, NPC_S_KPU5_FCOE, 8, 0,
+		NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
+		0, 0, NPC_S_KPU4_MPLS, 8, 0,
+		NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
+		0, 0, NPC_S_KPU4_MPLS, 8, 0,
+		NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0,
+		0, 0, NPC_S_KPU4_NSH, 8, 0,
+		NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
+		1, 0, NPC_S_KPU5_IP, 4, 0,
+		NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
+		1, 0, NPC_S_KPU5_IP6, 4, 0,
+		NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+		1, 0, NPC_S_KPU5_ARP, 4, 0,
+		NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+		1, 0, NPC_S_KPU5_RARP, 4, 0,
+		NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
+		0, 0, NPC_S_KPU4_MPLS, 4, 0,
+		NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
+		0, 0, NPC_S_KPU4_MPLS, 4, 0,
+		NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0,
+		0, 0, NPC_S_KPU4_NSH, 4, 0,
+		NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_LB, NPC_EC_L2_K3_ETYPE_UNK, 0, 0, 0,
+		0, 1, NPC_S_NA, 0, 0,
+		NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
+		1, 0, NPC_S_KPU5_IP, 8, 0,
+		NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
+		1, 0, NPC_S_KPU5_IP6, 8, 0,
+		NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+		1, 0, NPC_S_KPU5_ARP, 8, 0,
+		NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+		1, 0, NPC_S_KPU5_RARP, 8, 0,
+		NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+		1, 0, NPC_S_KPU5_PTP, 8, 0,
+		NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+		1, 0, NPC_S_KPU5_FCOE, 8, 0,
+		NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
+		0, 0, NPC_S_KPU4_MPLS, 8, 0,
+		NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
+		0, 0, NPC_S_KPU4_MPLS, 8, 0,
+		NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0,
+		0, 0, NPC_S_KPU4_NSH, 8, 0,
+		NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
+		1, 0, NPC_S_KPU5_IP, 4, 0,
+		NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
+		1, 0, NPC_S_KPU5_IP6, 4, 0,
+		NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+		1, 0, NPC_S_KPU5_ARP, 4, 0,
+		NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+		1, 0, NPC_S_KPU5_RARP, 4, 0,
+		NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+		1, 0, NPC_S_KPU5_PTP, 4, 0,
+		NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+		1, 0, NPC_S_KPU5_FCOE, 4, 0,
+		NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
+		0, 0, NPC_S_KPU4_MPLS, 4, 0,
+		NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
+		0, 0, NPC_S_KPU4_MPLS, 4, 0,
+		NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0,
+		0, 0, NPC_S_KPU4_NSH, 4, 0,
+		NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_LB, NPC_EC_L2_K3_ETYPE_UNK, 0, 0, 0,
+		0, 1, NPC_S_NA, 0, 0,
+		NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
+		2, 0, NPC_S_KPU5_IP, 18, 0,
+		NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
+		2, 0, NPC_S_KPU5_IP6, 18, 0,
+		NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+		2, 0, NPC_S_KPU5_ARP, 18, 0,
+		NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+		2, 0, NPC_S_KPU5_RARP, 18, 0,
+		NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
+		1, 0, NPC_S_KPU5_IP, 26, 0,
+		NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
+		1, 0, NPC_S_KPU5_IP6, 26, 0,
+		NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+		1, 0, NPC_S_KPU5_ARP, 26, 0,
+		NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
+		1, 0, NPC_S_KPU5_IP, 22, 0,
+		NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
+		1, 0, NPC_S_KPU5_IP6, 22, 0,
+		NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+		1, 0, NPC_S_KPU5_ARP, 22, 0,
+		NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_LB, NPC_EC_L2_K3_ETYPE_UNK, 0, 0, 0,
+		0, 1, NPC_S_NA, 0, 0,
+		NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_LB, NPC_EC_L2_K3_ETYPE_UNK, 0, 0, 0,
+		0, 1, NPC_S_NA, 0, 0,
+		NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
+		1, 0, NPC_S_KPU5_IP, 22, 0,
+		NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
+		1, 0, NPC_S_KPU5_IP6, 22, 0,
+		NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+		1, 0, NPC_S_KPU5_ARP, 22, 0,
+		NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_LB, NPC_EC_L2_K3_ETYPE_UNK, 0, 0, 0,
+		0, 1, NPC_S_NA, 0, 0,
+		NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_LB, NPC_EC_L2_K3_ETYPE_UNK, 0, 0, 0,
+		0, 1, NPC_S_NA, 0, 0,
+		NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_LB, NPC_EC_L2_K3, 0, 0, 0,
+		0, 1, NPC_S_NA, 0, 0,
+		NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
+		0, 0,
+	},
+};
+
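+/* KPU4 parse actions for the MPLS and NSH matches above (LC layer). */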
+static struct npc_kpu_profile_action kpu4_action_entries[] = {
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+		0, 0, NPC_S_KPU5_MPLS_PL, 4, 1,
+		NPC_LID_LC, NPC_LT_LC_MPLS, 0, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+		0, 0, NPC_S_KPU5_MPLS_PL, 8, 1,
+		NPC_LID_LC, NPC_LT_LC_MPLS, NPC_F_MPLS_2_LABELS, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+		0, 0, NPC_S_KPU5_MPLS_PL, 12, 1,
+		NPC_LID_LC, NPC_LT_LC_MPLS, NPC_F_MPLS_3_LABELS, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 4, 0,
+		0, 0, NPC_S_KPU5_MPLS, 12, 1,
+		NPC_LID_LC, NPC_LT_LC_MPLS, NPC_F_MPLS_4_LABELS, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
+		7, 0, NPC_S_KPU12_TU_IP, 0, 1,
+		NPC_LID_LC, NPC_LT_LC_NSH, 0, 1, 0x3f,
+		0, 2,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
+		7, 0, NPC_S_KPU12_TU_IP6, 0, 1,
+		NPC_LID_LC, NPC_LT_LC_NSH, 0, 1, 0x3f,
+		0, 2,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 16, 20,
+		6, 0, NPC_S_KPU11_TU_ETHER, 0, 1,
+		NPC_LID_LC, NPC_LT_LC_NSH, 0, 1, 0x3f,
+		0, 2,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0,
+		0, 0, NPC_S_KPU5_NSH, 0, 1,
+		NPC_LID_LC, NPC_LT_LC_NSH, 0, 1, 0x3f,
+		0, 2,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+		4, 0, NPC_S_KPU9_TU_MPLS, 0, 1,
+		NPC_LID_LC, NPC_LT_LC_NSH, 0, 1, 0x3f,
+		0, 2,
+	},
+	{
+		NPC_ERRLEV_LB, NPC_EC_L2_K4, 0, 0, 0,
+		0, 1, NPC_S_NA, 0, 0,
+		NPC_LID_LC, NPC_LT_NA, 0, 0, 0,
+		0, 0,
+	},
+};
+
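+/*
+ * KPU5 parse actions for the outer IPv4/IPv6/ARP/RARP/PTP/FCoE matches
+ * above: record the LC layer type/flags and select the L4/tunnel state.
+ */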
+static struct npc_kpu_profile_action kpu5_action_entries[] = {
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 12, 0,
+		2, 0, NPC_S_KPU8_TCP, 20, 1,
+		NPC_LID_LC, NPC_LT_LC_IP, 0, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 8, 10,
+		2, 0, NPC_S_KPU8_UDP, 20, 1,
+		NPC_LID_LC, NPC_LT_LC_IP, 0, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+		2, 0, NPC_S_KPU8_SCTP, 20, 1,
+		NPC_LID_LC, NPC_LT_LC_IP, 0, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+		2, 0, NPC_S_KPU8_ICMP, 20, 1,
+		NPC_LID_LC, NPC_LT_LC_IP, 0, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+		2, 0, NPC_S_KPU8_IGMP, 20, 1,
+		NPC_LID_LC, NPC_LT_LC_IP, 0, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+		0, 0, NPC_S_KPU8_ESP, 20, 1,
+		NPC_LID_LC, NPC_LT_LC_IP, 0, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+		0, 0, NPC_S_KPU8_AH, 20, 1,
+		NPC_LID_LC, NPC_LT_LC_IP, 0, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0,
+		2, 0, NPC_S_KPU8_GRE, 20, 1,
+		NPC_LID_LC, NPC_LT_LC_IP, 0, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
+		6, 0, NPC_S_KPU12_TU_IP, 20, 1,
+		NPC_LID_LC, NPC_LT_LC_IP, NPC_F_IP_IP_IN_IP, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
+		6, 0, NPC_S_KPU12_TU_IP6, 20, 1,
+		NPC_LID_LC, NPC_LT_LC_IP, NPC_F_IP_6TO4, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
+		3, 0, NPC_S_KPU9_TU_MPLS, 20, 1,
+		NPC_LID_LC, NPC_LT_LC_IP, NPC_F_IP_MPLS_IN_IP, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+		0, 1, NPC_S_NA, 0, 1,
+		NPC_LID_LC, NPC_LT_LC_IP, NPC_F_IP_UNK_PROTO, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 12, 0,
+		2, 0, NPC_S_KPU8_TCP, 0, 1,
+		NPC_LID_LC, NPC_LT_LC_IP, NPC_F_IP_HAS_OPTIONS, 0, 0xf,
+		0, 2,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 8, 10,
+		2, 0, NPC_S_KPU8_UDP, 0, 1,
+		NPC_LID_LC, NPC_LT_LC_IP, NPC_F_IP_HAS_OPTIONS, 0, 0xf,
+		0, 2,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+		2, 0, NPC_S_KPU8_SCTP, 0, 1,
+		NPC_LID_LC, NPC_LT_LC_IP, NPC_F_IP_HAS_OPTIONS, 0, 0xf,
+		0, 2,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+		2, 0, NPC_S_KPU8_ICMP, 0, 1,
+		NPC_LID_LC, NPC_LT_LC_IP, NPC_F_IP_HAS_OPTIONS, 0, 0xf,
+		0, 2,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+		2, 0, NPC_S_KPU8_IGMP, 0, 1,
+		NPC_LID_LC, NPC_LT_LC_IP, NPC_F_IP_HAS_OPTIONS, 0, 0xf,
+		0, 2,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+		0, 0, NPC_S_KPU8_ESP, 0, 1,
+		NPC_LID_LC, NPC_LT_LC_IP, NPC_F_IP_HAS_OPTIONS, 0, 0xf,
+		0, 2,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+		0, 0, NPC_S_KPU8_AH, 0, 1,
+		NPC_LID_LC, NPC_LT_LC_IP, NPC_F_IP_HAS_OPTIONS, 0, 0xf,
+		0, 2,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0,
+		2, 0, NPC_S_KPU8_GRE, 0, 1,
+		NPC_LID_LC, NPC_LT_LC_IP, NPC_F_IP_HAS_OPTIONS, 0, 0xf,
+		0, 2,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
+		6, 0, NPC_S_KPU12_TU_IP, 0, 1,
+		NPC_LID_LC, NPC_LT_LC_IP, NPC_F_IP_IP_IN_IP_HAS_OPTIONS, 0, 0xf,
+		0, 2,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
+		6, 0, NPC_S_KPU12_TU_IP6, 0, 1,
+		NPC_LID_LC, NPC_LT_LC_IP, NPC_F_IP_6TO4_HAS_OPTIONS, 0, 0xf,
+		0, 2,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
+		3, 0, NPC_S_KPU9_TU_MPLS, 20, 1,
+		NPC_LID_LC, NPC_LT_LC_IP, NPC_F_IP_MPLS_IN_IP_HAS_OPTIONS,
+		0, 0xf, 0, 2,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+		0, 1, NPC_S_NA, 0, 1,
+		NPC_LID_LC, NPC_LT_LC_IP, NPC_F_IP_UNK_PROTO_HAS_OPTIONS, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_LC, NPC_EC_IP_VER, 0, 0, 0,
+		0, 1, NPC_S_NA, 0, 1,
+		NPC_LID_LC, NPC_LT_LC_IP, 0, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+		0, 1, NPC_S_NA, 0, 1,
+		NPC_LID_LC, NPC_LT_LC_ARP, 0, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+		0, 1, NPC_S_NA, 0, 1,
+		NPC_LID_LC, NPC_LT_LC_RARP, 0, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+		0, 1, NPC_S_NA, 0, 1,
+		NPC_LID_LC, NPC_LT_LC_PTP, 0, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+		0, 1, NPC_S_NA, 0, 1,
+		NPC_LID_LC, NPC_LT_LC_FCOE, 0, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 12, 0,
+		2, 0, NPC_S_KPU8_TCP, 40, 1,
+		NPC_LID_LC, NPC_LT_LC_IP6, 0, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 8, 10,
+		2, 0, NPC_S_KPU8_UDP, 40, 1,
+		NPC_LID_LC, NPC_LT_LC_IP6, 0, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+		2, 0, NPC_S_KPU8_SCTP, 40, 1,
+		NPC_LID_LC, NPC_LT_LC_IP6, 0, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+		2, 0, NPC_S_KPU8_ICMP, 40, 1,
+		NPC_LID_LC, NPC_LT_LC_IP6, 0, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+		2, 0, NPC_S_KPU8_ICMP6, 40, 1,
+		NPC_LID_LC, NPC_LT_LC_IP6, 0, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+		2, 0, NPC_S_KPU8_ESP, 40, 1,
+		NPC_LID_LC, NPC_LT_LC_IP6, 0, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+		2, 0, NPC_S_KPU8_AH, 40, 1,
+		NPC_LID_LC, NPC_LT_LC_IP6, 0, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+		2, 0, NPC_S_KPU8_GRE, 40, 1,
+		NPC_LID_LC, NPC_LT_LC_IP6, 0, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
+		6, 0, NPC_S_KPU12_TU_IP6, 40, 1,
+		NPC_LID_LC, NPC_LT_LC_IP6, NPC_F_IP6_TUN_IP6, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
+		3, 0, NPC_S_KPU9_TU_MPLS, 40, 1,
+		NPC_LID_LC, NPC_LT_LC_IP6, NPC_F_IP6_MPLS_IN_IP, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0,
+		0, 0, NPC_S_KPU6_IP6_EXT, 0, 1,
+		NPC_LID_LC, NPC_LT_LC_IP6, NPC_F_IP6_HAS_EXT, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_LC, NPC_EC_IP6_VER, 0, 0, 0,
+		0, 1, NPC_S_NA, 0, 1,
+		NPC_LID_LC, NPC_LT_LC_IP6, 0, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
+		6, 0, NPC_S_KPU12_TU_IP, 4, 0,
+		NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
+		6, 0, NPC_S_KPU12_TU_IP6, 4, 0,
+		NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 16, 20,
+		5, 0, NPC_S_KPU11_TU_ETHER, 8, 0,
+		NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 16, 20,
+		5, 0, NPC_S_KPU11_TU_ETHER, 4, 0,
+		NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_LB, NPC_EC_L2_MPLS_2MANY, 0, 0, 0,
+		0, 1, NPC_S_NA, 0, 0,
+		NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
+		6, 0, NPC_S_KPU12_TU_IP, 0, 0,
+		NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
+		6, 0, NPC_S_KPU12_TU_IP6, 0, 0,
+		NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 16, 20,
+		5, 0, NPC_S_KPU11_TU_ETHER, 4, 0,
+		NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 16, 20,
+		5, 0, NPC_S_KPU11_TU_ETHER, 0, 0,
+		NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
+		6, 0, NPC_S_KPU12_TU_IP, 0, 0,
+		NPC_LID_LD, NPC_LT_NA, 0, 1, 0x3f,
+		0, 2,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
+		6, 0, NPC_S_KPU12_TU_IP6, 0, 0,
+		NPC_LID_LD, NPC_LT_NA, 0, 1, 0x3f,
+		0, 2,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 16, 20,
+		5, 0, NPC_S_KPU11_TU_ETHER, 0, 0,
+		NPC_LID_LD, NPC_LT_NA, 0, 1, 0x3f,
+		0, 2,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0,
+		5, 0, NPC_S_KPU11_TU_3RD_NSH, 0, 0,
+		NPC_LID_LD, NPC_LT_NA, 0, 1, 0x3f,
+		0, 2,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+		3, 0, NPC_S_KPU9_TU_MPLS, 0, 0,
+		NPC_LID_LD, NPC_LT_NA, 0, 1, 0x3f,
+		0, 2,
+	},
+	{
+		NPC_ERRLEV_LC, NPC_EC_UNK, 0, 0, 0,
+		0, 1, NPC_S_NA, 0, 0,
+		NPC_LID_LC, NPC_LT_NA, 0, 0, 0,
+		0, 0,
+	},
+};
+
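+/* KPU6: default action for the IPv6 extension header state. */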
+static struct npc_kpu_profile_action kpu6_action_entries[] = {
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+		0, 1, NPC_S_NA, 0, 0,
+		NPC_LID_LC, NPC_LT_NA, 0, 0, 0,
+		0, 0,
+	},
+};
+
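+/* KPU7: default action for the IPv6 extension header state. */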
+static struct npc_kpu_profile_action kpu7_action_entries[] = {
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+		0, 1, NPC_S_NA, 0, 0,
+		NPC_LID_LC, NPC_LT_NA, 0, 0, 0,
+		0, 0,
+	},
+};
+
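+/*
+ * KPU8 parse actions for the L4 matches above: TCP (by port), UDP
+ * tunnels (VXLAN/VXLAN-GPE/Geneve/GTP), GRE variants and other
+ * protocols; record the LD layer type/flags.
+ */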
+static struct npc_kpu_profile_action kpu8_action_entries[] = {
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+		7, 0, NPC_S_KPU16_HTTP_DATA, 20, 1,
+		NPC_LID_LD, NPC_LT_LD_TCP, NPC_F_TCP_HTTP, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+		7, 0, NPC_S_KPU16_HTTPS_DATA, 20, 1,
+		NPC_LID_LD, NPC_LT_LD_TCP, NPC_F_TCP_HTTPS, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+		7, 0, NPC_S_KPU16_PPTP_DATA, 20, 1,
+		NPC_LID_LD, NPC_LT_LD_TCP, NPC_F_TCP_PPTP, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+		7, 0, NPC_S_KPU16_TCP_DATA, 20, 1,
+		NPC_LID_LD, NPC_LT_LD_TCP, NPC_F_TCP_UNK_PORT, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+		7, 0, NPC_S_KPU16_HTTP_DATA, 0, 1,
+		NPC_LID_LD, NPC_LT_LD_TCP, NPC_F_TCP_HTTP_HAS_OPTIONS,
+		12, 0xf0, 1, 2,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+		7, 0, NPC_S_KPU16_HTTPS_DATA, 0, 1,
+		NPC_LID_LD, NPC_LT_LD_TCP, NPC_F_TCP_HTTPS_HAS_OPTIONS,
+		12, 0xf0, 1, 2,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+		7, 0, NPC_S_KPU16_PPTP_DATA, 0, 1,
+		NPC_LID_LD, NPC_LT_LD_TCP, NPC_F_TCP_PPTP_HAS_OPTIONS,
+		12, 0xf0, 1, 2,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+		7, 0, NPC_S_KPU16_TCP_DATA, 0, 1,
+		NPC_LID_LD, NPC_LT_LD_TCP, NPC_F_TCP_UNK_PORT_HAS_OPTIONS,
+		12, 0xf0, 1, 2,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 16, 20,
+		2, 0, NPC_S_KPU11_TU_ETHER, 16, 1,
+		NPC_LID_LD, NPC_LT_LD_UDP, NPC_F_UDP_VXLAN, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 16, 20,
+		2, 0, NPC_S_KPU11_TU_ETHER, 16, 1,
+		NPC_LID_LD, NPC_LT_LD_UDP, NPC_F_UDP_VXLAN_NOVNI, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_LD, NPC_EC_VXLAN, 0, 0, 0,
+		0, 1, NPC_S_NA, 0, 0,
+		NPC_LID_LD, NPC_LT_NA, 0, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
+		3, 0, NPC_S_KPU12_TU_IP, 16, 1,
+		NPC_LID_LD, NPC_LT_LD_UDP, NPC_F_UDP_VXLANGPE, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
+		3, 0, NPC_S_KPU12_TU_IP6, 16, 1,
+		NPC_LID_LD, NPC_LT_LD_UDP, NPC_F_UDP_VXLANGPE, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 16, 20,
+		2, 0, NPC_S_KPU11_TU_ETHER, 16, 1,
+		NPC_LID_LD, NPC_LT_LD_UDP, NPC_F_UDP_VXLANGPE, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0,
+		0, 0, NPC_S_KPU9_TU_NSH, 16, 1,
+		NPC_LID_LD, NPC_LT_LD_UDP, NPC_F_UDP_VXLANGPE_NSH, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
+		0, 0, NPC_S_KPU9_TU_MPLS_IN_GRE_VXLAN, 16, 1,
+		NPC_LID_LD, NPC_LT_LD_UDP, NPC_F_UDP_VXLANGPE_MPLS, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
+		3, 0, NPC_S_KPU12_TU_IP, 16, 1,
+		NPC_LID_LD, NPC_LT_LD_UDP, NPC_F_UDP_VXLANGPE_NOVNI, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
+		3, 0, NPC_S_KPU12_TU_IP6, 16, 1,
+		NPC_LID_LD, NPC_LT_LD_UDP, NPC_F_UDP_VXLANGPE_NOVNI, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 16, 20,
+		2, 0, NPC_S_KPU11_TU_ETHER, 16, 1,
+		NPC_LID_LD, NPC_LT_LD_UDP, NPC_F_UDP_VXLANGPE_NOVNI, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0,
+		0, 0, NPC_S_KPU9_TU_NSH, 16, 1,
+		NPC_LID_LD, NPC_LT_LD_UDP, NPC_F_UDP_VXLANGPE_NOVNI_NSH, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
+		0, 0, NPC_S_KPU9_TU_MPLS_IN_GRE_VXLAN, 16, 1,
+		NPC_LID_LD, NPC_LT_LD_UDP, NPC_F_UDP_VXLANGPE_NOVNI_MPLS, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+		0, 1, NPC_S_NA, 0, 1,
+		NPC_LID_LD, NPC_LT_LD_UDP, NPC_F_UDP_VXLANGPE_UNK, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+		0, 1, NPC_S_NA, 0, 1,
+		NPC_LID_LD, NPC_LT_LD_UDP, NPC_F_UDP_VXLANGPE_NONP, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 16, 20,
+		2, 0, NPC_S_KPU11_TU_ETHER, 16, 1,
+		NPC_LID_LD, NPC_LT_LD_UDP, NPC_F_UDP_GENEVE, 8, 0x3f,
+		0, 2,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 16, 20,
+		2, 0, NPC_S_KPU11_TU_ETHER, 16, 1,
+		NPC_LID_LD, NPC_LT_LD_UDP, NPC_F_UDP_GENEVE_OAM, 8, 0x3f,
+		0, 2,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 16, 20,
+		2, 0, NPC_S_KPU11_TU_ETHER, 16, 1,
+		NPC_LID_LD, NPC_LT_LD_UDP, NPC_F_UDP_GENEVE_CRI_OPT, 8, 0x3f,
+		0, 2,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 16, 20,
+		2, 0, NPC_S_KPU11_TU_ETHER, 16, 1,
+		NPC_LID_LD, NPC_LT_LD_UDP, NPC_F_UDP_GENEVE_OAM_CRI_OPT,
+		8, 0x3f, 0, 2,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
+		3, 0, NPC_S_KPU12_TU_IP, 16, 1,
+		NPC_LID_LD, NPC_LT_LD_UDP, NPC_F_UDP_GENEVE, 8, 0x3f,
+		0, 2,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
+		3, 0, NPC_S_KPU12_TU_IP, 16, 1,
+		NPC_LID_LD, NPC_LT_LD_UDP, NPC_F_UDP_GENEVE_OAM,
+		8, 0x3f, 0, 2,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
+		3, 0, NPC_S_KPU12_TU_IP, 16, 1,
+		NPC_LID_LD, NPC_LT_LD_UDP, NPC_F_UDP_GENEVE_CRI_OPT,
+		8, 0x3f, 0, 2,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
+		3, 0, NPC_S_KPU12_TU_IP, 16, 1,
+		NPC_LID_LD, NPC_LT_LD_UDP, NPC_F_UDP_GENEVE_OAM_CRI_OPT,
+		8, 0x3f, 0, 2,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
+		3, 0, NPC_S_KPU12_TU_IP6, 16, 1,
+		NPC_LID_LD, NPC_LT_LD_UDP, NPC_F_UDP_GENEVE, 8, 0x3f,
+		0, 2,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
+		3, 0, NPC_S_KPU12_TU_IP6, 16, 1,
+		NPC_LID_LD, NPC_LT_LD_UDP, NPC_F_UDP_GENEVE_OAM, 8, 0x3f,
+		0, 2,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
+		3, 0, NPC_S_KPU12_TU_IP6, 16, 1,
+		NPC_LID_LD, NPC_LT_LD_UDP, NPC_F_UDP_GENEVE_CRI_OPT,
+		8, 0x3f, 0, 2,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
+		3, 0, NPC_S_KPU12_TU_IP6, 16, 1,
+		NPC_LID_LD, NPC_LT_LD_UDP, NPC_F_UDP_GENEVE_OAM_CRI_OPT,
+		8, 0x3f, 0, 2,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+		0, 1, NPC_S_NA, 0, 1,
+		NPC_LID_LD, NPC_LT_LD_UDP, NPC_F_UDP_GTP_GTPC, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
+		3, 0, NPC_S_KPU12_TU_IP, 16, 1,
+		NPC_LID_LD, NPC_LT_LD_UDP, NPC_F_UDP_GTP_GTPU_G_PDU, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+		0, 1, NPC_S_NA, 0, 1,
+		NPC_LID_LD, NPC_LT_LD_UDP, NPC_F_UDP_GTP_GTPU_UNK, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+		7, 0, NPC_S_KPU16_UDP_DATA, 8, 1,
+		NPC_LID_LD, NPC_LT_LD_UDP, NPC_F_UDP_UNK_PORT, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+		0, 1, NPC_S_NA, 0, 1,
+		NPC_LID_LD, NPC_LT_LD_SCTP, 0, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+		0, 1, NPC_S_NA, 0, 1,
+		NPC_LID_LD, NPC_LT_LD_ICMP, 0, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+		0, 1, NPC_S_NA, 0, 1,
+		NPC_LID_LD, NPC_LT_LD_IGMP, 0, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+		0, 1, NPC_S_NA, 0, 1,
+		NPC_LID_LD, NPC_LT_LD_ICMP6, 0, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+		0, 1, NPC_S_NA, 0, 1,
+		NPC_LID_LD, NPC_LT_LD_ESP, 0, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+		0, 1, NPC_S_NA, 0, 1,
+		NPC_LID_LD, NPC_LT_LD_AH, 0, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 16, 20,
+		2, 0, NPC_S_KPU11_TU_ETHER, 8, 1,
+		NPC_LID_LD, NPC_LT_LD_GRE, NPC_F_GRE_NVGRE, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_LD, NPC_EC_NVGRE, 0, 0, 0,
+		0, 1, NPC_S_NA, 0, 0,
+		NPC_LID_LD, NPC_LT_NA, 0, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
+		0, 0, NPC_S_KPU9_TU_MPLS_IN_GRE_VXLAN, 4, 1,
+		NPC_LID_LD, NPC_LT_LD_GRE_MPLS, 0, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
+		0, 0, NPC_S_KPU9_TU_MPLS_IN_GRE_VXLAN, 8, 1,
+		NPC_LID_LD, NPC_LT_LD_GRE_MPLS, NPC_F_GRE_HAS_CSUM, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
+		0, 0, NPC_S_KPU9_TU_MPLS_IN_GRE_VXLAN, 8, 1,
+		NPC_LID_LD, NPC_LT_LD_GRE_MPLS, NPC_F_GRE_HAS_KEY, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
+		0, 0, NPC_S_KPU9_TU_MPLS_IN_GRE_VXLAN, 8, 1,
+		NPC_LID_LD, NPC_LT_LD_GRE_MPLS, NPC_F_GRE_HAS_SEQ, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
+		0, 0, NPC_S_KPU9_TU_MPLS_IN_GRE_VXLAN, 12, 1,
+		NPC_LID_LD, NPC_LT_LD_GRE_MPLS, NPC_F_GRE_HAS_CSUM_KEY, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
+		0, 0, NPC_S_KPU9_TU_MPLS_IN_GRE_VXLAN, 12, 1,
+		NPC_LID_LD, NPC_LT_LD_GRE_MPLS, NPC_F_GRE_HAS_CSUM_SEQ, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
+		0, 0, NPC_S_KPU9_TU_MPLS_IN_GRE_VXLAN, 12, 1,
+		NPC_LID_LD, NPC_LT_LD_GRE_MPLS, NPC_F_GRE_HAS_KEY_SEQ, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
+		0, 0, NPC_S_KPU9_TU_MPLS_IN_GRE_VXLAN, 16, 1,
+		NPC_LID_LD, NPC_LT_LD_GRE_MPLS, NPC_F_GRE_HAS_CSUM_KEY_SEQ,
+		0, 0, 0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
+		0, 0, NPC_S_KPU9_TU_MPLS_IN_GRE_VXLAN, 4, 1,
+		NPC_LID_LD, NPC_LT_LD_GRE_MPLS, 0, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
+		0, 0, NPC_S_KPU9_TU_MPLS_IN_GRE_VXLAN, 8, 1,
+		NPC_LID_LD, NPC_LT_LD_GRE_MPLS, NPC_F_GRE_HAS_CSUM, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
+		0, 0, NPC_S_KPU9_TU_MPLS_IN_GRE_VXLAN, 8, 1,
+		NPC_LID_LD, NPC_LT_LD_GRE_MPLS, NPC_F_GRE_HAS_KEY, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
+		0, 0, NPC_S_KPU9_TU_MPLS_IN_GRE_VXLAN, 8, 1,
+		NPC_LID_LD, NPC_LT_LD_GRE_MPLS, NPC_F_GRE_HAS_SEQ, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
+		0, 0, NPC_S_KPU9_TU_MPLS_IN_GRE_VXLAN, 12, 1,
+		NPC_LID_LD, NPC_LT_LD_GRE_MPLS, NPC_F_GRE_HAS_CSUM_KEY, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
+		0, 0, NPC_S_KPU9_TU_MPLS_IN_GRE_VXLAN, 12, 1,
+		NPC_LID_LD, NPC_LT_LD_GRE_MPLS, NPC_F_GRE_HAS_CSUM_SEQ, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
+		0, 0, NPC_S_KPU9_TU_MPLS_IN_GRE_VXLAN, 12, 1,
+		NPC_LID_LD, NPC_LT_LD_GRE_MPLS, NPC_F_GRE_HAS_KEY_SEQ, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
+		0, 0, NPC_S_KPU9_TU_MPLS_IN_GRE_VXLAN, 16, 1,
+		NPC_LID_LD, NPC_LT_LD_GRE_MPLS, NPC_F_GRE_HAS_CSUM_KEY_SEQ,
+		0, 0, 0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0,
+		0, 0, NPC_S_KPU9_TU_NSH, 4, 1,
+		NPC_LID_LD, NPC_LT_LD_GRE_NSH, 0, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0,
+		0, 0, NPC_S_KPU9_TU_NSH, 8, 1,
+		NPC_LID_LD, NPC_LT_LD_GRE_NSH, NPC_F_GRE_HAS_CSUM, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0,
+		0, 0, NPC_S_KPU9_TU_NSH, 8, 1,
+		NPC_LID_LD, NPC_LT_LD_GRE_NSH, NPC_F_GRE_HAS_KEY, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0,
+		0, 0, NPC_S_KPU9_TU_NSH, 8, 1,
+		NPC_LID_LD, NPC_LT_LD_GRE_NSH, NPC_F_GRE_HAS_SEQ, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0,
+		0, 0, NPC_S_KPU9_TU_NSH, 12, 1,
+		NPC_LID_LD, NPC_LT_LD_GRE_NSH, NPC_F_GRE_HAS_CSUM_KEY, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0,
+		0, 0, NPC_S_KPU9_TU_NSH, 12, 1,
+		NPC_LID_LD, NPC_LT_LD_GRE_NSH, NPC_F_GRE_HAS_CSUM_SEQ, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0,
+		0, 0, NPC_S_KPU9_TU_NSH, 12, 1,
+		NPC_LID_LD, NPC_LT_LD_GRE_NSH, NPC_F_GRE_HAS_KEY_SEQ, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0,
+		0, 0, NPC_S_KPU9_TU_NSH, 16, 1,
+		NPC_LID_LD, NPC_LT_LD_GRE_NSH, NPC_F_GRE_HAS_CSUM_KEY_SEQ, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
+		3, 0, NPC_S_KPU12_TU_IP, 4, 1,
+		NPC_LID_LD, NPC_LT_LD_GRE, 0, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
+		3, 0, NPC_S_KPU12_TU_IP, 8, 1,
+		NPC_LID_LD, NPC_LT_LD_GRE, NPC_F_GRE_HAS_CSUM, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
+		3, 0, NPC_S_KPU12_TU_IP, 8, 1,
+		NPC_LID_LD, NPC_LT_LD_GRE, NPC_F_GRE_HAS_KEY, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
+		3, 0, NPC_S_KPU12_TU_IP, 8, 1,
+		NPC_LID_LD, NPC_LT_LD_GRE, NPC_F_GRE_HAS_SEQ, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
+		3, 0, NPC_S_KPU12_TU_IP, 12, 1,
+		NPC_LID_LD, NPC_LT_LD_GRE, NPC_F_GRE_HAS_CSUM_KEY, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
+		3, 0, NPC_S_KPU12_TU_IP, 12, 1,
+		NPC_LID_LD, NPC_LT_LD_GRE, NPC_F_GRE_HAS_CSUM_SEQ, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
+		3, 0, NPC_S_KPU12_TU_IP, 12, 1,
+		NPC_LID_LD, NPC_LT_LD_GRE, NPC_F_GRE_HAS_KEY_SEQ, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
+		3, 0, NPC_S_KPU12_TU_IP, 16, 1,
+		NPC_LID_LD, NPC_LT_LD_GRE, NPC_F_GRE_HAS_CSUM_KEY_SEQ, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
+		3, 0, NPC_S_KPU12_TU_IP6, 4, 1,
+		NPC_LID_LD, NPC_LT_LD_GRE, 0, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
+		3, 0, NPC_S_KPU12_TU_IP6, 8, 1,
+		NPC_LID_LD, NPC_LT_LD_GRE, NPC_F_GRE_HAS_CSUM, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
+		3, 0, NPC_S_KPU12_TU_IP6, 8, 1,
+		NPC_LID_LD, NPC_LT_LD_GRE, NPC_F_GRE_HAS_KEY, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
+		3, 0, NPC_S_KPU12_TU_IP6, 8, 1,
+		NPC_LID_LD, NPC_LT_LD_GRE, NPC_F_GRE_HAS_SEQ, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
+		3, 0, NPC_S_KPU12_TU_IP6, 12, 1,
+		NPC_LID_LD, NPC_LT_LD_GRE, NPC_F_GRE_HAS_CSUM_KEY, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
+		3, 0, NPC_S_KPU12_TU_IP6, 12, 1,
+		NPC_LID_LD, NPC_LT_LD_GRE, NPC_F_GRE_HAS_CSUM_SEQ, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
+		3, 0, NPC_S_KPU12_TU_IP6, 12, 1,
+		NPC_LID_LD, NPC_LT_LD_GRE, NPC_F_GRE_HAS_KEY_SEQ, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
+		3, 0, NPC_S_KPU12_TU_IP6, 16, 1,
+		NPC_LID_LD, NPC_LT_LD_GRE, NPC_F_GRE_HAS_CSUM_KEY_SEQ, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+		0, 1, NPC_S_NA, 0, 1,
+		NPC_LID_LD, NPC_LT_LD_GRE, NPC_F_GRE_HAS_ROUTE, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+		0, 1, NPC_S_NA, 0, 1,
+		NPC_LID_LD, NPC_LT_LD_GRE, NPC_F_GRE_UNK_PROTO, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_LD, NPC_EC_GRE, 0, 0, 0,
+		0, 1, NPC_S_NA, 0, 0,
+		NPC_LID_LD, NPC_LT_NA, 0, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+		2, 0, NPC_S_KPU11_TU_PPP, 8, 1,
+		NPC_LID_LD, NPC_LT_LD_GRE, NPC_F_GRE_VER1, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+		2, 0, NPC_S_KPU11_TU_PPP, 12, 1,
+		NPC_LID_LD, NPC_LT_LD_GRE, NPC_F_GRE_VER1_HAS_SEQ, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+		2, 0, NPC_S_KPU11_TU_PPP, 12, 1,
+		NPC_LID_LD, NPC_LT_LD_GRE, NPC_F_GRE_VER1_HAS_ACK, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+		2, 0, NPC_S_KPU11_TU_PPP, 16, 1,
+		NPC_LID_LD, NPC_LT_LD_GRE, NPC_F_GRE_VER1_HAS_SEQ_ACK, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+		0, 1, NPC_S_NA, 0, 1,
+		NPC_LID_LD, NPC_LT_LD_GRE, NPC_F_GRE_VER1_UNK_PROTO, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_LD, NPC_EC_GRE_VER1, 0, 0, 0,
+		0, 1, NPC_S_NA, 0, 0,
+		NPC_LID_LD, NPC_LT_NA, 0, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_LD, NPC_EC_UNK, 0, 0, 0,
+		0, 1, NPC_S_NA, 0, 0,
+		NPC_LID_LD, NPC_LT_NA, 0, 0, 0,
+		0, 0,
+	},
+};
+
+static struct npc_kpu_profile_action kpu9_action_entries[] = {
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+		0, 0, NPC_S_KPU10_TU_MPLS_PL, 4, 0,
+		NPC_LID_LD, NPC_LT_NA, 0, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+		0, 0, NPC_S_KPU10_TU_MPLS_PL, 8, 0,
+		NPC_LID_LD, NPC_LT_NA, NPC_F_MPLS_2_LABELS, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+		0, 0, NPC_S_KPU10_TU_MPLS_PL, 12, 0,
+		NPC_LID_LD, NPC_LT_NA, NPC_F_MPLS_3_LABELS, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 4, 0,
+		0, 0, NPC_S_KPU10_TU_MPLS, 12, 0,
+		NPC_LID_LD, NPC_LT_NA, NPC_F_MPLS_4_LABELS, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+		0, 0, NPC_S_KPU10_TU_MPLS_PL, 4, 1,
+		NPC_LID_LD, NPC_LT_LD_TU_MPLS, 0, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+		0, 0, NPC_S_KPU10_TU_MPLS_PL, 8, 1,
+		NPC_LID_LD, NPC_LT_LD_TU_MPLS, NPC_F_MPLS_2_LABELS, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+		0, 0, NPC_S_KPU10_TU_MPLS_PL, 12, 1,
+		NPC_LID_LD, NPC_LT_LD_TU_MPLS, NPC_F_MPLS_3_LABELS, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 4, 0,
+		0, 0, NPC_S_KPU10_TU_MPLS, 12, 1,
+		NPC_LID_LD, NPC_LT_LD_TU_MPLS, NPC_F_MPLS_4_LABELS, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
+		2, 0, NPC_S_KPU12_TU_IP, 0, 0,
+		NPC_LID_LD, NPC_LT_NA, 0, 1, 0x3f,
+		0, 2,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
+		2, 0, NPC_S_KPU12_TU_IP6, 0, 0,
+		NPC_LID_LD, NPC_LT_NA, 0, 1, 0x3f,
+		0, 2,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 16, 20,
+		1, 0, NPC_S_KPU11_TU_ETHER, 0, 0,
+		NPC_LID_LD, NPC_LT_NA, 0, 1, 0x3f,
+		0, 2,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0,
+		0, 0, NPC_S_KPU10_TU_NSH, 0, 0,
+		NPC_LID_LD, NPC_LT_NA, 0, 1, 0x3f,
+		0, 2,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+		1, 0, NPC_S_KPU11_TU_MPLS_IN_NSH, 0, 0,
+		NPC_LID_LD, NPC_LT_NA, 0, 1, 0x3f,
+		0, 2,
+	},
+	{
+		NPC_ERRLEV_LE, NPC_EC_UNK, 0, 0, 0,
+		0, 1, NPC_S_NA, 0, 0,
+		NPC_LID_LD, NPC_LT_NA, 0, 0, 0,
+		0, 0,
+	},
+};
+
+static struct npc_kpu_profile_action kpu10_action_entries[] = {
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
+		1, 0, NPC_S_KPU12_TU_IP, 4, 0,
+		NPC_LID_LD, NPC_LT_NA, 0, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
+		1, 0, NPC_S_KPU12_TU_IP6, 4, 0,
+		NPC_LID_LD, NPC_LT_NA, 0, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 16, 20,
+		0, 0, NPC_S_KPU11_TU_ETHER, 8, 0,
+		NPC_LID_LD, NPC_LT_NA, 0, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 16, 20,
+		0, 0, NPC_S_KPU11_TU_ETHER, 4, 0,
+		NPC_LID_LD, NPC_LT_NA, 0, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_LB, NPC_EC_L2_MPLS_2MANY, 0, 0, 0,
+		0, 1, NPC_S_NA, 0, 0,
+		NPC_LID_LD, NPC_LT_NA, 0, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
+		1, 0, NPC_S_KPU12_TU_IP, 0, 0,
+		NPC_LID_LD, NPC_LT_NA, 0, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
+		1, 0, NPC_S_KPU12_TU_IP6, 0, 0,
+		NPC_LID_LD, NPC_LT_NA, 0, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 16, 20,
+		0, 0, NPC_S_KPU11_TU_ETHER, 4, 0,
+		NPC_LID_LD, NPC_LT_NA, 0, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 16, 20,
+		0, 0, NPC_S_KPU11_TU_ETHER, 0, 0,
+		NPC_LID_LD, NPC_LT_NA, 0, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
+		1, 0, NPC_S_KPU12_TU_IP, 0, 0,
+		NPC_LID_LD, NPC_LT_NA, 0, 1, 0x3f,
+		0, 2,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
+		1, 0, NPC_S_KPU12_TU_IP6, 0, 0,
+		NPC_LID_LD, NPC_LT_NA, 0, 1, 0x3f,
+		0, 2,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 16, 20,
+		0, 0, NPC_S_KPU11_TU_ETHER, 0, 0,
+		NPC_LID_LD, NPC_LT_NA, 0, 1, 0x3f,
+		0, 2,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0,
+		0, 0, NPC_S_KPU11_TU_3RD_NSH, 0, 0,
+		NPC_LID_LD, NPC_LT_NA, 0, 1, 0x3f,
+		0, 2,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+		0, 0, NPC_S_KPU11_TU_MPLS_IN_NSH, 0, 0,
+		NPC_LID_LD, NPC_LT_NA, 0, 1, 0x3f,
+		0, 2,
+	},
+	{
+		NPC_ERRLEV_LE, NPC_EC_UNK, 0, 0, 0,
+		0, 1, NPC_S_NA, 0, 0,
+		NPC_LID_LD, NPC_LT_NA, 0, 0, 0,
+		0, 0,
+	},
+};
+
+static struct npc_kpu_profile_action kpu11_action_entries[] = {
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
+		0, 0, NPC_S_KPU12_TU_IP, 14, 1,
+		NPC_LID_LE, NPC_LT_LE_TU_ETHER, 0, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
+		0, 0, NPC_S_KPU12_TU_IP6, 14, 1,
+		NPC_LID_LE, NPC_LT_LE_TU_ETHER, 0, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+		0, 0, NPC_S_KPU12_TU_ARP, 14, 1,
+		NPC_LID_LE, NPC_LT_LE_TU_ETHER, 0, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
+		0, 0, NPC_S_KPU12_TU_IP, 18, 1,
+		NPC_LID_LE, NPC_LT_LE_TU_ETHER, NPC_F_TU_ETHER_CTAG, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
+		0, 0, NPC_S_KPU12_TU_IP6, 18, 1,
+		NPC_LID_LE, NPC_LT_LE_TU_ETHER, NPC_F_TU_ETHER_CTAG, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+		0, 0, NPC_S_KPU12_TU_ARP, 18, 1,
+		NPC_LID_LE, NPC_LT_LE_TU_ETHER, NPC_F_TU_ETHER_CTAG, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+		0, 1, NPC_S_NA, 0, 1,
+		NPC_LID_LE, NPC_LT_LE_TU_ETHER, NPC_F_TU_ETHER_CTAG_UNK, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
+		0, 0, NPC_S_KPU12_TU_IP, 22, 1,
+		NPC_LID_LE, NPC_LT_LE_TU_ETHER, NPC_F_TU_ETHER_STAG_CTAG, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
+		0, 0, NPC_S_KPU12_TU_IP6, 22, 1,
+		NPC_LID_LE, NPC_LT_LE_TU_ETHER, NPC_F_TU_ETHER_STAG_CTAG, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+		0, 0, NPC_S_KPU12_TU_ARP, 22, 1,
+		NPC_LID_LE, NPC_LT_LE_TU_ETHER, NPC_F_TU_ETHER_STAG_CTAG, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+		0, 1, NPC_S_NA, 0, 1,
+		NPC_LID_LE, NPC_LT_LE_TU_ETHER,
+		NPC_F_TU_ETHER_STAG_CTAG_UNK, 0, 0, 0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
+		0, 0, NPC_S_KPU12_TU_IP, 18, 1,
+		NPC_LID_LE, NPC_LT_LE_TU_ETHER, NPC_F_TU_ETHER_STAG, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
+		0, 0, NPC_S_KPU12_TU_IP6, 18, 1,
+		NPC_LID_LE, NPC_LT_LE_TU_ETHER, NPC_F_TU_ETHER_STAG, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+		0, 0, NPC_S_KPU12_TU_ARP, 18, 1,
+		NPC_LID_LE, NPC_LT_LE_TU_ETHER, NPC_F_TU_ETHER_STAG, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+		0, 1, NPC_S_NA, 0, 1,
+		NPC_LID_LE, NPC_LT_LE_TU_ETHER, NPC_F_TU_ETHER_STAG_UNK, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
+		0, 0, NPC_S_KPU12_TU_IP, 22, 1,
+		NPC_LID_LE, NPC_LT_LE_TU_ETHER, NPC_F_TU_ETHER_QINQ_CTAG, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
+		0, 0, NPC_S_KPU12_TU_IP6, 22, 1,
+		NPC_LID_LE, NPC_LT_LE_TU_ETHER, NPC_F_TU_ETHER_QINQ_CTAG, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+		0, 0, NPC_S_KPU12_TU_ARP, 22, 1,
+		NPC_LID_LE, NPC_LT_LE_TU_ETHER, NPC_F_TU_ETHER_QINQ_CTAG, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+		0, 1, NPC_S_NA, 0, 1,
+		NPC_LID_LE, NPC_LT_LE_TU_ETHER,
+		NPC_F_TU_ETHER_QINQ_CTAG_UNK, 0, 0, 0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
+		0, 0, NPC_S_KPU12_TU_IP, 18, 1,
+		NPC_LID_LE, NPC_LT_LE_TU_ETHER, NPC_F_TU_ETHER_QINQ, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
+		0, 0, NPC_S_KPU12_TU_IP6, 18, 1,
+		NPC_LID_LE, NPC_LT_LE_TU_ETHER, NPC_F_TU_ETHER_QINQ, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+		0, 0, NPC_S_KPU12_TU_ARP, 18, 1,
+		NPC_LID_LE, NPC_LT_LE_TU_ETHER, NPC_F_TU_ETHER_QINQ, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+		0, 1, NPC_S_NA, 0, 1,
+		NPC_LID_LE, NPC_LT_LE_TU_ETHER, NPC_F_TU_ETHER_QINQ_UNK, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+		0, 1, NPC_S_NA, 0, 1,
+		NPC_LID_LE, NPC_LT_LE_TU_ETHER, NPC_F_TU_ETHER_UNK, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+		0, 1, NPC_S_NA, 0, 1,
+		NPC_LID_LE, NPC_LT_LE_TU_PPP, 0, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+		0, 1, NPC_S_NA, 0, 1,
+		NPC_LID_LE, NPC_LT_LE_TU_MPLS_IN_NSH, 0, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+		0, 1, NPC_S_NA, 0, 1,
+		NPC_LID_LE, NPC_LT_LE_TU_3RD_NSH, 0, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_LE, NPC_EC_UNK, 0, 0, 0,
+		0, 1, NPC_S_NA, 0, 0,
+		NPC_LID_LE, NPC_LT_NA, 0, 0, 0,
+		0, 0,
+	},
+};
+
+static struct npc_kpu_profile_action kpu12_action_entries[] = {
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 12, 0,
+		2, 0, NPC_S_KPU15_TU_TCP, 20, 1,
+		NPC_LID_LF, NPC_LT_LF_TU_IP, 0, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0,
+		2, 0, NPC_S_KPU15_TU_UDP, 20, 1,
+		NPC_LID_LF, NPC_LT_LF_TU_IP, 0, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+		2, 0, NPC_S_KPU15_TU_SCTP, 20, 1,
+		NPC_LID_LF, NPC_LT_LF_TU_IP, 0, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+		2, 0, NPC_S_KPU15_TU_ICMP, 20, 1,
+		NPC_LID_LF, NPC_LT_LF_TU_IP, 0, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+		2, 0, NPC_S_KPU15_TU_IGMP, 20, 1,
+		NPC_LID_LF, NPC_LT_LF_TU_IP, 0, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+		2, 0, NPC_S_KPU15_TU_ESP, 20, 1,
+		NPC_LID_LF, NPC_LT_LF_TU_IP, 0, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+		2, 0, NPC_S_KPU15_TU_AH, 20, 1,
+		NPC_LID_LF, NPC_LT_LF_TU_IP, 0, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+		0, 1, NPC_S_NA, 0, 1,
+		NPC_LID_LF, NPC_LT_LF_TU_IP, NPC_F_IP_UNK_PROTO, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 12, 0,
+		2, 0, NPC_S_KPU15_TU_TCP, 0, 1,
+		NPC_LID_LF, NPC_LT_LF_TU_IP, NPC_F_IP_HAS_OPTIONS, 0, 0xf,
+		0, 2,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0,
+		2, 0, NPC_S_KPU15_TU_UDP, 0, 1,
+		NPC_LID_LF, NPC_LT_LF_TU_IP, NPC_F_IP_HAS_OPTIONS, 0, 0xf,
+		0, 2,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+		2, 0, NPC_S_KPU15_TU_SCTP, 0, 1,
+		NPC_LID_LF, NPC_LT_LF_TU_IP, NPC_F_IP_HAS_OPTIONS, 0, 0xf,
+		0, 2,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+		2, 0, NPC_S_KPU15_TU_ICMP, 0, 1,
+		NPC_LID_LF, NPC_LT_LF_TU_IP, NPC_F_IP_HAS_OPTIONS, 0, 0xf,
+		0, 2,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+		2, 0, NPC_S_KPU15_TU_IGMP, 0, 1,
+		NPC_LID_LF, NPC_LT_LF_TU_IP, NPC_F_IP_HAS_OPTIONS, 0, 0xf,
+		0, 2,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+		2, 0, NPC_S_KPU15_TU_ESP, 0, 1,
+		NPC_LID_LF, NPC_LT_LF_TU_IP, NPC_F_IP_HAS_OPTIONS, 0, 0xf,
+		0, 2,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+		2, 0, NPC_S_KPU15_TU_AH, 0, 1,
+		NPC_LID_LF, NPC_LT_LF_TU_IP, NPC_F_IP_HAS_OPTIONS, 0, 0xf,
+		0, 2,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+		0, 1, NPC_S_NA, 0, 1,
+		NPC_LID_LF, NPC_LT_LF_TU_IP,
+		NPC_F_IP_UNK_PROTO_HAS_OPTIONS, 0, 0, 0, 0,
+	},
+	{
+		NPC_ERRLEV_LF, NPC_EC_IP_VER, 0, 0, 0,
+		0, 1, NPC_S_NA, 0, 1,
+		NPC_LID_LF, NPC_LT_LF_TU_IP, 0, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+		0, 1, NPC_S_NA, 0, 1,
+		NPC_LID_LF, NPC_LT_LF_TU_ARP, 0, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 12, 0,
+		2, 0, NPC_S_KPU15_TU_TCP, 40, 1,
+		NPC_LID_LF, NPC_LT_LF_TU_IP6, 0, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0,
+		2, 0, NPC_S_KPU15_TU_UDP, 40, 1,
+		NPC_LID_LF, NPC_LT_LF_TU_IP6, 0, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+		2, 0, NPC_S_KPU15_TU_SCTP, 40, 1,
+		NPC_LID_LF, NPC_LT_LF_TU_IP6, 0, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+		2, 0, NPC_S_KPU15_TU_ICMP, 40, 1,
+		NPC_LID_LF, NPC_LT_LF_TU_IP6, 0, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+		2, 0, NPC_S_KPU15_TU_ICMP6, 40, 1,
+		NPC_LID_LF, NPC_LT_LF_TU_IP6, 0, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+		2, 0, NPC_S_KPU15_TU_ESP, 40, 1,
+		NPC_LID_LC, NPC_LT_LF_TU_IP6, 0, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+		2, 0, NPC_S_KPU15_TU_AH, 40, 1,
+		NPC_LID_LC, NPC_LT_LF_TU_IP6, 0, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0,
+		0, 0, NPC_S_KPU13_TU_IP6_EXT, 0, 1,
+		NPC_LID_LF, NPC_LT_LF_TU_IP6, NPC_F_IP6_HAS_EXT, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_LF, NPC_EC_IP6_VER, 0, 0, 0,
+		0, 1, NPC_S_NA, 0, 1,
+		NPC_LID_LF, NPC_LT_LF_TU_IP6, 0, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_LF, NPC_EC_UNK, 0, 0, 0,
+		0, 1, NPC_S_NA, 0, 0,
+		NPC_LID_LF, NPC_LT_NA, 0, 0, 0,
+		0, 0,
+	},
+};
+
+static struct npc_kpu_profile_action kpu13_action_entries[] = {
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+		0, 1, NPC_S_NA, 0, 0,
+		NPC_LID_LC, NPC_LT_NA, 0, 0, 0,
+		0, 0,
+	},
+};
+
+static struct npc_kpu_profile_action kpu14_action_entries[] = {
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+		0, 1, NPC_S_NA, 0, 0,
+		NPC_LID_LC, NPC_LT_NA, 0, 0, 0,
+		0, 0,
+	},
+};
+
+static struct npc_kpu_profile_action kpu15_action_entries[] = {
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+		0, 0, NPC_S_KPU16_HTTP_DATA, 20, 1,
+		NPC_LID_LG, NPC_LT_LG_TU_TCP, NPC_F_TCP_HTTP, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+		0, 0, NPC_S_KPU16_HTTPS_DATA, 20, 1,
+		NPC_LID_LG, NPC_LT_LG_TU_TCP, NPC_F_TCP_HTTPS, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+		0, 0, NPC_S_KPU16_PPTP_DATA, 20, 1,
+		NPC_LID_LD, NPC_LT_LG_TU_TCP, NPC_F_TCP_PPTP, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+		0, 0, NPC_S_KPU16_TCP_DATA, 20, 1,
+		NPC_LID_LG, NPC_LT_LG_TU_TCP, NPC_F_TCP_UNK_PORT, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+		0, 0, NPC_S_KPU16_HTTP_DATA, 0, 1,
+		NPC_LID_LG, NPC_LT_LG_TU_TCP, NPC_F_TCP_HTTP_HAS_OPTIONS,
+		12, 0xf0, 1, 2,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+		0, 0, NPC_S_KPU16_HTTPS_DATA, 0, 1,
+		NPC_LID_LG, NPC_LT_LG_TU_TCP, NPC_F_TCP_HTTPS_HAS_OPTIONS,
+		12, 0xf0, 1, 2,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+		0, 0, NPC_S_KPU16_PPTP_DATA, 0, 1,
+		NPC_LID_LG, NPC_LT_LG_TU_TCP, NPC_F_TCP_PPTP_HAS_OPTIONS,
+		12, 0xf0, 1, 2,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+		0, 0, NPC_S_KPU16_TCP_DATA, 0, 1,
+		NPC_LID_LG, NPC_LT_LG_TU_TCP, NPC_F_TCP_UNK_PORT_HAS_OPTIONS,
+		12, 0xf0, 1, 2,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+		0, 0, NPC_S_KPU16_UDP_DATA, 8, 1,
+		NPC_LID_LG, NPC_LT_LG_TU_UDP, NPC_F_UDP_UNK_PORT, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+		0, 1, NPC_S_NA, 0, 1,
+		NPC_LID_LG, NPC_LT_LG_TU_SCTP, 0, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+		0, 1, NPC_S_NA, 0, 1,
+		NPC_LID_LG, NPC_LT_LG_TU_ICMP, 0, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+		0, 1, NPC_S_NA, 0, 1,
+		NPC_LID_LG, NPC_LT_LG_TU_IGMP, 0, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+		0, 1, NPC_S_NA, 0, 1,
+		NPC_LID_LG, NPC_LT_LG_TU_ICMP6, 0, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+		0, 1, NPC_S_NA, 0, 1,
+		NPC_LID_LG, NPC_LT_LG_TU_ESP, 0, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+		0, 1, NPC_S_NA, 0, 1,
+		NPC_LID_LG, NPC_LT_LG_TU_AH, 0, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_LG, NPC_EC_L4, 0, 0, 0,
+		0, 1, NPC_S_NA, 0, 0,
+		NPC_LID_LG, NPC_LT_NA, 0, 0, 0,
+		0, 0,
+	},
+};
+
+static struct npc_kpu_profile_action kpu16_action_entries[] = {
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+		0, 1, NPC_S_NA, 0, 1,
+		NPC_LID_LH, NPC_LT_LH_TCP_DATA, 0, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+		0, 1, NPC_S_NA, 0, 1,
+		NPC_LID_LH, NPC_LT_LH_HTTP_DATA, 0, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+		0, 1, NPC_S_NA, 0, 1,
+		NPC_LID_LH, NPC_LT_LH_HTTPS_DATA, 0, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+		0, 1, NPC_S_NA, 0, 1,
+		NPC_LID_LH, NPC_LT_LH_PPTP_DATA, 0, 0, 0,
+		0, 0,
+	},
+	{
+		NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+		0, 1, NPC_S_NA, 0, 1,
+		NPC_LID_LH, NPC_LT_LH_UDP_DATA, 0, 0, 0,
+		0, 0,
+	},
+};
+
+static struct npc_kpu_profile npc_kpu_profiles[] = {
+	{
+		ARRAY_SIZE(kpu1_cam_entries),
+		ARRAY_SIZE(kpu1_action_entries),
+		&kpu1_cam_entries[0],
+		&kpu1_action_entries[0],
+	},
+	{
+		ARRAY_SIZE(kpu2_cam_entries),
+		ARRAY_SIZE(kpu2_action_entries),
+		&kpu2_cam_entries[0],
+		&kpu2_action_entries[0],
+	},
+	{
+		ARRAY_SIZE(kpu3_cam_entries),
+		ARRAY_SIZE(kpu3_action_entries),
+		&kpu3_cam_entries[0],
+		&kpu3_action_entries[0],
+	},
+	{
+		ARRAY_SIZE(kpu4_cam_entries),
+		ARRAY_SIZE(kpu4_action_entries),
+		&kpu4_cam_entries[0],
+		&kpu4_action_entries[0],
+	},
+	{
+		ARRAY_SIZE(kpu5_cam_entries),
+		ARRAY_SIZE(kpu5_action_entries),
+		&kpu5_cam_entries[0],
+		&kpu5_action_entries[0],
+	},
+	{
+		ARRAY_SIZE(kpu6_cam_entries),
+		ARRAY_SIZE(kpu6_action_entries),
+		&kpu6_cam_entries[0],
+		&kpu6_action_entries[0],
+	},
+	{
+		ARRAY_SIZE(kpu7_cam_entries),
+		ARRAY_SIZE(kpu7_action_entries),
+		&kpu7_cam_entries[0],
+		&kpu7_action_entries[0],
+	},
+	{
+		ARRAY_SIZE(kpu8_cam_entries),
+		ARRAY_SIZE(kpu8_action_entries),
+		&kpu8_cam_entries[0],
+		&kpu8_action_entries[0],
+	},
+	{
+		ARRAY_SIZE(kpu9_cam_entries),
+		ARRAY_SIZE(kpu9_action_entries),
+		&kpu9_cam_entries[0],
+		&kpu9_action_entries[0],
+	},
+	{
+		ARRAY_SIZE(kpu10_cam_entries),
+		ARRAY_SIZE(kpu10_action_entries),
+		&kpu10_cam_entries[0],
+		&kpu10_action_entries[0],
+	},
+	{
+		ARRAY_SIZE(kpu11_cam_entries),
+		ARRAY_SIZE(kpu11_action_entries),
+		&kpu11_cam_entries[0],
+		&kpu11_action_entries[0],
+	},
+	{
+		ARRAY_SIZE(kpu12_cam_entries),
+		ARRAY_SIZE(kpu12_action_entries),
+		&kpu12_cam_entries[0],
+		&kpu12_action_entries[0],
+	},
+	{
+		ARRAY_SIZE(kpu13_cam_entries),
+		ARRAY_SIZE(kpu13_action_entries),
+		&kpu13_cam_entries[0],
+		&kpu13_action_entries[0],
+	},
+	{
+		ARRAY_SIZE(kpu14_cam_entries),
+		ARRAY_SIZE(kpu14_action_entries),
+		&kpu14_cam_entries[0],
+		&kpu14_action_entries[0],
+	},
+	{
+		ARRAY_SIZE(kpu15_cam_entries),
+		ARRAY_SIZE(kpu15_action_entries),
+		&kpu15_cam_entries[0],
+		&kpu15_action_entries[0],
+	},
+	{
+		ARRAY_SIZE(kpu16_cam_entries),
+		ARRAY_SIZE(kpu16_action_entries),
+		&kpu16_cam_entries[0],
+		&kpu16_action_entries[0],
+	},
+};
+
+#endif /* NPC_PROFILE_H */
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
new file mode 100644
index 0000000..e581091
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
@@ -0,0 +1,2532 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell OcteonTx2 RVU Admin Function driver
+ *
+ * Copyright (C) 2018 Marvell International Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/irq.h>
+#include <linux/pci.h>
+#include <linux/sysfs.h>
+
+#include "cgx.h"
+#include "rvu.h"
+#include "rvu_reg.h"
+
+#define DRV_NAME	"octeontx2-af"
+#define DRV_STRING      "Marvell OcteonTX2 RVU Admin Function Driver"
+#define DRV_VERSION	"1.0"
+
+static int rvu_get_hwvf(struct rvu *rvu, int pcifunc);
+
+static void rvu_set_msix_offset(struct rvu *rvu, struct rvu_pfvf *pfvf,
+				struct rvu_block *block, int lf);
+static void rvu_clear_msix_offset(struct rvu *rvu, struct rvu_pfvf *pfvf,
+				  struct rvu_block *block, int lf);
+static void __rvu_flr_handler(struct rvu *rvu, u16 pcifunc);
+
+static int rvu_mbox_init(struct rvu *rvu, struct mbox_wq_info *mw,
+			 int type, int num,
+			 void (mbox_handler)(struct work_struct *),
+			 void (mbox_up_handler)(struct work_struct *));
+enum {
+	TYPE_AFVF,
+	TYPE_AFPF,
+};
+
+/* Supported devices */
+static const struct pci_device_id rvu_id_table[] = {
+	{ PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_RVU_AF) },
+	{ 0, }  /* end of table */
+};
+
+MODULE_AUTHOR("Marvell International Ltd.");
+MODULE_DESCRIPTION(DRV_STRING);
+MODULE_LICENSE("GPL v2");
+MODULE_VERSION(DRV_VERSION);
+MODULE_DEVICE_TABLE(pci, rvu_id_table);
+
+static char *mkex_profile; /* MKEX profile name */
+module_param(mkex_profile, charp, 0000);
+MODULE_PARM_DESC(mkex_profile, "MKEX profile name string");
+
+/* Poll an RVU block's register 'offset' for a 'zero'
+ * or 'nonzero' value at the bits specified by 'mask'.
+ */
+int rvu_poll_reg(struct rvu *rvu, u64 block, u64 offset, u64 mask, bool zero)
+{
+	unsigned long timeout = jiffies + usecs_to_jiffies(100);
+	void __iomem *reg;
+	u64 reg_val;
+
+	reg = rvu->afreg_base + ((block << 28) | offset);
+	while (time_before(jiffies, timeout)) {
+		reg_val = readq(reg);
+		if (zero && !(reg_val & mask))
+			return 0;
+		if (!zero && (reg_val & mask))
+			return 0;
+		usleep_range(1, 5);
+		timeout--;
+	}
+	return -EBUSY;
+}
+
+int rvu_alloc_rsrc(struct rsrc_bmap *rsrc)
+{
+	int id;
+
+	if (!rsrc->bmap)
+		return -EINVAL;
+
+	id = find_first_zero_bit(rsrc->bmap, rsrc->max);
+	if (id >= rsrc->max)
+		return -ENOSPC;
+
+	__set_bit(id, rsrc->bmap);
+
+	return id;
+}
+
+int rvu_alloc_rsrc_contig(struct rsrc_bmap *rsrc, int nrsrc)
+{
+	int start;
+
+	if (!rsrc->bmap)
+		return -EINVAL;
+
+	start = bitmap_find_next_zero_area(rsrc->bmap, rsrc->max, 0, nrsrc, 0);
+	if (start >= rsrc->max)
+		return -ENOSPC;
+
+	bitmap_set(rsrc->bmap, start, nrsrc);
+	return start;
+}
+
+static void rvu_free_rsrc_contig(struct rsrc_bmap *rsrc, int nrsrc, int start)
+{
+	if (!rsrc->bmap)
+		return;
+	if (start >= rsrc->max)
+		return;
+
+	bitmap_clear(rsrc->bmap, start, nrsrc);
+}
+
+bool rvu_rsrc_check_contig(struct rsrc_bmap *rsrc, int nrsrc)
+{
+	int start;
+
+	if (!rsrc->bmap)
+		return false;
+
+	start = bitmap_find_next_zero_area(rsrc->bmap, rsrc->max, 0, nrsrc, 0);
+	if (start >= rsrc->max)
+		return false;
+
+	return true;
+}
+
+void rvu_free_rsrc(struct rsrc_bmap *rsrc, int id)
+{
+	if (!rsrc->bmap)
+		return;
+
+	__clear_bit(id, rsrc->bmap);
+}
+
+int rvu_rsrc_free_count(struct rsrc_bmap *rsrc)
+{
+	int used;
+
+	if (!rsrc->bmap)
+		return 0;
+
+	used = bitmap_weight(rsrc->bmap, rsrc->max);
+	return (rsrc->max - used);
+}
+
+int rvu_alloc_bitmap(struct rsrc_bmap *rsrc)
+{
+	rsrc->bmap = kcalloc(BITS_TO_LONGS(rsrc->max),
+			     sizeof(long), GFP_KERNEL);
+	if (!rsrc->bmap)
+		return -ENOMEM;
+	return 0;
+}
+
+/* Get block LF's HW index from a PF_FUNC's block slot number */
+int rvu_get_lf(struct rvu *rvu, struct rvu_block *block, u16 pcifunc, u16 slot)
+{
+	u16 match = 0;
+	int lf;
+
+	mutex_lock(&rvu->rsrc_lock);
+	for (lf = 0; lf < block->lf.max; lf++) {
+		if (block->fn_map[lf] == pcifunc) {
+			if (slot == match) {
+				mutex_unlock(&rvu->rsrc_lock);
+				return lf;
+			}
+			match++;
+		}
+	}
+	mutex_unlock(&rvu->rsrc_lock);
+	return -ENODEV;
+}
+
+/* Convert a BLOCK_TYPE_E to a BLOCK_ADDR_E.
+ * Some silicon variants of OcteonTX2 support
+ * multiple blocks of the same type.
+ *
+ * @pcifunc has to be zero when no LF is yet attached.
+ */
+int rvu_get_blkaddr(struct rvu *rvu, int blktype, u16 pcifunc)
+{
+	int devnum, blkaddr = -ENODEV;
+	u64 cfg, reg;
+	bool is_pf;
+
+	switch (blktype) {
+	case BLKTYPE_NPC:
+		blkaddr = BLKADDR_NPC;
+		goto exit;
+	case BLKTYPE_NPA:
+		blkaddr = BLKADDR_NPA;
+		goto exit;
+	case BLKTYPE_NIX:
+		/* For now assume NIX0 */
+		if (!pcifunc) {
+			blkaddr = BLKADDR_NIX0;
+			goto exit;
+		}
+		break;
+	case BLKTYPE_SSO:
+		blkaddr = BLKADDR_SSO;
+		goto exit;
+	case BLKTYPE_SSOW:
+		blkaddr = BLKADDR_SSOW;
+		goto exit;
+	case BLKTYPE_TIM:
+		blkaddr = BLKADDR_TIM;
+		goto exit;
+	case BLKTYPE_CPT:
+		/* For now assume CPT0 */
+		if (!pcifunc) {
+			blkaddr = BLKADDR_CPT0;
+			goto exit;
+		}
+		break;
+	}
+
+	/* Check if this is a RVU PF or VF */
+	if (pcifunc & RVU_PFVF_FUNC_MASK) {
+		is_pf = false;
+		devnum = rvu_get_hwvf(rvu, pcifunc);
+	} else {
+		is_pf = true;
+		devnum = rvu_get_pf(pcifunc);
+	}
+
+	/* Check if the 'pcifunc' has a NIX LF from 'BLKADDR_NIX0' */
+	if (blktype == BLKTYPE_NIX) {
+		reg = is_pf ? RVU_PRIV_PFX_NIX0_CFG : RVU_PRIV_HWVFX_NIX0_CFG;
+		cfg = rvu_read64(rvu, BLKADDR_RVUM, reg | (devnum << 16));
+		if (cfg)
+			blkaddr = BLKADDR_NIX0;
+	}
+
+	/* Check if the 'pcifunc' has a CPT LF from 'BLKADDR_CPT0' */
+	if (blktype == BLKTYPE_CPT) {
+		reg = is_pf ? RVU_PRIV_PFX_CPT0_CFG : RVU_PRIV_HWVFX_CPT0_CFG;
+		cfg = rvu_read64(rvu, BLKADDR_RVUM, reg | (devnum << 16));
+		if (cfg)
+			blkaddr = BLKADDR_CPT0;
+	}
+
+exit:
+	if (is_block_implemented(rvu->hw, blkaddr))
+		return blkaddr;
+	return -ENODEV;
+}
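+
+/* Illustrative note (not part of the original patch): callers use
+ * rvu_get_blkaddr() in one of two ways. With pcifunc == 0 it returns the
+ * default block of that type (e.g. BLKADDR_NIX0); with a non-zero pcifunc
+ * it returns the block address holding an LF attached to that PF/VF, or
+ * -ENODEV if the block is not implemented or nothing is attached. A sketch
+ * of a typical caller:
+ *
+ *	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
+ *	if (blkaddr < 0)
+ *		return blkaddr;
+ */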
+
+static void rvu_update_rsrc_map(struct rvu *rvu, struct rvu_pfvf *pfvf,
+				struct rvu_block *block, u16 pcifunc,
+				u16 lf, bool attach)
+{
+	int devnum, num_lfs = 0;
+	bool is_pf;
+	u64 reg;
+
+	if (lf >= block->lf.max) {
+		dev_err(&rvu->pdev->dev,
+			"%s: FATAL: LF %d is >= %s's max lfs i.e %d\n",
+			__func__, lf, block->name, block->lf.max);
+		return;
+	}
+
+	/* Check if this is for a RVU PF or VF */
+	if (pcifunc & RVU_PFVF_FUNC_MASK) {
+		is_pf = false;
+		devnum = rvu_get_hwvf(rvu, pcifunc);
+	} else {
+		is_pf = true;
+		devnum = rvu_get_pf(pcifunc);
+	}
+
+	block->fn_map[lf] = attach ? pcifunc : 0;
+
+	switch (block->type) {
+	case BLKTYPE_NPA:
+		pfvf->npalf = attach ? true : false;
+		num_lfs = pfvf->npalf;
+		break;
+	case BLKTYPE_NIX:
+		pfvf->nixlf = attach ? true : false;
+		num_lfs = pfvf->nixlf;
+		break;
+	case BLKTYPE_SSO:
+		attach ? pfvf->sso++ : pfvf->sso--;
+		num_lfs = pfvf->sso;
+		break;
+	case BLKTYPE_SSOW:
+		attach ? pfvf->ssow++ : pfvf->ssow--;
+		num_lfs = pfvf->ssow;
+		break;
+	case BLKTYPE_TIM:
+		attach ? pfvf->timlfs++ : pfvf->timlfs--;
+		num_lfs = pfvf->timlfs;
+		break;
+	case BLKTYPE_CPT:
+		attach ? pfvf->cptlfs++ : pfvf->cptlfs--;
+		num_lfs = pfvf->cptlfs;
+		break;
+	}
+
+	reg = is_pf ? block->pf_lfcnt_reg : block->vf_lfcnt_reg;
+	rvu_write64(rvu, BLKADDR_RVUM, reg | (devnum << 16), num_lfs);
+}
+
+inline int rvu_get_pf(u16 pcifunc)
+{
+	return (pcifunc >> RVU_PFVF_PF_SHIFT) & RVU_PFVF_PF_MASK;
+}
+
+void rvu_get_pf_numvfs(struct rvu *rvu, int pf, int *numvfs, int *hwvf)
+{
+	u64 cfg;
+
+	/* Get numVFs attached to this PF and first HWVF */
+	cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf));
+	*numvfs = (cfg >> 12) & 0xFF;
+	*hwvf = cfg & 0xFFF;
+}
+
+static int rvu_get_hwvf(struct rvu *rvu, int pcifunc)
+{
+	int pf, func;
+	u64 cfg;
+
+	pf = rvu_get_pf(pcifunc);
+	func = pcifunc & RVU_PFVF_FUNC_MASK;
+
+	/* Get first HWVF attached to this PF */
+	cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf));
+
+	return ((cfg & 0xFFF) + func - 1);
+}
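+
+/* Illustrative note (not part of the original patch): as the helpers above
+ * assume, a 'pcifunc' packs the PF number in its upper bits and a function
+ * number in its lower bits, where function 0 denotes the PF itself and
+ * function N (N >= 1) denotes that PF's (N - 1)'th VF. Deriving the global
+ * HW VF index is then, roughly:
+ *
+ *	func = pcifunc & RVU_PFVF_FUNC_MASK;
+ *	hwvf = first_hwvf_of_this_pf + func - 1;
+ *
+ * where 'first_hwvf_of_this_pf' (a name used only in this sketch) comes
+ * from RVU_PRIV_PFX_CFG(pf).
+ */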
+
+struct rvu_pfvf *rvu_get_pfvf(struct rvu *rvu, int pcifunc)
+{
+	/* Check if it is a PF or VF */
+	if (pcifunc & RVU_PFVF_FUNC_MASK)
+		return &rvu->hwvf[rvu_get_hwvf(rvu, pcifunc)];
+	else
+		return &rvu->pf[rvu_get_pf(pcifunc)];
+}
+
+static bool is_pf_func_valid(struct rvu *rvu, u16 pcifunc)
+{
+	int pf, vf, nvfs;
+	u64 cfg;
+
+	pf = rvu_get_pf(pcifunc);
+	if (pf >= rvu->hw->total_pfs)
+		return false;
+
+	if (!(pcifunc & RVU_PFVF_FUNC_MASK))
+		return true;
+
+	/* Check if VF is within number of VFs attached to this PF */
+	vf = (pcifunc & RVU_PFVF_FUNC_MASK) - 1;
+	cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf));
+	nvfs = (cfg >> 12) & 0xFF;
+	if (vf >= nvfs)
+		return false;
+
+	return true;
+}
+
+bool is_block_implemented(struct rvu_hwinfo *hw, int blkaddr)
+{
+	struct rvu_block *block;
+
+	if (blkaddr < BLKADDR_RVUM || blkaddr >= BLK_COUNT)
+		return false;
+
+	block = &hw->block[blkaddr];
+	return block->implemented;
+}
+
+static void rvu_check_block_implemented(struct rvu *rvu)
+{
+	struct rvu_hwinfo *hw = rvu->hw;
+	struct rvu_block *block;
+	int blkid;
+	u64 cfg;
+
+	/* For each block check if 'implemented' bit is set */
+	for (blkid = 0; blkid < BLK_COUNT; blkid++) {
+		block = &hw->block[blkid];
+		cfg = rvupf_read64(rvu, RVU_PF_BLOCK_ADDRX_DISC(blkid));
+		if (cfg & BIT_ULL(11))
+			block->implemented = true;
+	}
+}
+
+int rvu_lf_reset(struct rvu *rvu, struct rvu_block *block, int lf)
+{
+	int err;
+
+	if (!block->implemented)
+		return 0;
+
+	rvu_write64(rvu, block->addr, block->lfreset_reg, lf | BIT_ULL(12));
+	err = rvu_poll_reg(rvu, block->addr, block->lfreset_reg, BIT_ULL(12),
+			   true);
+	return err;
+}
+
+static void rvu_block_reset(struct rvu *rvu, int blkaddr, u64 rst_reg)
+{
+	struct rvu_block *block = &rvu->hw->block[blkaddr];
+
+	if (!block->implemented)
+		return;
+
+	rvu_write64(rvu, blkaddr, rst_reg, BIT_ULL(0));
+	rvu_poll_reg(rvu, blkaddr, rst_reg, BIT_ULL(63), true);
+}
+
+static void rvu_reset_all_blocks(struct rvu *rvu)
+{
+	/* Do a HW reset of all RVU blocks */
+	rvu_block_reset(rvu, BLKADDR_NPA, NPA_AF_BLK_RST);
+	rvu_block_reset(rvu, BLKADDR_NIX0, NIX_AF_BLK_RST);
+	rvu_block_reset(rvu, BLKADDR_NPC, NPC_AF_BLK_RST);
+	rvu_block_reset(rvu, BLKADDR_SSO, SSO_AF_BLK_RST);
+	rvu_block_reset(rvu, BLKADDR_TIM, TIM_AF_BLK_RST);
+	rvu_block_reset(rvu, BLKADDR_CPT0, CPT_AF_BLK_RST);
+	rvu_block_reset(rvu, BLKADDR_NDC0, NDC_AF_BLK_RST);
+	rvu_block_reset(rvu, BLKADDR_NDC1, NDC_AF_BLK_RST);
+	rvu_block_reset(rvu, BLKADDR_NDC2, NDC_AF_BLK_RST);
+}
+
+static void rvu_scan_block(struct rvu *rvu, struct rvu_block *block)
+{
+	struct rvu_pfvf *pfvf;
+	u64 cfg;
+	int lf;
+
+	for (lf = 0; lf < block->lf.max; lf++) {
+		cfg = rvu_read64(rvu, block->addr,
+				 block->lfcfg_reg | (lf << block->lfshift));
+		if (!(cfg & BIT_ULL(63)))
+			continue;
+
+		/* Set this resource as being used */
+		__set_bit(lf, block->lf.bmap);
+
+		/* Get to whom this LF is attached */
+		pfvf = rvu_get_pfvf(rvu, (cfg >> 8) & 0xFFFF);
+		rvu_update_rsrc_map(rvu, pfvf, block,
+				    (cfg >> 8) & 0xFFFF, lf, true);
+
+		/* Set start MSIX vector for this LF within this PF/VF */
+		rvu_set_msix_offset(rvu, pfvf, block, lf);
+	}
+}
+
+static void rvu_check_min_msix_vec(struct rvu *rvu, int nvecs, int pf, int vf)
+{
+	int min_vecs;
+
+	if (!vf)
+		goto check_pf;
+
+	if (!nvecs) {
+		dev_warn(rvu->dev,
+			 "PF%d:VF%d is configured with zero msix vectors, %d\n",
+			 pf, vf - 1, nvecs);
+	}
+	return;
+
+check_pf:
+	if (pf == 0)
+		min_vecs = RVU_AF_INT_VEC_CNT + RVU_PF_INT_VEC_CNT;
+	else
+		min_vecs = RVU_PF_INT_VEC_CNT;
+
+	if (!(nvecs < min_vecs))
+		return;
+	dev_warn(rvu->dev,
+		 "PF%d is configured with too few vectors, %d, min is %d\n",
+		 pf, nvecs, min_vecs);
+}
+
+static int rvu_setup_msix_resources(struct rvu *rvu)
+{
+	struct rvu_hwinfo *hw = rvu->hw;
+	int pf, vf, numvfs, hwvf, err;
+	int nvecs, offset, max_msix;
+	struct rvu_pfvf *pfvf;
+	u64 cfg, phy_addr;
+	dma_addr_t iova;
+
+	for (pf = 0; pf < hw->total_pfs; pf++) {
+		cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf));
+		/* If PF is not enabled, nothing to do */
+		if (!((cfg >> 20) & 0x01))
+			continue;
+
+		rvu_get_pf_numvfs(rvu, pf, &numvfs, &hwvf);
+
+		pfvf = &rvu->pf[pf];
+		/* Get num of MSIX vectors attached to this PF */
+		cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_MSIX_CFG(pf));
+		pfvf->msix.max = ((cfg >> 32) & 0xFFF) + 1;
+		rvu_check_min_msix_vec(rvu, pfvf->msix.max, pf, 0);
+
+		/* Alloc msix bitmap for this PF */
+		err = rvu_alloc_bitmap(&pfvf->msix);
+		if (err)
+			return err;
+
+		/* Allocate memory for MSIX vector to RVU block LF mapping */
+		pfvf->msix_lfmap = devm_kcalloc(rvu->dev, pfvf->msix.max,
+						sizeof(u16), GFP_KERNEL);
+		if (!pfvf->msix_lfmap)
+			return -ENOMEM;
+
+		/* For PF0 (AF), firmware will set the msix vector offsets for
+		 * AF, block AF and PF0_INT vectors, so jump to VFs.
+		 */
+		if (!pf)
+			goto setup_vfmsix;
+
+		/* Set MSIX offset for PF's 'RVU_PF_INT_VEC' vectors.
+		 * These are allocated on driver init and never freed,
+		 * so no need to set 'msix_lfmap' for these.
+		 */
+		cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_INT_CFG(pf));
+		nvecs = (cfg >> 12) & 0xFF;
+		cfg &= ~0x7FFULL;
+		offset = rvu_alloc_rsrc_contig(&pfvf->msix, nvecs);
+		rvu_write64(rvu, BLKADDR_RVUM,
+			    RVU_PRIV_PFX_INT_CFG(pf), cfg | offset);
+setup_vfmsix:
+		/* Alloc msix bitmap for VFs */
+		for (vf = 0; vf < numvfs; vf++) {
+			pfvf =  &rvu->hwvf[hwvf + vf];
+			/* Get num of MSIX vectors attached to this VF */
+			cfg = rvu_read64(rvu, BLKADDR_RVUM,
+					 RVU_PRIV_PFX_MSIX_CFG(pf));
+			pfvf->msix.max = (cfg & 0xFFF) + 1;
+			rvu_check_min_msix_vec(rvu, pfvf->msix.max, pf, vf + 1);
+
+			/* Alloc msix bitmap for this VF */
+			err = rvu_alloc_bitmap(&pfvf->msix);
+			if (err)
+				return err;
+
+			pfvf->msix_lfmap =
+				devm_kcalloc(rvu->dev, pfvf->msix.max,
+					     sizeof(u16), GFP_KERNEL);
+			if (!pfvf->msix_lfmap)
+				return -ENOMEM;
+
+			/* Set MSIX offset for HWVF's 'RVU_VF_INT_VEC' vectors.
+			 * These are allocated on driver init and never freed,
+			 * so no need to set 'msix_lfmap' for these.
+			 */
+			cfg = rvu_read64(rvu, BLKADDR_RVUM,
+					 RVU_PRIV_HWVFX_INT_CFG(hwvf + vf));
+			nvecs = (cfg >> 12) & 0xFF;
+			cfg &= ~0x7FFULL;
+			offset = rvu_alloc_rsrc_contig(&pfvf->msix, nvecs);
+			rvu_write64(rvu, BLKADDR_RVUM,
+				    RVU_PRIV_HWVFX_INT_CFG(hwvf + vf),
+				    cfg | offset);
+		}
+	}
+
+	/* HW interprets the RVU_AF_MSIXTR_BASE address as an IOVA, hence
+	 * create an IOMMU mapping for the physical address configured by
+	 * firmware and reconfigure RVU_AF_MSIXTR_BASE with the IOVA.
+	 */
+	cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_CONST);
+	max_msix = cfg & 0xFFFFF;
+	phy_addr = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_MSIXTR_BASE);
+	iova = dma_map_resource(rvu->dev, phy_addr,
+				max_msix * PCI_MSIX_ENTRY_SIZE,
+				DMA_BIDIRECTIONAL, 0);
+
+	if (dma_mapping_error(rvu->dev, iova))
+		return -ENOMEM;
+
+	rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_MSIXTR_BASE, (u64)iova);
+	rvu->msix_base_iova = iova;
+
+	return 0;
+}
+
+static void rvu_free_hw_resources(struct rvu *rvu)
+{
+	struct rvu_hwinfo *hw = rvu->hw;
+	struct rvu_block *block;
+	struct rvu_pfvf  *pfvf;
+	int id, max_msix;
+	u64 cfg;
+
+	rvu_npa_freemem(rvu);
+	rvu_npc_freemem(rvu);
+	rvu_nix_freemem(rvu);
+
+	/* Free block LF bitmaps */
+	for (id = 0; id < BLK_COUNT; id++) {
+		block = &hw->block[id];
+		kfree(block->lf.bmap);
+	}
+
+	/* Free MSIX bitmaps */
+	for (id = 0; id < hw->total_pfs; id++) {
+		pfvf = &rvu->pf[id];
+		kfree(pfvf->msix.bmap);
+	}
+
+	for (id = 0; id < hw->total_vfs; id++) {
+		pfvf = &rvu->hwvf[id];
+		kfree(pfvf->msix.bmap);
+	}
+
+	/* Unmap MSIX vector base IOVA mapping */
+	if (!rvu->msix_base_iova)
+		return;
+	cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_CONST);
+	max_msix = cfg & 0xFFFFF;
+	dma_unmap_resource(rvu->dev, rvu->msix_base_iova,
+			   max_msix * PCI_MSIX_ENTRY_SIZE,
+			   DMA_BIDIRECTIONAL, 0);
+
+	mutex_destroy(&rvu->rsrc_lock);
+}
+
+static int rvu_setup_hw_resources(struct rvu *rvu)
+{
+	struct rvu_hwinfo *hw = rvu->hw;
+	struct rvu_block *block;
+	int blkid, err;
+	u64 cfg;
+
+	/* Get HW supported max RVU PF & VF count */
+	cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_CONST);
+	hw->total_pfs = (cfg >> 32) & 0xFF;
+	hw->total_vfs = (cfg >> 20) & 0xFFF;
+	hw->max_vfs_per_pf = (cfg >> 40) & 0xFF;
+
+	/* Init NPA LF's bitmap */
+	block = &hw->block[BLKADDR_NPA];
+	if (!block->implemented)
+		goto nix;
+	cfg = rvu_read64(rvu, BLKADDR_NPA, NPA_AF_CONST);
+	block->lf.max = (cfg >> 16) & 0xFFF;
+	block->addr = BLKADDR_NPA;
+	block->type = BLKTYPE_NPA;
+	block->lfshift = 8;
+	block->lookup_reg = NPA_AF_RVU_LF_CFG_DEBUG;
+	block->pf_lfcnt_reg = RVU_PRIV_PFX_NPA_CFG;
+	block->vf_lfcnt_reg = RVU_PRIV_HWVFX_NPA_CFG;
+	block->lfcfg_reg = NPA_PRIV_LFX_CFG;
+	block->msixcfg_reg = NPA_PRIV_LFX_INT_CFG;
+	block->lfreset_reg = NPA_AF_LF_RST;
+	sprintf(block->name, "NPA");
+	err = rvu_alloc_bitmap(&block->lf);
+	if (err)
+		return err;
+
+nix:
+	/* Init NIX LF's bitmap */
+	block = &hw->block[BLKADDR_NIX0];
+	if (!block->implemented)
+		goto sso;
+	cfg = rvu_read64(rvu, BLKADDR_NIX0, NIX_AF_CONST2);
+	block->lf.max = cfg & 0xFFF;
+	block->addr = BLKADDR_NIX0;
+	block->type = BLKTYPE_NIX;
+	block->lfshift = 8;
+	block->lookup_reg = NIX_AF_RVU_LF_CFG_DEBUG;
+	block->pf_lfcnt_reg = RVU_PRIV_PFX_NIX0_CFG;
+	block->vf_lfcnt_reg = RVU_PRIV_HWVFX_NIX0_CFG;
+	block->lfcfg_reg = NIX_PRIV_LFX_CFG;
+	block->msixcfg_reg = NIX_PRIV_LFX_INT_CFG;
+	block->lfreset_reg = NIX_AF_LF_RST;
+	sprintf(block->name, "NIX");
+	err = rvu_alloc_bitmap(&block->lf);
+	if (err)
+		return err;
+
+sso:
+	/* Init SSO group's bitmap */
+	block = &hw->block[BLKADDR_SSO];
+	if (!block->implemented)
+		goto ssow;
+	cfg = rvu_read64(rvu, BLKADDR_SSO, SSO_AF_CONST);
+	block->lf.max = cfg & 0xFFFF;
+	block->addr = BLKADDR_SSO;
+	block->type = BLKTYPE_SSO;
+	block->multislot = true;
+	block->lfshift = 3;
+	block->lookup_reg = SSO_AF_RVU_LF_CFG_DEBUG;
+	block->pf_lfcnt_reg = RVU_PRIV_PFX_SSO_CFG;
+	block->vf_lfcnt_reg = RVU_PRIV_HWVFX_SSO_CFG;
+	block->lfcfg_reg = SSO_PRIV_LFX_HWGRP_CFG;
+	block->msixcfg_reg = SSO_PRIV_LFX_HWGRP_INT_CFG;
+	block->lfreset_reg = SSO_AF_LF_HWGRP_RST;
+	sprintf(block->name, "SSO GROUP");
+	err = rvu_alloc_bitmap(&block->lf);
+	if (err)
+		return err;
+
+ssow:
+	/* Init SSO workslot's bitmap */
+	block = &hw->block[BLKADDR_SSOW];
+	if (!block->implemented)
+		goto tim;
+	block->lf.max = (cfg >> 56) & 0xFF;
+	block->addr = BLKADDR_SSOW;
+	block->type = BLKTYPE_SSOW;
+	block->multislot = true;
+	block->lfshift = 3;
+	block->lookup_reg = SSOW_AF_RVU_LF_HWS_CFG_DEBUG;
+	block->pf_lfcnt_reg = RVU_PRIV_PFX_SSOW_CFG;
+	block->vf_lfcnt_reg = RVU_PRIV_HWVFX_SSOW_CFG;
+	block->lfcfg_reg = SSOW_PRIV_LFX_HWS_CFG;
+	block->msixcfg_reg = SSOW_PRIV_LFX_HWS_INT_CFG;
+	block->lfreset_reg = SSOW_AF_LF_HWS_RST;
+	sprintf(block->name, "SSOWS");
+	err = rvu_alloc_bitmap(&block->lf);
+	if (err)
+		return err;
+
+tim:
+	/* Init TIM LF's bitmap */
+	block = &hw->block[BLKADDR_TIM];
+	if (!block->implemented)
+		goto cpt;
+	cfg = rvu_read64(rvu, BLKADDR_TIM, TIM_AF_CONST);
+	block->lf.max = cfg & 0xFFFF;
+	block->addr = BLKADDR_TIM;
+	block->type = BLKTYPE_TIM;
+	block->multislot = true;
+	block->lfshift = 3;
+	block->lookup_reg = TIM_AF_RVU_LF_CFG_DEBUG;
+	block->pf_lfcnt_reg = RVU_PRIV_PFX_TIM_CFG;
+	block->vf_lfcnt_reg = RVU_PRIV_HWVFX_TIM_CFG;
+	block->lfcfg_reg = TIM_PRIV_LFX_CFG;
+	block->msixcfg_reg = TIM_PRIV_LFX_INT_CFG;
+	block->lfreset_reg = TIM_AF_LF_RST;
+	sprintf(block->name, "TIM");
+	err = rvu_alloc_bitmap(&block->lf);
+	if (err)
+		return err;
+
+cpt:
+	/* Init CPT LF's bitmap */
+	block = &hw->block[BLKADDR_CPT0];
+	if (!block->implemented)
+		goto init;
+	cfg = rvu_read64(rvu, BLKADDR_CPT0, CPT_AF_CONSTANTS0);
+	block->lf.max = cfg & 0xFF;
+	block->addr = BLKADDR_CPT0;
+	block->type = BLKTYPE_CPT;
+	block->multislot = true;
+	block->lfshift = 3;
+	block->lookup_reg = CPT_AF_RVU_LF_CFG_DEBUG;
+	block->pf_lfcnt_reg = RVU_PRIV_PFX_CPT0_CFG;
+	block->vf_lfcnt_reg = RVU_PRIV_HWVFX_CPT0_CFG;
+	block->lfcfg_reg = CPT_PRIV_LFX_CFG;
+	block->msixcfg_reg = CPT_PRIV_LFX_INT_CFG;
+	block->lfreset_reg = CPT_AF_LF_RST;
+	sprintf(block->name, "CPT");
+	err = rvu_alloc_bitmap(&block->lf);
+	if (err)
+		return err;
+
+init:
+	/* Allocate memory for PFVF data */
+	rvu->pf = devm_kcalloc(rvu->dev, hw->total_pfs,
+			       sizeof(struct rvu_pfvf), GFP_KERNEL);
+	if (!rvu->pf)
+		return -ENOMEM;
+
+	rvu->hwvf = devm_kcalloc(rvu->dev, hw->total_vfs,
+				 sizeof(struct rvu_pfvf), GFP_KERNEL);
+	if (!rvu->hwvf)
+		return -ENOMEM;
+
+	mutex_init(&rvu->rsrc_lock);
+
+	err = rvu_setup_msix_resources(rvu);
+	if (err)
+		return err;
+
+	for (blkid = 0; blkid < BLK_COUNT; blkid++) {
+		block = &hw->block[blkid];
+		if (!block->lf.bmap)
+			continue;
+
+		/* Allocate memory for block LF/slot to pcifunc mapping info */
+		block->fn_map = devm_kcalloc(rvu->dev, block->lf.max,
+					     sizeof(u16), GFP_KERNEL);
+		if (!block->fn_map)
+			return -ENOMEM;
+
+		/* Scan all blocks to check if low level firmware has
+		 * already provisioned any of the resources to a PF/VF.
+		 */
+		rvu_scan_block(rvu, block);
+	}
+
+	err = rvu_npc_init(rvu);
+	if (err)
+		goto exit;
+
+	err = rvu_cgx_init(rvu);
+	if (err)
+		goto exit;
+
+	err = rvu_npa_init(rvu);
+	if (err)
+		goto cgx_err;
+
+	err = rvu_nix_init(rvu);
+	if (err)
+		goto cgx_err;
+
+	return 0;
+
+cgx_err:
+	rvu_cgx_exit(rvu);
+exit:
+	return err;
+}
+
+/* NPA and NIX admin queue APIs */
+void rvu_aq_free(struct rvu *rvu, struct admin_queue *aq)
+{
+	if (!aq)
+		return;
+
+	qmem_free(rvu->dev, aq->inst);
+	qmem_free(rvu->dev, aq->res);
+	devm_kfree(rvu->dev, aq);
+}
+
+int rvu_aq_alloc(struct rvu *rvu, struct admin_queue **ad_queue,
+		 int qsize, int inst_size, int res_size)
+{
+	struct admin_queue *aq;
+	int err;
+
+	*ad_queue = devm_kzalloc(rvu->dev, sizeof(*aq), GFP_KERNEL);
+	if (!*ad_queue)
+		return -ENOMEM;
+	aq = *ad_queue;
+
+	/* Alloc memory for instructions, i.e. the AQ */
+	err = qmem_alloc(rvu->dev, &aq->inst, qsize, inst_size);
+	if (err) {
+		devm_kfree(rvu->dev, aq);
+		return err;
+	}
+
+	/* Alloc memory for results */
+	err = qmem_alloc(rvu->dev, &aq->res, qsize, res_size);
+	if (err) {
+		rvu_aq_free(rvu, aq);
+		return err;
+	}
+
+	spin_lock_init(&aq->lock);
+	return 0;
+}
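+
+/* Illustrative sketch (not part of the original patch): an admin queue is
+ * allocated and torn down as a pair, with 'aq' below being wherever the
+ * caller keeps the queue pointer:
+ *
+ *	err = rvu_aq_alloc(rvu, &aq, qsize, inst_size, res_size);
+ *	if (err)
+ *		return err;
+ *	...
+ *	rvu_aq_free(rvu, aq);
+ */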
+
+static int rvu_mbox_handler_ready(struct rvu *rvu, struct msg_req *req,
+				  struct ready_msg_rsp *rsp)
+{
+	return 0;
+}
+
+/* Get the current count of an RVU block's LFs/slots
+ * provisioned to a given RVU func.
+ */
+static u16 rvu_get_rsrc_mapcount(struct rvu_pfvf *pfvf, int blktype)
+{
+	switch (blktype) {
+	case BLKTYPE_NPA:
+		return pfvf->npalf ? 1 : 0;
+	case BLKTYPE_NIX:
+		return pfvf->nixlf ? 1 : 0;
+	case BLKTYPE_SSO:
+		return pfvf->sso;
+	case BLKTYPE_SSOW:
+		return pfvf->ssow;
+	case BLKTYPE_TIM:
+		return pfvf->timlfs;
+	case BLKTYPE_CPT:
+		return pfvf->cptlfs;
+	}
+	return 0;
+}
+
+bool is_pffunc_map_valid(struct rvu *rvu, u16 pcifunc, int blktype)
+{
+	struct rvu_pfvf *pfvf;
+
+	if (!is_pf_func_valid(rvu, pcifunc))
+		return false;
+
+	pfvf = rvu_get_pfvf(rvu, pcifunc);
+
+	/* Check if this PFFUNC has a LF of type blktype attached */
+	if (!rvu_get_rsrc_mapcount(pfvf, blktype))
+		return false;
+
+	return true;
+}
+
+static int rvu_lookup_rsrc(struct rvu *rvu, struct rvu_block *block,
+			   int pcifunc, int slot)
+{
+	u64 val;
+
+	val = ((u64)pcifunc << 24) | (slot << 16) | (1ULL << 13);
+	rvu_write64(rvu, block->addr, block->lookup_reg, val);
+	/* Wait for the lookup to finish */
+	/* TODO: put some timeout here */
+	while (rvu_read64(rvu, block->addr, block->lookup_reg) & (1ULL << 13))
+		;
+
+	val = rvu_read64(rvu, block->addr, block->lookup_reg);
+
+	/* Check LF valid bit */
+	if (!(val & (1ULL << 12)))
+		return -1;
+
+	return (val & 0xFFF);
+}
+
+static void rvu_detach_block(struct rvu *rvu, int pcifunc, int blktype)
+{
+	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
+	struct rvu_hwinfo *hw = rvu->hw;
+	struct rvu_block *block;
+	int slot, lf, num_lfs;
+	int blkaddr;
+
+	blkaddr = rvu_get_blkaddr(rvu, blktype, pcifunc);
+	if (blkaddr < 0)
+		return;
+
+	block = &hw->block[blkaddr];
+
+	num_lfs = rvu_get_rsrc_mapcount(pfvf, block->type);
+	if (!num_lfs)
+		return;
+
+	for (slot = 0; slot < num_lfs; slot++) {
+		lf = rvu_lookup_rsrc(rvu, block, pcifunc, slot);
+		if (lf < 0) /* This should never happen */
+			continue;
+
+		/* Disable the LF */
+		rvu_write64(rvu, blkaddr, block->lfcfg_reg |
+			    (lf << block->lfshift), 0x00ULL);
+
+		/* Update SW maintained mapping info as well */
+		rvu_update_rsrc_map(rvu, pfvf, block,
+				    pcifunc, lf, false);
+
+		/* Free the resource */
+		rvu_free_rsrc(&block->lf, lf);
+
+		/* Clear MSIX vector offset for this LF */
+		rvu_clear_msix_offset(rvu, pfvf, block, lf);
+	}
+}
+
+static int rvu_detach_rsrcs(struct rvu *rvu, struct rsrc_detach *detach,
+			    u16 pcifunc)
+{
+	struct rvu_hwinfo *hw = rvu->hw;
+	bool detach_all = true;
+	struct rvu_block *block;
+	int blkid;
+
+	mutex_lock(&rvu->rsrc_lock);
+
+	/* Check for partial resource detach */
+	if (detach && detach->partial)
+		detach_all = false;
+
+	/* Check for any RVU block LFs attached to this func and,
+	 * if found, detach them.
+	 */
+	for (blkid = 0; blkid < BLK_COUNT; blkid++) {
+		block = &hw->block[blkid];
+		if (!block->lf.bmap)
+			continue;
+		if (!detach_all && detach) {
+			if (blkid == BLKADDR_NPA && !detach->npalf)
+				continue;
+			else if ((blkid == BLKADDR_NIX0) && !detach->nixlf)
+				continue;
+			else if ((blkid == BLKADDR_SSO) && !detach->sso)
+				continue;
+			else if ((blkid == BLKADDR_SSOW) && !detach->ssow)
+				continue;
+			else if ((blkid == BLKADDR_TIM) && !detach->timlfs)
+				continue;
+			else if ((blkid == BLKADDR_CPT0) && !detach->cptlfs)
+				continue;
+		}
+		rvu_detach_block(rvu, pcifunc, block->type);
+	}
+
+	mutex_unlock(&rvu->rsrc_lock);
+	return 0;
+}
+
+static int rvu_mbox_handler_detach_resources(struct rvu *rvu,
+					     struct rsrc_detach *detach,
+					     struct msg_rsp *rsp)
+{
+	return rvu_detach_rsrcs(rvu, detach, detach->hdr.pcifunc);
+}
+
+static void rvu_attach_block(struct rvu *rvu, int pcifunc,
+			     int blktype, int num_lfs)
+{
+	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
+	struct rvu_hwinfo *hw = rvu->hw;
+	struct rvu_block *block;
+	int slot, lf;
+	int blkaddr;
+	u64 cfg;
+
+	if (!num_lfs)
+		return;
+
+	blkaddr = rvu_get_blkaddr(rvu, blktype, 0);
+	if (blkaddr < 0)
+		return;
+
+	block = &hw->block[blkaddr];
+	if (!block->lf.bmap)
+		return;
+
+	for (slot = 0; slot < num_lfs; slot++) {
+		/* Allocate the resource */
+		lf = rvu_alloc_rsrc(&block->lf);
+		if (lf < 0)
+			return;
+
+		cfg = (1ULL << 63) | (pcifunc << 8) | slot;
+		rvu_write64(rvu, blkaddr, block->lfcfg_reg |
+			    (lf << block->lfshift), cfg);
+		rvu_update_rsrc_map(rvu, pfvf, block,
+				    pcifunc, lf, true);
+
+		/* Set start MSIX vector for this LF within this PF/VF */
+		rvu_set_msix_offset(rvu, pfvf, block, lf);
+	}
+}
+
+static int rvu_check_rsrc_availability(struct rvu *rvu,
+				       struct rsrc_attach *req, u16 pcifunc)
+{
+	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
+	struct rvu_hwinfo *hw = rvu->hw;
+	struct rvu_block *block;
+	int free_lfs, mappedlfs;
+
+	/* Only one NPA LF can be attached */
+	if (req->npalf && !rvu_get_rsrc_mapcount(pfvf, BLKTYPE_NPA)) {
+		block = &hw->block[BLKADDR_NPA];
+		free_lfs = rvu_rsrc_free_count(&block->lf);
+		if (!free_lfs)
+			goto fail;
+	} else if (req->npalf) {
+		dev_err(&rvu->pdev->dev,
+			"Func 0x%x: Invalid req, already has NPA\n",
+			 pcifunc);
+		return -EINVAL;
+	}
+
+	/* Only one NIX LF can be attached */
+	if (req->nixlf && !rvu_get_rsrc_mapcount(pfvf, BLKTYPE_NIX)) {
+		block = &hw->block[BLKADDR_NIX0];
+		free_lfs = rvu_rsrc_free_count(&block->lf);
+		if (!free_lfs)
+			goto fail;
+	} else if (req->nixlf) {
+		dev_err(&rvu->pdev->dev,
+			"Func 0x%x: Invalid req, already has NIX\n",
+			pcifunc);
+		return -EINVAL;
+	}
+
+	if (req->sso) {
+		block = &hw->block[BLKADDR_SSO];
+		/* Is the request within limits? */
+		if (req->sso > block->lf.max) {
+			dev_err(&rvu->pdev->dev,
+				"Func 0x%x: Invalid SSO req, %d > max %d\n",
+				 pcifunc, req->sso, block->lf.max);
+			return -EINVAL;
+		}
+		mappedlfs = rvu_get_rsrc_mapcount(pfvf, block->type);
+		free_lfs = rvu_rsrc_free_count(&block->lf);
+		/* Check if additional resources are available */
+		if (req->sso > mappedlfs &&
+		    ((req->sso - mappedlfs) > free_lfs))
+			goto fail;
+	}
+
+	if (req->ssow) {
+		block = &hw->block[BLKADDR_SSOW];
+		if (req->ssow > block->lf.max) {
+			dev_err(&rvu->pdev->dev,
+				"Func 0x%x: Invalid SSOW req, %d > max %d\n",
+				 pcifunc, req->sso, block->lf.max);
+			return -EINVAL;
+		}
+		mappedlfs = rvu_get_rsrc_mapcount(pfvf, block->type);
+		free_lfs = rvu_rsrc_free_count(&block->lf);
+		if (req->ssow > mappedlfs &&
+		    ((req->ssow - mappedlfs) > free_lfs))
+			goto fail;
+	}
+
+	if (req->timlfs) {
+		block = &hw->block[BLKADDR_TIM];
+		if (req->timlfs > block->lf.max) {
+			dev_err(&rvu->pdev->dev,
+				"Func 0x%x: Invalid TIMLF req, %d > max %d\n",
+				 pcifunc, req->timlfs, block->lf.max);
+			return -EINVAL;
+		}
+		mappedlfs = rvu_get_rsrc_mapcount(pfvf, block->type);
+		free_lfs = rvu_rsrc_free_count(&block->lf);
+		if (req->timlfs > mappedlfs &&
+		    ((req->timlfs - mappedlfs) > free_lfs))
+			goto fail;
+	}
+
+	if (req->cptlfs) {
+		block = &hw->block[BLKADDR_CPT0];
+		if (req->cptlfs > block->lf.max) {
+			dev_err(&rvu->pdev->dev,
+				"Func 0x%x: Invalid CPTLF req, %d > max %d\n",
+				 pcifunc, req->cptlfs, block->lf.max);
+			return -EINVAL;
+		}
+		mappedlfs = rvu_get_rsrc_mapcount(pfvf, block->type);
+		free_lfs = rvu_rsrc_free_count(&block->lf);
+		if (req->cptlfs > mappedlfs &&
+		    ((req->cptlfs - mappedlfs) > free_lfs))
+			goto fail;
+	}
+
+	return 0;
+
+fail:
+	dev_info(rvu->dev, "Request for %s failed\n", block->name);
+	return -ENOSPC;
+}
+
+static int rvu_mbox_handler_attach_resources(struct rvu *rvu,
+					     struct rsrc_attach *attach,
+					     struct msg_rsp *rsp)
+{
+	u16 pcifunc = attach->hdr.pcifunc;
+	int err;
+
+	/* If first request, detach all existing attached resources */
+	if (!attach->modify)
+		rvu_detach_rsrcs(rvu, NULL, pcifunc);
+
+	mutex_lock(&rvu->rsrc_lock);
+
+	/* Check if the request can be accommodated */
+	err = rvu_check_rsrc_availability(rvu, attach, pcifunc);
+	if (err)
+		goto exit;
+
+	/* Now attach the requested resources */
+	if (attach->npalf)
+		rvu_attach_block(rvu, pcifunc, BLKTYPE_NPA, 1);
+
+	if (attach->nixlf)
+		rvu_attach_block(rvu, pcifunc, BLKTYPE_NIX, 1);
+
+	if (attach->sso) {
+		/* An RVU func doesn't know which exact LF or slot is
+		 * attached to it; it always sees them as slots 0, 1, 2...
+		 * So for a 'modify' request, simply detach all existing
+		 * attached LFs/slots and attach them afresh.
+		 */
+		if (attach->modify)
+			rvu_detach_block(rvu, pcifunc, BLKTYPE_SSO);
+		rvu_attach_block(rvu, pcifunc, BLKTYPE_SSO, attach->sso);
+	}
+
+	if (attach->ssow) {
+		if (attach->modify)
+			rvu_detach_block(rvu, pcifunc, BLKTYPE_SSOW);
+		rvu_attach_block(rvu, pcifunc, BLKTYPE_SSOW, attach->ssow);
+	}
+
+	if (attach->timlfs) {
+		if (attach->modify)
+			rvu_detach_block(rvu, pcifunc, BLKTYPE_TIM);
+		rvu_attach_block(rvu, pcifunc, BLKTYPE_TIM, attach->timlfs);
+	}
+
+	if (attach->cptlfs) {
+		if (attach->modify)
+			rvu_detach_block(rvu, pcifunc, BLKTYPE_CPT);
+		rvu_attach_block(rvu, pcifunc, BLKTYPE_CPT, attach->cptlfs);
+	}
+
+exit:
+	mutex_unlock(&rvu->rsrc_lock);
+	return err;
+}
+
+static u16 rvu_get_msix_offset(struct rvu *rvu, struct rvu_pfvf *pfvf,
+			       int blkaddr, int lf)
+{
+	u16 vec;
+
+	if (lf < 0)
+		return MSIX_VECTOR_INVALID;
+
+	for (vec = 0; vec < pfvf->msix.max; vec++) {
+		if (pfvf->msix_lfmap[vec] == MSIX_BLKLF(blkaddr, lf))
+			return vec;
+	}
+	return MSIX_VECTOR_INVALID;
+}
+
+static void rvu_set_msix_offset(struct rvu *rvu, struct rvu_pfvf *pfvf,
+				struct rvu_block *block, int lf)
+{
+	u16 nvecs, vec, offset;
+	u64 cfg;
+
+	cfg = rvu_read64(rvu, block->addr, block->msixcfg_reg |
+			 (lf << block->lfshift));
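+	/* Number of MSIX vectors needed by this LF, from its MSIX config */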
+	nvecs = (cfg >> 12) & 0xFF;
+
+	/* Check and alloc MSIX vectors, must be contiguous */
+	if (!rvu_rsrc_check_contig(&pfvf->msix, nvecs))
+		return;
+
+	offset = rvu_alloc_rsrc_contig(&pfvf->msix, nvecs);
+
+	/* Config MSIX offset in LF */
+	rvu_write64(rvu, block->addr, block->msixcfg_reg |
+		    (lf << block->lfshift), (cfg & ~0x7FFULL) | offset);
+
+	/* Update the bitmap as well */
+	for (vec = 0; vec < nvecs; vec++)
+		pfvf->msix_lfmap[offset + vec] = MSIX_BLKLF(block->addr, lf);
+}
+
+static void rvu_clear_msix_offset(struct rvu *rvu, struct rvu_pfvf *pfvf,
+				  struct rvu_block *block, int lf)
+{
+	u16 nvecs, vec, offset;
+	u64 cfg;
+
+	cfg = rvu_read64(rvu, block->addr, block->msixcfg_reg |
+			 (lf << block->lfshift));
+	nvecs = (cfg >> 12) & 0xFF;
+
+	/* Clear MSIX offset in LF */
+	rvu_write64(rvu, block->addr, block->msixcfg_reg |
+		    (lf << block->lfshift), cfg & ~0x7FFULL);
+
+	offset = rvu_get_msix_offset(rvu, pfvf, block->addr, lf);
+
+	/* Update the mapping */
+	for (vec = 0; vec < nvecs; vec++)
+		pfvf->msix_lfmap[offset + vec] = 0;
+
+	/* Free the same in MSIX bitmap */
+	rvu_free_rsrc_contig(&pfvf->msix, nvecs, offset);
+}
+
+static int rvu_mbox_handler_msix_offset(struct rvu *rvu, struct msg_req *req,
+					struct msix_offset_rsp *rsp)
+{
+	struct rvu_hwinfo *hw = rvu->hw;
+	u16 pcifunc = req->hdr.pcifunc;
+	struct rvu_pfvf *pfvf;
+	int lf, slot;
+
+	pfvf = rvu_get_pfvf(rvu, pcifunc);
+	if (!pfvf->msix.bmap)
+		return 0;
+
+	/* Set MSIX offsets for each block's LFs attached to this PF/VF */
+	lf = rvu_get_lf(rvu, &hw->block[BLKADDR_NPA], pcifunc, 0);
+	rsp->npa_msixoff = rvu_get_msix_offset(rvu, pfvf, BLKADDR_NPA, lf);
+
+	lf = rvu_get_lf(rvu, &hw->block[BLKADDR_NIX0], pcifunc, 0);
+	rsp->nix_msixoff = rvu_get_msix_offset(rvu, pfvf, BLKADDR_NIX0, lf);
+
+	rsp->sso = pfvf->sso;
+	for (slot = 0; slot < rsp->sso; slot++) {
+		lf = rvu_get_lf(rvu, &hw->block[BLKADDR_SSO], pcifunc, slot);
+		rsp->sso_msixoff[slot] =
+			rvu_get_msix_offset(rvu, pfvf, BLKADDR_SSO, lf);
+	}
+
+	rsp->ssow = pfvf->ssow;
+	for (slot = 0; slot < rsp->ssow; slot++) {
+		lf = rvu_get_lf(rvu, &hw->block[BLKADDR_SSOW], pcifunc, slot);
+		rsp->ssow_msixoff[slot] =
+			rvu_get_msix_offset(rvu, pfvf, BLKADDR_SSOW, lf);
+	}
+
+	rsp->timlfs = pfvf->timlfs;
+	for (slot = 0; slot < rsp->timlfs; slot++) {
+		lf = rvu_get_lf(rvu, &hw->block[BLKADDR_TIM], pcifunc, slot);
+		rsp->timlf_msixoff[slot] =
+			rvu_get_msix_offset(rvu, pfvf, BLKADDR_TIM, lf);
+	}
+
+	rsp->cptlfs = pfvf->cptlfs;
+	for (slot = 0; slot < rsp->cptlfs; slot++) {
+		lf = rvu_get_lf(rvu, &hw->block[BLKADDR_CPT0], pcifunc, slot);
+		rsp->cptlf_msixoff[slot] =
+			rvu_get_msix_offset(rvu, pfvf, BLKADDR_CPT0, lf);
+	}
+	return 0;
+}
+
+static int rvu_mbox_handler_vf_flr(struct rvu *rvu, struct msg_req *req,
+				   struct msg_rsp *rsp)
+{
+	u16 pcifunc = req->hdr.pcifunc;
+	u16 vf, numvfs;
+	u64 cfg;
+
+	vf = pcifunc & RVU_PFVF_FUNC_MASK;
+	cfg = rvu_read64(rvu, BLKADDR_RVUM,
+			 RVU_PRIV_PFX_CFG(rvu_get_pf(pcifunc)));
+	numvfs = (cfg >> 12) & 0xFF;
+
+	if (vf && vf <= numvfs)
+		__rvu_flr_handler(rvu, pcifunc);
+	else
+		return RVU_INVALID_VF_ID;
+
+	return 0;
+}
+
+static int rvu_process_mbox_msg(struct otx2_mbox *mbox, int devid,
+				struct mbox_msghdr *req)
+{
+	struct rvu *rvu = pci_get_drvdata(mbox->pdev);
+
+	/* Check if the message is valid; if not, reply with an invalid msg */
+	if (req->sig != OTX2_MBOX_REQ_SIG)
+		goto bad_message;
+
+	switch (req->id) {
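+	/* MBOX_MESSAGES expands via M() below into one 'case' per mailbox
+	 * message ID: allocate a response of the right type, fill its header
+	 * and dispatch to the corresponding rvu_mbox_handler_<name>().
+	 */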
+#define M(_name, _id, _fn_name, _req_type, _rsp_type)			\
+	case _id: {							\
+		struct _rsp_type *rsp;					\
+		int err;						\
+									\
+		rsp = (struct _rsp_type *)otx2_mbox_alloc_msg(		\
+			mbox, devid,					\
+			sizeof(struct _rsp_type));			\
+		/* some handlers should complete even if reply */	\
+		/* could not be allocated */				\
+		if (!rsp &&						\
+		    _id != MBOX_MSG_DETACH_RESOURCES &&			\
+		    _id != MBOX_MSG_NIX_TXSCH_FREE &&			\
+		    _id != MBOX_MSG_VF_FLR)				\
+			return -ENOMEM;					\
+		if (rsp) {						\
+			rsp->hdr.id = _id;				\
+			rsp->hdr.sig = OTX2_MBOX_RSP_SIG;		\
+			rsp->hdr.pcifunc = req->pcifunc;		\
+			rsp->hdr.rc = 0;				\
+		}							\
+									\
+		err = rvu_mbox_handler_ ## _fn_name(rvu,		\
+						    (struct _req_type *)req, \
+						    rsp);		\
+		if (rsp && err)						\
+			rsp->hdr.rc = err;				\
+									\
+		return rsp ? err : -ENOMEM;				\
+	}
+MBOX_MESSAGES
+#undef M
+
+bad_message:
+	default:
+		otx2_reply_invalid_msg(mbox, devid, req->pcifunc, req->id);
+		return -ENODEV;
+	}
+}
+
+static void __rvu_mbox_handler(struct rvu_work *mwork, int type)
+{
+	struct rvu *rvu = mwork->rvu;
+	int offset, err, id, devid;
+	struct otx2_mbox_dev *mdev;
+	struct mbox_hdr *req_hdr;
+	struct mbox_msghdr *msg;
+	struct mbox_wq_info *mw;
+	struct otx2_mbox *mbox;
+
+	switch (type) {
+	case TYPE_AFPF:
+		mw = &rvu->afpf_wq_info;
+		break;
+	case TYPE_AFVF:
+		mw = &rvu->afvf_wq_info;
+		break;
+	default:
+		return;
+	}
+
+	devid = mwork - mw->mbox_wrk;
+	mbox = &mw->mbox;
+	mdev = &mbox->dev[devid];
+
+	/* Process received mbox messages */
+	req_hdr = mdev->mbase + mbox->rx_start;
+	if (req_hdr->num_msgs == 0)
+		return;
+
+	offset = mbox->rx_start + ALIGN(sizeof(*req_hdr), MBOX_MSG_ALIGN);
+
+	for (id = 0; id < req_hdr->num_msgs; id++) {
+		msg = mdev->mbase + offset;
+
+		/* Set which PF/VF sent this message based on mbox IRQ */
+		switch (type) {
+		case TYPE_AFPF:
+			msg->pcifunc &=
+				~(RVU_PFVF_PF_MASK << RVU_PFVF_PF_SHIFT);
+			msg->pcifunc |= (devid << RVU_PFVF_PF_SHIFT);
+			break;
+		case TYPE_AFVF:
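+			/* For AF's VFs, FUNC 0 is the PF itself, so VF
+			 * 'devid' maps to FUNC devid + 1.
+			 */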
+			msg->pcifunc &=
+				~(RVU_PFVF_FUNC_MASK << RVU_PFVF_FUNC_SHIFT);
+			msg->pcifunc |= (devid << RVU_PFVF_FUNC_SHIFT) + 1;
+			break;
+		}
+
+		err = rvu_process_mbox_msg(mbox, devid, msg);
+		if (!err) {
+			offset = mbox->rx_start + msg->next_msgoff;
+			continue;
+		}
+
+		if (msg->pcifunc & RVU_PFVF_FUNC_MASK)
+			dev_warn(rvu->dev, "Error %d when processing message %s (0x%x) from PF%d:VF%d\n",
+				 err, otx2_mbox_id2name(msg->id),
+				 msg->id, devid,
+				 (msg->pcifunc & RVU_PFVF_FUNC_MASK) - 1);
+		else
+			dev_warn(rvu->dev, "Error %d when processing message %s (0x%x) from PF%d\n",
+				 err, otx2_mbox_id2name(msg->id),
+				 msg->id, devid);
+	}
+
+	/* Send mbox responses to VF/PF */
+	otx2_mbox_msg_send(mbox, devid);
+}
+
+static inline void rvu_afpf_mbox_handler(struct work_struct *work)
+{
+	struct rvu_work *mwork = container_of(work, struct rvu_work, work);
+
+	__rvu_mbox_handler(mwork, TYPE_AFPF);
+}
+
+static inline void rvu_afvf_mbox_handler(struct work_struct *work)
+{
+	struct rvu_work *mwork = container_of(work, struct rvu_work, work);
+
+	__rvu_mbox_handler(mwork, TYPE_AFVF);
+}
+
+static void __rvu_mbox_up_handler(struct rvu_work *mwork, int type)
+{
+	struct rvu *rvu = mwork->rvu;
+	struct otx2_mbox_dev *mdev;
+	struct mbox_hdr *rsp_hdr;
+	struct mbox_msghdr *msg;
+	struct mbox_wq_info *mw;
+	struct otx2_mbox *mbox;
+	int offset, id, devid;
+
+	switch (type) {
+	case TYPE_AFPF:
+		mw = &rvu->afpf_wq_info;
+		break;
+	case TYPE_AFVF:
+		mw = &rvu->afvf_wq_info;
+		break;
+	default:
+		return;
+	}
+
+	devid = mwork - mw->mbox_wrk_up;
+	mbox = &mw->mbox_up;
+	mdev = &mbox->dev[devid];
+
+	rsp_hdr = mdev->mbase + mbox->rx_start;
+	if (rsp_hdr->num_msgs == 0) {
+		dev_warn(rvu->dev, "mbox up handler: num_msgs = 0\n");
+		return;
+	}
+
+	offset = mbox->rx_start + ALIGN(sizeof(*rsp_hdr), MBOX_MSG_ALIGN);
+
+	for (id = 0; id < rsp_hdr->num_msgs; id++) {
+		msg = mdev->mbase + offset;
+
+		if (msg->id >= MBOX_MSG_MAX) {
+			dev_err(rvu->dev,
+				"Mbox msg with unknown ID 0x%x\n", msg->id);
+			goto end;
+		}
+
+		if (msg->sig != OTX2_MBOX_RSP_SIG) {
+			dev_err(rvu->dev,
+				"Mbox msg with wrong signature %x, ID 0x%x\n",
+				msg->sig, msg->id);
+			goto end;
+		}
+
+		switch (msg->id) {
+		case MBOX_MSG_CGX_LINK_EVENT:
+			break;
+		default:
+			if (msg->rc)
+				dev_err(rvu->dev,
+					"Mbox msg response has err %d, ID 0x%x\n",
+					msg->rc, msg->id);
+			break;
+		}
+end:
+		offset = mbox->rx_start + msg->next_msgoff;
+		mdev->msgs_acked++;
+	}
+
+	otx2_mbox_reset(mbox, devid);
+}
+
+static inline void rvu_afpf_mbox_up_handler(struct work_struct *work)
+{
+	struct rvu_work *mwork = container_of(work, struct rvu_work, work);
+
+	__rvu_mbox_up_handler(mwork, TYPE_AFPF);
+}
+
+static inline void rvu_afvf_mbox_up_handler(struct work_struct *work)
+{
+	struct rvu_work *mwork = container_of(work, struct rvu_work, work);
+
+	__rvu_mbox_up_handler(mwork, TYPE_AFVF);
+}
+
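+/* Set up an AF<->PF or AF<->VF mailbox: map the shared mailbox region,
+ * initialize both mbox directions and create a workqueue with per-device
+ * work items for processing messages.
+ */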
+static int rvu_mbox_init(struct rvu *rvu, struct mbox_wq_info *mw,
+			 int type, int num,
+			 void (mbox_handler)(struct work_struct *),
+			 void (mbox_up_handler)(struct work_struct *))
+{
+	void __iomem *hwbase = NULL, *reg_base;
+	int err, i, dir, dir_up;
+	struct rvu_work *mwork;
+	const char *name;
+	u64 bar4_addr;
+
+	switch (type) {
+	case TYPE_AFPF:
+		name = "rvu_afpf_mailbox";
+		bar4_addr = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_PF_BAR4_ADDR);
+		dir = MBOX_DIR_AFPF;
+		dir_up = MBOX_DIR_AFPF_UP;
+		reg_base = rvu->afreg_base;
+		break;
+	case TYPE_AFVF:
+		name = "rvu_afvf_mailbox";
+		bar4_addr = rvupf_read64(rvu, RVU_PF_VF_BAR4_ADDR);
+		dir = MBOX_DIR_PFVF;
+		dir_up = MBOX_DIR_PFVF_UP;
+		reg_base = rvu->pfreg_base;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	mw->mbox_wq = alloc_workqueue(name,
+				      WQ_UNBOUND | WQ_HIGHPRI | WQ_MEM_RECLAIM,
+				      num);
+	if (!mw->mbox_wq)
+		return -ENOMEM;
+
+	mw->mbox_wrk = devm_kcalloc(rvu->dev, num,
+				    sizeof(struct rvu_work), GFP_KERNEL);
+	if (!mw->mbox_wrk) {
+		err = -ENOMEM;
+		goto exit;
+	}
+
+	mw->mbox_wrk_up = devm_kcalloc(rvu->dev, num,
+				       sizeof(struct rvu_work), GFP_KERNEL);
+	if (!mw->mbox_wrk_up) {
+		err = -ENOMEM;
+		goto exit;
+	}
+
+	/* Mailbox is a reserved memory (in RAM) region shared between
+	 * RVU devices; it shouldn't be mapped as device memory, so that
+	 * unaligned accesses are allowed.
+	 */
+	hwbase = ioremap_wc(bar4_addr, MBOX_SIZE * num);
+	if (!hwbase) {
+		dev_err(rvu->dev, "Unable to map mailbox region\n");
+		err = -ENOMEM;
+		goto exit;
+	}
+
+	err = otx2_mbox_init(&mw->mbox, hwbase, rvu->pdev, reg_base, dir, num);
+	if (err)
+		goto exit;
+
+	err = otx2_mbox_init(&mw->mbox_up, hwbase, rvu->pdev,
+			     reg_base, dir_up, num);
+	if (err)
+		goto exit;
+
+	for (i = 0; i < num; i++) {
+		mwork = &mw->mbox_wrk[i];
+		mwork->rvu = rvu;
+		INIT_WORK(&mwork->work, mbox_handler);
+
+		mwork = &mw->mbox_wrk_up[i];
+		mwork->rvu = rvu;
+		INIT_WORK(&mwork->work, mbox_up_handler);
+	}
+
+	return 0;
+exit:
+	if (hwbase)
+		iounmap((void __iomem *)hwbase);
+	destroy_workqueue(mw->mbox_wq);
+	return err;
+}
+
+static void rvu_mbox_destroy(struct mbox_wq_info *mw)
+{
+	if (mw->mbox_wq) {
+		flush_workqueue(mw->mbox_wq);
+		destroy_workqueue(mw->mbox_wq);
+		mw->mbox_wq = NULL;
+	}
+
+	if (mw->mbox.hwbase)
+		iounmap((void __iomem *)mw->mbox.hwbase);
+
+	otx2_mbox_destroy(&mw->mbox);
+	otx2_mbox_destroy(&mw->mbox_up);
+}
+
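+/* Queue mbox work for every PF/VF whose interrupt bit is set and which has
+ * pending messages in its mailbox region.
+ */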
+static void rvu_queue_work(struct mbox_wq_info *mw, int first,
+			   int mdevs, u64 intr)
+{
+	struct otx2_mbox_dev *mdev;
+	struct otx2_mbox *mbox;
+	struct mbox_hdr *hdr;
+	int i;
+
+	for (i = first; i < mdevs; i++) {
+		/* Bits in 'intr' are indexed from 0 relative to 'first' */
+		if (!(intr & BIT_ULL(i - first)))
+			continue;
+
+		mbox = &mw->mbox;
+		mdev = &mbox->dev[i];
+		hdr = mdev->mbase + mbox->rx_start;
+		if (hdr->num_msgs)
+			queue_work(mw->mbox_wq, &mw->mbox_wrk[i].work);
+
+		mbox = &mw->mbox_up;
+		mdev = &mbox->dev[i];
+		hdr = mdev->mbase + mbox->rx_start;
+		if (hdr->num_msgs)
+			queue_work(mw->mbox_wq, &mw->mbox_wrk_up[i].work);
+	}
+}
+
+static irqreturn_t rvu_mbox_intr_handler(int irq, void *rvu_irq)
+{
+	struct rvu *rvu = (struct rvu *)rvu_irq;
+	int vfs = rvu->vfs;
+	u64 intr;
+
+	intr = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_PFAF_MBOX_INT);
+	/* Clear interrupts */
+	rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFAF_MBOX_INT, intr);
+
+	/* Sync with mbox memory region */
+	rmb();
+
+	rvu_queue_work(&rvu->afpf_wq_info, 0, rvu->hw->total_pfs, intr);
+
+	/* Handle VF interrupts */
+	if (vfs > 64) {
+		intr = rvupf_read64(rvu, RVU_PF_VFPF_MBOX_INTX(1));
+		rvupf_write64(rvu, RVU_PF_VFPF_MBOX_INTX(1), intr);
+
+		rvu_queue_work(&rvu->afvf_wq_info, 64, vfs, intr);
+		vfs -= 64;
+	}
+
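+	/* Handle mailbox interrupts of the first (up to) 64 AF VFs */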
+	intr = rvupf_read64(rvu, RVU_PF_VFPF_MBOX_INTX(0));
+	rvupf_write64(rvu, RVU_PF_VFPF_MBOX_INTX(0), intr);
+
+	rvu_queue_work(&rvu->afvf_wq_info, 0, vfs, intr);
+
+	return IRQ_HANDLED;
+}
+
+static void rvu_enable_mbox_intr(struct rvu *rvu)
+{
+	struct rvu_hwinfo *hw = rvu->hw;
+
+	/* Clear spurious irqs, if any */
+	rvu_write64(rvu, BLKADDR_RVUM,
+		    RVU_AF_PFAF_MBOX_INT, INTR_MASK(hw->total_pfs));
+
+	/* Enable mbox interrupt for all PFs except PF0 i.e. the AF itself */
+	rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFAF_MBOX_INT_ENA_W1S,
+		    INTR_MASK(hw->total_pfs) & ~1ULL);
+}
+
+static void rvu_blklf_teardown(struct rvu *rvu, u16 pcifunc, u8 blkaddr)
+{
+	struct rvu_block *block;
+	int slot, lf, num_lfs;
+	int err;
+
+	block = &rvu->hw->block[blkaddr];
+	num_lfs = rvu_get_rsrc_mapcount(rvu_get_pfvf(rvu, pcifunc),
+					block->type);
+	if (!num_lfs)
+		return;
+	for (slot = 0; slot < num_lfs; slot++) {
+		lf = rvu_get_lf(rvu, block, pcifunc, slot);
+		if (lf < 0)
+			continue;
+
+		/* Cleanup LF and reset it */
+		if (block->addr == BLKADDR_NIX0)
+			rvu_nix_lf_teardown(rvu, pcifunc, block->addr, lf);
+		else if (block->addr == BLKADDR_NPA)
+			rvu_npa_lf_teardown(rvu, pcifunc, lf);
+
+		err = rvu_lf_reset(rvu, block, lf);
+		if (err) {
+			dev_err(rvu->dev, "Failed to reset blkaddr %d LF%d\n",
+				block->addr, lf);
+		}
+	}
+}
+
+static void __rvu_flr_handler(struct rvu *rvu, u16 pcifunc)
+{
+	mutex_lock(&rvu->flr_lock);
+	/* Reset order should reflect inter-block dependencies:
+	 * 1. Reset any packet/work sources (NIX, CPT, TIM)
+	 * 2. Flush and reset SSO/SSOW
+	 * 3. Cleanup pools (NPA)
+	 */
+	rvu_blklf_teardown(rvu, pcifunc, BLKADDR_NIX0);
+	rvu_blklf_teardown(rvu, pcifunc, BLKADDR_CPT0);
+	rvu_blklf_teardown(rvu, pcifunc, BLKADDR_TIM);
+	rvu_blklf_teardown(rvu, pcifunc, BLKADDR_SSOW);
+	rvu_blklf_teardown(rvu, pcifunc, BLKADDR_SSO);
+	rvu_blklf_teardown(rvu, pcifunc, BLKADDR_NPA);
+	rvu_detach_rsrcs(rvu, NULL, pcifunc);
+	mutex_unlock(&rvu->flr_lock);
+}
+
+static void rvu_afvf_flr_handler(struct rvu *rvu, int vf)
+{
+	int reg = 0;
+
+	/* pcifunc = 0(PF0) | (vf + 1) */
+	__rvu_flr_handler(rvu, vf + 1);
+
+	if (vf >= 64) {
+		reg = 1;
+		vf = vf - 64;
+	}
+
+	/* Signal FLR finish and enable IRQ */
+	rvupf_write64(rvu, RVU_PF_VFTRPENDX(reg), BIT_ULL(vf));
+	rvupf_write64(rvu, RVU_PF_VFFLR_INT_ENA_W1SX(reg), BIT_ULL(vf));
+}
+
+static void rvu_flr_handler(struct work_struct *work)
+{
+	struct rvu_work *flrwork = container_of(work, struct rvu_work, work);
+	struct rvu *rvu = flrwork->rvu;
+	u16 pcifunc, numvfs, vf;
+	u64 cfg;
+	int pf;
+
+	pf = flrwork - rvu->flr_wrk;
+	if (pf >= rvu->hw->total_pfs) {
+		rvu_afvf_flr_handler(rvu, pf - rvu->hw->total_pfs);
+		return;
+	}
+
+	cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf));
+	numvfs = (cfg >> 12) & 0xFF;
+	pcifunc  = pf << RVU_PFVF_PF_SHIFT;
+
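+	/* FLR of a PF also cleans up resources of all its VFs */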
+	for (vf = 0; vf < numvfs; vf++)
+		__rvu_flr_handler(rvu, (pcifunc | (vf + 1)));
+
+	__rvu_flr_handler(rvu, pcifunc);
+
+	/* Signal FLR finish */
+	rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFTRPEND, BIT_ULL(pf));
+
+	/* Enable interrupt */
+	rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFFLR_INT_ENA_W1S,  BIT_ULL(pf));
+}
+
+static void rvu_afvf_queue_flr_work(struct rvu *rvu, int start_vf, int numvfs)
+{
+	int dev, vf, reg = 0;
+	u64 intr;
+
+	if (start_vf >= 64)
+		reg = 1;
+
+	intr = rvupf_read64(rvu, RVU_PF_VFFLR_INTX(reg));
+	if (!intr)
+		return;
+
+	for (vf = 0; vf < numvfs; vf++) {
+		if (!(intr & BIT_ULL(vf)))
+			continue;
+		dev = vf + start_vf + rvu->hw->total_pfs;
+		queue_work(rvu->flr_wq, &rvu->flr_wrk[dev].work);
+		/* Clear and disable the interrupt */
+		rvupf_write64(rvu, RVU_PF_VFFLR_INTX(reg), BIT_ULL(vf));
+		rvupf_write64(rvu, RVU_PF_VFFLR_INT_ENA_W1CX(reg), BIT_ULL(vf));
+	}
+}
+
+static irqreturn_t rvu_flr_intr_handler(int irq, void *rvu_irq)
+{
+	struct rvu *rvu = (struct rvu *)rvu_irq;
+	u64 intr;
+	u8  pf;
+
+	intr = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_PFFLR_INT);
+	if (!intr)
+		goto afvf_flr;
+
+	for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
+		if (intr & (1ULL << pf)) {
+			/* PF is already dead, do only AF related operations */
+			queue_work(rvu->flr_wq, &rvu->flr_wrk[pf].work);
+			/* clear interrupt */
+			rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFFLR_INT,
+				    BIT_ULL(pf));
+			/* Disable the interrupt */
+			rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFFLR_INT_ENA_W1C,
+				    BIT_ULL(pf));
+		}
+	}
+
+afvf_flr:
+	rvu_afvf_queue_flr_work(rvu, 0, 64);
+	if (rvu->vfs > 64)
+		rvu_afvf_queue_flr_work(rvu, 64, rvu->vfs - 64);
+
+	return IRQ_HANDLED;
+}
+
+static void rvu_me_handle_vfset(struct rvu *rvu, int idx, u64 intr)
+{
+	int vf;
+
+	/* Nothing to be done here other than clearing the
+	 * TRPEND bit.
+	 */
+	for (vf = 0; vf < 64; vf++) {
+		if (intr & (1ULL << vf)) {
+			/* clear the trpend due to ME (master enable) */
+			rvupf_write64(rvu, RVU_PF_VFTRPENDX(idx), BIT_ULL(vf));
+			/* clear interrupt */
+			rvupf_write64(rvu, RVU_PF_VFME_INTX(idx), BIT_ULL(vf));
+		}
+	}
+}
+
+/* Handles ME interrupts from VFs of AF */
+static irqreturn_t rvu_me_vf_intr_handler(int irq, void *rvu_irq)
+{
+	struct rvu *rvu = (struct rvu *)rvu_irq;
+	int vfset;
+	u64 intr;
+
+	intr = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_PFME_INT);
+
+	for (vfset = 0; vfset <= 1; vfset++) {
+		intr = rvupf_read64(rvu, RVU_PF_VFME_INTX(vfset));
+		if (intr)
+			rvu_me_handle_vfset(rvu, vfset, intr);
+	}
+
+	return IRQ_HANDLED;
+}
+
+/* Handles ME interrupts from PFs */
+static irqreturn_t rvu_me_pf_intr_handler(int irq, void *rvu_irq)
+{
+	struct rvu *rvu = (struct rvu *)rvu_irq;
+	u64 intr;
+	u8  pf;
+
+	intr = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_PFME_INT);
+
+	/* Nothing to be done here other than clearing the
+	 * TRPEND bit.
+	 */
+	for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
+		if (intr & (1ULL << pf)) {
+			/* clear the trpend due to ME (master enable) */
+			rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFTRPEND,
+				    BIT_ULL(pf));
+			/* clear interrupt */
+			rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFME_INT,
+				    BIT_ULL(pf));
+		}
+	}
+
+	return IRQ_HANDLED;
+}
+
+static void rvu_unregister_interrupts(struct rvu *rvu)
+{
+	int irq;
+
+	/* Disable the Mbox interrupt */
+	rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFAF_MBOX_INT_ENA_W1C,
+		    INTR_MASK(rvu->hw->total_pfs) & ~1ULL);
+
+	/* Disable the PF FLR interrupt */
+	rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFFLR_INT_ENA_W1C,
+		    INTR_MASK(rvu->hw->total_pfs) & ~1ULL);
+
+	/* Disable the PF ME interrupt */
+	rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFME_INT_ENA_W1C,
+		    INTR_MASK(rvu->hw->total_pfs) & ~1ULL);
+
+	for (irq = 0; irq < rvu->num_vec; irq++) {
+		if (rvu->irq_allocated[irq])
+			free_irq(pci_irq_vector(rvu->pdev, irq), rvu);
+	}
+
+	pci_free_irq_vectors(rvu->pdev);
+	rvu->num_vec = 0;
+}
+
+static int rvu_afvf_msix_vectors_num_ok(struct rvu *rvu)
+{
+	struct rvu_pfvf *pfvf = &rvu->pf[0];
+	int offset;
+
+	offset = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_INT_CFG(0)) & 0x3ff;
+
+	/* Make sure there are enough MSIX vectors configured so that
+	 * VF interrupts can be handled. An offset of zero means that PF
+	 * vectors are not configured and would overlap the AF vectors.
+	 */
+	return (pfvf->msix.max >= RVU_AF_INT_VEC_CNT + RVU_PF_INT_VEC_CNT) &&
+	       offset;
+}
+
+static int rvu_register_interrupts(struct rvu *rvu)
+{
+	int ret, offset, pf_vec_start;
+
+	rvu->num_vec = pci_msix_vec_count(rvu->pdev);
+
+	rvu->irq_name = devm_kmalloc_array(rvu->dev, rvu->num_vec,
+					   NAME_SIZE, GFP_KERNEL);
+	if (!rvu->irq_name)
+		return -ENOMEM;
+
+	rvu->irq_allocated = devm_kcalloc(rvu->dev, rvu->num_vec,
+					  sizeof(bool), GFP_KERNEL);
+	if (!rvu->irq_allocated)
+		return -ENOMEM;
+
+	/* Enable MSI-X */
+	ret = pci_alloc_irq_vectors(rvu->pdev, rvu->num_vec,
+				    rvu->num_vec, PCI_IRQ_MSIX);
+	if (ret < 0) {
+		dev_err(rvu->dev,
+			"RVUAF: Request for %d msix vectors failed, ret %d\n",
+			rvu->num_vec, ret);
+		return ret;
+	}
+
+	/* Register mailbox interrupt handler */
+	sprintf(&rvu->irq_name[RVU_AF_INT_VEC_MBOX * NAME_SIZE], "RVUAF Mbox");
+	ret = request_irq(pci_irq_vector(rvu->pdev, RVU_AF_INT_VEC_MBOX),
+			  rvu_mbox_intr_handler, 0,
+			  &rvu->irq_name[RVU_AF_INT_VEC_MBOX * NAME_SIZE], rvu);
+	if (ret) {
+		dev_err(rvu->dev,
+			"RVUAF: IRQ registration failed for mbox irq\n");
+		goto fail;
+	}
+
+	rvu->irq_allocated[RVU_AF_INT_VEC_MBOX] = true;
+
+	/* Enable mailbox interrupts from all PFs */
+	rvu_enable_mbox_intr(rvu);
+
+	/* Register FLR interrupt handler */
+	sprintf(&rvu->irq_name[RVU_AF_INT_VEC_PFFLR * NAME_SIZE],
+		"RVUAF FLR");
+	ret = request_irq(pci_irq_vector(rvu->pdev, RVU_AF_INT_VEC_PFFLR),
+			  rvu_flr_intr_handler, 0,
+			  &rvu->irq_name[RVU_AF_INT_VEC_PFFLR * NAME_SIZE],
+			  rvu);
+	if (ret) {
+		dev_err(rvu->dev,
+			"RVUAF: IRQ registration failed for FLR\n");
+		goto fail;
+	}
+	rvu->irq_allocated[RVU_AF_INT_VEC_PFFLR] = true;
+
+	/* Enable FLR interrupt for all PFs */
+	rvu_write64(rvu, BLKADDR_RVUM,
+		    RVU_AF_PFFLR_INT, INTR_MASK(rvu->hw->total_pfs));
+
+	rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFFLR_INT_ENA_W1S,
+		    INTR_MASK(rvu->hw->total_pfs) & ~1ULL);
+
+	/* Register ME interrupt handler */
+	sprintf(&rvu->irq_name[RVU_AF_INT_VEC_PFME * NAME_SIZE],
+		"RVUAF ME");
+	ret = request_irq(pci_irq_vector(rvu->pdev, RVU_AF_INT_VEC_PFME),
+			  rvu_me_pf_intr_handler, 0,
+			  &rvu->irq_name[RVU_AF_INT_VEC_PFME * NAME_SIZE],
+			  rvu);
+	if (ret) {
+		dev_err(rvu->dev,
+			"RVUAF: IRQ registration failed for ME\n");
+	}
+	rvu->irq_allocated[RVU_AF_INT_VEC_PFME] = true;
+
+	/* Enable ME interrupt for all PFs */
+	rvu_write64(rvu, BLKADDR_RVUM,
+		    RVU_AF_PFME_INT, INTR_MASK(rvu->hw->total_pfs));
+
+	rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFME_INT_ENA_W1S,
+		    INTR_MASK(rvu->hw->total_pfs) & ~1ULL);
+
+	if (!rvu_afvf_msix_vectors_num_ok(rvu))
+		return 0;
+
+	/* Get PF MSIX vectors offset. */
+	pf_vec_start = rvu_read64(rvu, BLKADDR_RVUM,
+				  RVU_PRIV_PFX_INT_CFG(0)) & 0x3ff;
+
+	/* Register MBOX0 interrupt. */
+	offset = pf_vec_start + RVU_PF_INT_VEC_VFPF_MBOX0;
+	sprintf(&rvu->irq_name[offset * NAME_SIZE], "RVUAFVF Mbox0");
+	ret = request_irq(pci_irq_vector(rvu->pdev, offset),
+			  rvu_mbox_intr_handler, 0,
+			  &rvu->irq_name[offset * NAME_SIZE],
+			  rvu);
+	if (ret)
+		dev_err(rvu->dev,
+			"RVUAF: IRQ registration failed for Mbox0\n");
+
+	rvu->irq_allocated[offset] = true;
+
+	/* Register MBOX1 interrupt. The MBOX1 IRQ number follows MBOX0,
+	 * so simply increment the current offset by 1.
+	 */
+	offset = pf_vec_start + RVU_PF_INT_VEC_VFPF_MBOX1;
+	sprintf(&rvu->irq_name[offset * NAME_SIZE], "RVUAFVF Mbox1");
+	ret = request_irq(pci_irq_vector(rvu->pdev, offset),
+			  rvu_mbox_intr_handler, 0,
+			  &rvu->irq_name[offset * NAME_SIZE],
+			  rvu);
+	if (ret)
+		dev_err(rvu->dev,
+			"RVUAF: IRQ registration failed for Mbox1\n");
+
+	rvu->irq_allocated[offset] = true;
+
+	/* Register FLR interrupt handler for AF's VFs */
+	offset = pf_vec_start + RVU_PF_INT_VEC_VFFLR0;
+	sprintf(&rvu->irq_name[offset * NAME_SIZE], "RVUAFVF FLR0");
+	ret = request_irq(pci_irq_vector(rvu->pdev, offset),
+			  rvu_flr_intr_handler, 0,
+			  &rvu->irq_name[offset * NAME_SIZE], rvu);
+	if (ret) {
+		dev_err(rvu->dev,
+			"RVUAF: IRQ registration failed for RVUAFVF FLR0\n");
+		goto fail;
+	}
+	rvu->irq_allocated[offset] = true;
+
+	offset = pf_vec_start + RVU_PF_INT_VEC_VFFLR1;
+	sprintf(&rvu->irq_name[offset * NAME_SIZE], "RVUAFVF FLR1");
+	ret = request_irq(pci_irq_vector(rvu->pdev, offset),
+			  rvu_flr_intr_handler, 0,
+			  &rvu->irq_name[offset * NAME_SIZE], rvu);
+	if (ret) {
+		dev_err(rvu->dev,
+			"RVUAF: IRQ registration failed for RVUAFVF FLR1\n");
+		goto fail;
+	}
+	rvu->irq_allocated[offset] = true;
+
+	/* Register ME interrupt handler for AF's VFs */
+	offset = pf_vec_start + RVU_PF_INT_VEC_VFME0;
+	sprintf(&rvu->irq_name[offset * NAME_SIZE], "RVUAFVF ME0");
+	ret = request_irq(pci_irq_vector(rvu->pdev, offset),
+			  rvu_me_vf_intr_handler, 0,
+			  &rvu->irq_name[offset * NAME_SIZE], rvu);
+	if (ret) {
+		dev_err(rvu->dev,
+			"RVUAF: IRQ registration failed for RVUAFVF ME0\n");
+		goto fail;
+	}
+	rvu->irq_allocated[offset] = true;
+
+	offset = pf_vec_start + RVU_PF_INT_VEC_VFME1;
+	sprintf(&rvu->irq_name[offset * NAME_SIZE], "RVUAFVF ME1");
+	ret = request_irq(pci_irq_vector(rvu->pdev, offset),
+			  rvu_me_vf_intr_handler, 0,
+			  &rvu->irq_name[offset * NAME_SIZE], rvu);
+	if (ret) {
+		dev_err(rvu->dev,
+			"RVUAF: IRQ registration failed for RVUAFVF ME1\n");
+		goto fail;
+	}
+	rvu->irq_allocated[offset] = true;
+	return 0;
+
+fail:
+	rvu_unregister_interrupts(rvu);
+	return ret;
+}
+
+static void rvu_flr_wq_destroy(struct rvu *rvu)
+{
+	if (rvu->flr_wq) {
+		flush_workqueue(rvu->flr_wq);
+		destroy_workqueue(rvu->flr_wq);
+		rvu->flr_wq = NULL;
+	}
+}
+
+static int rvu_flr_init(struct rvu *rvu)
+{
+	int dev, num_devs;
+	u64 cfg;
+	int pf;
+
+	/* Enable FLR for all PFs */
+	for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
+		cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf));
+		rvu_write64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf),
+			    cfg | BIT_ULL(22));
+	}
+
+	rvu->flr_wq = alloc_workqueue("rvu_afpf_flr",
+				      WQ_UNBOUND | WQ_HIGHPRI | WQ_MEM_RECLAIM,
+				       1);
+	if (!rvu->flr_wq)
+		return -ENOMEM;
+
+	num_devs = rvu->hw->total_pfs + pci_sriov_get_totalvfs(rvu->pdev);
+	rvu->flr_wrk = devm_kcalloc(rvu->dev, num_devs,
+				    sizeof(struct rvu_work), GFP_KERNEL);
+	if (!rvu->flr_wrk) {
+		destroy_workqueue(rvu->flr_wq);
+		return -ENOMEM;
+	}
+
+	for (dev = 0; dev < num_devs; dev++) {
+		rvu->flr_wrk[dev].rvu = rvu;
+		INIT_WORK(&rvu->flr_wrk[dev].work, rvu_flr_handler);
+	}
+
+	mutex_init(&rvu->flr_lock);
+
+	return 0;
+}
+
+static void rvu_disable_afvf_intr(struct rvu *rvu)
+{
+	int vfs = rvu->vfs;
+
+	rvupf_write64(rvu, RVU_PF_VFPF_MBOX_INT_ENA_W1CX(0), INTR_MASK(vfs));
+	rvupf_write64(rvu, RVU_PF_VFFLR_INT_ENA_W1CX(0), INTR_MASK(vfs));
+	rvupf_write64(rvu, RVU_PF_VFME_INT_ENA_W1CX(0), INTR_MASK(vfs));
+	if (vfs <= 64)
+		return;
+
+	rvupf_write64(rvu, RVU_PF_VFPF_MBOX_INT_ENA_W1CX(1),
+		      INTR_MASK(vfs - 64));
+	rvupf_write64(rvu, RVU_PF_VFFLR_INT_ENA_W1CX(1), INTR_MASK(vfs - 64));
+	rvupf_write64(rvu, RVU_PF_VFME_INT_ENA_W1CX(1), INTR_MASK(vfs - 64));
+}
+
+static void rvu_enable_afvf_intr(struct rvu *rvu)
+{
+	int vfs = rvu->vfs;
+
+	/* Clear any pending interrupts and enable AF VF interrupts for
+	 * the first 64 VFs.
+	 */
+	/* Mbox */
+	rvupf_write64(rvu, RVU_PF_VFPF_MBOX_INTX(0), INTR_MASK(vfs));
+	rvupf_write64(rvu, RVU_PF_VFPF_MBOX_INT_ENA_W1SX(0), INTR_MASK(vfs));
+
+	/* FLR */
+	rvupf_write64(rvu, RVU_PF_VFFLR_INTX(0), INTR_MASK(vfs));
+	rvupf_write64(rvu, RVU_PF_VFFLR_INT_ENA_W1SX(0), INTR_MASK(vfs));
+	rvupf_write64(rvu, RVU_PF_VFME_INT_ENA_W1SX(0), INTR_MASK(vfs));
+
+	/* Same for remaining VFs, if any. */
+	if (vfs <= 64)
+		return;
+
+	rvupf_write64(rvu, RVU_PF_VFPF_MBOX_INTX(1), INTR_MASK(vfs - 64));
+	rvupf_write64(rvu, RVU_PF_VFPF_MBOX_INT_ENA_W1SX(1),
+		      INTR_MASK(vfs - 64));
+
+	rvupf_write64(rvu, RVU_PF_VFFLR_INTX(1), INTR_MASK(vfs - 64));
+	rvupf_write64(rvu, RVU_PF_VFFLR_INT_ENA_W1SX(1), INTR_MASK(vfs - 64));
+	rvupf_write64(rvu, RVU_PF_VFME_INT_ENA_W1SX(1), INTR_MASK(vfs - 64));
+}
+
+#define PCI_DEVID_OCTEONTX2_LBK 0xA061
+
+static int lbk_get_num_chans(void)
+{
+	struct pci_dev *pdev;
+	void __iomem *base;
+	int ret = -EIO;
+
+	pdev = pci_get_device(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_LBK,
+			      NULL);
+	if (!pdev)
+		goto err;
+
+	base = pci_ioremap_bar(pdev, 0);
+	if (!base)
+		goto err_put;
+
+	/* Read number of available LBK channels from LBK(0)_CONST register. */
+	ret = (readq(base + 0x10) >> 32) & 0xffff;
+	iounmap(base);
+err_put:
+	pci_dev_put(pdev);
+err:
+	return ret;
+}
+
+static int rvu_enable_sriov(struct rvu *rvu)
+{
+	struct pci_dev *pdev = rvu->pdev;
+	int err, chans, vfs;
+
+	if (!rvu_afvf_msix_vectors_num_ok(rvu)) {
+		dev_warn(&pdev->dev,
+			 "Skipping SRIOV enablement since not enough IRQs are available\n");
+		return 0;
+	}
+
+	chans = lbk_get_num_chans();
+	if (chans < 0)
+		return chans;
+
+	vfs = pci_sriov_get_totalvfs(pdev);
+
+	/* Limit VFs in case we have more VFs than LBK channels available. */
+	if (vfs > chans)
+		vfs = chans;
+
+	/* AF's VFs work in pairs and talk over consecutive loopback channels.
+	 * Thus we want to enable the maximum even number of VFs. If an
+	 * odd number of VFs is available, the last VF on the list
+	 * remains disabled.
+	 */
+	if (vfs & 0x1) {
+		dev_warn(&pdev->dev,
+			 "Number of VFs should be even. Enabling %d out of %d.\n",
+			 vfs - 1, vfs);
+		vfs--;
+	}
+
+	if (!vfs)
+		return 0;
+
+	/* Save the number of VFs for reference in VF interrupt handlers.
+	 * Since interrupts might start arriving during SRIOV enablement,
+	 * the ordinary API cannot be used to get the number of enabled VFs.
+	 */
+	rvu->vfs = vfs;
+
+	err = rvu_mbox_init(rvu, &rvu->afvf_wq_info, TYPE_AFVF, vfs,
+			    rvu_afvf_mbox_handler, rvu_afvf_mbox_up_handler);
+	if (err)
+		return err;
+
+	rvu_enable_afvf_intr(rvu);
+	/* Make sure IRQs are enabled before SRIOV. */
+	mb();
+
+	err = pci_enable_sriov(pdev, vfs);
+	if (err) {
+		rvu_disable_afvf_intr(rvu);
+		rvu_mbox_destroy(&rvu->afvf_wq_info);
+		return err;
+	}
+
+	return 0;
+}
+
+static void rvu_disable_sriov(struct rvu *rvu)
+{
+	rvu_disable_afvf_intr(rvu);
+	rvu_mbox_destroy(&rvu->afvf_wq_info);
+	pci_disable_sriov(rvu->pdev);
+}
+
+static void rvu_update_module_params(struct rvu *rvu)
+{
+	const char *default_pfl_name = "default";
+
+	strscpy(rvu->mkex_pfl_name,
+		mkex_profile ? mkex_profile : default_pfl_name, MKEX_NAME_LEN);
+}
+
+static int rvu_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+	struct device *dev = &pdev->dev;
+	struct rvu *rvu;
+	int    err;
+
+	rvu = devm_kzalloc(dev, sizeof(*rvu), GFP_KERNEL);
+	if (!rvu)
+		return -ENOMEM;
+
+	rvu->hw = devm_kzalloc(dev, sizeof(struct rvu_hwinfo), GFP_KERNEL);
+	if (!rvu->hw) {
+		devm_kfree(dev, rvu);
+		return -ENOMEM;
+	}
+
+	pci_set_drvdata(pdev, rvu);
+	rvu->pdev = pdev;
+	rvu->dev = &pdev->dev;
+
+	err = pci_enable_device(pdev);
+	if (err) {
+		dev_err(dev, "Failed to enable PCI device\n");
+		goto err_freemem;
+	}
+
+	err = pci_request_regions(pdev, DRV_NAME);
+	if (err) {
+		dev_err(dev, "PCI request regions failed 0x%x\n", err);
+		goto err_disable_device;
+	}
+
+	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(48));
+	if (err) {
+		dev_err(dev, "Unable to set DMA mask\n");
+		goto err_release_regions;
+	}
+
+	err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(48));
+	if (err) {
+		dev_err(dev, "Unable to set consistent DMA mask\n");
+		goto err_release_regions;
+	}
+
+	/* Map Admin function CSRs */
+	rvu->afreg_base = pcim_iomap(pdev, PCI_AF_REG_BAR_NUM, 0);
+	rvu->pfreg_base = pcim_iomap(pdev, PCI_PF_REG_BAR_NUM, 0);
+	if (!rvu->afreg_base || !rvu->pfreg_base) {
+		dev_err(dev, "Unable to map admin function CSRs, aborting\n");
+		err = -ENOMEM;
+		goto err_release_regions;
+	}
+
+	/* Store module params in rvu structure */
+	rvu_update_module_params(rvu);
+
+	/* Check which blocks the HW supports */
+	rvu_check_block_implemented(rvu);
+
+	rvu_reset_all_blocks(rvu);
+
+	err = rvu_setup_hw_resources(rvu);
+	if (err)
+		goto err_release_regions;
+
+	/* Init mailbox btw AF and PFs */
+	err = rvu_mbox_init(rvu, &rvu->afpf_wq_info, TYPE_AFPF,
+			    rvu->hw->total_pfs, rvu_afpf_mbox_handler,
+			    rvu_afpf_mbox_up_handler);
+	if (err)
+		goto err_hwsetup;
+
+	err = rvu_flr_init(rvu);
+	if (err)
+		goto err_mbox;
+
+	err = rvu_register_interrupts(rvu);
+	if (err)
+		goto err_flr;
+
+	/* Enable AF's VFs (if any) */
+	err = rvu_enable_sriov(rvu);
+	if (err)
+		goto err_irq;
+
+	return 0;
+err_irq:
+	rvu_unregister_interrupts(rvu);
+err_flr:
+	rvu_flr_wq_destroy(rvu);
+err_mbox:
+	rvu_mbox_destroy(&rvu->afpf_wq_info);
+err_hwsetup:
+	rvu_cgx_exit(rvu);
+	rvu_reset_all_blocks(rvu);
+	rvu_free_hw_resources(rvu);
+err_release_regions:
+	pci_release_regions(pdev);
+err_disable_device:
+	pci_disable_device(pdev);
+err_freemem:
+	pci_set_drvdata(pdev, NULL);
+	devm_kfree(&pdev->dev, rvu->hw);
+	devm_kfree(dev, rvu);
+	return err;
+}
+
+static void rvu_remove(struct pci_dev *pdev)
+{
+	struct rvu *rvu = pci_get_drvdata(pdev);
+
+	rvu_unregister_interrupts(rvu);
+	rvu_flr_wq_destroy(rvu);
+	rvu_cgx_exit(rvu);
+	rvu_mbox_destroy(&rvu->afpf_wq_info);
+	rvu_disable_sriov(rvu);
+	rvu_reset_all_blocks(rvu);
+	rvu_free_hw_resources(rvu);
+
+	pci_release_regions(pdev);
+	pci_disable_device(pdev);
+	pci_set_drvdata(pdev, NULL);
+
+	devm_kfree(&pdev->dev, rvu->hw);
+	devm_kfree(&pdev->dev, rvu);
+}
+
+static struct pci_driver rvu_driver = {
+	.name = DRV_NAME,
+	.id_table = rvu_id_table,
+	.probe = rvu_probe,
+	.remove = rvu_remove,
+};
+
+static int __init rvu_init_module(void)
+{
+	int err;
+
+	pr_info("%s: %s\n", DRV_NAME, DRV_STRING);
+
+	err = pci_register_driver(&cgx_driver);
+	if (err < 0)
+		return err;
+
+	err =  pci_register_driver(&rvu_driver);
+	if (err < 0)
+		pci_unregister_driver(&cgx_driver);
+
+	return err;
+}
+
+static void __exit rvu_cleanup_module(void)
+{
+	pci_unregister_driver(&rvu_driver);
+	pci_unregister_driver(&cgx_driver);
+}
+
+module_init(rvu_init_module);
+module_exit(rvu_cleanup_module);
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
new file mode 100644
index 0000000..5222e42
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
@@ -0,0 +1,504 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*  Marvell OcteonTx2 RVU Admin Function driver
+ *
+ * Copyright (C) 2018 Marvell International Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef RVU_H
+#define RVU_H
+
+#include <linux/pci.h>
+#include "rvu_struct.h"
+#include "common.h"
+#include "mbox.h"
+
+/* PCI device IDs */
+#define	PCI_DEVID_OCTEONTX2_RVU_AF		0xA065
+
+/* Subsystem Device ID */
+#define PCI_SUBSYS_DEVID_96XX                  0xB200
+
+/* PCI BAR nos */
+#define	PCI_AF_REG_BAR_NUM			0
+#define	PCI_PF_REG_BAR_NUM			2
+#define	PCI_MBOX_BAR_NUM			4
+
+#define NAME_SIZE				32
+
+/* PF_FUNC */
+#define RVU_PFVF_PF_SHIFT	10
+#define RVU_PFVF_PF_MASK	0x3F
+#define RVU_PFVF_FUNC_SHIFT	0
+#define RVU_PFVF_FUNC_MASK	0x3FF
+
+struct rvu_work {
+	struct	work_struct work;
+	struct	rvu *rvu;
+};
+
+struct rsrc_bmap {
+	unsigned long *bmap;	/* Pointer to resource bitmap */
+	u16  max;		/* Max resource id or count */
+};
+
+struct rvu_block {
+	struct rsrc_bmap	lf;
+	struct admin_queue	*aq; /* NIX/NPA AQ */
+	u16  *fn_map; /* LF to pcifunc mapping */
+	bool multislot;
+	bool implemented;
+	u8   addr;  /* RVU_BLOCK_ADDR_E */
+	u8   type;  /* RVU_BLOCK_TYPE_E */
+	u8   lfshift;
+	u64  lookup_reg;
+	u64  pf_lfcnt_reg;
+	u64  vf_lfcnt_reg;
+	u64  lfcfg_reg;
+	u64  msixcfg_reg;
+	u64  lfreset_reg;
+	unsigned char name[NAME_SIZE];
+};
+
+struct nix_mcast {
+	struct qmem	*mce_ctx;
+	struct qmem	*mcast_buf;
+	int		replay_pkind;
+	int		next_free_mce;
+	struct mutex	mce_lock; /* Serialize MCE updates */
+};
+
+struct nix_mce_list {
+	struct hlist_head	head;
+	int			count;
+	int			max;
+};
+
+struct npc_mcam {
+	struct rsrc_bmap counters;
+	struct mutex	lock;	/* MCAM entries and counters update lock */
+	unsigned long	*bmap;		/* bitmap, 0 => bmap_entries */
+	unsigned long	*bmap_reverse;	/* Reverse bitmap, bmap_entries => 0 */
+	u16	bmap_entries;	/* Number of unreserved MCAM entries */
+	u16	bmap_fcnt;	/* MCAM entries free count */
+	u16	*entry2pfvf_map;
+	u16	*entry2cntr_map;
+	u16	*cntr2pfvf_map;
+	u16	*cntr_refcnt;
+	u8	keysize;	/* MCAM keysize 112/224/448 bits */
+	u8	banks;		/* Number of MCAM banks */
+	u8	banks_per_entry;/* Number of keywords in key */
+	u16	banksize;	/* Number of MCAM entries in each bank */
+	u16	total_entries;	/* Total number of MCAM entries */
+	u16	nixlf_offset;	/* Offset of nixlf rsvd uncast entries */
+	u16	pf_offset;	/* Offset of PF's rsvd bcast, promisc entries */
+	u16	lprio_count;
+	u16	lprio_start;
+	u16	hprio_count;
+	u16	hprio_end;
+};
+
+/* Structure for per RVU func info, i.e. PF/VF */
+struct rvu_pfvf {
+	bool		npalf; /* Only one NPALF per RVU_FUNC */
+	bool		nixlf; /* Only one NIXLF per RVU_FUNC */
+	u16		sso;
+	u16		ssow;
+	u16		cptlfs;
+	u16		timlfs;
+	u8		cgx_lmac;
+
+	/* Block LF's MSIX vector info */
+	struct rsrc_bmap msix;      /* Bitmap for MSIX vector alloc */
+#define MSIX_BLKLF(blkaddr, lf) (((blkaddr) << 8) | ((lf) & 0xFF))
+	u16		 *msix_lfmap; /* Vector to block LF mapping */
+
+	/* NPA contexts */
+	struct qmem	*aura_ctx;
+	struct qmem	*pool_ctx;
+	struct qmem	*npa_qints_ctx;
+	unsigned long	*aura_bmap;
+	unsigned long	*pool_bmap;
+
+	/* NIX contexts */
+	struct qmem	*rq_ctx;
+	struct qmem	*sq_ctx;
+	struct qmem	*cq_ctx;
+	struct qmem	*rss_ctx;
+	struct qmem	*cq_ints_ctx;
+	struct qmem	*nix_qints_ctx;
+	unsigned long	*sq_bmap;
+	unsigned long	*rq_bmap;
+	unsigned long	*cq_bmap;
+
+	u16		rx_chan_base;
+	u16		tx_chan_base;
+	u8              rx_chan_cnt; /* total number of RX channels */
+	u8              tx_chan_cnt; /* total number of TX channels */
+	u16		maxlen;
+	u16		minlen;
+
+	u8		mac_addr[ETH_ALEN]; /* MAC address of this PF/VF */
+
+	/* Broadcast pkt replication info */
+	u16			bcast_mce_idx;
+	struct nix_mce_list	bcast_mce_list;
+
+	/* VLAN offload */
+	struct mcam_entry entry;
+	int rxvlan_index;
+	bool rxvlan;
+};
+
+struct nix_txsch {
+	struct rsrc_bmap schq;
+	u8   lvl;
+#define NIX_TXSCHQ_TL1_CFG_DONE       BIT_ULL(0)
+#define TXSCH_MAP_FUNC(__pfvf_map)    ((__pfvf_map) & 0xFFFF)
+#define TXSCH_MAP_FLAGS(__pfvf_map)   ((__pfvf_map) >> 16)
+#define TXSCH_MAP(__func, __flags)    (((__func) & 0xFFFF) | ((__flags) << 16))
+	u32  *pfvf_map;
+};
+
+struct nix_mark_format {
+	u8 total;
+	u8 in_use;
+	u32 *cfg;
+};
+
+struct npc_pkind {
+	struct rsrc_bmap rsrc;
+	u32	*pfchan_map;
+};
+
+struct nix_flowkey {
+#define NIX_FLOW_KEY_ALG_MAX 32
+	u32 flowkey[NIX_FLOW_KEY_ALG_MAX];
+	int in_use;
+};
+
+struct nix_lso {
+	u8 total;
+	u8 in_use;
+};
+
+struct nix_hw {
+	struct nix_txsch txsch[NIX_TXSCH_LVL_CNT]; /* Tx schedulers */
+	struct nix_mcast mcast;
+	struct nix_flowkey flowkey;
+	struct nix_mark_format mark_format;
+	struct nix_lso lso;
+};
+
+struct rvu_hwinfo {
+	u8	total_pfs;   /* MAX RVU PFs HW supports */
+	u16	total_vfs;   /* Max RVU VFs HW supports */
+	u16	max_vfs_per_pf; /* Max VFs that can be attached to a PF */
+	u8	cgx;
+	u8	lmac_per_cgx;
+	u8	cgx_links;
+	u8	lbk_links;
+	u8	sdp_links;
+	u8	npc_kpus;          /* No of parser units */
+
+
+	struct rvu_block block[BLK_COUNT]; /* Block info */
+	struct nix_hw    *nix0;
+	struct npc_pkind pkind;
+	struct npc_mcam  mcam;
+};
+
+struct mbox_wq_info {
+	struct otx2_mbox mbox;
+	struct rvu_work *mbox_wrk;
+
+	struct otx2_mbox mbox_up;
+	struct rvu_work *mbox_wrk_up;
+
+	struct workqueue_struct *mbox_wq;
+};
+
+struct rvu {
+	void __iomem		*afreg_base;
+	void __iomem		*pfreg_base;
+	struct pci_dev		*pdev;
+	struct device		*dev;
+	struct rvu_hwinfo       *hw;
+	struct rvu_pfvf		*pf;
+	struct rvu_pfvf		*hwvf;
+	struct mutex		rsrc_lock; /* Serialize resource alloc/free */
+	int			vfs; /* Number of VFs attached to RVU */
+
+	/* Mbox */
+	struct mbox_wq_info	afpf_wq_info;
+	struct mbox_wq_info	afvf_wq_info;
+
+	/* PF FLR */
+	struct rvu_work		*flr_wrk;
+	struct workqueue_struct *flr_wq;
+	struct mutex		flr_lock; /* Serialize FLRs */
+
+	/* MSI-X */
+	u16			num_vec;
+	char			*irq_name;
+	bool			*irq_allocated;
+	dma_addr_t		msix_base_iova;
+
+	/* CGX */
+#define PF_CGXMAP_BASE		1 /* PF 0 is reserved for RVU PF */
+	u8			cgx_mapped_pfs;
+	u8			cgx_cnt_max;	 /* CGX port count max */
+	u8			*pf2cgxlmac_map; /* pf to cgx_lmac map */
+	u16			*cgxlmac2pf_map; /* bitmap of mapped pfs for
+						  * every cgx lmac port
+						  */
+	unsigned long		pf_notify_bmap; /* Flags for PF notification */
+	void			**cgx_idmap; /* cgx id to cgx data map table */
+	struct			work_struct cgx_evh_work;
+	struct			workqueue_struct *cgx_evh_wq;
+	spinlock_t		cgx_evq_lock; /* cgx event queue lock */
+	struct list_head	cgx_evq_head; /* cgx event queue head */
+
+	char mkex_pfl_name[MKEX_NAME_LEN]; /* Configured MKEX profile name */
+};
+
+static inline void rvu_write64(struct rvu *rvu, u64 block, u64 offset, u64 val)
+{
+	writeq(val, rvu->afreg_base + ((block << 28) | offset));
+}
+
+static inline u64 rvu_read64(struct rvu *rvu, u64 block, u64 offset)
+{
+	return readq(rvu->afreg_base + ((block << 28) | offset));
+}
+
+static inline void rvupf_write64(struct rvu *rvu, u64 offset, u64 val)
+{
+	writeq(val, rvu->pfreg_base + offset);
+}
+
+static inline u64 rvupf_read64(struct rvu *rvu, u64 offset)
+{
+	return readq(rvu->pfreg_base + offset);
+}
+
+static inline bool is_rvu_9xxx_A0(struct rvu *rvu)
+{
+	struct pci_dev *pdev = rvu->pdev;
+
+	return (pdev->revision == 0x00) &&
+		(pdev->subsystem_device == PCI_SUBSYS_DEVID_96XX);
+}
+
+/* Function Prototypes
+ * RVU
+ */
+static inline int is_afvf(u16 pcifunc)
+{
+	return !(pcifunc & ~RVU_PFVF_FUNC_MASK);
+}
+
+int rvu_alloc_bitmap(struct rsrc_bmap *rsrc);
+int rvu_alloc_rsrc(struct rsrc_bmap *rsrc);
+void rvu_free_rsrc(struct rsrc_bmap *rsrc, int id);
+int rvu_rsrc_free_count(struct rsrc_bmap *rsrc);
+int rvu_alloc_rsrc_contig(struct rsrc_bmap *rsrc, int nrsrc);
+bool rvu_rsrc_check_contig(struct rsrc_bmap *rsrc, int nrsrc);
+int rvu_get_pf(u16 pcifunc);
+struct rvu_pfvf *rvu_get_pfvf(struct rvu *rvu, int pcifunc);
+void rvu_get_pf_numvfs(struct rvu *rvu, int pf, int *numvfs, int *hwvf);
+bool is_block_implemented(struct rvu_hwinfo *hw, int blkaddr);
+bool is_pffunc_map_valid(struct rvu *rvu, u16 pcifunc, int blktype);
+int rvu_get_lf(struct rvu *rvu, struct rvu_block *block, u16 pcifunc, u16 slot);
+int rvu_lf_reset(struct rvu *rvu, struct rvu_block *block, int lf);
+int rvu_get_blkaddr(struct rvu *rvu, int blktype, u16 pcifunc);
+int rvu_poll_reg(struct rvu *rvu, u64 block, u64 offset, u64 mask, bool zero);
+
+/* RVU HW reg validation */
+enum regmap_block {
+	TXSCHQ_HWREGMAP = 0,
+	MAX_HWREGMAP,
+};
+
+bool rvu_check_valid_reg(int regmap, int regblk, u64 reg);
+
+/* NPA/NIX AQ APIs */
+int rvu_aq_alloc(struct rvu *rvu, struct admin_queue **ad_queue,
+		 int qsize, int inst_size, int res_size);
+void rvu_aq_free(struct rvu *rvu, struct admin_queue *aq);
+
+/* CGX APIs */
+static inline bool is_pf_cgxmapped(struct rvu *rvu, u8 pf)
+{
+	return (pf >= PF_CGXMAP_BASE && pf <= rvu->cgx_mapped_pfs);
+}
+
+static inline void rvu_get_cgx_lmac_id(u8 map, u8 *cgx_id, u8 *lmac_id)
+{
+	*cgx_id = (map >> 4) & 0xF;
+	*lmac_id = (map & 0xF);
+}
+
+int rvu_cgx_init(struct rvu *rvu);
+int rvu_cgx_exit(struct rvu *rvu);
+void *rvu_cgx_pdata(u8 cgx_id, struct rvu *rvu);
+int rvu_cgx_config_rxtx(struct rvu *rvu, u16 pcifunc, bool start);
+int rvu_mbox_handler_cgx_start_rxtx(struct rvu *rvu, struct msg_req *req,
+				    struct msg_rsp *rsp);
+int rvu_mbox_handler_cgx_stop_rxtx(struct rvu *rvu, struct msg_req *req,
+				   struct msg_rsp *rsp);
+int rvu_mbox_handler_cgx_stats(struct rvu *rvu, struct msg_req *req,
+			       struct cgx_stats_rsp *rsp);
+int rvu_mbox_handler_cgx_mac_addr_set(struct rvu *rvu,
+				      struct cgx_mac_addr_set_or_get *req,
+				      struct cgx_mac_addr_set_or_get *rsp);
+int rvu_mbox_handler_cgx_mac_addr_get(struct rvu *rvu,
+				      struct cgx_mac_addr_set_or_get *req,
+				      struct cgx_mac_addr_set_or_get *rsp);
+int rvu_mbox_handler_cgx_promisc_enable(struct rvu *rvu, struct msg_req *req,
+					struct msg_rsp *rsp);
+int rvu_mbox_handler_cgx_promisc_disable(struct rvu *rvu, struct msg_req *req,
+					 struct msg_rsp *rsp);
+int rvu_mbox_handler_cgx_start_linkevents(struct rvu *rvu, struct msg_req *req,
+					  struct msg_rsp *rsp);
+int rvu_mbox_handler_cgx_stop_linkevents(struct rvu *rvu, struct msg_req *req,
+					 struct msg_rsp *rsp);
+int rvu_mbox_handler_cgx_get_linkinfo(struct rvu *rvu, struct msg_req *req,
+				      struct cgx_link_info_msg *rsp);
+int rvu_mbox_handler_cgx_intlbk_enable(struct rvu *rvu, struct msg_req *req,
+				       struct msg_rsp *rsp);
+int rvu_mbox_handler_cgx_intlbk_disable(struct rvu *rvu, struct msg_req *req,
+					struct msg_rsp *rsp);
+
+/* NPA APIs */
+int rvu_npa_init(struct rvu *rvu);
+void rvu_npa_freemem(struct rvu *rvu);
+void rvu_npa_lf_teardown(struct rvu *rvu, u16 pcifunc, int npalf);
+int rvu_mbox_handler_npa_aq_enq(struct rvu *rvu,
+				struct npa_aq_enq_req *req,
+				struct npa_aq_enq_rsp *rsp);
+int rvu_mbox_handler_npa_hwctx_disable(struct rvu *rvu,
+				       struct hwctx_disable_req *req,
+				       struct msg_rsp *rsp);
+int rvu_mbox_handler_npa_lf_alloc(struct rvu *rvu,
+				  struct npa_lf_alloc_req *req,
+				  struct npa_lf_alloc_rsp *rsp);
+int rvu_mbox_handler_npa_lf_free(struct rvu *rvu, struct msg_req *req,
+				 struct msg_rsp *rsp);
+
+/* NIX APIs */
+bool is_nixlf_attached(struct rvu *rvu, u16 pcifunc);
+int rvu_nix_init(struct rvu *rvu);
+int rvu_nix_reserve_mark_format(struct rvu *rvu, struct nix_hw *nix_hw,
+				int blkaddr, u32 cfg);
+void rvu_nix_freemem(struct rvu *rvu);
+int rvu_get_nixlf_count(struct rvu *rvu);
+void rvu_nix_lf_teardown(struct rvu *rvu, u16 pcifunc, int blkaddr, int npalf);
+int rvu_mbox_handler_nix_lf_alloc(struct rvu *rvu,
+				  struct nix_lf_alloc_req *req,
+				  struct nix_lf_alloc_rsp *rsp);
+int rvu_mbox_handler_nix_lf_free(struct rvu *rvu, struct msg_req *req,
+				 struct msg_rsp *rsp);
+int rvu_mbox_handler_nix_aq_enq(struct rvu *rvu,
+				struct nix_aq_enq_req *req,
+				struct nix_aq_enq_rsp *rsp);
+int rvu_mbox_handler_nix_hwctx_disable(struct rvu *rvu,
+				       struct hwctx_disable_req *req,
+				       struct msg_rsp *rsp);
+int rvu_mbox_handler_nix_txsch_alloc(struct rvu *rvu,
+				     struct nix_txsch_alloc_req *req,
+				     struct nix_txsch_alloc_rsp *rsp);
+int rvu_mbox_handler_nix_txsch_free(struct rvu *rvu,
+				    struct nix_txsch_free_req *req,
+				    struct msg_rsp *rsp);
+int rvu_mbox_handler_nix_txschq_cfg(struct rvu *rvu,
+				    struct nix_txschq_config *req,
+				    struct msg_rsp *rsp);
+int rvu_mbox_handler_nix_stats_rst(struct rvu *rvu, struct msg_req *req,
+				   struct msg_rsp *rsp);
+int rvu_mbox_handler_nix_vtag_cfg(struct rvu *rvu,
+				  struct nix_vtag_config *req,
+				  struct msg_rsp *rsp);
+int rvu_mbox_handler_nix_rxvlan_alloc(struct rvu *rvu, struct msg_req *req,
+				      struct msg_rsp *rsp);
+int rvu_mbox_handler_nix_rss_flowkey_cfg(struct rvu *rvu,
+					 struct nix_rss_flowkey_cfg *req,
+					 struct nix_rss_flowkey_cfg_rsp *rsp);
+int rvu_mbox_handler_nix_set_mac_addr(struct rvu *rvu,
+				      struct nix_set_mac_addr *req,
+				      struct msg_rsp *rsp);
+int rvu_mbox_handler_nix_set_rx_mode(struct rvu *rvu, struct nix_rx_mode *req,
+				     struct msg_rsp *rsp);
+int rvu_mbox_handler_nix_set_hw_frs(struct rvu *rvu, struct nix_frs_cfg *req,
+				    struct msg_rsp *rsp);
+int rvu_mbox_handler_nix_lf_start_rx(struct rvu *rvu, struct msg_req *req,
+				     struct msg_rsp *rsp);
+int rvu_mbox_handler_nix_lf_stop_rx(struct rvu *rvu, struct msg_req *req,
+				    struct msg_rsp *rsp);
+int rvu_mbox_handler_nix_mark_format_cfg(struct rvu *rvu,
+					 struct nix_mark_format_cfg  *req,
+					 struct nix_mark_format_cfg_rsp *rsp);
+int rvu_mbox_handler_nix_set_rx_cfg(struct rvu *rvu, struct nix_rx_cfg *req,
+				    struct msg_rsp *rsp);
+int rvu_mbox_handler_nix_lso_format_cfg(struct rvu *rvu,
+					struct nix_lso_format_cfg *req,
+					struct nix_lso_format_cfg_rsp *rsp);
+
+/* NPC APIs */
+int rvu_npc_init(struct rvu *rvu);
+void rvu_npc_freemem(struct rvu *rvu);
+int rvu_npc_get_pkind(struct rvu *rvu, u16 pf);
+void rvu_npc_set_pkind(struct rvu *rvu, int pkind, struct rvu_pfvf *pfvf);
+void rvu_npc_install_ucast_entry(struct rvu *rvu, u16 pcifunc,
+				 int nixlf, u64 chan, u8 *mac_addr);
+void rvu_npc_install_promisc_entry(struct rvu *rvu, u16 pcifunc,
+				   int nixlf, u64 chan, bool allmulti);
+void rvu_npc_disable_promisc_entry(struct rvu *rvu, u16 pcifunc, int nixlf);
+void rvu_npc_enable_promisc_entry(struct rvu *rvu, u16 pcifunc, int nixlf);
+void rvu_npc_install_bcast_match_entry(struct rvu *rvu, u16 pcifunc,
+				       int nixlf, u64 chan);
+int rvu_npc_update_rxvlan(struct rvu *rvu, u16 pcifunc, int nixlf);
+void rvu_npc_disable_mcam_entries(struct rvu *rvu, u16 pcifunc, int nixlf);
+void rvu_npc_disable_default_entries(struct rvu *rvu, u16 pcifunc, int nixlf);
+void rvu_npc_enable_default_entries(struct rvu *rvu, u16 pcifunc, int nixlf);
+void rvu_npc_update_flowkey_alg_idx(struct rvu *rvu, u16 pcifunc, int nixlf,
+				    int group, int alg_idx, int mcam_index);
+int rvu_mbox_handler_npc_mcam_alloc_entry(struct rvu *rvu,
+					  struct npc_mcam_alloc_entry_req *req,
+					  struct npc_mcam_alloc_entry_rsp *rsp);
+int rvu_mbox_handler_npc_mcam_free_entry(struct rvu *rvu,
+					 struct npc_mcam_free_entry_req *req,
+					 struct msg_rsp *rsp);
+int rvu_mbox_handler_npc_mcam_write_entry(struct rvu *rvu,
+					  struct npc_mcam_write_entry_req *req,
+					  struct msg_rsp *rsp);
+int rvu_mbox_handler_npc_mcam_ena_entry(struct rvu *rvu,
+					struct npc_mcam_ena_dis_entry_req *req,
+					struct msg_rsp *rsp);
+int rvu_mbox_handler_npc_mcam_dis_entry(struct rvu *rvu,
+					struct npc_mcam_ena_dis_entry_req *req,
+					struct msg_rsp *rsp);
+int rvu_mbox_handler_npc_mcam_shift_entry(struct rvu *rvu,
+					  struct npc_mcam_shift_entry_req *req,
+					  struct npc_mcam_shift_entry_rsp *rsp);
+int rvu_mbox_handler_npc_mcam_alloc_counter(struct rvu *rvu,
+				struct npc_mcam_alloc_counter_req *req,
+				struct npc_mcam_alloc_counter_rsp *rsp);
+int rvu_mbox_handler_npc_mcam_free_counter(struct rvu *rvu,
+		   struct npc_mcam_oper_counter_req *req, struct msg_rsp *rsp);
+int rvu_mbox_handler_npc_mcam_clear_counter(struct rvu *rvu,
+		struct npc_mcam_oper_counter_req *req, struct msg_rsp *rsp);
+int rvu_mbox_handler_npc_mcam_unmap_counter(struct rvu *rvu,
+		struct npc_mcam_unmap_counter_req *req, struct msg_rsp *rsp);
+int rvu_mbox_handler_npc_mcam_counter_stats(struct rvu *rvu,
+			struct npc_mcam_oper_counter_req *req,
+			struct npc_mcam_oper_counter_rsp *rsp);
+int rvu_mbox_handler_npc_mcam_alloc_and_write_entry(struct rvu *rvu,
+			  struct npc_mcam_alloc_and_write_entry_req *req,
+			  struct npc_mcam_alloc_and_write_entry_rsp *rsp);
+int rvu_mbox_handler_npc_get_kex_cfg(struct rvu *rvu, struct msg_req *req,
+				     struct npc_get_kex_cfg_rsp *rsp);
+#endif /* RVU_H */
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
new file mode 100644
index 0000000..7d7133c
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
@@ -0,0 +1,564 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell OcteonTx2 RVU Admin Function driver
+ *
+ * Copyright (C) 2018 Marvell International Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+
+#include "rvu.h"
+#include "cgx.h"
+
+struct cgx_evq_entry {
+	struct list_head evq_node;
+	struct cgx_link_event link_event;
+};
+
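+/* MBOX_UP_CGX_MESSAGES expands via M() below into helpers that allocate an
+ * AF -> PF 'up' mailbox message with the right request/response sizes.
+ */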
+#define M(_name, _id, _fn_name, _req_type, _rsp_type)			\
+static struct _req_type __maybe_unused					\
+*otx2_mbox_alloc_msg_ ## _fn_name(struct rvu *rvu, int devid)		\
+{									\
+	struct _req_type *req;						\
+									\
+	req = (struct _req_type *)otx2_mbox_alloc_msg_rsp(		\
+		&rvu->afpf_wq_info.mbox_up, devid, sizeof(struct _req_type), \
+		sizeof(struct _rsp_type));				\
+	if (!req)							\
+		return NULL;						\
+	req->hdr.sig = OTX2_MBOX_REQ_SIG;				\
+	req->hdr.id = _id;						\
+	return req;							\
+}
+
+MBOX_UP_CGX_MESSAGES
+#undef M
+
+/* Returns bitmap of mapped PFs */
+static inline u16 cgxlmac_to_pfmap(struct rvu *rvu, u8 cgx_id, u8 lmac_id)
+{
+	return rvu->cgxlmac2pf_map[CGX_OFFSET(cgx_id) + lmac_id];
+}
+
+static inline u8 cgxlmac_id_to_bmap(u8 cgx_id, u8 lmac_id)
+{
+	return ((cgx_id & 0xF) << 4) | (lmac_id & 0xF);
+}
+
+void *rvu_cgx_pdata(u8 cgx_id, struct rvu *rvu)
+{
+	if (cgx_id >= rvu->cgx_cnt_max)
+		return NULL;
+
+	return rvu->cgx_idmap[cgx_id];
+}
+
+static int rvu_map_cgx_lmac_pf(struct rvu *rvu)
+{
+	struct npc_pkind *pkind = &rvu->hw->pkind;
+	int cgx_cnt_max = rvu->cgx_cnt_max;
+	int cgx, lmac_cnt, lmac;
+	int pf = PF_CGXMAP_BASE;
+	int size, free_pkind;
+
+	if (!cgx_cnt_max)
+		return 0;
+
+	if (cgx_cnt_max > 0xF || MAX_LMAC_PER_CGX > 0xF)
+		return -EINVAL;
+
+	/* Alloc map table
+	 * An additional entry is required since PF ids start from 1 and
+	 * hence the entry at offset 0 is invalid.
+	 */
+	size = (cgx_cnt_max * MAX_LMAC_PER_CGX + 1) * sizeof(u8);
+	rvu->pf2cgxlmac_map = devm_kmalloc(rvu->dev, size, GFP_KERNEL);
+	if (!rvu->pf2cgxlmac_map)
+		return -ENOMEM;
+
+	/* Initialize all entries with an invalid cgx and lmac id */
+	memset(rvu->pf2cgxlmac_map, 0xFF, size);
+
+	/* Reverse map table */
+	rvu->cgxlmac2pf_map = devm_kzalloc(rvu->dev,
+				  cgx_cnt_max * MAX_LMAC_PER_CGX * sizeof(u16),
+				  GFP_KERNEL);
+	if (!rvu->cgxlmac2pf_map)
+		return -ENOMEM;
+
+	rvu->cgx_mapped_pfs = 0;
+	for (cgx = 0; cgx < cgx_cnt_max; cgx++) {
+		if (!rvu_cgx_pdata(cgx, rvu))
+			continue;
+		lmac_cnt = cgx_get_lmac_cnt(rvu_cgx_pdata(cgx, rvu));
+		for (lmac = 0; lmac < lmac_cnt; lmac++, pf++) {
+			rvu->pf2cgxlmac_map[pf] = cgxlmac_id_to_bmap(cgx, lmac);
+			rvu->cgxlmac2pf_map[CGX_OFFSET(cgx) + lmac] = 1 << pf;
+			free_pkind = rvu_alloc_rsrc(&pkind->rsrc);
+			pkind->pfchan_map[free_pkind] = ((pf) & 0x3F) << 16;
+			rvu->cgx_mapped_pfs++;
+		}
+	}
+	return 0;
+}
+
+static int rvu_cgx_send_link_info(int cgx_id, int lmac_id, struct rvu *rvu)
+{
+	struct cgx_evq_entry *qentry;
+	unsigned long flags;
+	int err;
+
+	qentry = kmalloc(sizeof(*qentry), GFP_KERNEL);
+	if (!qentry)
+		return -ENOMEM;
+
+	/* Lock the event queue before we read the local link status */
+	spin_lock_irqsave(&rvu->cgx_evq_lock, flags);
+	err = cgx_get_link_info(rvu_cgx_pdata(cgx_id, rvu), lmac_id,
+				&qentry->link_event.link_uinfo);
+	qentry->link_event.cgx_id = cgx_id;
+	qentry->link_event.lmac_id = lmac_id;
+	if (err)
+		goto skip_add;
+	list_add_tail(&qentry->evq_node, &rvu->cgx_evq_head);
+skip_add:
+	spin_unlock_irqrestore(&rvu->cgx_evq_lock, flags);
+
+	/* start worker to process the events */
+	queue_work(rvu->cgx_evh_wq, &rvu->cgx_evh_work);
+
+	return 0;
+}
+
+/* This is called from interrupt context and is expected to be atomic */
+static int cgx_lmac_postevent(struct cgx_link_event *event, void *data)
+{
+	struct cgx_evq_entry *qentry;
+	struct rvu *rvu = data;
+
+	/* post event to the event queue */
+	qentry = kmalloc(sizeof(*qentry), GFP_ATOMIC);
+	if (!qentry)
+		return -ENOMEM;
+	qentry->link_event = *event;
+	spin_lock(&rvu->cgx_evq_lock);
+	list_add_tail(&qentry->evq_node, &rvu->cgx_evq_head);
+	spin_unlock(&rvu->cgx_evq_lock);
+
+	/* start worker to process the events */
+	queue_work(rvu->cgx_evh_wq, &rvu->cgx_evh_work);
+
+	return 0;
+}
+
+static void cgx_notify_pfs(struct cgx_link_event *event, struct rvu *rvu)
+{
+	struct cgx_link_user_info *linfo;
+	struct cgx_link_info_msg *msg;
+	unsigned long pfmap;
+	int err, pfid;
+
+	linfo = &event->link_uinfo;
+	pfmap = cgxlmac_to_pfmap(rvu, event->cgx_id, event->lmac_id);
+
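+	/* pfmap has one bit set per PF mapped to this CGX LMAC; notify each */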
+	do {
+		pfid = find_first_bit(&pfmap, 16);
+		clear_bit(pfid, &pfmap);
+
+		/* check if notification is enabled */
+		if (!test_bit(pfid, &rvu->pf_notify_bmap)) {
+			dev_info(rvu->dev, "cgx %d: lmac %d Link status %s\n",
+				 event->cgx_id, event->lmac_id,
+				 linfo->link_up ? "UP" : "DOWN");
+			continue;
+		}
+
+		/* Send mbox message to PF */
+		msg = otx2_mbox_alloc_msg_cgx_link_event(rvu, pfid);
+		if (!msg)
+			continue;
+		msg->link_info = *linfo;
+		otx2_mbox_msg_send(&rvu->afpf_wq_info.mbox_up, pfid);
+		err = otx2_mbox_wait_for_rsp(&rvu->afpf_wq_info.mbox_up, pfid);
+		if (err)
+			dev_warn(rvu->dev, "notification to pf %d failed\n",
+				 pfid);
+	} while (pfmap);
+}
+
+static void cgx_evhandler_task(struct work_struct *work)
+{
+	struct rvu *rvu = container_of(work, struct rvu, cgx_evh_work);
+	struct cgx_evq_entry *qentry;
+	struct cgx_link_event *event;
+	unsigned long flags;
+
+	do {
+		/* Dequeue an event */
+		spin_lock_irqsave(&rvu->cgx_evq_lock, flags);
+		qentry = list_first_entry_or_null(&rvu->cgx_evq_head,
+						  struct cgx_evq_entry,
+						  evq_node);
+		if (qentry)
+			list_del(&qentry->evq_node);
+		spin_unlock_irqrestore(&rvu->cgx_evq_lock, flags);
+		if (!qentry)
+			break; /* nothing more to process */
+
+		event = &qentry->link_event;
+
+		/* process event */
+		cgx_notify_pfs(event, rvu);
+		kfree(qentry);
+	} while (1);
+}
+
+static int cgx_lmac_event_handler_init(struct rvu *rvu)
+{
+	struct cgx_event_cb cb;
+	int cgx, lmac, err;
+	void *cgxd;
+
+	spin_lock_init(&rvu->cgx_evq_lock);
+	INIT_LIST_HEAD(&rvu->cgx_evq_head);
+	INIT_WORK(&rvu->cgx_evh_work, cgx_evhandler_task);
+	rvu->cgx_evh_wq = alloc_workqueue("rvu_evh_wq", 0, 0);
+	if (!rvu->cgx_evh_wq) {
+		dev_err(rvu->dev, "alloc workqueue failed");
+		return -ENOMEM;
+	}
+
+	cb.notify_link_chg = cgx_lmac_postevent; /* link change callback */
+	cb.data = rvu;
+
+	for (cgx = 0; cgx <= rvu->cgx_cnt_max; cgx++) {
+		cgxd = rvu_cgx_pdata(cgx, rvu);
+		if (!cgxd)
+			continue;
+		for (lmac = 0; lmac < cgx_get_lmac_cnt(cgxd); lmac++) {
+			err = cgx_lmac_evh_register(&cb, cgxd, lmac);
+			if (err)
+				dev_err(rvu->dev,
+					"%d:%d handler register failed\n",
+					cgx, lmac);
+		}
+	}
+
+	return 0;
+}
+
+static void rvu_cgx_wq_destroy(struct rvu *rvu)
+{
+	if (rvu->cgx_evh_wq) {
+		flush_workqueue(rvu->cgx_evh_wq);
+		destroy_workqueue(rvu->cgx_evh_wq);
+		rvu->cgx_evh_wq = NULL;
+	}
+}
+
+int rvu_cgx_init(struct rvu *rvu)
+{
+	int cgx, err;
+	void *cgxd;
+
+	/* CGX port ids start from 0 and are not necessarily contiguous.
+	 * Hence allocate resources based on the maximum port id value.
+	 */
+	rvu->cgx_cnt_max = cgx_get_cgxcnt_max();
+	if (!rvu->cgx_cnt_max) {
+		dev_info(rvu->dev, "No CGX devices found!\n");
+		return -ENODEV;
+	}
+
+	rvu->cgx_idmap = devm_kzalloc(rvu->dev, rvu->cgx_cnt_max *
+				      sizeof(void *), GFP_KERNEL);
+	if (!rvu->cgx_idmap)
+		return -ENOMEM;
+
+	/* Initialize the cgxdata table */
+	for (cgx = 0; cgx < rvu->cgx_cnt_max; cgx++)
+		rvu->cgx_idmap[cgx] = cgx_get_pdata(cgx);
+
+	/* Map CGX LMAC interfaces to RVU PFs */
+	err = rvu_map_cgx_lmac_pf(rvu);
+	if (err)
+		return err;
+
+	/* Register for CGX events */
+	err = cgx_lmac_event_handler_init(rvu);
+	if (err)
+		return err;
+
+	/* Ensure event handler registration is completed, before
+	 * we turn on the links
+	 */
+	mb();
+
+	/* Do link up for all CGX ports */
+	for (cgx = 0; cgx <= rvu->cgx_cnt_max; cgx++) {
+		cgxd = rvu_cgx_pdata(cgx, rvu);
+		if (!cgxd)
+			continue;
+		err = cgx_lmac_linkup_start(cgxd);
+		if (err)
+			dev_err(rvu->dev,
+				"Link up process failed to start on cgx %d\n",
+				cgx);
+	}
+
+	return 0;
+}
+
+int rvu_cgx_exit(struct rvu *rvu)
+{
+	int cgx, lmac;
+	void *cgxd;
+
+	for (cgx = 0; cgx <= rvu->cgx_cnt_max; cgx++) {
+		cgxd = rvu_cgx_pdata(cgx, rvu);
+		if (!cgxd)
+			continue;
+		for (lmac = 0; lmac < cgx_get_lmac_cnt(cgxd); lmac++)
+			cgx_lmac_evh_unregister(cgxd, lmac);
+	}
+
+	/* Ensure event handler unregister is completed */
+	mb();
+
+	rvu_cgx_wq_destroy(rvu);
+	return 0;
+}
+
+int rvu_cgx_config_rxtx(struct rvu *rvu, u16 pcifunc, bool start)
+{
+	int pf = rvu_get_pf(pcifunc);
+	u8 cgx_id, lmac_id;
+
+	/* This msg is expected only from PFs that are mapped to CGX LMACs,
+	 * if received from other PF/VF simply ACK, nothing to do.
+	 */
+	if ((pcifunc & RVU_PFVF_FUNC_MASK) || !is_pf_cgxmapped(rvu, pf))
+		return -ENODEV;
+
+	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
+
+	cgx_lmac_rx_tx_enable(rvu_cgx_pdata(cgx_id, rvu), lmac_id, start);
+
+	return 0;
+}
+
+int rvu_mbox_handler_cgx_start_rxtx(struct rvu *rvu, struct msg_req *req,
+				    struct msg_rsp *rsp)
+{
+	rvu_cgx_config_rxtx(rvu, req->hdr.pcifunc, true);
+	return 0;
+}
+
+int rvu_mbox_handler_cgx_stop_rxtx(struct rvu *rvu, struct msg_req *req,
+				   struct msg_rsp *rsp)
+{
+	rvu_cgx_config_rxtx(rvu, req->hdr.pcifunc, false);
+	return 0;
+}
+
+int rvu_mbox_handler_cgx_stats(struct rvu *rvu, struct msg_req *req,
+			       struct cgx_stats_rsp *rsp)
+{
+	int pf = rvu_get_pf(req->hdr.pcifunc);
+	int stat = 0, err = 0;
+	u64 tx_stat, rx_stat;
+	u8 cgx_idx, lmac;
+	void *cgxd;
+
+	if ((req->hdr.pcifunc & RVU_PFVF_FUNC_MASK) ||
+	    !is_pf_cgxmapped(rvu, pf))
+		return -ENODEV;
+
+	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_idx, &lmac);
+	cgxd = rvu_cgx_pdata(cgx_idx, rvu);
+
+	/* Rx stats */
+	while (stat < CGX_RX_STATS_COUNT) {
+		err = cgx_get_rx_stats(cgxd, lmac, stat, &rx_stat);
+		if (err)
+			return err;
+		rsp->rx_stats[stat] = rx_stat;
+		stat++;
+	}
+
+	/* Tx stats */
+	stat = 0;
+	while (stat < CGX_TX_STATS_COUNT) {
+		err = cgx_get_tx_stats(cgxd, lmac, stat, &tx_stat);
+		if (err)
+			return err;
+		rsp->tx_stats[stat] = tx_stat;
+		stat++;
+	}
+	return 0;
+}
+
+int rvu_mbox_handler_cgx_mac_addr_set(struct rvu *rvu,
+				      struct cgx_mac_addr_set_or_get *req,
+				      struct cgx_mac_addr_set_or_get *rsp)
+{
+	int pf = rvu_get_pf(req->hdr.pcifunc);
+	u8 cgx_id, lmac_id;
+
+	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
+
+	cgx_lmac_addr_set(cgx_id, lmac_id, req->mac_addr);
+
+	return 0;
+}
+
+int rvu_mbox_handler_cgx_mac_addr_get(struct rvu *rvu,
+				      struct cgx_mac_addr_set_or_get *req,
+				      struct cgx_mac_addr_set_or_get *rsp)
+{
+	int pf = rvu_get_pf(req->hdr.pcifunc);
+	u8 cgx_id, lmac_id;
+	int rc = 0, i;
+	u64 cfg;
+
+	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
+
+	rsp->hdr.rc = rc;
+	cfg = cgx_lmac_addr_get(cgx_id, lmac_id);
+	/* copy 48 bit mac address to rsp->mac_addr */
+	for (i = 0; i < ETH_ALEN; i++)
+		rsp->mac_addr[i] = cfg >> (ETH_ALEN - 1 - i) * 8;
+	return 0;
+}
+
+int rvu_mbox_handler_cgx_promisc_enable(struct rvu *rvu, struct msg_req *req,
+					struct msg_rsp *rsp)
+{
+	u16 pcifunc = req->hdr.pcifunc;
+	int pf = rvu_get_pf(pcifunc);
+	u8 cgx_id, lmac_id;
+
+	/* This msg is expected only from PFs that are mapped to CGX LMACs,
+	 * if received from other PF/VF simply ACK, nothing to do.
+	 */
+	if ((req->hdr.pcifunc & RVU_PFVF_FUNC_MASK) ||
+	    !is_pf_cgxmapped(rvu, pf))
+		return -ENODEV;
+
+	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
+
+	cgx_lmac_promisc_config(cgx_id, lmac_id, true);
+	return 0;
+}
+
+int rvu_mbox_handler_cgx_promisc_disable(struct rvu *rvu, struct msg_req *req,
+					 struct msg_rsp *rsp)
+{
+	u16 pcifunc = req->hdr.pcifunc;
+	int pf = rvu_get_pf(pcifunc);
+	u8 cgx_id, lmac_id;
+
+	/* This msg is expected only from PFs that are mapped to CGX LMACs,
+	 * if received from other PF/VF simply ACK, nothing to do.
+	 */
+	if ((req->hdr.pcifunc & RVU_PFVF_FUNC_MASK) ||
+	    !is_pf_cgxmapped(rvu, pf))
+		return -ENODEV;
+
+	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
+
+	cgx_lmac_promisc_config(cgx_id, lmac_id, false);
+	return 0;
+}
+
+static int rvu_cgx_config_linkevents(struct rvu *rvu, u16 pcifunc, bool en)
+{
+	int pf = rvu_get_pf(pcifunc);
+	u8 cgx_id, lmac_id;
+
+	/* This msg is expected only from PFs that are mapped to CGX LMACs,
+	 * if received from other PF/VF simply ACK, nothing to do.
+	 */
+	if ((pcifunc & RVU_PFVF_FUNC_MASK) || !is_pf_cgxmapped(rvu, pf))
+		return -ENODEV;
+
+	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
+
+	if (en) {
+		set_bit(pf, &rvu->pf_notify_bmap);
+		/* Send the current link status to PF */
+		rvu_cgx_send_link_info(cgx_id, lmac_id, rvu);
+	} else {
+		clear_bit(pf, &rvu->pf_notify_bmap);
+	}
+
+	return 0;
+}
+
+int rvu_mbox_handler_cgx_start_linkevents(struct rvu *rvu, struct msg_req *req,
+					  struct msg_rsp *rsp)
+{
+	rvu_cgx_config_linkevents(rvu, req->hdr.pcifunc, true);
+	return 0;
+}
+
+int rvu_mbox_handler_cgx_stop_linkevents(struct rvu *rvu, struct msg_req *req,
+					 struct msg_rsp *rsp)
+{
+	rvu_cgx_config_linkevents(rvu, req->hdr.pcifunc, false);
+	return 0;
+}
+
+int rvu_mbox_handler_cgx_get_linkinfo(struct rvu *rvu, struct msg_req *req,
+				      struct cgx_link_info_msg *rsp)
+{
+	u8 cgx_id, lmac_id;
+	int pf, err;
+
+	pf = rvu_get_pf(req->hdr.pcifunc);
+
+	if (!is_pf_cgxmapped(rvu, pf))
+		return -ENODEV;
+
+	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
+
+	err = cgx_get_link_info(rvu_cgx_pdata(cgx_id, rvu), lmac_id,
+				&rsp->link_info);
+	return err;
+}
+
+static int rvu_cgx_config_intlbk(struct rvu *rvu, u16 pcifunc, bool en)
+{
+	int pf = rvu_get_pf(pcifunc);
+	u8 cgx_id, lmac_id;
+
+	/* This msg is expected only from PFs that are mapped to CGX LMACs,
+	 * if received from other PF/VF simply ACK, nothing to do.
+	 */
+	if ((pcifunc & RVU_PFVF_FUNC_MASK) || !is_pf_cgxmapped(rvu, pf))
+		return -ENODEV;
+
+	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
+
+	return cgx_lmac_internal_loopback(rvu_cgx_pdata(cgx_id, rvu),
+					  lmac_id, en);
+}
+
+int rvu_mbox_handler_cgx_intlbk_enable(struct rvu *rvu, struct msg_req *req,
+				       struct msg_rsp *rsp)
+{
+	rvu_cgx_config_intlbk(rvu, req->hdr.pcifunc, true);
+	return 0;
+}
+
+int rvu_mbox_handler_cgx_intlbk_disable(struct rvu *rvu, struct msg_req *req,
+					struct msg_rsp *rsp)
+{
+	rvu_cgx_config_intlbk(rvu, req->hdr.pcifunc, false);
+	return 0;
+}
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
new file mode 100644
index 0000000..4a7609f
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
@@ -0,0 +1,2959 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell OcteonTx2 RVU Admin Function driver
+ *
+ * Copyright (C) 2018 Marvell International Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/pci.h>
+
+#include "rvu_struct.h"
+#include "rvu_reg.h"
+#include "rvu.h"
+#include "npc.h"
+#include "cgx.h"
+
+static int nix_update_bcast_mce_list(struct rvu *rvu, u16 pcifunc, bool add);
+
+enum mc_tbl_sz {
+	MC_TBL_SZ_256,
+	MC_TBL_SZ_512,
+	MC_TBL_SZ_1K,
+	MC_TBL_SZ_2K,
+	MC_TBL_SZ_4K,
+	MC_TBL_SZ_8K,
+	MC_TBL_SZ_16K,
+	MC_TBL_SZ_32K,
+	MC_TBL_SZ_64K,
+};
+
+enum mc_buf_cnt {
+	MC_BUF_CNT_8,
+	MC_BUF_CNT_16,
+	MC_BUF_CNT_32,
+	MC_BUF_CNT_64,
+	MC_BUF_CNT_128,
+	MC_BUF_CNT_256,
+	MC_BUF_CNT_512,
+	MC_BUF_CNT_1024,
+	MC_BUF_CNT_2048,
+};
+
+enum nix_makr_fmt_indexes {
+	NIX_MARK_CFG_IP_DSCP_RED,
+	NIX_MARK_CFG_IP_DSCP_YELLOW,
+	NIX_MARK_CFG_IP_DSCP_YELLOW_RED,
+	NIX_MARK_CFG_IP_ECN_RED,
+	NIX_MARK_CFG_IP_ECN_YELLOW,
+	NIX_MARK_CFG_IP_ECN_YELLOW_RED,
+	NIX_MARK_CFG_VLAN_DEI_RED,
+	NIX_MARK_CFG_VLAN_DEI_YELLOW,
+	NIX_MARK_CFG_VLAN_DEI_YELLOW_RED,
+	NIX_MARK_CFG_MAX,
+};
+
+/* For now, consider only the MC resources needed for broadcast
+ * pkt replication, i.e. 256 HWVFs + 12 PFs.
+ */
+#define MC_TBL_SIZE	MC_TBL_SZ_512
+#define MC_BUF_CNT	MC_BUF_CNT_128
+
+struct mce {
+	struct hlist_node	node;
+	u16			idx;
+	u16			pcifunc;
+};
+
+bool is_nixlf_attached(struct rvu *rvu, u16 pcifunc)
+{
+	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
+	int blkaddr;
+
+	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
+	if (!pfvf->nixlf || blkaddr < 0)
+		return false;
+	return true;
+}
+
+int rvu_get_nixlf_count(struct rvu *rvu)
+{
+	struct rvu_block *block;
+	int blkaddr;
+
+	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0);
+	if (blkaddr < 0)
+		return 0;
+	block = &rvu->hw->block[blkaddr];
+	return block->lf.max;
+}
+
+static void nix_mce_list_init(struct nix_mce_list *list, int max)
+{
+	INIT_HLIST_HEAD(&list->head);
+	list->count = 0;
+	list->max = max;
+}
+
+static u16 nix_alloc_mce_list(struct nix_mcast *mcast, int count)
+{
+	int idx;
+
+	if (!mcast)
+		return 0;
+
+	idx = mcast->next_free_mce;
+	mcast->next_free_mce += count;
+	return idx;
+}
+
+static inline struct nix_hw *get_nix_hw(struct rvu_hwinfo *hw, int blkaddr)
+{
+	if (blkaddr == BLKADDR_NIX0 && hw->nix0)
+		return hw->nix0;
+
+	return NULL;
+}
+
+static void nix_rx_sync(struct rvu *rvu, int blkaddr)
+{
+	int err;
+
+	/*Sync all in flight RX packets to LLC/DRAM */
+	rvu_write64(rvu, blkaddr, NIX_AF_RX_SW_SYNC, BIT_ULL(0));
+	err = rvu_poll_reg(rvu, blkaddr, NIX_AF_RX_SW_SYNC, BIT_ULL(0), true);
+	if (err)
+		dev_err(rvu->dev, "NIX RX software sync failed\n");
+
+	/* As per a HW errata in 9xxx A0 silicon, HW may clear SW_SYNC[ENA]
+	 * bit too early. Hence wait for 50us more.
+	 */
+	if (is_rvu_9xxx_A0(rvu))
+		usleep_range(50, 60);
+}
+
+static bool is_valid_txschq(struct rvu *rvu, int blkaddr,
+			    int lvl, u16 pcifunc, u16 schq)
+{
+	struct nix_txsch *txsch;
+	struct nix_hw *nix_hw;
+	u16 map_func;
+
+	nix_hw = get_nix_hw(rvu->hw, blkaddr);
+	if (!nix_hw)
+		return false;
+
+	txsch = &nix_hw->txsch[lvl];
+	/* Check out of bounds */
+	if (schq >= txsch->schq.max)
+		return false;
+
+	mutex_lock(&rvu->rsrc_lock);
+	map_func = TXSCH_MAP_FUNC(txsch->pfvf_map[schq]);
+	mutex_unlock(&rvu->rsrc_lock);
+
+	/* For TL1 schq, sharing across VF's of same PF is ok */
+	if (lvl == NIX_TXSCH_LVL_TL1 &&
+	    rvu_get_pf(map_func) != rvu_get_pf(pcifunc))
+		return false;
+
+	if (lvl != NIX_TXSCH_LVL_TL1 &&
+	    map_func != pcifunc)
+		return false;
+
+	return true;
+}
+
+static int nix_interface_init(struct rvu *rvu, u16 pcifunc, int type, int nixlf)
+{
+	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
+	u8 cgx_id, lmac_id;
+	int pkind, pf, vf;
+	int err;
+
+	pf = rvu_get_pf(pcifunc);
+	if (!is_pf_cgxmapped(rvu, pf) && type != NIX_INTF_TYPE_LBK)
+		return 0;
+
+	switch (type) {
+	case NIX_INTF_TYPE_CGX:
+		pfvf->cgx_lmac = rvu->pf2cgxlmac_map[pf];
+		rvu_get_cgx_lmac_id(pfvf->cgx_lmac, &cgx_id, &lmac_id);
+
+		pkind = rvu_npc_get_pkind(rvu, pf);
+		if (pkind < 0) {
+			dev_err(rvu->dev,
+				"PF_Func 0x%x: Invalid pkind\n", pcifunc);
+			return -EINVAL;
+		}
+		pfvf->rx_chan_base = NIX_CHAN_CGX_LMAC_CHX(cgx_id, lmac_id, 0);
+		pfvf->tx_chan_base = pfvf->rx_chan_base;
+		pfvf->rx_chan_cnt = 1;
+		pfvf->tx_chan_cnt = 1;
+		cgx_set_pkind(rvu_cgx_pdata(cgx_id, rvu), lmac_id, pkind);
+		rvu_npc_set_pkind(rvu, pkind, pfvf);
+		break;
+	case NIX_INTF_TYPE_LBK:
+		vf = (pcifunc & RVU_PFVF_FUNC_MASK) - 1;
+		pfvf->rx_chan_base = NIX_CHAN_LBK_CHX(0, vf);
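+		/* Pair up loopback VFs: an even VF transmits on the next
+		 * (odd) VF's RX channel and vice versa, so each pair loops
+		 * packets back to its peer.
+		 */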
+		pfvf->tx_chan_base = vf & 0x1 ? NIX_CHAN_LBK_CHX(0, vf - 1) :
+						NIX_CHAN_LBK_CHX(0, vf + 1);
+		pfvf->rx_chan_cnt = 1;
+		pfvf->tx_chan_cnt = 1;
+		rvu_npc_install_promisc_entry(rvu, pcifunc, nixlf,
+					      pfvf->rx_chan_base, false);
+		break;
+	}
+
+	/* Add a UCAST forwarding rule in MCAM with this NIXLF attached
+	 * RVU PF/VF's MAC address.
+	 */
+	rvu_npc_install_ucast_entry(rvu, pcifunc, nixlf,
+				    pfvf->rx_chan_base, pfvf->mac_addr);
+
+	/* Add this PF_FUNC to bcast pkt replication list */
+	err = nix_update_bcast_mce_list(rvu, pcifunc, true);
+	if (err) {
+		dev_err(rvu->dev,
+			"Bcast list, failed to enable PF_FUNC 0x%x\n",
+			pcifunc);
+		return err;
+	}
+
+	rvu_npc_install_bcast_match_entry(rvu, pcifunc,
+					  nixlf, pfvf->rx_chan_base);
+	pfvf->maxlen = NIC_HW_MIN_FRS;
+	pfvf->minlen = NIC_HW_MIN_FRS;
+
+	return 0;
+}
+
+static void nix_interface_deinit(struct rvu *rvu, u16 pcifunc, u8 nixlf)
+{
+	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
+	int err;
+
+	pfvf->maxlen = 0;
+	pfvf->minlen = 0;
+	pfvf->rxvlan = false;
+
+	/* Remove this PF_FUNC from bcast pkt replication list */
+	err = nix_update_bcast_mce_list(rvu, pcifunc, false);
+	if (err) {
+		dev_err(rvu->dev,
+			"Bcast list, failed to disable PF_FUNC 0x%x\n",
+			pcifunc);
+	}
+
+	/* Free and disable any MCAM entries used by this NIX LF */
+	rvu_npc_disable_mcam_entries(rvu, pcifunc, nixlf);
+}
+
+static void nix_setup_lso_tso_l3(struct rvu *rvu, int blkaddr,
+				 u64 format, bool v4, u64 *fidx)
+{
+	struct nix_lso_format field = {0};
+
+	/* IP's Length field */
+	field.layer = NIX_TXLAYER_OL3;
+	/* In ipv4, length field is at offset 2 bytes, for ipv6 it's 4 */
+	field.offset = v4 ? 2 : 4;
+	field.sizem1 = 1; /* i.e 2 bytes */
+	field.alg = NIX_LSOALG_ADD_PAYLEN;
+	rvu_write64(rvu, blkaddr,
+		    NIX_AF_LSO_FORMATX_FIELDX(format, (*fidx)++),
+		    *(u64 *)&field);
+
+	/* No ID field in IPv6 header */
+	if (!v4)
+		return;
+
+	/* IP's ID field */
+	field.layer = NIX_TXLAYER_OL3;
+	field.offset = 4;
+	field.sizem1 = 1; /* i.e 2 bytes */
+	field.alg = NIX_LSOALG_ADD_SEGNUM;
+	rvu_write64(rvu, blkaddr,
+		    NIX_AF_LSO_FORMATX_FIELDX(format, (*fidx)++),
+		    *(u64 *)&field);
+}
+
+static void nix_setup_lso_tso_l4(struct rvu *rvu, int blkaddr,
+				 u64 format, u64 *fidx)
+{
+	struct nix_lso_format field = {0};
+
+	/* TCP's sequence number field */
+	field.layer = NIX_TXLAYER_OL4;
+	field.offset = 4;
+	field.sizem1 = 3; /* i.e 4 bytes */
+	field.alg = NIX_LSOALG_ADD_OFFSET;
+	rvu_write64(rvu, blkaddr,
+		    NIX_AF_LSO_FORMATX_FIELDX(format, (*fidx)++),
+		    *(u64 *)&field);
+
+	/* TCP's flags field */
+	field.layer = NIX_TXLAYER_OL4;
+	field.offset = 12;
+	field.sizem1 = 1; /* 2 bytes */
+	field.alg = NIX_LSOALG_TCP_FLAGS;
+	rvu_write64(rvu, blkaddr,
+		    NIX_AF_LSO_FORMATX_FIELDX(format, (*fidx)++),
+		    *(u64 *)&field);
+}
+
+static void nix_setup_lso(struct rvu *rvu, struct nix_hw *nix_hw, int blkaddr)
+{
+	u64 cfg, idx, fidx = 0;
+
+	/* Get max HW supported format indices */
+	cfg = (rvu_read64(rvu, blkaddr, NIX_AF_CONST1) >> 48) & 0xFF;
+	nix_hw->lso.total = cfg;
+
+	/* Enable LSO */
+	cfg = rvu_read64(rvu, blkaddr, NIX_AF_LSO_CFG);
+	/* For TSO, set first and middle segment flags to
+	 * mask out PSH, RST & FIN flags in TCP packet
+	 */
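+	/* In 0xFFF2 the FIN (bit 0), RST (bit 2) and PSH (bit 3) positions
+	 * are zero, so those flags get masked; all other bits pass through.
+	 */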
+	cfg &= ~((0xFFFFULL << 32) | (0xFFFFULL << 16));
+	cfg |= (0xFFF2ULL << 32) | (0xFFF2ULL << 16);
+	rvu_write64(rvu, blkaddr, NIX_AF_LSO_CFG, cfg | BIT_ULL(63));
+
+	/* Setup default static LSO formats
+	 *
+	 * Configure format fields for TCPv4 segmentation offload
+	 */
+	idx = NIX_LSO_FORMAT_IDX_TSOV4;
+	nix_setup_lso_tso_l3(rvu, blkaddr, idx, true, &fidx);
+	nix_setup_lso_tso_l4(rvu, blkaddr, idx, &fidx);
+
+	/* Set rest of the fields to NOP */
+	for (; fidx < 8; fidx++) {
+		rvu_write64(rvu, blkaddr,
+			    NIX_AF_LSO_FORMATX_FIELDX(idx, fidx), 0x0ULL);
+	}
+	nix_hw->lso.in_use++;
+
+	/* Configure format fields for TCPv6 segmentation offload */
+	idx = NIX_LSO_FORMAT_IDX_TSOV6;
+	fidx = 0;
+	nix_setup_lso_tso_l3(rvu, blkaddr, idx, false, &fidx);
+	nix_setup_lso_tso_l4(rvu, blkaddr, idx, &fidx);
+
+	/* Set rest of the fields to NOP */
+	for (; fidx < 8; fidx++) {
+		rvu_write64(rvu, blkaddr,
+			    NIX_AF_LSO_FORMATX_FIELDX(idx, fidx), 0x0ULL);
+	}
+	nix_hw->lso.in_use++;
+}
+
+static void nix_ctx_free(struct rvu *rvu, struct rvu_pfvf *pfvf)
+{
+	kfree(pfvf->rq_bmap);
+	kfree(pfvf->sq_bmap);
+	kfree(pfvf->cq_bmap);
+	if (pfvf->rq_ctx)
+		qmem_free(rvu->dev, pfvf->rq_ctx);
+	if (pfvf->sq_ctx)
+		qmem_free(rvu->dev, pfvf->sq_ctx);
+	if (pfvf->cq_ctx)
+		qmem_free(rvu->dev, pfvf->cq_ctx);
+	if (pfvf->rss_ctx)
+		qmem_free(rvu->dev, pfvf->rss_ctx);
+	if (pfvf->nix_qints_ctx)
+		qmem_free(rvu->dev, pfvf->nix_qints_ctx);
+	if (pfvf->cq_ints_ctx)
+		qmem_free(rvu->dev, pfvf->cq_ints_ctx);
+
+	pfvf->rq_bmap = NULL;
+	pfvf->cq_bmap = NULL;
+	pfvf->sq_bmap = NULL;
+	pfvf->rq_ctx = NULL;
+	pfvf->sq_ctx = NULL;
+	pfvf->cq_ctx = NULL;
+	pfvf->rss_ctx = NULL;
+	pfvf->nix_qints_ctx = NULL;
+	pfvf->cq_ints_ctx = NULL;
+}
+
+static int nixlf_rss_ctx_init(struct rvu *rvu, int blkaddr,
+			      struct rvu_pfvf *pfvf, int nixlf,
+			      int rss_sz, int rss_grps, int hwctx_size)
+{
+	int err, grp, num_indices;
+
+	/* RSS is not requested for this NIXLF */
+	if (!rss_sz)
+		return 0;
+	num_indices = rss_sz * rss_grps;
+
+	/* Alloc NIX RSS HW context memory and config the base */
+	err = qmem_alloc(rvu->dev, &pfvf->rss_ctx, num_indices, hwctx_size);
+	if (err)
+		return err;
+
+	rvu_write64(rvu, blkaddr, NIX_AF_LFX_RSS_BASE(nixlf),
+		    (u64)pfvf->rss_ctx->iova);
+
+	/* Config full RSS table size, enable RSS and caching */
+	rvu_write64(rvu, blkaddr, NIX_AF_LFX_RSS_CFG(nixlf),
+		    BIT_ULL(36) | BIT_ULL(4) |
+		    ilog2(num_indices / MAX_RSS_INDIR_TBL_SIZE));
+	/* Config RSS group offset and sizes */
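+	/* Size field is ilog2(rss_sz) - 1; offset is the group's first index */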
+	for (grp = 0; grp < rss_grps; grp++)
+		rvu_write64(rvu, blkaddr, NIX_AF_LFX_RSS_GRPX(nixlf, grp),
+			    ((ilog2(rss_sz) - 1) << 16) | (rss_sz * grp));
+	return 0;
+}
+
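+/* Append one instruction at the current AQ head, ring the doorbell and
+ * busy-wait (up to ~1ms) for hardware to post a completion code in the
+ * result memory.
+ */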
+static int nix_aq_enqueue_wait(struct rvu *rvu, struct rvu_block *block,
+			       struct nix_aq_inst_s *inst)
+{
+	struct admin_queue *aq = block->aq;
+	struct nix_aq_res_s *result;
+	int timeout = 1000;
+	u64 reg, head;
+
+	result = (struct nix_aq_res_s *)aq->res->base;
+
+	/* Get current head pointer where to append this instruction */
+	reg = rvu_read64(rvu, block->addr, NIX_AF_AQ_STATUS);
+	head = (reg >> 4) & AQ_PTR_MASK;
+
+	memcpy((void *)(aq->inst->base + (head * aq->inst->entry_sz)),
+	       (void *)inst, aq->inst->entry_sz);
+	memset(result, 0, sizeof(*result));
+	/* sync into memory */
+	wmb();
+
+	/* Ring the doorbell and wait for result */
+	rvu_write64(rvu, block->addr, NIX_AF_AQ_DOOR, 1);
+	while (result->compcode == NIX_AQ_COMP_NOTDONE) {
+		cpu_relax();
+		udelay(1);
+		timeout--;
+		if (!timeout)
+			return -EBUSY;
+	}
+
+	if (result->compcode != NIX_AQ_COMP_GOOD)
+		/* TODO: Replace this with some error code */
+		return -EBUSY;
+
+	return 0;
+}
+
+static int rvu_nix_aq_enq_inst(struct rvu *rvu, struct nix_aq_enq_req *req,
+			       struct nix_aq_enq_rsp *rsp)
+{
+	struct rvu_hwinfo *hw = rvu->hw;
+	u16 pcifunc = req->hdr.pcifunc;
+	int nixlf, blkaddr, rc = 0;
+	struct nix_aq_inst_s inst;
+	struct rvu_block *block;
+	struct admin_queue *aq;
+	struct rvu_pfvf *pfvf;
+	void *ctx, *mask;
+	bool ena;
+	u64 cfg;
+
+	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
+	if (blkaddr < 0)
+		return NIX_AF_ERR_AF_LF_INVALID;
+
+	block = &hw->block[blkaddr];
+	aq = block->aq;
+	if (!aq) {
+		dev_warn(rvu->dev, "%s: NIX AQ not initialized\n", __func__);
+		return NIX_AF_ERR_AQ_ENQUEUE;
+	}
+
+	pfvf = rvu_get_pfvf(rvu, pcifunc);
+	nixlf = rvu_get_lf(rvu, block, pcifunc, 0);
+
+	/* Skip NIXLF check for broadcast MCE entry init */
+	if (!(!rsp && req->ctype == NIX_AQ_CTYPE_MCE)) {
+		if (!pfvf->nixlf || nixlf < 0)
+			return NIX_AF_ERR_AF_LF_INVALID;
+	}
+
+	switch (req->ctype) {
+	case NIX_AQ_CTYPE_RQ:
+		/* Check if index exceeds max no of queues */
+		if (!pfvf->rq_ctx || req->qidx >= pfvf->rq_ctx->qsize)
+			rc = NIX_AF_ERR_AQ_ENQUEUE;
+		break;
+	case NIX_AQ_CTYPE_SQ:
+		if (!pfvf->sq_ctx || req->qidx >= pfvf->sq_ctx->qsize)
+			rc = NIX_AF_ERR_AQ_ENQUEUE;
+		break;
+	case NIX_AQ_CTYPE_CQ:
+		if (!pfvf->cq_ctx || req->qidx >= pfvf->cq_ctx->qsize)
+			rc = NIX_AF_ERR_AQ_ENQUEUE;
+		break;
+	case NIX_AQ_CTYPE_RSS:
+		/* Check if RSS is enabled and qidx is within range */
+		cfg = rvu_read64(rvu, blkaddr, NIX_AF_LFX_RSS_CFG(nixlf));
+		if (!(cfg & BIT_ULL(4)) || !pfvf->rss_ctx ||
+		    (req->qidx >= (256UL << (cfg & 0xF))))
+			rc = NIX_AF_ERR_AQ_ENQUEUE;
+		break;
+	case NIX_AQ_CTYPE_MCE:
+		cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_MCAST_CFG);
+		/* Check if index exceeds MCE list length */
+		if (!hw->nix0->mcast.mce_ctx ||
+		    (req->qidx >= (256UL << (cfg & 0xF))))
+			rc = NIX_AF_ERR_AQ_ENQUEUE;
+
+		/* Adding multicast lists for requests from PF/VFs is not
+		 * yet supported, so ignore this.
+		 */
+		if (rsp)
+			rc = NIX_AF_ERR_AQ_ENQUEUE;
+		break;
+	default:
+		rc = NIX_AF_ERR_AQ_ENQUEUE;
+	}
+
+	if (rc)
+		return rc;
+
+	/* Check if SQ pointed SMQ belongs to this PF/VF or not */
+	if (req->ctype == NIX_AQ_CTYPE_SQ &&
+	    ((req->op == NIX_AQ_INSTOP_INIT && req->sq.ena) ||
+	     (req->op == NIX_AQ_INSTOP_WRITE &&
+	      req->sq_mask.ena && req->sq_mask.smq && req->sq.ena))) {
+		if (!is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_SMQ,
+				     pcifunc, req->sq.smq))
+			return NIX_AF_ERR_AQ_ENQUEUE;
+	}
+
+	memset(&inst, 0, sizeof(struct nix_aq_inst_s));
+	inst.lf = nixlf;
+	inst.cindex = req->qidx;
+	inst.ctype = req->ctype;
+	inst.op = req->op;
+	/* Currently we are not supporting enqueuing multiple instructions,
+	 * so always choose first entry in result memory.
+	 */
+	inst.res_addr = (u64)aq->res->iova;
+
+	/* Clean result + context memory */
+	memset(aq->res->base, 0, aq->res->entry_sz);
+	/* Context needs to be written at RES_ADDR + 128 */
+	ctx = aq->res->base + 128;
+	/* Mask needs to be written at RES_ADDR + 256 */
+	mask = aq->res->base + 256;
+
+	switch (req->op) {
+	case NIX_AQ_INSTOP_WRITE:
+		if (req->ctype == NIX_AQ_CTYPE_RQ)
+			memcpy(mask, &req->rq_mask,
+			       sizeof(struct nix_rq_ctx_s));
+		else if (req->ctype == NIX_AQ_CTYPE_SQ)
+			memcpy(mask, &req->sq_mask,
+			       sizeof(struct nix_sq_ctx_s));
+		else if (req->ctype == NIX_AQ_CTYPE_CQ)
+			memcpy(mask, &req->cq_mask,
+			       sizeof(struct nix_cq_ctx_s));
+		else if (req->ctype == NIX_AQ_CTYPE_RSS)
+			memcpy(mask, &req->rss_mask,
+			       sizeof(struct nix_rsse_s));
+		else if (req->ctype == NIX_AQ_CTYPE_MCE)
+			memcpy(mask, &req->mce_mask,
+			       sizeof(struct nix_rx_mce_s));
+		/* Fall through */
+	case NIX_AQ_INSTOP_INIT:
+		if (req->ctype == NIX_AQ_CTYPE_RQ)
+			memcpy(ctx, &req->rq, sizeof(struct nix_rq_ctx_s));
+		else if (req->ctype == NIX_AQ_CTYPE_SQ)
+			memcpy(ctx, &req->sq, sizeof(struct nix_sq_ctx_s));
+		else if (req->ctype == NIX_AQ_CTYPE_CQ)
+			memcpy(ctx, &req->cq, sizeof(struct nix_cq_ctx_s));
+		else if (req->ctype == NIX_AQ_CTYPE_RSS)
+			memcpy(ctx, &req->rss, sizeof(struct nix_rsse_s));
+		else if (req->ctype == NIX_AQ_CTYPE_MCE)
+			memcpy(ctx, &req->mce, sizeof(struct nix_rx_mce_s));
+		break;
+	case NIX_AQ_INSTOP_NOP:
+	case NIX_AQ_INSTOP_READ:
+	case NIX_AQ_INSTOP_LOCK:
+	case NIX_AQ_INSTOP_UNLOCK:
+		break;
+	default:
+		rc = NIX_AF_ERR_AQ_ENQUEUE;
+		return rc;
+	}
+
+	spin_lock(&aq->lock);
+
+	/* Submit the instruction to AQ */
+	rc = nix_aq_enqueue_wait(rvu, block, &inst);
+	if (rc) {
+		spin_unlock(&aq->lock);
+		return rc;
+	}
+
+	/* Set RQ/SQ/CQ bitmap if respective queue hw context is enabled */
+	if (req->op == NIX_AQ_INSTOP_INIT) {
+		if (req->ctype == NIX_AQ_CTYPE_RQ && req->rq.ena)
+			__set_bit(req->qidx, pfvf->rq_bmap);
+		if (req->ctype == NIX_AQ_CTYPE_SQ && req->sq.ena)
+			__set_bit(req->qidx, pfvf->sq_bmap);
+		if (req->ctype == NIX_AQ_CTYPE_CQ && req->cq.ena)
+			__set_bit(req->qidx, pfvf->cq_bmap);
+	}
+
+	if (req->op == NIX_AQ_INSTOP_WRITE) {
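+		/* On writes, the new enable state is the requested value where
+		 * the mask selects it, else the currently tracked state.
+		 */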
+		if (req->ctype == NIX_AQ_CTYPE_RQ) {
+			ena = (req->rq.ena & req->rq_mask.ena) |
+				(test_bit(req->qidx, pfvf->rq_bmap) &
+				~req->rq_mask.ena);
+			if (ena)
+				__set_bit(req->qidx, pfvf->rq_bmap);
+			else
+				__clear_bit(req->qidx, pfvf->rq_bmap);
+		}
+		if (req->ctype == NIX_AQ_CTYPE_SQ) {
+			ena = (req->sq.ena & req->sq_mask.ena) |
+				(test_bit(req->qidx, pfvf->sq_bmap) &
+				~req->sq_mask.ena);
+			if (ena)
+				__set_bit(req->qidx, pfvf->sq_bmap);
+			else
+				__clear_bit(req->qidx, pfvf->sq_bmap);
+		}
+		if (req->ctype == NIX_AQ_CTYPE_CQ) {
+			ena = (req->cq.ena & req->cq_mask.ena) |
+				(test_bit(req->qidx, pfvf->cq_bmap) &
+				~req->cq_mask.ena);
+			if (ena)
+				__set_bit(req->qidx, pfvf->cq_bmap);
+			else
+				__clear_bit(req->qidx, pfvf->cq_bmap);
+		}
+	}
+
+	if (rsp) {
+		/* Copy read context into mailbox */
+		if (req->op == NIX_AQ_INSTOP_READ) {
+			if (req->ctype == NIX_AQ_CTYPE_RQ)
+				memcpy(&rsp->rq, ctx,
+				       sizeof(struct nix_rq_ctx_s));
+			else if (req->ctype == NIX_AQ_CTYPE_SQ)
+				memcpy(&rsp->sq, ctx,
+				       sizeof(struct nix_sq_ctx_s));
+			else if (req->ctype == NIX_AQ_CTYPE_CQ)
+				memcpy(&rsp->cq, ctx,
+				       sizeof(struct nix_cq_ctx_s));
+			else if (req->ctype == NIX_AQ_CTYPE_RSS)
+				memcpy(&rsp->rss, ctx,
+				       sizeof(struct nix_rsse_s));
+			else if (req->ctype == NIX_AQ_CTYPE_MCE)
+				memcpy(&rsp->mce, ctx,
+				       sizeof(struct nix_rx_mce_s));
+		}
+	}
+
+	spin_unlock(&aq->lock);
+	return 0;
+}
+
+static int nix_lf_hwctx_disable(struct rvu *rvu, struct hwctx_disable_req *req)
+{
+	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc);
+	struct nix_aq_enq_req aq_req;
+	unsigned long *bmap;
+	int qidx, q_cnt = 0;
+	int err = 0, rc;
+
+	if (!pfvf->cq_ctx || !pfvf->sq_ctx || !pfvf->rq_ctx)
+		return NIX_AF_ERR_AQ_ENQUEUE;
+
+	memset(&aq_req, 0, sizeof(struct nix_aq_enq_req));
+	aq_req.hdr.pcifunc = req->hdr.pcifunc;
+
+	if (req->ctype == NIX_AQ_CTYPE_CQ) {
+		aq_req.cq.ena = 0;
+		aq_req.cq_mask.ena = 1;
+		q_cnt = pfvf->cq_ctx->qsize;
+		bmap = pfvf->cq_bmap;
+	}
+	if (req->ctype == NIX_AQ_CTYPE_SQ) {
+		aq_req.sq.ena = 0;
+		aq_req.sq_mask.ena = 1;
+		q_cnt = pfvf->sq_ctx->qsize;
+		bmap = pfvf->sq_bmap;
+	}
+	if (req->ctype == NIX_AQ_CTYPE_RQ) {
+		aq_req.rq.ena = 0;
+		aq_req.rq_mask.ena = 1;
+		q_cnt = pfvf->rq_ctx->qsize;
+		bmap = pfvf->rq_bmap;
+	}
+
+	aq_req.ctype = req->ctype;
+	aq_req.op = NIX_AQ_INSTOP_WRITE;
+
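+	/* Clear the ENA bit of every queue currently marked enabled */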
+	for (qidx = 0; qidx < q_cnt; qidx++) {
+		if (!test_bit(qidx, bmap))
+			continue;
+		aq_req.qidx = qidx;
+		rc = rvu_nix_aq_enq_inst(rvu, &aq_req, NULL);
+		if (rc) {
+			err = rc;
+			dev_err(rvu->dev, "Failed to disable %s:%d context\n",
+				(req->ctype == NIX_AQ_CTYPE_CQ) ?
+				"CQ" : ((req->ctype == NIX_AQ_CTYPE_RQ) ?
+				"RQ" : "SQ"), qidx);
+		}
+	}
+
+	return err;
+}
+
+int rvu_mbox_handler_nix_aq_enq(struct rvu *rvu,
+				struct nix_aq_enq_req *req,
+				struct nix_aq_enq_rsp *rsp)
+{
+	return rvu_nix_aq_enq_inst(rvu, req, rsp);
+}
+
+int rvu_mbox_handler_nix_hwctx_disable(struct rvu *rvu,
+				       struct hwctx_disable_req *req,
+				       struct msg_rsp *rsp)
+{
+	return nix_lf_hwctx_disable(rvu, req);
+}
+
+int rvu_mbox_handler_nix_lf_alloc(struct rvu *rvu,
+				  struct nix_lf_alloc_req *req,
+				  struct nix_lf_alloc_rsp *rsp)
+{
+	int nixlf, qints, hwctx_size, intf, err, rc = 0;
+	struct rvu_hwinfo *hw = rvu->hw;
+	u16 pcifunc = req->hdr.pcifunc;
+	struct rvu_block *block;
+	struct rvu_pfvf *pfvf;
+	u64 cfg, ctx_cfg;
+	int blkaddr;
+
+	if (!req->rq_cnt || !req->sq_cnt || !req->cq_cnt)
+		return NIX_AF_ERR_PARAM;
+
+	pfvf = rvu_get_pfvf(rvu, pcifunc);
+	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
+	if (!pfvf->nixlf || blkaddr < 0)
+		return NIX_AF_ERR_AF_LF_INVALID;
+
+	block = &hw->block[blkaddr];
+	nixlf = rvu_get_lf(rvu, block, pcifunc, 0);
+	if (nixlf < 0)
+		return NIX_AF_ERR_AF_LF_INVALID;
+
+	/* Check if requested 'NIXLF <=> NPALF' mapping is valid */
+	if (req->npa_func) {
+		/* If default, use 'this' NIXLF's PFFUNC */
+		if (req->npa_func == RVU_DEFAULT_PF_FUNC)
+			req->npa_func = pcifunc;
+		if (!is_pffunc_map_valid(rvu, req->npa_func, BLKTYPE_NPA))
+			return NIX_AF_INVAL_NPA_PF_FUNC;
+	}
+
+	/* Check if requested 'NIXLF <=> SSOLF' mapping is valid */
+	if (req->sso_func) {
+		/* If default, use 'this' NIXLF's PFFUNC */
+		if (req->sso_func == RVU_DEFAULT_PF_FUNC)
+			req->sso_func = pcifunc;
+		if (!is_pffunc_map_valid(rvu, req->sso_func, BLKTYPE_SSO))
+			return NIX_AF_INVAL_SSO_PF_FUNC;
+	}
+
+	/* If RSS is being enabled, check if requested config is valid.
+	 * RSS table size should be power of two, otherwise
+	 * RSS_GRP::OFFSET + adder might go beyond that group or
+	 * won't be able to use entire table.
+	 */
+	if (req->rss_sz && (req->rss_sz > MAX_RSS_INDIR_TBL_SIZE ||
+			    !is_power_of_2(req->rss_sz)))
+		return NIX_AF_ERR_RSS_SIZE_INVALID;
+
+	if (req->rss_sz &&
+	    (!req->rss_grps || req->rss_grps > MAX_RSS_GROUPS))
+		return NIX_AF_ERR_RSS_GRPS_INVALID;
+
+	/* Reset this NIX LF */
+	err = rvu_lf_reset(rvu, block, nixlf);
+	if (err) {
+		dev_err(rvu->dev, "Failed to reset NIX%d LF%d\n",
+			block->addr - BLKADDR_NIX0, nixlf);
+		return NIX_AF_ERR_LF_RESET;
+	}
+
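+	/* As used below, NIX_AF_CONST3 carries 4-bit log2 sizes of the
+	 * various HW context structures; decode them to size the qmem
+	 * allocations.
+	 */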
+	ctx_cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST3);
+
+	/* Alloc NIX RQ HW context memory and config the base */
+	hwctx_size = 1UL << ((ctx_cfg >> 4) & 0xF);
+	err = qmem_alloc(rvu->dev, &pfvf->rq_ctx, req->rq_cnt, hwctx_size);
+	if (err)
+		goto free_mem;
+
+	pfvf->rq_bmap = kcalloc(req->rq_cnt, sizeof(long), GFP_KERNEL);
+	if (!pfvf->rq_bmap)
+		goto free_mem;
+
+	rvu_write64(rvu, blkaddr, NIX_AF_LFX_RQS_BASE(nixlf),
+		    (u64)pfvf->rq_ctx->iova);
+
+	/* Set caching and queue count in HW */
+	cfg = BIT_ULL(36) | (req->rq_cnt - 1);
+	rvu_write64(rvu, blkaddr, NIX_AF_LFX_RQS_CFG(nixlf), cfg);
+
+	/* Alloc NIX SQ HW context memory and config the base */
+	hwctx_size = 1UL << (ctx_cfg & 0xF);
+	err = qmem_alloc(rvu->dev, &pfvf->sq_ctx, req->sq_cnt, hwctx_size);
+	if (err)
+		goto free_mem;
+
+	pfvf->sq_bmap = kcalloc(req->sq_cnt, sizeof(long), GFP_KERNEL);
+	if (!pfvf->sq_bmap)
+		goto free_mem;
+
+	rvu_write64(rvu, blkaddr, NIX_AF_LFX_SQS_BASE(nixlf),
+		    (u64)pfvf->sq_ctx->iova);
+	cfg = BIT_ULL(36) | (req->sq_cnt - 1);
+	rvu_write64(rvu, blkaddr, NIX_AF_LFX_SQS_CFG(nixlf), cfg);
+
+	/* Alloc NIX CQ HW context memory and config the base */
+	hwctx_size = 1UL << ((ctx_cfg >> 8) & 0xF);
+	err = qmem_alloc(rvu->dev, &pfvf->cq_ctx, req->cq_cnt, hwctx_size);
+	if (err)
+		goto free_mem;
+
+	pfvf->cq_bmap = kcalloc(req->cq_cnt, sizeof(long), GFP_KERNEL);
+	if (!pfvf->cq_bmap)
+		goto free_mem;
+
+	rvu_write64(rvu, blkaddr, NIX_AF_LFX_CQS_BASE(nixlf),
+		    (u64)pfvf->cq_ctx->iova);
+	cfg = BIT_ULL(36) | (req->cq_cnt - 1);
+	rvu_write64(rvu, blkaddr, NIX_AF_LFX_CQS_CFG(nixlf), cfg);
+
+	/* Initialize receive side scaling (RSS) */
+	hwctx_size = 1UL << ((ctx_cfg >> 12) & 0xF);
+	err = nixlf_rss_ctx_init(rvu, blkaddr, pfvf, nixlf,
+				 req->rss_sz, req->rss_grps, hwctx_size);
+	if (err)
+		goto free_mem;
+
+	/* Alloc memory for CQINT's HW contexts */
+	cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST2);
+	qints = (cfg >> 24) & 0xFFF;
+	hwctx_size = 1UL << ((ctx_cfg >> 24) & 0xF);
+	err = qmem_alloc(rvu->dev, &pfvf->cq_ints_ctx, qints, hwctx_size);
+	if (err)
+		goto free_mem;
+
+	rvu_write64(rvu, blkaddr, NIX_AF_LFX_CINTS_BASE(nixlf),
+		    (u64)pfvf->cq_ints_ctx->iova);
+	rvu_write64(rvu, blkaddr, NIX_AF_LFX_CINTS_CFG(nixlf), BIT_ULL(36));
+
+	/* Alloc memory for QINT's HW contexts */
+	cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST2);
+	qints = (cfg >> 12) & 0xFFF;
+	hwctx_size = 1UL << ((ctx_cfg >> 20) & 0xF);
+	err = qmem_alloc(rvu->dev, &pfvf->nix_qints_ctx, qints, hwctx_size);
+	if (err)
+		goto free_mem;
+
+	rvu_write64(rvu, blkaddr, NIX_AF_LFX_QINTS_BASE(nixlf),
+		    (u64)pfvf->nix_qints_ctx->iova);
+	rvu_write64(rvu, blkaddr, NIX_AF_LFX_QINTS_CFG(nixlf), BIT_ULL(36));
+
+	/* Setup VLANX TPID's.
+	 * Use VLAN1 for 802.1Q
+	 * and VLAN0 for 802.1AD.
+	 */
+	cfg = (0x8100ULL << 16) | 0x88A8ULL;
+	rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_CFG(nixlf), cfg);
+
+	/* Enable LMTST for this NIX LF */
+	rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_CFG2(nixlf), BIT_ULL(0));
+
+	/* Set CQE/WQE size, NPA_PF_FUNC for SQBs and also SSO_PF_FUNC */
+	if (req->npa_func)
+		cfg = req->npa_func;
+	if (req->sso_func)
+		cfg |= (u64)req->sso_func << 16;
+
+	cfg |= (u64)req->xqe_sz << 33;
+	rvu_write64(rvu, blkaddr, NIX_AF_LFX_CFG(nixlf), cfg);
+
+	/* Config Rx pkt length, csum checks and apad enable/disable */
+	rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_CFG(nixlf), req->rx_cfg);
+
+	intf = is_afvf(pcifunc) ? NIX_INTF_TYPE_LBK : NIX_INTF_TYPE_CGX;
+	err = nix_interface_init(rvu, pcifunc, intf, nixlf);
+	if (err)
+		goto free_mem;
+
+	/* Disable NPC entries as NIXLF's contexts are not initialized yet */
+	rvu_npc_disable_default_entries(rvu, pcifunc, nixlf);
+
+	goto exit;
+
+free_mem:
+	nix_ctx_free(rvu, pfvf);
+	rc = -ENOMEM;
+
+exit:
+	/* Set macaddr of this PF/VF */
+	ether_addr_copy(rsp->mac_addr, pfvf->mac_addr);
+
+	/* set SQB size info */
+	cfg = rvu_read64(rvu, blkaddr, NIX_AF_SQ_CONST);
+	rsp->sqb_size = (cfg >> 34) & 0xFFFF;
+	rsp->rx_chan_base = pfvf->rx_chan_base;
+	rsp->tx_chan_base = pfvf->tx_chan_base;
+	rsp->rx_chan_cnt = pfvf->rx_chan_cnt;
+	rsp->tx_chan_cnt = pfvf->tx_chan_cnt;
+	rsp->lso_tsov4_idx = NIX_LSO_FORMAT_IDX_TSOV4;
+	rsp->lso_tsov6_idx = NIX_LSO_FORMAT_IDX_TSOV6;
+	/* Get HW supported stat count */
+	cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST1);
+	rsp->lf_rx_stats = ((cfg >> 32) & 0xFF);
+	rsp->lf_tx_stats = ((cfg >> 24) & 0xFF);
+	/* Get count of CQ IRQs and error IRQs supported per LF */
+	cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST2);
+	rsp->qints = ((cfg >> 12) & 0xFFF);
+	rsp->cints = ((cfg >> 24) & 0xFFF);
+	return rc;
+}
+
+int rvu_mbox_handler_nix_lf_free(struct rvu *rvu, struct msg_req *req,
+				 struct msg_rsp *rsp)
+{
+	struct rvu_hwinfo *hw = rvu->hw;
+	u16 pcifunc = req->hdr.pcifunc;
+	struct rvu_block *block;
+	int blkaddr, nixlf, err;
+	struct rvu_pfvf *pfvf;
+
+	pfvf = rvu_get_pfvf(rvu, pcifunc);
+	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
+	if (!pfvf->nixlf || blkaddr < 0)
+		return NIX_AF_ERR_AF_LF_INVALID;
+
+	block = &hw->block[blkaddr];
+	nixlf = rvu_get_lf(rvu, block, pcifunc, 0);
+	if (nixlf < 0)
+		return NIX_AF_ERR_AF_LF_INVALID;
+
+	nix_interface_deinit(rvu, pcifunc, nixlf);
+
+	/* Reset this NIX LF */
+	err = rvu_lf_reset(rvu, block, nixlf);
+	if (err) {
+		dev_err(rvu->dev, "Failed to reset NIX%d LF%d\n",
+			block->addr - BLKADDR_NIX0, nixlf);
+		return NIX_AF_ERR_LF_RESET;
+	}
+
+	nix_ctx_free(rvu, pfvf);
+
+	return 0;
+}
+
+int rvu_mbox_handler_nix_mark_format_cfg(struct rvu *rvu,
+					 struct nix_mark_format_cfg  *req,
+					 struct nix_mark_format_cfg_rsp *rsp)
+{
+	u16 pcifunc = req->hdr.pcifunc;
+	struct nix_hw *nix_hw;
+	struct rvu_pfvf *pfvf;
+	int blkaddr, rc;
+	u32 cfg;
+
+	pfvf = rvu_get_pfvf(rvu, pcifunc);
+	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
+	if (!pfvf->nixlf || blkaddr < 0)
+		return NIX_AF_ERR_AF_LF_INVALID;
+
+	nix_hw = get_nix_hw(rvu->hw, blkaddr);
+	if (!nix_hw)
+		return -EINVAL;
+
+	cfg = (((u32)req->offset & 0x7) << 16) |
+	      (((u32)req->y_mask & 0xF) << 12) |
+	      (((u32)req->y_val & 0xF) << 8) |
+	      (((u32)req->r_mask & 0xF) << 4) | ((u32)req->r_val & 0xF);
+
+	rc = rvu_nix_reserve_mark_format(rvu, nix_hw, blkaddr, cfg);
+	if (rc < 0) {
+		dev_err(rvu->dev, "No mark_format_ctl for (pf:%d, vf:%d)",
+			rvu_get_pf(pcifunc), pcifunc & RVU_PFVF_FUNC_MASK);
+		return NIX_AF_ERR_MARK_CFG_FAIL;
+	}
+
+	rsp->mark_format_idx = rc;
+	return 0;
+}
+
+/* Disable shaping of pkts by a scheduler queue
+ * at a given scheduler level.
+ */
+static void nix_reset_tx_shaping(struct rvu *rvu, int blkaddr,
+				 int lvl, int schq)
+{
+	u64  cir_reg = 0, pir_reg = 0;
+	u64  cfg;
+
+	switch (lvl) {
+	case NIX_TXSCH_LVL_TL1:
+		cir_reg = NIX_AF_TL1X_CIR(schq);
+		pir_reg = 0; /* PIR not available at TL1 */
+		break;
+	case NIX_TXSCH_LVL_TL2:
+		cir_reg = NIX_AF_TL2X_CIR(schq);
+		pir_reg = NIX_AF_TL2X_PIR(schq);
+		break;
+	case NIX_TXSCH_LVL_TL3:
+		cir_reg = NIX_AF_TL3X_CIR(schq);
+		pir_reg = NIX_AF_TL3X_PIR(schq);
+		break;
+	case NIX_TXSCH_LVL_TL4:
+		cir_reg = NIX_AF_TL4X_CIR(schq);
+		pir_reg = NIX_AF_TL4X_PIR(schq);
+		break;
+	}
+
+	if (!cir_reg)
+		return;
+	cfg = rvu_read64(rvu, blkaddr, cir_reg);
+	rvu_write64(rvu, blkaddr, cir_reg, cfg & ~BIT_ULL(0));
+
+	if (!pir_reg)
+		return;
+	cfg = rvu_read64(rvu, blkaddr, pir_reg);
+	rvu_write64(rvu, blkaddr, pir_reg, cfg & ~BIT_ULL(0));
+}
+
+static void nix_reset_tx_linkcfg(struct rvu *rvu, int blkaddr,
+				 int lvl, int schq)
+{
+	struct rvu_hwinfo *hw = rvu->hw;
+	int link;
+
+	/* Reset TL4's SDP link config */
+	if (lvl == NIX_TXSCH_LVL_TL4)
+		rvu_write64(rvu, blkaddr, NIX_AF_TL4X_SDP_LINK_CFG(schq), 0x00);
+
+	if (lvl != NIX_TXSCH_LVL_TL2)
+		return;
+
+	/* Reset TL2's CGX or LBK link config */
+	for (link = 0; link < (hw->cgx_links + hw->lbk_links); link++)
+		rvu_write64(rvu, blkaddr,
+			    NIX_AF_TL3_TL2X_LINKX_CFG(schq, link), 0x00);
+}
+
+static int
+rvu_get_tl1_schqs(struct rvu *rvu, int blkaddr, u16 pcifunc,
+		  u16 *schq_list, u16 *schq_cnt)
+{
+	struct nix_txsch *txsch;
+	struct nix_hw *nix_hw;
+	struct rvu_pfvf *pfvf;
+	u8 cgx_id, lmac_id;
+	u16 schq_base;
+	u32 *pfvf_map;
+	int pf, intf;
+
+	nix_hw = get_nix_hw(rvu->hw, blkaddr);
+	if (!nix_hw)
+		return -ENODEV;
+
+	pfvf = rvu_get_pfvf(rvu, pcifunc);
+	txsch = &nix_hw->txsch[NIX_TXSCH_LVL_TL1];
+	pfvf_map = txsch->pfvf_map;
+	pf = rvu_get_pf(pcifunc);
+
+	/* static allocation as two TL1's per link */
+	intf = is_afvf(pcifunc) ? NIX_INTF_TYPE_LBK : NIX_INTF_TYPE_CGX;
+
+	switch (intf) {
+	case NIX_INTF_TYPE_CGX:
+		rvu_get_cgx_lmac_id(pfvf->cgx_lmac, &cgx_id, &lmac_id);
+		schq_base = (cgx_id * MAX_LMAC_PER_CGX + lmac_id) * 2;
+		break;
+	case NIX_INTF_TYPE_LBK:
+		schq_base = rvu->cgx_cnt_max * MAX_LMAC_PER_CGX * 2;
+		break;
+	default:
+		return -ENODEV;
+	}
+
+	if (schq_base + 1 > txsch->schq.max)
+		return -ENODEV;
+
+	/* init pfvf_map as we store flags */
+	if (pfvf_map[schq_base] == U32_MAX) {
+		pfvf_map[schq_base] =
+			TXSCH_MAP((pf << RVU_PFVF_PF_SHIFT), 0);
+		pfvf_map[schq_base + 1] =
+			TXSCH_MAP((pf << RVU_PFVF_PF_SHIFT), 0);
+
+		/* Onetime reset for TL1 */
+		nix_reset_tx_linkcfg(rvu, blkaddr,
+				     NIX_TXSCH_LVL_TL1, schq_base);
+		nix_reset_tx_shaping(rvu, blkaddr,
+				     NIX_TXSCH_LVL_TL1, schq_base);
+
+		nix_reset_tx_linkcfg(rvu, blkaddr,
+				     NIX_TXSCH_LVL_TL1, schq_base + 1);
+		nix_reset_tx_shaping(rvu, blkaddr,
+				     NIX_TXSCH_LVL_TL1, schq_base + 1);
+	}
+
+	if (schq_list && schq_cnt) {
+		schq_list[0] = schq_base;
+		schq_list[1] = schq_base + 1;
+	/* Sync all in-flight RX packets to LLC/DRAM */
+	}
+
+	return 0;
+}
+
+int rvu_mbox_handler_nix_txsch_alloc(struct rvu *rvu,
+				     struct nix_txsch_alloc_req *req,
+				     struct nix_txsch_alloc_rsp *rsp)
+{
+	u16 pcifunc = req->hdr.pcifunc;
+	struct nix_txsch *txsch;
+	int lvl, idx, req_schq;
+	struct rvu_pfvf *pfvf;
+	struct nix_hw *nix_hw;
+	int blkaddr, rc = 0;
+	u32 *pfvf_map;
+	u16 schq;
+
+	pfvf = rvu_get_pfvf(rvu, pcifunc);
+	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
+	if (!pfvf->nixlf || blkaddr < 0)
+		return NIX_AF_ERR_AF_LF_INVALID;
+
+	nix_hw = get_nix_hw(rvu->hw, blkaddr);
+	if (!nix_hw)
+		return -EINVAL;
+
+	mutex_lock(&rvu->rsrc_lock);
+	for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
+		txsch = &nix_hw->txsch[lvl];
+		req_schq = req->schq_contig[lvl] + req->schq[lvl];
+		pfvf_map = txsch->pfvf_map;
+
+		if (!req_schq)
+			continue;
+
+		/* There are only 28 TL1s */
+		if (lvl == NIX_TXSCH_LVL_TL1) {
+			if (req->schq_contig[lvl] ||
+			    req->schq[lvl] > 2 ||
+			    rvu_get_tl1_schqs(rvu, blkaddr,
+					      pcifunc, NULL, NULL))
+				goto err;
+			continue;
+		}
+
+		/* Check if request is valid */
+		if (req_schq > MAX_TXSCHQ_PER_FUNC)
+			goto err;
+
+		/* If contiguous queues are needed, check for availability */
+		if (req->schq_contig[lvl] &&
+		    !rvu_rsrc_check_contig(&txsch->schq, req->schq_contig[lvl]))
+			goto err;
+
+		/* Check if full request can be accommodated */
+		if (req_schq >= rvu_rsrc_free_count(&txsch->schq))
+			goto err;
+	}
+
+	for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
+		txsch = &nix_hw->txsch[lvl];
+		rsp->schq_contig[lvl] = req->schq_contig[lvl];
+		pfvf_map = txsch->pfvf_map;
+		rsp->schq[lvl] = req->schq[lvl];
+
+		if (!req->schq[lvl] && !req->schq_contig[lvl])
+			continue;
+
+		/* Handle TL1 specially as its allocation
+		 * is restricted to 2 TL1s per link
+		 */
+
+		if (lvl == NIX_TXSCH_LVL_TL1) {
+			rsp->schq_contig[lvl] = 0;
+			rvu_get_tl1_schqs(rvu, blkaddr, pcifunc,
+					  &rsp->schq_list[lvl][0],
+					  &rsp->schq[lvl]);
+			continue;
+		}
+
+		/* Alloc contiguous queues first */
+		if (req->schq_contig[lvl]) {
+			schq = rvu_alloc_rsrc_contig(&txsch->schq,
+						     req->schq_contig[lvl]);
+
+			for (idx = 0; idx < req->schq_contig[lvl]; idx++) {
+				pfvf_map[schq] = TXSCH_MAP(pcifunc, 0);
+				nix_reset_tx_linkcfg(rvu, blkaddr, lvl, schq);
+				nix_reset_tx_shaping(rvu, blkaddr, lvl, schq);
+				rsp->schq_contig_list[lvl][idx] = schq;
+				schq++;
+			}
+		}
+
+		/* Alloc non-contiguous queues */
+		for (idx = 0; idx < req->schq[lvl]; idx++) {
+			schq = rvu_alloc_rsrc(&txsch->schq);
+			pfvf_map[schq] = TXSCH_MAP(pcifunc, 0);
+			nix_reset_tx_linkcfg(rvu, blkaddr, lvl, schq);
+			nix_reset_tx_shaping(rvu, blkaddr, lvl, schq);
+			rsp->schq_list[lvl][idx] = schq;
+		}
+	}
+	goto exit;
+err:
+	rc = NIX_AF_ERR_TLX_ALLOC_FAIL;
+exit:
+	mutex_unlock(&rvu->rsrc_lock);
+	return rc;
+}
+
+static int nix_txschq_free(struct rvu *rvu, u16 pcifunc)
+{
+	int blkaddr, nixlf, lvl, schq, err;
+	struct rvu_hwinfo *hw = rvu->hw;
+	struct nix_txsch *txsch;
+	struct nix_hw *nix_hw;
+	u64 cfg;
+
+	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
+	if (blkaddr < 0)
+		return NIX_AF_ERR_AF_LF_INVALID;
+
+	nix_hw = get_nix_hw(rvu->hw, blkaddr);
+	if (!nix_hw)
+		return -EINVAL;
+
+	nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
+	if (nixlf < 0)
+		return NIX_AF_ERR_AF_LF_INVALID;
+
+	/* Disable TL2/TL4 queue links before SMQ flush */
+	mutex_lock(&rvu->rsrc_lock);
+	for (lvl = NIX_TXSCH_LVL_TL4; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
+		if (lvl != NIX_TXSCH_LVL_TL2 && lvl != NIX_TXSCH_LVL_TL4)
+			continue;
+
+		txsch = &nix_hw->txsch[lvl];
+		for (schq = 0; schq < txsch->schq.max; schq++) {
+			if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) != pcifunc)
+				continue;
+			nix_reset_tx_linkcfg(rvu, blkaddr, lvl, schq);
+		}
+	}
+
+	/* Flush SMQs */
+	txsch = &nix_hw->txsch[NIX_TXSCH_LVL_SMQ];
+	for (schq = 0; schq < txsch->schq.max; schq++) {
+		if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) != pcifunc)
+			continue;
+		cfg = rvu_read64(rvu, blkaddr, NIX_AF_SMQX_CFG(schq));
+		/* Do SMQ flush and set enqueue xoff */
+		cfg |= BIT_ULL(50) | BIT_ULL(49);
+		rvu_write64(rvu, blkaddr, NIX_AF_SMQX_CFG(schq), cfg);
+
+		/* Wait for flush to complete */
+		err = rvu_poll_reg(rvu, blkaddr,
+				   NIX_AF_SMQX_CFG(schq), BIT_ULL(49), true);
+		if (err) {
+			dev_err(rvu->dev,
+				"NIXLF%d: SMQ%d flush failed\n", nixlf, schq);
+		}
+	}
+
+	/* Now free scheduler queues to free pool */
+	for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
+		/* Free all SCHQs except TL1, as TL1 is shared
+		 * across all VFs of an RVU PF
+		 */
+		if (lvl == NIX_TXSCH_LVL_TL1)
+			continue;
+
+		txsch = &nix_hw->txsch[lvl];
+		for (schq = 0; schq < txsch->schq.max; schq++) {
+			if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) != pcifunc)
+				continue;
+			rvu_free_rsrc(&txsch->schq, schq);
+			txsch->pfvf_map[schq] = 0;
+		}
+	}
+	mutex_unlock(&rvu->rsrc_lock);
+
+	/* Sync cached info for this LF in NDC-TX to LLC/DRAM */
+	rvu_write64(rvu, blkaddr, NIX_AF_NDC_TX_SYNC, BIT_ULL(12) | nixlf);
+	err = rvu_poll_reg(rvu, blkaddr, NIX_AF_NDC_TX_SYNC, BIT_ULL(12), true);
+	if (err)
+		dev_err(rvu->dev, "NDC-TX sync failed for NIXLF %d\n", nixlf);
+
+	return 0;
+}
+
+static int nix_txschq_free_one(struct rvu *rvu,
+			       struct nix_txsch_free_req *req)
+{
+	int lvl, schq, nixlf, blkaddr, rc;
+	struct rvu_hwinfo *hw = rvu->hw;
+	u16 pcifunc = req->hdr.pcifunc;
+	struct nix_txsch *txsch;
+	struct nix_hw *nix_hw;
+	u32 *pfvf_map;
+	u64 cfg;
+
+	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
+	if (blkaddr < 0)
+		return NIX_AF_ERR_AF_LF_INVALID;
+
+	nix_hw = get_nix_hw(rvu->hw, blkaddr);
+	if (!nix_hw)
+		return -EINVAL;
+
+	nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
+	if (nixlf < 0)
+		return NIX_AF_ERR_AF_LF_INVALID;
+
+	lvl = req->schq_lvl;
+	schq = req->schq;
+	txsch = &nix_hw->txsch[lvl];
+
+	/* Don't allow freeing TL1 */
+	if (lvl > NIX_TXSCH_LVL_TL2 ||
+	    schq >= txsch->schq.max)
+		goto err;
+
+	pfvf_map = txsch->pfvf_map;
+	mutex_lock(&rvu->rsrc_lock);
+
+	if (TXSCH_MAP_FUNC(pfvf_map[schq]) != pcifunc) {
+		mutex_unlock(&rvu->rsrc_lock);
+		goto err;
+	}
+
+	/* Flush if it is a SMQ. Onus of disabling
+	 * TL2/3 queue links before SMQ flush is on user
+	 */
+	if (lvl == NIX_TXSCH_LVL_SMQ) {
+		cfg = rvu_read64(rvu, blkaddr, NIX_AF_SMQX_CFG(schq));
+		/* Do SMQ flush and set enqueue xoff */
+		cfg |= BIT_ULL(50) | BIT_ULL(49);
+		rvu_write64(rvu, blkaddr, NIX_AF_SMQX_CFG(schq), cfg);
+
+		/* Wait for flush to complete */
+		rc = rvu_poll_reg(rvu, blkaddr,
+				  NIX_AF_SMQX_CFG(schq), BIT_ULL(49), true);
+		if (rc) {
+			dev_err(rvu->dev,
+				"NIXLF%d: SMQ%d flush failed\n", nixlf, schq);
+		}
+	}
+
+	/* Free the resource */
+	rvu_free_rsrc(&txsch->schq, schq);
+	txsch->pfvf_map[schq] = 0;
+	mutex_unlock(&rvu->rsrc_lock);
+	return 0;
+err:
+	return NIX_AF_ERR_TLX_INVALID;
+}
+
+int rvu_mbox_handler_nix_txsch_free(struct rvu *rvu,
+				    struct nix_txsch_free_req *req,
+				    struct msg_rsp *rsp)
+{
+	if (req->flags & TXSCHQ_FREE_ALL)
+		return nix_txschq_free(rvu, req->hdr.pcifunc);
+	else
+		return nix_txschq_free_one(rvu, req);
+}
+
+static bool is_txschq_config_valid(struct rvu *rvu, u16 pcifunc, int blkaddr,
+				   int lvl, u64 reg, u64 regval)
+{
+	u64 regbase = reg & 0xFFFF;
+	u16 schq, parent;
+
+	if (!rvu_check_valid_reg(TXSCHQ_HWREGMAP, lvl, reg))
+		return false;
+
+	schq = TXSCHQ_IDX(reg, TXSCHQ_IDX_SHIFT);
+	/* Check if this schq belongs to this PF/VF or not */
+	if (!is_valid_txschq(rvu, blkaddr, lvl, pcifunc, schq))
+		return false;
+
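+	/* For the *_PARENT registers, the parent schq index is in bits 24:16 */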
+	parent = (regval >> 16) & 0x1FF;
+	/* Validate MDQ's TL4 parent */
+	if (regbase == NIX_AF_MDQX_PARENT(0) &&
+	    !is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_TL4, pcifunc, parent))
+		return false;
+
+	/* Validate TL4's TL3 parent */
+	if (regbase == NIX_AF_TL4X_PARENT(0) &&
+	    !is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_TL3, pcifunc, parent))
+		return false;
+
+	/* Validate TL3's TL2 parent */
+	if (regbase == NIX_AF_TL3X_PARENT(0) &&
+	    !is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_TL2, pcifunc, parent))
+		return false;
+
+	/* Validate TL2's TL1 parent */
+	if (regbase == NIX_AF_TL2X_PARENT(0) &&
+	    !is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_TL1, pcifunc, parent))
+		return false;
+
+	return true;
+}
+
+static int
+nix_tl1_default_cfg(struct rvu *rvu, u16 pcifunc)
+{
+	u16 schq_list[2], schq_cnt, schq;
+	int blkaddr, idx, err = 0;
+	u16 map_func, map_flags;
+	struct nix_hw *nix_hw;
+	u64 reg, regval;
+	u32 *pfvf_map;
+
+	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
+	if (blkaddr < 0)
+		return NIX_AF_ERR_AF_LF_INVALID;
+
+	nix_hw = get_nix_hw(rvu->hw, blkaddr);
+	if (!nix_hw)
+		return -EINVAL;
+
+	pfvf_map = nix_hw->txsch[NIX_TXSCH_LVL_TL1].pfvf_map;
+
+	mutex_lock(&rvu->rsrc_lock);
+
+	err = rvu_get_tl1_schqs(rvu, blkaddr,
+				pcifunc, schq_list, &schq_cnt);
+	if (err)
+		goto unlock;
+
+	for (idx = 0; idx < schq_cnt; idx++) {
+		schq = schq_list[idx];
+		map_func = TXSCH_MAP_FUNC(pfvf_map[schq]);
+		map_flags = TXSCH_MAP_FLAGS(pfvf_map[schq]);
+
+		/* check if config is already done or this is a PF */
+		if (map_flags & NIX_TXSCHQ_TL1_CFG_DONE)
+			continue;
+
+		/* default configuration */
+		reg = NIX_AF_TL1X_TOPOLOGY(schq);
+		regval = (TXSCH_TL1_DFLT_RR_PRIO << 1);
+		rvu_write64(rvu, blkaddr, reg, regval);
+		reg = NIX_AF_TL1X_SCHEDULE(schq);
+		regval = TXSCH_TL1_DFLT_RR_QTM;
+		rvu_write64(rvu, blkaddr, reg, regval);
+		reg = NIX_AF_TL1X_CIR(schq);
+		regval = 0;
+		rvu_write64(rvu, blkaddr, reg, regval);
+
+		map_flags |= NIX_TXSCHQ_TL1_CFG_DONE;
+		pfvf_map[schq] = TXSCH_MAP(map_func, map_flags);
+	}
+unlock:
+	mutex_unlock(&rvu->rsrc_lock);
+	return err;
+}
+
+int rvu_mbox_handler_nix_txschq_cfg(struct rvu *rvu,
+				    struct nix_txschq_config *req,
+				    struct msg_rsp *rsp)
+{
+	u16 schq, pcifunc = req->hdr.pcifunc;
+	struct rvu_hwinfo *hw = rvu->hw;
+	u64 reg, regval, schq_regbase;
+	struct nix_txsch *txsch;
+	u16 map_func, map_flags;
+	struct nix_hw *nix_hw;
+	int blkaddr, idx, err;
+	u32 *pfvf_map;
+	int nixlf;
+
+	if (req->lvl >= NIX_TXSCH_LVL_CNT ||
+	    req->num_regs > MAX_REGS_PER_MBOX_MSG)
+		return NIX_AF_INVAL_TXSCHQ_CFG;
+
+	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
+	if (blkaddr < 0)
+		return NIX_AF_ERR_AF_LF_INVALID;
+
+	nix_hw = get_nix_hw(rvu->hw, blkaddr);
+	if (!nix_hw)
+		return -EINVAL;
+
+	nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
+	if (nixlf < 0)
+		return NIX_AF_ERR_AF_LF_INVALID;
+
+	txsch = &nix_hw->txsch[req->lvl];
+	pfvf_map = txsch->pfvf_map;
+
+	/* A VF is only allowed to trigger
+	 * the default config on TL1
+	 */
+	if (pcifunc & RVU_PFVF_FUNC_MASK &&
+	    req->lvl == NIX_TXSCH_LVL_TL1) {
+		return nix_tl1_default_cfg(rvu, pcifunc);
+	}
+
+	for (idx = 0; idx < req->num_regs; idx++) {
+		reg = req->reg[idx];
+		regval = req->regval[idx];
+		schq_regbase = reg & 0xFFFF;
+
+		if (!is_txschq_config_valid(rvu, pcifunc, blkaddr,
+					    txsch->lvl, reg, regval))
+			return NIX_AF_INVAL_TXSCHQ_CFG;
+
+		/* Replace PF/VF visible NIXLF slot with HW NIXLF id */
+		if (schq_regbase == NIX_AF_SMQX_CFG(0)) {
+			nixlf = rvu_get_lf(rvu, &hw->block[blkaddr],
+					   pcifunc, 0);
+			regval &= ~(0x7FULL << 24);
+			regval |= ((u64)nixlf << 24);
+		}
+
+		/* Mark config as done for TL1 by PF */
+		if (schq_regbase >= NIX_AF_TL1X_SCHEDULE(0) &&
+		    schq_regbase <= NIX_AF_TL1X_GREEN_BYTES(0)) {
+			schq = TXSCHQ_IDX(reg, TXSCHQ_IDX_SHIFT);
+
+			mutex_lock(&rvu->rsrc_lock);
+
+			map_func = TXSCH_MAP_FUNC(pfvf_map[schq]);
+			map_flags = TXSCH_MAP_FLAGS(pfvf_map[schq]);
+
+			map_flags |= NIX_TXSCHQ_TL1_CFG_DONE;
+			pfvf_map[schq] = TXSCH_MAP(map_func, map_flags);
+			mutex_unlock(&rvu->rsrc_lock);
+		}
+
+		rvu_write64(rvu, blkaddr, reg, regval);
+
+		/* Check for SMQ flush, if so, poll for its completion */
+		if (schq_regbase == NIX_AF_SMQX_CFG(0) &&
+		    (regval & BIT_ULL(49))) {
+			err = rvu_poll_reg(rvu, blkaddr,
+					   reg, BIT_ULL(49), true);
+			if (err)
+				return NIX_AF_SMQ_FLUSH_FAILED;
+		}
+	}
+	return 0;
+}
+
+static int nix_rx_vtag_cfg(struct rvu *rvu, int nixlf, int blkaddr,
+			   struct nix_vtag_config *req)
+{
+	u64 regval = req->vtag_size;
+
+	if (req->rx.vtag_type > 7 || req->vtag_size > VTAGSIZE_T8)
+		return -EINVAL;
+
+	if (req->rx.capture_vtag)
+		regval |= BIT_ULL(5);
+	if (req->rx.strip_vtag)
+		regval |= BIT_ULL(4);
+
+	rvu_write64(rvu, blkaddr,
+		    NIX_AF_LFX_RX_VTAG_TYPEX(nixlf, req->rx.vtag_type), regval);
+	return 0;
+}
+
+int rvu_mbox_handler_nix_vtag_cfg(struct rvu *rvu,
+				  struct nix_vtag_config *req,
+				  struct msg_rsp *rsp)
+{
+	struct rvu_hwinfo *hw = rvu->hw;
+	u16 pcifunc = req->hdr.pcifunc;
+	int blkaddr, nixlf, err;
+
+	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
+	if (blkaddr < 0)
+		return NIX_AF_ERR_AF_LF_INVALID;
+
+	nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
+	if (nixlf < 0)
+		return NIX_AF_ERR_AF_LF_INVALID;
+
+	if (req->cfg_type) {
+		err = nix_rx_vtag_cfg(rvu, nixlf, blkaddr, req);
+		if (err)
+			return NIX_AF_ERR_PARAM;
+	} else {
+		/* TODO: handle tx vtag configuration */
+		return 0;
+	}
+
+	return 0;
+}
+
+static int nix_setup_mce(struct rvu *rvu, int mce, u8 op,
+			 u16 pcifunc, int next, bool eol)
+{
+	struct nix_aq_enq_req aq_req;
+	int err;
+
+	aq_req.hdr.pcifunc = 0;
+	aq_req.ctype = NIX_AQ_CTYPE_MCE;
+	aq_req.op = op;
+	aq_req.qidx = mce;
+
+	/* Forward bcast pkts to RQ0, RSS not needed */
+	aq_req.mce.op = 0;
+	aq_req.mce.index = 0;
+	aq_req.mce.eol = eol;
+	aq_req.mce.pf_func = pcifunc;
+	aq_req.mce.next = next;
+
+	/* All fields valid */
+	*(u64 *)(&aq_req.mce_mask) = ~0ULL;
+
+	err = rvu_nix_aq_enq_inst(rvu, &aq_req, NULL);
+	if (err) {
+		dev_err(rvu->dev, "Failed to setup Bcast MCE for PF%d:VF%d\n",
+			rvu_get_pf(pcifunc), pcifunc & RVU_PFVF_FUNC_MASK);
+		return err;
+	}
+	return 0;
+}
+
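+/* Add a PF_FUNC to (or remove it from) a software MCE list. The list keeps
+ * insertion order; new entries are appended at the tail.
+ */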
+static int nix_update_mce_list(struct nix_mce_list *mce_list,
+			       u16 pcifunc, int idx, bool add)
+{
+	struct mce *mce, *tail = NULL;
+	bool delete = false;
+
+	/* Scan through the current list */
+	hlist_for_each_entry(mce, &mce_list->head, node) {
+		/* If already exists, then delete */
+		if (mce->pcifunc == pcifunc && !add) {
+			delete = true;
+			break;
+		}
+		tail = mce;
+	}
+
+	if (delete) {
+		hlist_del(&mce->node);
+		kfree(mce);
+		mce_list->count--;
+		return 0;
+	}
+
+	if (!add)
+		return 0;
+
+	/* Add a new one to the list, at the tail */
+	mce = kzalloc(sizeof(*mce), GFP_KERNEL);
+	if (!mce)
+		return -ENOMEM;
+	mce->idx = idx;
+	mce->pcifunc = pcifunc;
+	if (!tail)
+		hlist_add_head(&mce->node, &mce_list->head);
+	else
+		hlist_add_behind(&mce->node, &tail->node);
+	mce_list->count++;
+	return 0;
+}
+
+static int nix_update_bcast_mce_list(struct rvu *rvu, u16 pcifunc, bool add)
+{
+	int err = 0, idx, next_idx, count;
+	struct nix_mce_list *mce_list;
+	struct mce *mce, *next_mce;
+	struct nix_mcast *mcast;
+	struct nix_hw *nix_hw;
+	struct rvu_pfvf *pfvf;
+	int blkaddr;
+
+	/* Broadcast pkt replication is not needed for AF's VFs, hence skip */
+	if (is_afvf(pcifunc))
+		return 0;
+
+	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
+	if (blkaddr < 0)
+		return 0;
+
+	nix_hw = get_nix_hw(rvu->hw, blkaddr);
+	if (!nix_hw)
+		return 0;
+
+	mcast = &nix_hw->mcast;
+
+	/* Get this PF/VF func's MCE index */
+	pfvf = rvu_get_pfvf(rvu, pcifunc & ~RVU_PFVF_FUNC_MASK);
+	idx = pfvf->bcast_mce_idx + (pcifunc & RVU_PFVF_FUNC_MASK);
+
+	mce_list = &pfvf->bcast_mce_list;
+	if (idx > (pfvf->bcast_mce_idx + mce_list->max)) {
+		dev_err(rvu->dev,
+			"%s: Idx %d > max MCE idx %d, for PF%d bcast list\n",
+			__func__, idx, mce_list->max,
+			pcifunc >> RVU_PFVF_PF_SHIFT);
+		return -EINVAL;
+	}
+
+	mutex_lock(&mcast->mce_lock);
+
+	err = nix_update_mce_list(mce_list, pcifunc, idx, add);
+	if (err)
+		goto end;
+
+	/* Disable MCAM entry in NPC */
+
+	if (!mce_list->count)
+		goto end;
+	count = mce_list->count;
+
+	/* Dump the updated list to HW */
+	hlist_for_each_entry(mce, &mce_list->head, node) {
+		next_idx = 0;
+		count--;
+		if (count) {
+			next_mce = hlist_entry(mce->node.next,
+					       struct mce, node);
+			next_idx = next_mce->idx;
+		}
+		/* EOL should be set in last MCE */
+		err = nix_setup_mce(rvu, mce->idx,
+				    NIX_AQ_INSTOP_WRITE, mce->pcifunc,
+				    next_idx, count ? false : true);
+		if (err)
+			goto end;
+	}
+
+end:
+	mutex_unlock(&mcast->mce_lock);
+	return err;
+}
+
+static int nix_setup_bcast_tables(struct rvu *rvu, struct nix_hw *nix_hw)
+{
+	struct nix_mcast *mcast = &nix_hw->mcast;
+	int err, pf, numvfs, idx;
+	struct rvu_pfvf *pfvf;
+	u16 pcifunc;
+	u64 cfg;
+
+	/* Skip PF0 (i.e AF) */
+	for (pf = 1; pf < (rvu->cgx_mapped_pfs + 1); pf++) {
+		cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf));
+		/* If PF is not enabled, nothing to do */
+		if (!((cfg >> 20) & 0x01))
+			continue;
+		/* Get numVFs attached to this PF */
+		numvfs = (cfg >> 12) & 0xFF;
+
+		pfvf = &rvu->pf[pf];
+		/* Save the start MCE */
+		pfvf->bcast_mce_idx = nix_alloc_mce_list(mcast, numvfs + 1);
+
+		nix_mce_list_init(&pfvf->bcast_mce_list, numvfs + 1);
+
+		for (idx = 0; idx < (numvfs + 1); idx++) {
+			/* idx-0 is for PF, followed by VFs */
+			pcifunc = (pf << RVU_PFVF_PF_SHIFT);
+			pcifunc |= idx;
+			/* Add dummy entries now, so that we don't have to
+			 * check whether AQ_OP should be INIT/WRITE later on.
+			 * Will be updated when a NIXLF is attached/detached to
+			 * these PF/VFs.
+			 */
+			err = nix_setup_mce(rvu, pfvf->bcast_mce_idx + idx,
+					    NIX_AQ_INSTOP_INIT,
+					    pcifunc, 0, true);
+			if (err)
+				return err;
+		}
+	}
+	return 0;
+}
+
+static int nix_setup_mcast(struct rvu *rvu, struct nix_hw *nix_hw, int blkaddr)
+{
+	struct nix_mcast *mcast = &nix_hw->mcast;
+	struct rvu_hwinfo *hw = rvu->hw;
+	int err, size;
+
+	size = (rvu_read64(rvu, blkaddr, NIX_AF_CONST3) >> 16) & 0x0F;
+	size = (1ULL << size);
+
+	/* Alloc memory for multicast/mirror replication entries */
+	err = qmem_alloc(rvu->dev, &mcast->mce_ctx,
+			 (256UL << MC_TBL_SIZE), size);
+	if (err)
+		return -ENOMEM;
+
+	rvu_write64(rvu, blkaddr, NIX_AF_RX_MCAST_BASE,
+		    (u64)mcast->mce_ctx->iova);
+
+	/* Set max list length equal to max no of VFs per PF + PF itself */
+	rvu_write64(rvu, blkaddr, NIX_AF_RX_MCAST_CFG,
+		    BIT_ULL(36) | (hw->max_vfs_per_pf << 4) | MC_TBL_SIZE);
+
+	/* Alloc memory for multicast replication buffers */
+	size = rvu_read64(rvu, blkaddr, NIX_AF_MC_MIRROR_CONST) & 0xFFFF;
+	err = qmem_alloc(rvu->dev, &mcast->mcast_buf,
+			 (8UL << MC_BUF_CNT), size);
+	if (err)
+		return -ENOMEM;
+
+	rvu_write64(rvu, blkaddr, NIX_AF_RX_MCAST_BUF_BASE,
+		    (u64)mcast->mcast_buf->iova);
+
+	/* Alloc pkind for NIX internal RX multicast/mirror replay */
+	mcast->replay_pkind = rvu_alloc_rsrc(&hw->pkind.rsrc);
+
+	rvu_write64(rvu, blkaddr, NIX_AF_RX_MCAST_BUF_CFG,
+		    BIT_ULL(63) | (mcast->replay_pkind << 24) |
+		    BIT_ULL(20) | MC_BUF_CNT);
+
+	mutex_init(&mcast->mce_lock);
+
+	return nix_setup_bcast_tables(rvu, nix_hw);
+}
+
+static int nix_setup_txschq(struct rvu *rvu, struct nix_hw *nix_hw, int blkaddr)
+{
+	struct nix_txsch *txsch;
+	u64 cfg, reg;
+	int err, lvl;
+
+	/* Get scheduler queue count of each type and alloc
+	 * a bitmap for each, for alloc/free/attach operations.
+	 */
+	for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
+		txsch = &nix_hw->txsch[lvl];
+		txsch->lvl = lvl;
+		switch (lvl) {
+		case NIX_TXSCH_LVL_SMQ:
+			reg = NIX_AF_MDQ_CONST;
+			break;
+		case NIX_TXSCH_LVL_TL4:
+			reg = NIX_AF_TL4_CONST;
+			break;
+		case NIX_TXSCH_LVL_TL3:
+			reg = NIX_AF_TL3_CONST;
+			break;
+		case NIX_TXSCH_LVL_TL2:
+			reg = NIX_AF_TL2_CONST;
+			break;
+		case NIX_TXSCH_LVL_TL1:
+			reg = NIX_AF_TL1_CONST;
+			break;
+		}
+		cfg = rvu_read64(rvu, blkaddr, reg);
+		txsch->schq.max = cfg & 0xFFFF;
+		err = rvu_alloc_bitmap(&txsch->schq);
+		if (err)
+			return err;
+
+		/* Allocate memory for scheduler queues to
+		 * PF/VF pcifunc mapping info.
+		 */
+		txsch->pfvf_map = devm_kcalloc(rvu->dev, txsch->schq.max,
+					       sizeof(u32), GFP_KERNEL);
+		if (!txsch->pfvf_map)
+			return -ENOMEM;
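+		/* The U8_MAX fill leaves every map entry holding an invalid
+		 * pcifunc, i.e. no scheduler queue is owned by any PF/VF yet.
+		 */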
+		memset(txsch->pfvf_map, U8_MAX, txsch->schq.max * sizeof(u32));
+	}
+	return 0;
+}
+
+int rvu_nix_reserve_mark_format(struct rvu *rvu, struct nix_hw *nix_hw,
+				int blkaddr, u32 cfg)
+{
+	int fmt_idx;
+
+	for (fmt_idx = 0; fmt_idx < nix_hw->mark_format.in_use; fmt_idx++) {
+		if (nix_hw->mark_format.cfg[fmt_idx] == cfg)
+			return fmt_idx;
+	}
+	if (fmt_idx >= nix_hw->mark_format.total)
+		return -ERANGE;
+
+	rvu_write64(rvu, blkaddr, NIX_AF_MARK_FORMATX_CTL(fmt_idx), cfg);
+	nix_hw->mark_format.cfg[fmt_idx] = cfg;
+	nix_hw->mark_format.in_use++;
+	return fmt_idx;
+}
+
+static int nix_af_mark_format_setup(struct rvu *rvu, struct nix_hw *nix_hw,
+				    int blkaddr)
+{
+	u64 cfgs[] = {
+		[NIX_MARK_CFG_IP_DSCP_RED]         = 0x10003,
+		[NIX_MARK_CFG_IP_DSCP_YELLOW]      = 0x11200,
+		[NIX_MARK_CFG_IP_DSCP_YELLOW_RED]  = 0x11203,
+		[NIX_MARK_CFG_IP_ECN_RED]          = 0x6000c,
+		[NIX_MARK_CFG_IP_ECN_YELLOW]       = 0x60c00,
+		[NIX_MARK_CFG_IP_ECN_YELLOW_RED]   = 0x60c0c,
+		[NIX_MARK_CFG_VLAN_DEI_RED]        = 0x30008,
+		[NIX_MARK_CFG_VLAN_DEI_YELLOW]     = 0x30800,
+		[NIX_MARK_CFG_VLAN_DEI_YELLOW_RED] = 0x30808,
+	};
+	int i, rc;
+	u64 total;
+
+	total = (rvu_read64(rvu, blkaddr, NIX_AF_PSE_CONST) & 0xFF00) >> 8;
+	nix_hw->mark_format.total = (u8)total;
+	nix_hw->mark_format.cfg = devm_kcalloc(rvu->dev, total, sizeof(u32),
+					       GFP_KERNEL);
+	if (!nix_hw->mark_format.cfg)
+		return -ENOMEM;
+	for (i = 0; i < NIX_MARK_CFG_MAX; i++) {
+		rc = rvu_nix_reserve_mark_format(rvu, nix_hw, blkaddr, cfgs[i]);
+		if (rc < 0)
+			dev_err(rvu->dev, "Err %d in setup mark format %d\n",
+				rc, i);
+	}
+
+	return 0;
+}
+
+int rvu_mbox_handler_nix_stats_rst(struct rvu *rvu, struct msg_req *req,
+				   struct msg_rsp *rsp)
+{
+	struct rvu_hwinfo *hw = rvu->hw;
+	u16 pcifunc = req->hdr.pcifunc;
+	int i, nixlf, blkaddr;
+	u64 stats;
+
+	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
+	if (blkaddr < 0)
+		return NIX_AF_ERR_AF_LF_INVALID;
+
+	nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
+	if (nixlf < 0)
+		return NIX_AF_ERR_AF_LF_INVALID;
+
+	/* Get stats count supported by HW */
+	stats = rvu_read64(rvu, blkaddr, NIX_AF_CONST1);
+
+	/* Reset tx stats */
+	for (i = 0; i < ((stats >> 24) & 0xFF); i++)
+		rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_STATX(nixlf, i), 0);
+
+	/* Reset rx stats */
+	for (i = 0; i < ((stats >> 32) & 0xFF); i++)
+		rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_STATX(nixlf, i), 0);
+
+	return 0;
+}
+
+/* Returns the ALG index to be set into NPC_RX_ACTION */
+static int get_flowkey_alg_idx(struct nix_hw *nix_hw, u32 flow_cfg)
+{
+	int i;
+
+	/* Scan over existing algo entries to find a match */
+	for (i = 0; i < nix_hw->flowkey.in_use; i++)
+		if (nix_hw->flowkey.flowkey[i] == flow_cfg)
+			return i;
+
+	return -ERANGE;
+}
+
+static int set_flowkey_fields(struct nix_rx_flowkey_alg *alg, u32 flow_cfg)
+{
+	int idx, nr_field, key_off, field_marker, keyoff_marker;
+	int max_key_off, max_bit_pos, group_member;
+	struct nix_rx_flowkey_alg *field;
+	struct nix_rx_flowkey_alg tmp;
+	u32 key_type, valid_key;
+
+	if (!alg)
+		return -EINVAL;
+
+#define FIELDS_PER_ALG  5
+#define MAX_KEY_OFF	40
+	/* Clear all fields */
+	memset(alg, 0, sizeof(uint64_t) * FIELDS_PER_ALG);
+
+	/* Each of the 32 possible flow key algorithm definitions should
+	 * fall into the above incremental config (except ALG0). Otherwise a
+	 * single NPC MCAM entry is not sufficient for supporting RSS.
+	 *
+	 * If a different definition or combination is needed then the NPC
+	 * MCAM has to be programmed to filter such pkts and its action
+	 * should point to this definition to calculate flowtag or hash.
+	 *
+	 * The `for loop` goes over _all_ protocol fields and the following
+	 * variables depict the state machine forward progress logic.
+	 *
+	 * keyoff_marker - Enabled when hash byte length needs to be accounted
+	 * in field->key_offset update.
+	 * field_marker - Enabled when a new field needs to be selected.
+	 * group_member - Enabled when protocol is part of a group.
+	 */
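+	/* e.g. with flow_cfg = NIX_FLOW_KEY_TYPE_IPV4 | NIX_FLOW_KEY_TYPE_TCP
+	 * this ends up with field 0 holding IPv4 SIP+DIP (8 bytes at key
+	 * offset 0) and field 1 holding TCP sport+dport (4 bytes at key
+	 * offset 8). IPv4 and IPv6 share key offset 0 since only one of
+	 * them can match a given packet.
+	 */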
+
+	keyoff_marker = 0; max_key_off = 0; group_member = 0;
+	nr_field = 0; key_off = 0; field_marker = 1;
+	field = &tmp; max_bit_pos = fls(flow_cfg);
+	for (idx = 0;
+	     idx < max_bit_pos && nr_field < FIELDS_PER_ALG &&
+	     key_off < MAX_KEY_OFF; idx++) {
+		key_type = BIT(idx);
+		valid_key = flow_cfg & key_type;
+		/* Found a field marker, reset the field values */
+		if (field_marker)
+			memset(&tmp, 0, sizeof(tmp));
+
+		switch (key_type) {
+		case NIX_FLOW_KEY_TYPE_PORT:
+			field->sel_chan = true;
+			/* This should be set to 1 when SEL_CHAN is set */
+			field->bytesm1 = 1;
+			field_marker = true;
+			keyoff_marker = true;
+			break;
+		case NIX_FLOW_KEY_TYPE_IPV4:
+			field->lid = NPC_LID_LC;
+			field->ltype_match = NPC_LT_LC_IP;
+			field->hdr_offset = 12; /* SIP offset */
+			field->bytesm1 = 7; /* SIP + DIP, 8 bytes */
+			field->ltype_mask = 0xF; /* Match only IPv4 */
+			field_marker = true;
+			keyoff_marker = false;
+			break;
+		case NIX_FLOW_KEY_TYPE_IPV6:
+			field->lid = NPC_LID_LC;
+			field->ltype_match = NPC_LT_LC_IP6;
+			field->hdr_offset = 8; /* SIP offset */
+			field->bytesm1 = 31; /* SIP + DIP, 32 bytes */
+			field->ltype_mask = 0xF; /* Match only IPv6 */
+			field_marker = true;
+			keyoff_marker = true;
+			break;
+		case NIX_FLOW_KEY_TYPE_TCP:
+		case NIX_FLOW_KEY_TYPE_UDP:
+		case NIX_FLOW_KEY_TYPE_SCTP:
+			field->lid = NPC_LID_LD;
+			field->bytesm1 = 3; /* Sport + Dport, 4 bytes */
+			if (key_type == NIX_FLOW_KEY_TYPE_TCP && valid_key) {
+				field->ltype_match |= NPC_LT_LD_TCP;
+				group_member = true;
+			} else if (key_type == NIX_FLOW_KEY_TYPE_UDP &&
+				   valid_key) {
+				field->ltype_match |= NPC_LT_LD_UDP;
+				group_member = true;
+			} else if (key_type == NIX_FLOW_KEY_TYPE_SCTP &&
+				   valid_key) {
+				field->ltype_match |= NPC_LT_LD_SCTP;
+				group_member = true;
+			}
+			field->ltype_mask = ~field->ltype_match;
+			if (key_type == NIX_FLOW_KEY_TYPE_SCTP) {
+				/* Handle the case where any of the group items
+				 * is enabled in the group but not the final one
+				 */
+				if (group_member) {
+					valid_key = true;
+					group_member = false;
+				}
+				field_marker = true;
+				keyoff_marker = true;
+			} else {
+				field_marker = false;
+				keyoff_marker = false;
+			}
+			break;
+		}
+		field->ena = 1;
+
+		/* Found a valid flow key type */
+		if (valid_key) {
+			field->key_offset = key_off;
+			memcpy(&alg[nr_field], field, sizeof(*field));
+			max_key_off = max(max_key_off, field->bytesm1 + 1);
+
+			/* Found a field marker, get the next field */
+			if (field_marker)
+				nr_field++;
+		}
+
+		/* Found a keyoff marker, update the new key_off */
+		if (keyoff_marker) {
+			key_off += max_key_off;
+			max_key_off = 0;
+		}
+	}
+	/* Processed all the flow key types */
+	if (idx == max_bit_pos && key_off <= MAX_KEY_OFF)
+		return 0;
+	else
+		return NIX_AF_ERR_RSS_NOSPC_FIELD;
+}
+
+static int reserve_flowkey_alg_idx(struct rvu *rvu, int blkaddr, u32 flow_cfg)
+{
+	u64 field[FIELDS_PER_ALG];
+	struct nix_hw *hw;
+	int fid, rc;
+
+	hw = get_nix_hw(rvu->hw, blkaddr);
+	if (!hw)
+		return -EINVAL;
+
+	/* No room to add a new flow hash algorithm */
+	if (hw->flowkey.in_use >= NIX_FLOW_KEY_ALG_MAX)
+		return NIX_AF_ERR_RSS_NOSPC_ALGO;
+
+	/* Generate algo fields for the given flow_cfg */
+	rc = set_flowkey_fields((struct nix_rx_flowkey_alg *)field, flow_cfg);
+	if (rc)
+		return rc;
+
+	/* Update ALGX_FIELDX register with generated fields */
+	for (fid = 0; fid < FIELDS_PER_ALG; fid++)
+		rvu_write64(rvu, blkaddr,
+			    NIX_AF_RX_FLOW_KEY_ALGX_FIELDX(hw->flowkey.in_use,
+							   fid), field[fid]);
+
+	/* Store the flow_cfg for further lookup */
+	rc = hw->flowkey.in_use;
+	hw->flowkey.flowkey[rc] = flow_cfg;
+	hw->flowkey.in_use++;
+
+	return rc;
+}
+
+int rvu_mbox_handler_nix_rss_flowkey_cfg(struct rvu *rvu,
+					 struct nix_rss_flowkey_cfg *req,
+					 struct nix_rss_flowkey_cfg_rsp *rsp)
+{
+	struct rvu_hwinfo *hw = rvu->hw;
+	u16 pcifunc = req->hdr.pcifunc;
+	int alg_idx, nixlf, blkaddr;
+	struct nix_hw *nix_hw;
+
+	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
+	if (blkaddr < 0)
+		return NIX_AF_ERR_AF_LF_INVALID;
+
+	nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
+	if (nixlf < 0)
+		return NIX_AF_ERR_AF_LF_INVALID;
+
+	nix_hw = get_nix_hw(rvu->hw, blkaddr);
+	if (!nix_hw)
+		return -EINVAL;
+
+	alg_idx = get_flowkey_alg_idx(nix_hw, req->flowkey_cfg);
+	/* Failed to get algo index from the existing list, reserve a new one */
+	if (alg_idx < 0) {
+		alg_idx = reserve_flowkey_alg_idx(rvu, blkaddr,
+						  req->flowkey_cfg);
+		if (alg_idx < 0)
+			return alg_idx;
+	}
+	rsp->alg_idx = alg_idx;
+	rvu_npc_update_flowkey_alg_idx(rvu, pcifunc, nixlf, req->group,
+				       alg_idx, req->mcam_index);
+	return 0;
+}
+
+static int nix_rx_flowkey_alg_cfg(struct rvu *rvu, int blkaddr)
+{
+	u32 flowkey_cfg, minkey_cfg;
+	int alg, fid, rc;
+
+	/* Disable all flow key algx fieldx */
+	for (alg = 0; alg < NIX_FLOW_KEY_ALG_MAX; alg++) {
+		for (fid = 0; fid < FIELDS_PER_ALG; fid++)
+			rvu_write64(rvu, blkaddr,
+				    NIX_AF_RX_FLOW_KEY_ALGX_FIELDX(alg, fid),
+				    0);
+	}
+
+	/* IPv4/IPv6 SIP/DIPs */
+	flowkey_cfg = NIX_FLOW_KEY_TYPE_IPV4 | NIX_FLOW_KEY_TYPE_IPV6;
+	rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
+	if (rc < 0)
+		return rc;
+
+	/* TCPv4/v6 4-tuple, SIP, DIP, Sport, Dport */
+	minkey_cfg = flowkey_cfg;
+	flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_TCP;
+	rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
+	if (rc < 0)
+		return rc;
+
+	/* UDPv4/v6 4-tuple, SIP, DIP, Sport, Dport */
+	flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_UDP;
+	rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
+	if (rc < 0)
+		return rc;
+
+	/* SCTPv4/v6 4-tuple, SIP, DIP, Sport, Dport */
+	flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_SCTP;
+	rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
+	if (rc < 0)
+		return rc;
+
+	/* TCP/UDP v4/v6 4-tuple, rest IP pkts 2-tuple */
+	flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_TCP |
+			NIX_FLOW_KEY_TYPE_UDP;
+	rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
+	if (rc < 0)
+		return rc;
+
+	/* TCP/SCTP v4/v6 4-tuple, rest IP pkts 2-tuple */
+	flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_TCP |
+			NIX_FLOW_KEY_TYPE_SCTP;
+	rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
+	if (rc < 0)
+		return rc;
+
+	/* UDP/SCTP v4/v6 4-tuple, rest IP pkts 2-tuple */
+	flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_UDP |
+			NIX_FLOW_KEY_TYPE_SCTP;
+	rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
+	if (rc < 0)
+		return rc;
+
+	/* TCP/UDP/SCTP v4/v6 4-tuple, rest IP pkts 2-tuple */
+	flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_TCP |
+		      NIX_FLOW_KEY_TYPE_UDP | NIX_FLOW_KEY_TYPE_SCTP;
+	rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
+	if (rc < 0)
+		return rc;
+
+	return 0;
+}
+
+int rvu_mbox_handler_nix_set_mac_addr(struct rvu *rvu,
+				      struct nix_set_mac_addr *req,
+				      struct msg_rsp *rsp)
+{
+	struct rvu_hwinfo *hw = rvu->hw;
+	u16 pcifunc = req->hdr.pcifunc;
+	struct rvu_pfvf *pfvf;
+	int blkaddr, nixlf;
+
+	pfvf = rvu_get_pfvf(rvu, pcifunc);
+	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
+	if (!pfvf->nixlf || blkaddr < 0)
+		return NIX_AF_ERR_AF_LF_INVALID;
+
+	nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
+	if (nixlf < 0)
+		return NIX_AF_ERR_AF_LF_INVALID;
+
+	ether_addr_copy(pfvf->mac_addr, req->mac_addr);
+
+	rvu_npc_install_ucast_entry(rvu, pcifunc, nixlf,
+				    pfvf->rx_chan_base, req->mac_addr);
+
+	rvu_npc_update_rxvlan(rvu, pcifunc, nixlf);
+
+	return 0;
+}
+
+int rvu_mbox_handler_nix_set_rx_mode(struct rvu *rvu, struct nix_rx_mode *req,
+				     struct msg_rsp *rsp)
+{
+	bool allmulti = false, disable_promisc = false;
+	struct rvu_hwinfo *hw = rvu->hw;
+	u16 pcifunc = req->hdr.pcifunc;
+	struct rvu_pfvf *pfvf;
+	int blkaddr, nixlf;
+
+	pfvf = rvu_get_pfvf(rvu, pcifunc);
+	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
+	if (!pfvf->nixlf || blkaddr < 0)
+		return NIX_AF_ERR_AF_LF_INVALID;
+
+	nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
+	if (nixlf < 0)
+		return NIX_AF_ERR_AF_LF_INVALID;
+
+	if (req->mode & NIX_RX_MODE_PROMISC)
+		allmulti = false;
+	else if (req->mode & NIX_RX_MODE_ALLMULTI)
+		allmulti = true;
+	else
+		disable_promisc = true;
+
+	if (disable_promisc)
+		rvu_npc_disable_promisc_entry(rvu, pcifunc, nixlf);
+	else
+		rvu_npc_install_promisc_entry(rvu, pcifunc, nixlf,
+					      pfvf->rx_chan_base, allmulti);
+
+	rvu_npc_update_rxvlan(rvu, pcifunc, nixlf);
+
+	return 0;
+}
+
+static void nix_find_link_frs(struct rvu *rvu,
+			      struct nix_frs_cfg *req, u16 pcifunc)
+{
+	int pf = rvu_get_pf(pcifunc);
+	struct rvu_pfvf *pfvf;
+	int maxlen, minlen;
+	int numvfs, hwvf;
+	int vf;
+
+	/* Update with requester's min/max lengths */
+	pfvf = rvu_get_pfvf(rvu, pcifunc);
+	pfvf->maxlen = req->maxlen;
+	if (req->update_minlen)
+		pfvf->minlen = req->minlen;
+
+	maxlen = req->maxlen;
+	minlen = req->update_minlen ? req->minlen : 0;
+
+	/* Get this PF's numVFs and starting hwvf */
+	rvu_get_pf_numvfs(rvu, pf, &numvfs, &hwvf);
+
+	/* For each VF, compare requested max/minlen */
+	for (vf = 0; vf < numvfs; vf++) {
+		pfvf =  &rvu->hwvf[hwvf + vf];
+		if (pfvf->maxlen > maxlen)
+			maxlen = pfvf->maxlen;
+		if (req->update_minlen &&
+		    pfvf->minlen && pfvf->minlen < minlen)
+			minlen = pfvf->minlen;
+	}
+
+	/* Compare requested max/minlen with PF's max/minlen */
+	pfvf = &rvu->pf[pf];
+	if (pfvf->maxlen > maxlen)
+		maxlen = pfvf->maxlen;
+	if (req->update_minlen &&
+	    pfvf->minlen && pfvf->minlen < minlen)
+		minlen = pfvf->minlen;
+
+	/* Update the request with the max/min of the PF and its VFs */
+	req->maxlen = maxlen;
+	if (req->update_minlen)
+		req->minlen = minlen;
+}
+
+int rvu_mbox_handler_nix_set_hw_frs(struct rvu *rvu, struct nix_frs_cfg *req,
+				    struct msg_rsp *rsp)
+{
+	struct rvu_hwinfo *hw = rvu->hw;
+	u16 pcifunc = req->hdr.pcifunc;
+	int pf = rvu_get_pf(pcifunc);
+	int blkaddr, schq, link = -1;
+	struct nix_txsch *txsch;
+	u64 cfg, lmac_fifo_len;
+	struct nix_hw *nix_hw;
+	u8 cgx = 0, lmac = 0;
+
+	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
+	if (blkaddr < 0)
+		return NIX_AF_ERR_AF_LF_INVALID;
+
+	nix_hw = get_nix_hw(rvu->hw, blkaddr);
+	if (!nix_hw)
+		return -EINVAL;
+
+	if (!req->sdp_link && req->maxlen > NIC_HW_MAX_FRS)
+		return NIX_AF_ERR_FRS_INVALID;
+
+	if (req->update_minlen && req->minlen < NIC_HW_MIN_FRS)
+		return NIX_AF_ERR_FRS_INVALID;
+
+	/* Check if requester wants to update SMQs */
+	if (!req->update_smq)
+		goto rx_frscfg;
+
+	/* Update min/maxlen in each of the SMQ attached to this PF/VF */
+	txsch = &nix_hw->txsch[NIX_TXSCH_LVL_SMQ];
+	mutex_lock(&rvu->rsrc_lock);
+	for (schq = 0; schq < txsch->schq.max; schq++) {
+		if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) != pcifunc)
+			continue;
+		cfg = rvu_read64(rvu, blkaddr, NIX_AF_SMQX_CFG(schq));
+		cfg = (cfg & ~(0xFFFFULL << 8)) | ((u64)req->maxlen << 8);
+		if (req->update_minlen)
+			cfg = (cfg & ~0x7FULL) | ((u64)req->minlen & 0x7F);
+		rvu_write64(rvu, blkaddr, NIX_AF_SMQX_CFG(schq), cfg);
+	}
+	mutex_unlock(&rvu->rsrc_lock);
+
+rx_frscfg:
+	/* Check if config is for SDP link */
+	if (req->sdp_link) {
+		if (!hw->sdp_links)
+			return NIX_AF_ERR_RX_LINK_INVALID;
+		link = hw->cgx_links + hw->lbk_links;
+		goto linkcfg;
+	}
+
+	/* Check if the request is from CGX mapped RVU PF */
+	if (is_pf_cgxmapped(rvu, pf)) {
+		/* Get CGX and LMAC to which this PF is mapped and find link */
+		rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx, &lmac);
+		link = (cgx * hw->lmac_per_cgx) + lmac;
+	} else if (pf == 0) {
+		/* For VFs of PF0 ingress is LBK port, so config LBK link */
+		link = hw->cgx_links;
+	}
+
+	if (link < 0)
+		return NIX_AF_ERR_RX_LINK_INVALID;
+
+	nix_find_link_frs(rvu, req, pcifunc);
+
+linkcfg:
+	cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link));
+	cfg = (cfg & ~(0xFFFFULL << 16)) | ((u64)req->maxlen << 16);
+	if (req->update_minlen)
+		cfg = (cfg & ~0xFFFFULL) | req->minlen;
+	rvu_write64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link), cfg);
+
+	if (req->sdp_link || pf == 0)
+		return 0;
+
+	/* Update transmit credits for CGX links */
+	lmac_fifo_len =
+		CGX_FIFO_LEN / cgx_get_lmac_cnt(rvu_cgx_pdata(cgx, rvu));
+	cfg = rvu_read64(rvu, blkaddr, NIX_AF_TX_LINKX_NORM_CREDIT(link));
+	cfg &= ~(0xFFFFFULL << 12);
+	cfg |=  ((lmac_fifo_len - req->maxlen) / 16) << 12;
+	rvu_write64(rvu, blkaddr, NIX_AF_TX_LINKX_NORM_CREDIT(link), cfg);
+	rvu_write64(rvu, blkaddr, NIX_AF_TX_LINKX_EXPR_CREDIT(link), cfg);
+
+	return 0;
+}
+
+int rvu_mbox_handler_nix_rxvlan_alloc(struct rvu *rvu, struct msg_req *req,
+				      struct msg_rsp *rsp)
+{
+	struct npc_mcam_alloc_entry_req alloc_req = { };
+	struct npc_mcam_alloc_entry_rsp alloc_rsp = { };
+	struct npc_mcam_free_entry_req free_req = { };
+	u16 pcifunc = req->hdr.pcifunc;
+	int blkaddr, nixlf, err;
+	struct rvu_pfvf *pfvf;
+
+	/* LBK VFs do not have a separate MCAM UCAST entry, hence
+	 * skip allocating rxvlan for them
+	 */
+	if (is_afvf(pcifunc))
+		return 0;
+
+	pfvf = rvu_get_pfvf(rvu, pcifunc);
+	if (pfvf->rxvlan)
+		return 0;
+
+	/* alloc new mcam entry */
+	alloc_req.hdr.pcifunc = pcifunc;
+	alloc_req.count = 1;
+
+	err = rvu_mbox_handler_npc_mcam_alloc_entry(rvu, &alloc_req,
+						    &alloc_rsp);
+	if (err)
+		return err;
+
+	/* update entry to enable rxvlan offload */
+	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
+	if (blkaddr < 0) {
+		err = NIX_AF_ERR_AF_LF_INVALID;
+		goto free_entry;
+	}
+
+	nixlf = rvu_get_lf(rvu, &rvu->hw->block[blkaddr], pcifunc, 0);
+	if (nixlf < 0) {
+		err = NIX_AF_ERR_AF_LF_INVALID;
+		goto free_entry;
+	}
+
+	pfvf->rxvlan_index = alloc_rsp.entry_list[0];
+	/* all it means is that rxvlan_index is valid */
+	pfvf->rxvlan = true;
+
+	err = rvu_npc_update_rxvlan(rvu, pcifunc, nixlf);
+	if (err)
+		goto free_entry;
+
+	return 0;
+free_entry:
+	free_req.hdr.pcifunc = pcifunc;
+	free_req.entry = alloc_rsp.entry_list[0];
+	rvu_mbox_handler_npc_mcam_free_entry(rvu, &free_req, rsp);
+	pfvf->rxvlan = false;
+	return err;
+}
+
+int rvu_mbox_handler_nix_set_rx_cfg(struct rvu *rvu, struct nix_rx_cfg *req,
+				    struct msg_rsp *rsp)
+{
+	struct rvu_hwinfo *hw = rvu->hw;
+	u16 pcifunc = req->hdr.pcifunc;
+	struct rvu_block *block;
+	struct rvu_pfvf *pfvf;
+	int nixlf, blkaddr;
+	u64 cfg;
+
+	pfvf = rvu_get_pfvf(rvu, pcifunc);
+	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
+	if (!pfvf->nixlf || blkaddr < 0)
+		return NIX_AF_ERR_AF_LF_INVALID;
+
+	block = &hw->block[blkaddr];
+	nixlf = rvu_get_lf(rvu, block, pcifunc, 0);
+	if (nixlf < 0)
+		return NIX_AF_ERR_AF_LF_INVALID;
+
+	cfg = rvu_read64(rvu, blkaddr, NIX_AF_LFX_RX_CFG(nixlf));
+	/* Set the interface configuration */
+	if (req->len_verify & BIT(0))
+		cfg |= BIT_ULL(41);
+	else
+		cfg &= ~BIT_ULL(41);
+
+	if (req->len_verify & BIT(1))
+		cfg |= BIT_ULL(40);
+	else
+		cfg &= ~BIT_ULL(40);
+
+	if (req->csum_verify & BIT(0))
+		cfg |= BIT_ULL(37);
+	else
+		cfg &= ~BIT_ULL(37);
+
+	rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_CFG(nixlf), cfg);
+
+	return 0;
+}
+
+static void nix_link_config(struct rvu *rvu, int blkaddr)
+{
+	struct rvu_hwinfo *hw = rvu->hw;
+	int cgx, lmac_cnt, slink, link;
+	u64 tx_credits;
+
+	/* Set default min/max packet lengths allowed on NIX Rx links.
+	 *
+	 * With the HW reset minlen value of 60 bytes, HW will treat ARP
+	 * pkts as undersize and report them to SW as error pkts, hence
+	 * setting it to 40 bytes.
+	 */
+	for (link = 0; link < (hw->cgx_links + hw->lbk_links); link++) {
+		rvu_write64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link),
+			    NIC_HW_MAX_FRS << 16 | NIC_HW_MIN_FRS);
+	}
+
+	if (hw->sdp_links) {
+		link = hw->cgx_links + hw->lbk_links;
+		rvu_write64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link),
+			    SDP_HW_MAX_FRS << 16 | NIC_HW_MIN_FRS);
+	}
+
+	/* Set credits for Tx links assuming max packet length allowed.
+	 * This will be reconfigured based on MTU set for PF/VF.
+	 */
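+	/* Going by the shifts used here and in nix_set_hw_frs: bit 1
+	 * enables credits, bits <10:2> hold the packet-count credit
+	 * (set to max here) and bits <31:12> hold the byte credit count
+	 * in units of 16 bytes.
+	 */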
+	for (cgx = 0; cgx < hw->cgx; cgx++) {
+		lmac_cnt = cgx_get_lmac_cnt(rvu_cgx_pdata(cgx, rvu));
+		tx_credits = ((CGX_FIFO_LEN / lmac_cnt) - NIC_HW_MAX_FRS) / 16;
+		/* Enable credits and set credit pkt count to max allowed */
+		tx_credits =  (tx_credits << 12) | (0x1FF << 2) | BIT_ULL(1);
+		slink = cgx * hw->lmac_per_cgx;
+		for (link = slink; link < (slink + lmac_cnt); link++) {
+			rvu_write64(rvu, blkaddr,
+				    NIX_AF_TX_LINKX_NORM_CREDIT(link),
+				    tx_credits);
+			rvu_write64(rvu, blkaddr,
+				    NIX_AF_TX_LINKX_EXPR_CREDIT(link),
+				    tx_credits);
+		}
+	}
+
+	/* Set Tx credits for LBK link */
+	slink = hw->cgx_links;
+	for (link = slink; link < (slink + hw->lbk_links); link++) {
+		tx_credits = 1000; /* 10 * max LBK datarate = 10 * 100Gbps */
+		/* Enable credits and set credit pkt count to max allowed */
+		tx_credits =  (tx_credits << 12) | (0x1FF << 2) | BIT_ULL(1);
+		rvu_write64(rvu, blkaddr,
+			    NIX_AF_TX_LINKX_NORM_CREDIT(link), tx_credits);
+		rvu_write64(rvu, blkaddr,
+			    NIX_AF_TX_LINKX_EXPR_CREDIT(link), tx_credits);
+	}
+}
+
+static int nix_calibrate_x2p(struct rvu *rvu, int blkaddr)
+{
+	int idx, err;
+	u64 status;
+
+	/* Start X2P bus calibration */
+	rvu_write64(rvu, blkaddr, NIX_AF_CFG,
+		    rvu_read64(rvu, blkaddr, NIX_AF_CFG) | BIT_ULL(9));
+	/* Wait for calibration to complete */
+	err = rvu_poll_reg(rvu, blkaddr,
+			   NIX_AF_STATUS, BIT_ULL(10), false);
+	if (err) {
+		dev_err(rvu->dev, "NIX X2P bus calibration failed\n");
+		return err;
+	}
+
+	status = rvu_read64(rvu, blkaddr, NIX_AF_STATUS);
+	/* Check if CGX devices are ready */
+	for (idx = 0; idx < rvu->cgx_cnt_max; idx++) {
+		/* Skip when cgx port is not available */
+		if (!rvu_cgx_pdata(idx, rvu) ||
+		    (status & (BIT_ULL(16 + idx))))
+			continue;
+		dev_err(rvu->dev,
+			"CGX%d didn't respond to NIX X2P calibration\n", idx);
+		err = -EBUSY;
+	}
+
+	/* Check if LBK is ready */
+	if (!(status & BIT_ULL(19))) {
+		dev_err(rvu->dev,
+			"LBK didn't respond to NIX X2P calibration\n");
+		err = -EBUSY;
+	}
+
+	/* Clear 'calibrate_x2p' bit */
+	rvu_write64(rvu, blkaddr, NIX_AF_CFG,
+		    rvu_read64(rvu, blkaddr, NIX_AF_CFG) & ~BIT_ULL(9));
+	if (err || (status & 0x3FFULL))
+		dev_err(rvu->dev,
+			"NIX X2P calibration failed, status 0x%llx\n", status);
+	if (err)
+		return err;
+	return 0;
+}
+
+static int nix_aq_init(struct rvu *rvu, struct rvu_block *block)
+{
+	u64 cfg;
+	int err;
+
+	/* Set admin queue endianness */
+	cfg = rvu_read64(rvu, block->addr, NIX_AF_CFG);
+#ifdef __BIG_ENDIAN
+	cfg |= BIT_ULL(8);
+	rvu_write64(rvu, block->addr, NIX_AF_CFG, cfg);
+#else
+	cfg &= ~BIT_ULL(8);
+	rvu_write64(rvu, block->addr, NIX_AF_CFG, cfg);
+#endif
+
+	/* Do not bypass NDC cache */
+	cfg = rvu_read64(rvu, block->addr, NIX_AF_NDC_CFG);
+	cfg &= ~0x3FFEULL;
+	rvu_write64(rvu, block->addr, NIX_AF_NDC_CFG, cfg);
+
+	/* Result structure can be followed by RQ/SQ/CQ context at
+	 * RES + 128 bytes and a write mask at RES + 256 bytes, depending on
+	 * operation type. Alloc sufficient result memory for all operations.
+	 */
+	err = rvu_aq_alloc(rvu, &block->aq,
+			   Q_COUNT(AQ_SIZE), sizeof(struct nix_aq_inst_s),
+			   ALIGN(sizeof(struct nix_aq_res_s), 128) + 256);
+	if (err)
+		return err;
+
+	rvu_write64(rvu, block->addr, NIX_AF_AQ_CFG, AQ_SIZE);
+	rvu_write64(rvu, block->addr,
+		    NIX_AF_AQ_BASE, (u64)block->aq->inst->iova);
+	return 0;
+}
+
+int rvu_nix_init(struct rvu *rvu)
+{
+	struct rvu_hwinfo *hw = rvu->hw;
+	struct rvu_block *block;
+	int blkaddr, err;
+	u64 cfg;
+
+	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0);
+	if (blkaddr < 0)
+		return 0;
+	block = &hw->block[blkaddr];
+
+	/* As per a HW erratum in 9xxx A0 silicon, NIX may corrupt
+	 * internal state when conditional clocks are turned off.
+	 * Hence enable them.
+	 */
+	if (is_rvu_9xxx_A0(rvu))
+		rvu_write64(rvu, blkaddr, NIX_AF_CFG,
+			    rvu_read64(rvu, blkaddr, NIX_AF_CFG) | 0x5EULL);
+
+	/* Calibrate X2P bus to check if CGX/LBK links are fine */
+	err = nix_calibrate_x2p(rvu, blkaddr);
+	if (err)
+		return err;
+
+	/* Set num of links of each type */
+	cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST);
+	hw->cgx = (cfg >> 12) & 0xF;
+	hw->lmac_per_cgx = (cfg >> 8) & 0xF;
+	hw->cgx_links = hw->cgx * hw->lmac_per_cgx;
+	hw->lbk_links = 1;
+	hw->sdp_links = 1;
+
+	/* Initialize admin queue */
+	err = nix_aq_init(rvu, block);
+	if (err)
+		return err;
+
+	/* Restore CINT timer delay to HW reset values */
+	rvu_write64(rvu, blkaddr, NIX_AF_CINT_DELAY, 0x0ULL);
+
+	if (blkaddr == BLKADDR_NIX0) {
+		hw->nix0 = devm_kzalloc(rvu->dev,
+					sizeof(struct nix_hw), GFP_KERNEL);
+		if (!hw->nix0)
+			return -ENOMEM;
+
+		err = nix_setup_txschq(rvu, hw->nix0, blkaddr);
+		if (err)
+			return err;
+
+		err = nix_af_mark_format_setup(rvu, hw->nix0, blkaddr);
+		if (err)
+			return err;
+
+		err = nix_setup_mcast(rvu, hw->nix0, blkaddr);
+		if (err)
+			return err;
+
+		/* Configure segmentation offload formats */
+		nix_setup_lso(rvu, hw->nix0, blkaddr);
+
+		/* Config Outer/Inner L2, IP, TCP, UDP and SCTP NPC layer info.
+		 * This helps HW protocol checker to identify headers
+		 * and validate length and checksums.
+		 */
+		rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OL2,
+			    (NPC_LID_LA << 8) | (NPC_LT_LA_ETHER << 4) | 0x0F);
+		rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OIP4,
+			    (NPC_LID_LC << 8) | (NPC_LT_LC_IP << 4) | 0x0F);
+		rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_IIP4,
+			    (NPC_LID_LF << 8) | (NPC_LT_LF_TU_IP << 4) | 0x0F);
+		rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OIP6,
+			    (NPC_LID_LC << 8) | (NPC_LT_LC_IP6 << 4) | 0x0F);
+		rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_IIP6,
+			    (NPC_LID_LF << 8) | (NPC_LT_LF_TU_IP6 << 4) | 0x0F);
+		rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OTCP,
+			    (NPC_LID_LD << 8) | (NPC_LT_LD_TCP << 4) | 0x0F);
+		rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_ITCP,
+			    (NPC_LID_LG << 8) | (NPC_LT_LG_TU_TCP << 4) | 0x0F);
+		rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OUDP,
+			    (NPC_LID_LD << 8) | (NPC_LT_LD_UDP << 4) | 0x0F);
+		rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_IUDP,
+			    (NPC_LID_LG << 8) | (NPC_LT_LG_TU_UDP << 4) | 0x0F);
+		rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OSCTP,
+			    (NPC_LID_LD << 8) | (NPC_LT_LD_SCTP << 4) | 0x0F);
+		rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_ISCTP,
+			    (NPC_LID_LG << 8) | (NPC_LT_LG_TU_SCTP << 4) |
+			    0x0F);
+
+		err = nix_rx_flowkey_alg_cfg(rvu, blkaddr);
+		if (err)
+			return err;
+
+		/* Initialize CGX/LBK/SDP link credits, min/max pkt lengths */
+		nix_link_config(rvu, blkaddr);
+	}
+	return 0;
+}
+
+void rvu_nix_freemem(struct rvu *rvu)
+{
+	struct rvu_hwinfo *hw = rvu->hw;
+	struct rvu_block *block;
+	struct nix_txsch *txsch;
+	struct nix_mcast *mcast;
+	struct nix_hw *nix_hw;
+	int blkaddr, lvl;
+
+	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0);
+	if (blkaddr < 0)
+		return;
+
+	block = &hw->block[blkaddr];
+	rvu_aq_free(rvu, block->aq);
+
+	if (blkaddr == BLKADDR_NIX0) {
+		nix_hw = get_nix_hw(rvu->hw, blkaddr);
+		if (!nix_hw)
+			return;
+
+		for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
+			txsch = &nix_hw->txsch[lvl];
+			kfree(txsch->schq.bmap);
+		}
+
+		mcast = &nix_hw->mcast;
+		qmem_free(rvu->dev, mcast->mce_ctx);
+		qmem_free(rvu->dev, mcast->mcast_buf);
+		mutex_destroy(&mcast->mce_lock);
+	}
+}
+
+static int nix_get_nixlf(struct rvu *rvu, u16 pcifunc, int *nixlf)
+{
+	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
+	struct rvu_hwinfo *hw = rvu->hw;
+	int blkaddr;
+
+	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
+	if (!pfvf->nixlf || blkaddr < 0)
+		return NIX_AF_ERR_AF_LF_INVALID;
+
+	*nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
+	if (*nixlf < 0)
+		return NIX_AF_ERR_AF_LF_INVALID;
+
+	return 0;
+}
+
+int rvu_mbox_handler_nix_lf_start_rx(struct rvu *rvu, struct msg_req *req,
+				     struct msg_rsp *rsp)
+{
+	u16 pcifunc = req->hdr.pcifunc;
+	int nixlf, err;
+
+	err = nix_get_nixlf(rvu, pcifunc, &nixlf);
+	if (err)
+		return err;
+
+	rvu_npc_enable_default_entries(rvu, pcifunc, nixlf);
+	return 0;
+}
+
+int rvu_mbox_handler_nix_lf_stop_rx(struct rvu *rvu, struct msg_req *req,
+				    struct msg_rsp *rsp)
+{
+	u16 pcifunc = req->hdr.pcifunc;
+	int nixlf, err;
+
+	err = nix_get_nixlf(rvu, pcifunc, &nixlf);
+	if (err)
+		return err;
+
+	rvu_npc_disable_default_entries(rvu, pcifunc, nixlf);
+	return 0;
+}
+
+void rvu_nix_lf_teardown(struct rvu *rvu, u16 pcifunc, int blkaddr, int nixlf)
+{
+	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
+	struct hwctx_disable_req ctx_req;
+	int err;
+
+	ctx_req.hdr.pcifunc = pcifunc;
+
+	/* Cleanup NPC MCAM entries, free Tx scheduler queues being used */
+	nix_interface_deinit(rvu, pcifunc, nixlf);
+	nix_rx_sync(rvu, blkaddr);
+	nix_txschq_free(rvu, pcifunc);
+
+	if (pfvf->sq_ctx) {
+		ctx_req.ctype = NIX_AQ_CTYPE_SQ;
+		err = nix_lf_hwctx_disable(rvu, &ctx_req);
+		if (err)
+			dev_err(rvu->dev, "SQ ctx disable failed\n");
+	}
+
+	if (pfvf->rq_ctx) {
+		ctx_req.ctype = NIX_AQ_CTYPE_RQ;
+		err = nix_lf_hwctx_disable(rvu, &ctx_req);
+		if (err)
+			dev_err(rvu->dev, "RQ ctx disable failed\n");
+	}
+
+	if (pfvf->cq_ctx) {
+		ctx_req.ctype = NIX_AQ_CTYPE_CQ;
+		err = nix_lf_hwctx_disable(rvu, &ctx_req);
+		if (err)
+			dev_err(rvu->dev, "CQ ctx disable failed\n");
+	}
+
+	nix_ctx_free(rvu, pfvf);
+}
+
+int rvu_mbox_handler_nix_lso_format_cfg(struct rvu *rvu,
+					struct nix_lso_format_cfg *req,
+					struct nix_lso_format_cfg_rsp *rsp)
+{
+	u16 pcifunc = req->hdr.pcifunc;
+	struct nix_hw *nix_hw;
+	struct rvu_pfvf *pfvf;
+	int blkaddr, idx, f;
+	u64 reg;
+
+	pfvf = rvu_get_pfvf(rvu, pcifunc);
+	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
+	if (!pfvf->nixlf || blkaddr < 0)
+		return NIX_AF_ERR_AF_LF_INVALID;
+
+	nix_hw = get_nix_hw(rvu->hw, blkaddr);
+	if (!nix_hw)
+		return -EINVAL;
+
+	/* Find existing matching LSO format, if any */
+	for (idx = 0; idx < nix_hw->lso.in_use; idx++) {
+		for (f = 0; f < NIX_LSO_FIELD_MAX; f++) {
+			reg = rvu_read64(rvu, blkaddr,
+					 NIX_AF_LSO_FORMATX_FIELDX(idx, f));
+			if (req->fields[f] != (reg & req->field_mask))
+				break;
+		}
+
+		if (f == NIX_LSO_FIELD_MAX)
+			break;
+	}
+
+	if (idx < nix_hw->lso.in_use) {
+		/* Match found */
+		rsp->lso_format_idx = idx;
+		return 0;
+	}
+
+	if (nix_hw->lso.in_use == nix_hw->lso.total)
+		return NIX_AF_ERR_LSO_CFG_FAIL;
+
+	rsp->lso_format_idx = nix_hw->lso.in_use++;
+
+	for (f = 0; f < NIX_LSO_FIELD_MAX; f++)
+		rvu_write64(rvu, blkaddr,
+			    NIX_AF_LSO_FORMATX_FIELDX(rsp->lso_format_idx, f),
+			    req->fields[f]);
+
+	return 0;
+}
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c
new file mode 100644
index 0000000..c0e165d
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c
@@ -0,0 +1,489 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell OcteonTx2 RVU Admin Function driver
+ *
+ * Copyright (C) 2018 Marvell International Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/pci.h>
+
+#include "rvu_struct.h"
+#include "rvu_reg.h"
+#include "rvu.h"
+
+static int npa_aq_enqueue_wait(struct rvu *rvu, struct rvu_block *block,
+			       struct npa_aq_inst_s *inst)
+{
+	struct admin_queue *aq = block->aq;
+	struct npa_aq_res_s *result;
+	int timeout = 1000;
+	u64 reg, head;
+
+	result = (struct npa_aq_res_s *)aq->res->base;
+
+	/* Get current head pointer where to append this instruction */
+	reg = rvu_read64(rvu, block->addr, NPA_AF_AQ_STATUS);
+	head = (reg >> 4) & AQ_PTR_MASK;
+
+	memcpy((void *)(aq->inst->base + (head * aq->inst->entry_sz)),
+	       (void *)inst, aq->inst->entry_sz);
+	memset(result, 0, sizeof(*result));
+	/* sync into memory */
+	wmb();
+
+	/* Ring the doorbell and wait for result */
+	rvu_write64(rvu, block->addr, NPA_AF_AQ_DOOR, 1);
+	while (result->compcode == NPA_AQ_COMP_NOTDONE) {
+		cpu_relax();
+		udelay(1);
+		timeout--;
+		if (!timeout)
+			return -EBUSY;
+	}
+
+	if (result->compcode != NPA_AQ_COMP_GOOD)
+		/* TODO: Replace this with some error code */
+		return -EBUSY;
+
+	return 0;
+}
+
+static int rvu_npa_aq_enq_inst(struct rvu *rvu, struct npa_aq_enq_req *req,
+			       struct npa_aq_enq_rsp *rsp)
+{
+	struct rvu_hwinfo *hw = rvu->hw;
+	u16 pcifunc = req->hdr.pcifunc;
+	int blkaddr, npalf, rc = 0;
+	struct npa_aq_inst_s inst;
+	struct rvu_block *block;
+	struct admin_queue *aq;
+	struct rvu_pfvf *pfvf;
+	void *ctx, *mask;
+	bool ena;
+
+	pfvf = rvu_get_pfvf(rvu, pcifunc);
+	if (!pfvf->aura_ctx || req->aura_id >= pfvf->aura_ctx->qsize)
+		return NPA_AF_ERR_AQ_ENQUEUE;
+
+	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, pcifunc);
+	if (!pfvf->npalf || blkaddr < 0)
+		return NPA_AF_ERR_AF_LF_INVALID;
+
+	block = &hw->block[blkaddr];
+	aq = block->aq;
+	if (!aq) {
+		dev_warn(rvu->dev, "%s: NPA AQ not initialized\n", __func__);
+		return NPA_AF_ERR_AQ_ENQUEUE;
+	}
+
+	npalf = rvu_get_lf(rvu, block, pcifunc, 0);
+	if (npalf < 0)
+		return NPA_AF_ERR_AF_LF_INVALID;
+
+	memset(&inst, 0, sizeof(struct npa_aq_inst_s));
+	inst.cindex = req->aura_id;
+	inst.lf = npalf;
+	inst.ctype = req->ctype;
+	inst.op = req->op;
+	/* Currently we are not supporting enqueuing multiple instructions,
+	 * so always choose first entry in result memory.
+	 */
+	inst.res_addr = (u64)aq->res->iova;
+
+	/* Clean result + context memory */
+	memset(aq->res->base, 0, aq->res->entry_sz);
+	/* Context needs to be written at RES_ADDR + 128 */
+	ctx = aq->res->base + 128;
+	/* Mask needs to be written at RES_ADDR + 256 */
+	mask = aq->res->base + 256;
+
+	switch (req->op) {
+	case NPA_AQ_INSTOP_WRITE:
+		/* Copy context and write mask */
+		if (req->ctype == NPA_AQ_CTYPE_AURA) {
+			memcpy(mask, &req->aura_mask,
+			       sizeof(struct npa_aura_s));
+			memcpy(ctx, &req->aura, sizeof(struct npa_aura_s));
+		} else {
+			memcpy(mask, &req->pool_mask,
+			       sizeof(struct npa_pool_s));
+			memcpy(ctx, &req->pool, sizeof(struct npa_pool_s));
+		}
+		break;
+	case NPA_AQ_INSTOP_INIT:
+		if (req->ctype == NPA_AQ_CTYPE_AURA) {
+			if (req->aura.pool_addr >= pfvf->pool_ctx->qsize) {
+				rc = NPA_AF_ERR_AQ_FULL;
+				break;
+			}
+			/* Set pool's context address */
+			req->aura.pool_addr = pfvf->pool_ctx->iova +
+			(req->aura.pool_addr * pfvf->pool_ctx->entry_sz);
+			memcpy(ctx, &req->aura, sizeof(struct npa_aura_s));
+		} else { /* POOL's context */
+			memcpy(ctx, &req->pool, sizeof(struct npa_pool_s));
+		}
+		break;
+	case NPA_AQ_INSTOP_NOP:
+	case NPA_AQ_INSTOP_READ:
+	case NPA_AQ_INSTOP_LOCK:
+	case NPA_AQ_INSTOP_UNLOCK:
+		break;
+	default:
+		rc = NPA_AF_ERR_AQ_FULL;
+		break;
+	}
+
+	if (rc)
+		return rc;
+
+	spin_lock(&aq->lock);
+
+	/* Submit the instruction to AQ */
+	rc = npa_aq_enqueue_wait(rvu, block, &inst);
+	if (rc) {
+		spin_unlock(&aq->lock);
+		return rc;
+	}
+
+	/* Set aura bitmap if aura hw context is enabled */
+	if (req->ctype == NPA_AQ_CTYPE_AURA) {
+		if (req->op == NPA_AQ_INSTOP_INIT && req->aura.ena)
+			__set_bit(req->aura_id, pfvf->aura_bmap);
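+		/* For a WRITE op, take the new 'ena' only where the write
+		 * mask selects it; elsewhere retain the state already
+		 * tracked in the bitmap.
+		 */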
+		if (req->op == NPA_AQ_INSTOP_WRITE) {
+			ena = (req->aura.ena & req->aura_mask.ena) |
+				(test_bit(req->aura_id, pfvf->aura_bmap) &
+				~req->aura_mask.ena);
+			if (ena)
+				__set_bit(req->aura_id, pfvf->aura_bmap);
+			else
+				__clear_bit(req->aura_id, pfvf->aura_bmap);
+		}
+	}
+
+	/* Set pool bitmap if pool hw context is enabled */
+	if (req->ctype == NPA_AQ_CTYPE_POOL) {
+		if (req->op == NPA_AQ_INSTOP_INIT && req->pool.ena)
+			__set_bit(req->aura_id, pfvf->pool_bmap);
+		if (req->op == NPA_AQ_INSTOP_WRITE) {
+			ena = (req->pool.ena & req->pool_mask.ena) |
+				(test_bit(req->aura_id, pfvf->pool_bmap) &
+				~req->pool_mask.ena);
+			if (ena)
+				__set_bit(req->aura_id, pfvf->pool_bmap);
+			else
+				__clear_bit(req->aura_id, pfvf->pool_bmap);
+		}
+	}
+	spin_unlock(&aq->lock);
+
+	if (rsp) {
+		/* Copy read context into mailbox */
+		if (req->op == NPA_AQ_INSTOP_READ) {
+			if (req->ctype == NPA_AQ_CTYPE_AURA)
+				memcpy(&rsp->aura, ctx,
+				       sizeof(struct npa_aura_s));
+			else
+				memcpy(&rsp->pool, ctx,
+				       sizeof(struct npa_pool_s));
+		}
+	}
+
+	return 0;
+}
+
+static int npa_lf_hwctx_disable(struct rvu *rvu, struct hwctx_disable_req *req)
+{
+	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc);
+	struct npa_aq_enq_req aq_req;
+	unsigned long *bmap;
+	int id, cnt = 0;
+	int err = 0, rc;
+
+	if (!pfvf->pool_ctx || !pfvf->aura_ctx)
+		return NPA_AF_ERR_AQ_ENQUEUE;
+
+	memset(&aq_req, 0, sizeof(struct npa_aq_enq_req));
+	aq_req.hdr.pcifunc = req->hdr.pcifunc;
+
+	if (req->ctype == NPA_AQ_CTYPE_POOL) {
+		aq_req.pool.ena = 0;
+		aq_req.pool_mask.ena = 1;
+		cnt = pfvf->pool_ctx->qsize;
+		bmap = pfvf->pool_bmap;
+	} else if (req->ctype == NPA_AQ_CTYPE_AURA) {
+		aq_req.aura.ena = 0;
+		aq_req.aura_mask.ena = 1;
+		cnt = pfvf->aura_ctx->qsize;
+		bmap = pfvf->aura_bmap;
+	}
+
+	aq_req.ctype = req->ctype;
+	aq_req.op = NPA_AQ_INSTOP_WRITE;
+
+	for (id = 0; id < cnt; id++) {
+		if (!test_bit(id, bmap))
+			continue;
+		aq_req.aura_id = id;
+		rc = rvu_npa_aq_enq_inst(rvu, &aq_req, NULL);
+		if (rc) {
+			err = rc;
+			dev_err(rvu->dev, "Failed to disable %s:%d context\n",
+				(req->ctype == NPA_AQ_CTYPE_AURA) ?
+				"Aura" : "Pool", id);
+		}
+	}
+
+	return err;
+}
+
+int rvu_mbox_handler_npa_aq_enq(struct rvu *rvu,
+				struct npa_aq_enq_req *req,
+				struct npa_aq_enq_rsp *rsp)
+{
+	return rvu_npa_aq_enq_inst(rvu, req, rsp);
+}
+
+int rvu_mbox_handler_npa_hwctx_disable(struct rvu *rvu,
+				       struct hwctx_disable_req *req,
+				       struct msg_rsp *rsp)
+{
+	return npa_lf_hwctx_disable(rvu, req);
+}
+
+static void npa_ctx_free(struct rvu *rvu, struct rvu_pfvf *pfvf)
+{
+	kfree(pfvf->aura_bmap);
+	pfvf->aura_bmap = NULL;
+
+	qmem_free(rvu->dev, pfvf->aura_ctx);
+	pfvf->aura_ctx = NULL;
+
+	kfree(pfvf->pool_bmap);
+	pfvf->pool_bmap = NULL;
+
+	qmem_free(rvu->dev, pfvf->pool_ctx);
+	pfvf->pool_ctx = NULL;
+
+	qmem_free(rvu->dev, pfvf->npa_qints_ctx);
+	pfvf->npa_qints_ctx = NULL;
+}
+
+int rvu_mbox_handler_npa_lf_alloc(struct rvu *rvu,
+				  struct npa_lf_alloc_req *req,
+				  struct npa_lf_alloc_rsp *rsp)
+{
+	int npalf, qints, hwctx_size, err, rc = 0;
+	struct rvu_hwinfo *hw = rvu->hw;
+	u16 pcifunc = req->hdr.pcifunc;
+	struct rvu_block *block;
+	struct rvu_pfvf *pfvf;
+	u64 cfg, ctx_cfg;
+	int blkaddr;
+
+	if (req->aura_sz > NPA_AURA_SZ_MAX ||
+	    req->aura_sz == NPA_AURA_SZ_0 || !req->nr_pools)
+		return NPA_AF_ERR_PARAM;
+
+	pfvf = rvu_get_pfvf(rvu, pcifunc);
+	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, pcifunc);
+	if (!pfvf->npalf || blkaddr < 0)
+		return NPA_AF_ERR_AF_LF_INVALID;
+
+	block = &hw->block[blkaddr];
+	npalf = rvu_get_lf(rvu, block, pcifunc, 0);
+	if (npalf < 0)
+		return NPA_AF_ERR_AF_LF_INVALID;
+
+	/* Reset this NPA LF */
+	err = rvu_lf_reset(rvu, block, npalf);
+	if (err) {
+		dev_err(rvu->dev, "Failed to reset NPALF%d\n", npalf);
+		return NPA_AF_ERR_LF_RESET;
+	}
+
+	ctx_cfg = rvu_read64(rvu, blkaddr, NPA_AF_CONST1);
+
+	/* Alloc memory for aura HW contexts */
+	hwctx_size = 1UL << (ctx_cfg & 0xF);
+	err = qmem_alloc(rvu->dev, &pfvf->aura_ctx,
+			 NPA_AURA_COUNT(req->aura_sz), hwctx_size);
+	if (err)
+		goto free_mem;
+
+	pfvf->aura_bmap = kcalloc(NPA_AURA_COUNT(req->aura_sz), sizeof(long),
+				  GFP_KERNEL);
+	if (!pfvf->aura_bmap)
+		goto free_mem;
+
+	/* Alloc memory for pool HW contexts */
+	hwctx_size = 1UL << ((ctx_cfg >> 4) & 0xF);
+	err = qmem_alloc(rvu->dev, &pfvf->pool_ctx, req->nr_pools, hwctx_size);
+	if (err)
+		goto free_mem;
+
+	pfvf->pool_bmap = kcalloc(NPA_AURA_COUNT(req->aura_sz), sizeof(long),
+				  GFP_KERNEL);
+	if (!pfvf->pool_bmap)
+		goto free_mem;
+
+	/* Get no of queue interrupts supported */
+	cfg = rvu_read64(rvu, blkaddr, NPA_AF_CONST);
+	qints = (cfg >> 28) & 0xFFF;
+
+	/* Alloc memory for Qints HW contexts */
+	hwctx_size = 1UL << ((ctx_cfg >> 8) & 0xF);
+	err = qmem_alloc(rvu->dev, &pfvf->npa_qints_ctx, qints, hwctx_size);
+	if (err)
+		goto free_mem;
+
+	cfg = rvu_read64(rvu, blkaddr, NPA_AF_LFX_AURAS_CFG(npalf));
+	/* Clear way partition mask and set aura offset to '0' */
+	cfg &= ~(BIT_ULL(34) - 1);
+	/* Set aura size & enable caching of contexts */
+	cfg |= (req->aura_sz << 16) | BIT_ULL(34);
+	rvu_write64(rvu, blkaddr, NPA_AF_LFX_AURAS_CFG(npalf), cfg);
+
+	/* Configure aura HW context's base */
+	rvu_write64(rvu, blkaddr, NPA_AF_LFX_LOC_AURAS_BASE(npalf),
+		    (u64)pfvf->aura_ctx->iova);
+
+	/* Enable caching of qints hw context */
+	rvu_write64(rvu, blkaddr, NPA_AF_LFX_QINTS_CFG(npalf), BIT_ULL(36));
+	rvu_write64(rvu, blkaddr, NPA_AF_LFX_QINTS_BASE(npalf),
+		    (u64)pfvf->npa_qints_ctx->iova);
+
+	goto exit;
+
+free_mem:
+	npa_ctx_free(rvu, pfvf);
+	rc = -ENOMEM;
+
+exit:
+	/* set stack page info */
+	cfg = rvu_read64(rvu, blkaddr, NPA_AF_CONST);
+	rsp->stack_pg_ptrs = (cfg >> 8) & 0xFF;
+	rsp->stack_pg_bytes = cfg & 0xFF;
+	rsp->qints = (cfg >> 28) & 0xFFF;
+	return rc;
+}
+
+int rvu_mbox_handler_npa_lf_free(struct rvu *rvu, struct msg_req *req,
+				 struct msg_rsp *rsp)
+{
+	struct rvu_hwinfo *hw = rvu->hw;
+	u16 pcifunc = req->hdr.pcifunc;
+	struct rvu_block *block;
+	struct rvu_pfvf *pfvf;
+	int npalf, err;
+	int blkaddr;
+
+	pfvf = rvu_get_pfvf(rvu, pcifunc);
+	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, pcifunc);
+	if (!pfvf->npalf || blkaddr < 0)
+		return NPA_AF_ERR_AF_LF_INVALID;
+
+	block = &hw->block[blkaddr];
+	npalf = rvu_get_lf(rvu, block, pcifunc, 0);
+	if (npalf < 0)
+		return NPA_AF_ERR_AF_LF_INVALID;
+
+	/* Reset this NPA LF */
+	err = rvu_lf_reset(rvu, block, npalf);
+	if (err) {
+		dev_err(rvu->dev, "Failed to reset NPALF%d\n", npalf);
+		return NPA_AF_ERR_LF_RESET;
+	}
+
+	npa_ctx_free(rvu, pfvf);
+
+	return 0;
+}
+
+static int npa_aq_init(struct rvu *rvu, struct rvu_block *block)
+{
+	u64 cfg;
+	int err;
+
+	/* Set admin queue endianness */
+	cfg = rvu_read64(rvu, block->addr, NPA_AF_GEN_CFG);
+#ifdef __BIG_ENDIAN
+	cfg |= BIT_ULL(1);
+	rvu_write64(rvu, block->addr, NPA_AF_GEN_CFG, cfg);
+#else
+	cfg &= ~BIT_ULL(1);
+	rvu_write64(rvu, block->addr, NPA_AF_GEN_CFG, cfg);
+#endif
+
+	/* Do not bypass NDC cache */
+	cfg = rvu_read64(rvu, block->addr, NPA_AF_NDC_CFG);
+	cfg &= ~0x03DULL;
+	rvu_write64(rvu, block->addr, NPA_AF_NDC_CFG, cfg);
+
+	/* Result structure can be followed by Aura/Pool context at
+	 * RES + 128 bytes and a write mask at RES + 256 bytes, depending on
+	 * operation type. Alloc sufficient result memory for all operations.
+	 */
+	err = rvu_aq_alloc(rvu, &block->aq,
+			   Q_COUNT(AQ_SIZE), sizeof(struct npa_aq_inst_s),
+			   ALIGN(sizeof(struct npa_aq_res_s), 128) + 256);
+	if (err)
+		return err;
+
+	rvu_write64(rvu, block->addr, NPA_AF_AQ_CFG, AQ_SIZE);
+	rvu_write64(rvu, block->addr,
+		    NPA_AF_AQ_BASE, (u64)block->aq->inst->iova);
+	return 0;
+}
+
+int rvu_npa_init(struct rvu *rvu)
+{
+	struct rvu_hwinfo *hw = rvu->hw;
+	int blkaddr, err;
+
+	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, 0);
+	if (blkaddr < 0)
+		return 0;
+
+	/* Initialize admin queue */
+	err = npa_aq_init(rvu, &hw->block[blkaddr]);
+	if (err)
+		return err;
+
+	return 0;
+}
+
+void rvu_npa_freemem(struct rvu *rvu)
+{
+	struct rvu_hwinfo *hw = rvu->hw;
+	struct rvu_block *block;
+	int blkaddr;
+
+	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, 0);
+	if (blkaddr < 0)
+		return;
+
+	block = &hw->block[blkaddr];
+	rvu_aq_free(rvu, block->aq);
+}
+
+void rvu_npa_lf_teardown(struct rvu *rvu, u16 pcifunc, int npalf)
+{
+	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
+	struct hwctx_disable_req ctx_req;
+
+	/* Disable all pools */
+	ctx_req.hdr.pcifunc = pcifunc;
+	ctx_req.ctype = NPA_AQ_CTYPE_POOL;
+	npa_lf_hwctx_disable(rvu, &ctx_req);
+
+	/* Disable all auras */
+	ctx_req.ctype = NPA_AQ_CTYPE_AURA;
+	npa_lf_hwctx_disable(rvu, &ctx_req);
+
+	npa_ctx_free(rvu, pfvf);
+}
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
new file mode 100644
index 0000000..15f7027
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
@@ -0,0 +1,2212 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell OcteonTx2 RVU Admin Function driver
+ *
+ * Copyright (C) 2018 Marvell International Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/bitfield.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+
+#include "rvu_struct.h"
+#include "rvu_reg.h"
+#include "rvu.h"
+#include "npc.h"
+#include "cgx.h"
+#include "npc_profile.h"
+
+#define RSVD_MCAM_ENTRIES_PER_PF	2 /* Bcast & Promisc */
+#define RSVD_MCAM_ENTRIES_PER_NIXLF	1 /* Ucast for LFs */
+
+#define NIXLF_UCAST_ENTRY	0
+#define NIXLF_BCAST_ENTRY	1
+#define NIXLF_PROMISC_ENTRY	2
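+
+/* Each PF (excluding the AF) gets a bcast entry followed by a promisc
+ * entry from the reserved region, and each NIXLF gets one ucast entry;
+ * see npc_get_nixlf_mcam_index() below for the index math.
+ */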
+
+#define NPC_PARSE_RESULT_DMAC_OFFSET	8
+
+static void npc_mcam_free_all_entries(struct rvu *rvu, struct npc_mcam *mcam,
+				      int blkaddr, u16 pcifunc);
+static void npc_mcam_free_all_counters(struct rvu *rvu, struct npc_mcam *mcam,
+				       u16 pcifunc);
+
+void rvu_npc_set_pkind(struct rvu *rvu, int pkind, struct rvu_pfvf *pfvf)
+{
+	int blkaddr;
+	u64 val = 0;
+
+	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+	if (blkaddr < 0)
+		return;
+
+	/* Config CPI base for the PKIND */
+	val = pkind | 1ULL << 62;
+	rvu_write64(rvu, blkaddr, NPC_AF_PKINDX_CPI_DEFX(pkind, 0), val);
+}
+
+int rvu_npc_get_pkind(struct rvu *rvu, u16 pf)
+{
+	struct npc_pkind *pkind = &rvu->hw->pkind;
+	u32 map;
+	int i;
+
+	for (i = 0; i < pkind->rsrc.max; i++) {
+		map = pkind->pfchan_map[i];
+		if (((map >> 16) & 0x3F) == pf)
+			return i;
+	}
+	return -1;
+}
+
+static int npc_get_nixlf_mcam_index(struct npc_mcam *mcam,
+				    u16 pcifunc, int nixlf, int type)
+{
+	int pf = rvu_get_pf(pcifunc);
+	int index;
+
+	/* Check if this is for a PF */
+	if (pf && !(pcifunc & RVU_PFVF_FUNC_MASK)) {
+		/* Reserved entries exclude PF0 */
+		pf--;
+		index = mcam->pf_offset + (pf * RSVD_MCAM_ENTRIES_PER_PF);
+		/* Broadcast address matching entry should be first so
+		 * that the packet can be replicated to all VFs.
+		 */
+		if (type == NIXLF_BCAST_ENTRY)
+			return index;
+		else if (type == NIXLF_PROMISC_ENTRY)
+			return index + 1;
+	}
+
+	return (mcam->nixlf_offset + (nixlf * RSVD_MCAM_ENTRIES_PER_NIXLF));
+}
+
+static int npc_get_bank(struct npc_mcam *mcam, int index)
+{
+	int bank = index / mcam->banksize;
+
+	/* 0,1 & 2,3 banks are combined for this keysize */
+	if (mcam->keysize == NPC_MCAM_KEY_X2)
+		return bank ? 2 : 0;
+
+	return bank;
+}
+
+static bool is_mcam_entry_enabled(struct rvu *rvu, struct npc_mcam *mcam,
+				  int blkaddr, int index)
+{
+	int bank = npc_get_bank(mcam, index);
+	u64 cfg;
+
+	index &= (mcam->banksize - 1);
+	cfg = rvu_read64(rvu, blkaddr, NPC_AF_MCAMEX_BANKX_CFG(index, bank));
+	return (cfg & 1);
+}
+
+static void npc_enable_mcam_entry(struct rvu *rvu, struct npc_mcam *mcam,
+				  int blkaddr, int index, bool enable)
+{
+	int bank = npc_get_bank(mcam, index);
+	int actbank = bank;
+
+	index &= (mcam->banksize - 1);
+	for (; bank < (actbank + mcam->banks_per_entry); bank++) {
+		rvu_write64(rvu, blkaddr,
+			    NPC_AF_MCAMEX_BANKX_CFG(index, bank),
+			    enable ? 1 : 0);
+	}
+}
+
+static void npc_get_keyword(struct mcam_entry *entry, int idx,
+			    u64 *cam0, u64 *cam1)
+{
+	u64 kw_mask = 0x00;
+
+#define CAM_MASK(n)	(BIT_ULL(n) - 1)
+
+	/* 0, 2, 4, 6 indices refer to BANKX_CAMX_W0 and
+	 * 1, 3, 5, 7 indices refer to BANKX_CAMX_W1.
+	 *
+	 * Also, only 48 bits of BANKX_CAMX_W1 are valid.
+	 */
+	switch (idx) {
+	case 0:
+		/* BANK(X)_CAM_W0<63:0> = MCAM_KEY[KW0]<63:0> */
+		*cam1 = entry->kw[0];
+		kw_mask = entry->kw_mask[0];
+		break;
+	case 1:
+		/* BANK(X)_CAM_W1<47:0> = MCAM_KEY[KW1]<47:0> */
+		*cam1 = entry->kw[1] & CAM_MASK(48);
+		kw_mask = entry->kw_mask[1] & CAM_MASK(48);
+		break;
+	case 2:
+		/* BANK(X + 1)_CAM_W0<15:0> = MCAM_KEY[KW1]<63:48>
+		 * BANK(X + 1)_CAM_W0<63:16> = MCAM_KEY[KW2]<47:0>
+		 */
+		*cam1 = (entry->kw[1] >> 48) & CAM_MASK(16);
+		*cam1 |= ((entry->kw[2] & CAM_MASK(48)) << 16);
+		kw_mask = (entry->kw_mask[1] >> 48) & CAM_MASK(16);
+		kw_mask |= ((entry->kw_mask[2] & CAM_MASK(48)) << 16);
+		break;
+	case 3:
+		/* BANK(X + 1)_CAM_W1<15:0> = MCAM_KEY[KW2]<63:48>
+		 * BANK(X + 1)_CAM_W1<47:16> = MCAM_KEY[KW3]<31:0>
+		 */
+		*cam1 = (entry->kw[2] >> 48) & CAM_MASK(16);
+		*cam1 |= ((entry->kw[3] & CAM_MASK(32)) << 16);
+		kw_mask = (entry->kw_mask[2] >> 48) & CAM_MASK(16);
+		kw_mask |= ((entry->kw_mask[3] & CAM_MASK(32)) << 16);
+		break;
+	case 4:
+		/* BANK(X + 2)_CAM_W0<31:0> = MCAM_KEY[KW3]<63:32>
+		 * BANK(X + 2)_CAM_W0<63:32> = MCAM_KEY[KW4]<31:0>
+		 */
+		*cam1 = (entry->kw[3] >> 32) & CAM_MASK(32);
+		*cam1 |= ((entry->kw[4] & CAM_MASK(32)) << 32);
+		kw_mask = (entry->kw_mask[3] >> 32) & CAM_MASK(32);
+		kw_mask |= ((entry->kw_mask[4] & CAM_MASK(32)) << 32);
+		break;
+	case 5:
+		/* BANK(X + 2)_CAM_W1<31:0> = MCAM_KEY[KW4]<63:32>
+		 * BANK(X + 2)_CAM_W1<47:32> = MCAM_KEY[KW5]<15:0>
+		 */
+		*cam1 = (entry->kw[4] >> 32) & CAM_MASK(32);
+		*cam1 |= ((entry->kw[5] & CAM_MASK(16)) << 32);
+		kw_mask = (entry->kw_mask[4] >> 32) & CAM_MASK(32);
+		kw_mask |= ((entry->kw_mask[5] & CAM_MASK(16)) << 32);
+		break;
+	case 6:
+		/* BANK(X + 3)_CAM_W0<47:0> = MCAM_KEY[KW5]<63:16>
+		 * BANK(X + 3)_CAM_W0<63:48> = MCAM_KEY[KW6]<15:0>
+		 */
+		*cam1 = (entry->kw[5] >> 16) & CAM_MASK(48);
+		*cam1 |= ((entry->kw[6] & CAM_MASK(16)) << 48);
+		kw_mask = (entry->kw_mask[5] >> 16) & CAM_MASK(48);
+		kw_mask |= ((entry->kw_mask[6] & CAM_MASK(16)) << 48);
+		break;
+	case 7:
+		/* BANK(X + 3)_CAM_W1<47:0> = MCAM_KEY[KW6]<63:16> */
+		*cam1 = (entry->kw[6] >> 16) & CAM_MASK(48);
+		kw_mask = (entry->kw_mask[6] >> 16) & CAM_MASK(48);
+		break;
+	}
+
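+	/* Key bits not covered by kw_mask end up with CAM0 = CAM1 = 0,
+	 * i.e. dontcare; masked bits get exactly one of CAM0/CAM1 set
+	 * based on the key bit's value.
+	 */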
+	*cam1 &= kw_mask;
+	*cam0 = ~*cam1 & kw_mask;
+}
+
+static void npc_config_mcam_entry(struct rvu *rvu, struct npc_mcam *mcam,
+				  int blkaddr, int index, u8 intf,
+				  struct mcam_entry *entry, bool enable)
+{
+	int bank = npc_get_bank(mcam, index);
+	int kw = 0, actbank, actindex;
+	u64 cam0, cam1;
+
+	actbank = bank; /* Save bank id, to set action later on */
+	actindex = index;
+	index &= (mcam->banksize - 1);
+
+	/* CAM1 takes the comparison value and
+	 * CAM0 specifies match for a bit in key being '0' or '1' or 'dontcare'.
+	 * CAM1<n> = 0 & CAM0<n> = 1 => match if key<n> = 0
+	 * CAM1<n> = 1 & CAM0<n> = 0 => match if key<n> = 1
+	 * CAM1<n> = 0 & CAM0<n> = 0 => always match i.e. dontcare.
+	 */
+	for (; bank < (actbank + mcam->banks_per_entry); bank++, kw = kw + 2) {
+		/* Interface should be set in all banks */
+		rvu_write64(rvu, blkaddr,
+			    NPC_AF_MCAMEX_BANKX_CAMX_INTF(index, bank, 1),
+			    intf);
+		rvu_write64(rvu, blkaddr,
+			    NPC_AF_MCAMEX_BANKX_CAMX_INTF(index, bank, 0),
+			    ~intf & 0x3);
+
+		/* Set the match key */
+		npc_get_keyword(entry, kw, &cam0, &cam1);
+		rvu_write64(rvu, blkaddr,
+			    NPC_AF_MCAMEX_BANKX_CAMX_W0(index, bank, 1), cam1);
+		rvu_write64(rvu, blkaddr,
+			    NPC_AF_MCAMEX_BANKX_CAMX_W0(index, bank, 0), cam0);
+
+		npc_get_keyword(entry, kw + 1, &cam0, &cam1);
+		rvu_write64(rvu, blkaddr,
+			    NPC_AF_MCAMEX_BANKX_CAMX_W1(index, bank, 1), cam1);
+		rvu_write64(rvu, blkaddr,
+			    NPC_AF_MCAMEX_BANKX_CAMX_W1(index, bank, 0), cam0);
+	}
+
+	/* Set 'action' */
+	rvu_write64(rvu, blkaddr,
+		    NPC_AF_MCAMEX_BANKX_ACTION(index, actbank), entry->action);
+
+	/* Set TAG 'action' */
+	rvu_write64(rvu, blkaddr, NPC_AF_MCAMEX_BANKX_TAG_ACT(index, actbank),
+		    entry->vtag_action);
+
+	/* Finally, enable or disable the entry as requested */
+	npc_enable_mcam_entry(rvu, mcam, blkaddr, actindex, enable);
+}
+
+static void npc_copy_mcam_entry(struct rvu *rvu, struct npc_mcam *mcam,
+				int blkaddr, u16 src, u16 dest)
+{
+	int dbank = npc_get_bank(mcam, dest);
+	int sbank = npc_get_bank(mcam, src);
+	u64 cfg, sreg, dreg;
+	int bank, i;
+
+	src &= (mcam->banksize - 1);
+	dest &= (mcam->banksize - 1);
+
+	/* Copy INTF's, W0's, W1's CAM0 and CAM1 configuration */
+	for (bank = 0; bank < mcam->banks_per_entry; bank++) {
+		sreg = NPC_AF_MCAMEX_BANKX_CAMX_INTF(src, sbank + bank, 0);
+		dreg = NPC_AF_MCAMEX_BANKX_CAMX_INTF(dest, dbank + bank, 0);
+		for (i = 0; i < 6; i++) {
+			cfg = rvu_read64(rvu, blkaddr, sreg + (i * 8));
+			rvu_write64(rvu, blkaddr, dreg + (i * 8), cfg);
+		}
+	}
+
+	/* Copy action */
+	cfg = rvu_read64(rvu, blkaddr,
+			 NPC_AF_MCAMEX_BANKX_ACTION(src, sbank));
+	rvu_write64(rvu, blkaddr,
+		    NPC_AF_MCAMEX_BANKX_ACTION(dest, dbank), cfg);
+
+	/* Copy TAG action */
+	cfg = rvu_read64(rvu, blkaddr,
+			 NPC_AF_MCAMEX_BANKX_TAG_ACT(src, sbank));
+	rvu_write64(rvu, blkaddr,
+		    NPC_AF_MCAMEX_BANKX_TAG_ACT(dest, dbank), cfg);
+
+	/* Enable or disable */
+	cfg = rvu_read64(rvu, blkaddr,
+			 NPC_AF_MCAMEX_BANKX_CFG(src, sbank));
+	rvu_write64(rvu, blkaddr,
+		    NPC_AF_MCAMEX_BANKX_CFG(dest, dbank), cfg);
+}
+
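+/* Read back the action word currently programmed for an MCAM entry */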
+static u64 npc_get_mcam_action(struct rvu *rvu, struct npc_mcam *mcam,
+			       int blkaddr, int index)
+{
+	int bank = npc_get_bank(mcam, index);
+
+	index &= (mcam->banksize - 1);
+	return rvu_read64(rvu, blkaddr,
+			  NPC_AF_MCAMEX_BANKX_ACTION(index, bank));
+}
+
+void rvu_npc_install_ucast_entry(struct rvu *rvu, u16 pcifunc,
+				 int nixlf, u64 chan, u8 *mac_addr)
+{
+	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
+	struct npc_mcam *mcam = &rvu->hw->mcam;
+	struct mcam_entry entry = { {0} };
+	struct nix_rx_action action;
+	int blkaddr, index, kwi;
+	u64 mac = 0;
+
+	/* AF's VFs work in promiscuous mode */
+	if (is_afvf(pcifunc))
+		return;
+
+	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+	if (blkaddr < 0)
+		return;
+
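+	/* Pack the MAC address into a u64, with the first octet in the
+	 * most significant byte of the 48-bit DMAC value.
+	 */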
+	for (index = ETH_ALEN - 1; index >= 0; index--)
+		mac |= ((u64)*mac_addr++) << (8 * index);
+
+	index = npc_get_nixlf_mcam_index(mcam, pcifunc,
+					 nixlf, NIXLF_UCAST_ENTRY);
+
+	/* Match ingress channel and DMAC */
+	entry.kw[0] = chan;
+	entry.kw_mask[0] = 0xFFFULL;
+
+	kwi = NPC_PARSE_RESULT_DMAC_OFFSET / sizeof(u64);
+	entry.kw[kwi] = mac;
+	entry.kw_mask[kwi] = BIT_ULL(48) - 1;
+
+	/* Don't change the action if entry is already enabled
+	 * Otherwise RSS action may get overwritten.
+	 */
+	if (is_mcam_entry_enabled(rvu, mcam, blkaddr, index)) {
+		*(u64 *)&action = npc_get_mcam_action(rvu, mcam,
+						      blkaddr, index);
+	} else {
+		*(u64 *)&action = 0x00;
+		action.op = NIX_RX_ACTIONOP_UCAST;
+		action.pf_func = pcifunc;
+	}
+
+	entry.action = *(u64 *)&action;
+	npc_config_mcam_entry(rvu, mcam, blkaddr, index,
+			      NIX_INTF_RX, &entry, true);
+
+	/* add VLAN matching, setup action and save entry back for later */
+	entry.kw[0] |= (NPC_LT_LB_STAG | NPC_LT_LB_CTAG) << 20;
+	entry.kw_mask[0] |= (NPC_LT_LB_STAG & NPC_LT_LB_CTAG) << 20;
+
+	entry.vtag_action = VTAG0_VALID_BIT |
+			    FIELD_PREP(VTAG0_TYPE_MASK, 0) |
+			    FIELD_PREP(VTAG0_LID_MASK, NPC_LID_LA) |
+			    FIELD_PREP(VTAG0_RELPTR_MASK, 12);
+
+	memcpy(&pfvf->entry, &entry, sizeof(entry));
+}
+
+void rvu_npc_install_promisc_entry(struct rvu *rvu, u16 pcifunc,
+				   int nixlf, u64 chan, bool allmulti)
+{
+	struct npc_mcam *mcam = &rvu->hw->mcam;
+	int blkaddr, ucast_idx, index, kwi;
+	struct mcam_entry entry = { {0} };
+	struct nix_rx_action action = { };
+
+	/* Only PF or AF VF can add a promiscuous entry */
+	if ((pcifunc & RVU_PFVF_FUNC_MASK) && !is_afvf(pcifunc))
+		return;
+
+	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+	if (blkaddr < 0)
+		return;
+
+	index = npc_get_nixlf_mcam_index(mcam, pcifunc,
+					 nixlf, NIXLF_PROMISC_ENTRY);
+
+	entry.kw[0] = chan;
+	entry.kw_mask[0] = 0xFFFULL;
+
+	if (allmulti) {
+		kwi = NPC_PARSE_RESULT_DMAC_OFFSET / sizeof(u64);
+		entry.kw[kwi] = BIT_ULL(40); /* LSB bit of 1st byte in DMAC */
+		entry.kw_mask[kwi] = BIT_ULL(40);
+	}
+
+	ucast_idx = npc_get_nixlf_mcam_index(mcam, pcifunc,
+					     nixlf, NIXLF_UCAST_ENTRY);
+
+	/* If the corresponding PF's ucast action is RSS,
+	 * use the same action for promisc also
+	 */
+	if (is_mcam_entry_enabled(rvu, mcam, blkaddr, ucast_idx))
+		*(u64 *)&action = npc_get_mcam_action(rvu, mcam,
+							blkaddr, ucast_idx);
+
+	if (action.op != NIX_RX_ACTIONOP_RSS) {
+		*(u64 *)&action = 0x00;
+		action.op = NIX_RX_ACTIONOP_UCAST;
+		action.pf_func = pcifunc;
+	}
+
+	entry.action = *(u64 *)&action;
+	npc_config_mcam_entry(rvu, mcam, blkaddr, index,
+			      NIX_INTF_RX, &entry, true);
+}
+
+static void npc_enadis_promisc_entry(struct rvu *rvu, u16 pcifunc,
+				     int nixlf, bool enable)
+{
+	struct npc_mcam *mcam = &rvu->hw->mcam;
+	int blkaddr, index;
+
+	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+	if (blkaddr < 0)
+		return;
+
+	/* Only PFs have a promiscuous entry */
+	if (pcifunc & RVU_PFVF_FUNC_MASK)
+		return;
+
+	index = npc_get_nixlf_mcam_index(mcam, pcifunc,
+					 nixlf, NIXLF_PROMISC_ENTRY);
+	npc_enable_mcam_entry(rvu, mcam, blkaddr, index, enable);
+}
+
+void rvu_npc_disable_promisc_entry(struct rvu *rvu, u16 pcifunc, int nixlf)
+{
+	npc_enadis_promisc_entry(rvu, pcifunc, nixlf, false);
+}
+
+void rvu_npc_enable_promisc_entry(struct rvu *rvu, u16 pcifunc, int nixlf)
+{
+	npc_enadis_promisc_entry(rvu, pcifunc, nixlf, true);
+}
+
+void rvu_npc_install_bcast_match_entry(struct rvu *rvu, u16 pcifunc,
+				       int nixlf, u64 chan)
+{
+	struct npc_mcam *mcam = &rvu->hw->mcam;
+	struct mcam_entry entry = { {0} };
+	struct nix_rx_action action;
+#ifdef MCAST_MCE
+	struct rvu_pfvf *pfvf;
+#endif
+	int blkaddr, index;
+
+	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+	if (blkaddr < 0)
+		return;
+
+	/* Only PF can add a bcast match entry */
+	if (pcifunc & RVU_PFVF_FUNC_MASK)
+		return;
+#ifdef MCAST_MCE
+	pfvf = rvu_get_pfvf(rvu, pcifunc & ~RVU_PFVF_FUNC_MASK);
+#endif
+
+	index = npc_get_nixlf_mcam_index(mcam, pcifunc,
+					 nixlf, NIXLF_BCAST_ENTRY);
+
+	/* Match the ingress channel and the L2B bit.
+	 * NOTE: The MKEX default profile is a reduced, stop-gap layout
+	 * intended to accommodate more capabilities while ignoring a few
+	 * bits. The L2B bit, which the HRM's NPC_PARSE_KEX_S places at
+	 * BIT_POS[25], is moved here to BIT_POS[13]; ERRCODE and ERRLEV
+	 * are ignored, as otherwise we would lose out on capability
+	 * features needed for CoS (from an ODP PoV), e.g. VLAN, DSCP.
+	 *
+	 * Reduced layout of the MKEX default profile
+	 * (i.e. CHAN, L2/3{B/M}, LA, LB, LC, LD):
+	 *
+	 * BIT_POS[31:28] : LD
+	 * BIT_POS[27:24] : LC
+	 * BIT_POS[23:20] : LB
+	 * BIT_POS[19:16] : LA
+	 * BIT_POS[15:12] : L3B, L3M, L2B, L2M
+	 * BIT_POS[11:00] : CHAN
+	 */
+	entry.kw[0] = BIT_ULL(13) | chan;
+	entry.kw_mask[0] = BIT_ULL(13) | 0xFFFULL;
+
+	*(u64 *)&action = 0x00;
+	/* Early silicon doesn't support pkt replication, so by default
+	 * MCAST_MCE is left undefined and the entry is installed with a
+	 * UCAST action, so that the PF receives all broadcast packets.
+	 */
+#ifdef MCAST_MCE
+	action.op = NIX_RX_ACTIONOP_MCAST;
+	action.pf_func = pcifunc;
+	action.index = pfvf->bcast_mce_idx;
+#else
+	action.op = NIX_RX_ACTIONOP_UCAST;
+	action.pf_func = pcifunc;
+#endif
+
+	entry.action = *(u64 *)&action;
+	npc_config_mcam_entry(rvu, mcam, blkaddr, index,
+			      NIX_INTF_RX, &entry, true);
+}
+
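+/* Point the RX action of a NIXLF's default (or the given) MCAM entry
+ * at RSS group 'group' using flow key algorithm 'alg_idx'.
+ */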
+void rvu_npc_update_flowkey_alg_idx(struct rvu *rvu, u16 pcifunc, int nixlf,
+				    int group, int alg_idx, int mcam_index)
+{
+	struct npc_mcam *mcam = &rvu->hw->mcam;
+	struct nix_rx_action action;
+	int blkaddr, index, bank;
+
+	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+	if (blkaddr < 0)
+		return;
+
+	/* Check if this is for reserved default entry */
+	if (mcam_index < 0) {
+		if (group != DEFAULT_RSS_CONTEXT_GROUP)
+			return;
+		index = npc_get_nixlf_mcam_index(mcam, pcifunc,
+						 nixlf, NIXLF_UCAST_ENTRY);
+	} else {
+		/* TODO: validate this mcam index */
+		index = mcam_index;
+	}
+
+	if (index >= mcam->total_entries)
+		return;
+
+	bank = npc_get_bank(mcam, index);
+	index &= (mcam->banksize - 1);
+
+	*(u64 *)&action = rvu_read64(rvu, blkaddr,
+				     NPC_AF_MCAMEX_BANKX_ACTION(index, bank));
+	/* Ignore if no action was set earlier */
+	if (!*(u64 *)&action)
+		return;
+
+	action.op = NIX_RX_ACTIONOP_RSS;
+	action.pf_func = pcifunc;
+	action.index = group;
+	action.flow_key_alg = alg_idx;
+
+	rvu_write64(rvu, blkaddr,
+		    NPC_AF_MCAMEX_BANKX_ACTION(index, bank), *(u64 *)&action);
+
+	index = npc_get_nixlf_mcam_index(mcam, pcifunc,
+					 nixlf, NIXLF_PROMISC_ENTRY);
+
+	/* If PF's promiscuous entry is enabled,
+	 * Set RSS action for that entry as well
+	 */
+	if (is_mcam_entry_enabled(rvu, mcam, blkaddr, index)) {
+		bank = npc_get_bank(mcam, index);
+		index &= (mcam->banksize - 1);
+
+		rvu_write64(rvu, blkaddr,
+			    NPC_AF_MCAMEX_BANKX_ACTION(index, bank),
+			    *(u64 *)&action);
+	}
+
+	rvu_npc_update_rxvlan(rvu, pcifunc, nixlf);
+}
+
+static void npc_enadis_default_entries(struct rvu *rvu, u16 pcifunc,
+				       int nixlf, bool enable)
+{
+	struct npc_mcam *mcam = &rvu->hw->mcam;
+	struct nix_rx_action action;
+	int index, bank, blkaddr;
+
+	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+	if (blkaddr < 0)
+		return;
+
+	/* Ucast MCAM match entry of this PF/VF */
+	index = npc_get_nixlf_mcam_index(mcam, pcifunc,
+					 nixlf, NIXLF_UCAST_ENTRY);
+	npc_enable_mcam_entry(rvu, mcam, blkaddr, index, enable);
+
+	/* For PF, ena/dis promisc and bcast MCAM match entries */
+	if (pcifunc & RVU_PFVF_FUNC_MASK)
+		return;
+
+	/* For bcast, enable/disable only if its action is not
+	 * packet replication; if the action is replication, then
+	 * this PF's nixlf is instead removed from the bcast
+	 * replication list.
+	 */
+	index = npc_get_nixlf_mcam_index(mcam, pcifunc,
+					 nixlf, NIXLF_BCAST_ENTRY);
+	bank = npc_get_bank(mcam, index);
+	*(u64 *)&action = rvu_read64(rvu, blkaddr,
+	     NPC_AF_MCAMEX_BANKX_ACTION(index & (mcam->banksize - 1), bank));
+	if (action.op != NIX_RX_ACTIONOP_MCAST)
+		npc_enable_mcam_entry(rvu, mcam,
+				      blkaddr, index, enable);
+	if (enable)
+		rvu_npc_enable_promisc_entry(rvu, pcifunc, nixlf);
+	else
+		rvu_npc_disable_promisc_entry(rvu, pcifunc, nixlf);
+
+	rvu_npc_update_rxvlan(rvu, pcifunc, nixlf);
+}
+
+void rvu_npc_disable_default_entries(struct rvu *rvu, u16 pcifunc, int nixlf)
+{
+	npc_enadis_default_entries(rvu, pcifunc, nixlf, false);
+}
+
+void rvu_npc_enable_default_entries(struct rvu *rvu, u16 pcifunc, int nixlf)
+{
+	npc_enadis_default_entries(rvu, pcifunc, nixlf, true);
+}
+
+void rvu_npc_disable_mcam_entries(struct rvu *rvu, u16 pcifunc, int nixlf)
+{
+	struct npc_mcam *mcam = &rvu->hw->mcam;
+	int blkaddr;
+
+	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+	if (blkaddr < 0)
+		return;
+
+	mutex_lock(&mcam->lock);
+
+	/* Disable and free all MCAM entries mapped to this 'pcifunc' */
+	npc_mcam_free_all_entries(rvu, mcam, blkaddr, pcifunc);
+
+	/* Free all MCAM counters mapped to this 'pcifunc' */
+	npc_mcam_free_all_counters(rvu, mcam, pcifunc);
+
+	mutex_unlock(&mcam->lock);
+
+	rvu_npc_disable_default_entries(rvu, pcifunc, nixlf);
+}
+
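+/* Helpers to program the key extraction (KEX) CSRs for layer data
+ * and layer-data flags extraction.
+ */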
+#define SET_KEX_LD(intf, lid, ltype, ld, cfg)	\
+	rvu_write64(rvu, blkaddr,			\
+		NPC_AF_INTFX_LIDX_LTX_LDX_CFG(intf, lid, ltype, ld), cfg)
+
+#define SET_KEX_LDFLAGS(intf, ld, flags, cfg)	\
+	rvu_write64(rvu, blkaddr,			\
+		NPC_AF_INTFX_LDATAX_FLAGSX_CFG(intf, ld, flags), cfg)
+
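+/* Build an extract config word: number of bytes minus one, header byte
+ * offset, extract enable, flags enable and key byte offset.
+ */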
+#define KEX_LD_CFG(bytesm1, hdr_ofs, ena, flags_ena, key_ofs)		\
+			(((bytesm1) << 16) | ((hdr_ofs) << 8) | ((ena) << 7) | \
+			 ((flags_ena) << 6) | ((key_ofs) & 0x3F))
+
+static void npc_config_ldata_extract(struct rvu *rvu, int blkaddr)
+{
+	struct npc_mcam *mcam = &rvu->hw->mcam;
+	int lid, ltype;
+	int lid_count;
+	u64 cfg;
+
+	cfg = rvu_read64(rvu, blkaddr, NPC_AF_CONST);
+	lid_count = (cfg >> 4) & 0xF;
+
+	/* First clear any existing config i.e
+	 * disable LDATA and FLAGS extraction.
+	 */
+	for (lid = 0; lid < lid_count; lid++) {
+		for (ltype = 0; ltype < 16; ltype++) {
+			SET_KEX_LD(NIX_INTF_RX, lid, ltype, 0, 0ULL);
+			SET_KEX_LD(NIX_INTF_RX, lid, ltype, 1, 0ULL);
+			SET_KEX_LD(NIX_INTF_TX, lid, ltype, 0, 0ULL);
+			SET_KEX_LD(NIX_INTF_TX, lid, ltype, 1, 0ULL);
+
+			SET_KEX_LDFLAGS(NIX_INTF_RX, 0, ltype, 0ULL);
+			SET_KEX_LDFLAGS(NIX_INTF_RX, 1, ltype, 0ULL);
+			SET_KEX_LDFLAGS(NIX_INTF_TX, 0, ltype, 0ULL);
+			SET_KEX_LDFLAGS(NIX_INTF_TX, 1, ltype, 0ULL);
+		}
+	}
+
+	if (mcam->keysize != NPC_MCAM_KEY_X2)
+		return;
+
+	/* Default MCAM KEX profile */
+	/* Layer A: Ethernet: */
+
+	/* DMAC: 6 bytes, KW1[47:0] */
+	cfg = KEX_LD_CFG(0x05, 0x0, 0x1, 0x0, NPC_PARSE_RESULT_DMAC_OFFSET);
+	SET_KEX_LD(NIX_INTF_RX, NPC_LID_LA, NPC_LT_LA_ETHER, 0, cfg);
+
+	/* Ethertype: 2 bytes, KW0[47:32] */
+	cfg = KEX_LD_CFG(0x01, 0xc, 0x1, 0x0, 0x4);
+	SET_KEX_LD(NIX_INTF_RX, NPC_LID_LA, NPC_LT_LA_ETHER, 1, cfg);
+
+	/* Layer B: Single VLAN (CTAG) */
+	/* CTAG VLAN[2..3] + Ethertype, 4 bytes, KW0[63:32] */
+	cfg = KEX_LD_CFG(0x03, 0x0, 0x1, 0x0, 0x4);
+	SET_KEX_LD(NIX_INTF_RX, NPC_LID_LB, NPC_LT_LB_CTAG, 0, cfg);
+
+	/* Layer B: Stacked VLAN (STAG|QinQ) */
+	/* CTAG VLAN[2..3] + Ethertype, 4 bytes, KW0[63:32] */
+	cfg = KEX_LD_CFG(0x03, 0x4, 0x1, 0x0, 0x4);
+	SET_KEX_LD(NIX_INTF_RX, NPC_LID_LB, NPC_LT_LB_STAG, 0, cfg);
+	SET_KEX_LD(NIX_INTF_RX, NPC_LID_LB, NPC_LT_LB_QINQ, 0, cfg);
+
+	/* Layer C: IPv4 */
+	/* SIP+DIP: 8 bytes, KW2[63:0] */
+	cfg = KEX_LD_CFG(0x07, 0xc, 0x1, 0x0, 0x10);
+	SET_KEX_LD(NIX_INTF_RX, NPC_LID_LC, NPC_LT_LC_IP, 0, cfg);
+	/* TOS: 1 byte, KW1[63:56] */
+	cfg = KEX_LD_CFG(0x0, 0x1, 0x1, 0x0, 0xf);
+	SET_KEX_LD(NIX_INTF_RX, NPC_LID_LC, NPC_LT_LC_IP, 1, cfg);
+
+	/* Layer D:UDP */
+	/* SPORT: 2 bytes, KW3[15:0] */
+	cfg = KEX_LD_CFG(0x1, 0x0, 0x1, 0x0, 0x18);
+	SET_KEX_LD(NIX_INTF_RX, NPC_LID_LD, NPC_LT_LD_UDP, 0, cfg);
+	/* DPORT: 2 bytes, KW3[31:16] */
+	cfg = KEX_LD_CFG(0x1, 0x2, 0x1, 0x0, 0x1a);
+	SET_KEX_LD(NIX_INTF_RX, NPC_LID_LD, NPC_LT_LD_UDP, 1, cfg);
+
+	/* Layer D:TCP */
+	/* SPORT: 2 bytes, KW3[15:0] */
+	cfg = KEX_LD_CFG(0x1, 0x0, 0x1, 0x0, 0x18);
+	SET_KEX_LD(NIX_INTF_RX, NPC_LID_LD, NPC_LT_LD_TCP, 0, cfg);
+	/* DPORT: 2 bytes, KW3[31:16] */
+	cfg = KEX_LD_CFG(0x1, 0x2, 0x1, 0x0, 0x1a);
+	SET_KEX_LD(NIX_INTF_RX, NPC_LID_LD, NPC_LT_LD_TCP, 1, cfg);
+}
+
+static void npc_program_mkex_profile(struct rvu *rvu, int blkaddr,
+				     struct npc_mcam_kex *mkex)
+{
+	int lid, lt, ld, fl;
+
+	rvu_write64(rvu, blkaddr, NPC_AF_INTFX_KEX_CFG(NIX_INTF_RX),
+		    mkex->keyx_cfg[NIX_INTF_RX]);
+	rvu_write64(rvu, blkaddr, NPC_AF_INTFX_KEX_CFG(NIX_INTF_TX),
+		    mkex->keyx_cfg[NIX_INTF_TX]);
+
+	for (ld = 0; ld < NPC_MAX_LD; ld++)
+		rvu_write64(rvu, blkaddr, NPC_AF_KEX_LDATAX_FLAGS_CFG(ld),
+			    mkex->kex_ld_flags[ld]);
+
+	for (lid = 0; lid < NPC_MAX_LID; lid++) {
+		for (lt = 0; lt < NPC_MAX_LT; lt++) {
+			for (ld = 0; ld < NPC_MAX_LD; ld++) {
+				SET_KEX_LD(NIX_INTF_RX, lid, lt, ld,
+					   mkex->intf_lid_lt_ld[NIX_INTF_RX]
+					   [lid][lt][ld]);
+
+				SET_KEX_LD(NIX_INTF_TX, lid, lt, ld,
+					   mkex->intf_lid_lt_ld[NIX_INTF_TX]
+					   [lid][lt][ld]);
+			}
+		}
+	}
+
+	for (ld = 0; ld < NPC_MAX_LD; ld++) {
+		for (fl = 0; fl < NPC_MAX_LFL; fl++) {
+			SET_KEX_LDFLAGS(NIX_INTF_RX, ld, fl,
+					mkex->intf_ld_flags[NIX_INTF_RX]
+					[ld][fl]);
+
+			SET_KEX_LDFLAGS(NIX_INTF_TX, ld, fl,
+					mkex->intf_ld_flags[NIX_INTF_TX]
+					[ld][fl]);
+		}
+	}
+}
+
+/* strtoull of "mkexprof" with base:36 */
+#define MKEX_SIGN      0x19bbfdbd15f
+#define MKEX_END_SIGN  0xdeadbeef
+
+static void npc_load_mkex_profile(struct rvu *rvu, int blkaddr)
+{
+	const char *mkex_profile = rvu->mkex_pfl_name;
+	struct device *dev = &rvu->pdev->dev;
+	void __iomem *mkex_prfl_addr = NULL;
+	struct npc_mcam_kex *mcam_kex;
+	u64 prfl_addr;
+	u64 prfl_sz;
+
+	/* If the user has not selected an mkex profile, use the default */
+	if (!strncmp(mkex_profile, "default", MKEX_NAME_LEN))
+		goto load_default;
+
+	if (cgx_get_mkex_prfl_info(&prfl_addr, &prfl_sz))
+		goto load_default;
+
+	if (!prfl_addr || !prfl_sz)
+		goto load_default;
+
+	mkex_prfl_addr = ioremap_wc(prfl_addr, prfl_sz);
+	if (!mkex_prfl_addr)
+		goto load_default;
+
+	mcam_kex = (struct npc_mcam_kex *)mkex_prfl_addr;
+
+	while (((s64)prfl_sz > 0) && (mcam_kex->mkex_sign != MKEX_END_SIGN)) {
+		/* Compare with mkex mod_param name string */
+		if (mcam_kex->mkex_sign == MKEX_SIGN &&
+		    !strncmp(mcam_kex->name, mkex_profile, MKEX_NAME_LEN)) {
+			/* Due to an errata (35786) in A0 pass silicon,
+			 * parse nibble enable configuration has to be
+			 * identical for both Rx and Tx interfaces.
+			 */
+			if (is_rvu_9xxx_A0(rvu) &&
+			    mcam_kex->keyx_cfg[NIX_INTF_RX] !=
+			    mcam_kex->keyx_cfg[NIX_INTF_TX])
+				goto load_default;
+
+			/* Program selected mkex profile */
+			npc_program_mkex_profile(rvu, blkaddr, mcam_kex);
+
+			goto unmap;
+		}
+
+		mcam_kex++;
+		prfl_sz -= sizeof(struct npc_mcam_kex);
+	}
+	dev_warn(dev, "Failed to load requested profile: %s\n",
+		 rvu->mkex_pfl_name);
+
+load_default:
+	dev_info(rvu->dev, "Using default mkex profile\n");
+	/* Config packet data and flags extraction into PARSE result */
+	npc_config_ldata_extract(rvu, blkaddr);
+
+unmap:
+	if (mkex_prfl_addr)
+		iounmap(mkex_prfl_addr);
+}
+
+static void npc_config_kpuaction(struct rvu *rvu, int blkaddr,
+				 struct npc_kpu_profile_action *kpuaction,
+				 int kpu, int entry, bool pkind)
+{
+	struct npc_kpu_action0 action0 = {0};
+	struct npc_kpu_action1 action1 = {0};
+	u64 reg;
+
+	action1.errlev = kpuaction->errlev;
+	action1.errcode = kpuaction->errcode;
+	action1.dp0_offset = kpuaction->dp0_offset;
+	action1.dp1_offset = kpuaction->dp1_offset;
+	action1.dp2_offset = kpuaction->dp2_offset;
+
+	if (pkind)
+		reg = NPC_AF_PKINDX_ACTION1(entry);
+	else
+		reg = NPC_AF_KPUX_ENTRYX_ACTION1(kpu, entry);
+
+	rvu_write64(rvu, blkaddr, reg, *(u64 *)&action1);
+
+	action0.byp_count = kpuaction->bypass_count;
+	action0.capture_ena = kpuaction->cap_ena;
+	action0.parse_done = kpuaction->parse_done;
+	action0.next_state = kpuaction->next_state;
+	action0.capture_lid = kpuaction->lid;
+	action0.capture_ltype = kpuaction->ltype;
+	action0.capture_flags = kpuaction->flags;
+	action0.ptr_advance = kpuaction->ptr_advance;
+	action0.var_len_offset = kpuaction->offset;
+	action0.var_len_mask = kpuaction->mask;
+	action0.var_len_right = kpuaction->right;
+	action0.var_len_shift = kpuaction->shift;
+
+	if (pkind)
+		reg = NPC_AF_PKINDX_ACTION0(entry);
+	else
+		reg = NPC_AF_KPUX_ENTRYX_ACTION0(kpu, entry);
+
+	rvu_write64(rvu, blkaddr, reg, *(u64 *)&action0);
+}
+
+static void npc_config_kpucam(struct rvu *rvu, int blkaddr,
+			      struct npc_kpu_profile_cam *kpucam,
+			      int kpu, int entry)
+{
+	struct npc_kpu_cam cam0 = {0};
+	struct npc_kpu_cam cam1 = {0};
+
+	cam1.state = kpucam->state & kpucam->state_mask;
+	cam1.dp0_data = kpucam->dp0 & kpucam->dp0_mask;
+	cam1.dp1_data = kpucam->dp1 & kpucam->dp1_mask;
+	cam1.dp2_data = kpucam->dp2 & kpucam->dp2_mask;
+
+	cam0.state = ~kpucam->state & kpucam->state_mask;
+	cam0.dp0_data = ~kpucam->dp0 & kpucam->dp0_mask;
+	cam0.dp1_data = ~kpucam->dp1 & kpucam->dp1_mask;
+	cam0.dp2_data = ~kpucam->dp2 & kpucam->dp2_mask;
+
+	rvu_write64(rvu, blkaddr,
+		    NPC_AF_KPUX_ENTRYX_CAMX(kpu, entry, 0), *(u64 *)&cam0);
+	rvu_write64(rvu, blkaddr,
+		    NPC_AF_KPUX_ENTRYX_CAMX(kpu, entry, 1), *(u64 *)&cam1);
+}
+
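+/* KPU entries are enabled by clearing their bit in the ENTRY_DIS
+ * registers, so return a mask with the low 'count' bits clear and
+ * all remaining bits set.
+ */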
+static inline u64 enable_mask(int count)
+{
+	return (((count) < 64) ? ~(BIT_ULL(count) - 1) : (0x00ULL));
+}
+
+static void npc_program_kpu_profile(struct rvu *rvu, int blkaddr, int kpu,
+				    struct npc_kpu_profile *profile)
+{
+	int entry, num_entries, max_entries;
+
+	if (profile->cam_entries != profile->action_entries) {
+		dev_err(rvu->dev,
+			"KPU%d: CAM and action entries [%d != %d] not equal\n",
+			kpu, profile->cam_entries, profile->action_entries);
+	}
+
+	max_entries = rvu_read64(rvu, blkaddr, NPC_AF_CONST1) & 0xFFF;
+
+	/* Program CAM match entries for previous KPU extracted data */
+	num_entries = min_t(int, profile->cam_entries, max_entries);
+	for (entry = 0; entry < num_entries; entry++)
+		npc_config_kpucam(rvu, blkaddr,
+				  &profile->cam[entry], kpu, entry);
+
+	/* Program this KPU's actions */
+	num_entries = min_t(int, profile->action_entries, max_entries);
+	for (entry = 0; entry < num_entries; entry++)
+		npc_config_kpuaction(rvu, blkaddr, &profile->action[entry],
+				     kpu, entry, false);
+
+	/* Enable all programmed entries */
+	num_entries = min_t(int, profile->action_entries, profile->cam_entries);
+	rvu_write64(rvu, blkaddr,
+		    NPC_AF_KPUX_ENTRY_DISX(kpu, 0), enable_mask(num_entries));
+	if (num_entries > 64) {
+		rvu_write64(rvu, blkaddr,
+			    NPC_AF_KPUX_ENTRY_DISX(kpu, 1),
+			    enable_mask(num_entries - 64));
+	}
+
+	/* Enable this KPU */
+	rvu_write64(rvu, blkaddr, NPC_AF_KPUX_CFG(kpu), 0x01);
+}
+
+static void npc_parser_profile_init(struct rvu *rvu, int blkaddr)
+{
+	struct rvu_hwinfo *hw = rvu->hw;
+	int num_pkinds, num_kpus, idx;
+	struct npc_pkind *pkind;
+
+	/* Get HW limits */
+	hw->npc_kpus = (rvu_read64(rvu, blkaddr, NPC_AF_CONST) >> 8) & 0x1F;
+
+	/* Disable all KPUs and their entries */
+	for (idx = 0; idx < hw->npc_kpus; idx++) {
+		rvu_write64(rvu, blkaddr,
+			    NPC_AF_KPUX_ENTRY_DISX(idx, 0), ~0ULL);
+		rvu_write64(rvu, blkaddr,
+			    NPC_AF_KPUX_ENTRY_DISX(idx, 1), ~0ULL);
+		rvu_write64(rvu, blkaddr, NPC_AF_KPUX_CFG(idx), 0x00);
+	}
+
+	/* First program IKPU profile i.e PKIND configs.
+	 * Check HW max count to avoid configuring junk or
+	 * writing to unsupported CSR addresses.
+	 */
+	pkind = &hw->pkind;
+	num_pkinds = ARRAY_SIZE(ikpu_action_entries);
+	num_pkinds = min_t(int, pkind->rsrc.max, num_pkinds);
+
+	for (idx = 0; idx < num_pkinds; idx++)
+		npc_config_kpuaction(rvu, blkaddr,
+				     &ikpu_action_entries[idx], 0, idx, true);
+
+	/* Program KPU CAM and Action profiles */
+	num_kpus = ARRAY_SIZE(npc_kpu_profiles);
+	num_kpus = min_t(int, hw->npc_kpus, num_kpus);
+
+	for (idx = 0; idx < num_kpus; idx++)
+		npc_program_kpu_profile(rvu, blkaddr,
+					idx, &npc_kpu_profiles[idx]);
+}
+
+static int npc_mcam_rsrcs_init(struct rvu *rvu, int blkaddr)
+{
+	int nixlf_count = rvu_get_nixlf_count(rvu);
+	struct npc_mcam *mcam = &rvu->hw->mcam;
+	int rsvd, err;
+	u64 cfg;
+
+	/* Get HW limits */
+	cfg = rvu_read64(rvu, blkaddr, NPC_AF_CONST);
+	mcam->banks = (cfg >> 44) & 0xF;
+	mcam->banksize = (cfg >> 28) & 0xFFFF;
+	mcam->counters.max = (cfg >> 48) & 0xFFFF;
+
+	/* Actual number of MCAM entries vary by entry size */
+	cfg = (rvu_read64(rvu, blkaddr,
+			  NPC_AF_INTFX_KEX_CFG(0)) >> 32) & 0x07;
+	mcam->total_entries = (mcam->banks / BIT_ULL(cfg)) * mcam->banksize;
+	mcam->keysize = cfg;
+
+	/* Number of banks combined per MCAM entry */
+	if (cfg == NPC_MCAM_KEY_X4)
+		mcam->banks_per_entry = 4;
+	else if (cfg == NPC_MCAM_KEY_X2)
+		mcam->banks_per_entry = 2;
+	else
+		mcam->banks_per_entry = 1;
+
+	/* Reserve one MCAM entry for each of the NIX LFs to
+	 * guarantee space to install a default matching DMAC rule.
+	 * Also reserve 2 MCAM entries for each PF for default
+	 * channel based matching or 'bcast & promisc' matching to
+	 * support BCAST and PROMISC modes of operation for PFs.
+	 * PF0 is excluded.
+	 */
+	rsvd = (nixlf_count * RSVD_MCAM_ENTRIES_PER_NIXLF) +
+		((rvu->hw->total_pfs - 1) * RSVD_MCAM_ENTRIES_PER_PF);
+	if (mcam->total_entries <= rsvd) {
+		dev_warn(rvu->dev,
+			 "Insufficient NPC MCAM size %d for pkt I/O, exiting\n",
+			 mcam->total_entries);
+		return -ENOMEM;
+	}
+
+	mcam->bmap_entries = mcam->total_entries - rsvd;
+	mcam->nixlf_offset = mcam->bmap_entries;
+	mcam->pf_offset = mcam->nixlf_offset + nixlf_count;
+
+	/* Allocate bitmaps for managing MCAM entries */
+	mcam->bmap = devm_kcalloc(rvu->dev, BITS_TO_LONGS(mcam->bmap_entries),
+				  sizeof(long), GFP_KERNEL);
+	if (!mcam->bmap)
+		return -ENOMEM;
+
+	mcam->bmap_reverse = devm_kcalloc(rvu->dev,
+					  BITS_TO_LONGS(mcam->bmap_entries),
+					  sizeof(long), GFP_KERNEL);
+	if (!mcam->bmap_reverse)
+		return -ENOMEM;
+
+	mcam->bmap_fcnt = mcam->bmap_entries;
+
+	/* Alloc memory for saving entry to RVU PFFUNC allocation mapping */
+	mcam->entry2pfvf_map = devm_kcalloc(rvu->dev, mcam->bmap_entries,
+					    sizeof(u16), GFP_KERNEL);
+	if (!mcam->entry2pfvf_map)
+		return -ENOMEM;
+
+	/* Reserve 1/8th of MCAM entries at the bottom for low priority
+	 * allocations and another 1/8th at the top for high priority
+	 * allocations.
+	 */
+	mcam->lprio_count = mcam->bmap_entries / 8;
+	if (mcam->lprio_count > BITS_PER_LONG)
+		mcam->lprio_count = round_down(mcam->lprio_count,
+					       BITS_PER_LONG);
+	mcam->lprio_start = mcam->bmap_entries - mcam->lprio_count;
+	mcam->hprio_count = mcam->lprio_count;
+	mcam->hprio_end = mcam->hprio_count;
+
+	/* Allocate bitmap for managing MCAM counters and memory
+	 * for saving counter to RVU PFFUNC allocation mapping.
+	 */
+	err = rvu_alloc_bitmap(&mcam->counters);
+	if (err)
+		return err;
+
+	mcam->cntr2pfvf_map = devm_kcalloc(rvu->dev, mcam->counters.max,
+					   sizeof(u16), GFP_KERNEL);
+	if (!mcam->cntr2pfvf_map)
+		goto free_mem;
+
+	/* Alloc memory for MCAM entry to counter mapping and for tracking
+	 * counter's reference count.
+	 */
+	mcam->entry2cntr_map = devm_kcalloc(rvu->dev, mcam->bmap_entries,
+					    sizeof(u16), GFP_KERNEL);
+	if (!mcam->entry2cntr_map)
+		goto free_mem;
+
+	mcam->cntr_refcnt = devm_kcalloc(rvu->dev, mcam->counters.max,
+					 sizeof(u16), GFP_KERNEL);
+	if (!mcam->cntr_refcnt)
+		goto free_mem;
+
+	mutex_init(&mcam->lock);
+
+	return 0;
+
+free_mem:
+	kfree(mcam->counters.bmap);
+	return -ENOMEM;
+}
+
+int rvu_npc_init(struct rvu *rvu)
+{
+	struct npc_pkind *pkind = &rvu->hw->pkind;
+	u64 keyz = NPC_MCAM_KEY_X2;
+	int blkaddr, entry, bank, err;
+	u64 cfg, nibble_ena;
+
+	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+	if (blkaddr < 0) {
+		dev_err(rvu->dev, "%s: NPC block not implemented\n", __func__);
+		return -ENODEV;
+	}
+
+	/* First disable all MCAM entries, to stop traffic towards NIXLFs */
+	cfg = rvu_read64(rvu, blkaddr, NPC_AF_CONST);
+	for (bank = 0; bank < ((cfg >> 44) & 0xF); bank++) {
+		for (entry = 0; entry < ((cfg >> 28) & 0xFFFF); entry++)
+			rvu_write64(rvu, blkaddr,
+				    NPC_AF_MCAMEX_BANKX_CFG(entry, bank), 0);
+	}
+
+	/* Allocate resource bitmap for pkind */
+	pkind->rsrc.max = (rvu_read64(rvu, blkaddr,
+				      NPC_AF_CONST1) >> 12) & 0xFF;
+	err = rvu_alloc_bitmap(&pkind->rsrc);
+	if (err)
+		return err;
+
+	/* Allocate mem for pkind to PF and channel mapping info */
+	pkind->pfchan_map = devm_kcalloc(rvu->dev, pkind->rsrc.max,
+					 sizeof(u32), GFP_KERNEL);
+	if (!pkind->pfchan_map)
+		return -ENOMEM;
+
+	/* Configure KPU profile */
+	npc_parser_profile_init(rvu, blkaddr);
+
+	/* Config Outer L2, IPv4's NPC layer info */
+	rvu_write64(rvu, blkaddr, NPC_AF_PCK_DEF_OL2,
+		    (NPC_LID_LA << 8) | (NPC_LT_LA_ETHER << 4) | 0x0F);
+	rvu_write64(rvu, blkaddr, NPC_AF_PCK_DEF_OIP4,
+		    (NPC_LID_LC << 8) | (NPC_LT_LC_IP << 4) | 0x0F);
+
+	/* Config Inner IPV4 NPC layer info */
+	rvu_write64(rvu, blkaddr, NPC_AF_PCK_DEF_IIP4,
+		    (NPC_LID_LF << 8) | (NPC_LT_LF_TU_IP << 4) | 0x0F);
+
+	/* Enable below for Rx pkts.
+	 * - Outer IPv4 header checksum validation.
+	 * - Detect outer L2 broadcast address and set NPC_RESULT_S[L2M].
+	 * - Inner IPv4 header checksum validation.
+	 * - Set non zero checksum error code value
+	 */
+	rvu_write64(rvu, blkaddr, NPC_AF_PCK_CFG,
+		    rvu_read64(rvu, blkaddr, NPC_AF_PCK_CFG) |
+		    BIT_ULL(32) | BIT_ULL(24) | BIT_ULL(6) |
+		    BIT_ULL(2) | BIT_ULL(1));
+
+	/* Set RX and TX side MCAM search key size.
+	 * LA..LD (ltype only) + Channel
+	 */
+	nibble_ena = 0x49247;
+	rvu_write64(rvu, blkaddr, NPC_AF_INTFX_KEX_CFG(NIX_INTF_RX),
+			((keyz & 0x3) << 32) | nibble_ena);
+	/* Due to an errata (35786) in A0 pass silicon, parse nibble enable
+	 * configuration has to be identical for both Rx and Tx interfaces.
+	 */
+	if (!is_rvu_9xxx_A0(rvu))
+		nibble_ena = (1ULL << 19) - 1;
+	rvu_write64(rvu, blkaddr, NPC_AF_INTFX_KEX_CFG(NIX_INTF_TX),
+			((keyz & 0x3) << 32) | nibble_ena);
+
+	err = npc_mcam_rsrcs_init(rvu, blkaddr);
+	if (err)
+		return err;
+
+	/* Configure MKEX profile */
+	npc_load_mkex_profile(rvu, blkaddr);
+
+	/* Set TX miss action to UCAST_DEFAULT i.e
+	 * transmit the packet on NIX LF SQ's default channel.
+	 */
+	rvu_write64(rvu, blkaddr, NPC_AF_INTFX_MISS_ACT(NIX_INTF_TX),
+		    NIX_TX_ACTIONOP_UCAST_DEFAULT);
+
+	/* If MCAM lookup doesn't result in a match, drop the received packet */
+	rvu_write64(rvu, blkaddr, NPC_AF_INTFX_MISS_ACT(NIX_INTF_RX),
+		    NIX_RX_ACTIONOP_DROP);
+
+	return 0;
+}
+
+void rvu_npc_freemem(struct rvu *rvu)
+{
+	struct npc_pkind *pkind = &rvu->hw->pkind;
+	struct npc_mcam *mcam = &rvu->hw->mcam;
+
+	kfree(pkind->rsrc.bmap);
+	kfree(mcam->counters.bmap);
+	mutex_destroy(&mcam->lock);
+}
+
+static int npc_mcam_verify_entry(struct npc_mcam *mcam,
+				 u16 pcifunc, int entry)
+{
+	/* Verify if entry is valid and if it is indeed
+	 * allocated to the requesting PFFUNC.
+	 */
+	if (entry >= mcam->bmap_entries)
+		return NPC_MCAM_INVALID_REQ;
+
+	if (pcifunc != mcam->entry2pfvf_map[entry])
+		return NPC_MCAM_PERM_DENIED;
+
+	return 0;
+}
+
+static int npc_mcam_verify_counter(struct npc_mcam *mcam,
+				   u16 pcifunc, int cntr)
+{
+	/* Verify if counter is valid and if it is indeed
+	 * allocated to the requesting PFFUNC.
+	 */
+	if (cntr >= mcam->counters.max)
+		return NPC_MCAM_INVALID_REQ;
+
+	if (pcifunc != mcam->cntr2pfvf_map[cntr])
+		return NPC_MCAM_PERM_DENIED;
+
+	return 0;
+}
+
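+/* Map an MCAM entry to a stats counter and enable hit statistics
+ * for that entry.
+ */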
+static void npc_map_mcam_entry_and_cntr(struct rvu *rvu, struct npc_mcam *mcam,
+					int blkaddr, u16 entry, u16 cntr)
+{
+	u16 index = entry & (mcam->banksize - 1);
+	u16 bank = npc_get_bank(mcam, entry);
+
+	/* Set mapping and increment counter's refcnt */
+	mcam->entry2cntr_map[entry] = cntr;
+	mcam->cntr_refcnt[cntr]++;
+	/* Enable stats */
+	rvu_write64(rvu, blkaddr,
+		    NPC_AF_MCAMEX_BANKX_STAT_ACT(index, bank),
+		    BIT_ULL(9) | cntr);
+}
+
+static void npc_unmap_mcam_entry_and_cntr(struct rvu *rvu,
+					  struct npc_mcam *mcam,
+					  int blkaddr, u16 entry, u16 cntr)
+{
+	u16 index = entry & (mcam->banksize - 1);
+	u16 bank = npc_get_bank(mcam, entry);
+
+	/* Remove mapping and reduce counter's refcnt */
+	mcam->entry2cntr_map[entry] = NPC_MCAM_INVALID_MAP;
+	mcam->cntr_refcnt[cntr]--;
+	/* Disable stats */
+	rvu_write64(rvu, blkaddr,
+		    NPC_AF_MCAMEX_BANKX_STAT_ACT(index, bank), 0x00);
+}
+
+/* Sets MCAM entry in bitmap as used. Update
+ * reverse bitmap too. Should be called with
+ * 'mcam->lock' held.
+ */
+static void npc_mcam_set_bit(struct npc_mcam *mcam, u16 index)
+{
+	u16 entry, rentry;
+
+	entry = index;
+	rentry = mcam->bmap_entries - index - 1;
+
+	__set_bit(entry, mcam->bmap);
+	__set_bit(rentry, mcam->bmap_reverse);
+	mcam->bmap_fcnt--;
+}
+
+/* Sets MCAM entry in bitmap as free. Update
+ * reverse bitmap too. Should be called with
+ * 'mcam->lock' held.
+ */
+static void npc_mcam_clear_bit(struct npc_mcam *mcam, u16 index)
+{
+	u16 entry, rentry;
+
+	entry = index;
+	rentry = mcam->bmap_entries - index - 1;
+
+	__clear_bit(entry, mcam->bmap);
+	__clear_bit(rentry, mcam->bmap_reverse);
+	mcam->bmap_fcnt++;
+}
+
+static void npc_mcam_free_all_entries(struct rvu *rvu, struct npc_mcam *mcam,
+				      int blkaddr, u16 pcifunc)
+{
+	u16 index, cntr;
+
+	/* Scan all MCAM entries and free the ones mapped to 'pcifunc' */
+	for (index = 0; index < mcam->bmap_entries; index++) {
+		if (mcam->entry2pfvf_map[index] == pcifunc) {
+			mcam->entry2pfvf_map[index] = NPC_MCAM_INVALID_MAP;
+			/* Free the entry in bitmap */
+			npc_mcam_clear_bit(mcam, index);
+			/* Disable the entry */
+			npc_enable_mcam_entry(rvu, mcam, blkaddr, index, false);
+
+			/* Update entry2counter mapping */
+			cntr = mcam->entry2cntr_map[index];
+			if (cntr != NPC_MCAM_INVALID_MAP)
+				npc_unmap_mcam_entry_and_cntr(rvu, mcam,
+							      blkaddr, index,
+							      cntr);
+		}
+	}
+}
+
+static void npc_mcam_free_all_counters(struct rvu *rvu, struct npc_mcam *mcam,
+				       u16 pcifunc)
+{
+	u16 cntr;
+
+	/* Scan all MCAM counters and free the ones mapped to 'pcifunc' */
+	for (cntr = 0; cntr < mcam->counters.max; cntr++) {
+		if (mcam->cntr2pfvf_map[cntr] == pcifunc) {
+			mcam->cntr2pfvf_map[cntr] = NPC_MCAM_INVALID_MAP;
+			mcam->cntr_refcnt[cntr] = 0;
+			rvu_free_rsrc(&mcam->counters, cntr);
+			/* This API is expected to be called after freeing
+			 * MCAM entries, which in turn removes the
+			 * 'entry to counter' mapping.
+			 * No need to do it again.
+			 */
+		}
+	}
+}
+
+/* Find area of contiguous free entries of size 'nr'.
+ * If not found return max contiguous free entries available.
+ */
+static u16 npc_mcam_find_zero_area(unsigned long *map, u16 size, u16 start,
+				   u16 nr, u16 *max_area)
+{
+	u16 max_area_start = 0;
+	u16 index, next, end;
+
+	*max_area = 0;
+
+again:
+	index = find_next_zero_bit(map, size, start);
+	if (index >= size)
+		return max_area_start;
+
+	end = ((index + nr) >= size) ? size : index + nr;
+	next = find_next_bit(map, end, index);
+	if (*max_area < (next - index)) {
+		*max_area = next - index;
+		max_area_start = index;
+	}
+
+	if (next < end) {
+		start = next + 1;
+		goto again;
+	}
+
+	return max_area_start;
+}
+
+/* Find number of free MCAM entries available
+ * within range i.e in between 'start' and 'end'.
+ */
+static u16 npc_mcam_get_free_count(unsigned long *map, u16 start, u16 end)
+{
+	u16 index, next;
+	u16 fcnt = 0;
+
+again:
+	if (start >= end)
+		return fcnt;
+
+	index = find_next_zero_bit(map, end, start);
+	if (index >= end)
+		return fcnt;
+
+	next = find_next_bit(map, end, index);
+	if (next <= end) {
+		fcnt += next - index;
+		start = next + 1;
+		goto again;
+	}
+
+	fcnt += end - index;
+	return fcnt;
+}
+
+static void
+npc_get_mcam_search_range_priority(struct npc_mcam *mcam,
+				   struct npc_mcam_alloc_entry_req *req,
+				   u16 *start, u16 *end, bool *reverse)
+{
+	u16 fcnt;
+
+	if (req->priority == NPC_MCAM_HIGHER_PRIO)
+		goto hprio;
+
+	/* For a low priority entry allocation
+	 * - If reference entry is not in hprio zone then
+	 *      search range: ref_entry to end.
+	 * - If reference entry is in hprio zone and if the
+	 *   request can be accommodated in the non-hprio zone then
+	 *      search range: 'start of middle zone' to 'end'
+	 * - else search in reverse, so that fewer hprio
+	 *   zone entries are allocated.
+	 */
+
+	*reverse = false;
+	*start = req->ref_entry + 1;
+	*end = mcam->bmap_entries;
+
+	if (req->ref_entry >= mcam->hprio_end)
+		return;
+
+	fcnt = npc_mcam_get_free_count(mcam->bmap,
+				       mcam->hprio_end, mcam->bmap_entries);
+	if (fcnt > req->count)
+		*start = mcam->hprio_end;
+	else
+		*reverse = true;
+	return;
+
+hprio:
+	/* For a high priority entry allocation, search is always
+	 * in reverse to preserve hprio zone entries.
+	 * - If reference entry is not in lprio zone then
+	 *      search range: 0 to ref_entry.
+	 * - If reference entry is in lprio zone and if the
+	 *   request can be accommodated in the middle zone then
+	 *      search range: 'hprio_end' to 'lprio_start'
+	 */
+
+	*reverse = true;
+	*start = 0;
+	*end = req->ref_entry;
+
+	if (req->ref_entry <= mcam->lprio_start)
+		return;
+
+	fcnt = npc_mcam_get_free_count(mcam->bmap,
+				       mcam->hprio_end, mcam->lprio_start);
+	if (fcnt < req->count)
+		return;
+	*start = mcam->hprio_end;
+	*end = mcam->lprio_start;
+}
+
+static int npc_mcam_alloc_entries(struct npc_mcam *mcam, u16 pcifunc,
+				  struct npc_mcam_alloc_entry_req *req,
+				  struct npc_mcam_alloc_entry_rsp *rsp)
+{
+	u16 entry_list[NPC_MAX_NONCONTIG_ENTRIES];
+	u16 fcnt, hp_fcnt, lp_fcnt;
+	u16 start, end, index;
+	int entry, next_start;
+	bool reverse = false;
+	unsigned long *bmap;
+	u16 max_contig;
+
+	mutex_lock(&mcam->lock);
+
+	/* Check if there are any free entries */
+	if (!mcam->bmap_fcnt) {
+		mutex_unlock(&mcam->lock);
+		return NPC_MCAM_ALLOC_FAILED;
+	}
+
+	/* MCAM entries are divided into high priority, middle and
+	 * low priority zones. The idea is to avoid allocating the top
+	 * and bottom most entries as far as possible, to increase the
+	 * probability of honouring priority allocation requests.
+	 *
+	 * Two bitmaps are used for mcam entry management,
+	 * mcam->bmap for forward search i.e '0 to mcam->bmap_entries'.
+	 * mcam->bmap_reverse for reverse search i.e 'mcam->bmap_entries to 0'.
+	 *
+	 * Reverse bitmap is used to allocate entries
+	 * - when a higher priority entry is requested
+	 * - when available free entries are less.
+	 * When the 'high vs low' question arises, the lower priority
+	 * ones among the available free entries are always chosen.
+	 */
+
+	/* Get the search range for priority allocation request */
+	if (req->priority) {
+		npc_get_mcam_search_range_priority(mcam, req,
+						   &start, &end, &reverse);
+		goto alloc;
+	}
+
+	/* Find out the search range for non-priority allocation request
+	 *
+	 * Get MCAM free entry count in middle zone.
+	 */
+	lp_fcnt = npc_mcam_get_free_count(mcam->bmap,
+					  mcam->lprio_start,
+					  mcam->bmap_entries);
+	hp_fcnt = npc_mcam_get_free_count(mcam->bmap, 0, mcam->hprio_end);
+	fcnt = mcam->bmap_fcnt - lp_fcnt - hp_fcnt;
+
+	/* Check if the request can be accommodated in the middle zone */
+	if (fcnt > req->count) {
+		start = mcam->hprio_end;
+		end = mcam->lprio_start;
+	} else if ((fcnt + (hp_fcnt / 2) + (lp_fcnt / 2)) > req->count) {
+		/* Expand search zone from half of hprio zone to
+		 * half of lprio zone.
+		 */
+		start = mcam->hprio_end / 2;
+		end = mcam->bmap_entries - (mcam->lprio_count / 2);
+		reverse = true;
+	} else {
+		/* Not enough free entries, search all entries in reverse,
+		 * so that low priority ones will get used up.
+		 */
+		reverse = true;
+		start = 0;
+		end = mcam->bmap_entries;
+	}
+
+alloc:
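+	/* For a reverse search, switch to the reverse bitmap and
+	 * translate 'start' and 'end' into its coordinate space.
+	 */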
+	if (reverse) {
+		bmap = mcam->bmap_reverse;
+		start = mcam->bmap_entries - start;
+		end = mcam->bmap_entries - end;
+		index = start;
+		start = end;
+		end = index;
+	} else {
+		bmap = mcam->bmap;
+	}
+
+	if (req->contig) {
+		/* Allocate requested number of contiguous entries, if
+		 * unsuccessful find max contiguous entries available.
+		 */
+		index = npc_mcam_find_zero_area(bmap, end, start,
+						req->count, &max_contig);
+		rsp->count = max_contig;
+		if (reverse)
+			rsp->entry = mcam->bmap_entries - index - max_contig;
+		else
+			rsp->entry = index;
+	} else {
+		/* Allocate requested number of non-contiguous entries,
+		 * if unsuccessful allocate as many as possible.
+		 */
+		rsp->count = 0;
+		next_start = start;
+		for (entry = 0; entry < req->count; entry++) {
+			index = find_next_zero_bit(bmap, end, next_start);
+			if (index >= end)
+				break;
+
+			next_start = start + (index - start) + 1;
+
+			/* Save the entry's index */
+			if (reverse)
+				index = mcam->bmap_entries - index - 1;
+			entry_list[entry] = index;
+			rsp->count++;
+		}
+	}
+
+	/* If allocating the requested number of entries was unsuccessful,
+	 * expand the search range to the full bitmap length and retry.
+	 */
+	if (!req->priority && (rsp->count < req->count) &&
+	    ((end - start) != mcam->bmap_entries)) {
+		reverse = true;
+		start = 0;
+		end = mcam->bmap_entries;
+		goto alloc;
+	}
+
+	/* For priority entry allocation requests, if allocation
+	 * failed then expand the search to the max possible range and retry.
+	 */
+	if (req->priority && rsp->count < req->count) {
+		if (req->priority == NPC_MCAM_LOWER_PRIO &&
+		    (start != (req->ref_entry + 1))) {
+			start = req->ref_entry + 1;
+			end = mcam->bmap_entries;
+			reverse = false;
+			goto alloc;
+		} else if ((req->priority == NPC_MCAM_HIGHER_PRIO) &&
+			   ((end - start) != req->ref_entry)) {
+			start = 0;
+			end = req->ref_entry;
+			reverse = true;
+			goto alloc;
+		}
+	}
+
+	/* Copy MCAM entry indices into mbox response entry_list.
+	 * Requester always expects indices in ascending order, so
+	 * reverse the list if the reverse bitmap was used for allocation.
+	 */
+	if (!req->contig && rsp->count) {
+		index = 0;
+		for (entry = rsp->count - 1; entry >= 0; entry--) {
+			if (reverse)
+				rsp->entry_list[index++] = entry_list[entry];
+			else
+				rsp->entry_list[entry] = entry_list[entry];
+		}
+	}
+
+	/* Mark the allocated entries as used and set nixlf mapping */
+	for (entry = 0; entry < rsp->count; entry++) {
+		index = req->contig ?
+			(rsp->entry + entry) : rsp->entry_list[entry];
+		npc_mcam_set_bit(mcam, index);
+		mcam->entry2pfvf_map[index] = pcifunc;
+		mcam->entry2cntr_map[index] = NPC_MCAM_INVALID_MAP;
+	}
+
+	/* Update available free count in mbox response */
+	rsp->free_count = mcam->bmap_fcnt;
+
+	mutex_unlock(&mcam->lock);
+	return 0;
+}
+
+int rvu_mbox_handler_npc_mcam_alloc_entry(struct rvu *rvu,
+					  struct npc_mcam_alloc_entry_req *req,
+					  struct npc_mcam_alloc_entry_rsp *rsp)
+{
+	struct npc_mcam *mcam = &rvu->hw->mcam;
+	u16 pcifunc = req->hdr.pcifunc;
+	int blkaddr;
+
+	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+	if (blkaddr < 0)
+		return NPC_MCAM_INVALID_REQ;
+
+	rsp->entry = NPC_MCAM_ENTRY_INVALID;
+	rsp->free_count = 0;
+
+	/* Check if ref_entry is within range */
+	if (req->priority && req->ref_entry >= mcam->bmap_entries)
+		return NPC_MCAM_INVALID_REQ;
+
+	/* ref_entry can't be '0' if requested priority is high.
+	 * Can't be last entry if requested priority is low.
+	 */
+	if ((!req->ref_entry && req->priority == NPC_MCAM_HIGHER_PRIO) ||
+	    ((req->ref_entry == (mcam->bmap_entries - 1)) &&
+	     req->priority == NPC_MCAM_LOWER_PRIO))
+		return NPC_MCAM_INVALID_REQ;
+
+	/* Since list of allocated indices needs to be sent to requester,
+	 * max number of non-contiguous entries per mbox msg is limited.
+	 */
+	if (!req->contig && req->count > NPC_MAX_NONCONTIG_ENTRIES)
+		return NPC_MCAM_INVALID_REQ;
+
+	/* Alloc request from PFFUNC with no NIXLF attached should be denied */
+	if (!is_nixlf_attached(rvu, pcifunc))
+		return NPC_MCAM_ALLOC_DENIED;
+
+	return npc_mcam_alloc_entries(mcam, pcifunc, req, rsp);
+}
+
+int rvu_mbox_handler_npc_mcam_free_entry(struct rvu *rvu,
+					 struct npc_mcam_free_entry_req *req,
+					 struct msg_rsp *rsp)
+{
+	struct npc_mcam *mcam = &rvu->hw->mcam;
+	u16 pcifunc = req->hdr.pcifunc;
+	int blkaddr, rc = 0;
+	u16 cntr;
+
+	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+	if (blkaddr < 0)
+		return NPC_MCAM_INVALID_REQ;
+
+	/* Free request from PFFUNC with no NIXLF attached, ignore */
+	if (!is_nixlf_attached(rvu, pcifunc))
+		return NPC_MCAM_INVALID_REQ;
+
+	mutex_lock(&mcam->lock);
+
+	if (req->all)
+		goto free_all;
+
+	rc = npc_mcam_verify_entry(mcam, pcifunc, req->entry);
+	if (rc)
+		goto exit;
+
+	mcam->entry2pfvf_map[req->entry] = 0;
+	npc_mcam_clear_bit(mcam, req->entry);
+	npc_enable_mcam_entry(rvu, mcam, blkaddr, req->entry, false);
+
+	/* Update entry2counter mapping */
+	cntr = mcam->entry2cntr_map[req->entry];
+	if (cntr != NPC_MCAM_INVALID_MAP)
+		npc_unmap_mcam_entry_and_cntr(rvu, mcam, blkaddr,
+					      req->entry, cntr);
+
+	goto exit;
+
+free_all:
+	/* Free up all entries allocated to requesting PFFUNC */
+	npc_mcam_free_all_entries(rvu, mcam, blkaddr, pcifunc);
+exit:
+	mutex_unlock(&mcam->lock);
+	return rc;
+}
+
+int rvu_mbox_handler_npc_mcam_write_entry(struct rvu *rvu,
+					  struct npc_mcam_write_entry_req *req,
+					  struct msg_rsp *rsp)
+{
+	struct npc_mcam *mcam = &rvu->hw->mcam;
+	u16 pcifunc = req->hdr.pcifunc;
+	int blkaddr, rc;
+
+	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+	if (blkaddr < 0)
+		return NPC_MCAM_INVALID_REQ;
+
+	mutex_lock(&mcam->lock);
+	rc = npc_mcam_verify_entry(mcam, pcifunc, req->entry);
+	if (rc)
+		goto exit;
+
+	if (req->set_cntr &&
+	    npc_mcam_verify_counter(mcam, pcifunc, req->cntr)) {
+		rc = NPC_MCAM_INVALID_REQ;
+		goto exit;
+	}
+
+	if (req->intf != NIX_INTF_RX && req->intf != NIX_INTF_TX) {
+		rc = NPC_MCAM_INVALID_REQ;
+		goto exit;
+	}
+
+	npc_config_mcam_entry(rvu, mcam, blkaddr, req->entry, req->intf,
+			      &req->entry_data, req->enable_entry);
+
+	if (req->set_cntr)
+		npc_map_mcam_entry_and_cntr(rvu, mcam, blkaddr,
+					    req->entry, req->cntr);
+
+	rc = 0;
+exit:
+	mutex_unlock(&mcam->lock);
+	return rc;
+}
+
+int rvu_mbox_handler_npc_mcam_ena_entry(struct rvu *rvu,
+					struct npc_mcam_ena_dis_entry_req *req,
+					struct msg_rsp *rsp)
+{
+	struct npc_mcam *mcam = &rvu->hw->mcam;
+	u16 pcifunc = req->hdr.pcifunc;
+	int blkaddr, rc;
+
+	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+	if (blkaddr < 0)
+		return NPC_MCAM_INVALID_REQ;
+
+	mutex_lock(&mcam->lock);
+	rc = npc_mcam_verify_entry(mcam, pcifunc, req->entry);
+	mutex_unlock(&mcam->lock);
+	if (rc)
+		return rc;
+
+	npc_enable_mcam_entry(rvu, mcam, blkaddr, req->entry, true);
+
+	return 0;
+}
+
+int rvu_mbox_handler_npc_mcam_dis_entry(struct rvu *rvu,
+					struct npc_mcam_ena_dis_entry_req *req,
+					struct msg_rsp *rsp)
+{
+	struct npc_mcam *mcam = &rvu->hw->mcam;
+	u16 pcifunc = req->hdr.pcifunc;
+	int blkaddr, rc;
+
+	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+	if (blkaddr < 0)
+		return NPC_MCAM_INVALID_REQ;
+
+	mutex_lock(&mcam->lock);
+	rc = npc_mcam_verify_entry(mcam, pcifunc, req->entry);
+	mutex_unlock(&mcam->lock);
+	if (rc)
+		return rc;
+
+	npc_enable_mcam_entry(rvu, mcam, blkaddr, req->entry, false);
+
+	return 0;
+}
+
+int rvu_mbox_handler_npc_mcam_shift_entry(struct rvu *rvu,
+					  struct npc_mcam_shift_entry_req *req,
+					  struct npc_mcam_shift_entry_rsp *rsp)
+{
+	struct npc_mcam *mcam = &rvu->hw->mcam;
+	u16 pcifunc = req->hdr.pcifunc;
+	u16 old_entry, new_entry;
+	u16 index, cntr;
+	int blkaddr, rc;
+
+	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+	if (blkaddr < 0)
+		return NPC_MCAM_INVALID_REQ;
+
+	if (req->shift_count > NPC_MCAM_MAX_SHIFTS)
+		return NPC_MCAM_INVALID_REQ;
+
+	mutex_lock(&mcam->lock);
+	for (index = 0; index < req->shift_count; index++) {
+		old_entry = req->curr_entry[index];
+		new_entry = req->new_entry[index];
+
+		/* Check whether both the old and new entries are valid
+		 * and belong to this PFFUNC.
+		 */
+		rc = npc_mcam_verify_entry(mcam, pcifunc, old_entry);
+		if (rc)
+			break;
+
+		rc = npc_mcam_verify_entry(mcam, pcifunc, new_entry);
+		if (rc)
+			break;
+
+		/* new_entry should not have a counter mapped */
+		if (mcam->entry2cntr_map[new_entry] != NPC_MCAM_INVALID_MAP) {
+			rc = NPC_MCAM_PERM_DENIED;
+			break;
+		}
+
+		/* Disable the new_entry */
+		npc_enable_mcam_entry(rvu, mcam, blkaddr, new_entry, false);
+
+		/* Copy rule from old entry to new entry */
+		npc_copy_mcam_entry(rvu, mcam, blkaddr, old_entry, new_entry);
+
+		/* Copy counter mapping, if any */
+		cntr = mcam->entry2cntr_map[old_entry];
+		if (cntr != NPC_MCAM_INVALID_MAP) {
+			npc_unmap_mcam_entry_and_cntr(rvu, mcam, blkaddr,
+						      old_entry, cntr);
+			npc_map_mcam_entry_and_cntr(rvu, mcam, blkaddr,
+						    new_entry, cntr);
+		}
+
+		/* Enable new_entry and disable old_entry */
+		npc_enable_mcam_entry(rvu, mcam, blkaddr, new_entry, true);
+		npc_enable_mcam_entry(rvu, mcam, blkaddr, old_entry, false);
+	}
+
+	/* If the shift failed, report the index at which it failed */
+	if (index != req->shift_count) {
+		rc = NPC_MCAM_PERM_DENIED;
+		rsp->failed_entry_idx = index;
+	}
+
+	mutex_unlock(&mcam->lock);
+	return rc;
+}
+
+int rvu_mbox_handler_npc_mcam_alloc_counter(struct rvu *rvu,
+			struct npc_mcam_alloc_counter_req *req,
+			struct npc_mcam_alloc_counter_rsp *rsp)
+{
+	struct npc_mcam *mcam = &rvu->hw->mcam;
+	u16 pcifunc = req->hdr.pcifunc;
+	u16 max_contig, cntr;
+	int blkaddr, index;
+
+	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+	if (blkaddr < 0)
+		return NPC_MCAM_INVALID_REQ;
+
+	/* If the request is from a PFFUNC with no NIXLF attached, ignore */
+	if (!is_nixlf_attached(rvu, pcifunc))
+		return NPC_MCAM_INVALID_REQ;
+
+	/* Since list of allocated counter IDs needs to be sent to requester,
+	 * max number of non-contiguous counters per mbox msg is limited.
+	 */
+	if (!req->contig && req->count > NPC_MAX_NONCONTIG_COUNTERS)
+		return NPC_MCAM_INVALID_REQ;
+
+	mutex_lock(&mcam->lock);
+
+	/* Check if unused counters are available or not */
+	if (!rvu_rsrc_free_count(&mcam->counters)) {
+		mutex_unlock(&mcam->lock);
+		return NPC_MCAM_ALLOC_FAILED;
+	}
+
+	rsp->count = 0;
+
+	if (req->contig) {
+		/* Allocate requested number of contiguous counters, if
+		 * unsuccessful find max contiguous entries available.
+		 */
+		index = npc_mcam_find_zero_area(mcam->counters.bmap,
+						mcam->counters.max, 0,
+						req->count, &max_contig);
+		rsp->count = max_contig;
+		rsp->cntr = index;
+		for (cntr = index; cntr < (index + max_contig); cntr++) {
+			__set_bit(cntr, mcam->counters.bmap);
+			mcam->cntr2pfvf_map[cntr] = pcifunc;
+		}
+	} else {
+		/* Allocate requested number of non-contiguous counters,
+		 * if unsuccessful allocate as many as possible.
+		 */
+		for (cntr = 0; cntr < req->count; cntr++) {
+			index = rvu_alloc_rsrc(&mcam->counters);
+			if (index < 0)
+				break;
+			rsp->cntr_list[cntr] = index;
+			rsp->count++;
+			mcam->cntr2pfvf_map[index] = pcifunc;
+		}
+	}
+
+	mutex_unlock(&mcam->lock);
+	return 0;
+}
+
+int rvu_mbox_handler_npc_mcam_free_counter(struct rvu *rvu,
+		struct npc_mcam_oper_counter_req *req, struct msg_rsp *rsp)
+{
+	struct npc_mcam *mcam = &rvu->hw->mcam;
+	u16 index, entry = 0;
+	int blkaddr, err;
+
+	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+	if (blkaddr < 0)
+		return NPC_MCAM_INVALID_REQ;
+
+	mutex_lock(&mcam->lock);
+	err = npc_mcam_verify_counter(mcam, req->hdr.pcifunc, req->cntr);
+	if (err) {
+		mutex_unlock(&mcam->lock);
+		return err;
+	}
+
+	/* Mark counter as free/unused */
+	mcam->cntr2pfvf_map[req->cntr] = NPC_MCAM_INVALID_MAP;
+	rvu_free_rsrc(&mcam->counters, req->cntr);
+
+	/* Disable all MCAM entry's stats which are using this counter */
+	while (entry < mcam->bmap_entries) {
+		if (!mcam->cntr_refcnt[req->cntr])
+			break;
+
+		index = find_next_bit(mcam->bmap, mcam->bmap_entries, entry);
+		if (index >= mcam->bmap_entries)
+			break;
+
+		/* Always advance past this index, otherwise a non-matching
+		 * entry would stall the loop.
+		 */
+		entry = index + 1;
+
+		if (mcam->entry2cntr_map[index] != req->cntr)
+			continue;
+
+		npc_unmap_mcam_entry_and_cntr(rvu, mcam, blkaddr,
+					      index, req->cntr);
+	}
+
+	mutex_unlock(&mcam->lock);
+	return 0;
+}
+
+int rvu_mbox_handler_npc_mcam_unmap_counter(struct rvu *rvu,
+		struct npc_mcam_unmap_counter_req *req, struct msg_rsp *rsp)
+{
+	struct npc_mcam *mcam = &rvu->hw->mcam;
+	u16 index, entry = 0;
+	int blkaddr, rc;
+
+	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+	if (blkaddr < 0)
+		return NPC_MCAM_INVALID_REQ;
+
+	mutex_lock(&mcam->lock);
+	rc = npc_mcam_verify_counter(mcam, req->hdr.pcifunc, req->cntr);
+	if (rc)
+		goto exit;
+
+	/* Unmap the MCAM entry and counter */
+	if (!req->all) {
+		rc = npc_mcam_verify_entry(mcam, req->hdr.pcifunc, req->entry);
+		if (rc)
+			goto exit;
+		npc_unmap_mcam_entry_and_cntr(rvu, mcam, blkaddr,
+					      req->entry, req->cntr);
+		goto exit;
+	}
+
+	/* Disable all MCAM entry's stats which are using this counter */
+	while (entry < mcam->bmap_entries) {
+		if (!mcam->cntr_refcnt[req->cntr])
+			break;
+
+		index = find_next_bit(mcam->bmap, mcam->bmap_entries, entry);
+		if (index >= mcam->bmap_entries)
+			break;
+
+		/* Always advance past this index, otherwise a non-matching
+		 * entry would stall the loop.
+		 */
+		entry = index + 1;
+
+		if (mcam->entry2cntr_map[index] != req->cntr)
+			continue;
+
+		npc_unmap_mcam_entry_and_cntr(rvu, mcam, blkaddr,
+					      index, req->cntr);
+	}
+exit:
+	mutex_unlock(&mcam->lock);
+	return rc;
+}
+
+int rvu_mbox_handler_npc_mcam_clear_counter(struct rvu *rvu,
+		struct npc_mcam_oper_counter_req *req, struct msg_rsp *rsp)
+{
+	struct npc_mcam *mcam = &rvu->hw->mcam;
+	int blkaddr, err;
+
+	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+	if (blkaddr < 0)
+		return NPC_MCAM_INVALID_REQ;
+
+	mutex_lock(&mcam->lock);
+	err = npc_mcam_verify_counter(mcam, req->hdr.pcifunc, req->cntr);
+	mutex_unlock(&mcam->lock);
+	if (err)
+		return err;
+
+	rvu_write64(rvu, blkaddr, NPC_AF_MATCH_STATX(req->cntr), 0x00);
+
+	return 0;
+}
+
+int rvu_mbox_handler_npc_mcam_counter_stats(struct rvu *rvu,
+			struct npc_mcam_oper_counter_req *req,
+			struct npc_mcam_oper_counter_rsp *rsp)
+{
+	struct npc_mcam *mcam = &rvu->hw->mcam;
+	int blkaddr, err;
+
+	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+	if (blkaddr < 0)
+		return NPC_MCAM_INVALID_REQ;
+
+	mutex_lock(&mcam->lock);
+	err = npc_mcam_verify_counter(mcam, req->hdr.pcifunc, req->cntr);
+	mutex_unlock(&mcam->lock);
+	if (err)
+		return err;
+
+	rsp->stat = rvu_read64(rvu, blkaddr, NPC_AF_MATCH_STATX(req->cntr));
+	rsp->stat &= BIT_ULL(48) - 1;
+
+	return 0;
+}
+
+int rvu_mbox_handler_npc_mcam_alloc_and_write_entry(struct rvu *rvu,
+			  struct npc_mcam_alloc_and_write_entry_req *req,
+			  struct npc_mcam_alloc_and_write_entry_rsp *rsp)
+{
+	struct npc_mcam_alloc_counter_req cntr_req;
+	struct npc_mcam_alloc_counter_rsp cntr_rsp;
+	struct npc_mcam_alloc_entry_req entry_req;
+	struct npc_mcam_alloc_entry_rsp entry_rsp;
+	struct npc_mcam *mcam = &rvu->hw->mcam;
+	u16 entry = NPC_MCAM_ENTRY_INVALID;
+	u16 cntr = NPC_MCAM_ENTRY_INVALID;
+	int blkaddr, rc;
+
+	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+	if (blkaddr < 0)
+		return NPC_MCAM_INVALID_REQ;
+
+	if (req->intf != NIX_INTF_RX && req->intf != NIX_INTF_TX)
+		return NPC_MCAM_INVALID_REQ;
+
+	/* Try to allocate a MCAM entry */
+	entry_req.hdr.pcifunc = req->hdr.pcifunc;
+	entry_req.contig = true;
+	entry_req.priority = req->priority;
+	entry_req.ref_entry = req->ref_entry;
+	entry_req.count = 1;
+
+	rc = rvu_mbox_handler_npc_mcam_alloc_entry(rvu,
+						   &entry_req, &entry_rsp);
+	if (rc)
+		return rc;
+
+	if (!entry_rsp.count)
+		return NPC_MCAM_ALLOC_FAILED;
+
+	entry = entry_rsp.entry;
+
+	if (!req->alloc_cntr)
+		goto write_entry;
+
+	/* Now allocate counter */
+	cntr_req.hdr.pcifunc = req->hdr.pcifunc;
+	cntr_req.contig = true;
+	cntr_req.count = 1;
+
+	rc = rvu_mbox_handler_npc_mcam_alloc_counter(rvu, &cntr_req, &cntr_rsp);
+	if (rc) {
+		/* Free allocated MCAM entry */
+		mutex_lock(&mcam->lock);
+		mcam->entry2pfvf_map[entry] = 0;
+		npc_mcam_clear_bit(mcam, entry);
+		mutex_unlock(&mcam->lock);
+		return rc;
+	}
+
+	cntr = cntr_rsp.cntr;
+
+write_entry:
+	mutex_lock(&mcam->lock);
+	npc_config_mcam_entry(rvu, mcam, blkaddr, entry, req->intf,
+			      &req->entry_data, req->enable_entry);
+
+	if (req->alloc_cntr)
+		npc_map_mcam_entry_and_cntr(rvu, mcam, blkaddr, entry, cntr);
+	mutex_unlock(&mcam->lock);
+
+	rsp->entry = entry;
+	rsp->cntr = cntr;
+
+	return 0;
+}
+
+#define GET_KEX_CFG(intf) \
+	rvu_read64(rvu, BLKADDR_NPC, NPC_AF_INTFX_KEX_CFG(intf))
+
+#define GET_KEX_FLAGS(ld) \
+	rvu_read64(rvu, BLKADDR_NPC, NPC_AF_KEX_LDATAX_FLAGS_CFG(ld))
+
+#define GET_KEX_LD(intf, lid, lt, ld)	\
+	rvu_read64(rvu, BLKADDR_NPC,	\
+		NPC_AF_INTFX_LIDX_LTX_LDX_CFG(intf, lid, lt, ld))
+
+#define GET_KEX_LDFLAGS(intf, ld, fl)	\
+	rvu_read64(rvu, BLKADDR_NPC,	\
+		NPC_AF_INTFX_LDATAX_FLAGSX_CFG(intf, ld, fl))
+
+int rvu_mbox_handler_npc_get_kex_cfg(struct rvu *rvu, struct msg_req *req,
+				     struct npc_get_kex_cfg_rsp *rsp)
+{
+	int lid, lt, ld, fl;
+
+	rsp->rx_keyx_cfg = GET_KEX_CFG(NIX_INTF_RX);
+	rsp->tx_keyx_cfg = GET_KEX_CFG(NIX_INTF_TX);
+	for (lid = 0; lid < NPC_MAX_LID; lid++) {
+		for (lt = 0; lt < NPC_MAX_LT; lt++) {
+			for (ld = 0; ld < NPC_MAX_LD; ld++) {
+				rsp->intf_lid_lt_ld[NIX_INTF_RX][lid][lt][ld] =
+					GET_KEX_LD(NIX_INTF_RX, lid, lt, ld);
+				rsp->intf_lid_lt_ld[NIX_INTF_TX][lid][lt][ld] =
+					GET_KEX_LD(NIX_INTF_TX, lid, lt, ld);
+			}
+		}
+	}
+	for (ld = 0; ld < NPC_MAX_LD; ld++)
+		rsp->kex_ld_flags[ld] = GET_KEX_FLAGS(ld);
+
+	for (ld = 0; ld < NPC_MAX_LD; ld++) {
+		for (fl = 0; fl < NPC_MAX_LFL; fl++) {
+			rsp->intf_ld_flags[NIX_INTF_RX][ld][fl] =
+					GET_KEX_LDFLAGS(NIX_INTF_RX, ld, fl);
+			rsp->intf_ld_flags[NIX_INTF_TX][ld][fl] =
+					GET_KEX_LDFLAGS(NIX_INTF_TX, ld, fl);
+		}
+	}
+	memcpy(rsp->mkex_pfl_name, rvu->mkex_pfl_name, MKEX_NAME_LEN);
+	return 0;
+}
+
+int rvu_npc_update_rxvlan(struct rvu *rvu, u16 pcifunc, int nixlf)
+{
+	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
+	struct npc_mcam *mcam = &rvu->hw->mcam;
+	int blkaddr, index;
+	bool enable;
+
+	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+	if (blkaddr < 0)
+		return NIX_AF_ERR_AF_LF_INVALID;
+
+	if (!pfvf->rxvlan)
+		return 0;
+
+	index = npc_get_nixlf_mcam_index(mcam, pcifunc, nixlf,
+					 NIXLF_UCAST_ENTRY);
+	pfvf->entry.action = npc_get_mcam_action(rvu, mcam, blkaddr, index);
+	enable = is_mcam_entry_enabled(rvu, mcam, blkaddr, index);
+	npc_config_mcam_entry(rvu, mcam, blkaddr, pfvf->rxvlan_index,
+			      NIX_INTF_RX, &pfvf->entry, enable);
+
+	return 0;
+}
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.c
new file mode 100644
index 0000000..9d7c135
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.c
@@ -0,0 +1,71 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell OcteonTx2 RVU Admin Function driver
+ *
+ * Copyright (C) 2018 Marvell International Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/pci.h>
+
+#include "rvu_struct.h"
+#include "common.h"
+#include "mbox.h"
+#include "rvu.h"
+
+struct reg_range {
+	u64  start;
+	u64  end;
+};
+
+struct hw_reg_map {
+	u8	regblk;
+	u8	num_ranges;
+	u64	mask;
+#define	 MAX_REG_RANGES	8
+	struct reg_range range[MAX_REG_RANGES];
+};
+
+static struct hw_reg_map txsch_reg_map[NIX_TXSCH_LVL_CNT] = {
+	{NIX_TXSCH_LVL_SMQ, 2, 0xFFFF, {{0x0700, 0x0708}, {0x1400, 0x14C8} } },
+	{NIX_TXSCH_LVL_TL4, 3, 0xFFFF, {{0x0B00, 0x0B08}, {0x0B10, 0x0B18},
+			      {0x1200, 0x12E0} } },
+	{NIX_TXSCH_LVL_TL3, 3, 0xFFFF, {{0x1000, 0x10E0}, {0x1600, 0x1608},
+			      {0x1610, 0x1618} } },
+	{NIX_TXSCH_LVL_TL2, 2, 0xFFFF, {{0x0E00, 0x0EE0}, {0x1700, 0x1768} } },
+	{NIX_TXSCH_LVL_TL1, 1, 0xFFFF, {{0x0C00, 0x0D98} } },
+};
+
+bool rvu_check_valid_reg(int regmap, int regblk, u64 reg)
+{
+	int idx;
+	struct hw_reg_map *map;
+
+	/* Only 64bit offsets */
+	if (reg & 0x07)
+		return false;
+
+	if (regmap == TXSCHQ_HWREGMAP) {
+		if (regblk >= NIX_TXSCH_LVL_CNT)
+			return false;
+		map = &txsch_reg_map[regblk];
+	} else {
+		return false;
+	}
+
+	/* Should never happen */
+	if (map->regblk != regblk)
+		return false;
+
+	reg &= map->mask;
+
+	for (idx = 0; idx < map->num_ranges; idx++) {
+		if (reg >= map->range[idx].start &&
+		    reg < map->range[idx].end)
+			return true;
+	}
+	return false;
+}
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h
new file mode 100644
index 0000000..1ea92a2
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h
@@ -0,0 +1,502 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*  Marvell OcteonTx2 RVU Admin Function driver
+ *
+ * Copyright (C) 2018 Marvell International Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef RVU_REG_H
+#define RVU_REG_H
+
+/* Admin function registers */
+#define RVU_AF_MSIXTR_BASE                  (0x10)
+#define RVU_AF_ECO                          (0x20)
+#define RVU_AF_BLK_RST                      (0x30)
+#define RVU_AF_PF_BAR4_ADDR                 (0x40)
+#define RVU_AF_RAS                          (0x100)
+#define RVU_AF_RAS_W1S                      (0x108)
+#define RVU_AF_RAS_ENA_W1S                  (0x110)
+#define RVU_AF_RAS_ENA_W1C                  (0x118)
+#define RVU_AF_GEN_INT                      (0x120)
+#define RVU_AF_GEN_INT_W1S                  (0x128)
+#define RVU_AF_GEN_INT_ENA_W1S              (0x130)
+#define RVU_AF_GEN_INT_ENA_W1C              (0x138)
+#define	RVU_AF_AFPF_MBOX0		    (0x02000)
+#define	RVU_AF_AFPF_MBOX1		    (0x02008)
+#define RVU_AF_AFPFX_MBOXX(a, b)            (0x2000 | (a) << 4 | (b) << 3)
+#define RVU_AF_PFME_STATUS                  (0x2800)
+#define RVU_AF_PFTRPEND                     (0x2810)
+#define RVU_AF_PFTRPEND_W1S                 (0x2820)
+#define RVU_AF_PF_RST                       (0x2840)
+#define RVU_AF_HWVF_RST                     (0x2850)
+#define RVU_AF_PFAF_MBOX_INT                (0x2880)
+#define RVU_AF_PFAF_MBOX_INT_W1S            (0x2888)
+#define RVU_AF_PFAF_MBOX_INT_ENA_W1S        (0x2890)
+#define RVU_AF_PFAF_MBOX_INT_ENA_W1C        (0x2898)
+#define RVU_AF_PFFLR_INT                    (0x28a0)
+#define RVU_AF_PFFLR_INT_W1S                (0x28a8)
+#define RVU_AF_PFFLR_INT_ENA_W1S            (0x28b0)
+#define RVU_AF_PFFLR_INT_ENA_W1C            (0x28b8)
+#define RVU_AF_PFME_INT                     (0x28c0)
+#define RVU_AF_PFME_INT_W1S                 (0x28c8)
+#define RVU_AF_PFME_INT_ENA_W1S             (0x28d0)
+#define RVU_AF_PFME_INT_ENA_W1C             (0x28d8)
+
+/* Admin function's privileged PF/VF registers */
+#define RVU_PRIV_CONST                      (0x8000000)
+#define RVU_PRIV_GEN_CFG                    (0x8000010)
+#define RVU_PRIV_CLK_CFG                    (0x8000020)
+#define RVU_PRIV_ACTIVE_PC                  (0x8000030)
+#define RVU_PRIV_PFX_CFG(a)                 (0x8000100 | (a) << 16)
+#define RVU_PRIV_PFX_MSIX_CFG(a)            (0x8000110 | (a) << 16)
+#define RVU_PRIV_PFX_ID_CFG(a)              (0x8000120 | (a) << 16)
+#define RVU_PRIV_PFX_INT_CFG(a)             (0x8000200 | (a) << 16)
+#define RVU_PRIV_PFX_NIX0_CFG               (0x8000300)
+#define RVU_PRIV_PFX_NPA_CFG		    (0x8000310)
+#define RVU_PRIV_PFX_SSO_CFG                (0x8000320)
+#define RVU_PRIV_PFX_SSOW_CFG               (0x8000330)
+#define RVU_PRIV_PFX_TIM_CFG                (0x8000340)
+#define RVU_PRIV_PFX_CPT0_CFG               (0x8000350)
+#define RVU_PRIV_BLOCK_TYPEX_REV(a)         (0x8000400 | (a) << 3)
+#define RVU_PRIV_HWVFX_INT_CFG(a)           (0x8001280 | (a) << 16)
+#define RVU_PRIV_HWVFX_NIX0_CFG             (0x8001300)
+#define RVU_PRIV_HWVFX_NPA_CFG              (0x8001310)
+#define RVU_PRIV_HWVFX_SSO_CFG              (0x8001320)
+#define RVU_PRIV_HWVFX_SSOW_CFG             (0x8001330)
+#define RVU_PRIV_HWVFX_TIM_CFG              (0x8001340)
+#define RVU_PRIV_HWVFX_CPT0_CFG             (0x8001350)
+
+/* RVU PF registers */
+#define	RVU_PF_VFX_PFVF_MBOX0		    (0x00000)
+#define	RVU_PF_VFX_PFVF_MBOX1		    (0x00008)
+#define RVU_PF_VFX_PFVF_MBOXX(a, b)         (0x0 | (a) << 12 | (b) << 3)
+#define RVU_PF_VF_BAR4_ADDR                 (0x10)
+#define RVU_PF_BLOCK_ADDRX_DISC(a)          (0x200 | (a) << 3)
+#define RVU_PF_VFME_STATUSX(a)              (0x800 | (a) << 3)
+#define RVU_PF_VFTRPENDX(a)                 (0x820 | (a) << 3)
+#define RVU_PF_VFTRPEND_W1SX(a)             (0x840 | (a) << 3)
+#define RVU_PF_VFPF_MBOX_INTX(a)            (0x880 | (a) << 3)
+#define RVU_PF_VFPF_MBOX_INT_W1SX(a)        (0x8A0 | (a) << 3)
+#define RVU_PF_VFPF_MBOX_INT_ENA_W1SX(a)    (0x8C0 | (a) << 3)
+#define RVU_PF_VFPF_MBOX_INT_ENA_W1CX(a)    (0x8E0 | (a) << 3)
+#define RVU_PF_VFFLR_INTX(a)                (0x900 | (a) << 3)
+#define RVU_PF_VFFLR_INT_W1SX(a)            (0x920 | (a) << 3)
+#define RVU_PF_VFFLR_INT_ENA_W1SX(a)        (0x940 | (a) << 3)
+#define RVU_PF_VFFLR_INT_ENA_W1CX(a)        (0x960 | (a) << 3)
+#define RVU_PF_VFME_INTX(a)                 (0x980 | (a) << 3)
+#define RVU_PF_VFME_INT_W1SX(a)             (0x9A0 | (a) << 3)
+#define RVU_PF_VFME_INT_ENA_W1SX(a)         (0x9C0 | (a) << 3)
+#define RVU_PF_VFME_INT_ENA_W1CX(a)         (0x9E0 | (a) << 3)
+#define RVU_PF_PFAF_MBOX0                   (0xC00)
+#define RVU_PF_PFAF_MBOX1                   (0xC08)
+#define RVU_PF_PFAF_MBOXX(a)                (0xC00 | (a) << 3)
+#define RVU_PF_INT                          (0xc20)
+#define RVU_PF_INT_W1S                      (0xc28)
+#define RVU_PF_INT_ENA_W1S                  (0xc30)
+#define RVU_PF_INT_ENA_W1C                  (0xc38)
+#define RVU_PF_MSIX_VECX_ADDR(a)            (0x000 | (a) << 4)
+#define RVU_PF_MSIX_VECX_CTL(a)             (0x008 | (a) << 4)
+#define RVU_PF_MSIX_PBAX(a)                 (0xF0000 | (a) << 3)
+
+/* RVU VF registers */
+#define	RVU_VF_VFPF_MBOX0		    (0x00000)
+#define	RVU_VF_VFPF_MBOX1		    (0x00008)
+
+/* NPA block's admin function registers */
+#define NPA_AF_BLK_RST                  (0x0000)
+#define NPA_AF_CONST                    (0x0010)
+#define NPA_AF_CONST1                   (0x0018)
+#define NPA_AF_LF_RST                   (0x0020)
+#define NPA_AF_GEN_CFG                  (0x0030)
+#define NPA_AF_NDC_CFG                  (0x0040)
+#define NPA_AF_INP_CTL                  (0x00D0)
+#define NPA_AF_ACTIVE_CYCLES_PC         (0x00F0)
+#define NPA_AF_AVG_DELAY                (0x0100)
+#define NPA_AF_GEN_INT                  (0x0140)
+#define NPA_AF_GEN_INT_W1S              (0x0148)
+#define NPA_AF_GEN_INT_ENA_W1S          (0x0150)
+#define NPA_AF_GEN_INT_ENA_W1C          (0x0158)
+#define NPA_AF_RVU_INT                  (0x0160)
+#define NPA_AF_RVU_INT_W1S              (0x0168)
+#define NPA_AF_RVU_INT_ENA_W1S          (0x0170)
+#define NPA_AF_RVU_INT_ENA_W1C          (0x0178)
+#define NPA_AF_ERR_INT			(0x0180)
+#define NPA_AF_ERR_INT_W1S		(0x0188)
+#define NPA_AF_ERR_INT_ENA_W1S		(0x0190)
+#define NPA_AF_ERR_INT_ENA_W1C		(0x0198)
+#define NPA_AF_RAS			(0x01A0)
+#define NPA_AF_RAS_W1S			(0x01A8)
+#define NPA_AF_RAS_ENA_W1S		(0x01B0)
+#define NPA_AF_RAS_ENA_W1C		(0x01B8)
+#define NPA_AF_BP_TEST                  (0x0200)
+#define NPA_AF_ECO                      (0x0300)
+#define NPA_AF_AQ_CFG                   (0x0600)
+#define NPA_AF_AQ_BASE                  (0x0610)
+#define NPA_AF_AQ_STATUS		(0x0620)
+#define NPA_AF_AQ_DOOR                  (0x0630)
+#define NPA_AF_AQ_DONE_WAIT             (0x0640)
+#define NPA_AF_AQ_DONE                  (0x0650)
+#define NPA_AF_AQ_DONE_ACK              (0x0660)
+#define NPA_AF_AQ_DONE_INT              (0x0680)
+#define NPA_AF_AQ_DONE_INT_W1S          (0x0688)
+#define NPA_AF_AQ_DONE_ENA_W1S          (0x0690)
+#define NPA_AF_AQ_DONE_ENA_W1C          (0x0698)
+#define NPA_AF_LFX_AURAS_CFG(a)         (0x4000 | (a) << 18)
+#define NPA_AF_LFX_LOC_AURAS_BASE(a)    (0x4010 | (a) << 18)
+#define NPA_AF_LFX_QINTS_CFG(a)         (0x4100 | (a) << 18)
+#define NPA_AF_LFX_QINTS_BASE(a)        (0x4110 | (a) << 18)
+#define NPA_PRIV_AF_INT_CFG             (0x10000)
+#define NPA_PRIV_LFX_CFG		(0x10010)
+#define NPA_PRIV_LFX_INT_CFG		(0x10020)
+#define NPA_AF_RVU_LF_CFG_DEBUG         (0x10030)
+
+/* NIX block's admin function registers */
+#define NIX_AF_CFG			(0x0000)
+#define NIX_AF_STATUS			(0x0010)
+#define NIX_AF_NDC_CFG			(0x0018)
+#define NIX_AF_CONST			(0x0020)
+#define NIX_AF_CONST1			(0x0028)
+#define NIX_AF_CONST2			(0x0030)
+#define NIX_AF_CONST3			(0x0038)
+#define NIX_AF_SQ_CONST			(0x0040)
+#define NIX_AF_CQ_CONST			(0x0048)
+#define NIX_AF_RQ_CONST			(0x0050)
+#define NIX_AF_PSE_CONST		(0x0060)
+#define NIX_AF_TL1_CONST		(0x0070)
+#define NIX_AF_TL2_CONST		(0x0078)
+#define NIX_AF_TL3_CONST		(0x0080)
+#define NIX_AF_TL4_CONST		(0x0088)
+#define NIX_AF_MDQ_CONST		(0x0090)
+#define NIX_AF_MC_MIRROR_CONST		(0x0098)
+#define NIX_AF_LSO_CFG			(0x00A8)
+#define NIX_AF_BLK_RST			(0x00B0)
+#define NIX_AF_TX_TSTMP_CFG		(0x00C0)
+#define NIX_AF_RX_CFG			(0x00D0)
+#define NIX_AF_AVG_DELAY		(0x00E0)
+#define NIX_AF_CINT_DELAY		(0x00F0)
+#define NIX_AF_RX_MCAST_BASE		(0x0100)
+#define NIX_AF_RX_MCAST_CFG		(0x0110)
+#define NIX_AF_RX_MCAST_BUF_BASE	(0x0120)
+#define NIX_AF_RX_MCAST_BUF_CFG		(0x0130)
+#define NIX_AF_RX_MIRROR_BUF_BASE	(0x0140)
+#define NIX_AF_RX_MIRROR_BUF_CFG	(0x0148)
+#define NIX_AF_LF_RST			(0x0150)
+#define NIX_AF_GEN_INT			(0x0160)
+#define NIX_AF_GEN_INT_W1S		(0x0168)
+#define NIX_AF_GEN_INT_ENA_W1S		(0x0170)
+#define NIX_AF_GEN_INT_ENA_W1C		(0x0178)
+#define NIX_AF_ERR_INT			(0x0180)
+#define NIX_AF_ERR_INT_W1S		(0x0188)
+#define NIX_AF_ERR_INT_ENA_W1S		(0x0190)
+#define NIX_AF_ERR_INT_ENA_W1C		(0x0198)
+#define NIX_AF_RAS			(0x01A0)
+#define NIX_AF_RAS_W1S			(0x01A8)
+#define NIX_AF_RAS_ENA_W1S		(0x01B0)
+#define NIX_AF_RAS_ENA_W1C		(0x01B8)
+#define NIX_AF_RVU_INT			(0x01C0)
+#define NIX_AF_RVU_INT_W1S		(0x01C8)
+#define NIX_AF_RVU_INT_ENA_W1S		(0x01D0)
+#define NIX_AF_RVU_INT_ENA_W1C		(0x01D8)
+#define NIX_AF_TCP_TIMER		(0x01E0)
+#define NIX_AF_RX_WQE_TAG_CTL		(0x01F0)
+#define NIX_AF_RX_DEF_OL2		(0x0200)
+#define NIX_AF_RX_DEF_OIP4		(0x0210)
+#define NIX_AF_RX_DEF_IIP4		(0x0220)
+#define NIX_AF_RX_DEF_OIP6		(0x0230)
+#define NIX_AF_RX_DEF_IIP6		(0x0240)
+#define NIX_AF_RX_DEF_OTCP		(0x0250)
+#define NIX_AF_RX_DEF_ITCP		(0x0260)
+#define NIX_AF_RX_DEF_OUDP		(0x0270)
+#define NIX_AF_RX_DEF_IUDP		(0x0280)
+#define NIX_AF_RX_DEF_OSCTP		(0x0290)
+#define NIX_AF_RX_DEF_ISCTP		(0x02A0)
+#define NIX_AF_RX_DEF_IPSECX		(0x02B0)
+#define NIX_AF_RX_IPSEC_GEN_CFG		(0x0300)
+#define NIX_AF_RX_CPTX_INST_ADDR	(0x0310)
+#define NIX_AF_NDC_TX_SYNC		(0x03F0)
+#define NIX_AF_AQ_CFG			(0x0400)
+#define NIX_AF_AQ_BASE			(0x0410)
+#define NIX_AF_AQ_STATUS		(0x0420)
+#define NIX_AF_AQ_DOOR			(0x0430)
+#define NIX_AF_AQ_DONE_WAIT		(0x0440)
+#define NIX_AF_AQ_DONE			(0x0450)
+#define NIX_AF_AQ_DONE_ACK		(0x0460)
+#define NIX_AF_AQ_DONE_TIMER		(0x0470)
+#define NIX_AF_AQ_DONE_INT		(0x0480)
+#define NIX_AF_AQ_DONE_INT_W1S		(0x0488)
+#define NIX_AF_AQ_DONE_ENA_W1S		(0x0490)
+#define NIX_AF_AQ_DONE_ENA_W1C		(0x0498)
+#define NIX_AF_RX_LINKX_SLX_SPKT_CNT	(0x0500)
+#define NIX_AF_RX_LINKX_SLX_SXQE_CNT	(0x0510)
+#define NIX_AF_RX_MCAST_JOBSX_SW_CNT	(0x0520)
+#define NIX_AF_RX_MIRROR_JOBSX_SW_CNT	(0x0530)
+#define NIX_AF_RX_LINKX_CFG(a)		(0x0540 | (a) << 16)
+#define NIX_AF_RX_SW_SYNC		(0x0550)
+#define NIX_AF_RX_SW_SYNC_DONE		(0x0560)
+#define NIX_AF_SEB_ECO			(0x0600)
+#define NIX_AF_SEB_TEST_BP		(0x0610)
+#define NIX_AF_NORM_TX_FIFO_STATUS	(0x0620)
+#define NIX_AF_EXPR_TX_FIFO_STATUS	(0x0630)
+#define NIX_AF_SDP_TX_FIFO_STATUS	(0x0640)
+#define NIX_AF_TX_NPC_CAPTURE_CONFIG	(0x0660)
+#define NIX_AF_TX_NPC_CAPTURE_INFO	(0x0670)
+
+#define NIX_AF_DEBUG_NPC_RESP_DATAX(a)          (0x680 | (a) << 3)
+#define NIX_AF_SMQX_CFG(a)                      (0x700 | (a) << 16)
+#define NIX_AF_PSE_CHANNEL_LEVEL                (0x800)
+#define NIX_AF_PSE_SHAPER_CFG                   (0x810)
+#define NIX_AF_TX_EXPR_CREDIT			(0x830)
+#define NIX_AF_MARK_FORMATX_CTL(a)              (0x900 | (a) << 18)
+#define NIX_AF_TX_LINKX_NORM_CREDIT(a)		(0xA00 | (a) << 16)
+#define NIX_AF_TX_LINKX_EXPR_CREDIT(a)		(0xA10 | (a) << 16)
+#define NIX_AF_TX_LINKX_SW_XOFF(a)              (0xA20 | (a) << 16)
+#define NIX_AF_TX_LINKX_HW_XOFF(a)              (0xA30 | (a) << 16)
+#define NIX_AF_SDP_LINK_CREDIT                  (0xa40)
+#define NIX_AF_SDP_SW_XOFFX(a)                  (0xA60 | (a) << 3)
+#define NIX_AF_SDP_HW_XOFFX(a)                  (0xAC0 | (a) << 3)
+#define NIX_AF_TL4X_BP_STATUS(a)                (0xB00 | (a) << 16)
+#define NIX_AF_TL4X_SDP_LINK_CFG(a)             (0xB10 | (a) << 16)
+#define NIX_AF_TL1X_SCHEDULE(a)                 (0xC00 | (a) << 16)
+#define NIX_AF_TL1X_SHAPE(a)                    (0xC10 | (a) << 16)
+#define NIX_AF_TL1X_CIR(a)                      (0xC20 | (a) << 16)
+#define NIX_AF_TL1X_SHAPE_STATE(a)              (0xC50 | (a) << 16)
+#define NIX_AF_TL1X_SW_XOFF(a)                  (0xC70 | (a) << 16)
+#define NIX_AF_TL1X_TOPOLOGY(a)                 (0xC80 | (a) << 16)
+#define NIX_AF_TL1X_GREEN(a)                    (0xC90 | (a) << 16)
+#define NIX_AF_TL1X_YELLOW(a)                   (0xCA0 | (a) << 16)
+#define NIX_AF_TL1X_RED(a)                      (0xCB0 | (a) << 16)
+#define NIX_AF_TL1X_MD_DEBUG0(a)                (0xCC0 | (a) << 16)
+#define NIX_AF_TL1X_MD_DEBUG1(a)                (0xCC8 | (a) << 16)
+#define NIX_AF_TL1X_MD_DEBUG2(a)                (0xCD0 | (a) << 16)
+#define NIX_AF_TL1X_MD_DEBUG3(a)                (0xCD8 | (a) << 16)
+#define NIX_AF_TL1A_DEBUG                       (0xce0)
+#define NIX_AF_TL1B_DEBUG                       (0xcf0)
+#define NIX_AF_TL1_DEBUG_GREEN                  (0xd00)
+#define NIX_AF_TL1_DEBUG_NODE                   (0xd10)
+#define NIX_AF_TL1X_DROPPED_PACKETS(a)          (0xD20 | (a) << 16)
+#define NIX_AF_TL1X_DROPPED_BYTES(a)            (0xD30 | (a) << 16)
+#define NIX_AF_TL1X_RED_PACKETS(a)              (0xD40 | (a) << 16)
+#define NIX_AF_TL1X_RED_BYTES(a)                (0xD50 | (a) << 16)
+#define NIX_AF_TL1X_YELLOW_PACKETS(a)           (0xD60 | (a) << 16)
+#define NIX_AF_TL1X_YELLOW_BYTES(a)             (0xD70 | (a) << 16)
+#define NIX_AF_TL1X_GREEN_PACKETS(a)            (0xD80 | (a) << 16)
+#define NIX_AF_TL1X_GREEN_BYTES(a)              (0xD90 | (a) << 16)
+#define NIX_AF_TL2X_SCHEDULE(a)                 (0xE00 | (a) << 16)
+#define NIX_AF_TL2X_SHAPE(a)                    (0xE10 | (a) << 16)
+#define NIX_AF_TL2X_CIR(a)                      (0xE20 | (a) << 16)
+#define NIX_AF_TL2X_PIR(a)                      (0xE30 | (a) << 16)
+#define NIX_AF_TL2X_SCHED_STATE(a)              (0xE40 | (a) << 16)
+#define NIX_AF_TL2X_SHAPE_STATE(a)              (0xE50 | (a) << 16)
+#define NIX_AF_TL2X_POINTERS(a)                 (0xE60 | (a) << 16)
+#define NIX_AF_TL2X_SW_XOFF(a)                  (0xE70 | (a) << 16)
+#define NIX_AF_TL2X_TOPOLOGY(a)                 (0xE80 | (a) << 16)
+#define NIX_AF_TL2X_PARENT(a)                   (0xE88 | (a) << 16)
+#define NIX_AF_TL2X_GREEN(a)                    (0xE90 | (a) << 16)
+#define NIX_AF_TL2X_YELLOW(a)                   (0xEA0 | (a) << 16)
+#define NIX_AF_TL2X_RED(a)                      (0xEB0 | (a) << 16)
+#define NIX_AF_TL2X_MD_DEBUG0(a)                (0xEC0 | (a) << 16)
+#define NIX_AF_TL2X_MD_DEBUG1(a)                (0xEC8 | (a) << 16)
+#define NIX_AF_TL2X_MD_DEBUG2(a)                (0xED0 | (a) << 16)
+#define NIX_AF_TL2X_MD_DEBUG3(a)                (0xED8 | (a) << 16)
+#define NIX_AF_TL2A_DEBUG                       (0xee0)
+#define NIX_AF_TL2B_DEBUG                       (0xef0)
+#define NIX_AF_TL3X_SCHEDULE(a)                 (0x1000 | (a) << 16)
+#define NIX_AF_TL3X_SHAPE(a)                    (0x1010 | (a) << 16)
+#define NIX_AF_TL3X_CIR(a)                      (0x1020 | (a) << 16)
+#define NIX_AF_TL3X_PIR(a)                      (0x1030 | (a) << 16)
+#define NIX_AF_TL3X_SCHED_STATE(a)              (0x1040 | (a) << 16)
+#define NIX_AF_TL3X_SHAPE_STATE(a)              (0x1050 | (a) << 16)
+#define NIX_AF_TL3X_POINTERS(a)                 (0x1060 | (a) << 16)
+#define NIX_AF_TL3X_SW_XOFF(a)                  (0x1070 | (a) << 16)
+#define NIX_AF_TL3X_TOPOLOGY(a)                 (0x1080 | (a) << 16)
+#define NIX_AF_TL3X_PARENT(a)                   (0x1088 | (a) << 16)
+#define NIX_AF_TL3X_GREEN(a)                    (0x1090 | (a) << 16)
+#define NIX_AF_TL3X_YELLOW(a)                   (0x10A0 | (a) << 16)
+#define NIX_AF_TL3X_RED(a)                      (0x10B0 | (a) << 16)
+#define NIX_AF_TL3X_MD_DEBUG0(a)                (0x10C0 | (a) << 16)
+#define NIX_AF_TL3X_MD_DEBUG1(a)                (0x10C8 | (a) << 16)
+#define NIX_AF_TL3X_MD_DEBUG2(a)                (0x10D0 | (a) << 16)
+#define NIX_AF_TL3X_MD_DEBUG3(a)                (0x10D8 | (a) << 16)
+#define NIX_AF_TL3A_DEBUG                       (0x10e0)
+#define NIX_AF_TL3B_DEBUG                       (0x10f0)
+#define NIX_AF_TL4X_SCHEDULE(a)                 (0x1200 | (a) << 16)
+#define NIX_AF_TL4X_SHAPE(a)                    (0x1210 | (a) << 16)
+#define NIX_AF_TL4X_CIR(a)                      (0x1220 | (a) << 16)
+#define NIX_AF_TL4X_PIR(a)                      (0x1230 | (a) << 16)
+#define NIX_AF_TL4X_SCHED_STATE(a)              (0x1240 | (a) << 16)
+#define NIX_AF_TL4X_SHAPE_STATE(a)              (0x1250 | (a) << 16)
+#define NIX_AF_TL4X_POINTERS(a)                 (0x1260 | (a) << 16)
+#define NIX_AF_TL4X_SW_XOFF(a)                  (0x1270 | (a) << 16)
+#define NIX_AF_TL4X_TOPOLOGY(a)                 (0x1280 | (a) << 16)
+#define NIX_AF_TL4X_PARENT(a)                   (0x1288 | (a) << 16)
+#define NIX_AF_TL4X_GREEN(a)                    (0x1290 | (a) << 16)
+#define NIX_AF_TL4X_YELLOW(a)                   (0x12A0 | (a) << 16)
+#define NIX_AF_TL4X_RED(a)                      (0x12B0 | (a) << 16)
+#define NIX_AF_TL4X_MD_DEBUG0(a)                (0x12C0 | (a) << 16)
+#define NIX_AF_TL4X_MD_DEBUG1(a)                (0x12C8 | (a) << 16)
+#define NIX_AF_TL4X_MD_DEBUG2(a)                (0x12D0 | (a) << 16)
+#define NIX_AF_TL4X_MD_DEBUG3(a)                (0x12D8 | (a) << 16)
+#define NIX_AF_TL4A_DEBUG                       (0x12e0)
+#define NIX_AF_TL4B_DEBUG                       (0x12f0)
+#define NIX_AF_MDQX_SCHEDULE(a)                 (0x1400 | (a) << 16)
+#define NIX_AF_MDQX_SHAPE(a)                    (0x1410 | (a) << 16)
+#define NIX_AF_MDQX_CIR(a)                      (0x1420 | (a) << 16)
+#define NIX_AF_MDQX_PIR(a)                      (0x1430 | (a) << 16)
+#define NIX_AF_MDQX_SCHED_STATE(a)              (0x1440 | (a) << 16)
+#define NIX_AF_MDQX_SHAPE_STATE(a)              (0x1450 | (a) << 16)
+#define NIX_AF_MDQX_POINTERS(a)                 (0x1460 | (a) << 16)
+#define NIX_AF_MDQX_SW_XOFF(a)                  (0x1470 | (a) << 16)
+#define NIX_AF_MDQX_PARENT(a)                   (0x1480 | (a) << 16)
+#define NIX_AF_MDQX_MD_DEBUG(a)                 (0x14C0 | (a) << 16)
+#define NIX_AF_MDQX_PTR_FIFO(a)                 (0x14D0 | (a) << 16)
+#define NIX_AF_MDQA_DEBUG                       (0x14e0)
+#define NIX_AF_MDQB_DEBUG                       (0x14f0)
+#define NIX_AF_TL3_TL2X_CFG(a)                  (0x1600 | (a) << 18)
+#define NIX_AF_TL3_TL2X_BP_STATUS(a)            (0x1610 | (a) << 16)
+#define NIX_AF_TL3_TL2X_LINKX_CFG(a, b)         (0x1700 | (a) << 16 | (b) << 3)
+#define NIX_AF_RX_FLOW_KEY_ALGX_FIELDX(a, b)    (0x1800 | (a) << 18 | (b) << 3)
+#define NIX_AF_TX_MCASTX(a)                     (0x1900 | (a) << 15)
+#define NIX_AF_TX_VTAG_DEFX_CTL(a)              (0x1A00 | (a) << 16)
+#define NIX_AF_TX_VTAG_DEFX_DATA(a)             (0x1A10 | (a) << 16)
+#define NIX_AF_RX_BPIDX_STATUS(a)               (0x1A20 | (a) << 17)
+#define NIX_AF_RX_CHANX_CFG(a)                  (0x1A30 | (a) << 15)
+#define NIX_AF_CINT_TIMERX(a)                   (0x1A40 | (a) << 18)
+#define NIX_AF_LSO_FORMATX_FIELDX(a, b)         (0x1B00 | (a) << 16 | (b) << 3)
+#define NIX_AF_LFX_CFG(a)		(0x4000 | (a) << 17)
+#define NIX_AF_LFX_SQS_CFG(a)		(0x4020 | (a) << 17)
+#define NIX_AF_LFX_TX_CFG2(a)		(0x4028 | (a) << 17)
+#define NIX_AF_LFX_SQS_BASE(a)		(0x4030 | (a) << 17)
+#define NIX_AF_LFX_RQS_CFG(a)		(0x4040 | (a) << 17)
+#define NIX_AF_LFX_RQS_BASE(a)		(0x4050 | (a) << 17)
+#define NIX_AF_LFX_CQS_CFG(a)		(0x4060 | (a) << 17)
+#define NIX_AF_LFX_CQS_BASE(a)		(0x4070 | (a) << 17)
+#define NIX_AF_LFX_TX_CFG(a)		(0x4080 | (a) << 17)
+#define NIX_AF_LFX_TX_PARSE_CFG(a)	(0x4090 | (a) << 17)
+#define NIX_AF_LFX_RX_CFG(a)		(0x40A0 | (a) << 17)
+#define NIX_AF_LFX_RSS_CFG(a)		(0x40C0 | (a) << 17)
+#define NIX_AF_LFX_RSS_BASE(a)		(0x40D0 | (a) << 17)
+#define NIX_AF_LFX_QINTS_CFG(a)		(0x4100 | (a) << 17)
+#define NIX_AF_LFX_QINTS_BASE(a)	(0x4110 | (a) << 17)
+#define NIX_AF_LFX_CINTS_CFG(a)		(0x4120 | (a) << 17)
+#define NIX_AF_LFX_CINTS_BASE(a)	(0x4130 | (a) << 17)
+#define NIX_AF_LFX_RX_IPSEC_CFG0(a)	(0x4140 | (a) << 17)
+#define NIX_AF_LFX_RX_IPSEC_CFG1(a)	(0x4148 | (a) << 17)
+#define NIX_AF_LFX_RX_IPSEC_DYNO_CFG(a)	(0x4150 | (a) << 17)
+#define NIX_AF_LFX_RX_IPSEC_DYNO_BASE(a)	(0x4158 | (a) << 17)
+#define NIX_AF_LFX_RX_IPSEC_SA_BASE(a)	(0x4170 | (a) << 17)
+#define NIX_AF_LFX_TX_STATUS(a)		(0x4180 | (a) << 17)
+#define NIX_AF_LFX_RX_VTAG_TYPEX(a, b)	(0x4200 | (a) << 17 | (b) << 3)
+#define NIX_AF_LFX_LOCKX(a, b)		(0x4300 | (a) << 17 | (b) << 3)
+#define NIX_AF_LFX_TX_STATX(a, b)	(0x4400 | (a) << 17 | (b) << 3)
+#define NIX_AF_LFX_RX_STATX(a, b)	(0x4500 | (a) << 17 | (b) << 3)
+#define NIX_AF_LFX_RSS_GRPX(a, b)	(0x4600 | (a) << 17 | (b) << 3)
+#define NIX_AF_RX_NPC_MC_RCV		(0x4700)
+#define NIX_AF_RX_NPC_MC_DROP		(0x4710)
+#define NIX_AF_RX_NPC_MIRROR_RCV	(0x4720)
+#define NIX_AF_RX_NPC_MIRROR_DROP	(0x4730)
+#define NIX_AF_RX_ACTIVE_CYCLES_PCX(a)	(0x4800 | (a) << 16)
+
+#define NIX_PRIV_AF_INT_CFG		(0x8000000)
+#define NIX_PRIV_LFX_CFG		(0x8000010)
+#define NIX_PRIV_LFX_INT_CFG		(0x8000020)
+#define NIX_AF_RVU_LF_CFG_DEBUG		(0x8000030)
+
+/* SSO */
+#define SSO_AF_CONST			(0x1000)
+#define SSO_AF_CONST1			(0x1008)
+#define SSO_AF_BLK_RST			(0x10f8)
+#define SSO_AF_LF_HWGRP_RST		(0x10e0)
+#define SSO_AF_RVU_LF_CFG_DEBUG		(0x3800)
+#define SSO_PRIV_LFX_HWGRP_CFG		(0x10000)
+#define SSO_PRIV_LFX_HWGRP_INT_CFG	(0x20000)
+
+/* SSOW */
+#define SSOW_AF_RVU_LF_HWS_CFG_DEBUG	(0x0010)
+#define SSOW_AF_LF_HWS_RST		(0x0030)
+#define SSOW_PRIV_LFX_HWS_CFG		(0x1000)
+#define SSOW_PRIV_LFX_HWS_INT_CFG	(0x2000)
+
+/* TIM */
+#define TIM_AF_CONST			(0x90)
+#define TIM_PRIV_LFX_CFG		(0x20000)
+#define TIM_PRIV_LFX_INT_CFG		(0x24000)
+#define TIM_AF_RVU_LF_CFG_DEBUG		(0x30000)
+#define TIM_AF_BLK_RST			(0x10)
+#define TIM_AF_LF_RST			(0x20)
+
+/* CPT */
+#define CPT_AF_CONSTANTS0		(0x0000)
+#define CPT_PRIV_LFX_CFG		(0x41000)
+#define CPT_PRIV_LFX_INT_CFG		(0x43000)
+#define CPT_AF_RVU_LF_CFG_DEBUG		(0x45000)
+#define CPT_AF_LF_RST			(0x44000)
+#define CPT_AF_BLK_RST			(0x46000)
+
+#define NDC_AF_BLK_RST                  (0x002F0)
+#define NPC_AF_BLK_RST                  (0x00040)
+
+/* NPC */
+#define NPC_AF_CFG			(0x00000)
+#define NPC_AF_ACTIVE_PC		(0x00010)
+#define NPC_AF_CONST			(0x00020)
+#define NPC_AF_CONST1			(0x00030)
+#define NPC_AF_BLK_RST			(0x00040)
+#define NPC_AF_MCAM_SCRUB_CTL		(0x000a0)
+#define NPC_AF_KCAM_SCRUB_CTL		(0x000b0)
+#define NPC_AF_KPUX_CFG(a)		(0x00500 | (a) << 3)
+#define NPC_AF_PCK_CFG			(0x00600)
+#define NPC_AF_PCK_DEF_OL2		(0x00610)
+#define NPC_AF_PCK_DEF_OIP4		(0x00620)
+#define NPC_AF_PCK_DEF_OIP6		(0x00630)
+#define NPC_AF_PCK_DEF_IIP4		(0x00640)
+#define NPC_AF_KEX_LDATAX_FLAGS_CFG(a)	(0x00800 | (a) << 3)
+#define NPC_AF_INTFX_KEX_CFG(a)		(0x01010 | (a) << 8)
+#define NPC_AF_PKINDX_ACTION0(a)	(0x80000ull | (a) << 6)
+#define NPC_AF_PKINDX_ACTION1(a)	(0x80008ull | (a) << 6)
+#define NPC_AF_PKINDX_CPI_DEFX(a, b)	(0x80020ull | (a) << 6 | (b) << 3)
+#define NPC_AF_KPUX_ENTRYX_CAMX(a, b, c) \
+		(0x100000 | (a) << 14 | (b) << 6 | (c) << 3)
+#define NPC_AF_KPUX_ENTRYX_ACTION0(a, b) \
+		(0x100020 | (a) << 14 | (b) << 6)
+#define NPC_AF_KPUX_ENTRYX_ACTION1(a, b) \
+		(0x100028 | (a) << 14 | (b) << 6)
+#define NPC_AF_KPUX_ENTRY_DISX(a, b)	(0x180000 | (a) << 6 | (b) << 3)
+#define NPC_AF_CPIX_CFG(a)		(0x200000 | (a) << 3)
+#define NPC_AF_INTFX_LIDX_LTX_LDX_CFG(a, b, c, d) \
+		(0x900000 | (a) << 16 | (b) << 12 | (c) << 5 | (d) << 3)
+#define NPC_AF_INTFX_LDATAX_FLAGSX_CFG(a, b, c) \
+		(0x980000 | (a) << 16 | (b) << 12 | (c) << 3)
+#define NPC_AF_MCAMEX_BANKX_CAMX_INTF(a, b, c)       \
+		(0x1000000ull | (a) << 10 | (b) << 6 | (c) << 3)
+#define NPC_AF_MCAMEX_BANKX_CAMX_W0(a, b, c)         \
+		(0x1000010ull | (a) << 10 | (b) << 6 | (c) << 3)
+#define NPC_AF_MCAMEX_BANKX_CAMX_W1(a, b, c)         \
+		(0x1000020ull | (a) << 10 | (b) << 6 | (c) << 3)
+#define NPC_AF_MCAMEX_BANKX_CFG(a, b)	 (0x1800000ull | (a) << 8 | (b) << 4)
+#define NPC_AF_MCAMEX_BANKX_STAT_ACT(a, b) \
+		(0x1880000 | (a) << 8 | (b) << 4)
+#define NPC_AF_MATCH_STATX(a)		(0x1880008 | (a) << 8)
+#define NPC_AF_INTFX_MISS_STAT_ACT(a)	(0x1880040 + (a) * 0x8)
+#define NPC_AF_MCAMEX_BANKX_ACTION(a, b) (0x1900000ull | (a) << 8 | (b) << 4)
+#define NPC_AF_MCAMEX_BANKX_TAG_ACT(a, b) \
+		(0x1900008 | (a) << 8 | (b) << 4)
+#define NPC_AF_INTFX_MISS_ACT(a)	(0x1a00000 | (a) << 4)
+#define NPC_AF_INTFX_MISS_TAG_ACT(a)	(0x1b00008 | (a) << 4)
+#define NPC_AF_MCAM_BANKX_HITX(a, b)	(0x1c80000 | (a) << 8 | (b) << 4)
+#define NPC_AF_LKUP_CTL			(0x2000000)
+#define NPC_AF_LKUP_DATAX(a)		(0x2000200 | (a) << 4)
+#define NPC_AF_LKUP_RESULTX(a)		(0x2000400 | (a) << 4)
+#define NPC_AF_INTFX_STAT(a)		(0x2000800 | (a) << 4)
+#define NPC_AF_DBG_CTL			(0x3000000)
+#define NPC_AF_DBG_STATUS		(0x3000010)
+#define NPC_AF_KPUX_DBG(a)		(0x3000020 | (a) << 8)
+#define NPC_AF_IKPU_ERR_CTL		(0x3000080)
+#define NPC_AF_KPUX_ERR_CTL(a)		(0x30000a0 | (a) << 8)
+#define NPC_AF_MCAM_DBG			(0x3001000)
+#define NPC_AF_DBG_DATAX(a)		(0x3001400 | (a) << 4)
+#define NPC_AF_DBG_RESULTX(a)		(0x3001800 | (a) << 4)
+
+#endif /* RVU_REG_H */
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h
new file mode 100644
index 0000000..84a3906
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h
@@ -0,0 +1,917 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*  Marvell OcteonTx2 RVU Admin Function driver
+ *
+ * Copyright (C) 2018 Marvell International Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef RVU_STRUCT_H
+#define RVU_STRUCT_H
+
+/* RVU Block Address Enumeration */
+enum rvu_block_addr_e {
+	BLKADDR_RVUM    = 0x0ULL,
+	BLKADDR_LMT     = 0x1ULL,
+	BLKADDR_MSIX    = 0x2ULL,
+	BLKADDR_NPA     = 0x3ULL,
+	BLKADDR_NIX0    = 0x4ULL,
+	BLKADDR_NIX1    = 0x5ULL,
+	BLKADDR_NPC     = 0x6ULL,
+	BLKADDR_SSO     = 0x7ULL,
+	BLKADDR_SSOW    = 0x8ULL,
+	BLKADDR_TIM     = 0x9ULL,
+	BLKADDR_CPT0    = 0xaULL,
+	BLKADDR_CPT1    = 0xbULL,
+	BLKADDR_NDC0    = 0xcULL,
+	BLKADDR_NDC1    = 0xdULL,
+	BLKADDR_NDC2    = 0xeULL,
+	BLK_COUNT	= 0xfULL,
+};
+
+/* RVU Block Type Enumeration */
+enum rvu_block_type_e {
+	BLKTYPE_RVUM = 0x0,
+	BLKTYPE_MSIX = 0x1,
+	BLKTYPE_LMT  = 0x2,
+	BLKTYPE_NIX  = 0x3,
+	BLKTYPE_NPA  = 0x4,
+	BLKTYPE_NPC  = 0x5,
+	BLKTYPE_SSO  = 0x6,
+	BLKTYPE_SSOW = 0x7,
+	BLKTYPE_TIM  = 0x8,
+	BLKTYPE_CPT  = 0x9,
+	BLKTYPE_NDC  = 0xa,
+	BLKTYPE_MAX  = 0xa,
+};
+
+/* RVU Admin function Interrupt Vector Enumeration */
+enum rvu_af_int_vec_e {
+	RVU_AF_INT_VEC_POISON = 0x0,
+	RVU_AF_INT_VEC_PFFLR  = 0x1,
+	RVU_AF_INT_VEC_PFME   = 0x2,
+	RVU_AF_INT_VEC_GEN    = 0x3,
+	RVU_AF_INT_VEC_MBOX   = 0x4,
+	RVU_AF_INT_VEC_CNT    = 0x5,
+};
+
+/**
+ * RVU PF Interrupt Vector Enumeration
+ */
+enum rvu_pf_int_vec_e {
+	RVU_PF_INT_VEC_VFFLR0     = 0x0,
+	RVU_PF_INT_VEC_VFFLR1     = 0x1,
+	RVU_PF_INT_VEC_VFME0      = 0x2,
+	RVU_PF_INT_VEC_VFME1      = 0x3,
+	RVU_PF_INT_VEC_VFPF_MBOX0 = 0x4,
+	RVU_PF_INT_VEC_VFPF_MBOX1 = 0x5,
+	RVU_PF_INT_VEC_AFPF_MBOX  = 0x6,
+	RVU_PF_INT_VEC_CNT	  = 0x7,
+};
+
+/* NPA admin queue completion enumeration */
+enum npa_aq_comp {
+	NPA_AQ_COMP_NOTDONE    = 0x0,
+	NPA_AQ_COMP_GOOD       = 0x1,
+	NPA_AQ_COMP_SWERR      = 0x2,
+	NPA_AQ_COMP_CTX_POISON = 0x3,
+	NPA_AQ_COMP_CTX_FAULT  = 0x4,
+	NPA_AQ_COMP_LOCKERR    = 0x5,
+};
+
+/* NPA admin queue context types */
+enum npa_aq_ctype {
+	NPA_AQ_CTYPE_AURA = 0x0,
+	NPA_AQ_CTYPE_POOL = 0x1,
+};
+
+/* NPA admin queue instruction opcodes */
+enum npa_aq_instop {
+	NPA_AQ_INSTOP_NOP    = 0x0,
+	NPA_AQ_INSTOP_INIT   = 0x1,
+	NPA_AQ_INSTOP_WRITE  = 0x2,
+	NPA_AQ_INSTOP_READ   = 0x3,
+	NPA_AQ_INSTOP_LOCK   = 0x4,
+	NPA_AQ_INSTOP_UNLOCK = 0x5,
+};
+
+/* NPA admin queue instruction structure */
+struct npa_aq_inst_s {
+#if defined(__BIG_ENDIAN_BITFIELD)
+	u64 doneint               : 1;	/* W0 */
+	u64 reserved_44_62        : 19;
+	u64 cindex                : 20;
+	u64 reserved_17_23        : 7;
+	u64 lf                    : 9;
+	u64 ctype                 : 4;
+	u64 op                    : 4;
+#else
+	u64 op                    : 4;
+	u64 ctype                 : 4;
+	u64 lf                    : 9;
+	u64 reserved_17_23        : 7;
+	u64 cindex                : 20;
+	u64 reserved_44_62        : 19;
+	u64 doneint               : 1;
+#endif
+	u64 res_addr;			/* W1 */
+};
+
+/* NPA admin queue result structure */
+struct npa_aq_res_s {
+#if defined(__BIG_ENDIAN_BITFIELD)
+	u64 reserved_17_63        : 47; /* W0 */
+	u64 doneint               : 1;
+	u64 compcode              : 8;
+	u64 ctype                 : 4;
+	u64 op                    : 4;
+#else
+	u64 op                    : 4;
+	u64 ctype                 : 4;
+	u64 compcode              : 8;
+	u64 doneint               : 1;
+	u64 reserved_17_63        : 47;
+#endif
+	u64 reserved_64_127;		/* W1 */
+};
+
+struct npa_aura_s {
+	u64 pool_addr;			/* W0 */
+#if defined(__BIG_ENDIAN_BITFIELD)	/* W1 */
+	u64 avg_level             : 8;
+	u64 reserved_118_119      : 2;
+	u64 shift                 : 6;
+	u64 aura_drop             : 8;
+	u64 reserved_98_103       : 6;
+	u64 bp_ena                : 2;
+	u64 aura_drop_ena         : 1;
+	u64 pool_drop_ena         : 1;
+	u64 reserved_93           : 1;
+	u64 avg_con               : 9;
+	u64 pool_way_mask         : 16;
+	u64 pool_caching          : 1;
+	u64 reserved_65           : 2;
+	u64 ena                   : 1;
+#else
+	u64 ena                   : 1;
+	u64 reserved_65           : 2;
+	u64 pool_caching          : 1;
+	u64 pool_way_mask         : 16;
+	u64 avg_con               : 9;
+	u64 reserved_93           : 1;
+	u64 pool_drop_ena         : 1;
+	u64 aura_drop_ena         : 1;
+	u64 bp_ena                : 2;
+	u64 reserved_98_103       : 6;
+	u64 aura_drop             : 8;
+	u64 shift                 : 6;
+	u64 reserved_118_119      : 2;
+	u64 avg_level             : 8;
+#endif
+#if defined(__BIG_ENDIAN_BITFIELD)	/* W2 */
+	u64 reserved_189_191      : 3;
+	u64 nix1_bpid             : 9;
+	u64 reserved_177_179      : 3;
+	u64 nix0_bpid             : 9;
+	u64 reserved_164_167      : 4;
+	u64 count                 : 36;
+#else
+	u64 count                 : 36;
+	u64 reserved_164_167      : 4;
+	u64 nix0_bpid             : 9;
+	u64 reserved_177_179      : 3;
+	u64 nix1_bpid             : 9;
+	u64 reserved_189_191      : 3;
+#endif
+#if defined(__BIG_ENDIAN_BITFIELD)	/* W3 */
+	u64 reserved_252_255      : 4;
+	u64 fc_hyst_bits          : 4;
+	u64 fc_stype              : 2;
+	u64 fc_up_crossing        : 1;
+	u64 fc_ena                : 1;
+	u64 reserved_240_243      : 4;
+	u64 bp                    : 8;
+	u64 reserved_228_231      : 4;
+	u64 limit                 : 36;
+#else
+	u64 limit                 : 36;
+	u64 reserved_228_231      : 4;
+	u64 bp                    : 8;
+	u64 reserved_240_243      : 4;
+	u64 fc_ena                : 1;
+	u64 fc_up_crossing        : 1;
+	u64 fc_stype              : 2;
+	u64 fc_hyst_bits          : 4;
+	u64 reserved_252_255      : 4;
+#endif
+	u64 fc_addr;			/* W4 */
+#if defined(__BIG_ENDIAN_BITFIELD)	/* W5 */
+	u64 reserved_379_383      : 5;
+	u64 err_qint_idx          : 7;
+	u64 reserved_371          : 1;
+	u64 thresh_qint_idx       : 7;
+	u64 reserved_363          : 1;
+	u64 thresh_up             : 1;
+	u64 thresh_int_ena        : 1;
+	u64 thresh_int            : 1;
+	u64 err_int_ena           : 8;
+	u64 err_int               : 8;
+	u64 update_time           : 16;
+	u64 pool_drop             : 8;
+#else
+	u64 pool_drop             : 8;
+	u64 update_time           : 16;
+	u64 err_int               : 8;
+	u64 err_int_ena           : 8;
+	u64 thresh_int            : 1;
+	u64 thresh_int_ena        : 1;
+	u64 thresh_up             : 1;
+	u64 reserved_363          : 1;
+	u64 thresh_qint_idx       : 7;
+	u64 reserved_371          : 1;
+	u64 err_qint_idx          : 7;
+	u64 reserved_379_383      : 5;
+#endif
+#if defined(__BIG_ENDIAN_BITFIELD)	/* W6 */
+	u64 reserved_420_447      : 28;
+	u64 thresh                : 36;
+#else
+	u64 thresh                : 36;
+	u64 reserved_420_447      : 28;
+#endif
+	u64 reserved_448_511;		/* W7 */
+};
+
+struct npa_pool_s {
+	u64 stack_base;			/* W0 */
+#if defined(__BIG_ENDIAN_BITFIELD)	/* W1 */
+	u64 reserved_115_127      : 13;
+	u64 buf_size              : 11;
+	u64 reserved_100_103      : 4;
+	u64 buf_offset            : 12;
+	u64 stack_way_mask        : 16;
+	u64 reserved_70_71        : 3;
+	u64 stack_caching         : 1;
+	u64 reserved_66_67        : 2;
+	u64 nat_align             : 1;
+	u64 ena                   : 1;
+#else
+	u64 ena                   : 1;
+	u64 nat_align             : 1;
+	u64 reserved_66_67        : 2;
+	u64 stack_caching         : 1;
+	u64 reserved_70_71        : 3;
+	u64 stack_way_mask        : 16;
+	u64 buf_offset            : 12;
+	u64 reserved_100_103      : 4;
+	u64 buf_size              : 11;
+	u64 reserved_115_127      : 13;
+#endif
+#if defined(__BIG_ENDIAN_BITFIELD)	/* W2 */
+	u64 stack_pages           : 32;
+	u64 stack_max_pages       : 32;
+#else
+	u64 stack_max_pages       : 32;
+	u64 stack_pages           : 32;
+#endif
+#if defined(__BIG_ENDIAN_BITFIELD)	/* W3 */
+	u64 reserved_240_255      : 16;
+	u64 op_pc                 : 48;
+#else
+	u64 op_pc                 : 48;
+	u64 reserved_240_255      : 16;
+#endif
+#if defined(__BIG_ENDIAN_BITFIELD)	/* W4 */
+	u64 reserved_316_319      : 4;
+	u64 update_time           : 16;
+	u64 reserved_297_299      : 3;
+	u64 fc_up_crossing        : 1;
+	u64 fc_hyst_bits          : 4;
+	u64 fc_stype              : 2;
+	u64 fc_ena                : 1;
+	u64 avg_con               : 9;
+	u64 avg_level             : 8;
+	u64 reserved_270_271      : 2;
+	u64 shift                 : 6;
+	u64 reserved_260_263      : 4;
+	u64 stack_offset          : 4;
+#else
+	u64 stack_offset          : 4;
+	u64 reserved_260_263      : 4;
+	u64 shift                 : 6;
+	u64 reserved_270_271      : 2;
+	u64 avg_level             : 8;
+	u64 avg_con               : 9;
+	u64 fc_ena                : 1;
+	u64 fc_stype              : 2;
+	u64 fc_hyst_bits          : 4;
+	u64 fc_up_crossing        : 1;
+	u64 reserved_297_299      : 3;
+	u64 update_time           : 16;
+	u64 reserved_316_319      : 4;
+#endif
+	u64 fc_addr;			/* W5 */
+	u64 ptr_start;			/* W6 */
+	u64 ptr_end;			/* W7 */
+#if defined(__BIG_ENDIAN_BITFIELD)	/* W8 */
+	u64 reserved_571_575      : 5;
+	u64 err_qint_idx          : 7;
+	u64 reserved_563          : 1;
+	u64 thresh_qint_idx       : 7;
+	u64 reserved_555          : 1;
+	u64 thresh_up             : 1;
+	u64 thresh_int_ena        : 1;
+	u64 thresh_int            : 1;
+	u64 err_int_ena           : 8;
+	u64 err_int               : 8;
+	u64 reserved_512_535      : 24;
+#else
+	u64 reserved_512_535      : 24;
+	u64 err_int               : 8;
+	u64 err_int_ena           : 8;
+	u64 thresh_int            : 1;
+	u64 thresh_int_ena        : 1;
+	u64 thresh_up             : 1;
+	u64 reserved_555          : 1;
+	u64 thresh_qint_idx       : 7;
+	u64 reserved_563          : 1;
+	u64 err_qint_idx          : 7;
+	u64 reserved_571_575      : 5;
+#endif
+#if defined(__BIG_ENDIAN_BITFIELD)	/* W9 */
+	u64 reserved_612_639      : 28;
+	u64 thresh                : 36;
+#else
+	u64 thresh                : 36;
+	u64 reserved_612_639      : 28;
+#endif
+	u64 reserved_640_703;		/* W10 */
+	u64 reserved_704_767;		/* W11 */
+	u64 reserved_768_831;		/* W12 */
+	u64 reserved_832_895;		/* W13 */
+	u64 reserved_896_959;		/* W14 */
+	u64 reserved_960_1023;		/* W15 */
+};
+
+/* NIX admin queue completion status */
+enum nix_aq_comp {
+	NIX_AQ_COMP_NOTDONE        = 0x0,
+	NIX_AQ_COMP_GOOD           = 0x1,
+	NIX_AQ_COMP_SWERR          = 0x2,
+	NIX_AQ_COMP_CTX_POISON     = 0x3,
+	NIX_AQ_COMP_CTX_FAULT      = 0x4,
+	NIX_AQ_COMP_LOCKERR        = 0x5,
+	NIX_AQ_COMP_SQB_ALLOC_FAIL = 0x6,
+};
+
+/* NIX admin queue context types */
+enum nix_aq_ctype {
+	NIX_AQ_CTYPE_RQ   = 0x0,
+	NIX_AQ_CTYPE_SQ   = 0x1,
+	NIX_AQ_CTYPE_CQ   = 0x2,
+	NIX_AQ_CTYPE_MCE  = 0x3,
+	NIX_AQ_CTYPE_RSS  = 0x4,
+	NIX_AQ_CTYPE_DYNO = 0x5,
+};
+
+/* NIX admin queue instruction opcodes */
+enum nix_aq_instop {
+	NIX_AQ_INSTOP_NOP    = 0x0,
+	NIX_AQ_INSTOP_INIT   = 0x1,
+	NIX_AQ_INSTOP_WRITE  = 0x2,
+	NIX_AQ_INSTOP_READ   = 0x3,
+	NIX_AQ_INSTOP_LOCK   = 0x4,
+	NIX_AQ_INSTOP_UNLOCK = 0x5,
+};
+
+/* NIX admin queue instruction structure */
+struct nix_aq_inst_s {
+#if defined(__BIG_ENDIAN_BITFIELD)
+	u64 doneint		: 1;	/* W0 */
+	u64 reserved_44_62	: 19;
+	u64 cindex		: 20;
+	u64 reserved_15_23	: 9;
+	u64 lf			: 7;
+	u64 ctype		: 4;
+	u64 op			: 4;
+#else
+	u64 op			: 4;
+	u64 ctype		: 4;
+	u64 lf			: 7;
+	u64 reserved_15_23	: 9;
+	u64 cindex		: 20;
+	u64 reserved_44_62	: 19;
+	u64 doneint		: 1;
+#endif
+	u64 res_addr;			/* W1 */
+};
+
+/* NIX admin queue result structure */
+struct nix_aq_res_s {
+#if defined(__BIG_ENDIAN_BITFIELD)
+	u64 reserved_17_63	: 47;	/* W0 */
+	u64 doneint		: 1;
+	u64 compcode		: 8;
+	u64 ctype		: 4;
+	u64 op			: 4;
+#else
+	u64 op			: 4;
+	u64 ctype		: 4;
+	u64 compcode		: 8;
+	u64 doneint		: 1;
+	u64 reserved_17_63	: 47;
+#endif
+	u64 reserved_64_127;		/* W1 */
+};
+
+/* NIX Completion queue context structure */
+struct nix_cq_ctx_s {
+	u64 base;
+#if defined(__BIG_ENDIAN_BITFIELD)	/* W1 */
+	u64 wrptr		: 20;
+	u64 avg_con		: 9;
+	u64 cint_idx		: 7;
+	u64 cq_err		: 1;
+	u64 qint_idx		: 7;
+	u64 rsvd_81_83		: 3;
+	u64 bpid		: 9;
+	u64 rsvd_69_71		: 3;
+	u64 bp_ena		: 1;
+	u64 rsvd_64_67		: 4;
+#else
+	u64 rsvd_64_67		: 4;
+	u64 bp_ena		: 1;
+	u64 rsvd_69_71		: 3;
+	u64 bpid		: 9;
+	u64 rsvd_81_83		: 3;
+	u64 qint_idx		: 7;
+	u64 cq_err		: 1;
+	u64 cint_idx		: 7;
+	u64 avg_con		: 9;
+	u64 wrptr		: 20;
+#endif
+#if defined(__BIG_ENDIAN_BITFIELD)  /* W2 */
+	u64 update_time		: 16;
+	u64 avg_level		: 8;
+	u64 head		: 20;
+	u64 tail		: 20;
+#else
+	u64 tail		: 20;
+	u64 head		: 20;
+	u64 avg_level		: 8;
+	u64 update_time		: 16;
+#endif
+#if defined(__BIG_ENDIAN_BITFIELD)  /* W3 */
+	u64 cq_err_int_ena	: 8;
+	u64 cq_err_int		: 8;
+	u64 qsize		: 4;
+	u64 rsvd_233_235	: 3;
+	u64 caching		: 1;
+	u64 substream		: 20;
+	u64 rsvd_210_211	: 2;
+	u64 ena			: 1;
+	u64 drop_ena		: 1;
+	u64 drop		: 8;
+	u64 dp			: 8;
+#else
+	u64 dp			: 8;
+	u64 drop		: 8;
+	u64 drop_ena		: 1;
+	u64 ena			: 1;
+	u64 rsvd_210_211	: 2;
+	u64 substream		: 20;
+	u64 caching		: 1;
+	u64 rsvd_233_235	: 3;
+	u64 qsize		: 4;
+	u64 cq_err_int		: 8;
+	u64 cq_err_int_ena	: 8;
+#endif
+};
+
+/* NIX Receive queue context structure */
+struct nix_rq_ctx_s {
+#if defined(__BIG_ENDIAN_BITFIELD)  /* W0 */
+	u64 wqe_aura      : 20;
+	u64 substream     : 20;
+	u64 cq            : 20;
+	u64 ena_wqwd      : 1;
+	u64 ipsech_ena    : 1;
+	u64 sso_ena       : 1;
+	u64 ena           : 1;
+#else
+	u64 ena           : 1;
+	u64 sso_ena       : 1;
+	u64 ipsech_ena    : 1;
+	u64 ena_wqwd      : 1;
+	u64 cq            : 20;
+	u64 substream     : 20;
+	u64 wqe_aura      : 20;
+#endif
+#if defined(__BIG_ENDIAN_BITFIELD)  /* W1 */
+	u64 rsvd_127_122  : 6;
+	u64 lpb_drop_ena  : 1;
+	u64 spb_drop_ena  : 1;
+	u64 xqe_drop_ena  : 1;
+	u64 wqe_caching   : 1;
+	u64 pb_caching    : 2;
+	u64 sso_tt        : 2;
+	u64 sso_grp       : 10;
+	u64 lpb_aura      : 20;
+	u64 spb_aura      : 20;
+#else
+	u64 spb_aura      : 20;
+	u64 lpb_aura      : 20;
+	u64 sso_grp       : 10;
+	u64 sso_tt        : 2;
+	u64 pb_caching    : 2;
+	u64 wqe_caching   : 1;
+	u64 xqe_drop_ena  : 1;
+	u64 spb_drop_ena  : 1;
+	u64 lpb_drop_ena  : 1;
+	u64 rsvd_127_122  : 6;
+#endif
+#if defined(__BIG_ENDIAN_BITFIELD)  /* W2 */
+	u64 xqe_hdr_split : 1;
+	u64 xqe_imm_copy  : 1;
+	u64 rsvd_189_184  : 6;
+	u64 xqe_imm_size  : 6;
+	u64 later_skip    : 6;
+	u64 rsvd_171      : 1;
+	u64 first_skip    : 7;
+	u64 lpb_sizem1    : 12;
+	u64 spb_ena       : 1;
+	u64 rsvd_150_148  : 3;
+	u64 wqe_skip      : 2;
+	u64 spb_sizem1    : 6;
+	u64 rsvd_139_128  : 12;
+#else
+	u64 rsvd_139_128  : 12;
+	u64 spb_sizem1    : 6;
+	u64 wqe_skip      : 2;
+	u64 rsvd_150_148  : 3;
+	u64 spb_ena       : 1;
+	u64 lpb_sizem1    : 12;
+	u64 first_skip    : 7;
+	u64 rsvd_171      : 1;
+	u64 later_skip    : 6;
+	u64 xqe_imm_size  : 6;
+	u64 rsvd_189_184  : 6;
+	u64 xqe_imm_copy  : 1;
+	u64 xqe_hdr_split : 1;
+#endif
+#if defined(__BIG_ENDIAN_BITFIELD)  /* W3 */
+	u64 spb_pool_pass : 8;
+	u64 spb_pool_drop : 8;
+	u64 spb_aura_pass : 8;
+	u64 spb_aura_drop : 8;
+	u64 wqe_pool_pass : 8;
+	u64 wqe_pool_drop : 8;
+	u64 xqe_pass      : 8;
+	u64 xqe_drop      : 8;
+#else
+	u64 xqe_drop      : 8;
+	u64 xqe_pass      : 8;
+	u64 wqe_pool_drop : 8;
+	u64 wqe_pool_pass : 8;
+	u64 spb_aura_drop : 8;
+	u64 spb_aura_pass : 8;
+	u64 spb_pool_drop : 8;
+	u64 spb_pool_pass : 8;
+#endif
+#if defined(__BIG_ENDIAN_BITFIELD)  /* W4 */
+	u64 rsvd_319_315  : 5;
+	u64 qint_idx      : 7;
+	u64 rq_int_ena    : 8;
+	u64 rq_int        : 8;
+	u64 rsvd_291_288  : 4;
+	u64 lpb_pool_pass : 8;
+	u64 lpb_pool_drop : 8;
+	u64 lpb_aura_pass : 8;
+	u64 lpb_aura_drop : 8;
+#else
+	u64 lpb_aura_drop : 8;
+	u64 lpb_aura_pass : 8;
+	u64 lpb_pool_drop : 8;
+	u64 lpb_pool_pass : 8;
+	u64 rsvd_291_288  : 4;
+	u64 rq_int        : 8;
+	u64 rq_int_ena    : 8;
+	u64 qint_idx      : 7;
+	u64 rsvd_319_315  : 5;
+#endif
+#if defined(__BIG_ENDIAN_BITFIELD)  /* W5 */
+	u64 rsvd_383_366  : 18;
+	u64 flow_tagw     : 6;
+	u64 bad_utag      : 8;
+	u64 good_utag     : 8;
+	u64 ltag          : 24;
+#else
+	u64 ltag          : 24;
+	u64 good_utag     : 8;
+	u64 bad_utag      : 8;
+	u64 flow_tagw     : 6;
+	u64 rsvd_383_366  : 18;
+#endif
+#if defined(__BIG_ENDIAN_BITFIELD)  /* W6 */
+	u64 rsvd_447_432  : 16;
+	u64 octs          : 48;
+#else
+	u64 octs          : 48;
+	u64 rsvd_447_432  : 16;
+#endif
+#if defined(__BIG_ENDIAN_BITFIELD)  /* W7 */
+	u64 rsvd_511_496  : 16;
+	u64 pkts          : 48;
+#else
+	u64 pkts          : 48;
+	u64 rsvd_511_496  : 16;
+#endif
+#if defined(__BIG_ENDIAN_BITFIELD)  /* W8 */
+	u64 rsvd_575_560  : 16;
+	u64 drop_octs     : 48;
+#else
+	u64 drop_octs     : 48;
+	u64 rsvd_575_560  : 16;
+#endif
+#if defined(__BIG_ENDIAN_BITFIELD)	/* W9 */
+	u64 rsvd_639_624  : 16;
+	u64 drop_pkts     : 48;
+#else
+	u64 drop_pkts     : 48;
+	u64 rsvd_639_624  : 16;
+#endif
+#if defined(__BIG_ENDIAN_BITFIELD)	/* W10 */
+	u64 rsvd_703_688  : 16;
+	u64 re_pkts       : 48;
+#else
+	u64 re_pkts       : 48;
+	u64 rsvd_703_688  : 16;
+#endif
+	u64 rsvd_767_704;		/* W11 */
+	u64 rsvd_831_768;		/* W12 */
+	u64 rsvd_895_832;		/* W13 */
+	u64 rsvd_959_896;		/* W14 */
+	u64 rsvd_1023_960;		/* W15 */
+};
+
+/* NIX sqe sizes */
+enum nix_maxsqesz {
+	NIX_MAXSQESZ_W16 = 0x0,
+	NIX_MAXSQESZ_W8  = 0x1,
+};
+
+/* NIX SQB caching type */
+enum nix_stype {
+	NIX_STYPE_STF = 0x0,
+	NIX_STYPE_STT = 0x1,
+	NIX_STYPE_STP = 0x2,
+};
+
+/* NIX Send queue context structure */
+struct nix_sq_ctx_s {
+#if defined(__BIG_ENDIAN_BITFIELD)  /* W0 */
+	u64 sqe_way_mask          : 16;
+	u64 cq                    : 20;
+	u64 sdp_mcast             : 1;
+	u64 substream             : 20;
+	u64 qint_idx              : 6;
+	u64 ena                   : 1;
+#else
+	u64 ena                   : 1;
+	u64 qint_idx              : 6;
+	u64 substream             : 20;
+	u64 sdp_mcast             : 1;
+	u64 cq                    : 20;
+	u64 sqe_way_mask          : 16;
+#endif
+#if defined(__BIG_ENDIAN_BITFIELD)  /* W1 */
+	u64 sqb_count             : 16;
+	u64 default_chan          : 12;
+	u64 smq_rr_quantum        : 24;
+	u64 sso_ena               : 1;
+	u64 xoff                  : 1;
+	u64 cq_ena                : 1;
+	u64 smq                   : 9;
+#else
+	u64 smq                   : 9;
+	u64 cq_ena                : 1;
+	u64 xoff                  : 1;
+	u64 sso_ena               : 1;
+	u64 smq_rr_quantum        : 24;
+	u64 default_chan          : 12;
+	u64 sqb_count             : 16;
+#endif
+#if defined(__BIG_ENDIAN_BITFIELD)  /* W2 */
+	u64 rsvd_191              : 1;
+	u64 sqe_stype             : 2;
+	u64 sq_int_ena            : 8;
+	u64 sq_int                : 8;
+	u64 sqb_aura              : 20;
+	u64 smq_rr_count          : 25;
+#else
+	u64 smq_rr_count          : 25;
+	u64 sqb_aura              : 20;
+	u64 sq_int                : 8;
+	u64 sq_int_ena            : 8;
+	u64 sqe_stype             : 2;
+	u64 rsvd_191              : 1;
+#endif
+#if defined(__BIG_ENDIAN_BITFIELD)  /* W3 */
+	u64 rsvd_255_253          : 3;
+	u64 smq_next_sq_vld       : 1;
+	u64 smq_pend              : 1;
+	u64 smenq_next_sqb_vld    : 1;
+	u64 head_offset           : 6;
+	u64 smenq_offset          : 6;
+	u64 tail_offset           : 6;
+	u64 smq_lso_segnum        : 8;
+	u64 smq_next_sq           : 20;
+	u64 mnq_dis               : 1;
+	u64 lmt_dis               : 1;
+	u64 cq_limit              : 8;
+	u64 max_sqe_size          : 2;
+#else
+	u64 max_sqe_size          : 2;
+	u64 cq_limit              : 8;
+	u64 lmt_dis               : 1;
+	u64 mnq_dis               : 1;
+	u64 smq_next_sq           : 20;
+	u64 smq_lso_segnum        : 8;
+	u64 tail_offset           : 6;
+	u64 smenq_offset          : 6;
+	u64 head_offset           : 6;
+	u64 smenq_next_sqb_vld    : 1;
+	u64 smq_pend              : 1;
+	u64 smq_next_sq_vld       : 1;
+	u64 rsvd_255_253          : 3;
+#endif
+	u64 next_sqb              : 64;/* W4 */
+	u64 tail_sqb              : 64;/* W5 */
+	u64 smenq_sqb             : 64;/* W6 */
+	u64 smenq_next_sqb        : 64;/* W7 */
+	u64 head_sqb              : 64;/* W8 */
+#if defined(__BIG_ENDIAN_BITFIELD)  /* W9 */
+	u64 rsvd_639_630          : 10;
+	u64 vfi_lso_vld           : 1;
+	u64 vfi_lso_vlan1_ins_ena : 1;
+	u64 vfi_lso_vlan0_ins_ena : 1;
+	u64 vfi_lso_mps           : 14;
+	u64 vfi_lso_sb            : 8;
+	u64 vfi_lso_sizem1        : 3;
+	u64 vfi_lso_total         : 18;
+	u64 rsvd_583_576          : 8;
+#else
+	u64 rsvd_583_576          : 8;
+	u64 vfi_lso_total         : 18;
+	u64 vfi_lso_sizem1        : 3;
+	u64 vfi_lso_sb            : 8;
+	u64 vfi_lso_mps           : 14;
+	u64 vfi_lso_vlan0_ins_ena : 1;
+	u64 vfi_lso_vlan1_ins_ena : 1;
+	u64 vfi_lso_vld           : 1;
+	u64 rsvd_639_630          : 10;
+#endif
+#if defined(__BIG_ENDIAN_BITFIELD) /* W10 */
+	u64 rsvd_703_658          : 46;
+	u64 scm_lso_rem           : 18;
+#else
+	u64 scm_lso_rem           : 18;
+	u64 rsvd_703_658          : 46;
+#endif
+#if defined(__BIG_ENDIAN_BITFIELD) /* W11 */
+	u64 rsvd_767_752          : 16;
+	u64 octs                  : 48;
+#else
+	u64 octs                  : 48;
+	u64 rsvd_767_752          : 16;
+#endif
+#if defined(__BIG_ENDIAN_BITFIELD) /* W12 */
+	u64 rsvd_831_816          : 16;
+	u64 pkts                  : 48;
+#else
+	u64 pkts                  : 48;
+	u64 rsvd_831_816          : 16;
+#endif
+	u64 rsvd_895_832          : 64;/* W13 */
+#if defined(__BIG_ENDIAN_BITFIELD) /* W14 */
+	u64 rsvd_959_944          : 16;
+	u64 dropped_octs          : 48;
+#else
+	u64 dropped_octs          : 48;
+	u64 rsvd_959_944          : 16;
+#endif
+#if defined(__BIG_ENDIAN_BITFIELD) /* W15 */
+	u64 rsvd_1023_1008        : 16;
+	u64 dropped_pkts          : 48;
+#else
+	u64 dropped_pkts          : 48;
+	u64 rsvd_1023_1008        : 16;
+#endif
+};
+
+/* NIX Receive side scaling entry structure */
+struct nix_rsse_s {
+#if defined(__BIG_ENDIAN_BITFIELD)
+	uint32_t reserved_20_31		: 12;
+	uint32_t rq			: 20;
+#else
+	uint32_t rq			: 20;
+	uint32_t reserved_20_31		: 12;
+
+#endif
+};
+
+/* NIX receive multicast/mirror entry structure */
+struct nix_rx_mce_s {
+#if defined(__BIG_ENDIAN_BITFIELD)  /* W0 */
+	uint64_t next       : 16;
+	uint64_t pf_func    : 16;
+	uint64_t rsvd_31_24 : 8;
+	uint64_t index      : 20;
+	uint64_t eol        : 1;
+	uint64_t rsvd_2     : 1;
+	uint64_t op         : 2;
+#else
+	uint64_t op         : 2;
+	uint64_t rsvd_2     : 1;
+	uint64_t eol        : 1;
+	uint64_t index      : 20;
+	uint64_t rsvd_31_24 : 8;
+	uint64_t pf_func    : 16;
+	uint64_t next       : 16;
+#endif
+};
+
+enum nix_lsoalg {
+	NIX_LSOALG_NOP,
+	NIX_LSOALG_ADD_SEGNUM,
+	NIX_LSOALG_ADD_PAYLEN,
+	NIX_LSOALG_ADD_OFFSET,
+	NIX_LSOALG_TCP_FLAGS,
+};
+
+enum nix_txlayer {
+	NIX_TXLAYER_OL3,
+	NIX_TXLAYER_OL4,
+	NIX_TXLAYER_IL3,
+	NIX_TXLAYER_IL4,
+};
+
+struct nix_lso_format {
+#if defined(__BIG_ENDIAN_BITFIELD)
+	u64 rsvd_19_63		: 45;
+	u64 alg			: 3;
+	u64 rsvd_14_15		: 2;
+	u64 sizem1		: 2;
+	u64 rsvd_10_11		: 2;
+	u64 layer		: 2;
+	u64 offset		: 8;
+#else
+	u64 offset		: 8;
+	u64 layer		: 2;
+	u64 rsvd_10_11		: 2;
+	u64 sizem1		: 2;
+	u64 rsvd_14_15		: 2;
+	u64 alg			: 3;
+	u64 rsvd_19_63		: 45;
+#endif
+};
+
+struct nix_rx_flowkey_alg {
+#if defined(__BIG_ENDIAN_BITFIELD)
+	u64 reserved_35_63	:29;
+	u64 ltype_match		:4;
+	u64 ltype_mask		:4;
+	u64 sel_chan		:1;
+	u64 ena			:1;
+	u64 reserved_24_24	:1;
+	u64 lid			:3;
+	u64 bytesm1		:5;
+	u64 hdr_offset		:8;
+	u64 fn_mask		:1;
+	u64 ln_mask		:1;
+	u64 key_offset		:6;
+#else
+	u64 key_offset		:6;
+	u64 ln_mask		:1;
+	u64 fn_mask		:1;
+	u64 hdr_offset		:8;
+	u64 bytesm1		:5;
+	u64 lid			:3;
+	u64 reserved_24_24	:1;
+	u64 ena			:1;
+	u64 sel_chan		:1;
+	u64 ltype_mask		:4;
+	u64 ltype_match		:4;
+	u64 reserved_35_63	:29;
+#endif
+};
+
+/* NIX VTAG size */
+enum nix_vtag_size {
+	VTAGSIZE_T4   = 0x0,
+	VTAGSIZE_T8   = 0x1,
+};
+#endif /* RVU_STRUCT_H */
diff --git a/drivers/net/ethernet/marvell/pxa168_eth.c b/drivers/net/ethernet/marvell/pxa168_eth.c
index 3a97306..51b77c2 100644
--- a/drivers/net/ethernet/marvell/pxa168_eth.c
+++ b/drivers/net/ethernet/marvell/pxa168_eth.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
 /*
  * PXA168 ethernet driver.
  * Most of the code is derived from mv643xx ethernet driver.
@@ -7,19 +8,6 @@
  *		Zhangfei Gao <zgao6@marvell.com>
  *		Philip Rakity <prakity@marvell.com>
  *		Mark Brown <markb@marvell.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version 2
- * of the License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, see <http://www.gnu.org/licenses/>.
  */
 
 #include <linux/bitops.h>
@@ -201,6 +189,7 @@
 };
 
 struct pxa168_eth_private {
+	struct platform_device *pdev;
 	int port_num;		/* User Ethernet port number    */
 	int phy_addr;
 	int phy_speed;
@@ -331,7 +320,7 @@
 		used_rx_desc = pep->rx_used_desc_q;
 		p_used_rx_desc = &pep->p_rx_desc_area[used_rx_desc];
 		size = skb_end_pointer(skb) - skb->data;
-		p_used_rx_desc->buf_ptr = dma_map_single(NULL,
+		p_used_rx_desc->buf_ptr = dma_map_single(&pep->pdev->dev,
 							 skb->data,
 							 size,
 							 DMA_FROM_DEVICE);
@@ -557,9 +546,9 @@
 	 * table is full.
 	 */
 	if (!pep->htpr) {
-		pep->htpr = dma_zalloc_coherent(pep->dev->dev.parent,
-						HASH_ADDR_TABLE_SIZE,
-						&pep->htpr_dma, GFP_KERNEL);
+		pep->htpr = dma_alloc_coherent(pep->dev->dev.parent,
+					       HASH_ADDR_TABLE_SIZE,
+					       &pep->htpr_dma, GFP_KERNEL);
 		if (!pep->htpr)
 			return -ENOMEM;
 	} else {
@@ -743,7 +732,7 @@
 				netdev_err(dev, "Error in TX\n");
 			dev->stats.tx_errors++;
 		}
-		dma_unmap_single(NULL, addr, count, DMA_TO_DEVICE);
+		dma_unmap_single(&pep->pdev->dev, addr, count, DMA_TO_DEVICE);
 		if (skb)
 			dev_kfree_skb_irq(skb);
 		released++;
@@ -805,7 +794,7 @@
 		if (rx_next_curr_desc == rx_used_desc)
 			pep->rx_resource_err = 1;
 		pep->rx_desc_count--;
-		dma_unmap_single(NULL, rx_desc->buf_ptr,
+		dma_unmap_single(&pep->pdev->dev, rx_desc->buf_ptr,
 				 rx_desc->buf_size,
 				 DMA_FROM_DEVICE);
 		received_packets++;
@@ -988,8 +977,8 @@
 	cmd.base.phy_address = pep->phy_addr;
 	cmd.base.speed = pep->phy_speed;
 	cmd.base.duplex = pep->phy_duplex;
-	ethtool_convert_legacy_u32_to_link_mode(cmd.link_modes.advertising,
-						PHY_BASIC_FEATURES);
+	bitmap_copy(cmd.link_modes.advertising, PHY_BASIC_FEATURES,
+		    __ETHTOOL_LINK_MODE_MASK_NBITS);
 	cmd.base.autoneg = AUTONEG_ENABLE;
 
 	if (cmd.base.speed != 0)
@@ -1044,9 +1033,9 @@
 	pep->rx_desc_count = 0;
 	size = pep->rx_ring_size * sizeof(struct rx_desc);
 	pep->rx_desc_area_size = size;
-	pep->p_rx_desc_area = dma_zalloc_coherent(pep->dev->dev.parent, size,
-						  &pep->rx_desc_dma,
-						  GFP_KERNEL);
+	pep->p_rx_desc_area = dma_alloc_coherent(pep->dev->dev.parent, size,
+						 &pep->rx_desc_dma,
+						 GFP_KERNEL);
 	if (!pep->p_rx_desc_area)
 		goto out;
 
@@ -1103,9 +1092,9 @@
 	pep->tx_desc_count = 0;
 	size = pep->tx_ring_size * sizeof(struct tx_desc);
 	pep->tx_desc_area_size = size;
-	pep->p_tx_desc_area = dma_zalloc_coherent(pep->dev->dev.parent, size,
-						  &pep->tx_desc_dma,
-						  GFP_KERNEL);
+	pep->p_tx_desc_area = dma_alloc_coherent(pep->dev->dev.parent, size,
+						 &pep->tx_desc_dma,
+						 GFP_KERNEL);
 	if (!pep->p_tx_desc_area)
 		goto out;
 	/* Initialize the next_desc_ptr links in the Tx descriptors ring */
@@ -1260,7 +1249,8 @@
 	return work_done;
 }
 
-static int pxa168_eth_start_xmit(struct sk_buff *skb, struct net_device *dev)
+static netdev_tx_t
+pxa168_eth_start_xmit(struct sk_buff *skb, struct net_device *dev)
 {
 	struct pxa168_eth_private *pep = netdev_priv(dev);
 	struct net_device_stats *stats = &dev->stats;
@@ -1273,7 +1263,8 @@
 	length = skb->len;
 	pep->tx_skb[tx_index] = skb;
 	desc->byte_cnt = length;
-	desc->buf_ptr = dma_map_single(NULL, skb->data, length, DMA_TO_DEVICE);
+	desc->buf_ptr = dma_map_single(&pep->pdev->dev, skb->data, length,
+					DMA_TO_DEVICE);
 
 	skb_tx_timestamp(skb);
 
@@ -1434,8 +1425,7 @@
 	pep->dev = dev;
 	pep->clk = clk;
 
-	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	pep->base = devm_ioremap_resource(&pdev->dev, res);
+	pep->base = devm_platform_ioremap_resource(pdev, 0);
 	if (IS_ERR(pep->base)) {
 		err = -ENOMEM;
 		goto err_netdev;
@@ -1458,7 +1448,7 @@
 	if (pdev->dev.of_node)
 		mac_addr = of_get_mac_address(pdev->dev.of_node);
 
-	if (mac_addr && is_valid_ether_addr(mac_addr)) {
+	if (!IS_ERR_OR_NULL(mac_addr)) {
 		ether_addr_copy(dev->dev_addr, mac_addr);
 	} else {
 		/* try reading the mac address, if set by the bootloader */
@@ -1527,6 +1517,7 @@
 	if (err)
 		goto err_free_mdio;
 
+	pep->pdev = pdev;
 	SET_NETDEV_DEV(dev, &pdev->dev);
 	pxa168_init_hw(pep);
 	err = register_netdev(dev);
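
Note on the pxa168_eth hunks above: they follow the DMA API change that removed support for a NULL device argument, so dma_map_single() and dma_unmap_single() must now be given the struct device that actually performs the transfer, which is why the driver stores its platform_device and passes &pep->pdev->dev. A minimal sketch of the general pattern, for illustration only (priv, buf and len are placeholder names, not identifiers from this patch):

	/* Map the buffer for a memory-to-device transfer, using the
	 * device that owns the DMA engine.
	 */
	dma_addr_t addr = dma_map_single(&priv->pdev->dev, buf, len,
					 DMA_TO_DEVICE);

	if (dma_mapping_error(&priv->pdev->dev, addr))
		return -ENOMEM;	/* never hand a failed mapping to hardware */

	/* ... start the transfer and wait for completion ... */

	/* Unmap with the same device, address, size and direction. */
	dma_unmap_single(&priv->pdev->dev, addr, len, DMA_TO_DEVICE);
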
diff --git a/drivers/net/ethernet/marvell/skge.c b/drivers/net/ethernet/marvell/skge.c
index 9c08c36..095f6c7 100644
--- a/drivers/net/ethernet/marvell/skge.c
+++ b/drivers/net/ethernet/marvell/skge.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * New driver for Marvell Yukon chipset and SysKonnect Gigabit
  * Ethernet adapters. Based on earlier sk98lin, e100 and
@@ -8,19 +9,6 @@
  * those should be done at higher levels.
  *
  * Copyright (C) 2004, 2005 Stephen Hemminger <shemminger@osdl.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  */
 
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
@@ -152,8 +140,10 @@
 	memset(p, 0, regs->len);
 	memcpy_fromio(p, io, B3_RAM_ADDR);
 
-	memcpy_fromio(p + B3_RI_WTO_R1, io + B3_RI_WTO_R1,
-		      regs->len - B3_RI_WTO_R1);
+	if (regs->len > B3_RI_WTO_R1) {
+		memcpy_fromio(p + B3_RI_WTO_R1, io + B3_RI_WTO_R1,
+			      regs->len - B3_RI_WTO_R1);
+	}
 }
 
 /* Wake on Lan only supported on Yukon chips with rev 1 or above */
@@ -2568,8 +2558,6 @@
 		goto free_pci_mem;
 	}
 
-	memset(skge->mem, 0, skge->mem_size);
-
 	err = skge_ring_alloc(&skge->rx_ring, skge->mem, skge->dma);
 	if (err)
 		goto free_pci_mem;
@@ -3120,7 +3108,7 @@
 	skb_put(skb, len);
 
 	if (dev->features & NETIF_F_RXCSUM) {
-		skb->csum = csum;
+		skb->csum = le16_to_cpu(csum);
 		skb->ip_summed = CHECKSUM_COMPLETE;
 	}
 
@@ -3732,19 +3720,7 @@
 
 	return 0;
 }
-
-static int skge_debug_open(struct inode *inode, struct file *file)
-{
-	return single_open(file, skge_debug_show, inode->i_private);
-}
-
-static const struct file_operations skge_debug_fops = {
-	.owner		= THIS_MODULE,
-	.open		= skge_debug_open,
-	.read		= seq_read,
-	.llseek		= seq_lseek,
-	.release	= single_release,
-};
+DEFINE_SHOW_ATTRIBUTE(skge_debug);
 
 /*
  * Use network device events to create/remove/rename
@@ -3755,7 +3731,6 @@
 {
 	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
 	struct skge_port *skge;
-	struct dentry *d;
 
 	if (dev->netdev_ops->ndo_open != &skge_up || !skge_debug)
 		goto done;
@@ -3763,33 +3738,20 @@
 	skge = netdev_priv(dev);
 	switch (event) {
 	case NETDEV_CHANGENAME:
-		if (skge->debugfs) {
-			d = debugfs_rename(skge_debug, skge->debugfs,
-					   skge_debug, dev->name);
-			if (d)
-				skge->debugfs = d;
-			else {
-				netdev_info(dev, "rename failed\n");
-				debugfs_remove(skge->debugfs);
-			}
-		}
+		if (skge->debugfs)
+			skge->debugfs = debugfs_rename(skge_debug,
+						       skge->debugfs,
+						       skge_debug, dev->name);
 		break;
 
 	case NETDEV_GOING_DOWN:
-		if (skge->debugfs) {
-			debugfs_remove(skge->debugfs);
-			skge->debugfs = NULL;
-		}
+		debugfs_remove(skge->debugfs);
+		skge->debugfs = NULL;
 		break;
 
 	case NETDEV_UP:
-		d = debugfs_create_file(dev->name, 0444,
-					skge_debug, dev,
-					&skge_debug_fops);
-		if (!d || IS_ERR(d))
-			netdev_info(dev, "debugfs create failed\n");
-		else
-			skge->debugfs = d;
+		skge->debugfs = debugfs_create_file(dev->name, 0444, skge_debug,
+						    dev, &skge_debug_fops);
 		break;
 	}
 
@@ -3804,15 +3766,8 @@
 
 static __init void skge_debug_init(void)
 {
-	struct dentry *ent;
+	skge_debug = debugfs_create_dir("skge", NULL);
 
-	ent = debugfs_create_dir("skge", NULL);
-	if (!ent || IS_ERR(ent)) {
-		pr_info("debugfs create directory failed\n");
-		return;
-	}
-
-	skge_debug = ent;
 	register_netdevice_notifier(&skge_notifier);
 }
 
@@ -4102,8 +4057,7 @@
 #ifdef CONFIG_PM_SLEEP
 static int skge_suspend(struct device *dev)
 {
-	struct pci_dev *pdev = to_pci_dev(dev);
-	struct skge_hw *hw  = pci_get_drvdata(pdev);
+	struct skge_hw *hw  = dev_get_drvdata(dev);
 	int i;
 
 	if (!hw)
@@ -4127,8 +4081,7 @@
 
 static int skge_resume(struct device *dev)
 {
-	struct pci_dev *pdev = to_pci_dev(dev);
-	struct skge_hw *hw  = pci_get_drvdata(pdev);
+	struct skge_hw *hw  = dev_get_drvdata(dev);
 	int i, err;
 
 	if (!hw)
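
Both skge and sky2 drop their hand-rolled debugfs boilerplate in favour of DEFINE_SHOW_ATTRIBUTE() from <linux/seq_file.h>. Given a function named <name>_show, the macro generates the single_open() wrapper and the file_operations table that the deleted lines spelled out by hand, so the call sites can keep referring to &skge_debug_fops (and &sky2_debug_fops) unchanged. Roughly, as a sketch of the macro's effect rather than text from this patch, DEFINE_SHOW_ATTRIBUTE(skge_debug) expands to approximately:

	static int skge_debug_open(struct inode *inode, struct file *file)
	{
		return single_open(file, skge_debug_show, inode->i_private);
	}

	static const struct file_operations skge_debug_fops = {
		.owner		= THIS_MODULE,
		.open		= skge_debug_open,
		.read		= seq_read,
		.llseek		= seq_lseek,
		.release	= single_release,
	};
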
diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c
index 697d9b3..5f56ee8 100644
--- a/drivers/net/ethernet/marvell/sky2.c
+++ b/drivers/net/ethernet/marvell/sky2.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * New driver for Marvell Yukon 2 chipset.
  * Based on earlier sk98lin, and skge driver.
@@ -7,19 +8,6 @@
  * those should be done at higher levels.
  *
  * Copyright (C) 2005 Stephen Hemminger <shemminger@osdl.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  */
 
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
@@ -46,6 +34,7 @@
 #include <linux/mii.h>
 #include <linux/of_device.h>
 #include <linux/of_net.h>
+#include <linux/dmi.h>
 
 #include <asm/irq.h>
 
@@ -93,7 +82,7 @@
 module_param(copybreak, int, 0);
 MODULE_PARM_DESC(copybreak, "Receive copy threshold");
 
-static int disable_msi = 0;
+static int disable_msi = -1;
 module_param(disable_msi, int, 0);
 MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");
 
@@ -1138,9 +1127,6 @@
 	/* Make sure write' to descriptors are complete before we tell hardware */
 	wmb();
 	sky2_write16(hw, Y2_QADDR(q, PREF_UNIT_PUT_IDX), idx);
-
-	/* Synchronize I/O on since next processor may write to tail */
-	mmiowb();
 }
 
 
@@ -1353,7 +1339,6 @@
 
 	/* reset the Rx prefetch unit */
 	sky2_write32(hw, Y2_QADDR(rxq, PREF_UNIT_CTRL), PREF_UNIT_RST_SET);
-	mmiowb();
 }
 
 /* Clean out receive buffer area, assumes receiver hardware stopped */
@@ -2485,13 +2470,11 @@
 		skb->ip_summed = re->skb->ip_summed;
 		skb->csum = re->skb->csum;
 		skb_copy_hash(skb, re->skb);
-		skb->vlan_proto = re->skb->vlan_proto;
-		skb->vlan_tci = re->skb->vlan_tci;
+		__vlan_hwaccel_copy_tag(skb, re->skb);
 
 		pci_dma_sync_single_for_device(sky2->hw->pdev, re->data_addr,
 					       length, PCI_DMA_FROMDEVICE);
-		re->skb->vlan_proto = 0;
-		re->skb->vlan_tci = 0;
+		__vlan_hwaccel_clear_tag(re->skb);
 		skb_clear_hash(re->skb);
 		re->skb->ip_summed = CHECKSUM_NONE;
 		skb_put(skb, length);
@@ -4623,19 +4606,7 @@
 	napi_enable(&hw->napi);
 	return 0;
 }
-
-static int sky2_debug_open(struct inode *inode, struct file *file)
-{
-	return single_open(file, sky2_debug_show, inode->i_private);
-}
-
-static const struct file_operations sky2_debug_fops = {
-	.owner		= THIS_MODULE,
-	.open		= sky2_debug_open,
-	.read		= seq_read,
-	.llseek		= seq_lseek,
-	.release	= single_release,
-};
+DEFINE_SHOW_ATTRIBUTE(sky2_debug);
 
 /*
  * Use network device events to create/remove/rename
@@ -4821,8 +4792,8 @@
 	 * 2) from internal registers set by bootloader
 	 */
 	iap = of_get_mac_address(hw->pdev->dev.of_node);
-	if (iap)
-		memcpy(dev->dev_addr, iap, ETH_ALEN);
+	if (!IS_ERR(iap))
+		ether_addr_copy(dev->dev_addr, iap);
 	else
 		memcpy_fromio(dev->dev_addr, hw->regs + B2_MAC_1 + port * 8,
 			      ETH_ALEN);
@@ -4931,6 +4902,45 @@
 	return buf;
 }
 
+static const struct dmi_system_id msi_blacklist[] = {
+	{
+		.ident = "Dell Inspiron 1545",
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+			DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 1545"),
+		},
+	},
+	{
+		.ident = "Gateway P-79",
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "Gateway"),
+			DMI_MATCH(DMI_PRODUCT_NAME, "P-79"),
+		},
+	},
+	{
+		.ident = "ASUS P5W DH Deluxe",
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "ASUSTEK COMPUTER INC"),
+			DMI_MATCH(DMI_PRODUCT_NAME, "P5W DH Deluxe"),
+		},
+	},
+	{
+		.ident = "ASUS P6T",
+		.matches = {
+			DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."),
+			DMI_MATCH(DMI_BOARD_NAME, "P6T"),
+		},
+	},
+	{
+		.ident = "ASUS P6X",
+		.matches = {
+			DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."),
+			DMI_MATCH(DMI_BOARD_NAME, "P6X"),
+		},
+	},
+	{}
+};
+
 static int sky2_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 {
 	struct net_device *dev, *dev1;
@@ -5042,6 +5052,9 @@
 		goto err_out_free_pci;
 	}
 
+	if (disable_msi == -1)
+		disable_msi = !!dmi_check_system(msi_blacklist);
+
 	if (!disable_msi && pci_enable_msi(pdev) == 0) {
 		err = sky2_test_msi(hw);
 		if (err) {
@@ -5087,7 +5100,7 @@
 	INIT_WORK(&hw->restart_work, sky2_restart);
 
 	pci_set_drvdata(pdev, hw);
-	pdev->d3_delay = 200;
+	pdev->d3_delay = 300;
 
 	return 0;
 
@@ -5161,8 +5174,7 @@
 
 static int sky2_suspend(struct device *dev)
 {
-	struct pci_dev *pdev = to_pci_dev(dev);
-	struct sky2_hw *hw = pci_get_drvdata(pdev);
+	struct sky2_hw *hw = dev_get_drvdata(dev);
 	int i;
 
 	if (!hw)