Update Linux to v5.10.109

Sourced from [1]

[1] https://cdn.kernel.org/pub/linux/kernel/v5.x/linux-5.10.109.tar.xz

Change-Id: I19bca9fc6762d4e63bcf3e4cba88bbe560d9c76c
Signed-off-by: Olivier Deprez <olivier.deprez@arm.com>
diff --git a/drivers/fpga/Kconfig b/drivers/fpga/Kconfig
index 73c779e..5645226 100644
--- a/drivers/fpga/Kconfig
+++ b/drivers/fpga/Kconfig
@@ -142,6 +142,7 @@
 	tristate "FPGA Device Feature List (DFL) support"
 	select FPGA_BRIDGE
 	select FPGA_REGION
+	depends on HAS_IOMEM
 	help
 	  Device Feature List (DFL) defines a feature list structure that
 	  creates a linked list of feature headers within the MMIO space
@@ -156,7 +157,7 @@
 
 config FPGA_DFL_FME
 	tristate "FPGA DFL FME Driver"
-	depends on FPGA_DFL
+	depends on FPGA_DFL && HWMON && PERF_EVENTS
 	help
 	  The FPGA Management Engine (FME) is a feature device implemented
 	  under Device Feature List (DFL) framework. Select this option to
@@ -208,7 +209,7 @@
 
 config FPGA_MGR_ZYNQMP_FPGA
 	tristate "Xilinx ZynqMP FPGA"
-	depends on ARCH_ZYNQMP || COMPILE_TEST
+	depends on ZYNQMP_FIRMWARE || (!ZYNQMP_FIRMWARE && COMPILE_TEST)
 	help
 	  FPGA manager driver support for Xilinx ZynqMP FPGAs.
 	  This driver uses the processor configuration port(PCAP)
diff --git a/drivers/fpga/Makefile b/drivers/fpga/Makefile
index 4865b74..d8e21df 100644
--- a/drivers/fpga/Makefile
+++ b/drivers/fpga/Makefile
@@ -40,6 +40,7 @@
 obj-$(CONFIG_FPGA_DFL_AFU)		+= dfl-afu.o
 
 dfl-fme-objs := dfl-fme-main.o dfl-fme-pr.o dfl-fme-error.o
+dfl-fme-objs += dfl-fme-perf.o
 dfl-afu-objs := dfl-afu-main.o dfl-afu-region.o dfl-afu-dma-region.o
 dfl-afu-objs += dfl-afu-error.o
 
diff --git a/drivers/fpga/dfl-afu-dma-region.c b/drivers/fpga/dfl-afu-dma-region.c
index 5942343..02b60fd 100644
--- a/drivers/fpga/dfl-afu-dma-region.c
+++ b/drivers/fpga/dfl-afu-dma-region.c
@@ -16,15 +16,6 @@
 
 #include "dfl-afu.h"
 
-static void put_all_pages(struct page **pages, int npages)
-{
-	int i;
-
-	for (i = 0; i < npages; i++)
-		if (pages[i])
-			put_page(pages[i]);
-}
-
 void afu_dma_region_init(struct dfl_feature_platform_data *pdata)
 {
 	struct dfl_afu *afu = dfl_fpga_pdata_get_private(pdata);
@@ -57,22 +48,22 @@
 		goto unlock_vm;
 	}
 
-	pinned = get_user_pages_fast(region->user_addr, npages, FOLL_WRITE,
+	pinned = pin_user_pages_fast(region->user_addr, npages, FOLL_WRITE,
 				     region->pages);
 	if (pinned < 0) {
 		ret = pinned;
 		goto free_pages;
 	} else if (pinned != npages) {
 		ret = -EFAULT;
-		goto put_pages;
+		goto unpin_pages;
 	}
 
 	dev_dbg(dev, "%d pages pinned\n", pinned);
 
 	return 0;
 
-put_pages:
-	put_all_pages(region->pages, pinned);
+unpin_pages:
+	unpin_user_pages(region->pages, pinned);
 free_pages:
 	kfree(region->pages);
 unlock_vm:
@@ -94,7 +85,7 @@
 	long npages = region->length >> PAGE_SHIFT;
 	struct device *dev = &pdata->dev->dev;
 
-	put_all_pages(region->pages, npages);
+	unpin_user_pages(region->pages, npages);
 	kfree(region->pages);
 	account_locked_vm(current->mm, npages, false);
 
@@ -324,10 +315,6 @@
 	if (user_addr + length < user_addr)
 		return -EINVAL;
 
-	if (!access_ok((void __user *)(unsigned long)user_addr,
-		       length))
-		return -EINVAL;
-
 	region = kzalloc(sizeof(*region), GFP_KERNEL);
 	if (!region)
 		return -ENOMEM;
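
The dma-region hunk above moves from get_user_pages_fast()/put_page() to the
paired pin_user_pages_fast()/unpin_user_pages() API, which takes FOLL_PIN
references intended for pages used as DMA targets. A minimal sketch of the
same pin / partial-pin / unpin pattern, assuming kernel context; the function
name and error handling are illustrative, not part of the driver:

    #include <linux/mm.h>
    #include <linux/slab.h>

    /* Pin npages of a user buffer for DMA, following the pattern above. */
    static int example_pin_user_buffer(unsigned long uaddr, int npages,
                                       struct page ***pagesp)
    {
            struct page **pages;
            int pinned;

            pages = kcalloc(npages, sizeof(*pages), GFP_KERNEL);
            if (!pages)
                    return -ENOMEM;

            pinned = pin_user_pages_fast(uaddr, npages, FOLL_WRITE, pages);
            if (pinned < 0) {               /* hard failure */
                    kfree(pages);
                    return pinned;
            }
            if (pinned != npages) {         /* partial pin: undo and fail */
                    unpin_user_pages(pages, pinned);
                    kfree(pages);
                    return -EFAULT;
            }

            *pagesp = pages;
            return 0;
    }
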
diff --git a/drivers/fpga/dfl-afu-error.c b/drivers/fpga/dfl-afu-error.c
index c1467ae..c469118 100644
--- a/drivers/fpga/dfl-afu-error.c
+++ b/drivers/fpga/dfl-afu-error.c
@@ -14,6 +14,7 @@
  *   Mitchel Henry <henry.mitchel@intel.com>
  */
 
+#include <linux/fpga-dfl.h>
 #include <linux/uaccess.h>
 
 #include "dfl-afu.h"
@@ -219,6 +220,21 @@
 	afu_port_err_mask(&pdev->dev, true);
 }
 
+static long
+port_err_ioctl(struct platform_device *pdev, struct dfl_feature *feature,
+	       unsigned int cmd, unsigned long arg)
+{
+	switch (cmd) {
+	case DFL_FPGA_PORT_ERR_GET_IRQ_NUM:
+		return dfl_feature_ioctl_get_num_irqs(pdev, feature, arg);
+	case DFL_FPGA_PORT_ERR_SET_IRQ:
+		return dfl_feature_ioctl_set_irq(pdev, feature, arg);
+	default:
+		dev_dbg(&pdev->dev, "%x cmd not handled", cmd);
+		return -ENODEV;
+	}
+}
+
 const struct dfl_feature_id port_err_id_table[] = {
 	{.id = PORT_FEATURE_ID_ERROR,},
 	{0,}
@@ -227,4 +243,5 @@
 const struct dfl_feature_ops port_err_ops = {
 	.init = port_err_init,
 	.uinit = port_err_uinit,
+	.ioctl = port_err_ioctl,
 };
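
The new port_err_ioctl() above routes two commands to the generic DFL irq
helpers. A hedged userspace sketch of the intended flow, using the uapi from
<linux/fpga-dfl.h>; the device node path and the minimal error cleanup are
illustrative assumptions:

    #include <fcntl.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <sys/eventfd.h>
    #include <sys/ioctl.h>
    #include <linux/fpga-dfl.h>

    int main(void)
    {
            /* actual device node depends on the platform */
            int fd = open("/dev/dfl-port.0", O_RDWR);
            struct dfl_fpga_irq_set *irq_set;
            __u32 num_irqs = 0;
            int efd;

            if (fd < 0 || ioctl(fd, DFL_FPGA_PORT_ERR_GET_IRQ_NUM, &num_irqs))
                    return 1;
            printf("port error feature exposes %u irq(s)\n", num_irqs);

            /* bind an eventfd as the trigger for error irq 0 */
            efd = eventfd(0, 0);
            irq_set = malloc(sizeof(*irq_set) + sizeof(__s32));
            if (efd < 0 || !irq_set)
                    return 1;
            irq_set->start = 0;
            irq_set->count = 1;
            irq_set->evtfds[0] = efd;

            if (ioctl(fd, DFL_FPGA_PORT_ERR_SET_IRQ, irq_set))
                    perror("DFL_FPGA_PORT_ERR_SET_IRQ");

            free(irq_set);
            return 0;
    }
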
diff --git a/drivers/fpga/dfl-afu-main.c b/drivers/fpga/dfl-afu-main.c
index 041d234..753cda4 100644
--- a/drivers/fpga/dfl-afu-main.c
+++ b/drivers/fpga/dfl-afu-main.c
@@ -530,6 +530,30 @@
 	.init = port_stp_init,
 };
 
+static long
+port_uint_ioctl(struct platform_device *pdev, struct dfl_feature *feature,
+		unsigned int cmd, unsigned long arg)
+{
+	switch (cmd) {
+	case DFL_FPGA_PORT_UINT_GET_IRQ_NUM:
+		return dfl_feature_ioctl_get_num_irqs(pdev, feature, arg);
+	case DFL_FPGA_PORT_UINT_SET_IRQ:
+		return dfl_feature_ioctl_set_irq(pdev, feature, arg);
+	default:
+		dev_dbg(&pdev->dev, "%x cmd not handled", cmd);
+		return -ENODEV;
+	}
+}
+
+static const struct dfl_feature_id port_uint_id_table[] = {
+	{.id = PORT_FEATURE_ID_UINT,},
+	{0,}
+};
+
+static const struct dfl_feature_ops port_uint_ops = {
+	.ioctl = port_uint_ioctl,
+};
+
 static struct dfl_feature_driver port_feature_drvs[] = {
 	{
 		.id_table = port_hdr_id_table,
@@ -548,6 +572,10 @@
 		.ops = &port_stp_ops,
 	},
 	{
+		.id_table = port_uint_id_table,
+		.ops = &port_uint_ops,
+	},
+	{
 		.ops = NULL,
 	}
 };
@@ -562,32 +590,40 @@
 	if (WARN_ON(!pdata))
 		return -ENODEV;
 
-	ret = dfl_feature_dev_use_begin(pdata);
-	if (ret)
-		return ret;
+	mutex_lock(&pdata->lock);
+	ret = dfl_feature_dev_use_begin(pdata, filp->f_flags & O_EXCL);
+	if (!ret) {
+		dev_dbg(&fdev->dev, "Device File Opened %d Times\n",
+			dfl_feature_dev_use_count(pdata));
+		filp->private_data = fdev;
+	}
+	mutex_unlock(&pdata->lock);
 
-	dev_dbg(&fdev->dev, "Device File Open\n");
-	filp->private_data = fdev;
-
-	return 0;
+	return ret;
 }
 
 static int afu_release(struct inode *inode, struct file *filp)
 {
 	struct platform_device *pdev = filp->private_data;
 	struct dfl_feature_platform_data *pdata;
+	struct dfl_feature *feature;
 
 	dev_dbg(&pdev->dev, "Device File Release\n");
 
 	pdata = dev_get_platdata(&pdev->dev);
 
 	mutex_lock(&pdata->lock);
-	__port_reset(pdev);
-	afu_dma_region_destroy(pdata);
-	mutex_unlock(&pdata->lock);
-
 	dfl_feature_dev_use_end(pdata);
 
+	if (!dfl_feature_dev_use_count(pdata)) {
+		dfl_fpga_dev_for_each_feature(pdata, feature)
+			dfl_fpga_set_irq_triggers(feature, 0,
+						  feature->nr_irqs, NULL);
+		__port_reset(pdev);
+		afu_dma_region_destroy(pdata);
+	}
+	mutex_unlock(&pdata->lock);
+
 	return 0;
 }
 
@@ -747,6 +783,12 @@
 	return -EINVAL;
 }
 
+static const struct vm_operations_struct afu_vma_ops = {
+#ifdef CONFIG_HAVE_IOREMAP_PROT
+	.access = generic_access_phys,
+#endif
+};
+
 static int afu_mmap(struct file *filp, struct vm_area_struct *vma)
 {
 	struct platform_device *pdev = filp->private_data;
@@ -776,6 +818,9 @@
 	    !(region.flags & DFL_PORT_REGION_WRITE))
 		return -EPERM;
 
+	/* Support debug access to the mapping */
+	vma->vm_ops = &afu_vma_ops;
+
 	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 
 	return remap_pfn_range(vma, vma->vm_start,
@@ -814,10 +859,8 @@
 static int afu_dev_destroy(struct platform_device *pdev)
 {
 	struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
-	struct dfl_afu *afu;
 
 	mutex_lock(&pdata->lock);
-	afu = dfl_fpga_pdata_get_private(pdata);
 	afu_mmio_region_destroy(pdata);
 	afu_dma_region_destroy(pdata);
 	dfl_fpga_pdata_set_private(pdata, NULL);
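
afu_open()/afu_release() above replace the previous single-user model with a
use count: multiple non-exclusive opens are allowed, O_EXCL requests exclusive
access, and port reset plus DMA-region teardown now happen only when the last
user releases the device. A short userspace illustration (device path assumed):

    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
            /* shared open: succeeds alongside other non-exclusive users */
            int fd1 = open("/dev/dfl-port.0", O_RDWR);

            /* exclusive open: expected to fail while fd1 is held */
            int fd2 = open("/dev/dfl-port.0", O_RDWR | O_EXCL);
            if (fd2 < 0)
                    perror("exclusive open");

            if (fd1 >= 0)
                    close(fd1);     /* last close triggers the port reset */
            return 0;
    }
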
diff --git a/drivers/fpga/dfl-fme-error.c b/drivers/fpga/dfl-fme-error.c
index f897d41..51c2892 100644
--- a/drivers/fpga/dfl-fme-error.c
+++ b/drivers/fpga/dfl-fme-error.c
@@ -15,6 +15,7 @@
  *   Mitchel, Henry <henry.mitchel@intel.com>
  */
 
+#include <linux/fpga-dfl.h>
 #include <linux/uaccess.h>
 
 #include "dfl.h"
@@ -348,6 +349,22 @@
 	fme_err_mask(&pdev->dev, true);
 }
 
+static long
+fme_global_error_ioctl(struct platform_device *pdev,
+		       struct dfl_feature *feature,
+		       unsigned int cmd, unsigned long arg)
+{
+	switch (cmd) {
+	case DFL_FPGA_FME_ERR_GET_IRQ_NUM:
+		return dfl_feature_ioctl_get_num_irqs(pdev, feature, arg);
+	case DFL_FPGA_FME_ERR_SET_IRQ:
+		return dfl_feature_ioctl_set_irq(pdev, feature, arg);
+	default:
+		dev_dbg(&pdev->dev, "%x cmd not handled", cmd);
+		return -ENODEV;
+	}
+}
+
 const struct dfl_feature_id fme_global_err_id_table[] = {
 	{.id = FME_FEATURE_ID_GLOBAL_ERR,},
 	{0,}
@@ -356,4 +373,5 @@
 const struct dfl_feature_ops fme_global_err_ops = {
 	.init = fme_global_err_init,
 	.uinit = fme_global_err_uinit,
+	.ioctl = fme_global_error_ioctl,
 };
diff --git a/drivers/fpga/dfl-fme-main.c b/drivers/fpga/dfl-fme-main.c
index 4d78e18..77ea04d 100644
--- a/drivers/fpga/dfl-fme-main.c
+++ b/drivers/fpga/dfl-fme-main.c
@@ -14,6 +14,8 @@
  *   Henry Mitchel <henry.mitchel@intel.com>
  */
 
+#include <linux/hwmon.h>
+#include <linux/hwmon-sysfs.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/uaccess.h>
@@ -181,6 +183,381 @@
 	.ioctl = fme_hdr_ioctl,
 };
 
+#define FME_THERM_THRESHOLD	0x8
+#define TEMP_THRESHOLD1		GENMASK_ULL(6, 0)
+#define TEMP_THRESHOLD1_EN	BIT_ULL(7)
+#define TEMP_THRESHOLD2		GENMASK_ULL(14, 8)
+#define TEMP_THRESHOLD2_EN	BIT_ULL(15)
+#define TRIP_THRESHOLD		GENMASK_ULL(30, 24)
+#define TEMP_THRESHOLD1_STATUS	BIT_ULL(32)		/* threshold1 reached */
+#define TEMP_THRESHOLD2_STATUS	BIT_ULL(33)		/* threshold2 reached */
+/* threshold1 policy: 0 - AP2 (90% throttle) / 1 - AP1 (50% throttle) */
+#define TEMP_THRESHOLD1_POLICY	BIT_ULL(44)
+
+#define FME_THERM_RDSENSOR_FMT1	0x10
+#define FPGA_TEMPERATURE	GENMASK_ULL(6, 0)
+
+#define FME_THERM_CAP		0x20
+#define THERM_NO_THROTTLE	BIT_ULL(0)
+
+#define MD_PRE_DEG
+
+static bool fme_thermal_throttle_support(void __iomem *base)
+{
+	u64 v = readq(base + FME_THERM_CAP);
+
+	return FIELD_GET(THERM_NO_THROTTLE, v) ? false : true;
+}
+
+static umode_t thermal_hwmon_attrs_visible(const void *drvdata,
+					   enum hwmon_sensor_types type,
+					   u32 attr, int channel)
+{
+	const struct dfl_feature *feature = drvdata;
+
+	/* temperature is always supported; check the hardware cap for the others */
+	if (attr == hwmon_temp_input)
+		return 0444;
+
+	return fme_thermal_throttle_support(feature->ioaddr) ? 0444 : 0;
+}
+
+static int thermal_hwmon_read(struct device *dev, enum hwmon_sensor_types type,
+			      u32 attr, int channel, long *val)
+{
+	struct dfl_feature *feature = dev_get_drvdata(dev);
+	u64 v;
+
+	switch (attr) {
+	case hwmon_temp_input:
+		v = readq(feature->ioaddr + FME_THERM_RDSENSOR_FMT1);
+		*val = (long)(FIELD_GET(FPGA_TEMPERATURE, v) * 1000);
+		break;
+	case hwmon_temp_max:
+		v = readq(feature->ioaddr + FME_THERM_THRESHOLD);
+		*val = (long)(FIELD_GET(TEMP_THRESHOLD1, v) * 1000);
+		break;
+	case hwmon_temp_crit:
+		v = readq(feature->ioaddr + FME_THERM_THRESHOLD);
+		*val = (long)(FIELD_GET(TEMP_THRESHOLD2, v) * 1000);
+		break;
+	case hwmon_temp_emergency:
+		v = readq(feature->ioaddr + FME_THERM_THRESHOLD);
+		*val = (long)(FIELD_GET(TRIP_THRESHOLD, v) * 1000);
+		break;
+	case hwmon_temp_max_alarm:
+		v = readq(feature->ioaddr + FME_THERM_THRESHOLD);
+		*val = (long)FIELD_GET(TEMP_THRESHOLD1_STATUS, v);
+		break;
+	case hwmon_temp_crit_alarm:
+		v = readq(feature->ioaddr + FME_THERM_THRESHOLD);
+		*val = (long)FIELD_GET(TEMP_THRESHOLD2_STATUS, v);
+		break;
+	default:
+		return -EOPNOTSUPP;
+	}
+
+	return 0;
+}
+
+static const struct hwmon_ops thermal_hwmon_ops = {
+	.is_visible = thermal_hwmon_attrs_visible,
+	.read = thermal_hwmon_read,
+};
+
+static const struct hwmon_channel_info *thermal_hwmon_info[] = {
+	HWMON_CHANNEL_INFO(temp, HWMON_T_INPUT | HWMON_T_EMERGENCY |
+				 HWMON_T_MAX   | HWMON_T_MAX_ALARM |
+				 HWMON_T_CRIT  | HWMON_T_CRIT_ALARM),
+	NULL
+};
+
+static const struct hwmon_chip_info thermal_hwmon_chip_info = {
+	.ops = &thermal_hwmon_ops,
+	.info = thermal_hwmon_info,
+};
+
+static ssize_t temp1_max_policy_show(struct device *dev,
+				     struct device_attribute *attr, char *buf)
+{
+	struct dfl_feature *feature = dev_get_drvdata(dev);
+	u64 v;
+
+	v = readq(feature->ioaddr + FME_THERM_THRESHOLD);
+
+	return sprintf(buf, "%u\n",
+		       (unsigned int)FIELD_GET(TEMP_THRESHOLD1_POLICY, v));
+}
+
+static DEVICE_ATTR_RO(temp1_max_policy);
+
+static struct attribute *thermal_extra_attrs[] = {
+	&dev_attr_temp1_max_policy.attr,
+	NULL,
+};
+
+static umode_t thermal_extra_attrs_visible(struct kobject *kobj,
+					   struct attribute *attr, int index)
+{
+	struct device *dev = kobj_to_dev(kobj);
+	struct dfl_feature *feature = dev_get_drvdata(dev);
+
+	return fme_thermal_throttle_support(feature->ioaddr) ? attr->mode : 0;
+}
+
+static const struct attribute_group thermal_extra_group = {
+	.attrs		= thermal_extra_attrs,
+	.is_visible	= thermal_extra_attrs_visible,
+};
+__ATTRIBUTE_GROUPS(thermal_extra);
+
+static int fme_thermal_mgmt_init(struct platform_device *pdev,
+				 struct dfl_feature *feature)
+{
+	struct device *hwmon;
+
+	/*
+	 * create hwmon to allow userspace to monitor temperature and other
+	 * threshold information.
+	 *
+	 * temp1_input      -> FPGA device temperature
+	 * temp1_max        -> hardware threshold 1 -> 50% or 90% throttling
+	 * temp1_crit       -> hardware threshold 2 -> 100% throttling
+	 * temp1_emergency  -> hardware trip_threshold to shutdown FPGA
+	 * temp1_max_alarm  -> hardware threshold 1 alarm
+	 * temp1_crit_alarm -> hardware threshold 2 alarm
+	 *
+	 * also create device-specific sysfs interfaces, e.g. read
+	 * temp1_max_policy to learn the actual throttling action (50% vs 90%).
+	 *
+	 * If the hardware doesn't support automatic throttling per the
+	 * thresholds, then none of the above sysfs interfaces are visible
+	 * except temp1_input for the temperature.
+	 */
+	hwmon = devm_hwmon_device_register_with_info(&pdev->dev,
+						     "dfl_fme_thermal", feature,
+						     &thermal_hwmon_chip_info,
+						     thermal_extra_groups);
+	if (IS_ERR(hwmon)) {
+		dev_err(&pdev->dev, "Fail to register thermal hwmon\n");
+		return PTR_ERR(hwmon);
+	}
+
+	return 0;
+}
+
+static const struct dfl_feature_id fme_thermal_mgmt_id_table[] = {
+	{.id = FME_FEATURE_ID_THERMAL_MGMT,},
+	{0,}
+};
+
+static const struct dfl_feature_ops fme_thermal_mgmt_ops = {
+	.init = fme_thermal_mgmt_init,
+};
+
+#define FME_PWR_STATUS		0x8
+#define FME_LATENCY_TOLERANCE	BIT_ULL(18)
+#define PWR_CONSUMED		GENMASK_ULL(17, 0)
+
+#define FME_PWR_THRESHOLD	0x10
+#define PWR_THRESHOLD1		GENMASK_ULL(6, 0)	/* in Watts */
+#define PWR_THRESHOLD2		GENMASK_ULL(14, 8)	/* in Watts */
+#define PWR_THRESHOLD_MAX	0x7f			/* in Watts */
+#define PWR_THRESHOLD1_STATUS	BIT_ULL(16)
+#define PWR_THRESHOLD2_STATUS	BIT_ULL(17)
+
+#define FME_PWR_XEON_LIMIT	0x18
+#define XEON_PWR_LIMIT		GENMASK_ULL(14, 0)	/* in 0.1 Watts */
+#define XEON_PWR_EN		BIT_ULL(15)
+#define FME_PWR_FPGA_LIMIT	0x20
+#define FPGA_PWR_LIMIT		GENMASK_ULL(14, 0)	/* in 0.1 Watts */
+#define FPGA_PWR_EN		BIT_ULL(15)
+
+static int power_hwmon_read(struct device *dev, enum hwmon_sensor_types type,
+			    u32 attr, int channel, long *val)
+{
+	struct dfl_feature *feature = dev_get_drvdata(dev);
+	u64 v;
+
+	switch (attr) {
+	case hwmon_power_input:
+		v = readq(feature->ioaddr + FME_PWR_STATUS);
+		*val = (long)(FIELD_GET(PWR_CONSUMED, v) * 1000000);
+		break;
+	case hwmon_power_max:
+		v = readq(feature->ioaddr + FME_PWR_THRESHOLD);
+		*val = (long)(FIELD_GET(PWR_THRESHOLD1, v) * 1000000);
+		break;
+	case hwmon_power_crit:
+		v = readq(feature->ioaddr + FME_PWR_THRESHOLD);
+		*val = (long)(FIELD_GET(PWR_THRESHOLD2, v) * 1000000);
+		break;
+	case hwmon_power_max_alarm:
+		v = readq(feature->ioaddr + FME_PWR_THRESHOLD);
+		*val = (long)FIELD_GET(PWR_THRESHOLD1_STATUS, v);
+		break;
+	case hwmon_power_crit_alarm:
+		v = readq(feature->ioaddr + FME_PWR_THRESHOLD);
+		*val = (long)FIELD_GET(PWR_THRESHOLD2_STATUS, v);
+		break;
+	default:
+		return -EOPNOTSUPP;
+	}
+
+	return 0;
+}
+
+static int power_hwmon_write(struct device *dev, enum hwmon_sensor_types type,
+			     u32 attr, int channel, long val)
+{
+	struct dfl_feature_platform_data *pdata = dev_get_platdata(dev->parent);
+	struct dfl_feature *feature = dev_get_drvdata(dev);
+	int ret = 0;
+	u64 v;
+
+	val = clamp_val(val / 1000000, 0, PWR_THRESHOLD_MAX);
+
+	mutex_lock(&pdata->lock);
+
+	switch (attr) {
+	case hwmon_power_max:
+		v = readq(feature->ioaddr + FME_PWR_THRESHOLD);
+		v &= ~PWR_THRESHOLD1;
+		v |= FIELD_PREP(PWR_THRESHOLD1, val);
+		writeq(v, feature->ioaddr + FME_PWR_THRESHOLD);
+		break;
+	case hwmon_power_crit:
+		v = readq(feature->ioaddr + FME_PWR_THRESHOLD);
+		v &= ~PWR_THRESHOLD2;
+		v |= FIELD_PREP(PWR_THRESHOLD2, val);
+		writeq(v, feature->ioaddr + FME_PWR_THRESHOLD);
+		break;
+	default:
+		ret = -EOPNOTSUPP;
+		break;
+	}
+
+	mutex_unlock(&pdata->lock);
+
+	return ret;
+}
+
+static umode_t power_hwmon_attrs_visible(const void *drvdata,
+					 enum hwmon_sensor_types type,
+					 u32 attr, int channel)
+{
+	switch (attr) {
+	case hwmon_power_input:
+	case hwmon_power_max_alarm:
+	case hwmon_power_crit_alarm:
+		return 0444;
+	case hwmon_power_max:
+	case hwmon_power_crit:
+		return 0644;
+	}
+
+	return 0;
+}
+
+static const struct hwmon_ops power_hwmon_ops = {
+	.is_visible = power_hwmon_attrs_visible,
+	.read = power_hwmon_read,
+	.write = power_hwmon_write,
+};
+
+static const struct hwmon_channel_info *power_hwmon_info[] = {
+	HWMON_CHANNEL_INFO(power, HWMON_P_INPUT |
+				  HWMON_P_MAX   | HWMON_P_MAX_ALARM |
+				  HWMON_P_CRIT  | HWMON_P_CRIT_ALARM),
+	NULL
+};
+
+static const struct hwmon_chip_info power_hwmon_chip_info = {
+	.ops = &power_hwmon_ops,
+	.info = power_hwmon_info,
+};
+
+static ssize_t power1_xeon_limit_show(struct device *dev,
+				      struct device_attribute *attr, char *buf)
+{
+	struct dfl_feature *feature = dev_get_drvdata(dev);
+	u16 xeon_limit = 0;
+	u64 v;
+
+	v = readq(feature->ioaddr + FME_PWR_XEON_LIMIT);
+
+	if (FIELD_GET(XEON_PWR_EN, v))
+		xeon_limit = FIELD_GET(XEON_PWR_LIMIT, v);
+
+	return sprintf(buf, "%u\n", xeon_limit * 100000);
+}
+
+static ssize_t power1_fpga_limit_show(struct device *dev,
+				      struct device_attribute *attr, char *buf)
+{
+	struct dfl_feature *feature = dev_get_drvdata(dev);
+	u16 fpga_limit = 0;
+	u64 v;
+
+	v = readq(feature->ioaddr + FME_PWR_FPGA_LIMIT);
+
+	if (FIELD_GET(FPGA_PWR_EN, v))
+		fpga_limit = FIELD_GET(FPGA_PWR_LIMIT, v);
+
+	return sprintf(buf, "%u\n", fpga_limit * 100000);
+}
+
+static ssize_t power1_ltr_show(struct device *dev,
+			       struct device_attribute *attr, char *buf)
+{
+	struct dfl_feature *feature = dev_get_drvdata(dev);
+	u64 v;
+
+	v = readq(feature->ioaddr + FME_PWR_STATUS);
+
+	return sprintf(buf, "%u\n",
+		       (unsigned int)FIELD_GET(FME_LATENCY_TOLERANCE, v));
+}
+
+static DEVICE_ATTR_RO(power1_xeon_limit);
+static DEVICE_ATTR_RO(power1_fpga_limit);
+static DEVICE_ATTR_RO(power1_ltr);
+
+static struct attribute *power_extra_attrs[] = {
+	&dev_attr_power1_xeon_limit.attr,
+	&dev_attr_power1_fpga_limit.attr,
+	&dev_attr_power1_ltr.attr,
+	NULL
+};
+
+ATTRIBUTE_GROUPS(power_extra);
+
+static int fme_power_mgmt_init(struct platform_device *pdev,
+			       struct dfl_feature *feature)
+{
+	struct device *hwmon;
+
+	hwmon = devm_hwmon_device_register_with_info(&pdev->dev,
+						     "dfl_fme_power", feature,
+						     &power_hwmon_chip_info,
+						     power_extra_groups);
+	if (IS_ERR(hwmon)) {
+		dev_err(&pdev->dev, "Fail to register power hwmon\n");
+		return PTR_ERR(hwmon);
+	}
+
+	return 0;
+}
+
+static const struct dfl_feature_id fme_power_mgmt_id_table[] = {
+	{.id = FME_FEATURE_ID_POWER_MGMT,},
+	{0,}
+};
+
+static const struct dfl_feature_ops fme_power_mgmt_ops = {
+	.init = fme_power_mgmt_init,
+};
+
 static struct dfl_feature_driver fme_feature_drvs[] = {
 	{
 		.id_table = fme_hdr_id_table,
@@ -195,6 +572,18 @@
 		.ops = &fme_global_err_ops,
 	},
 	{
+		.id_table = fme_thermal_mgmt_id_table,
+		.ops = &fme_thermal_mgmt_ops,
+	},
+	{
+		.id_table = fme_power_mgmt_id_table,
+		.ops = &fme_power_mgmt_ops,
+	},
+	{
+		.id_table = fme_perf_id_table,
+		.ops = &fme_perf_ops,
+	},
+	{
 		.ops = NULL,
 	},
 };
@@ -215,24 +604,35 @@
 	if (WARN_ON(!pdata))
 		return -ENODEV;
 
-	ret = dfl_feature_dev_use_begin(pdata);
-	if (ret)
-		return ret;
+	mutex_lock(&pdata->lock);
+	ret = dfl_feature_dev_use_begin(pdata, filp->f_flags & O_EXCL);
+	if (!ret) {
+		dev_dbg(&fdev->dev, "Device File Opened %d Times\n",
+			dfl_feature_dev_use_count(pdata));
+		filp->private_data = pdata;
+	}
+	mutex_unlock(&pdata->lock);
 
-	dev_dbg(&fdev->dev, "Device File Open\n");
-	filp->private_data = pdata;
-
-	return 0;
+	return ret;
 }
 
 static int fme_release(struct inode *inode, struct file *filp)
 {
 	struct dfl_feature_platform_data *pdata = filp->private_data;
 	struct platform_device *pdev = pdata->dev;
+	struct dfl_feature *feature;
 
 	dev_dbg(&pdev->dev, "Device File Release\n");
+
+	mutex_lock(&pdata->lock);
 	dfl_feature_dev_use_end(pdata);
 
+	if (!dfl_feature_dev_use_count(pdata))
+		dfl_fpga_dev_for_each_feature(pdata, feature)
+			dfl_fpga_set_irq_triggers(feature, 0,
+						  feature->nr_irqs, NULL);
+	mutex_unlock(&pdata->lock);
+
 	return 0;
 }
 
@@ -290,10 +690,8 @@
 static void fme_dev_destroy(struct platform_device *pdev)
 {
 	struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
-	struct dfl_fme *fme;
 
 	mutex_lock(&pdata->lock);
-	fme = dfl_fpga_pdata_get_private(pdata);
 	dfl_fpga_pdata_set_private(pdata, NULL);
 	mutex_unlock(&pdata->lock);
 }
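
The thermal and power features above register standard hwmon devices named
"dfl_fme_thermal" and "dfl_fme_power", so readings follow the usual hwmon
units: millidegrees Celsius for temp*_input and microwatts for power*_input.
A minimal reader sketch; the hwmon index is an assumption, and real code
should first match /sys/class/hwmon/hwmon*/name against "dfl_fme_thermal":

    #include <stdio.h>

    int main(void)
    {
            /* hwmon0 is illustrative; discover the index via the name file */
            FILE *f = fopen("/sys/class/hwmon/hwmon0/temp1_input", "r");
            long mdeg;

            if (!f || fscanf(f, "%ld", &mdeg) != 1)
                    return 1;

            /* hwmon reports millidegrees Celsius */
            printf("FPGA temperature: %ld.%03ld C\n", mdeg / 1000, mdeg % 1000);
            fclose(f);
            return 0;
    }
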
diff --git a/drivers/fpga/dfl-fme-perf.c b/drivers/fpga/dfl-fme-perf.c
new file mode 100644
index 0000000..329b032
--- /dev/null
+++ b/drivers/fpga/dfl-fme-perf.c
@@ -0,0 +1,1022 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Driver for FPGA Management Engine (FME) Global Performance Reporting
+ *
+ * Copyright 2019 Intel Corporation, Inc.
+ *
+ * Authors:
+ *   Kang Luwei <luwei.kang@intel.com>
+ *   Xiao Guangrong <guangrong.xiao@linux.intel.com>
+ *   Wu Hao <hao.wu@intel.com>
+ *   Xu Yilun <yilun.xu@intel.com>
+ *   Joseph Grecco <joe.grecco@intel.com>
+ *   Enno Luebbers <enno.luebbers@intel.com>
+ *   Tim Whisonant <tim.whisonant@intel.com>
+ *   Ananda Ravuri <ananda.ravuri@intel.com>
+ *   Mitchel, Henry <henry.mitchel@intel.com>
+ */
+
+#include <linux/perf_event.h>
+#include "dfl.h"
+#include "dfl-fme.h"
+
+/*
+ * Performance Counter Registers for Cache.
+ *
+ * Cache Events are listed below as CACHE_EVNT_*.
+ */
+#define CACHE_CTRL			0x8
+#define CACHE_RESET_CNTR		BIT_ULL(0)
+#define CACHE_FREEZE_CNTR		BIT_ULL(8)
+#define CACHE_CTRL_EVNT			GENMASK_ULL(19, 16)
+#define CACHE_EVNT_RD_HIT		0x0
+#define CACHE_EVNT_WR_HIT		0x1
+#define CACHE_EVNT_RD_MISS		0x2
+#define CACHE_EVNT_WR_MISS		0x3
+#define CACHE_EVNT_RSVD			0x4
+#define CACHE_EVNT_HOLD_REQ		0x5
+#define CACHE_EVNT_DATA_WR_PORT_CONTEN	0x6
+#define CACHE_EVNT_TAG_WR_PORT_CONTEN	0x7
+#define CACHE_EVNT_TX_REQ_STALL		0x8
+#define CACHE_EVNT_RX_REQ_STALL		0x9
+#define CACHE_EVNT_EVICTIONS		0xa
+#define CACHE_EVNT_MAX			CACHE_EVNT_EVICTIONS
+#define CACHE_CHANNEL_SEL		BIT_ULL(20)
+#define CACHE_CHANNEL_RD		0
+#define CACHE_CHANNEL_WR		1
+#define CACHE_CNTR0			0x10
+#define CACHE_CNTR1			0x18
+#define CACHE_CNTR_EVNT_CNTR		GENMASK_ULL(47, 0)
+#define CACHE_CNTR_EVNT			GENMASK_ULL(63, 60)
+
+/*
+ * Performance Counter Registers for Fabric.
+ *
+ * Fabric Events are listed below as FAB_EVNT_*
+ */
+#define FAB_CTRL			0x20
+#define FAB_RESET_CNTR			BIT_ULL(0)
+#define FAB_FREEZE_CNTR			BIT_ULL(8)
+#define FAB_CTRL_EVNT			GENMASK_ULL(19, 16)
+#define FAB_EVNT_PCIE0_RD		0x0
+#define FAB_EVNT_PCIE0_WR		0x1
+#define FAB_EVNT_PCIE1_RD		0x2
+#define FAB_EVNT_PCIE1_WR		0x3
+#define FAB_EVNT_UPI_RD			0x4
+#define FAB_EVNT_UPI_WR			0x5
+#define FAB_EVNT_MMIO_RD		0x6
+#define FAB_EVNT_MMIO_WR		0x7
+#define FAB_EVNT_MAX			FAB_EVNT_MMIO_WR
+#define FAB_PORT_ID			GENMASK_ULL(21, 20)
+#define FAB_PORT_FILTER			BIT_ULL(23)
+#define FAB_PORT_FILTER_DISABLE		0
+#define FAB_PORT_FILTER_ENABLE		1
+#define FAB_CNTR			0x28
+#define FAB_CNTR_EVNT_CNTR		GENMASK_ULL(59, 0)
+#define FAB_CNTR_EVNT			GENMASK_ULL(63, 60)
+
+/*
+ * Performance Counter Registers for Clock.
+ *
+ * Clock Counter can't be reset or frozen by SW.
+ */
+#define CLK_CNTR			0x30
+#define BASIC_EVNT_CLK			0x0
+#define BASIC_EVNT_MAX			BASIC_EVNT_CLK
+
+/*
+ * Performance Counter Registers for IOMMU / VT-D.
+ *
+ * VT-D Events are listed below as VTD_EVNT_* and VTD_SIP_EVNT_*
+ */
+#define VTD_CTRL			0x38
+#define VTD_RESET_CNTR			BIT_ULL(0)
+#define VTD_FREEZE_CNTR			BIT_ULL(8)
+#define VTD_CTRL_EVNT			GENMASK_ULL(19, 16)
+#define VTD_EVNT_AFU_MEM_RD_TRANS	0x0
+#define VTD_EVNT_AFU_MEM_WR_TRANS	0x1
+#define VTD_EVNT_AFU_DEVTLB_RD_HIT	0x2
+#define VTD_EVNT_AFU_DEVTLB_WR_HIT	0x3
+#define VTD_EVNT_DEVTLB_4K_FILL		0x4
+#define VTD_EVNT_DEVTLB_2M_FILL		0x5
+#define VTD_EVNT_DEVTLB_1G_FILL		0x6
+#define VTD_EVNT_MAX			VTD_EVNT_DEVTLB_1G_FILL
+#define VTD_CNTR			0x40
+#define VTD_CNTR_EVNT_CNTR		GENMASK_ULL(47, 0)
+#define VTD_CNTR_EVNT			GENMASK_ULL(63, 60)
+
+#define VTD_SIP_CTRL			0x48
+#define VTD_SIP_RESET_CNTR		BIT_ULL(0)
+#define VTD_SIP_FREEZE_CNTR		BIT_ULL(8)
+#define VTD_SIP_CTRL_EVNT		GENMASK_ULL(19, 16)
+#define VTD_SIP_EVNT_IOTLB_4K_HIT	0x0
+#define VTD_SIP_EVNT_IOTLB_2M_HIT	0x1
+#define VTD_SIP_EVNT_IOTLB_1G_HIT	0x2
+#define VTD_SIP_EVNT_SLPWC_L3_HIT	0x3
+#define VTD_SIP_EVNT_SLPWC_L4_HIT	0x4
+#define VTD_SIP_EVNT_RCC_HIT		0x5
+#define VTD_SIP_EVNT_IOTLB_4K_MISS	0x6
+#define VTD_SIP_EVNT_IOTLB_2M_MISS	0x7
+#define VTD_SIP_EVNT_IOTLB_1G_MISS	0x8
+#define VTD_SIP_EVNT_SLPWC_L3_MISS	0x9
+#define VTD_SIP_EVNT_SLPWC_L4_MISS	0xa
+#define VTD_SIP_EVNT_RCC_MISS		0xb
+#define VTD_SIP_EVNT_MAX		VTD_SIP_EVNT_SLPWC_L4_MISS
+#define VTD_SIP_CNTR			0x50
+#define VTD_SIP_CNTR_EVNT_CNTR		GENMASK_ULL(47, 0)
+#define VTD_SIP_CNTR_EVNT		GENMASK_ULL(63, 60)
+
+#define PERF_TIMEOUT			30
+
+#define PERF_MAX_PORT_NUM		1U
+
+/**
+ * struct fme_perf_priv - priv data structure for fme perf driver
+ *
+ * @dev: parent device.
+ * @ioaddr: mapped base address of mmio region.
+ * @pmu: pmu data structure for fme perf counters.
+ * @id: id of this fme performance report private feature.
+ * @fab_users: current user number on fabric counters.
+ * @fab_port_id: used to indicate current working mode of fabric counters.
+ * @fab_lock: lock to protect fabric counters working mode.
+ * @cpu: active CPU to which the PMU is bound for accesses.
+ * @node: node for CPU hotplug notifier link.
+ * @cpuhp_state: state for CPU hotplug notification.
+ */
+struct fme_perf_priv {
+	struct device *dev;
+	void __iomem *ioaddr;
+	struct pmu pmu;
+	u16 id;
+
+	u32 fab_users;
+	u32 fab_port_id;
+	spinlock_t fab_lock;
+
+	unsigned int cpu;
+	struct hlist_node node;
+	enum cpuhp_state cpuhp_state;
+};
+
+/**
+ * struct fme_perf_event_ops - callbacks for fme perf events
+ *
+ * @event_init: callback invoked during event init.
+ * @event_destroy: callback invoked during event destroy.
+ * @read_counter: callback to read hardware counters.
+ */
+struct fme_perf_event_ops {
+	int (*event_init)(struct fme_perf_priv *priv, u32 event, u32 portid);
+	void (*event_destroy)(struct fme_perf_priv *priv, u32 event,
+			      u32 portid);
+	u64 (*read_counter)(struct fme_perf_priv *priv, u32 event, u32 portid);
+};
+
+#define to_fme_perf_priv(_pmu)	container_of(_pmu, struct fme_perf_priv, pmu)
+
+static ssize_t cpumask_show(struct device *dev,
+			    struct device_attribute *attr, char *buf)
+{
+	struct pmu *pmu = dev_get_drvdata(dev);
+	struct fme_perf_priv *priv;
+
+	priv = to_fme_perf_priv(pmu);
+
+	return cpumap_print_to_pagebuf(true, buf, cpumask_of(priv->cpu));
+}
+static DEVICE_ATTR_RO(cpumask);
+
+static struct attribute *fme_perf_cpumask_attrs[] = {
+	&dev_attr_cpumask.attr,
+	NULL,
+};
+
+static struct attribute_group fme_perf_cpumask_group = {
+	.attrs = fme_perf_cpumask_attrs,
+};
+
+#define FME_EVENT_MASK		GENMASK_ULL(11, 0)
+#define FME_EVENT_SHIFT		0
+#define FME_EVTYPE_MASK		GENMASK_ULL(15, 12)
+#define FME_EVTYPE_SHIFT	12
+#define FME_EVTYPE_BASIC	0
+#define FME_EVTYPE_CACHE	1
+#define FME_EVTYPE_FABRIC	2
+#define FME_EVTYPE_VTD		3
+#define FME_EVTYPE_VTD_SIP	4
+#define FME_EVTYPE_MAX		FME_EVTYPE_VTD_SIP
+#define FME_PORTID_MASK		GENMASK_ULL(23, 16)
+#define FME_PORTID_SHIFT	16
+#define FME_PORTID_ROOT		(0xffU)
+
+#define get_event(_config)	FIELD_GET(FME_EVENT_MASK, _config)
+#define get_evtype(_config)	FIELD_GET(FME_EVTYPE_MASK, _config)
+#define get_portid(_config)	FIELD_GET(FME_PORTID_MASK, _config)
+
+PMU_FORMAT_ATTR(event,		"config:0-11");
+PMU_FORMAT_ATTR(evtype,		"config:12-15");
+PMU_FORMAT_ATTR(portid,		"config:16-23");
+
+static struct attribute *fme_perf_format_attrs[] = {
+	&format_attr_event.attr,
+	&format_attr_evtype.attr,
+	&format_attr_portid.attr,
+	NULL,
+};
+
+static struct attribute_group fme_perf_format_group = {
+	.name = "format",
+	.attrs = fme_perf_format_attrs,
+};
+
+/*
+ * There are no default events, but we need to create an
+ * "events" group (with empty attrs) before updating it
+ * with the detected events (via pmu->attr_update).
+ */
+static struct attribute *fme_perf_events_attrs_empty[] = {
+	NULL,
+};
+
+static struct attribute_group fme_perf_events_group = {
+	.name = "events",
+	.attrs = fme_perf_events_attrs_empty,
+};
+
+static const struct attribute_group *fme_perf_groups[] = {
+	&fme_perf_format_group,
+	&fme_perf_cpumask_group,
+	&fme_perf_events_group,
+	NULL,
+};
+
+static bool is_portid_root(u32 portid)
+{
+	return portid == FME_PORTID_ROOT;
+}
+
+static bool is_portid_port(u32 portid)
+{
+	return portid < PERF_MAX_PORT_NUM;
+}
+
+static bool is_portid_root_or_port(u32 portid)
+{
+	return is_portid_root(portid) || is_portid_port(portid);
+}
+
+static u64 fme_read_perf_cntr_reg(void __iomem *addr)
+{
+	u32 low;
+	u64 v;
+
+	/*
+	 * For 64-bit counter registers, the counter may increase and carry
+	 * out of bit [31] between two 32-bit reads, so read the low word
+	 * again and retry when a carry is detected. This only happens on
+	 * platforms without 64-bit reads, where readq splits into two readl.
+	 */
+	do {
+		v = readq(addr);
+		low = readl(addr);
+	} while (((u32)v) > low);
+
+	return v;
+}
+
+static int basic_event_init(struct fme_perf_priv *priv, u32 event, u32 portid)
+{
+	if (event <= BASIC_EVNT_MAX && is_portid_root(portid))
+		return 0;
+
+	return -EINVAL;
+}
+
+static u64 basic_read_event_counter(struct fme_perf_priv *priv,
+				    u32 event, u32 portid)
+{
+	void __iomem *base = priv->ioaddr;
+
+	return fme_read_perf_cntr_reg(base + CLK_CNTR);
+}
+
+static int cache_event_init(struct fme_perf_priv *priv, u32 event, u32 portid)
+{
+	if (priv->id == FME_FEATURE_ID_GLOBAL_IPERF &&
+	    event <= CACHE_EVNT_MAX && is_portid_root(portid))
+		return 0;
+
+	return -EINVAL;
+}
+
+static u64 cache_read_event_counter(struct fme_perf_priv *priv,
+				    u32 event, u32 portid)
+{
+	void __iomem *base = priv->ioaddr;
+	u64 v, count;
+	u8 channel;
+
+	if (event == CACHE_EVNT_WR_HIT || event == CACHE_EVNT_WR_MISS ||
+	    event == CACHE_EVNT_DATA_WR_PORT_CONTEN ||
+	    event == CACHE_EVNT_TAG_WR_PORT_CONTEN)
+		channel = CACHE_CHANNEL_WR;
+	else
+		channel = CACHE_CHANNEL_RD;
+
+	/* set channel access type and cache event code. */
+	v = readq(base + CACHE_CTRL);
+	v &= ~(CACHE_CHANNEL_SEL | CACHE_CTRL_EVNT);
+	v |= FIELD_PREP(CACHE_CHANNEL_SEL, channel);
+	v |= FIELD_PREP(CACHE_CTRL_EVNT, event);
+	writeq(v, base + CACHE_CTRL);
+
+	if (readq_poll_timeout_atomic(base + CACHE_CNTR0, v,
+				      FIELD_GET(CACHE_CNTR_EVNT, v) == event,
+				      1, PERF_TIMEOUT)) {
+		dev_err(priv->dev, "timeout, unmatched cache event code in counter register.\n");
+		return 0;
+	}
+
+	v = fme_read_perf_cntr_reg(base + CACHE_CNTR0);
+	count = FIELD_GET(CACHE_CNTR_EVNT_CNTR, v);
+	v = fme_read_perf_cntr_reg(base + CACHE_CNTR1);
+	count += FIELD_GET(CACHE_CNTR_EVNT_CNTR, v);
+
+	return count;
+}
+
+static bool is_fabric_event_supported(struct fme_perf_priv *priv, u32 event,
+				      u32 portid)
+{
+	if (event > FAB_EVNT_MAX || !is_portid_root_or_port(portid))
+		return false;
+
+	if (priv->id == FME_FEATURE_ID_GLOBAL_DPERF &&
+	    (event == FAB_EVNT_PCIE1_RD || event == FAB_EVNT_UPI_RD ||
+	     event == FAB_EVNT_PCIE1_WR || event == FAB_EVNT_UPI_WR))
+		return false;
+
+	return true;
+}
+
+static int fabric_event_init(struct fme_perf_priv *priv, u32 event, u32 portid)
+{
+	void __iomem *base = priv->ioaddr;
+	int ret = 0;
+	u64 v;
+
+	if (!is_fabric_event_supported(priv, event, portid))
+		return -EINVAL;
+
+	/*
+	 * The fabric counter set can only be in either overall or port mode.
+	 * In overall mode it counts overall data for the FPGA; in port mode
+	 * it is configured to monitor one individual port.
+	 *
+	 * So every time a new event is initialized, the driver checks the
+	 * current working mode and whether someone is using this counter set.
+	 */
+	spin_lock(&priv->fab_lock);
+	if (priv->fab_users && priv->fab_port_id != portid) {
+		dev_dbg(priv->dev, "conflict fabric event monitoring mode.\n");
+		ret = -EOPNOTSUPP;
+		goto exit;
+	}
+
+	priv->fab_users++;
+
+	/*
+	 * skip if the current working mode matches; otherwise change it per
+	 * the input port_id, to monitor overall data or another port.
+	 */
+	if (priv->fab_port_id == portid)
+		goto exit;
+
+	priv->fab_port_id = portid;
+
+	v = readq(base + FAB_CTRL);
+	v &= ~(FAB_PORT_FILTER | FAB_PORT_ID);
+
+	if (is_portid_root(portid)) {
+		v |= FIELD_PREP(FAB_PORT_FILTER, FAB_PORT_FILTER_DISABLE);
+	} else {
+		v |= FIELD_PREP(FAB_PORT_FILTER, FAB_PORT_FILTER_ENABLE);
+		v |= FIELD_PREP(FAB_PORT_ID, portid);
+	}
+	writeq(v, base + FAB_CTRL);
+
+exit:
+	spin_unlock(&priv->fab_lock);
+	return ret;
+}
+
+static void fabric_event_destroy(struct fme_perf_priv *priv, u32 event,
+				 u32 portid)
+{
+	spin_lock(&priv->fab_lock);
+	priv->fab_users--;
+	spin_unlock(&priv->fab_lock);
+}
+
+static u64 fabric_read_event_counter(struct fme_perf_priv *priv, u32 event,
+				     u32 portid)
+{
+	void __iomem *base = priv->ioaddr;
+	u64 v;
+
+	v = readq(base + FAB_CTRL);
+	v &= ~FAB_CTRL_EVNT;
+	v |= FIELD_PREP(FAB_CTRL_EVNT, event);
+	writeq(v, base + FAB_CTRL);
+
+	if (readq_poll_timeout_atomic(base + FAB_CNTR, v,
+				      FIELD_GET(FAB_CNTR_EVNT, v) == event,
+				      1, PERF_TIMEOUT)) {
+		dev_err(priv->dev, "timeout, unmatched fab event code in counter register.\n");
+		return 0;
+	}
+
+	v = fme_read_perf_cntr_reg(base + FAB_CNTR);
+	return FIELD_GET(FAB_CNTR_EVNT_CNTR, v);
+}
+
+static int vtd_event_init(struct fme_perf_priv *priv, u32 event, u32 portid)
+{
+	if (priv->id == FME_FEATURE_ID_GLOBAL_IPERF &&
+	    event <= VTD_EVNT_MAX && is_portid_port(portid))
+		return 0;
+
+	return -EINVAL;
+}
+
+static u64 vtd_read_event_counter(struct fme_perf_priv *priv, u32 event,
+				  u32 portid)
+{
+	void __iomem *base = priv->ioaddr;
+	u64 v;
+
+	event += (portid * (VTD_EVNT_MAX + 1));
+
+	v = readq(base + VTD_CTRL);
+	v &= ~VTD_CTRL_EVNT;
+	v |= FIELD_PREP(VTD_CTRL_EVNT, event);
+	writeq(v, base + VTD_CTRL);
+
+	if (readq_poll_timeout_atomic(base + VTD_CNTR, v,
+				      FIELD_GET(VTD_CNTR_EVNT, v) == event,
+				      1, PERF_TIMEOUT)) {
+		dev_err(priv->dev, "timeout, unmatched vtd event code in counter register.\n");
+		return 0;
+	}
+
+	v = fme_read_perf_cntr_reg(base + VTD_CNTR);
+	return FIELD_GET(VTD_CNTR_EVNT_CNTR, v);
+}
+
+static int vtd_sip_event_init(struct fme_perf_priv *priv, u32 event, u32 portid)
+{
+	if (priv->id == FME_FEATURE_ID_GLOBAL_IPERF &&
+	    event <= VTD_SIP_EVNT_MAX && is_portid_root(portid))
+		return 0;
+
+	return -EINVAL;
+}
+
+static u64 vtd_sip_read_event_counter(struct fme_perf_priv *priv, u32 event,
+				      u32 portid)
+{
+	void __iomem *base = priv->ioaddr;
+	u64 v;
+
+	v = readq(base + VTD_SIP_CTRL);
+	v &= ~VTD_SIP_CTRL_EVNT;
+	v |= FIELD_PREP(VTD_SIP_CTRL_EVNT, event);
+	writeq(v, base + VTD_SIP_CTRL);
+
+	if (readq_poll_timeout_atomic(base + VTD_SIP_CNTR, v,
+				      FIELD_GET(VTD_SIP_CNTR_EVNT, v) == event,
+				      1, PERF_TIMEOUT)) {
+		dev_err(priv->dev, "timeout, unmatched vtd sip event code in counter register\n");
+		return 0;
+	}
+
+	v = fme_read_perf_cntr_reg(base + VTD_SIP_CNTR);
+	return FIELD_GET(VTD_SIP_CNTR_EVNT_CNTR, v);
+}
+
+static struct fme_perf_event_ops fme_perf_event_ops[] = {
+	[FME_EVTYPE_BASIC]	= {.event_init = basic_event_init,
+				   .read_counter = basic_read_event_counter,},
+	[FME_EVTYPE_CACHE]	= {.event_init = cache_event_init,
+				   .read_counter = cache_read_event_counter,},
+	[FME_EVTYPE_FABRIC]	= {.event_init = fabric_event_init,
+				   .event_destroy = fabric_event_destroy,
+				   .read_counter = fabric_read_event_counter,},
+	[FME_EVTYPE_VTD]	= {.event_init = vtd_event_init,
+				   .read_counter = vtd_read_event_counter,},
+	[FME_EVTYPE_VTD_SIP]	= {.event_init = vtd_sip_event_init,
+				   .read_counter = vtd_sip_read_event_counter,},
+};
+
+static ssize_t fme_perf_event_show(struct device *dev,
+				   struct device_attribute *attr, char *buf)
+{
+	struct dev_ext_attribute *eattr;
+	unsigned long config;
+	char *ptr = buf;
+
+	eattr = container_of(attr, struct dev_ext_attribute, attr);
+	config = (unsigned long)eattr->var;
+
+	ptr += sprintf(ptr, "event=0x%02x", (unsigned int)get_event(config));
+	ptr += sprintf(ptr, ",evtype=0x%02x", (unsigned int)get_evtype(config));
+
+	if (is_portid_root(get_portid(config)))
+		ptr += sprintf(ptr, ",portid=0x%02x\n", FME_PORTID_ROOT);
+	else
+		ptr += sprintf(ptr, ",portid=?\n");
+
+	return (ssize_t)(ptr - buf);
+}
+
+#define FME_EVENT_ATTR(_name) \
+	__ATTR(_name, 0444, fme_perf_event_show, NULL)
+
+#define FME_PORT_EVENT_CONFIG(_event, _type)				\
+	(void *)((((_event) << FME_EVENT_SHIFT) & FME_EVENT_MASK) |	\
+		(((_type) << FME_EVTYPE_SHIFT) & FME_EVTYPE_MASK))
+
+#define FME_EVENT_CONFIG(_event, _type)					\
+	(void *)((((_event) << FME_EVENT_SHIFT) & FME_EVENT_MASK) |	\
+		(((_type) << FME_EVTYPE_SHIFT) & FME_EVTYPE_MASK) |	\
+		(FME_PORTID_ROOT << FME_PORTID_SHIFT))
+
+/* FME Perf Basic Events */
+#define FME_EVENT_BASIC(_name, _event)					\
+static struct dev_ext_attribute fme_perf_event_##_name = {		\
+	.attr = FME_EVENT_ATTR(_name),					\
+	.var = FME_EVENT_CONFIG(_event, FME_EVTYPE_BASIC),		\
+}
+
+FME_EVENT_BASIC(clock, BASIC_EVNT_CLK);
+
+static struct attribute *fme_perf_basic_events_attrs[] = {
+	&fme_perf_event_clock.attr.attr,
+	NULL,
+};
+
+static const struct attribute_group fme_perf_basic_events_group = {
+	.name = "events",
+	.attrs = fme_perf_basic_events_attrs,
+};
+
+/* FME Perf Cache Events */
+#define FME_EVENT_CACHE(_name, _event)					\
+static struct dev_ext_attribute fme_perf_event_cache_##_name = {	\
+	.attr = FME_EVENT_ATTR(cache_##_name),				\
+	.var = FME_EVENT_CONFIG(_event, FME_EVTYPE_CACHE),		\
+}
+
+FME_EVENT_CACHE(read_hit,     CACHE_EVNT_RD_HIT);
+FME_EVENT_CACHE(read_miss,    CACHE_EVNT_RD_MISS);
+FME_EVENT_CACHE(write_hit,    CACHE_EVNT_WR_HIT);
+FME_EVENT_CACHE(write_miss,   CACHE_EVNT_WR_MISS);
+FME_EVENT_CACHE(hold_request, CACHE_EVNT_HOLD_REQ);
+FME_EVENT_CACHE(tx_req_stall, CACHE_EVNT_TX_REQ_STALL);
+FME_EVENT_CACHE(rx_req_stall, CACHE_EVNT_RX_REQ_STALL);
+FME_EVENT_CACHE(eviction,     CACHE_EVNT_EVICTIONS);
+FME_EVENT_CACHE(data_write_port_contention, CACHE_EVNT_DATA_WR_PORT_CONTEN);
+FME_EVENT_CACHE(tag_write_port_contention,  CACHE_EVNT_TAG_WR_PORT_CONTEN);
+
+static struct attribute *fme_perf_cache_events_attrs[] = {
+	&fme_perf_event_cache_read_hit.attr.attr,
+	&fme_perf_event_cache_read_miss.attr.attr,
+	&fme_perf_event_cache_write_hit.attr.attr,
+	&fme_perf_event_cache_write_miss.attr.attr,
+	&fme_perf_event_cache_hold_request.attr.attr,
+	&fme_perf_event_cache_tx_req_stall.attr.attr,
+	&fme_perf_event_cache_rx_req_stall.attr.attr,
+	&fme_perf_event_cache_eviction.attr.attr,
+	&fme_perf_event_cache_data_write_port_contention.attr.attr,
+	&fme_perf_event_cache_tag_write_port_contention.attr.attr,
+	NULL,
+};
+
+static umode_t fme_perf_events_visible(struct kobject *kobj,
+				       struct attribute *attr, int n)
+{
+	struct pmu *pmu = dev_get_drvdata(kobj_to_dev(kobj));
+	struct fme_perf_priv *priv = to_fme_perf_priv(pmu);
+
+	return (priv->id == FME_FEATURE_ID_GLOBAL_IPERF) ? attr->mode : 0;
+}
+
+static const struct attribute_group fme_perf_cache_events_group = {
+	.name = "events",
+	.attrs = fme_perf_cache_events_attrs,
+	.is_visible = fme_perf_events_visible,
+};
+
+/* FME Perf Fabric Events */
+#define FME_EVENT_FABRIC(_name, _event)					\
+static struct dev_ext_attribute fme_perf_event_fab_##_name = {		\
+	.attr = FME_EVENT_ATTR(fab_##_name),				\
+	.var = FME_EVENT_CONFIG(_event, FME_EVTYPE_FABRIC),		\
+}
+
+#define FME_EVENT_FABRIC_PORT(_name, _event)				\
+static struct dev_ext_attribute fme_perf_event_fab_port_##_name = {	\
+	.attr = FME_EVENT_ATTR(fab_port_##_name),			\
+	.var = FME_PORT_EVENT_CONFIG(_event, FME_EVTYPE_FABRIC),	\
+}
+
+FME_EVENT_FABRIC(pcie0_read,  FAB_EVNT_PCIE0_RD);
+FME_EVENT_FABRIC(pcie0_write, FAB_EVNT_PCIE0_WR);
+FME_EVENT_FABRIC(pcie1_read,  FAB_EVNT_PCIE1_RD);
+FME_EVENT_FABRIC(pcie1_write, FAB_EVNT_PCIE1_WR);
+FME_EVENT_FABRIC(upi_read,    FAB_EVNT_UPI_RD);
+FME_EVENT_FABRIC(upi_write,   FAB_EVNT_UPI_WR);
+FME_EVENT_FABRIC(mmio_read,   FAB_EVNT_MMIO_RD);
+FME_EVENT_FABRIC(mmio_write,  FAB_EVNT_MMIO_WR);
+
+FME_EVENT_FABRIC_PORT(pcie0_read,  FAB_EVNT_PCIE0_RD);
+FME_EVENT_FABRIC_PORT(pcie0_write, FAB_EVNT_PCIE0_WR);
+FME_EVENT_FABRIC_PORT(pcie1_read,  FAB_EVNT_PCIE1_RD);
+FME_EVENT_FABRIC_PORT(pcie1_write, FAB_EVNT_PCIE1_WR);
+FME_EVENT_FABRIC_PORT(upi_read,    FAB_EVNT_UPI_RD);
+FME_EVENT_FABRIC_PORT(upi_write,   FAB_EVNT_UPI_WR);
+FME_EVENT_FABRIC_PORT(mmio_read,   FAB_EVNT_MMIO_RD);
+FME_EVENT_FABRIC_PORT(mmio_write,  FAB_EVNT_MMIO_WR);
+
+static struct attribute *fme_perf_fabric_events_attrs[] = {
+	&fme_perf_event_fab_pcie0_read.attr.attr,
+	&fme_perf_event_fab_pcie0_write.attr.attr,
+	&fme_perf_event_fab_pcie1_read.attr.attr,
+	&fme_perf_event_fab_pcie1_write.attr.attr,
+	&fme_perf_event_fab_upi_read.attr.attr,
+	&fme_perf_event_fab_upi_write.attr.attr,
+	&fme_perf_event_fab_mmio_read.attr.attr,
+	&fme_perf_event_fab_mmio_write.attr.attr,
+	&fme_perf_event_fab_port_pcie0_read.attr.attr,
+	&fme_perf_event_fab_port_pcie0_write.attr.attr,
+	&fme_perf_event_fab_port_pcie1_read.attr.attr,
+	&fme_perf_event_fab_port_pcie1_write.attr.attr,
+	&fme_perf_event_fab_port_upi_read.attr.attr,
+	&fme_perf_event_fab_port_upi_write.attr.attr,
+	&fme_perf_event_fab_port_mmio_read.attr.attr,
+	&fme_perf_event_fab_port_mmio_write.attr.attr,
+	NULL,
+};
+
+static umode_t fme_perf_fabric_events_visible(struct kobject *kobj,
+					      struct attribute *attr, int n)
+{
+	struct pmu *pmu = dev_get_drvdata(kobj_to_dev(kobj));
+	struct fme_perf_priv *priv = to_fme_perf_priv(pmu);
+	struct dev_ext_attribute *eattr;
+	unsigned long var;
+
+	eattr = container_of(attr, struct dev_ext_attribute, attr.attr);
+	var = (unsigned long)eattr->var;
+
+	if (is_fabric_event_supported(priv, get_event(var), get_portid(var)))
+		return attr->mode;
+
+	return 0;
+}
+
+static const struct attribute_group fme_perf_fabric_events_group = {
+	.name = "events",
+	.attrs = fme_perf_fabric_events_attrs,
+	.is_visible = fme_perf_fabric_events_visible,
+};
+
+/* FME Perf VTD Events */
+#define FME_EVENT_VTD_PORT(_name, _event)				\
+static struct dev_ext_attribute fme_perf_event_vtd_port_##_name = {	\
+	.attr = FME_EVENT_ATTR(vtd_port_##_name),			\
+	.var = FME_PORT_EVENT_CONFIG(_event, FME_EVTYPE_VTD),		\
+}
+
+FME_EVENT_VTD_PORT(read_transaction,  VTD_EVNT_AFU_MEM_RD_TRANS);
+FME_EVENT_VTD_PORT(write_transaction, VTD_EVNT_AFU_MEM_WR_TRANS);
+FME_EVENT_VTD_PORT(devtlb_read_hit,   VTD_EVNT_AFU_DEVTLB_RD_HIT);
+FME_EVENT_VTD_PORT(devtlb_write_hit,  VTD_EVNT_AFU_DEVTLB_WR_HIT);
+FME_EVENT_VTD_PORT(devtlb_4k_fill,    VTD_EVNT_DEVTLB_4K_FILL);
+FME_EVENT_VTD_PORT(devtlb_2m_fill,    VTD_EVNT_DEVTLB_2M_FILL);
+FME_EVENT_VTD_PORT(devtlb_1g_fill,    VTD_EVNT_DEVTLB_1G_FILL);
+
+static struct attribute *fme_perf_vtd_events_attrs[] = {
+	&fme_perf_event_vtd_port_read_transaction.attr.attr,
+	&fme_perf_event_vtd_port_write_transaction.attr.attr,
+	&fme_perf_event_vtd_port_devtlb_read_hit.attr.attr,
+	&fme_perf_event_vtd_port_devtlb_write_hit.attr.attr,
+	&fme_perf_event_vtd_port_devtlb_4k_fill.attr.attr,
+	&fme_perf_event_vtd_port_devtlb_2m_fill.attr.attr,
+	&fme_perf_event_vtd_port_devtlb_1g_fill.attr.attr,
+	NULL,
+};
+
+static const struct attribute_group fme_perf_vtd_events_group = {
+	.name = "events",
+	.attrs = fme_perf_vtd_events_attrs,
+	.is_visible = fme_perf_events_visible,
+};
+
+/* FME Perf VTD SIP Events */
+#define FME_EVENT_VTD_SIP(_name, _event)				\
+static struct dev_ext_attribute fme_perf_event_vtd_sip_##_name = {	\
+	.attr = FME_EVENT_ATTR(vtd_sip_##_name),			\
+	.var = FME_EVENT_CONFIG(_event, FME_EVTYPE_VTD_SIP),		\
+}
+
+FME_EVENT_VTD_SIP(iotlb_4k_hit,  VTD_SIP_EVNT_IOTLB_4K_HIT);
+FME_EVENT_VTD_SIP(iotlb_2m_hit,  VTD_SIP_EVNT_IOTLB_2M_HIT);
+FME_EVENT_VTD_SIP(iotlb_1g_hit,  VTD_SIP_EVNT_IOTLB_1G_HIT);
+FME_EVENT_VTD_SIP(slpwc_l3_hit,  VTD_SIP_EVNT_SLPWC_L3_HIT);
+FME_EVENT_VTD_SIP(slpwc_l4_hit,  VTD_SIP_EVNT_SLPWC_L4_HIT);
+FME_EVENT_VTD_SIP(rcc_hit,       VTD_SIP_EVNT_RCC_HIT);
+FME_EVENT_VTD_SIP(iotlb_4k_miss, VTD_SIP_EVNT_IOTLB_4K_MISS);
+FME_EVENT_VTD_SIP(iotlb_2m_miss, VTD_SIP_EVNT_IOTLB_2M_MISS);
+FME_EVENT_VTD_SIP(iotlb_1g_miss, VTD_SIP_EVNT_IOTLB_1G_MISS);
+FME_EVENT_VTD_SIP(slpwc_l3_miss, VTD_SIP_EVNT_SLPWC_L3_MISS);
+FME_EVENT_VTD_SIP(slpwc_l4_miss, VTD_SIP_EVNT_SLPWC_L4_MISS);
+FME_EVENT_VTD_SIP(rcc_miss,      VTD_SIP_EVNT_RCC_MISS);
+
+static struct attribute *fme_perf_vtd_sip_events_attrs[] = {
+	&fme_perf_event_vtd_sip_iotlb_4k_hit.attr.attr,
+	&fme_perf_event_vtd_sip_iotlb_2m_hit.attr.attr,
+	&fme_perf_event_vtd_sip_iotlb_1g_hit.attr.attr,
+	&fme_perf_event_vtd_sip_slpwc_l3_hit.attr.attr,
+	&fme_perf_event_vtd_sip_slpwc_l4_hit.attr.attr,
+	&fme_perf_event_vtd_sip_rcc_hit.attr.attr,
+	&fme_perf_event_vtd_sip_iotlb_4k_miss.attr.attr,
+	&fme_perf_event_vtd_sip_iotlb_2m_miss.attr.attr,
+	&fme_perf_event_vtd_sip_iotlb_1g_miss.attr.attr,
+	&fme_perf_event_vtd_sip_slpwc_l3_miss.attr.attr,
+	&fme_perf_event_vtd_sip_slpwc_l4_miss.attr.attr,
+	&fme_perf_event_vtd_sip_rcc_miss.attr.attr,
+	NULL,
+};
+
+static const struct attribute_group fme_perf_vtd_sip_events_group = {
+	.name = "events",
+	.attrs = fme_perf_vtd_sip_events_attrs,
+	.is_visible = fme_perf_events_visible,
+};
+
+static const struct attribute_group *fme_perf_events_groups[] = {
+	&fme_perf_basic_events_group,
+	&fme_perf_cache_events_group,
+	&fme_perf_fabric_events_group,
+	&fme_perf_vtd_events_group,
+	&fme_perf_vtd_sip_events_group,
+	NULL,
+};
+
+static struct fme_perf_event_ops *get_event_ops(u32 evtype)
+{
+	if (evtype > FME_EVTYPE_MAX)
+		return NULL;
+
+	return &fme_perf_event_ops[evtype];
+}
+
+static void fme_perf_event_destroy(struct perf_event *event)
+{
+	struct fme_perf_event_ops *ops = get_event_ops(event->hw.event_base);
+	struct fme_perf_priv *priv = to_fme_perf_priv(event->pmu);
+
+	if (ops->event_destroy)
+		ops->event_destroy(priv, event->hw.idx, event->hw.config_base);
+}
+
+static int fme_perf_event_init(struct perf_event *event)
+{
+	struct fme_perf_priv *priv = to_fme_perf_priv(event->pmu);
+	struct hw_perf_event *hwc = &event->hw;
+	struct fme_perf_event_ops *ops;
+	u32 eventid, evtype, portid;
+
+	/* check the event attr type matches this PMU (perf core probes all PMUs) */
+	if (event->attr.type != event->pmu->type)
+		return -ENOENT;
+
+	/*
+	 * fme counters are shared across all cores, so the PMU does not
+	 * support per-process (per-task) mode. It does not support
+	 * event sampling mode either.
+	 */
+	if (is_sampling_event(event) || event->attach_state & PERF_ATTACH_TASK)
+		return -EINVAL;
+
+	if (event->cpu < 0)
+		return -EINVAL;
+
+	if (event->cpu != priv->cpu)
+		return -EINVAL;
+
+	eventid = get_event(event->attr.config);
+	portid = get_portid(event->attr.config);
+	evtype = get_evtype(event->attr.config);
+	if (evtype > FME_EVTYPE_MAX)
+		return -EINVAL;
+
+	hwc->event_base = evtype;
+	hwc->idx = (int)eventid;
+	hwc->config_base = portid;
+
+	event->destroy = fme_perf_event_destroy;
+
+	dev_dbg(priv->dev, "%s event=0x%x, evtype=0x%x, portid=0x%x,\n",
+		__func__, eventid, evtype, portid);
+
+	ops = get_event_ops(evtype);
+	if (ops->event_init)
+		return ops->event_init(priv, eventid, portid);
+
+	return 0;
+}
+
+static void fme_perf_event_update(struct perf_event *event)
+{
+	struct fme_perf_event_ops *ops = get_event_ops(event->hw.event_base);
+	struct fme_perf_priv *priv = to_fme_perf_priv(event->pmu);
+	struct hw_perf_event *hwc = &event->hw;
+	u64 now, prev, delta;
+
+	now = ops->read_counter(priv, (u32)hwc->idx, hwc->config_base);
+	prev = local64_read(&hwc->prev_count);
+	delta = now - prev;
+
+	local64_add(delta, &event->count);
+}
+
+static void fme_perf_event_start(struct perf_event *event, int flags)
+{
+	struct fme_perf_event_ops *ops = get_event_ops(event->hw.event_base);
+	struct fme_perf_priv *priv = to_fme_perf_priv(event->pmu);
+	struct hw_perf_event *hwc = &event->hw;
+	u64 count;
+
+	count = ops->read_counter(priv, (u32)hwc->idx, hwc->config_base);
+	local64_set(&hwc->prev_count, count);
+}
+
+static void fme_perf_event_stop(struct perf_event *event, int flags)
+{
+	fme_perf_event_update(event);
+}
+
+static int fme_perf_event_add(struct perf_event *event, int flags)
+{
+	if (flags & PERF_EF_START)
+		fme_perf_event_start(event, flags);
+
+	return 0;
+}
+
+static void fme_perf_event_del(struct perf_event *event, int flags)
+{
+	fme_perf_event_stop(event, PERF_EF_UPDATE);
+}
+
+static void fme_perf_event_read(struct perf_event *event)
+{
+	fme_perf_event_update(event);
+}
+
+static void fme_perf_setup_hardware(struct fme_perf_priv *priv)
+{
+	void __iomem *base = priv->ioaddr;
+	u64 v;
+
+	/* read and save current working mode for fabric counters */
+	v = readq(base + FAB_CTRL);
+
+	if (FIELD_GET(FAB_PORT_FILTER, v) == FAB_PORT_FILTER_DISABLE)
+		priv->fab_port_id = FME_PORTID_ROOT;
+	else
+		priv->fab_port_id = FIELD_GET(FAB_PORT_ID, v);
+}
+
+static int fme_perf_pmu_register(struct platform_device *pdev,
+				 struct fme_perf_priv *priv)
+{
+	struct pmu *pmu = &priv->pmu;
+	char *name;
+	int ret;
+
+	spin_lock_init(&priv->fab_lock);
+
+	fme_perf_setup_hardware(priv);
+
+	pmu->task_ctx_nr =	perf_invalid_context;
+	pmu->attr_groups =	fme_perf_groups;
+	pmu->attr_update =	fme_perf_events_groups;
+	pmu->event_init =	fme_perf_event_init;
+	pmu->add =		fme_perf_event_add;
+	pmu->del =		fme_perf_event_del;
+	pmu->start =		fme_perf_event_start;
+	pmu->stop =		fme_perf_event_stop;
+	pmu->read =		fme_perf_event_read;
+	pmu->capabilities =	PERF_PMU_CAP_NO_INTERRUPT |
+				PERF_PMU_CAP_NO_EXCLUDE;
+
+	name = devm_kasprintf(priv->dev, GFP_KERNEL, "dfl_fme%d", pdev->id);
+
+	ret = perf_pmu_register(pmu, name, -1);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
+static void fme_perf_pmu_unregister(struct fme_perf_priv *priv)
+{
+	perf_pmu_unregister(&priv->pmu);
+}
+
+static int fme_perf_offline_cpu(unsigned int cpu, struct hlist_node *node)
+{
+	struct fme_perf_priv *priv;
+	int target;
+
+	priv = hlist_entry_safe(node, struct fme_perf_priv, node);
+
+	if (cpu != priv->cpu)
+		return 0;
+
+	target = cpumask_any_but(cpu_online_mask, cpu);
+	if (target >= nr_cpu_ids)
+		return 0;
+
+	priv->cpu = target;
+	perf_pmu_migrate_context(&priv->pmu, cpu, target);
+
+	return 0;
+}
+
+static int fme_perf_init(struct platform_device *pdev,
+			 struct dfl_feature *feature)
+{
+	struct fme_perf_priv *priv;
+	int ret;
+
+	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+	if (!priv)
+		return -ENOMEM;
+
+	priv->dev = &pdev->dev;
+	priv->ioaddr = feature->ioaddr;
+	priv->id = feature->id;
+	priv->cpu = raw_smp_processor_id();
+
+	ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN,
+				      "perf/fpga/dfl_fme:online",
+				      NULL, fme_perf_offline_cpu);
+	if (ret < 0)
+		return ret;
+
+	priv->cpuhp_state = ret;
+
+	/* Register the pmu instance for cpu hotplug */
+	ret = cpuhp_state_add_instance_nocalls(priv->cpuhp_state, &priv->node);
+	if (ret)
+		goto cpuhp_instance_err;
+
+	ret = fme_perf_pmu_register(pdev, priv);
+	if (ret)
+		goto pmu_register_err;
+
+	feature->priv = priv;
+	return 0;
+
+pmu_register_err:
+	cpuhp_state_remove_instance_nocalls(priv->cpuhp_state, &priv->node);
+cpuhp_instance_err:
+	cpuhp_remove_multi_state(priv->cpuhp_state);
+	return ret;
+}
+
+static void fme_perf_uinit(struct platform_device *pdev,
+			   struct dfl_feature *feature)
+{
+	struct fme_perf_priv *priv = feature->priv;
+
+	fme_perf_pmu_unregister(priv);
+	cpuhp_state_remove_instance_nocalls(priv->cpuhp_state, &priv->node);
+	cpuhp_remove_multi_state(priv->cpuhp_state);
+}
+
+const struct dfl_feature_id fme_perf_id_table[] = {
+	{.id = FME_FEATURE_ID_GLOBAL_IPERF,},
+	{.id = FME_FEATURE_ID_GLOBAL_DPERF,},
+	{0,}
+};
+
+const struct dfl_feature_ops fme_perf_ops = {
+	.init = fme_perf_init,
+	.uinit = fme_perf_uinit,
+};
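
fme perf events are selected through a packed config word whose layout is
published by the PMU_FORMAT_ATTR() definitions above: bits 0-11 event, 12-15
evtype, 16-23 portid (0xff selects the overall/root view). A hedged
perf_event_open() sketch under those assumptions; the PMU type id and the CPU
are illustrative and must really be read from
/sys/bus/event_source/devices/dfl_fme*/type and .../cpumask:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/syscall.h>
    #include <unistd.h>
    #include <linux/perf_event.h>

    /* pack event/evtype/portid exactly as the "format" attributes describe */
    static uint64_t fme_config(uint64_t event, uint64_t evtype, uint64_t portid)
    {
            return (event & 0xfff) | ((evtype & 0xf) << 12) |
                   ((portid & 0xff) << 16);
    }

    int main(void)
    {
            struct perf_event_attr attr;
            uint64_t count;
            long fd;

            memset(&attr, 0, sizeof(attr));
            attr.size = sizeof(attr);
            attr.type = 20;         /* illustrative; read the sysfs type file */
            /* fab_mmio_read: event=0x6, evtype=FABRIC(2), portid=root(0xff) */
            attr.config = fme_config(0x6, 2, 0xff);

            /* system-wide counter, pinned to the PMU's advertised cpu */
            fd = syscall(__NR_perf_event_open, &attr, -1, /*cpu=*/0, -1, 0);
            if (fd < 0) {
                    perror("perf_event_open");
                    return 1;
            }
            if (read(fd, &count, sizeof(count)) == sizeof(count))
                    printf("fab_mmio_read: %llu\n", (unsigned long long)count);
            return 0;
    }
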
diff --git a/drivers/fpga/dfl-fme-pr.c b/drivers/fpga/dfl-fme-pr.c
index a233a53..1194c0e 100644
--- a/drivers/fpga/dfl-fme-pr.c
+++ b/drivers/fpga/dfl-fme-pr.c
@@ -97,10 +97,6 @@
 		return -EINVAL;
 	}
 
-	if (!access_ok((void __user *)(unsigned long)port_pr.buffer_address,
-		       port_pr.buffer_size))
-		return -EFAULT;
-
 	/*
 	 * align PR buffer per PR bandwidth, as HW ignores the extra padding
 	 * data automatically.
diff --git a/drivers/fpga/dfl-fme.h b/drivers/fpga/dfl-fme.h
index 6685c8e..4195dd6 100644
--- a/drivers/fpga/dfl-fme.h
+++ b/drivers/fpga/dfl-fme.h
@@ -38,5 +38,7 @@
 extern const struct dfl_feature_ops fme_global_err_ops;
 extern const struct dfl_feature_id fme_global_err_id_table[];
 extern const struct attribute_group fme_global_err_group;
+extern const struct dfl_feature_ops fme_perf_ops;
+extern const struct dfl_feature_id fme_perf_id_table[];
 
 #endif /* __DFL_FME_H */
diff --git a/drivers/fpga/dfl-pci.c b/drivers/fpga/dfl-pci.c
index a78c409..bc108ee 100644
--- a/drivers/fpga/dfl-pci.c
+++ b/drivers/fpga/dfl-pci.c
@@ -31,22 +31,46 @@
 	struct dfl_fpga_cdev *cdev;	/* container device */
 };
 
-static void __iomem *cci_pci_ioremap_bar(struct pci_dev *pcidev, int bar)
+static void __iomem *cci_pci_ioremap_bar0(struct pci_dev *pcidev)
 {
-	if (pcim_iomap_regions(pcidev, BIT(bar), DRV_NAME))
+	if (pcim_iomap_regions(pcidev, BIT(0), DRV_NAME))
 		return NULL;
 
-	return pcim_iomap_table(pcidev)[bar];
+	return pcim_iomap_table(pcidev)[0];
+}
+
+static int cci_pci_alloc_irq(struct pci_dev *pcidev)
+{
+	int ret, nvec = pci_msix_vec_count(pcidev);
+
+	if (nvec <= 0) {
+		dev_dbg(&pcidev->dev, "fpga interrupt not supported\n");
+		return 0;
+	}
+
+	ret = pci_alloc_irq_vectors(pcidev, nvec, nvec, PCI_IRQ_MSIX);
+	if (ret < 0)
+		return ret;
+
+	return nvec;
+}
+
+static void cci_pci_free_irq(struct pci_dev *pcidev)
+{
+	pci_free_irq_vectors(pcidev);
 }
 
 /* PCI Device ID */
-#define PCIE_DEVICE_ID_PF_INT_5_X	0xBCBD
-#define PCIE_DEVICE_ID_PF_INT_6_X	0xBCC0
-#define PCIE_DEVICE_ID_PF_DSC_1_X	0x09C4
+#define PCIE_DEVICE_ID_PF_INT_5_X		0xBCBD
+#define PCIE_DEVICE_ID_PF_INT_6_X		0xBCC0
+#define PCIE_DEVICE_ID_PF_DSC_1_X		0x09C4
+#define PCIE_DEVICE_ID_INTEL_PAC_N3000		0x0B30
+#define PCIE_DEVICE_ID_INTEL_PAC_D5005		0x0B2B
 /* VF Device */
-#define PCIE_DEVICE_ID_VF_INT_5_X	0xBCBF
-#define PCIE_DEVICE_ID_VF_INT_6_X	0xBCC1
-#define PCIE_DEVICE_ID_VF_DSC_1_X	0x09C5
+#define PCIE_DEVICE_ID_VF_INT_5_X		0xBCBF
+#define PCIE_DEVICE_ID_VF_INT_6_X		0xBCC1
+#define PCIE_DEVICE_ID_VF_DSC_1_X		0x09C5
+#define PCIE_DEVICE_ID_INTEL_PAC_D5005_VF	0x0B2C
 
 static struct pci_device_id cci_pcie_id_tbl[] = {
 	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCIE_DEVICE_ID_PF_INT_5_X),},
@@ -55,6 +79,9 @@
 	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCIE_DEVICE_ID_VF_INT_6_X),},
 	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCIE_DEVICE_ID_PF_DSC_1_X),},
 	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCIE_DEVICE_ID_VF_DSC_1_X),},
+	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCIE_DEVICE_ID_INTEL_PAC_N3000),},
+	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCIE_DEVICE_ID_INTEL_PAC_D5005),},
+	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCIE_DEVICE_ID_INTEL_PAC_D5005_VF),},
 	{0,}
 };
 MODULE_DEVICE_TABLE(pci, cci_pcie_id_tbl);
@@ -78,17 +105,34 @@
 
 	/* remove all children feature devices */
 	dfl_fpga_feature_devs_remove(drvdata->cdev);
+	cci_pci_free_irq(pcidev);
+}
+
+static int *cci_pci_create_irq_table(struct pci_dev *pcidev, unsigned int nvec)
+{
+	unsigned int i;
+	int *table;
+
+	table = kcalloc(nvec, sizeof(int), GFP_KERNEL);
+	if (!table)
+		return table;
+
+	for (i = 0; i < nvec; i++)
+		table[i] = pci_irq_vector(pcidev, i);
+
+	return table;
 }
 
 /* enumerate feature devices under pci device */
 static int cci_enumerate_feature_devs(struct pci_dev *pcidev)
 {
 	struct cci_drvdata *drvdata = pci_get_drvdata(pcidev);
+	int port_num, bar, i, nvec, ret = 0;
 	struct dfl_fpga_enum_info *info;
 	struct dfl_fpga_cdev *cdev;
 	resource_size_t start, len;
-	int port_num, bar, i, ret = 0;
 	void __iomem *base;
+	int *irq_table;
 	u32 offset;
 	u64 v;
 
@@ -97,11 +141,30 @@
 	if (!info)
 		return -ENOMEM;
 
-	/* start to find Device Feature List from Bar 0 */
-	base = cci_pci_ioremap_bar(pcidev, 0);
+	/* add irq info for enumeration if the device supports irq */
+	nvec = cci_pci_alloc_irq(pcidev);
+	if (nvec < 0) {
+		dev_err(&pcidev->dev, "Fail to alloc irq %d.\n", nvec);
+		ret = nvec;
+		goto enum_info_free_exit;
+	} else if (nvec) {
+		irq_table = cci_pci_create_irq_table(pcidev, nvec);
+		if (!irq_table) {
+			ret = -ENOMEM;
+			goto irq_free_exit;
+		}
+
+		ret = dfl_fpga_enum_info_add_irq(info, nvec, irq_table);
+		kfree(irq_table);
+		if (ret)
+			goto irq_free_exit;
+	}
+
+	/* start to find Device Feature List in Bar 0 */
+	base = cci_pci_ioremap_bar0(pcidev);
 	if (!base) {
 		ret = -ENOMEM;
-		goto enum_info_free_exit;
+		goto irq_free_exit;
 	}
 
 	/*
@@ -113,7 +176,7 @@
 		start = pci_resource_start(pcidev, 0);
 		len = pci_resource_len(pcidev, 0);
 
-		dfl_fpga_enum_info_add_dfl(info, start, len, base);
+		dfl_fpga_enum_info_add_dfl(info, start, len);
 
 		/*
 		 * find more Device Feature Lists (e.g. Ports) per information
@@ -137,36 +200,37 @@
 			 */
 			bar = FIELD_GET(FME_PORT_OFST_BAR_ID, v);
 			offset = FIELD_GET(FME_PORT_OFST_DFH_OFST, v);
-			base = cci_pci_ioremap_bar(pcidev, bar);
-			if (!base)
-				continue;
-
 			start = pci_resource_start(pcidev, bar) + offset;
 			len = pci_resource_len(pcidev, bar) - offset;
 
-			dfl_fpga_enum_info_add_dfl(info, start, len,
-						   base + offset);
+			dfl_fpga_enum_info_add_dfl(info, start, len);
 		}
 	} else if (dfl_feature_is_port(base)) {
 		start = pci_resource_start(pcidev, 0);
 		len = pci_resource_len(pcidev, 0);
 
-		dfl_fpga_enum_info_add_dfl(info, start, len, base);
+		dfl_fpga_enum_info_add_dfl(info, start, len);
 	} else {
 		ret = -ENODEV;
-		goto enum_info_free_exit;
+		goto irq_free_exit;
 	}
 
+	/* release I/O mappings for the next enumeration step */
+	pcim_iounmap_regions(pcidev, BIT(0));
+
 	/* start enumeration with prepared enumeration information */
 	cdev = dfl_fpga_feature_devs_enumerate(info);
 	if (IS_ERR(cdev)) {
 		dev_err(&pcidev->dev, "Enumeration failure\n");
 		ret = PTR_ERR(cdev);
-		goto enum_info_free_exit;
+		goto irq_free_exit;
 	}
 
 	drvdata->cdev = cdev;
 
+irq_free_exit:
+	if (ret)
+		cci_pci_free_irq(pcidev);
 enum_info_free_exit:
 	dfl_fpga_enum_info_free(info);
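
Condensed, the enumeration contract after this change is: register irq information up front, describe each DFL by its physical range only, and let the core do its own mapping. A sketch using the real APIs from this patch, error handling elided:

	info = dfl_fpga_enum_info_alloc(&pcidev->dev);
	dfl_fpga_enum_info_add_irq(info, nvec, irq_table);	/* optional, if MSI-X present */
	dfl_fpga_enum_info_add_dfl(info, start, len);		/* no ioaddr any more */
	cdev = dfl_fpga_feature_devs_enumerate(info);		/* core ioremaps as needed */
	dfl_fpga_enum_info_free(info);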
 
@@ -211,12 +275,10 @@
 	}
 
 	ret = cci_enumerate_feature_devs(pcidev);
-	if (ret) {
-		dev_err(&pcidev->dev, "enumeration failure %d.\n", ret);
-		goto disable_error_report_exit;
-	}
+	if (!ret)
+		return ret;
 
-	return ret;
+	dev_err(&pcidev->dev, "enumeration failure %d.\n", ret);
 
 disable_error_report_exit:
 	pci_disable_pcie_error_reporting(pcidev);
diff --git a/drivers/fpga/dfl.c b/drivers/fpga/dfl.c
index 96a2b82..b450870 100644
--- a/drivers/fpga/dfl.c
+++ b/drivers/fpga/dfl.c
@@ -10,7 +10,9 @@
  *   Wu Hao <hao.wu@intel.com>
  *   Xiao Guangrong <guangrong.xiao@linux.intel.com>
  */
+#include <linux/fpga-dfl.h>
 #include <linux/module.h>
+#include <linux/uaccess.h>
 
 #include "dfl.h"
 
@@ -28,12 +30,6 @@
  * index to the dfl_chardevs table. If there is no chardev support, just set
  * devt_type to an invalid index (DFL_FPGA_DEVT_MAX).
  */
-enum dfl_id_type {
-	FME_ID,		/* fme id allocation and mapping */
-	PORT_ID,	/* port id allocation and mapping */
-	DFL_ID_MAX,
-};
-
 enum dfl_fpga_devt_type {
 	DFL_FPGA_DEVT_FME,
 	DFL_FPGA_DEVT_PORT,
@@ -56,7 +52,7 @@
  */
 struct dfl_dev_info {
 	const char *name;
-	u32 dfh_id;
+	u16 dfh_id;
 	struct idr id;
 	enum dfl_fpga_devt_type devt_type;
 };
@@ -132,7 +128,7 @@
 	return DFL_ID_MAX;
 }
 
-static enum dfl_id_type dfh_id_to_type(u32 id)
+static enum dfl_id_type dfh_id_to_type(u16 id)
 {
 	int i;
 
@@ -248,6 +244,249 @@
 }
 EXPORT_SYMBOL_GPL(dfl_fpga_check_port_id);
 
+static DEFINE_IDA(dfl_device_ida);
+
+static const struct dfl_device_id *
+dfl_match_one_device(const struct dfl_device_id *id, struct dfl_device *ddev)
+{
+	if (id->type == ddev->type && id->feature_id == ddev->feature_id)
+		return id;
+
+	return NULL;
+}
+
+static int dfl_bus_match(struct device *dev, struct device_driver *drv)
+{
+	struct dfl_device *ddev = to_dfl_dev(dev);
+	struct dfl_driver *ddrv = to_dfl_drv(drv);
+	const struct dfl_device_id *id_entry;
+
+	id_entry = ddrv->id_table;
+	if (id_entry) {
+		while (id_entry->feature_id) {
+			if (dfl_match_one_device(id_entry, ddev)) {
+				ddev->id_entry = id_entry;
+				return 1;
+			}
+			id_entry++;
+		}
+	}
+
+	return 0;
+}
+
+static int dfl_bus_probe(struct device *dev)
+{
+	struct dfl_driver *ddrv = to_dfl_drv(dev->driver);
+	struct dfl_device *ddev = to_dfl_dev(dev);
+
+	return ddrv->probe(ddev);
+}
+
+static int dfl_bus_remove(struct device *dev)
+{
+	struct dfl_driver *ddrv = to_dfl_drv(dev->driver);
+	struct dfl_device *ddev = to_dfl_dev(dev);
+
+	if (ddrv->remove)
+		ddrv->remove(ddev);
+
+	return 0;
+}
+
+static int dfl_bus_uevent(struct device *dev, struct kobj_uevent_env *env)
+{
+	struct dfl_device *ddev = to_dfl_dev(dev);
+
+	/* The type has 4 valid bits and feature_id has 12 valid bits */
+	return add_uevent_var(env, "MODALIAS=dfl:t%01Xf%03X",
+			      ddev->type, ddev->feature_id);
+}
+
+static ssize_t
+type_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+	struct dfl_device *ddev = to_dfl_dev(dev);
+
+	return sprintf(buf, "0x%x\n", ddev->type);
+}
+static DEVICE_ATTR_RO(type);
+
+static ssize_t
+feature_id_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+	struct dfl_device *ddev = to_dfl_dev(dev);
+
+	return sprintf(buf, "0x%x\n", ddev->feature_id);
+}
+static DEVICE_ATTR_RO(feature_id);
+
+static struct attribute *dfl_dev_attrs[] = {
+	&dev_attr_type.attr,
+	&dev_attr_feature_id.attr,
+	NULL,
+};
+ATTRIBUTE_GROUPS(dfl_dev);
+
+static struct bus_type dfl_bus_type = {
+	.name		= "dfl",
+	.match		= dfl_bus_match,
+	.probe		= dfl_bus_probe,
+	.remove		= dfl_bus_remove,
+	.uevent		= dfl_bus_uevent,
+	.dev_groups	= dfl_dev_groups,
+};
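
As an illustration of the uevent format above, a device of type 0x0 (FME_ID) carrying feature 0x10 reports MODALIAS=dfl:t0f010, which modprobe matches against aliases generated from driver dfl_device_id tables (the type and feature values here are made up for the example).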
+
+static void release_dfl_dev(struct device *dev)
+{
+	struct dfl_device *ddev = to_dfl_dev(dev);
+
+	if (ddev->mmio_res.parent)
+		release_resource(&ddev->mmio_res);
+
+	ida_simple_remove(&dfl_device_ida, ddev->id);
+	kfree(ddev->irqs);
+	kfree(ddev);
+}
+
+static struct dfl_device *
+dfl_dev_add(struct dfl_feature_platform_data *pdata,
+	    struct dfl_feature *feature)
+{
+	struct platform_device *pdev = pdata->dev;
+	struct resource *parent_res;
+	struct dfl_device *ddev;
+	int id, i, ret;
+
+	ddev = kzalloc(sizeof(*ddev), GFP_KERNEL);
+	if (!ddev)
+		return ERR_PTR(-ENOMEM);
+
+	id = ida_simple_get(&dfl_device_ida, 0, 0, GFP_KERNEL);
+	if (id < 0) {
+		dev_err(&pdev->dev, "unable to get id\n");
+		kfree(ddev);
+		return ERR_PTR(id);
+	}
+
+	/* after device_initialize(), resources are freed via put_device() */
+	device_initialize(&ddev->dev);
+	ddev->dev.parent = &pdev->dev;
+	ddev->dev.bus = &dfl_bus_type;
+	ddev->dev.release = release_dfl_dev;
+	ddev->id = id;
+	ret = dev_set_name(&ddev->dev, "dfl_dev.%d", id);
+	if (ret)
+		goto put_dev;
+
+	ddev->type = feature_dev_id_type(pdev);
+	ddev->feature_id = feature->id;
+	ddev->cdev = pdata->dfl_cdev;
+
+	/* add mmio resource */
+	parent_res = &pdev->resource[feature->resource_index];
+	ddev->mmio_res.flags = IORESOURCE_MEM;
+	ddev->mmio_res.start = parent_res->start;
+	ddev->mmio_res.end = parent_res->end;
+	ddev->mmio_res.name = dev_name(&ddev->dev);
+	ret = insert_resource(parent_res, &ddev->mmio_res);
+	if (ret) {
+		dev_err(&pdev->dev, "%s failed to claim resource: %pR\n",
+			dev_name(&ddev->dev), &ddev->mmio_res);
+		goto put_dev;
+	}
+
+	/* then add irq resource */
+	if (feature->nr_irqs) {
+		ddev->irqs = kcalloc(feature->nr_irqs,
+				     sizeof(*ddev->irqs), GFP_KERNEL);
+		if (!ddev->irqs) {
+			ret = -ENOMEM;
+			goto put_dev;
+		}
+
+		for (i = 0; i < feature->nr_irqs; i++)
+			ddev->irqs[i] = feature->irq_ctx[i].irq;
+
+		ddev->num_irqs = feature->nr_irqs;
+	}
+
+	ret = device_add(&ddev->dev);
+	if (ret)
+		goto put_dev;
+
+	dev_dbg(&pdev->dev, "add dfl_dev: %s\n", dev_name(&ddev->dev));
+	return ddev;
+
+put_dev:
+	/* calls release_dfl_dev() which does the cleanup */
+	put_device(&ddev->dev);
+	return ERR_PTR(ret);
+}
+
+static void dfl_devs_remove(struct dfl_feature_platform_data *pdata)
+{
+	struct dfl_feature *feature;
+
+	dfl_fpga_dev_for_each_feature(pdata, feature) {
+		if (feature->ddev) {
+			device_unregister(&feature->ddev->dev);
+			feature->ddev = NULL;
+		}
+	}
+}
+
+static int dfl_devs_add(struct dfl_feature_platform_data *pdata)
+{
+	struct dfl_feature *feature;
+	struct dfl_device *ddev;
+	int ret;
+
+	dfl_fpga_dev_for_each_feature(pdata, feature) {
+		if (feature->ioaddr)
+			continue;
+
+		if (feature->ddev) {
+			ret = -EEXIST;
+			goto err;
+		}
+
+		ddev = dfl_dev_add(pdata, feature);
+		if (IS_ERR(ddev)) {
+			ret = PTR_ERR(ddev);
+			goto err;
+		}
+
+		feature->ddev = ddev;
+	}
+
+	return 0;
+
+err:
+	dfl_devs_remove(pdata);
+	return ret;
+}
+
+int __dfl_driver_register(struct dfl_driver *dfl_drv, struct module *owner)
+{
+	if (!dfl_drv || !dfl_drv->probe || !dfl_drv->id_table)
+		return -EINVAL;
+
+	dfl_drv->drv.owner = owner;
+	dfl_drv->drv.bus = &dfl_bus_type;
+
+	return driver_register(&dfl_drv->drv);
+}
+EXPORT_SYMBOL(__dfl_driver_register);
+
+void dfl_driver_unregister(struct dfl_driver *dfl_drv)
+{
+	driver_unregister(&dfl_drv->drv);
+}
+EXPORT_SYMBOL(dfl_driver_unregister);
+
+#define is_header_feature(feature) ((feature)->id == FEATURE_ID_FIU_HEADER)
+
 /**
  * dfl_fpga_dev_feature_uinit - uinit for sub features of dfl feature device
  * @pdev: feature device.
@@ -257,12 +496,15 @@
 	struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
 	struct dfl_feature *feature;
 
-	dfl_fpga_dev_for_each_feature(pdata, feature)
+	dfl_devs_remove(pdata);
+
+	dfl_fpga_dev_for_each_feature(pdata, feature) {
 		if (feature->ops) {
 			if (feature->ops->uinit)
 				feature->ops->uinit(pdev, feature);
 			feature->ops = NULL;
 		}
+	}
 }
 EXPORT_SYMBOL_GPL(dfl_fpga_dev_feature_uinit);
 
@@ -271,8 +513,22 @@
 				     struct dfl_feature *feature,
 				     struct dfl_feature_driver *drv)
 {
+	void __iomem *base;
 	int ret = 0;
 
+	if (!is_header_feature(feature)) {
+		base = devm_platform_ioremap_resource(pdev,
+						      feature->resource_index);
+		if (IS_ERR(base)) {
+			dev_err(&pdev->dev,
+				"ioremap failed for feature 0x%x!\n",
+				feature->id);
+			return PTR_ERR(base);
+		}
+
+		feature->ioaddr = base;
+	}
+
 	if (drv->ops->init) {
 		ret = drv->ops->init(pdev, feature);
 		if (ret)
@@ -329,6 +585,10 @@
 		drv++;
 	}
 
+	ret = dfl_devs_add(pdata);
+	if (ret)
+		goto exit;
+
 	return 0;
 exit:
 	dfl_fpga_dev_feature_uinit(pdev);
@@ -421,16 +681,26 @@
  *
  * @dev: device to enumerate.
  * @cdev: the container device for all feature devices.
+ * @nr_irqs: number of irqs for all feature devices.
+ * @irq_table: Linux IRQ numbers for all irqs, indexed by local irq index of
+ *	       this device.
  * @feature_dev: current feature device.
- * @ioaddr: header register region address of feature device in enumeration.
+ * @ioaddr: header register region address of current FIU in enumeration.
+ * @start: register resource start of current FIU.
+ * @len: max register resource length of current FIU.
  * @sub_features: a sub features linked list for feature device in enumeration.
  * @feature_num: number of sub features for feature device in enumeration.
  */
 struct build_feature_devs_info {
 	struct device *dev;
 	struct dfl_fpga_cdev *cdev;
+	unsigned int nr_irqs;
+	int *irq_table;
+
 	struct platform_device *feature_dev;
 	void __iomem *ioaddr;
+	resource_size_t start;
+	resource_size_t len;
 	struct list_head sub_features;
 	int feature_num;
 };
@@ -442,12 +712,16 @@
  * @mmio_res: mmio resource of this sub feature.
  * @ioaddr: mapped base address of mmio resource.
  * @node: node in sub_features linked list.
+ * @irq_base: start of irq index in this sub feature.
+ * @nr_irqs: number of irqs of this sub feature.
  */
 struct dfl_feature_info {
-	u64 fid;
+	u16 fid;
 	struct resource mmio_res;
 	void __iomem *ioaddr;
 	struct list_head node;
+	unsigned int irq_base;
+	unsigned int nr_irqs;
 };
 
 static void dfl_fpga_cdev_add_port_dev(struct dfl_fpga_cdev *cdev,
@@ -472,10 +746,7 @@
 	struct dfl_feature_platform_data *pdata;
 	struct dfl_feature_info *finfo, *p;
 	enum dfl_id_type type;
-	int ret, index = 0;
-
-	if (!fdev)
-		return 0;
+	int ret, index = 0, res_idx = 0;
 
 	type = feature_dev_id_type(fdev);
 	if (WARN_ON_ONCE(type >= DFL_ID_MAX))
@@ -487,8 +758,7 @@
 	 * it will be automatically freed by device's release() callback,
 	 * platform_device_release().
 	 */
-	pdata = kzalloc(dfl_feature_platform_data_size(binfo->feature_num),
-			GFP_KERNEL);
+	pdata = kzalloc(struct_size(pdata, features, binfo->feature_num), GFP_KERNEL);
 	if (!pdata)
 		return -ENOMEM;
 
@@ -519,13 +789,46 @@
 
 	/* fill features and resource information for feature dev */
 	list_for_each_entry_safe(finfo, p, &binfo->sub_features, node) {
-		struct dfl_feature *feature = &pdata->features[index];
+		struct dfl_feature *feature = &pdata->features[index++];
+		struct dfl_feature_irq_ctx *ctx;
+		unsigned int i;
 
 		/* save resource information for each feature */
+		feature->dev = fdev;
 		feature->id = finfo->fid;
-		feature->resource_index = index;
-		feature->ioaddr = finfo->ioaddr;
-		fdev->resource[index++] = finfo->mmio_res;
+
+		/*
+		 * The FIU header feature carries fundamental functions (SR-IOV
+		 * set, port enable/disable) needed by the DFL bus device and
+		 * other sub features, so its mmio resource is mapped by the
+		 * DFL bus device itself and must not be assigned to the
+		 * feature devices (dfl-fme/afu) again.
+		 */
+		if (is_header_feature(feature)) {
+			feature->resource_index = -1;
+			feature->ioaddr =
+				devm_ioremap_resource(binfo->dev,
+						      &finfo->mmio_res);
+			if (IS_ERR(feature->ioaddr))
+				return PTR_ERR(feature->ioaddr);
+		} else {
+			feature->resource_index = res_idx;
+			fdev->resource[res_idx++] = finfo->mmio_res;
+		}
+
+		if (finfo->nr_irqs) {
+			ctx = devm_kcalloc(binfo->dev, finfo->nr_irqs,
+					   sizeof(*ctx), GFP_KERNEL);
+			if (!ctx)
+				return -ENOMEM;
+
+			for (i = 0; i < finfo->nr_irqs; i++)
+				ctx[i].irq =
+					binfo->irq_table[finfo->irq_base + i];
+
+			feature->irq_ctx = ctx;
+			feature->nr_irqs = finfo->nr_irqs;
+		}
 
 		list_del(&finfo->node);
 		kfree(finfo);
@@ -554,19 +857,13 @@
 
 static int
 build_info_create_dev(struct build_feature_devs_info *binfo,
-		      enum dfl_id_type type, void __iomem *ioaddr)
+		      enum dfl_id_type type)
 {
 	struct platform_device *fdev;
-	int ret;
 
 	if (type >= DFL_ID_MAX)
 		return -EINVAL;
 
-	/* we will create a new device, commit current device first */
-	ret = build_info_commit_dev(binfo);
-	if (ret)
-		return ret;
-
 	/*
 	 * we use -ENODEV as the initialization indicator which indicates
 	 * whether the id needs to be reclaimed
@@ -577,7 +874,7 @@
 
 	binfo->feature_dev = fdev;
 	binfo->feature_num = 0;
-	binfo->ioaddr = ioaddr;
+
 	INIT_LIST_HEAD(&binfo->sub_features);
 
 	fdev->id = dfl_id_alloc(type, &fdev->dev);
@@ -621,7 +918,7 @@
 	return ofst ? ofst : 4096;
 }
 
-static u64 feature_id(void __iomem *start)
+static u16 feature_id(void __iomem *start)
 {
 	u64 v = readq(start + DFH);
 	u16 id = FIELD_GET(DFH_ID, v);
@@ -638,6 +935,78 @@
 	return 0;
 }
 
+static int parse_feature_irqs(struct build_feature_devs_info *binfo,
+			      resource_size_t ofst, u16 fid,
+			      unsigned int *irq_base, unsigned int *nr_irqs)
+{
+	void __iomem *base = binfo->ioaddr + ofst;
+	unsigned int i, ibase, inr = 0;
+	int virq;
+	u64 v;
+
+	/*
+	 * Ideally the DFL framework should only read info from the DFL
+	 * header, but the current DFL version only provides mmio resource
+	 * information for each feature in the DFL header; there is no field
+	 * for interrupt resources. Interrupt resource information is instead
+	 * provided by feature-specific mmio registers of each private
+	 * feature that supports interrupts, so in order to parse and assign
+	 * irq resources the DFL framework has to look into the capability
+	 * registers of these private features.
+	 *
+	 * Once a future DFL version supports generic interrupt resource
+	 * information in the common DFL headers, generic interrupt parsing
+	 * code will be added; but to stay compatible with old DFL versions,
+	 * the driver may still fall back to these quirks.
+	 */
+	switch (fid) {
+	case PORT_FEATURE_ID_UINT:
+		v = readq(base + PORT_UINT_CAP);
+		ibase = FIELD_GET(PORT_UINT_CAP_FST_VECT, v);
+		inr = FIELD_GET(PORT_UINT_CAP_INT_NUM, v);
+		break;
+	case PORT_FEATURE_ID_ERROR:
+		v = readq(base + PORT_ERROR_CAP);
+		ibase = FIELD_GET(PORT_ERROR_CAP_INT_VECT, v);
+		inr = FIELD_GET(PORT_ERROR_CAP_SUPP_INT, v);
+		break;
+	case FME_FEATURE_ID_GLOBAL_ERR:
+		v = readq(base + FME_ERROR_CAP);
+		ibase = FIELD_GET(FME_ERROR_CAP_INT_VECT, v);
+		inr = FIELD_GET(FME_ERROR_CAP_SUPP_INT, v);
+		break;
+	}
+
+	if (!inr) {
+		*irq_base = 0;
+		*nr_irqs = 0;
+		return 0;
+	}
+
+	dev_dbg(binfo->dev, "feature: 0x%x, irq_base: %u, nr_irqs: %u\n",
+		fid, ibase, inr);
+
+	if (ibase + inr > binfo->nr_irqs) {
+		dev_err(binfo->dev,
+			"Invalid interrupt number in feature 0x%x\n", fid);
+		return -EINVAL;
+	}
+
+	for (i = 0; i < inr; i++) {
+		virq = binfo->irq_table[ibase + i];
+		if (virq < 0 || virq > NR_IRQS) {
+			dev_err(binfo->dev,
+				"Invalid irq table entry for feature 0x%x\n",
+				fid);
+			return -EINVAL;
+		}
+	}
+
+	*irq_base = ibase;
+	*nr_irqs = inr;
+
+	return 0;
+}
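
To make the FIELD_GET() decoding concrete: with the bitfields defined later in dfl.h (PORT_UINT_CAP_INT_NUM in bits 11:0, PORT_UINT_CAP_FST_VECT in bits 23:12), an invented PORT_UINT_CAP readback of 0x4004 decodes to first vector 4 and 4 user interrupts:

	u64 v = 0x4004;	/* made-up capability value, illustration only */
	u32 first = FIELD_GET(PORT_UINT_CAP_FST_VECT, v);	/* (0x4004 >> 12) & 0xfff == 4 */
	u32 num   = FIELD_GET(PORT_UINT_CAP_INT_NUM, v);	/* 0x4004 & 0xfff == 4 */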
+
 /*
  * when creating sub feature instances, for private features, it doesn't need
  * to provide resource size and feature id as they could be read from DFH
@@ -647,27 +1016,33 @@
  */
 static int
 create_feature_instance(struct build_feature_devs_info *binfo,
-			struct dfl_fpga_enum_dfl *dfl, resource_size_t ofst,
-			resource_size_t size, u64 fid)
+			resource_size_t ofst, resource_size_t size, u16 fid)
 {
+	unsigned int irq_base, nr_irqs;
 	struct dfl_feature_info *finfo;
+	int ret;
 
 	/* read feature size and id if inputs are invalid */
-	size = size ? size : feature_size(dfl->ioaddr + ofst);
-	fid = fid ? fid : feature_id(dfl->ioaddr + ofst);
+	size = size ? size : feature_size(binfo->ioaddr + ofst);
+	fid = fid ? fid : feature_id(binfo->ioaddr + ofst);
 
-	if (dfl->len - ofst < size)
+	if (binfo->len - ofst < size)
 		return -EINVAL;
 
+	ret = parse_feature_irqs(binfo, ofst, fid, &irq_base, &nr_irqs);
+	if (ret)
+		return ret;
+
 	finfo = kzalloc(sizeof(*finfo), GFP_KERNEL);
 	if (!finfo)
 		return -ENOMEM;
 
 	finfo->fid = fid;
-	finfo->mmio_res.start = dfl->start + ofst;
+	finfo->mmio_res.start = binfo->start + ofst;
 	finfo->mmio_res.end = finfo->mmio_res.start + size - 1;
 	finfo->mmio_res.flags = IORESOURCE_MEM;
-	finfo->ioaddr = dfl->ioaddr + ofst;
+	finfo->irq_base = irq_base;
+	finfo->nr_irqs = nr_irqs;
 
 	list_add_tail(&finfo->node, &binfo->sub_features);
 	binfo->feature_num++;
@@ -676,7 +1051,6 @@
 }
 
 static int parse_feature_port_afu(struct build_feature_devs_info *binfo,
-				  struct dfl_fpga_enum_dfl *dfl,
 				  resource_size_t ofst)
 {
 	u64 v = readq(binfo->ioaddr + PORT_HDR_CAP);
@@ -684,21 +1058,22 @@
 
 	WARN_ON(!size);
 
-	return create_feature_instance(binfo, dfl, ofst, size, FEATURE_ID_AFU);
+	return create_feature_instance(binfo, ofst, size, FEATURE_ID_AFU);
 }
 
+#define is_feature_dev_detected(binfo) (!!(binfo)->feature_dev)
+
 static int parse_feature_afu(struct build_feature_devs_info *binfo,
-			     struct dfl_fpga_enum_dfl *dfl,
 			     resource_size_t ofst)
 {
-	if (!binfo->feature_dev) {
+	if (!is_feature_dev_detected(binfo)) {
 		dev_err(binfo->dev, "this AFU does not belong to any FIU.\n");
 		return -EINVAL;
 	}
 
 	switch (feature_dev_id_type(binfo->feature_dev)) {
 	case PORT_ID:
-		return parse_feature_port_afu(binfo, dfl, ofst);
+		return parse_feature_port_afu(binfo, ofst);
 	default:
 		dev_info(binfo->dev, "AFU belonging to FIU %s is not supported yet.\n",
 			 binfo->feature_dev->name);
@@ -707,35 +1082,79 @@
 	return 0;
 }
 
+static int build_info_prepare(struct build_feature_devs_info *binfo,
+			      resource_size_t start, resource_size_t len)
+{
+	struct device *dev = binfo->dev;
+	void __iomem *ioaddr;
+
+	if (!devm_request_mem_region(dev, start, len, dev_name(dev))) {
+		dev_err(dev, "request region fail, start:%pa, len:%pa\n",
+			&start, &len);
+		return -EBUSY;
+	}
+
+	ioaddr = devm_ioremap(dev, start, len);
+	if (!ioaddr) {
+		dev_err(dev, "ioremap region fail, start:%pa, len:%pa\n",
+			&start, &len);
+		return -ENOMEM;
+	}
+
+	binfo->start = start;
+	binfo->len = len;
+	binfo->ioaddr = ioaddr;
+
+	return 0;
+}
+
+static void build_info_complete(struct build_feature_devs_info *binfo)
+{
+	devm_iounmap(binfo->dev, binfo->ioaddr);
+	devm_release_mem_region(binfo->dev, binfo->start, binfo->len);
+}
+
 static int parse_feature_fiu(struct build_feature_devs_info *binfo,
-			     struct dfl_fpga_enum_dfl *dfl,
 			     resource_size_t ofst)
 {
-	u32 id, offset;
-	u64 v;
 	int ret = 0;
+	u32 offset;
+	u16 id;
+	u64 v;
 
-	v = readq(dfl->ioaddr + ofst + DFH);
+	if (is_feature_dev_detected(binfo)) {
+		build_info_complete(binfo);
+
+		ret = build_info_commit_dev(binfo);
+		if (ret)
+			return ret;
+
+		ret = build_info_prepare(binfo, binfo->start + ofst,
+					 binfo->len - ofst);
+		if (ret)
+			return ret;
+	}
+
+	v = readq(binfo->ioaddr + DFH);
 	id = FIELD_GET(DFH_ID, v);
 
 	/* create platform device for dfl feature dev */
-	ret = build_info_create_dev(binfo, dfh_id_to_type(id),
-				    dfl->ioaddr + ofst);
+	ret = build_info_create_dev(binfo, dfh_id_to_type(id));
 	if (ret)
 		return ret;
 
-	ret = create_feature_instance(binfo, dfl, ofst, 0, 0);
+	ret = create_feature_instance(binfo, 0, 0, 0);
 	if (ret)
 		return ret;
 	/*
 	 * find and parse FIU's child AFU via its NEXT_AFU register.
 	 * please note that only Port has valid NEXT_AFU pointer per spec.
 	 */
-	v = readq(dfl->ioaddr + ofst + NEXT_AFU);
+	v = readq(binfo->ioaddr + NEXT_AFU);
 
 	offset = FIELD_GET(NEXT_AFU_NEXT_DFH_OFST, v);
 	if (offset)
-		return parse_feature_afu(binfo, dfl, ofst + offset);
+		return parse_feature_afu(binfo, offset);
 
 	dev_dbg(binfo->dev, "No AFUs detected on FIU %d\n", id);
 
@@ -743,41 +1162,39 @@
 }
 
 static int parse_feature_private(struct build_feature_devs_info *binfo,
-				 struct dfl_fpga_enum_dfl *dfl,
 				 resource_size_t ofst)
 {
-	if (!binfo->feature_dev) {
-		dev_err(binfo->dev, "the private feature %llx does not belong to any AFU.\n",
-			(unsigned long long)feature_id(dfl->ioaddr + ofst));
+	if (!is_feature_dev_detected(binfo)) {
+		dev_err(binfo->dev, "the private feature 0x%x does not belong to any AFU.\n",
+			feature_id(binfo->ioaddr + ofst));
 		return -EINVAL;
 	}
 
-	return create_feature_instance(binfo, dfl, ofst, 0, 0);
+	return create_feature_instance(binfo, ofst, 0, 0);
 }
 
 /**
  * parse_feature - parse a feature on given device feature list
  *
  * @binfo: build feature devices information.
- * @dfl: device feature list to parse
- * @ofst: offset to feature header on this device feature list
+ * @ofst: offset to current FIU header
  */
 static int parse_feature(struct build_feature_devs_info *binfo,
-			 struct dfl_fpga_enum_dfl *dfl, resource_size_t ofst)
+			 resource_size_t ofst)
 {
 	u64 v;
 	u32 type;
 
-	v = readq(dfl->ioaddr + ofst + DFH);
+	v = readq(binfo->ioaddr + ofst + DFH);
 	type = FIELD_GET(DFH_TYPE, v);
 
 	switch (type) {
 	case DFH_TYPE_AFU:
-		return parse_feature_afu(binfo, dfl, ofst);
+		return parse_feature_afu(binfo, ofst);
 	case DFH_TYPE_PRIVATE:
-		return parse_feature_private(binfo, dfl, ofst);
+		return parse_feature_private(binfo, ofst);
 	case DFH_TYPE_FIU:
-		return parse_feature_fiu(binfo, dfl, ofst);
+		return parse_feature_fiu(binfo, ofst);
 	default:
 		dev_info(binfo->dev,
 			 "Feature Type %x is not supported.\n", type);
@@ -787,14 +1204,17 @@
 }
 
 static int parse_feature_list(struct build_feature_devs_info *binfo,
-			      struct dfl_fpga_enum_dfl *dfl)
+			      resource_size_t start, resource_size_t len)
 {
-	void __iomem *start = dfl->ioaddr;
-	void __iomem *end = dfl->ioaddr + dfl->len;
+	resource_size_t end = start + len;
 	int ret = 0;
 	u32 ofst = 0;
 	u64 v;
 
+	ret = build_info_prepare(binfo, start, len);
+	if (ret)
+		return ret;
+
 	/* walk through the device feature list via DFH's next DFH pointer. */
 	for (; start < end; start += ofst) {
 		if (end - start < DFH_SIZE) {
@@ -802,11 +1222,11 @@
 			return -EINVAL;
 		}
 
-		ret = parse_feature(binfo, dfl, start - dfl->ioaddr);
+		ret = parse_feature(binfo, start - binfo->start);
 		if (ret)
 			return ret;
 
-		v = readq(start + DFH);
+		v = readq(binfo->ioaddr + start - binfo->start + DFH);
 		ofst = FIELD_GET(DFH_NEXT_HDR_OFST, v);
 
 		/* stop parsing if EOL(End of List) is set or offset is 0 */
@@ -815,7 +1235,12 @@
 	}
 
 	/* commit current feature device when reach the end of list */
-	return build_info_commit_dev(binfo);
+	build_info_complete(binfo);
+
+	if (is_feature_dev_detected(binfo))
+		ret = build_info_commit_dev(binfo);
+
+	return ret;
 }
 
 struct dfl_fpga_enum_info *dfl_fpga_enum_info_alloc(struct device *dev)
@@ -853,6 +1278,10 @@
 		devm_kfree(dev, dfl);
 	}
 
+	/* remove irq table */
+	if (info->irq_table)
+		devm_kfree(dev, info->irq_table);
+
 	devm_kfree(dev, info);
 	put_device(dev);
 }
@@ -864,7 +1293,6 @@
  * @info: ptr to dfl_fpga_enum_info
  * @start: mmio resource address of the device feature list.
  * @len: mmio resource length of the device feature list.
- * @ioaddr: mapped mmio resource address of the device feature list.
  *
  * One FPGA device may have one or more Device Feature Lists (DFLs), use this
  * function to add information of each DFL to common data structure for next
@@ -873,8 +1301,7 @@
  * Return: 0 on success, negative error code otherwise.
  */
 int dfl_fpga_enum_info_add_dfl(struct dfl_fpga_enum_info *info,
-			       resource_size_t start, resource_size_t len,
-			       void __iomem *ioaddr)
+			       resource_size_t start, resource_size_t len)
 {
 	struct dfl_fpga_enum_dfl *dfl;
 
@@ -884,7 +1311,6 @@
 
 	dfl->start = start;
 	dfl->len = len;
-	dfl->ioaddr = ioaddr;
 
 	list_add_tail(&dfl->node, &info->dfls);
 
@@ -892,6 +1318,45 @@
 }
 EXPORT_SYMBOL_GPL(dfl_fpga_enum_info_add_dfl);
 
+/**
+ * dfl_fpga_enum_info_add_irq - add irq table to enum info
+ *
+ * @info: ptr to dfl_fpga_enum_info
+ * @nr_irqs: number of irqs of the DFL fpga device to be enumerated.
+ * @irq_table: Linux IRQ numbers for all irqs, indexed by local irq index of
+ *	       this device.
+ *
+ * One FPGA device may have several interrupts. This function adds the irq
+ * information of the DFL fpga device to the enum info for the next step of
+ * enumeration, so it must be called before dfl_fpga_feature_devs_enumerate().
+ * As only one irq domain is supported per enum info, adding an irq table a
+ * second time for the same enum info returns an error.
+ *
+ * DFLs which belong to different irq domains need separate enum infos and
+ * must be enumerated one by one.
+ *
+ * Return: 0 on success, negative error code otherwise.
+ */
+int dfl_fpga_enum_info_add_irq(struct dfl_fpga_enum_info *info,
+			       unsigned int nr_irqs, int *irq_table)
+{
+	if (!nr_irqs || !irq_table)
+		return -EINVAL;
+
+	if (info->irq_table)
+		return -EEXIST;
+
+	info->irq_table = devm_kmemdup(info->dev, irq_table,
+				       sizeof(int) * nr_irqs, GFP_KERNEL);
+	if (!info->irq_table)
+		return -ENOMEM;
+
+	info->nr_irqs = nr_irqs;
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(dfl_fpga_enum_info_add_irq);
+
 static int remove_feature_dev(struct device *dev, void *data)
 {
 	struct platform_device *pdev = to_platform_device(dev);
@@ -959,12 +1424,16 @@
 	binfo->dev = info->dev;
 	binfo->cdev = cdev;
 
+	binfo->nr_irqs = info->nr_irqs;
+	if (info->nr_irqs)
+		binfo->irq_table = info->irq_table;
+
 	/*
 	 * start enumeration for all feature devices based on Device Feature
 	 * Lists.
 	 */
 	list_for_each_entry(dfl, &info->dfls, node) {
-		ret = parse_feature_list(binfo, dfl);
+		ret = parse_feature_list(binfo, dfl->start, dfl->len);
 		if (ret) {
 			remove_feature_devs(cdev);
 			build_info_free(binfo);
@@ -1057,11 +1526,17 @@
 {
 	int ret;
 
+	ret = bus_register(&dfl_bus_type);
+	if (ret)
+		return ret;
+
 	dfl_ids_init();
 
 	ret = dfl_chardev_init();
-	if (ret)
+	if (ret) {
 		dfl_ids_destroy();
+		bus_unregister(&dfl_bus_type);
+	}
 
 	return ret;
 }
@@ -1079,6 +1554,7 @@
  */
 int dfl_fpga_cdev_release_port(struct dfl_fpga_cdev *cdev, int port_id)
 {
+	struct dfl_feature_platform_data *pdata;
 	struct platform_device *port_pdev;
 	int ret = -ENODEV;
 
@@ -1093,7 +1569,11 @@
 		goto put_dev_exit;
 	}
 
-	ret = dfl_feature_dev_use_begin(dev_get_platdata(&port_pdev->dev));
+	pdata = dev_get_platdata(&port_pdev->dev);
+
+	mutex_lock(&pdata->lock);
+	ret = dfl_feature_dev_use_begin(pdata, true);
+	mutex_unlock(&pdata->lock);
 	if (ret)
 		goto put_dev_exit;
 
@@ -1120,6 +1600,7 @@
  */
 int dfl_fpga_cdev_assign_port(struct dfl_fpga_cdev *cdev, int port_id)
 {
+	struct dfl_feature_platform_data *pdata;
 	struct platform_device *port_pdev;
 	int ret = -ENODEV;
 
@@ -1138,7 +1619,12 @@
 	if (ret)
 		goto put_dev_exit;
 
-	dfl_feature_dev_use_end(dev_get_platdata(&port_pdev->dev));
+	pdata = dev_get_platdata(&port_pdev->dev);
+
+	mutex_lock(&pdata->lock);
+	dfl_feature_dev_use_end(pdata);
+	mutex_unlock(&pdata->lock);
+
 	cdev->released_port_num--;
 put_dev_exit:
 	put_device(&port_pdev->dev);
@@ -1230,10 +1716,165 @@
 }
 EXPORT_SYMBOL_GPL(dfl_fpga_cdev_config_ports_vf);
 
+static irqreturn_t dfl_irq_handler(int irq, void *arg)
+{
+	struct eventfd_ctx *trigger = arg;
+
+	eventfd_signal(trigger, 1);
+	return IRQ_HANDLED;
+}
+
+static int do_set_irq_trigger(struct dfl_feature *feature, unsigned int idx,
+			      int fd)
+{
+	struct platform_device *pdev = feature->dev;
+	struct eventfd_ctx *trigger;
+	int irq, ret;
+
+	irq = feature->irq_ctx[idx].irq;
+
+	if (feature->irq_ctx[idx].trigger) {
+		free_irq(irq, feature->irq_ctx[idx].trigger);
+		kfree(feature->irq_ctx[idx].name);
+		eventfd_ctx_put(feature->irq_ctx[idx].trigger);
+		feature->irq_ctx[idx].trigger = NULL;
+	}
+
+	if (fd < 0)
+		return 0;
+
+	feature->irq_ctx[idx].name =
+		kasprintf(GFP_KERNEL, "fpga-irq[%u](%s-%x)", idx,
+			  dev_name(&pdev->dev), feature->id);
+	if (!feature->irq_ctx[idx].name)
+		return -ENOMEM;
+
+	trigger = eventfd_ctx_fdget(fd);
+	if (IS_ERR(trigger)) {
+		ret = PTR_ERR(trigger);
+		goto free_name;
+	}
+
+	ret = request_irq(irq, dfl_irq_handler, 0,
+			  feature->irq_ctx[idx].name, trigger);
+	if (!ret) {
+		feature->irq_ctx[idx].trigger = trigger;
+		return ret;
+	}
+
+	eventfd_ctx_put(trigger);
+free_name:
+	kfree(feature->irq_ctx[idx].name);
+
+	return ret;
+}
+
+/**
+ * dfl_fpga_set_irq_triggers - set eventfd triggers for dfl feature interrupts
+ *
+ * @feature: dfl sub feature.
+ * @start: start of irq index in this dfl sub feature.
+ * @count: number of irqs.
+ * @fds: eventfds to bind with irqs. Unbind the related irq if fds[n] is
+ *	 negative; unbind "count" irqs if the fds ptr is NULL.
+ *
+ * Bind the given eventfds with irqs in this dfl sub feature. The related irq
+ * is unbound if fds[n] is negative, and "count" irqs starting at @start are
+ * unbound if the fds ptr is NULL.
+ *
+ * Return: 0 on success, negative error code otherwise.
+ */
+int dfl_fpga_set_irq_triggers(struct dfl_feature *feature, unsigned int start,
+			      unsigned int count, int32_t *fds)
+{
+	unsigned int i;
+	int ret = 0;
+
+	/* overflow */
+	if (unlikely(start + count < start))
+		return -EINVAL;
+
+	/* exceeds nr_irqs */
+	if (start + count > feature->nr_irqs)
+		return -EINVAL;
+
+	for (i = 0; i < count; i++) {
+		int fd = fds ? fds[i] : -1;
+
+		ret = do_set_irq_trigger(feature, start + i, fd);
+		if (ret) {
+			while (i--)
+				do_set_irq_trigger(feature, start + i, -1);
+			break;
+		}
+	}
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(dfl_fpga_set_irq_triggers);
+
+/**
+ * dfl_feature_ioctl_get_num_irqs - dfl feature _GET_IRQ_NUM ioctl interface.
+ * @pdev: the feature device which has the sub feature
+ * @feature: the dfl sub feature
+ * @arg: ioctl argument
+ *
+ * Return: 0 on success, negative error code otherwise.
+ */
+long dfl_feature_ioctl_get_num_irqs(struct platform_device *pdev,
+				    struct dfl_feature *feature,
+				    unsigned long arg)
+{
+	return put_user(feature->nr_irqs, (__u32 __user *)arg);
+}
+EXPORT_SYMBOL_GPL(dfl_feature_ioctl_get_num_irqs);
+
+/**
+ * dfl_feature_ioctl_set_irq - dfl feature _SET_IRQ ioctl interface.
+ * @pdev: the feature device which has the sub feature
+ * @feature: the dfl sub feature
+ * @arg: ioctl argument
+ *
+ * Return: 0 on success, negative error code otherwise.
+ */
+long dfl_feature_ioctl_set_irq(struct platform_device *pdev,
+			       struct dfl_feature *feature,
+			       unsigned long arg)
+{
+	struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
+	struct dfl_fpga_irq_set hdr;
+	s32 *fds;
+	long ret;
+
+	if (!feature->nr_irqs)
+		return -ENOENT;
+
+	if (copy_from_user(&hdr, (void __user *)arg, sizeof(hdr)))
+		return -EFAULT;
+
+	if (!hdr.count || (hdr.start + hdr.count > feature->nr_irqs) ||
+	    (hdr.start + hdr.count < hdr.start))
+		return -EINVAL;
+
+	fds = memdup_user((void __user *)(arg + sizeof(hdr)),
+			  hdr.count * sizeof(s32));
+	if (IS_ERR(fds))
+		return PTR_ERR(fds);
+
+	mutex_lock(&pdata->lock);
+	ret = dfl_fpga_set_irq_triggers(feature, hdr.start, hdr.count, fds);
+	mutex_unlock(&pdata->lock);
+
+	kfree(fds);
+	return ret;
+}
+EXPORT_SYMBOL_GPL(dfl_feature_ioctl_set_irq);
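
From userspace the flow mirrors this: create an eventfd, hand it to the feature device through the matching *_SET_IRQ ioctl, then block on the eventfd until the interrupt fires. A hedged sketch, assuming the struct dfl_fpga_irq_set layout and the DFL_FPGA_PORT_ERR_SET_IRQ request from include/uapi/linux/fpga-dfl.h:

	#include <stdlib.h>
	#include <unistd.h>
	#include <sys/eventfd.h>
	#include <sys/ioctl.h>
	#include <linux/fpga-dfl.h>

	/* Bind one eventfd to port-error interrupt 0; returns the eventfd. */
	static int bind_port_err_irq(int port_fd)
	{
		struct dfl_fpga_irq_set *set;
		int efd, ret;

		efd = eventfd(0, EFD_CLOEXEC);
		if (efd < 0)
			return -1;

		set = malloc(sizeof(*set) + sizeof(__s32));
		if (!set) {
			close(efd);
			return -1;
		}
		set->start = 0;		/* first irq of this sub feature */
		set->count = 1;
		set->evtfds[0] = efd;

		ret = ioctl(port_fd, DFL_FPGA_PORT_ERR_SET_IRQ, set);
		free(set);
		return ret ? -1 : efd;	/* read(efd, ...) blocks until the irq fires */
	}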
+
 static void __exit dfl_fpga_exit(void)
 {
 	dfl_chardev_uinit();
 	dfl_ids_destroy();
+	bus_unregister(&dfl_bus_type);
 }
 
 module_init(dfl_fpga_init);
diff --git a/drivers/fpga/dfl.h b/drivers/fpga/dfl.h
index 9f0e656..5dc758f 100644
--- a/drivers/fpga/dfl.h
+++ b/drivers/fpga/dfl.h
@@ -17,7 +17,9 @@
 #include <linux/bitfield.h>
 #include <linux/cdev.h>
 #include <linux/delay.h>
+#include <linux/eventfd.h>
 #include <linux/fs.h>
+#include <linux/interrupt.h>
 #include <linux/iopoll.h>
 #include <linux/io-64-nonatomic-lo-hi.h>
 #include <linux/platform_device.h>
@@ -112,6 +114,13 @@
 #define FME_PORT_OFST_ACC_VF	1
 #define FME_PORT_OFST_IMP	BIT_ULL(60)
 
+/* FME Error Capability Register */
+#define FME_ERROR_CAP		0x70
+
+/* FME Error Capability Register Bitfield */
+#define FME_ERROR_CAP_SUPP_INT	BIT_ULL(0)		/* Interrupt Support */
+#define FME_ERROR_CAP_INT_VECT	GENMASK_ULL(12, 1)	/* Interrupt vector */
+
 /* PORT Header Register Set */
 #define PORT_HDR_DFH		DFH
 #define PORT_HDR_GUID_L		GUID_L
@@ -145,6 +154,20 @@
 #define PORT_STS_PWR_STATE_AP2	2			/* 90% throttling */
 #define PORT_STS_PWR_STATE_AP6	6			/* 100% throttling */
 
+/* Port Error Capability Register */
+#define PORT_ERROR_CAP		0x38
+
+/* Port Error Capability Register Bitfield */
+#define PORT_ERROR_CAP_SUPP_INT	BIT_ULL(0)		/* Interrupt Support */
+#define PORT_ERROR_CAP_INT_VECT	GENMASK_ULL(12, 1)	/* Interrupt vector */
+
+/* Port Uint Capability Register */
+#define PORT_UINT_CAP		0x8
+
+/* Port Uint Capability Register Bitfield */
+#define PORT_UINT_CAP_INT_NUM	GENMASK_ULL(11, 0)	/* Interrupts num */
+#define PORT_UINT_CAP_FST_VECT	GENMASK_ULL(23, 12)	/* First Vector */
+
 /**
  * struct dfl_fpga_port_ops - port ops
  *
@@ -174,7 +197,7 @@
  * @id: unique dfl private feature id.
  */
 struct dfl_feature_id {
-	u64 id;
+	u16 id;
 };
 
 /**
@@ -189,24 +212,45 @@
 };
 
 /**
+ * struct dfl_feature_irq_ctx - dfl private feature interrupt context
+ *
+ * @irq: Linux IRQ number of this interrupt.
+ * @trigger: eventfd context to signal when interrupt happens.
+ * @name: irq name needed when requesting irq.
+ */
+struct dfl_feature_irq_ctx {
+	int irq;
+	struct eventfd_ctx *trigger;
+	char *name;
+};
+
+/**
  * struct dfl_feature - sub feature of the feature devices
  *
+ * @dev: ptr to pdev of the feature device which has the sub feature.
  * @id: sub feature id.
  * @resource_index: each sub feature has one mmio resource for its registers.
  *		    this index is used to find its mmio resource from the
  *		    feature dev (platform device)'s resources.
  * @ioaddr: mapped mmio resource address.
+ * @irq_ctx: interrupt context list.
+ * @nr_irqs: number of interrupt contexts.
  * @ops: ops of this sub feature.
+ * @ddev: ptr to the dfl device of this sub feature.
+ * @priv: priv data of this feature.
  */
 struct dfl_feature {
-	u64 id;
+	struct platform_device *dev;
+	u16 id;
 	int resource_index;
 	void __iomem *ioaddr;
+	struct dfl_feature_irq_ctx *irq_ctx;
+	unsigned int nr_irqs;
 	const struct dfl_feature_ops *ops;
+	struct dfl_device *ddev;
+	void *priv;
 };
 
-#define DEV_STATUS_IN_USE	0
-
 #define FEATURE_DEV_ID_UNUSED	(-1)
 
 /**
@@ -219,8 +263,9 @@
  * @dfl_cdev: ptr to container device.
  * @id: id used for this feature device.
  * @disable_count: count for port disable.
+ * @excl_open: set on feature device exclusive open.
+ * @open_count: count for feature device open.
  * @num: number for sub features.
- * @dev_status: dev status (e.g. DEV_STATUS_IN_USE).
  * @private: ptr to feature dev private data.
  * @features: sub features of this feature dev.
  */
@@ -232,26 +277,46 @@
 	struct dfl_fpga_cdev *dfl_cdev;
 	int id;
 	unsigned int disable_count;
-	unsigned long dev_status;
+	bool excl_open;
+	int open_count;
 	void *private;
 	int num;
-	struct dfl_feature features[0];
+	struct dfl_feature features[];
 };
 
 static inline
-int dfl_feature_dev_use_begin(struct dfl_feature_platform_data *pdata)
+int dfl_feature_dev_use_begin(struct dfl_feature_platform_data *pdata,
+			      bool excl)
 {
-	/* Test and set IN_USE flags to ensure file is exclusively used */
-	if (test_and_set_bit_lock(DEV_STATUS_IN_USE, &pdata->dev_status))
+	if (pdata->excl_open)
 		return -EBUSY;
 
+	if (excl) {
+		if (pdata->open_count)
+			return -EBUSY;
+
+		pdata->excl_open = true;
+	}
+	pdata->open_count++;
+
 	return 0;
 }
 
 static inline
 void dfl_feature_dev_use_end(struct dfl_feature_platform_data *pdata)
 {
-	clear_bit_unlock(DEV_STATUS_IN_USE, &pdata->dev_status);
+	pdata->excl_open = false;
+
+	if (WARN_ON(pdata->open_count <= 0))
+		return;
+
+	pdata->open_count--;
+}
+
+static inline
+int dfl_feature_dev_use_count(struct dfl_feature_platform_data *pdata)
+{
+	return pdata->open_count;
 }
 
 static inline
@@ -278,12 +343,6 @@
 #define DFL_FPGA_FEATURE_DEV_FME		"dfl-fme"
 #define DFL_FPGA_FEATURE_DEV_PORT		"dfl-port"
 
-static inline int dfl_feature_platform_data_size(const int num)
-{
-	return sizeof(struct dfl_feature_platform_data) +
-		num * sizeof(struct dfl_feature);
-}
-
 void dfl_fpga_dev_feature_uinit(struct platform_device *pdev);
 int dfl_fpga_dev_feature_init(struct platform_device *pdev,
 			      struct dfl_feature_driver *feature_drvs);
@@ -308,7 +367,7 @@
 	   (feature) < (pdata)->features + (pdata)->num; (feature)++)
 
 static inline
-struct dfl_feature *dfl_get_feature_by_id(struct device *dev, u64 id)
+struct dfl_feature *dfl_get_feature_by_id(struct device *dev, u16 id)
 {
 	struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
 	struct dfl_feature *feature;
@@ -321,7 +380,7 @@
 }
 
 static inline
-void __iomem *dfl_get_feature_ioaddr_by_id(struct device *dev, u64 id)
+void __iomem *dfl_get_feature_ioaddr_by_id(struct device *dev, u16 id)
 {
 	struct dfl_feature *feature = dfl_get_feature_by_id(dev, id);
 
@@ -332,7 +391,7 @@
 	return NULL;
 }
 
-static inline bool is_dfl_feature_present(struct device *dev, u64 id)
+static inline bool is_dfl_feature_present(struct device *dev, u16 id)
 {
 	return !!dfl_get_feature_ioaddr_by_id(dev, id);
 }
@@ -369,10 +428,14 @@
  *
  * @dev: parent device.
  * @dfls: list of device feature lists.
+ * @nr_irqs: number of irqs for all feature devices.
+ * @irq_table: Linux IRQ numbers for all irqs, indexed by hw irq numbers.
  */
 struct dfl_fpga_enum_info {
 	struct device *dev;
 	struct list_head dfls;
+	unsigned int nr_irqs;
+	int *irq_table;
 };
 
 /**
@@ -380,22 +443,19 @@
  *
  * @start: base address of this device feature list.
  * @len: size of this device feature list.
- * @ioaddr: mapped base address of this device feature list.
  * @node: node in list of device feature lists.
  */
 struct dfl_fpga_enum_dfl {
 	resource_size_t start;
 	resource_size_t len;
-
-	void __iomem *ioaddr;
-
 	struct list_head node;
 };
 
 struct dfl_fpga_enum_info *dfl_fpga_enum_info_alloc(struct device *dev);
 int dfl_fpga_enum_info_add_dfl(struct dfl_fpga_enum_info *info,
-			       resource_size_t start, resource_size_t len,
-			       void __iomem *ioaddr);
+			       resource_size_t start, resource_size_t len);
+int dfl_fpga_enum_info_add_irq(struct dfl_fpga_enum_info *info,
+			       unsigned int nr_irqs, int *irq_table);
 void dfl_fpga_enum_info_free(struct dfl_fpga_enum_info *info);
 
 /**
@@ -447,4 +507,97 @@
 int dfl_fpga_cdev_assign_port(struct dfl_fpga_cdev *cdev, int port_id);
 void dfl_fpga_cdev_config_ports_pf(struct dfl_fpga_cdev *cdev);
 int dfl_fpga_cdev_config_ports_vf(struct dfl_fpga_cdev *cdev, int num_vf);
+int dfl_fpga_set_irq_triggers(struct dfl_feature *feature, unsigned int start,
+			      unsigned int count, int32_t *fds);
+long dfl_feature_ioctl_get_num_irqs(struct platform_device *pdev,
+				    struct dfl_feature *feature,
+				    unsigned long arg);
+long dfl_feature_ioctl_set_irq(struct platform_device *pdev,
+			       struct dfl_feature *feature,
+			       unsigned long arg);
+
+/**
+ * enum dfl_id_type - define the DFL FIU types
+ */
+enum dfl_id_type {
+	FME_ID,
+	PORT_ID,
+	DFL_ID_MAX,
+};
+
+/**
+ * struct dfl_device_id - dfl device identifier
+ * @type: the 4-bit DFL FIU type of the device. See enum dfl_id_type.
+ * @feature_id: the 12-bit feature identifier local to its DFL FIU type.
+ * @driver_data: driver specific data.
+ */
+struct dfl_device_id {
+	u8 type;
+	u16 feature_id;
+	unsigned long driver_data;
+};
+
+/**
+ * struct dfl_device - represents a dfl device on the dfl bus
+ *
+ * @dev: generic device interface.
+ * @id: id of the dfl device.
+ * @type: type of DFL FIU of the device. See enum dfl_id_type.
+ * @feature_id: 16-bit feature identifier local to its DFL FIU type.
+ * @mmio_res: mmio resource of this dfl device.
+ * @irqs: list of Linux IRQ numbers of this dfl device.
+ * @num_irqs: number of IRQs supported by this dfl device.
+ * @cdev: pointer to DFL FPGA container device this dfl device belongs to.
+ * @id_entry: matched id entry in dfl driver's id table.
+ */
+struct dfl_device {
+	struct device dev;
+	int id;
+	u8 type;
+	u16 feature_id;
+	struct resource mmio_res;
+	int *irqs;
+	unsigned int num_irqs;
+	struct dfl_fpga_cdev *cdev;
+	const struct dfl_device_id *id_entry;
+};
+
+/**
+ * struct dfl_driver - represents a dfl device driver
+ *
+ * @drv: driver model structure.
+ * @id_table: pointer to table of device IDs the driver is interested in.
+ *	      { } member terminated.
+ * @probe: mandatory callback for device binding.
+ * @remove: callback for device unbinding.
+ */
+struct dfl_driver {
+	struct device_driver drv;
+	const struct dfl_device_id *id_table;
+
+	int (*probe)(struct dfl_device *dfl_dev);
+	void (*remove)(struct dfl_device *dfl_dev);
+};
+
+#define to_dfl_dev(d) container_of(d, struct dfl_device, dev)
+#define to_dfl_drv(d) container_of(d, struct dfl_driver, drv)
+
+/*
+ * use a macro to avoid include chaining to get THIS_MODULE.
+ */
+#define dfl_driver_register(drv) \
+	__dfl_driver_register(drv, THIS_MODULE)
+int __dfl_driver_register(struct dfl_driver *dfl_drv, struct module *owner);
+void dfl_driver_unregister(struct dfl_driver *dfl_drv);
+
+/*
+ * module_dfl_driver() - Helper macro for drivers that don't do
+ * anything special in module init/exit.  This eliminates a lot of
+ * boilerplate.  Each module may only use this macro once, and
+ * calling it replaces module_init() and module_exit().
+ */
+#define module_dfl_driver(__dfl_driver) \
+	module_driver(__dfl_driver, dfl_driver_register, \
+		      dfl_driver_unregister)
+
 #endif /* __FPGA_DFL_H */
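
Putting the new bus pieces together, a minimal client driver needs only an ID table, a probe callback and the module_dfl_driver() helper. Everything in this sketch comes from the interfaces added above except the feature ID, which is a made-up example value:

	#include <linux/module.h>
	#include "dfl.h"

	static int my_feature_probe(struct dfl_device *ddev)
	{
		dev_info(&ddev->dev, "bound to feature 0x%x\n", ddev->feature_id);
		return 0;
	}

	static const struct dfl_device_id my_feature_ids[] = {
		{ .type = FME_ID, .feature_id = 0x10 },	/* hypothetical feature */
		{ }
	};

	static struct dfl_driver my_feature_driver = {
		.drv		= { .name = "dfl-my-feature" },
		.id_table	= my_feature_ids,
		.probe		= my_feature_probe,
	};
	module_dfl_driver(my_feature_driver);
	MODULE_LICENSE("GPL");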
diff --git a/drivers/fpga/fpga-bridge.c b/drivers/fpga/fpga-bridge.c
index 4bab902..2deccac 100644
--- a/drivers/fpga/fpga-bridge.c
+++ b/drivers/fpga/fpga-bridge.c
@@ -328,7 +328,7 @@
 				       void *priv)
 {
 	struct fpga_bridge *bridge;
-	int id, ret = 0;
+	int id, ret;
 
 	if (!name || !strlen(name)) {
 		dev_err(dev, "Attempt to register with no name!\n");
@@ -340,10 +340,8 @@
 		return NULL;
 
 	id = ida_simple_get(&fpga_bridge_ida, 0, 0, GFP_KERNEL);
-	if (id < 0) {
-		ret = id;
+	if (id < 0)
 		goto error_kfree;
-	}
 
 	mutex_init(&bridge->mutex);
 	INIT_LIST_HEAD(&bridge->node);
diff --git a/drivers/fpga/fpga-mgr.c b/drivers/fpga/fpga-mgr.c
index e05104f..f38bab0 100644
--- a/drivers/fpga/fpga-mgr.c
+++ b/drivers/fpga/fpga-mgr.c
@@ -581,10 +581,8 @@
 		return NULL;
 
 	id = ida_simple_get(&fpga_mgr_ida, 0, 0, GFP_KERNEL);
-	if (id < 0) {
-		ret = id;
+	if (id < 0)
 		goto error_kfree;
-	}
 
 	mutex_init(&mgr->ref_mutex);
 
diff --git a/drivers/fpga/fpga-region.c b/drivers/fpga/fpga-region.c
index bde5a9d..c3134b8 100644
--- a/drivers/fpga/fpga-region.c
+++ b/drivers/fpga/fpga-region.c
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0
 /*
- * FPGA Region - Device Tree support for FPGA programming under Linux
+ * FPGA Region - Support for FPGA programming under Linux
  *
  *  Copyright (C) 2013-2016 Altera Corporation
  *  Copyright (C) 2017 Intel Corporation
diff --git a/drivers/fpga/ice40-spi.c b/drivers/fpga/ice40-spi.c
index 56e112e..8d689fe 100644
--- a/drivers/fpga/ice40-spi.c
+++ b/drivers/fpga/ice40-spi.c
@@ -46,10 +46,16 @@
 	struct spi_message message;
 	struct spi_transfer assert_cs_then_reset_delay = {
 		.cs_change   = 1,
-		.delay_usecs = ICE40_SPI_RESET_DELAY
+		.delay = {
+			.value = ICE40_SPI_RESET_DELAY,
+			.unit = SPI_DELAY_UNIT_USECS
+		}
 	};
 	struct spi_transfer housekeeping_delay_then_release_cs = {
-		.delay_usecs = ICE40_SPI_HOUSEKEEPING_DELAY
+		.delay = {
+			.value = ICE40_SPI_HOUSEKEEPING_DELAY,
+			.unit = SPI_DELAY_UNIT_USECS
+		}
 	};
 	int ret;
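
The delay hunks here and in machxo2-spi.c below convert from the legacy delay_usecs field to the spi_delay struct, which spells the unit out. The general before/after shape (sketch; field names as in <linux/spi/spi.h>):

	/* before: unit implicitly microseconds */
	struct spi_transfer t_old = { .delay_usecs = 10 };

	/* after: the same 10 us, with an explicit unit */
	struct spi_transfer t_new = {
		.delay = { .value = 10, .unit = SPI_DELAY_UNIT_USECS },
	};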
 
diff --git a/drivers/fpga/machxo2-spi.c b/drivers/fpga/machxo2-spi.c
index 4d8a876..9eef183 100644
--- a/drivers/fpga/machxo2-spi.c
+++ b/drivers/fpga/machxo2-spi.c
@@ -157,7 +157,8 @@
 	spi_message_init(&msg);
 	tx[1].tx_buf = &refresh;
 	tx[1].len = sizeof(refresh);
-	tx[1].delay_usecs = MACHXO2_REFRESH_USEC;
+	tx[1].delay.value = MACHXO2_REFRESH_USEC;
+	tx[1].delay.unit = SPI_DELAY_UNIT_USECS;
 	spi_message_add_tail(&tx[1], &msg);
 	ret = spi_sync(spi, &msg);
 	if (ret)
@@ -208,7 +209,8 @@
 	spi_message_init(&msg);
 	tx[0].tx_buf = &enable;
 	tx[0].len = sizeof(enable);
-	tx[0].delay_usecs = MACHXO2_LOW_DELAY_USEC;
+	tx[0].delay.value = MACHXO2_LOW_DELAY_USEC;
+	tx[0].delay.unit = SPI_DELAY_UNIT_USECS;
 	spi_message_add_tail(&tx[0], &msg);
 
 	tx[1].tx_buf = &erase;
@@ -223,8 +225,10 @@
 		goto fail;
 
 	get_status(spi, &status);
-	if (test_bit(FAIL, &status))
+	if (test_bit(FAIL, &status)) {
+		ret = -EINVAL;
 		goto fail;
+	}
 	dump_status_reg(&status);
 
 	spi_message_init(&msg);
@@ -269,7 +273,8 @@
 		spi_message_init(&msg);
 		tx.tx_buf = payload;
 		tx.len = MACHXO2_BUF_SIZE;
-		tx.delay_usecs = MACHXO2_HIGH_DELAY_USEC;
+		tx.delay.value = MACHXO2_HIGH_DELAY_USEC;
+		tx.delay.unit = SPI_DELAY_UNIT_USECS;
 		spi_message_add_tail(&tx, &msg);
 		ret = spi_sync(spi, &msg);
 		if (ret) {
@@ -310,6 +315,7 @@
 	dump_status_reg(&status);
 	if (!test_bit(DONE, &status)) {
 		machxo2_cleanup(mgr);
+		ret = -EINVAL;
 		goto fail;
 	}
 
@@ -317,7 +323,8 @@
 		spi_message_init(&msg);
 		tx[1].tx_buf = &refresh;
 		tx[1].len = sizeof(refresh);
-		tx[1].delay_usecs = MACHXO2_REFRESH_USEC;
+		tx[1].delay.value = MACHXO2_REFRESH_USEC;
+		tx[1].delay.unit = SPI_DELAY_UNIT_USECS;
 		spi_message_add_tail(&tx[1], &msg);
 		ret = spi_sync(spi, &msg);
 		if (ret)
@@ -331,6 +338,7 @@
 			break;
 		if (++refreshloop == MACHXO2_MAX_REFRESH_LOOP) {
 			machxo2_cleanup(mgr);
+			ret = -EINVAL;
 			goto fail;
 		}
 	} while (1);
diff --git a/drivers/fpga/stratix10-soc.c b/drivers/fpga/stratix10-soc.c
index 559839a..9e34bbb 100644
--- a/drivers/fpga/stratix10-soc.c
+++ b/drivers/fpga/stratix10-soc.c
@@ -154,11 +154,11 @@
 	 * Here we set status bits as we receive them.  Elsewhere, we always use
 	 * test_and_clear_bit() to check status in priv->status
 	 */
-	for (i = 0; i <= SVC_STATUS_RECONFIG_ERROR; i++)
+	for (i = 0; i <= SVC_STATUS_ERROR; i++)
 		if (status & (1 << i))
 			set_bit(i, &priv->status);
 
-	if (status & BIT(SVC_STATUS_RECONFIG_BUFFER_DONE)) {
+	if (status & BIT(SVC_STATUS_BUFFER_DONE)) {
 		s10_unlock_bufs(priv, data->kaddr1);
 		s10_unlock_bufs(priv, data->kaddr2);
 		s10_unlock_bufs(priv, data->kaddr3);
@@ -196,21 +196,16 @@
 	if (ret < 0)
 		goto init_done;
 
-	ret = wait_for_completion_interruptible_timeout(
+	ret = wait_for_completion_timeout(
 		&priv->status_return_completion, S10_RECONFIG_TIMEOUT);
 	if (!ret) {
 		dev_err(dev, "timeout waiting for RECONFIG_REQUEST\n");
 		ret = -ETIMEDOUT;
 		goto init_done;
 	}
-	if (ret < 0) {
-		dev_err(dev, "error (%d) waiting for RECONFIG_REQUEST\n", ret);
-		goto init_done;
-	}
 
 	ret = 0;
-	if (!test_and_clear_bit(SVC_STATUS_RECONFIG_REQUEST_OK,
-				&priv->status)) {
+	if (!test_and_clear_bit(SVC_STATUS_OK, &priv->status)) {
 		ret = -ETIMEDOUT;
 		goto init_done;
 	}
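
Dropping the _interruptible variant matters for the error paths below: wait_for_completion_timeout() returns only 0 on timeout or the remaining jiffies on success, never a negative value, so this file's "ret < 0" branches for signal interruption become dead code and are removed.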
@@ -319,21 +314,19 @@
 		 */
 		wait_status = 1; /* not timed out */
 		if (!priv->status)
-			wait_status = wait_for_completion_interruptible_timeout(
+			wait_status = wait_for_completion_timeout(
 				&priv->status_return_completion,
 				S10_BUFFER_TIMEOUT);
 
-		if (test_and_clear_bit(SVC_STATUS_RECONFIG_BUFFER_DONE,
-				       &priv->status) ||
-		    test_and_clear_bit(SVC_STATUS_RECONFIG_BUFFER_SUBMITTED,
+		if (test_and_clear_bit(SVC_STATUS_BUFFER_DONE, &priv->status) ||
+		    test_and_clear_bit(SVC_STATUS_BUFFER_SUBMITTED,
 				       &priv->status)) {
 			ret = 0;
 			continue;
 		}
 
-		if (test_and_clear_bit(SVC_STATUS_RECONFIG_ERROR,
-				       &priv->status)) {
-			dev_err(dev, "ERROR - giving up - SVC_STATUS_RECONFIG_ERROR\n");
+		if (test_and_clear_bit(SVC_STATUS_ERROR, &priv->status)) {
+			dev_err(dev, "ERROR - giving up - SVC_STATUS_ERROR\n");
 			ret = -EFAULT;
 			break;
 		}
@@ -343,13 +336,6 @@
 			ret = -ETIMEDOUT;
 			break;
 		}
-		if (wait_status < 0) {
-			ret = wait_status;
-			dev_err(dev,
-				"error (%d) waiting for svc layer buffers\n",
-				ret);
-			break;
-		}
 	}
 
 	if (!s10_free_buffers(mgr))
@@ -375,7 +361,7 @@
 		if (ret < 0)
 			break;
 
-		ret = wait_for_completion_interruptible_timeout(
+		ret = wait_for_completion_timeout(
 			&priv->status_return_completion, timeout);
 		if (!ret) {
 			dev_err(dev,
@@ -383,23 +369,15 @@
 			ret = -ETIMEDOUT;
 			break;
 		}
-		if (ret < 0) {
-			dev_err(dev,
-				"error (%d) waiting for RECONFIG_COMPLETED\n",
-				ret);
-			break;
-		}
 		/* Not error or timeout, so ret is # of jiffies until timeout */
 		timeout = ret;
 		ret = 0;
 
-		if (test_and_clear_bit(SVC_STATUS_RECONFIG_COMPLETED,
-				       &priv->status))
+		if (test_and_clear_bit(SVC_STATUS_COMPLETED, &priv->status))
 			break;
 
-		if (test_and_clear_bit(SVC_STATUS_RECONFIG_ERROR,
-				       &priv->status)) {
-			dev_err(dev, "ERROR - giving up - SVC_STATUS_RECONFIG_ERROR\n");
+		if (test_and_clear_bit(SVC_STATUS_ERROR, &priv->status)) {
+			dev_err(dev, "ERROR - giving up - SVC_STATUS_ERROR\n");
 			ret = -EFAULT;
 			break;
 		}
@@ -483,7 +461,8 @@
 }
 
 static const struct of_device_id s10_of_match[] = {
-	{ .compatible = "intel,stratix10-soc-fpga-mgr", },
+	{.compatible = "intel,stratix10-soc-fpga-mgr"},
+	{.compatible = "intel,agilex-soc-fpga-mgr"},
 	{},
 };
 
diff --git a/drivers/fpga/ts73xx-fpga.c b/drivers/fpga/ts73xx-fpga.c
index 9a17fe9..2888ff0 100644
--- a/drivers/fpga/ts73xx-fpga.c
+++ b/drivers/fpga/ts73xx-fpga.c
@@ -119,10 +119,8 @@
 
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 	priv->io_base = devm_ioremap_resource(kdev, res);
-	if (IS_ERR(priv->io_base)) {
-		dev_err(kdev, "unable to remap registers\n");
+	if (IS_ERR(priv->io_base))
 		return PTR_ERR(priv->io_base);
-	}
 
 	mgr = devm_fpga_mgr_create(kdev, "TS-73xx FPGA Manager",
 				   &ts73xx_fpga_ops, priv);
diff --git a/drivers/fpga/xilinx-pr-decoupler.c b/drivers/fpga/xilinx-pr-decoupler.c
index af9b387..7d69af2 100644
--- a/drivers/fpga/xilinx-pr-decoupler.c
+++ b/drivers/fpga/xilinx-pr-decoupler.c
@@ -101,7 +101,8 @@
 
 	priv->clk = devm_clk_get(&pdev->dev, "aclk");
 	if (IS_ERR(priv->clk)) {
-		dev_err(&pdev->dev, "input clock not found\n");
+		if (PTR_ERR(priv->clk) != -EPROBE_DEFER)
+			dev_err(&pdev->dev, "input clock not found\n");
 		return PTR_ERR(priv->clk);
 	}
 
diff --git a/drivers/fpga/xilinx-spi.c b/drivers/fpga/xilinx-spi.c
index 272ee0c..d3e6f41 100644
--- a/drivers/fpga/xilinx-spi.c
+++ b/drivers/fpga/xilinx-spi.c
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * Xilinx Spartan6 Slave Serial SPI Driver
+ * Xilinx Spartan6 and 7 Series Slave Serial SPI Driver
  *
  * Copyright (C) 2017 DENX Software Engineering
  *
@@ -23,45 +23,107 @@
 struct xilinx_spi_conf {
 	struct spi_device *spi;
 	struct gpio_desc *prog_b;
+	struct gpio_desc *init_b;
 	struct gpio_desc *done;
 };
 
-static enum fpga_mgr_states xilinx_spi_state(struct fpga_manager *mgr)
+static int get_done_gpio(struct fpga_manager *mgr)
 {
 	struct xilinx_spi_conf *conf = mgr->priv;
+	int ret;
 
-	if (!gpiod_get_value(conf->done))
+	ret = gpiod_get_value(conf->done);
+
+	if (ret < 0)
+		dev_err(&mgr->dev, "Error reading DONE (%d)\n", ret);
+
+	return ret;
+}
+
+static enum fpga_mgr_states xilinx_spi_state(struct fpga_manager *mgr)
+{
+	if (!get_done_gpio(mgr))
 		return FPGA_MGR_STATE_RESET;
 
 	return FPGA_MGR_STATE_UNKNOWN;
 }
 
+/**
+ * wait_for_init_b - wait for the INIT_B pin to have a given state, or wait
+ * a given delay if the pin is unavailable
+ *
+ * @mgr:        The FPGA manager object
+ * @value:      Value of INIT_B to wait for (1 = asserted = low)
+ * @alt_udelay: Delay to wait if the INIT_B GPIO is not available
+ *
+ * Returns 0 when the INIT_B GPIO reaches the given state, or -ETIMEDOUT if
+ * too much time passes waiting for it. If no INIT_B GPIO is available,
+ * always returns 0 after the delay.
+ */
+static int wait_for_init_b(struct fpga_manager *mgr, int value,
+			   unsigned long alt_udelay)
+{
+	struct xilinx_spi_conf *conf = mgr->priv;
+	unsigned long timeout = jiffies + msecs_to_jiffies(1000);
+
+	if (conf->init_b) {
+		while (time_before(jiffies, timeout)) {
+			int ret = gpiod_get_value(conf->init_b);
+
+			if (ret == value)
+				return 0;
+
+			if (ret < 0) {
+				dev_err(&mgr->dev, "Error reading INIT_B (%d)\n", ret);
+				return ret;
+			}
+
+			usleep_range(100, 400);
+		}
+
+		dev_err(&mgr->dev, "Timeout waiting for INIT_B to %s\n",
+			value ? "assert" : "deassert");
+		return -ETIMEDOUT;
+	}
+
+	udelay(alt_udelay);
+
+	return 0;
+}
+
 static int xilinx_spi_write_init(struct fpga_manager *mgr,
 				 struct fpga_image_info *info,
 				 const char *buf, size_t count)
 {
 	struct xilinx_spi_conf *conf = mgr->priv;
-	const size_t prog_latency_7500us = 7500;
-	const size_t prog_pulse_1us = 1;
+	int err;
 
 	if (info->flags & FPGA_MGR_PARTIAL_RECONFIG) {
-		dev_err(&mgr->dev, "Partial reconfiguration not supported.\n");
+		dev_err(&mgr->dev, "Partial reconfiguration not supported\n");
 		return -EINVAL;
 	}
 
 	gpiod_set_value(conf->prog_b, 1);
 
-	udelay(prog_pulse_1us); /* min is 500 ns */
+	err = wait_for_init_b(mgr, 1, 1); /* min is 500 ns */
+	if (err) {
+		gpiod_set_value(conf->prog_b, 0);
+		return err;
+	}
 
 	gpiod_set_value(conf->prog_b, 0);
 
-	if (gpiod_get_value(conf->done)) {
+	err = wait_for_init_b(mgr, 0, 0);
+	if (err)
+		return err;
+
+	if (get_done_gpio(mgr)) {
 		dev_err(&mgr->dev, "Unexpected DONE pin state...\n");
 		return -EIO;
 	}
 
 	/* program latency */
-	usleep_range(prog_latency_7500us, prog_latency_7500us + 100);
+	usleep_range(7500, 7600);
 	return 0;
 }
 
@@ -108,25 +170,46 @@
 				     struct fpga_image_info *info)
 {
 	struct xilinx_spi_conf *conf = mgr->priv;
-	unsigned long timeout;
+	unsigned long timeout = jiffies + usecs_to_jiffies(info->config_complete_timeout_us);
+	bool expired = false;
+	int done;
 	int ret;
 
-	if (gpiod_get_value(conf->done))
-		return xilinx_spi_apply_cclk_cycles(conf);
+	/*
+	 * This loop is carefully written such that if the driver is
+	 * scheduled out for more than 'timeout', we still check for DONE
+	 * before giving up and we apply 8 extra CCLK cycles in all cases.
+	 */
+	while (!expired) {
+		expired = time_after(jiffies, timeout);
 
-	timeout = jiffies + usecs_to_jiffies(info->config_complete_timeout_us);
-
-	while (time_before(jiffies, timeout)) {
+		done = get_done_gpio(mgr);
+		if (done < 0)
+			return done;
 
 		ret = xilinx_spi_apply_cclk_cycles(conf);
 		if (ret)
 			return ret;
 
-		if (gpiod_get_value(conf->done))
-			return xilinx_spi_apply_cclk_cycles(conf);
+		if (done)
+			return 0;
 	}
 
-	dev_err(&mgr->dev, "Timeout after config data transfer.\n");
+	if (conf->init_b) {
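+		/*
+		 * Use INIT_B to disambiguate the failure: the FPGA asserts
+		 * INIT_B (drives it low) to flag a CRC error during
+		 * configuration.
+		 */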
+		ret = gpiod_get_value(conf->init_b);
+
+		if (ret < 0) {
+			dev_err(&mgr->dev, "Error reading INIT_B (%d)\n", ret);
+			return ret;
+		}
+
+		dev_err(&mgr->dev,
+			ret ? "CRC error or invalid device\n"
+			: "Missing sync word or incomplete bitstream\n");
+	} else {
+		dev_err(&mgr->dev, "Timeout after config data transfer\n");
+	}
+
 	return -ETIMEDOUT;
 }
 
@@ -150,18 +233,19 @@
 
 	/* PROGRAM_B is active low */
 	conf->prog_b = devm_gpiod_get(&spi->dev, "prog_b", GPIOD_OUT_LOW);
-	if (IS_ERR(conf->prog_b)) {
-		dev_err(&spi->dev, "Failed to get PROGRAM_B gpio: %ld\n",
-			PTR_ERR(conf->prog_b));
-		return PTR_ERR(conf->prog_b);
-	}
+	if (IS_ERR(conf->prog_b))
+		return dev_err_probe(&spi->dev, PTR_ERR(conf->prog_b),
+				     "Failed to get PROGRAM_B gpio\n");
+
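+	/* Optional: returns NULL, not an error, when no INIT_B GPIO is wired. */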
+	conf->init_b = devm_gpiod_get_optional(&spi->dev, "init-b", GPIOD_IN);
+	if (IS_ERR(conf->init_b))
+		return dev_err_probe(&spi->dev, PTR_ERR(conf->init_b),
+				     "Failed to get INIT_B gpio\n");
 
 	conf->done = devm_gpiod_get(&spi->dev, "done", GPIOD_IN);
-	if (IS_ERR(conf->done)) {
-		dev_err(&spi->dev, "Failed to get DONE gpio: %ld\n",
-			PTR_ERR(conf->done));
-		return PTR_ERR(conf->done);
-	}
+	if (IS_ERR(conf->done))
+		return dev_err_probe(&spi->dev, PTR_ERR(conf->done),
+				     "Failed to get DONE gpio\n");
 
 	mgr = devm_fpga_mgr_create(&spi->dev,
 				   "Xilinx Slave Serial FPGA Manager",
diff --git a/drivers/fpga/zynq-fpga.c b/drivers/fpga/zynq-fpga.c
index 31ef38e..07fa8d9 100644
--- a/drivers/fpga/zynq-fpga.c
+++ b/drivers/fpga/zynq-fpga.c
@@ -578,14 +578,13 @@
 	init_completion(&priv->dma_done);
 
 	priv->irq = platform_get_irq(pdev, 0);
-	if (priv->irq < 0) {
-		dev_err(dev, "No IRQ available\n");
+	if (priv->irq < 0)
 		return priv->irq;
-	}
 
 	priv->clk = devm_clk_get(dev, "ref_clk");
 	if (IS_ERR(priv->clk)) {
-		dev_err(dev, "input clock not found\n");
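+		/* Don't log an error when the clock is merely deferred. */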
+		if (PTR_ERR(priv->clk) != -EPROBE_DEFER)
+			dev_err(dev, "input clock not found\n");
 		return PTR_ERR(priv->clk);
 	}
 
diff --git a/drivers/fpga/zynqmp-fpga.c b/drivers/fpga/zynqmp-fpga.c
index b8a88d2..4a1139e 100644
--- a/drivers/fpga/zynqmp-fpga.c
+++ b/drivers/fpga/zynqmp-fpga.c
@@ -40,16 +40,12 @@
 static int zynqmp_fpga_ops_write(struct fpga_manager *mgr,
 				 const char *buf, size_t size)
 {
-	const struct zynqmp_eemi_ops *eemi_ops = zynqmp_pm_get_eemi_ops();
 	struct zynqmp_fpga_priv *priv;
 	dma_addr_t dma_addr;
 	u32 eemi_flags = 0;
 	char *kbuf;
 	int ret;
 
-	if (IS_ERR_OR_NULL(eemi_ops) || !eemi_ops->fpga_load)
-		return -ENXIO;
-
 	priv = mgr->priv;
 
 	kbuf = dma_alloc_coherent(priv->dev, size, &dma_addr, GFP_KERNEL);
@@ -63,7 +59,7 @@
 	if (priv->flags & FPGA_MGR_PARTIAL_RECONFIG)
 		eemi_flags |= XILINX_ZYNQMP_PM_FPGA_PARTIAL;
 
-	ret = eemi_ops->fpga_load(dma_addr, size, eemi_flags);
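+	/* Ask the platform management firmware to program the bitstream. */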
+	ret = zynqmp_pm_fpga_load(dma_addr, size, eemi_flags);
 
 	dma_free_coherent(priv->dev, size, kbuf, dma_addr);
 
@@ -78,13 +74,9 @@
 
 static enum fpga_mgr_states zynqmp_fpga_ops_state(struct fpga_manager *mgr)
 {
-	const struct zynqmp_eemi_ops *eemi_ops = zynqmp_pm_get_eemi_ops();
-	u32 status;
+	u32 status = 0;
 
-	if (IS_ERR_OR_NULL(eemi_ops) || !eemi_ops->fpga_get_status)
-		return FPGA_MGR_STATE_UNKNOWN;
-
-	eemi_ops->fpga_get_status(&status);
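+	/* On failure, status stays 0 and the state is reported as unknown. */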
+	zynqmp_pm_fpga_get_status(&status);
 	if (status & IXR_FPGA_DONE_MASK)
 		return FPGA_MGR_STATE_OPERATING;