Update Linux to v5.4.2

Sync drivers/firmware with upstream v5.4.2. Notable changes in this
diff:

- PSCI firmware support (ARM_PSCI_FW, ARM_PSCI_CHECKER) moves to the
  new drivers/firmware/psci/ subdirectory; DELL_RBU and DCDBAS drop out
  of this Kconfig and Makefile.
- New firmware Kconfig entries: INTEL_STRATIX10_SERVICE,
  INTEL_STRATIX10_RSU, TRUSTED_FOUNDATIONS and TURRIS_MOX_RWTM; the
  imx/ and xilinx/ Kconfigs are now sourced, and the ISCSI_IBFT
  dependencies are reworked (ISCSI_IBFT now depends on ACPI and selects
  ISCSI_IBFT_FIND on X86).
- SCMI gains the reset protocol, delayed-response (asynchronous)
  transfers, performance fast channels with doorbell support, split
  Tx/Rx mailbox channels, asynchronous clock rate set and sensor reads,
  a proper device release callback, and unaligned-safe buffer accessors
  with strlcpy() for firmware-provided names.
- SPDX license identifiers replace license boilerplate
  (drivers/firmware/Kconfig, the SCMI Makefile and arm_scpi.c).

Change-Id: Idf6911045d9d382da2cfe01b1edff026404ac8fd
diff --git a/drivers/firmware/Kconfig b/drivers/firmware/Kconfig
index 6e83880..e40a77b 100644
--- a/drivers/firmware/Kconfig
+++ b/drivers/firmware/Kconfig
@@ -1,24 +1,11 @@
+# SPDX-License-Identifier: GPL-2.0-only
 #
 # For a description of the syntax of this configuration file,
-# see Documentation/kbuild/kconfig-language.txt.
+# see Documentation/kbuild/kconfig-language.rst.
 #
 
 menu "Firmware Drivers"
 
-config ARM_PSCI_FW
-	bool
-
-config ARM_PSCI_CHECKER
-	bool "ARM PSCI checker"
-	depends on ARM_PSCI_FW && HOTPLUG_CPU && CPU_IDLE && !TORTURE_TEST
-	help
-	  Run the PSCI checker during startup. This checks that hotplug and
-	  suspend operations work correctly when using PSCI.
-
-	  The torture tests may interfere with the PSCI checker by turning CPUs
-	  on and off through hotplug, so for now torture tests and PSCI checker
-	  are mutually exclusive.
-
 config ARM_SCMI_PROTOCOL
 	bool "ARM System Control and Management Interface (SCMI) Message Protocol"
 	depends on ARM || ARM64 || COMPILE_TEST
@@ -145,34 +132,6 @@
 	  See DIG64_HCDPv20_042804.pdf available from
 	  <http://www.dig64.org/specifications/> 
 
-config DELL_RBU
-	tristate "BIOS update support for DELL systems via sysfs"
-	depends on X86
-	select FW_LOADER
-	select FW_LOADER_USER_HELPER
-	help
-	 Say m if you want to have the option of updating the BIOS for your
-	 DELL system. Note you need a Dell OpenManage or Dell Update package (DUP)
-	 supporting application to communicate with the BIOS regarding the new
-	 image for the image update to take effect.
-	 See <file:Documentation/dell_rbu.txt> for more details on the driver.
-
-config DCDBAS
-	tristate "Dell Systems Management Base Driver"
-	depends on X86
-	help
-	  The Dell Systems Management Base Driver provides a sysfs interface
-	  for systems management software to perform System Management
-	  Interrupts (SMIs) and Host Control Actions (system power cycle or
-	  power off after OS shutdown) on certain Dell systems.
-
-	  See <file:Documentation/dcdbas.txt> for more details on the driver
-	  and the Dell systems on which Dell systems management software makes
-	  use of this driver.
-
-	  Say Y or M here to enable the driver for use by Dell systems
-	  management software such as Dell OpenManage.
-
 config DMIID
     bool "Export DMI identification via sysfs to userspace"
     depends on DMI
@@ -198,7 +157,7 @@
 
 config ISCSI_IBFT_FIND
 	bool "iSCSI Boot Firmware Table Attributes"
-	depends on X86 && ACPI
+	depends on X86 && ISCSI_IBFT
 	default n
 	help
 	  This option enables the kernel to find the region of memory
@@ -209,7 +168,8 @@
 config ISCSI_IBFT
 	tristate "iSCSI Boot Firmware Table Attributes module"
 	select ISCSI_BOOT_SYSFS
-	depends on ISCSI_IBFT_FIND && SCSI && SCSI_LOWLEVEL
+	select ISCSI_IBFT_FIND if X86
+	depends on ACPI && SCSI && SCSI_LOWLEVEL
 	default	n
 	help
 	  This option enables support for detection and exposing of iSCSI
@@ -244,6 +204,36 @@
 	  WARNING: Using incorrect parameters (base address in particular)
 	  may crash your system.
 
+config INTEL_STRATIX10_SERVICE
+	tristate "Intel Stratix10 Service Layer"
+	depends on ARCH_STRATIX10 && HAVE_ARM_SMCCC
+	default n
+	help
+	  Intel Stratix10 service layer runs at privileged exception level,
+	  interfaces with the service providers (FPGA manager is one of them)
+	  and manages secure monitor calls to communicate with secure monitor
+	  software at secure monitor exception level.
+
+	  Say Y here if you want Stratix10 service layer support.
+
+config INTEL_STRATIX10_RSU
+	tristate "Intel Stratix10 Remote System Update"
+	depends on INTEL_STRATIX10_SERVICE
+	help
+	  The Intel Remote System Update (RSU) driver exposes interfaces
+	  accessed through the Intel Service Layer to user space via sysfs
+	  device attribute nodes. The RSU interfaces report/control some of
+	  the optional RSU features of the Stratix 10 SoC FPGA.
+
+	  The RSU provides a way for customers to update the boot
+	  configuration of a Stratix 10 SoC device with significantly reduced
+	  risk of corrupting the bitstream storage and bricking the system.
+
+	  Enable RSU support if you are using an Intel SoC FPGA with the RSU
+	  feature enabled and you want Linux user space control.
+
+	  Say Y here if you want Intel RSU support.
+
 config QCOM_SCM
 	bool
 	depends on ARM || ARM64
@@ -283,13 +273,46 @@
 	  This protocol library is used by client drivers to use the features
 	  provided by the system controller.
 
+config TRUSTED_FOUNDATIONS
+	bool "Trusted Foundations secure monitor support"
+	depends on ARM && CPU_V7
+	help
+	  Some devices (including most early Tegra-based consumer devices on
+	  the market) are booted with the Trusted Foundations secure monitor
+	  active, requiring some core operations to be performed by the secure
+	  monitor instead of the kernel.
+
+	  This option allows the kernel to invoke the secure monitor whenever
+	  required on devices using Trusted Foundations. See the functions and
+	  comments in linux/firmware/trusted_foundations.h or the device tree
+	  bindings for "tlm,trusted-foundations" for details on how to use it.
+
+	  Choose N if you don't know what this is about.
+
+config TURRIS_MOX_RWTM
+	tristate "Turris Mox rWTM secure firmware driver"
+	depends on ARCH_MVEBU || COMPILE_TEST
+	depends on HAS_DMA && OF
+	depends on MAILBOX
+	select HW_RANDOM
+	select ARMADA_37XX_RWTM_MBOX
+	help
+	  This driver communicates with the firmware on the Cortex-M3 secure
+	  processor of the Turris Mox router. Enable if you are building for
+	  Turris Mox, and you will be able to read the device serial number and
+	  other manufacturing data and also utilize the Entropy Bit Generator
+	  for hardware random number generation.
+
 config HAVE_ARM_SMCCC
 	bool
 
+source "drivers/firmware/psci/Kconfig"
 source "drivers/firmware/broadcom/Kconfig"
 source "drivers/firmware/google/Kconfig"
 source "drivers/firmware/efi/Kconfig"
+source "drivers/firmware/imx/Kconfig"
 source "drivers/firmware/meson/Kconfig"
 source "drivers/firmware/tegra/Kconfig"
+source "drivers/firmware/xilinx/Kconfig"
 
 endmenu
diff --git a/drivers/firmware/Makefile b/drivers/firmware/Makefile
index e18a041..3fcb919 100644
--- a/drivers/firmware/Makefile
+++ b/drivers/firmware/Makefile
@@ -2,8 +2,6 @@
 #
 # Makefile for the linux kernel.
 #
-obj-$(CONFIG_ARM_PSCI_FW)	+= psci.o
-obj-$(CONFIG_ARM_PSCI_CHECKER)	+= psci_checker.o
 obj-$(CONFIG_ARM_SCPI_PROTOCOL)	+= arm_scpi.o
 obj-$(CONFIG_ARM_SCPI_POWER_DOMAIN) += scpi_pm_domain.o
 obj-$(CONFIG_ARM_SDE_INTERFACE)	+= arm_sdei.o
@@ -11,9 +9,9 @@
 obj-$(CONFIG_DMI_SYSFS)		+= dmi-sysfs.o
 obj-$(CONFIG_EDD)		+= edd.o
 obj-$(CONFIG_EFI_PCDP)		+= pcdp.o
-obj-$(CONFIG_DELL_RBU)          += dell_rbu.o
-obj-$(CONFIG_DCDBAS)		+= dcdbas.o
 obj-$(CONFIG_DMIID)		+= dmi-id.o
+obj-$(CONFIG_INTEL_STRATIX10_SERVICE) += stratix10-svc.o
+obj-$(CONFIG_INTEL_STRATIX10_RSU)     += stratix10-rsu.o
 obj-$(CONFIG_ISCSI_IBFT_FIND)	+= iscsi_ibft_find.o
 obj-$(CONFIG_ISCSI_IBFT)	+= iscsi_ibft.o
 obj-$(CONFIG_FIRMWARE_MEMMAP)	+= memmap.o
@@ -24,11 +22,16 @@
 obj-$(CONFIG_QCOM_SCM_32)	+= qcom_scm-32.o
 CFLAGS_qcom_scm-32.o :=$(call as-instr,.arch armv7-a\n.arch_extension sec,-DREQUIRES_SEC=1) -march=armv7-a
 obj-$(CONFIG_TI_SCI_PROTOCOL)	+= ti_sci.o
+obj-$(CONFIG_TRUSTED_FOUNDATIONS) += trusted_foundations.o
+obj-$(CONFIG_TURRIS_MOX_RWTM)	+= turris-mox-rwtm.o
 
 obj-$(CONFIG_ARM_SCMI_PROTOCOL)	+= arm_scmi/
+obj-y				+= psci/
 obj-y				+= broadcom/
 obj-y				+= meson/
 obj-$(CONFIG_GOOGLE_FIRMWARE)	+= google/
 obj-$(CONFIG_EFI)		+= efi/
 obj-$(CONFIG_UEFI_CPER)		+= efi/
+obj-y				+= imx/
 obj-y				+= tegra/
+obj-y				+= xilinx/
diff --git a/drivers/firmware/arm_scmi/Makefile b/drivers/firmware/arm_scmi/Makefile
index 99e36c5..5f298f0 100644
--- a/drivers/firmware/arm_scmi/Makefile
+++ b/drivers/firmware/arm_scmi/Makefile
@@ -1,5 +1,6 @@
+# SPDX-License-Identifier: GPL-2.0-only
 obj-y	= scmi-bus.o scmi-driver.o scmi-protocols.o
 scmi-bus-y = bus.o
 scmi-driver-y = driver.o
-scmi-protocols-y = base.o clock.o perf.o power.o sensors.o
+scmi-protocols-y = base.o clock.o perf.o power.o reset.o sensors.o
 obj-$(CONFIG_ARM_SCMI_POWER_DOMAIN) += scmi_pm_domain.o
diff --git a/drivers/firmware/arm_scmi/base.c b/drivers/firmware/arm_scmi/base.c
index 9dff33e..f804e8a 100644
--- a/drivers/firmware/arm_scmi/base.c
+++ b/drivers/firmware/arm_scmi/base.c
@@ -204,11 +204,11 @@
 	if (ret)
 		return ret;
 
-	*(__le32 *)t->tx.buf = cpu_to_le32(id);
+	put_unaligned_le32(id, t->tx.buf);
 
 	ret = scmi_do_xfer(handle, t);
 	if (!ret)
-		memcpy(name, t->rx.buf, SCMI_MAX_STR_SIZE);
+		strlcpy(name, t->rx.buf, SCMI_MAX_STR_SIZE);
 
 	scmi_xfer_put(handle, t);
 
diff --git a/drivers/firmware/arm_scmi/bus.c b/drivers/firmware/arm_scmi/bus.c
index 472c88a..92f843e 100644
--- a/drivers/firmware/arm_scmi/bus.c
+++ b/drivers/firmware/arm_scmi/bus.c
@@ -119,6 +119,11 @@
 }
 EXPORT_SYMBOL_GPL(scmi_driver_unregister);
 
+static void scmi_device_release(struct device *dev)
+{
+	kfree(to_scmi_dev(dev));
+}
+
 struct scmi_device *
 scmi_device_create(struct device_node *np, struct device *parent, int protocol)
 {
@@ -138,6 +143,7 @@
 	scmi_dev->dev.parent = parent;
 	scmi_dev->dev.of_node = np;
 	scmi_dev->dev.bus = &scmi_bus_type;
+	scmi_dev->dev.release = scmi_device_release;
 	dev_set_name(&scmi_dev->dev, "scmi_dev.%d", id);
 
 	retval = device_register(&scmi_dev->dev);
@@ -156,9 +162,8 @@
 void scmi_device_destroy(struct scmi_device *scmi_dev)
 {
 	scmi_handle_put(scmi_dev->handle);
-	device_unregister(&scmi_dev->dev);
 	ida_simple_remove(&scmi_bus_id, scmi_dev->id);
-	kfree(scmi_dev);
+	device_unregister(&scmi_dev->dev);
 }
 
 void scmi_set_handle(struct scmi_device *scmi_dev)
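Context for the bus.c hunk above: it adopts the standard driver-model rule that
the memory backing a struct device may only be freed from its .release
callback, once the final reference is dropped (device_unregister() or
put_device()). A minimal sketch of that pattern, using hypothetical my_dev
names rather than the driver's own:

#include <linux/device.h>
#include <linux/slab.h>

struct my_dev {				/* hypothetical container */
	struct device dev;
};

static void my_dev_release(struct device *dev)
{
	/* Runs on the final put_device(): only now is freeing safe. */
	kfree(container_of(dev, struct my_dev, dev));
}

static struct my_dev *my_dev_create(struct device *parent)
{
	struct my_dev *d = kzalloc(sizeof(*d), GFP_KERNEL);

	if (!d)
		return NULL;

	d->dev.parent = parent;
	d->dev.release = my_dev_release;
	dev_set_name(&d->dev, "my_dev");

	if (device_register(&d->dev)) {
		put_device(&d->dev);	/* frees d via my_dev_release() */
		return NULL;
	}
	return d;
}

Teardown is then a single device_unregister(), which drops the last reference
and ends in my_dev_release(); that is why scmi_device_destroy() above no
longer calls kfree() itself.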
diff --git a/drivers/firmware/arm_scmi/clock.c b/drivers/firmware/arm_scmi/clock.c
index e4119eb..32526a7 100644
--- a/drivers/firmware/arm_scmi/clock.c
+++ b/drivers/firmware/arm_scmi/clock.c
@@ -56,7 +56,7 @@
 struct scmi_clock_set_rate {
 	__le32 flags;
 #define CLOCK_SET_ASYNC		BIT(0)
-#define CLOCK_SET_DELAYED	BIT(1)
+#define CLOCK_SET_IGNORE_RESP	BIT(1)
 #define CLOCK_SET_ROUND_UP	BIT(2)
 #define CLOCK_SET_ROUND_AUTO	BIT(3)
 	__le32 id;
@@ -67,6 +67,7 @@
 struct clock_info {
 	int num_clocks;
 	int max_async_req;
+	atomic_t cur_async_req;
 	struct scmi_clock_info *clk;
 };
 
@@ -106,12 +107,12 @@
 	if (ret)
 		return ret;
 
-	*(__le32 *)t->tx.buf = cpu_to_le32(clk_id);
+	put_unaligned_le32(clk_id, t->tx.buf);
 	attr = t->rx.buf;
 
 	ret = scmi_do_xfer(handle, t);
 	if (!ret)
-		memcpy(clk->name, attr->name, SCMI_MAX_STR_SIZE);
+		strlcpy(clk->name, attr->name, SCMI_MAX_STR_SIZE);
 	else
 		clk->name[0] = '\0';
 
@@ -185,6 +186,8 @@
 	if (rate_discrete)
 		clk->list.num_rates = tot_rate_cnt;
 
+	clk->rate_discrete = rate_discrete;
+
 err:
 	scmi_xfer_put(handle, t);
 	return ret;
@@ -201,39 +204,47 @@
 	if (ret)
 		return ret;
 
-	*(__le32 *)t->tx.buf = cpu_to_le32(clk_id);
+	put_unaligned_le32(clk_id, t->tx.buf);
 
 	ret = scmi_do_xfer(handle, t);
-	if (!ret) {
-		__le32 *pval = t->rx.buf;
-
-		*value = le32_to_cpu(*pval);
-		*value |= (u64)le32_to_cpu(*(pval + 1)) << 32;
-	}
+	if (!ret)
+		*value = get_unaligned_le64(t->rx.buf);
 
 	scmi_xfer_put(handle, t);
 	return ret;
 }
 
 static int scmi_clock_rate_set(const struct scmi_handle *handle, u32 clk_id,
-			       u32 config, u64 rate)
+			       u64 rate)
 {
 	int ret;
+	u32 flags = 0;
 	struct scmi_xfer *t;
 	struct scmi_clock_set_rate *cfg;
+	struct clock_info *ci = handle->clk_priv;
 
 	ret = scmi_xfer_get_init(handle, CLOCK_RATE_SET, SCMI_PROTOCOL_CLOCK,
 				 sizeof(*cfg), 0, &t);
 	if (ret)
 		return ret;
 
+	if (ci->max_async_req &&
+	    atomic_inc_return(&ci->cur_async_req) < ci->max_async_req)
+		flags |= CLOCK_SET_ASYNC;
+
 	cfg = t->tx.buf;
-	cfg->flags = cpu_to_le32(config);
+	cfg->flags = cpu_to_le32(flags);
 	cfg->id = cpu_to_le32(clk_id);
 	cfg->value_low = cpu_to_le32(rate & 0xffffffff);
 	cfg->value_high = cpu_to_le32(rate >> 32);
 
-	ret = scmi_do_xfer(handle, t);
+	if (flags & CLOCK_SET_ASYNC)
+		ret = scmi_do_xfer_with_response(handle, t);
+	else
+		ret = scmi_do_xfer(handle, t);
+
+	if (ci->max_async_req)
+		atomic_dec(&ci->cur_async_req);
 
 	scmi_xfer_put(handle, t);
 	return ret;
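The CLOCK_RATE_SET path above now uses asynchronous commands opportunistically,
but only while the number of in-flight requests stays below the limit the
platform advertised, tracked with an atomic counter. The gating logic reduced
to a self-contained sketch (MAX_ASYNC and the helper names are made up for
illustration):

#include <linux/atomic.h>

#define MAX_ASYNC	4		/* hypothetical platform limit */

static atomic_t cur_async = ATOMIC_INIT(0);

static bool start_request(void)
{
	/* Go async only while fewer than MAX_ASYNC requests are pending. */
	return atomic_inc_return(&cur_async) < MAX_ASYNC;
}

static void end_request(void)
{
	atomic_dec(&cur_async);
}

When start_request() returns false the caller simply falls back to an
ordinary synchronous transfer; the decrement runs whether or not the
asynchronous path was taken, so the counter never drifts.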
diff --git a/drivers/firmware/arm_scmi/common.h b/drivers/firmware/arm_scmi/common.h
index 937a930..5237c2f 100644
--- a/drivers/firmware/arm_scmi/common.h
+++ b/drivers/firmware/arm_scmi/common.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
 /*
  * System Control and Management Interface (SCMI) Message Protocol
  * driver common header file containing some definitions, structures
@@ -15,6 +15,8 @@
 #include <linux/scmi_protocol.h>
 #include <linux/types.h>
 
+#include <asm/unaligned.h>
+
 #define PROTOCOL_REV_MINOR_MASK	GENMASK(15, 0)
 #define PROTOCOL_REV_MAJOR_MASK	GENMASK(31, 16)
 #define PROTOCOL_REV_MAJOR(x)	(u16)(FIELD_GET(PROTOCOL_REV_MAJOR_MASK, (x)))
@@ -48,11 +50,11 @@
 /**
  * struct scmi_msg_hdr - Message(Tx/Rx) header
  *
- * @id: The identifier of the command being sent
- * @protocol_id: The identifier of the protocol used to send @id command
- * @seq: The token to identify the message. when a message/command returns,
- *	the platform returns the whole message header unmodified including
- *	the token
+ * @id: The identifier of the message being sent
+ * @protocol_id: The identifier of the protocol used to send @id message
+ * @seq: The token to identify the message. When a message returns, the
+ *	platform returns the whole message header unmodified including the
+ *	token
  * @status: Status of the transfer once it's complete
  * @poll_completion: Indicate if the transfer needs to be polled for
  *	completion or interrupt mode is used
@@ -84,17 +86,21 @@
  * @rx: Receive message, the buffer should be pre-allocated to store
  *	message. If request-ACK protocol is used, we can reuse the same
  *	buffer for the rx path as we use for the tx path.
- * @done: completion event
+ * @done: command message transmit completion event
+ * @async: pointer to delayed response message received event completion
  */
 struct scmi_xfer {
 	struct scmi_msg_hdr hdr;
 	struct scmi_msg tx;
 	struct scmi_msg rx;
 	struct completion done;
+	struct completion *async_done;
 };
 
 void scmi_xfer_put(const struct scmi_handle *h, struct scmi_xfer *xfer);
 int scmi_do_xfer(const struct scmi_handle *h, struct scmi_xfer *xfer);
+int scmi_do_xfer_with_response(const struct scmi_handle *h,
+			       struct scmi_xfer *xfer);
 int scmi_xfer_get_init(const struct scmi_handle *h, u8 msg_id, u8 prot_id,
 		       size_t tx_size, size_t rx_size, struct scmi_xfer **p);
 int scmi_handle_put(const struct scmi_handle *handle);
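Two conversions recur throughout the protocol files that include this header:
raw casts of tx.buf/rx.buf become put_unaligned_le32()/get_unaligned_le*()
(hence the new <asm/unaligned.h> include), and memcpy() of firmware-provided
names becomes strlcpy() so the destination is always NUL-terminated. A trivial
illustration, with NAME_SZ and fill_example() as made-up stand-ins:

#include <linux/string.h>
#include <asm/unaligned.h>

#define NAME_SZ	16	/* stands in for SCMI_MAX_STR_SIZE */

static void fill_example(void *txbuf, char *dst, const char *fw_name)
{
	/* Correct on any architecture even if txbuf is not 4-byte aligned. */
	put_unaligned_le32(0x12345678, txbuf);

	/* Always NUL-terminates dst, unlike the memcpy() it replaces. */
	strlcpy(dst, fw_name, NAME_SZ);
}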
diff --git a/drivers/firmware/arm_scmi/driver.c b/drivers/firmware/arm_scmi/driver.c
index 8f952f2..3eb0382 100644
--- a/drivers/firmware/arm_scmi/driver.c
+++ b/drivers/firmware/arm_scmi/driver.c
@@ -30,8 +30,14 @@
 #include "common.h"
 
 #define MSG_ID_MASK		GENMASK(7, 0)
+#define MSG_XTRACT_ID(hdr)	FIELD_GET(MSG_ID_MASK, (hdr))
 #define MSG_TYPE_MASK		GENMASK(9, 8)
+#define MSG_XTRACT_TYPE(hdr)	FIELD_GET(MSG_TYPE_MASK, (hdr))
+#define MSG_TYPE_COMMAND	0
+#define MSG_TYPE_DELAYED_RESP	2
+#define MSG_TYPE_NOTIFICATION	3
 #define MSG_PROTOCOL_ID_MASK	GENMASK(17, 10)
+#define MSG_XTRACT_PROT_ID(hdr)	FIELD_GET(MSG_PROTOCOL_ID_MASK, (hdr))
 #define MSG_TOKEN_ID_MASK	GENMASK(27, 18)
 #define MSG_XTRACT_TOKEN(hdr)	FIELD_GET(MSG_TOKEN_ID_MASK, (hdr))
 #define MSG_TOKEN_MAX		(MSG_XTRACT_TOKEN(MSG_TOKEN_ID_MASK) + 1)
@@ -86,7 +92,7 @@
 };
 
 /**
- * struct scmi_chan_info - Structure representing a SCMI channel informfation
+ * struct scmi_chan_info - Structure representing SCMI channel information
  *
  * @cl: Mailbox Client
  * @chan: Transmit/Receive mailbox channel
@@ -111,8 +117,9 @@
  * @handle: Instance of SCMI handle to send to clients
  * @version: SCMI revision information containing protocol version,
  *	implementation version and (sub-)vendor identification.
- * @minfo: Message info
- * @tx_idr: IDR object to map protocol id to channel info pointer
+ * @tx_minfo: Universal Transmit Message management info
+ * @tx_idr: IDR object to map protocol id to Tx channel info pointer
+ * @rx_idr: IDR object to map protocol id to Rx channel info pointer
  * @protocols_imp: List of protocols implemented, currently maximum of
  *	MAX_PROTOCOLS_IMP elements allocated by the base protocol
  * @node: List head
@@ -123,8 +130,9 @@
 	const struct scmi_desc *desc;
 	struct scmi_revision_info version;
 	struct scmi_handle handle;
-	struct scmi_xfers_info minfo;
+	struct scmi_xfers_info tx_minfo;
 	struct idr tx_idr;
+	struct idr rx_idr;
 	u8 *protocols_imp;
 	struct list_head node;
 	int users;
@@ -182,7 +190,7 @@
 static inline void scmi_dump_header_dbg(struct device *dev,
 					struct scmi_msg_hdr *hdr)
 {
-	dev_dbg(dev, "Command ID: %x Sequence ID: %x Protocol: %x\n",
+	dev_dbg(dev, "Message ID: %x Sequence ID: %x Protocol: %x\n",
 		hdr->id, hdr->seq, hdr->protocol_id);
 }
 
@@ -190,7 +198,7 @@
 				struct scmi_shared_mem __iomem *mem)
 {
 	xfer->hdr.status = ioread32(mem->msg_payload);
-	/* Skip the length of header and statues in payload area i.e 8 bytes*/
+	/* Skip the length of header and status in payload area i.e 8 bytes */
 	xfer->rx.len = min_t(size_t, xfer->rx.len, ioread32(&mem->length) - 8);
 
 	/* Take a copy to the rx buffer.. */
@@ -198,56 +206,12 @@
 }
 
 /**
- * scmi_rx_callback() - mailbox client callback for receive messages
- *
- * @cl: client pointer
- * @m: mailbox message
- *
- * Processes one received message to appropriate transfer information and
- * signals completion of the transfer.
- *
- * NOTE: This function will be invoked in IRQ context, hence should be
- * as optimal as possible.
- */
-static void scmi_rx_callback(struct mbox_client *cl, void *m)
-{
-	u16 xfer_id;
-	struct scmi_xfer *xfer;
-	struct scmi_chan_info *cinfo = client_to_scmi_chan_info(cl);
-	struct device *dev = cinfo->dev;
-	struct scmi_info *info = handle_to_scmi_info(cinfo->handle);
-	struct scmi_xfers_info *minfo = &info->minfo;
-	struct scmi_shared_mem __iomem *mem = cinfo->payload;
-
-	xfer_id = MSG_XTRACT_TOKEN(ioread32(&mem->msg_header));
-
-	/* Are we even expecting this? */
-	if (!test_bit(xfer_id, minfo->xfer_alloc_table)) {
-		dev_err(dev, "message for %d is not expected!\n", xfer_id);
-		return;
-	}
-
-	xfer = &minfo->xfer_block[xfer_id];
-
-	scmi_dump_header_dbg(dev, &xfer->hdr);
-	/* Is the message of valid length? */
-	if (xfer->rx.len > info->desc->max_msg_size) {
-		dev_err(dev, "unable to handle %zu xfer(max %d)\n",
-			xfer->rx.len, info->desc->max_msg_size);
-		return;
-	}
-
-	scmi_fetch_response(xfer, mem);
-	complete(&xfer->done);
-}
-
-/**
  * pack_scmi_header() - packs and returns 32-bit header
  *
  * @hdr: pointer to header containing all the information on message id,
  *	protocol id and sequence id.
  *
- * Return: 32-bit packed command header to be sent to the platform.
+ * Return: 32-bit packed message header to be sent to the platform.
  */
 static inline u32 pack_scmi_header(struct scmi_msg_hdr *hdr)
 {
@@ -257,6 +221,18 @@
 }
 
 /**
+ * unpack_scmi_header() - unpacks and records message and protocol id
+ *
+ * @msg_hdr: 32-bit packed message header sent from the platform
+ * @hdr: pointer to header to fetch message and protocol id.
+ */
+static inline void unpack_scmi_header(u32 msg_hdr, struct scmi_msg_hdr *hdr)
+{
+	hdr->id = MSG_XTRACT_ID(msg_hdr);
+	hdr->protocol_id = MSG_XTRACT_PROT_ID(msg_hdr);
+}
+
+/**
  * scmi_tx_prepare() - mailbox client callback to prepare for the transfer
  *
  * @cl: client pointer
@@ -271,6 +247,14 @@
 	struct scmi_chan_info *cinfo = client_to_scmi_chan_info(cl);
 	struct scmi_shared_mem __iomem *mem = cinfo->payload;
 
+	/*
+	 * Ideally the channel must be free by now, unless the OS timed out
+	 * the last request and the platform is still processing it; wait
+	 * until it releases the shared memory, otherwise we may end up
+	 * overwriting its response with a new message payload or vice-versa.
+	 */
+	spin_until_cond(ioread32(&mem->channel_status) &
+			SCMI_SHMEM_CHAN_STAT_CHANNEL_FREE);
 	/* Mark channel busy + clear error */
 	iowrite32(0x0, &mem->channel_status);
 	iowrite32(t->hdr.poll_completion ? 0 : SCMI_SHMEM_FLAG_INTR_ENABLED,
@@ -285,8 +269,9 @@
  * scmi_xfer_get() - Allocate one message
  *
  * @handle: Pointer to SCMI entity handle
+ * @minfo: Pointer to Tx/Rx Message management info based on channel type
  *
- * Helper function which is used by various command functions that are
+ * Helper function which is used by various message functions that are
  * exposed to clients of this driver for allocating a message traffic event.
  *
  * This function can sleep depending on pending requests already in the system
@@ -295,13 +280,13 @@
  *
  * Return: 0 if all went fine, else corresponding error.
  */
-static struct scmi_xfer *scmi_xfer_get(const struct scmi_handle *handle)
+static struct scmi_xfer *scmi_xfer_get(const struct scmi_handle *handle,
+				       struct scmi_xfers_info *minfo)
 {
 	u16 xfer_id;
 	struct scmi_xfer *xfer;
 	unsigned long flags, bit_pos;
 	struct scmi_info *info = handle_to_scmi_info(handle);
-	struct scmi_xfers_info *minfo = &info->minfo;
 
 	/* Keep the locked section as small as possible */
 	spin_lock_irqsave(&minfo->xfer_lock, flags);
@@ -324,18 +309,17 @@
 }
 
 /**
- * scmi_xfer_put() - Release a message
+ * __scmi_xfer_put() - Release a message
  *
- * @handle: Pointer to SCMI entity handle
+ * @minfo: Pointer to Tx/Rx Message management info based on channel type
  * @xfer: message that was reserved by scmi_xfer_get
  *
  * This holds a spinlock to maintain integrity of internal data structures.
  */
-void scmi_xfer_put(const struct scmi_handle *handle, struct scmi_xfer *xfer)
+static void
+__scmi_xfer_put(struct scmi_xfers_info *minfo, struct scmi_xfer *xfer)
 {
 	unsigned long flags;
-	struct scmi_info *info = handle_to_scmi_info(handle);
-	struct scmi_xfers_info *minfo = &info->minfo;
 
 	/*
 	 * Keep the locked section as small as possible
@@ -347,6 +331,68 @@
 	spin_unlock_irqrestore(&minfo->xfer_lock, flags);
 }
 
+/**
+ * scmi_rx_callback() - mailbox client callback for receive messages
+ *
+ * @cl: client pointer
+ * @m: mailbox message
+ *
+ * Processes one received message to appropriate transfer information and
+ * signals completion of the transfer.
+ *
+ * NOTE: This function will be invoked in IRQ context, hence should be
+ * as optimal as possible.
+ */
+static void scmi_rx_callback(struct mbox_client *cl, void *m)
+{
+	u8 msg_type;
+	u32 msg_hdr;
+	u16 xfer_id;
+	struct scmi_xfer *xfer;
+	struct scmi_chan_info *cinfo = client_to_scmi_chan_info(cl);
+	struct device *dev = cinfo->dev;
+	struct scmi_info *info = handle_to_scmi_info(cinfo->handle);
+	struct scmi_xfers_info *minfo = &info->tx_minfo;
+	struct scmi_shared_mem __iomem *mem = cinfo->payload;
+
+	msg_hdr = ioread32(&mem->msg_header);
+	msg_type = MSG_XTRACT_TYPE(msg_hdr);
+	xfer_id = MSG_XTRACT_TOKEN(msg_hdr);
+
+	if (msg_type == MSG_TYPE_NOTIFICATION)
+		return; /* Notifications not yet supported */
+
+	/* Are we even expecting this? */
+	if (!test_bit(xfer_id, minfo->xfer_alloc_table)) {
+		dev_err(dev, "message for %d is not expected!\n", xfer_id);
+		return;
+	}
+
+	xfer = &minfo->xfer_block[xfer_id];
+
+	scmi_dump_header_dbg(dev, &xfer->hdr);
+
+	scmi_fetch_response(xfer, mem);
+
+	if (msg_type == MSG_TYPE_DELAYED_RESP)
+		complete(xfer->async_done);
+	else
+		complete(&xfer->done);
+}
+
+/**
+ * scmi_xfer_put() - Release a transmit message
+ *
+ * @handle: Pointer to SCMI entity handle
+ * @xfer: message that was reserved by scmi_xfer_get
+ */
+void scmi_xfer_put(const struct scmi_handle *handle, struct scmi_xfer *xfer)
+{
+	struct scmi_info *info = handle_to_scmi_info(handle);
+
+	__scmi_xfer_put(&info->tx_minfo, xfer);
+}
+
 static bool
 scmi_xfer_poll_done(const struct scmi_chan_info *cinfo, struct scmi_xfer *xfer)
 {
@@ -435,8 +481,36 @@
 	return ret;
 }
 
+#define SCMI_MAX_RESPONSE_TIMEOUT	(2 * MSEC_PER_SEC)
+
 /**
- * scmi_xfer_get_init() - Allocate and initialise one message
+ * scmi_do_xfer_with_response() - Do one transfer and wait until the delayed
+ *	response is received
+ *
+ * @handle: Pointer to SCMI entity handle
+ * @xfer: Transfer to initiate and wait for response
+ *
+ * Return: 0 if all goes well, the corresponding error code on transmit
+ *	failure, or -ETIMEDOUT if no delayed response arrives.
+ */
+int scmi_do_xfer_with_response(const struct scmi_handle *handle,
+			       struct scmi_xfer *xfer)
+{
+	int ret, timeout = msecs_to_jiffies(SCMI_MAX_RESPONSE_TIMEOUT);
+	DECLARE_COMPLETION_ONSTACK(async_response);
+
+	xfer->async_done = &async_response;
+
+	ret = scmi_do_xfer(handle, xfer);
+	if (!ret && !wait_for_completion_timeout(xfer->async_done, timeout))
+		ret = -ETIMEDOUT;
+
+	xfer->async_done = NULL;
+	return ret;
+}
+
+/**
+ * scmi_xfer_get_init() - Allocate and initialise one message for transmit
  *
  * @handle: Pointer to SCMI entity handle
  * @msg_id: Message identifier
@@ -457,6 +531,7 @@
 	int ret;
 	struct scmi_xfer *xfer;
 	struct scmi_info *info = handle_to_scmi_info(handle);
+	struct scmi_xfers_info *minfo = &info->tx_minfo;
 	struct device *dev = info->dev;
 
 	/* Ensure we have sane transfer sizes */
@@ -464,7 +539,7 @@
 	    tx_size > info->desc->max_msg_size)
 		return -ERANGE;
 
-	xfer = scmi_xfer_get(handle);
+	xfer = scmi_xfer_get(handle, minfo);
 	if (IS_ERR(xfer)) {
 		ret = PTR_ERR(xfer);
 		dev_err(dev, "failed to get free message slot(%d)\n", ret);
@@ -597,27 +672,13 @@
 	return 0;
 }
 
-static const struct scmi_desc scmi_generic_desc = {
-	.max_rx_timeout_ms = 30,	/* We may increase this if required */
-	.max_msg = 20,		/* Limited by MBOX_TX_QUEUE_LEN */
-	.max_msg_size = 128,
-};
-
-/* Each compatible listed below must have descriptor associated with it */
-static const struct of_device_id scmi_of_match[] = {
-	{ .compatible = "arm,scmi", .data = &scmi_generic_desc },
-	{ /* Sentinel */ },
-};
-
-MODULE_DEVICE_TABLE(of, scmi_of_match);
-
 static int scmi_xfer_info_init(struct scmi_info *sinfo)
 {
 	int i;
 	struct scmi_xfer *xfer;
 	struct device *dev = sinfo->dev;
 	const struct scmi_desc *desc = sinfo->desc;
-	struct scmi_xfers_info *info = &sinfo->minfo;
+	struct scmi_xfers_info *info = &sinfo->tx_minfo;
 
 	/* Pre-allocated messages, no more than what hdr.seq can support */
 	if (WARN_ON(desc->max_msg >= MSG_TOKEN_MAX)) {
@@ -652,11 +713,189 @@
 	return 0;
 }
 
-static int scmi_mailbox_check(struct device_node *np)
+static int scmi_mailbox_check(struct device_node *np, int idx)
 {
-	struct of_phandle_args arg;
+	return of_parse_phandle_with_args(np, "mboxes", "#mbox-cells",
+					  idx, NULL);
+}
 
-	return of_parse_phandle_with_args(np, "mboxes", "#mbox-cells", 0, &arg);
+static int scmi_mbox_chan_setup(struct scmi_info *info, struct device *dev,
+				int prot_id, bool tx)
+{
+	int ret, idx;
+	struct resource res;
+	resource_size_t size;
+	struct device_node *shmem, *np = dev->of_node;
+	struct scmi_chan_info *cinfo;
+	struct mbox_client *cl;
+	struct idr *idr;
+	const char *desc = tx ? "Tx" : "Rx";
+
+	/* Transmit channel is first entry i.e. index 0 */
+	idx = tx ? 0 : 1;
+	idr = tx ? &info->tx_idr : &info->rx_idr;
+
+	if (scmi_mailbox_check(np, idx)) {
+		cinfo = idr_find(idr, SCMI_PROTOCOL_BASE);
+		if (unlikely(!cinfo)) /* Possible only if platform has no Rx */
+			return -EINVAL;
+		goto idr_alloc;
+	}
+
+	cinfo = devm_kzalloc(info->dev, sizeof(*cinfo), GFP_KERNEL);
+	if (!cinfo)
+		return -ENOMEM;
+
+	cinfo->dev = dev;
+
+	cl = &cinfo->cl;
+	cl->dev = dev;
+	cl->rx_callback = scmi_rx_callback;
+	cl->tx_prepare = tx ? scmi_tx_prepare : NULL;
+	cl->tx_block = false;
+	cl->knows_txdone = tx;
+
+	shmem = of_parse_phandle(np, "shmem", idx);
+	ret = of_address_to_resource(shmem, 0, &res);
+	of_node_put(shmem);
+	if (ret) {
+		dev_err(dev, "failed to get SCMI %s payload memory\n", desc);
+		return ret;
+	}
+
+	size = resource_size(&res);
+	cinfo->payload = devm_ioremap(info->dev, res.start, size);
+	if (!cinfo->payload) {
+		dev_err(dev, "failed to ioremap SCMI %s payload\n", desc);
+		return -EADDRNOTAVAIL;
+	}
+
+	cinfo->chan = mbox_request_channel(cl, idx);
+	if (IS_ERR(cinfo->chan)) {
+		ret = PTR_ERR(cinfo->chan);
+		if (ret != -EPROBE_DEFER)
+			dev_err(dev, "failed to request SCMI %s mailbox\n",
+				desc);
+		return ret;
+	}
+
+idr_alloc:
+	ret = idr_alloc(idr, cinfo, prot_id, prot_id + 1, GFP_KERNEL);
+	if (ret != prot_id) {
+		dev_err(dev, "unable to allocate SCMI idr slot err %d\n", ret);
+		return ret;
+	}
+
+	cinfo->handle = &info->handle;
+	return 0;
+}
+
+static inline int
+scmi_mbox_txrx_setup(struct scmi_info *info, struct device *dev, int prot_id)
+{
+	int ret = scmi_mbox_chan_setup(info, dev, prot_id, true);
+
+	if (!ret) /* Rx is optional, hence no error check */
+		scmi_mbox_chan_setup(info, dev, prot_id, false);
+
+	return ret;
+}
+
+static inline void
+scmi_create_protocol_device(struct device_node *np, struct scmi_info *info,
+			    int prot_id)
+{
+	struct scmi_device *sdev;
+
+	sdev = scmi_device_create(np, info->dev, prot_id);
+	if (!sdev) {
+		dev_err(info->dev, "failed to create %d protocol device\n",
+			prot_id);
+		return;
+	}
+
+	if (scmi_mbox_txrx_setup(info, &sdev->dev, prot_id)) {
+		dev_err(&sdev->dev, "failed to setup transport\n");
+		scmi_device_destroy(sdev);
+		return;
+	}
+
+	/* setup handle now as the transport is ready */
+	scmi_set_handle(sdev);
+}
+
+static int scmi_probe(struct platform_device *pdev)
+{
+	int ret;
+	struct scmi_handle *handle;
+	const struct scmi_desc *desc;
+	struct scmi_info *info;
+	struct device *dev = &pdev->dev;
+	struct device_node *child, *np = dev->of_node;
+
+	/* Only mailbox method supported, check for the presence of one */
+	if (scmi_mailbox_check(np, 0)) {
+		dev_err(dev, "no mailbox found in %pOF\n", np);
+		return -EINVAL;
+	}
+
+	desc = of_device_get_match_data(dev);
+	if (!desc)
+		return -EINVAL;
+
+	info = devm_kzalloc(dev, sizeof(*info), GFP_KERNEL);
+	if (!info)
+		return -ENOMEM;
+
+	info->dev = dev;
+	info->desc = desc;
+	INIT_LIST_HEAD(&info->node);
+
+	ret = scmi_xfer_info_init(info);
+	if (ret)
+		return ret;
+
+	platform_set_drvdata(pdev, info);
+	idr_init(&info->tx_idr);
+	idr_init(&info->rx_idr);
+
+	handle = &info->handle;
+	handle->dev = info->dev;
+	handle->version = &info->version;
+
+	ret = scmi_mbox_txrx_setup(info, dev, SCMI_PROTOCOL_BASE);
+	if (ret)
+		return ret;
+
+	ret = scmi_base_protocol_init(handle);
+	if (ret) {
+		dev_err(dev, "unable to communicate with SCMI(%d)\n", ret);
+		return ret;
+	}
+
+	mutex_lock(&scmi_list_mutex);
+	list_add_tail(&info->node, &scmi_list);
+	mutex_unlock(&scmi_list_mutex);
+
+	for_each_available_child_of_node(np, child) {
+		u32 prot_id;
+
+		if (of_property_read_u32(child, "reg", &prot_id))
+			continue;
+
+		if (!FIELD_FIT(MSG_PROTOCOL_ID_MASK, prot_id))
+			dev_err(dev, "Out of range protocol %d\n", prot_id);
+
+		if (!scmi_is_protocol_implemented(handle, prot_id)) {
+			dev_err(dev, "SCMI protocol %d not implemented\n",
+				prot_id);
+			continue;
+		}
+
+		scmi_create_protocol_device(child, info, prot_id);
+	}
+
+	return 0;
 }
 
 static int scmi_mbox_free_channel(int id, void *p, void *data)
@@ -694,165 +933,26 @@
 	ret = idr_for_each(idr, scmi_mbox_free_channel, idr);
 	idr_destroy(&info->tx_idr);
 
+	idr = &info->rx_idr;
+	ret = idr_for_each(idr, scmi_mbox_free_channel, idr);
+	idr_destroy(&info->rx_idr);
+
 	return ret;
 }
 
-static inline int
-scmi_mbox_chan_setup(struct scmi_info *info, struct device *dev, int prot_id)
-{
-	int ret;
-	struct resource res;
-	resource_size_t size;
-	struct device_node *shmem, *np = dev->of_node;
-	struct scmi_chan_info *cinfo;
-	struct mbox_client *cl;
+static const struct scmi_desc scmi_generic_desc = {
+	.max_rx_timeout_ms = 30,	/* We may increase this if required */
+	.max_msg = 20,		/* Limited by MBOX_TX_QUEUE_LEN */
+	.max_msg_size = 128,
+};
 
-	if (scmi_mailbox_check(np)) {
-		cinfo = idr_find(&info->tx_idr, SCMI_PROTOCOL_BASE);
-		goto idr_alloc;
-	}
+/* Each compatible listed below must have descriptor associated with it */
+static const struct of_device_id scmi_of_match[] = {
+	{ .compatible = "arm,scmi", .data = &scmi_generic_desc },
+	{ /* Sentinel */ },
+};
 
-	cinfo = devm_kzalloc(info->dev, sizeof(*cinfo), GFP_KERNEL);
-	if (!cinfo)
-		return -ENOMEM;
-
-	cinfo->dev = dev;
-
-	cl = &cinfo->cl;
-	cl->dev = dev;
-	cl->rx_callback = scmi_rx_callback;
-	cl->tx_prepare = scmi_tx_prepare;
-	cl->tx_block = false;
-	cl->knows_txdone = true;
-
-	shmem = of_parse_phandle(np, "shmem", 0);
-	ret = of_address_to_resource(shmem, 0, &res);
-	of_node_put(shmem);
-	if (ret) {
-		dev_err(dev, "failed to get SCMI Tx payload mem resource\n");
-		return ret;
-	}
-
-	size = resource_size(&res);
-	cinfo->payload = devm_ioremap(info->dev, res.start, size);
-	if (!cinfo->payload) {
-		dev_err(dev, "failed to ioremap SCMI Tx payload\n");
-		return -EADDRNOTAVAIL;
-	}
-
-	/* Transmit channel is first entry i.e. index 0 */
-	cinfo->chan = mbox_request_channel(cl, 0);
-	if (IS_ERR(cinfo->chan)) {
-		ret = PTR_ERR(cinfo->chan);
-		if (ret != -EPROBE_DEFER)
-			dev_err(dev, "failed to request SCMI Tx mailbox\n");
-		return ret;
-	}
-
-idr_alloc:
-	ret = idr_alloc(&info->tx_idr, cinfo, prot_id, prot_id + 1, GFP_KERNEL);
-	if (ret != prot_id) {
-		dev_err(dev, "unable to allocate SCMI idr slot err %d\n", ret);
-		return ret;
-	}
-
-	cinfo->handle = &info->handle;
-	return 0;
-}
-
-static inline void
-scmi_create_protocol_device(struct device_node *np, struct scmi_info *info,
-			    int prot_id)
-{
-	struct scmi_device *sdev;
-
-	sdev = scmi_device_create(np, info->dev, prot_id);
-	if (!sdev) {
-		dev_err(info->dev, "failed to create %d protocol device\n",
-			prot_id);
-		return;
-	}
-
-	if (scmi_mbox_chan_setup(info, &sdev->dev, prot_id)) {
-		dev_err(&sdev->dev, "failed to setup transport\n");
-		scmi_device_destroy(sdev);
-		return;
-	}
-
-	/* setup handle now as the transport is ready */
-	scmi_set_handle(sdev);
-}
-
-static int scmi_probe(struct platform_device *pdev)
-{
-	int ret;
-	struct scmi_handle *handle;
-	const struct scmi_desc *desc;
-	struct scmi_info *info;
-	struct device *dev = &pdev->dev;
-	struct device_node *child, *np = dev->of_node;
-
-	/* Only mailbox method supported, check for the presence of one */
-	if (scmi_mailbox_check(np)) {
-		dev_err(dev, "no mailbox found in %pOF\n", np);
-		return -EINVAL;
-	}
-
-	desc = of_match_device(scmi_of_match, dev)->data;
-
-	info = devm_kzalloc(dev, sizeof(*info), GFP_KERNEL);
-	if (!info)
-		return -ENOMEM;
-
-	info->dev = dev;
-	info->desc = desc;
-	INIT_LIST_HEAD(&info->node);
-
-	ret = scmi_xfer_info_init(info);
-	if (ret)
-		return ret;
-
-	platform_set_drvdata(pdev, info);
-	idr_init(&info->tx_idr);
-
-	handle = &info->handle;
-	handle->dev = info->dev;
-	handle->version = &info->version;
-
-	ret = scmi_mbox_chan_setup(info, dev, SCMI_PROTOCOL_BASE);
-	if (ret)
-		return ret;
-
-	ret = scmi_base_protocol_init(handle);
-	if (ret) {
-		dev_err(dev, "unable to communicate with SCMI(%d)\n", ret);
-		return ret;
-	}
-
-	mutex_lock(&scmi_list_mutex);
-	list_add_tail(&info->node, &scmi_list);
-	mutex_unlock(&scmi_list_mutex);
-
-	for_each_available_child_of_node(np, child) {
-		u32 prot_id;
-
-		if (of_property_read_u32(child, "reg", &prot_id))
-			continue;
-
-		if (!FIELD_FIT(MSG_PROTOCOL_ID_MASK, prot_id))
-			dev_err(dev, "Out of range protocol %d\n", prot_id);
-
-		if (!scmi_is_protocol_implemented(handle, prot_id)) {
-			dev_err(dev, "SCMI protocol %d not implemented\n",
-				prot_id);
-			continue;
-		}
-
-		scmi_create_protocol_device(child, info, prot_id);
-	}
-
-	return 0;
-}
+MODULE_DEVICE_TABLE(of, scmi_of_match);
 
 static struct platform_driver scmi_driver = {
 	.driver = {
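scmi_do_xfer_with_response(), added above, is what protocol code calls when
the platform answers with a delayed response (MSG_TYPE_DELAYED_RESP): it
installs an on-stack completion as xfer->async_done, sends the command, and
waits up to SCMI_MAX_RESPONSE_TIMEOUT (2 seconds). A hypothetical command
modelled on the reset protocol's use of it (the message id 0x4 and the bare
domain payload are illustrative only):

static int example_async_command(const struct scmi_handle *handle, u32 domain)
{
	struct scmi_xfer *t;
	int ret;

	ret = scmi_xfer_get_init(handle, 0x4, SCMI_PROTOCOL_RESET,
				 sizeof(__le32), 0, &t);
	if (ret)
		return ret;

	put_unaligned_le32(domain, t->tx.buf);

	/* Blocks for the delayed response; -ETIMEDOUT after 2 seconds. */
	ret = scmi_do_xfer_with_response(handle, t);

	scmi_xfer_put(handle, t);
	return ret;
}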
diff --git a/drivers/firmware/arm_scmi/perf.c b/drivers/firmware/arm_scmi/perf.c
index 6434294..4a8012e 100644
--- a/drivers/firmware/arm_scmi/perf.c
+++ b/drivers/firmware/arm_scmi/perf.c
@@ -5,7 +5,10 @@
  * Copyright (C) 2018 ARM Ltd.
  */
 
+#include <linux/bits.h>
 #include <linux/of.h>
+#include <linux/io.h>
+#include <linux/io-64-nonatomic-hi-lo.h>
 #include <linux/platform_device.h>
 #include <linux/pm_opp.h>
 #include <linux/sort.h>
@@ -21,6 +24,7 @@
 	PERF_LEVEL_GET = 0x8,
 	PERF_NOTIFY_LIMITS = 0x9,
 	PERF_NOTIFY_LEVEL = 0xa,
+	PERF_DESCRIBE_FASTCHANNEL = 0xb,
 };
 
 struct scmi_opp {
@@ -44,6 +48,7 @@
 #define SUPPORTS_SET_PERF_LVL(x)	((x) & BIT(30))
 #define SUPPORTS_PERF_LIMIT_NOTIFY(x)	((x) & BIT(29))
 #define SUPPORTS_PERF_LEVEL_NOTIFY(x)	((x) & BIT(28))
+#define SUPPORTS_PERF_FASTCHANNELS(x)	((x) & BIT(27))
 	__le32 rate_limit_us;
 	__le32 sustained_freq_khz;
 	__le32 sustained_perf_level;
@@ -87,17 +92,56 @@
 	} opp[0];
 };
 
+struct scmi_perf_get_fc_info {
+	__le32 domain;
+	__le32 message_id;
+};
+
+struct scmi_msg_resp_perf_desc_fc {
+	__le32 attr;
+#define SUPPORTS_DOORBELL(x)		((x) & BIT(0))
+#define DOORBELL_REG_WIDTH(x)		FIELD_GET(GENMASK(2, 1), (x))
+	__le32 rate_limit;
+	__le32 chan_addr_low;
+	__le32 chan_addr_high;
+	__le32 chan_size;
+	__le32 db_addr_low;
+	__le32 db_addr_high;
+	__le32 db_set_lmask;
+	__le32 db_set_hmask;
+	__le32 db_preserve_lmask;
+	__le32 db_preserve_hmask;
+};
+
+struct scmi_fc_db_info {
+	int width;
+	u64 set;
+	u64 mask;
+	void __iomem *addr;
+};
+
+struct scmi_fc_info {
+	void __iomem *level_set_addr;
+	void __iomem *limit_set_addr;
+	void __iomem *level_get_addr;
+	void __iomem *limit_get_addr;
+	struct scmi_fc_db_info *level_set_db;
+	struct scmi_fc_db_info *limit_set_db;
+};
+
 struct perf_dom_info {
 	bool set_limits;
 	bool set_perf;
 	bool perf_limit_notify;
 	bool perf_level_notify;
+	bool perf_fastchannels;
 	u32 opp_count;
 	u32 sustained_freq_khz;
 	u32 sustained_perf_level;
 	u32 mult_factor;
 	char name[SCMI_MAX_STR_SIZE];
 	struct scmi_opp opp[MAX_OPPS];
+	struct scmi_fc_info *fc_info;
 };
 
 struct scmi_perf_info {
@@ -151,7 +195,7 @@
 	if (ret)
 		return ret;
 
-	*(__le32 *)t->tx.buf = cpu_to_le32(domain);
+	put_unaligned_le32(domain, t->tx.buf);
 	attr = t->rx.buf;
 
 	ret = scmi_do_xfer(handle, t);
@@ -162,6 +206,7 @@
 		dom_info->set_perf = SUPPORTS_SET_PERF_LVL(flags);
 		dom_info->perf_limit_notify = SUPPORTS_PERF_LIMIT_NOTIFY(flags);
 		dom_info->perf_level_notify = SUPPORTS_PERF_LEVEL_NOTIFY(flags);
+		dom_info->perf_fastchannels = SUPPORTS_PERF_FASTCHANNELS(flags);
 		dom_info->sustained_freq_khz =
 					le32_to_cpu(attr->sustained_freq_khz);
 		dom_info->sustained_perf_level =
@@ -174,7 +219,7 @@
 			dom_info->mult_factor =
 					(dom_info->sustained_freq_khz * 1000) /
 					dom_info->sustained_perf_level;
-		memcpy(dom_info->name, attr->name, SCMI_MAX_STR_SIZE);
+		strlcpy(dom_info->name, attr->name, SCMI_MAX_STR_SIZE);
 	}
 
 	scmi_xfer_put(handle, t);
@@ -249,8 +294,42 @@
 	return ret;
 }
 
-static int scmi_perf_limits_set(const struct scmi_handle *handle, u32 domain,
-				u32 max_perf, u32 min_perf)
+#define SCMI_PERF_FC_RING_DB(w)				\
+do {							\
+	u##w val = 0;					\
+							\
+	if (db->mask)					\
+		val = ioread##w(db->addr) & db->mask;	\
+	iowrite##w((u##w)db->set | val, db->addr);	\
+} while (0)
+
+static void scmi_perf_fc_ring_db(struct scmi_fc_db_info *db)
+{
+	if (!db || !db->addr)
+		return;
+
+	if (db->width == 1)
+		SCMI_PERF_FC_RING_DB(8);
+	else if (db->width == 2)
+		SCMI_PERF_FC_RING_DB(16);
+	else if (db->width == 4)
+		SCMI_PERF_FC_RING_DB(32);
+	else /* db->width == 8 */
+#ifdef CONFIG_64BIT
+		SCMI_PERF_FC_RING_DB(64);
+#else
+	{
+		u64 val = 0;
+
+		if (db->mask)
+			val = ioread64_hi_lo(db->addr) & db->mask;
+		iowrite64_hi_lo(db->set, db->addr);
+	}
+#endif
+}
+
+static int scmi_perf_mb_limits_set(const struct scmi_handle *handle, u32 domain,
+				   u32 max_perf, u32 min_perf)
 {
 	int ret;
 	struct scmi_xfer *t;
@@ -272,8 +351,24 @@
 	return ret;
 }
 
-static int scmi_perf_limits_get(const struct scmi_handle *handle, u32 domain,
-				u32 *max_perf, u32 *min_perf)
+static int scmi_perf_limits_set(const struct scmi_handle *handle, u32 domain,
+				u32 max_perf, u32 min_perf)
+{
+	struct scmi_perf_info *pi = handle->perf_priv;
+	struct perf_dom_info *dom = pi->dom_info + domain;
+
+	if (dom->fc_info && dom->fc_info->limit_set_addr) {
+		iowrite32(max_perf, dom->fc_info->limit_set_addr);
+		iowrite32(min_perf, dom->fc_info->limit_set_addr + 4);
+		scmi_perf_fc_ring_db(dom->fc_info->limit_set_db);
+		return 0;
+	}
+
+	return scmi_perf_mb_limits_set(handle, domain, max_perf, min_perf);
+}
+
+static int scmi_perf_mb_limits_get(const struct scmi_handle *handle, u32 domain,
+				   u32 *max_perf, u32 *min_perf)
 {
 	int ret;
 	struct scmi_xfer *t;
@@ -284,7 +379,7 @@
 	if (ret)
 		return ret;
 
-	*(__le32 *)t->tx.buf = cpu_to_le32(domain);
+	put_unaligned_le32(domain, t->tx.buf);
 
 	ret = scmi_do_xfer(handle, t);
 	if (!ret) {
@@ -298,8 +393,23 @@
 	return ret;
 }
 
-static int scmi_perf_level_set(const struct scmi_handle *handle, u32 domain,
-			       u32 level, bool poll)
+static int scmi_perf_limits_get(const struct scmi_handle *handle, u32 domain,
+				u32 *max_perf, u32 *min_perf)
+{
+	struct scmi_perf_info *pi = handle->perf_priv;
+	struct perf_dom_info *dom = pi->dom_info + domain;
+
+	if (dom->fc_info && dom->fc_info->limit_get_addr) {
+		*max_perf = ioread32(dom->fc_info->limit_get_addr);
+		*min_perf = ioread32(dom->fc_info->limit_get_addr + 4);
+		return 0;
+	}
+
+	return scmi_perf_mb_limits_get(handle, domain, max_perf, min_perf);
+}
+
+static int scmi_perf_mb_level_set(const struct scmi_handle *handle, u32 domain,
+				  u32 level, bool poll)
 {
 	int ret;
 	struct scmi_xfer *t;
@@ -321,8 +431,23 @@
 	return ret;
 }
 
-static int scmi_perf_level_get(const struct scmi_handle *handle, u32 domain,
-			       u32 *level, bool poll)
+static int scmi_perf_level_set(const struct scmi_handle *handle, u32 domain,
+			       u32 level, bool poll)
+{
+	struct scmi_perf_info *pi = handle->perf_priv;
+	struct perf_dom_info *dom = pi->dom_info + domain;
+
+	if (dom->fc_info && dom->fc_info->level_set_addr) {
+		iowrite32(level, dom->fc_info->level_set_addr);
+		scmi_perf_fc_ring_db(dom->fc_info->level_set_db);
+		return 0;
+	}
+
+	return scmi_perf_mb_level_set(handle, domain, level, poll);
+}
+
+static int scmi_perf_mb_level_get(const struct scmi_handle *handle, u32 domain,
+				  u32 *level, bool poll)
 {
 	int ret;
 	struct scmi_xfer *t;
@@ -333,16 +458,128 @@
 		return ret;
 
 	t->hdr.poll_completion = poll;
-	*(__le32 *)t->tx.buf = cpu_to_le32(domain);
+	put_unaligned_le32(domain, t->tx.buf);
 
 	ret = scmi_do_xfer(handle, t);
 	if (!ret)
-		*level = le32_to_cpu(*(__le32 *)t->rx.buf);
+		*level = get_unaligned_le32(t->rx.buf);
 
 	scmi_xfer_put(handle, t);
 	return ret;
 }
 
+static int scmi_perf_level_get(const struct scmi_handle *handle, u32 domain,
+			       u32 *level, bool poll)
+{
+	struct scmi_perf_info *pi = handle->perf_priv;
+	struct perf_dom_info *dom = pi->dom_info + domain;
+
+	if (dom->fc_info && dom->fc_info->level_get_addr) {
+		*level = ioread32(dom->fc_info->level_get_addr);
+		return 0;
+	}
+
+	return scmi_perf_mb_level_get(handle, domain, level, poll);
+}
+
+static bool scmi_perf_fc_size_is_valid(u32 msg, u32 size)
+{
+	if ((msg == PERF_LEVEL_GET || msg == PERF_LEVEL_SET) && size == 4)
+		return true;
+	if ((msg == PERF_LIMITS_GET || msg == PERF_LIMITS_SET) && size == 8)
+		return true;
+	return false;
+}
+
+static void
+scmi_perf_domain_desc_fc(const struct scmi_handle *handle, u32 domain,
+			 u32 message_id, void __iomem **p_addr,
+			 struct scmi_fc_db_info **p_db)
+{
+	int ret;
+	u32 flags;
+	u64 phys_addr;
+	u8 size;
+	void __iomem *addr;
+	struct scmi_xfer *t;
+	struct scmi_fc_db_info *db;
+	struct scmi_perf_get_fc_info *info;
+	struct scmi_msg_resp_perf_desc_fc *resp;
+
+	if (!p_addr)
+		return;
+
+	ret = scmi_xfer_get_init(handle, PERF_DESCRIBE_FASTCHANNEL,
+				 SCMI_PROTOCOL_PERF,
+				 sizeof(*info), sizeof(*resp), &t);
+	if (ret)
+		return;
+
+	info = t->tx.buf;
+	info->domain = cpu_to_le32(domain);
+	info->message_id = cpu_to_le32(message_id);
+
+	ret = scmi_do_xfer(handle, t);
+	if (ret)
+		goto err_xfer;
+
+	resp = t->rx.buf;
+	flags = le32_to_cpu(resp->attr);
+	size = le32_to_cpu(resp->chan_size);
+	if (!scmi_perf_fc_size_is_valid(message_id, size))
+		goto err_xfer;
+
+	phys_addr = le32_to_cpu(resp->chan_addr_low);
+	phys_addr |= (u64)le32_to_cpu(resp->chan_addr_high) << 32;
+	addr = devm_ioremap(handle->dev, phys_addr, size);
+	if (!addr)
+		goto err_xfer;
+	*p_addr = addr;
+
+	if (p_db && SUPPORTS_DOORBELL(flags)) {
+		db = devm_kzalloc(handle->dev, sizeof(*db), GFP_KERNEL);
+		if (!db)
+			goto err_xfer;
+
+		size = 1 << DOORBELL_REG_WIDTH(flags);
+		phys_addr = le32_to_cpu(resp->db_addr_low);
+		phys_addr |= (u64)le32_to_cpu(resp->db_addr_high) << 32;
+		addr = devm_ioremap(handle->dev, phys_addr, size);
+		if (!addr)
+			goto err_xfer;
+
+		db->addr = addr;
+		db->width = size;
+		db->set = le32_to_cpu(resp->db_set_lmask);
+		db->set |= (u64)le32_to_cpu(resp->db_set_hmask) << 32;
+		db->mask = le32_to_cpu(resp->db_preserve_lmask);
+		db->mask |= (u64)le32_to_cpu(resp->db_preserve_hmask) << 32;
+		*p_db = db;
+	}
+err_xfer:
+	scmi_xfer_put(handle, t);
+}
+
+static void scmi_perf_domain_init_fc(const struct scmi_handle *handle,
+				     u32 domain, struct scmi_fc_info **p_fc)
+{
+	struct scmi_fc_info *fc;
+
+	fc = devm_kzalloc(handle->dev, sizeof(*fc), GFP_KERNEL);
+	if (!fc)
+		return;
+
+	scmi_perf_domain_desc_fc(handle, domain, PERF_LEVEL_SET,
+				 &fc->level_set_addr, &fc->level_set_db);
+	scmi_perf_domain_desc_fc(handle, domain, PERF_LEVEL_GET,
+				 &fc->level_get_addr, NULL);
+	scmi_perf_domain_desc_fc(handle, domain, PERF_LIMITS_SET,
+				 &fc->limit_set_addr, &fc->limit_set_db);
+	scmi_perf_domain_desc_fc(handle, domain, PERF_LIMITS_GET,
+				 &fc->limit_get_addr, NULL);
+	*p_fc = fc;
+}
+
 /* Device specific ops */
 static int scmi_dev_domain_id(struct device *dev)
 {
@@ -427,6 +664,33 @@
 	return ret;
 }
 
+static int scmi_dvfs_est_power_get(const struct scmi_handle *handle, u32 domain,
+				   unsigned long *freq, unsigned long *power)
+{
+	struct scmi_perf_info *pi = handle->perf_priv;
+	struct perf_dom_info *dom;
+	unsigned long opp_freq;
+	int idx, ret = -EINVAL;
+	struct scmi_opp *opp;
+
+	dom = pi->dom_info + domain;
+	if (!dom)
+		return -EIO;
+
+	for (opp = dom->opp, idx = 0; idx < dom->opp_count; idx++, opp++) {
+		opp_freq = opp->perf * dom->mult_factor;
+		if (opp_freq < *freq)
+			continue;
+
+		*freq = opp_freq;
+		*power = opp->power;
+		ret = 0;
+		break;
+	}
+
+	return ret;
+}
+
 static struct scmi_perf_ops perf_ops = {
 	.limits_set = scmi_perf_limits_set,
 	.limits_get = scmi_perf_limits_get,
@@ -437,6 +701,7 @@
 	.device_opps_add = scmi_dvfs_device_opps_add,
 	.freq_set = scmi_dvfs_freq_set,
 	.freq_get = scmi_dvfs_freq_get,
+	.est_power_get = scmi_dvfs_est_power_get,
 };
 
 static int scmi_perf_protocol_init(struct scmi_handle *handle)
@@ -466,6 +731,9 @@
 
 		scmi_perf_domain_attributes_get(handle, domain, dom);
 		scmi_perf_describe_levels_get(handle, domain, dom);
+
+		if (dom->perf_fastchannels)
+			scmi_perf_domain_init_fc(handle, domain, &dom->fc_info);
 	}
 
 	handle->perf_ops = &perf_ops;
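The fast-channel doorbell ring above is a read-modify-write: bits covered by
the preserve mask keep their old register value, everything else is
overwritten by the set mask. Reduced to plain arithmetic for the 32-bit case
(the numbers below are invented for illustration):

/* Doorbell update rule used by SCMI_PERF_FC_RING_DB(), 32-bit case. */
static u32 ring_db32(u32 old, u32 set, u32 preserve)
{
	return (old & preserve) | set;
}

/*
 * Example: old = 0xabcdef12, set mask = 0x00000001,
 * preserve mask = 0xfffffff0  ->  written value = 0xabcdef11.
 */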
diff --git a/drivers/firmware/arm_scmi/power.c b/drivers/firmware/arm_scmi/power.c
index cfa033b..5abef70 100644
--- a/drivers/firmware/arm_scmi/power.c
+++ b/drivers/firmware/arm_scmi/power.c
@@ -96,7 +96,7 @@
 	if (ret)
 		return ret;
 
-	*(__le32 *)t->tx.buf = cpu_to_le32(domain);
+	put_unaligned_le32(domain, t->tx.buf);
 	attr = t->rx.buf;
 
 	ret = scmi_do_xfer(handle, t);
@@ -106,7 +106,7 @@
 		dom_info->state_set_notify = SUPPORTS_STATE_SET_NOTIFY(flags);
 		dom_info->state_set_async = SUPPORTS_STATE_SET_ASYNC(flags);
 		dom_info->state_set_sync = SUPPORTS_STATE_SET_SYNC(flags);
-		memcpy(dom_info->name, attr->name, SCMI_MAX_STR_SIZE);
+		strlcpy(dom_info->name, attr->name, SCMI_MAX_STR_SIZE);
 	}
 
 	scmi_xfer_put(handle, t);
@@ -147,11 +147,11 @@
 	if (ret)
 		return ret;
 
-	*(__le32 *)t->tx.buf = cpu_to_le32(domain);
+	put_unaligned_le32(domain, t->tx.buf);
 
 	ret = scmi_do_xfer(handle, t);
 	if (!ret)
-		*state = le32_to_cpu(*(__le32 *)t->rx.buf);
+		*state = get_unaligned_le32(t->rx.buf);
 
 	scmi_xfer_put(handle, t);
 	return ret;
diff --git a/drivers/firmware/arm_scmi/reset.c b/drivers/firmware/arm_scmi/reset.c
new file mode 100644
index 0000000..ab42c21
--- /dev/null
+++ b/drivers/firmware/arm_scmi/reset.c
@@ -0,0 +1,231 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * System Control and Management Interface (SCMI) Reset Protocol
+ *
+ * Copyright (C) 2019 ARM Ltd.
+ */
+
+#include "common.h"
+
+enum scmi_reset_protocol_cmd {
+	RESET_DOMAIN_ATTRIBUTES = 0x3,
+	RESET = 0x4,
+	RESET_NOTIFY = 0x5,
+};
+
+enum scmi_reset_protocol_notify {
+	RESET_ISSUED = 0x0,
+};
+
+#define NUM_RESET_DOMAIN_MASK	0xffff
+#define RESET_NOTIFY_ENABLE	BIT(0)
+
+struct scmi_msg_resp_reset_domain_attributes {
+	__le32 attributes;
+#define SUPPORTS_ASYNC_RESET(x)		((x) & BIT(31))
+#define SUPPORTS_NOTIFY_RESET(x)	((x) & BIT(30))
+	__le32 latency;
+	    u8 name[SCMI_MAX_STR_SIZE];
+};
+
+struct scmi_msg_reset_domain_reset {
+	__le32 domain_id;
+	__le32 flags;
+#define AUTONOMOUS_RESET	BIT(0)
+#define EXPLICIT_RESET_ASSERT	BIT(1)
+#define ASYNCHRONOUS_RESET	BIT(2)
+	__le32 reset_state;
+#define ARCH_RESET_TYPE		BIT(31)
+#define COLD_RESET_STATE	BIT(0)
+#define ARCH_COLD_RESET		(ARCH_RESET_TYPE | COLD_RESET_STATE)
+};
+
+struct reset_dom_info {
+	bool async_reset;
+	bool reset_notify;
+	u32 latency_us;
+	char name[SCMI_MAX_STR_SIZE];
+};
+
+struct scmi_reset_info {
+	int num_domains;
+	struct reset_dom_info *dom_info;
+};
+
+static int scmi_reset_attributes_get(const struct scmi_handle *handle,
+				     struct scmi_reset_info *pi)
+{
+	int ret;
+	struct scmi_xfer *t;
+	u32 attr;
+
+	ret = scmi_xfer_get_init(handle, PROTOCOL_ATTRIBUTES,
+				 SCMI_PROTOCOL_RESET, 0, sizeof(attr), &t);
+	if (ret)
+		return ret;
+
+	ret = scmi_do_xfer(handle, t);
+	if (!ret) {
+		attr = get_unaligned_le32(t->rx.buf);
+		pi->num_domains = attr & NUM_RESET_DOMAIN_MASK;
+	}
+
+	scmi_xfer_put(handle, t);
+	return ret;
+}
+
+static int
+scmi_reset_domain_attributes_get(const struct scmi_handle *handle, u32 domain,
+				 struct reset_dom_info *dom_info)
+{
+	int ret;
+	struct scmi_xfer *t;
+	struct scmi_msg_resp_reset_domain_attributes *attr;
+
+	ret = scmi_xfer_get_init(handle, RESET_DOMAIN_ATTRIBUTES,
+				 SCMI_PROTOCOL_RESET, sizeof(domain),
+				 sizeof(*attr), &t);
+	if (ret)
+		return ret;
+
+	put_unaligned_le32(domain, t->tx.buf);
+	attr = t->rx.buf;
+
+	ret = scmi_do_xfer(handle, t);
+	if (!ret) {
+		u32 attributes = le32_to_cpu(attr->attributes);
+
+		dom_info->async_reset = SUPPORTS_ASYNC_RESET(attributes);
+		dom_info->reset_notify = SUPPORTS_NOTIFY_RESET(attributes);
+		dom_info->latency_us = le32_to_cpu(attr->latency);
+		if (dom_info->latency_us == U32_MAX)
+			dom_info->latency_us = 0;
+		strlcpy(dom_info->name, attr->name, SCMI_MAX_STR_SIZE);
+	}
+
+	scmi_xfer_put(handle, t);
+	return ret;
+}
+
+static int scmi_reset_num_domains_get(const struct scmi_handle *handle)
+{
+	struct scmi_reset_info *pi = handle->reset_priv;
+
+	return pi->num_domains;
+}
+
+static char *scmi_reset_name_get(const struct scmi_handle *handle, u32 domain)
+{
+	struct scmi_reset_info *pi = handle->reset_priv;
+	struct reset_dom_info *dom = pi->dom_info + domain;
+
+	return dom->name;
+}
+
+static int scmi_reset_latency_get(const struct scmi_handle *handle, u32 domain)
+{
+	struct scmi_reset_info *pi = handle->reset_priv;
+	struct reset_dom_info *dom = pi->dom_info + domain;
+
+	return dom->latency_us;
+}
+
+static int scmi_domain_reset(const struct scmi_handle *handle, u32 domain,
+			     u32 flags, u32 state)
+{
+	int ret;
+	struct scmi_xfer *t;
+	struct scmi_msg_reset_domain_reset *dom;
+	struct scmi_reset_info *pi = handle->reset_priv;
+	struct reset_dom_info *rdom = pi->dom_info + domain;
+
+	if (rdom->async_reset)
+		flags |= ASYNCHRONOUS_RESET;
+
+	ret = scmi_xfer_get_init(handle, RESET, SCMI_PROTOCOL_RESET,
+				 sizeof(*dom), 0, &t);
+	if (ret)
+		return ret;
+
+	dom = t->tx.buf;
+	dom->domain_id = cpu_to_le32(domain);
+	dom->flags = cpu_to_le32(flags);
+	dom->reset_state = cpu_to_le32(state);
+
+	if (rdom->async_reset)
+		ret = scmi_do_xfer_with_response(handle, t);
+	else
+		ret = scmi_do_xfer(handle, t);
+
+	scmi_xfer_put(handle, t);
+	return ret;
+}
+
+static int scmi_reset_domain_reset(const struct scmi_handle *handle, u32 domain)
+{
+	return scmi_domain_reset(handle, domain, AUTONOMOUS_RESET,
+				 ARCH_COLD_RESET);
+}
+
+static int
+scmi_reset_domain_assert(const struct scmi_handle *handle, u32 domain)
+{
+	return scmi_domain_reset(handle, domain, EXPLICIT_RESET_ASSERT,
+				 ARCH_COLD_RESET);
+}
+
+static int
+scmi_reset_domain_deassert(const struct scmi_handle *handle, u32 domain)
+{
+	return scmi_domain_reset(handle, domain, 0, ARCH_COLD_RESET);
+}
+
+static struct scmi_reset_ops reset_ops = {
+	.num_domains_get = scmi_reset_num_domains_get,
+	.name_get = scmi_reset_name_get,
+	.latency_get = scmi_reset_latency_get,
+	.reset = scmi_reset_domain_reset,
+	.assert = scmi_reset_domain_assert,
+	.deassert = scmi_reset_domain_deassert,
+};
+
+static int scmi_reset_protocol_init(struct scmi_handle *handle)
+{
+	int domain;
+	u32 version;
+	struct scmi_reset_info *pinfo;
+
+	scmi_version_get(handle, SCMI_PROTOCOL_RESET, &version);
+
+	dev_dbg(handle->dev, "Reset Version %d.%d\n",
+		PROTOCOL_REV_MAJOR(version), PROTOCOL_REV_MINOR(version));
+
+	pinfo = devm_kzalloc(handle->dev, sizeof(*pinfo), GFP_KERNEL);
+	if (!pinfo)
+		return -ENOMEM;
+
+	scmi_reset_attributes_get(handle, pinfo);
+
+	pinfo->dom_info = devm_kcalloc(handle->dev, pinfo->num_domains,
+				       sizeof(*pinfo->dom_info), GFP_KERNEL);
+	if (!pinfo->dom_info)
+		return -ENOMEM;
+
+	for (domain = 0; domain < pinfo->num_domains; domain++) {
+		struct reset_dom_info *dom = pinfo->dom_info + domain;
+
+		scmi_reset_domain_attributes_get(handle, domain, dom);
+	}
+
+	handle->reset_ops = &reset_ops;
+	handle->reset_priv = pinfo;
+
+	return 0;
+}
+
+static int __init scmi_reset_init(void)
+{
+	return scmi_protocol_register(SCMI_PROTOCOL_RESET,
+				      &scmi_reset_protocol_init);
+}
+subsys_initcall(scmi_reset_init);
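Once scmi_reset_protocol_init() has populated handle->reset_ops, a reset
consumer only needs the ops table. A hypothetical caller cycling reset
domain 0 (the domain number is illustrative):

static int example_cycle_domain(const struct scmi_handle *handle)
{
	int ret;

	if (!handle->reset_ops)
		return -EOPNOTSUPP;

	ret = handle->reset_ops->assert(handle, 0);
	if (ret)
		return ret;

	return handle->reset_ops->deassert(handle, 0);
}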
diff --git a/drivers/firmware/arm_scmi/sensors.c b/drivers/firmware/arm_scmi/sensors.c
index 27f2092..a400ea8 100644
--- a/drivers/firmware/arm_scmi/sensors.c
+++ b/drivers/firmware/arm_scmi/sensors.c
@@ -9,8 +9,8 @@
 
 enum scmi_sensor_protocol_cmd {
 	SENSOR_DESCRIPTION_GET = 0x3,
-	SENSOR_CONFIG_SET = 0x4,
-	SENSOR_TRIP_POINT_SET = 0x5,
+	SENSOR_TRIP_POINT_NOTIFY = 0x4,
+	SENSOR_TRIP_POINT_CONFIG = 0x5,
 	SENSOR_READING_GET = 0x6,
 };
 
@@ -30,19 +30,22 @@
 		__le32 id;
 		__le32 attributes_low;
 #define SUPPORTS_ASYNC_READ(x)	((x) & BIT(31))
-#define NUM_TRIP_POINTS(x)	(((x) >> 4) & 0xff)
+#define NUM_TRIP_POINTS(x)	((x) & 0xff)
 		__le32 attributes_high;
 #define SENSOR_TYPE(x)		((x) & 0xff)
-#define SENSOR_SCALE(x)		(((x) >> 11) & 0x3f)
+#define SENSOR_SCALE(x)		(((x) >> 11) & 0x1f)
+#define SENSOR_SCALE_SIGN	BIT(4)
+#define SENSOR_SCALE_EXTEND	GENMASK(7, 5)
 #define SENSOR_UPDATE_SCALE(x)	(((x) >> 22) & 0x1f)
 #define SENSOR_UPDATE_BASE(x)	(((x) >> 27) & 0x1f)
 		    u8 name[SCMI_MAX_STR_SIZE];
 	} desc[0];
 };
 
-struct scmi_msg_set_sensor_config {
+struct scmi_msg_sensor_trip_point_notify {
 	__le32 id;
 	__le32 event_control;
+#define SENSOR_TP_NOTIFY_ALL	BIT(0)
 };
 
 struct scmi_msg_set_sensor_trip_point {
@@ -117,7 +120,7 @@
 
 	do {
 		/* Set the number of sensors to be skipped/already read */
-		*(__le32 *)t->tx.buf = cpu_to_le32(desc_index);
+		put_unaligned_le32(desc_index, t->tx.buf);
 
 		ret = scmi_do_xfer(handle, t);
 		if (ret)
@@ -133,14 +136,21 @@
 		}
 
 		for (cnt = 0; cnt < num_returned; cnt++) {
-			u32 attrh;
+			u32 attrh, attrl;
 			struct scmi_sensor_info *s;
 
+			attrl = le32_to_cpu(buf->desc[cnt].attributes_low);
 			attrh = le32_to_cpu(buf->desc[cnt].attributes_high);
 			s = &si->sensors[desc_index + cnt];
 			s->id = le32_to_cpu(buf->desc[cnt].id);
 			s->type = SENSOR_TYPE(attrh);
-			memcpy(s->name, buf->desc[cnt].name, SCMI_MAX_STR_SIZE);
+			s->scale = SENSOR_SCALE(attrh);
+			/* Sign extend to a full s8 */
+			if (s->scale & SENSOR_SCALE_SIGN)
+				s->scale |= SENSOR_SCALE_EXTEND;
+			s->async = SUPPORTS_ASYNC_READ(attrl);
+			s->num_trip_points = NUM_TRIP_POINTS(attrl);
+			strlcpy(s->name, buf->desc[cnt].name, SCMI_MAX_STR_SIZE);
 		}
 
 		desc_index += num_returned;
@@ -154,15 +164,15 @@
 	return ret;
 }
 
-static int
-scmi_sensor_configuration_set(const struct scmi_handle *handle, u32 sensor_id)
+static int scmi_sensor_trip_point_notify(const struct scmi_handle *handle,
+					 u32 sensor_id, bool enable)
 {
 	int ret;
-	u32 evt_cntl = BIT(0);
+	u32 evt_cntl = enable ? SENSOR_TP_NOTIFY_ALL : 0;
 	struct scmi_xfer *t;
-	struct scmi_msg_set_sensor_config *cfg;
+	struct scmi_msg_sensor_trip_point_notify *cfg;
 
-	ret = scmi_xfer_get_init(handle, SENSOR_CONFIG_SET,
+	ret = scmi_xfer_get_init(handle, SENSOR_TRIP_POINT_NOTIFY,
 				 SCMI_PROTOCOL_SENSOR, sizeof(*cfg), 0, &t);
 	if (ret)
 		return ret;
@@ -177,15 +187,16 @@
 	return ret;
 }
 
-static int scmi_sensor_trip_point_set(const struct scmi_handle *handle,
-				      u32 sensor_id, u8 trip_id, u64 trip_value)
+static int
+scmi_sensor_trip_point_config(const struct scmi_handle *handle, u32 sensor_id,
+			      u8 trip_id, u64 trip_value)
 {
 	int ret;
 	u32 evt_cntl = SENSOR_TP_BOTH;
 	struct scmi_xfer *t;
 	struct scmi_msg_set_sensor_trip_point *trip;
 
-	ret = scmi_xfer_get_init(handle, SENSOR_TRIP_POINT_SET,
+	ret = scmi_xfer_get_init(handle, SENSOR_TRIP_POINT_CONFIG,
 				 SCMI_PROTOCOL_SENSOR, sizeof(*trip), 0, &t);
 	if (ret)
 		return ret;
@@ -203,11 +214,13 @@
 }
 
 static int scmi_sensor_reading_get(const struct scmi_handle *handle,
-				   u32 sensor_id, bool async, u64 *value)
+				   u32 sensor_id, u64 *value)
 {
 	int ret;
 	struct scmi_xfer *t;
 	struct scmi_msg_sensor_reading_get *sensor;
+	struct sensors_info *si = handle->sensor_priv;
+	struct scmi_sensor_info *s = si->sensors + sensor_id;
 
 	ret = scmi_xfer_get_init(handle, SENSOR_READING_GET,
 				 SCMI_PROTOCOL_SENSOR, sizeof(*sensor),
@@ -217,14 +230,18 @@
 
 	sensor = t->tx.buf;
 	sensor->id = cpu_to_le32(sensor_id);
-	sensor->flags = cpu_to_le32(async ? SENSOR_READ_ASYNC : 0);
 
-	ret = scmi_do_xfer(handle, t);
-	if (!ret) {
-		__le32 *pval = t->rx.buf;
-
-		*value = le32_to_cpu(*pval);
-		*value |= (u64)le32_to_cpu(*(pval + 1)) << 32;
+	if (s->async) {
+		sensor->flags = cpu_to_le32(SENSOR_READ_ASYNC);
+		ret = scmi_do_xfer_with_response(handle, t);
+		if (!ret)
+			*value = get_unaligned_le64((void *)
+						    ((__le32 *)t->rx.buf + 1));
+	} else {
+		sensor->flags = cpu_to_le32(0);
+		ret = scmi_do_xfer(handle, t);
+		if (!ret)
+			*value = get_unaligned_le64(t->rx.buf);
 	}
 
 	scmi_xfer_put(handle, t);
@@ -249,8 +266,8 @@
 static struct scmi_sensor_ops sensor_ops = {
 	.count_get = scmi_sensor_count_get,
 	.info_get = scmi_sensor_info_get,
-	.configuration_set = scmi_sensor_configuration_set,
-	.trip_point_set = scmi_sensor_trip_point_set,
+	.trip_point_notify = scmi_sensor_trip_point_notify,
+	.trip_point_config = scmi_sensor_trip_point_config,
 	.reading_get = scmi_sensor_reading_get,
 };
 
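The descriptor parsing above now decodes the sensor scale as a 5-bit two's-complement exponent: SENSOR_SCALE() masks bits [15:11], and when bit 4 of the result is set the upper bits are or-ed in so the value becomes a proper signed 8-bit quantity. A standalone userspace illustration of that widening follows; the helper name is hypothetical and the constants simply mirror the macros in the hunk above.

#include <stdint.h>
#include <stdio.h>

/* Mirrors SENSOR_SCALE(), SENSOR_SCALE_SIGN and SENSOR_SCALE_EXTEND above. */
static int8_t scmi_scale_to_s8(uint32_t attributes_high)
{
	uint8_t scale = (attributes_high >> 11) & 0x1f;

	if (scale & 0x10)	/* sign bit of the 5-bit field */
		scale |= 0xe0;	/* extend into bits 7..5 */

	return (int8_t)scale;
}

int main(void)
{
	/* 0x1d is -3 in 5-bit two's complement, e.g. a reading in milli-units */
	printf("%d\n", scmi_scale_to_s8(0x1du << 11));	/* prints -3 */
	return 0;
}
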
diff --git a/drivers/firmware/arm_scpi.c b/drivers/firmware/arm_scpi.c
index c7d06a3..a80c331 100644
--- a/drivers/firmware/arm_scpi.c
+++ b/drivers/firmware/arm_scpi.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * System Control and Power Interface (SCPI) Message Protocol driver
  *
@@ -11,18 +12,6 @@
  * clocks configuration, thermal sensors and many others.
  *
  * Copyright (C) 2015 ARM Ltd.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program. If not, see <http://www.gnu.org/licenses/>.
  */
 
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
@@ -1022,10 +1011,6 @@
 				   scpi_info->firmware_version));
 	scpi_info->scpi_ops = &scpi_ops;
 
-	ret = devm_device_add_groups(dev, versions_groups);
-	if (ret)
-		dev_err(dev, "unable to create sysfs version group\n");
-
 	return devm_of_platform_populate(dev);
 }
 
@@ -1041,6 +1026,7 @@
 	.driver = {
 		.name = "scpi_protocol",
 		.of_match_table = scpi_of_match,
+		.dev_groups = versions_groups,
 	},
 	.probe = scpi_probe,
 	.remove = scpi_remove,
diff --git a/drivers/firmware/arm_sdei.c b/drivers/firmware/arm_sdei.c
index 1ea7164..9cd70d1 100644
--- a/drivers/firmware/arm_sdei.c
+++ b/drivers/firmware/arm_sdei.c
@@ -2,6 +2,7 @@
 // Copyright (C) 2017 Arm Ltd.
 #define pr_fmt(fmt) "sdei: " fmt
 
+#include <acpi/ghes.h>
 #include <linux/acpi.h>
 #include <linux/arm_sdei.h>
 #include <linux/arm-smccc.h>
@@ -164,6 +165,7 @@
 
 	return err;
 }
+NOKPROBE_SYMBOL(invoke_sdei_fn);
 
 static struct sdei_event *sdei_event_find(u32 event_num)
 {
@@ -878,6 +880,7 @@
 {
 	arm_smccc_smc(function_id, arg0, arg1, arg2, arg3, arg4, 0, 0, res);
 }
+NOKPROBE_SYMBOL(sdei_smccc_smc);
 
 static void sdei_smccc_hvc(unsigned long function_id,
 			   unsigned long arg0, unsigned long arg1,
@@ -886,6 +889,74 @@
 {
 	arm_smccc_hvc(function_id, arg0, arg1, arg2, arg3, arg4, 0, 0, res);
 }
+NOKPROBE_SYMBOL(sdei_smccc_hvc);
+
+int sdei_register_ghes(struct ghes *ghes, sdei_event_callback *normal_cb,
+		       sdei_event_callback *critical_cb)
+{
+	int err;
+	u64 result;
+	u32 event_num;
+	sdei_event_callback *cb;
+
+	if (!IS_ENABLED(CONFIG_ACPI_APEI_GHES))
+		return -EOPNOTSUPP;
+
+	event_num = ghes->generic->notify.vector;
+	if (event_num == 0) {
+		/*
+		 * Event 0 is reserved by the specification for
+		 * SDEI_EVENT_SIGNAL.
+		 */
+		return -EINVAL;
+	}
+
+	err = sdei_api_event_get_info(event_num, SDEI_EVENT_INFO_EV_PRIORITY,
+				      &result);
+	if (err)
+		return err;
+
+	if (result == SDEI_EVENT_PRIORITY_CRITICAL)
+		cb = critical_cb;
+	else
+		cb = normal_cb;
+
+	err = sdei_event_register(event_num, cb, ghes);
+	if (!err)
+		err = sdei_event_enable(event_num);
+
+	return err;
+}
+
+int sdei_unregister_ghes(struct ghes *ghes)
+{
+	int i;
+	int err;
+	u32 event_num = ghes->generic->notify.vector;
+
+	might_sleep();
+
+	if (!IS_ENABLED(CONFIG_ACPI_APEI_GHES))
+		return -EOPNOTSUPP;
+
+	/*
+	 * The event may be running on another CPU. Disable it
+	 * to stop new events, then try to unregister a few times.
+	 */
+	err = sdei_event_disable(event_num);
+	if (err)
+		return err;
+
+	for (i = 0; i < 3; i++) {
+		err = sdei_event_unregister(event_num);
+		if (err != -EINPROGRESS)
+			break;
+
+		schedule();
+	}
+
+	return err;
+}
 
 static int sdei_get_conduit(struct platform_device *pdev)
 {
@@ -1009,7 +1080,6 @@
 
 static bool __init sdei_present_dt(void)
 {
-	struct platform_device *pdev;
 	struct device_node *np, *fw_np;
 
 	fw_np = of_find_node_by_name(NULL, "firmware");
@@ -1017,14 +1087,9 @@
 		return false;
 
 	np = of_find_matching_node(fw_np, sdei_of_match);
-	of_node_put(fw_np);
 	if (!np)
 		return false;
-
-	pdev = of_platform_device_create(np, sdei_driver.driver.name, NULL);
 	of_node_put(np);
-	if (!pdev)
-		return false;
 
 	return true;
 }
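
The two helpers added above give APEI/GHES a single entry point for SDEI-notified error sources: registration picks the normal or critical callback based on the priority the firmware reports for the event, and teardown disables the event before retrying the unregister while a handler may still be running. A hedged consumer sketch follows, assuming the three-argument sdei_event_callback prototype from <linux/arm_sdei.h>; the example_* names and the empty callback bodies are placeholders for the real GHES handling, not part of this patch.

#include <acpi/ghes.h>
#include <linux/arm_sdei.h>

static int example_sdei_normal_cb(u32 event, struct pt_regs *regs, void *arg)
{
	/* 'arg' is the struct ghes passed to sdei_register_ghes() below */
	return 0;
}

static int example_sdei_critical_cb(u32 event, struct pt_regs *regs, void *arg)
{
	return 0;
}

static int example_ghes_sdei_setup(struct ghes *ghes)
{
	/* registers and enables the event number from the HEST notify vector */
	return sdei_register_ghes(ghes, example_sdei_normal_cb,
				  example_sdei_critical_cb);
}

static int example_ghes_sdei_teardown(struct ghes *ghes)
{
	/* disables the event, then retries unregister until it stops running */
	return sdei_unregister_ghes(ghes);
}
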
diff --git a/drivers/firmware/broadcom/Kconfig b/drivers/firmware/broadcom/Kconfig
index f77cdb3..d03ed8e 100644
--- a/drivers/firmware/broadcom/Kconfig
+++ b/drivers/firmware/broadcom/Kconfig
@@ -1,6 +1,7 @@
+# SPDX-License-Identifier: GPL-2.0-only
 config BCM47XX_NVRAM
 	bool "Broadcom NVRAM driver"
-	depends on BCM47XX || ARCH_BCM_5301X
+	depends on BCM47XX || ARCH_BCM_5301X || COMPILE_TEST
 	help
 	  Broadcom home routers contain flash partition called "nvram" with all
 	  important hardware configuration as well as some minor user setup.
diff --git a/drivers/firmware/broadcom/Makefile b/drivers/firmware/broadcom/Makefile
index f93efc4..72c7fdc 100644
--- a/drivers/firmware/broadcom/Makefile
+++ b/drivers/firmware/broadcom/Makefile
@@ -1,2 +1,3 @@
+# SPDX-License-Identifier: GPL-2.0-only
 obj-$(CONFIG_BCM47XX_NVRAM)		+= bcm47xx_nvram.o
 obj-$(CONFIG_BCM47XX_SPROM)		+= bcm47xx_sprom.o
diff --git a/drivers/firmware/broadcom/bcm47xx_nvram.c b/drivers/firmware/broadcom/bcm47xx_nvram.c
index d25f080..da04fda 100644
--- a/drivers/firmware/broadcom/bcm47xx_nvram.c
+++ b/drivers/firmware/broadcom/bcm47xx_nvram.c
@@ -1,14 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
 /*
  * BCM947xx nvram variable access
  *
  * Copyright (C) 2005 Broadcom Corporation
  * Copyright (C) 2006 Felix Fietkau <nbd@openwrt.org>
  * Copyright (C) 2010-2012 Hauke Mehrtens <hauke@hauke-m.de>
- *
- * This program is free software; you can redistribute	it and/or modify it
- * under  the terms of	the GNU General	 Public License as published by the
- * Free Software Foundation;  either version 2 of the  License, or (at your
- * option) any later version.
  */
 
 #include <linux/io.h>
@@ -100,7 +96,7 @@
 		nvram_len = size;
 	}
 	if (nvram_len >= NVRAM_SPACE) {
-		pr_err("nvram on flash (%i bytes) is bigger than the reserved space in memory, will just copy the first %i bytes\n",
+		pr_err("nvram on flash (%zu bytes) is bigger than the reserved space in memory, will just copy the first %i bytes\n",
 		       nvram_len, NVRAM_SPACE - 1);
 		nvram_len = NVRAM_SPACE - 1;
 	}
@@ -152,8 +148,8 @@
 	    header.len > sizeof(header)) {
 		nvram_len = header.len;
 		if (nvram_len >= NVRAM_SPACE) {
-			pr_err("nvram on flash (%i bytes) is bigger than the reserved space in memory, will just copy the first %i bytes\n",
-				header.len, NVRAM_SPACE);
+			pr_err("nvram on flash (%zu bytes) is bigger than the reserved space in memory, will just copy the first %i bytes\n",
+				nvram_len, NVRAM_SPACE);
 			nvram_len = NVRAM_SPACE - 1;
 		}
 
diff --git a/drivers/firmware/dcdbas.c b/drivers/firmware/dcdbas.c
deleted file mode 100644
index 0bdea60..0000000
--- a/drivers/firmware/dcdbas.c
+++ /dev/null
@@ -1,650 +0,0 @@
-/*
- *  dcdbas.c: Dell Systems Management Base Driver
- *
- *  The Dell Systems Management Base Driver provides a sysfs interface for
- *  systems management software to perform System Management Interrupts (SMIs)
- *  and Host Control Actions (power cycle or power off after OS shutdown) on
- *  Dell systems.
- *
- *  See Documentation/dcdbas.txt for more information.
- *
- *  Copyright (C) 1995-2006 Dell Inc.
- *
- *  This program is free software; you can redistribute it and/or modify
- *  it under the terms of the GNU General Public License v2.0 as published by
- *  the Free Software Foundation.
- *
- *  This program is distributed in the hope that it will be useful,
- *  but WITHOUT ANY WARRANTY; without even the implied warranty of
- *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- *  GNU General Public License for more details.
- */
-
-#include <linux/platform_device.h>
-#include <linux/dma-mapping.h>
-#include <linux/errno.h>
-#include <linux/cpu.h>
-#include <linux/gfp.h>
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/mc146818rtc.h>
-#include <linux/module.h>
-#include <linux/reboot.h>
-#include <linux/sched.h>
-#include <linux/smp.h>
-#include <linux/spinlock.h>
-#include <linux/string.h>
-#include <linux/types.h>
-#include <linux/mutex.h>
-#include <asm/io.h>
-
-#include "dcdbas.h"
-
-#define DRIVER_NAME		"dcdbas"
-#define DRIVER_VERSION		"5.6.0-3.2"
-#define DRIVER_DESCRIPTION	"Dell Systems Management Base Driver"
-
-static struct platform_device *dcdbas_pdev;
-
-static u8 *smi_data_buf;
-static dma_addr_t smi_data_buf_handle;
-static unsigned long smi_data_buf_size;
-static u32 smi_data_buf_phys_addr;
-static DEFINE_MUTEX(smi_data_lock);
-
-static unsigned int host_control_action;
-static unsigned int host_control_smi_type;
-static unsigned int host_control_on_shutdown;
-
-/**
- * smi_data_buf_free: free SMI data buffer
- */
-static void smi_data_buf_free(void)
-{
-	if (!smi_data_buf)
-		return;
-
-	dev_dbg(&dcdbas_pdev->dev, "%s: phys: %x size: %lu\n",
-		__func__, smi_data_buf_phys_addr, smi_data_buf_size);
-
-	dma_free_coherent(&dcdbas_pdev->dev, smi_data_buf_size, smi_data_buf,
-			  smi_data_buf_handle);
-	smi_data_buf = NULL;
-	smi_data_buf_handle = 0;
-	smi_data_buf_phys_addr = 0;
-	smi_data_buf_size = 0;
-}
-
-/**
- * smi_data_buf_realloc: grow SMI data buffer if needed
- */
-static int smi_data_buf_realloc(unsigned long size)
-{
-	void *buf;
-	dma_addr_t handle;
-
-	if (smi_data_buf_size >= size)
-		return 0;
-
-	if (size > MAX_SMI_DATA_BUF_SIZE)
-		return -EINVAL;
-
-	/* new buffer is needed */
-	buf = dma_alloc_coherent(&dcdbas_pdev->dev, size, &handle, GFP_KERNEL);
-	if (!buf) {
-		dev_dbg(&dcdbas_pdev->dev,
-			"%s: failed to allocate memory size %lu\n",
-			__func__, size);
-		return -ENOMEM;
-	}
-	/* memory zeroed by dma_alloc_coherent */
-
-	if (smi_data_buf)
-		memcpy(buf, smi_data_buf, smi_data_buf_size);
-
-	/* free any existing buffer */
-	smi_data_buf_free();
-
-	/* set up new buffer for use */
-	smi_data_buf = buf;
-	smi_data_buf_handle = handle;
-	smi_data_buf_phys_addr = (u32) virt_to_phys(buf);
-	smi_data_buf_size = size;
-
-	dev_dbg(&dcdbas_pdev->dev, "%s: phys: %x size: %lu\n",
-		__func__, smi_data_buf_phys_addr, smi_data_buf_size);
-
-	return 0;
-}
-
-static ssize_t smi_data_buf_phys_addr_show(struct device *dev,
-					   struct device_attribute *attr,
-					   char *buf)
-{
-	return sprintf(buf, "%x\n", smi_data_buf_phys_addr);
-}
-
-static ssize_t smi_data_buf_size_show(struct device *dev,
-				      struct device_attribute *attr,
-				      char *buf)
-{
-	return sprintf(buf, "%lu\n", smi_data_buf_size);
-}
-
-static ssize_t smi_data_buf_size_store(struct device *dev,
-				       struct device_attribute *attr,
-				       const char *buf, size_t count)
-{
-	unsigned long buf_size;
-	ssize_t ret;
-
-	buf_size = simple_strtoul(buf, NULL, 10);
-
-	/* make sure SMI data buffer is at least buf_size */
-	mutex_lock(&smi_data_lock);
-	ret = smi_data_buf_realloc(buf_size);
-	mutex_unlock(&smi_data_lock);
-	if (ret)
-		return ret;
-
-	return count;
-}
-
-static ssize_t smi_data_read(struct file *filp, struct kobject *kobj,
-			     struct bin_attribute *bin_attr,
-			     char *buf, loff_t pos, size_t count)
-{
-	ssize_t ret;
-
-	mutex_lock(&smi_data_lock);
-	ret = memory_read_from_buffer(buf, count, &pos, smi_data_buf,
-					smi_data_buf_size);
-	mutex_unlock(&smi_data_lock);
-	return ret;
-}
-
-static ssize_t smi_data_write(struct file *filp, struct kobject *kobj,
-			      struct bin_attribute *bin_attr,
-			      char *buf, loff_t pos, size_t count)
-{
-	ssize_t ret;
-
-	if ((pos + count) > MAX_SMI_DATA_BUF_SIZE)
-		return -EINVAL;
-
-	mutex_lock(&smi_data_lock);
-
-	ret = smi_data_buf_realloc(pos + count);
-	if (ret)
-		goto out;
-
-	memcpy(smi_data_buf + pos, buf, count);
-	ret = count;
-out:
-	mutex_unlock(&smi_data_lock);
-	return ret;
-}
-
-static ssize_t host_control_action_show(struct device *dev,
-					struct device_attribute *attr,
-					char *buf)
-{
-	return sprintf(buf, "%u\n", host_control_action);
-}
-
-static ssize_t host_control_action_store(struct device *dev,
-					 struct device_attribute *attr,
-					 const char *buf, size_t count)
-{
-	ssize_t ret;
-
-	/* make sure buffer is available for host control command */
-	mutex_lock(&smi_data_lock);
-	ret = smi_data_buf_realloc(sizeof(struct apm_cmd));
-	mutex_unlock(&smi_data_lock);
-	if (ret)
-		return ret;
-
-	host_control_action = simple_strtoul(buf, NULL, 10);
-	return count;
-}
-
-static ssize_t host_control_smi_type_show(struct device *dev,
-					  struct device_attribute *attr,
-					  char *buf)
-{
-	return sprintf(buf, "%u\n", host_control_smi_type);
-}
-
-static ssize_t host_control_smi_type_store(struct device *dev,
-					   struct device_attribute *attr,
-					   const char *buf, size_t count)
-{
-	host_control_smi_type = simple_strtoul(buf, NULL, 10);
-	return count;
-}
-
-static ssize_t host_control_on_shutdown_show(struct device *dev,
-					     struct device_attribute *attr,
-					     char *buf)
-{
-	return sprintf(buf, "%u\n", host_control_on_shutdown);
-}
-
-static ssize_t host_control_on_shutdown_store(struct device *dev,
-					      struct device_attribute *attr,
-					      const char *buf, size_t count)
-{
-	host_control_on_shutdown = simple_strtoul(buf, NULL, 10);
-	return count;
-}
-
-static int raise_smi(void *par)
-{
-	struct smi_cmd *smi_cmd = par;
-
-	if (smp_processor_id() != 0) {
-		dev_dbg(&dcdbas_pdev->dev, "%s: failed to get CPU 0\n",
-			__func__);
-		return -EBUSY;
-	}
-
-	/* generate SMI */
-	/* inb to force posted write through and make SMI happen now */
-	asm volatile (
-		"outb %b0,%w1\n"
-		"inb %w1"
-		: /* no output args */
-		: "a" (smi_cmd->command_code),
-		  "d" (smi_cmd->command_address),
-		  "b" (smi_cmd->ebx),
-		  "c" (smi_cmd->ecx)
-		: "memory"
-	);
-
-	return 0;
-}
-/**
- * dcdbas_smi_request: generate SMI request
- *
- * Called with smi_data_lock.
- */
-int dcdbas_smi_request(struct smi_cmd *smi_cmd)
-{
-	int ret;
-
-	if (smi_cmd->magic != SMI_CMD_MAGIC) {
-		dev_info(&dcdbas_pdev->dev, "%s: invalid magic value\n",
-			 __func__);
-		return -EBADR;
-	}
-
-	/* SMI requires CPU 0 */
-	get_online_cpus();
-	ret = smp_call_on_cpu(0, raise_smi, smi_cmd, true);
-	put_online_cpus();
-
-	return ret;
-}
-
-/**
- * smi_request_store:
- *
- * The valid values are:
- * 0: zero SMI data buffer
- * 1: generate calling interface SMI
- * 2: generate raw SMI
- *
- * User application writes smi_cmd to smi_data before telling driver
- * to generate SMI.
- */
-static ssize_t smi_request_store(struct device *dev,
-				 struct device_attribute *attr,
-				 const char *buf, size_t count)
-{
-	struct smi_cmd *smi_cmd;
-	unsigned long val = simple_strtoul(buf, NULL, 10);
-	ssize_t ret;
-
-	mutex_lock(&smi_data_lock);
-
-	if (smi_data_buf_size < sizeof(struct smi_cmd)) {
-		ret = -ENODEV;
-		goto out;
-	}
-	smi_cmd = (struct smi_cmd *)smi_data_buf;
-
-	switch (val) {
-	case 2:
-		/* Raw SMI */
-		ret = dcdbas_smi_request(smi_cmd);
-		if (!ret)
-			ret = count;
-		break;
-	case 1:
-		/* Calling Interface SMI */
-		smi_cmd->ebx = (u32) virt_to_phys(smi_cmd->command_buffer);
-		ret = dcdbas_smi_request(smi_cmd);
-		if (!ret)
-			ret = count;
-		break;
-	case 0:
-		memset(smi_data_buf, 0, smi_data_buf_size);
-		ret = count;
-		break;
-	default:
-		ret = -EINVAL;
-		break;
-	}
-
-out:
-	mutex_unlock(&smi_data_lock);
-	return ret;
-}
-EXPORT_SYMBOL(dcdbas_smi_request);
-
-/**
- * host_control_smi: generate host control SMI
- *
- * Caller must set up the host control command in smi_data_buf.
- */
-static int host_control_smi(void)
-{
-	struct apm_cmd *apm_cmd;
-	u8 *data;
-	unsigned long flags;
-	u32 num_ticks;
-	s8 cmd_status;
-	u8 index;
-
-	apm_cmd = (struct apm_cmd *)smi_data_buf;
-	apm_cmd->status = ESM_STATUS_CMD_UNSUCCESSFUL;
-
-	switch (host_control_smi_type) {
-	case HC_SMITYPE_TYPE1:
-		spin_lock_irqsave(&rtc_lock, flags);
-		/* write SMI data buffer physical address */
-		data = (u8 *)&smi_data_buf_phys_addr;
-		for (index = PE1300_CMOS_CMD_STRUCT_PTR;
-		     index < (PE1300_CMOS_CMD_STRUCT_PTR + 4);
-		     index++, data++) {
-			outb(index,
-			     (CMOS_BASE_PORT + CMOS_PAGE2_INDEX_PORT_PIIX4));
-			outb(*data,
-			     (CMOS_BASE_PORT + CMOS_PAGE2_DATA_PORT_PIIX4));
-		}
-
-		/* first set status to -1 as called by spec */
-		cmd_status = ESM_STATUS_CMD_UNSUCCESSFUL;
-		outb((u8) cmd_status, PCAT_APM_STATUS_PORT);
-
-		/* generate SMM call */
-		outb(ESM_APM_CMD, PCAT_APM_CONTROL_PORT);
-		spin_unlock_irqrestore(&rtc_lock, flags);
-
-		/* wait a few to see if it executed */
-		num_ticks = TIMEOUT_USEC_SHORT_SEMA_BLOCKING;
-		while ((cmd_status = inb(PCAT_APM_STATUS_PORT))
-		       == ESM_STATUS_CMD_UNSUCCESSFUL) {
-			num_ticks--;
-			if (num_ticks == EXPIRED_TIMER)
-				return -ETIME;
-		}
-		break;
-
-	case HC_SMITYPE_TYPE2:
-	case HC_SMITYPE_TYPE3:
-		spin_lock_irqsave(&rtc_lock, flags);
-		/* write SMI data buffer physical address */
-		data = (u8 *)&smi_data_buf_phys_addr;
-		for (index = PE1400_CMOS_CMD_STRUCT_PTR;
-		     index < (PE1400_CMOS_CMD_STRUCT_PTR + 4);
-		     index++, data++) {
-			outb(index, (CMOS_BASE_PORT + CMOS_PAGE1_INDEX_PORT));
-			outb(*data, (CMOS_BASE_PORT + CMOS_PAGE1_DATA_PORT));
-		}
-
-		/* generate SMM call */
-		if (host_control_smi_type == HC_SMITYPE_TYPE3)
-			outb(ESM_APM_CMD, PCAT_APM_CONTROL_PORT);
-		else
-			outb(ESM_APM_CMD, PE1400_APM_CONTROL_PORT);
-
-		/* restore RTC index pointer since it was written to above */
-		CMOS_READ(RTC_REG_C);
-		spin_unlock_irqrestore(&rtc_lock, flags);
-
-		/* read control port back to serialize write */
-		cmd_status = inb(PE1400_APM_CONTROL_PORT);
-
-		/* wait a few to see if it executed */
-		num_ticks = TIMEOUT_USEC_SHORT_SEMA_BLOCKING;
-		while (apm_cmd->status == ESM_STATUS_CMD_UNSUCCESSFUL) {
-			num_ticks--;
-			if (num_ticks == EXPIRED_TIMER)
-				return -ETIME;
-		}
-		break;
-
-	default:
-		dev_dbg(&dcdbas_pdev->dev, "%s: invalid SMI type %u\n",
-			__func__, host_control_smi_type);
-		return -ENOSYS;
-	}
-
-	return 0;
-}
-
-/**
- * dcdbas_host_control: initiate host control
- *
- * This function is called by the driver after the system has
- * finished shutting down if the user application specified a
- * host control action to perform on shutdown.  It is safe to
- * use smi_data_buf at this point because the system has finished
- * shutting down and no userspace apps are running.
- */
-static void dcdbas_host_control(void)
-{
-	struct apm_cmd *apm_cmd;
-	u8 action;
-
-	if (host_control_action == HC_ACTION_NONE)
-		return;
-
-	action = host_control_action;
-	host_control_action = HC_ACTION_NONE;
-
-	if (!smi_data_buf) {
-		dev_dbg(&dcdbas_pdev->dev, "%s: no SMI buffer\n", __func__);
-		return;
-	}
-
-	if (smi_data_buf_size < sizeof(struct apm_cmd)) {
-		dev_dbg(&dcdbas_pdev->dev, "%s: SMI buffer too small\n",
-			__func__);
-		return;
-	}
-
-	apm_cmd = (struct apm_cmd *)smi_data_buf;
-
-	/* power off takes precedence */
-	if (action & HC_ACTION_HOST_CONTROL_POWEROFF) {
-		apm_cmd->command = ESM_APM_POWER_CYCLE;
-		apm_cmd->reserved = 0;
-		*((s16 *)&apm_cmd->parameters.shortreq.parm[0]) = (s16) 0;
-		host_control_smi();
-	} else if (action & HC_ACTION_HOST_CONTROL_POWERCYCLE) {
-		apm_cmd->command = ESM_APM_POWER_CYCLE;
-		apm_cmd->reserved = 0;
-		*((s16 *)&apm_cmd->parameters.shortreq.parm[0]) = (s16) 20;
-		host_control_smi();
-	}
-}
-
-/**
- * dcdbas_reboot_notify: handle reboot notification for host control
- */
-static int dcdbas_reboot_notify(struct notifier_block *nb, unsigned long code,
-				void *unused)
-{
-	switch (code) {
-	case SYS_DOWN:
-	case SYS_HALT:
-	case SYS_POWER_OFF:
-		if (host_control_on_shutdown) {
-			/* firmware is going to perform host control action */
-			printk(KERN_WARNING "Please wait for shutdown "
-			       "action to complete...\n");
-			dcdbas_host_control();
-		}
-		break;
-	}
-
-	return NOTIFY_DONE;
-}
-
-static struct notifier_block dcdbas_reboot_nb = {
-	.notifier_call = dcdbas_reboot_notify,
-	.next = NULL,
-	.priority = INT_MIN
-};
-
-static DCDBAS_BIN_ATTR_RW(smi_data);
-
-static struct bin_attribute *dcdbas_bin_attrs[] = {
-	&bin_attr_smi_data,
-	NULL
-};
-
-static DCDBAS_DEV_ATTR_RW(smi_data_buf_size);
-static DCDBAS_DEV_ATTR_RO(smi_data_buf_phys_addr);
-static DCDBAS_DEV_ATTR_WO(smi_request);
-static DCDBAS_DEV_ATTR_RW(host_control_action);
-static DCDBAS_DEV_ATTR_RW(host_control_smi_type);
-static DCDBAS_DEV_ATTR_RW(host_control_on_shutdown);
-
-static struct attribute *dcdbas_dev_attrs[] = {
-	&dev_attr_smi_data_buf_size.attr,
-	&dev_attr_smi_data_buf_phys_addr.attr,
-	&dev_attr_smi_request.attr,
-	&dev_attr_host_control_action.attr,
-	&dev_attr_host_control_smi_type.attr,
-	&dev_attr_host_control_on_shutdown.attr,
-	NULL
-};
-
-static const struct attribute_group dcdbas_attr_group = {
-	.attrs = dcdbas_dev_attrs,
-	.bin_attrs = dcdbas_bin_attrs,
-};
-
-static int dcdbas_probe(struct platform_device *dev)
-{
-	int error;
-
-	host_control_action = HC_ACTION_NONE;
-	host_control_smi_type = HC_SMITYPE_NONE;
-
-	dcdbas_pdev = dev;
-
-	/*
-	 * BIOS SMI calls require buffer addresses be in 32-bit address space.
-	 * This is done by setting the DMA mask below.
-	 */
-	error = dma_set_coherent_mask(&dcdbas_pdev->dev, DMA_BIT_MASK(32));
-	if (error)
-		return error;
-
-	error = sysfs_create_group(&dev->dev.kobj, &dcdbas_attr_group);
-	if (error)
-		return error;
-
-	register_reboot_notifier(&dcdbas_reboot_nb);
-
-	dev_info(&dev->dev, "%s (version %s)\n",
-		 DRIVER_DESCRIPTION, DRIVER_VERSION);
-
-	return 0;
-}
-
-static int dcdbas_remove(struct platform_device *dev)
-{
-	unregister_reboot_notifier(&dcdbas_reboot_nb);
-	sysfs_remove_group(&dev->dev.kobj, &dcdbas_attr_group);
-
-	return 0;
-}
-
-static struct platform_driver dcdbas_driver = {
-	.driver		= {
-		.name	= DRIVER_NAME,
-	},
-	.probe		= dcdbas_probe,
-	.remove		= dcdbas_remove,
-};
-
-static const struct platform_device_info dcdbas_dev_info __initconst = {
-	.name		= DRIVER_NAME,
-	.id		= -1,
-	.dma_mask	= DMA_BIT_MASK(32),
-};
-
-static struct platform_device *dcdbas_pdev_reg;
-
-/**
- * dcdbas_init: initialize driver
- */
-static int __init dcdbas_init(void)
-{
-	int error;
-
-	error = platform_driver_register(&dcdbas_driver);
-	if (error)
-		return error;
-
-	dcdbas_pdev_reg = platform_device_register_full(&dcdbas_dev_info);
-	if (IS_ERR(dcdbas_pdev_reg)) {
-		error = PTR_ERR(dcdbas_pdev_reg);
-		goto err_unregister_driver;
-	}
-
-	return 0;
-
- err_unregister_driver:
-	platform_driver_unregister(&dcdbas_driver);
-	return error;
-}
-
-/**
- * dcdbas_exit: perform driver cleanup
- */
-static void __exit dcdbas_exit(void)
-{
-	/*
-	 * make sure functions that use dcdbas_pdev are called
-	 * before platform_device_unregister
-	 */
-	unregister_reboot_notifier(&dcdbas_reboot_nb);
-
-	/*
-	 * We have to free the buffer here instead of dcdbas_remove
-	 * because only in module exit function we can be sure that
-	 * all sysfs attributes belonging to this module have been
-	 * released.
-	 */
-	if (dcdbas_pdev)
-		smi_data_buf_free();
-	platform_device_unregister(dcdbas_pdev_reg);
-	platform_driver_unregister(&dcdbas_driver);
-}
-
-subsys_initcall_sync(dcdbas_init);
-module_exit(dcdbas_exit);
-
-MODULE_DESCRIPTION(DRIVER_DESCRIPTION " (version " DRIVER_VERSION ")");
-MODULE_VERSION(DRIVER_VERSION);
-MODULE_AUTHOR("Dell Inc.");
-MODULE_LICENSE("GPL");
-/* Any System or BIOS claiming to be by Dell */
-MODULE_ALIAS("dmi:*:[bs]vnD[Ee][Ll][Ll]*:*");
diff --git a/drivers/firmware/dcdbas.h b/drivers/firmware/dcdbas.h
deleted file mode 100644
index ca3cb0a..0000000
--- a/drivers/firmware/dcdbas.h
+++ /dev/null
@@ -1,107 +0,0 @@
-/*
- *  dcdbas.h: Definitions for Dell Systems Management Base driver
- *
- *  Copyright (C) 1995-2005 Dell Inc.
- *
- *  This program is free software; you can redistribute it and/or modify
- *  it under the terms of the GNU General Public License v2.0 as published by
- *  the Free Software Foundation.
- *
- *  This program is distributed in the hope that it will be useful,
- *  but WITHOUT ANY WARRANTY; without even the implied warranty of
- *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- *  GNU General Public License for more details.
- */
-
-#ifndef _DCDBAS_H_
-#define _DCDBAS_H_
-
-#include <linux/device.h>
-#include <linux/sysfs.h>
-#include <linux/types.h>
-
-#define MAX_SMI_DATA_BUF_SIZE			(256 * 1024)
-
-#define HC_ACTION_NONE				(0)
-#define HC_ACTION_HOST_CONTROL_POWEROFF		BIT(1)
-#define HC_ACTION_HOST_CONTROL_POWERCYCLE	BIT(2)
-
-#define HC_SMITYPE_NONE				(0)
-#define HC_SMITYPE_TYPE1			(1)
-#define HC_SMITYPE_TYPE2			(2)
-#define HC_SMITYPE_TYPE3			(3)
-
-#define ESM_APM_CMD				(0x0A0)
-#define ESM_APM_POWER_CYCLE			(0x10)
-#define ESM_STATUS_CMD_UNSUCCESSFUL		(-1)
-
-#define CMOS_BASE_PORT				(0x070)
-#define CMOS_PAGE1_INDEX_PORT			(0)
-#define CMOS_PAGE1_DATA_PORT			(1)
-#define CMOS_PAGE2_INDEX_PORT_PIIX4		(2)
-#define CMOS_PAGE2_DATA_PORT_PIIX4		(3)
-#define PE1400_APM_CONTROL_PORT			(0x0B0)
-#define PCAT_APM_CONTROL_PORT			(0x0B2)
-#define PCAT_APM_STATUS_PORT			(0x0B3)
-#define PE1300_CMOS_CMD_STRUCT_PTR		(0x38)
-#define PE1400_CMOS_CMD_STRUCT_PTR		(0x70)
-
-#define MAX_SYSMGMT_SHORTCMD_PARMBUF_LEN	(14)
-#define MAX_SYSMGMT_LONGCMD_SGENTRY_NUM		(16)
-
-#define TIMEOUT_USEC_SHORT_SEMA_BLOCKING	(10000)
-#define EXPIRED_TIMER				(0)
-
-#define SMI_CMD_MAGIC				(0x534D4931)
-
-#define DCDBAS_DEV_ATTR_RW(_name) \
-	DEVICE_ATTR(_name,0600,_name##_show,_name##_store);
-
-#define DCDBAS_DEV_ATTR_RO(_name) \
-	DEVICE_ATTR(_name,0400,_name##_show,NULL);
-
-#define DCDBAS_DEV_ATTR_WO(_name) \
-	DEVICE_ATTR(_name,0200,NULL,_name##_store);
-
-#define DCDBAS_BIN_ATTR_RW(_name) \
-struct bin_attribute bin_attr_##_name = { \
-	.attr =  { .name = __stringify(_name), \
-		   .mode = 0600 }, \
-	.read =  _name##_read, \
-	.write = _name##_write, \
-}
-
-struct smi_cmd {
-	__u32 magic;
-	__u32 ebx;
-	__u32 ecx;
-	__u16 command_address;
-	__u8 command_code;
-	__u8 reserved;
-	__u8 command_buffer[1];
-} __attribute__ ((packed));
-
-struct apm_cmd {
-	__u8 command;
-	__s8 status;
-	__u16 reserved;
-	union {
-		struct {
-			__u8 parm[MAX_SYSMGMT_SHORTCMD_PARMBUF_LEN];
-		} __attribute__ ((packed)) shortreq;
-
-		struct {
-			__u16 num_sg_entries;
-			struct {
-				__u32 size;
-				__u64 addr;
-			} __attribute__ ((packed))
-			    sglist[MAX_SYSMGMT_LONGCMD_SGENTRY_NUM];
-		} __attribute__ ((packed)) longreq;
-	} __attribute__ ((packed)) parameters;
-} __attribute__ ((packed));
-
-int dcdbas_smi_request(struct smi_cmd *smi_cmd);
-
-#endif /* _DCDBAS_H_ */
-
diff --git a/drivers/firmware/dell_rbu.c b/drivers/firmware/dell_rbu.c
deleted file mode 100644
index fb8af5c..0000000
--- a/drivers/firmware/dell_rbu.c
+++ /dev/null
@@ -1,745 +0,0 @@
-/*
- * dell_rbu.c
- * Bios Update driver for Dell systems
- * Author: Dell Inc
- *         Abhay Salunke <abhay_salunke@dell.com>
- *
- * Copyright (C) 2005 Dell Inc.
- *
- * Remote BIOS Update (rbu) driver is used for updating DELL BIOS by
- * creating entries in the /sys file systems on Linux 2.6 and higher
- * kernels. The driver supports two mechanism to update the BIOS namely
- * contiguous and packetized. Both these methods still require having some
- * application to set the CMOS bit indicating the BIOS to update itself
- * after a reboot.
- *
- * Contiguous method:
- * This driver writes the incoming data in a monolithic image by allocating
- * contiguous physical pages large enough to accommodate the incoming BIOS
- * image size.
- *
- * Packetized method:
- * The driver writes the incoming packet image by allocating a new packet
- * on every time the packet data is written. This driver requires an
- * application to break the BIOS image in to fixed sized packet chunks.
- *
- * See Documentation/dell_rbu.txt for more info.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License v2.0 as published by
- * the Free Software Foundation
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- */
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/slab.h>
-#include <linux/string.h>
-#include <linux/errno.h>
-#include <linux/blkdev.h>
-#include <linux/platform_device.h>
-#include <linux/spinlock.h>
-#include <linux/moduleparam.h>
-#include <linux/firmware.h>
-#include <linux/dma-mapping.h>
-
-MODULE_AUTHOR("Abhay Salunke <abhay_salunke@dell.com>");
-MODULE_DESCRIPTION("Driver for updating BIOS image on DELL systems");
-MODULE_LICENSE("GPL");
-MODULE_VERSION("3.2");
-
-#define BIOS_SCAN_LIMIT 0xffffffff
-#define MAX_IMAGE_LENGTH 16
-static struct _rbu_data {
-	void *image_update_buffer;
-	unsigned long image_update_buffer_size;
-	unsigned long bios_image_size;
-	int image_update_ordernum;
-	int dma_alloc;
-	spinlock_t lock;
-	unsigned long packet_read_count;
-	unsigned long num_packets;
-	unsigned long packetsize;
-	unsigned long imagesize;
-	int entry_created;
-} rbu_data;
-
-static char image_type[MAX_IMAGE_LENGTH + 1] = "mono";
-module_param_string(image_type, image_type, sizeof (image_type), 0);
-MODULE_PARM_DESC(image_type,
-	"BIOS image type. choose- mono or packet or init");
-
-static unsigned long allocation_floor = 0x100000;
-module_param(allocation_floor, ulong, 0644);
-MODULE_PARM_DESC(allocation_floor,
-    "Minimum address for allocations when using Packet mode");
-
-struct packet_data {
-	struct list_head list;
-	size_t length;
-	void *data;
-	int ordernum;
-};
-
-static struct packet_data packet_data_head;
-
-static struct platform_device *rbu_device;
-static int context;
-static dma_addr_t dell_rbu_dmaaddr;
-
-static void init_packet_head(void)
-{
-	INIT_LIST_HEAD(&packet_data_head.list);
-	rbu_data.packet_read_count = 0;
-	rbu_data.num_packets = 0;
-	rbu_data.packetsize = 0;
-	rbu_data.imagesize = 0;
-}
-
-static int create_packet(void *data, size_t length)
-{
-	struct packet_data *newpacket;
-	int ordernum = 0;
-	int retval = 0;
-	unsigned int packet_array_size = 0;
-	void **invalid_addr_packet_array = NULL;
-	void *packet_data_temp_buf = NULL;
-	unsigned int idx = 0;
-
-	pr_debug("create_packet: entry \n");
-
-	if (!rbu_data.packetsize) {
-		pr_debug("create_packet: packetsize not specified\n");
-		retval = -EINVAL;
-		goto out_noalloc;
-	}
-
-	spin_unlock(&rbu_data.lock);
-
-	newpacket = kzalloc(sizeof (struct packet_data), GFP_KERNEL);
-
-	if (!newpacket) {
-		printk(KERN_WARNING
-			"dell_rbu:%s: failed to allocate new "
-			"packet\n", __func__);
-		retval = -ENOMEM;
-		spin_lock(&rbu_data.lock);
-		goto out_noalloc;
-	}
-
-	ordernum = get_order(length);
-
-	/*
-	 * BIOS errata mean we cannot allocate packets below 1MB or they will
-	 * be overwritten by BIOS.
-	 *
-	 * array to temporarily hold packets
-	 * that are below the allocation floor
-	 *
-	 * NOTE: very simplistic because we only need the floor to be at 1MB
-	 *       due to BIOS errata. This shouldn't be used for higher floors
-	 *       or you will run out of mem trying to allocate the array.
-	 */
-	packet_array_size = max(
-	       		(unsigned int)(allocation_floor / rbu_data.packetsize),
-			(unsigned int)1);
-	invalid_addr_packet_array = kcalloc(packet_array_size, sizeof(void *),
-						GFP_KERNEL);
-
-	if (!invalid_addr_packet_array) {
-		printk(KERN_WARNING
-			"dell_rbu:%s: failed to allocate "
-			"invalid_addr_packet_array \n",
-			__func__);
-		retval = -ENOMEM;
-		spin_lock(&rbu_data.lock);
-		goto out_alloc_packet;
-	}
-
-	while (!packet_data_temp_buf) {
-		packet_data_temp_buf = (unsigned char *)
-			__get_free_pages(GFP_KERNEL, ordernum);
-		if (!packet_data_temp_buf) {
-			printk(KERN_WARNING
-				"dell_rbu:%s: failed to allocate new "
-				"packet\n", __func__);
-			retval = -ENOMEM;
-			spin_lock(&rbu_data.lock);
-			goto out_alloc_packet_array;
-		}
-
-		if ((unsigned long)virt_to_phys(packet_data_temp_buf)
-				< allocation_floor) {
-			pr_debug("packet 0x%lx below floor at 0x%lx.\n",
-					(unsigned long)virt_to_phys(
-						packet_data_temp_buf),
-					allocation_floor);
-			invalid_addr_packet_array[idx++] = packet_data_temp_buf;
-			packet_data_temp_buf = NULL;
-		}
-	}
-	spin_lock(&rbu_data.lock);
-
-	newpacket->data = packet_data_temp_buf;
-
-	pr_debug("create_packet: newpacket at physical addr %lx\n",
-		(unsigned long)virt_to_phys(newpacket->data));
-
-	/* packets may not have fixed size */
-	newpacket->length = length;
-	newpacket->ordernum = ordernum;
-	++rbu_data.num_packets;
-
-	/* initialize the newly created packet headers */
-	INIT_LIST_HEAD(&newpacket->list);
-	list_add_tail(&newpacket->list, &packet_data_head.list);
-
-	memcpy(newpacket->data, data, length);
-
-	pr_debug("create_packet: exit \n");
-
-out_alloc_packet_array:
-	/* always free packet array */
-	for (;idx>0;idx--) {
-		pr_debug("freeing unused packet below floor 0x%lx.\n",
-			(unsigned long)virt_to_phys(
-				invalid_addr_packet_array[idx-1]));
-		free_pages((unsigned long)invalid_addr_packet_array[idx-1],
-			ordernum);
-	}
-	kfree(invalid_addr_packet_array);
-
-out_alloc_packet:
-	/* if error, free data */
-	if (retval)
-		kfree(newpacket);
-
-out_noalloc:
-	return retval;
-}
-
-static int packetize_data(const u8 *data, size_t length)
-{
-	int rc = 0;
-	int done = 0;
-	int packet_length;
-	u8 *temp;
-	u8 *end = (u8 *) data + length;
-	pr_debug("packetize_data: data length %zd\n", length);
-	if (!rbu_data.packetsize) {
-		printk(KERN_WARNING
-			"dell_rbu: packetsize not specified\n");
-		return -EIO;
-	}
-
-	temp = (u8 *) data;
-
-	/* packetize the hunk */
-	while (!done) {
-		if ((temp + rbu_data.packetsize) < end)
-			packet_length = rbu_data.packetsize;
-		else {
-			/* this is the last packet */
-			packet_length = end - temp;
-			done = 1;
-		}
-
-		if ((rc = create_packet(temp, packet_length)))
-			return rc;
-
-		pr_debug("%p:%td\n", temp, (end - temp));
-		temp += packet_length;
-	}
-
-	rbu_data.imagesize = length;
-
-	return rc;
-}
-
-static int do_packet_read(char *data, struct list_head *ptemp_list,
-	int length, int bytes_read, int *list_read_count)
-{
-	void *ptemp_buf;
-	struct packet_data *newpacket = NULL;
-	int bytes_copied = 0;
-	int j = 0;
-
-	newpacket = list_entry(ptemp_list, struct packet_data, list);
-	*list_read_count += newpacket->length;
-
-	if (*list_read_count > bytes_read) {
-		/* point to the start of unread data */
-		j = newpacket->length - (*list_read_count - bytes_read);
-		/* point to the offset in the packet buffer */
-		ptemp_buf = (u8 *) newpacket->data + j;
-		/*
-		 * check if there is enough room in
-		 * * the incoming buffer
-		 */
-		if (length > (*list_read_count - bytes_read))
-			/*
-			 * copy what ever is there in this
-			 * packet and move on
-			 */
-			bytes_copied = (*list_read_count - bytes_read);
-		else
-			/* copy the remaining */
-			bytes_copied = length;
-		memcpy(data, ptemp_buf, bytes_copied);
-	}
-	return bytes_copied;
-}
-
-static int packet_read_list(char *data, size_t * pread_length)
-{
-	struct list_head *ptemp_list;
-	int temp_count = 0;
-	int bytes_copied = 0;
-	int bytes_read = 0;
-	int remaining_bytes = 0;
-	char *pdest = data;
-
-	/* check if we have any packets */
-	if (0 == rbu_data.num_packets)
-		return -ENOMEM;
-
-	remaining_bytes = *pread_length;
-	bytes_read = rbu_data.packet_read_count;
-
-	ptemp_list = (&packet_data_head.list)->next;
-	while (!list_empty(ptemp_list)) {
-		bytes_copied = do_packet_read(pdest, ptemp_list,
-			remaining_bytes, bytes_read, &temp_count);
-		remaining_bytes -= bytes_copied;
-		bytes_read += bytes_copied;
-		pdest += bytes_copied;
-		/*
-		 * check if we reached end of buffer before reaching the
-		 * last packet
-		 */
-		if (remaining_bytes == 0)
-			break;
-
-		ptemp_list = ptemp_list->next;
-	}
-	/*finally set the bytes read */
-	*pread_length = bytes_read - rbu_data.packet_read_count;
-	rbu_data.packet_read_count = bytes_read;
-	return 0;
-}
-
-static void packet_empty_list(void)
-{
-	struct list_head *ptemp_list;
-	struct list_head *pnext_list;
-	struct packet_data *newpacket;
-
-	ptemp_list = (&packet_data_head.list)->next;
-	while (!list_empty(ptemp_list)) {
-		newpacket =
-			list_entry(ptemp_list, struct packet_data, list);
-		pnext_list = ptemp_list->next;
-		list_del(ptemp_list);
-		ptemp_list = pnext_list;
-		/*
-		 * zero out the RBU packet memory before freeing
-		 * to make sure there are no stale RBU packets left in memory
-		 */
-		memset(newpacket->data, 0, rbu_data.packetsize);
-		free_pages((unsigned long) newpacket->data,
-			newpacket->ordernum);
-		kfree(newpacket);
-	}
-	rbu_data.packet_read_count = 0;
-	rbu_data.num_packets = 0;
-	rbu_data.imagesize = 0;
-}
-
-/*
- * img_update_free: Frees the buffer allocated for storing BIOS image
- * Always called with lock held and returned with lock held
- */
-static void img_update_free(void)
-{
-	if (!rbu_data.image_update_buffer)
-		return;
-	/*
-	 * zero out this buffer before freeing it to get rid of any stale
-	 * BIOS image copied in memory.
-	 */
-	memset(rbu_data.image_update_buffer, 0,
-		rbu_data.image_update_buffer_size);
-	if (rbu_data.dma_alloc == 1)
-		dma_free_coherent(NULL, rbu_data.bios_image_size,
-			rbu_data.image_update_buffer, dell_rbu_dmaaddr);
-	else
-		free_pages((unsigned long) rbu_data.image_update_buffer,
-			rbu_data.image_update_ordernum);
-
-	/*
-	 * Re-initialize the rbu_data variables after a free
-	 */
-	rbu_data.image_update_ordernum = -1;
-	rbu_data.image_update_buffer = NULL;
-	rbu_data.image_update_buffer_size = 0;
-	rbu_data.bios_image_size = 0;
-	rbu_data.dma_alloc = 0;
-}
-
-/*
- * img_update_realloc: This function allocates the contiguous pages to
- * accommodate the requested size of data. The memory address and size
- * values are stored globally and on every call to this function the new
- * size is checked to see if more data is required than the existing size.
- * If true the previous memory is freed and new allocation is done to
- * accommodate the new size. If the incoming size is less then than the
- * already allocated size, then that memory is reused. This function is
- * called with lock held and returns with lock held.
- */
-static int img_update_realloc(unsigned long size)
-{
-	unsigned char *image_update_buffer = NULL;
-	unsigned long rc;
-	unsigned long img_buf_phys_addr;
-	int ordernum;
-	int dma_alloc = 0;
-
-	/*
-	 * check if the buffer of sufficient size has been
-	 * already allocated
-	 */
-	if (rbu_data.image_update_buffer_size >= size) {
-		/*
-		 * check for corruption
-		 */
-		if ((size != 0) && (rbu_data.image_update_buffer == NULL)) {
-			printk(KERN_ERR "dell_rbu:%s: corruption "
-				"check failed\n", __func__);
-			return -EINVAL;
-		}
-		/*
-		 * we have a valid pre-allocated buffer with
-		 * sufficient size
-		 */
-		return 0;
-	}
-
-	/*
-	 * free any previously allocated buffer
-	 */
-	img_update_free();
-
-	spin_unlock(&rbu_data.lock);
-
-	ordernum = get_order(size);
-	image_update_buffer =
-		(unsigned char *) __get_free_pages(GFP_KERNEL, ordernum);
-
-	img_buf_phys_addr =
-		(unsigned long) virt_to_phys(image_update_buffer);
-
-	if (img_buf_phys_addr > BIOS_SCAN_LIMIT) {
-		free_pages((unsigned long) image_update_buffer, ordernum);
-		ordernum = -1;
-		image_update_buffer = dma_alloc_coherent(NULL, size,
-			&dell_rbu_dmaaddr, GFP_KERNEL);
-		dma_alloc = 1;
-	}
-
-	spin_lock(&rbu_data.lock);
-
-	if (image_update_buffer != NULL) {
-		rbu_data.image_update_buffer = image_update_buffer;
-		rbu_data.image_update_buffer_size = size;
-		rbu_data.bios_image_size =
-			rbu_data.image_update_buffer_size;
-		rbu_data.image_update_ordernum = ordernum;
-		rbu_data.dma_alloc = dma_alloc;
-		rc = 0;
-	} else {
-		pr_debug("Not enough memory for image update:"
-			"size = %ld\n", size);
-		rc = -ENOMEM;
-	}
-
-	return rc;
-}
-
-static ssize_t read_packet_data(char *buffer, loff_t pos, size_t count)
-{
-	int retval;
-	size_t bytes_left;
-	size_t data_length;
-	char *ptempBuf = buffer;
-
-	/* check to see if we have something to return */
-	if (rbu_data.num_packets == 0) {
-		pr_debug("read_packet_data: no packets written\n");
-		retval = -ENOMEM;
-		goto read_rbu_data_exit;
-	}
-
-	if (pos > rbu_data.imagesize) {
-		retval = 0;
-		printk(KERN_WARNING "dell_rbu:read_packet_data: "
-			"data underrun\n");
-		goto read_rbu_data_exit;
-	}
-
-	bytes_left = rbu_data.imagesize - pos;
-	data_length = min(bytes_left, count);
-
-	if ((retval = packet_read_list(ptempBuf, &data_length)) < 0)
-		goto read_rbu_data_exit;
-
-	if ((pos + count) > rbu_data.imagesize) {
-		rbu_data.packet_read_count = 0;
-		/* this was the last copy */
-		retval = bytes_left;
-	} else
-		retval = count;
-
-      read_rbu_data_exit:
-	return retval;
-}
-
-static ssize_t read_rbu_mono_data(char *buffer, loff_t pos, size_t count)
-{
-	/* check to see if we have something to return */
-	if ((rbu_data.image_update_buffer == NULL) ||
-		(rbu_data.bios_image_size == 0)) {
-		pr_debug("read_rbu_data_mono: image_update_buffer %p ,"
-			"bios_image_size %lu\n",
-			rbu_data.image_update_buffer,
-			rbu_data.bios_image_size);
-		return -ENOMEM;
-	}
-
-	return memory_read_from_buffer(buffer, count, &pos,
-			rbu_data.image_update_buffer, rbu_data.bios_image_size);
-}
-
-static ssize_t read_rbu_data(struct file *filp, struct kobject *kobj,
-			     struct bin_attribute *bin_attr,
-			     char *buffer, loff_t pos, size_t count)
-{
-	ssize_t ret_count = 0;
-
-	spin_lock(&rbu_data.lock);
-
-	if (!strcmp(image_type, "mono"))
-		ret_count = read_rbu_mono_data(buffer, pos, count);
-	else if (!strcmp(image_type, "packet"))
-		ret_count = read_packet_data(buffer, pos, count);
-	else
-		pr_debug("read_rbu_data: invalid image type specified\n");
-
-	spin_unlock(&rbu_data.lock);
-	return ret_count;
-}
-
-static void callbackfn_rbu(const struct firmware *fw, void *context)
-{
-	rbu_data.entry_created = 0;
-
-	if (!fw)
-		return;
-
-	if (!fw->size)
-		goto out;
-
-	spin_lock(&rbu_data.lock);
-	if (!strcmp(image_type, "mono")) {
-		if (!img_update_realloc(fw->size))
-			memcpy(rbu_data.image_update_buffer,
-				fw->data, fw->size);
-	} else if (!strcmp(image_type, "packet")) {
-		/*
-		 * we need to free previous packets if a
-		 * new hunk of packets needs to be downloaded
-		 */
-		packet_empty_list();
-		if (packetize_data(fw->data, fw->size))
-			/* Incase something goes wrong when we are
-			 * in middle of packetizing the data, we
-			 * need to free up whatever packets might
-			 * have been created before we quit.
-			 */
-			packet_empty_list();
-	} else
-		pr_debug("invalid image type specified.\n");
-	spin_unlock(&rbu_data.lock);
- out:
-	release_firmware(fw);
-}
-
-static ssize_t read_rbu_image_type(struct file *filp, struct kobject *kobj,
-				   struct bin_attribute *bin_attr,
-				   char *buffer, loff_t pos, size_t count)
-{
-	int size = 0;
-	if (!pos)
-		size = scnprintf(buffer, count, "%s\n", image_type);
-	return size;
-}
-
-static ssize_t write_rbu_image_type(struct file *filp, struct kobject *kobj,
-				    struct bin_attribute *bin_attr,
-				    char *buffer, loff_t pos, size_t count)
-{
-	int rc = count;
-	int req_firm_rc = 0;
-	int i;
-	spin_lock(&rbu_data.lock);
-	/*
-	 * Find the first newline or space
-	 */
-	for (i = 0; i < count; ++i)
-		if (buffer[i] == '\n' || buffer[i] == ' ') {
-			buffer[i] = '\0';
-			break;
-		}
-	if (i == count)
-		buffer[count] = '\0';
-
-	if (strstr(buffer, "mono"))
-		strcpy(image_type, "mono");
-	else if (strstr(buffer, "packet"))
-		strcpy(image_type, "packet");
-	else if (strstr(buffer, "init")) {
-		/*
-		 * If due to the user error the driver gets in a bad
-		 * state where even though it is loaded , the
-		 * /sys/class/firmware/dell_rbu entries are missing.
-		 * to cover this situation the user can recreate entries
-		 * by writing init to image_type.
-		 */
-		if (!rbu_data.entry_created) {
-			spin_unlock(&rbu_data.lock);
-			req_firm_rc = request_firmware_nowait(THIS_MODULE,
-				FW_ACTION_NOHOTPLUG, "dell_rbu",
-				&rbu_device->dev, GFP_KERNEL, &context,
-				callbackfn_rbu);
-			if (req_firm_rc) {
-				printk(KERN_ERR
-					"dell_rbu:%s request_firmware_nowait"
-					" failed %d\n", __func__, rc);
-				rc = -EIO;
-			} else
-				rbu_data.entry_created = 1;
-
-			spin_lock(&rbu_data.lock);
-		}
-	} else {
-		printk(KERN_WARNING "dell_rbu: image_type is invalid\n");
-		spin_unlock(&rbu_data.lock);
-		return -EINVAL;
-	}
-
-	/* we must free all previous allocations */
-	packet_empty_list();
-	img_update_free();
-	spin_unlock(&rbu_data.lock);
-
-	return rc;
-}
-
-static ssize_t read_rbu_packet_size(struct file *filp, struct kobject *kobj,
-				    struct bin_attribute *bin_attr,
-				    char *buffer, loff_t pos, size_t count)
-{
-	int size = 0;
-	if (!pos) {
-		spin_lock(&rbu_data.lock);
-		size = scnprintf(buffer, count, "%lu\n", rbu_data.packetsize);
-		spin_unlock(&rbu_data.lock);
-	}
-	return size;
-}
-
-static ssize_t write_rbu_packet_size(struct file *filp, struct kobject *kobj,
-				     struct bin_attribute *bin_attr,
-				     char *buffer, loff_t pos, size_t count)
-{
-	unsigned long temp;
-	spin_lock(&rbu_data.lock);
-	packet_empty_list();
-	sscanf(buffer, "%lu", &temp);
-	if (temp < 0xffffffff)
-		rbu_data.packetsize = temp;
-
-	spin_unlock(&rbu_data.lock);
-	return count;
-}
-
-static struct bin_attribute rbu_data_attr = {
-	.attr = {.name = "data", .mode = 0444},
-	.read = read_rbu_data,
-};
-
-static struct bin_attribute rbu_image_type_attr = {
-	.attr = {.name = "image_type", .mode = 0644},
-	.read = read_rbu_image_type,
-	.write = write_rbu_image_type,
-};
-
-static struct bin_attribute rbu_packet_size_attr = {
-	.attr = {.name = "packet_size", .mode = 0644},
-	.read = read_rbu_packet_size,
-	.write = write_rbu_packet_size,
-};
-
-static int __init dcdrbu_init(void)
-{
-	int rc;
-	spin_lock_init(&rbu_data.lock);
-
-	init_packet_head();
-	rbu_device = platform_device_register_simple("dell_rbu", -1, NULL, 0);
-	if (IS_ERR(rbu_device)) {
-		printk(KERN_ERR
-			"dell_rbu:%s:platform_device_register_simple "
-			"failed\n", __func__);
-		return PTR_ERR(rbu_device);
-	}
-
-	rc = sysfs_create_bin_file(&rbu_device->dev.kobj, &rbu_data_attr);
-	if (rc)
-		goto out_devreg;
-	rc = sysfs_create_bin_file(&rbu_device->dev.kobj, &rbu_image_type_attr);
-	if (rc)
-		goto out_data;
-	rc = sysfs_create_bin_file(&rbu_device->dev.kobj,
-		&rbu_packet_size_attr);
-	if (rc)
-		goto out_imtype;
-
-	rbu_data.entry_created = 0;
-	return 0;
-
-out_imtype:
-	sysfs_remove_bin_file(&rbu_device->dev.kobj, &rbu_image_type_attr);
-out_data:
-	sysfs_remove_bin_file(&rbu_device->dev.kobj, &rbu_data_attr);
-out_devreg:
-	platform_device_unregister(rbu_device);
-	return rc;
-}
-
-static __exit void dcdrbu_exit(void)
-{
-	spin_lock(&rbu_data.lock);
-	packet_empty_list();
-	img_update_free();
-	spin_unlock(&rbu_data.lock);
-	platform_device_unregister(rbu_device);
-}
-
-module_exit(dcdrbu_exit);
-module_init(dcdrbu_init);
-
-/* vim:noet:ts=8:sw=8
-*/
diff --git a/drivers/firmware/dmi-id.c b/drivers/firmware/dmi-id.c
index 624a11c..ff39f64 100644
--- a/drivers/firmware/dmi-id.c
+++ b/drivers/firmware/dmi-id.c
@@ -1,9 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * Export SMBIOS/DMI info via sysfs to userspace
  *
  * Copyright 2007, Lennart Poettering
- *
- * Licensed under GPLv2
  */
 
 #include <linux/module.h>
diff --git a/drivers/firmware/dmi-sysfs.c b/drivers/firmware/dmi-sysfs.c
index ecf2eeb..b618002 100644
--- a/drivers/firmware/dmi-sysfs.c
+++ b/drivers/firmware/dmi-sysfs.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * dmi-sysfs.c
  *
diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c
index f248354..1e21fc3 100644
--- a/drivers/firmware/dmi_scan.c
+++ b/drivers/firmware/dmi_scan.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
 #include <linux/types.h>
 #include <linux/string.h>
 #include <linux/init.h>
@@ -5,7 +6,7 @@
 #include <linux/ctype.h>
 #include <linux/dmi.h>
 #include <linux/efi.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
 #include <linux/random.h>
 #include <asm/dmi.h>
 #include <asm/unaligned.h>
@@ -407,7 +408,7 @@
 		bytes = ~0ull;
 	else if (size & 0x8000)
 		bytes = (u64)(size & 0x7fff) << 10;
-	else if (size != 0x7fff)
+	else if (size != 0x7fff || dm->length < 0x20)
 		bytes = (u64)size << 20;
 	else
 		bytes = (u64)get_unaligned((u32 *)&d[0x1C]) << 20;
@@ -416,11 +417,8 @@
 	nr++;
 }
 
-void __init dmi_memdev_walk(void)
+static void __init dmi_memdev_walk(void)
 {
-	if (!dmi_available)
-		return;
-
 	if (dmi_walk_early(count_mem_devices) == 0 && dmi_memdev_nr) {
 		dmi_memdev = dmi_alloc(sizeof(*dmi_memdev) * dmi_memdev_nr);
 		if (dmi_memdev)
@@ -614,7 +612,7 @@
 	return 1;
 }
 
-void __init dmi_scan_machine(void)
+static void __init dmi_scan_machine(void)
 {
 	char __iomem *p, *q;
 	char buf[32];
@@ -769,15 +767,20 @@
 subsys_initcall(dmi_init);
 
 /**
- * dmi_set_dump_stack_arch_desc - set arch description for dump_stack()
+ *	dmi_setup - scan and set up DMI system information
  *
- * Invoke dump_stack_set_arch_desc() with DMI system information so that
- * DMI identifiers are printed out on task dumps.  Arch boot code should
- * call this function after dmi_scan_machine() if it wants to print out DMI
- * identifiers on task dumps.
+ *	Scan the DMI system information. This sets up DMI identifiers
+ *	(dmi_system_id) for printing them out on task dumps and prepares
+ *	DIMM entry information (dmi_memdev_info) from the SMBIOS table
+ *	for use when reporting memory errors.
  */
-void __init dmi_set_dump_stack_arch_desc(void)
+void __init dmi_setup(void)
 {
+	dmi_scan_machine();
+	if (!dmi_available)
+		return;
+
+	dmi_memdev_walk();
 	dump_stack_set_arch_desc("%s", dmi_ids_string);
 }
 
@@ -841,7 +844,7 @@
  *	returns non zero or we hit the end. Callback function is called for
  *	each successful match. Returns the number of matches.
  *
- *	dmi_scan_machine must be called before this function is called.
+ *	dmi_setup must be called before this function is called.
  */
 int dmi_check_system(const struct dmi_system_id *list)
 {
@@ -871,7 +874,7 @@
  *	Walk the blacklist table until the first match is found.  Return the
  *	pointer to the matching entry or NULL if there's no match.
  *
- *	dmi_scan_machine must be called before this function is called.
+ *	dmi_setup must be called before this function is called.
  */
 const struct dmi_system_id *dmi_first_match(const struct dmi_system_id *list)
 {
diff --git a/drivers/firmware/edd.c b/drivers/firmware/edd.c
index 1b82c89..29906e3 100644
--- a/drivers/firmware/edd.c
+++ b/drivers/firmware/edd.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * linux/drivers/firmware/edd.c
  *  Copyright (C) 2002, 2003, 2004 Dell Inc.
@@ -17,16 +18,6 @@
  *
  * Please see http://linux.dell.com/edd/results.html for
  * the list of BIOSs which have been reported to implement EDD.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License v2.0 as published by
- * the Free Software Foundation
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
  */
 
 #include <linux/module.h>
diff --git a/drivers/firmware/efi/Kconfig b/drivers/firmware/efi/Kconfig
index 89110df..b248870 100644
--- a/drivers/firmware/efi/Kconfig
+++ b/drivers/firmware/efi/Kconfig
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
 menu "EFI (Extensible Firmware Interface) Support"
 	depends on EFI
 
@@ -179,6 +180,20 @@
 	  have been evicted, since otherwise it will trigger even on clean
 	  reboots.
 
+config EFI_RCI2_TABLE
+	bool "EFI Runtime Configuration Interface Table Version 2 Support"
+	depends on X86 || COMPILE_TEST
+	help
+	  Displays the content of the Runtime Configuration Interface
+	  Table version 2 on Dell EMC PowerEdge systems as a binary
+	  attribute 'rci2' under the /sys/firmware/efi/tables directory.
+
+	  The RCI2 table contains BIOS HII in XML format and is used to
+	  populate the BIOS setup page in the Dell EMC OpenManage Server
+	  Administrator tool, which exposes configurable BIOS tokens.
+
+	  Say Y here for Dell EMC PowerEdge systems.
+
 endmenu
 
 config UEFI_CPER
@@ -198,3 +213,9 @@
 	bool
 	depends on ACPI
 	default n
+
+config EFI_EARLYCON
+	def_bool y
+	depends on SERIAL_EARLYCON && !ARM && !IA64
+	select FONT_SUPPORT
+	select ARCH_USE_MEMREMAP_PROT
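
For the EFI_RCI2_TABLE entry above, the exported data is a raw binary blob. A small userspace sketch that dumps it follows, assuming the attribute shows up as /sys/firmware/efi/tables/rci2 as the help text describes; it is illustrative only and not part of this patch.

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	/* Path assumed from the Kconfig help text above. */
	FILE *f = fopen("/sys/firmware/efi/tables/rci2", "rb");
	char buf[4096];
	size_t n;

	if (!f) {
		perror("rci2");
		return EXIT_FAILURE;
	}
	while ((n = fread(buf, 1, sizeof(buf), f)) > 0)
		fwrite(buf, 1, n, stdout);	/* XML-formatted BIOS HII payload */
	fclose(f);
	return EXIT_SUCCESS;
}
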
diff --git a/drivers/firmware/efi/Makefile b/drivers/firmware/efi/Makefile
index 5f9f503..4ac2de4 100644
--- a/drivers/firmware/efi/Makefile
+++ b/drivers/firmware/efi/Makefile
@@ -25,10 +25,12 @@
 obj-$(CONFIG_EFI_TEST)			+= test/
 obj-$(CONFIG_EFI_DEV_PATH_PARSER)	+= dev-path-parser.o
 obj-$(CONFIG_APPLE_PROPERTIES)		+= apple-properties.o
+obj-$(CONFIG_EFI_RCI2_TABLE)		+= rci2-table.o
 
 arm-obj-$(CONFIG_EFI)			:= arm-init.o arm-runtime.o
 obj-$(CONFIG_ARM)			+= $(arm-obj-y)
 obj-$(CONFIG_ARM64)			+= $(arm-obj-y)
 obj-$(CONFIG_EFI_CAPSULE_LOADER)	+= capsule-loader.o
+obj-$(CONFIG_EFI_EARLYCON)		+= earlycon.o
 obj-$(CONFIG_UEFI_CPER_ARM)		+= cper-arm.o
 obj-$(CONFIG_UEFI_CPER_X86)		+= cper-x86.o
diff --git a/drivers/firmware/efi/apple-properties.c b/drivers/firmware/efi/apple-properties.c
index 60a9571..0e206c9 100644
--- a/drivers/firmware/efi/apple-properties.c
+++ b/drivers/firmware/efi/apple-properties.c
@@ -1,26 +1,15 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * apple-properties.c - EFI device properties on Macs
  * Copyright (C) 2016 Lukas Wunner <lukas@wunner.de>
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License (version 2) as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, see <http://www.gnu.org/licenses/>.
- *
  * Note, all properties are considered as u8 arrays.
  * To get a value of any of them the caller must use device_property_read_u8_array().
  */
 
 #define pr_fmt(fmt) "apple-properties: " fmt
 
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
 #include <linux/efi.h>
 #include <linux/io.h>
 #include <linux/platform_data/x86/apple.h>
@@ -235,7 +224,7 @@
 		 */
 		data->len = 0;
 		memunmap(data);
-		free_bootmem_late(pa_data + sizeof(*data), data_len);
+		memblock_free_late(pa_data + sizeof(*data), data_len);
 
 		return ret;
 	}
diff --git a/drivers/firmware/efi/arm-init.c b/drivers/firmware/efi/arm-init.c
index 1a6a77d..311cd34 100644
--- a/drivers/firmware/efi/arm-init.c
+++ b/drivers/firmware/efi/arm-init.c
@@ -1,14 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * Extensible Firmware Interface
  *
  * Based on Extensible Firmware Interface Specification version 2.4
  *
  * Copyright (C) 2013 - 2015 Linaro Ltd.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
  */
 
 #define pr_fmt(fmt)	"efi: " fmt
diff --git a/drivers/firmware/efi/arm-runtime.c b/drivers/firmware/efi/arm-runtime.c
index a00934d..e2ac5fa 100644
--- a/drivers/firmware/efi/arm-runtime.c
+++ b/drivers/firmware/efi/arm-runtime.c
@@ -1,14 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * Extensible Firmware Interface
  *
  * Based on Extensible Firmware Interface Specification version 2.4
  *
  * Copyright (C) 2013, 2014 Linaro Ltd.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
  */
 
 #include <linux/dmi.h>
@@ -37,18 +33,19 @@
 static struct ptdump_info efi_ptdump_info = {
 	.mm		= &efi_mm,
 	.markers	= (struct addr_marker[]){
-		{ 0,		"UEFI runtime start" },
-		{ TASK_SIZE_64,	"UEFI runtime end" }
+		{ 0,				"UEFI runtime start" },
+		{ DEFAULT_MAP_WINDOW_64,	"UEFI runtime end" },
+		{ -1,				NULL }
 	},
 	.base_addr	= 0,
 };
 
 static int __init ptdump_init(void)
 {
-	if (!efi_enabled(EFI_RUNTIME_SERVICES))
-		return 0;
+	if (efi_enabled(EFI_RUNTIME_SERVICES))
+		ptdump_debugfs_register(&efi_ptdump_info, "efi_page_tables");
 
-	return ptdump_debugfs_register(&efi_ptdump_info, "efi_page_tables");
+	return 0;
 }
 device_initcall(ptdump_init);
 
@@ -165,13 +162,11 @@
 static int __init arm_dmi_init(void)
 {
 	/*
-	 * On arm64/ARM, DMI depends on UEFI, and dmi_scan_machine() needs to
+	 * On arm64/ARM, DMI depends on UEFI, and dmi_setup() needs to
 	 * be called early because dmi_id_init(), which is an arch_initcall
 	 * itself, depends on dmi_scan_machine() having been called already.
 	 */
-	dmi_scan_machine();
-	if (dmi_available)
-		dmi_set_dump_stack_arch_desc();
+	dmi_setup();
 	return 0;
 }
 core_initcall(arm_dmi_init);
diff --git a/drivers/firmware/efi/capsule-loader.c b/drivers/firmware/efi/capsule-loader.c
index 9668898..b139513 100644
--- a/drivers/firmware/efi/capsule-loader.c
+++ b/drivers/firmware/efi/capsule-loader.c
@@ -1,10 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * EFI capsule loader driver.
  *
  * Copyright 2015 Intel Corporation
- *
- * This file is part of the Linux kernel, and is made available under
- * the terms of the GNU General Public License version 2.
  */
 
 #define pr_fmt(fmt) "efi: " fmt
diff --git a/drivers/firmware/efi/capsule.c b/drivers/firmware/efi/capsule.c
index 4938c29..598b780 100644
--- a/drivers/firmware/efi/capsule.c
+++ b/drivers/firmware/efi/capsule.c
@@ -1,10 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * EFI capsule support.
  *
  * Copyright 2013 Intel Corporation; author Matt Fleming
- *
- * This file is part of the Linux kernel, and is made available under
- * the terms of the GNU General Public License version 2.
  */
 
 #define pr_fmt(fmt) "efi: " fmt
diff --git a/drivers/firmware/efi/cper-arm.c b/drivers/firmware/efi/cper-arm.c
index 5028113..36d3b8b 100644
--- a/drivers/firmware/efi/cper-arm.c
+++ b/drivers/firmware/efi/cper-arm.c
@@ -1,20 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * UEFI Common Platform Error Record (CPER) support
  *
  * Copyright (C) 2017, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
  */
 
 #include <linux/kernel.h>
diff --git a/drivers/firmware/efi/cper.c b/drivers/firmware/efi/cper.c
index a7902fc..b1af0de 100644
--- a/drivers/firmware/efi/cper.c
+++ b/drivers/firmware/efi/cper.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * UEFI Common Platform Error Record (CPER) support
  *
@@ -9,19 +10,6 @@
  *
  * For more information about CPER, please refer to Appendix N of UEFI
  * Specification version 2.4.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
  */
 
 #include <linux/kernel.h>
@@ -393,7 +381,7 @@
 		printk("%s""vendor_id: 0x%04x, device_id: 0x%04x\n", pfx,
 		       pcie->device_id.vendor_id, pcie->device_id.device_id);
 		p = pcie->device_id.class_code;
-		printk("%s""class_code: %02x%02x%02x\n", pfx, p[0], p[1], p[2]);
+		printk("%s""class_code: %02x%02x%02x\n", pfx, p[2], p[1], p[0]);
 	}
 	if (pcie->validation_bits & CPER_PCIE_VALID_SERIAL_NUMBER)
 		printk("%s""serial number: 0x%04x, 0x%04x\n", pfx,
@@ -402,6 +390,21 @@
 		printk(
 	"%s""bridge: secondary_status: 0x%04x, control: 0x%04x\n",
 	pfx, pcie->bridge.secondary_status, pcie->bridge.control);
+
+	/* Fatal errors call __ghes_panic() before AER handler prints this */
+	if ((pcie->validation_bits & CPER_PCIE_VALID_AER_INFO) &&
+	    (gdata->error_severity & CPER_SEV_FATAL)) {
+		struct aer_capability_regs *aer;
+
+		aer = (struct aer_capability_regs *)pcie->aer_info;
+		printk("%saer_uncor_status: 0x%08x, aer_uncor_mask: 0x%08x\n",
+		       pfx, aer->uncor_status, aer->uncor_mask);
+		printk("%saer_uncor_severity: 0x%08x\n",
+		       pfx, aer->uncor_severity);
+		printk("%sTLP Header: %08x %08x %08x %08x\n", pfx,
+		       aer->header_log.dw0, aer->header_log.dw1,
+		       aer->header_log.dw2, aer->header_log.dw3);
+	}
 }
 
 static void cper_print_tstamp(const char *pfx,
@@ -546,19 +549,24 @@
 int cper_estatus_check(const struct acpi_hest_generic_status *estatus)
 {
 	struct acpi_hest_generic_data *gdata;
-	unsigned int data_len, gedata_len;
+	unsigned int data_len, record_size;
 	int rc;
 
 	rc = cper_estatus_check_header(estatus);
 	if (rc)
 		return rc;
+
 	data_len = estatus->data_length;
 
 	apei_estatus_for_each_section(estatus, gdata) {
-		gedata_len = acpi_hest_get_error_length(gdata);
-		if (gedata_len > data_len - acpi_hest_get_size(gdata))
+		if (sizeof(struct acpi_hest_generic_data) > data_len)
 			return -EINVAL;
-		data_len -= acpi_hest_get_record_size(gdata);
+
+		record_size = acpi_hest_get_record_size(gdata);
+		if (record_size > data_len)
+			return -EINVAL;
+
+		data_len -= record_size;
 	}
 	if (data_len)
 		return -EINVAL;
diff --git a/drivers/firmware/efi/dev-path-parser.c b/drivers/firmware/efi/dev-path-parser.c
index 85d1834..2012338 100644
--- a/drivers/firmware/efi/dev-path-parser.c
+++ b/drivers/firmware/efi/dev-path-parser.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * dev-path-parser.c - EFI Device Path parser
  * Copyright (C) 2016 Lukas Wunner <lukas@wunner.de>
@@ -5,14 +6,6 @@
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License (version 2) as
  * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, see <http://www.gnu.org/licenses/>.
  */
 
 #include <linux/acpi.h>
@@ -24,9 +17,9 @@
 	char uid[11]; /* UINT_MAX + null byte */
 };
 
-static int __init match_acpi_dev(struct device *dev, void *data)
+static int __init match_acpi_dev(struct device *dev, const void *data)
 {
-	struct acpi_hid_uid hid_uid = *(struct acpi_hid_uid *)data;
+	struct acpi_hid_uid hid_uid = *(const struct acpi_hid_uid *)data;
 	struct acpi_device *adev = to_acpi_device(dev);
 
 	if (acpi_match_device_ids(adev, hid_uid.hid))
diff --git a/drivers/firmware/efi/earlycon.c b/drivers/firmware/efi/earlycon.c
new file mode 100644
index 0000000..c9a0efc
--- /dev/null
+++ b/drivers/firmware/efi/earlycon.c
@@ -0,0 +1,206 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2013 Intel Corporation; author Matt Fleming
+ */
+
+#include <linux/console.h>
+#include <linux/efi.h>
+#include <linux/font.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/serial_core.h>
+#include <linux/screen_info.h>
+
+#include <asm/early_ioremap.h>
+
+static const struct font_desc *font;
+static u32 efi_x, efi_y;
+static u64 fb_base;
+static pgprot_t fb_prot;
+
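+/*
+ * The early console runs long before the regular ioremap/memremap
+ * machinery is available, so the framebuffer is accessed through
+ * early_memremap_prot()/early_memunmap().
+ */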
+static __ref void *efi_earlycon_map(unsigned long start, unsigned long len)
+{
+	return early_memremap_prot(fb_base + start, len, pgprot_val(fb_prot));
+}
+
+static __ref void efi_earlycon_unmap(void *addr, unsigned long len)
+{
+	early_memunmap(addr, len);
+}
+
+static void efi_earlycon_clear_scanline(unsigned int y)
+{
+	unsigned long *dst;
+	u16 len;
+
+	len = screen_info.lfb_linelength;
+	dst = efi_earlycon_map(y*len, len);
+	if (!dst)
+		return;
+
+	memset(dst, 0, len);
+	efi_earlycon_unmap(dst, len);
+}
+
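+/* Scroll the whole framebuffer up by one text row (font->height scanlines). */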
+static void efi_earlycon_scroll_up(void)
+{
+	unsigned long *dst, *src;
+	u16 len;
+	u32 i, height;
+
+	len = screen_info.lfb_linelength;
+	height = screen_info.lfb_height;
+
+	for (i = 0; i < height - font->height; i++) {
+		dst = efi_earlycon_map(i*len, len);
+		if (!dst)
+			return;
+
+		src = efi_earlycon_map((i + font->height) * len, len);
+		if (!src) {
+			efi_earlycon_unmap(dst, len);
+			return;
+		}
+
+		memmove(dst, src, len);
+
+		efi_earlycon_unmap(src, len);
+		efi_earlycon_unmap(dst, len);
+	}
+}
+
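+/*
+ * Draw scanline 'h' of the 8-pixel-wide glyph for character 'c' as
+ * 32-bit black/white pixels starting at 'dst'.
+ */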
+static void efi_earlycon_write_char(u32 *dst, unsigned char c, unsigned int h)
+{
+	const u32 color_black = 0x00000000;
+	const u32 color_white = 0x00ffffff;
+	const u8 *src;
+	u8 s8;
+	int m;
+
+	src = font->data + c * font->height;
+	s8 = *(src + h);
+
+	for (m = 0; m < 8; m++) {
+		if ((s8 >> (7 - m)) & 1)
+			*dst = color_white;
+		else
+			*dst = color_black;
+		dst++;
+	}
+}
+
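+/*
+ * Console write hook: render 'num' characters to the framebuffer,
+ * wrapping at the right edge, honouring '\n', and scrolling once the
+ * output reaches the bottom of the screen.
+ */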
+static void
+efi_earlycon_write(struct console *con, const char *str, unsigned int num)
+{
+	struct screen_info *si;
+	unsigned int len;
+	const char *s;
+	void *dst;
+
+	si = &screen_info;
+	len = si->lfb_linelength;
+
+	while (num) {
+		unsigned int linemax;
+		unsigned int h, count = 0;
+
+		for (s = str; *s && *s != '\n'; s++) {
+			if (count == num)
+				break;
+			count++;
+		}
+
+		linemax = (si->lfb_width - efi_x) / font->width;
+		if (count > linemax)
+			count = linemax;
+
+		for (h = 0; h < font->height; h++) {
+			unsigned int n, x;
+
+			dst = efi_earlycon_map((efi_y + h) * len, len);
+			if (!dst)
+				return;
+
+			s = str;
+			n = count;
+			x = efi_x;
+
+			while (n-- > 0) {
+				efi_earlycon_write_char(dst + x*4, *s, h);
+				x += font->width;
+				s++;
+			}
+
+			efi_earlycon_unmap(dst, len);
+		}
+
+		num -= count;
+		efi_x += count * font->width;
+		str += count;
+
+		if (num > 0 && *s == '\n') {
+			efi_x = 0;
+			efi_y += font->height;
+			str++;
+			num--;
+		}
+
+		if (efi_x + font->width > si->lfb_width) {
+			efi_x = 0;
+			efi_y += font->height;
+		}
+
+		if (efi_y + font->height > si->lfb_height) {
+			u32 i;
+
+			efi_y -= font->height;
+			efi_earlycon_scroll_up();
+
+			for (i = 0; i < font->height; i++)
+				efi_earlycon_clear_scanline(efi_y + i);
+		}
+	}
+}
+
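+/*
+ * earlycon=efifb[,ram] handler: derive the (possibly 64-bit) framebuffer
+ * base from screen_info, map it write-combined unless the "ram" option is
+ * given, require a 32 bpp mode, pick a font and start output on the
+ * bottom text row.
+ */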
+static int __init efi_earlycon_setup(struct earlycon_device *device,
+				     const char *opt)
+{
+	struct screen_info *si;
+	u16 xres, yres;
+	u32 i;
+
+	if (screen_info.orig_video_isVGA != VIDEO_TYPE_EFI)
+		return -ENODEV;
+
+	fb_base = screen_info.lfb_base;
+	if (screen_info.capabilities & VIDEO_CAPABILITY_64BIT_BASE)
+		fb_base |= (u64)screen_info.ext_lfb_base << 32;
+
+	if (opt && !strcmp(opt, "ram"))
+		fb_prot = PAGE_KERNEL;
+	else
+		fb_prot = pgprot_writecombine(PAGE_KERNEL);
+
+	si = &screen_info;
+	xres = si->lfb_width;
+	yres = si->lfb_height;
+
+	/*
+	 * efi_earlycon_write_char() implicitly assumes a framebuffer with
+	 * 32 bits per pixel.
+	 */
+	if (si->lfb_depth != 32)
+		return -ENODEV;
+
+	font = get_default_font(xres, yres, -1, -1);
+	if (!font)
+		return -ENODEV;
+
+	efi_y = rounddown(yres, font->height) - font->height;
+	for (i = 0; i < (yres - efi_y) / font->height; i++)
+		efi_earlycon_scroll_up();
+
+	device->con->write = efi_earlycon_write;
+	return 0;
+}
+EARLYCON_DECLARE(efifb, efi_earlycon_setup);
diff --git a/drivers/firmware/efi/efi-bgrt.c b/drivers/firmware/efi/efi-bgrt.c
index b22ccfb..b07c176 100644
--- a/drivers/firmware/efi/efi-bgrt.c
+++ b/drivers/firmware/efi/efi-bgrt.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * Copyright 2012 Intel Corporation
  * Author: Josh Triplett <josh@joshtriplett.org>
@@ -5,10 +6,6 @@
  * Based on the bgrt driver:
  * Copyright 2012 Red Hat, Inc <mjg@redhat.com>
  * Author: Matthew Garrett
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
  */
 
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
@@ -50,11 +47,6 @@
 		       bgrt->version);
 		goto out;
 	}
-	if (bgrt->status & 0xfe) {
-		pr_notice("Ignoring BGRT: reserved status bits are non-zero %u\n",
-		       bgrt->status);
-		goto out;
-	}
 	if (bgrt->image_type != 0) {
 		pr_notice("Ignoring BGRT: invalid image type %u (expected 0)\n",
 		       bgrt->image_type);
diff --git a/drivers/firmware/efi/efi-pstore.c b/drivers/firmware/efi/efi-pstore.c
index cfe87b4..9ea13e8 100644
--- a/drivers/firmware/efi/efi-pstore.c
+++ b/drivers/firmware/efi/efi-pstore.c
@@ -1,3 +1,5 @@
+// SPDX-License-Identifier: GPL-2.0+
+
 #include <linux/efi.h>
 #include <linux/module.h>
 #include <linux/pstore.h>
@@ -259,8 +261,7 @@
 		efi_name[i] = name[i];
 
 	ret = efivar_entry_set_safe(efi_name, vendor, PSTORE_EFI_ATTRIBUTES,
-			      !pstore_cannot_block_path(record->reason),
-			      record->size, record->psi->buf);
+			      preemptible(), record->size, record->psi->buf);
 
 	if (record->reason == KMSG_DUMP_OOPS)
 		efivar_run_worker();
@@ -369,7 +370,6 @@
 		return -ENOMEM;
 
 	efi_pstore_info.bufsize = 1024;
-	spin_lock_init(&efi_pstore_info.buf_lock);
 
 	if (pstore_register(&efi_pstore_info)) {
 		kfree(efi_pstore_info.buf);
diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c
index 2a29dd9..e98bbf8 100644
--- a/drivers/firmware/efi/efi.c
+++ b/drivers/firmware/efi/efi.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * efi.c - EFI subsystem
  *
@@ -9,8 +10,6 @@
  * allowing the efivarfs to be mounted or the efivars module to be loaded.
  * The existance of /sys/firmware/efi may also be used by userspace to
  * determine that the system supports EFI.
- *
- * This file is released under the GPLv2.
  */
 
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
@@ -31,6 +30,7 @@
 #include <linux/acpi.h>
 #include <linux/ucs2_string.h>
 #include <linux/memblock.h>
+#include <linux/security.h>
 
 #include <asm/early_ioremap.h>
 
@@ -40,11 +40,9 @@
 	.acpi20			= EFI_INVALID_TABLE_ADDR,
 	.smbios			= EFI_INVALID_TABLE_ADDR,
 	.smbios3		= EFI_INVALID_TABLE_ADDR,
-	.sal_systab		= EFI_INVALID_TABLE_ADDR,
 	.boot_info		= EFI_INVALID_TABLE_ADDR,
 	.hcdp			= EFI_INVALID_TABLE_ADDR,
 	.uga			= EFI_INVALID_TABLE_ADDR,
-	.uv_systab		= EFI_INVALID_TABLE_ADDR,
 	.fw_vendor		= EFI_INVALID_TABLE_ADDR,
 	.runtime		= EFI_INVALID_TABLE_ADDR,
 	.config_table		= EFI_INVALID_TABLE_ADDR,
@@ -52,29 +50,12 @@
 	.properties_table	= EFI_INVALID_TABLE_ADDR,
 	.mem_attr_table		= EFI_INVALID_TABLE_ADDR,
 	.rng_seed		= EFI_INVALID_TABLE_ADDR,
-	.tpm_log		= EFI_INVALID_TABLE_ADDR
+	.tpm_log		= EFI_INVALID_TABLE_ADDR,
+	.tpm_final_log		= EFI_INVALID_TABLE_ADDR,
+	.mem_reserve		= EFI_INVALID_TABLE_ADDR,
 };
 EXPORT_SYMBOL(efi);
 
-static unsigned long *efi_tables[] = {
-	&efi.mps,
-	&efi.acpi,
-	&efi.acpi20,
-	&efi.smbios,
-	&efi.smbios3,
-	&efi.sal_systab,
-	&efi.boot_info,
-	&efi.hcdp,
-	&efi.uga,
-	&efi.uv_systab,
-	&efi.fw_vendor,
-	&efi.runtime,
-	&efi.config_table,
-	&efi.esrt,
-	&efi.properties_table,
-	&efi.mem_attr_table,
-};
-
 struct mm_struct efi_mm = {
 	.mm_rb			= RB_ROOT,
 	.mm_users		= ATOMIC_INIT(2),
@@ -241,6 +222,11 @@
 static char efivar_ssdt[EFIVAR_SSDT_NAME_MAX] __initdata;
 static int __init efivar_ssdt_setup(char *str)
 {
+	int ret = security_locked_down(LOCKDOWN_ACPI_TABLES);
+
+	if (ret)
+		return ret;
+
 	if (strlen(str) < sizeof(efivar_ssdt))
 		memcpy(efivar_ssdt, str, strlen(str));
 	else
@@ -281,6 +267,9 @@
 	void *data;
 	int ret;
 
+	if (!efivar_ssdt[0])
+		return 0;
+
 	ret = efivar_init(efivar_ssdt_iter, &entries, true, &entries);
 
 	list_for_each_entry_safe(entry, aux, &entries, list) {
@@ -475,7 +464,6 @@
 	{ACPI_TABLE_GUID, "ACPI", &efi.acpi},
 	{HCDP_TABLE_GUID, "HCDP", &efi.hcdp},
 	{MPS_TABLE_GUID, "MPS", &efi.mps},
-	{SAL_SYSTEM_TABLE_GUID, "SALsystab", &efi.sal_systab},
 	{SMBIOS_TABLE_GUID, "SMBIOS", &efi.smbios},
 	{SMBIOS3_TABLE_GUID, "SMBIOS 3.0", &efi.smbios3},
 	{UGA_IO_PROTOCOL_GUID, "UGA", &efi.uga},
@@ -484,6 +472,11 @@
 	{EFI_MEMORY_ATTRIBUTES_TABLE_GUID, "MEMATTR", &efi.mem_attr_table},
 	{LINUX_EFI_RANDOM_SEED_TABLE_GUID, "RNG", &efi.rng_seed},
 	{LINUX_EFI_TPM_EVENT_LOG_GUID, "TPMEventLog", &efi.tpm_log},
+	{LINUX_EFI_TPM_FINAL_LOG_GUID, "TPMFinalLog", &efi.tpm_final_log},
+	{LINUX_EFI_MEMRESERVE_TABLE_GUID, "MEMRESERVE", &efi.mem_reserve},
+#ifdef CONFIG_EFI_RCI2_TABLE
+	{DELLEMC_EFI_RCI2_TABLE_GUID, NULL, &rci2_table_phys},
+#endif
 	{NULL_GUID, NULL, NULL},
 };
 
@@ -561,7 +554,7 @@
 					      sizeof(*seed) + size);
 			if (seed != NULL) {
 				pr_notice("seeding entropy pool\n");
-				add_device_randomness(seed->bits, seed->size);
+				add_bootloader_randomness(seed->bits, seed->size);
 				early_memunmap(seed, sizeof(*seed) + size);
 			} else {
 				pr_err("Could not map UEFI random seed!\n");
@@ -591,6 +584,41 @@
 		early_memunmap(tbl, sizeof(*tbl));
 	}
 
+	if (efi.mem_reserve != EFI_INVALID_TABLE_ADDR) {
+		unsigned long prsv = efi.mem_reserve;
+
+		while (prsv) {
+			struct linux_efi_memreserve *rsv;
+			u8 *p;
+			int i;
+
+			/*
+			 * Just map a full page: that is what we will get
+			 * anyway, and it permits us to map the entire entry
+			 * before knowing its size.
+			 */
+			p = early_memremap(ALIGN_DOWN(prsv, PAGE_SIZE),
+					   PAGE_SIZE);
+			if (p == NULL) {
+				pr_err("Could not map UEFI memreserve entry!\n");
+				return -ENOMEM;
+			}
+
+			rsv = (void *)(p + prsv % PAGE_SIZE);
+
+			/* reserve the entry itself */
+			memblock_reserve(prsv, EFI_MEMRESERVE_SIZE(rsv->size));
+
+			for (i = 0; i < atomic_read(&rsv->count); i++) {
+				memblock_reserve(rsv->entry[i].base,
+						 rsv->entry[i].size);
+			}
+
+			prsv = rsv->next;
+			early_memunmap(p, PAGE_SIZE);
+		}
+	}
+
 	return 0;
 }
 
@@ -599,6 +627,9 @@
 	void *config_tables;
 	int sz, ret;
 
+	if (efi.systab->nr_tables == 0)
+		return 0;
+
 	if (efi_enabled(EFI_64BIT))
 		sz = sizeof(efi_config_table_64_t);
 	else
@@ -923,20 +954,85 @@
 	return err;
 }
 
-bool efi_is_table_address(unsigned long phys_addr)
+static DEFINE_SPINLOCK(efi_mem_reserve_persistent_lock);
+static struct linux_efi_memreserve *efi_memreserve_root __ro_after_init;
+
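+/*
+ * The MEMRESERVE configuration table is a linked list of memory ranges
+ * that must stay reserved across kexec. efi_memreserve_map_root() maps
+ * the list head published by the stub, and efi_mem_reserve_persistent()
+ * appends a range to the list, reusing a free slot in an existing entry
+ * when possible.
+ */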
+static int __init efi_memreserve_map_root(void)
 {
-	unsigned int i;
+	if (efi.mem_reserve == EFI_INVALID_TABLE_ADDR)
+		return -ENODEV;
 
-	if (phys_addr == EFI_INVALID_TABLE_ADDR)
-		return false;
-
-	for (i = 0; i < ARRAY_SIZE(efi_tables); i++)
-		if (*(efi_tables[i]) == phys_addr)
-			return true;
-
-	return false;
+	efi_memreserve_root = memremap(efi.mem_reserve,
+				       sizeof(*efi_memreserve_root),
+				       MEMREMAP_WB);
+	if (WARN_ON_ONCE(!efi_memreserve_root))
+		return -ENOMEM;
+	return 0;
 }
 
+int __ref efi_mem_reserve_persistent(phys_addr_t addr, u64 size)
+{
+	struct linux_efi_memreserve *rsv;
+	unsigned long prsv;
+	int rc, index;
+
+	if (efi_memreserve_root == (void *)ULONG_MAX)
+		return -ENODEV;
+
+	if (!efi_memreserve_root) {
+		rc = efi_memreserve_map_root();
+		if (rc)
+			return rc;
+	}
+
+	/* first try to find a slot in an existing linked list entry */
+	for (prsv = efi_memreserve_root->next; prsv; prsv = rsv->next) {
+		rsv = memremap(prsv, sizeof(*rsv), MEMREMAP_WB);
+		index = atomic_fetch_add_unless(&rsv->count, 1, rsv->size);
+		if (index < rsv->size) {
+			rsv->entry[index].base = addr;
+			rsv->entry[index].size = size;
+
+			memunmap(rsv);
+			return 0;
+		}
+		memunmap(rsv);
+	}
+
+	/* no slot found - allocate a new linked list entry */
+	rsv = (struct linux_efi_memreserve *)__get_free_page(GFP_ATOMIC);
+	if (!rsv)
+		return -ENOMEM;
+
+	/*
+	 * The memremap() call above assumes that a linux_efi_memreserve entry
+	 * never crosses a page boundary, so let's ensure that this remains true
+	 * even when kexec'ing a 4k pages kernel from a >4k pages kernel, by
+	 * using SZ_4K explicitly in the size calculation below.
+	 */
+	rsv->size = EFI_MEMRESERVE_COUNT(SZ_4K);
+	atomic_set(&rsv->count, 1);
+	rsv->entry[0].base = addr;
+	rsv->entry[0].size = size;
+
+	spin_lock(&efi_mem_reserve_persistent_lock);
+	rsv->next = efi_memreserve_root->next;
+	efi_memreserve_root->next = __pa(rsv);
+	spin_unlock(&efi_mem_reserve_persistent_lock);
+
+	return 0;
+}
+
+static int __init efi_memreserve_root_init(void)
+{
+	if (efi_memreserve_root)
+		return 0;
+	if (efi_memreserve_map_root())
+		efi_memreserve_root = (void *)ULONG_MAX;
+	return 0;
+}
+early_initcall(efi_memreserve_root_init);
+
 #ifdef CONFIG_KEXEC
 static int update_efi_random_seed(struct notifier_block *nb,
 				  unsigned long code, void *unused)
diff --git a/drivers/firmware/efi/efibc.c b/drivers/firmware/efi/efibc.c
index 503bbe2..35dccc8 100644
--- a/drivers/firmware/efi/efibc.c
+++ b/drivers/firmware/efi/efibc.c
@@ -1,15 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * efibc: control EFI bootloaders which obey LoaderEntryOneShot var
  * Copyright (c) 2013-2016, Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
  */
 
 #define pr_fmt(fmt) "efibc: " fmt
@@ -51,11 +43,13 @@
 	efibc_str_to_str16(value, (efi_char16_t *)entry->var.Data);
 	memcpy(&entry->var.VendorGuid, &guid, sizeof(guid));
 
-	ret = efivar_entry_set(entry,
-			       EFI_VARIABLE_NON_VOLATILE
-			       | EFI_VARIABLE_BOOTSERVICE_ACCESS
-			       | EFI_VARIABLE_RUNTIME_ACCESS,
-			       size, entry->var.Data, NULL);
+	ret = efivar_entry_set_safe(entry->var.VariableName,
+				    entry->var.VendorGuid,
+				    EFI_VARIABLE_NON_VOLATILE
+				    | EFI_VARIABLE_BOOTSERVICE_ACCESS
+				    | EFI_VARIABLE_RUNTIME_ACCESS,
+				    false, size, entry->var.Data);
+
 	if (ret)
 		pr_err("failed to set %s EFI variable: 0x%x\n",
 		       name, ret);
diff --git a/drivers/firmware/efi/efivars.c b/drivers/firmware/efi/efivars.c
index 3e626fd..7576450 100644
--- a/drivers/firmware/efi/efivars.c
+++ b/drivers/firmware/efi/efivars.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
 /*
  * Originally from efivars.c,
  *
@@ -6,63 +7,6 @@
  *
  * This code takes all variables accessible from EFI runtime and
  *  exports them via sysfs
- *
- *  This program is free software; you can redistribute it and/or modify
- *  it under the terms of the GNU General Public License as published by
- *  the Free Software Foundation; either version 2 of the License, or
- *  (at your option) any later version.
- *
- *  This program is distributed in the hope that it will be useful,
- *  but WITHOUT ANY WARRANTY; without even the implied warranty of
- *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- *  GNU General Public License for more details.
- *
- *  You should have received a copy of the GNU General Public License
- *  along with this program; if not, write to the Free Software
- *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
- *
- * Changelog:
- *
- *  17 May 2004 - Matt Domsch <Matt_Domsch@dell.com>
- *   remove check for efi_enabled in exit
- *   add MODULE_VERSION
- *
- *  26 Apr 2004 - Matt Domsch <Matt_Domsch@dell.com>
- *   minor bug fixes
- *
- *  21 Apr 2004 - Matt Tolentino <matthew.e.tolentino@intel.com)
- *   converted driver to export variable information via sysfs
- *   and moved to drivers/firmware directory
- *   bumped revision number to v0.07 to reflect conversion & move
- *
- *  10 Dec 2002 - Matt Domsch <Matt_Domsch@dell.com>
- *   fix locking per Peter Chubb's findings
- *
- *  25 Mar 2002 - Matt Domsch <Matt_Domsch@dell.com>
- *   move uuid_unparse() to include/asm-ia64/efi.h:efi_guid_to_str()
- *
- *  12 Feb 2002 - Matt Domsch <Matt_Domsch@dell.com>
- *   use list_for_each_safe when deleting vars.
- *   remove ifdef CONFIG_SMP around include <linux/smp.h>
- *   v0.04 release to linux-ia64@linuxia64.org
- *
- *  20 April 2001 - Matt Domsch <Matt_Domsch@dell.com>
- *   Moved vars from /proc/efi to /proc/efi/vars, and made
- *   efi.c own the /proc/efi directory.
- *   v0.03 release to linux-ia64@linuxia64.org
- *
- *  26 March 2001 - Matt Domsch <Matt_Domsch@dell.com>
- *   At the request of Stephane, moved ownership of /proc/efi
- *   to efi.c, and now efivars lives under /proc/efi/vars.
- *
- *  12 March 2001 - Matt Domsch <Matt_Domsch@dell.com>
- *   Feedback received from Stephane Eranian incorporated.
- *   efivar_write() checks copy_from_user() return value.
- *   efivar_read/write() returns proper errno.
- *   v0.02 release to linux-ia64@linuxia64.org
- *
- *  26 February 2001 - Matt Domsch <Matt_Domsch@dell.com>
- *   v0.01 release to linux-ia64@linuxia64.org
  */
 
 #include <linux/efi.h>
@@ -229,14 +173,6 @@
 	return 0;
 }
 
-static inline bool is_compat(void)
-{
-	if (IS_ENABLED(CONFIG_COMPAT) && in_compat_syscall())
-		return true;
-
-	return false;
-}
-
 static void
 copy_out_compat(struct efi_variable *dst, struct compat_efi_variable *src)
 {
@@ -263,7 +199,7 @@
 	u8 *data;
 	int err;
 
-	if (is_compat()) {
+	if (in_compat_syscall()) {
 		struct compat_efi_variable *compat;
 
 		if (count != sizeof(*compat))
@@ -324,7 +260,7 @@
 			     &entry->var.DataSize, entry->var.Data))
 		return -EIO;
 
-	if (is_compat()) {
+	if (in_compat_syscall()) {
 		compat = (struct compat_efi_variable *)buf;
 
 		size = sizeof(*compat);
@@ -418,7 +354,7 @@
 	struct compat_efi_variable *compat = (struct compat_efi_variable *)buf;
 	struct efi_variable *new_var = (struct efi_variable *)buf;
 	struct efivar_entry *new_entry;
-	bool need_compat = is_compat();
+	bool need_compat = in_compat_syscall();
 	efi_char16_t *name;
 	unsigned long size;
 	u32 attributes;
@@ -495,7 +431,7 @@
 	if (!capable(CAP_SYS_ADMIN))
 		return -EACCES;
 
-	if (is_compat()) {
+	if (in_compat_syscall()) {
 		if (count != sizeof(*compat))
 			return -EINVAL;
 
diff --git a/drivers/firmware/efi/esrt.c b/drivers/firmware/efi/esrt.c
index 5d06bd2..d6dd5f5 100644
--- a/drivers/firmware/efi/esrt.c
+++ b/drivers/firmware/efi/esrt.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
 /*
  * esrt.c
  *
diff --git a/drivers/firmware/efi/fake_mem.c b/drivers/firmware/efi/fake_mem.c
index 6c7d60c..9501edc 100644
--- a/drivers/firmware/efi/fake_mem.c
+++ b/drivers/firmware/efi/fake_mem.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * fake_mem.c
  *
@@ -8,21 +9,6 @@
  * By specifying this parameter, you can add arbitrary attribute to
  * specific memory range by updating original (firmware provided) EFI
  * memmap.
- *
- *  This program is free software; you can redistribute it and/or modify it
- *  under the terms and conditions of the GNU General Public License,
- *  version 2, as published by the Free Software Foundation.
- *
- *  This program is distributed in the hope it will be useful, but WITHOUT
- *  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- *  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- *  more details.
- *
- *  You should have received a copy of the GNU General Public License along with
- *  this program; if not, see <http://www.gnu.org/licenses/>.
- *
- *  The full GNU General Public License is included in this distribution in
- *  the file called "COPYING".
  */
 
 #include <linux/kernel.h>
diff --git a/drivers/firmware/efi/libstub/Makefile b/drivers/firmware/efi/libstub/Makefile
index c516276..ee0661d 100644
--- a/drivers/firmware/efi/libstub/Makefile
+++ b/drivers/firmware/efi/libstub/Makefile
@@ -9,13 +9,16 @@
 cflags-$(CONFIG_X86_64)		:= -mcmodel=small
 cflags-$(CONFIG_X86)		+= -m$(BITS) -D__KERNEL__ -O2 \
 				   -fPIC -fno-strict-aliasing -mno-red-zone \
-				   -mno-mmx -mno-sse -fshort-wchar
+				   -mno-mmx -mno-sse -fshort-wchar \
+				   -Wno-pointer-sign \
+				   $(call cc-disable-warning, address-of-packed-member) \
+				   $(call cc-disable-warning, gnu)
 
 # arm64 uses the full KBUILD_CFLAGS so it's necessary to explicitly
 # disable the stackleak plugin
-cflags-$(CONFIG_ARM64)		:= $(subst -pg,,$(KBUILD_CFLAGS)) -fpie \
-				   $(DISABLE_STACKLEAK_PLUGIN)
-cflags-$(CONFIG_ARM)		:= $(subst -pg,,$(KBUILD_CFLAGS)) \
+cflags-$(CONFIG_ARM64)		:= $(subst $(CC_FLAGS_FTRACE),,$(KBUILD_CFLAGS)) \
+				   -fpie $(DISABLE_STACKLEAK_PLUGIN)
+cflags-$(CONFIG_ARM)		:= $(subst $(CC_FLAGS_FTRACE),,$(KBUILD_CFLAGS)) \
 				   -fno-builtin -fpic \
 				   $(call cc-option,-mno-single-pic-base)
 
@@ -49,7 +52,8 @@
 
 lib-$(CONFIG_ARM)		+= arm32-stub.o
 lib-$(CONFIG_ARM64)		+= arm64-stub.o
-CFLAGS_arm64-stub.o 		:= -DTEXT_OFFSET=$(TEXT_OFFSET)
+CFLAGS_arm32-stub.o		:= -DTEXT_OFFSET=$(TEXT_OFFSET)
+CFLAGS_arm64-stub.o		:= -DTEXT_OFFSET=$(TEXT_OFFSET)
 
 #
 # arm64 puts the stub in the kernel proper, which will unnecessarily retain all
@@ -68,7 +72,6 @@
 extra-$(CONFIG_EFI_ARMSTUB)	:= $(lib-y)
 lib-$(CONFIG_EFI_ARMSTUB)	:= $(patsubst %.o,%.stub.o,$(lib-y))
 
-STUBCOPY_RM-y			:= -R *ksymtab* -R *kcrctab*
 STUBCOPY_FLAGS-$(CONFIG_ARM64)	+= --prefix-alloc-sections=.init \
 				   --prefix-symbols=__efistub_
 STUBCOPY_RELOC-$(CONFIG_ARM64)	:= R_AARCH64_ABS
@@ -83,12 +86,13 @@
 # this time, use objcopy and leave all sections in place.
 #
 quiet_cmd_stubcopy = STUBCPY $@
-      cmd_stubcopy = if $(STRIP) --strip-debug $(STUBCOPY_RM-y) -o $@ $<; \
-		     then if $(OBJDUMP) -r $@ | grep $(STUBCOPY_RELOC-y); \
-		     then (echo >&2 "$@: absolute symbol references not allowed in the EFI stub"; \
-			   rm -f $@; /bin/false); 			  \
-		     else $(OBJCOPY) $(STUBCOPY_FLAGS-y) $< $@; fi	  \
-		     else /bin/false; fi
+      cmd_stubcopy =							\
+	$(STRIP) --strip-debug -o $@ $<;				\
+	if $(OBJDUMP) -r $@ | grep $(STUBCOPY_RELOC-y); then		\
+		echo "$@: absolute symbol references not allowed in the EFI stub" >&2; \
+		/bin/false;						\
+	fi;								\
+	$(OBJCOPY) $(STUBCOPY_FLAGS-y) $< $@
 
 #
 # ARM discards the .data section because it disallows r/w data in the
diff --git a/drivers/firmware/efi/libstub/arm-stub.c b/drivers/firmware/efi/libstub/arm-stub.c
index 6920033..c382a48 100644
--- a/drivers/firmware/efi/libstub/arm-stub.c
+++ b/drivers/firmware/efi/libstub/arm-stub.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * EFI stub implementation that is shared by arm and arm64 architectures.
  * This should be #included by the EFI stub implementation files.
@@ -6,10 +7,6 @@
  *     Roy Franz <roy.franz@linaro.org
  * Copyright (C) 2013 Red Hat, Inc.
  *     Mark Salter <msalter@redhat.com>
- *
- * This file is part of the Linux kernel, and is made available under the
- * terms of the GNU General Public License version 2.
- *
  */
 
 #include <linux/efi.h>
@@ -33,7 +30,7 @@
 #define EFI_RT_VIRTUAL_SIZE	SZ_512M
 
 #ifdef CONFIG_ARM64
-# define EFI_RT_VIRTUAL_LIMIT	TASK_SIZE_64
+# define EFI_RT_VIRTUAL_LIMIT	DEFAULT_MAP_WINDOW_64
 #else
 # define EFI_RT_VIRTUAL_LIMIT	TASK_SIZE
 #endif
@@ -69,6 +66,31 @@
 	return si;
 }
 
+void install_memreserve_table(efi_system_table_t *sys_table_arg)
+{
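+	/*
+	 * Allocate an empty linux_efi_memreserve list head from
+	 * EFI_LOADER_DATA and publish it as a UEFI configuration table so
+	 * that the kernel proper can locate it later via
+	 * LINUX_EFI_MEMRESERVE_TABLE_GUID.
+	 */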
+	struct linux_efi_memreserve *rsv;
+	efi_guid_t memreserve_table_guid = LINUX_EFI_MEMRESERVE_TABLE_GUID;
+	efi_status_t status;
+
+	status = efi_call_early(allocate_pool, EFI_LOADER_DATA, sizeof(*rsv),
+				(void **)&rsv);
+	if (status != EFI_SUCCESS) {
+		pr_efi_err(sys_table_arg, "Failed to allocate memreserve entry!\n");
+		return;
+	}
+
+	rsv->next = 0;
+	rsv->size = 0;
+	atomic_set(&rsv->count, 0);
+
+	status = efi_call_early(install_configuration_table,
+				&memreserve_table_guid,
+				rsv);
+	if (status != EFI_SUCCESS)
+		pr_efi_err(sys_table_arg, "Failed to install memreserve config table!\n");
+}
+
+
 /*
  * This function handles the architcture specific differences between arm and
  * arm64 regarding where the kernel image must be loaded and any memory that
@@ -235,6 +257,8 @@
 		}
 	}
 
+	install_memreserve_table(sys_table);
+
 	new_fdt_addr = fdt_addr;
 	status = allocate_new_fdt_and_exit_boot(sys_table, handle,
 				&new_fdt_addr, efi_get_max_fdt_addr(dram_base),
@@ -340,6 +364,11 @@
 		paddr = in->phys_addr;
 		size = in->num_pages * EFI_PAGE_SIZE;
 
+		if (novamap()) {
+			in->virt_addr = in->phys_addr;
+			continue;
+		}
+
 		/*
 		 * Make the mapping compatible with 64k pages: this allows
 		 * a 4k page size kernel to kexec a 64k page size kernel and
diff --git a/drivers/firmware/efi/libstub/arm32-stub.c b/drivers/firmware/efi/libstub/arm32-stub.c
index becbda4..41213bf 100644
--- a/drivers/firmware/efi/libstub/arm32-stub.c
+++ b/drivers/firmware/efi/libstub/arm32-stub.c
@@ -1,10 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * Copyright (C) 2013 Linaro Ltd;  <roy.franz@linaro.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
  */
 #include <linux/efi.h>
 #include <asm/efi.h>
@@ -199,6 +195,7 @@
 				 unsigned long dram_base,
 				 efi_loaded_image_t *image)
 {
+	unsigned long kernel_base;
 	efi_status_t status;
 
 	/*
@@ -208,9 +205,18 @@
 	 * loaded. These assumptions are made by the decompressor,
 	 * before any memory map is available.
 	 */
-	dram_base = round_up(dram_base, SZ_128M);
+	kernel_base = round_up(dram_base, SZ_128M);
 
-	status = reserve_kernel_base(sys_table, dram_base, reserve_addr,
+	/*
+	 * Note that some platforms (notably, the Raspberry Pi 2) put
+	 * spin-tables and other pieces of firmware at the base of RAM,
+	 * abusing the fact that the window of TEXT_OFFSET bytes at the
+	 * base of the kernel image is only partially used at the moment.
+	 * (Up to 5 pages are used for the swapper page tables)
+	 */
+	kernel_base += TEXT_OFFSET - 5 * PAGE_SIZE;
+
+	status = reserve_kernel_base(sys_table, kernel_base, reserve_addr,
 				     reserve_size);
 	if (status != EFI_SUCCESS) {
 		pr_efi_err(sys_table, "Unable to allocate memory for uncompressed kernel.\n");
@@ -224,7 +230,7 @@
 	*image_size = image->image_size;
 	status = efi_relocate_kernel(sys_table, image_addr, *image_size,
 				     *image_size,
-				     dram_base + MAX_UNCOMP_KERNEL_SIZE, 0);
+				     kernel_base + MAX_UNCOMP_KERNEL_SIZE, 0, 0);
 	if (status != EFI_SUCCESS) {
 		pr_efi_err(sys_table, "Failed to relocate kernel.\n");
 		efi_free(sys_table, *reserve_size, *reserve_addr);
diff --git a/drivers/firmware/efi/libstub/arm64-stub.c b/drivers/firmware/efi/libstub/arm64-stub.c
index 1b4d465..1550d24 100644
--- a/drivers/firmware/efi/libstub/arm64-stub.c
+++ b/drivers/firmware/efi/libstub/arm64-stub.c
@@ -1,13 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * Copyright (C) 2013, 2014 Linaro Ltd;  <roy.franz@linaro.org>
  *
  * This file implements the EFI boot stub for the arm64 kernel.
  * Adapted from ARM version by Mark Salter <msalter@redhat.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
  */
 
 /*
diff --git a/drivers/firmware/efi/libstub/efi-stub-helper.c b/drivers/firmware/efi/libstub/efi-stub-helper.c
index e94975f..35dbc27 100644
--- a/drivers/firmware/efi/libstub/efi-stub-helper.c
+++ b/drivers/firmware/efi/libstub/efi-stub-helper.c
@@ -1,13 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * Helper functions used by the EFI stub on multiple
  * architectures. This should be #included by the EFI stub
  * implementation files.
  *
  * Copyright 2011 Intel Corporation; author Matt Fleming
- *
- * This file is part of the Linux kernel, and is made available
- * under the terms of the GNU General Public License version 2.
- *
  */
 
 #include <linux/efi.h>
@@ -34,6 +31,7 @@
 
 static int __section(.data) __nokaslr;
 static int __section(.data) __quiet;
+static int __section(.data) __novamap;
 
 int __pure nokaslr(void)
 {
@@ -43,6 +41,10 @@
 {
 	return __quiet;
 }
+int __pure novamap(void)
+{
+	return __novamap;
+}
 
 #define EFI_MMAP_NR_SLACK_SLOTS	8
 
@@ -258,11 +260,11 @@
 }
 
 /*
- * Allocate at the lowest possible address.
+ * Allocate at the lowest possible address that is not below 'min'.
  */
-efi_status_t efi_low_alloc(efi_system_table_t *sys_table_arg,
-			   unsigned long size, unsigned long align,
-			   unsigned long *addr)
+efi_status_t efi_low_alloc_above(efi_system_table_t *sys_table_arg,
+				 unsigned long size, unsigned long align,
+				 unsigned long *addr, unsigned long min)
 {
 	unsigned long map_size, desc_size, buff_size;
 	efi_memory_desc_t *map;
@@ -309,13 +311,8 @@
 		start = desc->phys_addr;
 		end = start + desc->num_pages * EFI_PAGE_SIZE;
 
-		/*
-		 * Don't allocate at 0x0. It will confuse code that
-		 * checks pointers against NULL. Skip the first 8
-		 * bytes so we start at a nice even number.
-		 */
-		if (start == 0x0)
-			start += 8;
+		if (start < min)
+			start = min;
 
 		start = round_up(start, align);
 		if ((start + size) > end)
@@ -482,6 +479,11 @@
 			__chunk_size = -1UL;
 		}
 
+		if (!strncmp(str, "novamap", 7)) {
+			str += strlen("novamap");
+			__novamap = 1;
+		}
+
 		/* Group words together, delimited by "," */
 		while (*str && *str != ' ' && *str != ',')
 			str++;
@@ -691,7 +693,8 @@
 				 unsigned long image_size,
 				 unsigned long alloc_size,
 				 unsigned long preferred_addr,
-				 unsigned long alignment)
+				 unsigned long alignment,
+				 unsigned long min_addr)
 {
 	unsigned long cur_image_addr;
 	unsigned long new_addr = 0;
@@ -724,8 +727,8 @@
 	 * possible.
 	 */
 	if (status != EFI_SUCCESS) {
-		status = efi_low_alloc(sys_table_arg, alloc_size, alignment,
-				       &new_addr);
+		status = efi_low_alloc_above(sys_table_arg, alloc_size,
+					     alignment, &new_addr, min_addr);
 	}
 	if (status != EFI_SUCCESS) {
 		pr_efi_err(sys_table_arg, "Failed to allocate usable memory for kernel.\n");
@@ -919,3 +922,34 @@
 fail:
 	return status;
 }
+
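+/*
+ * Generate 32-bit and 64-bit variants of a helper that walks the
+ * firmware's configuration table array and returns the table matching
+ * 'guid', or NULL if it is not present.
+ */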
+#define GET_EFI_CONFIG_TABLE(bits)					\
+static void *get_efi_config_table##bits(efi_system_table_t *_sys_table,	\
+					efi_guid_t guid)		\
+{									\
+	efi_system_table_##bits##_t *sys_table;				\
+	efi_config_table_##bits##_t *tables;				\
+	int i;								\
+									\
+	sys_table = (typeof(sys_table))_sys_table;			\
+	tables = (typeof(tables))(unsigned long)sys_table->tables;	\
+									\
+	for (i = 0; i < sys_table->nr_tables; i++) {			\
+		if (efi_guidcmp(tables[i].guid, guid) != 0)		\
+			continue;					\
+									\
+		return (void *)(unsigned long)tables[i].table;		\
+	}								\
+									\
+	return NULL;							\
+}
+GET_EFI_CONFIG_TABLE(32)
+GET_EFI_CONFIG_TABLE(64)
+
+void *get_efi_config_table(efi_system_table_t *sys_table, efi_guid_t guid)
+{
+	if (efi_is_64bit())
+		return get_efi_config_table64(sys_table, guid);
+	else
+		return get_efi_config_table32(sys_table, guid);
+}
diff --git a/drivers/firmware/efi/libstub/efistub.h b/drivers/firmware/efi/libstub/efistub.h
index 32799cf..7f1556f 100644
--- a/drivers/firmware/efi/libstub/efistub.h
+++ b/drivers/firmware/efi/libstub/efistub.h
@@ -27,6 +27,7 @@
 
 extern int __pure nokaslr(void);
 extern int __pure is_quiet(void);
+extern int __pure novamap(void);
 
 #define pr_efi(sys_table, msg)		do {				\
 	if (!is_quiet()) efi_printk(sys_table, "EFI stub: "msg);	\
@@ -64,4 +65,17 @@
 
 efi_status_t efi_random_get_seed(efi_system_table_t *sys_table_arg);
 
+void *get_efi_config_table(efi_system_table_t *sys_table, efi_guid_t guid);
+
+/* Helper macros for the usual case of using simple C variables: */
+#ifndef fdt_setprop_inplace_var
+#define fdt_setprop_inplace_var(fdt, node_offset, name, var) \
+	fdt_setprop_inplace((fdt), (node_offset), (name), &(var), sizeof(var))
+#endif
+
+#ifndef fdt_setprop_var
+#define fdt_setprop_var(fdt, node_offset, name, var) \
+	fdt_setprop((fdt), (node_offset), (name), &(var), sizeof(var))
+#endif
+
 #endif
diff --git a/drivers/firmware/efi/libstub/fdt.c b/drivers/firmware/efi/libstub/fdt.c
index 0c0d231..0bf0190 100644
--- a/drivers/firmware/efi/libstub/fdt.c
+++ b/drivers/firmware/efi/libstub/fdt.c
@@ -1,13 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * FDT related Helper functions used by the EFI stub on multiple
  * architectures. This should be #included by the EFI stub
  * implementation files.
  *
  * Copyright 2013 Linaro Limited; author Roy Franz
- *
- * This file is part of the Linux kernel, and is made available
- * under the terms of the GNU General Public License version 2.
- *
  */
 
 #include <linux/efi.h>
@@ -26,10 +23,8 @@
 	offset = fdt_path_offset(fdt, "/");
 	/* Set the #address-cells and #size-cells values for an empty tree */
 
-	fdt_setprop_u32(fdt, offset, "#address-cells",
-			EFI_DT_ADDR_CELLS_DEFAULT);
-
-	fdt_setprop_u32(fdt, offset, "#size-cells", EFI_DT_SIZE_CELLS_DEFAULT);
+	fdt_setprop_u32(fdt, offset, "#address-cells", EFI_DT_ADDR_CELLS_DEFAULT);
+	fdt_setprop_u32(fdt, offset, "#size-cells",    EFI_DT_SIZE_CELLS_DEFAULT);
 }
 
 static efi_status_t update_fdt(efi_system_table_t *sys_table, void *orig_fdt,
@@ -42,7 +37,7 @@
 	u32 fdt_val32;
 	u64 fdt_val64;
 
-	/* Do some checks on provided FDT, if it exists*/
+	/* Do some checks on provided FDT, if it exists: */
 	if (orig_fdt) {
 		if (fdt_check_header(orig_fdt)) {
 			pr_efi_err(sys_table, "Device Tree header not valid!\n");
@@ -50,7 +45,7 @@
 		}
 		/*
 		 * We don't get the size of the FDT if we get if from a
-		 * configuration table.
+		 * configuration table:
 		 */
 		if (orig_fdt_size && fdt_totalsize(orig_fdt) > orig_fdt_size) {
 			pr_efi_err(sys_table, "Truncated device tree! foo!\n");
@@ -64,8 +59,8 @@
 		status = fdt_create_empty_tree(fdt, new_fdt_size);
 		if (status == 0) {
 			/*
-			 * Any failure from the following function is non
-			 * critical
+			 * Any failure from the following function is
+			 * non-critical:
 			 */
 			fdt_update_cell_size(sys_table, fdt);
 		}
@@ -86,12 +81,13 @@
 	if (node < 0) {
 		node = fdt_add_subnode(fdt, 0, "chosen");
 		if (node < 0) {
-			status = node; /* node is error code when negative */
+			/* 'node' is an error code when negative: */
+			status = node;
 			goto fdt_set_fail;
 		}
 	}
 
-	if ((cmdline_ptr != NULL) && (strlen(cmdline_ptr) > 0)) {
+	if (cmdline_ptr != NULL && strlen(cmdline_ptr) > 0) {
 		status = fdt_setprop(fdt, node, "bootargs", cmdline_ptr,
 				     strlen(cmdline_ptr) + 1);
 		if (status)
@@ -103,13 +99,12 @@
 		u64 initrd_image_end;
 		u64 initrd_image_start = cpu_to_fdt64(initrd_addr);
 
-		status = fdt_setprop(fdt, node, "linux,initrd-start",
-				     &initrd_image_start, sizeof(u64));
+		status = fdt_setprop_var(fdt, node, "linux,initrd-start", initrd_image_start);
 		if (status)
 			goto fdt_set_fail;
+
 		initrd_image_end = cpu_to_fdt64(initrd_addr + initrd_size);
-		status = fdt_setprop(fdt, node, "linux,initrd-end",
-				     &initrd_image_end, sizeof(u64));
+		status = fdt_setprop_var(fdt, node, "linux,initrd-end", initrd_image_end);
 		if (status)
 			goto fdt_set_fail;
 	}
@@ -117,30 +112,28 @@
 	/* Add FDT entries for EFI runtime services in chosen node. */
 	node = fdt_subnode_offset(fdt, 0, "chosen");
 	fdt_val64 = cpu_to_fdt64((u64)(unsigned long)sys_table);
-	status = fdt_setprop(fdt, node, "linux,uefi-system-table",
-			     &fdt_val64, sizeof(fdt_val64));
+
+	status = fdt_setprop_var(fdt, node, "linux,uefi-system-table", fdt_val64);
 	if (status)
 		goto fdt_set_fail;
 
 	fdt_val64 = U64_MAX; /* placeholder */
-	status = fdt_setprop(fdt, node, "linux,uefi-mmap-start",
-			     &fdt_val64,  sizeof(fdt_val64));
+
+	status = fdt_setprop_var(fdt, node, "linux,uefi-mmap-start", fdt_val64);
 	if (status)
 		goto fdt_set_fail;
 
 	fdt_val32 = U32_MAX; /* placeholder */
-	status = fdt_setprop(fdt, node, "linux,uefi-mmap-size",
-			     &fdt_val32,  sizeof(fdt_val32));
+
+	status = fdt_setprop_var(fdt, node, "linux,uefi-mmap-size", fdt_val32);
 	if (status)
 		goto fdt_set_fail;
 
-	status = fdt_setprop(fdt, node, "linux,uefi-mmap-desc-size",
-			     &fdt_val32, sizeof(fdt_val32));
+	status = fdt_setprop_var(fdt, node, "linux,uefi-mmap-desc-size", fdt_val32);
 	if (status)
 		goto fdt_set_fail;
 
-	status = fdt_setprop(fdt, node, "linux,uefi-mmap-desc-ver",
-			     &fdt_val32, sizeof(fdt_val32));
+	status = fdt_setprop_var(fdt, node, "linux,uefi-mmap-desc-ver", fdt_val32);
 	if (status)
 		goto fdt_set_fail;
 
@@ -150,8 +143,7 @@
 		efi_status = efi_get_random_bytes(sys_table, sizeof(fdt_val64),
 						  (u8 *)&fdt_val64);
 		if (efi_status == EFI_SUCCESS) {
-			status = fdt_setprop(fdt, node, "kaslr-seed",
-					     &fdt_val64, sizeof(fdt_val64));
+			status = fdt_setprop_var(fdt, node, "kaslr-seed", fdt_val64);
 			if (status)
 				goto fdt_set_fail;
 		} else if (efi_status != EFI_NOT_FOUND) {
@@ -159,7 +151,7 @@
 		}
 	}
 
-	/* shrink the FDT back to its minimum size */
+	/* Shrink the FDT back to its minimum size: */
 	fdt_pack(fdt);
 
 	return EFI_SUCCESS;
@@ -182,26 +174,26 @@
 		return EFI_LOAD_ERROR;
 
 	fdt_val64 = cpu_to_fdt64((unsigned long)*map->map);
-	err = fdt_setprop_inplace(fdt, node, "linux,uefi-mmap-start",
-				  &fdt_val64, sizeof(fdt_val64));
+
+	err = fdt_setprop_inplace_var(fdt, node, "linux,uefi-mmap-start", fdt_val64);
 	if (err)
 		return EFI_LOAD_ERROR;
 
 	fdt_val32 = cpu_to_fdt32(*map->map_size);
-	err = fdt_setprop_inplace(fdt, node, "linux,uefi-mmap-size",
-				  &fdt_val32, sizeof(fdt_val32));
+
+	err = fdt_setprop_inplace_var(fdt, node, "linux,uefi-mmap-size", fdt_val32);
 	if (err)
 		return EFI_LOAD_ERROR;
 
 	fdt_val32 = cpu_to_fdt32(*map->desc_size);
-	err = fdt_setprop_inplace(fdt, node, "linux,uefi-mmap-desc-size",
-				  &fdt_val32, sizeof(fdt_val32));
+
+	err = fdt_setprop_inplace_var(fdt, node, "linux,uefi-mmap-desc-size", fdt_val32);
 	if (err)
 		return EFI_LOAD_ERROR;
 
 	fdt_val32 = cpu_to_fdt32(*map->desc_ver);
-	err = fdt_setprop_inplace(fdt, node, "linux,uefi-mmap-desc-ver",
-				  &fdt_val32, sizeof(fdt_val32));
+
+	err = fdt_setprop_inplace_var(fdt, node, "linux,uefi-mmap-desc-ver", fdt_val32);
 	if (err)
 		return EFI_LOAD_ERROR;
 
@@ -209,13 +201,13 @@
 }
 
 #ifndef EFI_FDT_ALIGN
-#define EFI_FDT_ALIGN EFI_PAGE_SIZE
+# define EFI_FDT_ALIGN EFI_PAGE_SIZE
 #endif
 
 struct exit_boot_struct {
-	efi_memory_desc_t *runtime_map;
-	int *runtime_entry_count;
-	void *new_fdt_addr;
+	efi_memory_desc_t	*runtime_map;
+	int			*runtime_entry_count;
+	void			*new_fdt_addr;
 };
 
 static efi_status_t exit_boot_func(efi_system_table_t *sys_table_arg,
@@ -235,7 +227,7 @@
 }
 
 #ifndef MAX_FDT_SIZE
-#define MAX_FDT_SIZE	SZ_2M
+# define MAX_FDT_SIZE SZ_2M
 #endif
 
 /*
@@ -266,16 +258,16 @@
 	unsigned long mmap_key;
 	efi_memory_desc_t *memory_map, *runtime_map;
 	efi_status_t status;
-	int runtime_entry_count = 0;
+	int runtime_entry_count;
 	struct efi_boot_memmap map;
 	struct exit_boot_struct priv;
 
-	map.map =	&runtime_map;
-	map.map_size =	&map_size;
-	map.desc_size =	&desc_size;
-	map.desc_ver =	&desc_ver;
-	map.key_ptr =	&mmap_key;
-	map.buff_size =	&buff_size;
+	map.map		= &runtime_map;
+	map.map_size	= &map_size;
+	map.desc_size	= &desc_size;
+	map.desc_ver	= &desc_ver;
+	map.key_ptr	= &mmap_key;
+	map.buff_size	= &buff_size;
 
 	/*
 	 * Get a copy of the current memory map that we will use to prepare
@@ -289,15 +281,13 @@
 		return status;
 	}
 
-	pr_efi(sys_table,
-	       "Exiting boot services and installing virtual address map...\n");
+	pr_efi(sys_table, "Exiting boot services and installing virtual address map...\n");
 
 	map.map = &memory_map;
 	status = efi_high_alloc(sys_table, MAX_FDT_SIZE, EFI_FDT_ALIGN,
 				new_fdt_addr, max_addr);
 	if (status != EFI_SUCCESS) {
-		pr_efi_err(sys_table,
-			   "Unable to allocate memory for new device tree.\n");
+		pr_efi_err(sys_table, "Unable to allocate memory for new device tree.\n");
 		goto fail;
 	}
 
@@ -318,15 +308,19 @@
 		goto fail_free_new_fdt;
 	}
 
-	priv.runtime_map = runtime_map;
-	priv.runtime_entry_count = &runtime_entry_count;
-	priv.new_fdt_addr = (void *)*new_fdt_addr;
-	status = efi_exit_boot_services(sys_table, handle, &map, &priv,
-					exit_boot_func);
+	runtime_entry_count		= 0;
+	priv.runtime_map		= runtime_map;
+	priv.runtime_entry_count	= &runtime_entry_count;
+	priv.new_fdt_addr		= (void *)*new_fdt_addr;
+
+	status = efi_exit_boot_services(sys_table, handle, &map, &priv, exit_boot_func);
 
 	if (status == EFI_SUCCESS) {
 		efi_set_virtual_address_map_t *svam;
 
+		if (novamap())
+			return EFI_SUCCESS;
+
 		/* Install the new virtual address map */
 		svam = sys_table->runtime->set_virtual_address_map;
 		status = svam(runtime_entry_count * desc_size, desc_size,
@@ -363,29 +357,23 @@
 
 fail:
 	sys_table->boottime->free_pool(runtime_map);
+
 	return EFI_LOAD_ERROR;
 }
 
 void *get_fdt(efi_system_table_t *sys_table, unsigned long *fdt_size)
 {
-	efi_guid_t fdt_guid = DEVICE_TREE_GUID;
-	efi_config_table_t *tables;
 	void *fdt;
-	int i;
 
-	tables = (efi_config_table_t *) sys_table->tables;
-	fdt = NULL;
+	fdt = get_efi_config_table(sys_table, DEVICE_TREE_GUID);
 
-	for (i = 0; i < sys_table->nr_tables; i++)
-		if (efi_guidcmp(tables[i].guid, fdt_guid) == 0) {
-			fdt = (void *) tables[i].table;
-			if (fdt_check_header(fdt) != 0) {
-				pr_efi_err(sys_table, "Invalid header detected on UEFI supplied FDT, ignoring ...\n");
-				return NULL;
-			}
-			*fdt_size = fdt_totalsize(fdt);
-			break;
-	 }
+	if (!fdt)
+		return NULL;
 
+	if (fdt_check_header(fdt) != 0) {
+		pr_efi_err(sys_table, "Invalid header detected on UEFI supplied FDT, ignoring ...\n");
+		return NULL;
+	}
+	*fdt_size = fdt_totalsize(fdt);
 	return fdt;
 }
diff --git a/drivers/firmware/efi/libstub/gop.c b/drivers/firmware/efi/libstub/gop.c
index 24c461d..0101ca4 100644
--- a/drivers/firmware/efi/libstub/gop.c
+++ b/drivers/firmware/efi/libstub/gop.c
@@ -1,10 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
 /* -----------------------------------------------------------------------
  *
  *   Copyright 2011 Intel Corporation; author Matt Fleming
  *
- *   This file is part of the Linux kernel, and is made available under
- *   the terms of the GNU General Public License version 2.
- *
  * ----------------------------------------------------------------------- */
 
 #include <linux/efi.h>
diff --git a/drivers/firmware/efi/libstub/random.c b/drivers/firmware/efi/libstub/random.c
index e0e603a..b4b1d1d 100644
--- a/drivers/firmware/efi/libstub/random.c
+++ b/drivers/firmware/efi/libstub/random.c
@@ -1,10 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * Copyright (C) 2016 Linaro Ltd;  <ard.biesheuvel@linaro.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
  */
 
 #include <linux/efi.h>
diff --git a/drivers/firmware/efi/libstub/secureboot.c b/drivers/firmware/efi/libstub/secureboot.c
index 72d9dfb..edba5e7 100644
--- a/drivers/firmware/efi/libstub/secureboot.c
+++ b/drivers/firmware/efi/libstub/secureboot.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * Secure boot handling.
  *
@@ -5,9 +6,6 @@
  *     Roy Franz <roy.franz@linaro.org
  * Copyright (C) 2013 Red Hat, Inc.
  *     Mark Salter <msalter@redhat.com>
- *
- * This file is part of the Linux kernel, and is made available under the
- * terms of the GNU General Public License version 2.
  */
 #include <linux/efi.h>
 #include <asm/efi.h>
diff --git a/drivers/firmware/efi/libstub/tpm.c b/drivers/firmware/efi/libstub/tpm.c
index a90b0b8..eb9af83 100644
--- a/drivers/firmware/efi/libstub/tpm.c
+++ b/drivers/firmware/efi/libstub/tpm.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * TPM handling.
  *
@@ -5,9 +6,6 @@
  * Copyright (C) 2017 Google, Inc.
  *     Matthew Garrett <mjg59@google.com>
  *     Thiebaud Weksteen <tweek@google.com>
- *
- * This file is part of the Linux kernel, and is made available under the
- * terms of the GNU General Public License version 2.
  */
 #include <linux/efi.h>
 #include <linux/tpm_eventlog.h>
@@ -59,31 +57,40 @@
 
 #endif
 
-static void efi_retrieve_tpm2_eventlog_1_2(efi_system_table_t *sys_table_arg)
+void efi_retrieve_tpm2_eventlog(efi_system_table_t *sys_table_arg)
 {
 	efi_guid_t tcg2_guid = EFI_TCG2_PROTOCOL_GUID;
 	efi_guid_t linux_eventlog_guid = LINUX_EFI_TPM_EVENT_LOG_GUID;
 	efi_status_t status;
 	efi_physical_addr_t log_location = 0, log_last_entry = 0;
 	struct linux_efi_tpm_eventlog *log_tbl = NULL;
+	struct efi_tcg2_final_events_table *final_events_table;
 	unsigned long first_entry_addr, last_entry_addr;
 	size_t log_size, last_entry_size;
 	efi_bool_t truncated;
+	int version = EFI_TCG2_EVENT_LOG_FORMAT_TCG_2;
 	void *tcg2_protocol = NULL;
+	int final_events_size = 0;
 
 	status = efi_call_early(locate_protocol, &tcg2_guid, NULL,
 				&tcg2_protocol);
 	if (status != EFI_SUCCESS)
 		return;
 
-	status = efi_call_proto(efi_tcg2_protocol, get_event_log, tcg2_protocol,
-				EFI_TCG2_EVENT_LOG_FORMAT_TCG_1_2,
-				&log_location, &log_last_entry, &truncated);
-	if (status != EFI_SUCCESS)
-		return;
+	status = efi_call_proto(efi_tcg2_protocol, get_event_log,
+				tcg2_protocol, version, &log_location,
+				&log_last_entry, &truncated);
 
-	if (!log_location)
-		return;
+	if (status != EFI_SUCCESS || !log_location) {
+		version = EFI_TCG2_EVENT_LOG_FORMAT_TCG_1_2;
+		status = efi_call_proto(efi_tcg2_protocol, get_event_log,
+					tcg2_protocol, version, &log_location,
+					&log_last_entry, &truncated);
+		if (status != EFI_SUCCESS || !log_location)
+			return;
+
+	}
+
 	first_entry_addr = (unsigned long) log_location;
 
 	/*
@@ -98,8 +105,23 @@
 		 * We need to calculate its size to deduce the full size of
 		 * the logs.
 		 */
-		last_entry_size = sizeof(struct tcpa_event) +
-			((struct tcpa_event *) last_entry_addr)->event_size;
+		if (version == EFI_TCG2_EVENT_LOG_FORMAT_TCG_2) {
+			/*
+			 * The TCG2 log format has variable length entries,
+			 * and the information to decode the hash algorithms
+			 * back into a size is contained in the first entry -
+			 * pass a pointer to the final entry (to calculate its
+			 * size) and the first entry (so we know how long each
+			 * digest is)
+			 */
+			last_entry_size =
+				__calc_tpm2_event_size((void *)last_entry_addr,
+						    (void *)(long)log_location,
+						    false);
+		} else {
+			last_entry_size = sizeof(struct tcpa_event) +
+			   ((struct tcpa_event *) last_entry_addr)->event_size;
+		}
 		log_size = log_last_entry - log_location + last_entry_size;
 	}
 
@@ -114,9 +136,37 @@
 		return;
 	}
 
+	/*
+	 * Figure out whether any events have already been logged to the
+	 * final events structure, and if so how much space they take up
+	 */
+	final_events_table = get_efi_config_table(sys_table_arg,
+						LINUX_EFI_TPM_FINAL_LOG_GUID);
+	if (final_events_table && final_events_table->nr_events) {
+		struct tcg_pcr_event2_head *header;
+		int offset;
+		void *data;
+		int event_size;
+		int i = final_events_table->nr_events;
+
+		data = (void *)final_events_table;
+		offset = sizeof(final_events_table->version) +
+			sizeof(final_events_table->nr_events);
+
+		while (i > 0) {
+			header = data + offset + final_events_size;
+			event_size = __calc_tpm2_event_size(header,
+						   (void *)(long)log_location,
+						   false);
+			final_events_size += event_size;
+			i--;
+		}
+	}
+
 	memset(log_tbl, 0, sizeof(*log_tbl) + log_size);
 	log_tbl->size = log_size;
-	log_tbl->version = EFI_TCG2_EVENT_LOG_FORMAT_TCG_1_2;
+	log_tbl->final_events_preboot_size = final_events_size;
+	log_tbl->version = version;
 	memcpy(log_tbl->log, (void *) first_entry_addr, log_size);
 
 	status = efi_call_early(install_configuration_table,
@@ -128,9 +178,3 @@
 err_free:
 	efi_call_early(free_pool, log_tbl);
 }
-
-void efi_retrieve_tpm2_eventlog(efi_system_table_t *sys_table_arg)
-{
-	/* Only try to retrieve the logs in 1.2 format. */
-	efi_retrieve_tpm2_eventlog_1_2(sys_table_arg);
-}
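
Note: the last-entry size math above differs by log format. A TCG 1.2 ("tcpa") entry is a fixed SHA-1 header followed by a self-described payload, whereas a crypto-agile TCG2 entry needs __calc_tpm2_event_size() plus the log's first entry to learn the per-algorithm digest sizes. A standalone sketch of the simpler 1.2 case (struct layout follows the TCG 1.2 event format; names here are illustrative, not the kernel's tpm_eventlog.h definitions):

    #include <stdint.h>
    #include <stdio.h>

    struct tcpa_event_12 {
        uint32_t pcr_index;
        uint32_t event_type;
        uint8_t  pcr_value[20];   /* single SHA-1 digest */
        uint32_t event_size;      /* length of the trailing payload */
        uint8_t  event_data[];    /* variable-length payload */
    };

    static size_t tcpa_event_total_size(const struct tcpa_event_12 *ev)
    {
        /* fixed header plus the payload it announces */
        return sizeof(*ev) + ev->event_size;
    }

    int main(void)
    {
        struct tcpa_event_12 ev = { .event_size = 16 };

        /* 32-byte header + 16-byte payload = 48 */
        printf("last entry occupies %zu bytes\n", tcpa_event_total_size(&ev));
        return 0;
    }
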
diff --git a/drivers/firmware/efi/memattr.c b/drivers/firmware/efi/memattr.c
index 8986757..58452fd 100644
--- a/drivers/firmware/efi/memattr.c
+++ b/drivers/firmware/efi/memattr.c
@@ -1,9 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * Copyright (C) 2016 Linaro Ltd. <ard.biesheuvel@linaro.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
  */
 
 #define pr_fmt(fmt)	"efi: memattr: " fmt
@@ -94,7 +91,7 @@
 
 		if (!(md->attribute & EFI_MEMORY_RUNTIME))
 			continue;
-		if (md->virt_addr == 0) {
+		if (md->virt_addr == 0 && md->phys_addr != 0) {
 			/* no virtual mapping has been installed by the stub */
 			break;
 		}
diff --git a/drivers/firmware/efi/memmap.c b/drivers/firmware/efi/memmap.c
index 1907db2..38b686c 100644
--- a/drivers/firmware/efi/memmap.c
+++ b/drivers/firmware/efi/memmap.c
@@ -15,7 +15,7 @@
 
 static phys_addr_t __init __efi_memmap_alloc_early(unsigned long size)
 {
-	return memblock_alloc(size, 0);
+	return memblock_phys_alloc(size, SMP_CACHE_BYTES);
 }
 
 static phys_addr_t __init __efi_memmap_alloc_late(unsigned long size)
diff --git a/drivers/firmware/efi/rci2-table.c b/drivers/firmware/efi/rci2-table.c
new file mode 100644
index 0000000..76b0c35
--- /dev/null
+++ b/drivers/firmware/efi/rci2-table.c
@@ -0,0 +1,147 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Export Runtime Configuration Interface Table Version 2 (RCI2)
+ * to sysfs
+ *
+ * Copyright (C) 2019 Dell Inc
+ * by Narendra K <Narendra.K@dell.com>
+ *
+ * System firmware advertises the address of the RCI2 Table via
+ * an EFI Configuration Table entry. This code retrieves the RCI2
+ * table from the address and exports it to sysfs as a binary
+ * attribute 'rci2' under /sys/firmware/efi/tables directory.
+ */
+
+#include <linux/kobject.h>
+#include <linux/device.h>
+#include <linux/sysfs.h>
+#include <linux/efi.h>
+#include <linux/types.h>
+#include <linux/io.h>
+
+#define RCI_SIGNATURE	"_RC_"
+
+struct rci2_table_global_hdr {
+	u16 type;
+	u16 resvd0;
+	u16 hdr_len;
+	u8 rci2_sig[4];
+	u16 resvd1;
+	u32 resvd2;
+	u32 resvd3;
+	u8 major_rev;
+	u8 minor_rev;
+	u16 num_of_structs;
+	u32 rci2_len;
+	u16 rci2_chksum;
+} __packed;
+
+static u8 *rci2_base;
+static u32 rci2_table_len;
+unsigned long rci2_table_phys __ro_after_init = EFI_INVALID_TABLE_ADDR;
+
+static ssize_t raw_table_read(struct file *file, struct kobject *kobj,
+			      struct bin_attribute *attr, char *buf,
+			      loff_t pos, size_t count)
+{
+	memcpy(buf, attr->private + pos, count);
+	return count;
+}
+
+static BIN_ATTR(rci2, S_IRUSR, raw_table_read, NULL, 0);
+
+static u16 checksum(void)
+{
+	u8 len_is_odd = rci2_table_len % 2;
+	u32 chksum_len = rci2_table_len;
+	u16 *base = (u16 *)rci2_base;
+	u8 buf[2] = {0};
+	u32 offset = 0;
+	u16 chksum = 0;
+
+	if (len_is_odd)
+		chksum_len -= 1;
+
+	while (offset < chksum_len) {
+		chksum += *base;
+		offset += 2;
+		base++;
+	}
+
+	if (len_is_odd) {
+		buf[0] = *(u8 *)base;
+		chksum += *(u16 *)(buf);
+	}
+
+	return chksum;
+}
+
+static int __init efi_rci2_sysfs_init(void)
+{
+	struct kobject *tables_kobj;
+	int ret = -ENOMEM;
+
+	rci2_base = memremap(rci2_table_phys,
+			     sizeof(struct rci2_table_global_hdr),
+			     MEMREMAP_WB);
+	if (!rci2_base) {
+		pr_debug("RCI2 table init failed - could not map RCI2 table\n");
+		goto err;
+	}
+
+	if (strncmp(rci2_base +
+		    offsetof(struct rci2_table_global_hdr, rci2_sig),
+		    RCI_SIGNATURE, 4)) {
+		pr_debug("RCI2 table init failed - incorrect signature\n");
+		ret = -ENODEV;
+		goto err_unmap;
+	}
+
+	rci2_table_len = *(u32 *)(rci2_base +
+				  offsetof(struct rci2_table_global_hdr,
+				  rci2_len));
+
+	memunmap(rci2_base);
+
+	if (!rci2_table_len) {
+		pr_debug("RCI2 table init failed - incorrect table length\n");
+		goto err;
+	}
+
+	rci2_base = memremap(rci2_table_phys, rci2_table_len, MEMREMAP_WB);
+	if (!rci2_base) {
+		pr_debug("RCI2 table - could not map RCI2 table\n");
+		goto err;
+	}
+
+	if (checksum() != 0) {
+		pr_debug("RCI2 table - incorrect checksum\n");
+		ret = -ENODEV;
+		goto err_unmap;
+	}
+
+	tables_kobj = kobject_create_and_add("tables", efi_kobj);
+	if (!tables_kobj) {
+		pr_debug("RCI2 table - tables_kobj creation failed\n");
+		goto err_unmap;
+	}
+
+	bin_attr_rci2.size = rci2_table_len;
+	bin_attr_rci2.private = rci2_base;
+	ret = sysfs_create_bin_file(tables_kobj, &bin_attr_rci2);
+	if (ret != 0) {
+		pr_debug("RCI2 table - rci2 sysfs bin file creation failed\n");
+		kobject_del(tables_kobj);
+		kobject_put(tables_kobj);
+		goto err_unmap;
+	}
+
+	return 0;
+
+ err_unmap:
+	memunmap(rci2_base);
+ err:
+	pr_debug("RCI2 table - sysfs initialization failed\n");
+	return ret;
+}
+late_initcall(efi_rci2_sysfs_init);
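
Note: the probe accepts the RCI2 table only when the 16-bit word sum over the whole image (including the stored checksum field) wraps to zero, with an odd trailing byte zero-padded. A userspace sketch of the same arithmetic, assuming a little-endian host and made-up sample bytes:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    static uint16_t rci2_sum16(const uint8_t *buf, size_t len)
    {
        uint16_t sum = 0;
        size_t i;

        for (i = 0; i + 1 < len; i += 2) {
            uint16_t word;

            memcpy(&word, buf + i, sizeof(word));
            sum = (uint16_t)(sum + word);
        }
        if (len & 1)                    /* pad a trailing odd byte with 0 */
            sum = (uint16_t)(sum + buf[len - 1]);

        return sum;
    }

    int main(void)
    {
        /* 5-byte example; values chosen so the 16-bit sum wraps to 0 */
        uint8_t table[5] = { 0x34, 0x12, 0xcb, 0xed, 0x01 };
        uint16_t sum = rci2_sum16(table, sizeof(table));

        printf("sum = 0x%04x (%s)\n", sum, sum == 0 ? "ok" : "corrupt");
        return 0;
    }
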
diff --git a/drivers/firmware/efi/runtime-map.c b/drivers/firmware/efi/runtime-map.c
index 84a11d0..ad9ddef 100644
--- a/drivers/firmware/efi/runtime-map.c
+++ b/drivers/firmware/efi/runtime-map.c
@@ -1,8 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * linux/drivers/efi/runtime-map.c
  * Copyright (C) 2013 Red Hat, Inc., Dave Young <dyoung@redhat.com>
- *
- * This file is released under the GPLv2.
  */
 
 #include <linux/string.h>
diff --git a/drivers/firmware/efi/runtime-wrappers.c b/drivers/firmware/efi/runtime-wrappers.c
index aa66cbf..65fffaa 100644
--- a/drivers/firmware/efi/runtime-wrappers.c
+++ b/drivers/firmware/efi/runtime-wrappers.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * runtime-wrappers.c - Runtime Services function call wrappers
  *
@@ -19,8 +20,6 @@
  * Copyright (C) 1999-2002 Hewlett-Packard Co.
  * Copyright (C) 2005-2008 Intel Co.
  * Copyright (C) 2013 SuSE Labs
- *
- * This file is released under the GPLv2.
  */
 
 #define pr_fmt(fmt)	"efi: " fmt
@@ -45,39 +44,7 @@
 #define __efi_call_virt(f, args...) \
 	__efi_call_virt_pointer(efi.systab->runtime, f, args)
 
-/* efi_runtime_service() function identifiers */
-enum efi_rts_ids {
-	GET_TIME,
-	SET_TIME,
-	GET_WAKEUP_TIME,
-	SET_WAKEUP_TIME,
-	GET_VARIABLE,
-	GET_NEXT_VARIABLE,
-	SET_VARIABLE,
-	QUERY_VARIABLE_INFO,
-	GET_NEXT_HIGH_MONO_COUNT,
-	UPDATE_CAPSULE,
-	QUERY_CAPSULE_CAPS,
-};
-
-/*
- * efi_runtime_work:	Details of EFI Runtime Service work
- * @arg<1-5>:		EFI Runtime Service function arguments
- * @status:		Status of executing EFI Runtime Service
- * @efi_rts_id:		EFI Runtime Service function identifier
- * @efi_rts_comp:	Struct used for handling completions
- */
-struct efi_runtime_work {
-	void *arg1;
-	void *arg2;
-	void *arg3;
-	void *arg4;
-	void *arg5;
-	efi_status_t status;
-	struct work_struct work;
-	enum efi_rts_ids efi_rts_id;
-	struct completion efi_rts_comp;
-};
+struct efi_runtime_work efi_rts_work;
 
 /*
  * efi_queue_work:	Queue efi_runtime_service() and wait until it's done
@@ -91,11 +58,15 @@
  */
 #define efi_queue_work(_rts, _arg1, _arg2, _arg3, _arg4, _arg5)		\
 ({									\
-	struct efi_runtime_work efi_rts_work;				\
 	efi_rts_work.status = EFI_ABORTED;				\
 									\
+	if (!efi_enabled(EFI_RUNTIME_SERVICES)) {			\
+		pr_warn_once("EFI Runtime Services are disabled!\n");	\
+		goto exit;						\
+	}								\
+									\
 	init_completion(&efi_rts_work.efi_rts_comp);			\
-	INIT_WORK_ONSTACK(&efi_rts_work.work, efi_call_rts);		\
+	INIT_WORK(&efi_rts_work.work, efi_call_rts);			\
 	efi_rts_work.arg1 = _arg1;					\
 	efi_rts_work.arg2 = _arg2;					\
 	efi_rts_work.arg3 = _arg3;					\
@@ -112,14 +83,29 @@
 	else								\
 		pr_err("Failed to queue work to efi_rts_wq.\n");	\
 									\
+exit:									\
+	efi_rts_work.efi_rts_id = EFI_NONE;				\
 	efi_rts_work.status;						\
 })
 
+#ifndef arch_efi_save_flags
+#define arch_efi_save_flags(state_flags)	local_save_flags(state_flags)
+#define arch_efi_restore_flags(state_flags)	local_irq_restore(state_flags)
+#endif
+
+unsigned long efi_call_virt_save_flags(void)
+{
+	unsigned long flags;
+
+	arch_efi_save_flags(flags);
+	return flags;
+}
+
 void efi_call_virt_check_flags(unsigned long flags, const char *call)
 {
 	unsigned long cur_flags, mismatch;
 
-	local_save_flags(cur_flags);
+	cur_flags = efi_call_virt_save_flags();
 
 	mismatch = flags ^ cur_flags;
 	if (!WARN_ON_ONCE(mismatch & ARCH_EFI_IRQ_FLAGS_MASK))
@@ -128,7 +114,7 @@
 	add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_NOW_UNRELIABLE);
 	pr_err_ratelimited(FW_BUG "IRQ flags corrupted (0x%08lx=>0x%08lx) by EFI %s\n",
 			   flags, cur_flags, call);
-	local_irq_restore(flags);
+	arch_efi_restore_flags(flags);
 }
 
 /*
@@ -173,6 +159,13 @@
 static DEFINE_SEMAPHORE(efi_runtime_lock);
 
 /*
+ * Expose the EFI runtime lock to the UV platform
+ */
+#ifdef CONFIG_X86_UV
+extern struct semaphore __efi_uv_runtime_lock __alias(efi_runtime_lock);
+#endif
+
+/*
  * Calls the appropriate efi_runtime_service() with the appropriate
  * arguments.
  *
@@ -184,62 +177,60 @@
  */
 static void efi_call_rts(struct work_struct *work)
 {
-	struct efi_runtime_work *efi_rts_work;
 	void *arg1, *arg2, *arg3, *arg4, *arg5;
 	efi_status_t status = EFI_NOT_FOUND;
 
-	efi_rts_work = container_of(work, struct efi_runtime_work, work);
-	arg1 = efi_rts_work->arg1;
-	arg2 = efi_rts_work->arg2;
-	arg3 = efi_rts_work->arg3;
-	arg4 = efi_rts_work->arg4;
-	arg5 = efi_rts_work->arg5;
+	arg1 = efi_rts_work.arg1;
+	arg2 = efi_rts_work.arg2;
+	arg3 = efi_rts_work.arg3;
+	arg4 = efi_rts_work.arg4;
+	arg5 = efi_rts_work.arg5;
 
-	switch (efi_rts_work->efi_rts_id) {
-	case GET_TIME:
+	switch (efi_rts_work.efi_rts_id) {
+	case EFI_GET_TIME:
 		status = efi_call_virt(get_time, (efi_time_t *)arg1,
 				       (efi_time_cap_t *)arg2);
 		break;
-	case SET_TIME:
+	case EFI_SET_TIME:
 		status = efi_call_virt(set_time, (efi_time_t *)arg1);
 		break;
-	case GET_WAKEUP_TIME:
+	case EFI_GET_WAKEUP_TIME:
 		status = efi_call_virt(get_wakeup_time, (efi_bool_t *)arg1,
 				       (efi_bool_t *)arg2, (efi_time_t *)arg3);
 		break;
-	case SET_WAKEUP_TIME:
+	case EFI_SET_WAKEUP_TIME:
 		status = efi_call_virt(set_wakeup_time, *(efi_bool_t *)arg1,
 				       (efi_time_t *)arg2);
 		break;
-	case GET_VARIABLE:
+	case EFI_GET_VARIABLE:
 		status = efi_call_virt(get_variable, (efi_char16_t *)arg1,
 				       (efi_guid_t *)arg2, (u32 *)arg3,
 				       (unsigned long *)arg4, (void *)arg5);
 		break;
-	case GET_NEXT_VARIABLE:
+	case EFI_GET_NEXT_VARIABLE:
 		status = efi_call_virt(get_next_variable, (unsigned long *)arg1,
 				       (efi_char16_t *)arg2,
 				       (efi_guid_t *)arg3);
 		break;
-	case SET_VARIABLE:
+	case EFI_SET_VARIABLE:
 		status = efi_call_virt(set_variable, (efi_char16_t *)arg1,
 				       (efi_guid_t *)arg2, *(u32 *)arg3,
 				       *(unsigned long *)arg4, (void *)arg5);
 		break;
-	case QUERY_VARIABLE_INFO:
+	case EFI_QUERY_VARIABLE_INFO:
 		status = efi_call_virt(query_variable_info, *(u32 *)arg1,
 				       (u64 *)arg2, (u64 *)arg3, (u64 *)arg4);
 		break;
-	case GET_NEXT_HIGH_MONO_COUNT:
+	case EFI_GET_NEXT_HIGH_MONO_COUNT:
 		status = efi_call_virt(get_next_high_mono_count, (u32 *)arg1);
 		break;
-	case UPDATE_CAPSULE:
+	case EFI_UPDATE_CAPSULE:
 		status = efi_call_virt(update_capsule,
 				       (efi_capsule_header_t **)arg1,
 				       *(unsigned long *)arg2,
 				       *(unsigned long *)arg3);
 		break;
-	case QUERY_CAPSULE_CAPS:
+	case EFI_QUERY_CAPSULE_CAPS:
 		status = efi_call_virt(query_capsule_caps,
 				       (efi_capsule_header_t **)arg1,
 				       *(unsigned long *)arg2, (u64 *)arg3,
@@ -253,8 +244,8 @@
 		 */
 		pr_err("Requested executing invalid EFI Runtime Service.\n");
 	}
-	efi_rts_work->status = status;
-	complete(&efi_rts_work->efi_rts_comp);
+	efi_rts_work.status = status;
+	complete(&efi_rts_work.efi_rts_comp);
 }
 
 static efi_status_t virt_efi_get_time(efi_time_t *tm, efi_time_cap_t *tc)
@@ -263,7 +254,7 @@
 
 	if (down_interruptible(&efi_runtime_lock))
 		return EFI_ABORTED;
-	status = efi_queue_work(GET_TIME, tm, tc, NULL, NULL, NULL);
+	status = efi_queue_work(EFI_GET_TIME, tm, tc, NULL, NULL, NULL);
 	up(&efi_runtime_lock);
 	return status;
 }
@@ -274,7 +265,7 @@
 
 	if (down_interruptible(&efi_runtime_lock))
 		return EFI_ABORTED;
-	status = efi_queue_work(SET_TIME, tm, NULL, NULL, NULL, NULL);
+	status = efi_queue_work(EFI_SET_TIME, tm, NULL, NULL, NULL, NULL);
 	up(&efi_runtime_lock);
 	return status;
 }
@@ -287,7 +278,7 @@
 
 	if (down_interruptible(&efi_runtime_lock))
 		return EFI_ABORTED;
-	status = efi_queue_work(GET_WAKEUP_TIME, enabled, pending, tm, NULL,
+	status = efi_queue_work(EFI_GET_WAKEUP_TIME, enabled, pending, tm, NULL,
 				NULL);
 	up(&efi_runtime_lock);
 	return status;
@@ -299,7 +290,7 @@
 
 	if (down_interruptible(&efi_runtime_lock))
 		return EFI_ABORTED;
-	status = efi_queue_work(SET_WAKEUP_TIME, &enabled, tm, NULL, NULL,
+	status = efi_queue_work(EFI_SET_WAKEUP_TIME, &enabled, tm, NULL, NULL,
 				NULL);
 	up(&efi_runtime_lock);
 	return status;
@@ -315,7 +306,7 @@
 
 	if (down_interruptible(&efi_runtime_lock))
 		return EFI_ABORTED;
-	status = efi_queue_work(GET_VARIABLE, name, vendor, attr, data_size,
+	status = efi_queue_work(EFI_GET_VARIABLE, name, vendor, attr, data_size,
 				data);
 	up(&efi_runtime_lock);
 	return status;
@@ -329,7 +320,7 @@
 
 	if (down_interruptible(&efi_runtime_lock))
 		return EFI_ABORTED;
-	status = efi_queue_work(GET_NEXT_VARIABLE, name_size, name, vendor,
+	status = efi_queue_work(EFI_GET_NEXT_VARIABLE, name_size, name, vendor,
 				NULL, NULL);
 	up(&efi_runtime_lock);
 	return status;
@@ -345,7 +336,7 @@
 
 	if (down_interruptible(&efi_runtime_lock))
 		return EFI_ABORTED;
-	status = efi_queue_work(SET_VARIABLE, name, vendor, &attr, &data_size,
+	status = efi_queue_work(EFI_SET_VARIABLE, name, vendor, &attr, &data_size,
 				data);
 	up(&efi_runtime_lock);
 	return status;
@@ -380,7 +371,7 @@
 
 	if (down_interruptible(&efi_runtime_lock))
 		return EFI_ABORTED;
-	status = efi_queue_work(QUERY_VARIABLE_INFO, &attr, storage_space,
+	status = efi_queue_work(EFI_QUERY_VARIABLE_INFO, &attr, storage_space,
 				remaining_space, max_variable_size, NULL);
 	up(&efi_runtime_lock);
 	return status;
@@ -412,7 +403,7 @@
 
 	if (down_interruptible(&efi_runtime_lock))
 		return EFI_ABORTED;
-	status = efi_queue_work(GET_NEXT_HIGH_MONO_COUNT, count, NULL, NULL,
+	status = efi_queue_work(EFI_GET_NEXT_HIGH_MONO_COUNT, count, NULL, NULL,
 				NULL, NULL);
 	up(&efi_runtime_lock);
 	return status;
@@ -428,6 +419,7 @@
 			"could not get exclusive access to the firmware\n");
 		return;
 	}
+	efi_rts_work.efi_rts_id = EFI_RESET_SYSTEM;
 	__efi_call_virt(reset_system, reset_type, status, data_size, data);
 	up(&efi_runtime_lock);
 }
@@ -443,7 +435,7 @@
 
 	if (down_interruptible(&efi_runtime_lock))
 		return EFI_ABORTED;
-	status = efi_queue_work(UPDATE_CAPSULE, capsules, &count, &sg_list,
+	status = efi_queue_work(EFI_UPDATE_CAPSULE, capsules, &count, &sg_list,
 				NULL, NULL);
 	up(&efi_runtime_lock);
 	return status;
@@ -461,7 +453,7 @@
 
 	if (down_interruptible(&efi_runtime_lock))
 		return EFI_ABORTED;
-	status = efi_queue_work(QUERY_CAPSULE_CAPS, capsules, &count,
+	status = efi_queue_work(EFI_QUERY_CAPSULE_CAPS, capsules, &count,
 				max_size, reset_type, NULL);
 	up(&efi_runtime_lock);
 	return status;
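
Note: the efi_queue_work() macro above serializes every runtime-service call through the single global efi_rts_work descriptor and a dedicated worker, with the caller sleeping on a completion. As an illustration of that shape only, here is a userspace analogue built on pthreads; the names, the mutex/condvar plumbing, and the fake "service" are not from the kernel:

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>

    struct rts_work {
        int op;                 /* stand-in for enum efi_rts_ids */
        long arg1, arg2;        /* marshalled arguments */
        long status;            /* result handed back to the caller */
        bool pending, done;
        pthread_mutex_t lock;
        pthread_cond_t cond;
    };

    static struct rts_work work = {
        .lock = PTHREAD_MUTEX_INITIALIZER,
        .cond = PTHREAD_COND_INITIALIZER,
    };

    static void *worker(void *unused)
    {
        (void)unused;
        pthread_mutex_lock(&work.lock);
        while (!work.pending)
            pthread_cond_wait(&work.cond, &work.lock);

        /* dispatch on the requested service, like efi_call_rts() */
        work.status = (work.op == 1) ? work.arg1 + work.arg2 : -1;

        work.pending = false;
        work.done = true;
        pthread_cond_broadcast(&work.cond);
        pthread_mutex_unlock(&work.lock);
        return NULL;
    }

    static long queue_and_wait(int op, long a1, long a2)
    {
        pthread_mutex_lock(&work.lock);
        work.op = op;
        work.arg1 = a1;
        work.arg2 = a2;
        work.done = false;
        work.pending = true;            /* "queue" the call ... */
        pthread_cond_broadcast(&work.cond);
        while (!work.done)              /* ... and wait for completion */
            pthread_cond_wait(&work.cond, &work.lock);
        pthread_mutex_unlock(&work.lock);
        return work.status;
    }

    int main(void)
    {
        pthread_t tid;

        pthread_create(&tid, NULL, worker, NULL);
        printf("status = %ld\n", queue_and_wait(1, 2, 3));
        pthread_join(tid, NULL);
        return 0;
    }

Build with -pthread. The point is that arguments live in one shared slot and callers block until the single worker reports completion, which is why the kernel takes efi_runtime_lock before queuing.
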
diff --git a/drivers/firmware/efi/test/Makefile b/drivers/firmware/efi/test/Makefile
index bcd4577..4197088 100644
--- a/drivers/firmware/efi/test/Makefile
+++ b/drivers/firmware/efi/test/Makefile
@@ -1 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0-only
 obj-$(CONFIG_EFI_TEST)			+= efi_test.o
diff --git a/drivers/firmware/efi/test/efi_test.c b/drivers/firmware/efi/test/efi_test.c
index 41c48a1..7baf48c 100644
--- a/drivers/firmware/efi/test/efi_test.c
+++ b/drivers/firmware/efi/test/efi_test.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
 /*
  * EFI Test Driver for Runtime Services
  *
@@ -13,6 +14,7 @@
 #include <linux/init.h>
 #include <linux/proc_fs.h>
 #include <linux/efi.h>
+#include <linux/security.h>
 #include <linux/slab.h>
 #include <linux/uaccess.h>
 
@@ -68,7 +70,7 @@
 		return 0;
 	}
 
-	if (!access_ok(VERIFY_READ, src, 1))
+	if (!access_ok(src, 1))
 		return -EFAULT;
 
 	buf = memdup_user(src, len);
@@ -89,7 +91,7 @@
 static inline int
 get_ucs2_strsize_from_user(efi_char16_t __user *src, size_t *len)
 {
-	if (!access_ok(VERIFY_READ, src, 1))
+	if (!access_ok(src, 1))
 		return -EFAULT;
 
 	*len = user_ucs2_strsize(src);
@@ -116,7 +118,7 @@
 {
 	size_t len;
 
-	if (!access_ok(VERIFY_READ, src, 1))
+	if (!access_ok(src, 1))
 		return -EFAULT;
 
 	len = user_ucs2_strsize(src);
@@ -140,7 +142,7 @@
 	if (!src)
 		return 0;
 
-	if (!access_ok(VERIFY_WRITE, dst, 1))
+	if (!access_ok(dst, 1))
 		return -EFAULT;
 
 	return copy_to_user(dst, src, len);
@@ -542,6 +544,30 @@
 	return 0;
 }
 
+static long efi_runtime_reset_system(unsigned long arg)
+{
+	struct efi_resetsystem __user *resetsystem_user;
+	struct efi_resetsystem resetsystem;
+	void *data = NULL;
+
+	resetsystem_user = (struct efi_resetsystem __user *)arg;
+	if (copy_from_user(&resetsystem, resetsystem_user,
+						sizeof(resetsystem)))
+		return -EFAULT;
+	if (resetsystem.data_size != 0) {
+		data = memdup_user((void *)resetsystem.data,
+						resetsystem.data_size);
+		if (IS_ERR(data))
+			return PTR_ERR(data);
+	}
+
+	efi.reset_system(resetsystem.reset_type, resetsystem.status,
+				resetsystem.data_size, (efi_char16_t *)data);
+
+	kfree(data);
+	return 0;
+}
+
 static long efi_runtime_query_variableinfo(unsigned long arg)
 {
 	struct efi_queryvariableinfo __user *queryvariableinfo_user;
@@ -682,6 +708,9 @@
 
 	case EFI_RUNTIME_QUERY_CAPSULECAPABILITIES:
 		return efi_runtime_query_capsulecaps(arg);
+
+	case EFI_RUNTIME_RESET_SYSTEM:
+		return efi_runtime_reset_system(arg);
 	}
 
 	return -ENOTTY;
@@ -689,6 +718,13 @@
 
 static int efi_test_open(struct inode *inode, struct file *file)
 {
+	int ret = security_locked_down(LOCKDOWN_EFI_TEST);
+
+	if (ret)
+		return ret;
+
+	if (!capable(CAP_SYS_ADMIN))
+		return -EACCES;
 	/*
 	 * nothing special to do here
 	 * We do accept multiple open files at the same time as we
diff --git a/drivers/firmware/efi/test/efi_test.h b/drivers/firmware/efi/test/efi_test.h
index 9812c6a..f2446aa 100644
--- a/drivers/firmware/efi/test/efi_test.h
+++ b/drivers/firmware/efi/test/efi_test.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: GPL-2.0+ */
 /*
  * EFI Test driver Header
  *
@@ -81,6 +81,13 @@
 	efi_status_t		*status;
 } __packed;
 
+struct efi_resetsystem {
+	int			reset_type;
+	efi_status_t		status;
+	unsigned long		data_size;
+	efi_char16_t		*data;
+} __packed;
+
 #define EFI_RUNTIME_GET_VARIABLE \
 	_IOWR('p', 0x01, struct efi_getvariable)
 #define EFI_RUNTIME_SET_VARIABLE \
@@ -108,4 +115,7 @@
 #define EFI_RUNTIME_QUERY_CAPSULECAPABILITIES \
 	_IOR('p', 0x0A, struct efi_querycapsulecapabilities)
 
+#define EFI_RUNTIME_RESET_SYSTEM \
+	_IOW('p', 0x0B, struct efi_resetsystem)
+
 #endif /* _DRIVERS_FIRMWARE_EFI_TEST_H_ */
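
Note: a hedged userspace sketch of driving the new ioctl from the test-tool side. Assumptions: the device node is /dev/efi_test (the misc device this driver registers), efi_status_t maps to unsigned long and efi_char16_t to uint16_t in userspace, and reset type 0 is EfiResetCold per the UEFI spec. Running this for real resets the machine, and it also needs CAP_SYS_ADMIN and an unlocked kernel (see the LOCKDOWN_EFI_TEST check above).

    #include <fcntl.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <unistd.h>

    struct efi_resetsystem {
        int           reset_type;
        unsigned long status;
        unsigned long data_size;
        uint16_t     *data;
    } __attribute__((packed));

    #define EFI_RUNTIME_RESET_SYSTEM _IOW('p', 0x0B, struct efi_resetsystem)

    int main(void)
    {
        struct efi_resetsystem req = {
            .reset_type = 0,        /* EfiResetCold */
            .status     = 0,        /* EFI_SUCCESS */
            .data_size  = 0,
            .data       = NULL,
        };
        int fd = open("/dev/efi_test", O_RDWR);

        if (fd < 0) {
            perror("open /dev/efi_test");
            return 1;
        }
        if (ioctl(fd, EFI_RUNTIME_RESET_SYSTEM, &req) < 0)
            perror("EFI_RUNTIME_RESET_SYSTEM");
        close(fd);
        return 0;
    }
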
diff --git a/drivers/firmware/efi/tpm.c b/drivers/firmware/efi/tpm.c
index 0cbeb3d..31f9f0e 100644
--- a/drivers/firmware/efi/tpm.c
+++ b/drivers/firmware/efi/tpm.c
@@ -1,17 +1,37 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * Copyright (C) 2017 Google, Inc.
  *     Thiebaud Weksteen <tweek@google.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
  */
 
+#define TPM_MEMREMAP(start, size) early_memremap(start, size)
+#define TPM_MEMUNMAP(start, size) early_memunmap(start, size)
+
+#include <asm/early_ioremap.h>
 #include <linux/efi.h>
 #include <linux/init.h>
 #include <linux/memblock.h>
+#include <linux/tpm_eventlog.h>
 
-#include <asm/early_ioremap.h>
+int efi_tpm_final_log_size;
+EXPORT_SYMBOL(efi_tpm_final_log_size);
+
+static int tpm2_calc_event_log_size(void *data, int count, void *size_info)
+{
+	struct tcg_pcr_event2_head *header;
+	int event_size, size = 0;
+
+	while (count > 0) {
+		header = data + size;
+		event_size = __calc_tpm2_event_size(header, size_info, true);
+		if (event_size == 0)
+			return -1;
+		size += event_size;
+		count--;
+	}
+
+	return size;
+}
 
 /*
  * Reserve the memory associated with the TPM Event Log configuration table.
@@ -19,22 +39,67 @@
 int __init efi_tpm_eventlog_init(void)
 {
 	struct linux_efi_tpm_eventlog *log_tbl;
-	unsigned int tbl_size;
+	struct efi_tcg2_final_events_table *final_tbl;
+	int tbl_size;
+	int ret = 0;
 
-	if (efi.tpm_log == EFI_INVALID_TABLE_ADDR)
+	if (efi.tpm_log == EFI_INVALID_TABLE_ADDR) {
+		/*
+		 * We can't calculate the size of the final events without the
+		 * first entry in the TPM log, so bail here.
+		 */
 		return 0;
+	}
 
 	log_tbl = early_memremap(efi.tpm_log, sizeof(*log_tbl));
 	if (!log_tbl) {
 		pr_err("Failed to map TPM Event Log table @ 0x%lx\n",
-			efi.tpm_log);
+		       efi.tpm_log);
 		efi.tpm_log = EFI_INVALID_TABLE_ADDR;
 		return -ENOMEM;
 	}
 
 	tbl_size = sizeof(*log_tbl) + log_tbl->size;
 	memblock_reserve(efi.tpm_log, tbl_size);
+
+	if (efi.tpm_final_log == EFI_INVALID_TABLE_ADDR)
+		goto out;
+
+	final_tbl = early_memremap(efi.tpm_final_log, sizeof(*final_tbl));
+
+	if (!final_tbl) {
+		pr_err("Failed to map TPM Final Event Log table @ 0x%lx\n",
+		       efi.tpm_final_log);
+		efi.tpm_final_log = EFI_INVALID_TABLE_ADDR;
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	tbl_size = 0;
+	if (final_tbl->nr_events != 0) {
+		void *events = (void *)efi.tpm_final_log
+				+ sizeof(final_tbl->version)
+				+ sizeof(final_tbl->nr_events);
+
+		tbl_size = tpm2_calc_event_log_size(events,
+						    final_tbl->nr_events,
+						    log_tbl->log);
+	}
+
+	if (tbl_size < 0) {
+		pr_err(FW_BUG "Failed to parse event in TPM Final Events Log\n");
+		ret = -EINVAL;
+		goto out_calc;
+	}
+
+	memblock_reserve((unsigned long)final_tbl,
+			 tbl_size + sizeof(*final_tbl));
+	efi_tpm_final_log_size = tbl_size;
+
+out_calc:
+	early_memunmap(final_tbl, sizeof(*final_tbl));
+out:
 	early_memunmap(log_tbl, sizeof(*log_tbl));
-	return 0;
+	return ret;
 }
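
Note: the Final Events table walked above is two fixed 64-bit fields followed by back-to-back crypto-agile events; each event's length is only decodable with the digest sizes announced by the main log's first entry, which is why log_tbl->log is passed as size_info. A layout-only sketch (field names follow the TCG EFI protocol spec, not the kernel header):

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    struct final_events_table {
        uint64_t version;       /* expected to be 1 */
        uint64_t nr_events;     /* number of events that follow */
        uint8_t  events[];      /* variable-length TCG2 events */
    };

    int main(void)
    {
        /* the offset the reservation code skips before summing event sizes */
        printf("events start %zu bytes in\n",
               offsetof(struct final_events_table, events));
        return 0;
    }
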
 
diff --git a/drivers/firmware/efi/vars.c b/drivers/firmware/efi/vars.c
index 9336ffd..436d177 100644
--- a/drivers/firmware/efi/vars.c
+++ b/drivers/firmware/efi/vars.c
@@ -1,22 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0+
 /*
  * Originally from efivars.c
  *
  * Copyright (C) 2001,2003,2004 Dell <Matt_Domsch@dell.com>
  * Copyright (C) 2004 Intel Corporation <matthew.e.tolentino@intel.com>
- *
- *  This program is free software; you can redistribute it and/or modify
- *  it under the terms of the GNU General Public License as published by
- *  the Free Software Foundation; either version 2 of the License, or
- *  (at your option) any later version.
- *
- *  This program is distributed in the hope that it will be useful,
- *  but WITHOUT ANY WARRANTY; without even the implied warranty of
- *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- *  GNU General Public License for more details.
- *
- *  You should have received a copy of the GNU General Public License
- *  along with this program; if not, write to the Free Software
- *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
  */
 
 #include <linux/capability.h>
@@ -318,7 +305,12 @@
 static efi_status_t
 check_var_size(u32 attributes, unsigned long size)
 {
-	const struct efivar_operations *fops = __efivars->ops;
+	const struct efivar_operations *fops;
+
+	if (!__efivars)
+		return EFI_UNSUPPORTED;
+
+	fops = __efivars->ops;
 
 	if (!fops->query_variable_store)
 		return EFI_UNSUPPORTED;
@@ -329,7 +321,12 @@
 static efi_status_t
 check_var_size_nonblocking(u32 attributes, unsigned long size)
 {
-	const struct efivar_operations *fops = __efivars->ops;
+	const struct efivar_operations *fops;
+
+	if (!__efivars)
+		return EFI_UNSUPPORTED;
+
+	fops = __efivars->ops;
 
 	if (!fops->query_variable_store)
 		return EFI_UNSUPPORTED;
@@ -429,13 +426,18 @@
 int efivar_init(int (*func)(efi_char16_t *, efi_guid_t, unsigned long, void *),
 		void *data, bool duplicates, struct list_head *head)
 {
-	const struct efivar_operations *ops = __efivars->ops;
+	const struct efivar_operations *ops;
 	unsigned long variable_name_size = 1024;
 	efi_char16_t *variable_name;
 	efi_status_t status;
 	efi_guid_t vendor_guid;
 	int err = 0;
 
+	if (!__efivars)
+		return -EFAULT;
+
+	ops = __efivars->ops;
+
 	variable_name = kzalloc(variable_name_size, GFP_KERNEL);
 	if (!variable_name) {
 		printk(KERN_ERR "efivars: Memory allocation failed.\n");
@@ -583,12 +585,14 @@
  */
 int __efivar_entry_delete(struct efivar_entry *entry)
 {
-	const struct efivar_operations *ops = __efivars->ops;
 	efi_status_t status;
 
-	status = ops->set_variable(entry->var.VariableName,
-				   &entry->var.VendorGuid,
-				   0, 0, NULL);
+	if (!__efivars)
+		return -EINVAL;
+
+	status = __efivars->ops->set_variable(entry->var.VariableName,
+					      &entry->var.VendorGuid,
+					      0, 0, NULL);
 
 	return efi_status_to_err(status);
 }
@@ -607,12 +611,17 @@
  */
 int efivar_entry_delete(struct efivar_entry *entry)
 {
-	const struct efivar_operations *ops = __efivars->ops;
+	const struct efivar_operations *ops;
 	efi_status_t status;
 
 	if (down_interruptible(&efivars_lock))
 		return -EINTR;
 
+	if (!__efivars) {
+		up(&efivars_lock);
+		return -EINVAL;
+	}
+	ops = __efivars->ops;
 	status = ops->set_variable(entry->var.VariableName,
 				   &entry->var.VendorGuid,
 				   0, 0, NULL);
@@ -650,13 +659,19 @@
 int efivar_entry_set(struct efivar_entry *entry, u32 attributes,
 		     unsigned long size, void *data, struct list_head *head)
 {
-	const struct efivar_operations *ops = __efivars->ops;
+	const struct efivar_operations *ops;
 	efi_status_t status;
 	efi_char16_t *name = entry->var.VariableName;
 	efi_guid_t vendor = entry->var.VendorGuid;
 
 	if (down_interruptible(&efivars_lock))
 		return -EINTR;
+
+	if (!__efivars) {
+		up(&efivars_lock);
+		return -EINVAL;
+	}
+	ops = __efivars->ops;
 	if (head && efivar_entry_find(name, vendor, head, false)) {
 		up(&efivars_lock);
 		return -EEXIST;
@@ -687,12 +702,17 @@
 efivar_entry_set_nonblocking(efi_char16_t *name, efi_guid_t vendor,
 			     u32 attributes, unsigned long size, void *data)
 {
-	const struct efivar_operations *ops = __efivars->ops;
+	const struct efivar_operations *ops;
 	efi_status_t status;
 
 	if (down_trylock(&efivars_lock))
 		return -EBUSY;
 
+	if (!__efivars) {
+		up(&efivars_lock);
+		return -EINVAL;
+	}
+
 	status = check_var_size_nonblocking(attributes,
 					    size + ucs2_strsize(name, 1024));
 	if (status != EFI_SUCCESS) {
@@ -700,6 +720,7 @@
 		return -ENOSPC;
 	}
 
+	ops = __efivars->ops;
 	status = ops->set_variable_nonblocking(name, &vendor, attributes,
 					       size, data);
 
@@ -727,9 +748,13 @@
 int efivar_entry_set_safe(efi_char16_t *name, efi_guid_t vendor, u32 attributes,
 			  bool block, unsigned long size, void *data)
 {
-	const struct efivar_operations *ops = __efivars->ops;
+	const struct efivar_operations *ops;
 	efi_status_t status;
 
+	if (!__efivars)
+		return -EINVAL;
+
+	ops = __efivars->ops;
 	if (!ops->query_variable_store)
 		return -ENOSYS;
 
@@ -829,13 +854,18 @@
  */
 int efivar_entry_size(struct efivar_entry *entry, unsigned long *size)
 {
-	const struct efivar_operations *ops = __efivars->ops;
+	const struct efivar_operations *ops;
 	efi_status_t status;
 
 	*size = 0;
 
 	if (down_interruptible(&efivars_lock))
 		return -EINTR;
+	if (!__efivars) {
+		up(&efivars_lock);
+		return -EINVAL;
+	}
+	ops = __efivars->ops;
 	status = ops->get_variable(entry->var.VariableName,
 				   &entry->var.VendorGuid, NULL, size, NULL);
 	up(&efivars_lock);
@@ -861,12 +891,14 @@
 int __efivar_entry_get(struct efivar_entry *entry, u32 *attributes,
 		       unsigned long *size, void *data)
 {
-	const struct efivar_operations *ops = __efivars->ops;
 	efi_status_t status;
 
-	status = ops->get_variable(entry->var.VariableName,
-				   &entry->var.VendorGuid,
-				   attributes, size, data);
+	if (!__efivars)
+		return -EINVAL;
+
+	status = __efivars->ops->get_variable(entry->var.VariableName,
+					      &entry->var.VendorGuid,
+					      attributes, size, data);
 
 	return efi_status_to_err(status);
 }
@@ -882,14 +914,19 @@
 int efivar_entry_get(struct efivar_entry *entry, u32 *attributes,
 		     unsigned long *size, void *data)
 {
-	const struct efivar_operations *ops = __efivars->ops;
 	efi_status_t status;
 
 	if (down_interruptible(&efivars_lock))
 		return -EINTR;
-	status = ops->get_variable(entry->var.VariableName,
-				   &entry->var.VendorGuid,
-				   attributes, size, data);
+
+	if (!__efivars) {
+		up(&efivars_lock);
+		return -EINVAL;
+	}
+
+	status = __efivars->ops->get_variable(entry->var.VariableName,
+					      &entry->var.VendorGuid,
+					      attributes, size, data);
 	up(&efivars_lock);
 
 	return efi_status_to_err(status);
@@ -921,7 +958,7 @@
 int efivar_entry_set_get_size(struct efivar_entry *entry, u32 attributes,
 			      unsigned long *size, void *data, bool *set)
 {
-	const struct efivar_operations *ops = __efivars->ops;
+	const struct efivar_operations *ops;
 	efi_char16_t *name = entry->var.VariableName;
 	efi_guid_t *vendor = &entry->var.VendorGuid;
 	efi_status_t status;
@@ -940,6 +977,11 @@
 	if (down_interruptible(&efivars_lock))
 		return -EINTR;
 
+	if (!__efivars) {
+		err = -EINVAL;
+		goto out;
+	}
+
 	/*
 	 * Ensure that the available space hasn't shrunk below the safe level
 	 */
@@ -956,6 +998,8 @@
 		}
 	}
 
+	ops = __efivars->ops;
+
 	status = ops->set_variable(name, vendor, attributes, *size, data);
 	if (status != EFI_SUCCESS) {
 		err = efi_status_to_err(status);
diff --git a/drivers/firmware/google/Kconfig b/drivers/firmware/google/Kconfig
index a456a00..a3a6ca6 100644
--- a/drivers/firmware/google/Kconfig
+++ b/drivers/firmware/google/Kconfig
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
 menuconfig GOOGLE_FIRMWARE
 	bool "Google Firmware Drivers"
 	default n
@@ -10,37 +11,31 @@
 
 config GOOGLE_SMI
 	tristate "SMI interface for Google platforms"
-	depends on X86 && ACPI && DMI && EFI
-	select EFI_VARS
+	depends on X86 && ACPI && DMI
 	help
 	  Say Y here if you want to enable SMI callbacks for Google
 	  platforms.  This provides an interface for writing to and
-	  clearing the EFI event log and reading and writing NVRAM
+	  clearing the event log.  If EFI_VARS is also enabled this
+	  driver provides an interface for reading and writing NVRAM
 	  variables.
 
 config GOOGLE_COREBOOT_TABLE
-	tristate
-	depends on GOOGLE_COREBOOT_TABLE_ACPI || GOOGLE_COREBOOT_TABLE_OF
-
-config GOOGLE_COREBOOT_TABLE_ACPI
-	tristate "Coreboot Table Access - ACPI"
-	depends on ACPI
-	select GOOGLE_COREBOOT_TABLE
+	tristate "Coreboot Table Access"
+	depends on ACPI || OF
 	help
 	  This option enables the coreboot_table module, which provides other
-	  firmware modules to access to the coreboot table. The coreboot table
-	  pointer is accessed through the ACPI "GOOGCB00" object.
+	  firmware modules access to the coreboot table. The coreboot table
+	  pointer is accessed through the ACPI "GOOGCB00" object or the
+	  device tree node /firmware/coreboot.
 	  If unsure say N.
 
-config GOOGLE_COREBOOT_TABLE_OF
-	tristate "Coreboot Table Access - Device Tree"
-	depends on OF
+config GOOGLE_COREBOOT_TABLE_ACPI
+	tristate
 	select GOOGLE_COREBOOT_TABLE
-	help
-	  This option enable the coreboot_table module, which provide other
-	  firmware modules to access coreboot table. The coreboot table pointer
-	  is accessed through the device tree node /firmware/coreboot.
-	  If unsure say N.
+
+config GOOGLE_COREBOOT_TABLE_OF
+	tristate
+	select GOOGLE_COREBOOT_TABLE
 
 config GOOGLE_MEMCONSOLE
 	tristate
diff --git a/drivers/firmware/google/Makefile b/drivers/firmware/google/Makefile
index d0b3fba..d17cade 100644
--- a/drivers/firmware/google/Makefile
+++ b/drivers/firmware/google/Makefile
@@ -2,8 +2,6 @@
 
 obj-$(CONFIG_GOOGLE_SMI)		+= gsmi.o
 obj-$(CONFIG_GOOGLE_COREBOOT_TABLE)        += coreboot_table.o
-obj-$(CONFIG_GOOGLE_COREBOOT_TABLE_ACPI)   += coreboot_table-acpi.o
-obj-$(CONFIG_GOOGLE_COREBOOT_TABLE_OF)     += coreboot_table-of.o
 obj-$(CONFIG_GOOGLE_FRAMEBUFFER_COREBOOT)  += framebuffer-coreboot.o
 obj-$(CONFIG_GOOGLE_MEMCONSOLE)            += memconsole.o
 obj-$(CONFIG_GOOGLE_MEMCONSOLE_COREBOOT)   += memconsole-coreboot.o
diff --git a/drivers/firmware/google/coreboot_table-acpi.c b/drivers/firmware/google/coreboot_table-acpi.c
deleted file mode 100644
index 77197fe..0000000
--- a/drivers/firmware/google/coreboot_table-acpi.c
+++ /dev/null
@@ -1,88 +0,0 @@
-/*
- * coreboot_table-acpi.c
- *
- * Using ACPI to locate Coreboot table and provide coreboot table access.
- *
- * Copyright 2017 Google Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License v2.0 as published by
- * the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- */
-
-#include <linux/acpi.h>
-#include <linux/device.h>
-#include <linux/err.h>
-#include <linux/init.h>
-#include <linux/io.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/platform_device.h>
-
-#include "coreboot_table.h"
-
-static int coreboot_table_acpi_probe(struct platform_device *pdev)
-{
-	phys_addr_t phyaddr;
-	resource_size_t len;
-	struct coreboot_table_header __iomem *header = NULL;
-	struct resource *res;
-	void __iomem *ptr = NULL;
-
-	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	if (!res)
-		return -EINVAL;
-
-	len = resource_size(res);
-	if (!res->start || !len)
-		return -EINVAL;
-
-	phyaddr = res->start;
-	header = ioremap_cache(phyaddr, sizeof(*header));
-	if (header == NULL)
-		return -ENOMEM;
-
-	ptr = ioremap_cache(phyaddr,
-			    header->header_bytes + header->table_bytes);
-	iounmap(header);
-	if (!ptr)
-		return -ENOMEM;
-
-	return coreboot_table_init(&pdev->dev, ptr);
-}
-
-static int coreboot_table_acpi_remove(struct platform_device *pdev)
-{
-	return coreboot_table_exit();
-}
-
-static const struct acpi_device_id cros_coreboot_acpi_match[] = {
-	{ "GOOGCB00", 0 },
-	{ "BOOT0000", 0 },
-	{ }
-};
-MODULE_DEVICE_TABLE(acpi, cros_coreboot_acpi_match);
-
-static struct platform_driver coreboot_table_acpi_driver = {
-	.probe = coreboot_table_acpi_probe,
-	.remove = coreboot_table_acpi_remove,
-	.driver = {
-		.name = "coreboot_table_acpi",
-		.acpi_match_table = ACPI_PTR(cros_coreboot_acpi_match),
-	},
-};
-
-static int __init coreboot_table_acpi_init(void)
-{
-	return platform_driver_register(&coreboot_table_acpi_driver);
-}
-
-module_init(coreboot_table_acpi_init);
-
-MODULE_AUTHOR("Google, Inc.");
-MODULE_LICENSE("GPL");
diff --git a/drivers/firmware/google/coreboot_table-of.c b/drivers/firmware/google/coreboot_table-of.c
deleted file mode 100644
index f15bf40..0000000
--- a/drivers/firmware/google/coreboot_table-of.c
+++ /dev/null
@@ -1,82 +0,0 @@
-/*
- * coreboot_table-of.c
- *
- * Coreboot table access through open firmware.
- *
- * Copyright 2017 Google Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License v2.0 as published by
- * the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- */
-
-#include <linux/device.h>
-#include <linux/io.h>
-#include <linux/module.h>
-#include <linux/of_address.h>
-#include <linux/of_platform.h>
-#include <linux/platform_device.h>
-
-#include "coreboot_table.h"
-
-static int coreboot_table_of_probe(struct platform_device *pdev)
-{
-	struct device_node *fw_dn = pdev->dev.of_node;
-	void __iomem *ptr;
-
-	ptr = of_iomap(fw_dn, 0);
-	of_node_put(fw_dn);
-	if (!ptr)
-		return -ENOMEM;
-
-	return coreboot_table_init(&pdev->dev, ptr);
-}
-
-static int coreboot_table_of_remove(struct platform_device *pdev)
-{
-	return coreboot_table_exit();
-}
-
-static const struct of_device_id coreboot_of_match[] = {
-	{ .compatible = "coreboot" },
-	{},
-};
-
-static struct platform_driver coreboot_table_of_driver = {
-	.probe = coreboot_table_of_probe,
-	.remove = coreboot_table_of_remove,
-	.driver = {
-		.name = "coreboot_table_of",
-		.of_match_table = coreboot_of_match,
-	},
-};
-
-static int __init platform_coreboot_table_of_init(void)
-{
-	struct platform_device *pdev;
-	struct device_node *of_node;
-
-	/* Limit device creation to the presence of /firmware/coreboot node */
-	of_node = of_find_node_by_path("/firmware/coreboot");
-	if (!of_node)
-		return -ENODEV;
-
-	if (!of_match_node(coreboot_of_match, of_node))
-		return -ENODEV;
-
-	pdev = of_platform_device_create(of_node, "coreboot_table_of", NULL);
-	if (!pdev)
-		return -ENODEV;
-
-	return platform_driver_register(&coreboot_table_of_driver);
-}
-
-module_init(platform_coreboot_table_of_init);
-
-MODULE_AUTHOR("Google, Inc.");
-MODULE_LICENSE("GPL");
diff --git a/drivers/firmware/google/coreboot_table.c b/drivers/firmware/google/coreboot_table.c
index 898bb9a..8d132e4 100644
--- a/drivers/firmware/google/coreboot_table.c
+++ b/drivers/firmware/google/coreboot_table.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * coreboot_table.c
  *
@@ -5,23 +6,17 @@
  *
  * Copyright 2017 Google Inc.
  * Copyright 2017 Samuel Holland <samuel@sholland.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License v2.0 as published by
- * the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
  */
 
+#include <linux/acpi.h>
 #include <linux/device.h>
 #include <linux/err.h>
 #include <linux/init.h>
 #include <linux/io.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
 #include <linux/slab.h>
 
 #include "coreboot_table.h"
@@ -29,8 +24,6 @@
 #define CB_DEV(d) container_of(d, struct coreboot_device, dev)
 #define CB_DRV(d) container_of(d, struct coreboot_driver, drv)
 
-static struct coreboot_table_header __iomem *ptr_header;
-
 static int coreboot_bus_match(struct device *dev, struct device_driver *drv)
 {
 	struct coreboot_device *device = CB_DEV(dev);
@@ -70,12 +63,6 @@
 	.remove		= coreboot_bus_remove,
 };
 
-static int __init coreboot_bus_init(void)
-{
-	return bus_register(&coreboot_bus_type);
-}
-module_init(coreboot_bus_init);
-
 static void coreboot_device_release(struct device *dev)
 {
 	struct coreboot_device *device = CB_DEV(dev);
@@ -97,63 +84,117 @@
 }
 EXPORT_SYMBOL(coreboot_driver_unregister);
 
-int coreboot_table_init(struct device *dev, void __iomem *ptr)
+static int coreboot_table_populate(struct device *dev, void *ptr)
 {
 	int i, ret;
 	void *ptr_entry;
 	struct coreboot_device *device;
-	struct coreboot_table_entry entry;
-	struct coreboot_table_header header;
+	struct coreboot_table_entry *entry;
+	struct coreboot_table_header *header = ptr;
 
-	ptr_header = ptr;
-	memcpy_fromio(&header, ptr_header, sizeof(header));
+	ptr_entry = ptr + header->header_bytes;
+	for (i = 0; i < header->table_entries; i++) {
+		entry = ptr_entry;
 
-	if (strncmp(header.signature, "LBIO", sizeof(header.signature))) {
-		pr_warn("coreboot_table: coreboot table missing or corrupt!\n");
-		ret = -ENODEV;
-		goto out;
-	}
-
-	ptr_entry = (void *)ptr_header + header.header_bytes;
-	for (i = 0; i < header.table_entries; i++) {
-		memcpy_fromio(&entry, ptr_entry, sizeof(entry));
-
-		device = kzalloc(sizeof(struct device) + entry.size, GFP_KERNEL);
-		if (!device) {
-			ret = -ENOMEM;
-			break;
-		}
+		device = kzalloc(sizeof(struct device) + entry->size, GFP_KERNEL);
+		if (!device)
+			return -ENOMEM;
 
 		dev_set_name(&device->dev, "coreboot%d", i);
 		device->dev.parent = dev;
 		device->dev.bus = &coreboot_bus_type;
 		device->dev.release = coreboot_device_release;
-		memcpy_fromio(&device->entry, ptr_entry, entry.size);
+		memcpy(&device->entry, ptr_entry, entry->size);
 
 		ret = device_register(&device->dev);
 		if (ret) {
 			put_device(&device->dev);
-			break;
+			return ret;
 		}
 
-		ptr_entry += entry.size;
-	}
-out:
-	iounmap(ptr);
-	return ret;
-}
-EXPORT_SYMBOL(coreboot_table_init);
-
-int coreboot_table_exit(void)
-{
-	if (ptr_header) {
-		bus_unregister(&coreboot_bus_type);
-		ptr_header = NULL;
+		ptr_entry += entry->size;
 	}
 
 	return 0;
 }
-EXPORT_SYMBOL(coreboot_table_exit);
 
+static int coreboot_table_probe(struct platform_device *pdev)
+{
+	resource_size_t len;
+	struct coreboot_table_header *header;
+	struct resource *res;
+	struct device *dev = &pdev->dev;
+	void *ptr;
+	int ret;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!res)
+		return -EINVAL;
+
+	len = resource_size(res);
+	if (!res->start || !len)
+		return -EINVAL;
+
+	/* Check just the header first to make sure things are sane */
+	header = memremap(res->start, sizeof(*header), MEMREMAP_WB);
+	if (!header)
+		return -ENOMEM;
+
+	len = header->header_bytes + header->table_bytes;
+	ret = strncmp(header->signature, "LBIO", sizeof(header->signature));
+	memunmap(header);
+	if (ret) {
+		dev_warn(dev, "coreboot table missing or corrupt!\n");
+		return -ENODEV;
+	}
+
+	ptr = memremap(res->start, len, MEMREMAP_WB);
+	if (!ptr)
+		return -ENOMEM;
+
+	ret = bus_register(&coreboot_bus_type);
+	if (!ret) {
+		ret = coreboot_table_populate(dev, ptr);
+		if (ret)
+			bus_unregister(&coreboot_bus_type);
+	}
+	memunmap(ptr);
+
+	return ret;
+}
+
+static int coreboot_table_remove(struct platform_device *pdev)
+{
+	bus_unregister(&coreboot_bus_type);
+	return 0;
+}
+
+#ifdef CONFIG_ACPI
+static const struct acpi_device_id cros_coreboot_acpi_match[] = {
+	{ "GOOGCB00", 0 },
+	{ "BOOT0000", 0 },
+	{ }
+};
+MODULE_DEVICE_TABLE(acpi, cros_coreboot_acpi_match);
+#endif
+
+#ifdef CONFIG_OF
+static const struct of_device_id coreboot_of_match[] = {
+	{ .compatible = "coreboot" },
+	{}
+};
+MODULE_DEVICE_TABLE(of, coreboot_of_match);
+#endif
+
+static struct platform_driver coreboot_table_driver = {
+	.probe = coreboot_table_probe,
+	.remove = coreboot_table_remove,
+	.driver = {
+		.name = "coreboot_table",
+		.acpi_match_table = ACPI_PTR(cros_coreboot_acpi_match),
+		.of_match_table = of_match_ptr(coreboot_of_match),
+	},
+};
+module_platform_driver(coreboot_table_driver);
 MODULE_AUTHOR("Google, Inc.");
 MODULE_LICENSE("GPL");
diff --git a/drivers/firmware/google/coreboot_table.h b/drivers/firmware/google/coreboot_table.h
index 8ad95a9..7b7b4a6 100644
--- a/drivers/firmware/google/coreboot_table.h
+++ b/drivers/firmware/google/coreboot_table.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
 /*
  * coreboot_table.h
  *
@@ -6,21 +7,12 @@
  * Copyright 2014 Gerd Hoffmann <kraxel@redhat.com>
  * Copyright 2017 Google Inc.
  * Copyright 2017 Samuel Holland <samuel@sholland.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License v2.0 as published by
- * the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
  */
 
 #ifndef __COREBOOT_TABLE_H
 #define __COREBOOT_TABLE_H
 
-#include <linux/io.h>
+#include <linux/device.h>
 
 /* Coreboot table header structure */
 struct coreboot_table_header {
@@ -91,10 +83,13 @@
 /* Unregister a driver that uses the data from a coreboot table. */
 void coreboot_driver_unregister(struct coreboot_driver *driver);
 
-/* Initialize coreboot table module given a pointer to iomem */
-int coreboot_table_init(struct device *dev, void __iomem *ptr);
-
-/* Cleanup coreboot table module */
-int coreboot_table_exit(void);
+/* module_coreboot_driver() - Helper macro for drivers that don't do
+ * anything special in module init/exit.  This eliminates a lot of
+ * boilerplate.  Each module may only use this macro once, and
+ * calling it replaces module_init() and module_exit()
+ */
+#define module_coreboot_driver(__coreboot_driver) \
+	module_driver(__coreboot_driver, coreboot_driver_register, \
+			coreboot_driver_unregister)
 
 #endif /* __COREBOOT_TABLE_H */
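
Note: a minimal consumer of this API might look like the sketch below. The tag value, names, and probe body are placeholders; only the registration boilerplate that module_coreboot_driver() replaces is the point.

    #include <linux/module.h>
    #include "coreboot_table.h"

    #define MY_CB_TAG 0x99  /* hypothetical coreboot table tag */

    static int my_tag_probe(struct coreboot_device *dev)
    {
        dev_info(&dev->dev, "found coreboot entry, tag 0x%x\n",
                 dev->entry.tag);
        return 0;
    }

    static int my_tag_remove(struct coreboot_device *dev)
    {
        return 0;
    }

    static struct coreboot_driver my_tag_driver = {
        .probe  = my_tag_probe,
        .remove = my_tag_remove,
        .drv    = {
            .name = "my_cb_tag",
        },
        .tag    = MY_CB_TAG,
    };
    module_coreboot_driver(my_tag_driver);

    MODULE_LICENSE("GPL");
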
diff --git a/drivers/firmware/google/framebuffer-coreboot.c b/drivers/firmware/google/framebuffer-coreboot.c
index b8b49c0..916f26a 100644
--- a/drivers/firmware/google/framebuffer-coreboot.c
+++ b/drivers/firmware/google/framebuffer-coreboot.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * framebuffer-coreboot.c
  *
@@ -6,15 +7,6 @@
  * Copyright 2012-2013 David Herrmann <dh.herrmann@gmail.com>
  * Copyright 2017 Google Inc.
  * Copyright 2017 Samuel Holland <samuel@sholland.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License v2.0 as published by
- * the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
  */
 
 #include <linux/device.h>
@@ -97,19 +89,7 @@
 	},
 	.tag = CB_TAG_FRAMEBUFFER,
 };
-
-static int __init coreboot_framebuffer_init(void)
-{
-	return coreboot_driver_register(&framebuffer_driver);
-}
-
-static void coreboot_framebuffer_exit(void)
-{
-	coreboot_driver_unregister(&framebuffer_driver);
-}
-
-module_init(coreboot_framebuffer_init);
-module_exit(coreboot_framebuffer_exit);
+module_coreboot_driver(framebuffer_driver);
 
 MODULE_AUTHOR("Samuel Holland <samuel@sholland.org>");
 MODULE_LICENSE("GPL");
diff --git a/drivers/firmware/google/gsmi.c b/drivers/firmware/google/gsmi.c
index c8f169b..edaa4e5 100644
--- a/drivers/firmware/google/gsmi.c
+++ b/drivers/firmware/google/gsmi.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * Copyright 2010 Google Inc. All Rights Reserved.
  * Author: dlaurie@google.com (Duncan Laurie)
@@ -29,6 +30,7 @@
 #include <linux/efi.h>
 #include <linux/module.h>
 #include <linux/ucs2_string.h>
+#include <linux/suspend.h>
 
 #define GSMI_SHUTDOWN_CLEAN	0	/* Clean Shutdown */
 /* TODO(mikew@google.com): Tie in HARDLOCKUP_DETECTOR with NMIWDT */
@@ -70,6 +72,8 @@
 #define GSMI_CMD_SET_NVRAM_VAR		0x03
 #define GSMI_CMD_SET_EVENT_LOG		0x08
 #define GSMI_CMD_CLEAR_EVENT_LOG	0x09
+#define GSMI_CMD_LOG_S0IX_SUSPEND	0x0a
+#define GSMI_CMD_LOG_S0IX_RESUME	0x0b
 #define GSMI_CMD_CLEAR_CONFIG		0x20
 #define GSMI_CMD_HANDSHAKE_TYPE		0xC1
 
@@ -84,7 +88,7 @@
 	u32 address;			/* physical address of buffer */
 };
 
-struct gsmi_device {
+static struct gsmi_device {
 	struct platform_device *pdev;	/* platform device */
 	struct gsmi_buf *name_buf;	/* variable name buffer */
 	struct gsmi_buf *data_buf;	/* generic data buffer */
@@ -122,7 +126,6 @@
 	u32	instance;
 } __packed;
 
-
 /*
  * Some platforms don't have explicit SMI handshake
  * and need to wait for SMI to complete.
@@ -133,6 +136,15 @@
 MODULE_PARM_DESC(spincount,
 	"The number of loop iterations to use when using the spin handshake.");
 
+/*
+ * Platforms might not support S0ix logging in their GSMI handlers. In order to
+ * avoid any side-effects of generating an SMI for S0ix logging, use the S0ix
+ * related GSMI commands only for those platforms that explicitly enable this
+ * option.
+ */
+static bool s0ix_logging_enable;
+module_param(s0ix_logging_enable, bool, 0600);
+
 static struct gsmi_buf *gsmi_buf_alloc(void)
 {
 	struct gsmi_buf *smibuf;
@@ -289,6 +301,10 @@
 	return rc;
 }
 
+#ifdef CONFIG_EFI_VARS
+
+static struct efivars efivars;
+
 static efi_status_t gsmi_get_variable(efi_char16_t *name,
 				      efi_guid_t *vendor, u32 *attr,
 				      unsigned long *data_size,
@@ -466,6 +482,8 @@
 	.get_next_variable = gsmi_get_next_variable,
 };
 
+#endif /* CONFIG_EFI_VARS */
+
 static ssize_t eventlog_write(struct file *filp, struct kobject *kobj,
 			       struct bin_attribute *bin_attr,
 			       char *buf, loff_t pos, size_t count)
@@ -480,11 +498,10 @@
 	if (count < sizeof(u32))
 		return -EINVAL;
 	param.type = *(u32 *)buf;
-	count -= sizeof(u32);
 	buf += sizeof(u32);
 
 	/* The remaining buffer is the data payload */
-	if (count > gsmi_dev.data_buf->length)
+	if ((count - sizeof(u32)) > gsmi_dev.data_buf->length)
 		return -EINVAL;
 	param.data_len = count - sizeof(u32);
 
@@ -504,7 +521,7 @@
 
 	spin_unlock_irqrestore(&gsmi_dev.lock, flags);
 
-	return rc;
+	return (rc == 0) ? count : rc;
 
 }
 
@@ -716,6 +733,12 @@
 			DMI_MATCH(DMI_BOARD_VENDOR, "Google, Inc."),
 		},
 	},
+	{
+		.ident = "Coreboot Firmware",
+		.matches = {
+			DMI_MATCH(DMI_BIOS_VENDOR, "coreboot"),
+		},
+	},
 	{}
 };
 MODULE_DEVICE_TABLE(dmi, gsmi_dmi_table);
@@ -762,7 +785,6 @@
 }
 
 static struct kobject *gsmi_kobj;
-static struct efivars efivars;
 
 static const struct platform_device_info gsmi_dev_info = {
 	.name		= "gsmi",
@@ -771,6 +793,78 @@
 	.dma_mask	= DMA_BIT_MASK(32),
 };
 
+#ifdef CONFIG_PM
+static void gsmi_log_s0ix_info(u8 cmd)
+{
+	unsigned long flags;
+
+	/*
+	 * If platform has not enabled S0ix logging, then no action is
+	 * necessary.
+	 */
+	if (!s0ix_logging_enable)
+		return;
+
+	spin_lock_irqsave(&gsmi_dev.lock, flags);
+
+	memset(gsmi_dev.param_buf->start, 0, gsmi_dev.param_buf->length);
+
+	gsmi_exec(GSMI_CALLBACK, cmd);
+
+	spin_unlock_irqrestore(&gsmi_dev.lock, flags);
+}
+
+static int gsmi_log_s0ix_suspend(struct device *dev)
+{
+	/*
+	 * If system is not suspending via firmware using the standard ACPI Sx
+	 * types, then make a GSMI call to log the suspend info.
+	 */
+	if (!pm_suspend_via_firmware())
+		gsmi_log_s0ix_info(GSMI_CMD_LOG_S0IX_SUSPEND);
+
+	/*
+	 * Always return success, since we do not want suspend
+	 * to fail just because of logging failure.
+	 */
+	return 0;
+}
+
+static int gsmi_log_s0ix_resume(struct device *dev)
+{
+	/*
+	 * If system did not resume via firmware, then make a GSMI call to log
+	 * the resume info and wake source.
+	 */
+	if (!pm_resume_via_firmware())
+		gsmi_log_s0ix_info(GSMI_CMD_LOG_S0IX_RESUME);
+
+	/*
+	 * Always return success, since we do not want resume
+	 * to fail just because of logging failure.
+	 */
+	return 0;
+}
+
+static const struct dev_pm_ops gsmi_pm_ops = {
+	.suspend_noirq = gsmi_log_s0ix_suspend,
+	.resume_noirq = gsmi_log_s0ix_resume,
+};
+
+static int gsmi_platform_driver_probe(struct platform_device *dev)
+{
+	return 0;
+}
+
+static struct platform_driver gsmi_driver_info = {
+	.driver = {
+		.name = "gsmi",
+		.pm = &gsmi_pm_ops,
+	},
+	.probe = gsmi_platform_driver_probe,
+};
+#endif
+
 static __init int gsmi_init(void)
 {
 	unsigned long flags;
@@ -782,6 +876,14 @@
 
 	gsmi_dev.smi_cmd = acpi_gbl_FADT.smi_command;
 
+#ifdef CONFIG_PM
+	ret = platform_driver_register(&gsmi_driver_info);
+	if (unlikely(ret)) {
+		printk(KERN_ERR "gsmi: unable to register platform driver\n");
+		return ret;
+	}
+#endif
+
 	/* register device */
 	gsmi_dev.pdev = platform_device_register_full(&gsmi_dev_info);
 	if (IS_ERR(gsmi_dev.pdev)) {
@@ -886,11 +988,14 @@
 		goto out_remove_bin_file;
 	}
 
+#ifdef CONFIG_EFI_VARS
 	ret = efivars_register(&efivars, &efivar_ops, gsmi_kobj);
 	if (ret) {
 		printk(KERN_INFO "gsmi: Failed to register efivars\n");
-		goto out_remove_sysfs_files;
+		sysfs_remove_files(gsmi_kobj, gsmi_attrs);
+		goto out_remove_bin_file;
 	}
+#endif
 
 	register_reboot_notifier(&gsmi_reboot_notifier);
 	register_die_notifier(&gsmi_die_notifier);
@@ -901,8 +1006,6 @@
 
 	return 0;
 
-out_remove_sysfs_files:
-	sysfs_remove_files(gsmi_kobj, gsmi_attrs);
 out_remove_bin_file:
 	sysfs_remove_bin_file(gsmi_kobj, &eventlog_bin_attr);
 out_err:
@@ -922,7 +1025,9 @@
 	unregister_die_notifier(&gsmi_die_notifier);
 	atomic_notifier_chain_unregister(&panic_notifier_list,
 					 &gsmi_panic_notifier);
+#ifdef CONFIG_EFI_VARS
 	efivars_unregister(&efivars);
+#endif
 
 	sysfs_remove_files(gsmi_kobj, gsmi_attrs);
 	sysfs_remove_bin_file(gsmi_kobj, &eventlog_bin_attr);
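
Note: the S0ix hooks above hinge on pm_suspend_via_firmware()/pm_resume_via_firmware(), which are false exactly when the transition bypassed the ACPI S-state path. A stripped-down sketch of the same dev_pm_ops shape (driver name and logged action are placeholders), kept non-fatal so logging can never block suspend:

    #include <linux/device.h>
    #include <linux/suspend.h>

    static int my_suspend_noirq(struct device *dev)
    {
        if (!pm_suspend_via_firmware())
            dev_info(dev, "low-power idle entry, not an ACPI S-state\n");
        return 0;       /* never fail suspend because of logging */
    }

    static int my_resume_noirq(struct device *dev)
    {
        if (!pm_resume_via_firmware())
            dev_info(dev, "resumed without firmware involvement\n");
        return 0;
    }

    static const struct dev_pm_ops my_pm_ops = {
        .suspend_noirq = my_suspend_noirq,
        .resume_noirq  = my_resume_noirq,
    };
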
diff --git a/drivers/firmware/google/memconsole-coreboot.c b/drivers/firmware/google/memconsole-coreboot.c
index b29e107..fd7f0fb 100644
--- a/drivers/firmware/google/memconsole-coreboot.c
+++ b/drivers/firmware/google/memconsole-coreboot.c
@@ -1,21 +1,14 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * memconsole-coreboot.c
  *
  * Memory based BIOS console accessed through coreboot table.
  *
  * Copyright 2017 Google Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License v2.0 as published by
- * the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
  */
 
 #include <linux/device.h>
+#include <linux/io.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
 
@@ -34,7 +27,7 @@
 #define CURSOR_MASK ((1 << 28) - 1)
 #define OVERFLOW (1 << 31)
 
-static struct cbmem_cons __iomem *cbmem_console;
+static struct cbmem_cons *cbmem_console;
 static u32 cbmem_console_size;
 
 /*
@@ -75,7 +68,7 @@
 
 static int memconsole_probe(struct coreboot_device *dev)
 {
-	struct cbmem_cons __iomem *tmp_cbmc;
+	struct cbmem_cons *tmp_cbmc;
 
 	tmp_cbmc = memremap(dev->cbmem_ref.cbmem_addr,
 			    sizeof(*tmp_cbmc), MEMREMAP_WB);
@@ -85,13 +78,13 @@
 
 	/* Read size only once to prevent overrun attack through /dev/mem. */
 	cbmem_console_size = tmp_cbmc->size_dont_access_after_boot;
-	cbmem_console = memremap(dev->cbmem_ref.cbmem_addr,
+	cbmem_console = devm_memremap(&dev->dev, dev->cbmem_ref.cbmem_addr,
 				 cbmem_console_size + sizeof(*cbmem_console),
 				 MEMREMAP_WB);
 	memunmap(tmp_cbmc);
 
-	if (!cbmem_console)
-		return -ENOMEM;
+	if (IS_ERR(cbmem_console))
+		return PTR_ERR(cbmem_console);
 
 	memconsole_setup(memconsole_coreboot_read);
 
@@ -102,9 +95,6 @@
 {
 	memconsole_exit();
 
-	if (cbmem_console)
-		memunmap(cbmem_console);
-
 	return 0;
 }
 
@@ -116,19 +106,7 @@
 	},
 	.tag = CB_TAG_CBMEM_CONSOLE,
 };
-
-static void coreboot_memconsole_exit(void)
-{
-	coreboot_driver_unregister(&memconsole_driver);
-}
-
-static int __init coreboot_memconsole_init(void)
-{
-	return coreboot_driver_register(&memconsole_driver);
-}
-
-module_exit(coreboot_memconsole_exit);
-module_init(coreboot_memconsole_init);
+module_coreboot_driver(memconsole_driver);
 
 MODULE_AUTHOR("Google, Inc.");
 MODULE_LICENSE("GPL");
diff --git a/drivers/firmware/google/memconsole-x86-legacy.c b/drivers/firmware/google/memconsole-x86-legacy.c
index 19bcbd1..3d3c4f6 100644
--- a/drivers/firmware/google/memconsole-x86-legacy.c
+++ b/drivers/firmware/google/memconsole-x86-legacy.c
@@ -1,18 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * memconsole-x86-legacy.c
  *
  * EBDA specific parts of the memory based BIOS console.
  *
  * Copyright 2017 Google Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License v2.0 as published by
- * the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
  */
 
 #include <linux/kernel.h>
diff --git a/drivers/firmware/google/memconsole.c b/drivers/firmware/google/memconsole.c
index 166f07c..44d314a 100644
--- a/drivers/firmware/google/memconsole.c
+++ b/drivers/firmware/google/memconsole.c
@@ -1,35 +1,28 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * memconsole.c
  *
  * Architecture-independent parts of the memory based BIOS console.
  *
  * Copyright 2017 Google Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License v2.0 as published by
- * the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
  */
 
-#include <linux/init.h>
 #include <linux/sysfs.h>
 #include <linux/kobject.h>
 #include <linux/module.h>
 
 #include "memconsole.h"
 
-static ssize_t (*memconsole_read_func)(char *, loff_t, size_t);
-
 static ssize_t memconsole_read(struct file *filp, struct kobject *kobp,
 			       struct bin_attribute *bin_attr, char *buf,
 			       loff_t pos, size_t count)
 {
+	ssize_t (*memconsole_read_func)(char *, loff_t, size_t);
+
+	memconsole_read_func = bin_attr->private;
 	if (WARN_ON_ONCE(!memconsole_read_func))
 		return -EIO;
+
 	return memconsole_read_func(buf, pos, count);
 }
 
@@ -40,7 +33,7 @@
 
 void memconsole_setup(ssize_t (*read_func)(char *, loff_t, size_t))
 {
-	memconsole_read_func = read_func;
+	memconsole_bin_attr.private = read_func;
 }
 EXPORT_SYMBOL(memconsole_setup);
 
diff --git a/drivers/firmware/google/memconsole.h b/drivers/firmware/google/memconsole.h
index ff1592d..aaff2b7 100644
--- a/drivers/firmware/google/memconsole.h
+++ b/drivers/firmware/google/memconsole.h
@@ -1,18 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
 /*
  * memconsole.h
  *
  * Internal headers of the memory based BIOS console.
  *
  * Copyright 2017 Google Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License v2.0 as published by
- * the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
  */
 
 #ifndef __FIRMWARE_GOOGLE_MEMCONSOLE_H
diff --git a/drivers/firmware/google/vpd.c b/drivers/firmware/google/vpd.c
index 1aa67bb..db08122 100644
--- a/drivers/firmware/google/vpd.c
+++ b/drivers/firmware/google/vpd.c
@@ -1,18 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * vpd.c
  *
  * Driver for exporting VPD content to sysfs.
  *
  * Copyright 2017 Google Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License v2.0 as published by
- * the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
  */
 
 #include <linux/ctype.h>
@@ -100,8 +92,8 @@
 	return VPD_OK;
 }
 
-static int vpd_section_attrib_add(const u8 *key, s32 key_len,
-				  const u8 *value, s32 value_len,
+static int vpd_section_attrib_add(const u8 *key, u32 key_len,
+				  const u8 *value, u32 value_len,
 				  void *arg)
 {
 	int ret;
@@ -198,7 +190,7 @@
 
 	sec->name = name;
 
-	/* We want to export the raw partion with name ${name}_raw */
+	/* We want to export the raw partition with name ${name}_raw */
 	sec->raw_name = kasprintf(GFP_KERNEL, "%s_raw", name);
 	if (!sec->raw_name) {
 		err = -ENOMEM;
@@ -254,7 +246,7 @@
 
 static int vpd_sections_init(phys_addr_t physaddr)
 {
-	struct vpd_cbmem __iomem *temp;
+	struct vpd_cbmem *temp;
 	struct vpd_cbmem header;
 	int ret = 0;
 
@@ -262,7 +254,7 @@
 	if (!temp)
 		return -ENOMEM;
 
-	memcpy_fromio(&header, temp, sizeof(struct vpd_cbmem));
+	memcpy(&header, temp, sizeof(struct vpd_cbmem));
 	memunmap(temp);
 
 	if (header.magic != VPD_CBMEM_MAGIC)
@@ -324,19 +316,7 @@
 	},
 	.tag = CB_TAG_VPD,
 };
-
-static int __init coreboot_vpd_init(void)
-{
-	return coreboot_driver_register(&vpd_driver);
-}
-
-static void __exit coreboot_vpd_exit(void)
-{
-	coreboot_driver_unregister(&vpd_driver);
-}
-
-module_init(coreboot_vpd_init);
-module_exit(coreboot_vpd_exit);
+module_coreboot_driver(vpd_driver);
 
 MODULE_AUTHOR("Google, Inc.");
 MODULE_LICENSE("GPL");
diff --git a/drivers/firmware/google/vpd_decode.c b/drivers/firmware/google/vpd_decode.c
index 943acaa..5c6f2a7 100644
--- a/drivers/firmware/google/vpd_decode.c
+++ b/drivers/firmware/google/vpd_decode.c
@@ -1,26 +1,16 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * vpd_decode.c
  *
  * Google VPD decoding routines.
  *
  * Copyright 2017 Google Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License v2.0 as published by
- * the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
  */
 
-#include <linux/export.h>
-
 #include "vpd_decode.h"
 
-static int vpd_decode_len(const s32 max_len, const u8 *in,
-			  s32 *length, s32 *decoded_len)
+static int vpd_decode_len(const u32 max_len, const u8 *in,
+			  u32 *length, u32 *decoded_len)
 {
 	u8 more;
 	int i = 0;
@@ -40,18 +30,39 @@
 	} while (more);
 
 	*decoded_len = i;
-
 	return VPD_OK;
 }
 
-int vpd_decode_string(const s32 max_len, const u8 *input_buf, s32 *consumed,
+static int vpd_decode_entry(const u32 max_len, const u8 *input_buf,
+			    u32 *_consumed, const u8 **entry, u32 *entry_len)
+{
+	u32 decoded_len;
+	u32 consumed = *_consumed;
+
+	if (vpd_decode_len(max_len - consumed, &input_buf[consumed],
+			   entry_len, &decoded_len) != VPD_OK)
+		return VPD_FAIL;
+	if (max_len - consumed < decoded_len)
+		return VPD_FAIL;
+
+	consumed += decoded_len;
+	*entry = input_buf + consumed;
+
+	/* entry_len is untrusted data and must be checked again. */
+	if (max_len - consumed < *entry_len)
+		return VPD_FAIL;
+
+	consumed += *entry_len;
+	*_consumed = consumed;
+	return VPD_OK;
+}
+
+int vpd_decode_string(const u32 max_len, const u8 *input_buf, u32 *consumed,
 		      vpd_decode_callback callback, void *callback_arg)
 {
 	int type;
-	int res;
-	s32 key_len;
-	s32 value_len;
-	s32 decoded_len;
+	u32 key_len;
+	u32 value_len;
 	const u8 *key;
 	const u8 *value;
 
@@ -66,26 +77,14 @@
 	case VPD_TYPE_STRING:
 		(*consumed)++;
 
-		/* key */
-		res = vpd_decode_len(max_len - *consumed, &input_buf[*consumed],
-				     &key_len, &decoded_len);
-		if (res != VPD_OK || *consumed + decoded_len >= max_len)
+		if (vpd_decode_entry(max_len, input_buf, consumed, &key,
+				     &key_len) != VPD_OK)
 			return VPD_FAIL;
 
-		*consumed += decoded_len;
-		key = &input_buf[*consumed];
-		*consumed += key_len;
-
-		/* value */
-		res = vpd_decode_len(max_len - *consumed, &input_buf[*consumed],
-				     &value_len, &decoded_len);
-		if (res != VPD_OK || *consumed + decoded_len > max_len)
+		if (vpd_decode_entry(max_len, input_buf, consumed, &value,
+				     &value_len) != VPD_OK)
 			return VPD_FAIL;
 
-		*consumed += decoded_len;
-		value = &input_buf[*consumed];
-		*consumed += value_len;
-
 		if (type == VPD_TYPE_STRING)
 			return callback(key, key_len, value, value_len,
 					callback_arg);
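
vpd_decode_string() walks one key/value entry per call and hands counted (not NUL-terminated) byte ranges to the callback, which is why vpd_decode_entry() re-validates entry_len against the remaining buffer. A small hedged sketch of a caller, assuming blob points at a well-formed coreboot VPD section:

#include <linux/kernel.h>

#include "vpd_decode.h"

static int demo_vpd_cb(const u8 *key, u32 key_len,
		       const u8 *value, u32 value_len, void *arg)
{
	pr_info("vpd: %.*s = %.*s\n", (int)key_len, key,
		(int)value_len, value);
	return VPD_OK;
}

static void demo_walk_vpd(const u8 *blob, u32 blob_len)
{
	u32 consumed = 0;

	/* Stops on VPD_FAIL or once the whole blob has been consumed. */
	while (vpd_decode_string(blob_len, blob, &consumed,
				 demo_vpd_cb, NULL) == VPD_OK)
		;
}

This mirrors how vpd.c above drives the decoder, with vpd_section_attrib_add() as the callback.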
diff --git a/drivers/firmware/google/vpd_decode.h b/drivers/firmware/google/vpd_decode.h
index be3d62c..8dbe41c 100644
--- a/drivers/firmware/google/vpd_decode.h
+++ b/drivers/firmware/google/vpd_decode.h
@@ -1,18 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
 /*
  * vpd_decode.h
  *
  * Google VPD decoding routines.
  *
  * Copyright 2017 Google Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License v2.0 as published by
- * the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
  */
 
 #ifndef __VPD_DECODE_H
@@ -33,8 +25,8 @@
 };
 
 /* Callback for vpd_decode_string to invoke. */
-typedef int vpd_decode_callback(const u8 *key, s32 key_len,
-				const u8 *value, s32 value_len,
+typedef int vpd_decode_callback(const u8 *key, u32 key_len,
+				const u8 *value, u32 value_len,
 				void *arg);
 
 /*
@@ -52,7 +44,7 @@
  * If one entry is successfully decoded, sends it to callback and returns the
  * result.
  */
-int vpd_decode_string(const s32 max_len, const u8 *input_buf, s32 *consumed,
+int vpd_decode_string(const u32 max_len, const u8 *input_buf, u32 *consumed,
 		      vpd_decode_callback callback, void *callback_arg);
 
 #endif  /* __VPD_DECODE_H */
diff --git a/drivers/firmware/imx/Kconfig b/drivers/firmware/imx/Kconfig
new file mode 100644
index 0000000..0dbee32
--- /dev/null
+++ b/drivers/firmware/imx/Kconfig
@@ -0,0 +1,29 @@
+# SPDX-License-Identifier: GPL-2.0-only
+config IMX_DSP
+	bool "IMX DSP Protocol driver"
+	depends on IMX_MBOX
+	help
+	  This enables the DSP IPC protocol between the host AP (Linux)
+	  and the firmware running on the DSP.
+	  A DSP exists on some i.MX8 processors (e.g. i.MX8QM, i.MX8QXP).
+
+	  It acts like a doorbell. Clients might use shared memory to
+	  exchange information with the DSP side.
+
+config IMX_SCU
+	bool "IMX SCU Protocol driver"
+	depends on IMX_MBOX
+	help
+	  The System Controller Firmware (SCFW) is a low-level system function
+	  which runs on a dedicated Cortex-M core to provide power, clock, and
+	  resource management. It exists on some i.MX8 processors, e.g. i.MX8QM
+	  (QM, QP) and i.MX8QX (QXP, DX).
+
+	  This driver manages the IPC interface between host CPU and the
+	  SCU firmware running on M4.
+
+config IMX_SCU_PD
+	bool "IMX SCU Power Domain driver"
+	depends on IMX_SCU
+	help
+	  The System Controller Firmware (SCFW) based power domain driver.
diff --git a/drivers/firmware/imx/Makefile b/drivers/firmware/imx/Makefile
new file mode 100644
index 0000000..08bc9dd
--- /dev/null
+++ b/drivers/firmware/imx/Makefile
@@ -0,0 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_IMX_DSP)		+= imx-dsp.o
+obj-$(CONFIG_IMX_SCU)		+= imx-scu.o misc.o imx-scu-irq.o
+obj-$(CONFIG_IMX_SCU_PD)	+= scu-pd.o
diff --git a/drivers/firmware/imx/imx-dsp.c b/drivers/firmware/imx/imx-dsp.c
new file mode 100644
index 0000000..a43d2db
--- /dev/null
+++ b/drivers/firmware/imx/imx-dsp.c
@@ -0,0 +1,155 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright 2019 NXP
+ *  Author: Daniel Baluta <daniel.baluta@nxp.com>
+ *
+ * Implementation of the DSP IPC interface (host side)
+ */
+
+#include <linux/firmware/imx/dsp.h>
+#include <linux/kernel.h>
+#include <linux/mailbox_client.h>
+#include <linux/module.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+/*
+ * imx_dsp_ring_doorbell - triggers an interrupt on the other side (DSP)
+ *
+ * @ipc: DSP IPC handle
+ * @idx: index of the channel on which to trigger the interrupt
+ *
+ * Returns non-negative value for success, negative value for error
+ */
+int imx_dsp_ring_doorbell(struct imx_dsp_ipc *ipc, unsigned int idx)
+{
+	int ret;
+	struct imx_dsp_chan *dsp_chan;
+
+	if (idx >= DSP_MU_CHAN_NUM)
+		return -EINVAL;
+
+	dsp_chan = &ipc->chans[idx];
+	ret = mbox_send_message(dsp_chan->ch, NULL);
+	if (ret < 0)
+		return ret;
+
+	return 0;
+}
+EXPORT_SYMBOL(imx_dsp_ring_doorbell);
+
+/*
+ * imx_dsp_handle_rx - rx callback used by imx mailbox
+ *
+ * @c: mbox client
+ * @msg: message received
+ *
+ * Users of DSP IPC will need to provide handle_reply and handle_request
+ * callbacks.
+ */
+static void imx_dsp_handle_rx(struct mbox_client *c, void *msg)
+{
+	struct imx_dsp_chan *chan = container_of(c, struct imx_dsp_chan, cl);
+
+	if (chan->idx == 0) {
+		chan->ipc->ops->handle_reply(chan->ipc);
+	} else {
+		chan->ipc->ops->handle_request(chan->ipc);
+		imx_dsp_ring_doorbell(chan->ipc, 1);
+	}
+}
+
+static int imx_dsp_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct imx_dsp_ipc *dsp_ipc;
+	struct imx_dsp_chan *dsp_chan;
+	struct mbox_client *cl;
+	char *chan_name;
+	int ret;
+	int i, j;
+
+	device_set_of_node_from_dev(&pdev->dev, pdev->dev.parent);
+
+	dsp_ipc = devm_kzalloc(dev, sizeof(*dsp_ipc), GFP_KERNEL);
+	if (!dsp_ipc)
+		return -ENOMEM;
+
+	for (i = 0; i < DSP_MU_CHAN_NUM; i++) {
+		if (i < 2)
+			chan_name = kasprintf(GFP_KERNEL, "txdb%d", i);
+		else
+			chan_name = kasprintf(GFP_KERNEL, "rxdb%d", i - 2);
+
+		if (!chan_name)
+			return -ENOMEM;
+
+		dsp_chan = &dsp_ipc->chans[i];
+		cl = &dsp_chan->cl;
+		cl->dev = dev;
+		cl->tx_block = false;
+		cl->knows_txdone = true;
+		cl->rx_callback = imx_dsp_handle_rx;
+
+		dsp_chan->ipc = dsp_ipc;
+		dsp_chan->idx = i % 2;
+		dsp_chan->ch = mbox_request_channel_byname(cl, chan_name);
+		if (IS_ERR(dsp_chan->ch)) {
+			ret = PTR_ERR(dsp_chan->ch);
+			if (ret != -EPROBE_DEFER)
+				dev_err(dev, "Failed to request mbox chan %s ret %d\n",
+					chan_name, ret);
+			goto out;
+		}
+
+		dev_dbg(dev, "request mbox chan %s\n", chan_name);
+		/* chan_name is not used anymore by framework */
+		kfree(chan_name);
+	}
+
+	dsp_ipc->dev = dev;
+
+	dev_set_drvdata(dev, dsp_ipc);
+
+	dev_info(dev, "NXP i.MX DSP IPC initialized\n");
+
+	return devm_of_platform_populate(dev);
+out:
+	kfree(chan_name);
+	for (j = 0; j < i; j++) {
+		dsp_chan = &dsp_ipc->chans[j];
+		mbox_free_channel(dsp_chan->ch);
+	}
+
+	return ret;
+}
+
+static int imx_dsp_remove(struct platform_device *pdev)
+{
+	struct imx_dsp_chan *dsp_chan;
+	struct imx_dsp_ipc *dsp_ipc;
+	int i;
+
+	dsp_ipc = dev_get_drvdata(&pdev->dev);
+
+	for (i = 0; i < DSP_MU_CHAN_NUM; i++) {
+		dsp_chan = &dsp_ipc->chans[i];
+		mbox_free_channel(dsp_chan->ch);
+	}
+
+	return 0;
+}
+
+static struct platform_driver imx_dsp_driver = {
+	.driver = {
+		.name = "imx-dsp",
+	},
+	.probe = imx_dsp_probe,
+	.remove = imx_dsp_remove,
+};
+builtin_platform_driver(imx_dsp_driver);
+
+MODULE_AUTHOR("Daniel Baluta <daniel.baluta@nxp.com>");
+MODULE_DESCRIPTION("IMX DSP IPC protocol driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/firmware/imx/imx-scu-irq.c b/drivers/firmware/imx/imx-scu-irq.c
new file mode 100644
index 0000000..687121f
--- /dev/null
+++ b/drivers/firmware/imx/imx-scu-irq.c
@@ -0,0 +1,171 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright 2019 NXP
+ *
+ * Implementation of the SCU IRQ functions using MU.
+ *
+ */
+
+#include <dt-bindings/firmware/imx/rsrc.h>
+#include <linux/firmware/imx/ipc.h>
+#include <linux/mailbox_client.h>
+
+#define IMX_SC_IRQ_FUNC_ENABLE	1
+#define IMX_SC_IRQ_FUNC_STATUS	2
+#define IMX_SC_IRQ_NUM_GROUP	4
+
+static u32 mu_resource_id;
+
+struct imx_sc_msg_irq_get_status {
+	struct imx_sc_rpc_msg hdr;
+	union {
+		struct {
+			u16 resource;
+			u8 group;
+			u8 reserved;
+		} __packed req;
+		struct {
+			u32 status;
+		} resp;
+	} data;
+};
+
+struct imx_sc_msg_irq_enable {
+	struct imx_sc_rpc_msg hdr;
+	u32 mask;
+	u16 resource;
+	u8 group;
+	u8 enable;
+} __packed;
+
+static struct imx_sc_ipc *imx_sc_irq_ipc_handle;
+static struct work_struct imx_sc_irq_work;
+static ATOMIC_NOTIFIER_HEAD(imx_scu_irq_notifier_chain);
+
+int imx_scu_irq_register_notifier(struct notifier_block *nb)
+{
+	return atomic_notifier_chain_register(
+		&imx_scu_irq_notifier_chain, nb);
+}
+EXPORT_SYMBOL(imx_scu_irq_register_notifier);
+
+int imx_scu_irq_unregister_notifier(struct notifier_block *nb)
+{
+	return atomic_notifier_chain_unregister(
+		&imx_scu_irq_notifier_chain, nb);
+}
+EXPORT_SYMBOL(imx_scu_irq_unregister_notifier);
+
+static int imx_scu_irq_notifier_call_chain(unsigned long status, u8 *group)
+{
+	return atomic_notifier_call_chain(&imx_scu_irq_notifier_chain,
+		status, (void *)group);
+}
+
+static void imx_scu_irq_work_handler(struct work_struct *work)
+{
+	struct imx_sc_msg_irq_get_status msg;
+	struct imx_sc_rpc_msg *hdr = &msg.hdr;
+	u32 irq_status;
+	int ret;
+	u8 i;
+
+	for (i = 0; i < IMX_SC_IRQ_NUM_GROUP; i++) {
+		hdr->ver = IMX_SC_RPC_VERSION;
+		hdr->svc = IMX_SC_RPC_SVC_IRQ;
+		hdr->func = IMX_SC_IRQ_FUNC_STATUS;
+		hdr->size = 2;
+
+		msg.data.req.resource = mu_resource_id;
+		msg.data.req.group = i;
+
+		ret = imx_scu_call_rpc(imx_sc_irq_ipc_handle, &msg, true);
+		if (ret) {
+			pr_err("get irq group %d status failed, ret %d\n",
+			       i, ret);
+			return;
+		}
+
+		irq_status = msg.data.resp.status;
+		if (!irq_status)
+			continue;
+
+		imx_scu_irq_notifier_call_chain(irq_status, &i);
+	}
+}
+
+int imx_scu_irq_group_enable(u8 group, u32 mask, u8 enable)
+{
+	struct imx_sc_msg_irq_enable msg;
+	struct imx_sc_rpc_msg *hdr = &msg.hdr;
+	int ret;
+
+	if (!imx_sc_irq_ipc_handle)
+		return -EPROBE_DEFER;
+
+	hdr->ver = IMX_SC_RPC_VERSION;
+	hdr->svc = IMX_SC_RPC_SVC_IRQ;
+	hdr->func = IMX_SC_IRQ_FUNC_ENABLE;
+	hdr->size = 3;
+
+	msg.resource = mu_resource_id;
+	msg.group = group;
+	msg.mask = mask;
+	msg.enable = enable;
+
+	ret = imx_scu_call_rpc(imx_sc_irq_ipc_handle, &msg, true);
+	if (ret)
+		pr_err("enable irq failed, group %d, mask %d, ret %d\n",
+			group, mask, ret);
+
+	return ret;
+}
+EXPORT_SYMBOL(imx_scu_irq_group_enable);
+
+static void imx_scu_irq_callback(struct mbox_client *c, void *msg)
+{
+	schedule_work(&imx_sc_irq_work);
+}
+
+int imx_scu_enable_general_irq_channel(struct device *dev)
+{
+	struct of_phandle_args spec;
+	struct mbox_client *cl;
+	struct mbox_chan *ch;
+	int ret = 0, i = 0;
+
+	ret = imx_scu_get_handle(&imx_sc_irq_ipc_handle);
+	if (ret)
+		return ret;
+
+	cl = devm_kzalloc(dev, sizeof(*cl), GFP_KERNEL);
+	if (!cl)
+		return -ENOMEM;
+
+	cl->dev = dev;
+	cl->rx_callback = imx_scu_irq_callback;
+
+	/* SCU general IRQ uses general interrupt channel 3 */
+	ch = mbox_request_channel_byname(cl, "gip3");
+	if (IS_ERR(ch)) {
+		ret = PTR_ERR(ch);
+		dev_err(dev, "failed to request mbox chan gip3, ret %d\n", ret);
+		devm_kfree(dev, cl);
+		return ret;
+	}
+
+	INIT_WORK(&imx_sc_irq_work, imx_scu_irq_work_handler);
+
+	if (!of_parse_phandle_with_args(dev->of_node, "mboxes",
+				       "#mbox-cells", 0, &spec))
+		i = of_alias_get_id(spec.np, "mu");
+
+	/* use mu1 as general mu irq channel if failed */
+	if (i < 0)
+		i = 1;
+
+	mu_resource_id = IMX_SC_R_MU_0A + i;
+
+	return ret;
+}
+EXPORT_SYMBOL(imx_scu_enable_general_irq_channel);
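
Clients of the SCU IRQ layer register a notifier and then ask the SCFW to unmask the interrupt sources they care about; the notifier receives the raw group status word plus a pointer to the group index, as imx_scu_irq_work_handler() shows. A hedged sketch with placeholder group/mask values (they are not real SCFW source definitions):

#include <linux/bits.h>
#include <linux/firmware/imx/sci.h>
#include <linux/kernel.h>
#include <linux/notifier.h>

#define DEMO_IRQ_GROUP	0		/* placeholder group index */
#define DEMO_IRQ_MASK	BIT(0)		/* placeholder source bit */

static int demo_scu_irq_notify(struct notifier_block *nb,
			       unsigned long status, void *group)
{
	if (*(u8 *)group == DEMO_IRQ_GROUP && (status & DEMO_IRQ_MASK))
		pr_info("demo: SCU interrupt event\n");

	return NOTIFY_OK;
}

static struct notifier_block demo_scu_irq_nb = {
	.notifier_call = demo_scu_irq_notify,
};

static int demo_enable_scu_irq(void)
{
	int ret;

	ret = imx_scu_irq_register_notifier(&demo_scu_irq_nb);
	if (ret)
		return ret;

	/* Returns -EPROBE_DEFER until the SCU IPC handle is available. */
	return imx_scu_irq_group_enable(DEMO_IRQ_GROUP, DEMO_IRQ_MASK, true);
}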
diff --git a/drivers/firmware/imx/imx-scu.c b/drivers/firmware/imx/imx-scu.c
new file mode 100644
index 0000000..04a24a8
--- /dev/null
+++ b/drivers/firmware/imx/imx-scu.c
@@ -0,0 +1,276 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright 2018 NXP
+ *  Author: Dong Aisheng <aisheng.dong@nxp.com>
+ *
+ * Implementation of the SCU IPC functions using MUs (client side).
+ *
+ */
+
+#include <linux/err.h>
+#include <linux/firmware/imx/types.h>
+#include <linux/firmware/imx/ipc.h>
+#include <linux/firmware/imx/sci.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/kernel.h>
+#include <linux/mailbox_client.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+
+#define SCU_MU_CHAN_NUM		8
+#define MAX_RX_TIMEOUT		(msecs_to_jiffies(30))
+
+struct imx_sc_chan {
+	struct imx_sc_ipc *sc_ipc;
+
+	struct mbox_client cl;
+	struct mbox_chan *ch;
+	int idx;
+};
+
+struct imx_sc_ipc {
+	/* SCU uses 4 Tx and 4 Rx channels */
+	struct imx_sc_chan chans[SCU_MU_CHAN_NUM];
+	struct device *dev;
+	struct mutex lock;
+	struct completion done;
+
+	/* temporarily store the SCU msg */
+	u32 *msg;
+	u8 rx_size;
+	u8 count;
+};
+
+/*
+ * This type is used to indicate error response for most functions.
+ */
+enum imx_sc_error_codes {
+	IMX_SC_ERR_NONE = 0,	/* Success */
+	IMX_SC_ERR_VERSION = 1,	/* Incompatible API version */
+	IMX_SC_ERR_CONFIG = 2,	/* Configuration error */
+	IMX_SC_ERR_PARM = 3,	/* Bad parameter */
+	IMX_SC_ERR_NOACCESS = 4,	/* Permission error (no access) */
+	IMX_SC_ERR_LOCKED = 5,	/* Permission error (locked) */
+	IMX_SC_ERR_UNAVAILABLE = 6,	/* Unavailable (out of resources) */
+	IMX_SC_ERR_NOTFOUND = 7,	/* Not found */
+	IMX_SC_ERR_NOPOWER = 8,	/* No power */
+	IMX_SC_ERR_IPC = 9,		/* Generic IPC error */
+	IMX_SC_ERR_BUSY = 10,	/* Resource is currently busy/active */
+	IMX_SC_ERR_FAIL = 11,	/* General I/O failure */
+	IMX_SC_ERR_LAST
+};
+
+static int imx_sc_linux_errmap[IMX_SC_ERR_LAST] = {
+	0,	 /* IMX_SC_ERR_NONE */
+	-EINVAL, /* IMX_SC_ERR_VERSION */
+	-EINVAL, /* IMX_SC_ERR_CONFIG */
+	-EINVAL, /* IMX_SC_ERR_PARM */
+	-EACCES, /* IMX_SC_ERR_NOACCESS */
+	-EACCES, /* IMX_SC_ERR_LOCKED */
+	-ERANGE, /* IMX_SC_ERR_UNAVAILABLE */
+	-EEXIST, /* IMX_SC_ERR_NOTFOUND */
+	-EPERM,	 /* IMX_SC_ERR_NOPOWER */
+	-EPIPE,	 /* IMX_SC_ERR_IPC */
+	-EBUSY,	 /* IMX_SC_ERR_BUSY */
+	-EIO,	 /* IMX_SC_ERR_FAIL */
+};
+
+static struct imx_sc_ipc *imx_sc_ipc_handle;
+
+static inline int imx_sc_to_linux_errno(int errno)
+{
+	if (errno >= IMX_SC_ERR_NONE && errno < IMX_SC_ERR_LAST)
+		return imx_sc_linux_errmap[errno];
+	return -EIO;
+}
+
+/*
+ * Get the default handle used by SCU
+ */
+int imx_scu_get_handle(struct imx_sc_ipc **ipc)
+{
+	if (!imx_sc_ipc_handle)
+		return -EPROBE_DEFER;
+
+	*ipc = imx_sc_ipc_handle;
+	return 0;
+}
+EXPORT_SYMBOL(imx_scu_get_handle);
+
+static void imx_scu_rx_callback(struct mbox_client *c, void *msg)
+{
+	struct imx_sc_chan *sc_chan = container_of(c, struct imx_sc_chan, cl);
+	struct imx_sc_ipc *sc_ipc = sc_chan->sc_ipc;
+	struct imx_sc_rpc_msg *hdr;
+	u32 *data = msg;
+
+	if (sc_chan->idx == 0) {
+		hdr = msg;
+		sc_ipc->rx_size = hdr->size;
+		dev_dbg(sc_ipc->dev, "msg rx size %u\n", sc_ipc->rx_size);
+		if (sc_ipc->rx_size > 4)
+			dev_warn(sc_ipc->dev, "RPC does not support receiving over 4 words: %u\n",
+				 sc_ipc->rx_size);
+	}
+
+	sc_ipc->msg[sc_chan->idx] = *data;
+	sc_ipc->count++;
+
+	dev_dbg(sc_ipc->dev, "mu %u msg %u 0x%x\n", sc_chan->idx,
+		sc_ipc->count, *data);
+
+	if ((sc_ipc->rx_size != 0) && (sc_ipc->count == sc_ipc->rx_size))
+		complete(&sc_ipc->done);
+}
+
+static int imx_scu_ipc_write(struct imx_sc_ipc *sc_ipc, void *msg)
+{
+	struct imx_sc_rpc_msg *hdr = msg;
+	struct imx_sc_chan *sc_chan;
+	u32 *data = msg;
+	int ret;
+	int i;
+
+	/* Check size */
+	if (hdr->size > IMX_SC_RPC_MAX_MSG)
+		return -EINVAL;
+
+	dev_dbg(sc_ipc->dev, "RPC SVC %u FUNC %u SIZE %u\n", hdr->svc,
+		hdr->func, hdr->size);
+
+	for (i = 0; i < hdr->size; i++) {
+		sc_chan = &sc_ipc->chans[i % 4];
+		ret = mbox_send_message(sc_chan->ch, &data[i]);
+		if (ret < 0)
+			return ret;
+	}
+
+	return 0;
+}
+
+/*
+ * RPC command/response
+ */
+int imx_scu_call_rpc(struct imx_sc_ipc *sc_ipc, void *msg, bool have_resp)
+{
+	struct imx_sc_rpc_msg *hdr;
+	int ret;
+
+	if (WARN_ON(!sc_ipc || !msg))
+		return -EINVAL;
+
+	mutex_lock(&sc_ipc->lock);
+	reinit_completion(&sc_ipc->done);
+
+	sc_ipc->msg = msg;
+	sc_ipc->count = 0;
+	ret = imx_scu_ipc_write(sc_ipc, msg);
+	if (ret < 0) {
+		dev_err(sc_ipc->dev, "RPC send msg failed: %d\n", ret);
+		goto out;
+	}
+
+	if (have_resp) {
+		if (!wait_for_completion_timeout(&sc_ipc->done,
+						 MAX_RX_TIMEOUT)) {
+			dev_err(sc_ipc->dev, "RPC send msg timeout\n");
+			mutex_unlock(&sc_ipc->lock);
+			return -ETIMEDOUT;
+		}
+
+		/* response status is stored in hdr->func field */
+		hdr = msg;
+		ret = hdr->func;
+	}
+
+out:
+	mutex_unlock(&sc_ipc->lock);
+
+	dev_dbg(sc_ipc->dev, "RPC SVC done\n");
+
+	return imx_sc_to_linux_errno(ret);
+}
+EXPORT_SYMBOL(imx_scu_call_rpc);
+
+static int imx_scu_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct imx_sc_ipc *sc_ipc;
+	struct imx_sc_chan *sc_chan;
+	struct mbox_client *cl;
+	char *chan_name;
+	int ret;
+	int i;
+
+	sc_ipc = devm_kzalloc(dev, sizeof(*sc_ipc), GFP_KERNEL);
+	if (!sc_ipc)
+		return -ENOMEM;
+
+	for (i = 0; i < SCU_MU_CHAN_NUM; i++) {
+		if (i < 4)
+			chan_name = kasprintf(GFP_KERNEL, "tx%d", i);
+		else
+			chan_name = kasprintf(GFP_KERNEL, "rx%d", i - 4);
+
+		if (!chan_name)
+			return -ENOMEM;
+
+		sc_chan = &sc_ipc->chans[i];
+		cl = &sc_chan->cl;
+		cl->dev = dev;
+		cl->tx_block = false;
+		cl->knows_txdone = true;
+		cl->rx_callback = imx_scu_rx_callback;
+
+		sc_chan->sc_ipc = sc_ipc;
+		sc_chan->idx = i % 4;
+		sc_chan->ch = mbox_request_channel_byname(cl, chan_name);
+		if (IS_ERR(sc_chan->ch)) {
+			ret = PTR_ERR(sc_chan->ch);
+			if (ret != -EPROBE_DEFER)
+				dev_err(dev, "Failed to request mbox chan %s ret %d\n",
+					chan_name, ret);
+			return ret;
+		}
+
+		dev_dbg(dev, "request mbox chan %s\n", chan_name);
+		/* chan_name is not used anymore by framework */
+		kfree(chan_name);
+	}
+
+	sc_ipc->dev = dev;
+	mutex_init(&sc_ipc->lock);
+	init_completion(&sc_ipc->done);
+
+	imx_sc_ipc_handle = sc_ipc;
+
+	ret = imx_scu_enable_general_irq_channel(dev);
+	if (ret)
+		dev_warn(dev,
+			"failed to enable general irq channel: %d\n", ret);
+
+	dev_info(dev, "NXP i.MX SCU Initialized\n");
+
+	return devm_of_platform_populate(dev);
+}
+
+static const struct of_device_id imx_scu_match[] = {
+	{ .compatible = "fsl,imx-scu", },
+	{ /* Sentinel */ }
+};
+
+static struct platform_driver imx_scu_driver = {
+	.driver = {
+		.name = "imx-scu",
+		.of_match_table = imx_scu_match,
+	},
+	.probe = imx_scu_probe,
+};
+builtin_platform_driver(imx_scu_driver);
+
+MODULE_AUTHOR("Dong Aisheng <aisheng.dong@nxp.com>");
+MODULE_DESCRIPTION("IMX SCU firmware protocol driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/firmware/imx/misc.c b/drivers/firmware/imx/misc.c
new file mode 100644
index 0000000..4b56a58
--- /dev/null
+++ b/drivers/firmware/imx/misc.c
@@ -0,0 +1,137 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (C) 2016 Freescale Semiconductor, Inc.
+ * Copyright 2017~2018 NXP
+ *  Author: Dong Aisheng <aisheng.dong@nxp.com>
+ *
+ * File containing client-side RPC functions for the MISC service. These
+ * functions are provided to clients that communicate with the SC.
+ *
+ */
+
+#include <linux/firmware/imx/svc/misc.h>
+
+struct imx_sc_msg_req_misc_set_ctrl {
+	struct imx_sc_rpc_msg hdr;
+	u32 ctrl;
+	u32 val;
+	u16 resource;
+} __packed;
+
+struct imx_sc_msg_req_cpu_start {
+	struct imx_sc_rpc_msg hdr;
+	u32 address_hi;
+	u32 address_lo;
+	u16 resource;
+	u8 enable;
+} __packed;
+
+struct imx_sc_msg_req_misc_get_ctrl {
+	struct imx_sc_rpc_msg hdr;
+	u32 ctrl;
+	u16 resource;
+} __packed;
+
+struct imx_sc_msg_resp_misc_get_ctrl {
+	struct imx_sc_rpc_msg hdr;
+	u32 val;
+} __packed;
+
+/*
+ * This function sets a miscellaneous control value.
+ *
+ * @param[in]     ipc         IPC handle
+ * @param[in]     resource    resource the control is associated with
+ * @param[in]     ctrl        control to change
+ * @param[in]     val         value to apply to the control
+ *
+ * @return Returns 0 for success and < 0 for errors.
+ */
+
+int imx_sc_misc_set_control(struct imx_sc_ipc *ipc, u32 resource,
+			    u8 ctrl, u32 val)
+{
+	struct imx_sc_msg_req_misc_set_ctrl msg;
+	struct imx_sc_rpc_msg *hdr = &msg.hdr;
+
+	hdr->ver = IMX_SC_RPC_VERSION;
+	hdr->svc = (uint8_t)IMX_SC_RPC_SVC_MISC;
+	hdr->func = (uint8_t)IMX_SC_MISC_FUNC_SET_CONTROL;
+	hdr->size = 4;
+
+	msg.ctrl = ctrl;
+	msg.val = val;
+	msg.resource = resource;
+
+	return imx_scu_call_rpc(ipc, &msg, true);
+}
+EXPORT_SYMBOL(imx_sc_misc_set_control);
+
+/*
+ * This function gets a miscellaneous control value.
+ *
+ * @param[in]     ipc         IPC handle
+ * @param[in]     resource    resource the control is associated with
+ * @param[in]     ctrl        control to get
+ * @param[out]    val         pointer to return the control value
+ *
+ * @return Returns 0 for success and < 0 for errors.
+ */
+
+int imx_sc_misc_get_control(struct imx_sc_ipc *ipc, u32 resource,
+			    u8 ctrl, u32 *val)
+{
+	struct imx_sc_msg_req_misc_get_ctrl msg;
+	struct imx_sc_msg_resp_misc_get_ctrl *resp;
+	struct imx_sc_rpc_msg *hdr = &msg.hdr;
+	int ret;
+
+	hdr->ver = IMX_SC_RPC_VERSION;
+	hdr->svc = (uint8_t)IMX_SC_RPC_SVC_MISC;
+	hdr->func = (uint8_t)IMX_SC_MISC_FUNC_GET_CONTROL;
+	hdr->size = 3;
+
+	msg.ctrl = ctrl;
+	msg.resource = resource;
+
+	ret = imx_scu_call_rpc(ipc, &msg, true);
+	if (ret)
+		return ret;
+
+	resp = (struct imx_sc_msg_resp_misc_get_ctrl *)&msg;
+	if (val != NULL)
+		*val = resp->val;
+
+	return 0;
+}
+EXPORT_SYMBOL(imx_sc_misc_get_control);
+
+/*
+ * This function starts/stops a CPU identified by @resource
+ *
+ * @param[in]     ipc         IPC handle
+ * @param[in]     resource    resource the control is associated with
+ * @param[in]     enable      true for start, false for stop
+ * @param[in]     phys_addr   initial instruction address to be executed
+ *
+ * @return Returns 0 for success and < 0 for errors.
+ */
+int imx_sc_pm_cpu_start(struct imx_sc_ipc *ipc, u32 resource,
+			bool enable, u64 phys_addr)
+{
+	struct imx_sc_msg_req_cpu_start msg;
+	struct imx_sc_rpc_msg *hdr = &msg.hdr;
+
+	hdr->ver = IMX_SC_RPC_VERSION;
+	hdr->svc = IMX_SC_RPC_SVC_PM;
+	hdr->func = IMX_SC_PM_FUNC_CPU_START;
+	hdr->size = 4;
+
+	msg.address_hi = phys_addr >> 32;
+	msg.address_lo = phys_addr;
+	msg.resource = resource;
+	msg.enable = enable;
+
+	return imx_scu_call_rpc(ipc, &msg, true);
+}
+EXPORT_SYMBOL(imx_sc_pm_cpu_start);
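
Most callers never build a header themselves; they fetch the IPC handle once and use wrappers like the ones above. A short hedged sketch (IMX_SC_R_DSP and control id 0 are illustrative values only, not a meaningful SCFW control):

#include <dt-bindings/firmware/imx/rsrc.h>
#include <linux/firmware/imx/sci.h>
#include <linux/kernel.h>

static int demo_read_control(void)
{
	struct imx_sc_ipc *ipc;
	u32 val;
	int ret;

	ret = imx_scu_get_handle(&ipc);
	if (ret)
		return ret;

	ret = imx_sc_misc_get_control(ipc, IMX_SC_R_DSP, 0, &val);
	if (ret)
		return ret;

	pr_info("demo: control value 0x%x\n", val);
	return 0;
}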
diff --git a/drivers/firmware/imx/scu-pd.c b/drivers/firmware/imx/scu-pd.c
new file mode 100644
index 0000000..b556612
--- /dev/null
+++ b/drivers/firmware/imx/scu-pd.c
@@ -0,0 +1,349 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (C) 2016 Freescale Semiconductor, Inc.
+ * Copyright 2017-2018 NXP
+ *	Dong Aisheng <aisheng.dong@nxp.com>
+ *
+ * Implementation of the SCU based Power Domains
+ *
+ * NOTE: a better implementation, suggested by Ulf Hansson, is to use a
+ * single global power domain, implement the ->attach|detach_dev()
+ * callbacks for the genpd and use the regular of_genpd_add_provider_simple().
+ * From within ->attach_dev(), we could get the OF node of the device
+ * being attached, parse the power-domain cell containing the
+ * "resource id" and store that in the per-device
+ * struct generic_pm_domain_data (there is a void pointer there for
+ * storing this kind of thing).
+ *
+ * Additionally, we need to implement the ->stop() and ->start()
+ * callbacks of genpd, which is where you "power on/off" devices,
+ * rather than using the above ->power_on|off() callbacks.
+ *
+ * However, there are two known issues:
+ * 1. The ->attach_dev() of the power domain infrastructure still does
+ *    not support the multi-domain case: the struct device *dev passed
+ *    in is a virtual PD device, so it does not help with parsing the
+ *    real device resource id from the device tree, and it is unaware
+ *    of which real sub power domain of the device should be attached.
+ *
+ *    The framework needs some proper extension to support multi power
+ *    domain cases.
+ *
+ * 2. It also breaks most current drivers, as the driver probe sequence
+ *    behavior changes if the ->power_on|off() callbacks are removed and
+ *    ->start() and ->stop() are used instead. genpd_dev_pm_attach will
+ *    only power up the domain and attach the device, but will not call
+ *    .start(), which relies on device runtime pm. That means the device
+ *    power is still not up before the driver probe function runs. For
+ *    SCU enabled platforms, all device drivers accessing registers/clocks
+ *    without the power domain enabled will trigger a HW access error.
+ *    That means we need to fix most drivers' probe sequences with proper
+ *    runtime pm.
+ *
+ * In summary, we need to fix the above two issues before we can switch
+ * to the "single global power domain" approach.
+ *
+ */
+
+#include <dt-bindings/firmware/imx/rsrc.h>
+#include <linux/firmware/imx/sci.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/pm.h>
+#include <linux/pm_domain.h>
+#include <linux/slab.h>
+
+/* SCU Power Mode Protocol definition */
+struct imx_sc_msg_req_set_resource_power_mode {
+	struct imx_sc_rpc_msg hdr;
+	u16 resource;
+	u8 mode;
+} __packed;
+
+#define IMX_SCU_PD_NAME_SIZE 20
+struct imx_sc_pm_domain {
+	struct generic_pm_domain pd;
+	char name[IMX_SCU_PD_NAME_SIZE];
+	u32 rsrc;
+};
+
+struct imx_sc_pd_range {
+	char *name;
+	u32 rsrc;
+	u8 num;
+
+	/* add domain index */
+	bool postfix;
+	u8 start_from;
+};
+
+struct imx_sc_pd_soc {
+	const struct imx_sc_pd_range *pd_ranges;
+	u8 num_ranges;
+};
+
+static const struct imx_sc_pd_range imx8qxp_scu_pd_ranges[] = {
+	/* LSIO SS */
+	{ "pwm", IMX_SC_R_PWM_0, 8, true, 0 },
+	{ "gpio", IMX_SC_R_GPIO_0, 8, true, 0 },
+	{ "gpt", IMX_SC_R_GPT_0, 5, true, 0 },
+	{ "kpp", IMX_SC_R_KPP, 1, false, 0 },
+	{ "fspi", IMX_SC_R_FSPI_0, 2, true, 0 },
+	{ "mu_a", IMX_SC_R_MU_0A, 14, true, 0 },
+	{ "mu_b", IMX_SC_R_MU_13B, 1, true, 13 },
+
+	/* CONN SS */
+	{ "usb", IMX_SC_R_USB_0, 2, true, 0 },
+	{ "usb0phy", IMX_SC_R_USB_0_PHY, 1, false, 0 },
+	{ "usb2", IMX_SC_R_USB_2, 1, false, 0 },
+	{ "usb2phy", IMX_SC_R_USB_2_PHY, 1, false, 0 },
+	{ "sdhc", IMX_SC_R_SDHC_0, 3, true, 0 },
+	{ "enet", IMX_SC_R_ENET_0, 2, true, 0 },
+	{ "nand", IMX_SC_R_NAND, 1, false, 0 },
+	{ "mlb", IMX_SC_R_MLB_0, 1, true, 0 },
+
+	/* AUDIO SS */
+	{ "audio-pll0", IMX_SC_R_AUDIO_PLL_0, 1, false, 0 },
+	{ "audio-pll1", IMX_SC_R_AUDIO_PLL_1, 1, false, 0 },
+	{ "audio-clk-0", IMX_SC_R_AUDIO_CLK_0, 1, false, 0 },
+	{ "dma0-ch", IMX_SC_R_DMA_0_CH0, 16, true, 0 },
+	{ "dma1-ch", IMX_SC_R_DMA_1_CH0, 16, true, 0 },
+	{ "dma2-ch", IMX_SC_R_DMA_2_CH0, 5, true, 0 },
+	{ "asrc0", IMX_SC_R_ASRC_0, 1, false, 0 },
+	{ "asrc1", IMX_SC_R_ASRC_1, 1, false, 0 },
+	{ "esai0", IMX_SC_R_ESAI_0, 1, false, 0 },
+	{ "spdif0", IMX_SC_R_SPDIF_0, 1, false, 0 },
+	{ "sai", IMX_SC_R_SAI_0, 3, true, 0 },
+	{ "amix", IMX_SC_R_AMIX, 1, false, 0 },
+	{ "mqs0", IMX_SC_R_MQS_0, 1, false, 0 },
+	{ "dsp", IMX_SC_R_DSP, 1, false, 0 },
+	{ "dsp-ram", IMX_SC_R_DSP_RAM, 1, false, 0 },
+
+	/* DMA SS */
+	{ "can", IMX_SC_R_CAN_0, 3, true, 0 },
+	{ "ftm", IMX_SC_R_FTM_0, 2, true, 0 },
+	{ "lpi2c", IMX_SC_R_I2C_0, 4, true, 0 },
+	{ "adc", IMX_SC_R_ADC_0, 1, true, 0 },
+	{ "lcd", IMX_SC_R_LCD_0, 1, true, 0 },
+	{ "lcd0-pwm", IMX_SC_R_LCD_0_PWM_0, 1, true, 0 },
+	{ "lpuart", IMX_SC_R_UART_0, 4, true, 0 },
+	{ "lpspi", IMX_SC_R_SPI_0, 4, true, 0 },
+	{ "irqstr_dsp", IMX_SC_R_IRQSTR_DSP, 1, false, 0 },
+
+	/* VPU SS */
+	{ "vpu", IMX_SC_R_VPU, 1, false, 0 },
+	{ "vpu-pid", IMX_SC_R_VPU_PID0, 8, true, 0 },
+	{ "vpu-dec0", IMX_SC_R_VPU_DEC_0, 1, false, 0 },
+	{ "vpu-enc0", IMX_SC_R_VPU_ENC_0, 1, false, 0 },
+
+	/* GPU SS */
+	{ "gpu0-pid", IMX_SC_R_GPU_0_PID0, 4, true, 0 },
+
+	/* HSIO SS */
+	{ "pcie-b", IMX_SC_R_PCIE_B, 1, false, 0 },
+	{ "serdes-1", IMX_SC_R_SERDES_1, 1, false, 0 },
+	{ "hsio-gpio", IMX_SC_R_HSIO_GPIO, 1, false, 0 },
+
+	/* MIPI SS */
+	{ "mipi0", IMX_SC_R_MIPI_0, 1, false, 0 },
+	{ "mipi0-pwm0", IMX_SC_R_MIPI_0_PWM_0, 1, false, 0 },
+	{ "mipi0-i2c", IMX_SC_R_MIPI_0_I2C_0, 2, true, 0 },
+
+	/* LVDS SS */
+	{ "lvds0", IMX_SC_R_LVDS_0, 1, false, 0 },
+
+	/* DC SS */
+	{ "dc0", IMX_SC_R_DC_0, 1, false, 0 },
+	{ "dc0-pll", IMX_SC_R_DC_0_PLL_0, 2, true, 0 },
+};
+
+static const struct imx_sc_pd_soc imx8qxp_scu_pd = {
+	.pd_ranges = imx8qxp_scu_pd_ranges,
+	.num_ranges = ARRAY_SIZE(imx8qxp_scu_pd_ranges),
+};
+
+static struct imx_sc_ipc *pm_ipc_handle;
+
+static inline struct imx_sc_pm_domain *
+to_imx_sc_pd(struct generic_pm_domain *genpd)
+{
+	return container_of(genpd, struct imx_sc_pm_domain, pd);
+}
+
+static int imx_sc_pd_power(struct generic_pm_domain *domain, bool power_on)
+{
+	struct imx_sc_msg_req_set_resource_power_mode msg;
+	struct imx_sc_rpc_msg *hdr = &msg.hdr;
+	struct imx_sc_pm_domain *pd;
+	int ret;
+
+	pd = to_imx_sc_pd(domain);
+
+	hdr->ver = IMX_SC_RPC_VERSION;
+	hdr->svc = IMX_SC_RPC_SVC_PM;
+	hdr->func = IMX_SC_PM_FUNC_SET_RESOURCE_POWER_MODE;
+	hdr->size = 2;
+
+	msg.resource = pd->rsrc;
+	msg.mode = power_on ? IMX_SC_PM_PW_MODE_ON : IMX_SC_PM_PW_MODE_LP;
+
+	ret = imx_scu_call_rpc(pm_ipc_handle, &msg, true);
+	if (ret)
+		dev_err(&domain->dev, "failed to power %s resource %d ret %d\n",
+			power_on ? "up" : "off", pd->rsrc, ret);
+
+	return ret;
+}
+
+static int imx_sc_pd_power_on(struct generic_pm_domain *domain)
+{
+	return imx_sc_pd_power(domain, true);
+}
+
+static int imx_sc_pd_power_off(struct generic_pm_domain *domain)
+{
+	return imx_sc_pd_power(domain, false);
+}
+
+static struct generic_pm_domain *imx_scu_pd_xlate(struct of_phandle_args *spec,
+						  void *data)
+{
+	struct generic_pm_domain *domain = ERR_PTR(-ENOENT);
+	struct genpd_onecell_data *pd_data = data;
+	unsigned int i;
+
+	for (i = 0; i < pd_data->num_domains; i++) {
+		struct imx_sc_pm_domain *sc_pd;
+
+		sc_pd = to_imx_sc_pd(pd_data->domains[i]);
+		if (sc_pd->rsrc == spec->args[0]) {
+			domain = &sc_pd->pd;
+			break;
+		}
+	}
+
+	return domain;
+}
+
+static struct imx_sc_pm_domain *
+imx_scu_add_pm_domain(struct device *dev, int idx,
+		      const struct imx_sc_pd_range *pd_ranges)
+{
+	struct imx_sc_pm_domain *sc_pd;
+	int ret;
+
+	sc_pd = devm_kzalloc(dev, sizeof(*sc_pd), GFP_KERNEL);
+	if (!sc_pd)
+		return ERR_PTR(-ENOMEM);
+
+	sc_pd->rsrc = pd_ranges->rsrc + idx;
+	sc_pd->pd.power_off = imx_sc_pd_power_off;
+	sc_pd->pd.power_on = imx_sc_pd_power_on;
+
+	if (pd_ranges->postfix)
+		snprintf(sc_pd->name, sizeof(sc_pd->name),
+			 "%s%i", pd_ranges->name, pd_ranges->start_from + idx);
+	else
+		snprintf(sc_pd->name, sizeof(sc_pd->name),
+			 "%s", pd_ranges->name);
+
+	sc_pd->pd.name = sc_pd->name;
+
+	if (sc_pd->rsrc >= IMX_SC_R_LAST) {
+		dev_warn(dev, "invalid pd %s rsrc id %d found",
+			 sc_pd->name, sc_pd->rsrc);
+
+		devm_kfree(dev, sc_pd);
+		return NULL;
+	}
+
+	ret = pm_genpd_init(&sc_pd->pd, NULL, true);
+	if (ret) {
+		dev_warn(dev, "failed to init pd %s rsrc id %d",
+			 sc_pd->name, sc_pd->rsrc);
+		devm_kfree(dev, sc_pd);
+		return NULL;
+	}
+
+	return sc_pd;
+}
+
+static int imx_scu_init_pm_domains(struct device *dev,
+				    const struct imx_sc_pd_soc *pd_soc)
+{
+	const struct imx_sc_pd_range *pd_ranges = pd_soc->pd_ranges;
+	struct generic_pm_domain **domains;
+	struct genpd_onecell_data *pd_data;
+	struct imx_sc_pm_domain *sc_pd;
+	u32 count = 0;
+	int i, j;
+
+	for (i = 0; i < pd_soc->num_ranges; i++)
+		count += pd_ranges[i].num;
+
+	domains = devm_kcalloc(dev, count, sizeof(*domains), GFP_KERNEL);
+	if (!domains)
+		return -ENOMEM;
+
+	pd_data = devm_kzalloc(dev, sizeof(*pd_data), GFP_KERNEL);
+	if (!pd_data)
+		return -ENOMEM;
+
+	count = 0;
+	for (i = 0; i < pd_soc->num_ranges; i++) {
+		for (j = 0; j < pd_ranges[i].num; j++) {
+			sc_pd = imx_scu_add_pm_domain(dev, j, &pd_ranges[i]);
+			if (IS_ERR_OR_NULL(sc_pd))
+				continue;
+
+			domains[count++] = &sc_pd->pd;
+			dev_dbg(dev, "added power domain %s\n", sc_pd->pd.name);
+		}
+	}
+
+	pd_data->domains = domains;
+	pd_data->num_domains = count;
+	pd_data->xlate = imx_scu_pd_xlate;
+
+	of_genpd_add_provider_onecell(dev->of_node, pd_data);
+
+	return 0;
+}
+
+static int imx_sc_pd_probe(struct platform_device *pdev)
+{
+	const struct imx_sc_pd_soc *pd_soc;
+	int ret;
+
+	ret = imx_scu_get_handle(&pm_ipc_handle);
+	if (ret)
+		return ret;
+
+	pd_soc = of_device_get_match_data(&pdev->dev);
+	if (!pd_soc)
+		return -ENODEV;
+
+	return imx_scu_init_pm_domains(&pdev->dev, pd_soc);
+}
+
+static const struct of_device_id imx_sc_pd_match[] = {
+	{ .compatible = "fsl,imx8qxp-scu-pd", &imx8qxp_scu_pd},
+	{ .compatible = "fsl,scu-pd", &imx8qxp_scu_pd},
+	{ /* sentinel */ }
+};
+
+static struct platform_driver imx_sc_pd_driver = {
+	.driver = {
+		.name = "imx-scu-pd",
+		.of_match_table = imx_sc_pd_match,
+	},
+	.probe = imx_sc_pd_probe,
+};
+builtin_platform_driver(imx_sc_pd_driver);
+
+MODULE_AUTHOR("Dong Aisheng <aisheng.dong@nxp.com>");
+MODULE_DESCRIPTION("IMX SCU Power Domain driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/firmware/iscsi_ibft.c b/drivers/firmware/iscsi_ibft.c
index 6bc8e66..7e12cbd 100644
--- a/drivers/firmware/iscsi_ibft.c
+++ b/drivers/firmware/iscsi_ibft.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  *  Copyright 2007-2010 Red Hat, Inc.
  *  by Peter Jones <pjones@redhat.com>
@@ -8,15 +9,6 @@
  *
  * This code exposes the iSCSI Boot Format Table to userland via sysfs.
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License v2.0 as published by
- * the Free Software Foundation
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
  * Changelog:
  *
  *  06 Jan 2010 - Peter Jones <pjones@redhat.com>
@@ -63,7 +55,6 @@
  *
  *  27 Aug 2007 - Konrad Rzeszutek <konradr@linux.vnet.ibm.com>
  *   First version exposing iBFT data via a binary /sysfs. (v0.1)
- *
  */
 
 
@@ -93,6 +84,10 @@
 MODULE_LICENSE("GPL");
 MODULE_VERSION(IBFT_ISCSI_VERSION);
 
+#ifndef CONFIG_ISCSI_IBFT_FIND
+struct acpi_table_ibft *ibft_addr;
+#endif
+
 struct ibft_hdr {
 	u8 id;
 	u8 version;
@@ -425,7 +420,7 @@
 
 	switch (type) {
 	case ISCSI_BOOT_ACPITBL_SIGNATURE:
-		str += sprintf_string(str, ACPI_NAME_SIZE,
+		str += sprintf_string(str, ACPI_NAMESEG_SIZE,
 				      entry->header->header.signature);
 		break;
 	case ISCSI_BOOT_ACPITBL_OEM_ID:
@@ -542,6 +537,7 @@
 	case ISCSI_BOOT_TGT_NIC_ASSOC:
 	case ISCSI_BOOT_TGT_CHAP_TYPE:
 		rc = S_IRUGO;
+		break;
 	case ISCSI_BOOT_TGT_NAME:
 		if (tgt->tgt_name_len)
 			rc = S_IRUGO;
diff --git a/drivers/firmware/iscsi_ibft_find.c b/drivers/firmware/iscsi_ibft_find.c
index 2224f1d..64bb945 100644
--- a/drivers/firmware/iscsi_ibft_find.c
+++ b/drivers/firmware/iscsi_ibft_find.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  *  Copyright 2007-2010 Red Hat, Inc.
  *  by Peter Jones <pjones@redhat.com>
@@ -7,18 +8,9 @@
  *  by Konrad Rzeszutek <ketuzsezr@darnok.org>
  *
  * This code finds the iSCSI Boot Format Table.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License v2.0 as published by
- * the Free Software Foundation
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
  */
 
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
 #include <linux/blkdev.h>
 #include <linux/ctype.h>
 #include <linux/device.h>
@@ -104,7 +96,7 @@
 
 	if (ibft_addr) {
 		*sizep = PAGE_ALIGN(ibft_addr->header.length);
-		return (u64)isa_virt_to_bus(ibft_addr);
+		return (u64)virt_to_phys(ibft_addr);
 	}
 
 	*sizep = 0;
diff --git a/drivers/firmware/memmap.c b/drivers/firmware/memmap.c
index 5de3ed2..24945e2 100644
--- a/drivers/firmware/memmap.c
+++ b/drivers/firmware/memmap.c
@@ -1,17 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * linux/drivers/firmware/memmap.c
  *  Copyright (C) 2008 SUSE LINUX Products GmbH
  *  by Bernhard Walle <bernhard.walle@gmx.de>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License v2.0 as published by
- * the Free Software Foundation
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
  */
 
 #include <linux/string.h>
@@ -19,7 +10,7 @@
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/types.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
 #include <linux/slab.h>
 #include <linux/mm.h>
 
@@ -333,7 +324,8 @@
 {
 	struct firmware_map_entry *entry;
 
-	entry = memblock_virt_alloc(sizeof(struct firmware_map_entry), 0);
+	entry = memblock_alloc(sizeof(struct firmware_map_entry),
+			       SMP_CACHE_BYTES);
 	if (WARN_ON(!entry))
 		return -ENOMEM;
 
diff --git a/drivers/firmware/meson/Kconfig b/drivers/firmware/meson/Kconfig
index 170d7e8..2671dcd 100644
--- a/drivers/firmware/meson/Kconfig
+++ b/drivers/firmware/meson/Kconfig
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
 #
 # Amlogic Secure Monitor driver
 #
diff --git a/drivers/firmware/meson/Makefile b/drivers/firmware/meson/Makefile
index 9ab3884..c6c0948 100644
--- a/drivers/firmware/meson/Makefile
+++ b/drivers/firmware/meson/Makefile
@@ -1 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0-only
 obj-$(CONFIG_MESON_SM) +=	meson_sm.o
diff --git a/drivers/firmware/meson/meson_sm.c b/drivers/firmware/meson/meson_sm.c
index 0ec2ca8..8d908a8 100644
--- a/drivers/firmware/meson/meson_sm.c
+++ b/drivers/firmware/meson/meson_sm.c
@@ -1,15 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * Amlogic Secure Monitor driver
  *
  * Copyright (C) 2016 Endless Mobile, Inc.
  * Author: Carlo Caione <carlo@endlessm.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * version 2 as published by the Free Software Foundation.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
  */
 
 #define pr_fmt(fmt) "meson-sm: " fmt
@@ -24,6 +18,7 @@
 #include <linux/printk.h>
 #include <linux/types.h>
 #include <linux/sizes.h>
+#include <linux/slab.h>
 
 #include <linux/firmware/meson/meson_sm.h>
 
@@ -48,6 +43,7 @@
 		CMD(SM_EFUSE_READ,	0x82000030),
 		CMD(SM_EFUSE_WRITE,	0x82000031),
 		CMD(SM_EFUSE_USER_MAX,	0x82000033),
+		CMD(SM_GET_CHIP_ID,	0x82000044),
 		{ /* sentinel */ },
 	},
 };
@@ -214,6 +210,57 @@
 }
 EXPORT_SYMBOL(meson_sm_call_write);
 
+#define SM_CHIP_ID_LENGTH	119
+#define SM_CHIP_ID_OFFSET	4
+#define SM_CHIP_ID_SIZE		12
+
+static ssize_t serial_show(struct device *dev, struct device_attribute *attr,
+			 char *buf)
+{
+	uint8_t *id_buf;
+	int ret;
+
+	id_buf = kmalloc(SM_CHIP_ID_LENGTH, GFP_KERNEL);
+	if (!id_buf)
+		return -ENOMEM;
+
+	ret = meson_sm_call_read(id_buf, SM_CHIP_ID_LENGTH, SM_GET_CHIP_ID,
+				 0, 0, 0, 0, 0);
+	if (ret < 0) {
+		kfree(id_buf);
+		return ret;
+	}
+
+	ret = sprintf(buf, "%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x\n",
+			id_buf[SM_CHIP_ID_OFFSET + 0],
+			id_buf[SM_CHIP_ID_OFFSET + 1],
+			id_buf[SM_CHIP_ID_OFFSET + 2],
+			id_buf[SM_CHIP_ID_OFFSET + 3],
+			id_buf[SM_CHIP_ID_OFFSET + 4],
+			id_buf[SM_CHIP_ID_OFFSET + 5],
+			id_buf[SM_CHIP_ID_OFFSET + 6],
+			id_buf[SM_CHIP_ID_OFFSET + 7],
+			id_buf[SM_CHIP_ID_OFFSET + 8],
+			id_buf[SM_CHIP_ID_OFFSET + 9],
+			id_buf[SM_CHIP_ID_OFFSET + 10],
+			id_buf[SM_CHIP_ID_OFFSET + 11]);
+
+	kfree(id_buf);
+
+	return ret;
+}
+
+static DEVICE_ATTR_RO(serial);
+
+static struct attribute *meson_sm_sysfs_attributes[] = {
+	&dev_attr_serial.attr,
+	NULL,
+};
+
+static const struct attribute_group meson_sm_sysfs_attr_group = {
+	.attrs = meson_sm_sysfs_attributes,
+};
+
 static const struct of_device_id meson_sm_ids[] = {
 	{ .compatible = "amlogic,meson-gxbb-sm", .data = &gxbb_chip },
 	{ /* sentinel */ },
@@ -242,6 +289,9 @@
 	fw.chip = chip;
 	pr_info("secure-monitor enabled\n");
 
+	if (sysfs_create_group(&pdev->dev.kobj, &meson_sm_sysfs_attr_group))
+		goto out_in_base;
+
 	return 0;
 
 out_in_base:
diff --git a/drivers/firmware/pcdp.c b/drivers/firmware/pcdp.c
index e83d6ae..4adeb7a 100644
--- a/drivers/firmware/pcdp.c
+++ b/drivers/firmware/pcdp.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * Parse the EFI PCDP table to locate the console device.
  *
@@ -5,10 +6,6 @@
  *	Khalid Aziz <khalid.aziz@hp.com>
  *	Alex Williamson <alex.williamson@hp.com>
  *	Bjorn Helgaas <bjorn.helgaas@hp.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
  */
 
 #include <linux/acpi.h>
diff --git a/drivers/firmware/pcdp.h b/drivers/firmware/pcdp.h
index e553060..ce75d1d 100644
--- a/drivers/firmware/pcdp.h
+++ b/drivers/firmware/pcdp.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
 /*
  * Definitions for PCDP-defined console devices
  *
@@ -7,10 +8,6 @@
  * (c) Copyright 2002, 2004 Hewlett-Packard Development Company, L.P.
  *	Khalid Aziz <khalid.aziz@hp.com>
  *	Bjorn Helgaas <bjorn.helgaas@hp.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
  */
 
 #define PCDP_CONSOLE			0
diff --git a/drivers/firmware/psci/Kconfig b/drivers/firmware/psci/Kconfig
new file mode 100644
index 0000000..9794416
--- /dev/null
+++ b/drivers/firmware/psci/Kconfig
@@ -0,0 +1,14 @@
+# SPDX-License-Identifier: GPL-2.0-only
+config ARM_PSCI_FW
+	bool
+
+config ARM_PSCI_CHECKER
+	bool "ARM PSCI checker"
+	depends on ARM_PSCI_FW && HOTPLUG_CPU && CPU_IDLE && !TORTURE_TEST
+	help
+	  Run the PSCI checker during startup. This checks that hotplug and
+	  suspend operations work correctly when using PSCI.
+
+	  The torture tests may interfere with the PSCI checker by turning CPUs
+	  on and off through hotplug, so for now torture tests and PSCI checker
+	  are mutually exclusive.
diff --git a/drivers/firmware/psci/Makefile b/drivers/firmware/psci/Makefile
new file mode 100644
index 0000000..1956b88
--- /dev/null
+++ b/drivers/firmware/psci/Makefile
@@ -0,0 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+obj-$(CONFIG_ARM_PSCI_FW)	+= psci.o
+obj-$(CONFIG_ARM_PSCI_CHECKER)	+= psci_checker.o
diff --git a/drivers/firmware/psci.c b/drivers/firmware/psci/psci.c
similarity index 73%
rename from drivers/firmware/psci.c
rename to drivers/firmware/psci/psci.c
index c80ec1d..84f4ff3 100644
--- a/drivers/firmware/psci.c
+++ b/drivers/firmware/psci/psci.c
@@ -1,12 +1,5 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
  *
  * Copyright (C) 2015 ARM Limited
  */
@@ -88,6 +81,7 @@
 				PSCI_1_0_EXT_POWER_STATE_TYPE_MASK)
 
 static u32 psci_cpu_suspend_feature;
+static bool psci_system_reset2_supported;
 
 static inline bool psci_has_ext_power_state(void)
 {
@@ -95,6 +89,11 @@
 				PSCI_1_0_FEATURES_CPU_SUSPEND_PF_MASK;
 }
 
+static inline bool psci_has_osi_support(void)
+{
+	return psci_cpu_suspend_feature & PSCI_1_0_OS_INITIATED;
+}
+
 static inline bool psci_power_state_loses_context(u32 state)
 {
 	const u32 mask = psci_has_ext_power_state() ?
@@ -104,7 +103,7 @@
 	return state & mask;
 }
 
-static inline bool psci_power_state_is_valid(u32 state)
+bool psci_power_state_is_valid(u32 state)
 {
 	const u32 valid_mask = psci_has_ext_power_state() ?
 			       PSCI_1_0_EXT_POWER_STATE_MASK :
@@ -253,7 +252,17 @@
 
 static void psci_sys_reset(enum reboot_mode reboot_mode, const char *cmd)
 {
-	invoke_psci_fn(PSCI_0_2_FN_SYSTEM_RESET, 0, 0, 0);
+	if ((reboot_mode == REBOOT_WARM || reboot_mode == REBOOT_SOFT) &&
+	    psci_system_reset2_supported) {
+		/*
+		 * reset_type[31] = 0 (architectural)
+		 * reset_type[30:0] = 0 (SYSTEM_WARM_RESET)
+		 * cookie = 0 (ignored by the implementation)
+		 */
+		invoke_psci_fn(PSCI_FN_NATIVE(1_1, SYSTEM_RESET2), 0, 0, 0);
+	} else {
+		invoke_psci_fn(PSCI_0_2_FN_SYSTEM_RESET, 0, 0, 0);
+	}
 }
 
 static void psci_sys_poweroff(void)
@@ -268,171 +277,24 @@
 }
 
 #ifdef CONFIG_CPU_IDLE
-static DEFINE_PER_CPU_READ_MOSTLY(u32 *, psci_power_state);
-
-static int psci_dt_cpu_init_idle(struct device_node *cpu_node, int cpu)
+static int psci_suspend_finisher(unsigned long state)
 {
-	int i, ret, count = 0;
-	u32 *psci_states;
-	struct device_node *state_node;
+	u32 power_state = state;
 
-	/* Count idle states */
-	while ((state_node = of_parse_phandle(cpu_node, "cpu-idle-states",
-					      count))) {
-		count++;
-		of_node_put(state_node);
-	}
-
-	if (!count)
-		return -ENODEV;
-
-	psci_states = kcalloc(count, sizeof(*psci_states), GFP_KERNEL);
-	if (!psci_states)
-		return -ENOMEM;
-
-	for (i = 0; i < count; i++) {
-		u32 state;
-
-		state_node = of_parse_phandle(cpu_node, "cpu-idle-states", i);
-
-		ret = of_property_read_u32(state_node,
-					   "arm,psci-suspend-param",
-					   &state);
-		if (ret) {
-			pr_warn(" * %pOF missing arm,psci-suspend-param property\n",
-				state_node);
-			of_node_put(state_node);
-			goto free_mem;
-		}
-
-		of_node_put(state_node);
-		pr_debug("psci-power-state %#x index %d\n", state, i);
-		if (!psci_power_state_is_valid(state)) {
-			pr_warn("Invalid PSCI power state %#x\n", state);
-			ret = -EINVAL;
-			goto free_mem;
-		}
-		psci_states[i] = state;
-	}
-	/* Idle states parsed correctly, initialize per-cpu pointer */
-	per_cpu(psci_power_state, cpu) = psci_states;
-	return 0;
-
-free_mem:
-	kfree(psci_states);
-	return ret;
+	return psci_ops.cpu_suspend(power_state, __pa_symbol(cpu_resume));
 }
 
-#ifdef CONFIG_ACPI
-#include <acpi/processor.h>
-
-static int __maybe_unused psci_acpi_cpu_init_idle(unsigned int cpu)
+int psci_cpu_suspend_enter(u32 state)
 {
-	int i, count;
-	u32 *psci_states;
-	struct acpi_lpi_state *lpi;
-	struct acpi_processor *pr = per_cpu(processors, cpu);
-
-	if (unlikely(!pr || !pr->flags.has_lpi))
-		return -EINVAL;
-
-	count = pr->power.count - 1;
-	if (count <= 0)
-		return -ENODEV;
-
-	psci_states = kcalloc(count, sizeof(*psci_states), GFP_KERNEL);
-	if (!psci_states)
-		return -ENOMEM;
-
-	for (i = 0; i < count; i++) {
-		u32 state;
-
-		lpi = &pr->power.lpi_states[i + 1];
-		/*
-		 * Only bits[31:0] represent a PSCI power_state while
-		 * bits[63:32] must be 0x0 as per ARM ACPI FFH Specification
-		 */
-		state = lpi->address;
-		if (!psci_power_state_is_valid(state)) {
-			pr_warn("Invalid PSCI power state %#x\n", state);
-			kfree(psci_states);
-			return -EINVAL;
-		}
-		psci_states[i] = state;
-	}
-	/* Idle states parsed correctly, initialize per-cpu pointer */
-	per_cpu(psci_power_state, cpu) = psci_states;
-	return 0;
-}
-#else
-static int __maybe_unused psci_acpi_cpu_init_idle(unsigned int cpu)
-{
-	return -EINVAL;
-}
-#endif
-
-int psci_cpu_init_idle(unsigned int cpu)
-{
-	struct device_node *cpu_node;
 	int ret;
 
-	/*
-	 * If the PSCI cpu_suspend function hook has not been initialized
-	 * idle states must not be enabled, so bail out
-	 */
-	if (!psci_ops.cpu_suspend)
-		return -EOPNOTSUPP;
-
-	if (!acpi_disabled)
-		return psci_acpi_cpu_init_idle(cpu);
-
-	cpu_node = of_get_cpu_node(cpu, NULL);
-	if (!cpu_node)
-		return -ENODEV;
-
-	ret = psci_dt_cpu_init_idle(cpu_node, cpu);
-
-	of_node_put(cpu_node);
-
-	return ret;
-}
-
-static int psci_suspend_finisher(unsigned long index)
-{
-	u32 *state = __this_cpu_read(psci_power_state);
-
-	return psci_ops.cpu_suspend(state[index - 1],
-				    __pa_symbol(cpu_resume));
-}
-
-int psci_cpu_suspend_enter(unsigned long index)
-{
-	int ret;
-	u32 *state = __this_cpu_read(psci_power_state);
-	/*
-	 * idle state index 0 corresponds to wfi, should never be called
-	 * from the cpu_suspend operations
-	 */
-	if (WARN_ON_ONCE(!index))
-		return -EINVAL;
-
-	if (!psci_power_state_loses_context(state[index - 1]))
-		ret = psci_ops.cpu_suspend(state[index - 1], 0);
+	if (!psci_power_state_loses_context(state))
+		ret = psci_ops.cpu_suspend(state, 0);
 	else
-		ret = cpu_suspend(index, psci_suspend_finisher);
+		ret = cpu_suspend(state, psci_suspend_finisher);
 
 	return ret;
 }
-
-/* ARM specific CPU idle operations */
-#ifdef CONFIG_ARM
-static const struct cpuidle_ops psci_cpuidle_ops __initconst = {
-	.suspend = psci_cpu_suspend_enter,
-	.init = psci_dt_cpu_init_idle,
-};
-
-CPUIDLE_METHOD_OF_DECLARE(psci, "psci", &psci_cpuidle_ops);
-#endif
 #endif
 
 static int psci_system_suspend(unsigned long unused)
@@ -451,6 +313,16 @@
 	.enter          = psci_system_suspend_enter,
 };
 
+static void __init psci_init_system_reset2(void)
+{
+	int ret;
+
+	ret = psci_features(PSCI_FN_NATIVE(1_1, SYSTEM_RESET2));
+
+	if (ret != PSCI_RET_NOT_SUPPORTED)
+		psci_system_reset2_supported = true;
+}
+
 static void __init psci_init_system_suspend(void)
 {
 	int ret;
@@ -588,6 +460,7 @@
 		psci_init_smccc();
 		psci_init_cpu_suspend();
 		psci_init_system_suspend();
+		psci_init_system_reset2();
 	}
 
 	return 0;
@@ -605,9 +478,9 @@
 	int err;
 
 	err = get_set_conduit_method(np);
-
 	if (err)
-		goto out_put_node;
+		return err;
+
 	/*
 	 * Starting with v0.2, the PSCI specification introduced a call
 	 * (PSCI_VERSION) that allows probing the firmware version, so
@@ -615,11 +488,7 @@
 	 * can be carried out according to the specific version reported
 	 * by firmware
 	 */
-	err = psci_probe();
-
-out_put_node:
-	of_node_put(np);
-	return err;
+	return psci_probe();
 }
 
 /*
@@ -631,9 +500,8 @@
 	int err;
 
 	err = get_set_conduit_method(np);
-
 	if (err)
-		goto out_put_node;
+		return err;
 
 	pr_info("Using PSCI v0.1 Function IDs from DT\n");
 
@@ -657,15 +525,27 @@
 		psci_ops.migrate = psci_migrate;
 	}
 
-out_put_node:
-	of_node_put(np);
-	return err;
+	return 0;
+}
+
+static int __init psci_1_0_init(struct device_node *np)
+{
+	int err;
+
+	err = psci_0_2_init(np);
+	if (err)
+		return err;
+
+	if (psci_has_osi_support())
+		pr_info("OSI mode supported.\n");
+
+	return 0;
 }
 
 static const struct of_device_id psci_of_match[] __initconst = {
 	{ .compatible = "arm,psci",	.data = psci_0_1_init},
 	{ .compatible = "arm,psci-0.2",	.data = psci_0_2_init},
-	{ .compatible = "arm,psci-1.0",	.data = psci_0_2_init},
+	{ .compatible = "arm,psci-1.0",	.data = psci_1_0_init},
 	{},
 };
 
@@ -674,6 +554,7 @@
 	struct device_node *np;
 	const struct of_device_id *matched_np;
 	psci_initcall_t init_fn;
+	int ret;
 
 	np = of_find_matching_node_and_match(NULL, psci_of_match, &matched_np);
 
@@ -681,7 +562,10 @@
 		return -ENODEV;
 
 	init_fn = (psci_initcall_t)matched_np->data;
-	return init_fn(np);
+	ret = init_fn(np);
+
+	of_node_put(np);
+	return ret;
 }
 
 #ifdef CONFIG_ACPI
diff --git a/drivers/firmware/psci_checker.c b/drivers/firmware/psci/psci_checker.c
similarity index 93%
rename from drivers/firmware/psci_checker.c
rename to drivers/firmware/psci/psci_checker.c
index 3469436..6a44539 100644
--- a/drivers/firmware/psci_checker.c
+++ b/drivers/firmware/psci/psci_checker.c
@@ -1,12 +1,5 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
  *
  * Copyright (C) 2016 ARM Limited
  */
@@ -235,8 +228,11 @@
 
 static void dummy_callback(struct timer_list *unused) {}
 
-static int suspend_cpu(int index, bool broadcast)
+static int suspend_cpu(struct cpuidle_device *dev,
+		       struct cpuidle_driver *drv, int index)
 {
+	struct cpuidle_state *state = &drv->states[index];
+	bool broadcast = state->flags & CPUIDLE_FLAG_TIMER_STOP;
 	int ret;
 
 	arch_cpu_idle_enter();
@@ -261,11 +257,7 @@
 		}
 	}
 
-	/*
-	 * Replicate the common ARM cpuidle enter function
-	 * (arm_enter_idle_state).
-	 */
-	ret = CPU_PM_CPU_IDLE_ENTER(arm_cpuidle_suspend, index);
+	ret = state->enter(dev, drv, index);
 
 	if (broadcast)
 		tick_broadcast_exit();
@@ -308,9 +300,8 @@
 		 * doesn't use PSCI).
 		 */
 		for (index = 1; index < drv->state_count; ++index) {
-			struct cpuidle_state *state = &drv->states[index];
-			bool broadcast = state->flags & CPUIDLE_FLAG_TIMER_STOP;
 			int ret;
+			struct cpuidle_state *state = &drv->states[index];
 
 			/*
 			 * Set the timer to wake this CPU up in some time (which
@@ -325,7 +316,7 @@
 			/* IRQs must be disabled during suspend operations. */
 			local_irq_disable();
 
-			ret = suspend_cpu(index, broadcast);
+			ret = suspend_cpu(dev, drv, index);
 
 			/*
 			 * We have woken up. Re-enable IRQs to handle any
@@ -366,16 +357,16 @@
 	for (;;) {
 		/* Needs to be set first to avoid missing a wakeup. */
 		set_current_state(TASK_INTERRUPTIBLE);
-		if (kthread_should_stop()) {
-			__set_current_state(TASK_RUNNING);
+		if (kthread_should_park())
 			break;
-		}
 		schedule();
 	}
 
 	pr_info("CPU %d suspend test results: success %d, shallow states %d, errors %d\n",
 		cpu, nb_suspend, nb_shallow_sleep, nb_err);
 
+	kthread_parkme();
+
 	return nb_err;
 }
 
@@ -440,8 +431,10 @@
 
 
 	/* Stop and destroy all threads, get return status. */
-	for (i = 0; i < nb_threads; ++i)
+	for (i = 0; i < nb_threads; ++i) {
+		err += kthread_park(threads[i]);
 		err += kthread_stop(threads[i]);
+	}
  out:
 	cpuidle_resume_and_unlock();
 	kfree(threads);
diff --git a/drivers/firmware/qcom_scm-32.c b/drivers/firmware/qcom_scm-32.c
index 4e24e59..215061c 100644
--- a/drivers/firmware/qcom_scm-32.c
+++ b/drivers/firmware/qcom_scm-32.c
@@ -1,19 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /* Copyright (c) 2010,2015, The Linux Foundation. All rights reserved.
  * Copyright (C) 2015 Linaro Ltd.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
  */
 
 #include <linux/slab.h>
diff --git a/drivers/firmware/qcom_scm-64.c b/drivers/firmware/qcom_scm-64.c
index 688525d..91d5ad7 100644
--- a/drivers/firmware/qcom_scm-64.c
+++ b/drivers/firmware/qcom_scm-64.c
@@ -1,13 +1,5 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /* Copyright (c) 2015, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
  */
 
 #include <linux/io.h>
diff --git a/drivers/firmware/qcom_scm.c b/drivers/firmware/qcom_scm.c
index e778af7..4802ab1 100644
--- a/drivers/firmware/qcom_scm.c
+++ b/drivers/firmware/qcom_scm.c
@@ -1,23 +1,15 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * Qualcomm SCM driver
  *
  * Copyright (c) 2010,2015, The Linux Foundation. All rights reserved.
  * Copyright (C) 2015 Linaro Ltd.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
  */
 #include <linux/platform_device.h>
 #include <linux/init.h>
 #include <linux/cpumask.h>
 #include <linux/export.h>
+#include <linux/dma-direct.h>
 #include <linux/dma-mapping.h>
 #include <linux/module.h>
 #include <linux/types.h>
@@ -434,21 +426,23 @@
  * @mem_sz:   size of the region.
  * @srcvm:    vmid for current set of owners, each set bit in
  *            flag indicate a unique owner
- * @newvm:    array having new owners and corrsponding permission
+ * @newvm:    array having new owners and corresponding permission
  *            flags
  * @dest_cnt: number of owners in next set.
  *
- * Return negative errno on failure, 0 on success, with @srcvm updated.
+ * Return negative errno on failure or 0 on success with @srcvm updated.
  */
 int qcom_scm_assign_mem(phys_addr_t mem_addr, size_t mem_sz,
 			unsigned int *srcvm,
-			struct qcom_scm_vmperm *newvm, int dest_cnt)
+			const struct qcom_scm_vmperm *newvm,
+			unsigned int dest_cnt)
 {
 	struct qcom_scm_current_perm_info *destvm;
 	struct qcom_scm_mem_map_info *mem_to_map;
 	phys_addr_t mem_to_map_phys;
 	phys_addr_t dest_phys;
 	phys_addr_t ptr_phys;
+	dma_addr_t ptr_dma;
 	size_t mem_to_map_sz;
 	size_t dest_sz;
 	size_t src_sz;
@@ -456,52 +450,50 @@
 	int next_vm;
 	__le32 *src;
 	void *ptr;
-	int ret;
-	int len;
-	int i;
+	int ret, i, b;
+	unsigned long srcvm_bits = *srcvm;
 
-	src_sz = hweight_long(*srcvm) * sizeof(*src);
+	src_sz = hweight_long(srcvm_bits) * sizeof(*src);
 	mem_to_map_sz = sizeof(*mem_to_map);
 	dest_sz = dest_cnt * sizeof(*destvm);
 	ptr_sz = ALIGN(src_sz, SZ_64) + ALIGN(mem_to_map_sz, SZ_64) +
 			ALIGN(dest_sz, SZ_64);
 
-	ptr = dma_alloc_coherent(__scm->dev, ptr_sz, &ptr_phys, GFP_KERNEL);
+	ptr = dma_alloc_coherent(__scm->dev, ptr_sz, &ptr_dma, GFP_KERNEL);
 	if (!ptr)
 		return -ENOMEM;
+	ptr_phys = dma_to_phys(__scm->dev, ptr_dma);
 
 	/* Fill source vmid detail */
 	src = ptr;
-	len = hweight_long(*srcvm);
-	for (i = 0; i < len; i++) {
-		src[i] = cpu_to_le32(ffs(*srcvm) - 1);
-		*srcvm ^= 1 << (ffs(*srcvm) - 1);
-	}
+	i = 0;
+	for_each_set_bit(b, &srcvm_bits, BITS_PER_LONG)
+		src[i++] = cpu_to_le32(b);
 
 	/* Fill details of mem buff to map */
 	mem_to_map = ptr + ALIGN(src_sz, SZ_64);
 	mem_to_map_phys = ptr_phys + ALIGN(src_sz, SZ_64);
-	mem_to_map[0].mem_addr = cpu_to_le64(mem_addr);
-	mem_to_map[0].mem_size = cpu_to_le64(mem_sz);
+	mem_to_map->mem_addr = cpu_to_le64(mem_addr);
+	mem_to_map->mem_size = cpu_to_le64(mem_sz);
 
 	next_vm = 0;
 	/* Fill details of next vmid detail */
 	destvm = ptr + ALIGN(mem_to_map_sz, SZ_64) + ALIGN(src_sz, SZ_64);
 	dest_phys = ptr_phys + ALIGN(mem_to_map_sz, SZ_64) + ALIGN(src_sz, SZ_64);
-	for (i = 0; i < dest_cnt; i++) {
-		destvm[i].vmid = cpu_to_le32(newvm[i].vmid);
-		destvm[i].perm = cpu_to_le32(newvm[i].perm);
-		destvm[i].ctx = 0;
-		destvm[i].ctx_size = 0;
-		next_vm |= BIT(newvm[i].vmid);
+	for (i = 0; i < dest_cnt; i++, destvm++, newvm++) {
+		destvm->vmid = cpu_to_le32(newvm->vmid);
+		destvm->perm = cpu_to_le32(newvm->perm);
+		destvm->ctx = 0;
+		destvm->ctx_size = 0;
+		next_vm |= BIT(newvm->vmid);
 	}
 
 	ret = __qcom_scm_assign_mem(__scm->dev, mem_to_map_phys, mem_to_map_sz,
 				    ptr_phys, src_sz, dest_phys, dest_sz);
-	dma_free_coherent(__scm->dev, ALIGN(ptr_sz, SZ_64), ptr, ptr_phys);
+	dma_free_coherent(__scm->dev, ptr_sz, ptr, ptr_dma);
 	if (ret) {
 		dev_err(__scm->dev,
-			"Assign memory protection call failed %d.\n", ret);
+			"Assign memory protection call failed %d\n", ret);
 		return -EINVAL;
 	}
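
As a usage illustration of the qcom_scm_assign_mem() API documented above, here is a minimal, hedged sketch of a caller reassigning a buffer to another VM. The destination VMID, permissions, and buffer are assumptions chosen for the example; the constants come from include/linux/qcom_scm.h.

#include <linux/bits.h>
#include <linux/kernel.h>
#include <linux/qcom_scm.h>

/* Illustrative only: hand a physically contiguous buffer from HLOS to the
 * modem VM with read/write permission. On success, src_vm is updated to the
 * new owner set and can be passed back later to return the buffer to HLOS.
 */
static int example_assign_to_mss(phys_addr_t buf_phys, size_t buf_sz)
{
	unsigned int src_vm = BIT(QCOM_SCM_VMID_HLOS);
	struct qcom_scm_vmperm next[] = {
		{ .vmid = QCOM_SCM_VMID_MSS_MSA, .perm = QCOM_SCM_PERM_RW },
	};

	return qcom_scm_assign_mem(buf_phys, buf_sz, &src_vm,
				   next, ARRAY_SIZE(next));
}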
 
@@ -525,34 +517,44 @@
 		return ret;
 
 	clks = (unsigned long)of_device_get_match_data(&pdev->dev);
-	if (clks & SCM_HAS_CORE_CLK) {
-		scm->core_clk = devm_clk_get(&pdev->dev, "core");
-		if (IS_ERR(scm->core_clk)) {
-			if (PTR_ERR(scm->core_clk) != -EPROBE_DEFER)
-				dev_err(&pdev->dev,
-					"failed to acquire core clk\n");
+
+	scm->core_clk = devm_clk_get(&pdev->dev, "core");
+	if (IS_ERR(scm->core_clk)) {
+		if (PTR_ERR(scm->core_clk) == -EPROBE_DEFER)
+			return PTR_ERR(scm->core_clk);
+
+		if (clks & SCM_HAS_CORE_CLK) {
+			dev_err(&pdev->dev, "failed to acquire core clk\n");
 			return PTR_ERR(scm->core_clk);
 		}
+
+		scm->core_clk = NULL;
 	}
 
-	if (clks & SCM_HAS_IFACE_CLK) {
-		scm->iface_clk = devm_clk_get(&pdev->dev, "iface");
-		if (IS_ERR(scm->iface_clk)) {
-			if (PTR_ERR(scm->iface_clk) != -EPROBE_DEFER)
-				dev_err(&pdev->dev,
-					"failed to acquire iface clk\n");
+	scm->iface_clk = devm_clk_get(&pdev->dev, "iface");
+	if (IS_ERR(scm->iface_clk)) {
+		if (PTR_ERR(scm->iface_clk) == -EPROBE_DEFER)
+			return PTR_ERR(scm->iface_clk);
+
+		if (clks & SCM_HAS_IFACE_CLK) {
+			dev_err(&pdev->dev, "failed to acquire iface clk\n");
 			return PTR_ERR(scm->iface_clk);
 		}
+
+		scm->iface_clk = NULL;
 	}
 
-	if (clks & SCM_HAS_BUS_CLK) {
-		scm->bus_clk = devm_clk_get(&pdev->dev, "bus");
-		if (IS_ERR(scm->bus_clk)) {
-			if (PTR_ERR(scm->bus_clk) != -EPROBE_DEFER)
-				dev_err(&pdev->dev,
-					"failed to acquire bus clk\n");
+	scm->bus_clk = devm_clk_get(&pdev->dev, "bus");
+	if (IS_ERR(scm->bus_clk)) {
+		if (PTR_ERR(scm->bus_clk) == -EPROBE_DEFER)
+			return PTR_ERR(scm->bus_clk);
+
+		if (clks & SCM_HAS_BUS_CLK) {
+			dev_err(&pdev->dev, "failed to acquire bus clk\n");
 			return PTR_ERR(scm->bus_clk);
 		}
+
+		scm->bus_clk = NULL;
 	}
 
 	scm->reset.ops = &qcom_scm_pas_reset_ops;
@@ -594,23 +596,23 @@
 	{ .compatible = "qcom,scm-apq8064",
 	  /* FIXME: This should have .data = (void *) SCM_HAS_CORE_CLK */
 	},
-	{ .compatible = "qcom,scm-msm8660",
-	  .data = (void *) SCM_HAS_CORE_CLK,
+	{ .compatible = "qcom,scm-apq8084", .data = (void *)(SCM_HAS_CORE_CLK |
+							     SCM_HAS_IFACE_CLK |
+							     SCM_HAS_BUS_CLK)
 	},
-	{ .compatible = "qcom,scm-msm8960",
-	  .data = (void *) SCM_HAS_CORE_CLK,
+	{ .compatible = "qcom,scm-ipq4019" },
+	{ .compatible = "qcom,scm-msm8660", .data = (void *) SCM_HAS_CORE_CLK },
+	{ .compatible = "qcom,scm-msm8960", .data = (void *) SCM_HAS_CORE_CLK },
+	{ .compatible = "qcom,scm-msm8916", .data = (void *)(SCM_HAS_CORE_CLK |
+							     SCM_HAS_IFACE_CLK |
+							     SCM_HAS_BUS_CLK)
 	},
-	{ .compatible = "qcom,scm-msm8996",
-	  .data = NULL, /* no clocks */
+	{ .compatible = "qcom,scm-msm8974", .data = (void *)(SCM_HAS_CORE_CLK |
+							     SCM_HAS_IFACE_CLK |
+							     SCM_HAS_BUS_CLK)
 	},
-	{ .compatible = "qcom,scm-ipq4019",
-	  .data = NULL, /* no clocks */
-	},
-	{ .compatible = "qcom,scm",
-	  .data = (void *)(SCM_HAS_CORE_CLK
-			   | SCM_HAS_IFACE_CLK
-			   | SCM_HAS_BUS_CLK),
-	},
+	{ .compatible = "qcom,scm-msm8996" },
+	{ .compatible = "qcom,scm" },
 	{}
 };
 
diff --git a/drivers/firmware/qcom_scm.h b/drivers/firmware/qcom_scm.h
index dcd7f79..99506bd 100644
--- a/drivers/firmware/qcom_scm.h
+++ b/drivers/firmware/qcom_scm.h
@@ -1,13 +1,5 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
 /* Copyright (c) 2010-2015, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
  */
 #ifndef __QCOM_SCM_INT_H
 #define __QCOM_SCM_INT_H
diff --git a/drivers/firmware/raspberrypi.c b/drivers/firmware/raspberrypi.c
index a200a21..da26a58 100644
--- a/drivers/firmware/raspberrypi.c
+++ b/drivers/firmware/raspberrypi.c
@@ -1,12 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * Defines interfaces for interacting with the Raspberry Pi firmware's
  * property channel.
  *
  * Copyright © 2015 Broadcom
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
  */
 
 #include <linux/dma-mapping.h>
@@ -14,6 +11,7 @@
 #include <linux/module.h>
 #include <linux/of_platform.h>
 #include <linux/platform_device.h>
+#include <linux/slab.h>
 #include <soc/bcm2835/raspberrypi-firmware.h>
 
 #define MBOX_MSG(chan, data28)		(((data28) & ~0xf) | ((chan) & 0xf))
@@ -21,9 +19,8 @@
 #define MBOX_DATA28(msg)		((msg) & ~0xf)
 #define MBOX_CHAN_PROPERTY		8
 
-#define MAX_RPI_FW_PROP_BUF_SIZE	32
-
 static struct platform_device *rpi_hwmon;
+static struct platform_device *rpi_clk;
 
 struct rpi_firmware {
 	struct mbox_client cl;
@@ -56,8 +53,12 @@
 	reinit_completion(&fw->c);
 	ret = mbox_send_message(fw->chan, &message);
 	if (ret >= 0) {
-		wait_for_completion(&fw->c);
-		ret = 0;
+		if (wait_for_completion_timeout(&fw->c, HZ)) {
+			ret = 0;
+		} else {
+			ret = -ETIMEDOUT;
+			WARN_ONCE(1, "Firmware transaction timeout");
+		}
 	} else {
 		dev_err(fw->cl.dev, "mbox_send_message returned %d\n", ret);
 	}
@@ -144,28 +145,30 @@
 int rpi_firmware_property(struct rpi_firmware *fw,
 			  u32 tag, void *tag_data, size_t buf_size)
 {
-	/* Single tags are very small (generally 8 bytes), so the
-	 * stack should be safe.
-	 */
-	u8 data[sizeof(struct rpi_firmware_property_tag_header) +
-		MAX_RPI_FW_PROP_BUF_SIZE];
-	struct rpi_firmware_property_tag_header *header =
-		(struct rpi_firmware_property_tag_header *)data;
+	struct rpi_firmware_property_tag_header *header;
 	int ret;
 
-	if (WARN_ON(buf_size > sizeof(data) - sizeof(*header)))
-		return -EINVAL;
+	/* Some mailboxes can use over 1k bytes. Rather than checking
+	 * size and using stack or kmalloc depending on requirements,
+	 * just use kmalloc. Mailboxes don't get called enough to worry
+	 * too much about the time taken in the allocation.
+	 */
+	void *data = kmalloc(sizeof(*header) + buf_size, GFP_KERNEL);
 
+	if (!data)
+		return -ENOMEM;
+
+	header = data;
 	header->tag = tag;
 	header->buf_size = buf_size;
 	header->req_resp_size = 0;
-	memcpy(data + sizeof(struct rpi_firmware_property_tag_header),
-	       tag_data, buf_size);
+	memcpy(data + sizeof(*header), tag_data, buf_size);
 
-	ret = rpi_firmware_property_list(fw, &data, buf_size + sizeof(*header));
-	memcpy(tag_data,
-	       data + sizeof(struct rpi_firmware_property_tag_header),
-	       buf_size);
+	ret = rpi_firmware_property_list(fw, data, buf_size + sizeof(*header));
+
+	memcpy(tag_data, data + sizeof(*header), buf_size);
+
+	kfree(data);
 
 	return ret;
 }
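
For reference, a minimal sketch of how a client uses the single-tag helper after this change; the wrapper function below is hypothetical, while the tag and rpi_firmware_get() are existing parts of this interface.

#include <soc/bcm2835/raspberrypi-firmware.h>

/* Hypothetical caller: query the firmware revision through the reworked
 * single-tag helper. 'fw' would normally be obtained with rpi_firmware_get()
 * on the firmware device-tree node.
 */
static int example_get_fw_revision(struct rpi_firmware *fw, u32 *revision)
{
	*revision = 0;
	return rpi_firmware_property(fw, RPI_FIRMWARE_GET_FIRMWARE_REVISION,
				     revision, sizeof(*revision));
}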
@@ -205,6 +208,12 @@
 						  -1, NULL, 0);
 }
 
+static void rpi_register_clk_driver(struct device *dev)
+{
+	rpi_clk = platform_device_register_data(dev, "raspberrypi-clk",
+						-1, NULL, 0);
+}
+
 static int rpi_firmware_probe(struct platform_device *pdev)
 {
 	struct device *dev = &pdev->dev;
@@ -232,16 +241,29 @@
 
 	rpi_firmware_print_firmware_revision(fw);
 	rpi_register_hwmon_driver(dev, fw);
+	rpi_register_clk_driver(dev);
 
 	return 0;
 }
 
+static void rpi_firmware_shutdown(struct platform_device *pdev)
+{
+	struct rpi_firmware *fw = platform_get_drvdata(pdev);
+
+	if (!fw)
+		return;
+
+	rpi_firmware_property(fw, RPI_FIRMWARE_NOTIFY_REBOOT, NULL, 0);
+}
+
 static int rpi_firmware_remove(struct platform_device *pdev)
 {
 	struct rpi_firmware *fw = platform_get_drvdata(pdev);
 
 	platform_device_unregister(rpi_hwmon);
 	rpi_hwmon = NULL;
+	platform_device_unregister(rpi_clk);
+	rpi_clk = NULL;
 	mbox_free_channel(fw->chan);
 
 	return 0;
@@ -276,6 +298,7 @@
 		.of_match_table = rpi_firmware_of_match,
 	},
 	.probe		= rpi_firmware_probe,
+	.shutdown	= rpi_firmware_shutdown,
 	.remove		= rpi_firmware_remove,
 };
 module_platform_driver(rpi_firmware_driver);
diff --git a/drivers/firmware/scpi_pm_domain.c b/drivers/firmware/scpi_pm_domain.c
index f395dec..5120160 100644
--- a/drivers/firmware/scpi_pm_domain.c
+++ b/drivers/firmware/scpi_pm_domain.c
@@ -1,19 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * SCPI Generic power domain support.
  *
  * Copyright (C) 2016 ARM Ltd.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program. If not, see <http://www.gnu.org/licenses/>.
  */
 
 #include <linux/err.h>
@@ -121,7 +110,7 @@
 
 		scpi_pd->domain = i;
 		scpi_pd->ops = scpi_ops;
-		sprintf(scpi_pd->name, "%s.%d", np->name, i);
+		sprintf(scpi_pd->name, "%pOFn.%d", np, i);
 		scpi_pd->genpd.name = scpi_pd->name;
 		scpi_pd->genpd.power_off = scpi_pd_power_off;
 		scpi_pd->genpd.power_on = scpi_pd_power_on;
diff --git a/drivers/firmware/stratix10-rsu.c b/drivers/firmware/stratix10-rsu.c
new file mode 100644
index 0000000..bb008c0
--- /dev/null
+++ b/drivers/firmware/stratix10-rsu.c
@@ -0,0 +1,451 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2018-2019, Intel Corporation
+ */
+
+#include <linux/arm-smccc.h>
+#include <linux/bitfield.h>
+#include <linux/completion.h>
+#include <linux/kobject.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/firmware/intel/stratix10-svc-client.h>
+#include <linux/string.h>
+#include <linux/sysfs.h>
+
+#define RSU_STATE_MASK			GENMASK_ULL(31, 0)
+#define RSU_VERSION_MASK		GENMASK_ULL(63, 32)
+#define RSU_ERROR_LOCATION_MASK		GENMASK_ULL(31, 0)
+#define RSU_ERROR_DETAIL_MASK		GENMASK_ULL(63, 32)
+#define RSU_FW_VERSION_MASK		GENMASK_ULL(15, 0)
+
+#define RSU_TIMEOUT	(msecs_to_jiffies(SVC_RSU_REQUEST_TIMEOUT_MS))
+
+#define INVALID_RETRY_COUNTER		0xFFFFFFFF
+
+typedef void (*rsu_callback)(struct stratix10_svc_client *client,
+			     struct stratix10_svc_cb_data *data);
+/**
+ * struct stratix10_rsu_priv - rsu data structure
+ * @chan: pointer to the allocated service channel
+ * @client: active service client
+ * @completion: state for callback completion
+ * @lock: a mutex to protect callback completion state
+ * @status.current_image: address of image currently running in flash
+ * @status.fail_image: address of failed image in flash
+ * @status.version: the version number of RSU firmware
+ * @status.state: the state of RSU system
+ * @status.error_details: error code
+ * @status.error_location: the error offset inside the image that failed
+ * @retry_counter: the current image's retry counter
+ */
+struct stratix10_rsu_priv {
+	struct stratix10_svc_chan *chan;
+	struct stratix10_svc_client client;
+	struct completion completion;
+	struct mutex lock;
+	struct {
+		unsigned long current_image;
+		unsigned long fail_image;
+		unsigned int version;
+		unsigned int state;
+		unsigned int error_details;
+		unsigned int error_location;
+	} status;
+	unsigned int retry_counter;
+};
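
To make the mask definitions and status fields above concrete, a small worked sketch (the input value is invented) of how FIELD_GET() splits the 64-bit word returned by the firmware into the state and version members:

#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/types.h>

/* Illustration only: with an assumed a2 of 0x0000000200000001ULL,
 * RSU_STATE_MASK (bits 31:0) yields 0x1 and RSU_VERSION_MASK (bits 63:32)
 * yields 0x2, matching how rsu_status_callback() fills the status struct.
 */
static void example_decode_status(u64 a2, unsigned int *state,
				  unsigned int *version)
{
	*state = FIELD_GET(RSU_STATE_MASK, a2);
	*version = FIELD_GET(RSU_VERSION_MASK, a2);
}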
+
+/**
+ * rsu_status_callback() - Status callback from Intel Service Layer
+ * @client: pointer to service client
+ * @data: pointer to callback data structure
+ *
+ * Callback from Intel service layer for RSU status request. Status is
+ * only updated after a system reboot, so the status is queried once during
+ * driver probe.
+ */
+static void rsu_status_callback(struct stratix10_svc_client *client,
+				struct stratix10_svc_cb_data *data)
+{
+	struct stratix10_rsu_priv *priv = client->priv;
+	struct arm_smccc_res *res = (struct arm_smccc_res *)data->kaddr1;
+
+	if (data->status == BIT(SVC_STATUS_RSU_OK)) {
+		priv->status.version = FIELD_GET(RSU_VERSION_MASK,
+						 res->a2);
+		priv->status.state = FIELD_GET(RSU_STATE_MASK, res->a2);
+		priv->status.fail_image = res->a1;
+		priv->status.current_image = res->a0;
+		priv->status.error_location =
+			FIELD_GET(RSU_ERROR_LOCATION_MASK, res->a3);
+		priv->status.error_details =
+			FIELD_GET(RSU_ERROR_DETAIL_MASK, res->a3);
+	} else {
+		dev_err(client->dev, "COMMAND_RSU_STATUS returned 0x%lX\n",
+			res->a0);
+		priv->status.version = 0;
+		priv->status.state = 0;
+		priv->status.fail_image = 0;
+		priv->status.current_image = 0;
+		priv->status.error_location = 0;
+		priv->status.error_details = 0;
+	}
+
+	complete(&priv->completion);
+}
+
+/**
+ * rsu_command_callback() - Update callback from Intel Service Layer
+ * @client: pointer to client
+ * @data: pointer to callback data structure
+ *
+ * Callback from Intel service layer for RSU commands.
+ */
+static void rsu_command_callback(struct stratix10_svc_client *client,
+				 struct stratix10_svc_cb_data *data)
+{
+	struct stratix10_rsu_priv *priv = client->priv;
+
+	if (data->status != BIT(SVC_STATUS_RSU_OK))
+		dev_err(client->dev, "RSU returned status is %i\n",
+			data->status);
+	complete(&priv->completion);
+}
+
+/**
+ * rsu_retry_callback() - Callback from Intel service layer for getting
+ * the current image's retry counter from firmware
+ * @client: pointer to client
+ * @data: pointer to callback data structure
+ *
+ * Callback from the Intel service layer for the retry counter, which is
+ * used by user space to know how many times the image is still allowed to
+ * reload itself before giving up and starting the RSU fail-over flow.
+ */
+static void rsu_retry_callback(struct stratix10_svc_client *client,
+			       struct stratix10_svc_cb_data *data)
+{
+	struct stratix10_rsu_priv *priv = client->priv;
+	unsigned int *counter = (unsigned int *)data->kaddr1;
+
+	if (data->status == BIT(SVC_STATUS_RSU_OK))
+		priv->retry_counter = *counter;
+	else
+		dev_err(client->dev, "Failed to get retry counter %i\n",
+			data->status);
+
+	complete(&priv->completion);
+}
+
+/**
+ * rsu_send_msg() - send a message to Intel service layer
+ * @priv: pointer to rsu private data
+ * @command: RSU status or update command
+ * @arg: the request argument, the bitstream address or notify status
+ * @callback: function pointer for the callback (status or update)
+ *
+ * Start an Intel service layer transaction to perform the SMC call that
+ * is necessary to get the RSU boot log or to set the address of the
+ * bitstream to boot after reboot.
+ *
+ * Returns 0 on success or -ETIMEDOUT on error.
+ */
+static int rsu_send_msg(struct stratix10_rsu_priv *priv,
+			enum stratix10_svc_command_code command,
+			unsigned long arg,
+			rsu_callback callback)
+{
+	struct stratix10_svc_client_msg msg;
+	int ret;
+
+	mutex_lock(&priv->lock);
+	reinit_completion(&priv->completion);
+	priv->client.receive_cb = callback;
+
+	msg.command = command;
+	if (arg)
+		msg.arg[0] = arg;
+
+	ret = stratix10_svc_send(priv->chan, &msg);
+	if (ret < 0)
+		goto status_done;
+
+	ret = wait_for_completion_interruptible_timeout(&priv->completion,
+							RSU_TIMEOUT);
+	if (!ret) {
+		dev_err(priv->client.dev,
+			"timeout waiting for SMC call\n");
+		ret = -ETIMEDOUT;
+		goto status_done;
+	} else if (ret < 0) {
+		dev_err(priv->client.dev,
+			"error %d waiting for SMC call\n", ret);
+		goto status_done;
+	} else {
+		ret = 0;
+	}
+
+status_done:
+	stratix10_svc_done(priv->chan);
+	mutex_unlock(&priv->lock);
+	return ret;
+}
+
+/*
+ * This driver exposes some optional features of the Intel Stratix 10 SoC FPGA.
+ * The sysfs interfaces exposed here are FPGA Remote System Update (RSU)
+ * related. They allow user space software to query the configuration system
+ * status and to request optional reboot behavior specific to Intel FPGAs.
+ */
+
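
For example, user space could read the RSU state like this; the sysfs path is an assumption (it depends on how the platform device is named on a given system), so treat this as a sketch only.

#include <stdio.h>

/* Hypothetical path; confirm the real attribute location under /sys before
 * relying on it.
 */
#define RSU_STATE_ATTR "/sys/devices/platform/stratix10-rsu.0/state"

int main(void)
{
	char buf[32];
	FILE *f = fopen(RSU_STATE_ATTR, "r");

	if (!f)
		return 1;
	if (fgets(buf, sizeof(buf), f))
		printf("RSU state: %s", buf);
	fclose(f);
	return 0;
}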
+static ssize_t current_image_show(struct device *dev,
+				  struct device_attribute *attr, char *buf)
+{
+	struct stratix10_rsu_priv *priv = dev_get_drvdata(dev);
+
+	if (!priv)
+		return -ENODEV;
+
+	return sprintf(buf, "0x%08lx\n", priv->status.current_image);
+}
+
+static ssize_t fail_image_show(struct device *dev,
+			       struct device_attribute *attr, char *buf)
+{
+	struct stratix10_rsu_priv *priv = dev_get_drvdata(dev);
+
+	if (!priv)
+		return -ENODEV;
+
+	return sprintf(buf, "0x%08lx\n", priv->status.fail_image);
+}
+
+static ssize_t version_show(struct device *dev, struct device_attribute *attr,
+			    char *buf)
+{
+	struct stratix10_rsu_priv *priv = dev_get_drvdata(dev);
+
+	if (!priv)
+		return -ENODEV;
+
+	return sprintf(buf, "0x%08x\n", priv->status.version);
+}
+
+static ssize_t state_show(struct device *dev, struct device_attribute *attr,
+			  char *buf)
+{
+	struct stratix10_rsu_priv *priv = dev_get_drvdata(dev);
+
+	if (!priv)
+		return -ENODEV;
+
+	return sprintf(buf, "0x%08x\n", priv->status.state);
+}
+
+static ssize_t error_location_show(struct device *dev,
+				   struct device_attribute *attr, char *buf)
+{
+	struct stratix10_rsu_priv *priv = dev_get_drvdata(dev);
+
+	if (!priv)
+		return -ENODEV;
+
+	return sprintf(buf, "0x%08x\n", priv->status.error_location);
+}
+
+static ssize_t error_details_show(struct device *dev,
+				  struct device_attribute *attr, char *buf)
+{
+	struct stratix10_rsu_priv *priv = dev_get_drvdata(dev);
+
+	if (!priv)
+		return -ENODEV;
+
+	return sprintf(buf, "0x%08x\n", priv->status.error_details);
+}
+
+static ssize_t retry_counter_show(struct device *dev,
+				  struct device_attribute *attr, char *buf)
+{
+	struct stratix10_rsu_priv *priv = dev_get_drvdata(dev);
+
+	if (!priv)
+		return -ENODEV;
+
+	return sprintf(buf, "0x%08x\n", priv->retry_counter);
+}
+
+static ssize_t reboot_image_store(struct device *dev,
+				  struct device_attribute *attr,
+				  const char *buf, size_t count)
+{
+	struct stratix10_rsu_priv *priv = dev_get_drvdata(dev);
+	unsigned long address;
+	int ret;
+
+	if (!priv)
+		return -ENODEV;
+
+	ret = kstrtoul(buf, 0, &address);
+	if (ret)
+		return ret;
+
+	ret = rsu_send_msg(priv, COMMAND_RSU_UPDATE,
+			   address, rsu_command_callback);
+	if (ret) {
+		dev_err(dev, "Error, RSU update returned %i\n", ret);
+		return ret;
+	}
+
+	return count;
+}
+
+static ssize_t notify_store(struct device *dev,
+			    struct device_attribute *attr,
+			    const char *buf, size_t count)
+{
+	struct stratix10_rsu_priv *priv = dev_get_drvdata(dev);
+	unsigned long status;
+	int ret;
+
+	if (!priv)
+		return -ENODEV;
+
+	ret = kstrtoul(buf, 0, &status);
+	if (ret)
+		return ret;
+
+	ret = rsu_send_msg(priv, COMMAND_RSU_NOTIFY,
+			   status, rsu_command_callback);
+	if (ret) {
+		dev_err(dev, "Error, RSU notify returned %i\n", ret);
+		return ret;
+	}
+
+	/* to get the updated state */
+	ret = rsu_send_msg(priv, COMMAND_RSU_STATUS,
+			   0, rsu_status_callback);
+	if (ret) {
+		dev_err(dev, "Error, getting RSU status %i\n", ret);
+		return ret;
+	}
+
+	/* only 19.3 or later FW versions support the retry counter feature */
+	if (FIELD_GET(RSU_FW_VERSION_MASK, priv->status.version)) {
+		ret = rsu_send_msg(priv, COMMAND_RSU_RETRY,
+				   0, rsu_retry_callback);
+		if (ret) {
+			dev_err(dev,
+				"Error, getting RSU retry %i\n", ret);
+			return ret;
+		}
+	}
+
+	return count;
+}
+
+static DEVICE_ATTR_RO(current_image);
+static DEVICE_ATTR_RO(fail_image);
+static DEVICE_ATTR_RO(state);
+static DEVICE_ATTR_RO(version);
+static DEVICE_ATTR_RO(error_location);
+static DEVICE_ATTR_RO(error_details);
+static DEVICE_ATTR_RO(retry_counter);
+static DEVICE_ATTR_WO(reboot_image);
+static DEVICE_ATTR_WO(notify);
+
+static struct attribute *rsu_attrs[] = {
+	&dev_attr_current_image.attr,
+	&dev_attr_fail_image.attr,
+	&dev_attr_state.attr,
+	&dev_attr_version.attr,
+	&dev_attr_error_location.attr,
+	&dev_attr_error_details.attr,
+	&dev_attr_retry_counter.attr,
+	&dev_attr_reboot_image.attr,
+	&dev_attr_notify.attr,
+	NULL
+};
+
+ATTRIBUTE_GROUPS(rsu);
+
+static int stratix10_rsu_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct stratix10_rsu_priv *priv;
+	int ret;
+
+	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+	if (!priv)
+		return -ENOMEM;
+
+	priv->client.dev = dev;
+	priv->client.receive_cb = NULL;
+	priv->client.priv = priv;
+	priv->status.current_image = 0;
+	priv->status.fail_image = 0;
+	priv->status.error_location = 0;
+	priv->status.error_details = 0;
+	priv->status.version = 0;
+	priv->status.state = 0;
+	priv->retry_counter = INVALID_RETRY_COUNTER;
+
+	mutex_init(&priv->lock);
+	priv->chan = stratix10_svc_request_channel_byname(&priv->client,
+							  SVC_CLIENT_RSU);
+	if (IS_ERR(priv->chan)) {
+		dev_err(dev, "couldn't get service channel %s\n",
+			SVC_CLIENT_RSU);
+		return PTR_ERR(priv->chan);
+	}
+
+	init_completion(&priv->completion);
+	platform_set_drvdata(pdev, priv);
+
+	/* get the initial state from firmware */
+	ret = rsu_send_msg(priv, COMMAND_RSU_STATUS,
+			   0, rsu_status_callback);
+	if (ret) {
+		dev_err(dev, "Error, getting RSU status %i\n", ret);
+		stratix10_svc_free_channel(priv->chan);
+	}
+
+	/* only 19.3 or later FW versions support the retry counter feature */
+	if (FIELD_GET(RSU_FW_VERSION_MASK, priv->status.version)) {
+		ret = rsu_send_msg(priv, COMMAND_RSU_RETRY, 0,
+				   rsu_retry_callback);
+		if (ret) {
+			dev_err(dev,
+				"Error, getting RSU retry %i\n", ret);
+			stratix10_svc_free_channel(priv->chan);
+		}
+	}
+
+	return ret;
+}
+
+static int stratix10_rsu_remove(struct platform_device *pdev)
+{
+	struct stratix10_rsu_priv *priv = platform_get_drvdata(pdev);
+
+	stratix10_svc_free_channel(priv->chan);
+	return 0;
+}
+
+static struct platform_driver stratix10_rsu_driver = {
+	.probe = stratix10_rsu_probe,
+	.remove = stratix10_rsu_remove,
+	.driver = {
+		.name = "stratix10-rsu",
+		.dev_groups = rsu_groups,
+	},
+};
+
+module_platform_driver(stratix10_rsu_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Intel Remote System Update Driver");
+MODULE_AUTHOR("Richard Gong <richard.gong@intel.com>");
diff --git a/drivers/firmware/stratix10-svc.c b/drivers/firmware/stratix10-svc.c
new file mode 100644
index 0000000..b485321
--- /dev/null
+++ b/drivers/firmware/stratix10-svc.c
@@ -0,0 +1,1111 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2017-2018, Intel Corporation
+ */
+
+#include <linux/completion.h>
+#include <linux/delay.h>
+#include <linux/genalloc.h>
+#include <linux/io.h>
+#include <linux/kfifo.h>
+#include <linux/kthread.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/firmware/intel/stratix10-smc.h>
+#include <linux/firmware/intel/stratix10-svc-client.h>
+#include <linux/types.h>
+
+/**
+ * SVC_NUM_DATA_IN_FIFO - number of struct stratix10_svc_data in the FIFO
+ *
+ * SVC_NUM_CHANNEL - number of channels supported by the service layer driver
+ *
+ * FPGA_CONFIG_DATA_CLAIM_TIMEOUT_MS - claim back the submitted buffer(s)
+ * from the secure world for the FPGA manager to reuse, or free the buffer(s)
+ * when all bit-stream data has been sent.
+ *
+ * FPGA_CONFIG_STATUS_TIMEOUT_SEC - poll the FPGA configuration status;
+ * the service layer returns an error to the FPGA manager when a timeout
+ * occurs. The timeout is set to 30 seconds (30 * 1000 ms) on the Intel
+ * Stratix10 SoC.
+ */
+#define SVC_NUM_DATA_IN_FIFO			32
+#define SVC_NUM_CHANNEL				2
+#define FPGA_CONFIG_DATA_CLAIM_TIMEOUT_MS	200
+#define FPGA_CONFIG_STATUS_TIMEOUT_SEC		30
+
+/* stratix10 service layer clients */
+#define STRATIX10_RSU				"stratix10-rsu"
+
+typedef void (svc_invoke_fn)(unsigned long, unsigned long, unsigned long,
+			     unsigned long, unsigned long, unsigned long,
+			     unsigned long, unsigned long,
+			     struct arm_smccc_res *);
+struct stratix10_svc_chan;
+
+/**
+ * struct stratix10_svc - svc private data
+ * @stratix10_svc_rsu: pointer to stratix10 RSU device
+ */
+struct stratix10_svc {
+	struct platform_device *stratix10_svc_rsu;
+};
+
+/**
+ * struct stratix10_svc_sh_memory - service shared memory structure
+ * @sync_complete: state for a completion
+ * @addr: physical address of shared memory block
+ * @size: size of shared memory block
+ * @invoke_fn: function to issue secure monitor or hypervisor call
+ *
+ * This struct is used to save the physical address and size of the shared
+ * memory block. The shared memory block is allocated by secure monitor
+ * software in the secure world.
+ *
+ * Service layer driver uses the physical address and size to create a memory
+ * pool, then allocates data buffer from that memory pool for service client.
+ */
+struct stratix10_svc_sh_memory {
+	struct completion sync_complete;
+	unsigned long addr;
+	unsigned long size;
+	svc_invoke_fn *invoke_fn;
+};
+
+/**
+ * struct stratix10_svc_data_mem - service memory structure
+ * @vaddr: virtual address
+ * @paddr: physical address
+ * @size: size of memory
+ * @node: link list head node
+ *
+ * This struct is used in a list that keeps track of buffers which have
+ * been allocated or freed from the memory pool. Service layer driver also
+ * uses this struct to transfer physical address to virtual address.
+ */
+struct stratix10_svc_data_mem {
+	void *vaddr;
+	phys_addr_t paddr;
+	size_t size;
+	struct list_head node;
+};
+
+/**
+ * struct stratix10_svc_data - service data structure
+ * @chan: service channel
+ * @paddr: payload physical address
+ * @size: payload size
+ * @command: service command requested by client
+ * @flag: configuration type (full or partial)
+ * @arg: args to be passed via registers and not physically mapped buffers
+ *
+ * This struct is used in service FIFO for inter-process communication.
+ */
+struct stratix10_svc_data {
+	struct stratix10_svc_chan *chan;
+	phys_addr_t paddr;
+	size_t size;
+	u32 command;
+	u32 flag;
+	u64 arg[3];
+};
+
+/**
+ * struct stratix10_svc_controller - service controller
+ * @dev: device
+ * @chans: array of service channels
+ * @num_chans: number of channels in 'chans' array
+ * @num_active_client: number of active service client
+ * @node: list management
+ * @genpool: memory pool pointing to the memory region
+ * @task: pointer to the thread task which handles SMC or HVC call
+ * @svc_fifo: a queue for storing service message data
+ * @complete_status: state for completion
+ * @svc_fifo_lock: protect access to service message data queue
+ * @invoke_fn: function to issue secure monitor call or hypervisor call
+ *
+ * This struct is used to create communication channels for service clients
+ * and to handle secure monitor or hypervisor calls.
+ */
+struct stratix10_svc_controller {
+	struct device *dev;
+	struct stratix10_svc_chan *chans;
+	int num_chans;
+	int num_active_client;
+	struct list_head node;
+	struct gen_pool *genpool;
+	struct task_struct *task;
+	struct kfifo svc_fifo;
+	struct completion complete_status;
+	spinlock_t svc_fifo_lock;
+	svc_invoke_fn *invoke_fn;
+};
+
+/**
+ * struct stratix10_svc_chan - service communication channel
+ * @ctrl: pointer to service controller which is the provider of this channel
+ * @scl: pointer to service client which owns the channel
+ * @name: service client name associated with the channel
+ * @lock: protect access to the channel
+ *
+ * This struct is used by a service client to communicate with the service
+ * layer; each service client has its own channel, created by the service
+ * controller.
+ */
+struct stratix10_svc_chan {
+	struct stratix10_svc_controller *ctrl;
+	struct stratix10_svc_client *scl;
+	char *name;
+	spinlock_t lock;
+};
+
+static LIST_HEAD(svc_ctrl);
+static LIST_HEAD(svc_data_mem);
+
+/**
+ * svc_pa_to_va() - translate physical address to virtual address
+ * @addr: physical address to be translated
+ *
+ * Return: valid virtual address or NULL if the provided physical
+ * address doesn't exist.
+ */
+static void *svc_pa_to_va(unsigned long addr)
+{
+	struct stratix10_svc_data_mem *pmem;
+
+	pr_debug("claim back P-addr=0x%016x\n", (unsigned int)addr);
+	list_for_each_entry(pmem, &svc_data_mem, node)
+		if (pmem->paddr == addr)
+			return pmem->vaddr;
+
+	/* physical address is not found */
+	return NULL;
+}
+
+/**
+ * svc_thread_cmd_data_claim() - claim back buffer from the secure world
+ * @ctrl: pointer to service layer controller
+ * @p_data: pointer to service data structure
+ * @cb_data: pointer to callback data structure to service client
+ *
+ * Claim back the submitted buffers from the secure world and pass buffer
+ * back to service client (FPGA manager, etc) for reuse.
+ */
+static void svc_thread_cmd_data_claim(struct stratix10_svc_controller *ctrl,
+				      struct stratix10_svc_data *p_data,
+				      struct stratix10_svc_cb_data *cb_data)
+{
+	struct arm_smccc_res res;
+	unsigned long timeout;
+
+	reinit_completion(&ctrl->complete_status);
+	timeout = msecs_to_jiffies(FPGA_CONFIG_DATA_CLAIM_TIMEOUT_MS);
+
+	pr_debug("%s: claim back the submitted buffer\n", __func__);
+	do {
+		ctrl->invoke_fn(INTEL_SIP_SMC_FPGA_CONFIG_COMPLETED_WRITE,
+				0, 0, 0, 0, 0, 0, 0, &res);
+
+		if (res.a0 == INTEL_SIP_SMC_STATUS_OK) {
+			if (!res.a1) {
+				complete(&ctrl->complete_status);
+				break;
+			}
+			cb_data->status = BIT(SVC_STATUS_RECONFIG_BUFFER_DONE);
+			cb_data->kaddr1 = svc_pa_to_va(res.a1);
+			cb_data->kaddr2 = (res.a2) ?
+					  svc_pa_to_va(res.a2) : NULL;
+			cb_data->kaddr3 = (res.a3) ?
+					  svc_pa_to_va(res.a3) : NULL;
+			p_data->chan->scl->receive_cb(p_data->chan->scl,
+						      cb_data);
+		} else {
+			pr_debug("%s: secure world busy, polling again\n",
+				 __func__);
+		}
+	} while (res.a0 == INTEL_SIP_SMC_STATUS_OK ||
+		 res.a0 == INTEL_SIP_SMC_FPGA_CONFIG_STATUS_BUSY ||
+		 wait_for_completion_timeout(&ctrl->complete_status, timeout));
+}
+
+/**
+ * svc_thread_cmd_config_status() - check configuration status
+ * @ctrl: pointer to service layer controller
+ * @p_data: pointer to service data structure
+ * @cb_data: pointer to callback data structure to service client
+ *
+ * Check whether the secure firmware at secure world has finished the FPGA
+ * configuration, and then inform FPGA manager the configuration status.
+ */
+static void svc_thread_cmd_config_status(struct stratix10_svc_controller *ctrl,
+					 struct stratix10_svc_data *p_data,
+					 struct stratix10_svc_cb_data *cb_data)
+{
+	struct arm_smccc_res res;
+	int count_in_sec;
+
+	cb_data->kaddr1 = NULL;
+	cb_data->kaddr2 = NULL;
+	cb_data->kaddr3 = NULL;
+	cb_data->status = BIT(SVC_STATUS_RECONFIG_ERROR);
+
+	pr_debug("%s: polling config status\n", __func__);
+
+	count_in_sec = FPGA_CONFIG_STATUS_TIMEOUT_SEC;
+	while (count_in_sec) {
+		ctrl->invoke_fn(INTEL_SIP_SMC_FPGA_CONFIG_ISDONE,
+				0, 0, 0, 0, 0, 0, 0, &res);
+		if ((res.a0 == INTEL_SIP_SMC_STATUS_OK) ||
+		    (res.a0 == INTEL_SIP_SMC_FPGA_CONFIG_STATUS_ERROR))
+			break;
+
+		/*
+		 * configuration is still in progress, wait one second then
+		 * poll again
+		 */
+		msleep(1000);
+		count_in_sec--;
+	}
+
+	if (res.a0 == INTEL_SIP_SMC_STATUS_OK && count_in_sec)
+		cb_data->status = BIT(SVC_STATUS_RECONFIG_COMPLETED);
+
+	p_data->chan->scl->receive_cb(p_data->chan->scl, cb_data);
+}
+
+/**
+ * svc_thread_recv_status_ok() - handle the successful status
+ * @p_data: pointer to service data structure
+ * @cb_data: pointer to callback data structure to service client
+ * @res: result from SMC or HVC call
+ *
+ * Send the corresponding status back to the service client.
+ */
+static void svc_thread_recv_status_ok(struct stratix10_svc_data *p_data,
+				      struct stratix10_svc_cb_data *cb_data,
+				      struct arm_smccc_res res)
+{
+	cb_data->kaddr1 = NULL;
+	cb_data->kaddr2 = NULL;
+	cb_data->kaddr3 = NULL;
+
+	switch (p_data->command) {
+	case COMMAND_RECONFIG:
+		cb_data->status = BIT(SVC_STATUS_RECONFIG_REQUEST_OK);
+		break;
+	case COMMAND_RECONFIG_DATA_SUBMIT:
+		cb_data->status = BIT(SVC_STATUS_RECONFIG_BUFFER_SUBMITTED);
+		break;
+	case COMMAND_NOOP:
+		cb_data->status = BIT(SVC_STATUS_RECONFIG_BUFFER_SUBMITTED);
+		cb_data->kaddr1 = svc_pa_to_va(res.a1);
+		break;
+	case COMMAND_RECONFIG_STATUS:
+		cb_data->status = BIT(SVC_STATUS_RECONFIG_COMPLETED);
+		break;
+	case COMMAND_RSU_UPDATE:
+	case COMMAND_RSU_NOTIFY:
+		cb_data->status = BIT(SVC_STATUS_RSU_OK);
+		break;
+	case COMMAND_RSU_RETRY:
+		cb_data->status = BIT(SVC_STATUS_RSU_OK);
+		cb_data->kaddr1 = &res.a1;
+		break;
+	default:
+		pr_warn("it shouldn't happen\n");
+		break;
+	}
+
+	pr_debug("%s: call receive_cb\n", __func__);
+	p_data->chan->scl->receive_cb(p_data->chan->scl, cb_data);
+}
+
+/**
+ * svc_normal_to_secure_thread() - the function to run in the kthread
+ * @data: data pointer for kthread function
+ *
+ * The service layer driver creates an svc_smc_hvc_thread kthread bound to
+ * CPU 0; its thread function, svc_normal_to_secure_thread(), handles SMC or
+ * HVC calls between the kernel driver and the secure monitor software.
+ *
+ * Return: 0 for success or -ENOMEM on error.
+ */
+static int svc_normal_to_secure_thread(void *data)
+{
+	struct stratix10_svc_controller
+			*ctrl = (struct stratix10_svc_controller *)data;
+	struct stratix10_svc_data *pdata;
+	struct stratix10_svc_cb_data *cbdata;
+	struct arm_smccc_res res;
+	unsigned long a0, a1, a2;
+	int ret_fifo = 0;
+
+	pdata =  kmalloc(sizeof(*pdata), GFP_KERNEL);
+	if (!pdata)
+		return -ENOMEM;
+
+	cbdata = kmalloc(sizeof(*cbdata), GFP_KERNEL);
+	if (!cbdata) {
+		kfree(pdata);
+		return -ENOMEM;
+	}
+
+	/* default set, to remove build warning */
+	a0 = INTEL_SIP_SMC_FPGA_CONFIG_LOOPBACK;
+	a1 = 0;
+	a2 = 0;
+
+	pr_debug("smc_hvc_shm_thread is running\n");
+
+	while (!kthread_should_stop()) {
+		ret_fifo = kfifo_out_spinlocked(&ctrl->svc_fifo,
+						pdata, sizeof(*pdata),
+						&ctrl->svc_fifo_lock);
+
+		if (!ret_fifo)
+			continue;
+
+		pr_debug("get from FIFO pa=0x%016x, command=%u, size=%u\n",
+			 (unsigned int)pdata->paddr, pdata->command,
+			 (unsigned int)pdata->size);
+
+		switch (pdata->command) {
+		case COMMAND_RECONFIG_DATA_CLAIM:
+			svc_thread_cmd_data_claim(ctrl, pdata, cbdata);
+			continue;
+		case COMMAND_RECONFIG:
+			a0 = INTEL_SIP_SMC_FPGA_CONFIG_START;
+			pr_debug("conf_type=%u\n", (unsigned int)pdata->flag);
+			a1 = pdata->flag;
+			a2 = 0;
+			break;
+		case COMMAND_RECONFIG_DATA_SUBMIT:
+			a0 = INTEL_SIP_SMC_FPGA_CONFIG_WRITE;
+			a1 = (unsigned long)pdata->paddr;
+			a2 = (unsigned long)pdata->size;
+			break;
+		case COMMAND_RECONFIG_STATUS:
+			a0 = INTEL_SIP_SMC_FPGA_CONFIG_ISDONE;
+			a1 = 0;
+			a2 = 0;
+			break;
+		case COMMAND_RSU_STATUS:
+			a0 = INTEL_SIP_SMC_RSU_STATUS;
+			a1 = 0;
+			a2 = 0;
+			break;
+		case COMMAND_RSU_UPDATE:
+			a0 = INTEL_SIP_SMC_RSU_UPDATE;
+			a1 = pdata->arg[0];
+			a2 = 0;
+			break;
+		case COMMAND_RSU_NOTIFY:
+			a0 = INTEL_SIP_SMC_RSU_NOTIFY;
+			a1 = pdata->arg[0];
+			a2 = 0;
+			break;
+		case COMMAND_RSU_RETRY:
+			a0 = INTEL_SIP_SMC_RSU_RETRY_COUNTER;
+			a1 = 0;
+			a2 = 0;
+			break;
+		default:
+			pr_warn("it shouldn't happen\n");
+			break;
+		}
+		pr_debug("%s: before SMC call -- a0=0x%016x a1=0x%016x",
+			 __func__, (unsigned int)a0, (unsigned int)a1);
+		pr_debug(" a2=0x%016x\n", (unsigned int)a2);
+
+		ctrl->invoke_fn(a0, a1, a2, 0, 0, 0, 0, 0, &res);
+
+		pr_debug("%s: after SMC call -- res.a0=0x%016x",
+			 __func__, (unsigned int)res.a0);
+		pr_debug(" res.a1=0x%016x, res.a2=0x%016x",
+			 (unsigned int)res.a1, (unsigned int)res.a2);
+		pr_debug(" res.a3=0x%016x\n", (unsigned int)res.a3);
+
+		if (pdata->command == COMMAND_RSU_STATUS) {
+			if (res.a0 == INTEL_SIP_SMC_RSU_ERROR)
+				cbdata->status = BIT(SVC_STATUS_RSU_ERROR);
+			else
+				cbdata->status = BIT(SVC_STATUS_RSU_OK);
+
+			cbdata->kaddr1 = &res;
+			cbdata->kaddr2 = NULL;
+			cbdata->kaddr3 = NULL;
+			pdata->chan->scl->receive_cb(pdata->chan->scl, cbdata);
+			continue;
+		}
+
+		switch (res.a0) {
+		case INTEL_SIP_SMC_STATUS_OK:
+			svc_thread_recv_status_ok(pdata, cbdata, res);
+			break;
+		case INTEL_SIP_SMC_FPGA_CONFIG_STATUS_BUSY:
+			switch (pdata->command) {
+			case COMMAND_RECONFIG_DATA_SUBMIT:
+				svc_thread_cmd_data_claim(ctrl,
+							  pdata, cbdata);
+				break;
+			case COMMAND_RECONFIG_STATUS:
+				svc_thread_cmd_config_status(ctrl,
+							     pdata, cbdata);
+				break;
+			default:
+				pr_warn("it shouldn't happen\n");
+				break;
+			}
+			break;
+		case INTEL_SIP_SMC_FPGA_CONFIG_STATUS_REJECTED:
+			pr_debug("%s: STATUS_REJECTED\n", __func__);
+			break;
+		case INTEL_SIP_SMC_FPGA_CONFIG_STATUS_ERROR:
+		case INTEL_SIP_SMC_RSU_ERROR:
+			pr_err("%s: STATUS_ERROR\n", __func__);
+			switch (pdata->command) {
+			/* for FPGA mgr */
+			case COMMAND_RECONFIG_DATA_CLAIM:
+			case COMMAND_RECONFIG:
+			case COMMAND_RECONFIG_DATA_SUBMIT:
+			case COMMAND_RECONFIG_STATUS:
+				cbdata->status =
+					BIT(SVC_STATUS_RECONFIG_ERROR);
+				break;
+
+			/* for RSU */
+			case COMMAND_RSU_STATUS:
+			case COMMAND_RSU_UPDATE:
+			case COMMAND_RSU_NOTIFY:
+			case COMMAND_RSU_RETRY:
+				cbdata->status =
+					BIT(SVC_STATUS_RSU_ERROR);
+				break;
+			}
+
+			cbdata->status = BIT(SVC_STATUS_RECONFIG_ERROR);
+			cbdata->kaddr1 = NULL;
+			cbdata->kaddr2 = NULL;
+			cbdata->kaddr3 = NULL;
+			pdata->chan->scl->receive_cb(pdata->chan->scl, cbdata);
+			break;
+		default:
+			pr_warn("it shouldn't happen\n");
+			break;
+		}
+	}
+
+	kfree(cbdata);
+	kfree(pdata);
+
+	return 0;
+}
+
+/**
+ * svc_normal_to_secure_shm_thread() - the function to run in the kthread
+ * @data: data pointer for kthread function
+ *
+ * The service layer driver creates an svc_smc_hvc_shm_thread kthread on CPU
+ * 0; its thread function, svc_normal_to_secure_shm_thread(), queries the
+ * physical address of the memory block reserved by secure monitor software
+ * in the secure world.
+ *
+ * svc_normal_to_secure_shm_thread() calls do_exit() directly since it is a
+ * standalone thread for which no one will call kthread_stop() or return when
+ * 'kthread_should_stop()' is true.
+ */
+static int svc_normal_to_secure_shm_thread(void *data)
+{
+	struct stratix10_svc_sh_memory
+			*sh_mem = (struct stratix10_svc_sh_memory *)data;
+	struct arm_smccc_res res;
+
+	/* SMC or HVC call to get shared memory info from secure world */
+	sh_mem->invoke_fn(INTEL_SIP_SMC_FPGA_CONFIG_GET_MEM,
+			  0, 0, 0, 0, 0, 0, 0, &res);
+	if (res.a0 == INTEL_SIP_SMC_STATUS_OK) {
+		sh_mem->addr = res.a1;
+		sh_mem->size = res.a2;
+	} else {
+		pr_err("%s: after SMC call -- res.a0=0x%016x",  __func__,
+		       (unsigned int)res.a0);
+		sh_mem->addr = 0;
+		sh_mem->size = 0;
+	}
+
+	complete(&sh_mem->sync_complete);
+	do_exit(0);
+}
+
+/**
+ * svc_get_sh_memory() - get memory block reserved by secure monitor SW
+ * @pdev: pointer to service layer device
+ * @sh_memory: pointer to service shared memory structure
+ *
+ * Return: zero for successfully getting the physical address of memory block
+ * reserved by secure monitor software, or negative value on error.
+ */
+static int svc_get_sh_memory(struct platform_device *pdev,
+				    struct stratix10_svc_sh_memory *sh_memory)
+{
+	struct device *dev = &pdev->dev;
+	struct task_struct *sh_memory_task;
+	unsigned int cpu = 0;
+
+	init_completion(&sh_memory->sync_complete);
+
+	/* smc or hvc call happens on cpu 0 bound kthread */
+	sh_memory_task = kthread_create_on_node(svc_normal_to_secure_shm_thread,
+					       (void *)sh_memory,
+						cpu_to_node(cpu),
+						"svc_smc_hvc_shm_thread");
+	if (IS_ERR(sh_memory_task)) {
+		dev_err(dev, "fail to create stratix10_svc_smc_shm_thread\n");
+		return -EINVAL;
+	}
+
+	wake_up_process(sh_memory_task);
+
+	if (!wait_for_completion_timeout(&sh_memory->sync_complete, 10 * HZ)) {
+		dev_err(dev,
+			"timeout to get sh-memory paras from secure world\n");
+		return -ETIMEDOUT;
+	}
+
+	if (!sh_memory->addr || !sh_memory->size) {
+		dev_err(dev,
+			"failed to get shared memory info from secure world\n");
+		return -ENOMEM;
+	}
+
+	dev_dbg(dev, "SM software provides paddr: 0x%016x, size: 0x%08x\n",
+		(unsigned int)sh_memory->addr,
+		(unsigned int)sh_memory->size);
+
+	return 0;
+}
+
+/**
+ * svc_create_memory_pool() - create a memory pool from reserved memory block
+ * @pdev: pointer to service layer device
+ * @sh_memory: pointer to service shared memory structure
+ *
+ * Return: pool allocated from reserved memory block or ERR_PTR() on error.
+ */
+static struct gen_pool *
+svc_create_memory_pool(struct platform_device *pdev,
+		       struct stratix10_svc_sh_memory *sh_memory)
+{
+	struct device *dev = &pdev->dev;
+	struct gen_pool *genpool;
+	unsigned long vaddr;
+	phys_addr_t paddr;
+	size_t size;
+	phys_addr_t begin;
+	phys_addr_t end;
+	void *va;
+	size_t page_mask = PAGE_SIZE - 1;
+	int min_alloc_order = 3;
+	int ret;
+
+	begin = roundup(sh_memory->addr, PAGE_SIZE);
+	end = rounddown(sh_memory->addr + sh_memory->size, PAGE_SIZE);
+	paddr = begin;
+	size = end - begin;
+	va = memremap(paddr, size, MEMREMAP_WC);
+	if (!va) {
+		dev_err(dev, "fail to remap shared memory\n");
+		return ERR_PTR(-EINVAL);
+	}
+	vaddr = (unsigned long)va;
+	dev_dbg(dev,
+		"reserved memory vaddr: %p, paddr: 0x%16x size: 0x%8x\n",
+		va, (unsigned int)paddr, (unsigned int)size);
+	if ((vaddr & page_mask) || (paddr & page_mask) ||
+	    (size & page_mask)) {
+		dev_err(dev, "page is not aligned\n");
+		return ERR_PTR(-EINVAL);
+	}
+	genpool = gen_pool_create(min_alloc_order, -1);
+	if (!genpool) {
+		dev_err(dev, "fail to create genpool\n");
+		return ERR_PTR(-ENOMEM);
+	}
+	gen_pool_set_algo(genpool, gen_pool_best_fit, NULL);
+	ret = gen_pool_add_virt(genpool, vaddr, paddr, size, -1);
+	if (ret) {
+		dev_err(dev, "fail to add memory chunk to the pool\n");
+		gen_pool_destroy(genpool);
+		return ERR_PTR(ret);
+	}
+
+	return genpool;
+}
+
+/**
+ * svc_smccc_smc() - secure monitor call between normal and secure world
+ * @a0: argument passed in register 0
+ * @a1: argument passed in register 1
+ * @a2: argument passed in register 2
+ * @a3: argument passed in register 3
+ * @a4: argument passed in register 4
+ * @a5: argument passed in register 5
+ * @a6: argument passed in register 6
+ * @a7: argument passed in register 7
+ * @res: result values from registers 0 to 3
+ */
+static void svc_smccc_smc(unsigned long a0, unsigned long a1,
+			  unsigned long a2, unsigned long a3,
+			  unsigned long a4, unsigned long a5,
+			  unsigned long a6, unsigned long a7,
+			  struct arm_smccc_res *res)
+{
+	arm_smccc_smc(a0, a1, a2, a3, a4, a5, a6, a7, res);
+}
+
+/**
+ * svc_smccc_hvc() - hypervisor call between normal and secure world
+ * @a0: argument passed in register 0
+ * @a1: argument passed in register 1
+ * @a2: argument passed in register 2
+ * @a3: argument passed in register 3
+ * @a4: argument passed in register 4
+ * @a5: argument passed in register 5
+ * @a6: argument passed in register 6
+ * @a7: argument passed in register 7
+ * @res: result values from registers 0 to 3
+ */
+static void svc_smccc_hvc(unsigned long a0, unsigned long a1,
+			  unsigned long a2, unsigned long a3,
+			  unsigned long a4, unsigned long a5,
+			  unsigned long a6, unsigned long a7,
+			  struct arm_smccc_res *res)
+{
+	arm_smccc_hvc(a0, a1, a2, a3, a4, a5, a6, a7, res);
+}
+
+/**
+ * get_invoke_func() - get the function used to invoke an SMC or HVC call
+ * @dev: pointer to device
+ *
+ * Return: function pointer to svc_smccc_smc or svc_smccc_hvc.
+ */
+static svc_invoke_fn *get_invoke_func(struct device *dev)
+{
+	const char *method;
+
+	if (of_property_read_string(dev->of_node, "method", &method)) {
+		dev_warn(dev, "missing \"method\" property\n");
+		return ERR_PTR(-ENXIO);
+	}
+
+	if (!strcmp(method, "smc"))
+		return svc_smccc_smc;
+	if (!strcmp(method, "hvc"))
+		return svc_smccc_hvc;
+
+	dev_warn(dev, "invalid \"method\" property: %s\n", method);
+
+	return ERR_PTR(-EINVAL);
+}
+
+/**
+ * stratix10_svc_request_channel_byname() - request a service channel
+ * @client: pointer to service client
+ * @name: service client name
+ *
+ * This function is used by service client to request a service channel.
+ *
+ * Return: a pointer to channel assigned to the client on success,
+ * or ERR_PTR() on error.
+ */
+struct stratix10_svc_chan *stratix10_svc_request_channel_byname(
+	struct stratix10_svc_client *client, const char *name)
+{
+	struct device *dev = client->dev;
+	struct stratix10_svc_controller *controller;
+	struct stratix10_svc_chan *chan = NULL;
+	unsigned long flag;
+	int i;
+
+	/* if the service layer probe ran after the client's, or probe failed */
+	if (list_empty(&svc_ctrl))
+		return ERR_PTR(-EPROBE_DEFER);
+
+	controller = list_first_entry(&svc_ctrl,
+				      struct stratix10_svc_controller, node);
+	for (i = 0; i < SVC_NUM_CHANNEL; i++) {
+		if (!strcmp(controller->chans[i].name, name)) {
+			chan = &controller->chans[i];
+			break;
+		}
+	}
+
+	/* if there was no channel match */
+	if (i == SVC_NUM_CHANNEL) {
+		dev_err(dev, "%s: channel not allocated\n", __func__);
+		return ERR_PTR(-EINVAL);
+	}
+
+	if (chan->scl || !try_module_get(controller->dev->driver->owner)) {
+		dev_dbg(dev, "%s: svc not free\n", __func__);
+		return ERR_PTR(-EBUSY);
+	}
+
+	spin_lock_irqsave(&chan->lock, flag);
+	chan->scl = client;
+	chan->ctrl->num_active_client++;
+	spin_unlock_irqrestore(&chan->lock, flag);
+
+	return chan;
+}
+EXPORT_SYMBOL_GPL(stratix10_svc_request_channel_byname);
+
+/**
+ * stratix10_svc_free_channel() - free service channel
+ * @chan: service channel to be freed
+ *
+ * This function is used by a service client to free a service channel.
+ */
+void stratix10_svc_free_channel(struct stratix10_svc_chan *chan)
+{
+	unsigned long flag;
+
+	spin_lock_irqsave(&chan->lock, flag);
+	chan->scl = NULL;
+	chan->ctrl->num_active_client--;
+	module_put(chan->ctrl->dev->driver->owner);
+	spin_unlock_irqrestore(&chan->lock, flag);
+}
+EXPORT_SYMBOL_GPL(stratix10_svc_free_channel);
+
+/**
+ * stratix10_svc_send() - send a message data to the remote
+ * @chan: service channel assigned to the client
+ * @msg: message data to be sent, in the format of
+ * "struct stratix10_svc_client_msg"
+ *
+ * This function is used by a service client to add a message to the service
+ * layer driver's queue of messages to be sent to the secure world.
+ *
+ * Return: 0 for success, -ENOMEM or -ENOBUFS on error.
+ */
+int stratix10_svc_send(struct stratix10_svc_chan *chan, void *msg)
+{
+	struct stratix10_svc_client_msg
+		*p_msg = (struct stratix10_svc_client_msg *)msg;
+	struct stratix10_svc_data_mem *p_mem;
+	struct stratix10_svc_data *p_data;
+	int ret = 0;
+	unsigned int cpu = 0;
+
+	p_data = kzalloc(sizeof(*p_data), GFP_KERNEL);
+	if (!p_data)
+		return -ENOMEM;
+
+	/* first client will create kernel thread */
+	if (!chan->ctrl->task) {
+		chan->ctrl->task =
+			kthread_create_on_node(svc_normal_to_secure_thread,
+					      (void *)chan->ctrl,
+					      cpu_to_node(cpu),
+					      "svc_smc_hvc_thread");
+		if (IS_ERR(chan->ctrl->task)) {
+			dev_err(chan->ctrl->dev,
+				"failed to create svc_smc_hvc_thread\n");
+			kfree(p_data);
+			return -EINVAL;
+		}
+		kthread_bind(chan->ctrl->task, cpu);
+		wake_up_process(chan->ctrl->task);
+	}
+
+	pr_debug("%s: sent P-va=%p, P-com=%x, P-size=%u\n", __func__,
+		 p_msg->payload, p_msg->command,
+		 (unsigned int)p_msg->payload_length);
+
+	if (list_empty(&svc_data_mem)) {
+		if (p_msg->command == COMMAND_RECONFIG) {
+			struct stratix10_svc_command_config_type *ct =
+				(struct stratix10_svc_command_config_type *)
+				p_msg->payload;
+			p_data->flag = ct->flags;
+		}
+	} else {
+		list_for_each_entry(p_mem, &svc_data_mem, node)
+			if (p_mem->vaddr == p_msg->payload) {
+				p_data->paddr = p_mem->paddr;
+				break;
+			}
+	}
+
+	p_data->command = p_msg->command;
+	p_data->arg[0] = p_msg->arg[0];
+	p_data->arg[1] = p_msg->arg[1];
+	p_data->arg[2] = p_msg->arg[2];
+	p_data->size = p_msg->payload_length;
+	p_data->chan = chan;
+	pr_debug("%s: put to FIFO pa=0x%016x, cmd=%x, size=%u\n", __func__,
+	       (unsigned int)p_data->paddr, p_data->command,
+	       (unsigned int)p_data->size);
+	ret = kfifo_in_spinlocked(&chan->ctrl->svc_fifo, p_data,
+				  sizeof(*p_data),
+				  &chan->ctrl->svc_fifo_lock);
+
+	kfree(p_data);
+
+	if (!ret)
+		return -ENOBUFS;
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(stratix10_svc_send);
+
+/**
+ * stratix10_svc_done() - complete service request transactions
+ * @chan: service channel assigned to the client
+ *
+ * This function should be called when the client has finished its request
+ * or when there is an error in the request process. It allows the service
+ * layer to stop the running thread and conserve kernel resources.
+ */
+void stratix10_svc_done(struct stratix10_svc_chan *chan)
+{
+	/* stop thread when thread is running AND only one active client */
+	if (chan->ctrl->task && chan->ctrl->num_active_client <= 1) {
+		pr_debug("svc_smc_hvc_shm_thread is stopped\n");
+		kthread_stop(chan->ctrl->task);
+		chan->ctrl->task = NULL;
+	}
+}
+EXPORT_SYMBOL_GPL(stratix10_svc_done);
+
+/**
+ * stratix10_svc_allocate_memory() - allocate memory
+ * @chan: service channel assigned to the client
+ * @size: memory size requested by a specific service client
+ *
+ * The service layer allocates a buffer of the requested size from the
+ * memory pool; a service client uses this function to obtain the buffer.
+ *
+ * Return: address of allocated memory on success, or ERR_PTR() on error.
+ */
+void *stratix10_svc_allocate_memory(struct stratix10_svc_chan *chan,
+				    size_t size)
+{
+	struct stratix10_svc_data_mem *pmem;
+	unsigned long va;
+	phys_addr_t pa;
+	struct gen_pool *genpool = chan->ctrl->genpool;
+	size_t s = roundup(size, 1 << genpool->min_alloc_order);
+
+	pmem = devm_kzalloc(chan->ctrl->dev, sizeof(*pmem), GFP_KERNEL);
+	if (!pmem)
+		return ERR_PTR(-ENOMEM);
+
+	va = gen_pool_alloc(genpool, s);
+	if (!va)
+		return ERR_PTR(-ENOMEM);
+
+	memset((void *)va, 0, s);
+	pa = gen_pool_virt_to_phys(genpool, va);
+
+	pmem->vaddr = (void *)va;
+	pmem->paddr = pa;
+	pmem->size = s;
+	list_add_tail(&pmem->node, &svc_data_mem);
+	pr_debug("%s: va=%p, pa=0x%016x\n", __func__,
+		 pmem->vaddr, (unsigned int)pmem->paddr);
+
+	return (void *)va;
+}
+EXPORT_SYMBOL_GPL(stratix10_svc_allocate_memory);
+
+/**
+ * stratix10_svc_free_memory() - free allocated memory
+ * @chan: service channel assigned to the client
+ * @kaddr: memory to be freed
+ *
+ * This function is used by a service client to free allocated buffers.
+ */
+void stratix10_svc_free_memory(struct stratix10_svc_chan *chan, void *kaddr)
+{
+	struct stratix10_svc_data_mem *pmem;
+
+	list_for_each_entry(pmem, &svc_data_mem, node)
+		if (pmem->vaddr == kaddr) {
+			gen_pool_free(chan->ctrl->genpool,
+				      (unsigned long)kaddr, pmem->size);
+			pmem->vaddr = NULL;
+			list_del(&pmem->node);
+			return;
+		}
+}
+EXPORT_SYMBOL_GPL(stratix10_svc_free_memory);
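
As a reader aid only, not part of this patch: a condensed sketch of how a service client
might drive the channel API exported above. The receive-callback wiring and the exact
COMMAND_RECONFIG payload semantics beyond what this file shows are assumptions.

/* Hypothetical client sketch -- not part of this patch. */
static int example_start_reconfig(struct device *dev)
{
	struct stratix10_svc_client client = { .dev = dev };
	struct stratix10_svc_command_config_type ct = { .flags = 0 };
	struct stratix10_svc_client_msg msg = {
		.command = COMMAND_RECONFIG,
		.payload = &ct,
		.payload_length = sizeof(ct),
	};
	struct stratix10_svc_chan *chan;
	int ret;

	/* a real client would also install a receive callback in "client" */
	chan = stratix10_svc_request_channel_byname(&client, SVC_CLIENT_FPGA);
	if (IS_ERR(chan))
		return PTR_ERR(chan);

	ret = stratix10_svc_send(chan, &msg);
	/*
	 * the result is reported asynchronously through the client callback;
	 * a real client would wait for it before tearing the channel down
	 */

	stratix10_svc_done(chan);
	stratix10_svc_free_channel(chan);

	return ret;
}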
+
+static const struct of_device_id stratix10_svc_drv_match[] = {
+	{.compatible = "intel,stratix10-svc"},
+	{},
+};
+
+static int stratix10_svc_drv_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct stratix10_svc_controller *controller;
+	struct stratix10_svc_chan *chans;
+	struct gen_pool *genpool;
+	struct stratix10_svc_sh_memory *sh_memory;
+	struct stratix10_svc *svc;
+
+	svc_invoke_fn *invoke_fn;
+	size_t fifo_size;
+	int ret;
+
+	/* get SMC or HVC function */
+	invoke_fn = get_invoke_func(dev);
+	if (IS_ERR(invoke_fn))
+		return -EINVAL;
+
+	sh_memory = devm_kzalloc(dev, sizeof(*sh_memory), GFP_KERNEL);
+	if (!sh_memory)
+		return -ENOMEM;
+
+	sh_memory->invoke_fn = invoke_fn;
+	ret = svc_get_sh_memory(pdev, sh_memory);
+	if (ret)
+		return ret;
+
+	genpool = svc_create_memory_pool(pdev, sh_memory);
+	if (IS_ERR(genpool))
+		return PTR_ERR(genpool);
+
+	/* allocate service controller and supporting channel */
+	controller = devm_kzalloc(dev, sizeof(*controller), GFP_KERNEL);
+	if (!controller)
+		return -ENOMEM;
+
+	chans = devm_kmalloc_array(dev, SVC_NUM_CHANNEL,
+				   sizeof(*chans), GFP_KERNEL | __GFP_ZERO);
+	if (!chans)
+		return -ENOMEM;
+
+	controller->dev = dev;
+	controller->num_chans = SVC_NUM_CHANNEL;
+	controller->num_active_client = 0;
+	controller->chans = chans;
+	controller->genpool = genpool;
+	controller->task = NULL;
+	controller->invoke_fn = invoke_fn;
+	init_completion(&controller->complete_status);
+
+	fifo_size = sizeof(struct stratix10_svc_data) * SVC_NUM_DATA_IN_FIFO;
+	ret = kfifo_alloc(&controller->svc_fifo, fifo_size, GFP_KERNEL);
+	if (ret) {
+		dev_err(dev, "failed to allocate FIFO\n");
+		return ret;
+	}
+	spin_lock_init(&controller->svc_fifo_lock);
+
+	chans[0].scl = NULL;
+	chans[0].ctrl = controller;
+	chans[0].name = SVC_CLIENT_FPGA;
+	spin_lock_init(&chans[0].lock);
+
+	chans[1].scl = NULL;
+	chans[1].ctrl = controller;
+	chans[1].name = SVC_CLIENT_RSU;
+	spin_lock_init(&chans[1].lock);
+
+	list_add_tail(&controller->node, &svc_ctrl);
+	platform_set_drvdata(pdev, controller);
+
+	/* add svc client device(s) */
+	svc = devm_kzalloc(dev, sizeof(*svc), GFP_KERNEL);
+	if (!svc)
+		return -ENOMEM;
+
+	svc->stratix10_svc_rsu = platform_device_alloc(STRATIX10_RSU, 0);
+	if (!svc->stratix10_svc_rsu) {
+		dev_err(dev, "failed to allocate %s device\n", STRATIX10_RSU);
+		return -ENOMEM;
+	}
+
+	ret = platform_device_add(svc->stratix10_svc_rsu);
+	if (ret) {
+		platform_device_put(svc->stratix10_svc_rsu);
+		return ret;
+	}
+	dev_set_drvdata(dev, svc);
+
+	pr_info("Intel Service Layer Driver Initialized\n");
+
+	return 0;
+}
+
+static int stratix10_svc_drv_remove(struct platform_device *pdev)
+{
+	struct stratix10_svc *svc = dev_get_drvdata(&pdev->dev);
+	struct stratix10_svc_controller *ctrl = platform_get_drvdata(pdev);
+
+	platform_device_unregister(svc->stratix10_svc_rsu);
+
+	kfifo_free(&ctrl->svc_fifo);
+	if (ctrl->task) {
+		kthread_stop(ctrl->task);
+		ctrl->task = NULL;
+	}
+	if (ctrl->genpool)
+		gen_pool_destroy(ctrl->genpool);
+	list_del(&ctrl->node);
+
+	return 0;
+}
+
+static struct platform_driver stratix10_svc_driver = {
+	.probe = stratix10_svc_drv_probe,
+	.remove = stratix10_svc_drv_remove,
+	.driver = {
+		.name = "stratix10-svc",
+		.of_match_table = stratix10_svc_drv_match,
+	},
+};
+
+static int __init stratix10_svc_init(void)
+{
+	struct device_node *fw_np;
+	struct device_node *np;
+	int ret;
+
+	fw_np = of_find_node_by_name(NULL, "firmware");
+	if (!fw_np)
+		return -ENODEV;
+
+	np = of_find_matching_node(fw_np, stratix10_svc_drv_match);
+	if (!np)
+		return -ENODEV;
+
+	of_node_put(np);
+	ret = of_platform_populate(fw_np, stratix10_svc_drv_match, NULL, NULL);
+	if (ret)
+		return ret;
+
+	return platform_driver_register(&stratix10_svc_driver);
+}
+
+static void __exit stratix10_svc_exit(void)
+{
+	platform_driver_unregister(&stratix10_svc_driver);
+}
+
+subsys_initcall(stratix10_svc_init);
+module_exit(stratix10_svc_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Intel Stratix10 Service Layer Driver");
+MODULE_AUTHOR("Richard Gong <richard.gong@intel.com>");
+MODULE_ALIAS("platform:stratix10-svc");
diff --git a/drivers/firmware/tegra/Kconfig b/drivers/firmware/tegra/Kconfig
index ff2730d..a887731 100644
--- a/drivers/firmware/tegra/Kconfig
+++ b/drivers/firmware/tegra/Kconfig
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
 menu "Tegra firmware driver"
 
 config TEGRA_IVC
diff --git a/drivers/firmware/tegra/Makefile b/drivers/firmware/tegra/Makefile
index 1b826dc..49c87e0 100644
--- a/drivers/firmware/tegra/Makefile
+++ b/drivers/firmware/tegra/Makefile
@@ -1,4 +1,8 @@
+# SPDX-License-Identifier: GPL-2.0-only
 tegra-bpmp-y			= bpmp.o
+tegra-bpmp-$(CONFIG_ARCH_TEGRA_210_SOC)	+= bpmp-tegra210.o
+tegra-bpmp-$(CONFIG_ARCH_TEGRA_186_SOC)	+= bpmp-tegra186.o
+tegra-bpmp-$(CONFIG_ARCH_TEGRA_194_SOC)	+= bpmp-tegra186.o
 tegra-bpmp-$(CONFIG_DEBUG_FS)	+= bpmp-debugfs.o
 obj-$(CONFIG_TEGRA_BPMP)	+= tegra-bpmp.o
 obj-$(CONFIG_TEGRA_IVC)		+= ivc.o
diff --git a/drivers/firmware/tegra/bpmp-debugfs.c b/drivers/firmware/tegra/bpmp-debugfs.c
index f7f6a0a..636b40d 100644
--- a/drivers/firmware/tegra/bpmp-debugfs.c
+++ b/drivers/firmware/tegra/bpmp-debugfs.c
@@ -1,15 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * Copyright (c) 2017, NVIDIA CORPORATION.  All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
  */
 #include <linux/debugfs.h>
 #include <linux/dma-mapping.h>
@@ -379,33 +370,6 @@
 	return err;
 }
 
-static int mrq_is_supported(struct tegra_bpmp *bpmp, unsigned int mrq)
-{
-	struct mrq_query_abi_request req = { .mrq = cpu_to_le32(mrq) };
-	struct mrq_query_abi_response resp;
-	struct tegra_bpmp_message msg = {
-		.mrq = MRQ_QUERY_ABI,
-		.tx = {
-			.data = &req,
-			.size = sizeof(req),
-		},
-		.rx = {
-			.data = &resp,
-			.size = sizeof(resp),
-		},
-	};
-	int ret;
-
-	ret = tegra_bpmp_transfer(bpmp, &msg);
-	if (ret < 0) {
-		/* something went wrong; assume not supported */
-		dev_warn(bpmp->dev, "tegra_bpmp_transfer failed (%d)\n", ret);
-		return 0;
-	}
-
-	return resp.status ? 0 : 1;
-}
-
 int tegra_bpmp_init_debugfs(struct tegra_bpmp *bpmp)
 {
 	dma_addr_t phys;
@@ -415,7 +379,7 @@
 	int ret;
 	struct dentry *root;
 
-	if (!mrq_is_supported(bpmp, MRQ_DEBUGFS))
+	if (!tegra_bpmp_mrq_is_supported(bpmp, MRQ_DEBUGFS))
 		return 0;
 
 	root = debugfs_create_dir("bpmp", NULL);
diff --git a/drivers/firmware/tegra/bpmp-private.h b/drivers/firmware/tegra/bpmp-private.h
new file mode 100644
index 0000000..54d560c
--- /dev/null
+++ b/drivers/firmware/tegra/bpmp-private.h
@@ -0,0 +1,34 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2018, NVIDIA CORPORATION.
+ */
+
+#ifndef __FIRMWARE_TEGRA_BPMP_PRIVATE_H
+#define __FIRMWARE_TEGRA_BPMP_PRIVATE_H
+
+#include <soc/tegra/bpmp.h>
+
+struct tegra_bpmp_ops {
+	int (*init)(struct tegra_bpmp *bpmp);
+	void (*deinit)(struct tegra_bpmp *bpmp);
+	bool (*is_response_ready)(struct tegra_bpmp_channel *channel);
+	bool (*is_request_ready)(struct tegra_bpmp_channel *channel);
+	int (*ack_response)(struct tegra_bpmp_channel *channel);
+	int (*ack_request)(struct tegra_bpmp_channel *channel);
+	bool (*is_response_channel_free)(struct tegra_bpmp_channel *channel);
+	bool (*is_request_channel_free)(struct tegra_bpmp_channel *channel);
+	int (*post_response)(struct tegra_bpmp_channel *channel);
+	int (*post_request)(struct tegra_bpmp_channel *channel);
+	int (*ring_doorbell)(struct tegra_bpmp *bpmp);
+	int (*resume)(struct tegra_bpmp *bpmp);
+};
+
+#if IS_ENABLED(CONFIG_ARCH_TEGRA_186_SOC) || \
+    IS_ENABLED(CONFIG_ARCH_TEGRA_194_SOC)
+extern const struct tegra_bpmp_ops tegra186_bpmp_ops;
+#endif
+#if IS_ENABLED(CONFIG_ARCH_TEGRA_210_SOC)
+extern const struct tegra_bpmp_ops tegra210_bpmp_ops;
+#endif
+
+#endif
diff --git a/drivers/firmware/tegra/bpmp-tegra186.c b/drivers/firmware/tegra/bpmp-tegra186.c
new file mode 100644
index 0000000..ea30875
--- /dev/null
+++ b/drivers/firmware/tegra/bpmp-tegra186.c
@@ -0,0 +1,305 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2018, NVIDIA CORPORATION.
+ */
+
+#include <linux/genalloc.h>
+#include <linux/mailbox_client.h>
+#include <linux/platform_device.h>
+
+#include <soc/tegra/bpmp.h>
+#include <soc/tegra/bpmp-abi.h>
+#include <soc/tegra/ivc.h>
+
+#include "bpmp-private.h"
+
+struct tegra186_bpmp {
+	struct tegra_bpmp *parent;
+
+	struct {
+		struct gen_pool *pool;
+		dma_addr_t phys;
+		void *virt;
+	} tx, rx;
+
+	struct {
+		struct mbox_client client;
+		struct mbox_chan *channel;
+	} mbox;
+};
+
+static inline struct tegra_bpmp *
+mbox_client_to_bpmp(struct mbox_client *client)
+{
+	struct tegra186_bpmp *priv;
+
+	priv = container_of(client, struct tegra186_bpmp, mbox.client);
+
+	return priv->parent;
+}
+
+static bool tegra186_bpmp_is_message_ready(struct tegra_bpmp_channel *channel)
+{
+	void *frame;
+
+	frame = tegra_ivc_read_get_next_frame(channel->ivc);
+	if (IS_ERR(frame)) {
+		channel->ib = NULL;
+		return false;
+	}
+
+	channel->ib = frame;
+
+	return true;
+}
+
+static bool tegra186_bpmp_is_channel_free(struct tegra_bpmp_channel *channel)
+{
+	void *frame;
+
+	frame = tegra_ivc_write_get_next_frame(channel->ivc);
+	if (IS_ERR(frame)) {
+		channel->ob = NULL;
+		return false;
+	}
+
+	channel->ob = frame;
+
+	return true;
+}
+
+static int tegra186_bpmp_ack_message(struct tegra_bpmp_channel *channel)
+{
+	return tegra_ivc_read_advance(channel->ivc);
+}
+
+static int tegra186_bpmp_post_message(struct tegra_bpmp_channel *channel)
+{
+	return tegra_ivc_write_advance(channel->ivc);
+}
+
+static int tegra186_bpmp_ring_doorbell(struct tegra_bpmp *bpmp)
+{
+	struct tegra186_bpmp *priv = bpmp->priv;
+	int err;
+
+	err = mbox_send_message(priv->mbox.channel, NULL);
+	if (err < 0)
+		return err;
+
+	mbox_client_txdone(priv->mbox.channel, 0);
+
+	return 0;
+}
+
+static void tegra186_bpmp_ivc_notify(struct tegra_ivc *ivc, void *data)
+{
+	struct tegra_bpmp *bpmp = data;
+	struct tegra186_bpmp *priv = bpmp->priv;
+
+	if (WARN_ON(priv->mbox.channel == NULL))
+		return;
+
+	tegra186_bpmp_ring_doorbell(bpmp);
+}
+
+static int tegra186_bpmp_channel_init(struct tegra_bpmp_channel *channel,
+				      struct tegra_bpmp *bpmp,
+				      unsigned int index)
+{
+	struct tegra186_bpmp *priv = bpmp->priv;
+	size_t message_size, queue_size;
+	unsigned int offset;
+	int err;
+
+	channel->ivc = devm_kzalloc(bpmp->dev, sizeof(*channel->ivc),
+				    GFP_KERNEL);
+	if (!channel->ivc)
+		return -ENOMEM;
+
+	message_size = tegra_ivc_align(MSG_MIN_SZ);
+	queue_size = tegra_ivc_total_queue_size(message_size);
+	offset = queue_size * index;
+
+	err = tegra_ivc_init(channel->ivc, NULL,
+			     priv->rx.virt + offset, priv->rx.phys + offset,
+			     priv->tx.virt + offset, priv->tx.phys + offset,
+			     1, message_size, tegra186_bpmp_ivc_notify,
+			     bpmp);
+	if (err < 0) {
+		dev_err(bpmp->dev, "failed to setup IVC for channel %u: %d\n",
+			index, err);
+		return err;
+	}
+
+	init_completion(&channel->completion);
+	channel->bpmp = bpmp;
+
+	return 0;
+}
+
+static void tegra186_bpmp_channel_reset(struct tegra_bpmp_channel *channel)
+{
+	/* reset the channel state */
+	tegra_ivc_reset(channel->ivc);
+
+	/* sync the channel state with BPMP */
+	while (tegra_ivc_notified(channel->ivc))
+		;
+}
+
+static void tegra186_bpmp_channel_cleanup(struct tegra_bpmp_channel *channel)
+{
+	tegra_ivc_cleanup(channel->ivc);
+}
+
+static void mbox_handle_rx(struct mbox_client *client, void *data)
+{
+	struct tegra_bpmp *bpmp = mbox_client_to_bpmp(client);
+
+	tegra_bpmp_handle_rx(bpmp);
+}
+
+static int tegra186_bpmp_init(struct tegra_bpmp *bpmp)
+{
+	struct tegra186_bpmp *priv;
+	unsigned int i;
+	int err;
+
+	priv = devm_kzalloc(bpmp->dev, sizeof(*priv), GFP_KERNEL);
+	if (!priv)
+		return -ENOMEM;
+
+	bpmp->priv = priv;
+	priv->parent = bpmp;
+
+	priv->tx.pool = of_gen_pool_get(bpmp->dev->of_node, "shmem", 0);
+	if (!priv->tx.pool) {
+		dev_err(bpmp->dev, "TX shmem pool not found\n");
+		return -ENOMEM;
+	}
+
+	priv->tx.virt = gen_pool_dma_alloc(priv->tx.pool, 4096, &priv->tx.phys);
+	if (!priv->tx.virt) {
+		dev_err(bpmp->dev, "failed to allocate from TX pool\n");
+		return -ENOMEM;
+	}
+
+	priv->rx.pool = of_gen_pool_get(bpmp->dev->of_node, "shmem", 1);
+	if (!priv->rx.pool) {
+		dev_err(bpmp->dev, "RX shmem pool not found\n");
+		err = -ENOMEM;
+		goto free_tx;
+	}
+
+	priv->rx.virt = gen_pool_dma_alloc(priv->rx.pool, 4096, &priv->rx.phys);
+	if (!priv->rx.virt) {
+		dev_err(bpmp->dev, "failed to allocate from RX pool\n");
+		err = -ENOMEM;
+		goto free_tx;
+	}
+
+	err = tegra186_bpmp_channel_init(bpmp->tx_channel, bpmp,
+					 bpmp->soc->channels.cpu_tx.offset);
+	if (err < 0)
+		goto free_rx;
+
+	err = tegra186_bpmp_channel_init(bpmp->rx_channel, bpmp,
+					 bpmp->soc->channels.cpu_rx.offset);
+	if (err < 0)
+		goto cleanup_tx_channel;
+
+	for (i = 0; i < bpmp->threaded.count; i++) {
+		unsigned int index = bpmp->soc->channels.thread.offset + i;
+
+		err = tegra186_bpmp_channel_init(&bpmp->threaded_channels[i],
+						 bpmp, index);
+		if (err < 0)
+			goto cleanup_channels;
+	}
+
+	/* mbox registration */
+	priv->mbox.client.dev = bpmp->dev;
+	priv->mbox.client.rx_callback = mbox_handle_rx;
+	priv->mbox.client.tx_block = false;
+	priv->mbox.client.knows_txdone = false;
+
+	priv->mbox.channel = mbox_request_channel(&priv->mbox.client, 0);
+	if (IS_ERR(priv->mbox.channel)) {
+		err = PTR_ERR(priv->mbox.channel);
+		dev_err(bpmp->dev, "failed to get HSP mailbox: %d\n", err);
+		goto cleanup_channels;
+	}
+
+	tegra186_bpmp_channel_reset(bpmp->tx_channel);
+	tegra186_bpmp_channel_reset(bpmp->rx_channel);
+
+	for (i = 0; i < bpmp->threaded.count; i++)
+		tegra186_bpmp_channel_reset(&bpmp->threaded_channels[i]);
+
+	return 0;
+
+cleanup_channels:
+	for (i = 0; i < bpmp->threaded.count; i++) {
+		if (!bpmp->threaded_channels[i].bpmp)
+			continue;
+
+		tegra186_bpmp_channel_cleanup(&bpmp->threaded_channels[i]);
+	}
+
+	tegra186_bpmp_channel_cleanup(bpmp->rx_channel);
+cleanup_tx_channel:
+	tegra186_bpmp_channel_cleanup(bpmp->tx_channel);
+free_rx:
+	gen_pool_free(priv->rx.pool, (unsigned long)priv->rx.virt, 4096);
+free_tx:
+	gen_pool_free(priv->tx.pool, (unsigned long)priv->tx.virt, 4096);
+
+	return err;
+}
+
+static void tegra186_bpmp_deinit(struct tegra_bpmp *bpmp)
+{
+	struct tegra186_bpmp *priv = bpmp->priv;
+	unsigned int i;
+
+	mbox_free_channel(priv->mbox.channel);
+
+	for (i = 0; i < bpmp->threaded.count; i++)
+		tegra186_bpmp_channel_cleanup(&bpmp->threaded_channels[i]);
+
+	tegra186_bpmp_channel_cleanup(bpmp->rx_channel);
+	tegra186_bpmp_channel_cleanup(bpmp->tx_channel);
+
+	gen_pool_free(priv->rx.pool, (unsigned long)priv->rx.virt, 4096);
+	gen_pool_free(priv->tx.pool, (unsigned long)priv->tx.virt, 4096);
+}
+
+static int tegra186_bpmp_resume(struct tegra_bpmp *bpmp)
+{
+	unsigned int i;
+
+	/* reset message channels */
+	tegra186_bpmp_channel_reset(bpmp->tx_channel);
+	tegra186_bpmp_channel_reset(bpmp->rx_channel);
+
+	for (i = 0; i < bpmp->threaded.count; i++)
+		tegra186_bpmp_channel_reset(&bpmp->threaded_channels[i]);
+
+	return 0;
+}
+
+const struct tegra_bpmp_ops tegra186_bpmp_ops = {
+	.init = tegra186_bpmp_init,
+	.deinit = tegra186_bpmp_deinit,
+	.is_response_ready = tegra186_bpmp_is_message_ready,
+	.is_request_ready = tegra186_bpmp_is_message_ready,
+	.ack_response = tegra186_bpmp_ack_message,
+	.ack_request = tegra186_bpmp_ack_message,
+	.is_response_channel_free = tegra186_bpmp_is_channel_free,
+	.is_request_channel_free = tegra186_bpmp_is_channel_free,
+	.post_response = tegra186_bpmp_post_message,
+	.post_request = tegra186_bpmp_post_message,
+	.ring_doorbell = tegra186_bpmp_ring_doorbell,
+	.resume = tegra186_bpmp_resume,
+};
diff --git a/drivers/firmware/tegra/bpmp-tegra210.c b/drivers/firmware/tegra/bpmp-tegra210.c
new file mode 100644
index 0000000..ae15940
--- /dev/null
+++ b/drivers/firmware/tegra/bpmp-tegra210.c
@@ -0,0 +1,243 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2018, NVIDIA CORPORATION.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+
+#include <soc/tegra/bpmp.h>
+
+#include "bpmp-private.h"
+
+#define TRIGGER_OFFSET		0x000
+#define RESULT_OFFSET(id)	(0xc00 + (id) * 4)
+#define TRIGGER_ID_SHIFT	16
+#define TRIGGER_CMD_GET		4
+
+#define STA_OFFSET		0
+#define SET_OFFSET		4
+#define CLR_OFFSET		8
+
+#define CH_MASK(ch)	(0x3 << ((ch) * 2))
+#define SL_SIGL(ch)	(0x0 << ((ch) * 2))
+#define SL_QUED(ch)	(0x1 << ((ch) * 2))
+#define MA_FREE(ch)	(0x2 << ((ch) * 2))
+#define MA_ACKD(ch)	(0x3 << ((ch) * 2))
+
+struct tegra210_bpmp {
+	void __iomem *atomics;
+	void __iomem *arb_sema;
+	struct irq_data *tx_irq_data;
+};
+
+static u32 bpmp_channel_status(struct tegra_bpmp *bpmp, unsigned int index)
+{
+	struct tegra210_bpmp *priv = bpmp->priv;
+
+	return __raw_readl(priv->arb_sema + STA_OFFSET) & CH_MASK(index);
+}
+
+static bool tegra210_bpmp_is_response_ready(struct tegra_bpmp_channel *channel)
+{
+	unsigned int index = channel->index;
+
+	return bpmp_channel_status(channel->bpmp, index) == MA_ACKD(index);
+}
+
+static bool tegra210_bpmp_is_request_ready(struct tegra_bpmp_channel *channel)
+{
+	unsigned int index = channel->index;
+
+	return bpmp_channel_status(channel->bpmp, index) == SL_SIGL(index);
+}
+
+static bool
+tegra210_bpmp_is_request_channel_free(struct tegra_bpmp_channel *channel)
+{
+	unsigned int index = channel->index;
+
+	return bpmp_channel_status(channel->bpmp, index) == MA_FREE(index);
+}
+
+static bool
+tegra210_bpmp_is_response_channel_free(struct tegra_bpmp_channel *channel)
+{
+	unsigned int index = channel->index;
+
+	return bpmp_channel_status(channel->bpmp, index) == SL_QUED(index);
+}
+
+static int tegra210_bpmp_post_request(struct tegra_bpmp_channel *channel)
+{
+	struct tegra210_bpmp *priv = channel->bpmp->priv;
+
+	__raw_writel(CH_MASK(channel->index), priv->arb_sema + CLR_OFFSET);
+
+	return 0;
+}
+
+static int tegra210_bpmp_post_response(struct tegra_bpmp_channel *channel)
+{
+	struct tegra210_bpmp *priv = channel->bpmp->priv;
+
+	__raw_writel(MA_ACKD(channel->index), priv->arb_sema + SET_OFFSET);
+
+	return 0;
+}
+
+static int tegra210_bpmp_ack_response(struct tegra_bpmp_channel *channel)
+{
+	struct tegra210_bpmp *priv = channel->bpmp->priv;
+
+	__raw_writel(MA_ACKD(channel->index) ^ MA_FREE(channel->index),
+		     priv->arb_sema + CLR_OFFSET);
+
+	return 0;
+}
+
+static int tegra210_bpmp_ack_request(struct tegra_bpmp_channel *channel)
+{
+	struct tegra210_bpmp *priv = channel->bpmp->priv;
+
+	__raw_writel(SL_QUED(channel->index), priv->arb_sema + SET_OFFSET);
+
+	return 0;
+}
+
+static int tegra210_bpmp_ring_doorbell(struct tegra_bpmp *bpmp)
+{
+	struct tegra210_bpmp *priv = bpmp->priv;
+	struct irq_data *irq_data = priv->tx_irq_data;
+
+	/*
+	 * Tegra Legacy Interrupt Controller (LIC) is used to notify BPMP of
+	 * available messages
+	 */
+	if (irq_data->chip->irq_retrigger)
+		return irq_data->chip->irq_retrigger(irq_data);
+
+	return -EINVAL;
+}
+
+static irqreturn_t rx_irq(int irq, void *data)
+{
+	struct tegra_bpmp *bpmp = data;
+
+	tegra_bpmp_handle_rx(bpmp);
+
+	return IRQ_HANDLED;
+}
+
+static int tegra210_bpmp_channel_init(struct tegra_bpmp_channel *channel,
+				      struct tegra_bpmp *bpmp,
+				      unsigned int index)
+{
+	struct tegra210_bpmp *priv = bpmp->priv;
+	u32 address;
+	void *p;
+
+	/* Retrieve channel base address from BPMP */
+	writel(index << TRIGGER_ID_SHIFT | TRIGGER_CMD_GET,
+	       priv->atomics + TRIGGER_OFFSET);
+	address = readl(priv->atomics + RESULT_OFFSET(index));
+
+	p = devm_ioremap(bpmp->dev, address, 0x80);
+	if (!p)
+		return -ENOMEM;
+
+	channel->ib = p;
+	channel->ob = p;
+	channel->index = index;
+	init_completion(&channel->completion);
+	channel->bpmp = bpmp;
+
+	return 0;
+}
+
+static int tegra210_bpmp_init(struct tegra_bpmp *bpmp)
+{
+	struct platform_device *pdev = to_platform_device(bpmp->dev);
+	struct tegra210_bpmp *priv;
+	struct resource *res;
+	unsigned int i;
+	int err;
+
+	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+	if (!priv)
+		return -ENOMEM;
+
+	bpmp->priv = priv;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	priv->atomics = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(priv->atomics))
+		return PTR_ERR(priv->atomics);
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+	priv->arb_sema = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(priv->arb_sema))
+		return PTR_ERR(priv->arb_sema);
+
+	err = tegra210_bpmp_channel_init(bpmp->tx_channel, bpmp,
+					 bpmp->soc->channels.cpu_tx.offset);
+	if (err < 0)
+		return err;
+
+	err = tegra210_bpmp_channel_init(bpmp->rx_channel, bpmp,
+					 bpmp->soc->channels.cpu_rx.offset);
+	if (err < 0)
+		return err;
+
+	for (i = 0; i < bpmp->threaded.count; i++) {
+		unsigned int index = bpmp->soc->channels.thread.offset + i;
+
+		err = tegra210_bpmp_channel_init(&bpmp->threaded_channels[i],
+						 bpmp, index);
+		if (err < 0)
+			return err;
+	}
+
+	err = platform_get_irq_byname(pdev, "tx");
+	if (err < 0) {
+		dev_err(&pdev->dev, "failed to get TX IRQ: %d\n", err);
+		return err;
+	}
+
+	priv->tx_irq_data = irq_get_irq_data(err);
+	if (!priv->tx_irq_data) {
+		dev_err(&pdev->dev, "failed to get IRQ data for TX IRQ\n");
+		return -ENOENT;
+	}
+
+	err = platform_get_irq_byname(pdev, "rx");
+	if (err < 0) {
+		dev_err(&pdev->dev, "failed to get rx IRQ: %d\n", err);
+		return err;
+	}
+
+	err = devm_request_irq(&pdev->dev, err, rx_irq,
+			       IRQF_NO_SUSPEND, dev_name(&pdev->dev), bpmp);
+	if (err < 0) {
+		dev_err(&pdev->dev, "failed to request IRQ: %d\n", err);
+		return err;
+	}
+
+	return 0;
+}
+
+const struct tegra_bpmp_ops tegra210_bpmp_ops = {
+	.init = tegra210_bpmp_init,
+	.is_response_ready = tegra210_bpmp_is_response_ready,
+	.is_request_ready = tegra210_bpmp_is_request_ready,
+	.ack_response = tegra210_bpmp_ack_response,
+	.ack_request = tegra210_bpmp_ack_request,
+	.is_response_channel_free = tegra210_bpmp_is_response_channel_free,
+	.is_request_channel_free = tegra210_bpmp_is_request_channel_free,
+	.post_response = tegra210_bpmp_post_response,
+	.post_request = tegra210_bpmp_post_request,
+	.ring_doorbell = tegra210_bpmp_ring_doorbell,
+};
diff --git a/drivers/firmware/tegra/bpmp.c b/drivers/firmware/tegra/bpmp.c
index 14a456a..19c5613 100644
--- a/drivers/firmware/tegra/bpmp.c
+++ b/drivers/firmware/tegra/bpmp.c
@@ -1,14 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * Copyright (c) 2016, NVIDIA CORPORATION.  All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
  */
 
 #include <linux/clk/tegra.h>
@@ -18,6 +10,7 @@
 #include <linux/of_address.h>
 #include <linux/of_device.h>
 #include <linux/platform_device.h>
+#include <linux/pm.h>
 #include <linux/semaphore.h>
 #include <linux/sched/clock.h>
 
@@ -25,8 +18,11 @@
 #include <soc/tegra/bpmp-abi.h>
 #include <soc/tegra/ivc.h>
 
+#include "bpmp-private.h"
+
 #define MSG_ACK		BIT(0)
 #define MSG_RING	BIT(1)
+#define TAG_SZ		32
 
 static inline struct tegra_bpmp *
 mbox_client_to_bpmp(struct mbox_client *client)
@@ -34,6 +30,14 @@
 	return container_of(client, struct tegra_bpmp, mbox.client);
 }
 
+static inline const struct tegra_bpmp_ops *
+channel_to_ops(struct tegra_bpmp_channel *channel)
+{
+	struct tegra_bpmp *bpmp = channel->bpmp;
+
+	return bpmp->soc->ops;
+}
+
 struct tegra_bpmp *tegra_bpmp_get(struct device *dev)
 {
 	struct platform_device *pdev;
@@ -94,22 +98,21 @@
 	       (msg->rx.size == 0 || msg->rx.data);
 }
 
-static bool tegra_bpmp_master_acked(struct tegra_bpmp_channel *channel)
+static bool tegra_bpmp_is_response_ready(struct tegra_bpmp_channel *channel)
 {
-	void *frame;
+	const struct tegra_bpmp_ops *ops = channel_to_ops(channel);
 
-	frame = tegra_ivc_read_get_next_frame(channel->ivc);
-	if (IS_ERR(frame)) {
-		channel->ib = NULL;
-		return false;
-	}
-
-	channel->ib = frame;
-
-	return true;
+	return ops->is_response_ready(channel);
 }
 
-static int tegra_bpmp_wait_ack(struct tegra_bpmp_channel *channel)
+static bool tegra_bpmp_is_request_ready(struct tegra_bpmp_channel *channel)
+{
+	const struct tegra_bpmp_ops *ops = channel_to_ops(channel);
+
+	return ops->is_request_ready(channel);
+}
+
+static int tegra_bpmp_wait_response(struct tegra_bpmp_channel *channel)
 {
 	unsigned long timeout = channel->bpmp->soc->channels.cpu_tx.timeout;
 	ktime_t end;
@@ -117,29 +120,45 @@
 	end = ktime_add_us(ktime_get(), timeout);
 
 	do {
-		if (tegra_bpmp_master_acked(channel))
+		if (tegra_bpmp_is_response_ready(channel))
 			return 0;
 	} while (ktime_before(ktime_get(), end));
 
 	return -ETIMEDOUT;
 }
 
-static bool tegra_bpmp_master_free(struct tegra_bpmp_channel *channel)
+static int tegra_bpmp_ack_response(struct tegra_bpmp_channel *channel)
 {
-	void *frame;
+	const struct tegra_bpmp_ops *ops = channel_to_ops(channel);
 
-	frame = tegra_ivc_write_get_next_frame(channel->ivc);
-	if (IS_ERR(frame)) {
-		channel->ob = NULL;
-		return false;
-	}
-
-	channel->ob = frame;
-
-	return true;
+	return ops->ack_response(channel);
 }
 
-static int tegra_bpmp_wait_master_free(struct tegra_bpmp_channel *channel)
+static int tegra_bpmp_ack_request(struct tegra_bpmp_channel *channel)
+{
+	const struct tegra_bpmp_ops *ops = channel_to_ops(channel);
+
+	return ops->ack_request(channel);
+}
+
+static bool
+tegra_bpmp_is_request_channel_free(struct tegra_bpmp_channel *channel)
+{
+	const struct tegra_bpmp_ops *ops = channel_to_ops(channel);
+
+	return ops->is_request_channel_free(channel);
+}
+
+static bool
+tegra_bpmp_is_response_channel_free(struct tegra_bpmp_channel *channel)
+{
+	const struct tegra_bpmp_ops *ops = channel_to_ops(channel);
+
+	return ops->is_response_channel_free(channel);
+}
+
+static int
+tegra_bpmp_wait_request_channel_free(struct tegra_bpmp_channel *channel)
 {
 	unsigned long timeout = channel->bpmp->soc->channels.cpu_tx.timeout;
 	ktime_t start, now;
@@ -147,7 +166,7 @@
 	start = ns_to_ktime(local_clock());
 
 	do {
-		if (tegra_bpmp_master_free(channel))
+		if (tegra_bpmp_is_request_channel_free(channel))
 			return 0;
 
 		now = ns_to_ktime(local_clock());
@@ -156,6 +175,25 @@
 	return -ETIMEDOUT;
 }
 
+static int tegra_bpmp_post_request(struct tegra_bpmp_channel *channel)
+{
+	const struct tegra_bpmp_ops *ops = channel_to_ops(channel);
+
+	return ops->post_request(channel);
+}
+
+static int tegra_bpmp_post_response(struct tegra_bpmp_channel *channel)
+{
+	const struct tegra_bpmp_ops *ops = channel_to_ops(channel);
+
+	return ops->post_response(channel);
+}
+
+static int tegra_bpmp_ring_doorbell(struct tegra_bpmp *bpmp)
+{
+	return bpmp->soc->ops->ring_doorbell(bpmp);
+}
+
 static ssize_t __tegra_bpmp_channel_read(struct tegra_bpmp_channel *channel,
 					 void *data, size_t size, int *ret)
 {
@@ -164,7 +202,7 @@
 	if (data && size > 0)
 		memcpy(data, channel->ib->data, size);
 
-	err = tegra_ivc_read_advance(channel->ivc);
+	err = tegra_bpmp_ack_response(channel);
 	if (err < 0)
 		return err;
 
@@ -208,7 +246,7 @@
 	if (data && size > 0)
 		memcpy(channel->ob->data, data, size);
 
-	return tegra_ivc_write_advance(channel->ivc);
+	return tegra_bpmp_post_request(channel);
 }
 
 static struct tegra_bpmp_channel *
@@ -236,7 +274,7 @@
 
 	channel = &bpmp->threaded_channels[index];
 
-	if (!tegra_bpmp_master_free(channel)) {
+	if (!tegra_bpmp_is_request_channel_free(channel)) {
 		err = -EBUSY;
 		goto unlock;
 	}
@@ -268,7 +306,7 @@
 {
 	int err;
 
-	err = tegra_bpmp_wait_master_free(channel);
+	err = tegra_bpmp_wait_request_channel_free(channel);
 	if (err < 0)
 		return err;
 
@@ -300,13 +338,11 @@
 
 	spin_unlock(&bpmp->atomic_tx_lock);
 
-	err = mbox_send_message(bpmp->mbox.channel, NULL);
+	err = tegra_bpmp_ring_doorbell(bpmp);
 	if (err < 0)
 		return err;
 
-	mbox_client_txdone(bpmp->mbox.channel, 0);
-
-	err = tegra_bpmp_wait_ack(channel);
+	err = tegra_bpmp_wait_response(channel);
 	if (err < 0)
 		return err;
 
@@ -333,12 +369,10 @@
 	if (IS_ERR(channel))
 		return PTR_ERR(channel);
 
-	err = mbox_send_message(bpmp->mbox.channel, NULL);
+	err = tegra_bpmp_ring_doorbell(bpmp);
 	if (err < 0)
 		return err;
 
-	mbox_client_txdone(bpmp->mbox.channel, 0);
-
 	timeout = usecs_to_jiffies(bpmp->soc->channels.thread.timeout);
 
 	err = wait_for_completion_timeout(&channel->completion, timeout);
@@ -367,38 +401,34 @@
 {
 	unsigned long flags = channel->ib->flags;
 	struct tegra_bpmp *bpmp = channel->bpmp;
-	struct tegra_bpmp_mb_data *frame;
 	int err;
 
 	if (WARN_ON(size > MSG_DATA_MIN_SZ))
 		return;
 
-	err = tegra_ivc_read_advance(channel->ivc);
+	err = tegra_bpmp_ack_request(channel);
 	if (WARN_ON(err < 0))
 		return;
 
 	if ((flags & MSG_ACK) == 0)
 		return;
 
-	frame = tegra_ivc_write_get_next_frame(channel->ivc);
-	if (WARN_ON(IS_ERR(frame)))
+	if (WARN_ON(!tegra_bpmp_is_response_channel_free(channel)))
 		return;
 
-	frame->code = code;
+	channel->ob->code = code;
 
 	if (data && size > 0)
-		memcpy(frame->data, data, size);
+		memcpy(channel->ob->data, data, size);
 
-	err = tegra_ivc_write_advance(channel->ivc);
+	err = tegra_bpmp_post_response(channel);
 	if (WARN_ON(err < 0))
 		return;
 
 	if (flags & MSG_RING) {
-		err = mbox_send_message(bpmp->mbox.channel, NULL);
+		err = tegra_bpmp_ring_doorbell(bpmp);
 		if (WARN_ON(err < 0))
 			return;
-
-		mbox_client_txdone(bpmp->mbox.channel, 0);
 	}
 }
 EXPORT_SYMBOL_GPL(tegra_bpmp_mrq_return);
@@ -469,6 +499,31 @@
 }
 EXPORT_SYMBOL_GPL(tegra_bpmp_free_mrq);
 
+bool tegra_bpmp_mrq_is_supported(struct tegra_bpmp *bpmp, unsigned int mrq)
+{
+	struct mrq_query_abi_request req = { .mrq = cpu_to_le32(mrq) };
+	struct mrq_query_abi_response resp;
+	struct tegra_bpmp_message msg = {
+		.mrq = MRQ_QUERY_ABI,
+		.tx = {
+			.data = &req,
+			.size = sizeof(req),
+		},
+		.rx = {
+			.data = &resp,
+			.size = sizeof(resp),
+		},
+	};
+	int ret;
+
+	ret = tegra_bpmp_transfer(bpmp, &msg);
+	if (ret || msg.rx.ret)
+		return false;
+
+	return resp.status == 0;
+}
+EXPORT_SYMBOL_GPL(tegra_bpmp_mrq_is_supported);
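
For illustration only, not part of this patch: a sketch of how a BPMP client driver might
use the helper exported above; the surrounding driver and the "nvidia,bpmp" phandle
lookup performed by tegra_bpmp_get() are assumptions of the sketch.

/* Hypothetical caller sketch -- not part of this patch. */
static int example_query_ping_support(struct device *dev)
{
	struct tegra_bpmp *bpmp;

	/* assumes the device node carries an "nvidia,bpmp" phandle */
	bpmp = tegra_bpmp_get(dev);
	if (IS_ERR(bpmp))
		return PTR_ERR(bpmp);

	if (!tegra_bpmp_mrq_is_supported(bpmp, MRQ_PING))
		dev_info(dev, "BPMP firmware does not implement MRQ_PING\n");

	tegra_bpmp_put(bpmp);

	return 0;
}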
+
 static void tegra_bpmp_mrq_handle_ping(unsigned int mrq,
 				       struct tegra_bpmp_channel *channel,
 				       void *data)
@@ -520,8 +575,9 @@
 	return err;
 }
 
-static int tegra_bpmp_get_firmware_tag(struct tegra_bpmp *bpmp, char *tag,
-				       size_t size)
+/* deprecated version of tag query */
+static int tegra_bpmp_get_firmware_tag_old(struct tegra_bpmp *bpmp, char *tag,
+					   size_t size)
 {
 	struct mrq_query_tag_request request;
 	struct tegra_bpmp_message msg;
@@ -530,7 +586,10 @@
 	void *virt;
 	int err;
 
-	virt = dma_alloc_coherent(bpmp->dev, MSG_DATA_MIN_SZ, &phys,
+	if (size != TAG_SZ)
+		return -EINVAL;
+
+	virt = dma_alloc_coherent(bpmp->dev, TAG_SZ, &phys,
 				  GFP_KERNEL | GFP_DMA32);
 	if (!virt)
 		return -ENOMEM;
@@ -548,13 +607,44 @@
 	local_irq_restore(flags);
 
 	if (err == 0)
-		strlcpy(tag, virt, size);
+		memcpy(tag, virt, TAG_SZ);
 
-	dma_free_coherent(bpmp->dev, MSG_DATA_MIN_SZ, virt, phys);
+	dma_free_coherent(bpmp->dev, TAG_SZ, virt, phys);
 
 	return err;
 }
 
+static int tegra_bpmp_get_firmware_tag(struct tegra_bpmp *bpmp, char *tag,
+				       size_t size)
+{
+	if (tegra_bpmp_mrq_is_supported(bpmp, MRQ_QUERY_FW_TAG)) {
+		struct mrq_query_fw_tag_response resp;
+		struct tegra_bpmp_message msg = {
+			.mrq = MRQ_QUERY_FW_TAG,
+			.rx = {
+				.data = &resp,
+				.size = sizeof(resp),
+			},
+		};
+		int err;
+
+		if (size != sizeof(resp.tag))
+			return -EINVAL;
+
+		err = tegra_bpmp_transfer(bpmp, &msg);
+
+		if (err)
+			return err;
+		if (msg.rx.ret < 0)
+			return -EINVAL;
+
+		memcpy(tag, resp.tag, sizeof(resp.tag));
+		return 0;
+	}
+
+	return tegra_bpmp_get_firmware_tag_old(bpmp, tag, size);
+}
+
 static void tegra_bpmp_channel_signal(struct tegra_bpmp_channel *channel)
 {
 	unsigned long flags = channel->ob->flags;
@@ -565,9 +655,8 @@
 	complete(&channel->completion);
 }
 
-static void tegra_bpmp_handle_rx(struct mbox_client *client, void *data)
+void tegra_bpmp_handle_rx(struct tegra_bpmp *bpmp)
 {
-	struct tegra_bpmp *bpmp = mbox_client_to_bpmp(client);
 	struct tegra_bpmp_channel *channel;
 	unsigned int i, count;
 	unsigned long *busy;
@@ -576,7 +665,7 @@
 	count = bpmp->soc->channels.thread.count;
 	busy = bpmp->threaded.busy;
 
-	if (tegra_bpmp_master_acked(channel))
+	if (tegra_bpmp_is_request_ready(channel))
 		tegra_bpmp_handle_mrq(bpmp, channel->ib->code, channel);
 
 	spin_lock(&bpmp->lock);
@@ -586,7 +675,7 @@
 
 		channel = &bpmp->threaded_channels[i];
 
-		if (tegra_bpmp_master_acked(channel)) {
+		if (tegra_bpmp_is_response_ready(channel)) {
 			tegra_bpmp_channel_signal(channel);
 			clear_bit(i, busy);
 		}
@@ -595,75 +684,10 @@
 	spin_unlock(&bpmp->lock);
 }
 
-static void tegra_bpmp_ivc_notify(struct tegra_ivc *ivc, void *data)
-{
-	struct tegra_bpmp *bpmp = data;
-	int err;
-
-	if (WARN_ON(bpmp->mbox.channel == NULL))
-		return;
-
-	err = mbox_send_message(bpmp->mbox.channel, NULL);
-	if (err < 0)
-		return;
-
-	mbox_client_txdone(bpmp->mbox.channel, 0);
-}
-
-static int tegra_bpmp_channel_init(struct tegra_bpmp_channel *channel,
-				   struct tegra_bpmp *bpmp,
-				   unsigned int index)
-{
-	size_t message_size, queue_size;
-	unsigned int offset;
-	int err;
-
-	channel->ivc = devm_kzalloc(bpmp->dev, sizeof(*channel->ivc),
-				    GFP_KERNEL);
-	if (!channel->ivc)
-		return -ENOMEM;
-
-	message_size = tegra_ivc_align(MSG_MIN_SZ);
-	queue_size = tegra_ivc_total_queue_size(message_size);
-	offset = queue_size * index;
-
-	err = tegra_ivc_init(channel->ivc, NULL,
-			     bpmp->rx.virt + offset, bpmp->rx.phys + offset,
-			     bpmp->tx.virt + offset, bpmp->tx.phys + offset,
-			     1, message_size, tegra_bpmp_ivc_notify,
-			     bpmp);
-	if (err < 0) {
-		dev_err(bpmp->dev, "failed to setup IVC for channel %u: %d\n",
-			index, err);
-		return err;
-	}
-
-	init_completion(&channel->completion);
-	channel->bpmp = bpmp;
-
-	return 0;
-}
-
-static void tegra_bpmp_channel_reset(struct tegra_bpmp_channel *channel)
-{
-	/* reset the channel state */
-	tegra_ivc_reset(channel->ivc);
-
-	/* sync the channel state with BPMP */
-	while (tegra_ivc_notified(channel->ivc))
-		;
-}
-
-static void tegra_bpmp_channel_cleanup(struct tegra_bpmp_channel *channel)
-{
-	tegra_ivc_cleanup(channel->ivc);
-}
-
 static int tegra_bpmp_probe(struct platform_device *pdev)
 {
 	struct tegra_bpmp *bpmp;
-	unsigned int i;
-	char tag[32];
+	char tag[TAG_SZ];
 	size_t size;
 	int err;
 
@@ -674,32 +698,6 @@
 	bpmp->soc = of_device_get_match_data(&pdev->dev);
 	bpmp->dev = &pdev->dev;
 
-	bpmp->tx.pool = of_gen_pool_get(pdev->dev.of_node, "shmem", 0);
-	if (!bpmp->tx.pool) {
-		dev_err(&pdev->dev, "TX shmem pool not found\n");
-		return -ENOMEM;
-	}
-
-	bpmp->tx.virt = gen_pool_dma_alloc(bpmp->tx.pool, 4096, &bpmp->tx.phys);
-	if (!bpmp->tx.virt) {
-		dev_err(&pdev->dev, "failed to allocate from TX pool\n");
-		return -ENOMEM;
-	}
-
-	bpmp->rx.pool = of_gen_pool_get(pdev->dev.of_node, "shmem", 1);
-	if (!bpmp->rx.pool) {
-		dev_err(&pdev->dev, "RX shmem pool not found\n");
-		err = -ENOMEM;
-		goto free_tx;
-	}
-
-	bpmp->rx.virt = gen_pool_dma_alloc(bpmp->rx.pool, 4096, &bpmp->rx.phys);
-	if (!bpmp->rx.virt) {
-		dev_err(&pdev->dev, "failed to allocate from RX pool\n");
-		err = -ENOMEM;
-		goto free_tx;
-	}
-
 	INIT_LIST_HEAD(&bpmp->mrqs);
 	spin_lock_init(&bpmp->lock);
 
@@ -709,81 +707,38 @@
 	size = BITS_TO_LONGS(bpmp->threaded.count) * sizeof(long);
 
 	bpmp->threaded.allocated = devm_kzalloc(&pdev->dev, size, GFP_KERNEL);
-	if (!bpmp->threaded.allocated) {
-		err = -ENOMEM;
-		goto free_rx;
-	}
+	if (!bpmp->threaded.allocated)
+		return -ENOMEM;
 
 	bpmp->threaded.busy = devm_kzalloc(&pdev->dev, size, GFP_KERNEL);
-	if (!bpmp->threaded.busy) {
-		err = -ENOMEM;
-		goto free_rx;
-	}
+	if (!bpmp->threaded.busy)
+		return -ENOMEM;
 
 	spin_lock_init(&bpmp->atomic_tx_lock);
 	bpmp->tx_channel = devm_kzalloc(&pdev->dev, sizeof(*bpmp->tx_channel),
 					GFP_KERNEL);
-	if (!bpmp->tx_channel) {
-		err = -ENOMEM;
-		goto free_rx;
-	}
+	if (!bpmp->tx_channel)
+		return -ENOMEM;
 
 	bpmp->rx_channel = devm_kzalloc(&pdev->dev, sizeof(*bpmp->rx_channel),
 	                                GFP_KERNEL);
-	if (!bpmp->rx_channel) {
-		err = -ENOMEM;
-		goto free_rx;
-	}
+	if (!bpmp->rx_channel)
+		return -ENOMEM;
 
 	bpmp->threaded_channels = devm_kcalloc(&pdev->dev, bpmp->threaded.count,
 					       sizeof(*bpmp->threaded_channels),
 					       GFP_KERNEL);
-	if (!bpmp->threaded_channels) {
-		err = -ENOMEM;
-		goto free_rx;
-	}
+	if (!bpmp->threaded_channels)
+		return -ENOMEM;
 
-	err = tegra_bpmp_channel_init(bpmp->tx_channel, bpmp,
-				      bpmp->soc->channels.cpu_tx.offset);
+	err = bpmp->soc->ops->init(bpmp);
 	if (err < 0)
-		goto free_rx;
-
-	err = tegra_bpmp_channel_init(bpmp->rx_channel, bpmp,
-				      bpmp->soc->channels.cpu_rx.offset);
-	if (err < 0)
-		goto cleanup_tx_channel;
-
-	for (i = 0; i < bpmp->threaded.count; i++) {
-		err = tegra_bpmp_channel_init(
-			&bpmp->threaded_channels[i], bpmp,
-			bpmp->soc->channels.thread.offset + i);
-		if (err < 0)
-			goto cleanup_threaded_channels;
-	}
-
-	/* mbox registration */
-	bpmp->mbox.client.dev = &pdev->dev;
-	bpmp->mbox.client.rx_callback = tegra_bpmp_handle_rx;
-	bpmp->mbox.client.tx_block = false;
-	bpmp->mbox.client.knows_txdone = false;
-
-	bpmp->mbox.channel = mbox_request_channel(&bpmp->mbox.client, 0);
-	if (IS_ERR(bpmp->mbox.channel)) {
-		err = PTR_ERR(bpmp->mbox.channel);
-		dev_err(&pdev->dev, "failed to get HSP mailbox: %d\n", err);
-		goto cleanup_threaded_channels;
-	}
-
-	/* reset message channels */
-	tegra_bpmp_channel_reset(bpmp->tx_channel);
-	tegra_bpmp_channel_reset(bpmp->rx_channel);
-	for (i = 0; i < bpmp->threaded.count; i++)
-		tegra_bpmp_channel_reset(&bpmp->threaded_channels[i]);
+		return err;
 
 	err = tegra_bpmp_request_mrq(bpmp, MRQ_PING,
 				     tegra_bpmp_mrq_handle_ping, bpmp);
 	if (err < 0)
-		goto free_mbox;
+		goto deinit;
 
 	err = tegra_bpmp_ping(bpmp);
 	if (err < 0) {
@@ -791,13 +746,13 @@
 		goto free_mrq;
 	}
 
-	err = tegra_bpmp_get_firmware_tag(bpmp, tag, sizeof(tag) - 1);
+	err = tegra_bpmp_get_firmware_tag(bpmp, tag, sizeof(tag));
 	if (err < 0) {
 		dev_err(&pdev->dev, "failed to get firmware tag: %d\n", err);
 		goto free_mrq;
 	}
 
-	dev_info(&pdev->dev, "firmware: %s\n", tag);
+	dev_info(&pdev->dev, "firmware: %.*s\n", (int)sizeof(tag), tag);
 
 	platform_set_drvdata(pdev, bpmp);
 
@@ -805,17 +760,23 @@
 	if (err < 0)
 		goto free_mrq;
 
-	err = tegra_bpmp_init_clocks(bpmp);
-	if (err < 0)
-		goto free_mrq;
+	if (of_find_property(pdev->dev.of_node, "#clock-cells", NULL)) {
+		err = tegra_bpmp_init_clocks(bpmp);
+		if (err < 0)
+			goto free_mrq;
+	}
 
-	err = tegra_bpmp_init_resets(bpmp);
-	if (err < 0)
-		goto free_mrq;
+	if (of_find_property(pdev->dev.of_node, "#reset-cells", NULL)) {
+		err = tegra_bpmp_init_resets(bpmp);
+		if (err < 0)
+			goto free_mrq;
+	}
 
-	err = tegra_bpmp_init_powergates(bpmp);
-	if (err < 0)
-		goto free_mrq;
+	if (of_find_property(pdev->dev.of_node, "#power-domain-cells", NULL)) {
+		err = tegra_bpmp_init_powergates(bpmp);
+		if (err < 0)
+			goto free_mrq;
+	}
 
 	err = tegra_bpmp_init_debugfs(bpmp);
 	if (err < 0)
@@ -825,24 +786,29 @@
 
 free_mrq:
 	tegra_bpmp_free_mrq(bpmp, MRQ_PING, bpmp);
-free_mbox:
-	mbox_free_channel(bpmp->mbox.channel);
-cleanup_threaded_channels:
-	for (i = 0; i < bpmp->threaded.count; i++) {
-		if (bpmp->threaded_channels[i].bpmp)
-			tegra_bpmp_channel_cleanup(&bpmp->threaded_channels[i]);
-	}
+deinit:
+	if (bpmp->soc->ops->deinit)
+		bpmp->soc->ops->deinit(bpmp);
 
-	tegra_bpmp_channel_cleanup(bpmp->rx_channel);
-cleanup_tx_channel:
-	tegra_bpmp_channel_cleanup(bpmp->tx_channel);
-free_rx:
-	gen_pool_free(bpmp->rx.pool, (unsigned long)bpmp->rx.virt, 4096);
-free_tx:
-	gen_pool_free(bpmp->tx.pool, (unsigned long)bpmp->tx.virt, 4096);
 	return err;
 }
 
+static int __maybe_unused tegra_bpmp_resume(struct device *dev)
+{
+	struct tegra_bpmp *bpmp = dev_get_drvdata(dev);
+
+	if (bpmp->soc->ops->resume)
+		return bpmp->soc->ops->resume(bpmp);
+	else
+		return 0;
+}
+
+static const struct dev_pm_ops tegra_bpmp_pm_ops = {
+	.resume_early = tegra_bpmp_resume,
+};
+
+#if IS_ENABLED(CONFIG_ARCH_TEGRA_186_SOC) || \
+    IS_ENABLED(CONFIG_ARCH_TEGRA_194_SOC)
 static const struct tegra_bpmp_soc tegra186_soc = {
 	.channels = {
 		.cpu_tx = {
@@ -859,11 +825,42 @@
 			.timeout = 0,
 		},
 	},
+	.ops = &tegra186_bpmp_ops,
 	.num_resets = 193,
 };
+#endif
+
+#if IS_ENABLED(CONFIG_ARCH_TEGRA_210_SOC)
+static const struct tegra_bpmp_soc tegra210_soc = {
+	.channels = {
+		.cpu_tx = {
+			.offset = 0,
+			.count = 1,
+			.timeout = 60 * USEC_PER_SEC,
+		},
+		.thread = {
+			.offset = 4,
+			.count = 1,
+			.timeout = 600 * USEC_PER_SEC,
+		},
+		.cpu_rx = {
+			.offset = 8,
+			.count = 1,
+			.timeout = 0,
+		},
+	},
+	.ops = &tegra210_bpmp_ops,
+};
+#endif
 
 static const struct of_device_id tegra_bpmp_match[] = {
+#if IS_ENABLED(CONFIG_ARCH_TEGRA_186_SOC) || \
+    IS_ENABLED(CONFIG_ARCH_TEGRA_194_SOC)
 	{ .compatible = "nvidia,tegra186-bpmp", .data = &tegra186_soc },
+#endif
+#if IS_ENABLED(CONFIG_ARCH_TEGRA_210_SOC)
+	{ .compatible = "nvidia,tegra210-bpmp", .data = &tegra210_soc },
+#endif
 	{ }
 };
 
@@ -871,6 +868,7 @@
 	.driver = {
 		.name = "tegra-bpmp",
 		.of_match_table = tegra_bpmp_match,
+		.pm = &tegra_bpmp_pm_ops,
 	},
 	.probe = tegra_bpmp_probe,
 };
diff --git a/drivers/firmware/tegra/ivc.c b/drivers/firmware/tegra/ivc.c
index 00de793..e2398cd 100644
--- a/drivers/firmware/tegra/ivc.c
+++ b/drivers/firmware/tegra/ivc.c
@@ -1,14 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * Copyright (c) 2014-2016, NVIDIA CORPORATION.  All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
  */
 
 #include <soc/tegra/ivc.h>
diff --git a/drivers/firmware/ti_sci.c b/drivers/firmware/ti_sci.c
index 7fa7447..4126be9 100644
--- a/drivers/firmware/ti_sci.c
+++ b/drivers/firmware/ti_sci.c
@@ -65,18 +65,36 @@
 };
 
 /**
+ * struct ti_sci_rm_type_map - Structure representing TISCI Resource
+ *				management representation of dev_ids.
+ * @dev_id:	TISCI device ID
+ * @type:	Corresponding id as identified by TISCI RM.
+ *
+ * Note: This is used only as a workaround for using RM range APIs
+ *	for the AM654 SoC. For future SoCs, dev_id will be used as the type
+ *	for RM range APIs. In order to maintain ABI backward compatibility,
+ *	the type is not being changed for the AM654 SoC.
+ */
+struct ti_sci_rm_type_map {
+	u32 dev_id;
+	u16 type;
+};
+
+/**
  * struct ti_sci_desc - Description of SoC integration
- * @host_id:		Host identifier representing the compute entity
+ * @default_host_id:	Host identifier representing the compute entity
  * @max_rx_timeout_ms:	Timeout for communication with SoC (in Milliseconds)
  * @max_msgs: Maximum number of messages that can be pending
  *		  simultaneously in the system
  * @max_msg_size: Maximum size of data per message that can be handled.
+ * @rm_type_map: RM resource type mapping structure.
  */
 struct ti_sci_desc {
-	u8 host_id;
+	u8 default_host_id;
 	int max_rx_timeout_ms;
 	int max_msgs;
 	int max_msg_size;
+	struct ti_sci_rm_type_map *rm_type_map;
 };
 
 /**
@@ -94,6 +112,7 @@
  * @chan_rx:	Receive mailbox channel
  * @minfo:	Message info
  * @node:	list head
+ * @host_id:	Host ID
  * @users:	Number of users of this instance
  */
 struct ti_sci_info {
@@ -110,6 +129,7 @@
 	struct mbox_chan *chan_rx;
 	struct ti_sci_xfers_info minfo;
 	struct list_head node;
+	u8 host_id;
 	/* protected by ti_sci_list_mutex */
 	int users;
 
@@ -144,25 +164,8 @@
 	return 0;
 }
 
-/**
- * ti_sci_debug_open() - debug file open
- * @inode:	inode pointer
- * @file:	file pointer
- *
- * Return: result of single_open
- */
-static int ti_sci_debug_open(struct inode *inode, struct file *file)
-{
-	return single_open(file, ti_sci_debug_show, inode->i_private);
-}
-
-/* log file operations */
-static const struct file_operations ti_sci_debug_fops = {
-	.open = ti_sci_debug_open,
-	.read = seq_read,
-	.llseek = seq_lseek,
-	.release = single_release,
-};
+/* Provide the log file operations interface */
+DEFINE_SHOW_ATTRIBUTE(ti_sci_debug);
 
 /**
  * ti_sci_debugfs_create() - Create log debug file
@@ -370,7 +373,7 @@
 
 	hdr->seq = xfer_id;
 	hdr->type = msg_type;
-	hdr->host = info->desc->host_id;
+	hdr->host = info->host_id;
 	hdr->flags = msg_flags;
 
 	return xfer;
@@ -463,9 +466,9 @@
 	struct ti_sci_xfer *xfer;
 	int ret;
 
-	/* No need to setup flags since it is expected to respond */
 	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_VERSION,
-				   0x0, sizeof(struct ti_sci_msg_hdr),
+				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
+				   sizeof(struct ti_sci_msg_hdr),
 				   sizeof(*rev_info));
 	if (IS_ERR(xfer)) {
 		ret = PTR_ERR(xfer);
@@ -593,9 +596,9 @@
 	info = handle_to_ti_sci_info(handle);
 	dev = info->dev;
 
-	/* Response is expected, so need of any flags */
 	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_GET_DEVICE_STATE,
-				   0, sizeof(*req), sizeof(*resp));
+				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
+				   sizeof(*req), sizeof(*resp));
 	if (IS_ERR(xfer)) {
 		ret = PTR_ERR(xfer);
 		dev_err(dev, "Message alloc failed(%d)\n", ret);
@@ -632,6 +635,7 @@
 
 /**
  * ti_sci_cmd_get_device() - command to request for device managed by TISCI
+ *			     that can be shared with other hosts.
  * @handle:	Pointer to TISCI handle as retrieved by *ti_sci_get_handle
  * @id:		Device Identifier
  *
@@ -639,12 +643,30 @@
  * usage count by balancing get_device with put_device. No refcounting is
  * managed by driver for that purpose.
  *
- * NOTE: The request is for exclusive access for the processor.
- *
  * Return: 0 if all went fine, else return appropriate error.
  */
 static int ti_sci_cmd_get_device(const struct ti_sci_handle *handle, u32 id)
 {
+	return ti_sci_set_device_state(handle, id, 0,
+				       MSG_DEVICE_SW_STATE_ON);
+}
+
+/**
+ * ti_sci_cmd_get_device_exclusive() - command to request for device managed by
+ *				       TISCI that is exclusively owned by the
+ *				       requesting host.
+ * @handle:	Pointer to TISCI handle as retrieved by *ti_sci_get_handle
+ * @id:		Device Identifier
+ *
+ * Request for the device - NOTE: the client MUST maintain integrity of
+ * usage count by balancing get_device with put_device. No refcounting is
+ * managed by driver for that purpose.
+ *
+ * Return: 0 if all went fine, else return appropriate error.
+ */
+static int ti_sci_cmd_get_device_exclusive(const struct ti_sci_handle *handle,
+					   u32 id)
+{
 	return ti_sci_set_device_state(handle, id,
 				       MSG_FLAG_DEVICE_EXCLUSIVE,
 				       MSG_DEVICE_SW_STATE_ON);
@@ -663,6 +685,26 @@
  */
 static int ti_sci_cmd_idle_device(const struct ti_sci_handle *handle, u32 id)
 {
+	return ti_sci_set_device_state(handle, id, 0,
+				       MSG_DEVICE_SW_STATE_RETENTION);
+}
+
+/**
+ * ti_sci_cmd_idle_device_exclusive() - Command to idle a device managed by
+ *					TISCI that is exclusively owned by
+ *					requesting host.
+ * @handle:	Pointer to TISCI handle as retrieved by *ti_sci_get_handle
+ * @id:		Device Identifier
+ *
+ * Request for the device - NOTE: the client MUST maintain integrity of
+ * usage count by balancing get_device with put_device. No refcounting is
+ * managed by driver for that purpose.
+ *
+ * Return: 0 if all went fine, else return appropriate error.
+ */
+static int ti_sci_cmd_idle_device_exclusive(const struct ti_sci_handle *handle,
+					    u32 id)
+{
 	return ti_sci_set_device_state(handle, id,
 				       MSG_FLAG_DEVICE_EXCLUSIVE,
 				       MSG_DEVICE_SW_STATE_RETENTION);
@@ -913,7 +955,7 @@
  * Return: 0 if all went well, else returns appropriate error value.
  */
 static int ti_sci_set_clock_state(const struct ti_sci_handle *handle,
-				  u32 dev_id, u8 clk_id,
+				  u32 dev_id, u32 clk_id,
 				  u32 flags, u8 state)
 {
 	struct ti_sci_info *info;
@@ -941,7 +983,12 @@
 	}
 	req = (struct ti_sci_msg_req_set_clock_state *)xfer->xfer_buf;
 	req->dev_id = dev_id;
-	req->clk_id = clk_id;
+	if (clk_id < 255) {
+		req->clk_id = clk_id;
+	} else {
+		req->clk_id = 255;
+		req->clk_id_32 = clk_id;
+	}
 	req->request_state = state;
 
 	ret = ti_sci_do_xfer(info, xfer);
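
A short aside, not part of this patch: the pattern repeated across these ti_sci hunks
encodes clock identifiers wider than 8 bits by writing the sentinel 255 into the legacy
u8 field and the full value into the new 32-bit field. A hypothetical helper capturing
the rule, assuming the request layout shown in this hunk, might look like this:

/* Hypothetical helper -- not part of this patch. */
static void ti_sci_example_set_clk_id(struct ti_sci_msg_req_set_clock_state *req,
				      u32 clk_id)
{
	if (clk_id < 255) {
		req->clk_id = clk_id;	/* fits in the legacy u8 field */
	} else {
		req->clk_id = 255;	/* sentinel: firmware reads clk_id_32 */
		req->clk_id_32 = clk_id;
	}
}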
@@ -973,7 +1020,7 @@
  * Return: 0 if all went well, else returns appropriate error value.
  */
 static int ti_sci_cmd_get_clock_state(const struct ti_sci_handle *handle,
-				      u32 dev_id, u8 clk_id,
+				      u32 dev_id, u32 clk_id,
 				      u8 *programmed_state, u8 *current_state)
 {
 	struct ti_sci_info *info;
@@ -1004,7 +1051,12 @@
 	}
 	req = (struct ti_sci_msg_req_get_clock_state *)xfer->xfer_buf;
 	req->dev_id = dev_id;
-	req->clk_id = clk_id;
+	if (clk_id < 255) {
+		req->clk_id = clk_id;
+	} else {
+		req->clk_id = 255;
+		req->clk_id_32 = clk_id;
+	}
 
 	ret = ti_sci_do_xfer(info, xfer);
 	if (ret) {
@@ -1044,8 +1096,8 @@
  * Return: 0 if all went well, else returns appropriate error value.
  */
 static int ti_sci_cmd_get_clock(const struct ti_sci_handle *handle, u32 dev_id,
-				u8 clk_id, bool needs_ssc, bool can_change_freq,
-				bool enable_input_term)
+				u32 clk_id, bool needs_ssc,
+				bool can_change_freq, bool enable_input_term)
 {
 	u32 flags = 0;
 
@@ -1070,7 +1122,7 @@
  * Return: 0 if all went well, else returns appropriate error value.
  */
 static int ti_sci_cmd_idle_clock(const struct ti_sci_handle *handle,
-				 u32 dev_id, u8 clk_id)
+				 u32 dev_id, u32 clk_id)
 {
 	return ti_sci_set_clock_state(handle, dev_id, clk_id, 0,
 				      MSG_CLOCK_SW_STATE_UNREQ);
@@ -1089,7 +1141,7 @@
  * Return: 0 if all went well, else returns appropriate error value.
  */
 static int ti_sci_cmd_put_clock(const struct ti_sci_handle *handle,
-				u32 dev_id, u8 clk_id)
+				u32 dev_id, u32 clk_id)
 {
 	return ti_sci_set_clock_state(handle, dev_id, clk_id, 0,
 				      MSG_CLOCK_SW_STATE_AUTO);
@@ -1107,7 +1159,7 @@
  * Return: 0 if all went well, else returns appropriate error value.
  */
 static int ti_sci_cmd_clk_is_auto(const struct ti_sci_handle *handle,
-				  u32 dev_id, u8 clk_id, bool *req_state)
+				  u32 dev_id, u32 clk_id, bool *req_state)
 {
 	u8 state = 0;
 	int ret;
@@ -1136,7 +1188,7 @@
  * Return: 0 if all went well, else returns appropriate error value.
  */
 static int ti_sci_cmd_clk_is_on(const struct ti_sci_handle *handle, u32 dev_id,
-				u8 clk_id, bool *req_state, bool *curr_state)
+				u32 clk_id, bool *req_state, bool *curr_state)
 {
 	u8 c_state = 0, r_state = 0;
 	int ret;
@@ -1169,7 +1221,7 @@
  * Return: 0 if all went well, else returns appropriate error value.
  */
 static int ti_sci_cmd_clk_is_off(const struct ti_sci_handle *handle, u32 dev_id,
-				 u8 clk_id, bool *req_state, bool *curr_state)
+				 u32 clk_id, bool *req_state, bool *curr_state)
 {
 	u8 c_state = 0, r_state = 0;
 	int ret;
@@ -1201,7 +1253,7 @@
  * Return: 0 if all went well, else returns appropriate error value.
  */
 static int ti_sci_cmd_clk_set_parent(const struct ti_sci_handle *handle,
-				     u32 dev_id, u8 clk_id, u8 parent_id)
+				     u32 dev_id, u32 clk_id, u32 parent_id)
 {
 	struct ti_sci_info *info;
 	struct ti_sci_msg_req_set_clock_parent *req;
@@ -1228,8 +1280,18 @@
 	}
 	req = (struct ti_sci_msg_req_set_clock_parent *)xfer->xfer_buf;
 	req->dev_id = dev_id;
-	req->clk_id = clk_id;
-	req->parent_id = parent_id;
+	if (clk_id < 255) {
+		req->clk_id = clk_id;
+	} else {
+		req->clk_id = 255;
+		req->clk_id_32 = clk_id;
+	}
+	if (parent_id < 255) {
+		req->parent_id = parent_id;
+	} else {
+		req->parent_id = 255;
+		req->parent_id_32 = parent_id;
+	}
 
 	ret = ti_sci_do_xfer(info, xfer);
 	if (ret) {
@@ -1259,7 +1321,7 @@
  * Return: 0 if all went well, else returns appropriate error value.
  */
 static int ti_sci_cmd_clk_get_parent(const struct ti_sci_handle *handle,
-				     u32 dev_id, u8 clk_id, u8 *parent_id)
+				     u32 dev_id, u32 clk_id, u32 *parent_id)
 {
 	struct ti_sci_info *info;
 	struct ti_sci_msg_req_get_clock_parent *req;
@@ -1286,7 +1348,12 @@
 	}
 	req = (struct ti_sci_msg_req_get_clock_parent *)xfer->xfer_buf;
 	req->dev_id = dev_id;
-	req->clk_id = clk_id;
+	if (clk_id < 255) {
+		req->clk_id = clk_id;
+	} else {
+		req->clk_id = 255;
+		req->clk_id_32 = clk_id;
+	}
 
 	ret = ti_sci_do_xfer(info, xfer);
 	if (ret) {
@@ -1296,10 +1363,14 @@
 
 	resp = (struct ti_sci_msg_resp_get_clock_parent *)xfer->xfer_buf;
 
-	if (!ti_sci_is_response_ack(resp))
+	if (!ti_sci_is_response_ack(resp)) {
 		ret = -ENODEV;
-	else
-		*parent_id = resp->parent_id;
+	} else {
+		if (resp->parent_id < 255)
+			*parent_id = resp->parent_id;
+		else
+			*parent_id = resp->parent_id_32;
+	}
 
 fail:
 	ti_sci_put_one_xfer(&info->minfo, xfer);
@@ -1319,8 +1390,8 @@
  * Return: 0 if all went well, else returns appropriate error value.
  */
 static int ti_sci_cmd_clk_get_num_parents(const struct ti_sci_handle *handle,
-					  u32 dev_id, u8 clk_id,
-					  u8 *num_parents)
+					  u32 dev_id, u32 clk_id,
+					  u32 *num_parents)
 {
 	struct ti_sci_info *info;
 	struct ti_sci_msg_req_get_clock_num_parents *req;
@@ -1347,7 +1418,12 @@
 	}
 	req = (struct ti_sci_msg_req_get_clock_num_parents *)xfer->xfer_buf;
 	req->dev_id = dev_id;
-	req->clk_id = clk_id;
+	if (clk_id < 255) {
+		req->clk_id = clk_id;
+	} else {
+		req->clk_id = 255;
+		req->clk_id_32 = clk_id;
+	}
 
 	ret = ti_sci_do_xfer(info, xfer);
 	if (ret) {
@@ -1357,10 +1433,14 @@
 
 	resp = (struct ti_sci_msg_resp_get_clock_num_parents *)xfer->xfer_buf;
 
-	if (!ti_sci_is_response_ack(resp))
+	if (!ti_sci_is_response_ack(resp)) {
 		ret = -ENODEV;
-	else
-		*num_parents = resp->num_parents;
+	} else {
+		if (resp->num_parents < 255)
+			*num_parents = resp->num_parents;
+		else
+			*num_parents = resp->num_parents_32;
+	}
 
 fail:
 	ti_sci_put_one_xfer(&info->minfo, xfer);
@@ -1388,7 +1468,7 @@
  * Return: 0 if all went well, else returns appropriate error value.
  */
 static int ti_sci_cmd_clk_get_match_freq(const struct ti_sci_handle *handle,
-					 u32 dev_id, u8 clk_id, u64 min_freq,
+					 u32 dev_id, u32 clk_id, u64 min_freq,
 					 u64 target_freq, u64 max_freq,
 					 u64 *match_freq)
 {
@@ -1417,7 +1497,12 @@
 	}
 	req = (struct ti_sci_msg_req_query_clock_freq *)xfer->xfer_buf;
 	req->dev_id = dev_id;
-	req->clk_id = clk_id;
+	if (clk_id < 255) {
+		req->clk_id = clk_id;
+	} else {
+		req->clk_id = 255;
+		req->clk_id_32 = clk_id;
+	}
 	req->min_freq_hz = min_freq;
 	req->target_freq_hz = target_freq;
 	req->max_freq_hz = max_freq;
@@ -1460,7 +1545,7 @@
  * Return: 0 if all went well, else returns appropriate error value.
  */
 static int ti_sci_cmd_clk_set_freq(const struct ti_sci_handle *handle,
-				   u32 dev_id, u8 clk_id, u64 min_freq,
+				   u32 dev_id, u32 clk_id, u64 min_freq,
 				   u64 target_freq, u64 max_freq)
 {
 	struct ti_sci_info *info;
@@ -1488,7 +1573,12 @@
 	}
 	req = (struct ti_sci_msg_req_set_clock_freq *)xfer->xfer_buf;
 	req->dev_id = dev_id;
-	req->clk_id = clk_id;
+	if (clk_id < 255) {
+		req->clk_id = clk_id;
+	} else {
+		req->clk_id = 255;
+		req->clk_id_32 = clk_id;
+	}
 	req->min_freq_hz = min_freq;
 	req->target_freq_hz = target_freq;
 	req->max_freq_hz = max_freq;
@@ -1521,7 +1611,7 @@
  * Return: 0 if all went well, else returns appropriate error value.
  */
 static int ti_sci_cmd_clk_get_freq(const struct ti_sci_handle *handle,
-				   u32 dev_id, u8 clk_id, u64 *freq)
+				   u32 dev_id, u32 clk_id, u64 *freq)
 {
 	struct ti_sci_info *info;
 	struct ti_sci_msg_req_get_clock_freq *req;
@@ -1548,7 +1638,12 @@
 	}
 	req = (struct ti_sci_msg_req_get_clock_freq *)xfer->xfer_buf;
 	req->dev_id = dev_id;
-	req->clk_id = clk_id;
+	if (clk_id < 255) {
+		req->clk_id = clk_id;
+	} else {
+		req->clk_id = 255;
+		req->clk_id_32 = clk_id;
+	}
 
 	ret = ti_sci_do_xfer(info, xfer);
 	if (ret) {
@@ -1615,6 +1710,1209 @@
 	return ret;
 }
 
+static int ti_sci_get_resource_type(struct ti_sci_info *info, u16 dev_id,
+				    u16 *type)
+{
+	struct ti_sci_rm_type_map *rm_type_map = info->desc->rm_type_map;
+	bool found = false;
+	int i;
+
+	/* If map is not provided then assume dev_id is used as type */
+	if (!rm_type_map) {
+		*type = dev_id;
+		return 0;
+	}
+
+	for (i = 0; rm_type_map[i].dev_id; i++) {
+		if (rm_type_map[i].dev_id == dev_id) {
+			*type = rm_type_map[i].type;
+			found = true;
+			break;
+		}
+	}
+
+	if (!found)
+		return -EINVAL;
+
+	return 0;
+}
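
A brief sketch of how this lookup behaves, assuming the AM654 rm_type_map defined later in this patch; the dev_id is only an example:

/* Illustration only, not part of the patch. */
static int example_lookup_resource_type(struct ti_sci_info *info)
{
	u16 type;
	int ret;

	/* 188 is MAIN_NAV_UDMAP in the AM654 rm_type_map below */
	ret = ti_sci_get_resource_type(info, 188, &type);
	if (ret)
		return ret;		/* dev_id not present in the map */

	/* With an AM654-style map this yields type 0x006; with a NULL
	 * map the dev_id itself would have been used as the type. */
	return type;
}
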
+
+/**
+ * ti_sci_get_resource_range - Helper to get a range of resources assigned
+ *			       to a host. Resource is uniquely identified by
+ *			       type and subtype.
+ * @handle:		Pointer to TISCI handle.
+ * @dev_id:		TISCI device ID.
+ * @subtype:		Resource assignment subtype that is being requested
+ *			from the given device.
+ * @s_host:		Host processor ID to which the resources are allocated
+ * @range_start:	Start index of the resource range
+ * @range_num:		Number of resources in the range
+ *
+ * Return: 0 if all went fine, else return appropriate error.
+ */
+static int ti_sci_get_resource_range(const struct ti_sci_handle *handle,
+				     u32 dev_id, u8 subtype, u8 s_host,
+				     u16 *range_start, u16 *range_num)
+{
+	struct ti_sci_msg_resp_get_resource_range *resp;
+	struct ti_sci_msg_req_get_resource_range *req;
+	struct ti_sci_xfer *xfer;
+	struct ti_sci_info *info;
+	struct device *dev;
+	u16 type;
+	int ret = 0;
+
+	if (IS_ERR(handle))
+		return PTR_ERR(handle);
+	if (!handle)
+		return -EINVAL;
+
+	info = handle_to_ti_sci_info(handle);
+	dev = info->dev;
+
+	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_GET_RESOURCE_RANGE,
+				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
+				   sizeof(*req), sizeof(*resp));
+	if (IS_ERR(xfer)) {
+		ret = PTR_ERR(xfer);
+		dev_err(dev, "Message alloc failed(%d)\n", ret);
+		return ret;
+	}
+
+	ret = ti_sci_get_resource_type(info, dev_id, &type);
+	if (ret) {
+		dev_err(dev, "rm type lookup failed for %u\n", dev_id);
+		goto fail;
+	}
+
+	req = (struct ti_sci_msg_req_get_resource_range *)xfer->xfer_buf;
+	req->secondary_host = s_host;
+	req->type = type & MSG_RM_RESOURCE_TYPE_MASK;
+	req->subtype = subtype & MSG_RM_RESOURCE_SUBTYPE_MASK;
+
+	ret = ti_sci_do_xfer(info, xfer);
+	if (ret) {
+		dev_err(dev, "Mbox send fail %d\n", ret);
+		goto fail;
+	}
+
+	resp = (struct ti_sci_msg_resp_get_resource_range *)xfer->xfer_buf;
+
+	if (!ti_sci_is_response_ack(resp)) {
+		ret = -ENODEV;
+	} else if (!resp->range_start && !resp->range_num) {
+		ret = -ENODEV;
+	} else {
+		*range_start = resp->range_start;
+		*range_num = resp->range_num;
+	}
+
+fail:
+	ti_sci_put_one_xfer(&info->minfo, xfer);
+
+	return ret;
+}
+
+/**
+ * ti_sci_cmd_get_resource_range - Get a range of resources assigned to the
+ *				   host that is the same as the TI SCI
+ *				   interface host.
+ * @handle:		Pointer to TISCI handle.
+ * @dev_id:		TISCI device ID.
+ * @subtype:		Resource assignment subtype that is being requested
+ *			from the given device.
+ * @range_start:	Start index of the resource range
+ * @range_num:		Number of resources in the range
+ *
+ * Return: 0 if all went fine, else return appropriate error.
+ */
+static int ti_sci_cmd_get_resource_range(const struct ti_sci_handle *handle,
+					 u32 dev_id, u8 subtype,
+					 u16 *range_start, u16 *range_num)
+{
+	return ti_sci_get_resource_range(handle, dev_id, subtype,
+					 TI_SCI_IRQ_SECONDARY_HOST_INVALID,
+					 range_start, range_num);
+}
+
+/**
+ * ti_sci_cmd_get_resource_range_from_shost - Get a range of resources
+ *					      assigned to a specified host.
+ * @handle:		Pointer to TISCI handle.
+ * @dev_id:		TISCI device ID.
+ * @subtype:		Resource assignment subtype that is being requested
+ *			from the given device.
+ * @s_host:		Host processor ID to which the resources are allocated
+ * @range_start:	Start index of the resource range
+ * @range_num:		Number of resources in the range
+ *
+ * Return: 0 if all went fine, else return appropriate error.
+ */
+static
+int ti_sci_cmd_get_resource_range_from_shost(const struct ti_sci_handle *handle,
+					     u32 dev_id, u8 subtype, u8 s_host,
+					     u16 *range_start, u16 *range_num)
+{
+	return ti_sci_get_resource_range(handle, dev_id, subtype, s_host,
+					 range_start, range_num);
+}
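
A hedged usage sketch that goes through the rm_core_ops installed further below; the device ID and subtype values are placeholders rather than values from a real board configuration:

/* Illustration only, not part of the patch. */
static int example_query_range(const struct ti_sci_handle *handle)
{
	u16 start, num;
	int ret;

	/* Ask how many resources of a given subtype this host owns */
	ret = handle->ops.rm_core_ops.get_range(handle, 179 /* dev_id */,
						0x0a /* subtype */,
						&start, &num);
	if (ret)
		return ret;

	pr_info("resource range [%u..%u] assigned to this host\n",
		start, start + num - 1);
	return 0;
}
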
+
+/**
+ * ti_sci_manage_irq() - Helper api to configure/release the irq route between
+ *			 the requested source and destination
+ * @handle:		Pointer to TISCI handle.
+ * @valid_params:	Bit fields defining the validity of certain params
+ * @src_id:		Device ID of the IRQ source
+ * @src_index:		IRQ source index within the source device
+ * @dst_id:		Device ID of the IRQ destination
+ * @dst_host_irq:	IRQ number of the destination device
+ * @ia_id:		Device ID of the IA, if the IRQ flows through this IA
+ * @vint:		Virtual interrupt to be used within the IA
+ * @global_event:	Global event number to be used for the requesting event
+ * @vint_status_bit:	Virtual interrupt status bit to be used for the event
+ * @s_host:		Secondary host ID for which the irq/event is being
+ *			requested.
+ * @type:		Request type irq set or release.
+ *
+ * Return: 0 if all went fine, else return appropriate error.
+ */
+static int ti_sci_manage_irq(const struct ti_sci_handle *handle,
+			     u32 valid_params, u16 src_id, u16 src_index,
+			     u16 dst_id, u16 dst_host_irq, u16 ia_id, u16 vint,
+			     u16 global_event, u8 vint_status_bit, u8 s_host,
+			     u16 type)
+{
+	struct ti_sci_msg_req_manage_irq *req;
+	struct ti_sci_msg_hdr *resp;
+	struct ti_sci_xfer *xfer;
+	struct ti_sci_info *info;
+	struct device *dev;
+	int ret = 0;
+
+	if (IS_ERR(handle))
+		return PTR_ERR(handle);
+	if (!handle)
+		return -EINVAL;
+
+	info = handle_to_ti_sci_info(handle);
+	dev = info->dev;
+
+	xfer = ti_sci_get_one_xfer(info, type, TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
+				   sizeof(*req), sizeof(*resp));
+	if (IS_ERR(xfer)) {
+		ret = PTR_ERR(xfer);
+		dev_err(dev, "Message alloc failed(%d)\n", ret);
+		return ret;
+	}
+	req = (struct ti_sci_msg_req_manage_irq *)xfer->xfer_buf;
+	req->valid_params = valid_params;
+	req->src_id = src_id;
+	req->src_index = src_index;
+	req->dst_id = dst_id;
+	req->dst_host_irq = dst_host_irq;
+	req->ia_id = ia_id;
+	req->vint = vint;
+	req->global_event = global_event;
+	req->vint_status_bit = vint_status_bit;
+	req->secondary_host = s_host;
+
+	ret = ti_sci_do_xfer(info, xfer);
+	if (ret) {
+		dev_err(dev, "Mbox send fail %d\n", ret);
+		goto fail;
+	}
+
+	resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
+
+	ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;
+
+fail:
+	ti_sci_put_one_xfer(&info->minfo, xfer);
+
+	return ret;
+}
+
+/**
+ * ti_sci_set_irq() - Helper api to configure the irq route between the
+ *		      requested source and destination
+ * @handle:		Pointer to TISCI handle.
+ * @valid_params:	Bit fields defining the validity of certain params
+ * @src_id:		Device ID of the IRQ source
+ * @src_index:		IRQ source index within the source device
+ * @dst_id:		Device ID of the IRQ destination
+ * @dst_host_irq:	IRQ number of the destination device
+ * @ia_id:		Device ID of the IA, if the IRQ flows through this IA
+ * @vint:		Virtual interrupt to be used within the IA
+ * @global_event:	Global event number to be used for the requesting event
+ * @vint_status_bit:	Virtual interrupt status bit to be used for the event
+ * @s_host:		Secondary host ID for which the irq/event is being
+ *			requested.
+ *
+ * Return: 0 if all went fine, else return appropriate error.
+ */
+static int ti_sci_set_irq(const struct ti_sci_handle *handle, u32 valid_params,
+			  u16 src_id, u16 src_index, u16 dst_id,
+			  u16 dst_host_irq, u16 ia_id, u16 vint,
+			  u16 global_event, u8 vint_status_bit, u8 s_host)
+{
+	pr_debug("%s: IRQ set with valid_params = 0x%x from src = %d, index = %d, to dst = %d, irq = %d, via ia_id = %d, vint = %d, global event = %d, status_bit = %d\n",
+		 __func__, valid_params, src_id, src_index,
+		 dst_id, dst_host_irq, ia_id, vint, global_event,
+		 vint_status_bit);
+
+	return ti_sci_manage_irq(handle, valid_params, src_id, src_index,
+				 dst_id, dst_host_irq, ia_id, vint,
+				 global_event, vint_status_bit, s_host,
+				 TI_SCI_MSG_SET_IRQ);
+}
+
+/**
+ * ti_sci_free_irq() - Helper api to free the irq route between the
+ *			   requested source and destination
+ * @handle:		Pointer to TISCI handle.
+ * @valid_params:	Bit fields defining the validity of certain params
+ * @src_id:		Device ID of the IRQ source
+ * @src_index:		IRQ source index within the source device
+ * @dst_id:		Device ID of the IRQ destination
+ * @dst_host_irq:	IRQ number of the destination device
+ * @ia_id:		Device ID of the IA, if the IRQ flows through this IA
+ * @vint:		Virtual interrupt to be used within the IA
+ * @global_event:	Global event number to be used for the requesting event
+ * @vint_status_bit:	Virtual interrupt status bit to be used for the event
+ * @s_host:		Secondary host ID for which the irq/event is being
+ *			requested.
+ *
+ * Return: 0 if all went fine, else return appropriate error.
+ */
+static int ti_sci_free_irq(const struct ti_sci_handle *handle, u32 valid_params,
+			   u16 src_id, u16 src_index, u16 dst_id,
+			   u16 dst_host_irq, u16 ia_id, u16 vint,
+			   u16 global_event, u8 vint_status_bit, u8 s_host)
+{
+	pr_debug("%s: IRQ release with valid_params = 0x%x from src = %d, index = %d, to dst = %d, irq = %d, via ia_id = %d, vint = %d, global event = %d, status_bit = %d\n",
+		 __func__, valid_params, src_id, src_index,
+		 dst_id, dst_host_irq, ia_id, vint, global_event,
+		 vint_status_bit);
+
+	return ti_sci_manage_irq(handle, valid_params, src_id, src_index,
+				 dst_id, dst_host_irq, ia_id, vint,
+				 global_event, vint_status_bit, s_host,
+				 TI_SCI_MSG_FREE_IRQ);
+}
+
+/**
+ * ti_sci_cmd_set_irq() - Configure a host irq route between the requested
+ *			  source and destination.
+ * @handle:		Pointer to TISCI handle.
+ * @src_id:		Device ID of the IRQ source
+ * @src_index:		IRQ source index within the source device
+ * @dst_id:		Device ID of the IRQ destination
+ * @dst_host_irq:	IRQ number of the destination device
+ *
+ * Return: 0 if all went fine, else return appropriate error.
+ */
+static int ti_sci_cmd_set_irq(const struct ti_sci_handle *handle, u16 src_id,
+			      u16 src_index, u16 dst_id, u16 dst_host_irq)
+{
+	u32 valid_params = MSG_FLAG_DST_ID_VALID | MSG_FLAG_DST_HOST_IRQ_VALID;
+
+	return ti_sci_set_irq(handle, valid_params, src_id, src_index, dst_id,
+			      dst_host_irq, 0, 0, 0, 0, 0);
+}
+
+/**
+ * ti_sci_cmd_set_event_map() - Configure an event based irq route between the
+ *				requested source and Interrupt Aggregator.
+ * @handle:		Pointer to TISCI handle.
+ * @src_id:		Device ID of the IRQ source
+ * @src_index:		IRQ source index within the source device
+ * @ia_id:		Device ID of the IA, if the IRQ flows through this IA
+ * @vint:		Virtual interrupt to be used within the IA
+ * @global_event:	Global event number to be used for the requesting event
+ * @vint_status_bit:	Virtual interrupt status bit to be used for the event
+ *
+ * Return: 0 if all went fine, else return appropriate error.
+ */
+static int ti_sci_cmd_set_event_map(const struct ti_sci_handle *handle,
+				    u16 src_id, u16 src_index, u16 ia_id,
+				    u16 vint, u16 global_event,
+				    u8 vint_status_bit)
+{
+	u32 valid_params = MSG_FLAG_IA_ID_VALID | MSG_FLAG_VINT_VALID |
+			   MSG_FLAG_GLB_EVNT_VALID |
+			   MSG_FLAG_VINT_STS_BIT_VALID;
+
+	return ti_sci_set_irq(handle, valid_params, src_id, src_index, 0, 0,
+			      ia_id, vint, global_event, vint_status_bit, 0);
+}
+
+/**
+ * ti_sci_cmd_free_irq() - Free a host irq route between the requested
+ *			   source and destination.
+ * @handle:		Pointer to TISCI handle.
+ * @src_id:		Device ID of the IRQ source
+ * @src_index:		IRQ source index within the source device
+ * @dst_id:		Device ID of the IRQ destination
+ * @dst_host_irq:	IRQ number of the destination device
+ *
+ * Return: 0 if all went fine, else return appropriate error.
+ */
+static int ti_sci_cmd_free_irq(const struct ti_sci_handle *handle, u16 src_id,
+			       u16 src_index, u16 dst_id, u16 dst_host_irq)
+{
+	u32 valid_params = MSG_FLAG_DST_ID_VALID | MSG_FLAG_DST_HOST_IRQ_VALID;
+
+	return ti_sci_free_irq(handle, valid_params, src_id, src_index, dst_id,
+			       dst_host_irq, 0, 0, 0, 0, 0);
+}
+
+/**
+ * ti_sci_cmd_free_event_map() - Free an event map between the requested source
+ *				 and Interrupt Aggregator.
+ * @handle:		Pointer to TISCI handle.
+ * @src_id:		Device ID of the IRQ source
+ * @src_index:		IRQ source index within the source device
+ * @ia_id:		Device ID of the IA, if the IRQ flows through this IA
+ * @vint:		Virtual interrupt to be used within the IA
+ * @global_event:	Global event number to be used for the requesting event
+ * @vint_status_bit:	Virtual interrupt status bit to be used for the event
+ *
+ * Return: 0 if all went fine, else return appropriate error.
+ */
+static int ti_sci_cmd_free_event_map(const struct ti_sci_handle *handle,
+				     u16 src_id, u16 src_index, u16 ia_id,
+				     u16 vint, u16 global_event,
+				     u8 vint_status_bit)
+{
+	u32 valid_params = MSG_FLAG_IA_ID_VALID |
+			   MSG_FLAG_VINT_VALID | MSG_FLAG_GLB_EVNT_VALID |
+			   MSG_FLAG_VINT_STS_BIT_VALID;
+
+	return ti_sci_free_irq(handle, valid_params, src_id, src_index, 0, 0,
+			       ia_id, vint, global_event, vint_status_bit, 0);
+}
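
A usage sketch covering both flavours of route via the rm_irq_ops installed below; every numeric ID here is a placeholder:

/* Illustration only, not part of the patch. */
static int example_irq_routes(const struct ti_sci_handle *handle)
{
	const struct ti_sci_rm_irq_ops *iops = &handle->ops.rm_irq_ops;
	int ret;

	/* Direct route: source (dev 100, index 4) -> dest dev 56, irq 392 */
	ret = iops->set_irq(handle, 100, 4, 56, 392);
	if (ret)
		return ret;

	/* Event route through an IA: ia_id 179, vint 8, global event 16 */
	ret = iops->set_event_map(handle, 100, 5, 179, 8, 16, 0);
	if (ret) {
		iops->free_irq(handle, 100, 4, 56, 392);
		return ret;
	}

	/* Tear both routes down again */
	iops->free_event_map(handle, 100, 5, 179, 8, 16, 0);
	return iops->free_irq(handle, 100, 4, 56, 392);
}
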
+
+/**
+ * ti_sci_cmd_ring_config() - configure RA ring
+ * @handle:		Pointer to TI SCI handle.
+ * @valid_params:	Bitfield defining validity of ring configuration
+ *			parameters
+ * @nav_id:		Device ID of Navigator Subsystem from which the ring is
+ *			allocated
+ * @index:		Ring index
+ * @addr_lo:		The ring base address lo 32 bits
+ * @addr_hi:		The ring base address hi 32 bits
+ * @count:		Number of ring elements
+ * @mode:		The mode of the ring
+ * @size:		The ring element size.
+ * @order_id:		Specifies the ring's bus order ID
+ *
+ * Return: 0 if all went well, else returns appropriate error value.
+ *
+ * See @ti_sci_msg_rm_ring_cfg_req for more info.
+ */
+static int ti_sci_cmd_ring_config(const struct ti_sci_handle *handle,
+				  u32 valid_params, u16 nav_id, u16 index,
+				  u32 addr_lo, u32 addr_hi, u32 count,
+				  u8 mode, u8 size, u8 order_id)
+{
+	struct ti_sci_msg_rm_ring_cfg_req *req;
+	struct ti_sci_msg_hdr *resp;
+	struct ti_sci_xfer *xfer;
+	struct ti_sci_info *info;
+	struct device *dev;
+	int ret = 0;
+
+	if (IS_ERR_OR_NULL(handle))
+		return -EINVAL;
+
+	info = handle_to_ti_sci_info(handle);
+	dev = info->dev;
+
+	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_RM_RING_CFG,
+				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
+				   sizeof(*req), sizeof(*resp));
+	if (IS_ERR(xfer)) {
+		ret = PTR_ERR(xfer);
+		dev_err(dev, "RM_RA:Message config failed(%d)\n", ret);
+		return ret;
+	}
+	req = (struct ti_sci_msg_rm_ring_cfg_req *)xfer->xfer_buf;
+	req->valid_params = valid_params;
+	req->nav_id = nav_id;
+	req->index = index;
+	req->addr_lo = addr_lo;
+	req->addr_hi = addr_hi;
+	req->count = count;
+	req->mode = mode;
+	req->size = size;
+	req->order_id = order_id;
+
+	ret = ti_sci_do_xfer(info, xfer);
+	if (ret) {
+		dev_err(dev, "RM_RA:Mbox config send fail %d\n", ret);
+		goto fail;
+	}
+
+	resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
+	ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;
+
+fail:
+	ti_sci_put_one_xfer(&info->minfo, xfer);
+	dev_dbg(dev, "RM_RA:config ring %u ret:%d\n", index, ret);
+	return ret;
+}
+
+/**
+ * ti_sci_cmd_ring_get_config() - get RA ring configuration
+ * @handle:	Pointer to TI SCI handle.
+ * @nav_id:	Device ID of Navigator Subsystem from which the ring is
+ *		allocated
+ * @index:	Ring index
+ * @addr_lo:	Returns ring's base address lo 32 bits
+ * @addr_hi:	Returns ring's base address hi 32 bits
+ * @count:	Returns number of ring elements
+ * @mode:	Returns mode of the ring
+ * @size:	Returns ring element size
+ * @order_id:	Returns ring's bus order ID
+ *
+ * Return: 0 if all went well, else returns appropriate error value.
+ *
+ * See @ti_sci_msg_rm_ring_get_cfg_req for more info.
+ */
+static int ti_sci_cmd_ring_get_config(const struct ti_sci_handle *handle,
+				      u32 nav_id, u32 index, u8 *mode,
+				      u32 *addr_lo, u32 *addr_hi,
+				      u32 *count, u8 *size, u8 *order_id)
+{
+	struct ti_sci_msg_rm_ring_get_cfg_resp *resp;
+	struct ti_sci_msg_rm_ring_get_cfg_req *req;
+	struct ti_sci_xfer *xfer;
+	struct ti_sci_info *info;
+	struct device *dev;
+	int ret = 0;
+
+	if (IS_ERR_OR_NULL(handle))
+		return -EINVAL;
+
+	info = handle_to_ti_sci_info(handle);
+	dev = info->dev;
+
+	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_RM_RING_GET_CFG,
+				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
+				   sizeof(*req), sizeof(*resp));
+	if (IS_ERR(xfer)) {
+		ret = PTR_ERR(xfer);
+		dev_err(dev,
+			"RM_RA:Message get config failed(%d)\n", ret);
+		return ret;
+	}
+	req = (struct ti_sci_msg_rm_ring_get_cfg_req *)xfer->xfer_buf;
+	req->nav_id = nav_id;
+	req->index = index;
+
+	ret = ti_sci_do_xfer(info, xfer);
+	if (ret) {
+		dev_err(dev, "RM_RA:Mbox get config send fail %d\n", ret);
+		goto fail;
+	}
+
+	resp = (struct ti_sci_msg_rm_ring_get_cfg_resp *)xfer->xfer_buf;
+
+	if (!ti_sci_is_response_ack(resp)) {
+		ret = -ENODEV;
+	} else {
+		if (mode)
+			*mode = resp->mode;
+		if (addr_lo)
+			*addr_lo = resp->addr_lo;
+		if (addr_hi)
+			*addr_hi = resp->addr_hi;
+		if (count)
+			*count = resp->count;
+		if (size)
+			*size = resp->size;
+		if (order_id)
+			*order_id = resp->order_id;
+	}
+
+fail:
+	ti_sci_put_one_xfer(&info->minfo, xfer);
+	dev_dbg(dev, "RM_RA:get config ring %u ret:%d\n", index, ret);
+	return ret;
+}
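
A hedged round-trip sketch using the ringacc ops installed below; the valid-parameter mask and numeric values are placeholders, not taken from any real board:

/* Illustration only, not part of the patch. */
static int example_ring_roundtrip(const struct ti_sci_handle *handle)
{
	const struct ti_sci_rm_ringacc_ops *rops = &handle->ops.rm_ring_ops;
	u32 addr_lo, addr_hi, count;
	u8 mode, size, order_id;
	int ret;

	/* Program ring 10 of a hypothetical Navigator (dev_id 187) */
	ret = rops->config(handle, ~0 /* all params valid */, 187, 10,
			   0x9c000000, 0x0, 128, 0, 4, 0);
	if (ret)
		return ret;

	/* Read the configuration back */
	return rops->get_config(handle, 187, 10, &mode, &addr_lo, &addr_hi,
				&count, &size, &order_id);
}
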
+
+/**
+ * ti_sci_cmd_rm_psil_pair() - Pair PSI-L source to destination thread
+ * @handle:	Pointer to TI SCI handle.
+ * @nav_id:	Device ID of Navigator Subsystem which should be used for
+ *		pairing
+ * @src_thread:	Source PSI-L thread ID
+ * @dst_thread: Destination PSI-L thread ID
+ *
+ * Return: 0 if all went well, else returns appropriate error value.
+ */
+static int ti_sci_cmd_rm_psil_pair(const struct ti_sci_handle *handle,
+				   u32 nav_id, u32 src_thread, u32 dst_thread)
+{
+	struct ti_sci_msg_psil_pair *req;
+	struct ti_sci_msg_hdr *resp;
+	struct ti_sci_xfer *xfer;
+	struct ti_sci_info *info;
+	struct device *dev;
+	int ret = 0;
+
+	if (IS_ERR(handle))
+		return PTR_ERR(handle);
+	if (!handle)
+		return -EINVAL;
+
+	info = handle_to_ti_sci_info(handle);
+	dev = info->dev;
+
+	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_RM_PSIL_PAIR,
+				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
+				   sizeof(*req), sizeof(*resp));
+	if (IS_ERR(xfer)) {
+		ret = PTR_ERR(xfer);
+		dev_err(dev, "RM_PSIL:Message reconfig failed(%d)\n", ret);
+		return ret;
+	}
+	req = (struct ti_sci_msg_psil_pair *)xfer->xfer_buf;
+	req->nav_id = nav_id;
+	req->src_thread = src_thread;
+	req->dst_thread = dst_thread;
+
+	ret = ti_sci_do_xfer(info, xfer);
+	if (ret) {
+		dev_err(dev, "RM_PSIL:Mbox send fail %d\n", ret);
+		goto fail;
+	}
+
+	resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
+	ret = ti_sci_is_response_ack(resp) ? 0 : -EINVAL;
+
+fail:
+	ti_sci_put_one_xfer(&info->minfo, xfer);
+
+	return ret;
+}
+
+/**
+ * ti_sci_cmd_rm_psil_unpair() - Unpair PSI-L source from destination thread
+ * @handle:	Pointer to TI SCI handle.
+ * @nav_id:	Device ID of Navigator Subsystem which should be used for
+ *		unpairing
+ * @src_thread:	Source PSI-L thread ID
+ * @dst_thread:	Destination PSI-L thread ID
+ *
+ * Return: 0 if all went well, else returns appropriate error value.
+ */
+static int ti_sci_cmd_rm_psil_unpair(const struct ti_sci_handle *handle,
+				     u32 nav_id, u32 src_thread, u32 dst_thread)
+{
+	struct ti_sci_msg_psil_unpair *req;
+	struct ti_sci_msg_hdr *resp;
+	struct ti_sci_xfer *xfer;
+	struct ti_sci_info *info;
+	struct device *dev;
+	int ret = 0;
+
+	if (IS_ERR(handle))
+		return PTR_ERR(handle);
+	if (!handle)
+		return -EINVAL;
+
+	info = handle_to_ti_sci_info(handle);
+	dev = info->dev;
+
+	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_RM_PSIL_UNPAIR,
+				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
+				   sizeof(*req), sizeof(*resp));
+	if (IS_ERR(xfer)) {
+		ret = PTR_ERR(xfer);
+		dev_err(dev, "RM_PSIL:Message reconfig failed(%d)\n", ret);
+		return ret;
+	}
+	req = (struct ti_sci_msg_psil_unpair *)xfer->xfer_buf;
+	req->nav_id = nav_id;
+	req->src_thread = src_thread;
+	req->dst_thread = dst_thread;
+
+	ret = ti_sci_do_xfer(info, xfer);
+	if (ret) {
+		dev_err(dev, "RM_PSIL:Mbox send fail %d\n", ret);
+		goto fail;
+	}
+
+	resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
+	ret = ti_sci_is_response_ack(resp) ? 0 : -EINVAL;
+
+fail:
+	ti_sci_put_one_xfer(&info->minfo, xfer);
+
+	return ret;
+}
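
A short pairing sketch via the rm_psil_ops installed below; the Navigator dev_id and thread IDs are placeholders:

/* Illustration only, not part of the patch. */
static int example_psil_pairing(const struct ti_sci_handle *handle)
{
	const struct ti_sci_rm_psil_ops *psilops = &handle->ops.rm_psil_ops;
	int ret;

	ret = psilops->pair(handle, 188, 0x4000, 0xc000);
	if (ret)
		return ret;

	return psilops->unpair(handle, 188, 0x4000, 0xc000);
}
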
+
+/**
+ * ti_sci_cmd_rm_udmap_tx_ch_cfg() - Configure a UDMAP TX channel
+ * @handle:	Pointer to TI SCI handle.
+ * @params:	Pointer to ti_sci_msg_rm_udmap_tx_ch_cfg TX channel config
+ *		structure
+ *
+ * Return: 0 if all went well, else returns appropriate error value.
+ *
+ * See @ti_sci_msg_rm_udmap_tx_ch_cfg and @ti_sci_msg_rm_udmap_tx_ch_cfg_req for
+ * more info.
+ */
+static int ti_sci_cmd_rm_udmap_tx_ch_cfg(const struct ti_sci_handle *handle,
+			const struct ti_sci_msg_rm_udmap_tx_ch_cfg *params)
+{
+	struct ti_sci_msg_rm_udmap_tx_ch_cfg_req *req;
+	struct ti_sci_msg_hdr *resp;
+	struct ti_sci_xfer *xfer;
+	struct ti_sci_info *info;
+	struct device *dev;
+	int ret = 0;
+
+	if (IS_ERR_OR_NULL(handle))
+		return -EINVAL;
+
+	info = handle_to_ti_sci_info(handle);
+	dev = info->dev;
+
+	xfer = ti_sci_get_one_xfer(info, TISCI_MSG_RM_UDMAP_TX_CH_CFG,
+				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
+				   sizeof(*req), sizeof(*resp));
+	if (IS_ERR(xfer)) {
+		ret = PTR_ERR(xfer);
+		dev_err(dev, "Message TX_CH_CFG alloc failed(%d)\n", ret);
+		return ret;
+	}
+	req = (struct ti_sci_msg_rm_udmap_tx_ch_cfg_req *)xfer->xfer_buf;
+	req->valid_params = params->valid_params;
+	req->nav_id = params->nav_id;
+	req->index = params->index;
+	req->tx_pause_on_err = params->tx_pause_on_err;
+	req->tx_filt_einfo = params->tx_filt_einfo;
+	req->tx_filt_pswords = params->tx_filt_pswords;
+	req->tx_atype = params->tx_atype;
+	req->tx_chan_type = params->tx_chan_type;
+	req->tx_supr_tdpkt = params->tx_supr_tdpkt;
+	req->tx_fetch_size = params->tx_fetch_size;
+	req->tx_credit_count = params->tx_credit_count;
+	req->txcq_qnum = params->txcq_qnum;
+	req->tx_priority = params->tx_priority;
+	req->tx_qos = params->tx_qos;
+	req->tx_orderid = params->tx_orderid;
+	req->fdepth = params->fdepth;
+	req->tx_sched_priority = params->tx_sched_priority;
+	req->tx_burst_size = params->tx_burst_size;
+
+	ret = ti_sci_do_xfer(info, xfer);
+	if (ret) {
+		dev_err(dev, "Mbox send TX_CH_CFG fail %d\n", ret);
+		goto fail;
+	}
+
+	resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
+	ret = ti_sci_is_response_ack(resp) ? 0 : -EINVAL;
+
+fail:
+	ti_sci_put_one_xfer(&info->minfo, xfer);
+	dev_dbg(dev, "TX_CH_CFG: chn %u ret:%d\n", params->index, ret);
+	return ret;
+}
+
+/**
+ * ti_sci_cmd_rm_udmap_rx_ch_cfg() - Configure a UDMAP RX channel
+ * @handle:	Pointer to TI SCI handle.
+ * @params:	Pointer to ti_sci_msg_rm_udmap_rx_ch_cfg RX channel config
+ *		structure
+ *
+ * Return: 0 if all went well, else returns appropriate error value.
+ *
+ * See @ti_sci_msg_rm_udmap_rx_ch_cfg and @ti_sci_msg_rm_udmap_rx_ch_cfg_req for
+ * more info.
+ */
+static int ti_sci_cmd_rm_udmap_rx_ch_cfg(const struct ti_sci_handle *handle,
+			const struct ti_sci_msg_rm_udmap_rx_ch_cfg *params)
+{
+	struct ti_sci_msg_rm_udmap_rx_ch_cfg_req *req;
+	struct ti_sci_msg_hdr *resp;
+	struct ti_sci_xfer *xfer;
+	struct ti_sci_info *info;
+	struct device *dev;
+	int ret = 0;
+
+	if (IS_ERR_OR_NULL(handle))
+		return -EINVAL;
+
+	info = handle_to_ti_sci_info(handle);
+	dev = info->dev;
+
+	xfer = ti_sci_get_one_xfer(info, TISCI_MSG_RM_UDMAP_RX_CH_CFG,
+				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
+				   sizeof(*req), sizeof(*resp));
+	if (IS_ERR(xfer)) {
+		ret = PTR_ERR(xfer);
+		dev_err(dev, "Message RX_CH_CFG alloc failed(%d)\n", ret);
+		return ret;
+	}
+	req = (struct ti_sci_msg_rm_udmap_rx_ch_cfg_req *)xfer->xfer_buf;
+	req->valid_params = params->valid_params;
+	req->nav_id = params->nav_id;
+	req->index = params->index;
+	req->rx_fetch_size = params->rx_fetch_size;
+	req->rxcq_qnum = params->rxcq_qnum;
+	req->rx_priority = params->rx_priority;
+	req->rx_qos = params->rx_qos;
+	req->rx_orderid = params->rx_orderid;
+	req->rx_sched_priority = params->rx_sched_priority;
+	req->flowid_start = params->flowid_start;
+	req->flowid_cnt = params->flowid_cnt;
+	req->rx_pause_on_err = params->rx_pause_on_err;
+	req->rx_atype = params->rx_atype;
+	req->rx_chan_type = params->rx_chan_type;
+	req->rx_ignore_short = params->rx_ignore_short;
+	req->rx_ignore_long = params->rx_ignore_long;
+	req->rx_burst_size = params->rx_burst_size;
+
+	ret = ti_sci_do_xfer(info, xfer);
+	if (ret) {
+		dev_err(dev, "Mbox send RX_CH_CFG fail %d\n", ret);
+		goto fail;
+	}
+
+	resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
+	ret = ti_sci_is_response_ack(resp) ? 0 : -EINVAL;
+
+fail:
+	ti_sci_put_one_xfer(&info->minfo, xfer);
+	dev_dbg(dev, "RX_CH_CFG: chn %u ret:%d\n", params->index, ret);
+	return ret;
+}
+
+/**
+ * ti_sci_cmd_rm_udmap_rx_flow_cfg() - Configure UDMAP RX FLOW
+ * @handle:	Pointer to TI SCI handle.
+ * @params:	Pointer to ti_sci_msg_rm_udmap_flow_cfg RX FLOW config
+ *		structure
+ *
+ * Return: 0 if all went well, else returns appropriate error value.
+ *
+ * See @ti_sci_msg_rm_udmap_flow_cfg and @ti_sci_msg_rm_udmap_flow_cfg_req for
+ * more info.
+ */
+static int ti_sci_cmd_rm_udmap_rx_flow_cfg(const struct ti_sci_handle *handle,
+			const struct ti_sci_msg_rm_udmap_flow_cfg *params)
+{
+	struct ti_sci_msg_rm_udmap_flow_cfg_req *req;
+	struct ti_sci_msg_hdr *resp;
+	struct ti_sci_xfer *xfer;
+	struct ti_sci_info *info;
+	struct device *dev;
+	int ret = 0;
+
+	if (IS_ERR_OR_NULL(handle))
+		return -EINVAL;
+
+	info = handle_to_ti_sci_info(handle);
+	dev = info->dev;
+
+	xfer = ti_sci_get_one_xfer(info, TISCI_MSG_RM_UDMAP_FLOW_CFG,
+				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
+				   sizeof(*req), sizeof(*resp));
+	if (IS_ERR(xfer)) {
+		ret = PTR_ERR(xfer);
+		dev_err(dev, "RX_FL_CFG: Message alloc failed(%d)\n", ret);
+		return ret;
+	}
+	req = (struct ti_sci_msg_rm_udmap_flow_cfg_req *)xfer->xfer_buf;
+	req->valid_params = params->valid_params;
+	req->nav_id = params->nav_id;
+	req->flow_index = params->flow_index;
+	req->rx_einfo_present = params->rx_einfo_present;
+	req->rx_psinfo_present = params->rx_psinfo_present;
+	req->rx_error_handling = params->rx_error_handling;
+	req->rx_desc_type = params->rx_desc_type;
+	req->rx_sop_offset = params->rx_sop_offset;
+	req->rx_dest_qnum = params->rx_dest_qnum;
+	req->rx_src_tag_hi = params->rx_src_tag_hi;
+	req->rx_src_tag_lo = params->rx_src_tag_lo;
+	req->rx_dest_tag_hi = params->rx_dest_tag_hi;
+	req->rx_dest_tag_lo = params->rx_dest_tag_lo;
+	req->rx_src_tag_hi_sel = params->rx_src_tag_hi_sel;
+	req->rx_src_tag_lo_sel = params->rx_src_tag_lo_sel;
+	req->rx_dest_tag_hi_sel = params->rx_dest_tag_hi_sel;
+	req->rx_dest_tag_lo_sel = params->rx_dest_tag_lo_sel;
+	req->rx_fdq0_sz0_qnum = params->rx_fdq0_sz0_qnum;
+	req->rx_fdq1_qnum = params->rx_fdq1_qnum;
+	req->rx_fdq2_qnum = params->rx_fdq2_qnum;
+	req->rx_fdq3_qnum = params->rx_fdq3_qnum;
+	req->rx_ps_location = params->rx_ps_location;
+
+	ret = ti_sci_do_xfer(info, xfer);
+	if (ret) {
+		dev_err(dev, "RX_FL_CFG: Mbox send fail %d\n", ret);
+		goto fail;
+	}
+
+	resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
+	ret = ti_sci_is_response_ack(resp) ? 0 : -EINVAL;
+
+fail:
+	ti_sci_put_one_xfer(&info->minfo, xfer);
+	dev_dbg(info->dev, "RX_FL_CFG: %u ret:%d\n", params->flow_index, ret);
+	return ret;
+}
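
A hedged sketch of a TX channel configuration through the rm_udmap_ops installed below; only a few fields are populated and the validity mask is a placeholder:

/* Illustration only, not part of the patch. */
static int example_udmap_tx_cfg(const struct ti_sci_handle *handle)
{
	const struct ti_sci_rm_udmap_ops *udmap_ops = &handle->ops.rm_udmap_ops;
	struct ti_sci_msg_rm_udmap_tx_ch_cfg params = {
		.nav_id = 188,		/* Navigator Subsystem dev_id */
		.index = 0,		/* channel number */
		.tx_chan_type = 2,	/* placeholder channel type */
		.txcq_qnum = 96,	/* placeholder completion queue */
		.valid_params = 0x3,	/* placeholder validity mask */
	};

	return udmap_ops->tx_ch_cfg(handle, &params);
}
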
+
+/**
+ * ti_sci_cmd_proc_request() - Command to request a physical processor control
+ * @handle:	Pointer to TI SCI handle
+ * @proc_id:	Processor ID this request is for
+ *
+ * Return: 0 if all went well, else returns appropriate error value.
+ */
+static int ti_sci_cmd_proc_request(const struct ti_sci_handle *handle,
+				   u8 proc_id)
+{
+	struct ti_sci_msg_req_proc_request *req;
+	struct ti_sci_msg_hdr *resp;
+	struct ti_sci_info *info;
+	struct ti_sci_xfer *xfer;
+	struct device *dev;
+	int ret = 0;
+
+	if (!handle)
+		return -EINVAL;
+	if (IS_ERR(handle))
+		return PTR_ERR(handle);
+
+	info = handle_to_ti_sci_info(handle);
+	dev = info->dev;
+
+	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_PROC_REQUEST,
+				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
+				   sizeof(*req), sizeof(*resp));
+	if (IS_ERR(xfer)) {
+		ret = PTR_ERR(xfer);
+		dev_err(dev, "Message alloc failed(%d)\n", ret);
+		return ret;
+	}
+	req = (struct ti_sci_msg_req_proc_request *)xfer->xfer_buf;
+	req->processor_id = proc_id;
+
+	ret = ti_sci_do_xfer(info, xfer);
+	if (ret) {
+		dev_err(dev, "Mbox send fail %d\n", ret);
+		goto fail;
+	}
+
+	resp = (struct ti_sci_msg_hdr *)xfer->tx_message.buf;
+
+	ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;
+
+fail:
+	ti_sci_put_one_xfer(&info->minfo, xfer);
+
+	return ret;
+}
+
+/**
+ * ti_sci_cmd_proc_release() - Command to release a physical processor control
+ * @handle:	Pointer to TI SCI handle
+ * @proc_id:	Processor ID this request is for
+ *
+ * Return: 0 if all went well, else returns appropriate error value.
+ */
+static int ti_sci_cmd_proc_release(const struct ti_sci_handle *handle,
+				   u8 proc_id)
+{
+	struct ti_sci_msg_req_proc_release *req;
+	struct ti_sci_msg_hdr *resp;
+	struct ti_sci_info *info;
+	struct ti_sci_xfer *xfer;
+	struct device *dev;
+	int ret = 0;
+
+	if (!handle)
+		return -EINVAL;
+	if (IS_ERR(handle))
+		return PTR_ERR(handle);
+
+	info = handle_to_ti_sci_info(handle);
+	dev = info->dev;
+
+	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_PROC_RELEASE,
+				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
+				   sizeof(*req), sizeof(*resp));
+	if (IS_ERR(xfer)) {
+		ret = PTR_ERR(xfer);
+		dev_err(dev, "Message alloc failed(%d)\n", ret);
+		return ret;
+	}
+	req = (struct ti_sci_msg_req_proc_release *)xfer->xfer_buf;
+	req->processor_id = proc_id;
+
+	ret = ti_sci_do_xfer(info, xfer);
+	if (ret) {
+		dev_err(dev, "Mbox send fail %d\n", ret);
+		goto fail;
+	}
+
+	resp = (struct ti_sci_msg_hdr *)xfer->tx_message.buf;
+
+	ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;
+
+fail:
+	ti_sci_put_one_xfer(&info->minfo, xfer);
+
+	return ret;
+}
+
+/**
+ * ti_sci_cmd_proc_handover() - Command to handover a physical processor
+ *				control to a host in the processor's access
+ *				control list.
+ * @handle:	Pointer to TI SCI handle
+ * @proc_id:	Processor ID this request is for
+ * @host_id:	Host ID to get the control of the processor
+ *
+ * Return: 0 if all went well, else returns appropriate error value.
+ */
+static int ti_sci_cmd_proc_handover(const struct ti_sci_handle *handle,
+				    u8 proc_id, u8 host_id)
+{
+	struct ti_sci_msg_req_proc_handover *req;
+	struct ti_sci_msg_hdr *resp;
+	struct ti_sci_info *info;
+	struct ti_sci_xfer *xfer;
+	struct device *dev;
+	int ret = 0;
+
+	if (!handle)
+		return -EINVAL;
+	if (IS_ERR(handle))
+		return PTR_ERR(handle);
+
+	info = handle_to_ti_sci_info(handle);
+	dev = info->dev;
+
+	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_PROC_HANDOVER,
+				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
+				   sizeof(*req), sizeof(*resp));
+	if (IS_ERR(xfer)) {
+		ret = PTR_ERR(xfer);
+		dev_err(dev, "Message alloc failed(%d)\n", ret);
+		return ret;
+	}
+	req = (struct ti_sci_msg_req_proc_handover *)xfer->xfer_buf;
+	req->processor_id = proc_id;
+	req->host_id = host_id;
+
+	ret = ti_sci_do_xfer(info, xfer);
+	if (ret) {
+		dev_err(dev, "Mbox send fail %d\n", ret);
+		goto fail;
+	}
+
+	resp = (struct ti_sci_msg_hdr *)xfer->tx_message.buf;
+
+	ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;
+
+fail:
+	ti_sci_put_one_xfer(&info->minfo, xfer);
+
+	return ret;
+}
+
+/**
+ * ti_sci_cmd_proc_set_config() - Command to set the processor boot
+ *				    configuration flags
+ * @handle:		Pointer to TI SCI handle
+ * @proc_id:		Processor ID this request is for
+ * @bootvector:		Processor boot vector (boot address)
+ * @config_flags_set:	Configuration flags to be set
+ * @config_flags_clear:	Configuration flags to be cleared.
+ *
+ * Return: 0 if all went well, else returns appropriate error value.
+ */
+static int ti_sci_cmd_proc_set_config(const struct ti_sci_handle *handle,
+				      u8 proc_id, u64 bootvector,
+				      u32 config_flags_set,
+				      u32 config_flags_clear)
+{
+	struct ti_sci_msg_req_set_config *req;
+	struct ti_sci_msg_hdr *resp;
+	struct ti_sci_info *info;
+	struct ti_sci_xfer *xfer;
+	struct device *dev;
+	int ret = 0;
+
+	if (!handle)
+		return -EINVAL;
+	if (IS_ERR(handle))
+		return PTR_ERR(handle);
+
+	info = handle_to_ti_sci_info(handle);
+	dev = info->dev;
+
+	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_SET_CONFIG,
+				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
+				   sizeof(*req), sizeof(*resp));
+	if (IS_ERR(xfer)) {
+		ret = PTR_ERR(xfer);
+		dev_err(dev, "Message alloc failed(%d)\n", ret);
+		return ret;
+	}
+	req = (struct ti_sci_msg_req_set_config *)xfer->xfer_buf;
+	req->processor_id = proc_id;
+	req->bootvector_low = bootvector & TI_SCI_ADDR_LOW_MASK;
+	req->bootvector_high = (bootvector & TI_SCI_ADDR_HIGH_MASK) >>
+				TI_SCI_ADDR_HIGH_SHIFT;
+	req->config_flags_set = config_flags_set;
+	req->config_flags_clear = config_flags_clear;
+
+	ret = ti_sci_do_xfer(info, xfer);
+	if (ret) {
+		dev_err(dev, "Mbox send fail %d\n", ret);
+		goto fail;
+	}
+
+	resp = (struct ti_sci_msg_hdr *)xfer->tx_message.buf;
+
+	ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;
+
+fail:
+	ti_sci_put_one_xfer(&info->minfo, xfer);
+
+	return ret;
+}
+
+/**
+ * ti_sci_cmd_proc_set_control() - Command to set the processor boot
+ *				     control flags
+ * @handle:			Pointer to TI SCI handle
+ * @proc_id:			Processor ID this request is for
+ * @control_flags_set:		Control flags to be set
+ * @control_flags_clear:	Control flags to be cleared
+ *
+ * Return: 0 if all went well, else returns appropriate error value.
+ */
+static int ti_sci_cmd_proc_set_control(const struct ti_sci_handle *handle,
+				       u8 proc_id, u32 control_flags_set,
+				       u32 control_flags_clear)
+{
+	struct ti_sci_msg_req_set_ctrl *req;
+	struct ti_sci_msg_hdr *resp;
+	struct ti_sci_info *info;
+	struct ti_sci_xfer *xfer;
+	struct device *dev;
+	int ret = 0;
+
+	if (!handle)
+		return -EINVAL;
+	if (IS_ERR(handle))
+		return PTR_ERR(handle);
+
+	info = handle_to_ti_sci_info(handle);
+	dev = info->dev;
+
+	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_SET_CTRL,
+				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
+				   sizeof(*req), sizeof(*resp));
+	if (IS_ERR(xfer)) {
+		ret = PTR_ERR(xfer);
+		dev_err(dev, "Message alloc failed(%d)\n", ret);
+		return ret;
+	}
+	req = (struct ti_sci_msg_req_set_ctrl *)xfer->xfer_buf;
+	req->processor_id = proc_id;
+	req->control_flags_set = control_flags_set;
+	req->control_flags_clear = control_flags_clear;
+
+	ret = ti_sci_do_xfer(info, xfer);
+	if (ret) {
+		dev_err(dev, "Mbox send fail %d\n", ret);
+		goto fail;
+	}
+
+	resp = (struct ti_sci_msg_hdr *)xfer->tx_message.buf;
+
+	ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;
+
+fail:
+	ti_sci_put_one_xfer(&info->minfo, xfer);
+
+	return ret;
+}
+
+/**
+ * ti_sci_cmd_proc_get_status() - Command to get the processor boot status
+ * @handle:	Pointer to TI SCI handle
+ * @proc_id:	Processor ID this request is for
+ * @bv:		Returns the processor boot vector
+ * @cfg_flags:	Returns the processor configuration flags
+ * @ctrl_flags:	Returns the processor control flags
+ * @sts_flags:	Returns the processor status flags
+ *
+ * Return: 0 if all went well, else returns appropriate error value.
+ */
+static int ti_sci_cmd_proc_get_status(const struct ti_sci_handle *handle,
+				      u8 proc_id, u64 *bv, u32 *cfg_flags,
+				      u32 *ctrl_flags, u32 *sts_flags)
+{
+	struct ti_sci_msg_resp_get_status *resp;
+	struct ti_sci_msg_req_get_status *req;
+	struct ti_sci_info *info;
+	struct ti_sci_xfer *xfer;
+	struct device *dev;
+	int ret = 0;
+
+	if (!handle)
+		return -EINVAL;
+	if (IS_ERR(handle))
+		return PTR_ERR(handle);
+
+	info = handle_to_ti_sci_info(handle);
+	dev = info->dev;
+
+	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_GET_STATUS,
+				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
+				   sizeof(*req), sizeof(*resp));
+	if (IS_ERR(xfer)) {
+		ret = PTR_ERR(xfer);
+		dev_err(dev, "Message alloc failed(%d)\n", ret);
+		return ret;
+	}
+	req = (struct ti_sci_msg_req_get_status *)xfer->xfer_buf;
+	req->processor_id = proc_id;
+
+	ret = ti_sci_do_xfer(info, xfer);
+	if (ret) {
+		dev_err(dev, "Mbox send fail %d\n", ret);
+		goto fail;
+	}
+
+	resp = (struct ti_sci_msg_resp_get_status *)xfer->tx_message.buf;
+
+	if (!ti_sci_is_response_ack(resp)) {
+		ret = -ENODEV;
+	} else {
+		*bv = (resp->bootvector_low & TI_SCI_ADDR_LOW_MASK) |
+		      (((u64)resp->bootvector_high << TI_SCI_ADDR_HIGH_SHIFT) &
+		       TI_SCI_ADDR_HIGH_MASK);
+		*cfg_flags = resp->config_flags;
+		*ctrl_flags = resp->control_flags;
+		*sts_flags = resp->status_flags;
+	}
+
+fail:
+	ti_sci_put_one_xfer(&info->minfo, xfer);
+
+	return ret;
+}
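
A sketch of a typical remote-processor boot sequence through the proc_ops installed below; the processor ID, boot vector and flag values are placeholders:

/* Illustration only, not part of the patch. */
static int example_proc_boot(const struct ti_sci_handle *handle)
{
	const struct ti_sci_proc_ops *pops = &handle->ops.proc_ops;
	u64 bv;
	u32 cfg, ctrl, sts;
	int ret;

	ret = pops->request(handle, 1);
	if (ret)
		return ret;

	/* Point the core at a hypothetical boot vector, then read back */
	ret = pops->set_config(handle, 1, 0x9d000000, 0, 0);
	if (!ret)
		ret = pops->get_status(handle, 1, &bv, &cfg, &ctrl, &sts);

	pops->release(handle, 1);
	return ret;
}
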
+
 /*
  * ti_sci_setup_ops() - Setup the operations structures
  * @info:	pointer to TISCI pointer
@@ -1625,11 +2923,19 @@
 	struct ti_sci_core_ops *core_ops = &ops->core_ops;
 	struct ti_sci_dev_ops *dops = &ops->dev_ops;
 	struct ti_sci_clk_ops *cops = &ops->clk_ops;
+	struct ti_sci_rm_core_ops *rm_core_ops = &ops->rm_core_ops;
+	struct ti_sci_rm_irq_ops *iops = &ops->rm_irq_ops;
+	struct ti_sci_rm_ringacc_ops *rops = &ops->rm_ring_ops;
+	struct ti_sci_rm_psil_ops *psilops = &ops->rm_psil_ops;
+	struct ti_sci_rm_udmap_ops *udmap_ops = &ops->rm_udmap_ops;
+	struct ti_sci_proc_ops *pops = &ops->proc_ops;
 
 	core_ops->reboot_device = ti_sci_cmd_core_reboot;
 
 	dops->get_device = ti_sci_cmd_get_device;
+	dops->get_device_exclusive = ti_sci_cmd_get_device_exclusive;
 	dops->idle_device = ti_sci_cmd_idle_device;
+	dops->idle_device_exclusive = ti_sci_cmd_idle_device_exclusive;
 	dops->put_device = ti_sci_cmd_put_device;
 
 	dops->is_valid = ti_sci_cmd_dev_is_valid;
@@ -1655,6 +2961,32 @@
 	cops->get_best_match_freq = ti_sci_cmd_clk_get_match_freq;
 	cops->set_freq = ti_sci_cmd_clk_set_freq;
 	cops->get_freq = ti_sci_cmd_clk_get_freq;
+
+	rm_core_ops->get_range = ti_sci_cmd_get_resource_range;
+	rm_core_ops->get_range_from_shost =
+				ti_sci_cmd_get_resource_range_from_shost;
+
+	iops->set_irq = ti_sci_cmd_set_irq;
+	iops->set_event_map = ti_sci_cmd_set_event_map;
+	iops->free_irq = ti_sci_cmd_free_irq;
+	iops->free_event_map = ti_sci_cmd_free_event_map;
+
+	rops->config = ti_sci_cmd_ring_config;
+	rops->get_config = ti_sci_cmd_ring_get_config;
+
+	psilops->pair = ti_sci_cmd_rm_psil_pair;
+	psilops->unpair = ti_sci_cmd_rm_psil_unpair;
+
+	udmap_ops->tx_ch_cfg = ti_sci_cmd_rm_udmap_tx_ch_cfg;
+	udmap_ops->rx_ch_cfg = ti_sci_cmd_rm_udmap_rx_ch_cfg;
+	udmap_ops->rx_flow_cfg = ti_sci_cmd_rm_udmap_rx_flow_cfg;
+
+	pops->request = ti_sci_cmd_proc_request;
+	pops->release = ti_sci_cmd_proc_release;
+	pops->handover = ti_sci_cmd_proc_handover;
+	pops->set_config = ti_sci_cmd_proc_set_config;
+	pops->set_control = ti_sci_cmd_proc_set_control;
+	pops->get_status = ti_sci_cmd_proc_get_status;
 }
 
 /**
@@ -1779,6 +3111,227 @@
 }
 EXPORT_SYMBOL_GPL(devm_ti_sci_get_handle);
 
+/**
+ * ti_sci_get_by_phandle() - Get the TI SCI handle using DT phandle
+ * @np:		device node
+ * @property:	property name containing phandle on TISCI node
+ *
+ * NOTE: The function does not track individual clients of the framework
+ * and is expected to be maintained by the caller of the TI SCI protocol
+ * library. ti_sci_put_handle must be balanced with a successful
+ * ti_sci_get_by_phandle.
+ * Return: pointer to handle if successful, else:
+ * -EPROBE_DEFER if the instance is not ready
+ * -ENODEV if the required node handler is missing
+ * -EINVAL if invalid conditions are encountered.
+ */
+const struct ti_sci_handle *ti_sci_get_by_phandle(struct device_node *np,
+						  const char *property)
+{
+	struct ti_sci_handle *handle = NULL;
+	struct device_node *ti_sci_np;
+	struct ti_sci_info *info;
+	struct list_head *p;
+
+	if (!np) {
+		pr_err("I need a device pointer\n");
+		return ERR_PTR(-EINVAL);
+	}
+
+	ti_sci_np = of_parse_phandle(np, property, 0);
+	if (!ti_sci_np)
+		return ERR_PTR(-ENODEV);
+
+	mutex_lock(&ti_sci_list_mutex);
+	list_for_each(p, &ti_sci_list) {
+		info = list_entry(p, struct ti_sci_info, node);
+		if (ti_sci_np == info->dev->of_node) {
+			handle = &info->handle;
+			info->users++;
+			break;
+		}
+	}
+	mutex_unlock(&ti_sci_list_mutex);
+	of_node_put(ti_sci_np);
+
+	if (!handle)
+		return ERR_PTR(-EPROBE_DEFER);
+
+	return handle;
+}
+EXPORT_SYMBOL_GPL(ti_sci_get_by_phandle);
+
+/**
+ * devm_ti_sci_get_by_phandle() - Managed get handle using phandle
+ * @dev:	Device pointer requesting TISCI handle
+ * @property:	property name containing phandle on TISCI node
+ *
+ * NOTE: This releases the handle once the device resources are
+ * no longer needed. MUST NOT BE released with ti_sci_put_handle.
+ * The function does not track individual clients of the framework
+ * and is expected to be maintained by caller of TI SCI protocol library.
+ *
+ * Return: pointer to handle if successful, else corresponding error pointer.
+ */
+const struct ti_sci_handle *devm_ti_sci_get_by_phandle(struct device *dev,
+						       const char *property)
+{
+	const struct ti_sci_handle *handle;
+	const struct ti_sci_handle **ptr;
+
+	ptr = devres_alloc(devm_ti_sci_release, sizeof(*ptr), GFP_KERNEL);
+	if (!ptr)
+		return ERR_PTR(-ENOMEM);
+	handle = ti_sci_get_by_phandle(dev_of_node(dev), property);
+
+	if (!IS_ERR(handle)) {
+		*ptr = handle;
+		devres_add(dev, ptr);
+	} else {
+		devres_free(ptr);
+	}
+
+	return handle;
+}
+EXPORT_SYMBOL_GPL(devm_ti_sci_get_by_phandle);
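
A usage sketch for a client driver's probe path; the "ti,sci" property name is assumed here for illustration and is not defined by this patch:

/* Illustration only, not part of the patch. */
static int example_client_probe(struct device *dev)
{
	const struct ti_sci_handle *handle;

	handle = devm_ti_sci_get_by_phandle(dev, "ti,sci");
	if (IS_ERR(handle))
		return PTR_ERR(handle);	/* may be -EPROBE_DEFER */

	/* handle->ops.* may now be used for the lifetime of the device */
	return 0;
}
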
+
+/**
+ * ti_sci_get_free_resource() - Get a free resource from TISCI resource.
+ * @res:	Pointer to the TISCI resource
+ *
+ * Return: resource num if all went ok else TI_SCI_RESOURCE_NULL.
+ */
+u16 ti_sci_get_free_resource(struct ti_sci_resource *res)
+{
+	unsigned long flags;
+	u16 set, free_bit;
+
+	raw_spin_lock_irqsave(&res->lock, flags);
+	for (set = 0; set < res->sets; set++) {
+		free_bit = find_first_zero_bit(res->desc[set].res_map,
+					       res->desc[set].num);
+		if (free_bit != res->desc[set].num) {
+			set_bit(free_bit, res->desc[set].res_map);
+			raw_spin_unlock_irqrestore(&res->lock, flags);
+			return res->desc[set].start + free_bit;
+		}
+	}
+	raw_spin_unlock_irqrestore(&res->lock, flags);
+
+	return TI_SCI_RESOURCE_NULL;
+}
+EXPORT_SYMBOL_GPL(ti_sci_get_free_resource);
+
+/**
+ * ti_sci_release_resource() - Release a resource from TISCI resource.
+ * @res:	Pointer to the TISCI resource
+ * @id:		Resource id to be released.
+ */
+void ti_sci_release_resource(struct ti_sci_resource *res, u16 id)
+{
+	unsigned long flags;
+	u16 set;
+
+	raw_spin_lock_irqsave(&res->lock, flags);
+	for (set = 0; set < res->sets; set++) {
+		if (res->desc[set].start <= id &&
+		    (res->desc[set].num + res->desc[set].start) > id)
+			clear_bit(id - res->desc[set].start,
+				  res->desc[set].res_map);
+	}
+	raw_spin_unlock_irqrestore(&res->lock, flags);
+}
+EXPORT_SYMBOL_GPL(ti_sci_release_resource);
+
+/**
+ * ti_sci_get_num_resources() - Get the number of resources in TISCI resource
+ * @res:	Pointer to the TISCI resource
+ *
+ * Return: Total number of available resources.
+ */
+u32 ti_sci_get_num_resources(struct ti_sci_resource *res)
+{
+	u32 set, count = 0;
+
+	for (set = 0; set < res->sets; set++)
+		count += res->desc[set].num;
+
+	return count;
+}
+EXPORT_SYMBOL_GPL(ti_sci_get_num_resources);
+
+/**
+ * devm_ti_sci_get_of_resource() - Get a TISCI resource assigned to a device
+ * @handle:	TISCI handle
+ * @dev:	Device pointer to which the resource is assigned
+ * @dev_id:	TISCI device id to which the resource is assigned
+ * @of_prop:	property name by which the resource are represented
+ *
+ * Return: Pointer to ti_sci_resource if all went well else appropriate
+ *	   error pointer.
+ */
+struct ti_sci_resource *
+devm_ti_sci_get_of_resource(const struct ti_sci_handle *handle,
+			    struct device *dev, u32 dev_id, char *of_prop)
+{
+	struct ti_sci_resource *res;
+	bool valid_set = false;
+	u32 resource_subtype;
+	int i, ret;
+
+	res = devm_kzalloc(dev, sizeof(*res), GFP_KERNEL);
+	if (!res)
+		return ERR_PTR(-ENOMEM);
+
+	ret = of_property_count_elems_of_size(dev_of_node(dev), of_prop,
+					      sizeof(u32));
+	if (ret < 0) {
+		dev_err(dev, "%s resource type ids not available\n", of_prop);
+		return ERR_PTR(ret);
+	}
+	res->sets = ret;
+
+	res->desc = devm_kcalloc(dev, res->sets, sizeof(*res->desc),
+				 GFP_KERNEL);
+	if (!res->desc)
+		return ERR_PTR(-ENOMEM);
+
+	for (i = 0; i < res->sets; i++) {
+		ret = of_property_read_u32_index(dev_of_node(dev), of_prop, i,
+						 &resource_subtype);
+		if (ret)
+			return ERR_PTR(-EINVAL);
+
+		ret = handle->ops.rm_core_ops.get_range(handle, dev_id,
+							resource_subtype,
+							&res->desc[i].start,
+							&res->desc[i].num);
+		if (ret) {
+			dev_dbg(dev, "dev = %d subtype %d not allocated for this host\n",
+				dev_id, resource_subtype);
+			res->desc[i].start = 0;
+			res->desc[i].num = 0;
+			continue;
+		}
+
+		dev_dbg(dev, "dev = %d, subtype = %d, start = %d, num = %d\n",
+			dev_id, resource_subtype, res->desc[i].start,
+			res->desc[i].num);
+
+		valid_set = true;
+		res->desc[i].res_map =
+			devm_kzalloc(dev, BITS_TO_LONGS(res->desc[i].num) *
+				     sizeof(*res->desc[i].res_map), GFP_KERNEL);
+		if (!res->desc[i].res_map)
+			return ERR_PTR(-ENOMEM);
+	}
+	raw_spin_lock_init(&res->lock);
+
+	if (valid_set)
+		return res;
+
+	return ERR_PTR(-EINVAL);
+}
+
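
A usage sketch tying the resource helpers together; the dev_id and the "ti,sci-rm-range-vint" property name are assumptions for illustration:

/* Illustration only, not part of the patch. */
static int example_resource_pool(const struct ti_sci_handle *handle,
				 struct device *dev)
{
	struct ti_sci_resource *res;
	u16 idx;

	res = devm_ti_sci_get_of_resource(handle, dev, 56,
					  "ti,sci-rm-range-vint");
	if (IS_ERR(res))
		return PTR_ERR(res);

	idx = ti_sci_get_free_resource(res);
	if (idx == TI_SCI_RESOURCE_NULL)
		return -ENOSPC;

	/* ... use resource index "idx" ... */

	ti_sci_release_resource(res, idx);
	return 0;
}
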
 static int tisci_reboot_handler(struct notifier_block *nb, unsigned long mode,
 				void *cmd)
 {
@@ -1793,16 +3346,39 @@
 
 /* Description for K2G */
 static const struct ti_sci_desc ti_sci_pmmc_k2g_desc = {
-	.host_id = 2,
+	.default_host_id = 2,
 	/* Conservative duration */
 	.max_rx_timeout_ms = 1000,
 	/* Limited by MBOX_TX_QUEUE_LEN. K2G can handle up to 128 messages! */
 	.max_msgs = 20,
 	.max_msg_size = 64,
+	.rm_type_map = NULL,
+};
+
+static struct ti_sci_rm_type_map ti_sci_am654_rm_type_map[] = {
+	{.dev_id = 56, .type = 0x00b}, /* GIC_IRQ */
+	{.dev_id = 179, .type = 0x000}, /* MAIN_NAV_UDMASS_IA0 */
+	{.dev_id = 187, .type = 0x009}, /* MAIN_NAV_RA */
+	{.dev_id = 188, .type = 0x006}, /* MAIN_NAV_UDMAP */
+	{.dev_id = 194, .type = 0x007}, /* MCU_NAV_UDMAP */
+	{.dev_id = 195, .type = 0x00a}, /* MCU_NAV_RA */
+	{.dev_id = 0, .type = 0x000}, /* end of table */
+};
+
+/* Description for AM654 */
+static const struct ti_sci_desc ti_sci_pmmc_am654_desc = {
+	.default_host_id = 12,
+	/* Conservative duration */
+	.max_rx_timeout_ms = 10000,
+	/* Limited by MBOX_TX_QUEUE_LEN. K2G can handle up to 128 messages! */
+	.max_msgs = 20,
+	.max_msg_size = 60,
+	.rm_type_map = ti_sci_am654_rm_type_map,
 };
 
 static const struct of_device_id ti_sci_of_match[] = {
 	{.compatible = "ti,k2g-sci", .data = &ti_sci_pmmc_k2g_desc},
+	{.compatible = "ti,am654-sci", .data = &ti_sci_pmmc_am654_desc},
 	{ /* Sentinel */ },
 };
 MODULE_DEVICE_TABLE(of, ti_sci_of_match);
@@ -1819,6 +3395,7 @@
 	int ret = -EINVAL;
 	int i;
 	int reboot = 0;
+	u32 h_id;
 
 	of_id = of_match_device(ti_sci_of_match, dev);
 	if (!of_id) {
@@ -1833,6 +3410,19 @@
 
 	info->dev = dev;
 	info->desc = desc;
+	ret = of_property_read_u32(dev->of_node, "ti,host-id", &h_id);
+	/* if the property is not present in DT, use a default from desc */
+	if (ret < 0) {
+		info->host_id = info->desc->default_host_id;
+	} else {
+		if (!h_id) {
+			dev_warn(dev, "Host ID 0 is reserved for firmware\n");
+			info->host_id = info->desc->default_host_id;
+		} else {
+			info->host_id = h_id;
+		}
+	}
+
 	reboot = of_property_read_bool(dev->of_node,
 				       "ti,system-reboot-controller");
 	INIT_LIST_HEAD(&info->node);
diff --git a/drivers/firmware/ti_sci.h b/drivers/firmware/ti_sci.h
index 12bf316..f0d068c 100644
--- a/drivers/firmware/ti_sci.h
+++ b/drivers/firmware/ti_sci.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: BSD-3-Clause
+/* SPDX-License-Identifier: BSD-3-Clause */
 /*
  * Texas Instruments System Control Interface (TISCI) Protocol
  *
@@ -35,6 +35,50 @@
 #define TI_SCI_MSG_QUERY_CLOCK_FREQ	0x010d
 #define TI_SCI_MSG_GET_CLOCK_FREQ	0x010e
 
+/* Resource Management Requests */
+#define TI_SCI_MSG_GET_RESOURCE_RANGE	0x1500
+
+/* IRQ requests */
+#define TI_SCI_MSG_SET_IRQ		0x1000
+#define TI_SCI_MSG_FREE_IRQ		0x1001
+
+/* NAVSS resource management */
+/* Ringacc requests */
+#define TI_SCI_MSG_RM_RING_ALLOCATE		0x1100
+#define TI_SCI_MSG_RM_RING_FREE			0x1101
+#define TI_SCI_MSG_RM_RING_RECONFIG		0x1102
+#define TI_SCI_MSG_RM_RING_RESET		0x1103
+#define TI_SCI_MSG_RM_RING_CFG			0x1110
+#define TI_SCI_MSG_RM_RING_GET_CFG		0x1111
+
+/* PSI-L requests */
+#define TI_SCI_MSG_RM_PSIL_PAIR			0x1280
+#define TI_SCI_MSG_RM_PSIL_UNPAIR		0x1281
+
+#define TI_SCI_MSG_RM_UDMAP_TX_ALLOC		0x1200
+#define TI_SCI_MSG_RM_UDMAP_TX_FREE		0x1201
+#define TI_SCI_MSG_RM_UDMAP_RX_ALLOC		0x1210
+#define TI_SCI_MSG_RM_UDMAP_RX_FREE		0x1211
+#define TI_SCI_MSG_RM_UDMAP_FLOW_CFG		0x1220
+#define TI_SCI_MSG_RM_UDMAP_OPT_FLOW_CFG	0x1221
+
+#define TISCI_MSG_RM_UDMAP_TX_CH_CFG		0x1205
+#define TISCI_MSG_RM_UDMAP_TX_CH_GET_CFG	0x1206
+#define TISCI_MSG_RM_UDMAP_RX_CH_CFG		0x1215
+#define TISCI_MSG_RM_UDMAP_RX_CH_GET_CFG	0x1216
+#define TISCI_MSG_RM_UDMAP_FLOW_CFG		0x1230
+#define TISCI_MSG_RM_UDMAP_FLOW_SIZE_THRESH_CFG	0x1231
+#define TISCI_MSG_RM_UDMAP_FLOW_GET_CFG		0x1232
+#define TISCI_MSG_RM_UDMAP_FLOW_SIZE_THRESH_GET_CFG	0x1233
+
+/* Processor Control requests */
+#define TI_SCI_MSG_PROC_REQUEST		0xc000
+#define TI_SCI_MSG_PROC_RELEASE		0xc001
+#define TI_SCI_MSG_PROC_HANDOVER	0xc005
+#define TI_SCI_MSG_SET_CONFIG		0xc100
+#define TI_SCI_MSG_SET_CTRL		0xc101
+#define TI_SCI_MSG_GET_STATUS		0xc400
+
 /**
  * struct ti_sci_msg_hdr - Generic Message Header for All messages and responses
  * @type:	Type of messages: One of TI_SCI_MSG* values
@@ -195,7 +239,8 @@
  * @dev_id:	Device identifier this request is for
  * @clk_id:	Clock identifier for the device for this request.
  *		Each device has it's own set of clock inputs. This indexes
- *		which clock input to modify.
+ *		which clock input to modify. Set to 255 if clock ID is
+ *		greater than or equal to 255.
  * @request_state: Request the state for the clock to be set to.
  *		MSG_CLOCK_SW_STATE_UNREQ: The IP does not require this clock,
  *		it can be disabled, regardless of the state of the device
@@ -206,6 +251,9 @@
  *		being required by the device.(default)
  *		MSG_CLOCK_SW_STATE_REQ:  Configure the clock to be enabled,
  *		regardless of the state of the device.
+ * @clk_id_32:	Clock identifier for the device for this request.
+ *		Only to be used if the clock ID is greater than or equal to
+ *		255.
  *
  * Normally, all required clocks are managed by TISCI entity, this is used
  * only for specific control *IF* required. Auto managed state is
@@ -227,6 +275,7 @@
 #define MSG_CLOCK_SW_STATE_AUTO		1
 #define MSG_CLOCK_SW_STATE_REQ		2
 	u8 request_state;
+	u32 clk_id_32;
 } __packed;
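/*
 * Editor's illustrative sketch (not part of the patch): the 8-bit clock
 * identifier fields described above saturate at 255, and larger identifiers
 * are carried in the matching *_32 field. The helper below is hypothetical
 * and only demonstrates that encoding convention.
 */
static void example_encode_clk_id(u32 clk_id, u8 *clk_id_8, u32 *clk_id_32)
{
	if (clk_id < 255) {
		*clk_id_8 = clk_id;
	} else {
		*clk_id_8 = 255;	/* sentinel: real ID is in the 32-bit field */
		*clk_id_32 = clk_id;
	}
}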
 
 /**
@@ -235,7 +284,11 @@
  * @dev_id:	Device identifier this request is for
  * @clk_id:	Clock identifier for the device for this request.
  *		Each device has it's own set of clock inputs. This indexes
- *		which clock input to get state of.
+ *		which clock input to get state of. Set to 255 if the clock
+ *		ID is greater than or equal to 255.
+ * @clk_id_32:	Clock identifier for the device for the request.
+ *		Only to be used if the clock ID is greater than or equal to
+ *		255.
  *
  * Request type is TI_SCI_MSG_GET_CLOCK_STATE, response is state
  * of the clock
@@ -244,6 +297,7 @@
 	struct ti_sci_msg_hdr hdr;
 	u32 dev_id;
 	u8 clk_id;
+	u32 clk_id_32;
 } __packed;
 
 /**
@@ -271,9 +325,13 @@
  * @dev_id:	Device identifier this request is for
  * @clk_id:	Clock identifier for the device for this request.
  *		Each device has it's own set of clock inputs. This indexes
- *		which clock input to modify.
+ *		which clock input to modify. Set to 255 if clock ID is
+ *		greater than or equal to 255.
  * @parent_id:	The new clock parent is selectable by an index via this
- *		parameter.
+ *		parameter. Set to 255 if clock ID is greater than or
+ *		equal to 255.
+ * @clk_id_32:	Clock identifier if @clk_id field is 255.
+ * @parent_id_32:	Parent identifier if @parent_id is 255.
  *
  * Request type is TI_SCI_MSG_SET_CLOCK_PARENT, response is generic
  * ACK / NACK message.
@@ -283,6 +341,8 @@
 	u32 dev_id;
 	u8 clk_id;
 	u8 parent_id;
+	u32 clk_id_32;
+	u32 parent_id_32;
 } __packed;
 
 /**
@@ -291,7 +351,10 @@
  * @dev_id:	Device identifier this request is for
  * @clk_id:	Clock identifier for the device for this request.
  *		Each device has it's own set of clock inputs. This indexes
- *		which clock input to get the parent for.
+ *		which clock input to get the parent for. If this field
+ *		contains 255, the actual clock identifier is stored in
+ *		@clk_id_32.
+ * @clk_id_32:	Clock identifier if the @clk_id field contains 255.
  *
  * Request type is TI_SCI_MSG_GET_CLOCK_PARENT, response is parent information
  */
@@ -299,25 +362,32 @@
 	struct ti_sci_msg_hdr hdr;
 	u32 dev_id;
 	u8 clk_id;
+	u32 clk_id_32;
 } __packed;
 
 /**
  * struct ti_sci_msg_resp_get_clock_parent - Response with clock parent
  * @hdr:	Generic Header
- * @parent_id:	The current clock parent
+ * @parent_id:	The current clock parent. If set to 255, the current parent
+ *		ID can be found from the @parent_id_32 field.
+ * @parent_id_32:	Current clock parent if @parent_id field is set to
+ *			255.
  *
  * Response to TI_SCI_MSG_GET_CLOCK_PARENT.
  */
 struct ti_sci_msg_resp_get_clock_parent {
 	struct ti_sci_msg_hdr hdr;
 	u8 parent_id;
+	u32 parent_id_32;
 } __packed;
 
 /**
  * struct ti_sci_msg_req_get_clock_num_parents - Request to get clock parents
  * @hdr:	Generic header
  * @dev_id:	Device identifier this request is for
- * @clk_id:	Clock identifier for the device for this request.
+ * @clk_id:	Clock identifier for the device for this request. Set to
+ *		255 if clock ID is greater than or equal to 255.
+ * @clk_id_32:	Clock identifier if the @clk_id field contains 255.
  *
  * This request provides information about how many clock parent options
  * are available for a given clock to a device. This is typically used
@@ -330,18 +400,24 @@
 	struct ti_sci_msg_hdr hdr;
 	u32 dev_id;
 	u8 clk_id;
+	u32 clk_id_32;
 } __packed;
 
 /**
  * struct ti_sci_msg_resp_get_clock_num_parents - Response for get clk parents
  * @hdr:		Generic header
- * @num_parents:	Number of clock parents
+ * @num_parents:	Number of clock parents. If set to 255, the actual
+ *			number of parents is stored into @num_parents_32
+ *			field instead.
+ * @num_parents_32:	Number of clock parents if @num_parents field is
+ *			set to 255.
  *
  * Response to TI_SCI_MSG_GET_NUM_CLOCK_PARENTS
  */
 struct ti_sci_msg_resp_get_clock_num_parents {
 	struct ti_sci_msg_hdr hdr;
 	u8 num_parents;
+	u32 num_parents_32;
 } __packed;
 
 /**
@@ -356,7 +432,9 @@
  * @max_freq_hz: The maximum allowable frequency in Hz. This is the maximum
  *		allowable programmed frequency and does not account for clock
  *		tolerances and jitter.
- * @clk_id:	Clock identifier for the device for this request.
+ * @clk_id:	Clock identifier for the device for this request. Set to
+ *		255 if clock identifier is greater than or equal to 255.
+ * @clk_id_32:	Clock identifier if @clk_id is set to 255.
  *
  * NOTE: Normally clock frequency management is automatically done by TISCI
  * entity. In case of specific requests, TISCI evaluates capability to achieve
@@ -373,6 +451,7 @@
 	u64 target_freq_hz;
 	u64 max_freq_hz;
 	u8 clk_id;
+	u32 clk_id_32;
 } __packed;
 
 /**
@@ -400,7 +479,9 @@
  * @max_freq_hz: The maximum allowable frequency in Hz. This is the maximum
  *		allowable programmed frequency and does not account for clock
  *		tolerances and jitter.
- * @clk_id:	Clock identifier for the device for this request.
+ * @clk_id:	Clock identifier for the device for this request. Set to
+ *		255 if clock ID is greater than or equal to 255.
+ * @clk_id_32:	Clock identifier if @clk_id field is set to 255.
  *
  * NOTE: Normally clock frequency management is automatically done by TISCI
  * entity. In case of specific requests, TISCI evaluates capability to achieve
@@ -429,13 +510,16 @@
 	u64 target_freq_hz;
 	u64 max_freq_hz;
 	u8 clk_id;
+	u32 clk_id_32;
 } __packed;
 
 /**
  * struct ti_sci_msg_req_get_clock_freq - Request to get the clock frequency
  * @hdr:	Generic Header
  * @dev_id:	Device identifier this request is for
- * @clk_id:	Clock identifier for the device for this request.
+ * @clk_id:	Clock identifier for the device for this request. Set to
+ *		255 if clock ID is greater than or equal to 255.
+ * @clk_id_32:	Clock identifier if @clk_id field is set to 255.
  *
  * NOTE: Normally clock frequency management is automatically done by TISCI
  * entity. In some cases, clock frequencies are configured by host.
@@ -447,6 +531,7 @@
 	struct ti_sci_msg_hdr hdr;
 	u32 dev_id;
 	u8 clk_id;
+	u32 clk_id_32;
 } __packed;
 
 /**
@@ -461,4 +546,872 @@
 	u64 freq_hz;
 } __packed;
 
+#define TI_SCI_IRQ_SECONDARY_HOST_INVALID	0xff
+
+/**
+ * struct ti_sci_msg_req_get_resource_range - Request to get a host's assigned
+ *					      range of resources.
+ * @hdr:		Generic Header
+ * @type:		Unique resource assignment type
+ * @subtype:		Resource assignment subtype within the resource type.
+ * @secondary_host:	Host processing entity to which the resources are
+ *			allocated. This is required only when the destination
+ *			host ID is different from the TI-SCI interface host ID,
+ *			else TI_SCI_IRQ_SECONDARY_HOST_INVALID can be passed.
+ *
+ * Request type is TI_SCI_MSG_GET_RESOURCE_RANGE. The response carries the
+ * requested resource range and uses the same message type.
+ */
+struct ti_sci_msg_req_get_resource_range {
+	struct ti_sci_msg_hdr hdr;
+#define MSG_RM_RESOURCE_TYPE_MASK	GENMASK(9, 0)
+#define MSG_RM_RESOURCE_SUBTYPE_MASK	GENMASK(5, 0)
+	u16 type;
+	u8 subtype;
+	u8 secondary_host;
+} __packed;
+
+/**
+ * struct ti_sci_msg_resp_get_resource_range - Response to resource get range.
+ * @hdr:		Generic Header
+ * @range_start:	Start index of the resource range.
+ * @range_num:		Number of resources in the range.
+ *
+ * Response to request TI_SCI_MSG_GET_RESOURCE_RANGE.
+ */
+struct ti_sci_msg_resp_get_resource_range {
+	struct ti_sci_msg_hdr hdr;
+	u16 range_start;
+	u16 range_num;
+} __packed;
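/*
 * Editor's illustrative sketch (not part of the patch): populating a
 * resource-range request with the masks defined above. The helper name is
 * hypothetical; only fields present in the structure are used.
 */
static void example_fill_resource_range_req(struct ti_sci_msg_req_get_resource_range *req,
					    u16 type, u8 subtype)
{
	/* Only the low 10 bits of type and low 6 bits of subtype are used */
	req->type = type & MSG_RM_RESOURCE_TYPE_MASK;
	req->subtype = subtype & MSG_RM_RESOURCE_SUBTYPE_MASK;
	/* Resources are requested for the TI-SCI interface host itself */
	req->secondary_host = TI_SCI_IRQ_SECONDARY_HOST_INVALID;
}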
+
+/**
+ * struct ti_sci_msg_req_manage_irq - Request to configure/release the route
+ *					between the dev and the host.
+ * @hdr:		Generic Header
+ * @valid_params:	Bit fields defining the validity of interrupt source
+ *			parameters. If a bit is not set, then corresponding
+ *			field is not valid and will not be used for route set.
+ *			Bit field definitions:
+ *			0 - Valid bit for @dst_id
+ *			1 - Valid bit for @dst_host_irq
+ *			2 - Valid bit for @ia_id
+ *			3 - Valid bit for @vint
+ *			4 - Valid bit for @global_event
+ *			5 - Valid bit for @vint_status_bit_index
+ *			31 - Valid bit for @secondary_host
+ * @src_id:		IRQ source peripheral ID.
+ * @src_index:		IRQ source index within the peripheral
+ * @dst_id:		IRQ Destination ID. Based on the architecture it can be
+ *			IRQ controller or host processor ID.
+ * @dst_host_irq:	IRQ number of the destination host IRQ controller
+ * @ia_id:		Device ID of the interrupt aggregator in which the
+ *			vint resides.
+ * @vint:		Virtual interrupt number if the interrupt route
+ *			is through an interrupt aggregator.
+ * @global_event:	Global event that is to be mapped to interrupt
+ *			aggregator virtual interrupt status bit.
+ * @vint_status_bit:	Virtual interrupt status bit if the interrupt route
+ *			utilizes an interrupt aggregator status bit.
+ * @secondary_host:	Host ID of the IRQ destination computing entity. This is
+ *			required only when the destination host ID is
+ *			different from the TI-SCI interface host ID.
+ *
+ * Request type is TI_SCI_MSG_SET_IRQ or TI_SCI_MSG_FREE_IRQ.
+ * Response is generic ACK / NACK message.
+ */
+struct ti_sci_msg_req_manage_irq {
+	struct ti_sci_msg_hdr hdr;
+#define MSG_FLAG_DST_ID_VALID			TI_SCI_MSG_FLAG(0)
+#define MSG_FLAG_DST_HOST_IRQ_VALID		TI_SCI_MSG_FLAG(1)
+#define MSG_FLAG_IA_ID_VALID			TI_SCI_MSG_FLAG(2)
+#define MSG_FLAG_VINT_VALID			TI_SCI_MSG_FLAG(3)
+#define MSG_FLAG_GLB_EVNT_VALID			TI_SCI_MSG_FLAG(4)
+#define MSG_FLAG_VINT_STS_BIT_VALID		TI_SCI_MSG_FLAG(5)
+#define MSG_FLAG_SHOST_VALID			TI_SCI_MSG_FLAG(31)
+	u32 valid_params;
+	u16 src_id;
+	u16 src_index;
+	u16 dst_id;
+	u16 dst_host_irq;
+	u16 ia_id;
+	u16 vint;
+	u16 global_event;
+	u8 vint_status_bit;
+	u8 secondary_host;
+} __packed;
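/*
 * Editor's illustrative sketch (not part of the patch): marking which fields
 * of the request are valid for a direct peripheral-to-host IRQ route, using
 * the flag macros defined in the structure above. The helper is hypothetical.
 */
static void example_fill_direct_irq_req(struct ti_sci_msg_req_manage_irq *req,
					u16 src_id, u16 src_index,
					u16 dst_id, u16 dst_host_irq)
{
	req->src_id = src_id;
	req->src_index = src_index;
	req->dst_id = dst_id;
	req->dst_host_irq = dst_host_irq;
	/* No interrupt aggregator, event or secondary host on this route */
	req->valid_params = MSG_FLAG_DST_ID_VALID | MSG_FLAG_DST_HOST_IRQ_VALID;
	req->secondary_host = TI_SCI_IRQ_SECONDARY_HOST_INVALID;
}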
+
+/**
+ * struct ti_sci_msg_rm_ring_cfg_req - Configure a Navigator Subsystem ring
+ *
+ * Configures the non-real-time registers of a Navigator Subsystem ring.
+ * @hdr:	Generic Header
+ * @valid_params: Bitfield defining validity of ring configuration parameters.
+ *	The ring configuration fields are not valid, and will not be used for
+ *	ring configuration, if their corresponding valid bit is zero.
+ *	Valid bit usage:
+ *	0 - Valid bit for @tisci_msg_rm_ring_cfg_req addr_lo
+ *	1 - Valid bit for @tisci_msg_rm_ring_cfg_req addr_hi
+ *	2 - Valid bit for @tisci_msg_rm_ring_cfg_req count
+ *	3 - Valid bit for @tisci_msg_rm_ring_cfg_req mode
+ *	4 - Valid bit for @tisci_msg_rm_ring_cfg_req size
+ *	5 - Valid bit for @tisci_msg_rm_ring_cfg_req order_id
+ * @nav_id: Device ID of Navigator Subsystem from which the ring is allocated
+ * @index: ring index to be configured.
+ * @addr_lo: 32 LSBs of ring base address to be programmed into the ring's
+ *	RING_BA_LO register
+ * @addr_hi: 16 MSBs of ring base address to be programmed into the ring's
+ *	RING_BA_HI register.
+ * @count: Number of ring elements. Must be even in CREDENTIALS or QM mode.
+ * @mode: Specifies the mode in which the ring is to be configured.
+ * @size: Specifies encoded ring element size. To calculate the encoded size use
+ *	the formula (log2(size_bytes) - 2), where size_bytes cannot be
+ *	greater than 256.
+ * @order_id: Specifies the ring's bus order ID.
+ */
+struct ti_sci_msg_rm_ring_cfg_req {
+	struct ti_sci_msg_hdr hdr;
+	u32 valid_params;
+	u16 nav_id;
+	u16 index;
+	u32 addr_lo;
+	u32 addr_hi;
+	u32 count;
+	u8 mode;
+	u8 size;
+	u8 order_id;
+} __packed;
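/*
 * Editor's illustrative sketch (not part of the patch): the @size field uses
 * the (log2(size_bytes) - 2) encoding described above, so 8-byte elements
 * encode to 1 and 256-byte elements to 6. The helper is hypothetical and
 * assumes size_bytes is a power of two between 4 and 256.
 */
static u8 example_ring_elsize_encode(u32 size_bytes)
{
	return (u8)(ilog2(size_bytes) - 2);
}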
+
+/**
+ * struct ti_sci_msg_rm_ring_get_cfg_req - Get RA ring's configuration
+ *
+ * Gets the configuration of the non-real-time register fields of a ring.  The
+ * host, or a supervisor of the host, who owns the ring must be the requesting
+ * host.  The values of the non-real-time registers are returned in
+ * @ti_sci_msg_rm_ring_get_cfg_resp.
+ *
+ * @hdr: Generic Header
+ * @nav_id: Device ID of Navigator Subsystem from which the ring is allocated
+ * @index: ring index.
+ */
+struct ti_sci_msg_rm_ring_get_cfg_req {
+	struct ti_sci_msg_hdr hdr;
+	u16 nav_id;
+	u16 index;
+} __packed;
+
+/**
+ * struct ti_sci_msg_rm_ring_get_cfg_resp -  Ring get configuration response
+ *
+ * Response received by host processor after RM has handled
+ * @ti_sci_msg_rm_ring_get_cfg_req. The response contains the ring's
+ * non-real-time register values.
+ *
+ * @hdr: Generic Header
+ * @addr_lo: 32 LSBs of the ring base address.
+ * @addr_hi: 16 MSBs of the ring base address.
+ * @count: Number of ring elements.
+ * @mode: Ring mode.
+ * @size: Encoded ring element size.
+ * @order_id: Ring order ID.
+ */
+struct ti_sci_msg_rm_ring_get_cfg_resp {
+	struct ti_sci_msg_hdr hdr;
+	u32 addr_lo;
+	u32 addr_hi;
+	u32 count;
+	u8 mode;
+	u8 size;
+	u8 order_id;
+} __packed;
+
+/**
+ * struct ti_sci_msg_psil_pair - Pairs a PSI-L source thread to a destination
+ *				 thread
+ * @hdr:	Generic Header
+ * @nav_id:	SoC Navigator Subsystem device ID whose PSI-L config proxy is
+ *		used to pair the source and destination threads.
+ * @src_thread:	PSI-L source thread ID within the PSI-L System thread map.
+ *
+ * UDMAP transmit channels mapped to source threads will have their
+ * TCHAN_THRD_ID register programmed with the destination thread if the pairing
+ * is successful.
+ *
+ * @dst_thread: PSI-L destination thread ID within the PSI-L System thread map.
+ * PSI-L destination threads start at index 0x8000.  The request is NACK'd if
+ * the destination thread is below 0x8000.
+ *
+ * UDMAP receive channels mapped to destination threads will have their
+ * RCHAN_THRD_ID register programmed with the source thread if the pairing
+ * is successful.
+ *
+ * Request type is TI_SCI_MSG_RM_PSIL_PAIR, response is a generic ACK or NACK
+ * message.
+ */
+struct ti_sci_msg_psil_pair {
+	struct ti_sci_msg_hdr hdr;
+	u32 nav_id;
+	u32 src_thread;
+	u32 dst_thread;
+} __packed;
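/*
 * Editor's illustrative sketch (not part of the patch): a caller-side check
 * mirroring the rule above that PSI-L destination threads start at 0x8000 and
 * that lower values are NACK'd by the firmware. Both names are hypothetical.
 */
#define EXAMPLE_PSIL_DST_THREAD_BASE	0x8000

static bool example_psil_dst_thread_valid(u32 dst_thread)
{
	return dst_thread >= EXAMPLE_PSIL_DST_THREAD_BASE;
}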
+
+/**
+ * struct ti_sci_msg_psil_unpair - Unpairs a PSI-L source thread from a
+ *				   destination thread
+ * @hdr:	Generic Header
+ * @nav_id:	SoC Navigator Subsystem device ID whose PSI-L config proxy is
+ *		used to unpair the source and destination threads.
+ * @src_thread:	PSI-L source thread ID within the PSI-L System thread map.
+ *
+ * UDMAP transmit channels mapped to source threads will have their
+ * TCHAN_THRD_ID register cleared if the unpairing is successful.
+ *
+ * @dst_thread: PSI-L destination thread ID within the PSI-L System thread map.
+ * PSI-L destination threads start at index 0x8000.  The request is NACK'd if
+ * the destination thread is below 0x8000.
+ *
+ * UDMAP receive channels mapped to destination threads will have their
+ * RCHAN_THRD_ID register cleared if the unpairing is successful.
+ *
+ * Request type is TI_SCI_MSG_RM_PSIL_UNPAIR, response is a generic ACK or NACK
+ * message.
+ */
+struct ti_sci_msg_psil_unpair {
+	struct ti_sci_msg_hdr hdr;
+	u32 nav_id;
+	u32 src_thread;
+	u32 dst_thread;
+} __packed;
+
+/**
+ * struct ti_sci_msg_udmap_rx_flow_cfg -  UDMAP receive flow configuration
+ *					  message
+ * @hdr: Generic Header
+ * @nav_id: SoC Navigator Subsystem device ID from which the receive flow is
+ *	allocated
+ * @flow_index: UDMAP receive flow index for non-optional configuration.
+ * @rx_ch_index: Specifies the index of the receive channel using the flow_index
+ * @rx_einfo_present: UDMAP receive flow extended packet info present.
+ * @rx_psinfo_present: UDMAP receive flow PS words present.
+ * @rx_error_handling: UDMAP receive flow error handling configuration. Valid
+ *	values are TI_SCI_RM_UDMAP_RX_FLOW_ERR_DROP/RETRY.
+ * @rx_desc_type: UDMAP receive flow descriptor type. It can be one of
+ *	TI_SCI_RM_UDMAP_RX_FLOW_DESC_HOST/MONO.
+ * @rx_sop_offset: UDMAP receive flow start of packet offset.
+ * @rx_dest_qnum: UDMAP receive flow destination queue number.
+ * @rx_ps_location: UDMAP receive flow PS words location.
+ *	0 - end of packet descriptor
+ *	1 - Beginning of the data buffer
+ * @rx_src_tag_hi: UDMAP receive flow source tag high byte constant
+ * @rx_src_tag_lo: UDMAP receive flow source tag low byte constant
+ * @rx_dest_tag_hi: UDMAP receive flow destination tag high byte constant
+ * @rx_dest_tag_lo: UDMAP receive flow destination tag low byte constant
+ * @rx_src_tag_hi_sel: UDMAP receive flow source tag high byte selector
+ * @rx_src_tag_lo_sel: UDMAP receive flow source tag low byte selector
+ * @rx_dest_tag_hi_sel: UDMAP receive flow destination tag high byte selector
+ * @rx_dest_tag_lo_sel: UDMAP receive flow destination tag low byte selector
+ * @rx_size_thresh_en: UDMAP receive flow packet size based free buffer queue
+ *	enable. If enabled, the ti_sci_rm_udmap_rx_flow_opt_cfg also needs to be
+ *	configured and sent.
+ * @rx_fdq0_sz0_qnum: UDMAP receive flow free descriptor queue 0.
+ * @rx_fdq1_qnum: UDMAP receive flow free descriptor queue 1.
+ * @rx_fdq2_qnum: UDMAP receive flow free descriptor queue 2.
+ * @rx_fdq3_qnum: UDMAP receive flow free descriptor queue 3.
+ *
+ * For detailed information on the settings, see the UDMAP section of the TRM.
+ */
+struct ti_sci_msg_udmap_rx_flow_cfg {
+	struct ti_sci_msg_hdr hdr;
+	u32 nav_id;
+	u32 flow_index;
+	u32 rx_ch_index;
+	u8 rx_einfo_present;
+	u8 rx_psinfo_present;
+	u8 rx_error_handling;
+	u8 rx_desc_type;
+	u16 rx_sop_offset;
+	u16 rx_dest_qnum;
+	u8 rx_ps_location;
+	u8 rx_src_tag_hi;
+	u8 rx_src_tag_lo;
+	u8 rx_dest_tag_hi;
+	u8 rx_dest_tag_lo;
+	u8 rx_src_tag_hi_sel;
+	u8 rx_src_tag_lo_sel;
+	u8 rx_dest_tag_hi_sel;
+	u8 rx_dest_tag_lo_sel;
+	u8 rx_size_thresh_en;
+	u16 rx_fdq0_sz0_qnum;
+	u16 rx_fdq1_qnum;
+	u16 rx_fdq2_qnum;
+	u16 rx_fdq3_qnum;
+} __packed;
+
+/**
+ * struct rm_ti_sci_msg_udmap_rx_flow_opt_cfg - parameters for UDMAP receive
+ *						flow optional configuration
+ * @hdr: Generic Header
+ * @nav_id: SoC Navigator Subsystem device ID from which the receive flow is
+ *	allocated
+ * @flow_index: UDMAP receive flow index for optional configuration.
+ * @rx_ch_index: Specifies the index of the receive channel using the flow_index
+ * @rx_size_thresh0: UDMAP receive flow packet size threshold 0.
+ * @rx_size_thresh1: UDMAP receive flow packet size threshold 1.
+ * @rx_size_thresh2: UDMAP receive flow packet size threshold 2.
+ * @rx_fdq0_sz1_qnum: UDMAP receive flow free descriptor queue for size
+ *	threshold 1.
+ * @rx_fdq0_sz2_qnum: UDMAP receive flow free descriptor queue for size
+ *	threshold 2.
+ * @rx_fdq0_sz3_qnum: UDMAP receive flow free descriptor queue for size
+ *	threshold 3.
+ *
+ * For detailed information on the settings, see the UDMAP section of the TRM.
+ */
+struct rm_ti_sci_msg_udmap_rx_flow_opt_cfg {
+	struct ti_sci_msg_hdr hdr;
+	u32 nav_id;
+	u32 flow_index;
+	u32 rx_ch_index;
+	u16 rx_size_thresh0;
+	u16 rx_size_thresh1;
+	u16 rx_size_thresh2;
+	u16 rx_fdq0_sz1_qnum;
+	u16 rx_fdq0_sz2_qnum;
+	u16 rx_fdq0_sz3_qnum;
+} __packed;
+
+/**
+ * Configures a Navigator Subsystem UDMAP transmit channel
+ *
+ * Configures the non-real-time registers of a Navigator Subsystem UDMAP
+ * transmit channel.  The channel index must be assigned to the host defined
+ * in the TISCI header via the RM board configuration resource assignment
+ * range list.
+ *
+ * @hdr: Generic Header
+ *
+ * @valid_params: Bitfield defining validity of tx channel configuration
+ * parameters. The tx channel configuration fields are not valid, and will not
+ * be used for ch configuration, if their corresponding valid bit is zero.
+ * Valid bit usage:
+ *    0 - Valid bit for @ref ti_sci_msg_rm_udmap_tx_ch_cfg::tx_pause_on_err
+ *    1 - Valid bit for @ref ti_sci_msg_rm_udmap_tx_ch_cfg::tx_atype
+ *    2 - Valid bit for @ref ti_sci_msg_rm_udmap_tx_ch_cfg::tx_chan_type
+ *    3 - Valid bit for @ref ti_sci_msg_rm_udmap_tx_ch_cfg::tx_fetch_size
+ *    4 - Valid bit for @ref ti_sci_msg_rm_udmap_tx_ch_cfg::txcq_qnum
+ *    5 - Valid bit for @ref ti_sci_msg_rm_udmap_tx_ch_cfg::tx_priority
+ *    6 - Valid bit for @ref ti_sci_msg_rm_udmap_tx_ch_cfg::tx_qos
+ *    7 - Valid bit for @ref ti_sci_msg_rm_udmap_tx_ch_cfg::tx_orderid
+ *    8 - Valid bit for @ref ti_sci_msg_rm_udmap_tx_ch_cfg::tx_sched_priority
+ *    9 - Valid bit for @ref ti_sci_msg_rm_udmap_tx_ch_cfg::tx_filt_einfo
+ *   10 - Valid bit for @ref ti_sci_msg_rm_udmap_tx_ch_cfg::tx_filt_pswords
+ *   11 - Valid bit for @ref ti_sci_msg_rm_udmap_tx_ch_cfg::tx_supr_tdpkt
+ *   12 - Valid bit for @ref ti_sci_msg_rm_udmap_tx_ch_cfg::tx_credit_count
+ *   13 - Valid bit for @ref ti_sci_msg_rm_udmap_tx_ch_cfg::fdepth
+ *   14 - Valid bit for @ref ti_sci_msg_rm_udmap_tx_ch_cfg::tx_burst_size
+ *
+ * @nav_id: SoC device ID of Navigator Subsystem where tx channel is located
+ *
+ * @index: UDMAP transmit channel index.
+ *
+ * @tx_pause_on_err: UDMAP transmit channel pause on error configuration to
+ * be programmed into the tx_pause_on_err field of the channel's TCHAN_TCFG
+ * register.
+ *
+ * @tx_filt_einfo: UDMAP transmit channel extended packet information passing
+ * configuration to be programmed into the tx_filt_einfo field of the
+ * channel's TCHAN_TCFG register.
+ *
+ * @tx_filt_pswords: UDMAP transmit channel protocol specific word passing
+ * configuration to be programmed into the tx_filt_pswords field of the
+ * channel's TCHAN_TCFG register.
+ *
+ * @tx_atype: UDMAP transmit channel non Ring Accelerator access pointer
+ * interpretation configuration to be programmed into the tx_atype field of
+ * the channel's TCHAN_TCFG register.
+ *
+ * @tx_chan_type: UDMAP transmit channel functional channel type and work
+ * passing mechanism configuration to be programmed into the tx_chan_type
+ * field of the channel's TCHAN_TCFG register.
+ *
+ * @tx_supr_tdpkt: UDMAP transmit channel teardown packet generation suppression
+ * configuration to be programmed into the tx_supr_tdpkt field of the channel's
+ * TCHAN_TCFG register.
+ *
+ * @tx_fetch_size: UDMAP transmit channel number of 32-bit descriptor words to
+ * fetch configuration to be programmed into the tx_fetch_size field of the
+ * channel's TCHAN_TCFG register.  The user must make sure to set the maximum
+ * word count that can pass through the channel for any allowed descriptor type.
+ *
+ * @tx_credit_count: UDMAP transmit channel transfer request credit count
+ * configuration to be programmed into the count field of the TCHAN_TCREDIT
+ * register.  Specifies how many credits for complete TRs are available.
+ *
+ * @txcq_qnum: UDMAP transmit channel completion queue configuration to be
+ * programmed into the txcq_qnum field of the TCHAN_TCQ register. The specified
+ * completion queue must be assigned to the host, or a subordinate of the host,
+ * requesting configuration of the transmit channel.
+ *
+ * @tx_priority: UDMAP transmit channel transmit priority value to be programmed
+ * into the priority field of the channel's TCHAN_TPRI_CTRL register.
+ *
+ * @tx_qos: UDMAP transmit channel transmit qos value to be programmed into the
+ * qos field of the channel's TCHAN_TPRI_CTRL register.
+ *
+ * @tx_orderid: UDMAP transmit channel bus order id value to be programmed into
+ * the orderid field of the channel's TCHAN_TPRI_CTRL register.
+ *
+ * @fdepth: UDMAP transmit channel FIFO depth configuration to be programmed
+ * into the fdepth field of the TCHAN_TFIFO_DEPTH register. Sets the number of
+ * Tx FIFO bytes which are allowed to be stored for the channel. Check the UDMAP
+ * section of the TRM for restrictions regarding this parameter.
+ *
+ * @tx_sched_priority: UDMAP transmit channel tx scheduling priority
+ * configuration to be programmed into the priority field of the channel's
+ * TCHAN_TST_SCHED register.
+ *
+ * @tx_burst_size: UDMAP transmit channel burst size configuration to be
+ * programmed into the tx_burst_size field of the TCHAN_TCFG register.
+ */
+struct ti_sci_msg_rm_udmap_tx_ch_cfg_req {
+	struct ti_sci_msg_hdr hdr;
+	u32 valid_params;
+	u16 nav_id;
+	u16 index;
+	u8 tx_pause_on_err;
+	u8 tx_filt_einfo;
+	u8 tx_filt_pswords;
+	u8 tx_atype;
+	u8 tx_chan_type;
+	u8 tx_supr_tdpkt;
+	u16 tx_fetch_size;
+	u8 tx_credit_count;
+	u16 txcq_qnum;
+	u8 tx_priority;
+	u8 tx_qos;
+	u8 tx_orderid;
+	u16 fdepth;
+	u8 tx_sched_priority;
+	u8 tx_burst_size;
+} __packed;
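/*
 * Editor's illustrative sketch (not part of the patch): building
 * @valid_params for a minimal TX channel configuration that programs only the
 * channel type, fetch size and completion queue (valid bits 2, 3 and 4 in the
 * list above). The helper is hypothetical.
 */
static void example_tx_ch_minimal_cfg(struct ti_sci_msg_rm_udmap_tx_ch_cfg_req *req,
				      u8 chan_type, u16 fetch_size, u16 txcq_qnum)
{
	req->tx_chan_type = chan_type;
	req->tx_fetch_size = fetch_size;
	req->txcq_qnum = txcq_qnum;
	req->valid_params = BIT(2) | BIT(3) | BIT(4);
}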
+
+/**
+ * Configures a Navigator Subsystem UDMAP receive channel
+ *
+ * Configures the non-real-time registers of a Navigator Subsystem UDMAP
+ * receive channel.  The channel index must be assigned to the host defined
+ * in the TISCI header via the RM board configuration resource assignment
+ * range list.
+ *
+ * @hdr: Generic Header
+ *
+ * @valid_params: Bitfield defining validity of rx channel configuration
+ * parameters.
+ * The rx channel configuration fields are not valid, and will not be used for
+ * ch configuration, if their corresponding valid bit is zero.
+ * Valid bit usage:
+ *    0 - Valid bit for @ti_sci_msg_rm_udmap_rx_ch_cfg_req::rx_pause_on_err
+ *    1 - Valid bit for @ti_sci_msg_rm_udmap_rx_ch_cfg_req::rx_atype
+ *    2 - Valid bit for @ti_sci_msg_rm_udmap_rx_ch_cfg_req::rx_chan_type
+ *    3 - Valid bit for @ti_sci_msg_rm_udmap_rx_ch_cfg_req::rx_fetch_size
+ *    4 - Valid bit for @ti_sci_msg_rm_udmap_rx_ch_cfg_req::rxcq_qnum
+ *    5 - Valid bit for @ti_sci_msg_rm_udmap_rx_ch_cfg_req::rx_priority
+ *    6 - Valid bit for @ti_sci_msg_rm_udmap_rx_ch_cfg_req::rx_qos
+ *    7 - Valid bit for @ti_sci_msg_rm_udmap_rx_ch_cfg_req::rx_orderid
+ *    8 - Valid bit for @ti_sci_msg_rm_udmap_rx_ch_cfg_req::rx_sched_priority
+ *    9 - Valid bit for @ti_sci_msg_rm_udmap_rx_ch_cfg_req::flowid_start
+ *   10 - Valid bit for @ti_sci_msg_rm_udmap_rx_ch_cfg_req::flowid_cnt
+ *   11 - Valid bit for @ti_sci_msg_rm_udmap_rx_ch_cfg_req::rx_ignore_short
+ *   12 - Valid bit for @ti_sci_msg_rm_udmap_rx_ch_cfg_req::rx_ignore_long
+ *   14 - Valid bit for @ti_sci_msg_rm_udmap_rx_ch_cfg_req::rx_burst_size
+ *
+ * @nav_id: SoC device ID of Navigator Subsystem where rx channel is located
+ *
+ * @index: UDMAP receive channel index.
+ *
+ * @rx_fetch_size: UDMAP receive channel number of 32-bit descriptor words to
+ * fetch configuration to be programmed into the rx_fetch_size field of the
+ * channel's RCHAN_RCFG register.
+ *
+ * @rxcq_qnum: UDMAP receive channel completion queue configuration to be
+ * programmed into the rxcq_qnum field of the RCHAN_RCQ register.
+ * The specified completion queue must be assigned to the host, or a subordinate
+ * of the host, requesting configuration of the receive channel.
+ *
+ * @rx_priority: UDMAP receive channel receive priority value to be programmed
+ * into the priority field of the channel's RCHAN_RPRI_CTRL register.
+ *
+ * @rx_qos: UDMAP receive channel receive qos value to be programmed into the
+ * qos field of the channel's RCHAN_RPRI_CTRL register.
+ *
+ * @rx_orderid: UDMAP receive channel bus order id value to be programmed into
+ * the orderid field of the channel's RCHAN_RPRI_CTRL register.
+ *
+ * @rx_sched_priority: UDMAP receive channel rx scheduling priority
+ * configuration to be programmed into the priority field of the channel's
+ * RCHAN_RST_SCHED register.
+ *
+ * @flowid_start: UDMAP receive channel additional flows starting index
+ * configuration to program into the flow_start field of the RCHAN_RFLOW_RNG
+ * register. Specifies the starting index for flow IDs the receive channel is to
+ * make use of beyond the default flow. flowid_start and @ref flowid_cnt must be
+ * set as valid and configured together. The starting flow ID set by
+ * @ref flowid_start must be a flow index within the Navigator Subsystem's subset
+ * of flows beyond the default flows statically mapped to receive channels.
+ * The additional flows must be assigned to the host, or a subordinate of the
+ * host, requesting configuration of the receive channel.
+ *
+ * @flowid_cnt: UDMAP receive channel additional flows count configuration to
+ * program into the flowid_cnt field of the RCHAN_RFLOW_RNG register.
+ * This field specifies how many flow IDs are in the additional contiguous range
+ * of legal flow IDs for the channel.  @ref flowid_start and flowid_cnt must be
+ * set as valid and configured together. Disabling the valid_params field bit
+ * for flowid_cnt indicates no flow IDs other than the default are to be
+ * allocated and used by the receive channel. @ref flowid_start plus flowid_cnt
+ * cannot be greater than the number of receive flows in the receive channel's
+ * Navigator Subsystem.  The additional flows must be assigned to the host, or a
+ * subordinate of the host, requesting configuration of the receive channel.
+ *
+ * @rx_pause_on_err: UDMAP receive channel pause on error configuration to be
+ * programmed into the rx_pause_on_err field of the channel's RCHAN_RCFG
+ * register.
+ *
+ * @rx_atype: UDMAP receive channel non Ring Accelerator access pointer
+ * interpretation configuration to be programmed into the rx_atype field of the
+ * channel's RCHAN_RCFG register.
+ *
+ * @rx_chan_type: UDMAP receive channel functional channel type and work passing
+ * mechanism configuration to be programmed into the rx_chan_type field of the
+ * channel's RCHAN_RCFG register.
+ *
+ * @rx_ignore_short: UDMAP receive channel short packet treatment configuration
+ * to be programmed into the rx_ignore_short field of the RCHAN_RCFG register.
+ *
+ * @rx_ignore_long: UDMAP receive channel long packet treatment configuration to
+ * be programmed into the rx_ignore_long field of the RCHAN_RCFG register.
+ *
+ * @rx_burst_size: UDMAP receive channel burst size configuration to be
+ * programmed into the rx_burst_size field of the RCHAN_RCFG register.
+ */
+struct ti_sci_msg_rm_udmap_rx_ch_cfg_req {
+	struct ti_sci_msg_hdr hdr;
+	u32 valid_params;
+	u16 nav_id;
+	u16 index;
+	u16 rx_fetch_size;
+	u16 rxcq_qnum;
+	u8 rx_priority;
+	u8 rx_qos;
+	u8 rx_orderid;
+	u8 rx_sched_priority;
+	u16 flowid_start;
+	u16 flowid_cnt;
+	u8 rx_pause_on_err;
+	u8 rx_atype;
+	u8 rx_chan_type;
+	u8 rx_ignore_short;
+	u8 rx_ignore_long;
+	u8 rx_burst_size;
+} __packed;
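/*
 * Editor's illustrative sketch (not part of the patch): @flowid_start and
 * @flowid_cnt must be marked valid and programmed together (valid bits 9 and
 * 10 in the list above). The helper is hypothetical.
 */
static void example_rx_ch_set_flow_range(struct ti_sci_msg_rm_udmap_rx_ch_cfg_req *req,
					 u16 start, u16 cnt)
{
	req->flowid_start = start;
	req->flowid_cnt = cnt;
	req->valid_params |= BIT(9) | BIT(10);
}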
+
+/**
+ * Configures a Navigator Subsystem UDMAP receive flow
+ *
+ * Configures a Navigator Subsystem UDMAP receive flow's registers.
+ * Configuration does not include the flow registers which handle size-based
+ * free descriptor queue routing.
+ *
+ * The flow index must be assigned to the host defined in the TISCI header via
+ * the RM board configuration resource assignment range list.
+ *
+ * @hdr: Standard TISCI header
+ *
+ * @valid_params:
+ * Bitfield defining validity of rx flow configuration parameters.  The
+ * rx flow configuration fields are not valid, and will not be used for flow
+ * configuration, if their corresponding valid bit is zero.  Valid bit usage:
+ *     0 - Valid bit for @tisci_msg_rm_udmap_flow_cfg_req::rx_einfo_present
+ *     1 - Valid bit for @tisci_msg_rm_udmap_flow_cfg_req::rx_psinfo_present
+ *     2 - Valid bit for @tisci_msg_rm_udmap_flow_cfg_req::rx_error_handling
+ *     3 - Valid bit for @tisci_msg_rm_udmap_flow_cfg_req::rx_desc_type
+ *     4 - Valid bit for @tisci_msg_rm_udmap_flow_cfg_req::rx_sop_offset
+ *     5 - Valid bit for @tisci_msg_rm_udmap_flow_cfg_req::rx_dest_qnum
+ *     6 - Valid bit for @tisci_msg_rm_udmap_flow_cfg_req::rx_src_tag_hi
+ *     7 - Valid bit for @tisci_msg_rm_udmap_flow_cfg_req::rx_src_tag_lo
+ *     8 - Valid bit for @tisci_msg_rm_udmap_flow_cfg_req::rx_dest_tag_hi
+ *     9 - Valid bit for @tisci_msg_rm_udmap_flow_cfg_req::rx_dest_tag_lo
+ *    10 - Valid bit for @tisci_msg_rm_udmap_flow_cfg_req::rx_src_tag_hi_sel
+ *    11 - Valid bit for @tisci_msg_rm_udmap_flow_cfg_req::rx_src_tag_lo_sel
+ *    12 - Valid bit for @tisci_msg_rm_udmap_flow_cfg_req::rx_dest_tag_hi_sel
+ *    13 - Valid bit for @tisci_msg_rm_udmap_flow_cfg_req::rx_dest_tag_lo_sel
+ *    14 - Valid bit for @tisci_msg_rm_udmap_flow_cfg_req::rx_fdq0_sz0_qnum
+ *    15 - Valid bit for @tisci_msg_rm_udmap_flow_cfg_req::rx_fdq1_sz0_qnum
+ *    16 - Valid bit for @tisci_msg_rm_udmap_flow_cfg_req::rx_fdq2_sz0_qnum
+ *    17 - Valid bit for @tisci_msg_rm_udmap_flow_cfg_req::rx_fdq3_sz0_qnum
+ *    18 - Valid bit for @tisci_msg_rm_udmap_flow_cfg_req::rx_ps_location
+ *
+ * @nav_id: SoC device ID of Navigator Subsystem from which the receive flow is
+ * allocated
+ *
+ * @flow_index: UDMAP receive flow index for non-optional configuration.
+ *
+ * @rx_einfo_present:
+ * UDMAP receive flow extended packet info present configuration to be
+ * programmed into the rx_einfo_present field of the flow's RFLOW_RFA register.
+ *
+ * @rx_psinfo_present:
+ * UDMAP receive flow PS words present configuration to be programmed into the
+ * rx_psinfo_present field of the flow's RFLOW_RFA register.
+ *
+ * @rx_error_handling:
+ * UDMAP receive flow error handling configuration to be programmed into the
+ * rx_error_handling field of the flow's RFLOW_RFA register.
+ *
+ * @rx_desc_type:
+ * UDMAP receive flow descriptor type configuration to be programmed into the
+ * rx_desc_type field of the flow's RFLOW_RFA register.
+ *
+ * @rx_sop_offset:
+ * UDMAP receive flow start of packet offset configuration to be programmed
+ * into the rx_sop_offset field of the RFLOW_RFA register.  See the UDMAP
+ * section of the TRM for more information on this setting.  Valid values for
+ * this field are 0-255 bytes.
+ *
+ * @rx_dest_qnum:
+ * UDMAP receive flow destination queue configuration to be programmed into the
+ * rx_dest_qnum field of the flow's RFLOW_RFA register.  The specified
+ * destination queue must be valid within the Navigator Subsystem and must be
+ * owned by the host, or a subordinate of the host, requesting allocation and
+ * configuration of the receive flow.
+ *
+ * @rx_src_tag_hi:
+ * UDMAP receive flow source tag high byte constant configuration to be
+ * programmed into the rx_src_tag_hi field of the flow's RFLOW_RFB register.
+ * See the UDMAP section of the TRM for more information on this setting.
+ *
+ * @rx_src_tag_lo:
+ * UDMAP receive flow source tag low byte constant configuration to be
+ * programmed into the rx_src_tag_lo field of the flow's RFLOW_RFB register.
+ * See the UDMAP section of the TRM for more information on this setting.
+ *
+ * @rx_dest_tag_hi:
+ * UDMAP receive flow destination tag high byte constant configuration to be
+ * programmed into the rx_dest_tag_hi field of the flow's RFLOW_RFB register.
+ * See the UDMAP section of the TRM for more information on this setting.
+ *
+ * @rx_dest_tag_lo:
+ * UDMAP receive flow destination tag low byte constant configuration to be
+ * programmed into the rx_dest_tag_lo field of the flow's RFLOW_RFB register.
+ * See the UDMAP section of the TRM for more information on this setting.
+ *
+ * @rx_src_tag_hi_sel:
+ * UDMAP receive flow source tag high byte selector configuration to be
+ * programmed into the rx_src_tag_hi_sel field of the RFLOW_RFC register.  See
+ * the UDMAP section of the TRM for more information on this setting.
+ *
+ * @rx_src_tag_lo_sel:
+ * UDMAP receive flow source tag low byte selector configuration to be
+ * programmed into the rx_src_tag_lo_sel field of the RFLOW_RFC register.  See
+ * the UDMAP section of the TRM for more information on this setting.
+ *
+ * @rx_dest_tag_hi_sel:
+ * UDMAP receive flow destination tag high byte selector configuration to be
+ * programmed into the rx_dest_tag_hi_sel field of the RFLOW_RFC register.  See
+ * the UDMAP section of the TRM for more information on this setting.
+ *
+ * @rx_dest_tag_lo_sel:
+ * UDMAP receive flow destination tag low byte selector configuration to be
+ * programmed into the rx_dest_tag_lo_sel field of the RFLOW_RFC register.  See
+ * the UDMAP section of the TRM for more information on this setting.
+ *
+ * @rx_fdq0_sz0_qnum:
+ * UDMAP receive flow free descriptor queue 0 configuration to be programmed
+ * into the rx_fdq0_sz0_qnum field of the flow's RFLOW_RFD register.  See the
+ * UDMAP section of the TRM for more information on this setting. The specified
+ * free queue must be valid within the Navigator Subsystem and must be owned
+ * by the host, or a subordinate of the host, requesting allocation and
+ * configuration of the receive flow.
+ *
+ * @rx_fdq1_qnum:
+ * UDMAP receive flow free descriptor queue 1 configuration to be programmed
+ * into the rx_fdq1_qnum field of the flow's RFLOW_RFD register.  See the
+ * UDMAP section of the TRM for more information on this setting.  The specified
+ * free queue must be valid within the Navigator Subsystem and must be owned
+ * by the host, or a subordinate of the host, requesting allocation and
+ * configuration of the receive flow.
+ *
+ * @rx_fdq2_qnum:
+ * UDMAP receive flow free descriptor queue 2 configuration to be programmed
+ * into the rx_fdq2_qnum field of the flow's RFLOW_RFE register.  See the
+ * UDMAP section of the TRM for more information on this setting.  The specified
+ * free queue must be valid within the Navigator Subsystem and must be owned
+ * by the host, or a subordinate of the host, requesting allocation and
+ * configuration of the receive flow.
+ *
+ * @rx_fdq3_qnum:
+ * UDMAP receive flow free descriptor queue 3 configuration to be programmed
+ * into the rx_fdq3_qnum field of the flow's RFLOW_RFE register.  See the
+ * UDMAP section of the TRM for more information on this setting.  The specified
+ * free queue must be valid within the Navigator Subsystem and must be owned
+ * by the host, or a subordinate of the host, requesting allocation and
+ * configuration of the receive flow.
+ *
+ * @rx_ps_location:
+ * UDMAP receive flow PS words location configuration to be programmed into the
+ * rx_ps_location field of the flow's RFLOW_RFA register.
+ */
+struct ti_sci_msg_rm_udmap_flow_cfg_req {
+	struct ti_sci_msg_hdr hdr;
+	u32 valid_params;
+	u16 nav_id;
+	u16 flow_index;
+	u8 rx_einfo_present;
+	u8 rx_psinfo_present;
+	u8 rx_error_handling;
+	u8 rx_desc_type;
+	u16 rx_sop_offset;
+	u16 rx_dest_qnum;
+	u8 rx_src_tag_hi;
+	u8 rx_src_tag_lo;
+	u8 rx_dest_tag_hi;
+	u8 rx_dest_tag_lo;
+	u8 rx_src_tag_hi_sel;
+	u8 rx_src_tag_lo_sel;
+	u8 rx_dest_tag_hi_sel;
+	u8 rx_dest_tag_lo_sel;
+	u16 rx_fdq0_sz0_qnum;
+	u16 rx_fdq1_qnum;
+	u16 rx_fdq2_qnum;
+	u16 rx_fdq3_qnum;
+	u8 rx_ps_location;
+} __packed;
+
+/**
+ * struct ti_sci_msg_req_proc_request - Request a processor
+ * @hdr:		Generic Header
+ * @processor_id:	ID of processor being requested
+ *
+ * Request type is TI_SCI_MSG_PROC_REQUEST, response is a generic ACK/NACK
+ * message.
+ */
+struct ti_sci_msg_req_proc_request {
+	struct ti_sci_msg_hdr hdr;
+	u8 processor_id;
+} __packed;
+
+/**
+ * struct ti_sci_msg_req_proc_release - Release a processor
+ * @hdr:		Generic Header
+ * @processor_id:	ID of processor being released
+ *
+ * Request type is TI_SCI_MSG_PROC_RELEASE, response is a generic ACK/NACK
+ * message.
+ */
+struct ti_sci_msg_req_proc_release {
+	struct ti_sci_msg_hdr hdr;
+	u8 processor_id;
+} __packed;
+
+/**
+ * struct ti_sci_msg_req_proc_handover - Handover a processor to a host
+ * @hdr:		Generic Header
+ * @processor_id:	ID of processor being handed over
+ * @host_id:		Host ID the control needs to be transferred to
+ *
+ * Request type is TI_SCI_MSG_PROC_HANDOVER, response is a generic ACK/NACK
+ * message.
+ */
+struct ti_sci_msg_req_proc_handover {
+	struct ti_sci_msg_hdr hdr;
+	u8 processor_id;
+	u8 host_id;
+} __packed;
+
+/* Boot Vector masks */
+#define TI_SCI_ADDR_LOW_MASK			GENMASK_ULL(31, 0)
+#define TI_SCI_ADDR_HIGH_MASK			GENMASK_ULL(63, 32)
+#define TI_SCI_ADDR_HIGH_SHIFT			32
+
+/**
+ * struct ti_sci_msg_req_set_config - Set Processor boot configuration
+ * @hdr:		Generic Header
+ * @processor_id:	ID of processor being configured
+ * @bootvector_low:	Lower 32 bit address (Little Endian) of boot vector
+ * @bootvector_high:	Higher 32 bit address (Little Endian) of boot vector
+ * @config_flags_set:	Optional Processor specific Config Flags to set.
+ *			Setting a bit here implies the corresponding mode
+ *			will be set
+ * @config_flags_clear:	Optional Processor specific Config Flags to clear.
+ *			Setting a bit here implies the corresponding mode
+ *			will be cleared
+ *
+ * Request type is TI_SCI_MSG_SET_CONFIG, response is a generic ACK/NACK
+ * message.
+ */
+struct ti_sci_msg_req_set_config {
+	struct ti_sci_msg_hdr hdr;
+	u8 processor_id;
+	u32 bootvector_low;
+	u32 bootvector_high;
+	u32 config_flags_set;
+	u32 config_flags_clear;
+} __packed;
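/*
 * Editor's illustrative sketch (not part of the patch): splitting a 64-bit
 * boot vector into the low/high message fields with the masks defined above.
 * The helper is hypothetical.
 */
static void example_set_bootvector(struct ti_sci_msg_req_set_config *req,
				   u64 bootvector)
{
	req->bootvector_low = bootvector & TI_SCI_ADDR_LOW_MASK;
	req->bootvector_high = (bootvector & TI_SCI_ADDR_HIGH_MASK) >>
			       TI_SCI_ADDR_HIGH_SHIFT;
}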
+
+/**
+ * struct ti_sci_msg_req_set_ctrl - Set Processor boot control flags
+ * @hdr:		Generic Header
+ * @processor_id:	ID of processor being configured
+ * @control_flags_set:	Optional Processor specific Control Flags to set.
+ *			Setting a bit here implies the corresponding mode
+ *			will be set
+ * @control_flags_clear:Optional Processor specific Control Flags to clear.
+ *			Setting a bit here implies the corresponding mode
+ *			will be cleared
+ *
+ * Request type is TI_SCI_MSG_SET_CTRL, response is a generic ACK/NACK
+ * message.
+ */
+struct ti_sci_msg_req_set_ctrl {
+	struct ti_sci_msg_hdr hdr;
+	u8 processor_id;
+	u32 control_flags_set;
+	u32 control_flags_clear;
+} __packed;
+
+/**
+ * struct ti_sci_msg_req_get_status - Processor boot status request
+ * @hdr:		Generic Header
+ * @processor_id:	ID of processor whose status is being requested
+ *
+ * Request type is TI_SCI_MSG_GET_STATUS, response is an appropriate
+ * message, or NACK in case of inability to satisfy request.
+ */
+struct ti_sci_msg_req_get_status {
+	struct ti_sci_msg_hdr hdr;
+	u8 processor_id;
+} __packed;
+
+/**
+ * struct ti_sci_msg_resp_get_status - Processor boot status response
+ * @hdr:		Generic Header
+ * @processor_id:	ID of processor whose status is returned
+ * @bootvector_low:	Lower 32 bit address (Little Endian) of boot vector
+ * @bootvector_high:	Higher 32 bit address (Little Endian) of boot vector
+ * @config_flags:	Optional Processor specific Config Flags set currently
+ * @control_flags:	Optional Processor specific Control Flags set currently
+ * @status_flags:	Optional Processor specific Status Flags set currently
+ *
+ * Response structure to a TI_SCI_MSG_GET_STATUS request.
+ */
+struct ti_sci_msg_resp_get_status {
+	struct ti_sci_msg_hdr hdr;
+	u8 processor_id;
+	u32 bootvector_low;
+	u32 bootvector_high;
+	u32 config_flags;
+	u32 control_flags;
+	u32 status_flags;
+} __packed;
+
 #endif /* __TI_SCI_H */
diff --git a/drivers/firmware/trusted_foundations.c b/drivers/firmware/trusted_foundations.c
new file mode 100644
index 0000000..fc544e1
--- /dev/null
+++ b/drivers/firmware/trusted_foundations.c
@@ -0,0 +1,167 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Trusted Foundations support for ARM CPUs
+ *
+ * Copyright (c) 2013, NVIDIA Corporation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/of.h>
+
+#include <linux/firmware/trusted_foundations.h>
+
+#include <asm/firmware.h>
+#include <asm/hardware/cache-l2x0.h>
+#include <asm/outercache.h>
+
+#define TF_CACHE_MAINT		0xfffff100
+
+#define TF_CACHE_ENABLE		1
+#define TF_CACHE_DISABLE	2
+
+#define TF_SET_CPU_BOOT_ADDR_SMC 0xfffff200
+
+#define TF_CPU_PM		0xfffffffc
+#define TF_CPU_PM_S3		0xffffffe3
+#define TF_CPU_PM_S2		0xffffffe6
+#define TF_CPU_PM_S2_NO_MC_CLK	0xffffffe5
+#define TF_CPU_PM_S1		0xffffffe4
+#define TF_CPU_PM_S1_NOFLUSH_L2	0xffffffe7
+
+static unsigned long cpu_boot_addr;
+
+static void tf_generic_smc(u32 type, u32 arg1, u32 arg2)
+{
+	register u32 r0 asm("r0") = type;
+	register u32 r1 asm("r1") = arg1;
+	register u32 r2 asm("r2") = arg2;
+
+	asm volatile(
+		".arch_extension	sec\n\t"
+		"stmfd	sp!, {r4 - r11}\n\t"
+		__asmeq("%0", "r0")
+		__asmeq("%1", "r1")
+		__asmeq("%2", "r2")
+		"mov	r3, #0\n\t"
+		"mov	r4, #0\n\t"
+		"smc	#0\n\t"
+		"ldmfd	sp!, {r4 - r11}\n\t"
+		:
+		: "r" (r0), "r" (r1), "r" (r2)
+		: "memory", "r3", "r12", "lr");
+}
+
+static int tf_set_cpu_boot_addr(int cpu, unsigned long boot_addr)
+{
+	cpu_boot_addr = boot_addr;
+	tf_generic_smc(TF_SET_CPU_BOOT_ADDR_SMC, cpu_boot_addr, 0);
+
+	return 0;
+}
+
+static int tf_prepare_idle(unsigned long mode)
+{
+	switch (mode) {
+	case TF_PM_MODE_LP0:
+		tf_generic_smc(TF_CPU_PM, TF_CPU_PM_S3, cpu_boot_addr);
+		break;
+
+	case TF_PM_MODE_LP1:
+		tf_generic_smc(TF_CPU_PM, TF_CPU_PM_S2, cpu_boot_addr);
+		break;
+
+	case TF_PM_MODE_LP1_NO_MC_CLK:
+		tf_generic_smc(TF_CPU_PM, TF_CPU_PM_S2_NO_MC_CLK,
+			       cpu_boot_addr);
+		break;
+
+	case TF_PM_MODE_LP2:
+		tf_generic_smc(TF_CPU_PM, TF_CPU_PM_S1, cpu_boot_addr);
+		break;
+
+	case TF_PM_MODE_LP2_NOFLUSH_L2:
+		tf_generic_smc(TF_CPU_PM, TF_CPU_PM_S1_NOFLUSH_L2,
+			       cpu_boot_addr);
+		break;
+
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+#ifdef CONFIG_CACHE_L2X0
+static void tf_cache_write_sec(unsigned long val, unsigned int reg)
+{
+	u32 l2x0_way_mask = 0xff;
+
+	switch (reg) {
+	case L2X0_CTRL:
+		if (l2x0_saved_regs.aux_ctrl & L310_AUX_CTRL_ASSOCIATIVITY_16)
+			l2x0_way_mask = 0xffff;
+
+		if (val == L2X0_CTRL_EN)
+			tf_generic_smc(TF_CACHE_MAINT, TF_CACHE_ENABLE,
+				       l2x0_saved_regs.aux_ctrl);
+		else
+			tf_generic_smc(TF_CACHE_MAINT, TF_CACHE_DISABLE,
+				       l2x0_way_mask);
+		break;
+
+	default:
+		break;
+	}
+}
+
+static int tf_init_cache(void)
+{
+	outer_cache.write_sec = tf_cache_write_sec;
+
+	return 0;
+}
+#endif /* CONFIG_CACHE_L2X0 */
+
+static const struct firmware_ops trusted_foundations_ops = {
+	.set_cpu_boot_addr = tf_set_cpu_boot_addr,
+	.prepare_idle = tf_prepare_idle,
+#ifdef CONFIG_CACHE_L2X0
+	.l2x0_init = tf_init_cache,
+#endif
+};
+
+void register_trusted_foundations(struct trusted_foundations_platform_data *pd)
+{
+	/*
+	 * we are not using version information for now since currently
+	 * supported SMCs are compatible with all TF releases
+	 */
+	register_firmware_ops(&trusted_foundations_ops);
+}
+
+void of_register_trusted_foundations(void)
+{
+	struct device_node *node;
+	struct trusted_foundations_platform_data pdata;
+	int err;
+
+	node = of_find_compatible_node(NULL, NULL, "tlm,trusted-foundations");
+	if (!node)
+		return;
+
+	err = of_property_read_u32(node, "tlm,version-major",
+				   &pdata.version_major);
+	if (err != 0)
+		panic("Trusted Foundation: missing version-major property\n");
+	err = of_property_read_u32(node, "tlm,version-minor",
+				   &pdata.version_minor);
+	if (err != 0)
+		panic("Trusted Foundation: missing version-minor property\n");
+	register_trusted_foundations(&pdata);
+}
+
+bool trusted_foundations_registered(void)
+{
+	return firmware_ops == &trusted_foundations_ops;
+}
diff --git a/drivers/firmware/turris-mox-rwtm.c b/drivers/firmware/turris-mox-rwtm.c
new file mode 100644
index 0000000..72be589
--- /dev/null
+++ b/drivers/firmware/turris-mox-rwtm.c
@@ -0,0 +1,384 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Turris Mox rWTM firmware driver
+ *
+ * Copyright (C) 2019 Marek Behun <marek.behun@nic.cz>
+ */
+
+#include <linux/armada-37xx-rwtm-mailbox.h>
+#include <linux/completion.h>
+#include <linux/dma-mapping.h>
+#include <linux/hw_random.h>
+#include <linux/mailbox_client.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#define DRIVER_NAME		"turris-mox-rwtm"
+
+/*
+ * The macros and constants below come from Turris Mox's rWTM firmware code.
+ * This firmware is open source and it's sources can be found at
+ * https://gitlab.labs.nic.cz/turris/mox-boot-builder/tree/master/wtmi.
+ */
+
+#define MBOX_STS_SUCCESS	(0 << 30)
+#define MBOX_STS_FAIL		(1 << 30)
+#define MBOX_STS_BADCMD		(2 << 30)
+#define MBOX_STS_ERROR(s)	((s) & (3 << 30))
+#define MBOX_STS_VALUE(s)	(((s) >> 10) & 0xfffff)
+#define MBOX_STS_CMD(s)		((s) & 0x3ff)
+
+enum mbox_cmd {
+	MBOX_CMD_GET_RANDOM	= 1,
+	MBOX_CMD_BOARD_INFO	= 2,
+	MBOX_CMD_ECDSA_PUB_KEY	= 3,
+	MBOX_CMD_HASH		= 4,
+	MBOX_CMD_SIGN		= 5,
+	MBOX_CMD_VERIFY		= 6,
+
+	MBOX_CMD_OTP_READ	= 7,
+	MBOX_CMD_OTP_WRITE	= 8,
+};
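/*
 * Editor's illustrative sketch (not part of the patch): how a firmware status
 * word decomposes with the macros above. The helper is hypothetical; the
 * driver's mox_get_status() below performs the real decoding.
 */
static inline void example_decode_status(u32 sts)
{
	u32 err = MBOX_STS_ERROR(sts);	/* MBOX_STS_SUCCESS/FAIL/BADCMD */
	u32 val = MBOX_STS_VALUE(sts);	/* 20-bit result or error value */
	u32 cmd = MBOX_STS_CMD(sts);	/* echo of the enum mbox_cmd sent */

	(void)err;
	(void)val;
	(void)cmd;
}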
+
+struct mox_kobject;
+
+struct mox_rwtm {
+	struct device *dev;
+	struct mbox_client mbox_client;
+	struct mbox_chan *mbox;
+	struct mox_kobject *kobj;
+	struct hwrng hwrng;
+
+	struct armada_37xx_rwtm_rx_msg reply;
+
+	void *buf;
+	dma_addr_t buf_phys;
+
+	struct mutex busy;
+	struct completion cmd_done;
+
+	/* board information */
+	int has_board_info;
+	u64 serial_number;
+	int board_version, ram_size;
+	u8 mac_address1[6], mac_address2[6];
+
+	/* public key burned in eFuse */
+	int has_pubkey;
+	u8 pubkey[135];
+};
+
+struct mox_kobject {
+	struct kobject kobj;
+	struct mox_rwtm *rwtm;
+};
+
+static inline struct kobject *rwtm_to_kobj(struct mox_rwtm *rwtm)
+{
+	return &rwtm->kobj->kobj;
+}
+
+static inline struct mox_rwtm *to_rwtm(struct kobject *kobj)
+{
+	return container_of(kobj, struct mox_kobject, kobj)->rwtm;
+}
+
+static void mox_kobj_release(struct kobject *kobj)
+{
+	kfree(to_rwtm(kobj)->kobj);
+}
+
+static struct kobj_type mox_kobj_ktype = {
+	.release	= mox_kobj_release,
+	.sysfs_ops	= &kobj_sysfs_ops,
+};
+
+static int mox_kobj_create(struct mox_rwtm *rwtm)
+{
+	rwtm->kobj = kzalloc(sizeof(*rwtm->kobj), GFP_KERNEL);
+	if (!rwtm->kobj)
+		return -ENOMEM;
+
+	kobject_init(rwtm_to_kobj(rwtm), &mox_kobj_ktype);
+	if (kobject_add(rwtm_to_kobj(rwtm), firmware_kobj, "turris-mox-rwtm")) {
+		kobject_put(rwtm_to_kobj(rwtm));
+		return -ENXIO;
+	}
+
+	rwtm->kobj->rwtm = rwtm;
+
+	return 0;
+}
+
+#define MOX_ATTR_RO(name, format, cat)				\
+static ssize_t							\
+name##_show(struct kobject *kobj, struct kobj_attribute *a,	\
+	    char *buf)						\
+{								\
+	struct mox_rwtm *rwtm = to_rwtm(kobj);	\
+	if (!rwtm->has_##cat)					\
+		return -ENODATA;				\
+	return sprintf(buf, format, rwtm->name);		\
+}								\
+static struct kobj_attribute mox_attr_##name = __ATTR_RO(name)
+
+MOX_ATTR_RO(serial_number, "%016llX\n", board_info);
+MOX_ATTR_RO(board_version, "%i\n", board_info);
+MOX_ATTR_RO(ram_size, "%i\n", board_info);
+MOX_ATTR_RO(mac_address1, "%pM\n", board_info);
+MOX_ATTR_RO(mac_address2, "%pM\n", board_info);
+MOX_ATTR_RO(pubkey, "%s\n", pubkey);
+
+static int mox_get_status(enum mbox_cmd cmd, u32 retval)
+{
+	/* A FAIL status must be decoded before the generic error check */
+	if (MBOX_STS_CMD(retval) != cmd)
+		return -EIO;
+	else if (MBOX_STS_ERROR(retval) == MBOX_STS_FAIL)
+		return -(int)MBOX_STS_VALUE(retval);
+	else if (MBOX_STS_ERROR(retval) != MBOX_STS_SUCCESS)
+		return -EIO;
+	else
+		return MBOX_STS_VALUE(retval);
+}
+
+static const struct attribute *mox_rwtm_attrs[] = {
+	&mox_attr_serial_number.attr,
+	&mox_attr_board_version.attr,
+	&mox_attr_ram_size.attr,
+	&mox_attr_mac_address1.attr,
+	&mox_attr_mac_address2.attr,
+	&mox_attr_pubkey.attr,
+	NULL
+};
+
+static void mox_rwtm_rx_callback(struct mbox_client *cl, void *data)
+{
+	struct mox_rwtm *rwtm = dev_get_drvdata(cl->dev);
+	struct armada_37xx_rwtm_rx_msg *msg = data;
+
+	rwtm->reply = *msg;
+	complete(&rwtm->cmd_done);
+}
+
+static void reply_to_mac_addr(u8 *mac, u32 t1, u32 t2)
+{
+	mac[0] = t1 >> 8;
+	mac[1] = t1;
+	mac[2] = t2 >> 24;
+	mac[3] = t2 >> 16;
+	mac[4] = t2 >> 8;
+	mac[5] = t2;
+}
+
+static int mox_get_board_info(struct mox_rwtm *rwtm)
+{
+	struct armada_37xx_rwtm_tx_msg msg;
+	struct armada_37xx_rwtm_rx_msg *reply = &rwtm->reply;
+	int ret;
+
+	msg.command = MBOX_CMD_BOARD_INFO;
+	ret = mbox_send_message(rwtm->mbox, &msg);
+	if (ret < 0)
+		return ret;
+
+	ret = wait_for_completion_timeout(&rwtm->cmd_done, HZ / 2);
+	if (ret == 0)
+		return -ETIMEDOUT;
+
+	ret = mox_get_status(MBOX_CMD_BOARD_INFO, reply->retval);
+	if (ret < 0 && ret != -ENODATA) {
+		return ret;
+	} else if (ret == -ENODATA) {
+		dev_warn(rwtm->dev,
+			 "Board does not have manufacturing information burned!\n");
+	} else {
+		rwtm->serial_number = reply->status[1];
+		rwtm->serial_number <<= 32;
+		rwtm->serial_number |= reply->status[0];
+		rwtm->board_version = reply->status[2];
+		rwtm->ram_size = reply->status[3];
+		reply_to_mac_addr(rwtm->mac_address1, reply->status[4],
+				  reply->status[5]);
+		reply_to_mac_addr(rwtm->mac_address2, reply->status[6],
+				  reply->status[7]);
+		rwtm->has_board_info = 1;
+
+		pr_info("Turris Mox serial number %016llX\n",
+			rwtm->serial_number);
+		pr_info("           board version %i\n", rwtm->board_version);
+		pr_info("           burned RAM size %i MiB\n", rwtm->ram_size);
+	}
+
+	msg.command = MBOX_CMD_ECDSA_PUB_KEY;
+	ret = mbox_send_message(rwtm->mbox, &msg);
+	if (ret < 0)
+		return ret;
+
+	ret = wait_for_completion_timeout(&rwtm->cmd_done, HZ / 2);
+	if (ret == 0)
+		return -ETIMEDOUT;
+
+	ret = mox_get_status(MBOX_CMD_ECDSA_PUB_KEY, reply->retval);
+	if (ret < 0 && ret != -ENODATA) {
+		return ret;
+	} else if (ret == -ENODATA) {
+		dev_warn(rwtm->dev, "Board has no public key burned!\n");
+	} else {
+		u32 *s = reply->status;
+
+		rwtm->has_pubkey = 1;
+		sprintf(rwtm->pubkey,
+			"%06x%08x%08x%08x%08x%08x%08x%08x%08x%08x%08x%08x%08x%08x%08x%08x%08x",
+			ret, s[0], s[1], s[2], s[3], s[4], s[5], s[6], s[7],
+			s[8], s[9], s[10], s[11], s[12], s[13], s[14], s[15]);
+	}
+
+	return 0;
+}
+
+static int mox_hwrng_read(struct hwrng *rng, void *data, size_t max, bool wait)
+{
+	struct mox_rwtm *rwtm = (struct mox_rwtm *) rng->priv;
+	struct armada_37xx_rwtm_tx_msg msg;
+	int ret;
+
+	if (max > 4096)
+		max = 4096;
+
+	msg.command = MBOX_CMD_GET_RANDOM;
+	msg.args[0] = 1;
+	msg.args[1] = rwtm->buf_phys;
+	msg.args[2] = (max + 3) & ~3;
+
+	if (!wait) {
+		if (!mutex_trylock(&rwtm->busy))
+			return -EBUSY;
+	} else {
+		mutex_lock(&rwtm->busy);
+	}
+
+	ret = mbox_send_message(rwtm->mbox, &msg);
+	if (ret < 0)
+		goto unlock_mutex;
+
+	ret = wait_for_completion_interruptible(&rwtm->cmd_done);
+	if (ret < 0)
+		goto unlock_mutex;
+
+	ret = mox_get_status(MBOX_CMD_GET_RANDOM, rwtm->reply.retval);
+	if (ret < 0)
+		goto unlock_mutex;
+
+	memcpy(data, rwtm->buf, max);
+	ret = max;
+
+unlock_mutex:
+	mutex_unlock(&rwtm->busy);
+	return ret;
+}
+
+static int turris_mox_rwtm_probe(struct platform_device *pdev)
+{
+	struct mox_rwtm *rwtm;
+	struct device *dev = &pdev->dev;
+	int ret;
+
+	rwtm = devm_kzalloc(dev, sizeof(*rwtm), GFP_KERNEL);
+	if (!rwtm)
+		return -ENOMEM;
+
+	rwtm->dev = dev;
+	rwtm->buf = dmam_alloc_coherent(dev, PAGE_SIZE, &rwtm->buf_phys,
+					GFP_KERNEL);
+	if (!rwtm->buf)
+		return -ENOMEM;
+
+	ret = mox_kobj_create(rwtm);
+	if (ret < 0) {
+		dev_err(dev, "Cannot create turris-mox-rwtm kobject!\n");
+		return ret;
+	}
+
+	ret = sysfs_create_files(rwtm_to_kobj(rwtm), mox_rwtm_attrs);
+	if (ret < 0) {
+		dev_err(dev, "Cannot create sysfs files!\n");
+		goto put_kobj;
+	}
+
+	platform_set_drvdata(pdev, rwtm);
+
+	mutex_init(&rwtm->busy);
+
+	rwtm->mbox_client.dev = dev;
+	rwtm->mbox_client.rx_callback = mox_rwtm_rx_callback;
+
+	rwtm->mbox = mbox_request_channel(&rwtm->mbox_client, 0);
+	if (IS_ERR(rwtm->mbox)) {
+		ret = PTR_ERR(rwtm->mbox);
+		if (ret != -EPROBE_DEFER)
+			dev_err(dev, "Cannot request mailbox channel: %i\n",
+				ret);
+		goto remove_files;
+	}
+
+	init_completion(&rwtm->cmd_done);
+
+	ret = mox_get_board_info(rwtm);
+	if (ret < 0)
+		dev_warn(dev, "Cannot read board information: %i\n", ret);
+
+	rwtm->hwrng.name = DRIVER_NAME "_hwrng";
+	rwtm->hwrng.read = mox_hwrng_read;
+	rwtm->hwrng.priv = (unsigned long) rwtm;
+	rwtm->hwrng.quality = 1024;
+
+	ret = devm_hwrng_register(dev, &rwtm->hwrng);
+	if (ret < 0) {
+		dev_err(dev, "Cannot register HWRNG: %i\n", ret);
+		goto free_channel;
+	}
+
+	return 0;
+
+free_channel:
+	mbox_free_channel(rwtm->mbox);
+remove_files:
+	sysfs_remove_files(rwtm_to_kobj(rwtm), mox_rwtm_attrs);
+put_kobj:
+	kobject_put(rwtm_to_kobj(rwtm));
+	return ret;
+}
+
+static int turris_mox_rwtm_remove(struct platform_device *pdev)
+{
+	struct mox_rwtm *rwtm = platform_get_drvdata(pdev);
+
+	sysfs_remove_files(rwtm_to_kobj(rwtm), mox_rwtm_attrs);
+	kobject_put(rwtm_to_kobj(rwtm));
+	mbox_free_channel(rwtm->mbox);
+
+	return 0;
+}
+
+static const struct of_device_id turris_mox_rwtm_match[] = {
+	{ .compatible = "cznic,turris-mox-rwtm", },
+	{ },
+};
+
+MODULE_DEVICE_TABLE(of, turris_mox_rwtm_match);
+
+static struct platform_driver turris_mox_rwtm_driver = {
+	.probe	= turris_mox_rwtm_probe,
+	.remove	= turris_mox_rwtm_remove,
+	.driver	= {
+		.name		= DRIVER_NAME,
+		.of_match_table	= turris_mox_rwtm_match,
+	},
+};
+module_platform_driver(turris_mox_rwtm_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Turris Mox rWTM firmware driver");
+MODULE_AUTHOR("Marek Behun <marek.behun@nic.cz>");
diff --git a/drivers/firmware/xilinx/Kconfig b/drivers/firmware/xilinx/Kconfig
new file mode 100644
index 0000000..bd33bbf
--- /dev/null
+++ b/drivers/firmware/xilinx/Kconfig
@@ -0,0 +1,24 @@
+# SPDX-License-Identifier: GPL-2.0
+# Kconfig for Xilinx firmware drivers
+
+menu "Zynq MPSoC Firmware Drivers"
+	depends on ARCH_ZYNQMP
+
+config ZYNQMP_FIRMWARE
+	bool "Enable Xilinx Zynq MPSoC firmware interface"
+	select MFD_CORE
+	help
+	  The firmware interface driver is used by other drivers to
+	  communicate with the firmware for various platform management
+	  services.
+	  Say yes to enable the ZynqMP firmware interface driver.
+	  If in doubt, say N.
+
+config ZYNQMP_FIRMWARE_DEBUG
+	bool "Enable Xilinx Zynq MPSoC firmware debug APIs"
+	depends on ZYNQMP_FIRMWARE && DEBUG_FS
+	help
+	  Say yes to enable ZynqMP firmware interface debug APIs.
+	  If in doubt, say N.
+
+endmenu
diff --git a/drivers/firmware/xilinx/Makefile b/drivers/firmware/xilinx/Makefile
new file mode 100644
index 0000000..875a537
--- /dev/null
+++ b/drivers/firmware/xilinx/Makefile
@@ -0,0 +1,5 @@
+# SPDX-License-Identifier: GPL-2.0
+# Makefile for Xilinx firmware drivers
+
+obj-$(CONFIG_ZYNQMP_FIRMWARE) += zynqmp.o
+obj-$(CONFIG_ZYNQMP_FIRMWARE_DEBUG) += zynqmp-debug.o
diff --git a/drivers/firmware/xilinx/zynqmp-debug.c b/drivers/firmware/xilinx/zynqmp-debug.c
new file mode 100644
index 0000000..c6d0724
--- /dev/null
+++ b/drivers/firmware/xilinx/zynqmp-debug.c
@@ -0,0 +1,240 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Xilinx Zynq MPSoC Firmware layer for debugfs APIs
+ *
+ *  Copyright (C) 2014-2018 Xilinx, Inc.
+ *
+ *  Michal Simek <michal.simek@xilinx.com>
+ *  Davorin Mista <davorin.mista@aggios.com>
+ *  Jolly Shah <jollys@xilinx.com>
+ *  Rajan Vaja <rajanv@xilinx.com>
+ */
+
+#include <linux/compiler.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/debugfs.h>
+#include <linux/uaccess.h>
+
+#include <linux/firmware/xlnx-zynqmp.h>
+#include "zynqmp-debug.h"
+
+#define PM_API_NAME_LEN			50
+
+struct pm_api_info {
+	u32 api_id;
+	char api_name[PM_API_NAME_LEN];
+	char api_name_len;
+};
+
+static char debugfs_buf[PAGE_SIZE];
+
+#define PM_API(id)		 {id, #id, strlen(#id)}
+static struct pm_api_info pm_api_list[] = {
+	PM_API(PM_GET_API_VERSION),
+	PM_API(PM_QUERY_DATA),
+};
+
+struct dentry *firmware_debugfs_root;
+
+/**
+ * zynqmp_pm_argument_value() - Extract argument value from a PM-API request
+ * @arg:	Entered PM-API argument in string format
+ *
+ * Return: Argument value in unsigned integer format on success
+ *	   0 otherwise
+ */
+static u64 zynqmp_pm_argument_value(char *arg)
+{
+	u64 value;
+
+	if (!arg)
+		return 0;
+
+	if (!kstrtou64(arg, 0, &value))
+		return value;
+
+	return 0;
+}
+
+/**
+ * get_pm_api_id() - Extract API-ID from a PM-API request
+ * @pm_api_req:		Entered PM-API argument in string format
+ * @pm_id:		API-ID
+ *
+ * Return: 0 on success else error code
+ */
+static int get_pm_api_id(char *pm_api_req, u32 *pm_id)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(pm_api_list) ; i++) {
+		if (!strncasecmp(pm_api_req, pm_api_list[i].api_name,
+				 pm_api_list[i].api_name_len)) {
+			*pm_id = pm_api_list[i].api_id;
+			break;
+		}
+	}
+
+	/* If no name was entered, look for a PM-API ID instead */
+	if (i == ARRAY_SIZE(pm_api_list) && kstrtouint(pm_api_req, 10, pm_id))
+		return -EINVAL;
+
+	return 0;
+}
+
+static int process_api_request(u32 pm_id, u64 *pm_api_arg, u32 *pm_api_ret)
+{
+	const struct zynqmp_eemi_ops *eemi_ops = zynqmp_pm_get_eemi_ops();
+	u32 pm_api_version;
+	int ret;
+	struct zynqmp_pm_query_data qdata = {0};
+
+	switch (pm_id) {
+	case PM_GET_API_VERSION:
+		ret = eemi_ops->get_api_version(&pm_api_version);
+		sprintf(debugfs_buf, "PM-API Version = %d.%d\n",
+			pm_api_version >> 16, pm_api_version & 0xffff);
+		break;
+	case PM_QUERY_DATA:
+		qdata.qid = pm_api_arg[0];
+		qdata.arg1 = pm_api_arg[1];
+		qdata.arg2 = pm_api_arg[2];
+		qdata.arg3 = pm_api_arg[3];
+
+		ret = eemi_ops->query_data(qdata, pm_api_ret);
+		if (ret)
+			break;
+
+		switch (qdata.qid) {
+		case PM_QID_CLOCK_GET_NAME:
+			sprintf(debugfs_buf, "Clock name = %s\n",
+				(char *)pm_api_ret);
+			break;
+		case PM_QID_CLOCK_GET_FIXEDFACTOR_PARAMS:
+			sprintf(debugfs_buf, "Multiplier = %d, Divider = %d\n",
+				pm_api_ret[1], pm_api_ret[2]);
+			break;
+		default:
+			sprintf(debugfs_buf,
+				"data[0] = 0x%08x\ndata[1] = 0x%08x\ndata[2] = 0x%08x\ndata[3] = 0x%08x\n",
+				pm_api_ret[0], pm_api_ret[1],
+				pm_api_ret[2], pm_api_ret[3]);
+		}
+		break;
+	default:
+		sprintf(debugfs_buf, "Unsupported PM-API request\n");
+		ret = -EINVAL;
+	}
+
+	return ret;
+}
+
+/**
+ * zynqmp_pm_debugfs_api_write() - debugfs write function
+ * @file:	User file
+ * @ptr:	User entered PM-API string
+ * @len:	Length of the userspace buffer
+ * @off:	Offset within the file
+ *
+ * Used for triggering PM-API functions by writing
+ * echo <pm_api_id>	> /sys/kernel/debug/zynqmp-firmware/pm or
+ * echo <pm_api_name>	> /sys/kernel/debug/zynqmp-firmware/pm
+ *
+ * Return: Number of bytes copied if PM-API request succeeds,
+ *	   the corresponding error code otherwise
+ */
+static ssize_t zynqmp_pm_debugfs_api_write(struct file *file,
+					   const char __user *ptr, size_t len,
+					   loff_t *off)
+{
+	char *kern_buff, *tmp_buff;
+	char *pm_api_req;
+	u32 pm_id = 0;
+	u64 pm_api_arg[4] = {0, 0, 0, 0};
+	/* Return values from PM APIs calls */
+	u32 pm_api_ret[4] = {0, 0, 0, 0};
+
+	int ret;
+	int i = 0;
+
+	strcpy(debugfs_buf, "");
+
+	if (*off != 0 || len <= 1 || len > PAGE_SIZE - 1)
+		return -EINVAL;
+
+	kern_buff = memdup_user_nul(ptr, len);
+	if (IS_ERR(kern_buff))
+		return PTR_ERR(kern_buff);
+	tmp_buff = kern_buff;
+
+	/* Read the API name from a user request */
+	pm_api_req = strsep(&kern_buff, " ");
+
+	ret = get_pm_api_id(pm_api_req, &pm_id);
+	if (ret < 0)
+		goto err;
+
+	/* Read node_id and arguments from the PM-API request */
+	pm_api_req = strsep(&kern_buff, " ");
+	while ((i < ARRAY_SIZE(pm_api_arg)) && pm_api_req) {
+		pm_api_arg[i++] = zynqmp_pm_argument_value(pm_api_req);
+		pm_api_req = strsep(&kern_buff, " ");
+	}
+
+	ret = process_api_request(pm_id, pm_api_arg, pm_api_ret);
+
+err:
+	kfree(tmp_buff);
+	if (ret)
+		return ret;
+
+	return len;
+}
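+/*
+ * Illustrative usage sketch (not part of the driver): with
+ * CONFIG_ZYNQMP_FIRMWARE_DEBUG enabled, a request can be issued and its
+ * result read back from userspace, for example:
+ *
+ *   echo "PM_GET_API_VERSION" > /sys/kernel/debug/zynqmp-firmware/pm
+ *   cat /sys/kernel/debug/zynqmp-firmware/pm
+ *
+ * The output is whatever process_api_request() formatted into debugfs_buf
+ * for the most recent request.
+ */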
+
+/**
+ * zynqmp_pm_debugfs_api_read() - debugfs read function
+ * @file:	User file
+ * @ptr:	Requested pm_api_version string
+ * @len:	Length of the userspace buffer
+ * @off:	Offset within the file
+ *
+ * Return: Length of the version string on success
+ *	   else error code
+ */
+static ssize_t zynqmp_pm_debugfs_api_read(struct file *file, char __user *ptr,
+					  size_t len, loff_t *off)
+{
+	return simple_read_from_buffer(ptr, len, off, debugfs_buf,
+				       strlen(debugfs_buf));
+}
+
+/* Setup debugfs fops */
+static const struct file_operations fops_zynqmp_pm_dbgfs = {
+	.owner = THIS_MODULE,
+	.write = zynqmp_pm_debugfs_api_write,
+	.read = zynqmp_pm_debugfs_api_read,
+};
+
+/**
+ * zynqmp_pm_api_debugfs_init - Initialize debugfs interface
+ *
+ * Return:	None
+ */
+void zynqmp_pm_api_debugfs_init(void)
+{
+	/* Initialize debugfs interface */
+	firmware_debugfs_root = debugfs_create_dir("zynqmp-firmware", NULL);
+	debugfs_create_file("pm", 0660, firmware_debugfs_root, NULL,
+			    &fops_zynqmp_pm_dbgfs);
+}
+
+/**
+ * zynqmp_pm_api_debugfs_exit - Remove debugfs interface
+ *
+ * Return:	None
+ */
+void zynqmp_pm_api_debugfs_exit(void)
+{
+	debugfs_remove_recursive(firmware_debugfs_root);
+}
diff --git a/drivers/firmware/xilinx/zynqmp-debug.h b/drivers/firmware/xilinx/zynqmp-debug.h
new file mode 100644
index 0000000..9929f8b
--- /dev/null
+++ b/drivers/firmware/xilinx/zynqmp-debug.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Xilinx Zynq MPSoC Firmware layer
+ *
+ *  Copyright (C) 2014-2018 Xilinx
+ *
+ *  Michal Simek <michal.simek@xilinx.com>
+ *  Davorin Mista <davorin.mista@aggios.com>
+ *  Jolly Shah <jollys@xilinx.com>
+ *  Rajan Vaja <rajanv@xilinx.com>
+ */
+
+#ifndef __FIRMWARE_ZYNQMP_DEBUG_H__
+#define __FIRMWARE_ZYNQMP_DEBUG_H__
+
+#if IS_REACHABLE(CONFIG_ZYNQMP_FIRMWARE_DEBUG)
+void zynqmp_pm_api_debugfs_init(void);
+void zynqmp_pm_api_debugfs_exit(void);
+#else
+static inline void zynqmp_pm_api_debugfs_init(void) { }
+static inline void zynqmp_pm_api_debugfs_exit(void) { }
+#endif
+
+#endif /* __FIRMWARE_ZYNQMP_DEBUG_H__ */
diff --git a/drivers/firmware/xilinx/zynqmp.c b/drivers/firmware/xilinx/zynqmp.c
new file mode 100644
index 0000000..fd3d837
--- /dev/null
+++ b/drivers/firmware/xilinx/zynqmp.c
@@ -0,0 +1,785 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Xilinx Zynq MPSoC Firmware layer
+ *
+ *  Copyright (C) 2014-2018 Xilinx, Inc.
+ *
+ *  Michal Simek <michal.simek@xilinx.com>
+ *  Davorin Mista <davorin.mista@aggios.com>
+ *  Jolly Shah <jollys@xilinx.com>
+ *  Rajan Vaja <rajanv@xilinx.com>
+ */
+
+#include <linux/arm-smccc.h>
+#include <linux/compiler.h>
+#include <linux/device.h>
+#include <linux/init.h>
+#include <linux/mfd/core.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+
+#include <linux/firmware/xlnx-zynqmp.h>
+#include "zynqmp-debug.h"
+
+static const struct zynqmp_eemi_ops *eemi_ops_tbl;
+
+static const struct mfd_cell firmware_devs[] = {
+	{
+		.name = "zynqmp_power_controller",
+	},
+};
+
+/**
+ * zynqmp_pm_ret_code() - Convert PMU-FW error codes to Linux error codes
+ * @ret_status:		PMUFW return code
+ *
+ * Return: corresponding Linux error code
+ */
+static int zynqmp_pm_ret_code(u32 ret_status)
+{
+	switch (ret_status) {
+	case XST_PM_SUCCESS:
+	case XST_PM_DOUBLE_REQ:
+		return 0;
+	case XST_PM_NO_ACCESS:
+		return -EACCES;
+	case XST_PM_ABORT_SUSPEND:
+		return -ECANCELED;
+	case XST_PM_INTERNAL:
+	case XST_PM_CONFLICT:
+	case XST_PM_INVALID_NODE:
+	default:
+		return -EINVAL;
+	}
+}
+
+static noinline int do_fw_call_fail(u64 arg0, u64 arg1, u64 arg2,
+				    u32 *ret_payload)
+{
+	return -ENODEV;
+}
+
+/*
+ * PM function call wrapper
+ * Invoke do_fw_call_smc or do_fw_call_hvc, depending on the configuration
+ */
+static int (*do_fw_call)(u64, u64, u64, u32 *ret_payload) = do_fw_call_fail;
+
+/**
+ * do_fw_call_smc() - Call system-level platform management layer (SMC)
+ * @arg0:		Argument 0 to SMC call
+ * @arg1:		Argument 1 to SMC call
+ * @arg2:		Argument 2 to SMC call
+ * @ret_payload:	Returned value array
+ *
+ * Invoke platform management function via SMC call (no hypervisor present).
+ *
+ * Return: Returns status, either success or error+reason
+ */
+static noinline int do_fw_call_smc(u64 arg0, u64 arg1, u64 arg2,
+				   u32 *ret_payload)
+{
+	struct arm_smccc_res res;
+
+	arm_smccc_smc(arg0, arg1, arg2, 0, 0, 0, 0, 0, &res);
+
+	if (ret_payload) {
+		ret_payload[0] = lower_32_bits(res.a0);
+		ret_payload[1] = upper_32_bits(res.a0);
+		ret_payload[2] = lower_32_bits(res.a1);
+		ret_payload[3] = upper_32_bits(res.a1);
+	}
+
+	return zynqmp_pm_ret_code((enum pm_ret_status)res.a0);
+}
+
+/**
+ * do_fw_call_hvc() - Call system-level platform management layer (HVC)
+ * @arg0:		Argument 0 to HVC call
+ * @arg1:		Argument 1 to HVC call
+ * @arg2:		Argument 2 to HVC call
+ * @ret_payload:	Returned value array
+ *
+ * Invoke platform management function via HVC call; HVC is used when
+ * communicating through a hypervisor (no direct communication with ATF).
+ *
+ * Return: Returns status, either success or error+reason
+ */
+static noinline int do_fw_call_hvc(u64 arg0, u64 arg1, u64 arg2,
+				   u32 *ret_payload)
+{
+	struct arm_smccc_res res;
+
+	arm_smccc_hvc(arg0, arg1, arg2, 0, 0, 0, 0, 0, &res);
+
+	if (ret_payload) {
+		ret_payload[0] = lower_32_bits(res.a0);
+		ret_payload[1] = upper_32_bits(res.a0);
+		ret_payload[2] = lower_32_bits(res.a1);
+		ret_payload[3] = upper_32_bits(res.a1);
+	}
+
+	return zynqmp_pm_ret_code((enum pm_ret_status)res.a0);
+}
+
+/**
+ * zynqmp_pm_invoke_fn() - Invoke the system-level platform management layer
+ *			   caller function depending on the configuration
+ * @pm_api_id:		Requested PM-API call
+ * @arg0:		Argument 0 to requested PM-API call
+ * @arg1:		Argument 1 to requested PM-API call
+ * @arg2:		Argument 2 to requested PM-API call
+ * @arg3:		Argument 3 to requested PM-API call
+ * @ret_payload:	Returned value array
+ *
+ * Invoke platform management function for SMC or HVC call, depending on
+ * configuration.
+ * Following the SMC Calling Convention (SMCCC) for SMC64, the
+ * PM Function Identifier is built as
+ * PM_SIP_SVC + PM_API_ID =
+ *	((SMC_TYPE_FAST << FUNCID_TYPE_SHIFT) |
+ *	 (SMC_64 << FUNCID_CC_SHIFT) |
+ *	 (SIP_START << FUNCID_OEN_SHIFT) |
+ *	 (PM_API_ID & FUNCID_NUM_MASK))
+ *
+ * PM_SIP_SVC	- Registered ZynqMP SIP Service Call.
+ * PM_API_ID	- Platform Management API ID.
+ *
+ * Return: Returns status, either success or error+reason
+ */
+int zynqmp_pm_invoke_fn(u32 pm_api_id, u32 arg0, u32 arg1,
+			u32 arg2, u32 arg3, u32 *ret_payload)
+{
+	/*
+	 * Add the SIP service call Function Identifier; it must be
+	 * passed in the x0 register.
+	 */
+	u64 smc_arg[4];
+
+	smc_arg[0] = PM_SIP_SVC | pm_api_id;
+	smc_arg[1] = ((u64)arg1 << 32) | arg0;
+	smc_arg[2] = ((u64)arg3 << 32) | arg2;
+
+	return do_fw_call(smc_arg[0], smc_arg[1], smc_arg[2], ret_payload);
+}
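+/*
+ * Illustrative sketch (not part of the driver): a call such as
+ * zynqmp_pm_invoke_fn(PM_REQUEST_NODE, node, caps, qos, ack, NULL)
+ * is packed as
+ *	smc_arg[0] = PM_SIP_SVC | PM_REQUEST_NODE
+ *	smc_arg[1] = ((u64)caps << 32) | node
+ *	smc_arg[2] = ((u64)ack << 32) | qos
+ * and then dispatched through do_fw_call() (SMC or HVC).
+ */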
+
+static u32 pm_api_version;
+static u32 pm_tz_version;
+
+/**
+ * zynqmp_pm_get_api_version() - Get version number of PMU PM firmware
+ * @version:	Returned version value
+ *
+ * Return: Returns status, either success or error+reason
+ */
+static int zynqmp_pm_get_api_version(u32 *version)
+{
+	u32 ret_payload[PAYLOAD_ARG_CNT];
+	int ret;
+
+	if (!version)
+		return -EINVAL;
+
+	/* Check if the PM API version was already retrieved */
+	if (pm_api_version > 0) {
+		*version = pm_api_version;
+		return 0;
+	}
+	ret = zynqmp_pm_invoke_fn(PM_GET_API_VERSION, 0, 0, 0, 0, ret_payload);
+	*version = ret_payload[1];
+
+	return ret;
+}
+
+/**
+ * zynqmp_pm_get_chipid - Get silicon ID registers
+ * @idcode:     IDCODE register
+ * @version:    version register
+ *
+ * Return:      Returns the status of the operation and the idcode and version
+ *              registers in @idcode and @version.
+ */
+static int zynqmp_pm_get_chipid(u32 *idcode, u32 *version)
+{
+	u32 ret_payload[PAYLOAD_ARG_CNT];
+	int ret;
+
+	if (!idcode || !version)
+		return -EINVAL;
+
+	ret = zynqmp_pm_invoke_fn(PM_GET_CHIPID, 0, 0, 0, 0, ret_payload);
+	*idcode = ret_payload[1];
+	*version = ret_payload[2];
+
+	return ret;
+}
+
+/**
+ * zynqmp_pm_get_trustzone_version() - Get secure trustzone firmware version
+ * @version:	Returned version value
+ *
+ * Return: Returns status, either success or error+reason
+ */
+static int zynqmp_pm_get_trustzone_version(u32 *version)
+{
+	u32 ret_payload[PAYLOAD_ARG_CNT];
+	int ret;
+
+	if (!version)
+		return -EINVAL;
+
+	/* Check if the PM trustzone version was already retrieved */
+	if (pm_tz_version > 0) {
+		*version = pm_tz_version;
+		return 0;
+	}
+	ret = zynqmp_pm_invoke_fn(PM_GET_TRUSTZONE_VERSION, 0, 0,
+				  0, 0, ret_payload);
+	*version = ret_payload[1];
+
+	return ret;
+}
+
+/**
+ * get_set_conduit_method() - Choose SMC or HVC based communication
+ * @np:		Pointer to the device_node structure
+ *
+ * Use SMC or HVC-based functions to communicate with EL2/EL3.
+ *
+ * Return: Returns 0 on success or error code
+ */
+static int get_set_conduit_method(struct device_node *np)
+{
+	const char *method;
+
+	if (of_property_read_string(np, "method", &method)) {
+		pr_warn("%s missing \"method\" property\n", __func__);
+		return -ENXIO;
+	}
+
+	if (!strcmp("hvc", method)) {
+		do_fw_call = do_fw_call_hvc;
+	} else if (!strcmp("smc", method)) {
+		do_fw_call = do_fw_call_smc;
+	} else {
+		pr_warn("%s Invalid \"method\" property: %s\n",
+			__func__, method);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/**
+ * zynqmp_pm_query_data() - Get query data from firmware
+ * @qdata:	Query data structure containing the query ID and arguments
+ * @out:	Returned output value
+ *
+ * Return: Returns status, either success or error+reason
+ */
+static int zynqmp_pm_query_data(struct zynqmp_pm_query_data qdata, u32 *out)
+{
+	int ret;
+
+	ret = zynqmp_pm_invoke_fn(PM_QUERY_DATA, qdata.qid, qdata.arg1,
+				  qdata.arg2, qdata.arg3, out);
+
+	/*
+	 * For clock name query, all bytes in SMC response are clock name
+	 * characters and return code is always success. For invalid clocks,
+	 * clock name bytes would be zeros.
+	 */
+	return qdata.qid == PM_QID_CLOCK_GET_NAME ? 0 : ret;
+}
+
+/**
+ * zynqmp_pm_clock_enable() - Enable the clock for given id
+ * @clock_id:	ID of the clock to be enabled
+ *
+ * This function is used by master to enable the clock
+ * including peripherals and PLL clocks.
+ *
+ * Return: Returns status, either success or error+reason
+ */
+static int zynqmp_pm_clock_enable(u32 clock_id)
+{
+	return zynqmp_pm_invoke_fn(PM_CLOCK_ENABLE, clock_id, 0, 0, 0, NULL);
+}
+
+/**
+ * zynqmp_pm_clock_disable() - Disable the clock for given id
+ * @clock_id:	ID of the clock to be disabled
+ *
+ * This function is used by master to disable the clock
+ * including peripherals and PLL clocks.
+ *
+ * Return: Returns status, either success or error+reason
+ */
+static int zynqmp_pm_clock_disable(u32 clock_id)
+{
+	return zynqmp_pm_invoke_fn(PM_CLOCK_DISABLE, clock_id, 0, 0, 0, NULL);
+}
+
+/**
+ * zynqmp_pm_clock_getstate() - Get the clock state for given id
+ * @clock_id:	ID of the clock to be queried
+ * @state:	1/0 (Enabled/Disabled)
+ *
+ * This function is used by master to get the state of clock
+ * including peripherals and PLL clocks.
+ *
+ * Return: Returns status, either success or error+reason
+ */
+static int zynqmp_pm_clock_getstate(u32 clock_id, u32 *state)
+{
+	u32 ret_payload[PAYLOAD_ARG_CNT];
+	int ret;
+
+	ret = zynqmp_pm_invoke_fn(PM_CLOCK_GETSTATE, clock_id, 0,
+				  0, 0, ret_payload);
+	*state = ret_payload[1];
+
+	return ret;
+}
+
+/**
+ * zynqmp_pm_clock_setdivider() - Set the clock divider for given id
+ * @clock_id:	ID of the clock
+ * @divider:	divider value
+ *
+ * This function is used by master to set divider for any clock
+ * to achieve desired rate.
+ *
+ * Return: Returns status, either success or error+reason
+ */
+static int zynqmp_pm_clock_setdivider(u32 clock_id, u32 divider)
+{
+	return zynqmp_pm_invoke_fn(PM_CLOCK_SETDIVIDER, clock_id, divider,
+				   0, 0, NULL);
+}
+
+/**
+ * zynqmp_pm_clock_getdivider() - Get the clock divider for given id
+ * @clock_id:	ID of the clock
+ * @divider:	divider value
+ *
+ * This function is used by master to get divider values
+ * for any clock.
+ *
+ * Return: Returns status, either success or error+reason
+ */
+static int zynqmp_pm_clock_getdivider(u32 clock_id, u32 *divider)
+{
+	u32 ret_payload[PAYLOAD_ARG_CNT];
+	int ret;
+
+	ret = zynqmp_pm_invoke_fn(PM_CLOCK_GETDIVIDER, clock_id, 0,
+				  0, 0, ret_payload);
+	*divider = ret_payload[1];
+
+	return ret;
+}
+
+/**
+ * zynqmp_pm_clock_setrate() - Set the clock rate for given id
+ * @clock_id:	ID of the clock
+ * @rate:	rate value in Hz
+ *
+ * This function is used by master to set rate for any clock.
+ *
+ * Return: Returns status, either success or error+reason
+ */
+static int zynqmp_pm_clock_setrate(u32 clock_id, u64 rate)
+{
+	return zynqmp_pm_invoke_fn(PM_CLOCK_SETRATE, clock_id,
+				   lower_32_bits(rate),
+				   upper_32_bits(rate),
+				   0, NULL);
+}
+
+/**
+ * zynqmp_pm_clock_getrate() - Get the clock rate for given id
+ * @clock_id:	ID of the clock
+ * @rate:	rate value in Hz
+ *
+ * This function is used by master to get rate
+ * for any clock.
+ *
+ * Return: Returns status, either success or error+reason
+ */
+static int zynqmp_pm_clock_getrate(u32 clock_id, u64 *rate)
+{
+	u32 ret_payload[PAYLOAD_ARG_CNT];
+	int ret;
+
+	ret = zynqmp_pm_invoke_fn(PM_CLOCK_GETRATE, clock_id, 0,
+				  0, 0, ret_payload);
+	*rate = ((u64)ret_payload[2] << 32) | ret_payload[1];
+
+	return ret;
+}
+
+/**
+ * zynqmp_pm_clock_setparent() - Set the clock parent for given id
+ * @clock_id:	ID of the clock
+ * @parent_id:	parent id
+ *
+ * This function is used by master to set parent for any clock.
+ *
+ * Return: Returns status, either success or error+reason
+ */
+static int zynqmp_pm_clock_setparent(u32 clock_id, u32 parent_id)
+{
+	return zynqmp_pm_invoke_fn(PM_CLOCK_SETPARENT, clock_id,
+				   parent_id, 0, 0, NULL);
+}
+
+/**
+ * zynqmp_pm_clock_getparent() - Get the clock parent for given id
+ * @clock_id:	ID of the clock
+ * @parent_id:	parent id
+ *
+ * This function is used by master to get parent index
+ * for any clock.
+ *
+ * Return: Returns status, either success or error+reason
+ */
+static int zynqmp_pm_clock_getparent(u32 clock_id, u32 *parent_id)
+{
+	u32 ret_payload[PAYLOAD_ARG_CNT];
+	int ret;
+
+	ret = zynqmp_pm_invoke_fn(PM_CLOCK_GETPARENT, clock_id, 0,
+				  0, 0, ret_payload);
+	*parent_id = ret_payload[1];
+
+	return ret;
+}
+
+/**
+ * zynqmp_is_valid_ioctl() - Check whether IOCTL ID is valid or not
+ * @ioctl_id:	IOCTL ID
+ *
+ * Return: 1 if IOCTL is valid else 0
+ */
+static inline int zynqmp_is_valid_ioctl(u32 ioctl_id)
+{
+	switch (ioctl_id) {
+	case IOCTL_SET_PLL_FRAC_MODE:
+	case IOCTL_GET_PLL_FRAC_MODE:
+	case IOCTL_SET_PLL_FRAC_DATA:
+	case IOCTL_GET_PLL_FRAC_DATA:
+		return 1;
+	default:
+		return 0;
+	}
+}
+
+/**
+ * zynqmp_pm_ioctl() - PM IOCTL API for device control and configs
+ * @node_id:	Node ID of the device
+ * @ioctl_id:	ID of the requested IOCTL
+ * @arg1:	Argument 1 to requested IOCTL call
+ * @arg2:	Argument 2 to requested IOCTL call
+ * @out:	Returned output value
+ *
+ * This function calls IOCTL to firmware for device control and configuration.
+ *
+ * Return: Returns status, either success or error+reason
+ */
+static int zynqmp_pm_ioctl(u32 node_id, u32 ioctl_id, u32 arg1, u32 arg2,
+			   u32 *out)
+{
+	if (!zynqmp_is_valid_ioctl(ioctl_id))
+		return -EINVAL;
+
+	return zynqmp_pm_invoke_fn(PM_IOCTL, node_id, ioctl_id,
+				   arg1, arg2, out);
+}
+
+/**
+ * zynqmp_pm_reset_assert - Request setting of reset (1 - assert, 0 - release)
+ * @reset:		Reset to be configured
+ * @assert_flag:	Flag stating whether the reset should be asserted (1)
+ *			or released (0)
+ *
+ * Return: Returns status, either success or error+reason
+ */
+static int zynqmp_pm_reset_assert(const enum zynqmp_pm_reset reset,
+				  const enum zynqmp_pm_reset_action assert_flag)
+{
+	return zynqmp_pm_invoke_fn(PM_RESET_ASSERT, reset, assert_flag,
+				   0, 0, NULL);
+}
+
+/**
+ * zynqmp_pm_reset_get_status - Get status of the reset
+ * @reset:      Reset whose status should be returned
+ * @status:     Returned status
+ *
+ * Return: Returns status, either success or error+reason
+ */
+static int zynqmp_pm_reset_get_status(const enum zynqmp_pm_reset reset,
+				      u32 *status)
+{
+	u32 ret_payload[PAYLOAD_ARG_CNT];
+	int ret;
+
+	if (!status)
+		return -EINVAL;
+
+	ret = zynqmp_pm_invoke_fn(PM_RESET_GET_STATUS, reset, 0,
+				  0, 0, ret_payload);
+	*status = ret_payload[1];
+
+	return ret;
+}
+
+/**
+ * zynqmp_pm_fpga_load - Perform the fpga load
+ * @address:	Address to write to
+ * @size:	PL bitstream size
+ * @flags:	Bitstream type
+ *	-XILINX_ZYNQMP_PM_FPGA_FULL:  FPGA full reconfiguration
+ *	-XILINX_ZYNQMP_PM_FPGA_PARTIAL: FPGA partial reconfiguration
+ *
+ * This function provides access to the PMU firmware (pmufw) to transfer
+ * the required bitstream into the PL.
+ *
+ * Return: Returns status, either success or error+reason
+ */
+static int zynqmp_pm_fpga_load(const u64 address, const u32 size,
+			       const u32 flags)
+{
+	return zynqmp_pm_invoke_fn(PM_FPGA_LOAD, lower_32_bits(address),
+				   upper_32_bits(address), size, flags, NULL);
+}
+
+/**
+ * zynqmp_pm_fpga_get_status - Read value from PCAP status register
+ * @value:	Returned PCAP status register value
+ *
+ * This function provides access to the pmufw to get the PCAP
+ * status
+ *
+ * Return: Returns status, either success or error+reason
+ */
+static int zynqmp_pm_fpga_get_status(u32 *value)
+{
+	u32 ret_payload[PAYLOAD_ARG_CNT];
+	int ret;
+
+	if (!value)
+		return -EINVAL;
+
+	ret = zynqmp_pm_invoke_fn(PM_FPGA_GET_STATUS, 0, 0, 0, 0, ret_payload);
+	*value = ret_payload[1];
+
+	return ret;
+}
+
+/**
+ * zynqmp_pm_init_finalize() - PM call to inform firmware that the caller
+ *			       master has initialized its own power management
+ *
+ * This API function is used to notify the power management controller
+ * that power management initialization has completed.
+ *
+ * Return: Returns status, either success or error+reason
+ */
+static int zynqmp_pm_init_finalize(void)
+{
+	return zynqmp_pm_invoke_fn(PM_PM_INIT_FINALIZE, 0, 0, 0, 0, NULL);
+}
+
+/**
+ * zynqmp_pm_set_suspend_mode()	- Set system suspend mode
+ * @mode:	Mode to set for system suspend
+ *
+ * This API function is used to set mode of system suspend.
+ *
+ * Return: Returns status, either success or error+reason
+ */
+static int zynqmp_pm_set_suspend_mode(u32 mode)
+{
+	return zynqmp_pm_invoke_fn(PM_SET_SUSPEND_MODE, mode, 0, 0, 0, NULL);
+}
+
+/**
+ * zynqmp_pm_request_node() - Request a node with specific capabilities
+ * @node:		Node ID of the slave
+ * @capabilities:	Requested capabilities of the slave
+ * @qos:		Quality of service (not supported)
+ * @ack:		Flag to specify whether acknowledge is requested
+ *
+ * This function is used by master to request particular node from firmware.
+ * Every master must request node before using it.
+ *
+ * Return: Returns status, either success or error+reason
+ */
+static int zynqmp_pm_request_node(const u32 node, const u32 capabilities,
+				  const u32 qos,
+				  const enum zynqmp_pm_request_ack ack)
+{
+	return zynqmp_pm_invoke_fn(PM_REQUEST_NODE, node, capabilities,
+				   qos, ack, NULL);
+}
+
+/**
+ * zynqmp_pm_release_node() - Release a node
+ * @node:	Node ID of the slave
+ *
+ * This function is used by master to inform firmware that master
+ * has released node. Once released, master must not use that node
+ * without re-request.
+ *
+ * Return: Returns status, either success or error+reason
+ */
+static int zynqmp_pm_release_node(const u32 node)
+{
+	return zynqmp_pm_invoke_fn(PM_RELEASE_NODE, node, 0, 0, 0, NULL);
+}
+
+/**
+ * zynqmp_pm_set_requirement() - PM call to set requirement for PM slaves
+ * @node:		Node ID of the slave
+ * @capabilities:	Requested capabilities of the slave
+ * @qos:		Quality of service (not supported)
+ * @ack:		Flag to specify whether acknowledge is requested
+ *
+ * This API function is used to change the capabilities of a slave that
+ * the PU has already requested.
+ *
+ * Return: Returns status, either success or error+reason
+ */
+static int zynqmp_pm_set_requirement(const u32 node, const u32 capabilities,
+				     const u32 qos,
+				     const enum zynqmp_pm_request_ack ack)
+{
+	return zynqmp_pm_invoke_fn(PM_SET_REQUIREMENT, node, capabilities,
+				   qos, ack, NULL);
+}
+
+static const struct zynqmp_eemi_ops eemi_ops = {
+	.get_api_version = zynqmp_pm_get_api_version,
+	.get_chipid = zynqmp_pm_get_chipid,
+	.query_data = zynqmp_pm_query_data,
+	.clock_enable = zynqmp_pm_clock_enable,
+	.clock_disable = zynqmp_pm_clock_disable,
+	.clock_getstate = zynqmp_pm_clock_getstate,
+	.clock_setdivider = zynqmp_pm_clock_setdivider,
+	.clock_getdivider = zynqmp_pm_clock_getdivider,
+	.clock_setrate = zynqmp_pm_clock_setrate,
+	.clock_getrate = zynqmp_pm_clock_getrate,
+	.clock_setparent = zynqmp_pm_clock_setparent,
+	.clock_getparent = zynqmp_pm_clock_getparent,
+	.ioctl = zynqmp_pm_ioctl,
+	.reset_assert = zynqmp_pm_reset_assert,
+	.reset_get_status = zynqmp_pm_reset_get_status,
+	.init_finalize = zynqmp_pm_init_finalize,
+	.set_suspend_mode = zynqmp_pm_set_suspend_mode,
+	.request_node = zynqmp_pm_request_node,
+	.release_node = zynqmp_pm_release_node,
+	.set_requirement = zynqmp_pm_set_requirement,
+	.fpga_load = zynqmp_pm_fpga_load,
+	.fpga_get_status = zynqmp_pm_fpga_get_status,
+};
+
+/**
+ * zynqmp_pm_get_eemi_ops - Get eemi ops functions
+ *
+ * Return: Pointer to the eemi_ops structure on success,
+ *	   ERR_PTR(-EPROBE_DEFER) if the firmware driver has not probed yet
+ */
+const struct zynqmp_eemi_ops *zynqmp_pm_get_eemi_ops(void)
+{
+	if (eemi_ops_tbl)
+		return eemi_ops_tbl;
+	else
+		return ERR_PTR(-EPROBE_DEFER);
+}
+EXPORT_SYMBOL_GPL(zynqmp_pm_get_eemi_ops);
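+/*
+ * Illustrative consumer sketch (hypothetical, not part of this file): a
+ * client driver would typically fetch the ops table at probe time and
+ * defer until this firmware driver has probed, e.g.:
+ *
+ *	const struct zynqmp_eemi_ops *ops = zynqmp_pm_get_eemi_ops();
+ *
+ *	if (IS_ERR(ops))
+ *		return PTR_ERR(ops);	// -EPROBE_DEFER until ready
+ *	ret = ops->clock_enable(clock_id);	// clock_id is hypothetical
+ */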
+
+static int zynqmp_firmware_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct device_node *np;
+	int ret;
+
+	np = of_find_compatible_node(NULL, NULL, "xlnx,zynqmp");
+	if (!np)
+		return 0;
+	of_node_put(np);
+
+	ret = get_set_conduit_method(dev->of_node);
+	if (ret)
+		return ret;
+
+	/* Check PM API version number */
+	zynqmp_pm_get_api_version(&pm_api_version);
+	if (pm_api_version < ZYNQMP_PM_VERSION) {
+		panic("%s Platform Management API version error. Expected: v%d.%d - Found: v%d.%d\n",
+		      __func__,
+		      ZYNQMP_PM_VERSION_MAJOR, ZYNQMP_PM_VERSION_MINOR,
+		      pm_api_version >> 16, pm_api_version & 0xFFFF);
+	}
+
+	pr_info("%s Platform Management API v%d.%d\n", __func__,
+		pm_api_version >> 16, pm_api_version & 0xFFFF);
+
+	/* Check trustzone version number */
+	ret = zynqmp_pm_get_trustzone_version(&pm_tz_version);
+	if (ret)
+		panic("Legacy trustzone found without version support\n");
+
+	if (pm_tz_version < ZYNQMP_TZ_VERSION)
+		panic("%s Trustzone version error. Expected: v%d.%d - Found: v%d.%d\n",
+		      __func__,
+		      ZYNQMP_TZ_VERSION_MAJOR, ZYNQMP_TZ_VERSION_MINOR,
+		      pm_tz_version >> 16, pm_tz_version & 0xFFFF);
+
+	pr_info("%s Trustzone version v%d.%d\n", __func__,
+		pm_tz_version >> 16, pm_tz_version & 0xFFFF);
+
+	/* Assign eemi_ops_table */
+	eemi_ops_tbl = &eemi_ops;
+
+	zynqmp_pm_api_debugfs_init();
+
+	ret = mfd_add_devices(&pdev->dev, PLATFORM_DEVID_NONE, firmware_devs,
+			      ARRAY_SIZE(firmware_devs), NULL, 0, NULL);
+	if (ret) {
+		dev_err(&pdev->dev, "failed to add MFD devices %d\n", ret);
+		return ret;
+	}
+
+	return of_platform_populate(dev->of_node, NULL, NULL, dev);
+}
+
+static int zynqmp_firmware_remove(struct platform_device *pdev)
+{
+	mfd_remove_devices(&pdev->dev);
+	zynqmp_pm_api_debugfs_exit();
+
+	return 0;
+}
+
+static const struct of_device_id zynqmp_firmware_of_match[] = {
+	{.compatible = "xlnx,zynqmp-firmware"},
+	{},
+};
+MODULE_DEVICE_TABLE(of, zynqmp_firmware_of_match);
+
+static struct platform_driver zynqmp_firmware_driver = {
+	.driver = {
+		.name = "zynqmp_firmware",
+		.of_match_table = zynqmp_firmware_of_match,
+	},
+	.probe = zynqmp_firmware_probe,
+	.remove = zynqmp_firmware_remove,
+};
+module_platform_driver(zynqmp_firmware_driver);