v4.19.13 snapshot.
diff --git a/drivers/firmware/Kconfig b/drivers/firmware/Kconfig
new file mode 100644
index 0000000..6e83880
--- /dev/null
+++ b/drivers/firmware/Kconfig
@@ -0,0 +1,295 @@
+#
+# For a description of the syntax of this configuration file,
+# see Documentation/kbuild/kconfig-language.txt.
+#
+
+menu "Firmware Drivers"
+
+config ARM_PSCI_FW
+	bool
+
+config ARM_PSCI_CHECKER
+	bool "ARM PSCI checker"
+	depends on ARM_PSCI_FW && HOTPLUG_CPU && CPU_IDLE && !TORTURE_TEST
+	help
+	  Run the PSCI checker during startup. This checks that hotplug and
+	  suspend operations work correctly when using PSCI.
+
+	  The torture tests may interfere with the PSCI checker by turning CPUs
+	  on and off through hotplug, so for now torture tests and PSCI checker
+	  are mutually exclusive.
+
+config ARM_SCMI_PROTOCOL
+	bool "ARM System Control and Management Interface (SCMI) Message Protocol"
+	depends on ARM || ARM64 || COMPILE_TEST
+	depends on MAILBOX
+	help
+	  ARM System Control and Management Interface (SCMI) protocol is a
+	  set of operating system-independent software interfaces that are
+	  used in system management. SCMI is extensible and currently
+	  provides interfaces for: discovery and self-description of the
+	  interfaces it supports; power domain management, which is the
+	  ability to place a given device or domain into the various
+	  power-saving states that it supports; performance management,
+	  which is the ability to control the performance of a domain that
+	  is composed of compute engines such as application processors and
+	  other accelerators; clock management, which is the ability to set
+	  and inquire rates on platform managed clocks; and sensor
+	  management, which is the ability to read sensor data and be
+	  notified of sensor values.
+
+	  This protocol library provides an interface for all the client
+	  drivers making use of the features offered by SCMI.
+
+config ARM_SCMI_POWER_DOMAIN
+	tristate "SCMI power domain driver"
+	depends on ARM_SCMI_PROTOCOL || (COMPILE_TEST && OF)
+	default y
+	select PM_GENERIC_DOMAINS if PM
+	help
+	  This enables support for the SCMI power domains which can be
+	  enabled or disabled via the SCP firmware.
+
+	  This driver can also be built as a module.  If so, the module
+	  will be called scmi_pm_domain. Note this may be needed early in
+	  boot before the rootfs is available.
+
+config ARM_SCPI_PROTOCOL
+	tristate "ARM System Control and Power Interface (SCPI) Message Protocol"
+	depends on ARM || ARM64 || COMPILE_TEST
+	depends on MAILBOX
+	help
+	  System Control and Power Interface (SCPI) Message Protocol is
+	  defined for the purpose of communication between the Application
+	  Cores (AP) and the System Control Processor (SCP). The MHU
+	  peripheral provides a mechanism for inter-processor communication
+	  between SCP and AP.
+
+	  SCP controls most of the power management on the Application
+	  Processors. It offers control and management of: the core/cluster
+	  power states, various power domain DVFS including the core/cluster,
+	  certain system clocks configuration, thermal sensors and many
+	  others.
+
+	  This protocol library provides an interface for all the client
+	  drivers making use of the features offered by the SCP.
+
+config ARM_SCPI_POWER_DOMAIN
+	tristate "SCPI power domain driver"
+	depends on ARM_SCPI_PROTOCOL || (COMPILE_TEST && OF)
+	default y
+	select PM_GENERIC_DOMAINS if PM
+	help
+	  This enables support for the SCPI power domains which can be
+	  enabled or disabled via the SCP firmware.
+
+config ARM_SDE_INTERFACE
+	bool "ARM Software Delegated Exception Interface (SDEI)"
+	depends on ARM64
+	help
+	  The Software Delegated Exception Interface (SDEI) is an ARM
+	  standard for registering callbacks from the platform firmware
+	  into the OS. This is typically used to implement RAS notifications.
+
+config EDD
+	tristate "BIOS Enhanced Disk Drive calls determine boot disk"
+	depends on X86
+	help
+	  Say Y or M here if you want to enable BIOS Enhanced Disk Drive
+	  Services real mode BIOS calls to determine which disk the BIOS
+	  tries to boot from.  This information is then exported via sysfs.
+
+	  This option is experimental and is known to fail to boot on some
+	  obscure configurations. Most disk controller BIOS vendors do
+	  not yet implement this feature.
+
+config EDD_OFF
+	bool "Sets default behavior for EDD detection to off"
+	depends on EDD
+	default n
+	help
+	  Say Y if you want EDD disabled by default, even though it is
+	  compiled into the kernel. Say N if you want EDD enabled by default.
+	  EDD can be dynamically set using the kernel parameter
+	  'edd={on|skipmbr|off}'.
+
+config FIRMWARE_MEMMAP
+    bool "Add firmware-provided memory map to sysfs" if EXPERT
+    default X86
+    help
+      Add the firmware-provided (unmodified) memory map to /sys/firmware/memmap.
+      That memory map is used for example by kexec to set up parameter area
+      for the next kernel, but can also be used for debugging purposes.
+
+      See also Documentation/ABI/testing/sysfs-firmware-memmap.
+
+config EFI_PCDP
+	bool "Console device selection via EFI PCDP or HCDP table"
+	depends on ACPI && EFI && IA64
+	default y if IA64
+	help
+	  If your firmware supplies the PCDP table, and you want to
+	  automatically use the primary console device it describes
+	  as the Linux console, say Y here.
+
+	  If your firmware supplies the HCDP table, and you want to
+	  use the first serial port it describes as the Linux console,
+	  say Y here.  If your EFI ConOut path contains only a UART
+	  device, it will become the console automatically.  Otherwise,
+	  you must specify the "console=hcdp" kernel boot argument.
+
+	  Neither the PCDP nor the HCDP affects naming of serial devices,
+	  so a serial console may be /dev/ttyS0, /dev/ttyS1, etc, depending
+	  on how the driver discovers devices.
+
+	  You must also enable the appropriate drivers (serial, VGA, etc.)
+
+	  See DIG64_HCDPv20_042804.pdf available from
+	  <http://www.dig64.org/specifications/>
+
+config DELL_RBU
+	tristate "BIOS update support for DELL systems via sysfs"
+	depends on X86
+	select FW_LOADER
+	select FW_LOADER_USER_HELPER
+	help
+	  Say m if you want to have the option of updating the BIOS for your
+	  DELL system. Note that you need a supporting application, such as
+	  Dell OpenManage or a Dell Update Package (DUP), to communicate with
+	  the BIOS regarding the new image for the image update to take
+	  effect.
+	  See <file:Documentation/dell_rbu.txt> for more details on the driver.
+
+config DCDBAS
+	tristate "Dell Systems Management Base Driver"
+	depends on X86
+	help
+	  The Dell Systems Management Base Driver provides a sysfs interface
+	  for systems management software to perform System Management
+	  Interrupts (SMIs) and Host Control Actions (system power cycle or
+	  power off after OS shutdown) on certain Dell systems.
+
+	  See <file:Documentation/dcdbas.txt> for more details on the driver
+	  and the Dell systems on which Dell systems management software makes
+	  use of this driver.
+
+	  Say Y or M here to enable the driver for use by Dell systems
+	  management software such as Dell OpenManage.
+
+config DMIID
+    bool "Export DMI identification via sysfs to userspace"
+    depends on DMI
+    default y
+	help
+	  Say Y here if you want to query SMBIOS/DMI system identification
+	  information from userspace through /sys/class/dmi/id/ or if you want
+	  DMI-based module auto-loading.
+
+config DMI_SYSFS
+	tristate "DMI table support in sysfs"
+	depends on SYSFS && DMI
+	default n
+	help
+	  Say Y or M here to enable the exporting of the raw DMI table
+	  data via sysfs.  This is useful for consuming the data without
+	  requiring any access to /dev/mem at all.  Tables are found
+	  under /sys/firmware/dmi when this option is enabled and
+	  loaded.
+
+config DMI_SCAN_MACHINE_NON_EFI_FALLBACK
+	bool
+
+config ISCSI_IBFT_FIND
+	bool "iSCSI Boot Firmware Table Attributes"
+	depends on X86 && ACPI
+	default n
+	help
+	  This option enables the kernel to find the region of memory
+	  in which the iSCSI Boot Firmware Table (iBFT) resides. This
+	  is necessary for the iSCSI Boot Firmware Table Attributes module
+	  to work properly.
+
+config ISCSI_IBFT
+	tristate "iSCSI Boot Firmware Table Attributes module"
+	select ISCSI_BOOT_SYSFS
+	depends on ISCSI_IBFT_FIND && SCSI && SCSI_LOWLEVEL
+	default	n
+	help
+	  This option enables support for detecting and exposing the iSCSI
+	  Boot Firmware Table (iBFT) via sysfs to userspace. If you wish to
+	  detect iSCSI boot parameters dynamically during system boot, say Y.
+	  Otherwise, say N.
+
+config RASPBERRYPI_FIRMWARE
+	tristate "Raspberry Pi Firmware Driver"
+	depends on BCM2835_MBOX
+	help
+	  This option enables support for communicating with the firmware on the
+	  Raspberry Pi.
+
+config FW_CFG_SYSFS
+	tristate "QEMU fw_cfg device support in sysfs"
+	depends on SYSFS && (ARM || ARM64 || PPC_PMAC || SPARC || X86)
+	depends on HAS_IOPORT_MAP
+	default n
+	help
+	  Say Y or M here to enable the exporting of the QEMU firmware
+	  configuration (fw_cfg) file entries via sysfs. Entries are
+	  found under /sys/firmware/fw_cfg when this option is enabled
+	  and loaded.
+
+config FW_CFG_SYSFS_CMDLINE
+	bool "QEMU fw_cfg device parameter parsing"
+	depends on FW_CFG_SYSFS
+	help
+	  Allow the qemu_fw_cfg device to be initialized via the kernel
+	  command line or using a module parameter.
+	  WARNING: Using incorrect parameters (base address in particular)
+	  may crash your system.
+
+config QCOM_SCM
+	bool
+	depends on ARM || ARM64
+	select RESET_CONTROLLER
+
+config QCOM_SCM_32
+	def_bool y
+	depends on QCOM_SCM && ARM
+
+config QCOM_SCM_64
+	def_bool y
+	depends on QCOM_SCM && ARM64
+
+config QCOM_SCM_DOWNLOAD_MODE_DEFAULT
+	bool "Qualcomm download mode enabled by default"
+	depends on QCOM_SCM
+	help
+	  A device with "download mode" enabled will upon an unexpected
+	  warm-restart enter a special debug mode that allows the user to
+	  "download" memory content over USB for offline postmortem analysis.
+	  The feature can be enabled/disabled on the kernel command line.
+
+	  Say Y here to enable "download mode" by default.
+
+config TI_SCI_PROTOCOL
+	tristate "TI System Control Interface (TISCI) Message Protocol"
+	depends on TI_MESSAGE_MANAGER
+	help
+	  TI System Control Interface (TISCI) Message Protocol is used to
+	  manage compute entities such as ARM cores and DSPs via the system
+	  controller on complex System on Chip (SoC) designs, such as TI's
+	  Keystone generation SoCs.
+
+	  The system controller provides various facilities, including
+	  power management support.
+
+	  This protocol library is used by client drivers to access the
+	  features provided by the system controller.
+
+config HAVE_ARM_SMCCC
+	bool
+
+source "drivers/firmware/broadcom/Kconfig"
+source "drivers/firmware/google/Kconfig"
+source "drivers/firmware/efi/Kconfig"
+source "drivers/firmware/meson/Kconfig"
+source "drivers/firmware/tegra/Kconfig"
+
+endmenu
diff --git a/drivers/firmware/Makefile b/drivers/firmware/Makefile
new file mode 100644
index 0000000..e18a041
--- /dev/null
+++ b/drivers/firmware/Makefile
@@ -0,0 +1,34 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for the linux kernel.
+#
+obj-$(CONFIG_ARM_PSCI_FW)	+= psci.o
+obj-$(CONFIG_ARM_PSCI_CHECKER)	+= psci_checker.o
+obj-$(CONFIG_ARM_SCPI_PROTOCOL)	+= arm_scpi.o
+obj-$(CONFIG_ARM_SCPI_POWER_DOMAIN) += scpi_pm_domain.o
+obj-$(CONFIG_ARM_SDE_INTERFACE)	+= arm_sdei.o
+obj-$(CONFIG_DMI)		+= dmi_scan.o
+obj-$(CONFIG_DMI_SYSFS)		+= dmi-sysfs.o
+obj-$(CONFIG_EDD)		+= edd.o
+obj-$(CONFIG_EFI_PCDP)		+= pcdp.o
+obj-$(CONFIG_DELL_RBU)          += dell_rbu.o
+obj-$(CONFIG_DCDBAS)		+= dcdbas.o
+obj-$(CONFIG_DMIID)		+= dmi-id.o
+obj-$(CONFIG_ISCSI_IBFT_FIND)	+= iscsi_ibft_find.o
+obj-$(CONFIG_ISCSI_IBFT)	+= iscsi_ibft.o
+obj-$(CONFIG_FIRMWARE_MEMMAP)	+= memmap.o
+obj-$(CONFIG_RASPBERRYPI_FIRMWARE) += raspberrypi.o
+obj-$(CONFIG_FW_CFG_SYSFS)	+= qemu_fw_cfg.o
+obj-$(CONFIG_QCOM_SCM)		+= qcom_scm.o
+obj-$(CONFIG_QCOM_SCM_64)	+= qcom_scm-64.o
+obj-$(CONFIG_QCOM_SCM_32)	+= qcom_scm-32.o
+CFLAGS_qcom_scm-32.o :=$(call as-instr,.arch armv7-a\n.arch_extension sec,-DREQUIRES_SEC=1) -march=armv7-a
+obj-$(CONFIG_TI_SCI_PROTOCOL)	+= ti_sci.o
+
+obj-$(CONFIG_ARM_SCMI_PROTOCOL)	+= arm_scmi/
+obj-y				+= broadcom/
+obj-y				+= meson/
+obj-$(CONFIG_GOOGLE_FIRMWARE)	+= google/
+obj-$(CONFIG_EFI)		+= efi/
+obj-$(CONFIG_UEFI_CPER)		+= efi/
+obj-y				+= tegra/
diff --git a/drivers/firmware/arm_scmi/Makefile b/drivers/firmware/arm_scmi/Makefile
new file mode 100644
index 0000000..99e36c5
--- /dev/null
+++ b/drivers/firmware/arm_scmi/Makefile
@@ -0,0 +1,5 @@
+obj-y	= scmi-bus.o scmi-driver.o scmi-protocols.o
+scmi-bus-y = bus.o
+scmi-driver-y = driver.o
+scmi-protocols-y = base.o clock.o perf.o power.o sensors.o
+obj-$(CONFIG_ARM_SCMI_POWER_DOMAIN) += scmi_pm_domain.o
diff --git a/drivers/firmware/arm_scmi/base.c b/drivers/firmware/arm_scmi/base.c
new file mode 100644
index 0000000..9dff33e
--- /dev/null
+++ b/drivers/firmware/arm_scmi/base.c
@@ -0,0 +1,258 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * System Control and Management Interface (SCMI) Base Protocol
+ *
+ * Copyright (C) 2018 ARM Ltd.
+ */
+
+#include "common.h"
+
+enum scmi_base_protocol_cmd {
+	BASE_DISCOVER_VENDOR = 0x3,
+	BASE_DISCOVER_SUB_VENDOR = 0x4,
+	BASE_DISCOVER_IMPLEMENT_VERSION = 0x5,
+	BASE_DISCOVER_LIST_PROTOCOLS = 0x6,
+	BASE_DISCOVER_AGENT = 0x7,
+	BASE_NOTIFY_ERRORS = 0x8,
+};
+
+struct scmi_msg_resp_base_attributes {
+	u8 num_protocols;
+	u8 num_agents;
+	__le16 reserved;
+};
+
+/**
+ * scmi_base_attributes_get() - gets the implementation details
+ *	that are associated with the base protocol.
+ *
+ * @handle: SCMI entity handle
+ *
+ * Return: 0 on success, else appropriate SCMI error.
+ */
+static int scmi_base_attributes_get(const struct scmi_handle *handle)
+{
+	int ret;
+	struct scmi_xfer *t;
+	struct scmi_msg_resp_base_attributes *attr_info;
+	struct scmi_revision_info *rev = handle->version;
+
+	ret = scmi_xfer_get_init(handle, PROTOCOL_ATTRIBUTES,
+				 SCMI_PROTOCOL_BASE, 0, sizeof(*attr_info), &t);
+	if (ret)
+		return ret;
+
+	ret = scmi_do_xfer(handle, t);
+	if (!ret) {
+		attr_info = t->rx.buf;
+		rev->num_protocols = attr_info->num_protocols;
+		rev->num_agents = attr_info->num_agents;
+	}
+
+	scmi_xfer_put(handle, t);
+
+	return ret;
+}
+
+/**
+ * scmi_base_vendor_id_get() - gets vendor/subvendor identifier ASCII string.
+ *
+ * @handle: SCMI entity handle
+ * @sub_vendor: specify true if sub-vendor ID is needed
+ *
+ * Return: 0 on success, else appropriate SCMI error.
+ */
+static int
+scmi_base_vendor_id_get(const struct scmi_handle *handle, bool sub_vendor)
+{
+	u8 cmd;
+	int ret, size;
+	char *vendor_id;
+	struct scmi_xfer *t;
+	struct scmi_revision_info *rev = handle->version;
+
+	if (sub_vendor) {
+		cmd = BASE_DISCOVER_SUB_VENDOR;
+		vendor_id = rev->sub_vendor_id;
+		size = ARRAY_SIZE(rev->sub_vendor_id);
+	} else {
+		cmd = BASE_DISCOVER_VENDOR;
+		vendor_id = rev->vendor_id;
+		size = ARRAY_SIZE(rev->vendor_id);
+	}
+
+	ret = scmi_xfer_get_init(handle, cmd, SCMI_PROTOCOL_BASE, 0, size, &t);
+	if (ret)
+		return ret;
+
+	ret = scmi_do_xfer(handle, t);
+	if (!ret)
+		memcpy(vendor_id, t->rx.buf, size);
+
+	scmi_xfer_put(handle, t);
+
+	return ret;
+}
+
+/**
 * scmi_base_implementation_version_get() - gets the vendor-specific
+ *	32-bit implementation version. The format of the version number
+ *	is vendor specific.
+ *
+ * @handle: SCMI entity handle
+ *
+ * Return: 0 on success, else appropriate SCMI error.
+ */
+static int
+scmi_base_implementation_version_get(const struct scmi_handle *handle)
+{
+	int ret;
+	__le32 *impl_ver;
+	struct scmi_xfer *t;
+	struct scmi_revision_info *rev = handle->version;
+
+	ret = scmi_xfer_get_init(handle, BASE_DISCOVER_IMPLEMENT_VERSION,
+				 SCMI_PROTOCOL_BASE, 0, sizeof(*impl_ver), &t);
+	if (ret)
+		return ret;
+
+	ret = scmi_do_xfer(handle, t);
+	if (!ret) {
+		impl_ver = t->rx.buf;
+		rev->impl_ver = le32_to_cpu(*impl_ver);
+	}
+
+	scmi_xfer_put(handle, t);
+
+	return ret;
+}
+
+/**
+ * scmi_base_implementation_list_get() - gets the list of protocols that
+ *	the OSPM is allowed to access
+ *
+ * @handle: SCMI entity handle
+ * @protocols_imp: pointer to hold the list of protocol identifiers
+ *
+ * Return: 0 on success, else appropriate SCMI error.
+ */
+static int scmi_base_implementation_list_get(const struct scmi_handle *handle,
+					     u8 *protocols_imp)
+{
+	u8 *list;
+	int ret, loop;
+	struct scmi_xfer *t;
+	__le32 *num_skip, *num_ret;
+	u32 tot_num_ret = 0, loop_num_ret;
+	struct device *dev = handle->dev;
+
+	ret = scmi_xfer_get_init(handle, BASE_DISCOVER_LIST_PROTOCOLS,
+				 SCMI_PROTOCOL_BASE, sizeof(*num_skip), 0, &t);
+	if (ret)
+		return ret;
+
+	num_skip = t->tx.buf;
+	num_ret = t->rx.buf;
+	list = t->rx.buf + sizeof(*num_ret);
+
+	do {
+		/* Set the number of protocols to be skipped/already read */
+		*num_skip = cpu_to_le32(tot_num_ret);
+
+		ret = scmi_do_xfer(handle, t);
+		if (ret)
+			break;
+
+		loop_num_ret = le32_to_cpu(*num_ret);
+		if (tot_num_ret + loop_num_ret > MAX_PROTOCOLS_IMP) {
+			dev_err(dev, "No. of protocols > MAX_PROTOCOLS_IMP\n");
+			break;
+		}
+
+		for (loop = 0; loop < loop_num_ret; loop++)
+			protocols_imp[tot_num_ret + loop] = *(list + loop);
+
+		tot_num_ret += loop_num_ret;
+	} while (loop_num_ret);
+
+	scmi_xfer_put(handle, t);
+
+	return ret;
+}
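+
+/*
+ * For example (illustrative only): with 10 implemented protocols and a
+ * reply buffer that carries at most 4 identifiers per command, the loop
+ * above issues three BASE_DISCOVER_LIST_PROTOCOLS commands with
+ * *num_skip set to 0, 4 and 8 respectively.
+ */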
+
+/**
+ * scmi_base_discover_agent_get() - discover the name of an agent
+ *
+ * @handle: SCMI entity handle
+ * @id: Agent identifier
+ * @name: Agent identifier ASCII string
+ *
+ * An agent id of 0 is reserved to identify the platform itself.
+ * Generally, the operating system is represented as "OSPM".
+ *
+ * Return: 0 on success, else appropriate SCMI error.
+ */
+static int scmi_base_discover_agent_get(const struct scmi_handle *handle,
+					int id, char *name)
+{
+	int ret;
+	struct scmi_xfer *t;
+
+	ret = scmi_xfer_get_init(handle, BASE_DISCOVER_AGENT,
+				 SCMI_PROTOCOL_BASE, sizeof(__le32),
+				 SCMI_MAX_STR_SIZE, &t);
+	if (ret)
+		return ret;
+
+	*(__le32 *)t->tx.buf = cpu_to_le32(id);
+
+	ret = scmi_do_xfer(handle, t);
+	if (!ret)
+		memcpy(name, t->rx.buf, SCMI_MAX_STR_SIZE);
+
+	scmi_xfer_put(handle, t);
+
+	return ret;
+}
+
+int scmi_base_protocol_init(struct scmi_handle *h)
+{
+	int id, ret;
+	u8 *prot_imp;
+	u32 version;
+	char name[SCMI_MAX_STR_SIZE];
+	const struct scmi_handle *handle = h;
+	struct device *dev = handle->dev;
+	struct scmi_revision_info *rev = handle->version;
+
+	ret = scmi_version_get(handle, SCMI_PROTOCOL_BASE, &version);
+	if (ret)
+		return ret;
+
+	prot_imp = devm_kcalloc(dev, MAX_PROTOCOLS_IMP, sizeof(u8), GFP_KERNEL);
+	if (!prot_imp)
+		return -ENOMEM;
+
+	rev->major_ver = PROTOCOL_REV_MAJOR(version);
+	rev->minor_ver = PROTOCOL_REV_MINOR(version);
+
+	scmi_base_attributes_get(handle);
+	scmi_base_vendor_id_get(handle, false);
+	scmi_base_vendor_id_get(handle, true);
+	scmi_base_implementation_version_get(handle);
+	scmi_base_implementation_list_get(handle, prot_imp);
+	scmi_setup_protocol_implemented(handle, prot_imp);
+
+	dev_info(dev, "SCMI Protocol v%d.%d '%s:%s' Firmware version 0x%x\n",
+		 rev->major_ver, rev->minor_ver, rev->vendor_id,
+		 rev->sub_vendor_id, rev->impl_ver);
+	dev_dbg(dev, "Found %d protocol(s) %d agent(s)\n", rev->num_protocols,
+		rev->num_agents);
+
+	for (id = 0; id < rev->num_agents; id++) {
+		scmi_base_discover_agent_get(handle, id, name);
+		dev_dbg(dev, "Agent %d: %s\n", id, name);
+	}
+
+	return 0;
+}
diff --git a/drivers/firmware/arm_scmi/bus.c b/drivers/firmware/arm_scmi/bus.c
new file mode 100644
index 0000000..472c88a
--- /dev/null
+++ b/drivers/firmware/arm_scmi/bus.c
@@ -0,0 +1,223 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * System Control and Management Interface (SCMI) Message Protocol bus layer
+ *
+ * Copyright (C) 2018 ARM Ltd.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/device.h>
+
+#include "common.h"
+
+static DEFINE_IDA(scmi_bus_id);
+static DEFINE_IDR(scmi_protocols);
+static DEFINE_SPINLOCK(protocol_lock);
+
+static const struct scmi_device_id *
+scmi_dev_match_id(struct scmi_device *scmi_dev, struct scmi_driver *scmi_drv)
+{
+	const struct scmi_device_id *id = scmi_drv->id_table;
+
+	if (!id)
+		return NULL;
+
+	for (; id->protocol_id; id++)
+		if (id->protocol_id == scmi_dev->protocol_id)
+			return id;
+
+	return NULL;
+}
+
+static int scmi_dev_match(struct device *dev, struct device_driver *drv)
+{
+	struct scmi_driver *scmi_drv = to_scmi_driver(drv);
+	struct scmi_device *scmi_dev = to_scmi_dev(dev);
+	const struct scmi_device_id *id;
+
+	id = scmi_dev_match_id(scmi_dev, scmi_drv);
+	if (id)
+		return 1;
+
+	return 0;
+}
+
+static int scmi_protocol_init(int protocol_id, struct scmi_handle *handle)
+{
+	scmi_prot_init_fn_t fn = idr_find(&scmi_protocols, protocol_id);
+
+	if (unlikely(!fn))
+		return -EINVAL;
+	return fn(handle);
+}
+
+static int scmi_dev_probe(struct device *dev)
+{
+	struct scmi_driver *scmi_drv = to_scmi_driver(dev->driver);
+	struct scmi_device *scmi_dev = to_scmi_dev(dev);
+	const struct scmi_device_id *id;
+	int ret;
+
+	id = scmi_dev_match_id(scmi_dev, scmi_drv);
+	if (!id)
+		return -ENODEV;
+
+	if (!scmi_dev->handle)
+		return -EPROBE_DEFER;
+
+	ret = scmi_protocol_init(scmi_dev->protocol_id, scmi_dev->handle);
+	if (ret)
+		return ret;
+
+	return scmi_drv->probe(scmi_dev);
+}
+
+static int scmi_dev_remove(struct device *dev)
+{
+	struct scmi_driver *scmi_drv = to_scmi_driver(dev->driver);
+	struct scmi_device *scmi_dev = to_scmi_dev(dev);
+
+	if (scmi_drv->remove)
+		scmi_drv->remove(scmi_dev);
+
+	return 0;
+}
+
+static struct bus_type scmi_bus_type = {
+	.name =	"scmi_protocol",
+	.match = scmi_dev_match,
+	.probe = scmi_dev_probe,
+	.remove = scmi_dev_remove,
+};
+
+int scmi_driver_register(struct scmi_driver *driver, struct module *owner,
+			 const char *mod_name)
+{
+	int retval;
+
+	driver->driver.bus = &scmi_bus_type;
+	driver->driver.name = driver->name;
+	driver->driver.owner = owner;
+	driver->driver.mod_name = mod_name;
+
+	retval = driver_register(&driver->driver);
+	if (!retval)
+		pr_debug("registered new scmi driver %s\n", driver->name);
+
+	return retval;
+}
+EXPORT_SYMBOL_GPL(scmi_driver_register);
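+
+/*
+ * A minimal client driver sketch (illustrative only, not part of this
+ * file), assuming the usual definitions from linux/scmi_protocol.h;
+ * my_probe here stands for an int (*)(struct scmi_device *) callback:
+ *
+ *	static const struct scmi_device_id my_id_table[] = {
+ *		{ SCMI_PROTOCOL_CLOCK },
+ *		{ },
+ *	};
+ *
+ *	static struct scmi_driver my_scmi_driver = {
+ *		.name = "my-scmi-client",
+ *		.probe = my_probe,
+ *		.id_table = my_id_table,
+ *	};
+ *	module_scmi_driver(my_scmi_driver);
+ */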
+
+void scmi_driver_unregister(struct scmi_driver *driver)
+{
+	driver_unregister(&driver->driver);
+}
+EXPORT_SYMBOL_GPL(scmi_driver_unregister);
+
+struct scmi_device *
+scmi_device_create(struct device_node *np, struct device *parent, int protocol)
+{
+	int id, retval;
+	struct scmi_device *scmi_dev;
+
+	scmi_dev = kzalloc(sizeof(*scmi_dev), GFP_KERNEL);
+	if (!scmi_dev)
+		return NULL;
+
+	id = ida_simple_get(&scmi_bus_id, 1, 0, GFP_KERNEL);
+	if (id < 0)
+		goto free_mem;
+
+	scmi_dev->id = id;
+	scmi_dev->protocol_id = protocol;
+	scmi_dev->dev.parent = parent;
+	scmi_dev->dev.of_node = np;
+	scmi_dev->dev.bus = &scmi_bus_type;
+	dev_set_name(&scmi_dev->dev, "scmi_dev.%d", id);
+
+	retval = device_register(&scmi_dev->dev);
+	if (retval)
+		goto put_dev;
+
+	return scmi_dev;
+put_dev:
+	put_device(&scmi_dev->dev);
+	ida_simple_remove(&scmi_bus_id, id);
+free_mem:
+	kfree(scmi_dev);
+	return NULL;
+}
+
+void scmi_device_destroy(struct scmi_device *scmi_dev)
+{
+	scmi_handle_put(scmi_dev->handle);
+	device_unregister(&scmi_dev->dev);
+	ida_simple_remove(&scmi_bus_id, scmi_dev->id);
+	kfree(scmi_dev);
+}
+
+void scmi_set_handle(struct scmi_device *scmi_dev)
+{
+	scmi_dev->handle = scmi_handle_get(&scmi_dev->dev);
+}
+
+int scmi_protocol_register(int protocol_id, scmi_prot_init_fn_t fn)
+{
+	int ret;
+
+	spin_lock(&protocol_lock);
+	ret = idr_alloc(&scmi_protocols, fn, protocol_id, protocol_id + 1,
+			GFP_ATOMIC);
+	spin_unlock(&protocol_lock);
+	if (ret != protocol_id)
+		pr_err("unable to allocate SCMI idr slot, err %d\n", ret);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(scmi_protocol_register);
+
+void scmi_protocol_unregister(int protocol_id)
+{
+	spin_lock(&protocol_lock);
+	idr_remove(&scmi_protocols, protocol_id);
+	spin_unlock(&protocol_lock);
+}
+EXPORT_SYMBOL_GPL(scmi_protocol_unregister);
+
+static int __scmi_devices_unregister(struct device *dev, void *data)
+{
+	struct scmi_device *scmi_dev = to_scmi_dev(dev);
+
+	scmi_device_destroy(scmi_dev);
+	return 0;
+}
+
+static void scmi_devices_unregister(void)
+{
+	bus_for_each_dev(&scmi_bus_type, NULL, NULL, __scmi_devices_unregister);
+}
+
+static int __init scmi_bus_init(void)
+{
+	int retval;
+
+	retval = bus_register(&scmi_bus_type);
+	if (retval)
+		pr_err("scmi protocol bus register failed (%d)\n", retval);
+
+	return retval;
+}
+subsys_initcall(scmi_bus_init);
+
+static void __exit scmi_bus_exit(void)
+{
+	scmi_devices_unregister();
+	bus_unregister(&scmi_bus_type);
+	ida_destroy(&scmi_bus_id);
+}
+module_exit(scmi_bus_exit);
diff --git a/drivers/firmware/arm_scmi/clock.c b/drivers/firmware/arm_scmi/clock.c
new file mode 100644
index 0000000..e4119eb
--- /dev/null
+++ b/drivers/firmware/arm_scmi/clock.c
@@ -0,0 +1,343 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * System Control and Management Interface (SCMI) Clock Protocol
+ *
+ * Copyright (C) 2018 ARM Ltd.
+ */
+
+#include "common.h"
+
+enum scmi_clock_protocol_cmd {
+	CLOCK_ATTRIBUTES = 0x3,
+	CLOCK_DESCRIBE_RATES = 0x4,
+	CLOCK_RATE_SET = 0x5,
+	CLOCK_RATE_GET = 0x6,
+	CLOCK_CONFIG_SET = 0x7,
+};
+
+struct scmi_msg_resp_clock_protocol_attributes {
+	__le16 num_clocks;
+	u8 max_async_req;
+	u8 reserved;
+};
+
+struct scmi_msg_resp_clock_attributes {
+	__le32 attributes;
+#define	CLOCK_ENABLE	BIT(0)
+	u8 name[SCMI_MAX_STR_SIZE];
+};
+
+struct scmi_clock_set_config {
+	__le32 id;
+	__le32 attributes;
+};
+
+struct scmi_msg_clock_describe_rates {
+	__le32 id;
+	__le32 rate_index;
+};
+
+struct scmi_msg_resp_clock_describe_rates {
+	__le32 num_rates_flags;
+#define NUM_RETURNED(x)		((x) & 0xfff)
+#define RATE_DISCRETE(x)	!((x) & BIT(12))
+#define NUM_REMAINING(x)	((x) >> 16)
+	struct {
+		__le32 value_low;
+		__le32 value_high;
+	} rate[0];
+#define RATE_TO_U64(X)		\
+({				\
+	typeof(X) x = (X);	\
+	le32_to_cpu((x).value_low) | (u64)le32_to_cpu((x).value_high) << 32; \
+})
+};
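+
+/*
+ * For example (illustrative only): a num_rates_flags word of 0x00030004
+ * decodes with the macros above as 4 rates returned, 3 remaining and,
+ * since bit 12 is clear, a discrete rate list rather than a
+ * min/max/step triplet.
+ */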
+
+struct scmi_clock_set_rate {
+	__le32 flags;
+#define CLOCK_SET_ASYNC		BIT(0)
+#define CLOCK_SET_DELAYED	BIT(1)
+#define CLOCK_SET_ROUND_UP	BIT(2)
+#define CLOCK_SET_ROUND_AUTO	BIT(3)
+	__le32 id;
+	__le32 value_low;
+	__le32 value_high;
+};
+
+struct clock_info {
+	int num_clocks;
+	int max_async_req;
+	struct scmi_clock_info *clk;
+};
+
+static int scmi_clock_protocol_attributes_get(const struct scmi_handle *handle,
+					      struct clock_info *ci)
+{
+	int ret;
+	struct scmi_xfer *t;
+	struct scmi_msg_resp_clock_protocol_attributes *attr;
+
+	ret = scmi_xfer_get_init(handle, PROTOCOL_ATTRIBUTES,
+				 SCMI_PROTOCOL_CLOCK, 0, sizeof(*attr), &t);
+	if (ret)
+		return ret;
+
+	attr = t->rx.buf;
+
+	ret = scmi_do_xfer(handle, t);
+	if (!ret) {
+		ci->num_clocks = le16_to_cpu(attr->num_clocks);
+		ci->max_async_req = attr->max_async_req;
+	}
+
+	scmi_xfer_put(handle, t);
+	return ret;
+}
+
+static int scmi_clock_attributes_get(const struct scmi_handle *handle,
+				     u32 clk_id, struct scmi_clock_info *clk)
+{
+	int ret;
+	struct scmi_xfer *t;
+	struct scmi_msg_resp_clock_attributes *attr;
+
+	ret = scmi_xfer_get_init(handle, CLOCK_ATTRIBUTES, SCMI_PROTOCOL_CLOCK,
+				 sizeof(clk_id), sizeof(*attr), &t);
+	if (ret)
+		return ret;
+
+	*(__le32 *)t->tx.buf = cpu_to_le32(clk_id);
+	attr = t->rx.buf;
+
+	ret = scmi_do_xfer(handle, t);
+	if (!ret)
+		memcpy(clk->name, attr->name, SCMI_MAX_STR_SIZE);
+	else
+		clk->name[0] = '\0';
+
+	scmi_xfer_put(handle, t);
+	return ret;
+}
+
+static int
+scmi_clock_describe_rates_get(const struct scmi_handle *handle, u32 clk_id,
+			      struct scmi_clock_info *clk)
+{
+	u64 *rate;
+	int ret, cnt;
+	bool rate_discrete = false;
+	u32 tot_rate_cnt = 0, rates_flag;
+	u16 num_returned, num_remaining;
+	struct scmi_xfer *t;
+	struct scmi_msg_clock_describe_rates *clk_desc;
+	struct scmi_msg_resp_clock_describe_rates *rlist;
+
+	ret = scmi_xfer_get_init(handle, CLOCK_DESCRIBE_RATES,
+				 SCMI_PROTOCOL_CLOCK, sizeof(*clk_desc), 0, &t);
+	if (ret)
+		return ret;
+
+	clk_desc = t->tx.buf;
+	rlist = t->rx.buf;
+
+	do {
+		clk_desc->id = cpu_to_le32(clk_id);
+		/* Set the number of rates to be skipped/already read */
+		clk_desc->rate_index = cpu_to_le32(tot_rate_cnt);
+
+		ret = scmi_do_xfer(handle, t);
+		if (ret)
+			goto err;
+
+		rates_flag = le32_to_cpu(rlist->num_rates_flags);
+		num_remaining = NUM_REMAINING(rates_flag);
+		rate_discrete = RATE_DISCRETE(rates_flag);
+		num_returned = NUM_RETURNED(rates_flag);
+
+		if (tot_rate_cnt + num_returned > SCMI_MAX_NUM_RATES) {
+			dev_err(handle->dev, "No. of rates > MAX_NUM_RATES\n");
+			break;
+		}
+
+		if (!rate_discrete) {
+			clk->range.min_rate = RATE_TO_U64(rlist->rate[0]);
+			clk->range.max_rate = RATE_TO_U64(rlist->rate[1]);
+			clk->range.step_size = RATE_TO_U64(rlist->rate[2]);
+			dev_dbg(handle->dev, "Min %llu Max %llu Step %llu Hz\n",
+				clk->range.min_rate, clk->range.max_rate,
+				clk->range.step_size);
+			break;
+		}
+
+		rate = &clk->list.rates[tot_rate_cnt];
+		for (cnt = 0; cnt < num_returned; cnt++, rate++) {
+			*rate = RATE_TO_U64(rlist->rate[cnt]);
+			dev_dbg(handle->dev, "Rate %llu Hz\n", *rate);
+		}
+
+		tot_rate_cnt += num_returned;
+		/*
+		 * check for both returned and remaining to avoid infinite
+		 * loop due to buggy firmware
+		 */
+	} while (num_returned && num_remaining);
+
+	if (rate_discrete)
+		clk->list.num_rates = tot_rate_cnt;
+
+err:
+	scmi_xfer_put(handle, t);
+	return ret;
+}
+
+static int
+scmi_clock_rate_get(const struct scmi_handle *handle, u32 clk_id, u64 *value)
+{
+	int ret;
+	struct scmi_xfer *t;
+
+	ret = scmi_xfer_get_init(handle, CLOCK_RATE_GET, SCMI_PROTOCOL_CLOCK,
+				 sizeof(__le32), sizeof(u64), &t);
+	if (ret)
+		return ret;
+
+	*(__le32 *)t->tx.buf = cpu_to_le32(clk_id);
+
+	ret = scmi_do_xfer(handle, t);
+	if (!ret) {
+		__le32 *pval = t->rx.buf;
+
+		*value = le32_to_cpu(*pval);
+		*value |= (u64)le32_to_cpu(*(pval + 1)) << 32;
+	}
+
+	scmi_xfer_put(handle, t);
+	return ret;
+}
+
+static int scmi_clock_rate_set(const struct scmi_handle *handle, u32 clk_id,
+			       u32 config, u64 rate)
+{
+	int ret;
+	struct scmi_xfer *t;
+	struct scmi_clock_set_rate *cfg;
+
+	ret = scmi_xfer_get_init(handle, CLOCK_RATE_SET, SCMI_PROTOCOL_CLOCK,
+				 sizeof(*cfg), 0, &t);
+	if (ret)
+		return ret;
+
+	cfg = t->tx.buf;
+	cfg->flags = cpu_to_le32(config);
+	cfg->id = cpu_to_le32(clk_id);
+	cfg->value_low = cpu_to_le32(rate & 0xffffffff);
+	cfg->value_high = cpu_to_le32(rate >> 32);
+
+	ret = scmi_do_xfer(handle, t);
+
+	scmi_xfer_put(handle, t);
+	return ret;
+}
+
+static int
+scmi_clock_config_set(const struct scmi_handle *handle, u32 clk_id, u32 config)
+{
+	int ret;
+	struct scmi_xfer *t;
+	struct scmi_clock_set_config *cfg;
+
+	ret = scmi_xfer_get_init(handle, CLOCK_CONFIG_SET, SCMI_PROTOCOL_CLOCK,
+				 sizeof(*cfg), 0, &t);
+	if (ret)
+		return ret;
+
+	cfg = t->tx.buf;
+	cfg->id = cpu_to_le32(clk_id);
+	cfg->attributes = cpu_to_le32(config);
+
+	ret = scmi_do_xfer(handle, t);
+
+	scmi_xfer_put(handle, t);
+	return ret;
+}
+
+static int scmi_clock_enable(const struct scmi_handle *handle, u32 clk_id)
+{
+	return scmi_clock_config_set(handle, clk_id, CLOCK_ENABLE);
+}
+
+static int scmi_clock_disable(const struct scmi_handle *handle, u32 clk_id)
+{
+	return scmi_clock_config_set(handle, clk_id, 0);
+}
+
+static int scmi_clock_count_get(const struct scmi_handle *handle)
+{
+	struct clock_info *ci = handle->clk_priv;
+
+	return ci->num_clocks;
+}
+
+static const struct scmi_clock_info *
+scmi_clock_info_get(const struct scmi_handle *handle, u32 clk_id)
+{
+	struct clock_info *ci = handle->clk_priv;
+	struct scmi_clock_info *clk = ci->clk + clk_id;
+
+	if (!clk->name[0])
+		return NULL;
+
+	return clk;
+}
+
+static struct scmi_clk_ops clk_ops = {
+	.count_get = scmi_clock_count_get,
+	.info_get = scmi_clock_info_get,
+	.rate_get = scmi_clock_rate_get,
+	.rate_set = scmi_clock_rate_set,
+	.enable = scmi_clock_enable,
+	.disable = scmi_clock_disable,
+};
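+
+/*
+ * Illustrative consumer usage (not part of this file): once the clock
+ * protocol is initialised, a client holding an SCMI handle can do e.g.
+ *
+ *	u64 rate;
+ *	int ret = handle->clk_ops->rate_get(handle, clk_id, &rate);
+ */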
+
+static int scmi_clock_protocol_init(struct scmi_handle *handle)
+{
+	u32 version;
+	int clkid, ret;
+	struct clock_info *cinfo;
+
+	scmi_version_get(handle, SCMI_PROTOCOL_CLOCK, &version);
+
+	dev_dbg(handle->dev, "Clock Version %d.%d\n",
+		PROTOCOL_REV_MAJOR(version), PROTOCOL_REV_MINOR(version));
+
+	cinfo = devm_kzalloc(handle->dev, sizeof(*cinfo), GFP_KERNEL);
+	if (!cinfo)
+		return -ENOMEM;
+
+	scmi_clock_protocol_attributes_get(handle, cinfo);
+
+	cinfo->clk = devm_kcalloc(handle->dev, cinfo->num_clocks,
+				  sizeof(*cinfo->clk), GFP_KERNEL);
+	if (!cinfo->clk)
+		return -ENOMEM;
+
+	for (clkid = 0; clkid < cinfo->num_clocks; clkid++) {
+		struct scmi_clock_info *clk = cinfo->clk + clkid;
+
+		ret = scmi_clock_attributes_get(handle, clkid, clk);
+		if (!ret)
+			scmi_clock_describe_rates_get(handle, clkid, clk);
+	}
+
+	handle->clk_ops = &clk_ops;
+	handle->clk_priv = cinfo;
+
+	return 0;
+}
+
+static int __init scmi_clock_init(void)
+{
+	return scmi_protocol_register(SCMI_PROTOCOL_CLOCK,
+				      &scmi_clock_protocol_init);
+}
+subsys_initcall(scmi_clock_init);
diff --git a/drivers/firmware/arm_scmi/common.h b/drivers/firmware/arm_scmi/common.h
new file mode 100644
index 0000000..937a930
--- /dev/null
+++ b/drivers/firmware/arm_scmi/common.h
@@ -0,0 +1,107 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * System Control and Management Interface (SCMI) Message Protocol
+ * driver common header file containing some definitions, structures
+ * and function prototypes used in all the different SCMI protocols.
+ *
+ * Copyright (C) 2018 ARM Ltd.
+ */
+
+#ifndef _SCMI_COMMON_H
+#define _SCMI_COMMON_H
+
+#include <linux/bitfield.h>
+#include <linux/completion.h>
+#include <linux/device.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/scmi_protocol.h>
+#include <linux/types.h>
+
+#define PROTOCOL_REV_MINOR_MASK	GENMASK(15, 0)
+#define PROTOCOL_REV_MAJOR_MASK	GENMASK(31, 16)
+#define PROTOCOL_REV_MAJOR(x)	(u16)(FIELD_GET(PROTOCOL_REV_MAJOR_MASK, (x)))
+#define PROTOCOL_REV_MINOR(x)	(u16)(FIELD_GET(PROTOCOL_REV_MINOR_MASK, (x)))
+#define MAX_PROTOCOLS_IMP	16
+#define MAX_OPPS		16
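+
+/*
+ * For example (illustrative only): a protocol version word of 0x00020001
+ * decodes with PROTOCOL_REV_MAJOR/MINOR above to major version 2 and
+ * minor version 1.
+ */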
+
+enum scmi_common_cmd {
+	PROTOCOL_VERSION = 0x0,
+	PROTOCOL_ATTRIBUTES = 0x1,
+	PROTOCOL_MESSAGE_ATTRIBUTES = 0x2,
+};
+
+/**
+ * struct scmi_msg_resp_prot_version - Response for a protocol version request
+ *
+ * @major_version: Major version of the ABI that firmware supports
+ * @minor_version: Minor version of the ABI that firmware supports
+ *
+ * In general, ABI version changes follow the rule that minor version increments
+ * are backward compatible. Major revision changes in ABI may not be
+ * backward compatible.
+ *
+ * Response to a generic message with message type PROTOCOL_VERSION
+ */
+struct scmi_msg_resp_prot_version {
+	__le16 minor_version;
+	__le16 major_version;
+};
+
+/**
+ * struct scmi_msg_hdr - Message(Tx/Rx) header
+ *
+ * @id: The identifier of the command being sent
+ * @protocol_id: The identifier of the protocol used to send @id command
+ * @seq: The token to identify the message. When a message/command returns,
+ *	the platform returns the whole message header unmodified including
+ *	the token
+ * @status: Status of the transfer once it's complete
+ * @poll_completion: Indicate if the transfer needs to be polled for
+ *	completion or interrupt mode is used
+ */
+struct scmi_msg_hdr {
+	u8 id;
+	u8 protocol_id;
+	u16 seq;
+	u32 status;
+	bool poll_completion;
+};
+
+/**
+ * struct scmi_msg - Message(Tx/Rx) structure
+ *
+ * @buf: Buffer pointer
+ * @len: Length of data in the buffer
+ */
+struct scmi_msg {
+	void *buf;
+	size_t len;
+};
+
+/**
+ * struct scmi_xfer - Structure representing a message flow
+ *
+ * @hdr: Transmit message header
+ * @tx: Transmit message
+ * @rx: Receive message, the buffer should be pre-allocated to store
+ *	message. If request-ACK protocol is used, we can reuse the same
+ *	buffer for the rx path as we use for the tx path.
+ * @done: completion event
+ */
+struct scmi_xfer {
+	struct scmi_msg_hdr hdr;
+	struct scmi_msg tx;
+	struct scmi_msg rx;
+	struct completion done;
+};
+
+void scmi_xfer_put(const struct scmi_handle *h, struct scmi_xfer *xfer);
+int scmi_do_xfer(const struct scmi_handle *h, struct scmi_xfer *xfer);
+int scmi_xfer_get_init(const struct scmi_handle *h, u8 msg_id, u8 prot_id,
+		       size_t tx_size, size_t rx_size, struct scmi_xfer **p);
+int scmi_handle_put(const struct scmi_handle *handle);
+struct scmi_handle *scmi_handle_get(struct device *dev);
+void scmi_set_handle(struct scmi_device *scmi_dev);
+int scmi_version_get(const struct scmi_handle *h, u8 protocol, u32 *version);
+void scmi_setup_protocol_implemented(const struct scmi_handle *handle,
+				     u8 *prot_imp);
+
+int scmi_base_protocol_init(struct scmi_handle *h);
+
+#endif /* _SCMI_COMMON_H */
diff --git a/drivers/firmware/arm_scmi/driver.c b/drivers/firmware/arm_scmi/driver.c
new file mode 100644
index 0000000..8f952f2
--- /dev/null
+++ b/drivers/firmware/arm_scmi/driver.c
@@ -0,0 +1,871 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * System Control and Management Interface (SCMI) Message Protocol driver
+ *
+ * SCMI Message Protocol is used between the System Control Processor(SCP)
+ * and the Application Processors(AP). The Message Handling Unit(MHU)
+ * provides a mechanism for inter-processor communication between SCP's
+ * Cortex M3 and AP.
+ *
+ * SCP offers control and management of the core/cluster power states,
+ * various power domain DVFS including the core/cluster, certain system
+ * clocks configuration, thermal sensors and many others.
+ *
+ * Copyright (C) 2018 ARM Ltd.
+ */
+
+#include <linux/bitmap.h>
+#include <linux/export.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/ktime.h>
+#include <linux/mailbox_client.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/processor.h>
+#include <linux/semaphore.h>
+#include <linux/slab.h>
+
+#include "common.h"
+
+#define MSG_ID_MASK		GENMASK(7, 0)
+#define MSG_TYPE_MASK		GENMASK(9, 8)
+#define MSG_PROTOCOL_ID_MASK	GENMASK(17, 10)
+#define MSG_TOKEN_ID_MASK	GENMASK(27, 18)
+#define MSG_XTRACT_TOKEN(hdr)	FIELD_GET(MSG_TOKEN_ID_MASK, (hdr))
+#define MSG_TOKEN_MAX		(MSG_XTRACT_TOKEN(MSG_TOKEN_ID_MASK) + 1)
+
+enum scmi_error_codes {
+	SCMI_SUCCESS = 0,	/* Success */
+	SCMI_ERR_SUPPORT = -1,	/* Not supported */
+	SCMI_ERR_PARAMS = -2,	/* Invalid Parameters */
+	SCMI_ERR_ACCESS = -3,	/* Invalid access/permission denied */
+	SCMI_ERR_ENTRY = -4,	/* Not found */
+	SCMI_ERR_RANGE = -5,	/* Value out of range */
+	SCMI_ERR_BUSY = -6,	/* Device busy */
+	SCMI_ERR_COMMS = -7,	/* Communication Error */
+	SCMI_ERR_GENERIC = -8,	/* Generic Error */
+	SCMI_ERR_HARDWARE = -9,	/* Hardware Error */
+	SCMI_ERR_PROTOCOL = -10, /* Protocol Error */
+	SCMI_ERR_MAX
+};
+
+/* List of all SCMI devices active in system */
+static LIST_HEAD(scmi_list);
+/* Protection for the entire list */
+static DEFINE_MUTEX(scmi_list_mutex);
+
+/**
+ * struct scmi_xfers_info - Structure to manage transfer information
+ *
+ * @xfer_block: Preallocated Message array
+ * @xfer_alloc_table: Bitmap table for allocated messages.
+ *	Index of this bitmap table is also used for message
+ *	sequence identifier.
+ * @xfer_lock: Protection for message allocation
+ */
+struct scmi_xfers_info {
+	struct scmi_xfer *xfer_block;
+	unsigned long *xfer_alloc_table;
+	spinlock_t xfer_lock;
+};
+
+/**
+ * struct scmi_desc - Description of SoC integration
+ *
+ * @max_rx_timeout_ms: Timeout for communication with SoC (in milliseconds)
+ * @max_msg: Maximum number of messages that can be pending
+ *	simultaneously in the system
+ * @max_msg_size: Maximum size of data per message that can be handled.
+ */
+struct scmi_desc {
+	int max_rx_timeout_ms;
+	int max_msg;
+	int max_msg_size;
+};
+
+/**
+ * struct scmi_chan_info - Structure representing an SCMI channel's information
+ *
+ * @cl: Mailbox Client
+ * @chan: Transmit/Receive mailbox channel
+ * @payload: Transmit/Receive mailbox channel payload area
+ * @dev: Reference to device in the SCMI hierarchy corresponding to this
+ *	 channel
+ * @handle: Pointer to SCMI entity handle
+ */
+struct scmi_chan_info {
+	struct mbox_client cl;
+	struct mbox_chan *chan;
+	void __iomem *payload;
+	struct device *dev;
+	struct scmi_handle *handle;
+};
+
+/**
+ * struct scmi_info - Structure representing a SCMI instance
+ *
+ * @dev: Device pointer
+ * @desc: SoC description for this instance
+ * @handle: Instance of SCMI handle to send to clients
+ * @version: SCMI revision information containing protocol version,
+ *	implementation version and (sub-)vendor identification.
+ * @minfo: Message info
+ * @tx_idr: IDR object to map protocol id to channel info pointer
+ * @protocols_imp: List of protocols implemented, currently maximum of
+ *	MAX_PROTOCOLS_IMP elements allocated by the base protocol
+ * @node: List head
+ * @users: Number of users of this instance
+ */
+struct scmi_info {
+	struct device *dev;
+	const struct scmi_desc *desc;
+	struct scmi_revision_info version;
+	struct scmi_handle handle;
+	struct scmi_xfers_info minfo;
+	struct idr tx_idr;
+	u8 *protocols_imp;
+	struct list_head node;
+	int users;
+};
+
+#define client_to_scmi_chan_info(c) container_of(c, struct scmi_chan_info, cl)
+#define handle_to_scmi_info(h)	container_of(h, struct scmi_info, handle)
+
+/*
+ * SCMI specification requires all parameters, message headers, return
+ * arguments or any protocol data to be expressed in little endian
+ * format only.
+ */
+struct scmi_shared_mem {
+	__le32 reserved;
+	__le32 channel_status;
+#define SCMI_SHMEM_CHAN_STAT_CHANNEL_ERROR	BIT(1)
+#define SCMI_SHMEM_CHAN_STAT_CHANNEL_FREE	BIT(0)
+	__le32 reserved1[2];
+	__le32 flags;
+#define SCMI_SHMEM_FLAG_INTR_ENABLED	BIT(0)
+	__le32 length;
+	__le32 msg_header;
+	u8 msg_payload[0];
+};
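+
+/*
+ * Resulting layout, as implied by the structure above: reserved @0x0,
+ * channel_status @0x4, reserved1 @0x8, flags @0x10, length @0x14,
+ * msg_header @0x18 and msg_payload from @0x1c onwards.
+ */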
+
+static const int scmi_linux_errmap[] = {
+	/* better than a switch case as long as the return values are contiguous */
+	0,			/* SCMI_SUCCESS */
+	-EOPNOTSUPP,		/* SCMI_ERR_SUPPORT */
+	-EINVAL,		/* SCMI_ERR_PARAMS */
+	-EACCES,		/* SCMI_ERR_ACCESS */
+	-ENOENT,		/* SCMI_ERR_ENTRY */
+	-ERANGE,		/* SCMI_ERR_RANGE */
+	-EBUSY,			/* SCMI_ERR_BUSY */
+	-ECOMM,			/* SCMI_ERR_COMMS */
+	-EIO,			/* SCMI_ERR_GENERIC */
+	-EREMOTEIO,		/* SCMI_ERR_HARDWARE */
+	-EPROTO,		/* SCMI_ERR_PROTOCOL */
+};
+
+static inline int scmi_to_linux_errno(int errno)
+{
+	if (errno < SCMI_SUCCESS && errno > SCMI_ERR_MAX)
+		return scmi_linux_errmap[-errno];
+	return -EIO;
+}
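+
+/*
+ * For example (illustrative only): scmi_to_linux_errno(SCMI_ERR_ENTRY)
+ * returns -ENOENT, while any status outside the known range maps
+ * to -EIO.
+ */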
+
+/**
+ * scmi_dump_header_dbg() - Helper to dump a message header.
+ *
+ * @dev: Device pointer corresponding to the SCMI entity
+ * @hdr: pointer to header.
+ */
+static inline void scmi_dump_header_dbg(struct device *dev,
+					struct scmi_msg_hdr *hdr)
+{
+	dev_dbg(dev, "Command ID: %x Sequence ID: %x Protocol: %x\n",
+		hdr->id, hdr->seq, hdr->protocol_id);
+}
+
+static void scmi_fetch_response(struct scmi_xfer *xfer,
+				struct scmi_shared_mem __iomem *mem)
+{
+	xfer->hdr.status = ioread32(mem->msg_payload);
+	/* Skip the length of header and statues in payload area i.e 8 bytes*/
+	xfer->rx.len = min_t(size_t, xfer->rx.len, ioread32(&mem->length) - 8);
+
+	/* Take a copy to the rx buffer.. */
+	memcpy_fromio(xfer->rx.buf, mem->msg_payload + 4, xfer->rx.len);
+}
+
+/**
+ * scmi_rx_callback() - mailbox client callback for receive messages
+ *
+ * @cl: client pointer
+ * @m: mailbox message
+ *
+ * Maps one received message to the corresponding transfer information
+ * and signals completion of the transfer.
+ *
+ * NOTE: This function will be invoked in IRQ context, hence should be
+ * as optimal as possible.
+ */
+static void scmi_rx_callback(struct mbox_client *cl, void *m)
+{
+	u16 xfer_id;
+	struct scmi_xfer *xfer;
+	struct scmi_chan_info *cinfo = client_to_scmi_chan_info(cl);
+	struct device *dev = cinfo->dev;
+	struct scmi_info *info = handle_to_scmi_info(cinfo->handle);
+	struct scmi_xfers_info *minfo = &info->minfo;
+	struct scmi_shared_mem __iomem *mem = cinfo->payload;
+
+	xfer_id = MSG_XTRACT_TOKEN(ioread32(&mem->msg_header));
+
+	/* Are we even expecting this? */
+	if (!test_bit(xfer_id, minfo->xfer_alloc_table)) {
+		dev_err(dev, "message for %d is not expected!\n", xfer_id);
+		return;
+	}
+
+	xfer = &minfo->xfer_block[xfer_id];
+
+	scmi_dump_header_dbg(dev, &xfer->hdr);
+	/* Is the message of valid length? */
+	if (xfer->rx.len > info->desc->max_msg_size) {
+		dev_err(dev, "unable to handle %zu xfer(max %d)\n",
+			xfer->rx.len, info->desc->max_msg_size);
+		return;
+	}
+
+	scmi_fetch_response(xfer, mem);
+	complete(&xfer->done);
+}
+
+/**
+ * pack_scmi_header() - packs and returns 32-bit header
+ *
+ * @hdr: pointer to header containing all the information on message id,
+ *	protocol id and sequence id.
+ *
+ * Return: 32-bit packed command header to be sent to the platform.
+ */
+static inline u32 pack_scmi_header(struct scmi_msg_hdr *hdr)
+{
+	return FIELD_PREP(MSG_ID_MASK, hdr->id) |
+		FIELD_PREP(MSG_TOKEN_ID_MASK, hdr->seq) |
+		FIELD_PREP(MSG_PROTOCOL_ID_MASK, hdr->protocol_id);
+}
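+
+/*
+ * For example (illustrative only): hdr->id = 0x6, hdr->protocol_id =
+ * 0x10 and hdr->seq = 1 pack to 0x6 | (0x10 << 10) | (1 << 18),
+ * i.e. 0x44006.
+ */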
+
+/**
+ * scmi_tx_prepare() - mailbox client callback to prepare for the transfer
+ *
+ * @cl: client pointer
+ * @m: mailbox message
+ *
+ * This function prepares the shared memory which contains the header and the
+ * payload.
+ */
+static void scmi_tx_prepare(struct mbox_client *cl, void *m)
+{
+	struct scmi_xfer *t = m;
+	struct scmi_chan_info *cinfo = client_to_scmi_chan_info(cl);
+	struct scmi_shared_mem __iomem *mem = cinfo->payload;
+
+	/* Mark channel busy + clear error */
+	iowrite32(0x0, &mem->channel_status);
+	iowrite32(t->hdr.poll_completion ? 0 : SCMI_SHMEM_FLAG_INTR_ENABLED,
+		  &mem->flags);
+	iowrite32(sizeof(mem->msg_header) + t->tx.len, &mem->length);
+	iowrite32(pack_scmi_header(&t->hdr), &mem->msg_header);
+	if (t->tx.buf)
+		memcpy_toio(mem->msg_payload, t->tx.buf, t->tx.len);
+}
+
+/**
+ * scmi_xfer_get() - Allocate one message
+ *
+ * @handle: Pointer to SCMI entity handle
+ *
+ * Helper function which is used by various command functions that are
+ * exposed to clients of this driver for allocating a message traffic event.
+ *
+ * This function can sleep depending on pending requests already in the system
+ * for the SCMI entity. Further, this also holds a spinlock to maintain
+ * integrity of internal data structures.
+ *
+ * Return: pointer to the allocated xfer on success,
+ *	else ERR_PTR(-ENOMEM) if no free message slot is available.
+ */
+static struct scmi_xfer *scmi_xfer_get(const struct scmi_handle *handle)
+{
+	u16 xfer_id;
+	struct scmi_xfer *xfer;
+	unsigned long flags, bit_pos;
+	struct scmi_info *info = handle_to_scmi_info(handle);
+	struct scmi_xfers_info *minfo = &info->minfo;
+
+	/* Keep the locked section as small as possible */
+	spin_lock_irqsave(&minfo->xfer_lock, flags);
+	bit_pos = find_first_zero_bit(minfo->xfer_alloc_table,
+				      info->desc->max_msg);
+	if (bit_pos == info->desc->max_msg) {
+		spin_unlock_irqrestore(&minfo->xfer_lock, flags);
+		return ERR_PTR(-ENOMEM);
+	}
+	set_bit(bit_pos, minfo->xfer_alloc_table);
+	spin_unlock_irqrestore(&minfo->xfer_lock, flags);
+
+	xfer_id = bit_pos;
+
+	xfer = &minfo->xfer_block[xfer_id];
+	xfer->hdr.seq = xfer_id;
+	reinit_completion(&xfer->done);
+
+	return xfer;
+}
+
+/**
+ * scmi_xfer_put() - Release a message
+ *
+ * @handle: Pointer to SCMI entity handle
+ * @xfer: message that was reserved by scmi_xfer_get
+ *
+ * This holds a spinlock to maintain integrity of internal data structures.
+ */
+void scmi_xfer_put(const struct scmi_handle *handle, struct scmi_xfer *xfer)
+{
+	unsigned long flags;
+	struct scmi_info *info = handle_to_scmi_info(handle);
+	struct scmi_xfers_info *minfo = &info->minfo;
+
+	/*
+	 * Keep the locked section as small as possible
+	 * NOTE: we might escape with smp_mb() and no lock here, but just
+	 * be conservative and symmetric.
+	 */
+	spin_lock_irqsave(&minfo->xfer_lock, flags);
+	clear_bit(xfer->hdr.seq, minfo->xfer_alloc_table);
+	spin_unlock_irqrestore(&minfo->xfer_lock, flags);
+}
+
+static bool
+scmi_xfer_poll_done(const struct scmi_chan_info *cinfo, struct scmi_xfer *xfer)
+{
+	struct scmi_shared_mem __iomem *mem = cinfo->payload;
+	u16 xfer_id = MSG_XTRACT_TOKEN(ioread32(&mem->msg_header));
+
+	if (xfer->hdr.seq != xfer_id)
+		return false;
+
+	return ioread32(&mem->channel_status) &
+		(SCMI_SHMEM_CHAN_STAT_CHANNEL_ERROR |
+		SCMI_SHMEM_CHAN_STAT_CHANNEL_FREE);
+}
+
+#define SCMI_MAX_POLL_TO_NS	(100 * NSEC_PER_USEC)
+
+static bool scmi_xfer_done_no_timeout(const struct scmi_chan_info *cinfo,
+				      struct scmi_xfer *xfer, ktime_t stop)
+{
+	ktime_t __cur = ktime_get();
+
+	return scmi_xfer_poll_done(cinfo, xfer) || ktime_after(__cur, stop);
+}
+
+/**
+ * scmi_do_xfer() - Do one transfer
+ *
+ * @handle: Pointer to SCMI entity handle
+ * @xfer: Transfer to initiate and wait for response
+ *
+ * Return: 0 if all goes well; -ETIMEDOUT in case of no response; or the
+ *	corresponding error if the transmit itself fails.
+ */
+int scmi_do_xfer(const struct scmi_handle *handle, struct scmi_xfer *xfer)
+{
+	int ret;
+	int timeout;
+	struct scmi_info *info = handle_to_scmi_info(handle);
+	struct device *dev = info->dev;
+	struct scmi_chan_info *cinfo;
+
+	cinfo = idr_find(&info->tx_idr, xfer->hdr.protocol_id);
+	if (unlikely(!cinfo))
+		return -EINVAL;
+
+	ret = mbox_send_message(cinfo->chan, xfer);
+	if (ret < 0) {
+		dev_dbg(dev, "mbox send fail %d\n", ret);
+		return ret;
+	}
+
+	/* mbox_send_message returns non-negative value on success, so reset */
+	ret = 0;
+
+	if (xfer->hdr.poll_completion) {
+		ktime_t stop = ktime_add_ns(ktime_get(), SCMI_MAX_POLL_TO_NS);
+
+		spin_until_cond(scmi_xfer_done_no_timeout(cinfo, xfer, stop));
+
+		if (ktime_before(ktime_get(), stop))
+			scmi_fetch_response(xfer, cinfo->payload);
+		else
+			ret = -ETIMEDOUT;
+	} else {
+		/* And we wait for the response. */
+		timeout = msecs_to_jiffies(info->desc->max_rx_timeout_ms);
+		if (!wait_for_completion_timeout(&xfer->done, timeout)) {
+			dev_err(dev, "mbox timed out in resp(caller: %pS)\n",
+				(void *)_RET_IP_);
+			ret = -ETIMEDOUT;
+		}
+	}
+
+	if (!ret && xfer->hdr.status)
+		ret = scmi_to_linux_errno(xfer->hdr.status);
+
+	/*
+	 * NOTE: we might prefer not to need the mailbox ticker to manage the
+	 * transfer queueing since the protocol layer queues things by itself.
+	 * Unfortunately, we have to kick the mailbox framework after we have
+	 * received our message.
+	 */
+	mbox_client_txdone(cinfo->chan, ret);
+
+	return ret;
+}
+
+/**
+ * scmi_xfer_get_init() - Allocate and initialise one message
+ *
+ * @handle: Pointer to SCMI entity handle
+ * @msg_id: Message identifier
+ * @prot_id: Protocol identifier for the message
+ * @tx_size: transmit message size
+ * @rx_size: receive message size
+ * @p: pointer to the allocated and initialised message
+ *
+ * This function allocates the message using @scmi_xfer_get and
+ * initialises the header.
+ *
+ * Return: 0 if all went fine with @p pointing to message, else
+ *	corresponding error.
+ */
+int scmi_xfer_get_init(const struct scmi_handle *handle, u8 msg_id, u8 prot_id,
+		       size_t tx_size, size_t rx_size, struct scmi_xfer **p)
+{
+	int ret;
+	struct scmi_xfer *xfer;
+	struct scmi_info *info = handle_to_scmi_info(handle);
+	struct device *dev = info->dev;
+
+	/* Ensure we have sane transfer sizes */
+	if (rx_size > info->desc->max_msg_size ||
+	    tx_size > info->desc->max_msg_size)
+		return -ERANGE;
+
+	xfer = scmi_xfer_get(handle);
+	if (IS_ERR(xfer)) {
+		ret = PTR_ERR(xfer);
+		dev_err(dev, "failed to get free message slot(%d)\n", ret);
+		return ret;
+	}
+
+	xfer->tx.len = tx_size;
+	xfer->rx.len = rx_size ? : info->desc->max_msg_size;
+	xfer->hdr.id = msg_id;
+	xfer->hdr.protocol_id = prot_id;
+	xfer->hdr.poll_completion = false;
+
+	*p = xfer;
+
+	return 0;
+}
+
+/**
+ * scmi_version_get() - command to get the revision of the SCMI entity
+ *
+ * @handle: Pointer to SCMI entity handle
+ * @protocol: Protocol identifier for the message
+ * @version: Holds returned version of protocol.
+ *
+ * Sends a PROTOCOL_VERSION command and retrieves the version reported
+ * by the firmware for the given protocol.
+ *
+ * Return: 0 if all went fine, else return appropriate error.
+ */
+int scmi_version_get(const struct scmi_handle *handle, u8 protocol,
+		     u32 *version)
+{
+	int ret;
+	__le32 *rev_info;
+	struct scmi_xfer *t;
+
+	ret = scmi_xfer_get_init(handle, PROTOCOL_VERSION, protocol, 0,
+				 sizeof(*version), &t);
+	if (ret)
+		return ret;
+
+	ret = scmi_do_xfer(handle, t);
+	if (!ret) {
+		rev_info = t->rx.buf;
+		*version = le32_to_cpu(*rev_info);
+	}
+
+	scmi_xfer_put(handle, t);
+	return ret;
+}
+
+void scmi_setup_protocol_implemented(const struct scmi_handle *handle,
+				     u8 *prot_imp)
+{
+	struct scmi_info *info = handle_to_scmi_info(handle);
+
+	info->protocols_imp = prot_imp;
+}
+
+static bool
+scmi_is_protocol_implemented(const struct scmi_handle *handle, u8 prot_id)
+{
+	int i;
+	struct scmi_info *info = handle_to_scmi_info(handle);
+
+	if (!info->protocols_imp)
+		return false;
+
+	for (i = 0; i < MAX_PROTOCOLS_IMP; i++)
+		if (info->protocols_imp[i] == prot_id)
+			return true;
+	return false;
+}
+
+/**
+ * scmi_handle_get() - Get the SCMI handle for a device
+ *
+ * @dev: pointer to device for which we want SCMI handle
+ *
+ * NOTE: The function does not track individual clients of the framework
+ * and is expected to be maintained by caller of SCMI protocol library.
+ * scmi_handle_put must be balanced with successful scmi_handle_get
+ *
+ * Return: pointer to handle if successful, NULL on error
+ */
+struct scmi_handle *scmi_handle_get(struct device *dev)
+{
+	struct list_head *p;
+	struct scmi_info *info;
+	struct scmi_handle *handle = NULL;
+
+	mutex_lock(&scmi_list_mutex);
+	list_for_each(p, &scmi_list) {
+		info = list_entry(p, struct scmi_info, node);
+		if (dev->parent == info->dev) {
+			handle = &info->handle;
+			info->users++;
+			break;
+		}
+	}
+	mutex_unlock(&scmi_list_mutex);
+
+	return handle;
+}
+
+/**
+ * scmi_handle_put() - Release the handle acquired by scmi_handle_get
+ *
+ * @handle: handle acquired by scmi_handle_get
+ *
+ * NOTE: The function does not track individual clients of the framework
+ * and is expected to be maintained by caller of SCMI protocol library.
+ * scmi_handle_put must be balanced with successful scmi_handle_get
+ *
+ * Return: 0 if successfully released;
+ *	-EINVAL if a NULL handle was passed.
+ */
+int scmi_handle_put(const struct scmi_handle *handle)
+{
+	struct scmi_info *info;
+
+	if (!handle)
+		return -EINVAL;
+
+	info = handle_to_scmi_info(handle);
+	mutex_lock(&scmi_list_mutex);
+	if (!WARN_ON(!info->users))
+		info->users--;
+	mutex_unlock(&scmi_list_mutex);
+
+	return 0;
+}
+
+static const struct scmi_desc scmi_generic_desc = {
+	.max_rx_timeout_ms = 30,	/* We may increase this if required */
+	.max_msg = 20,		/* Limited by MBOX_TX_QUEUE_LEN */
+	.max_msg_size = 128,
+};
+
+/* Each compatible listed below must have descriptor associated with it */
+static const struct of_device_id scmi_of_match[] = {
+	{ .compatible = "arm,scmi", .data = &scmi_generic_desc },
+	{ /* Sentinel */ },
+};
+
+MODULE_DEVICE_TABLE(of, scmi_of_match);
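+
+/*
+ * An illustrative devicetree fragment this driver can match (node and
+ * phandle names here are examples, following the arm,scmi binding):
+ *
+ *	firmware {
+ *		scmi {
+ *			compatible = "arm,scmi";
+ *			mboxes = <&mailbox 0>;
+ *			shmem = <&cpu_scp_shmem>;
+ *		};
+ *	};
+ */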
+
+static int scmi_xfer_info_init(struct scmi_info *sinfo)
+{
+	int i;
+	struct scmi_xfer *xfer;
+	struct device *dev = sinfo->dev;
+	const struct scmi_desc *desc = sinfo->desc;
+	struct scmi_xfers_info *info = &sinfo->minfo;
+
+	/* Pre-allocated messages, no more than what hdr.seq can support */
+	if (WARN_ON(desc->max_msg >= MSG_TOKEN_MAX)) {
+		dev_err(dev, "Maximum message of %d exceeds supported %ld\n",
+			desc->max_msg, MSG_TOKEN_MAX);
+		return -EINVAL;
+	}
+
+	info->xfer_block = devm_kcalloc(dev, desc->max_msg,
+					sizeof(*info->xfer_block), GFP_KERNEL);
+	if (!info->xfer_block)
+		return -ENOMEM;
+
+	info->xfer_alloc_table = devm_kcalloc(dev, BITS_TO_LONGS(desc->max_msg),
+					      sizeof(long), GFP_KERNEL);
+	if (!info->xfer_alloc_table)
+		return -ENOMEM;
+
+	/* Pre-initialize the buffer pointer to pre-allocated buffers */
+	for (i = 0, xfer = info->xfer_block; i < desc->max_msg; i++, xfer++) {
+		xfer->rx.buf = devm_kcalloc(dev, desc->max_msg_size, sizeof(u8),
+					    GFP_KERNEL);
+		if (!xfer->rx.buf)
+			return -ENOMEM;
+
+		xfer->tx.buf = xfer->rx.buf;
+		init_completion(&xfer->done);
+	}
+
+	spin_lock_init(&info->xfer_lock);
+
+	return 0;
+}
+
+static int scmi_mailbox_check(struct device_node *np)
+{
+	struct of_phandle_args arg;
+
+	return of_parse_phandle_with_args(np, "mboxes", "#mbox-cells", 0, &arg);
+}
+
+static int scmi_mbox_free_channel(int id, void *p, void *data)
+{
+	struct scmi_chan_info *cinfo = p;
+	struct idr *idr = data;
+
+	if (!IS_ERR_OR_NULL(cinfo->chan)) {
+		mbox_free_channel(cinfo->chan);
+		cinfo->chan = NULL;
+	}
+
+	idr_remove(idr, id);
+
+	return 0;
+}
+
+static int scmi_remove(struct platform_device *pdev)
+{
+	int ret = 0;
+	struct scmi_info *info = platform_get_drvdata(pdev);
+	struct idr *idr = &info->tx_idr;
+
+	mutex_lock(&scmi_list_mutex);
+	if (info->users)
+		ret = -EBUSY;
+	else
+		list_del(&info->node);
+	mutex_unlock(&scmi_list_mutex);
+
+	if (ret)
+		return ret;
+
+	/* Safe to free channels since no more users */
+	ret = idr_for_each(idr, scmi_mbox_free_channel, idr);
+	idr_destroy(&info->tx_idr);
+
+	return ret;
+}
+
+static inline int
+scmi_mbox_chan_setup(struct scmi_info *info, struct device *dev, int prot_id)
+{
+	int ret;
+	struct resource res;
+	resource_size_t size;
+	struct device_node *shmem, *np = dev->of_node;
+	struct scmi_chan_info *cinfo;
+	struct mbox_client *cl;
+
+	if (scmi_mailbox_check(np)) {
+		cinfo = idr_find(&info->tx_idr, SCMI_PROTOCOL_BASE);
+		goto idr_alloc;
+	}
+
+	cinfo = devm_kzalloc(info->dev, sizeof(*cinfo), GFP_KERNEL);
+	if (!cinfo)
+		return -ENOMEM;
+
+	cinfo->dev = dev;
+
+	cl = &cinfo->cl;
+	cl->dev = dev;
+	cl->rx_callback = scmi_rx_callback;
+	cl->tx_prepare = scmi_tx_prepare;
+	cl->tx_block = false;
+	cl->knows_txdone = true;
+
+	shmem = of_parse_phandle(np, "shmem", 0);
+	ret = of_address_to_resource(shmem, 0, &res);
+	of_node_put(shmem);
+	if (ret) {
+		dev_err(dev, "failed to get SCMI Tx payload mem resource\n");
+		return ret;
+	}
+
+	size = resource_size(&res);
+	cinfo->payload = devm_ioremap(info->dev, res.start, size);
+	if (!cinfo->payload) {
+		dev_err(dev, "failed to ioremap SCMI Tx payload\n");
+		return -EADDRNOTAVAIL;
+	}
+
+	/* Transmit channel is first entry i.e. index 0 */
+	cinfo->chan = mbox_request_channel(cl, 0);
+	if (IS_ERR(cinfo->chan)) {
+		ret = PTR_ERR(cinfo->chan);
+		if (ret != -EPROBE_DEFER)
+			dev_err(dev, "failed to request SCMI Tx mailbox\n");
+		return ret;
+	}
+
+idr_alloc:
+	ret = idr_alloc(&info->tx_idr, cinfo, prot_id, prot_id + 1, GFP_KERNEL);
+	if (ret != prot_id) {
+		dev_err(dev, "unable to allocate SCMI idr slot err %d\n", ret);
+		return ret;
+	}
+
+	cinfo->handle = &info->handle;
+	return 0;
+}
+
+static inline void
+scmi_create_protocol_device(struct device_node *np, struct scmi_info *info,
+			    int prot_id)
+{
+	struct scmi_device *sdev;
+
+	sdev = scmi_device_create(np, info->dev, prot_id);
+	if (!sdev) {
+		dev_err(info->dev, "failed to create %d protocol device\n",
+			prot_id);
+		return;
+	}
+
+	if (scmi_mbox_chan_setup(info, &sdev->dev, prot_id)) {
+		dev_err(&sdev->dev, "failed to setup transport\n");
+		scmi_device_destroy(sdev);
+		return;
+	}
+
+	/* setup handle now as the transport is ready */
+	scmi_set_handle(sdev);
+}
+
+static int scmi_probe(struct platform_device *pdev)
+{
+	int ret;
+	struct scmi_handle *handle;
+	const struct scmi_desc *desc;
+	struct scmi_info *info;
+	struct device *dev = &pdev->dev;
+	struct device_node *child, *np = dev->of_node;
+
+	/* Only mailbox method supported, check for the presence of one */
+	if (scmi_mailbox_check(np)) {
+		dev_err(dev, "no mailbox found in %pOF\n", np);
+		return -EINVAL;
+	}
+
+	desc = of_match_device(scmi_of_match, dev)->data;
+
+	info = devm_kzalloc(dev, sizeof(*info), GFP_KERNEL);
+	if (!info)
+		return -ENOMEM;
+
+	info->dev = dev;
+	info->desc = desc;
+	INIT_LIST_HEAD(&info->node);
+
+	ret = scmi_xfer_info_init(info);
+	if (ret)
+		return ret;
+
+	platform_set_drvdata(pdev, info);
+	idr_init(&info->tx_idr);
+
+	handle = &info->handle;
+	handle->dev = info->dev;
+	handle->version = &info->version;
+
+	ret = scmi_mbox_chan_setup(info, dev, SCMI_PROTOCOL_BASE);
+	if (ret)
+		return ret;
+
+	ret = scmi_base_protocol_init(handle);
+	if (ret) {
+		dev_err(dev, "unable to communicate with SCMI(%d)\n", ret);
+		return ret;
+	}
+
+	mutex_lock(&scmi_list_mutex);
+	list_add_tail(&info->node, &scmi_list);
+	mutex_unlock(&scmi_list_mutex);
+
+	for_each_available_child_of_node(np, child) {
+		u32 prot_id;
+
+		if (of_property_read_u32(child, "reg", &prot_id))
+			continue;
+
+		if (!FIELD_FIT(MSG_PROTOCOL_ID_MASK, prot_id))
+			dev_err(dev, "Out of range protocol %d\n", prot_id);
+
+		if (!scmi_is_protocol_implemented(handle, prot_id)) {
+			dev_err(dev, "SCMI protocol %d not implemented\n",
+				prot_id);
+			continue;
+		}
+
+		scmi_create_protocol_device(child, info, prot_id);
+	}
+
+	return 0;
+}
+
+static struct platform_driver scmi_driver = {
+	.driver = {
+		   .name = "arm-scmi",
+		   .of_match_table = scmi_of_match,
+		   },
+	.probe = scmi_probe,
+	.remove = scmi_remove,
+};
+
+module_platform_driver(scmi_driver);
+
+MODULE_ALIAS("platform: arm-scmi");
+MODULE_AUTHOR("Sudeep Holla <sudeep.holla@arm.com>");
+MODULE_DESCRIPTION("ARM SCMI protocol driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/firmware/arm_scmi/perf.c b/drivers/firmware/arm_scmi/perf.c
new file mode 100644
index 0000000..6434294
--- /dev/null
+++ b/drivers/firmware/arm_scmi/perf.c
@@ -0,0 +1,482 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * System Control and Management Interface (SCMI) Performance Protocol
+ *
+ * Copyright (C) 2018 ARM Ltd.
+ */
+
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pm_opp.h>
+#include <linux/sort.h>
+
+#include "common.h"
+
+enum scmi_performance_protocol_cmd {
+	PERF_DOMAIN_ATTRIBUTES = 0x3,
+	PERF_DESCRIBE_LEVELS = 0x4,
+	PERF_LIMITS_SET = 0x5,
+	PERF_LIMITS_GET = 0x6,
+	PERF_LEVEL_SET = 0x7,
+	PERF_LEVEL_GET = 0x8,
+	PERF_NOTIFY_LIMITS = 0x9,
+	PERF_NOTIFY_LEVEL = 0xa,
+};
+
+struct scmi_opp {
+	u32 perf;
+	u32 power;
+	u32 trans_latency_us;
+};
+
+struct scmi_msg_resp_perf_attributes {
+	__le16 num_domains;
+	__le16 flags;
+#define POWER_SCALE_IN_MILLIWATT(x)	((x) & BIT(0))
+	__le32 stats_addr_low;
+	__le32 stats_addr_high;
+	__le32 stats_size;
+};
+
+struct scmi_msg_resp_perf_domain_attributes {
+	__le32 flags;
+#define SUPPORTS_SET_LIMITS(x)		((x) & BIT(31))
+#define SUPPORTS_SET_PERF_LVL(x)	((x) & BIT(30))
+#define SUPPORTS_PERF_LIMIT_NOTIFY(x)	((x) & BIT(29))
+#define SUPPORTS_PERF_LEVEL_NOTIFY(x)	((x) & BIT(28))
+	__le32 rate_limit_us;
+	__le32 sustained_freq_khz;
+	__le32 sustained_perf_level;
+	    u8 name[SCMI_MAX_STR_SIZE];
+};
+
+struct scmi_msg_perf_describe_levels {
+	__le32 domain;
+	__le32 level_index;
+};
+
+struct scmi_perf_set_limits {
+	__le32 domain;
+	__le32 max_level;
+	__le32 min_level;
+};
+
+struct scmi_perf_get_limits {
+	__le32 max_level;
+	__le32 min_level;
+};
+
+struct scmi_perf_set_level {
+	__le32 domain;
+	__le32 level;
+};
+
+struct scmi_perf_notify_level_or_limits {
+	__le32 domain;
+	__le32 notify_enable;
+};
+
+struct scmi_msg_resp_perf_describe_levels {
+	__le16 num_returned;
+	__le16 num_remaining;
+	struct {
+		__le32 perf_val;
+		__le32 power;
+		__le16 transition_latency_us;
+		__le16 reserved;
+	} opp[0];
+};
+
+struct perf_dom_info {
+	bool set_limits;
+	bool set_perf;
+	bool perf_limit_notify;
+	bool perf_level_notify;
+	u32 opp_count;
+	u32 sustained_freq_khz;
+	u32 sustained_perf_level;
+	u32 mult_factor;
+	char name[SCMI_MAX_STR_SIZE];
+	struct scmi_opp opp[MAX_OPPS];
+};
+
+struct scmi_perf_info {
+	int num_domains;
+	bool power_scale_mw;
+	u64 stats_addr;
+	u32 stats_size;
+	struct perf_dom_info *dom_info;
+};
+
+static int scmi_perf_attributes_get(const struct scmi_handle *handle,
+				    struct scmi_perf_info *pi)
+{
+	int ret;
+	struct scmi_xfer *t;
+	struct scmi_msg_resp_perf_attributes *attr;
+
+	ret = scmi_xfer_get_init(handle, PROTOCOL_ATTRIBUTES,
+				 SCMI_PROTOCOL_PERF, 0, sizeof(*attr), &t);
+	if (ret)
+		return ret;
+
+	attr = t->rx.buf;
+
+	ret = scmi_do_xfer(handle, t);
+	if (!ret) {
+		u16 flags = le16_to_cpu(attr->flags);
+
+		pi->num_domains = le16_to_cpu(attr->num_domains);
+		pi->power_scale_mw = POWER_SCALE_IN_MILLIWATT(flags);
+		pi->stats_addr = le32_to_cpu(attr->stats_addr_low) |
+				(u64)le32_to_cpu(attr->stats_addr_high) << 32;
+		pi->stats_size = le32_to_cpu(attr->stats_size);
+	}
+
+	scmi_xfer_put(handle, t);
+	return ret;
+}
+
+static int
+scmi_perf_domain_attributes_get(const struct scmi_handle *handle, u32 domain,
+				struct perf_dom_info *dom_info)
+{
+	int ret;
+	struct scmi_xfer *t;
+	struct scmi_msg_resp_perf_domain_attributes *attr;
+
+	ret = scmi_xfer_get_init(handle, PERF_DOMAIN_ATTRIBUTES,
+				 SCMI_PROTOCOL_PERF, sizeof(domain),
+				 sizeof(*attr), &t);
+	if (ret)
+		return ret;
+
+	*(__le32 *)t->tx.buf = cpu_to_le32(domain);
+	attr = t->rx.buf;
+
+	ret = scmi_do_xfer(handle, t);
+	if (!ret) {
+		u32 flags = le32_to_cpu(attr->flags);
+
+		dom_info->set_limits = SUPPORTS_SET_LIMITS(flags);
+		dom_info->set_perf = SUPPORTS_SET_PERF_LVL(flags);
+		dom_info->perf_limit_notify = SUPPORTS_PERF_LIMIT_NOTIFY(flags);
+		dom_info->perf_level_notify = SUPPORTS_PERF_LEVEL_NOTIFY(flags);
+		dom_info->sustained_freq_khz =
+					le32_to_cpu(attr->sustained_freq_khz);
+		dom_info->sustained_perf_level =
+					le32_to_cpu(attr->sustained_perf_level);
+		if (!dom_info->sustained_freq_khz ||
+		    !dom_info->sustained_perf_level)
+			/* CPUFreq converts to kHz, hence default 1000 */
+			dom_info->mult_factor =	1000;
+		else
+			dom_info->mult_factor =
+					(dom_info->sustained_freq_khz * 1000) /
+					dom_info->sustained_perf_level;
+		memcpy(dom_info->name, attr->name, SCMI_MAX_STR_SIZE);
+	}
+
+	scmi_xfer_put(handle, t);
+	return ret;
+}
+
+static int opp_cmp_func(const void *opp1, const void *opp2)
+{
+	const struct scmi_opp *t1 = opp1, *t2 = opp2;
+
+	return t1->perf - t2->perf;
+}
+
+static int
+scmi_perf_describe_levels_get(const struct scmi_handle *handle, u32 domain,
+			      struct perf_dom_info *perf_dom)
+{
+	int ret, cnt;
+	u32 tot_opp_cnt = 0;
+	u16 num_returned, num_remaining;
+	struct scmi_xfer *t;
+	struct scmi_opp *opp;
+	struct scmi_msg_perf_describe_levels *dom_info;
+	struct scmi_msg_resp_perf_describe_levels *level_info;
+
+	ret = scmi_xfer_get_init(handle, PERF_DESCRIBE_LEVELS,
+				 SCMI_PROTOCOL_PERF, sizeof(*dom_info), 0, &t);
+	if (ret)
+		return ret;
+
+	dom_info = t->tx.buf;
+	level_info = t->rx.buf;
+
+	do {
+		dom_info->domain = cpu_to_le32(domain);
+		/* Set the number of OPPs to be skipped/already read */
+		dom_info->level_index = cpu_to_le32(tot_opp_cnt);
+
+		ret = scmi_do_xfer(handle, t);
+		if (ret)
+			break;
+
+		num_returned = le16_to_cpu(level_info->num_returned);
+		num_remaining = le16_to_cpu(level_info->num_remaining);
+		if (tot_opp_cnt + num_returned > MAX_OPPS) {
+			dev_err(handle->dev, "No. of OPPs exceeded MAX_OPPS");
+			break;
+		}
+
+		opp = &perf_dom->opp[tot_opp_cnt];
+		for (cnt = 0; cnt < num_returned; cnt++, opp++) {
+			opp->perf = le32_to_cpu(level_info->opp[cnt].perf_val);
+			opp->power = le32_to_cpu(level_info->opp[cnt].power);
+			opp->trans_latency_us = le16_to_cpu
+				(level_info->opp[cnt].transition_latency_us);
+
+			dev_dbg(handle->dev, "Level %d Power %d Latency %dus\n",
+				opp->perf, opp->power, opp->trans_latency_us);
+		}
+
+		tot_opp_cnt += num_returned;
+		/*
+		 * check for both returned and remaining to avoid infinite
+		 * loop due to buggy firmware
+		 */
+	} while (num_returned && num_remaining);
+
+	perf_dom->opp_count = tot_opp_cnt;
+	scmi_xfer_put(handle, t);
+
+	sort(perf_dom->opp, tot_opp_cnt, sizeof(*opp), opp_cmp_func, NULL);
+	return ret;
+}
+
+static int scmi_perf_limits_set(const struct scmi_handle *handle, u32 domain,
+				u32 max_perf, u32 min_perf)
+{
+	int ret;
+	struct scmi_xfer *t;
+	struct scmi_perf_set_limits *limits;
+
+	ret = scmi_xfer_get_init(handle, PERF_LIMITS_SET, SCMI_PROTOCOL_PERF,
+				 sizeof(*limits), 0, &t);
+	if (ret)
+		return ret;
+
+	limits = t->tx.buf;
+	limits->domain = cpu_to_le32(domain);
+	limits->max_level = cpu_to_le32(max_perf);
+	limits->min_level = cpu_to_le32(min_perf);
+
+	ret = scmi_do_xfer(handle, t);
+
+	scmi_xfer_put(handle, t);
+	return ret;
+}
+
+static int scmi_perf_limits_get(const struct scmi_handle *handle, u32 domain,
+				u32 *max_perf, u32 *min_perf)
+{
+	int ret;
+	struct scmi_xfer *t;
+	struct scmi_perf_get_limits *limits;
+
+	ret = scmi_xfer_get_init(handle, PERF_LIMITS_GET, SCMI_PROTOCOL_PERF,
+				 sizeof(__le32), 0, &t);
+	if (ret)
+		return ret;
+
+	*(__le32 *)t->tx.buf = cpu_to_le32(domain);
+
+	ret = scmi_do_xfer(handle, t);
+	if (!ret) {
+		limits = t->rx.buf;
+
+		*max_perf = le32_to_cpu(limits->max_level);
+		*min_perf = le32_to_cpu(limits->min_level);
+	}
+
+	scmi_xfer_put(handle, t);
+	return ret;
+}
+
+static int scmi_perf_level_set(const struct scmi_handle *handle, u32 domain,
+			       u32 level, bool poll)
+{
+	int ret;
+	struct scmi_xfer *t;
+	struct scmi_perf_set_level *lvl;
+
+	ret = scmi_xfer_get_init(handle, PERF_LEVEL_SET, SCMI_PROTOCOL_PERF,
+				 sizeof(*lvl), 0, &t);
+	if (ret)
+		return ret;
+
+	t->hdr.poll_completion = poll;
+	lvl = t->tx.buf;
+	lvl->domain = cpu_to_le32(domain);
+	lvl->level = cpu_to_le32(level);
+
+	ret = scmi_do_xfer(handle, t);
+
+	scmi_xfer_put(handle, t);
+	return ret;
+}
+
+static int scmi_perf_level_get(const struct scmi_handle *handle, u32 domain,
+			       u32 *level, bool poll)
+{
+	int ret;
+	struct scmi_xfer *t;
+
+	ret = scmi_xfer_get_init(handle, PERF_LEVEL_GET, SCMI_PROTOCOL_PERF,
+				 sizeof(u32), sizeof(u32), &t);
+	if (ret)
+		return ret;
+
+	t->hdr.poll_completion = poll;
+	*(__le32 *)t->tx.buf = cpu_to_le32(domain);
+
+	ret = scmi_do_xfer(handle, t);
+	if (!ret)
+		*level = le32_to_cpu(*(__le32 *)t->rx.buf);
+
+	scmi_xfer_put(handle, t);
+	return ret;
+}
+
+/* Device specific ops */
+static int scmi_dev_domain_id(struct device *dev)
+{
+	struct of_phandle_args clkspec;
+
+	if (of_parse_phandle_with_args(dev->of_node, "clocks", "#clock-cells",
+				       0, &clkspec))
+		return -EINVAL;
+
+	return clkspec.args[0];
+}
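+
+/*
+ * The domain id is taken from the first cell of the device's "clocks"
+ * specifier. An illustrative (assumed) DT fragment for a device in
+ * performance domain 0:
+ *
+ *	cpu@0 {
+ *		...
+ *		clocks = <&scmi_dvfs 0>;
+ *	};
+ */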
+
+static int scmi_dvfs_device_opps_add(const struct scmi_handle *handle,
+				     struct device *dev)
+{
+	int idx, ret, domain;
+	unsigned long freq;
+	struct scmi_opp *opp;
+	struct perf_dom_info *dom;
+	struct scmi_perf_info *pi = handle->perf_priv;
+
+	domain = scmi_dev_domain_id(dev);
+	if (domain < 0)
+		return domain;
+
+	dom = pi->dom_info + domain;
+
+	for (opp = dom->opp, idx = 0; idx < dom->opp_count; idx++, opp++) {
+		freq = opp->perf * dom->mult_factor;
+
+		ret = dev_pm_opp_add(dev, freq, 0);
+		if (ret) {
+			dev_warn(dev, "failed to add opp %luHz\n", freq);
+
+			while (idx-- > 0) {
+				freq = (--opp)->perf * dom->mult_factor;
+				dev_pm_opp_remove(dev, freq);
+			}
+			return ret;
+		}
+	}
+	return 0;
+}
+
+static int scmi_dvfs_transition_latency_get(const struct scmi_handle *handle,
+					    struct device *dev)
+{
+	struct perf_dom_info *dom;
+	struct scmi_perf_info *pi = handle->perf_priv;
+	int domain = scmi_dev_domain_id(dev);
+
+	if (domain < 0)
+		return domain;
+
+	dom = pi->dom_info + domain;
+	/* us to ns */
+	return dom->opp[dom->opp_count - 1].trans_latency_us * 1000;
+}
+
+static int scmi_dvfs_freq_set(const struct scmi_handle *handle, u32 domain,
+			      unsigned long freq, bool poll)
+{
+	struct scmi_perf_info *pi = handle->perf_priv;
+	struct perf_dom_info *dom = pi->dom_info + domain;
+
+	return scmi_perf_level_set(handle, domain, freq / dom->mult_factor,
+				   poll);
+}
+
+static int scmi_dvfs_freq_get(const struct scmi_handle *handle, u32 domain,
+			      unsigned long *freq, bool poll)
+{
+	int ret;
+	u32 level;
+	struct scmi_perf_info *pi = handle->perf_priv;
+	struct perf_dom_info *dom = pi->dom_info + domain;
+
+	ret = scmi_perf_level_get(handle, domain, &level, poll);
+	if (!ret)
+		*freq = level * dom->mult_factor;
+
+	return ret;
+}
+
+static struct scmi_perf_ops perf_ops = {
+	.limits_set = scmi_perf_limits_set,
+	.limits_get = scmi_perf_limits_get,
+	.level_set = scmi_perf_level_set,
+	.level_get = scmi_perf_level_get,
+	.device_domain_id = scmi_dev_domain_id,
+	.transition_latency_get = scmi_dvfs_transition_latency_get,
+	.device_opps_add = scmi_dvfs_device_opps_add,
+	.freq_set = scmi_dvfs_freq_set,
+	.freq_get = scmi_dvfs_freq_get,
+};
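+
+/*
+ * Sketch of how a client (e.g. a cpufreq driver) might consume these ops
+ * through the handle; names and values here are illustrative only:
+ *
+ *	unsigned long freq = 1200000000UL;
+ *
+ *	ret = handle->perf_ops->freq_set(handle, domain, freq, false);
+ *	if (!ret)
+ *		ret = handle->perf_ops->freq_get(handle, domain, &freq, false);
+ */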
+
+static int scmi_perf_protocol_init(struct scmi_handle *handle)
+{
+	int domain;
+	u32 version;
+	struct scmi_perf_info *pinfo;
+
+	scmi_version_get(handle, SCMI_PROTOCOL_PERF, &version);
+
+	dev_dbg(handle->dev, "Performance Version %d.%d\n",
+		PROTOCOL_REV_MAJOR(version), PROTOCOL_REV_MINOR(version));
+
+	pinfo = devm_kzalloc(handle->dev, sizeof(*pinfo), GFP_KERNEL);
+	if (!pinfo)
+		return -ENOMEM;
+
+	scmi_perf_attributes_get(handle, pinfo);
+
+	pinfo->dom_info = devm_kcalloc(handle->dev, pinfo->num_domains,
+				       sizeof(*pinfo->dom_info), GFP_KERNEL);
+	if (!pinfo->dom_info)
+		return -ENOMEM;
+
+	for (domain = 0; domain < pinfo->num_domains; domain++) {
+		struct perf_dom_info *dom = pinfo->dom_info + domain;
+
+		scmi_perf_domain_attributes_get(handle, domain, dom);
+		scmi_perf_describe_levels_get(handle, domain, dom);
+	}
+
+	handle->perf_ops = &perf_ops;
+	handle->perf_priv = pinfo;
+
+	return 0;
+}
+
+static int __init scmi_perf_init(void)
+{
+	return scmi_protocol_register(SCMI_PROTOCOL_PERF,
+				      &scmi_perf_protocol_init);
+}
+subsys_initcall(scmi_perf_init);
diff --git a/drivers/firmware/arm_scmi/power.c b/drivers/firmware/arm_scmi/power.c
new file mode 100644
index 0000000..cfa033b
--- /dev/null
+++ b/drivers/firmware/arm_scmi/power.c
@@ -0,0 +1,221 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * System Control and Management Interface (SCMI) Power Protocol
+ *
+ * Copyright (C) 2018 ARM Ltd.
+ */
+
+#include "common.h"
+
+enum scmi_power_protocol_cmd {
+	POWER_DOMAIN_ATTRIBUTES = 0x3,
+	POWER_STATE_SET = 0x4,
+	POWER_STATE_GET = 0x5,
+	POWER_STATE_NOTIFY = 0x6,
+};
+
+struct scmi_msg_resp_power_attributes {
+	__le16 num_domains;
+	__le16 reserved;
+	__le32 stats_addr_low;
+	__le32 stats_addr_high;
+	__le32 stats_size;
+};
+
+struct scmi_msg_resp_power_domain_attributes {
+	__le32 flags;
+#define SUPPORTS_STATE_SET_NOTIFY(x)	((x) & BIT(31))
+#define SUPPORTS_STATE_SET_ASYNC(x)	((x) & BIT(30))
+#define SUPPORTS_STATE_SET_SYNC(x)	((x) & BIT(29))
+	    u8 name[SCMI_MAX_STR_SIZE];
+};
+
+struct scmi_power_set_state {
+	__le32 flags;
+#define STATE_SET_ASYNC		BIT(0)
+	__le32 domain;
+	__le32 state;
+};
+
+struct scmi_power_state_notify {
+	__le32 domain;
+	__le32 notify_enable;
+};
+
+struct power_dom_info {
+	bool state_set_sync;
+	bool state_set_async;
+	bool state_set_notify;
+	char name[SCMI_MAX_STR_SIZE];
+};
+
+struct scmi_power_info {
+	int num_domains;
+	u64 stats_addr;
+	u32 stats_size;
+	struct power_dom_info *dom_info;
+};
+
+static int scmi_power_attributes_get(const struct scmi_handle *handle,
+				     struct scmi_power_info *pi)
+{
+	int ret;
+	struct scmi_xfer *t;
+	struct scmi_msg_resp_power_attributes *attr;
+
+	ret = scmi_xfer_get_init(handle, PROTOCOL_ATTRIBUTES,
+				 SCMI_PROTOCOL_POWER, 0, sizeof(*attr), &t);
+	if (ret)
+		return ret;
+
+	attr = t->rx.buf;
+
+	ret = scmi_do_xfer(handle, t);
+	if (!ret) {
+		pi->num_domains = le16_to_cpu(attr->num_domains);
+		pi->stats_addr = le32_to_cpu(attr->stats_addr_low) |
+				(u64)le32_to_cpu(attr->stats_addr_high) << 32;
+		pi->stats_size = le32_to_cpu(attr->stats_size);
+	}
+
+	scmi_xfer_put(handle, t);
+	return ret;
+}
+
+static int
+scmi_power_domain_attributes_get(const struct scmi_handle *handle, u32 domain,
+				 struct power_dom_info *dom_info)
+{
+	int ret;
+	struct scmi_xfer *t;
+	struct scmi_msg_resp_power_domain_attributes *attr;
+
+	ret = scmi_xfer_get_init(handle, POWER_DOMAIN_ATTRIBUTES,
+				 SCMI_PROTOCOL_POWER, sizeof(domain),
+				 sizeof(*attr), &t);
+	if (ret)
+		return ret;
+
+	*(__le32 *)t->tx.buf = cpu_to_le32(domain);
+	attr = t->rx.buf;
+
+	ret = scmi_do_xfer(handle, t);
+	if (!ret) {
+		u32 flags = le32_to_cpu(attr->flags);
+
+		dom_info->state_set_notify = SUPPORTS_STATE_SET_NOTIFY(flags);
+		dom_info->state_set_async = SUPPORTS_STATE_SET_ASYNC(flags);
+		dom_info->state_set_sync = SUPPORTS_STATE_SET_SYNC(flags);
+		memcpy(dom_info->name, attr->name, SCMI_MAX_STR_SIZE);
+	}
+
+	scmi_xfer_put(handle, t);
+	return ret;
+}
+
+static int
+scmi_power_state_set(const struct scmi_handle *handle, u32 domain, u32 state)
+{
+	int ret;
+	struct scmi_xfer *t;
+	struct scmi_power_set_state *st;
+
+	ret = scmi_xfer_get_init(handle, POWER_STATE_SET, SCMI_PROTOCOL_POWER,
+				 sizeof(*st), 0, &t);
+	if (ret)
+		return ret;
+
+	st = t->tx.buf;
+	st->flags = cpu_to_le32(0);
+	st->domain = cpu_to_le32(domain);
+	st->state = cpu_to_le32(state);
+
+	ret = scmi_do_xfer(handle, t);
+
+	scmi_xfer_put(handle, t);
+	return ret;
+}
+
+static int
+scmi_power_state_get(const struct scmi_handle *handle, u32 domain, u32 *state)
+{
+	int ret;
+	struct scmi_xfer *t;
+
+	ret = scmi_xfer_get_init(handle, POWER_STATE_GET, SCMI_PROTOCOL_POWER,
+				 sizeof(u32), sizeof(u32), &t);
+	if (ret)
+		return ret;
+
+	*(__le32 *)t->tx.buf = cpu_to_le32(domain);
+
+	ret = scmi_do_xfer(handle, t);
+	if (!ret)
+		*state = le32_to_cpu(*(__le32 *)t->rx.buf);
+
+	scmi_xfer_put(handle, t);
+	return ret;
+}
+
+static int scmi_power_num_domains_get(const struct scmi_handle *handle)
+{
+	struct scmi_power_info *pi = handle->power_priv;
+
+	return pi->num_domains;
+}
+
+static char *scmi_power_name_get(const struct scmi_handle *handle, u32 domain)
+{
+	struct scmi_power_info *pi = handle->power_priv;
+	struct power_dom_info *dom = pi->dom_info + domain;
+
+	return dom->name;
+}
+
+static struct scmi_power_ops power_ops = {
+	.num_domains_get = scmi_power_num_domains_get,
+	.name_get = scmi_power_name_get,
+	.state_set = scmi_power_state_set,
+	.state_get = scmi_power_state_get,
+};
+
+static int scmi_power_protocol_init(struct scmi_handle *handle)
+{
+	int domain;
+	u32 version;
+	struct scmi_power_info *pinfo;
+
+	scmi_version_get(handle, SCMI_PROTOCOL_POWER, &version);
+
+	dev_dbg(handle->dev, "Power Version %d.%d\n",
+		PROTOCOL_REV_MAJOR(version), PROTOCOL_REV_MINOR(version));
+
+	pinfo = devm_kzalloc(handle->dev, sizeof(*pinfo), GFP_KERNEL);
+	if (!pinfo)
+		return -ENOMEM;
+
+	scmi_power_attributes_get(handle, pinfo);
+
+	pinfo->dom_info = devm_kcalloc(handle->dev, pinfo->num_domains,
+				       sizeof(*pinfo->dom_info), GFP_KERNEL);
+	if (!pinfo->dom_info)
+		return -ENOMEM;
+
+	for (domain = 0; domain < pinfo->num_domains; domain++) {
+		struct power_dom_info *dom = pinfo->dom_info + domain;
+
+		scmi_power_domain_attributes_get(handle, domain, dom);
+	}
+
+	handle->power_ops = &power_ops;
+	handle->power_priv = pinfo;
+
+	return 0;
+}
+
+static int __init scmi_power_init(void)
+{
+	return scmi_protocol_register(SCMI_PROTOCOL_POWER,
+				      &scmi_power_protocol_init);
+}
+subsys_initcall(scmi_power_init);
diff --git a/drivers/firmware/arm_scmi/scmi_pm_domain.c b/drivers/firmware/arm_scmi/scmi_pm_domain.c
new file mode 100644
index 0000000..87f737e
--- /dev/null
+++ b/drivers/firmware/arm_scmi/scmi_pm_domain.c
@@ -0,0 +1,129 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * SCMI Generic power domain support.
+ *
+ * Copyright (C) 2018 ARM Ltd.
+ */
+
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/pm_domain.h>
+#include <linux/scmi_protocol.h>
+
+struct scmi_pm_domain {
+	struct generic_pm_domain genpd;
+	const struct scmi_handle *handle;
+	const char *name;
+	u32 domain;
+};
+
+#define to_scmi_pd(gpd) container_of(gpd, struct scmi_pm_domain, genpd)
+
+static int scmi_pd_power(struct generic_pm_domain *domain, bool power_on)
+{
+	int ret;
+	u32 state, ret_state;
+	struct scmi_pm_domain *pd = to_scmi_pd(domain);
+	const struct scmi_power_ops *ops = pd->handle->power_ops;
+
+	if (power_on)
+		state = SCMI_POWER_STATE_GENERIC_ON;
+	else
+		state = SCMI_POWER_STATE_GENERIC_OFF;
+
+	ret = ops->state_set(pd->handle, pd->domain, state);
+	if (!ret)
+		ret = ops->state_get(pd->handle, pd->domain, &ret_state);
+	if (!ret && state != ret_state)
+		return -EIO;
+
+	return ret;
+}
+
+static int scmi_pd_power_on(struct generic_pm_domain *domain)
+{
+	return scmi_pd_power(domain, true);
+}
+
+static int scmi_pd_power_off(struct generic_pm_domain *domain)
+{
+	return scmi_pd_power(domain, false);
+}
+
+static int scmi_pm_domain_probe(struct scmi_device *sdev)
+{
+	int num_domains, i;
+	struct device *dev = &sdev->dev;
+	struct device_node *np = dev->of_node;
+	struct scmi_pm_domain *scmi_pd;
+	struct genpd_onecell_data *scmi_pd_data;
+	struct generic_pm_domain **domains;
+	const struct scmi_handle *handle = sdev->handle;
+
+	if (!handle || !handle->power_ops)
+		return -ENODEV;
+
+	num_domains = handle->power_ops->num_domains_get(handle);
+	if (num_domains < 0) {
+		dev_err(dev, "number of domains not found\n");
+		return num_domains;
+	}
+
+	scmi_pd = devm_kcalloc(dev, num_domains, sizeof(*scmi_pd), GFP_KERNEL);
+	if (!scmi_pd)
+		return -ENOMEM;
+
+	scmi_pd_data = devm_kzalloc(dev, sizeof(*scmi_pd_data), GFP_KERNEL);
+	if (!scmi_pd_data)
+		return -ENOMEM;
+
+	domains = devm_kcalloc(dev, num_domains, sizeof(*domains), GFP_KERNEL);
+	if (!domains)
+		return -ENOMEM;
+
+	for (i = 0; i < num_domains; i++, scmi_pd++) {
+		u32 state;
+
+		domains[i] = &scmi_pd->genpd;
+
+		scmi_pd->domain = i;
+		scmi_pd->handle = handle;
+		scmi_pd->name = handle->power_ops->name_get(handle, i);
+		scmi_pd->genpd.name = scmi_pd->name;
+		scmi_pd->genpd.power_off = scmi_pd_power_off;
+		scmi_pd->genpd.power_on = scmi_pd_power_on;
+
+		if (handle->power_ops->state_get(handle, i, &state)) {
+			dev_warn(dev, "failed to get state for domain %d\n", i);
+			continue;
+		}
+
+		pm_genpd_init(&scmi_pd->genpd, NULL,
+			      state == SCMI_POWER_STATE_GENERIC_OFF);
+	}
+
+	scmi_pd_data->domains = domains;
+	scmi_pd_data->num_domains = num_domains;
+
+	of_genpd_add_provider_onecell(np, scmi_pd_data);
+
+	return 0;
+}
+
+static const struct scmi_device_id scmi_id_table[] = {
+	{ SCMI_PROTOCOL_POWER },
+	{ },
+};
+MODULE_DEVICE_TABLE(scmi, scmi_id_table);
+
+static struct scmi_driver scmi_power_domain_driver = {
+	.name = "scmi-power-domain",
+	.probe = scmi_pm_domain_probe,
+	.id_table = scmi_id_table,
+};
+module_scmi_driver(scmi_power_domain_driver);
+
+MODULE_AUTHOR("Sudeep Holla <sudeep.holla@arm.com>");
+MODULE_DESCRIPTION("ARM SCMI power domain driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/firmware/arm_scmi/sensors.c b/drivers/firmware/arm_scmi/sensors.c
new file mode 100644
index 0000000..27f2092
--- /dev/null
+++ b/drivers/firmware/arm_scmi/sensors.c
@@ -0,0 +1,291 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * System Control and Management Interface (SCMI) Sensor Protocol
+ *
+ * Copyright (C) 2018 ARM Ltd.
+ */
+
+#include "common.h"
+
+enum scmi_sensor_protocol_cmd {
+	SENSOR_DESCRIPTION_GET = 0x3,
+	SENSOR_CONFIG_SET = 0x4,
+	SENSOR_TRIP_POINT_SET = 0x5,
+	SENSOR_READING_GET = 0x6,
+};
+
+struct scmi_msg_resp_sensor_attributes {
+	__le16 num_sensors;
+	u8 max_requests;
+	u8 reserved;
+	__le32 reg_addr_low;
+	__le32 reg_addr_high;
+	__le32 reg_size;
+};
+
+struct scmi_msg_resp_sensor_description {
+	__le16 num_returned;
+	__le16 num_remaining;
+	struct {
+		__le32 id;
+		__le32 attributes_low;
+#define SUPPORTS_ASYNC_READ(x)	((x) & BIT(31))
+#define NUM_TRIP_POINTS(x)	(((x) >> 4) & 0xff)
+		__le32 attributes_high;
+#define SENSOR_TYPE(x)		((x) & 0xff)
+#define SENSOR_SCALE(x)		(((x) >> 11) & 0x3f)
+#define SENSOR_UPDATE_SCALE(x)	(((x) >> 22) & 0x1f)
+#define SENSOR_UPDATE_BASE(x)	(((x) >> 27) & 0x1f)
+		    u8 name[SCMI_MAX_STR_SIZE];
+	} desc[0];
+};
+
+struct scmi_msg_set_sensor_config {
+	__le32 id;
+	__le32 event_control;
+};
+
+struct scmi_msg_set_sensor_trip_point {
+	__le32 id;
+	__le32 event_control;
+#define SENSOR_TP_EVENT_MASK	(0x3)
+#define SENSOR_TP_DISABLED	0x0
+#define SENSOR_TP_POSITIVE	0x1
+#define SENSOR_TP_NEGATIVE	0x2
+#define SENSOR_TP_BOTH		0x3
+#define SENSOR_TP_ID(x)		(((x) & 0xff) << 4)
+	__le32 value_low;
+	__le32 value_high;
+};
+
+struct scmi_msg_sensor_reading_get {
+	__le32 id;
+	__le32 flags;
+#define SENSOR_READ_ASYNC	BIT(0)
+};
+
+struct sensors_info {
+	int num_sensors;
+	int max_requests;
+	u64 reg_addr;
+	u32 reg_size;
+	struct scmi_sensor_info *sensors;
+};
+
+static int scmi_sensor_attributes_get(const struct scmi_handle *handle,
+				      struct sensors_info *si)
+{
+	int ret;
+	struct scmi_xfer *t;
+	struct scmi_msg_resp_sensor_attributes *attr;
+
+	ret = scmi_xfer_get_init(handle, PROTOCOL_ATTRIBUTES,
+				 SCMI_PROTOCOL_SENSOR, 0, sizeof(*attr), &t);
+	if (ret)
+		return ret;
+
+	attr = t->rx.buf;
+
+	ret = scmi_do_xfer(handle, t);
+	if (!ret) {
+		si->num_sensors = le16_to_cpu(attr->num_sensors);
+		si->max_requests = attr->max_requests;
+		si->reg_addr = le32_to_cpu(attr->reg_addr_low) |
+				(u64)le32_to_cpu(attr->reg_addr_high) << 32;
+		si->reg_size = le32_to_cpu(attr->reg_size);
+	}
+
+	scmi_xfer_put(handle, t);
+	return ret;
+}
+
+static int scmi_sensor_description_get(const struct scmi_handle *handle,
+				       struct sensors_info *si)
+{
+	int ret, cnt;
+	u32 desc_index = 0;
+	u16 num_returned, num_remaining;
+	struct scmi_xfer *t;
+	struct scmi_msg_resp_sensor_description *buf;
+
+	ret = scmi_xfer_get_init(handle, SENSOR_DESCRIPTION_GET,
+				 SCMI_PROTOCOL_SENSOR, sizeof(__le32), 0, &t);
+	if (ret)
+		return ret;
+
+	buf = t->rx.buf;
+
+	do {
+		/* Set the number of sensors to be skipped/already read */
+		*(__le32 *)t->tx.buf = cpu_to_le32(desc_index);
+
+		ret = scmi_do_xfer(handle, t);
+		if (ret)
+			break;
+
+		num_returned = le16_to_cpu(buf->num_returned);
+		num_remaining = le16_to_cpu(buf->num_remaining);
+
+		if (desc_index + num_returned > si->num_sensors) {
+			dev_err(handle->dev, "No. of sensors can't exceed %d",
+				si->num_sensors);
+			break;
+		}
+
+		for (cnt = 0; cnt < num_returned; cnt++) {
+			u32 attrh;
+			struct scmi_sensor_info *s;
+
+			attrh = le32_to_cpu(buf->desc[cnt].attributes_high);
+			s = &si->sensors[desc_index + cnt];
+			s->id = le32_to_cpu(buf->desc[cnt].id);
+			s->type = SENSOR_TYPE(attrh);
+			memcpy(s->name, buf->desc[cnt].name, SCMI_MAX_STR_SIZE);
+		}
+
+		desc_index += num_returned;
+		/*
+		 * check for both returned and remaining to avoid infinite
+		 * loop due to buggy firmware
+		 */
+	} while (num_returned && num_remaining);
+
+	scmi_xfer_put(handle, t);
+	return ret;
+}
+
+static int
+scmi_sensor_configuration_set(const struct scmi_handle *handle, u32 sensor_id)
+{
+	int ret;
+	u32 evt_cntl = BIT(0);
+	struct scmi_xfer *t;
+	struct scmi_msg_set_sensor_config *cfg;
+
+	ret = scmi_xfer_get_init(handle, SENSOR_CONFIG_SET,
+				 SCMI_PROTOCOL_SENSOR, sizeof(*cfg), 0, &t);
+	if (ret)
+		return ret;
+
+	cfg = t->tx.buf;
+	cfg->id = cpu_to_le32(sensor_id);
+	cfg->event_control = cpu_to_le32(evt_cntl);
+
+	ret = scmi_do_xfer(handle, t);
+
+	scmi_xfer_put(handle, t);
+	return ret;
+}
+
+static int scmi_sensor_trip_point_set(const struct scmi_handle *handle,
+				      u32 sensor_id, u8 trip_id, u64 trip_value)
+{
+	int ret;
+	u32 evt_cntl = SENSOR_TP_BOTH;
+	struct scmi_xfer *t;
+	struct scmi_msg_set_sensor_trip_point *trip;
+
+	ret = scmi_xfer_get_init(handle, SENSOR_TRIP_POINT_SET,
+				 SCMI_PROTOCOL_SENSOR, sizeof(*trip), 0, &t);
+	if (ret)
+		return ret;
+
+	trip = t->tx.buf;
+	trip->id = cpu_to_le32(sensor_id);
+	trip->event_control = cpu_to_le32(evt_cntl | SENSOR_TP_ID(trip_id));
+	trip->value_low = cpu_to_le32(trip_value & 0xffffffff);
+	trip->value_high = cpu_to_le32(trip_value >> 32);
+
+	ret = scmi_do_xfer(handle, t);
+
+	scmi_xfer_put(handle, t);
+	return ret;
+}
+
+static int scmi_sensor_reading_get(const struct scmi_handle *handle,
+				   u32 sensor_id, bool async, u64 *value)
+{
+	int ret;
+	struct scmi_xfer *t;
+	struct scmi_msg_sensor_reading_get *sensor;
+
+	ret = scmi_xfer_get_init(handle, SENSOR_READING_GET,
+				 SCMI_PROTOCOL_SENSOR, sizeof(*sensor),
+				 sizeof(u64), &t);
+	if (ret)
+		return ret;
+
+	sensor = t->tx.buf;
+	sensor->id = cpu_to_le32(sensor_id);
+	sensor->flags = cpu_to_le32(async ? SENSOR_READ_ASYNC : 0);
+
+	ret = scmi_do_xfer(handle, t);
+	if (!ret) {
+		__le32 *pval = t->rx.buf;
+
+		*value = le32_to_cpu(*pval);
+		*value |= (u64)le32_to_cpu(*(pval + 1)) << 32;
+	}
+
+	scmi_xfer_put(handle, t);
+	return ret;
+}
+
+static const struct scmi_sensor_info *
+scmi_sensor_info_get(const struct scmi_handle *handle, u32 sensor_id)
+{
+	struct sensors_info *si = handle->sensor_priv;
+
+	return si->sensors + sensor_id;
+}
+
+static int scmi_sensor_count_get(const struct scmi_handle *handle)
+{
+	struct sensors_info *si = handle->sensor_priv;
+
+	return si->num_sensors;
+}
+
+static struct scmi_sensor_ops sensor_ops = {
+	.count_get = scmi_sensor_count_get,
+	.info_get = scmi_sensor_info_get,
+	.configuration_set = scmi_sensor_configuration_set,
+	.trip_point_set = scmi_sensor_trip_point_set,
+	.reading_get = scmi_sensor_reading_get,
+};
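+
+/*
+ * Illustrative only: a client (e.g. a hwmon driver) would use these ops
+ * roughly as follows:
+ *
+ *	u64 value;
+ *	const struct scmi_sensor_info *s;
+ *
+ *	s = handle->sensor_ops->info_get(handle, sensor_id);
+ *	ret = handle->sensor_ops->reading_get(handle, s->id, false, &value);
+ */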
+
+static int scmi_sensors_protocol_init(struct scmi_handle *handle)
+{
+	u32 version;
+	struct sensors_info *sinfo;
+
+	scmi_version_get(handle, SCMI_PROTOCOL_SENSOR, &version);
+
+	dev_dbg(handle->dev, "Sensor Version %d.%d\n",
+		PROTOCOL_REV_MAJOR(version), PROTOCOL_REV_MINOR(version));
+
+	sinfo = devm_kzalloc(handle->dev, sizeof(*sinfo), GFP_KERNEL);
+	if (!sinfo)
+		return -ENOMEM;
+
+	scmi_sensor_attributes_get(handle, sinfo);
+
+	sinfo->sensors = devm_kcalloc(handle->dev, sinfo->num_sensors,
+				      sizeof(*sinfo->sensors), GFP_KERNEL);
+	if (!sinfo->sensors)
+		return -ENOMEM;
+
+	scmi_sensor_description_get(handle, sinfo);
+
+	handle->sensor_ops = &sensor_ops;
+	handle->sensor_priv = sinfo;
+
+	return 0;
+}
+
+static int __init scmi_sensors_init(void)
+{
+	return scmi_protocol_register(SCMI_PROTOCOL_SENSOR,
+				      &scmi_sensors_protocol_init);
+}
+subsys_initcall(scmi_sensors_init);
diff --git a/drivers/firmware/arm_scpi.c b/drivers/firmware/arm_scpi.c
new file mode 100644
index 0000000..c7d06a3
--- /dev/null
+++ b/drivers/firmware/arm_scpi.c
@@ -0,0 +1,1052 @@
+/*
+ * System Control and Power Interface (SCPI) Message Protocol driver
+ *
+ * The SCPI Message Protocol is used between the System Control
+ * Processor (SCP) and the Application Processors (AP). The Message
+ * Handling Unit (MHU) provides a mechanism for inter-processor
+ * communication between the SCP's Cortex-M3 and the AP.
+ *
+ * SCP offers control and management of the core/cluster power states,
+ * various power domain DVFS including the core/cluster, certain system
+ * clocks configuration, thermal sensors and many others.
+ *
+ * Copyright (C) 2015 ARM Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/bitmap.h>
+#include <linux/bitfield.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/export.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/mailbox_client.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/of_platform.h>
+#include <linux/printk.h>
+#include <linux/pm_opp.h>
+#include <linux/scpi_protocol.h>
+#include <linux/slab.h>
+#include <linux/sort.h>
+#include <linux/spinlock.h>
+
+#define CMD_ID_MASK		GENMASK(6, 0)
+#define CMD_TOKEN_ID_MASK	GENMASK(15, 8)
+#define CMD_DATA_SIZE_MASK	GENMASK(24, 16)
+#define CMD_LEGACY_DATA_SIZE_MASK	GENMASK(28, 20)
+#define PACK_SCPI_CMD(cmd_id, tx_sz)		\
+	(FIELD_PREP(CMD_ID_MASK, cmd_id) |	\
+	FIELD_PREP(CMD_DATA_SIZE_MASK, tx_sz))
+#define PACK_LEGACY_SCPI_CMD(cmd_id, tx_sz)	\
+	(FIELD_PREP(CMD_ID_MASK, cmd_id) |	\
+	FIELD_PREP(CMD_LEGACY_DATA_SIZE_MASK, tx_sz))
+
+#define CMD_SIZE(cmd)	FIELD_GET(CMD_DATA_SIZE_MASK, cmd)
+#define CMD_UNIQ_MASK	(CMD_TOKEN_ID_MASK | CMD_ID_MASK)
+#define CMD_XTRACT_UNIQ(cmd)	((cmd) & CMD_UNIQ_MASK)
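+
+/*
+ * Worked example: PACK_SCPI_CMD(0x10, 4) yields 0x00040010, i.e. command
+ * id 0x10 in bits [6:0] and a 4-byte payload size in bits [24:16]; the
+ * token in bits [15:8] is OR-ed in later by scpi_tx_prepare().
+ */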
+
+#define SCPI_SLOT		0
+
+#define MAX_DVFS_DOMAINS	8
+#define MAX_DVFS_OPPS		16
+
+#define PROTO_REV_MAJOR_MASK	GENMASK(31, 16)
+#define PROTO_REV_MINOR_MASK	GENMASK(15, 0)
+
+#define FW_REV_MAJOR_MASK	GENMASK(31, 24)
+#define FW_REV_MINOR_MASK	GENMASK(23, 16)
+#define FW_REV_PATCH_MASK	GENMASK(15, 0)
+
+#define MAX_RX_TIMEOUT		(msecs_to_jiffies(30))
+
+enum scpi_error_codes {
+	SCPI_SUCCESS = 0, /* Success */
+	SCPI_ERR_PARAM = 1, /* Invalid parameter(s) */
+	SCPI_ERR_ALIGN = 2, /* Invalid alignment */
+	SCPI_ERR_SIZE = 3, /* Invalid size */
+	SCPI_ERR_HANDLER = 4, /* Invalid handler/callback */
+	SCPI_ERR_ACCESS = 5, /* Invalid access/permission denied */
+	SCPI_ERR_RANGE = 6, /* Value out of range */
+	SCPI_ERR_TIMEOUT = 7, /* Timeout has occurred */
+	SCPI_ERR_NOMEM = 8, /* Invalid memory area or pointer */
+	SCPI_ERR_PWRSTATE = 9, /* Invalid power state */
+	SCPI_ERR_SUPPORT = 10, /* Not supported or disabled */
+	SCPI_ERR_DEVICE = 11, /* Device error */
+	SCPI_ERR_BUSY = 12, /* Device busy */
+	SCPI_ERR_MAX
+};
+
+/* SCPI Standard commands */
+enum scpi_std_cmd {
+	SCPI_CMD_INVALID		= 0x00,
+	SCPI_CMD_SCPI_READY		= 0x01,
+	SCPI_CMD_SCPI_CAPABILITIES	= 0x02,
+	SCPI_CMD_SET_CSS_PWR_STATE	= 0x03,
+	SCPI_CMD_GET_CSS_PWR_STATE	= 0x04,
+	SCPI_CMD_SET_SYS_PWR_STATE	= 0x05,
+	SCPI_CMD_SET_CPU_TIMER		= 0x06,
+	SCPI_CMD_CANCEL_CPU_TIMER	= 0x07,
+	SCPI_CMD_DVFS_CAPABILITIES	= 0x08,
+	SCPI_CMD_GET_DVFS_INFO		= 0x09,
+	SCPI_CMD_SET_DVFS		= 0x0a,
+	SCPI_CMD_GET_DVFS		= 0x0b,
+	SCPI_CMD_GET_DVFS_STAT		= 0x0c,
+	SCPI_CMD_CLOCK_CAPABILITIES	= 0x0d,
+	SCPI_CMD_GET_CLOCK_INFO		= 0x0e,
+	SCPI_CMD_SET_CLOCK_VALUE	= 0x0f,
+	SCPI_CMD_GET_CLOCK_VALUE	= 0x10,
+	SCPI_CMD_PSU_CAPABILITIES	= 0x11,
+	SCPI_CMD_GET_PSU_INFO		= 0x12,
+	SCPI_CMD_SET_PSU		= 0x13,
+	SCPI_CMD_GET_PSU		= 0x14,
+	SCPI_CMD_SENSOR_CAPABILITIES	= 0x15,
+	SCPI_CMD_SENSOR_INFO		= 0x16,
+	SCPI_CMD_SENSOR_VALUE		= 0x17,
+	SCPI_CMD_SENSOR_CFG_PERIODIC	= 0x18,
+	SCPI_CMD_SENSOR_CFG_BOUNDS	= 0x19,
+	SCPI_CMD_SENSOR_ASYNC_VALUE	= 0x1a,
+	SCPI_CMD_SET_DEVICE_PWR_STATE	= 0x1b,
+	SCPI_CMD_GET_DEVICE_PWR_STATE	= 0x1c,
+	SCPI_CMD_COUNT
+};
+
+/* SCPI Legacy Commands */
+enum legacy_scpi_std_cmd {
+	LEGACY_SCPI_CMD_INVALID			= 0x00,
+	LEGACY_SCPI_CMD_SCPI_READY		= 0x01,
+	LEGACY_SCPI_CMD_SCPI_CAPABILITIES	= 0x02,
+	LEGACY_SCPI_CMD_EVENT			= 0x03,
+	LEGACY_SCPI_CMD_SET_CSS_PWR_STATE	= 0x04,
+	LEGACY_SCPI_CMD_GET_CSS_PWR_STATE	= 0x05,
+	LEGACY_SCPI_CMD_CFG_PWR_STATE_STAT	= 0x06,
+	LEGACY_SCPI_CMD_GET_PWR_STATE_STAT	= 0x07,
+	LEGACY_SCPI_CMD_SYS_PWR_STATE		= 0x08,
+	LEGACY_SCPI_CMD_L2_READY		= 0x09,
+	LEGACY_SCPI_CMD_SET_AP_TIMER		= 0x0a,
+	LEGACY_SCPI_CMD_CANCEL_AP_TIME		= 0x0b,
+	LEGACY_SCPI_CMD_DVFS_CAPABILITIES	= 0x0c,
+	LEGACY_SCPI_CMD_GET_DVFS_INFO		= 0x0d,
+	LEGACY_SCPI_CMD_SET_DVFS		= 0x0e,
+	LEGACY_SCPI_CMD_GET_DVFS		= 0x0f,
+	LEGACY_SCPI_CMD_GET_DVFS_STAT		= 0x10,
+	LEGACY_SCPI_CMD_SET_RTC			= 0x11,
+	LEGACY_SCPI_CMD_GET_RTC			= 0x12,
+	LEGACY_SCPI_CMD_CLOCK_CAPABILITIES	= 0x13,
+	LEGACY_SCPI_CMD_SET_CLOCK_INDEX		= 0x14,
+	LEGACY_SCPI_CMD_SET_CLOCK_VALUE		= 0x15,
+	LEGACY_SCPI_CMD_GET_CLOCK_VALUE		= 0x16,
+	LEGACY_SCPI_CMD_PSU_CAPABILITIES	= 0x17,
+	LEGACY_SCPI_CMD_SET_PSU			= 0x18,
+	LEGACY_SCPI_CMD_GET_PSU			= 0x19,
+	LEGACY_SCPI_CMD_SENSOR_CAPABILITIES	= 0x1a,
+	LEGACY_SCPI_CMD_SENSOR_INFO		= 0x1b,
+	LEGACY_SCPI_CMD_SENSOR_VALUE		= 0x1c,
+	LEGACY_SCPI_CMD_SENSOR_CFG_PERIODIC	= 0x1d,
+	LEGACY_SCPI_CMD_SENSOR_CFG_BOUNDS	= 0x1e,
+	LEGACY_SCPI_CMD_SENSOR_ASYNC_VALUE	= 0x1f,
+	LEGACY_SCPI_CMD_COUNT
+};
+
+/* List all commands that are required to go through the high priority link */
+static int legacy_hpriority_cmds[] = {
+	LEGACY_SCPI_CMD_GET_CSS_PWR_STATE,
+	LEGACY_SCPI_CMD_CFG_PWR_STATE_STAT,
+	LEGACY_SCPI_CMD_GET_PWR_STATE_STAT,
+	LEGACY_SCPI_CMD_SET_DVFS,
+	LEGACY_SCPI_CMD_GET_DVFS,
+	LEGACY_SCPI_CMD_SET_RTC,
+	LEGACY_SCPI_CMD_GET_RTC,
+	LEGACY_SCPI_CMD_SET_CLOCK_INDEX,
+	LEGACY_SCPI_CMD_SET_CLOCK_VALUE,
+	LEGACY_SCPI_CMD_GET_CLOCK_VALUE,
+	LEGACY_SCPI_CMD_SET_PSU,
+	LEGACY_SCPI_CMD_GET_PSU,
+	LEGACY_SCPI_CMD_SENSOR_CFG_PERIODIC,
+	LEGACY_SCPI_CMD_SENSOR_CFG_BOUNDS,
+};
+
+/* List all commands used by this driver, used as indexes */
+enum scpi_drv_cmds {
+	CMD_SCPI_CAPABILITIES = 0,
+	CMD_GET_CLOCK_INFO,
+	CMD_GET_CLOCK_VALUE,
+	CMD_SET_CLOCK_VALUE,
+	CMD_GET_DVFS,
+	CMD_SET_DVFS,
+	CMD_GET_DVFS_INFO,
+	CMD_SENSOR_CAPABILITIES,
+	CMD_SENSOR_INFO,
+	CMD_SENSOR_VALUE,
+	CMD_SET_DEVICE_PWR_STATE,
+	CMD_GET_DEVICE_PWR_STATE,
+	CMD_MAX_COUNT,
+};
+
+static int scpi_std_commands[CMD_MAX_COUNT] = {
+	SCPI_CMD_SCPI_CAPABILITIES,
+	SCPI_CMD_GET_CLOCK_INFO,
+	SCPI_CMD_GET_CLOCK_VALUE,
+	SCPI_CMD_SET_CLOCK_VALUE,
+	SCPI_CMD_GET_DVFS,
+	SCPI_CMD_SET_DVFS,
+	SCPI_CMD_GET_DVFS_INFO,
+	SCPI_CMD_SENSOR_CAPABILITIES,
+	SCPI_CMD_SENSOR_INFO,
+	SCPI_CMD_SENSOR_VALUE,
+	SCPI_CMD_SET_DEVICE_PWR_STATE,
+	SCPI_CMD_GET_DEVICE_PWR_STATE,
+};
+
+static int scpi_legacy_commands[CMD_MAX_COUNT] = {
+	LEGACY_SCPI_CMD_SCPI_CAPABILITIES,
+	-1, /* GET_CLOCK_INFO */
+	LEGACY_SCPI_CMD_GET_CLOCK_VALUE,
+	LEGACY_SCPI_CMD_SET_CLOCK_VALUE,
+	LEGACY_SCPI_CMD_GET_DVFS,
+	LEGACY_SCPI_CMD_SET_DVFS,
+	LEGACY_SCPI_CMD_GET_DVFS_INFO,
+	LEGACY_SCPI_CMD_SENSOR_CAPABILITIES,
+	LEGACY_SCPI_CMD_SENSOR_INFO,
+	LEGACY_SCPI_CMD_SENSOR_VALUE,
+	-1, /* SET_DEVICE_PWR_STATE */
+	-1, /* GET_DEVICE_PWR_STATE */
+};
+
+struct scpi_xfer {
+	u32 slot; /* has to be first element */
+	u32 cmd;
+	u32 status;
+	const void *tx_buf;
+	void *rx_buf;
+	unsigned int tx_len;
+	unsigned int rx_len;
+	struct list_head node;
+	struct completion done;
+};
+
+struct scpi_chan {
+	struct mbox_client cl;
+	struct mbox_chan *chan;
+	void __iomem *tx_payload;
+	void __iomem *rx_payload;
+	struct list_head rx_pending;
+	struct list_head xfers_list;
+	struct scpi_xfer *xfers;
+	spinlock_t rx_lock; /* locking for the rx pending list */
+	struct mutex xfers_lock;
+	u8 token;
+};
+
+struct scpi_drvinfo {
+	u32 protocol_version;
+	u32 firmware_version;
+	bool is_legacy;
+	int num_chans;
+	int *commands;
+	DECLARE_BITMAP(cmd_priority, LEGACY_SCPI_CMD_COUNT);
+	atomic_t next_chan;
+	struct scpi_ops *scpi_ops;
+	struct scpi_chan *channels;
+	struct scpi_dvfs_info *dvfs[MAX_DVFS_DOMAINS];
+};
+
+/*
+ * The SCP firmware only executes in little-endian mode, so any buffers
+ * shared through SCPI should have their contents converted to little-endian
+ */
+struct scpi_shared_mem {
+	__le32 command;
+	__le32 status;
+	u8 payload[0];
+} __packed;
+
+struct legacy_scpi_shared_mem {
+	__le32 status;
+	u8 payload[0];
+} __packed;
+
+struct scp_capabilities {
+	__le32 protocol_version;
+	__le32 event_version;
+	__le32 platform_version;
+	__le32 commands[4];
+} __packed;
+
+struct clk_get_info {
+	__le16 id;
+	__le16 flags;
+	__le32 min_rate;
+	__le32 max_rate;
+	u8 name[20];
+} __packed;
+
+struct clk_set_value {
+	__le16 id;
+	__le16 reserved;
+	__le32 rate;
+} __packed;
+
+struct legacy_clk_set_value {
+	__le32 rate;
+	__le16 id;
+	__le16 reserved;
+} __packed;
+
+struct dvfs_info {
+	u8 domain;
+	u8 opp_count;
+	__le16 latency;
+	struct {
+		__le32 freq;
+		__le32 m_volt;
+	} opps[MAX_DVFS_OPPS];
+} __packed;
+
+struct dvfs_set {
+	u8 domain;
+	u8 index;
+} __packed;
+
+struct _scpi_sensor_info {
+	__le16 sensor_id;
+	u8 class;
+	u8 trigger_type;
+	char name[20];
+};
+
+struct dev_pstate_set {
+	__le16 dev_id;
+	u8 pstate;
+} __packed;
+
+static struct scpi_drvinfo *scpi_info;
+
+static int scpi_linux_errmap[SCPI_ERR_MAX] = {
+	/* better than a switch-case as long as the error codes are contiguous */
+	0, /* SCPI_SUCCESS */
+	-EINVAL, /* SCPI_ERR_PARAM */
+	-ENOEXEC, /* SCPI_ERR_ALIGN */
+	-EMSGSIZE, /* SCPI_ERR_SIZE */
+	-EINVAL, /* SCPI_ERR_HANDLER */
+	-EACCES, /* SCPI_ERR_ACCESS */
+	-ERANGE, /* SCPI_ERR_RANGE */
+	-ETIMEDOUT, /* SCPI_ERR_TIMEOUT */
+	-ENOMEM, /* SCPI_ERR_NOMEM */
+	-EINVAL, /* SCPI_ERR_PWRSTATE */
+	-EOPNOTSUPP, /* SCPI_ERR_SUPPORT */
+	-EIO, /* SCPI_ERR_DEVICE */
+	-EBUSY, /* SCPI_ERR_BUSY */
+};
+
+static inline int scpi_to_linux_errno(int errno)
+{
+	if (errno >= SCPI_SUCCESS && errno < SCPI_ERR_MAX)
+		return scpi_linux_errmap[errno];
+	return -EIO;
+}
+
+static void scpi_process_cmd(struct scpi_chan *ch, u32 cmd)
+{
+	unsigned long flags;
+	struct scpi_xfer *t, *match = NULL;
+
+	spin_lock_irqsave(&ch->rx_lock, flags);
+	if (list_empty(&ch->rx_pending)) {
+		spin_unlock_irqrestore(&ch->rx_lock, flags);
+		return;
+	}
+
+	/*
+	 * The command type is not echoed back by the SCP firmware in legacy
+	 * mode, so treat the head of the pending RX list as the matching
+	 * command when the list is not empty. In TX-only mode, the list
+	 * would be empty.
+	 */
+	if (scpi_info->is_legacy) {
+		match = list_first_entry(&ch->rx_pending, struct scpi_xfer,
+					 node);
+		list_del(&match->node);
+	} else {
+		list_for_each_entry(t, &ch->rx_pending, node)
+			if (CMD_XTRACT_UNIQ(t->cmd) == CMD_XTRACT_UNIQ(cmd)) {
+				list_del(&t->node);
+				match = t;
+				break;
+			}
+	}
+	/* check if wait_for_completion is in progress or timed-out */
+	if (match && !completion_done(&match->done)) {
+		unsigned int len;
+
+		if (scpi_info->is_legacy) {
+			struct legacy_scpi_shared_mem __iomem *mem =
+							ch->rx_payload;
+
+			/* RX Length is not replied by the legacy Firmware */
+			len = match->rx_len;
+
+			match->status = ioread32(&mem->status);
+			memcpy_fromio(match->rx_buf, mem->payload, len);
+		} else {
+			struct scpi_shared_mem __iomem *mem = ch->rx_payload;
+
+			len = min_t(unsigned int, match->rx_len, CMD_SIZE(cmd));
+
+			match->status = ioread32(&mem->status);
+			memcpy_fromio(match->rx_buf, mem->payload, len);
+		}
+
+		if (match->rx_len > len)
+			memset(match->rx_buf + len, 0, match->rx_len - len);
+		complete(&match->done);
+	}
+	spin_unlock_irqrestore(&ch->rx_lock, flags);
+}
+
+static void scpi_handle_remote_msg(struct mbox_client *c, void *msg)
+{
+	struct scpi_chan *ch = container_of(c, struct scpi_chan, cl);
+	struct scpi_shared_mem __iomem *mem = ch->rx_payload;
+	u32 cmd = 0;
+
+	if (!scpi_info->is_legacy)
+		cmd = ioread32(&mem->command);
+
+	scpi_process_cmd(ch, cmd);
+}
+
+static void scpi_tx_prepare(struct mbox_client *c, void *msg)
+{
+	unsigned long flags;
+	struct scpi_xfer *t = msg;
+	struct scpi_chan *ch = container_of(c, struct scpi_chan, cl);
+	struct scpi_shared_mem __iomem *mem = ch->tx_payload;
+
+	if (t->tx_buf) {
+		if (scpi_info->is_legacy)
+			memcpy_toio(ch->tx_payload, t->tx_buf, t->tx_len);
+		else
+			memcpy_toio(mem->payload, t->tx_buf, t->tx_len);
+	}
+
+	if (t->rx_buf) {
+		if (!(++ch->token))
+			++ch->token;
+		t->cmd |= FIELD_PREP(CMD_TOKEN_ID_MASK, ch->token);
+		spin_lock_irqsave(&ch->rx_lock, flags);
+		list_add_tail(&t->node, &ch->rx_pending);
+		spin_unlock_irqrestore(&ch->rx_lock, flags);
+	}
+
+	if (!scpi_info->is_legacy)
+		iowrite32(t->cmd, &mem->command);
+}
+
+static struct scpi_xfer *get_scpi_xfer(struct scpi_chan *ch)
+{
+	struct scpi_xfer *t;
+
+	mutex_lock(&ch->xfers_lock);
+	if (list_empty(&ch->xfers_list)) {
+		mutex_unlock(&ch->xfers_lock);
+		return NULL;
+	}
+	t = list_first_entry(&ch->xfers_list, struct scpi_xfer, node);
+	list_del(&t->node);
+	mutex_unlock(&ch->xfers_lock);
+	return t;
+}
+
+static void put_scpi_xfer(struct scpi_xfer *t, struct scpi_chan *ch)
+{
+	mutex_lock(&ch->xfers_lock);
+	list_add_tail(&t->node, &ch->xfers_list);
+	mutex_unlock(&ch->xfers_lock);
+}
+
+static int scpi_send_message(u8 idx, void *tx_buf, unsigned int tx_len,
+			     void *rx_buf, unsigned int rx_len)
+{
+	int ret;
+	u8 chan;
+	u8 cmd;
+	struct scpi_xfer *msg;
+	struct scpi_chan *scpi_chan;
+
+	if (scpi_info->commands[idx] < 0)
+		return -EOPNOTSUPP;
+
+	cmd = scpi_info->commands[idx];
+
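+	/* Legacy: high-priority commands use channel 1; else round-robin */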
+	if (scpi_info->is_legacy)
+		chan = test_bit(cmd, scpi_info->cmd_priority) ? 1 : 0;
+	else
+		chan = atomic_inc_return(&scpi_info->next_chan) %
+			scpi_info->num_chans;
+	scpi_chan = scpi_info->channels + chan;
+
+	msg = get_scpi_xfer(scpi_chan);
+	if (!msg)
+		return -ENOMEM;
+
+	if (scpi_info->is_legacy) {
+		msg->cmd = PACK_LEGACY_SCPI_CMD(cmd, tx_len);
+		msg->slot = msg->cmd;
+	} else {
+		msg->slot = BIT(SCPI_SLOT);
+		msg->cmd = PACK_SCPI_CMD(cmd, tx_len);
+	}
+	msg->tx_buf = tx_buf;
+	msg->tx_len = tx_len;
+	msg->rx_buf = rx_buf;
+	msg->rx_len = rx_len;
+	reinit_completion(&msg->done);
+
+	ret = mbox_send_message(scpi_chan->chan, msg);
+	if (ret < 0 || !rx_buf)
+		goto out;
+
+	if (!wait_for_completion_timeout(&msg->done, MAX_RX_TIMEOUT))
+		ret = -ETIMEDOUT;
+	else
+		/* first status word */
+		ret = msg->status;
+out:
+	if (ret < 0 && rx_buf) /* remove entry from the list if timed-out */
+		scpi_process_cmd(scpi_chan, msg->cmd);
+
+	put_scpi_xfer(msg, scpi_chan);
+	/* SCPI error codes > 0, translate them to Linux scale*/
+	return ret > 0 ? scpi_to_linux_errno(ret) : ret;
+}
+
+static u32 scpi_get_version(void)
+{
+	return scpi_info->protocol_version;
+}
+
+static int
+scpi_clk_get_range(u16 clk_id, unsigned long *min, unsigned long *max)
+{
+	int ret;
+	struct clk_get_info clk;
+	__le16 le_clk_id = cpu_to_le16(clk_id);
+
+	ret = scpi_send_message(CMD_GET_CLOCK_INFO, &le_clk_id,
+				sizeof(le_clk_id), &clk, sizeof(clk));
+	if (!ret) {
+		*min = le32_to_cpu(clk.min_rate);
+		*max = le32_to_cpu(clk.max_rate);
+	}
+	return ret;
+}
+
+static unsigned long scpi_clk_get_val(u16 clk_id)
+{
+	int ret;
+	__le32 rate;
+	__le16 le_clk_id = cpu_to_le16(clk_id);
+
+	ret = scpi_send_message(CMD_GET_CLOCK_VALUE, &le_clk_id,
+				sizeof(le_clk_id), &rate, sizeof(rate));
+
+	return ret ? ret : le32_to_cpu(rate);
+}
+
+static int scpi_clk_set_val(u16 clk_id, unsigned long rate)
+{
+	int stat;
+	struct clk_set_value clk = {
+		.id = cpu_to_le16(clk_id),
+		.rate = cpu_to_le32(rate)
+	};
+
+	return scpi_send_message(CMD_SET_CLOCK_VALUE, &clk, sizeof(clk),
+				 &stat, sizeof(stat));
+}
+
+static int legacy_scpi_clk_set_val(u16 clk_id, unsigned long rate)
+{
+	int stat;
+	struct legacy_clk_set_value clk = {
+		.id = cpu_to_le16(clk_id),
+		.rate = cpu_to_le32(rate)
+	};
+
+	return scpi_send_message(CMD_SET_CLOCK_VALUE, &clk, sizeof(clk),
+				 &stat, sizeof(stat));
+}
+
+static int scpi_dvfs_get_idx(u8 domain)
+{
+	int ret;
+	u8 dvfs_idx;
+
+	ret = scpi_send_message(CMD_GET_DVFS, &domain, sizeof(domain),
+				&dvfs_idx, sizeof(dvfs_idx));
+
+	return ret ? ret : dvfs_idx;
+}
+
+static int scpi_dvfs_set_idx(u8 domain, u8 index)
+{
+	int stat;
+	struct dvfs_set dvfs = {domain, index};
+
+	return scpi_send_message(CMD_SET_DVFS, &dvfs, sizeof(dvfs),
+				 &stat, sizeof(stat));
+}
+
+static int opp_cmp_func(const void *opp1, const void *opp2)
+{
+	const struct scpi_opp *t1 = opp1, *t2 = opp2;
+
+	return t1->freq - t2->freq;
+}
+
+static struct scpi_dvfs_info *scpi_dvfs_get_info(u8 domain)
+{
+	struct scpi_dvfs_info *info;
+	struct scpi_opp *opp;
+	struct dvfs_info buf;
+	int ret, i;
+
+	if (domain >= MAX_DVFS_DOMAINS)
+		return ERR_PTR(-EINVAL);
+
+	if (scpi_info->dvfs[domain])	/* data already populated */
+		return scpi_info->dvfs[domain];
+
+	ret = scpi_send_message(CMD_GET_DVFS_INFO, &domain, sizeof(domain),
+				&buf, sizeof(buf));
+	if (ret)
+		return ERR_PTR(ret);
+
+	info = kmalloc(sizeof(*info), GFP_KERNEL);
+	if (!info)
+		return ERR_PTR(-ENOMEM);
+
+	info->count = buf.opp_count;
+	info->latency = le16_to_cpu(buf.latency) * 1000; /* uS to nS */
+
+	info->opps = kcalloc(info->count, sizeof(*opp), GFP_KERNEL);
+	if (!info->opps) {
+		kfree(info);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	for (i = 0, opp = info->opps; i < info->count; i++, opp++) {
+		opp->freq = le32_to_cpu(buf.opps[i].freq);
+		opp->m_volt = le32_to_cpu(buf.opps[i].m_volt);
+	}
+
+	sort(info->opps, info->count, sizeof(*opp), opp_cmp_func, NULL);
+
+	scpi_info->dvfs[domain] = info;
+	return info;
+}
+
+static int scpi_dev_domain_id(struct device *dev)
+{
+	struct of_phandle_args clkspec;
+
+	if (of_parse_phandle_with_args(dev->of_node, "clocks", "#clock-cells",
+				       0, &clkspec))
+		return -EINVAL;
+
+	return clkspec.args[0];
+}
+
+static struct scpi_dvfs_info *scpi_dvfs_info(struct device *dev)
+{
+	int domain = scpi_dev_domain_id(dev);
+
+	if (domain < 0)
+		return ERR_PTR(domain);
+
+	return scpi_dvfs_get_info(domain);
+}
+
+static int scpi_dvfs_get_transition_latency(struct device *dev)
+{
+	struct scpi_dvfs_info *info = scpi_dvfs_info(dev);
+
+	if (IS_ERR(info))
+		return PTR_ERR(info);
+
+	return info->latency;
+}
+
+static int scpi_dvfs_add_opps_to_device(struct device *dev)
+{
+	int idx, ret;
+	struct scpi_opp *opp;
+	struct scpi_dvfs_info *info = scpi_dvfs_info(dev);
+
+	if (IS_ERR(info))
+		return PTR_ERR(info);
+
+	if (!info->opps)
+		return -EIO;
+
+	for (opp = info->opps, idx = 0; idx < info->count; idx++, opp++) {
+		ret = dev_pm_opp_add(dev, opp->freq, opp->m_volt * 1000);
+		if (ret) {
+			dev_warn(dev, "failed to add opp %uHz %umV\n",
+				 opp->freq, opp->m_volt);
+			while (idx-- > 0)
+				dev_pm_opp_remove(dev, (--opp)->freq);
+			return ret;
+		}
+	}
+	return 0;
+}
+
+static int scpi_sensor_get_capability(u16 *sensors)
+{
+	__le16 cap;
+	int ret;
+
+	ret = scpi_send_message(CMD_SENSOR_CAPABILITIES, NULL, 0, &cap,
+				sizeof(cap));
+	if (!ret)
+		*sensors = le16_to_cpu(cap);
+
+	return ret;
+}
+
+static int scpi_sensor_get_info(u16 sensor_id, struct scpi_sensor_info *info)
+{
+	__le16 id = cpu_to_le16(sensor_id);
+	struct _scpi_sensor_info _info;
+	int ret;
+
+	ret = scpi_send_message(CMD_SENSOR_INFO, &id, sizeof(id),
+				&_info, sizeof(_info));
+	if (!ret) {
+		memcpy(info, &_info, sizeof(*info));
+		info->sensor_id = le16_to_cpu(_info.sensor_id);
+	}
+
+	return ret;
+}
+
+static int scpi_sensor_get_value(u16 sensor, u64 *val)
+{
+	__le16 id = cpu_to_le16(sensor);
+	__le64 value;
+	int ret;
+
+	ret = scpi_send_message(CMD_SENSOR_VALUE, &id, sizeof(id),
+				&value, sizeof(value));
+	if (ret)
+		return ret;
+
+	if (scpi_info->is_legacy)
+		/* only 32-bits supported, upper 32 bits can be junk */
+		*val = le32_to_cpup((__le32 *)&value);
+	else
+		*val = le64_to_cpu(value);
+
+	return 0;
+}
+
+static int scpi_device_get_power_state(u16 dev_id)
+{
+	int ret;
+	u8 pstate;
+	__le16 id = cpu_to_le16(dev_id);
+
+	ret = scpi_send_message(CMD_GET_DEVICE_PWR_STATE, &id,
+				sizeof(id), &pstate, sizeof(pstate));
+	return ret ? ret : pstate;
+}
+
+static int scpi_device_set_power_state(u16 dev_id, u8 pstate)
+{
+	int stat;
+	struct dev_pstate_set dev_set = {
+		.dev_id = cpu_to_le16(dev_id),
+		.pstate = pstate,
+	};
+
+	return scpi_send_message(CMD_SET_DEVICE_PWR_STATE, &dev_set,
+				 sizeof(dev_set), &stat, sizeof(stat));
+}
+
+static struct scpi_ops scpi_ops = {
+	.get_version = scpi_get_version,
+	.clk_get_range = scpi_clk_get_range,
+	.clk_get_val = scpi_clk_get_val,
+	.clk_set_val = scpi_clk_set_val,
+	.dvfs_get_idx = scpi_dvfs_get_idx,
+	.dvfs_set_idx = scpi_dvfs_set_idx,
+	.dvfs_get_info = scpi_dvfs_get_info,
+	.device_domain_id = scpi_dev_domain_id,
+	.get_transition_latency = scpi_dvfs_get_transition_latency,
+	.add_opps_to_device = scpi_dvfs_add_opps_to_device,
+	.sensor_get_capability = scpi_sensor_get_capability,
+	.sensor_get_info = scpi_sensor_get_info,
+	.sensor_get_value = scpi_sensor_get_value,
+	.device_get_power_state = scpi_device_get_power_state,
+	.device_set_power_state = scpi_device_set_power_state,
+};
+
+struct scpi_ops *get_scpi_ops(void)
+{
+	return scpi_info ? scpi_info->scpi_ops : NULL;
+}
+EXPORT_SYMBOL_GPL(get_scpi_ops);
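+
+/*
+ * Illustrative consumer sketch (hypothetical driver): clients must cope
+ * with the ops not being available yet, e.g.:
+ *
+ *	struct scpi_ops *ops = get_scpi_ops();
+ *
+ *	if (!ops)
+ *		return -EPROBE_DEFER;
+ *	ret = ops->clk_set_val(clk_id, rate);
+ */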
+
+static int scpi_init_versions(struct scpi_drvinfo *info)
+{
+	int ret;
+	struct scp_capabilities caps;
+
+	ret = scpi_send_message(CMD_SCPI_CAPABILITIES, NULL, 0,
+				&caps, sizeof(caps));
+	if (!ret) {
+		info->protocol_version = le32_to_cpu(caps.protocol_version);
+		info->firmware_version = le32_to_cpu(caps.platform_version);
+	}
+	/* Ignore error if not implemented */
+	if (scpi_info->is_legacy && ret == -EOPNOTSUPP)
+		return 0;
+
+	return ret;
+}
+
+static ssize_t protocol_version_show(struct device *dev,
+				     struct device_attribute *attr, char *buf)
+{
+	struct scpi_drvinfo *scpi_info = dev_get_drvdata(dev);
+
+	return sprintf(buf, "%lu.%lu\n",
+		FIELD_GET(PROTO_REV_MAJOR_MASK, scpi_info->protocol_version),
+		FIELD_GET(PROTO_REV_MINOR_MASK, scpi_info->protocol_version));
+}
+static DEVICE_ATTR_RO(protocol_version);
+
+static ssize_t firmware_version_show(struct device *dev,
+				     struct device_attribute *attr, char *buf)
+{
+	struct scpi_drvinfo *scpi_info = dev_get_drvdata(dev);
+
+	return sprintf(buf, "%lu.%lu.%lu\n",
+		FIELD_GET(FW_REV_MAJOR_MASK, scpi_info->firmware_version),
+		FIELD_GET(FW_REV_MINOR_MASK, scpi_info->firmware_version),
+		FIELD_GET(FW_REV_PATCH_MASK, scpi_info->firmware_version));
+}
+static DEVICE_ATTR_RO(firmware_version);
+
+static struct attribute *versions_attrs[] = {
+	&dev_attr_firmware_version.attr,
+	&dev_attr_protocol_version.attr,
+	NULL,
+};
+ATTRIBUTE_GROUPS(versions);
+
+static void scpi_free_channels(void *data)
+{
+	struct scpi_drvinfo *info = data;
+	int i;
+
+	for (i = 0; i < info->num_chans; i++)
+		mbox_free_channel(info->channels[i].chan);
+}
+
+static int scpi_remove(struct platform_device *pdev)
+{
+	int i;
+	struct scpi_drvinfo *info = platform_get_drvdata(pdev);
+
+	scpi_info = NULL; /* stop exporting SCPI ops through get_scpi_ops */
+
+	for (i = 0; i < MAX_DVFS_DOMAINS && info->dvfs[i]; i++) {
+		kfree(info->dvfs[i]->opps);
+		kfree(info->dvfs[i]);
+	}
+
+	return 0;
+}
+
+#define MAX_SCPI_XFERS		10
+static int scpi_alloc_xfer_list(struct device *dev, struct scpi_chan *ch)
+{
+	int i;
+	struct scpi_xfer *xfers;
+
+	xfers = devm_kcalloc(dev, MAX_SCPI_XFERS, sizeof(*xfers), GFP_KERNEL);
+	if (!xfers)
+		return -ENOMEM;
+
+	ch->xfers = xfers;
+	for (i = 0; i < MAX_SCPI_XFERS; i++, xfers++) {
+		init_completion(&xfers->done);
+		list_add_tail(&xfers->node, &ch->xfers_list);
+	}
+
+	return 0;
+}
+
+static const struct of_device_id legacy_scpi_of_match[] = {
+	{.compatible = "arm,scpi-pre-1.0"},
+	{},
+};
+
+static int scpi_probe(struct platform_device *pdev)
+{
+	int count, idx, ret;
+	struct resource res;
+	struct device *dev = &pdev->dev;
+	struct device_node *np = dev->of_node;
+
+	scpi_info = devm_kzalloc(dev, sizeof(*scpi_info), GFP_KERNEL);
+	if (!scpi_info)
+		return -ENOMEM;
+
+	if (of_match_device(legacy_scpi_of_match, &pdev->dev))
+		scpi_info->is_legacy = true;
+
+	count = of_count_phandle_with_args(np, "mboxes", "#mbox-cells");
+	if (count < 0) {
+		dev_err(dev, "no mboxes property in '%pOF'\n", np);
+		return -ENODEV;
+	}
+
+	scpi_info->channels = devm_kcalloc(dev, count, sizeof(struct scpi_chan),
+					   GFP_KERNEL);
+	if (!scpi_info->channels)
+		return -ENOMEM;
+
+	ret = devm_add_action(dev, scpi_free_channels, scpi_info);
+	if (ret)
+		return ret;
+
+	for (; scpi_info->num_chans < count; scpi_info->num_chans++) {
+		resource_size_t size;
+		int idx = scpi_info->num_chans;
+		struct scpi_chan *pchan = scpi_info->channels + idx;
+		struct mbox_client *cl = &pchan->cl;
+		struct device_node *shmem = of_parse_phandle(np, "shmem", idx);
+
+		ret = of_address_to_resource(shmem, 0, &res);
+		of_node_put(shmem);
+		if (ret) {
+			dev_err(dev, "failed to get SCPI payload mem resource\n");
+			return ret;
+		}
+
+		size = resource_size(&res);
+		pchan->rx_payload = devm_ioremap(dev, res.start, size);
+		if (!pchan->rx_payload) {
+			dev_err(dev, "failed to ioremap SCPI payload\n");
+			return -EADDRNOTAVAIL;
+		}
+		pchan->tx_payload = pchan->rx_payload + (size >> 1);
+
+		cl->dev = dev;
+		cl->rx_callback = scpi_handle_remote_msg;
+		cl->tx_prepare = scpi_tx_prepare;
+		cl->tx_block = true;
+		cl->tx_tout = 20;
+		cl->knows_txdone = false; /* controller can't ack */
+
+		INIT_LIST_HEAD(&pchan->rx_pending);
+		INIT_LIST_HEAD(&pchan->xfers_list);
+		spin_lock_init(&pchan->rx_lock);
+		mutex_init(&pchan->xfers_lock);
+
+		ret = scpi_alloc_xfer_list(dev, pchan);
+		if (!ret) {
+			pchan->chan = mbox_request_channel(cl, idx);
+			if (!IS_ERR(pchan->chan))
+				continue;
+			ret = PTR_ERR(pchan->chan);
+			if (ret != -EPROBE_DEFER)
+				dev_err(dev, "failed to get channel%d err %d\n",
+					idx, ret);
+		}
+		return ret;
+	}
+
+	scpi_info->commands = scpi_std_commands;
+
+	platform_set_drvdata(pdev, scpi_info);
+
+	if (scpi_info->is_legacy) {
+		/* Replace with legacy variants */
+		scpi_ops.clk_set_val = legacy_scpi_clk_set_val;
+		scpi_info->commands = scpi_legacy_commands;
+
+		/* Fill priority bitmap */
+		for (idx = 0; idx < ARRAY_SIZE(legacy_hpriority_cmds); idx++)
+			set_bit(legacy_hpriority_cmds[idx],
+				scpi_info->cmd_priority);
+	}
+
+	ret = scpi_init_versions(scpi_info);
+	if (ret) {
+		dev_err(dev, "incorrect or no SCP firmware found\n");
+		return ret;
+	}
+
+	if (scpi_info->is_legacy && !scpi_info->protocol_version &&
+	    !scpi_info->firmware_version)
+		dev_info(dev, "SCP Protocol legacy pre-1.0 firmware\n");
+	else
+		dev_info(dev, "SCP Protocol %lu.%lu Firmware %lu.%lu.%lu version\n",
+			 FIELD_GET(PROTO_REV_MAJOR_MASK,
+				   scpi_info->protocol_version),
+			 FIELD_GET(PROTO_REV_MINOR_MASK,
+				   scpi_info->protocol_version),
+			 FIELD_GET(FW_REV_MAJOR_MASK,
+				   scpi_info->firmware_version),
+			 FIELD_GET(FW_REV_MINOR_MASK,
+				   scpi_info->firmware_version),
+			 FIELD_GET(FW_REV_PATCH_MASK,
+				   scpi_info->firmware_version));
+	scpi_info->scpi_ops = &scpi_ops;
+
+	ret = devm_device_add_groups(dev, versions_groups);
+	if (ret)
+		dev_err(dev, "unable to create sysfs version group\n");
+
+	return devm_of_platform_populate(dev);
+}
+
+static const struct of_device_id scpi_of_match[] = {
+	{.compatible = "arm,scpi"},
+	{.compatible = "arm,scpi-pre-1.0"},
+	{},
+};
+
+MODULE_DEVICE_TABLE(of, scpi_of_match);
+
+static struct platform_driver scpi_driver = {
+	.driver = {
+		.name = "scpi_protocol",
+		.of_match_table = scpi_of_match,
+	},
+	.probe = scpi_probe,
+	.remove = scpi_remove,
+};
+module_platform_driver(scpi_driver);
+
+MODULE_AUTHOR("Sudeep Holla <sudeep.holla@arm.com>");
+MODULE_DESCRIPTION("ARM SCPI mailbox protocol driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/firmware/arm_sdei.c b/drivers/firmware/arm_sdei.c
new file mode 100644
index 0000000..1ea7164
--- /dev/null
+++ b/drivers/firmware/arm_sdei.c
@@ -0,0 +1,1092 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2017 Arm Ltd.
+#define pr_fmt(fmt) "sdei: " fmt
+
+#include <linux/acpi.h>
+#include <linux/arm_sdei.h>
+#include <linux/arm-smccc.h>
+#include <linux/atomic.h>
+#include <linux/bitops.h>
+#include <linux/compiler.h>
+#include <linux/cpuhotplug.h>
+#include <linux/cpu.h>
+#include <linux/cpu_pm.h>
+#include <linux/errno.h>
+#include <linux/hardirq.h>
+#include <linux/kernel.h>
+#include <linux/kprobes.h>
+#include <linux/kvm_host.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/notifier.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/percpu.h>
+#include <linux/platform_device.h>
+#include <linux/pm.h>
+#include <linux/ptrace.h>
+#include <linux/preempt.h>
+#include <linux/reboot.h>
+#include <linux/slab.h>
+#include <linux/smp.h>
+#include <linux/spinlock.h>
+#include <linux/uaccess.h>
+
+/*
+ * The call to use to reach the firmware.
+ */
+static asmlinkage void (*sdei_firmware_call)(unsigned long function_id,
+		      unsigned long arg0, unsigned long arg1,
+		      unsigned long arg2, unsigned long arg3,
+		      unsigned long arg4, struct arm_smccc_res *res);
+
+/* entry point from firmware to arch asm code */
+static unsigned long sdei_entry_point;
+
+struct sdei_event {
+	/* These three are protected by the sdei_list_lock */
+	struct list_head	list;
+	bool			reregister;
+	bool			reenable;
+
+	u32			event_num;
+	u8			type;
+	u8			priority;
+
+	/* This pointer is handed to firmware as the event argument. */
+	union {
+		/* Shared events */
+		struct sdei_registered_event *registered;
+
+		/* CPU private events */
+		struct sdei_registered_event __percpu *private_registered;
+	};
+};
+
+/* Take this mutex for any API call or modification; take it before sdei_list_lock. */
+static DEFINE_MUTEX(sdei_events_lock);
+
+/* and then hold this when modifying the list */
+static DEFINE_SPINLOCK(sdei_list_lock);
+static LIST_HEAD(sdei_list);
+
+/* Private events are registered/enabled via IPI passing one of these */
+struct sdei_crosscall_args {
+	struct sdei_event *event;
+	atomic_t errors;
+	int first_error;
+};
+
+#define CROSSCALL_INIT(arg, event)	(arg.event = event, \
+					 arg.first_error = 0, \
+					 atomic_set(&arg.errors, 0))
+
+static inline int sdei_do_cross_call(void *fn, struct sdei_event *event)
+{
+	struct sdei_crosscall_args arg;
+
+	CROSSCALL_INIT(arg, event);
+	on_each_cpu(fn, &arg, true);
+
+	return arg.first_error;
+}
+
+static inline void
+sdei_cross_call_return(struct sdei_crosscall_args *arg, int err)
+{
+	if (err && (atomic_inc_return(&arg->errors) == 1))
+		arg->first_error = err;
+}
+
+static int sdei_to_linux_errno(unsigned long sdei_err)
+{
+	switch (sdei_err) {
+	case SDEI_NOT_SUPPORTED:
+		return -EOPNOTSUPP;
+	case SDEI_INVALID_PARAMETERS:
+		return -EINVAL;
+	case SDEI_DENIED:
+		return -EPERM;
+	case SDEI_PENDING:
+		return -EINPROGRESS;
+	case SDEI_OUT_OF_RESOURCE:
+		return -ENOMEM;
+	}
+
+	/* Not an error value ... */
+	return sdei_err;
+}
+
+/*
+ * If x0 is any of these values, then the call failed; use
+ * sdei_to_linux_errno() to translate.
+ */
+static bool sdei_is_err(struct arm_smccc_res *res)
+{
+	switch (res->a0) {
+	case SDEI_NOT_SUPPORTED:
+	case SDEI_INVALID_PARAMETERS:
+	case SDEI_DENIED:
+	case SDEI_PENDING:
+	case SDEI_OUT_OF_RESOURCE:
+		return true;
+	}
+
+	return false;
+}
+
+static int invoke_sdei_fn(unsigned long function_id, unsigned long arg0,
+			  unsigned long arg1, unsigned long arg2,
+			  unsigned long arg3, unsigned long arg4,
+			  u64 *result)
+{
+	int err = 0;
+	struct arm_smccc_res res;
+
+	if (sdei_firmware_call) {
+		sdei_firmware_call(function_id, arg0, arg1, arg2, arg3, arg4,
+				   &res);
+		if (sdei_is_err(&res))
+			err = sdei_to_linux_errno(res.a0);
+	} else {
+		/*
+		 * !sdei_firmware_call means we failed to probe or called
+		 * sdei_mark_interface_broken(). -EIO is not an error returned
+		 * by sdei_to_linux_errno() and is used to suppress messages
+		 * from this driver.
+		 */
+		err = -EIO;
+		res.a0 = SDEI_NOT_SUPPORTED;
+	}
+
+	if (result)
+		*result = res.a0;
+
+	return err;
+}
+
+static struct sdei_event *sdei_event_find(u32 event_num)
+{
+	struct sdei_event *e, *found = NULL;
+
+	lockdep_assert_held(&sdei_events_lock);
+
+	spin_lock(&sdei_list_lock);
+	list_for_each_entry(e, &sdei_list, list) {
+		if (e->event_num == event_num) {
+			found = e;
+			break;
+		}
+	}
+	spin_unlock(&sdei_list_lock);
+
+	return found;
+}
+
+int sdei_api_event_context(u32 query, u64 *result)
+{
+	return invoke_sdei_fn(SDEI_1_0_FN_SDEI_EVENT_CONTEXT, query, 0, 0, 0, 0,
+			      result);
+}
+NOKPROBE_SYMBOL(sdei_api_event_context);
+
+static int sdei_api_event_get_info(u32 event, u32 info, u64 *result)
+{
+	return invoke_sdei_fn(SDEI_1_0_FN_SDEI_EVENT_GET_INFO, event, info, 0,
+			      0, 0, result);
+}
+
+static struct sdei_event *sdei_event_create(u32 event_num,
+					    sdei_event_callback *cb,
+					    void *cb_arg)
+{
+	int err;
+	u64 result;
+	struct sdei_event *event;
+	struct sdei_registered_event *reg;
+
+	lockdep_assert_held(&sdei_events_lock);
+
+	event = kzalloc(sizeof(*event), GFP_KERNEL);
+	if (!event)
+		return ERR_PTR(-ENOMEM);
+
+	INIT_LIST_HEAD(&event->list);
+	event->event_num = event_num;
+
+	err = sdei_api_event_get_info(event_num, SDEI_EVENT_INFO_EV_PRIORITY,
+				      &result);
+	if (err) {
+		kfree(event);
+		return ERR_PTR(err);
+	}
+	event->priority = result;
+
+	err = sdei_api_event_get_info(event_num, SDEI_EVENT_INFO_EV_TYPE,
+				      &result);
+	if (err) {
+		kfree(event);
+		return ERR_PTR(err);
+	}
+	event->type = result;
+
+	if (event->type == SDEI_EVENT_TYPE_SHARED) {
+		reg = kzalloc(sizeof(*reg), GFP_KERNEL);
+		if (!reg) {
+			kfree(event);
+			return ERR_PTR(-ENOMEM);
+		}
+
+		reg->event_num = event_num;
+		reg->priority = event->priority;
+
+		reg->callback = cb;
+		reg->callback_arg = cb_arg;
+		event->registered = reg;
+	} else {
+		int cpu;
+		struct sdei_registered_event __percpu *regs;
+
+		regs = alloc_percpu(struct sdei_registered_event);
+		if (!regs) {
+			kfree(event);
+			return ERR_PTR(-ENOMEM);
+		}
+
+		for_each_possible_cpu(cpu) {
+			reg = per_cpu_ptr(regs, cpu);
+
+			reg->event_num = event->event_num;
+			reg->priority = event->priority;
+			reg->callback = cb;
+			reg->callback_arg = cb_arg;
+		}
+
+		event->private_registered = regs;
+	}
+
+	if (sdei_event_find(event_num)) {
+		/* free per type; kfree() on a percpu pointer would be wrong */
+		if (event->type == SDEI_EVENT_TYPE_SHARED)
+			kfree(event->registered);
+		else
+			free_percpu(event->private_registered);
+		kfree(event);
+		event = ERR_PTR(-EBUSY);
+	} else {
+		spin_lock(&sdei_list_lock);
+		list_add(&event->list, &sdei_list);
+		spin_unlock(&sdei_list_lock);
+	}
+
+	return event;
+}
+
+static void sdei_event_destroy(struct sdei_event *event)
+{
+	lockdep_assert_held(&sdei_events_lock);
+
+	spin_lock(&sdei_list_lock);
+	list_del(&event->list);
+	spin_unlock(&sdei_list_lock);
+
+	if (event->type == SDEI_EVENT_TYPE_SHARED)
+		kfree(event->registered);
+	else
+		free_percpu(event->private_registered);
+
+	kfree(event);
+}
+
+static int sdei_api_get_version(u64 *version)
+{
+	return invoke_sdei_fn(SDEI_1_0_FN_SDEI_VERSION, 0, 0, 0, 0, 0, version);
+}
+
+int sdei_mask_local_cpu(void)
+{
+	int err;
+
+	WARN_ON_ONCE(preemptible());
+
+	err = invoke_sdei_fn(SDEI_1_0_FN_SDEI_PE_MASK, 0, 0, 0, 0, 0, NULL);
+	if (err && err != -EIO) {
+		pr_warn_once("failed to mask CPU[%u]: %d\n",
+			      smp_processor_id(), err);
+		return err;
+	}
+
+	return 0;
+}
+
+static void _ipi_mask_cpu(void *ignored)
+{
+	sdei_mask_local_cpu();
+}
+
+int sdei_unmask_local_cpu(void)
+{
+	int err;
+
+	WARN_ON_ONCE(preemptible());
+
+	err = invoke_sdei_fn(SDEI_1_0_FN_SDEI_PE_UNMASK, 0, 0, 0, 0, 0, NULL);
+	if (err && err != -EIO) {
+		pr_warn_once("failed to unmask CPU[%u]: %d\n",
+			     smp_processor_id(), err);
+		return err;
+	}
+
+	return 0;
+}
+
+static void _ipi_unmask_cpu(void *ignored)
+{
+	sdei_unmask_local_cpu();
+}
+
+static void _ipi_private_reset(void *ignored)
+{
+	int err;
+
+	err = invoke_sdei_fn(SDEI_1_0_FN_SDEI_PRIVATE_RESET, 0, 0, 0, 0, 0,
+			     NULL);
+	if (err && err != -EIO)
+		pr_warn_once("failed to reset CPU[%u]: %d\n",
+			     smp_processor_id(), err);
+}
+
+static int sdei_api_shared_reset(void)
+{
+	return invoke_sdei_fn(SDEI_1_0_FN_SDEI_SHARED_RESET, 0, 0, 0, 0, 0,
+			      NULL);
+}
+
+static void sdei_mark_interface_broken(void)
+{
+	pr_err("disabling SDEI firmware interface\n");
+	on_each_cpu(&_ipi_mask_cpu, NULL, true);
+	sdei_firmware_call = NULL;
+}
+
+static int sdei_platform_reset(void)
+{
+	int err;
+
+	on_each_cpu(&_ipi_private_reset, NULL, true);
+	err = sdei_api_shared_reset();
+	if (err) {
+		pr_err("Failed to reset platform: %d\n", err);
+		sdei_mark_interface_broken();
+	}
+
+	return err;
+}
+
+static int sdei_api_event_enable(u32 event_num)
+{
+	return invoke_sdei_fn(SDEI_1_0_FN_SDEI_EVENT_ENABLE, event_num, 0, 0, 0,
+			      0, NULL);
+}
+
+/* Called directly by the hotplug callbacks */
+static void _local_event_enable(void *data)
+{
+	int err;
+	struct sdei_crosscall_args *arg = data;
+
+	WARN_ON_ONCE(preemptible());
+
+	err = sdei_api_event_enable(arg->event->event_num);
+
+	sdei_cross_call_return(arg, err);
+}
+
+int sdei_event_enable(u32 event_num)
+{
+	int err = -EINVAL;
+	struct sdei_event *event;
+
+	mutex_lock(&sdei_events_lock);
+	event = sdei_event_find(event_num);
+	if (!event) {
+		mutex_unlock(&sdei_events_lock);
+		return -ENOENT;
+	}
+
+	spin_lock(&sdei_list_lock);
+	event->reenable = true;
+	spin_unlock(&sdei_list_lock);
+
+	if (event->type == SDEI_EVENT_TYPE_SHARED)
+		err = sdei_api_event_enable(event->event_num);
+	else
+		err = sdei_do_cross_call(_local_event_enable, event);
+	mutex_unlock(&sdei_events_lock);
+
+	return err;
+}
+EXPORT_SYMBOL(sdei_event_enable);
+
+static int sdei_api_event_disable(u32 event_num)
+{
+	return invoke_sdei_fn(SDEI_1_0_FN_SDEI_EVENT_DISABLE, event_num, 0, 0,
+			      0, 0, NULL);
+}
+
+static void _ipi_event_disable(void *data)
+{
+	int err;
+	struct sdei_crosscall_args *arg = data;
+
+	err = sdei_api_event_disable(arg->event->event_num);
+
+	sdei_cross_call_return(arg, err);
+}
+
+int sdei_event_disable(u32 event_num)
+{
+	int err = -EINVAL;
+	struct sdei_event *event;
+
+	mutex_lock(&sdei_events_lock);
+	event = sdei_event_find(event_num);
+	if (!event) {
+		mutex_unlock(&sdei_events_lock);
+		return -ENOENT;
+	}
+
+	spin_lock(&sdei_list_lock);
+	event->reenable = false;
+	spin_unlock(&sdei_list_lock);
+
+	if (event->type == SDEI_EVENT_TYPE_SHARED)
+		err = sdei_api_event_disable(event->event_num);
+	else
+		err = sdei_do_cross_call(_ipi_event_disable, event);
+	mutex_unlock(&sdei_events_lock);
+
+	return err;
+}
+EXPORT_SYMBOL(sdei_event_disable);
+
+static int sdei_api_event_unregister(u32 event_num)
+{
+	return invoke_sdei_fn(SDEI_1_0_FN_SDEI_EVENT_UNREGISTER, event_num, 0,
+			      0, 0, 0, NULL);
+}
+
+/* Called directly by the hotplug callbacks */
+static void _local_event_unregister(void *data)
+{
+	int err;
+	struct sdei_crosscall_args *arg = data;
+
+	WARN_ON_ONCE(preemptible());
+
+	err = sdei_api_event_unregister(arg->event->event_num);
+
+	sdei_cross_call_return(arg, err);
+}
+
+static int _sdei_event_unregister(struct sdei_event *event)
+{
+	lockdep_assert_held(&sdei_events_lock);
+
+	spin_lock(&sdei_list_lock);
+	event->reregister = false;
+	event->reenable = false;
+	spin_unlock(&sdei_list_lock);
+
+	if (event->type == SDEI_EVENT_TYPE_SHARED)
+		return sdei_api_event_unregister(event->event_num);
+
+	return sdei_do_cross_call(_local_event_unregister, event);
+}
+
+int sdei_event_unregister(u32 event_num)
+{
+	int err;
+	struct sdei_event *event;
+
+	WARN_ON(in_nmi());
+
+	mutex_lock(&sdei_events_lock);
+	event = sdei_event_find(event_num);
+	do {
+		if (!event) {
+			pr_warn("Event %u not registered\n", event_num);
+			err = -ENOENT;
+			break;
+		}
+
+		err = _sdei_event_unregister(event);
+		if (err)
+			break;
+
+		sdei_event_destroy(event);
+	} while (0);
+	mutex_unlock(&sdei_events_lock);
+
+	return err;
+}
+EXPORT_SYMBOL(sdei_event_unregister);
+
+/*
+ * Unregister shared events, but don't destroy them, as they are
+ * re-registered by sdei_reregister_shared().
+ */
+static int sdei_unregister_shared(void)
+{
+	int err = 0;
+	struct sdei_event *event;
+
+	mutex_lock(&sdei_events_lock);
+	/*
+	 * Don't take sdei_list_lock here: _sdei_event_unregister() takes it
+	 * and would deadlock. Holding sdei_events_lock is enough, as the
+	 * list is only modified with that mutex held.
+	 */
+	list_for_each_entry(event, &sdei_list, list) {
+		if (event->type != SDEI_EVENT_TYPE_SHARED)
+			continue;
+
+		err = _sdei_event_unregister(event);
+		if (err)
+			break;
+	}
+	mutex_unlock(&sdei_events_lock);
+
+	return err;
+}
+
+static int sdei_api_event_register(u32 event_num, unsigned long entry_point,
+				   void *arg, u64 flags, u64 affinity)
+{
+	return invoke_sdei_fn(SDEI_1_0_FN_SDEI_EVENT_REGISTER, event_num,
+			      (unsigned long)entry_point, (unsigned long)arg,
+			      flags, affinity, NULL);
+}
+
+/* Called directly by the hotplug callbacks */
+static void _local_event_register(void *data)
+{
+	int err;
+	struct sdei_registered_event *reg;
+	struct sdei_crosscall_args *arg = data;
+
+	WARN_ON(preemptible());
+
+	reg = per_cpu_ptr(arg->event->private_registered, smp_processor_id());
+	err = sdei_api_event_register(arg->event->event_num, sdei_entry_point,
+				      reg, 0, 0);
+
+	sdei_cross_call_return(arg, err);
+}
+
+static int _sdei_event_register(struct sdei_event *event)
+{
+	int err;
+
+	lockdep_assert_held(&sdei_events_lock);
+
+	spin_lock(&sdei_list_lock);
+	event->reregister = true;
+	spin_unlock(&sdei_list_lock);
+
+	if (event->type == SDEI_EVENT_TYPE_SHARED)
+		return sdei_api_event_register(event->event_num,
+					       sdei_entry_point,
+					       event->registered,
+					       SDEI_EVENT_REGISTER_RM_ANY, 0);
+
+
+	err = sdei_do_cross_call(_local_event_register, event);
+	if (err) {
+		spin_lock(&sdei_list_lock);
+		event->reregister = false;
+		event->reenable = false;
+		spin_unlock(&sdei_list_lock);
+
+		sdei_do_cross_call(_local_event_unregister, event);
+	}
+
+	return err;
+}
+
+int sdei_event_register(u32 event_num, sdei_event_callback *cb, void *arg)
+{
+	int err;
+	struct sdei_event *event;
+
+	WARN_ON(in_nmi());
+
+	mutex_lock(&sdei_events_lock);
+	do {
+		if (sdei_event_find(event_num)) {
+			pr_warn("Event %u already registered\n", event_num);
+			err = -EBUSY;
+			break;
+		}
+
+		event = sdei_event_create(event_num, cb, arg);
+		if (IS_ERR(event)) {
+			err = PTR_ERR(event);
+			pr_warn("Failed to create event %u: %d\n", event_num,
+				err);
+			break;
+		}
+
+		err = _sdei_event_register(event);
+		if (err) {
+			sdei_event_destroy(event);
+			pr_warn("Failed to register event %u: %d\n", event_num,
+				err);
+		}
+	} while (0);
+	mutex_unlock(&sdei_events_lock);
+
+	return err;
+}
+EXPORT_SYMBOL(sdei_event_register);
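+
+/*
+ * A minimal usage sketch, assuming a caller-provided callback; the names
+ * below are hypothetical:
+ *
+ *	static int example_cb(u32 event_num, struct pt_regs *regs, void *arg)
+ *	{
+ *		return 0;
+ *	}
+ *
+ *	err = sdei_event_register(event_num, example_cb, NULL);
+ *	if (!err)
+ *		err = sdei_event_enable(event_num);
+ *
+ * The callback runs when firmware hands the event to sdei_event_handler().
+ */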
+
+static int sdei_reregister_event(struct sdei_event *event)
+{
+	int err;
+
+	lockdep_assert_held(&sdei_events_lock);
+
+	err = _sdei_event_register(event);
+	if (err) {
+		pr_err("Failed to re-register event %u\n", event->event_num);
+		sdei_event_destroy(event);
+		return err;
+	}
+
+	if (event->reenable) {
+		if (event->type == SDEI_EVENT_TYPE_SHARED)
+			err = sdei_api_event_enable(event->event_num);
+		else
+			err = sdei_do_cross_call(_local_event_enable, event);
+	}
+
+	if (err)
+		pr_err("Failed to re-enable event %u\n", event->event_num);
+
+	return err;
+}
+
+static int sdei_reregister_shared(void)
+{
+	int err = 0;
+	struct sdei_event *event;
+
+	mutex_lock(&sdei_events_lock);
+	/* See sdei_unregister_shared() for why sdei_list_lock isn't taken. */
+	list_for_each_entry(event, &sdei_list, list) {
+		if (event->type != SDEI_EVENT_TYPE_SHARED)
+			continue;
+
+		if (event->reregister) {
+			err = sdei_reregister_event(event);
+			if (err)
+				break;
+		}
+	}
+	mutex_unlock(&sdei_events_lock);
+
+	return err;
+}
+
+static int sdei_cpuhp_down(unsigned int cpu)
+{
+	struct sdei_event *event;
+	struct sdei_crosscall_args arg;
+
+	/* un-register private events */
+	spin_lock(&sdei_list_lock);
+	list_for_each_entry(event, &sdei_list, list) {
+		if (event->type == SDEI_EVENT_TYPE_SHARED)
+			continue;
+
+		CROSSCALL_INIT(arg, event);
+		/* call the cross-call function locally... */
+		_local_event_unregister(&arg);
+		if (arg.first_error)
+			pr_err("Failed to unregister event %u: %d\n",
+			       event->event_num, arg.first_error);
+	}
+	spin_unlock(&sdei_list_lock);
+
+	return sdei_mask_local_cpu();
+}
+
+static int sdei_cpuhp_up(unsigned int cpu)
+{
+	struct sdei_event *event;
+	struct sdei_crosscall_args arg;
+
+	/* re-register/enable private events */
+	spin_lock(&sdei_list_lock);
+	list_for_each_entry(event, &sdei_list, list) {
+		if (event->type == SDEI_EVENT_TYPE_SHARED)
+			continue;
+
+		if (event->reregister) {
+			CROSSCALL_INIT(arg, event);
+			/* call the cross-call function locally... */
+			_local_event_register(&arg);
+			if (arg.first_error)
+				pr_err("Failed to re-register event %u: %d\n",
+				       event->event_num, arg.first_error);
+		}
+
+		if (event->reenable) {
+			CROSSCALL_INIT(arg, event);
+			_local_event_enable(&arg);
+			if (arg.first_error)
+				pr_err("Failed to re-enable event %u: %d\n",
+				       event->event_num, arg.first_error);
+		}
+	}
+	spin_unlock(&sdei_list_lock);
+
+	return sdei_unmask_local_cpu();
+}
+
+/* When entering/leaving a low-power state, mask/unmask SDEI events for this CPU */
+static int sdei_pm_notifier(struct notifier_block *nb, unsigned long action,
+			    void *data)
+{
+	int rv;
+
+	switch (action) {
+	case CPU_PM_ENTER:
+		rv = sdei_mask_local_cpu();
+		break;
+	case CPU_PM_EXIT:
+	case CPU_PM_ENTER_FAILED:
+		rv = sdei_unmask_local_cpu();
+		break;
+	default:
+		return NOTIFY_DONE;
+	}
+
+	if (rv)
+		return notifier_from_errno(rv);
+
+	return NOTIFY_OK;
+}
+
+static struct notifier_block sdei_pm_nb = {
+	.notifier_call = sdei_pm_notifier,
+};
+
+static int sdei_device_suspend(struct device *dev)
+{
+	on_each_cpu(_ipi_mask_cpu, NULL, true);
+
+	return 0;
+}
+
+static int sdei_device_resume(struct device *dev)
+{
+	on_each_cpu(_ipi_unmask_cpu, NULL, true);
+
+	return 0;
+}
+
+/*
+ * We need all events to be reregistered when we resume from hibernate.
+ *
+ * The hibernate sequence is freeze->thaw, then after the reboot:
+ * freeze->restore. We unregister all events during freeze, then re-register
+ * and re-enable them during thaw and restore.
+ */
+static int sdei_device_freeze(struct device *dev)
+{
+	/* unregister private events */
+	cpuhp_remove_state(CPUHP_AP_ARM_SDEI_STARTING);
+
+	return sdei_unregister_shared();
+}
+
+static int sdei_device_thaw(struct device *dev)
+{
+	int err;
+
+	/* re-register shared events */
+	err = sdei_reregister_shared();
+	if (err) {
+		pr_warn("Failed to re-register shared events...\n");
+		sdei_mark_interface_broken();
+		return err;
+	}
+
+	err = cpuhp_setup_state(CPUHP_AP_ARM_SDEI_STARTING, "SDEI",
+				&sdei_cpuhp_up, &sdei_cpuhp_down);
+	if (err)
+		pr_warn("Failed to re-register CPU hotplug notifier...\n");
+
+	return err;
+}
+
+static int sdei_device_restore(struct device *dev)
+{
+	int err;
+
+	err = sdei_platform_reset();
+	if (err)
+		return err;
+
+	return sdei_device_thaw(dev);
+}
+
+static const struct dev_pm_ops sdei_pm_ops = {
+	.suspend = sdei_device_suspend,
+	.resume = sdei_device_resume,
+	.freeze = sdei_device_freeze,
+	.thaw = sdei_device_thaw,
+	.restore = sdei_device_restore,
+};
+
+/*
+ * Mask all CPUs and unregister all events on panic, reboot or kexec.
+ */
+static int sdei_reboot_notifier(struct notifier_block *nb, unsigned long action,
+				void *data)
+{
+	/*
+	 * We are going to reset the interface, after this there is no point
+	 * doing work when we take CPUs offline.
+	 */
+	cpuhp_remove_state(CPUHP_AP_ARM_SDEI_STARTING);
+
+	sdei_platform_reset();
+
+	return NOTIFY_OK;
+}
+
+static struct notifier_block sdei_reboot_nb = {
+	.notifier_call = sdei_reboot_notifier,
+};
+
+static void sdei_smccc_smc(unsigned long function_id,
+			   unsigned long arg0, unsigned long arg1,
+			   unsigned long arg2, unsigned long arg3,
+			   unsigned long arg4, struct arm_smccc_res *res)
+{
+	arm_smccc_smc(function_id, arg0, arg1, arg2, arg3, arg4, 0, 0, res);
+}
+
+static void sdei_smccc_hvc(unsigned long function_id,
+			   unsigned long arg0, unsigned long arg1,
+			   unsigned long arg2, unsigned long arg3,
+			   unsigned long arg4, struct arm_smccc_res *res)
+{
+	arm_smccc_hvc(function_id, arg0, arg1, arg2, arg3, arg4, 0, 0, res);
+}
+
+static int sdei_get_conduit(struct platform_device *pdev)
+{
+	const char *method;
+	struct device_node *np = pdev->dev.of_node;
+
+	sdei_firmware_call = NULL;
+	if (np) {
+		if (of_property_read_string(np, "method", &method)) {
+			pr_warn("missing \"method\" property\n");
+			return CONDUIT_INVALID;
+		}
+
+		if (!strcmp("hvc", method)) {
+			sdei_firmware_call = &sdei_smccc_hvc;
+			return CONDUIT_HVC;
+		} else if (!strcmp("smc", method)) {
+			sdei_firmware_call = &sdei_smccc_smc;
+			return CONDUIT_SMC;
+		}
+
+		pr_warn("invalid \"method\" property: %s\n", method);
+	} else if (IS_ENABLED(CONFIG_ACPI) && !acpi_disabled) {
+		if (acpi_psci_use_hvc()) {
+			sdei_firmware_call = &sdei_smccc_hvc;
+			return CONDUIT_HVC;
+		} else {
+			sdei_firmware_call = &sdei_smccc_smc;
+			return CONDUIT_SMC;
+		}
+	}
+
+	return CONDUIT_INVALID;
+}
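+
+/*
+ * An illustrative devicetree fragment matched by the code above; the node
+ * name and conduit value are examples only:
+ *
+ *	firmware {
+ *		sdei {
+ *			compatible = "arm,sdei-1.0";
+ *			method = "smc";
+ *		};
+ *	};
+ */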
+
+static int sdei_probe(struct platform_device *pdev)
+{
+	int err;
+	u64 ver = 0;
+	int conduit;
+
+	conduit = sdei_get_conduit(pdev);
+	if (!sdei_firmware_call)
+		return 0;
+
+	err = sdei_api_get_version(&ver);
+	if (err == -EOPNOTSUPP)
+		pr_err("advertised but not implemented in platform firmware\n");
+	if (err) {
+		pr_err("Failed to get SDEI version: %d\n", err);
+		sdei_mark_interface_broken();
+		return err;
+	}
+
+	pr_info("SDEIv%d.%d (0x%x) detected in firmware.\n",
+		(int)SDEI_VERSION_MAJOR(ver), (int)SDEI_VERSION_MINOR(ver),
+		(int)SDEI_VERSION_VENDOR(ver));
+
+	if (SDEI_VERSION_MAJOR(ver) != 1) {
+		pr_warn("Conflicting SDEI version detected.\n");
+		sdei_mark_interface_broken();
+		return -EINVAL;
+	}
+
+	err = sdei_platform_reset();
+	if (err)
+		return err;
+
+	sdei_entry_point = sdei_arch_get_entry_point(conduit);
+	if (!sdei_entry_point) {
+		/* Not supported due to hardware or boot configuration */
+		sdei_mark_interface_broken();
+		return 0;
+	}
+
+	err = cpu_pm_register_notifier(&sdei_pm_nb);
+	if (err) {
+		pr_warn("Failed to register CPU PM notifier...\n");
+		goto error;
+	}
+
+	err = register_reboot_notifier(&sdei_reboot_nb);
+	if (err) {
+		pr_warn("Failed to register reboot notifier...\n");
+		goto remove_cpupm;
+	}
+
+	err = cpuhp_setup_state(CPUHP_AP_ARM_SDEI_STARTING, "SDEI",
+				&sdei_cpuhp_up, &sdei_cpuhp_down);
+	if (err) {
+		pr_warn("Failed to register CPU hotplug notifier...\n");
+		goto remove_reboot;
+	}
+
+	return 0;
+
+remove_reboot:
+	unregister_reboot_notifier(&sdei_reboot_nb);
+
+remove_cpupm:
+	cpu_pm_unregister_notifier(&sdei_pm_nb);
+
+error:
+	sdei_mark_interface_broken();
+	return err;
+}
+
+static const struct of_device_id sdei_of_match[] = {
+	{ .compatible = "arm,sdei-1.0" },
+	{}
+};
+
+static struct platform_driver sdei_driver = {
+	.driver		= {
+		.name			= "sdei",
+		.pm			= &sdei_pm_ops,
+		.of_match_table		= sdei_of_match,
+	},
+	.probe		= sdei_probe,
+};
+
+static bool __init sdei_present_dt(void)
+{
+	struct platform_device *pdev;
+	struct device_node *np, *fw_np;
+
+	fw_np = of_find_node_by_name(NULL, "firmware");
+	if (!fw_np)
+		return false;
+
+	np = of_find_matching_node(fw_np, sdei_of_match);
+	of_node_put(fw_np);
+	if (!np)
+		return false;
+
+	pdev = of_platform_device_create(np, sdei_driver.driver.name, NULL);
+	of_node_put(np);
+	if (!pdev)
+		return false;
+
+	return true;
+}
+
+static bool __init sdei_present_acpi(void)
+{
+	acpi_status status;
+	struct platform_device *pdev;
+	struct acpi_table_header *sdei_table_header;
+
+	if (acpi_disabled)
+		return false;
+
+	status = acpi_get_table(ACPI_SIG_SDEI, 0, &sdei_table_header);
+	if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) {
+		const char *msg = acpi_format_exception(status);
+
+		pr_info("Failed to get ACPI:SDEI table, %s\n", msg);
+	}
+	if (ACPI_FAILURE(status))
+		return false;
+
+	pdev = platform_device_register_simple(sdei_driver.driver.name, 0, NULL,
+					       0);
+	if (IS_ERR(pdev))
+		return false;
+
+	return true;
+}
+
+static int __init sdei_init(void)
+{
+	if (sdei_present_dt() || sdei_present_acpi())
+		platform_driver_register(&sdei_driver);
+
+	return 0;
+}
+
+/*
+ * On an ACPI system SDEI needs to be ready before HEST:GHES tries to register
+ * its events. ACPI is initialised from a subsys_initcall() and GHES is
+ * initialised by a device_initcall(). We want to be called in the middle.
+ */
+subsys_initcall_sync(sdei_init);
+
+int sdei_event_handler(struct pt_regs *regs,
+		       struct sdei_registered_event *arg)
+{
+	int err;
+	mm_segment_t orig_addr_limit;
+	u32 event_num = arg->event_num;
+
+	orig_addr_limit = get_fs();
+	set_fs(USER_DS);
+
+	err = arg->callback(event_num, regs, arg->callback_arg);
+	if (err)
+		pr_err_ratelimited("event %u on CPU %u failed with error: %d\n",
+				   event_num, smp_processor_id(), err);
+
+	set_fs(orig_addr_limit);
+
+	return err;
+}
+NOKPROBE_SYMBOL(sdei_event_handler);
diff --git a/drivers/firmware/broadcom/Kconfig b/drivers/firmware/broadcom/Kconfig
new file mode 100644
index 0000000..f77cdb3
--- /dev/null
+++ b/drivers/firmware/broadcom/Kconfig
@@ -0,0 +1,23 @@
+config BCM47XX_NVRAM
+	bool "Broadcom NVRAM driver"
+	depends on BCM47XX || ARCH_BCM_5301X
+	help
+	  Broadcom home routers contain a flash partition called "nvram" with
+	  all the important hardware configuration as well as some minor user
+	  setup data. The NVRAM partition contains text-like data representing
+	  name=value pairs.
+	  This driver provides an easy way to get the value of a requested
+	  parameter. It simply reads the content of NVRAM and parses it; it
+	  doesn't control any hardware itself.
+
+config BCM47XX_SPROM
+	bool "Broadcom SPROM driver"
+	depends on BCM47XX_NVRAM
+	select GENERIC_NET_UTILS
+	help
+	  Broadcom devices store configuration data in SPROM. Accessing it is
+	  specific to the bus host type, e.g. PCI(e) devices have it mapped in
+	  a PCI BAR.
+	  In case of SoC devices, the SPROM content is stored on flash used by
+	  the bootloader firmware CFE. This driver provides a method for the
+	  ssb and bcma drivers to read the SPROM on SoCs.
diff --git a/drivers/firmware/broadcom/Makefile b/drivers/firmware/broadcom/Makefile
new file mode 100644
index 0000000..f93efc4
--- /dev/null
+++ b/drivers/firmware/broadcom/Makefile
@@ -0,0 +1,2 @@
+obj-$(CONFIG_BCM47XX_NVRAM)		+= bcm47xx_nvram.o
+obj-$(CONFIG_BCM47XX_SPROM)		+= bcm47xx_sprom.o
diff --git a/drivers/firmware/broadcom/bcm47xx_nvram.c b/drivers/firmware/broadcom/bcm47xx_nvram.c
new file mode 100644
index 0000000..d25f080
--- /dev/null
+++ b/drivers/firmware/broadcom/bcm47xx_nvram.c
@@ -0,0 +1,242 @@
+/*
+ * BCM947xx nvram variable access
+ *
+ * Copyright (C) 2005 Broadcom Corporation
+ * Copyright (C) 2006 Felix Fietkau <nbd@openwrt.org>
+ * Copyright (C) 2010-2012 Hauke Mehrtens <hauke@hauke-m.de>
+ *
+ * This program is free software; you can redistribute	it and/or modify it
+ * under  the terms of	the GNU General	 Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/io.h>
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/vmalloc.h>
+#include <linux/mtd/mtd.h>
+#include <linux/bcm47xx_nvram.h>
+
+#define NVRAM_MAGIC			0x48534C46	/* 'FLSH' */
+#define NVRAM_SPACE			0x10000
+#define NVRAM_MAX_GPIO_ENTRIES		32
+#define NVRAM_MAX_GPIO_VALUE_LEN	30
+
+#define FLASH_MIN		0x00020000	/* Minimum flash size */
+
+struct nvram_header {
+	u32 magic;
+	u32 len;
+	u32 crc_ver_init;	/* 0:7 crc, 8:15 ver, 16:31 sdram_init */
+	u32 config_refresh;	/* 0:15 sdram_config, 16:31 sdram_refresh */
+	u32 config_ncdl;	/* ncdl values for memc */
+};
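+
+/*
+ * The header is followed by a flat sequence of NUL-terminated "name=value"
+ * strings, terminated by an empty string, e.g. (values are illustrative):
+ *
+ *	boardtype=0x0472\0boardnum=42\0et0macaddr=00:11:22:33:44:55\0\0
+ */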
+
+static char nvram_buf[NVRAM_SPACE];
+static size_t nvram_len;
+static const u32 nvram_sizes[] = {0x6000, 0x8000, 0xF000, 0x10000};
+
+static u32 find_nvram_size(void __iomem *end)
+{
+	struct nvram_header __iomem *header;
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(nvram_sizes); i++) {
+		header = (struct nvram_header *)(end - nvram_sizes[i]);
+		if (header->magic == NVRAM_MAGIC)
+			return nvram_sizes[i];
+	}
+
+	return 0;
+}
+
+/* Probe for NVRAM header */
+static int nvram_find_and_copy(void __iomem *iobase, u32 lim)
+{
+	struct nvram_header __iomem *header;
+	u32 off;
+	u32 size;
+
+	if (nvram_len) {
+		pr_warn("nvram already initialized\n");
+		return -EEXIST;
+	}
+
+	/* TODO: when NVRAM is on NAND flash, check for bad blocks first. */
+	off = FLASH_MIN;
+	while (off <= lim) {
+		/* Windowed flash access */
+		size = find_nvram_size(iobase + off);
+		if (size) {
+			header = (struct nvram_header *)(iobase + off - size);
+			goto found;
+		}
+		off <<= 1;
+	}
+
+	/* Try embedded NVRAM at 4 KB and 1 KB as last resorts */
+	header = (struct nvram_header *)(iobase + 4096);
+	if (header->magic == NVRAM_MAGIC) {
+		size = NVRAM_SPACE;
+		goto found;
+	}
+
+	header = (struct nvram_header *)(iobase + 1024);
+	if (header->magic == NVRAM_MAGIC) {
+		size = NVRAM_SPACE;
+		goto found;
+	}
+
+	pr_err("no nvram found\n");
+	return -ENXIO;
+
+found:
+	__ioread32_copy(nvram_buf, header, sizeof(*header) / 4);
+	nvram_len = ((struct nvram_header *)(nvram_buf))->len;
+	if (nvram_len > size) {
+		pr_err("The nvram size according to the header seems to be bigger than the partition on flash\n");
+		nvram_len = size;
+	}
+	if (nvram_len >= NVRAM_SPACE) {
+		pr_err("nvram on flash (%i bytes) is bigger than the reserved space in memory, will just copy the first %i bytes\n",
+		       nvram_len, NVRAM_SPACE - 1);
+		nvram_len = NVRAM_SPACE - 1;
+	}
+	/* proceed reading data after header */
+	__ioread32_copy(nvram_buf + sizeof(*header), header + 1,
+			DIV_ROUND_UP(nvram_len, 4));
+	nvram_buf[NVRAM_SPACE - 1] = '\0';
+
+	return 0;
+}
+
+/*
+ * On bcm47xx we need access to the NVRAM very early, so we can't use the mtd
+ * subsystem to access flash. We can't even use a platform device / driver to
+ * store the memory offset.
+ * To handle this we provide the following symbol. It's supposed to be called
+ * as soon as we get info about the flash device, before any NVRAM entry is
+ * needed.
+ */
+int bcm47xx_nvram_init_from_mem(u32 base, u32 lim)
+{
+	void __iomem *iobase;
+	int err;
+
+	iobase = ioremap_nocache(base, lim);
+	if (!iobase)
+		return -ENOMEM;
+
+	err = nvram_find_and_copy(iobase, lim);
+
+	iounmap(iobase);
+
+	return err;
+}
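+
+/*
+ * A sketch of the expected early-boot call; the base address and window size
+ * below are hypothetical, real callers pass their SoC's flash window:
+ *
+ *	bcm47xx_nvram_init_from_mem(0x1fc00000, 0x00400000);
+ */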
+
+static int nvram_init(void)
+{
+#ifdef CONFIG_MTD
+	struct mtd_info *mtd;
+	struct nvram_header header;
+	size_t bytes_read;
+	int err;
+
+	mtd = get_mtd_device_nm("nvram");
+	if (IS_ERR(mtd))
+		return -ENODEV;
+
+	err = mtd_read(mtd, 0, sizeof(header), &bytes_read, (uint8_t *)&header);
+	if (!err && header.magic == NVRAM_MAGIC &&
+	    header.len > sizeof(header)) {
+		nvram_len = header.len;
+		if (nvram_len >= NVRAM_SPACE) {
+			pr_err("nvram on flash (%i bytes) is bigger than the reserved space in memory, will just copy the first %i bytes\n",
+				header.len, NVRAM_SPACE);
+			nvram_len = NVRAM_SPACE - 1;
+		}
+
+		err = mtd_read(mtd, 0, nvram_len, &nvram_len,
+			       (u8 *)nvram_buf);
+		return err;
+	}
+#endif
+
+	return -ENXIO;
+}
+
+int bcm47xx_nvram_getenv(const char *name, char *val, size_t val_len)
+{
+	char *var, *value, *end, *eq;
+	int err;
+
+	if (!name)
+		return -EINVAL;
+
+	if (!nvram_len) {
+		err = nvram_init();
+		if (err)
+			return err;
+	}
+
+	/* Look for name=value and return value */
+	var = &nvram_buf[sizeof(struct nvram_header)];
+	end = nvram_buf + sizeof(nvram_buf);
+	while (var < end && *var) {
+		eq = strchr(var, '=');
+		if (!eq)
+			break;
+		value = eq + 1;
+		if (eq - var == strlen(name) &&
+		    strncmp(var, name, eq - var) == 0)
+			return snprintf(val, val_len, "%s", value);
+		var = value + strlen(value) + 1;
+	}
+	return -ENOENT;
+}
+EXPORT_SYMBOL(bcm47xx_nvram_getenv);
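+
+/*
+ * A minimal usage sketch; the variable name is an example:
+ *
+ *	char buf[20];
+ *
+ *	if (bcm47xx_nvram_getenv("boardtype", buf, sizeof(buf)) >= 0)
+ *		pr_info("boardtype: %s\n", buf);
+ */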
+
+int bcm47xx_nvram_gpio_pin(const char *name)
+{
+	int i, err;
+	char nvram_var[] = "gpioXX";
+	char buf[NVRAM_MAX_GPIO_VALUE_LEN];
+
+	/* TODO: Optimize this to avoid calling getenv so many times */
+	for (i = 0; i < NVRAM_MAX_GPIO_ENTRIES; i++) {
+		err = snprintf(nvram_var, sizeof(nvram_var), "gpio%i", i);
+		if (err <= 0)
+			continue;
+		err = bcm47xx_nvram_getenv(nvram_var, buf, sizeof(buf));
+		if (err <= 0)
+			continue;
+		if (!strcmp(name, buf))
+			return i;
+	}
+	return -ENOENT;
+}
+EXPORT_SYMBOL(bcm47xx_nvram_gpio_pin);
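+
+/*
+ * E.g. with "gpio7=wps_button" in NVRAM (an illustrative entry),
+ * bcm47xx_nvram_gpio_pin("wps_button") returns 7.
+ */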
+
+char *bcm47xx_nvram_get_contents(size_t *nvram_size)
+{
+	int err;
+	char *nvram;
+
+	if (!nvram_len) {
+		err = nvram_init();
+		if (err)
+			return NULL;
+	}
+
+	*nvram_size = nvram_len - sizeof(struct nvram_header);
+	nvram = vmalloc(*nvram_size);
+	if (!nvram)
+		return NULL;
+	memcpy(nvram, &nvram_buf[sizeof(struct nvram_header)], *nvram_size);
+
+	return nvram;
+}
+EXPORT_SYMBOL(bcm47xx_nvram_get_contents);
+
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/firmware/broadcom/bcm47xx_sprom.c b/drivers/firmware/broadcom/bcm47xx_sprom.c
new file mode 100644
index 0000000..4787f86
--- /dev/null
+++ b/drivers/firmware/broadcom/bcm47xx_sprom.c
@@ -0,0 +1,725 @@
+/*
+ *  Copyright (C) 2004 Florian Schirmer <jolt@tuxbox.org>
+ *  Copyright (C) 2006 Felix Fietkau <nbd@openwrt.org>
+ *  Copyright (C) 2006 Michael Buesch <m@bues.ch>
+ *  Copyright (C) 2010 Waldemar Brodkorb <wbx@openadk.org>
+ *  Copyright (C) 2010-2012 Hauke Mehrtens <hauke@hauke-m.de>
+ *
+ *  This program is free software; you can redistribute  it and/or modify it
+ *  under  the terms of  the GNU General  Public License as published by the
+ *  Free Software Foundation;  either version 2 of the  License, or (at your
+ *  option) any later version.
+ *
+ *  THIS  SOFTWARE  IS PROVIDED   ``AS  IS'' AND   ANY  EXPRESS OR IMPLIED
+ *  WARRANTIES,   INCLUDING, BUT NOT  LIMITED  TO, THE IMPLIED WARRANTIES OF
+ *  MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN
+ *  NO  EVENT  SHALL   THE AUTHOR  BE    LIABLE FOR ANY   DIRECT, INDIRECT,
+ *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ *  NOT LIMITED   TO, PROCUREMENT OF  SUBSTITUTE GOODS  OR SERVICES; LOSS OF
+ *  USE, DATA,  OR PROFITS; OR  BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ *  ANY THEORY OF LIABILITY, WHETHER IN  CONTRACT, STRICT LIABILITY, OR TORT
+ *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ *  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *  You should have received a copy of the  GNU General Public License along
+ *  with this program; if not, write  to the Free Software Foundation, Inc.,
+ *  675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/bcm47xx_nvram.h>
+#include <linux/bcma/bcma.h>
+#include <linux/etherdevice.h>
+#include <linux/if_ether.h>
+#include <linux/ssb/ssb.h>
+
+static void create_key(const char *prefix, const char *postfix,
+		       const char *name, char *buf, int len)
+{
+	if (prefix && postfix)
+		snprintf(buf, len, "%s%s%s", prefix, name, postfix);
+	else if (prefix)
+		snprintf(buf, len, "%s%s", prefix, name);
+	else if (postfix)
+		snprintf(buf, len, "%s%s", name, postfix);
+	else
+		snprintf(buf, len, "%s", name);
+}
+
+static int get_nvram_var(const char *prefix, const char *postfix,
+			 const char *name, char *buf, int len, bool fallback)
+{
+	char key[40];
+	int err;
+
+	create_key(prefix, postfix, name, key, sizeof(key));
+
+	err = bcm47xx_nvram_getenv(key, buf, len);
+	if (fallback && err == -ENOENT && prefix) {
+		create_key(NULL, postfix, name, key, sizeof(key));
+		err = bcm47xx_nvram_getenv(key, buf, len);
+	}
+	return err;
+}
+
+#define NVRAM_READ_VAL(type)						\
+static void nvram_read_ ## type(const char *prefix,			\
+				const char *postfix, const char *name,	\
+				type *val, type allset, bool fallback)	\
+{									\
+	char buf[100];							\
+	int err;							\
+	type var;							\
+									\
+	err = get_nvram_var(prefix, postfix, name, buf, sizeof(buf),	\
+			    fallback);					\
+	if (err < 0)							\
+		return;							\
+	err = kstrto ## type(strim(buf), 0, &var);			\
+	if (err) {							\
+		pr_warn("can not parse nvram name %s%s%s with value %s got %i\n",	\
+			prefix, name, postfix, buf, err);		\
+		return;							\
+	}								\
+	if (allset && var == allset)					\
+		return;							\
+	*val = var;							\
+}
+
+NVRAM_READ_VAL(u8)
+NVRAM_READ_VAL(s8)
+NVRAM_READ_VAL(u16)
+NVRAM_READ_VAL(u32)
+
+#undef NVRAM_READ_VAL
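+
+/*
+ * Each NVRAM_READ_VAL(type) above generates a helper of the form (sketch of
+ * the u8 expansion):
+ *
+ *	static void nvram_read_u8(const char *prefix, const char *postfix,
+ *				  const char *name, u8 *val, u8 allset,
+ *				  bool fallback);
+ *
+ * which looks up "<prefix><name><postfix>" and stores the parsed value in
+ * *val, unless parsing fails or the value equals the "allset" sentinel.
+ */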
+
+static void nvram_read_u32_2(const char *prefix, const char *name,
+			     u16 *val_lo, u16 *val_hi, bool fallback)
+{
+	char buf[100];
+	int err;
+	u32 val;
+
+	err = get_nvram_var(prefix, NULL, name, buf, sizeof(buf), fallback);
+	if (err < 0)
+		return;
+	err = kstrtou32(strim(buf), 0, &val);
+	if (err) {
+		pr_warn("can not parse nvram name %s%s with value %s got %i\n",
+			prefix, name, buf, err);
+		return;
+	}
+	*val_lo = (val & 0x0000FFFFU);
+	*val_hi = (val & 0xFFFF0000U) >> 16;
+}
+
+static void nvram_read_leddc(const char *prefix, const char *name,
+			     u8 *leddc_on_time, u8 *leddc_off_time,
+			     bool fallback)
+{
+	char buf[100];
+	int err;
+	u32 val;
+
+	err = get_nvram_var(prefix, NULL, name, buf, sizeof(buf), fallback);
+	if (err < 0)
+		return;
+	err = kstrtou32(strim(buf), 0, &val);
+	if (err) {
+		pr_warn("can not parse nvram name %s%s with value %s got %i\n",
+			prefix, name, buf, err);
+		return;
+	}
+
+	if (val == 0xffff || val == 0xffffffff)
+		return;
+
+	*leddc_on_time = val & 0xff;
+	*leddc_off_time = (val >> 16) & 0xff;
+}
+
+static void nvram_read_macaddr(const char *prefix, const char *name,
+			       u8 val[6], bool fallback)
+{
+	char buf[100];
+	int err;
+
+	err = get_nvram_var(prefix, NULL, name, buf, sizeof(buf), fallback);
+	if (err < 0)
+		return;
+
+	strreplace(buf, '-', ':');
+	if (!mac_pton(buf, val))
+		pr_warn("Can not parse mac address: %s\n", buf);
+}
+
+static void nvram_read_alpha2(const char *prefix, const char *name,
+			     char val[2], bool fallback)
+{
+	char buf[10];
+	int err;
+
+	err = get_nvram_var(prefix, NULL, name, buf, sizeof(buf), fallback);
+	if (err < 0)
+		return;
+	if (buf[0] == '0')
+		return;
+	if (strlen(buf) > 2) {
+		pr_warn("alpha2 is too long %s\n", buf);
+		return;
+	}
+	memcpy(val, buf, 2);
+}
+
+/* This is a one-function-only macro; it uses the local "sprom" variable! */
+#define ENTRY(_revmask, _type, _prefix, _name, _val, _allset, _fallback) \
+	if (_revmask & BIT(sprom->revision)) \
+		nvram_read_ ## _type(_prefix, NULL, _name, &sprom->_val, \
+				     _allset, _fallback)
+/*
+ * Special version of the filling function that can be safely called for any
+ * SPROM revision. For every NVRAM-to-SPROM mapping it contains a bitmask of
+ * the revisions for which the mapping is valid.
+ * It obviously requires some hexadecimal/bitmask knowledge, but allows
+ * writing cleaner code (easy handling of revisions).
+ * Note that while SPROM revision 0 was never used, we still keep BIT(0)
+ * reserved for it, just to keep the numbering sane.
+ */
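+/*
+ * For example, with sprom->revision == 8 the line (sketch of one expansion)
+ *
+ *	ENTRY(0xfffffffe, u16, pre, "devid", dev_id, 0, fb);
+ *
+ * expands to a call reading "<prefix>devid" into sprom->dev_id, guarded by
+ * BIT(8) being set in 0xfffffffe.
+ */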
+static void bcm47xx_sprom_fill_auto(struct ssb_sprom *sprom,
+				    const char *prefix, bool fallback)
+{
+	const char *pre = prefix;
+	bool fb = fallback;
+
+	/* Broadcom extracts it for rev 8+ but it was found on 2 and 4 too */
+	ENTRY(0xfffffffe, u16, pre, "devid", dev_id, 0, fallback);
+
+	ENTRY(0xfffffffe, u16, pre, "boardrev", board_rev, 0, true);
+	ENTRY(0xfffffffe, u32, pre, "boardflags", boardflags, 0, fb);
+	ENTRY(0xfffffff0, u32, pre, "boardflags2", boardflags2, 0, fb);
+	ENTRY(0xfffff800, u32, pre, "boardflags3", boardflags3, 0, fb);
+	ENTRY(0x00000002, u16, pre, "boardflags", boardflags_lo, 0, fb);
+	ENTRY(0xfffffffc, u16, pre, "boardtype", board_type, 0, true);
+	ENTRY(0xfffffffe, u16, pre, "boardnum", board_num, 0, fb);
+	ENTRY(0x00000002, u8, pre, "cc", country_code, 0, fb);
+	ENTRY(0xfffffff8, u8, pre, "regrev", regrev, 0, fb);
+
+	ENTRY(0xfffffffe, u8, pre, "ledbh0", gpio0, 0xff, fb);
+	ENTRY(0xfffffffe, u8, pre, "ledbh1", gpio1, 0xff, fb);
+	ENTRY(0xfffffffe, u8, pre, "ledbh2", gpio2, 0xff, fb);
+	ENTRY(0xfffffffe, u8, pre, "ledbh3", gpio3, 0xff, fb);
+
+	ENTRY(0x0000070e, u16, pre, "pa0b0", pa0b0, 0, fb);
+	ENTRY(0x0000070e, u16, pre, "pa0b1", pa0b1, 0, fb);
+	ENTRY(0x0000070e, u16, pre, "pa0b2", pa0b2, 0, fb);
+	ENTRY(0x0000070e, u8, pre, "pa0itssit", itssi_bg, 0, fb);
+	ENTRY(0x0000070e, u8, pre, "pa0maxpwr", maxpwr_bg, 0, fb);
+
+	ENTRY(0x0000070c, u8, pre, "opo", opo, 0, fb);
+	ENTRY(0xfffffffe, u8, pre, "aa2g", ant_available_bg, 0, fb);
+	ENTRY(0xfffffffe, u8, pre, "aa5g", ant_available_a, 0, fb);
+	ENTRY(0x000007fe, s8, pre, "ag0", antenna_gain.a0, 0, fb);
+	ENTRY(0x000007fe, s8, pre, "ag1", antenna_gain.a1, 0, fb);
+	ENTRY(0x000007f0, s8, pre, "ag2", antenna_gain.a2, 0, fb);
+	ENTRY(0x000007f0, s8, pre, "ag3", antenna_gain.a3, 0, fb);
+
+	ENTRY(0x0000070e, u16, pre, "pa1b0", pa1b0, 0, fb);
+	ENTRY(0x0000070e, u16, pre, "pa1b1", pa1b1, 0, fb);
+	ENTRY(0x0000070e, u16, pre, "pa1b2", pa1b2, 0, fb);
+	ENTRY(0x0000070c, u16, pre, "pa1lob0", pa1lob0, 0, fb);
+	ENTRY(0x0000070c, u16, pre, "pa1lob1", pa1lob1, 0, fb);
+	ENTRY(0x0000070c, u16, pre, "pa1lob2", pa1lob2, 0, fb);
+	ENTRY(0x0000070c, u16, pre, "pa1hib0", pa1hib0, 0, fb);
+	ENTRY(0x0000070c, u16, pre, "pa1hib1", pa1hib1, 0, fb);
+	ENTRY(0x0000070c, u16, pre, "pa1hib2", pa1hib2, 0, fb);
+	ENTRY(0x0000070e, u8, pre, "pa1itssit", itssi_a, 0, fb);
+	ENTRY(0x0000070e, u8, pre, "pa1maxpwr", maxpwr_a, 0, fb);
+	ENTRY(0x0000070c, u8, pre, "pa1lomaxpwr", maxpwr_al, 0, fb);
+	ENTRY(0x0000070c, u8, pre, "pa1himaxpwr", maxpwr_ah, 0, fb);
+
+	ENTRY(0x00000708, u8, pre, "bxa2g", bxa2g, 0, fb);
+	ENTRY(0x00000708, u8, pre, "rssisav2g", rssisav2g, 0, fb);
+	ENTRY(0x00000708, u8, pre, "rssismc2g", rssismc2g, 0, fb);
+	ENTRY(0x00000708, u8, pre, "rssismf2g", rssismf2g, 0, fb);
+	ENTRY(0x00000708, u8, pre, "bxa5g", bxa5g, 0, fb);
+	ENTRY(0x00000708, u8, pre, "rssisav5g", rssisav5g, 0, fb);
+	ENTRY(0x00000708, u8, pre, "rssismc5g", rssismc5g, 0, fb);
+	ENTRY(0x00000708, u8, pre, "rssismf5g", rssismf5g, 0, fb);
+	ENTRY(0x00000708, u8, pre, "tri2g", tri2g, 0, fb);
+	ENTRY(0x00000708, u8, pre, "tri5g", tri5g, 0, fb);
+	ENTRY(0x00000708, u8, pre, "tri5gl", tri5gl, 0, fb);
+	ENTRY(0x00000708, u8, pre, "tri5gh", tri5gh, 0, fb);
+	ENTRY(0x00000708, s8, pre, "rxpo2g", rxpo2g, 0, fb);
+	ENTRY(0x00000708, s8, pre, "rxpo5g", rxpo5g, 0, fb);
+	ENTRY(0xfffffff0, u8, pre, "txchain", txchain, 0xf, fb);
+	ENTRY(0xfffffff0, u8, pre, "rxchain", rxchain, 0xf, fb);
+	ENTRY(0xfffffff0, u8, pre, "antswitch", antswitch, 0xff, fb);
+	ENTRY(0x00000700, u8, pre, "tssipos2g", fem.ghz2.tssipos, 0, fb);
+	ENTRY(0x00000700, u8, pre, "extpagain2g", fem.ghz2.extpa_gain, 0, fb);
+	ENTRY(0x00000700, u8, pre, "pdetrange2g", fem.ghz2.pdet_range, 0, fb);
+	ENTRY(0x00000700, u8, pre, "triso2g", fem.ghz2.tr_iso, 0, fb);
+	ENTRY(0x00000700, u8, pre, "antswctl2g", fem.ghz2.antswlut, 0, fb);
+	ENTRY(0x00000700, u8, pre, "tssipos5g", fem.ghz5.tssipos, 0, fb);
+	ENTRY(0x00000700, u8, pre, "extpagain5g", fem.ghz5.extpa_gain, 0, fb);
+	ENTRY(0x00000700, u8, pre, "pdetrange5g", fem.ghz5.pdet_range, 0, fb);
+	ENTRY(0x00000700, u8, pre, "triso5g", fem.ghz5.tr_iso, 0, fb);
+	ENTRY(0x00000700, u8, pre, "antswctl5g", fem.ghz5.antswlut, 0, fb);
+	ENTRY(0x000000f0, u8, pre, "txpid2ga0", txpid2g[0], 0, fb);
+	ENTRY(0x000000f0, u8, pre, "txpid2ga1", txpid2g[1], 0, fb);
+	ENTRY(0x000000f0, u8, pre, "txpid2ga2", txpid2g[2], 0, fb);
+	ENTRY(0x000000f0, u8, pre, "txpid2ga3", txpid2g[3], 0, fb);
+	ENTRY(0x000000f0, u8, pre, "txpid5ga0", txpid5g[0], 0, fb);
+	ENTRY(0x000000f0, u8, pre, "txpid5ga1", txpid5g[1], 0, fb);
+	ENTRY(0x000000f0, u8, pre, "txpid5ga2", txpid5g[2], 0, fb);
+	ENTRY(0x000000f0, u8, pre, "txpid5ga3", txpid5g[3], 0, fb);
+	ENTRY(0x000000f0, u8, pre, "txpid5gla0", txpid5gl[0], 0, fb);
+	ENTRY(0x000000f0, u8, pre, "txpid5gla1", txpid5gl[1], 0, fb);
+	ENTRY(0x000000f0, u8, pre, "txpid5gla2", txpid5gl[2], 0, fb);
+	ENTRY(0x000000f0, u8, pre, "txpid5gla3", txpid5gl[3], 0, fb);
+	ENTRY(0x000000f0, u8, pre, "txpid5gha0", txpid5gh[0], 0, fb);
+	ENTRY(0x000000f0, u8, pre, "txpid5gha1", txpid5gh[1], 0, fb);
+	ENTRY(0x000000f0, u8, pre, "txpid5gha2", txpid5gh[2], 0, fb);
+	ENTRY(0x000000f0, u8, pre, "txpid5gha3", txpid5gh[3], 0, fb);
+
+	ENTRY(0xffffff00, u8, pre, "tempthresh", tempthresh, 0, fb);
+	ENTRY(0xffffff00, u8, pre, "tempoffset", tempoffset, 0, fb);
+	ENTRY(0xffffff00, u16, pre, "rawtempsense", rawtempsense, 0, fb);
+	ENTRY(0xffffff00, u8, pre, "measpower", measpower, 0, fb);
+	ENTRY(0xffffff00, u8, pre, "tempsense_slope", tempsense_slope, 0, fb);
+	ENTRY(0xffffff00, u8, pre, "tempcorrx", tempcorrx, 0, fb);
+	ENTRY(0xffffff00, u8, pre, "tempsense_option", tempsense_option, 0, fb);
+	ENTRY(0x00000700, u8, pre, "freqoffset_corr", freqoffset_corr, 0, fb);
+	ENTRY(0x00000700, u8, pre, "iqcal_swp_dis", iqcal_swp_dis, 0, fb);
+	ENTRY(0x00000700, u8, pre, "hw_iqcal_en", hw_iqcal_en, 0, fb);
+	ENTRY(0x00000700, u8, pre, "elna2g", elna2g, 0, fb);
+	ENTRY(0x00000700, u8, pre, "elna5g", elna5g, 0, fb);
+	ENTRY(0xffffff00, u8, pre, "phycal_tempdelta", phycal_tempdelta, 0, fb);
+	ENTRY(0xffffff00, u8, pre, "temps_period", temps_period, 0, fb);
+	ENTRY(0xffffff00, u8, pre, "temps_hysteresis", temps_hysteresis, 0, fb);
+	ENTRY(0xffffff00, u8, pre, "measpower1", measpower1, 0, fb);
+	ENTRY(0xffffff00, u8, pre, "measpower2", measpower2, 0, fb);
+
+	ENTRY(0x000001f0, u16, pre, "cck2gpo", cck2gpo, 0, fb);
+	ENTRY(0x000001f0, u32, pre, "ofdm2gpo", ofdm2gpo, 0, fb);
+	ENTRY(0x000001f0, u32, pre, "ofdm5gpo", ofdm5gpo, 0, fb);
+	ENTRY(0x000001f0, u32, pre, "ofdm5glpo", ofdm5glpo, 0, fb);
+	ENTRY(0x000001f0, u32, pre, "ofdm5ghpo", ofdm5ghpo, 0, fb);
+	ENTRY(0x000001f0, u16, pre, "mcs2gpo0", mcs2gpo[0], 0, fb);
+	ENTRY(0x000001f0, u16, pre, "mcs2gpo1", mcs2gpo[1], 0, fb);
+	ENTRY(0x000001f0, u16, pre, "mcs2gpo2", mcs2gpo[2], 0, fb);
+	ENTRY(0x000001f0, u16, pre, "mcs2gpo3", mcs2gpo[3], 0, fb);
+	ENTRY(0x000001f0, u16, pre, "mcs2gpo4", mcs2gpo[4], 0, fb);
+	ENTRY(0x000001f0, u16, pre, "mcs2gpo5", mcs2gpo[5], 0, fb);
+	ENTRY(0x000001f0, u16, pre, "mcs2gpo6", mcs2gpo[6], 0, fb);
+	ENTRY(0x000001f0, u16, pre, "mcs2gpo7", mcs2gpo[7], 0, fb);
+	ENTRY(0x000001f0, u16, pre, "mcs5gpo0", mcs5gpo[0], 0, fb);
+	ENTRY(0x000001f0, u16, pre, "mcs5gpo1", mcs5gpo[1], 0, fb);
+	ENTRY(0x000001f0, u16, pre, "mcs5gpo2", mcs5gpo[2], 0, fb);
+	ENTRY(0x000001f0, u16, pre, "mcs5gpo3", mcs5gpo[3], 0, fb);
+	ENTRY(0x000001f0, u16, pre, "mcs5gpo4", mcs5gpo[4], 0, fb);
+	ENTRY(0x000001f0, u16, pre, "mcs5gpo5", mcs5gpo[5], 0, fb);
+	ENTRY(0x000001f0, u16, pre, "mcs5gpo6", mcs5gpo[6], 0, fb);
+	ENTRY(0x000001f0, u16, pre, "mcs5gpo7", mcs5gpo[7], 0, fb);
+	ENTRY(0x000001f0, u16, pre, "mcs5glpo0", mcs5glpo[0], 0, fb);
+	ENTRY(0x000001f0, u16, pre, "mcs5glpo1", mcs5glpo[1], 0, fb);
+	ENTRY(0x000001f0, u16, pre, "mcs5glpo2", mcs5glpo[2], 0, fb);
+	ENTRY(0x000001f0, u16, pre, "mcs5glpo3", mcs5glpo[3], 0, fb);
+	ENTRY(0x000001f0, u16, pre, "mcs5glpo4", mcs5glpo[4], 0, fb);
+	ENTRY(0x000001f0, u16, pre, "mcs5glpo5", mcs5glpo[5], 0, fb);
+	ENTRY(0x000001f0, u16, pre, "mcs5glpo6", mcs5glpo[6], 0, fb);
+	ENTRY(0x000001f0, u16, pre, "mcs5glpo7", mcs5glpo[7], 0, fb);
+	ENTRY(0x000001f0, u16, pre, "mcs5ghpo0", mcs5ghpo[0], 0, fb);
+	ENTRY(0x000001f0, u16, pre, "mcs5ghpo1", mcs5ghpo[1], 0, fb);
+	ENTRY(0x000001f0, u16, pre, "mcs5ghpo2", mcs5ghpo[2], 0, fb);
+	ENTRY(0x000001f0, u16, pre, "mcs5ghpo3", mcs5ghpo[3], 0, fb);
+	ENTRY(0x000001f0, u16, pre, "mcs5ghpo4", mcs5ghpo[4], 0, fb);
+	ENTRY(0x000001f0, u16, pre, "mcs5ghpo5", mcs5ghpo[5], 0, fb);
+	ENTRY(0x000001f0, u16, pre, "mcs5ghpo6", mcs5ghpo[6], 0, fb);
+	ENTRY(0x000001f0, u16, pre, "mcs5ghpo7", mcs5ghpo[7], 0, fb);
+	ENTRY(0x000001f0, u16, pre, "cddpo", cddpo, 0, fb);
+	ENTRY(0x000001f0, u16, pre, "stbcpo", stbcpo, 0, fb);
+	ENTRY(0x000001f0, u16, pre, "bw40po", bw40po, 0, fb);
+	ENTRY(0x000001f0, u16, pre, "bwduppo", bwduppo, 0, fb);
+
+	ENTRY(0xfffffe00, u16, pre, "cckbw202gpo", cckbw202gpo, 0, fb);
+	ENTRY(0xfffffe00, u16, pre, "cckbw20ul2gpo", cckbw20ul2gpo, 0, fb);
+	ENTRY(0x00000600, u32, pre, "legofdmbw202gpo", legofdmbw202gpo, 0, fb);
+	ENTRY(0x00000600, u32, pre, "legofdmbw20ul2gpo", legofdmbw20ul2gpo, 0, fb);
+	ENTRY(0x00000600, u32, pre, "legofdmbw205glpo", legofdmbw205glpo, 0, fb);
+	ENTRY(0x00000600, u32, pre, "legofdmbw20ul5glpo", legofdmbw20ul5glpo, 0, fb);
+	ENTRY(0x00000600, u32, pre, "legofdmbw205gmpo", legofdmbw205gmpo, 0, fb);
+	ENTRY(0x00000600, u32, pre, "legofdmbw20ul5gmpo", legofdmbw20ul5gmpo, 0, fb);
+	ENTRY(0x00000600, u32, pre, "legofdmbw205ghpo", legofdmbw205ghpo, 0, fb);
+	ENTRY(0x00000600, u32, pre, "legofdmbw20ul5ghpo", legofdmbw20ul5ghpo, 0, fb);
+	ENTRY(0xfffffe00, u32, pre, "mcsbw202gpo", mcsbw202gpo, 0, fb);
+	ENTRY(0x00000600, u32, pre, "mcsbw20ul2gpo", mcsbw20ul2gpo, 0, fb);
+	ENTRY(0xfffffe00, u32, pre, "mcsbw402gpo", mcsbw402gpo, 0, fb);
+	ENTRY(0xfffffe00, u32, pre, "mcsbw205glpo", mcsbw205glpo, 0, fb);
+	ENTRY(0x00000600, u32, pre, "mcsbw20ul5glpo", mcsbw20ul5glpo, 0, fb);
+	ENTRY(0xfffffe00, u32, pre, "mcsbw405glpo", mcsbw405glpo, 0, fb);
+	ENTRY(0xfffffe00, u32, pre, "mcsbw205gmpo", mcsbw205gmpo, 0, fb);
+	ENTRY(0x00000600, u32, pre, "mcsbw20ul5gmpo", mcsbw20ul5gmpo, 0, fb);
+	ENTRY(0xfffffe00, u32, pre, "mcsbw405gmpo", mcsbw405gmpo, 0, fb);
+	ENTRY(0xfffffe00, u32, pre, "mcsbw205ghpo", mcsbw205ghpo, 0, fb);
+	ENTRY(0x00000600, u32, pre, "mcsbw20ul5ghpo", mcsbw20ul5ghpo, 0, fb);
+	ENTRY(0xfffffe00, u32, pre, "mcsbw405ghpo", mcsbw405ghpo, 0, fb);
+	ENTRY(0x00000600, u16, pre, "mcs32po", mcs32po, 0, fb);
+	ENTRY(0x00000600, u16, pre, "legofdm40duppo", legofdm40duppo, 0, fb);
+	ENTRY(0x00000700, u8, pre, "pcieingress_war", pcieingress_war, 0, fb);
+
+	/* TODO: rev 11 support */
+	ENTRY(0x00000700, u8, pre, "rxgainerr2ga0", rxgainerr2ga[0], 0, fb);
+	ENTRY(0x00000700, u8, pre, "rxgainerr2ga1", rxgainerr2ga[1], 0, fb);
+	ENTRY(0x00000700, u8, pre, "rxgainerr2ga2", rxgainerr2ga[2], 0, fb);
+	ENTRY(0x00000700, u8, pre, "rxgainerr5gla0", rxgainerr5gla[0], 0, fb);
+	ENTRY(0x00000700, u8, pre, "rxgainerr5gla1", rxgainerr5gla[1], 0, fb);
+	ENTRY(0x00000700, u8, pre, "rxgainerr5gla2", rxgainerr5gla[2], 0, fb);
+	ENTRY(0x00000700, u8, pre, "rxgainerr5gma0", rxgainerr5gma[0], 0, fb);
+	ENTRY(0x00000700, u8, pre, "rxgainerr5gma1", rxgainerr5gma[1], 0, fb);
+	ENTRY(0x00000700, u8, pre, "rxgainerr5gma2", rxgainerr5gma[2], 0, fb);
+	ENTRY(0x00000700, u8, pre, "rxgainerr5gha0", rxgainerr5gha[0], 0, fb);
+	ENTRY(0x00000700, u8, pre, "rxgainerr5gha1", rxgainerr5gha[1], 0, fb);
+	ENTRY(0x00000700, u8, pre, "rxgainerr5gha2", rxgainerr5gha[2], 0, fb);
+	ENTRY(0x00000700, u8, pre, "rxgainerr5gua0", rxgainerr5gua[0], 0, fb);
+	ENTRY(0x00000700, u8, pre, "rxgainerr5gua1", rxgainerr5gua[1], 0, fb);
+	ENTRY(0x00000700, u8, pre, "rxgainerr5gua2", rxgainerr5gua[2], 0, fb);
+
+	ENTRY(0xfffffe00, u8, pre, "sar2g", sar2g, 0, fb);
+	ENTRY(0xfffffe00, u8, pre, "sar5g", sar5g, 0, fb);
+
+	/* TODO: rev 11 support */
+	ENTRY(0x00000700, u8, pre, "noiselvl2ga0", noiselvl2ga[0], 0, fb);
+	ENTRY(0x00000700, u8, pre, "noiselvl2ga1", noiselvl2ga[1], 0, fb);
+	ENTRY(0x00000700, u8, pre, "noiselvl2ga2", noiselvl2ga[2], 0, fb);
+	ENTRY(0x00000700, u8, pre, "noiselvl5gla0", noiselvl5gla[0], 0, fb);
+	ENTRY(0x00000700, u8, pre, "noiselvl5gla1", noiselvl5gla[1], 0, fb);
+	ENTRY(0x00000700, u8, pre, "noiselvl5gla2", noiselvl5gla[2], 0, fb);
+	ENTRY(0x00000700, u8, pre, "noiselvl5gma0", noiselvl5gma[0], 0, fb);
+	ENTRY(0x00000700, u8, pre, "noiselvl5gma1", noiselvl5gma[1], 0, fb);
+	ENTRY(0x00000700, u8, pre, "noiselvl5gma2", noiselvl5gma[2], 0, fb);
+	ENTRY(0x00000700, u8, pre, "noiselvl5gha0", noiselvl5gha[0], 0, fb);
+	ENTRY(0x00000700, u8, pre, "noiselvl5gha1", noiselvl5gha[1], 0, fb);
+	ENTRY(0x00000700, u8, pre, "noiselvl5gha2", noiselvl5gha[2], 0, fb);
+	ENTRY(0x00000700, u8, pre, "noiselvl5gua0", noiselvl5gua[0], 0, fb);
+	ENTRY(0x00000700, u8, pre, "noiselvl5gua1", noiselvl5gua[1], 0, fb);
+	ENTRY(0x00000700, u8, pre, "noiselvl5gua2", noiselvl5gua[2], 0, fb);
+}
+#undef ENTRY /* It's specific, uses a local variable; don't use it (again). */
+
+static void bcm47xx_fill_sprom_path_r4589(struct ssb_sprom *sprom,
+					  const char *prefix, bool fallback)
+{
+	char postfix[2];
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(sprom->core_pwr_info); i++) {
+		struct ssb_sprom_core_pwr_info *pwr_info;
+
+		pwr_info = &sprom->core_pwr_info[i];
+
+		snprintf(postfix, sizeof(postfix), "%i", i);
+		nvram_read_u8(prefix, postfix, "maxp2ga",
+			      &pwr_info->maxpwr_2g, 0, fallback);
+		nvram_read_u8(prefix, postfix, "itt2ga",
+			      &pwr_info->itssi_2g, 0, fallback);
+		nvram_read_u8(prefix, postfix, "itt5ga",
+			      &pwr_info->itssi_5g, 0, fallback);
+		nvram_read_u16(prefix, postfix, "pa2gw0a",
+			       &pwr_info->pa_2g[0], 0, fallback);
+		nvram_read_u16(prefix, postfix, "pa2gw1a",
+			       &pwr_info->pa_2g[1], 0, fallback);
+		nvram_read_u16(prefix, postfix, "pa2gw2a",
+			       &pwr_info->pa_2g[2], 0, fallback);
+		nvram_read_u8(prefix, postfix, "maxp5ga",
+			      &pwr_info->maxpwr_5g, 0, fallback);
+		nvram_read_u8(prefix, postfix, "maxp5gha",
+			      &pwr_info->maxpwr_5gh, 0, fallback);
+		nvram_read_u8(prefix, postfix, "maxp5gla",
+			      &pwr_info->maxpwr_5gl, 0, fallback);
+		nvram_read_u16(prefix, postfix, "pa5gw0a",
+			       &pwr_info->pa_5g[0], 0, fallback);
+		nvram_read_u16(prefix, postfix, "pa5gw1a",
+			       &pwr_info->pa_5g[1], 0, fallback);
+		nvram_read_u16(prefix, postfix, "pa5gw2a",
+			       &pwr_info->pa_5g[2], 0, fallback);
+		nvram_read_u16(prefix, postfix, "pa5glw0a",
+			       &pwr_info->pa_5gl[0], 0, fallback);
+		nvram_read_u16(prefix, postfix, "pa5glw1a",
+			       &pwr_info->pa_5gl[1], 0, fallback);
+		nvram_read_u16(prefix, postfix, "pa5glw2a",
+			       &pwr_info->pa_5gl[2], 0, fallback);
+		nvram_read_u16(prefix, postfix, "pa5ghw0a",
+			       &pwr_info->pa_5gh[0], 0, fallback);
+		nvram_read_u16(prefix, postfix, "pa5ghw1a",
+			       &pwr_info->pa_5gh[1], 0, fallback);
+		nvram_read_u16(prefix, postfix, "pa5ghw2a",
+			       &pwr_info->pa_5gh[2], 0, fallback);
+	}
+}
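+
+/*
+ * Per-core naming sketch (an assumption about the nvram_read_* helpers, not
+ * shown here): the postfix is appended to the variable name, so for core 0
+ * the loop above looks up NVRAM variables such as "maxp2ga0", "itt2ga0" and
+ * "pa2gw0a0", and for core 1 "maxp2ga1", "itt2ga1", "pa2gw0a1", and so on.
+ */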
+
+static void bcm47xx_fill_sprom_path_r45(struct ssb_sprom *sprom,
+					const char *prefix, bool fallback)
+{
+	char postfix[2];
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(sprom->core_pwr_info); i++) {
+		struct ssb_sprom_core_pwr_info *pwr_info;
+
+		pwr_info = &sprom->core_pwr_info[i];
+
+		snprintf(postfix, sizeof(postfix), "%i", i);
+		nvram_read_u16(prefix, postfix, "pa2gw3a",
+			       &pwr_info->pa_2g[3], 0, fallback);
+		nvram_read_u16(prefix, postfix, "pa5gw3a",
+			       &pwr_info->pa_5g[3], 0, fallback);
+		nvram_read_u16(prefix, postfix, "pa5glw3a",
+			       &pwr_info->pa_5gl[3], 0, fallback);
+		nvram_read_u16(prefix, postfix, "pa5ghw3a",
+			       &pwr_info->pa_5gh[3], 0, fallback);
+	}
+}
+
+static bool bcm47xx_is_valid_mac(u8 *mac)
+{
+	return mac && !(mac[0] == 0x00 && mac[1] == 0x90 && mac[2] == 0x4c);
+}
+
+static int bcm47xx_increase_mac_addr(u8 *mac, u8 num)
+{
+	u8 *oui = mac + ETH_ALEN/2 - 1;
+	u8 *p = mac + ETH_ALEN - 1;
+
+	do {
+		(*p) += num;
+		if (*p > num)
+			break;
+		p--;
+		num = 1;
+	} while (p != oui);
+
+	if (p == oui) {
+		pr_err("unable to fetch mac address\n");
+		return -ENOENT;
+	}
+	return 0;
+}
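+
+/*
+ * Worked example (made-up base address, not part of the driver): starting
+ * from 00:11:22:33:44:fe, bcm47xx_increase_mac_addr(mac, 2) wraps the last
+ * byte to 0x00 and carries one into the next byte, yielding
+ * 00:11:22:33:45:00.  Only the three NIC-specific bytes are walked; if the
+ * carry would spill into the OUI half, the function gives up with -ENOENT
+ * rather than fabricate an address in a foreign vendor range.
+ */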
+
+static int mac_addr_used = 2;
+
+static void bcm47xx_fill_sprom_ethernet(struct ssb_sprom *sprom,
+					const char *prefix, bool fallback)
+{
+	bool fb = fallback;
+
+	nvram_read_macaddr(prefix, "et0macaddr", sprom->et0mac, fallback);
+	nvram_read_u8(prefix, NULL, "et0mdcport", &sprom->et0mdcport, 0,
+		      fallback);
+	nvram_read_u8(prefix, NULL, "et0phyaddr", &sprom->et0phyaddr, 0,
+		      fallback);
+
+	nvram_read_macaddr(prefix, "et1macaddr", sprom->et1mac, fallback);
+	nvram_read_u8(prefix, NULL, "et1mdcport", &sprom->et1mdcport, 0,
+		      fallback);
+	nvram_read_u8(prefix, NULL, "et1phyaddr", &sprom->et1phyaddr, 0,
+		      fallback);
+
+	nvram_read_macaddr(prefix, "et2macaddr", sprom->et2mac, fb);
+	nvram_read_u8(prefix, NULL, "et2mdcport", &sprom->et2mdcport, 0, fb);
+	nvram_read_u8(prefix, NULL, "et2phyaddr", &sprom->et2phyaddr, 0, fb);
+
+	nvram_read_macaddr(prefix, "macaddr", sprom->il0mac, fallback);
+	nvram_read_macaddr(prefix, "il0macaddr", sprom->il0mac, fallback);
+
+	/* The address prefix 00:90:4C is used by Broadcom in their initial
+	 * configuration. When a MAC address with the prefix 00:90:4C is kept,
+	 * all devices from the same series share the same MAC address. To
+	 * prevent collisions we replace it with an address derived from the
+	 * board's base (et0macaddr) address.
+	 */
+	if (!bcm47xx_is_valid_mac(sprom->il0mac)) {
+		u8 mac[6];
+
+		nvram_read_macaddr(NULL, "et0macaddr", mac, false);
+		if (bcm47xx_is_valid_mac(mac)) {
+			int err = bcm47xx_increase_mac_addr(mac, mac_addr_used);
+
+			if (!err) {
+				ether_addr_copy(sprom->il0mac, mac);
+				mac_addr_used++;
+			}
+		}
+	}
+}
+
+static void bcm47xx_fill_board_data(struct ssb_sprom *sprom, const char *prefix,
+				    bool fallback)
+{
+	nvram_read_u32_2(prefix, "boardflags", &sprom->boardflags_lo,
+			 &sprom->boardflags_hi, fallback);
+	nvram_read_u32_2(prefix, "boardflags2", &sprom->boardflags2_lo,
+			 &sprom->boardflags2_hi, fallback);
+}
+
+void bcm47xx_fill_sprom(struct ssb_sprom *sprom, const char *prefix,
+			bool fallback)
+{
+	bcm47xx_fill_sprom_ethernet(sprom, prefix, fallback);
+	bcm47xx_fill_board_data(sprom, prefix, fallback);
+
+	nvram_read_u8(prefix, NULL, "sromrev", &sprom->revision, 0, fallback);
+
+	/* Entries requiring custom functions */
+	nvram_read_alpha2(prefix, "ccode", sprom->alpha2, fallback);
+	if (sprom->revision >= 3)
+		nvram_read_leddc(prefix, "leddc", &sprom->leddc_on_time,
+				 &sprom->leddc_off_time, fallback);
+
+	switch (sprom->revision) {
+	case 4:
+	case 5:
+		bcm47xx_fill_sprom_path_r4589(sprom, prefix, fallback);
+		bcm47xx_fill_sprom_path_r45(sprom, prefix, fallback);
+		break;
+	case 8:
+	case 9:
+		bcm47xx_fill_sprom_path_r4589(sprom, prefix, fallback);
+		break;
+	}
+
+	bcm47xx_sprom_fill_auto(sprom, prefix, fallback);
+}
+
+#if IS_BUILTIN(CONFIG_SSB) && IS_ENABLED(CONFIG_SSB_SPROM)
+static int bcm47xx_get_sprom_ssb(struct ssb_bus *bus, struct ssb_sprom *out)
+{
+	char prefix[10];
+
+	switch (bus->bustype) {
+	case SSB_BUSTYPE_SSB:
+		bcm47xx_fill_sprom(out, NULL, false);
+		return 0;
+	case SSB_BUSTYPE_PCI:
+		memset(out, 0, sizeof(struct ssb_sprom));
+		snprintf(prefix, sizeof(prefix), "pci/%u/%u/",
+			 bus->host_pci->bus->number + 1,
+			 PCI_SLOT(bus->host_pci->devfn));
+		bcm47xx_fill_sprom(out, prefix, false);
+		return 0;
+	default:
+		pr_warn("Unable to fill SPROM for given bustype.\n");
+		return -EINVAL;
+	}
+}
+#endif
+
+#if IS_BUILTIN(CONFIG_BCMA)
+/*
+ * Having many NVRAM entries for PCI devices led to repeating prefixes like
+ * pci/1/1/ all the time and wasted flash space. So at some point Broadcom
+ * decided to introduce short prefixes like 0:, 1:, 2:, etc.
+ * If we find e.g. devpath0=pci/2/1 or devpath0=pci/2/1/ we should use 0:
+ * instead of pci/2/1/.
+ */
+static void bcm47xx_sprom_apply_prefix_alias(char *prefix, size_t prefix_size)
+{
+	size_t prefix_len = strlen(prefix);
+	size_t short_len = prefix_len - 1;
+	char nvram_var[10];
+	char buf[20];
+	int i;
+
+	/* Passed prefix has to end with a slash */
+	if (!prefix_len || prefix[prefix_len - 1] != '/')
+		return;
+
+	for (i = 0; i < 3; i++) {
+		if (snprintf(nvram_var, sizeof(nvram_var), "devpath%d", i) <= 0)
+			continue;
+		if (bcm47xx_nvram_getenv(nvram_var, buf, sizeof(buf)) < 0)
+			continue;
+		if (!strcmp(buf, prefix) ||
+		    (short_len && strlen(buf) == short_len && !strncmp(buf, prefix, short_len))) {
+			snprintf(prefix, prefix_size, "%d:", i);
+			return;
+		}
+	}
+}
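+
+/*
+ * Usage sketch (hypothetical NVRAM contents): with "devpath0=pci/2/1" set,
+ *
+ *	char prefix[10] = "pci/2/1/";
+ *	bcm47xx_sprom_apply_prefix_alias(prefix, sizeof(prefix));
+ *
+ * rewrites prefix to "0:", so subsequent lookups use short variables such
+ * as "0:boardflags" instead of "pci/2/1/boardflags".
+ */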
+
+static int bcm47xx_get_sprom_bcma(struct bcma_bus *bus, struct ssb_sprom *out)
+{
+	struct bcma_boardinfo *binfo = &bus->boardinfo;
+	struct bcma_device *core;
+	char buf[10];
+	char *prefix;
+	bool fallback = false;
+
+	switch (bus->hosttype) {
+	case BCMA_HOSTTYPE_PCI:
+		memset(out, 0, sizeof(struct ssb_sprom));
+		/* On BCM47XX all PCI buses share the same domain */
+		if (IS_ENABLED(CONFIG_BCM47XX))
+			snprintf(buf, sizeof(buf), "pci/%u/%u/",
+				 bus->host_pci->bus->number + 1,
+				 PCI_SLOT(bus->host_pci->devfn));
+		else
+			snprintf(buf, sizeof(buf), "pci/%u/%u/",
+				 pci_domain_nr(bus->host_pci->bus) + 1,
+				 bus->host_pci->bus->number);
+		bcm47xx_sprom_apply_prefix_alias(buf, sizeof(buf));
+		prefix = buf;
+		break;
+	case BCMA_HOSTTYPE_SOC:
+		memset(out, 0, sizeof(struct ssb_sprom));
+		core = bcma_find_core(bus, BCMA_CORE_80211);
+		if (core) {
+			snprintf(buf, sizeof(buf), "sb/%u/",
+				 core->core_index);
+			prefix = buf;
+			fallback = true;
+		} else {
+			prefix = NULL;
+		}
+		break;
+	default:
+		pr_warn("Unable to fill SPROM for given bustype.\n");
+		return -EINVAL;
+	}
+
+	nvram_read_u16(prefix, NULL, "boardvendor", &binfo->vendor, 0, true);
+	if (!binfo->vendor)
+		binfo->vendor = SSB_BOARDVENDOR_BCM;
+	nvram_read_u16(prefix, NULL, "boardtype", &binfo->type, 0, true);
+
+	bcm47xx_fill_sprom(out, prefix, fallback);
+
+	return 0;
+}
+#endif
+
+static unsigned int bcm47xx_sprom_registered;
+
+/*
+ * On bcm47xx we need to register the SPROM fallback handler very early, so
+ * we can't use anything like a platform device / driver for this.
+ */
+int bcm47xx_sprom_register_fallbacks(void)
+{
+	if (bcm47xx_sprom_registered)
+		return 0;
+
+#if IS_BUILTIN(CONFIG_SSB) && IS_ENABLED(CONFIG_SSB_SPROM)
+	if (ssb_arch_register_fallback_sprom(&bcm47xx_get_sprom_ssb))
+		pr_warn("Failed to register ssb SPROM handler\n");
+#endif
+
+#if IS_BUILTIN(CONFIG_BCMA)
+	if (bcma_arch_register_fallback_sprom(&bcm47xx_get_sprom_bcma))
+		pr_warn("Failed to register bcma SPROM handler\n");
+#endif
+
+	bcm47xx_sprom_registered = 1;
+
+	return 0;
+}
+
+fs_initcall(bcm47xx_sprom_register_fallbacks);
diff --git a/drivers/firmware/dcdbas.c b/drivers/firmware/dcdbas.c
new file mode 100644
index 0000000..0bdea60
--- /dev/null
+++ b/drivers/firmware/dcdbas.c
@@ -0,0 +1,650 @@
+/*
+ *  dcdbas.c: Dell Systems Management Base Driver
+ *
+ *  The Dell Systems Management Base Driver provides a sysfs interface for
+ *  systems management software to perform System Management Interrupts (SMIs)
+ *  and Host Control Actions (power cycle or power off after OS shutdown) on
+ *  Dell systems.
+ *
+ *  See Documentation/dcdbas.txt for more information.
+ *
+ *  Copyright (C) 1995-2006 Dell Inc.
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License v2.0 as published by
+ *  the Free Software Foundation.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ */
+
+#include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
+#include <linux/errno.h>
+#include <linux/cpu.h>
+#include <linux/gfp.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/mc146818rtc.h>
+#include <linux/module.h>
+#include <linux/reboot.h>
+#include <linux/sched.h>
+#include <linux/smp.h>
+#include <linux/spinlock.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/mutex.h>
+#include <asm/io.h>
+
+#include "dcdbas.h"
+
+#define DRIVER_NAME		"dcdbas"
+#define DRIVER_VERSION		"5.6.0-3.2"
+#define DRIVER_DESCRIPTION	"Dell Systems Management Base Driver"
+
+static struct platform_device *dcdbas_pdev;
+
+static u8 *smi_data_buf;
+static dma_addr_t smi_data_buf_handle;
+static unsigned long smi_data_buf_size;
+static u32 smi_data_buf_phys_addr;
+static DEFINE_MUTEX(smi_data_lock);
+
+static unsigned int host_control_action;
+static unsigned int host_control_smi_type;
+static unsigned int host_control_on_shutdown;
+
+/**
+ * smi_data_buf_free: free SMI data buffer
+ */
+static void smi_data_buf_free(void)
+{
+	if (!smi_data_buf)
+		return;
+
+	dev_dbg(&dcdbas_pdev->dev, "%s: phys: %x size: %lu\n",
+		__func__, smi_data_buf_phys_addr, smi_data_buf_size);
+
+	dma_free_coherent(&dcdbas_pdev->dev, smi_data_buf_size, smi_data_buf,
+			  smi_data_buf_handle);
+	smi_data_buf = NULL;
+	smi_data_buf_handle = 0;
+	smi_data_buf_phys_addr = 0;
+	smi_data_buf_size = 0;
+}
+
+/**
+ * smi_data_buf_realloc: grow SMI data buffer if needed
+ */
+static int smi_data_buf_realloc(unsigned long size)
+{
+	void *buf;
+	dma_addr_t handle;
+
+	if (smi_data_buf_size >= size)
+		return 0;
+
+	if (size > MAX_SMI_DATA_BUF_SIZE)
+		return -EINVAL;
+
+	/* new buffer is needed */
+	buf = dma_alloc_coherent(&dcdbas_pdev->dev, size, &handle, GFP_KERNEL);
+	if (!buf) {
+		dev_dbg(&dcdbas_pdev->dev,
+			"%s: failed to allocate memory size %lu\n",
+			__func__, size);
+		return -ENOMEM;
+	}
+	/* memory zeroed by dma_alloc_coherent */
+
+	if (smi_data_buf)
+		memcpy(buf, smi_data_buf, smi_data_buf_size);
+
+	/* free any existing buffer */
+	smi_data_buf_free();
+
+	/* set up new buffer for use */
+	smi_data_buf = buf;
+	smi_data_buf_handle = handle;
+	smi_data_buf_phys_addr = (u32) virt_to_phys(buf);
+	smi_data_buf_size = size;
+
+	dev_dbg(&dcdbas_pdev->dev, "%s: phys: %x size: %lu\n",
+		__func__, smi_data_buf_phys_addr, smi_data_buf_size);
+
+	return 0;
+}
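+
+/*
+ * Semantics sketch (sizes are made up): the buffer only ever grows and its
+ * contents survive a grow, so with smi_data_lock held a sequence like
+ *
+ *	smi_data_buf_realloc(1024);	=> allocates a fresh 1 KiB buffer
+ *	smi_data_buf_realloc(4096);	=> copies the old 1 KiB, frees it
+ *	smi_data_buf_realloc(512);	=> no-op, returns 0
+ *
+ * leaves previously written SMI data intact throughout.
+ */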
+
+static ssize_t smi_data_buf_phys_addr_show(struct device *dev,
+					   struct device_attribute *attr,
+					   char *buf)
+{
+	return sprintf(buf, "%x\n", smi_data_buf_phys_addr);
+}
+
+static ssize_t smi_data_buf_size_show(struct device *dev,
+				      struct device_attribute *attr,
+				      char *buf)
+{
+	return sprintf(buf, "%lu\n", smi_data_buf_size);
+}
+
+static ssize_t smi_data_buf_size_store(struct device *dev,
+				       struct device_attribute *attr,
+				       const char *buf, size_t count)
+{
+	unsigned long buf_size;
+	ssize_t ret;
+
+	buf_size = simple_strtoul(buf, NULL, 10);
+
+	/* make sure SMI data buffer is at least buf_size */
+	mutex_lock(&smi_data_lock);
+	ret = smi_data_buf_realloc(buf_size);
+	mutex_unlock(&smi_data_lock);
+	if (ret)
+		return ret;
+
+	return count;
+}
+
+static ssize_t smi_data_read(struct file *filp, struct kobject *kobj,
+			     struct bin_attribute *bin_attr,
+			     char *buf, loff_t pos, size_t count)
+{
+	ssize_t ret;
+
+	mutex_lock(&smi_data_lock);
+	ret = memory_read_from_buffer(buf, count, &pos, smi_data_buf,
+					smi_data_buf_size);
+	mutex_unlock(&smi_data_lock);
+	return ret;
+}
+
+static ssize_t smi_data_write(struct file *filp, struct kobject *kobj,
+			      struct bin_attribute *bin_attr,
+			      char *buf, loff_t pos, size_t count)
+{
+	ssize_t ret;
+
+	if ((pos + count) > MAX_SMI_DATA_BUF_SIZE)
+		return -EINVAL;
+
+	mutex_lock(&smi_data_lock);
+
+	ret = smi_data_buf_realloc(pos + count);
+	if (ret)
+		goto out;
+
+	memcpy(smi_data_buf + pos, buf, count);
+	ret = count;
+out:
+	mutex_unlock(&smi_data_lock);
+	return ret;
+}
+
+static ssize_t host_control_action_show(struct device *dev,
+					struct device_attribute *attr,
+					char *buf)
+{
+	return sprintf(buf, "%u\n", host_control_action);
+}
+
+static ssize_t host_control_action_store(struct device *dev,
+					 struct device_attribute *attr,
+					 const char *buf, size_t count)
+{
+	ssize_t ret;
+
+	/* make sure buffer is available for host control command */
+	mutex_lock(&smi_data_lock);
+	ret = smi_data_buf_realloc(sizeof(struct apm_cmd));
+	mutex_unlock(&smi_data_lock);
+	if (ret)
+		return ret;
+
+	host_control_action = simple_strtoul(buf, NULL, 10);
+	return count;
+}
+
+static ssize_t host_control_smi_type_show(struct device *dev,
+					  struct device_attribute *attr,
+					  char *buf)
+{
+	return sprintf(buf, "%u\n", host_control_smi_type);
+}
+
+static ssize_t host_control_smi_type_store(struct device *dev,
+					   struct device_attribute *attr,
+					   const char *buf, size_t count)
+{
+	host_control_smi_type = simple_strtoul(buf, NULL, 10);
+	return count;
+}
+
+static ssize_t host_control_on_shutdown_show(struct device *dev,
+					     struct device_attribute *attr,
+					     char *buf)
+{
+	return sprintf(buf, "%u\n", host_control_on_shutdown);
+}
+
+static ssize_t host_control_on_shutdown_store(struct device *dev,
+					      struct device_attribute *attr,
+					      const char *buf, size_t count)
+{
+	host_control_on_shutdown = simple_strtoul(buf, NULL, 10);
+	return count;
+}
+
+static int raise_smi(void *par)
+{
+	struct smi_cmd *smi_cmd = par;
+
+	if (smp_processor_id() != 0) {
+		dev_dbg(&dcdbas_pdev->dev, "%s: failed to get CPU 0\n",
+			__func__);
+		return -EBUSY;
+	}
+
+	/* generate SMI */
+	/* inb to force posted write through and make SMI happen now */
+	asm volatile (
+		"outb %b0,%w1\n"
+		"inb %w1"
+		: /* no output args */
+		: "a" (smi_cmd->command_code),
+		  "d" (smi_cmd->command_address),
+		  "b" (smi_cmd->ebx),
+		  "c" (smi_cmd->ecx)
+		: "memory"
+	);
+
+	return 0;
+}
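+
+/*
+ * A note on the asm above: the "a"/"d"/"b"/"c" constraints load AL with
+ * smi_cmd->command_code, DX with smi_cmd->command_address (an I/O port)
+ * and EBX/ECX with the caller-supplied parameters, so the outb is what
+ * raises the SMI, and the dummy inb from the same port forces the posted
+ * write through, guaranteeing the SMI has run before raise_smi() returns.
+ */
+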
+/**
+ * dcdbas_smi_request: generate SMI request
+ *
+ * Called with smi_data_lock held.
+ */
+int dcdbas_smi_request(struct smi_cmd *smi_cmd)
+{
+	int ret;
+
+	if (smi_cmd->magic != SMI_CMD_MAGIC) {
+		dev_info(&dcdbas_pdev->dev, "%s: invalid magic value\n",
+			 __func__);
+		return -EBADR;
+	}
+
+	/* SMI requires CPU 0 */
+	get_online_cpus();
+	ret = smp_call_on_cpu(0, raise_smi, smi_cmd, true);
+	put_online_cpus();
+
+	return ret;
+}
+
+/**
+ * smi_request_store:
+ *
+ * The valid values are:
+ * 0: zero SMI data buffer
+ * 1: generate calling interface SMI
+ * 2: generate raw SMI
+ *
+ * User application writes smi_cmd to smi_data before telling driver
+ * to generate SMI.
+ */
+static ssize_t smi_request_store(struct device *dev,
+				 struct device_attribute *attr,
+				 const char *buf, size_t count)
+{
+	struct smi_cmd *smi_cmd;
+	unsigned long val = simple_strtoul(buf, NULL, 10);
+	ssize_t ret;
+
+	mutex_lock(&smi_data_lock);
+
+	if (smi_data_buf_size < sizeof(struct smi_cmd)) {
+		ret = -ENODEV;
+		goto out;
+	}
+	smi_cmd = (struct smi_cmd *)smi_data_buf;
+
+	switch (val) {
+	case 2:
+		/* Raw SMI */
+		ret = dcdbas_smi_request(smi_cmd);
+		if (!ret)
+			ret = count;
+		break;
+	case 1:
+		/* Calling Interface SMI */
+		smi_cmd->ebx = (u32) virt_to_phys(smi_cmd->command_buffer);
+		ret = dcdbas_smi_request(smi_cmd);
+		if (!ret)
+			ret = count;
+		break;
+	case 0:
+		memset(smi_data_buf, 0, smi_data_buf_size);
+		ret = count;
+		break;
+	default:
+		ret = -EINVAL;
+		break;
+	}
+
+out:
+	mutex_unlock(&smi_data_lock);
+	return ret;
+}
+EXPORT_SYMBOL(dcdbas_smi_request);
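+
+/*
+ * Userspace flow sketch (hedged: error handling and close() omitted; the
+ * sysfs path follows from the "dcdbas" platform device registered below):
+ *
+ *	int fd;
+ *
+ *	fd = open("/sys/devices/platform/dcdbas/smi_data_buf_size", O_WRONLY);
+ *	write(fd, "4096", 4);		=> make room for the struct smi_cmd
+ *	... write the prepared struct smi_cmd to .../smi_data ...
+ *	fd = open("/sys/devices/platform/dcdbas/smi_request", O_WRONLY);
+ *	write(fd, "1", 1);		=> 1 = calling interface SMI (see above)
+ */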
+
+/**
+ * host_control_smi: generate host control SMI
+ *
+ * Caller must set up the host control command in smi_data_buf.
+ */
+static int host_control_smi(void)
+{
+	struct apm_cmd *apm_cmd;
+	u8 *data;
+	unsigned long flags;
+	u32 num_ticks;
+	s8 cmd_status;
+	u8 index;
+
+	apm_cmd = (struct apm_cmd *)smi_data_buf;
+	apm_cmd->status = ESM_STATUS_CMD_UNSUCCESSFUL;
+
+	switch (host_control_smi_type) {
+	case HC_SMITYPE_TYPE1:
+		spin_lock_irqsave(&rtc_lock, flags);
+		/* write SMI data buffer physical address */
+		data = (u8 *)&smi_data_buf_phys_addr;
+		for (index = PE1300_CMOS_CMD_STRUCT_PTR;
+		     index < (PE1300_CMOS_CMD_STRUCT_PTR + 4);
+		     index++, data++) {
+			outb(index,
+			     (CMOS_BASE_PORT + CMOS_PAGE2_INDEX_PORT_PIIX4));
+			outb(*data,
+			     (CMOS_BASE_PORT + CMOS_PAGE2_DATA_PORT_PIIX4));
+		}
+
+		/* first set status to -1 as called for by the spec */
+		cmd_status = ESM_STATUS_CMD_UNSUCCESSFUL;
+		outb((u8) cmd_status, PCAT_APM_STATUS_PORT);
+
+		/* generate SMM call */
+		outb(ESM_APM_CMD, PCAT_APM_CONTROL_PORT);
+		spin_unlock_irqrestore(&rtc_lock, flags);
+
+		/* wait a short while to see if it executed */
+		num_ticks = TIMEOUT_USEC_SHORT_SEMA_BLOCKING;
+		while ((cmd_status = inb(PCAT_APM_STATUS_PORT))
+		       == ESM_STATUS_CMD_UNSUCCESSFUL) {
+			num_ticks--;
+			if (num_ticks == EXPIRED_TIMER)
+				return -ETIME;
+		}
+		break;
+
+	case HC_SMITYPE_TYPE2:
+	case HC_SMITYPE_TYPE3:
+		spin_lock_irqsave(&rtc_lock, flags);
+		/* write SMI data buffer physical address */
+		data = (u8 *)&smi_data_buf_phys_addr;
+		for (index = PE1400_CMOS_CMD_STRUCT_PTR;
+		     index < (PE1400_CMOS_CMD_STRUCT_PTR + 4);
+		     index++, data++) {
+			outb(index, (CMOS_BASE_PORT + CMOS_PAGE1_INDEX_PORT));
+			outb(*data, (CMOS_BASE_PORT + CMOS_PAGE1_DATA_PORT));
+		}
+
+		/* generate SMM call */
+		if (host_control_smi_type == HC_SMITYPE_TYPE3)
+			outb(ESM_APM_CMD, PCAT_APM_CONTROL_PORT);
+		else
+			outb(ESM_APM_CMD, PE1400_APM_CONTROL_PORT);
+
+		/* restore RTC index pointer since it was written to above */
+		CMOS_READ(RTC_REG_C);
+		spin_unlock_irqrestore(&rtc_lock, flags);
+
+		/* read control port back to serialize write */
+		cmd_status = inb(PE1400_APM_CONTROL_PORT);
+
+		/* wait a short while to see if it executed */
+		num_ticks = TIMEOUT_USEC_SHORT_SEMA_BLOCKING;
+		while (apm_cmd->status == ESM_STATUS_CMD_UNSUCCESSFUL) {
+			num_ticks--;
+			if (num_ticks == EXPIRED_TIMER)
+				return -ETIME;
+		}
+		break;
+
+	default:
+		dev_dbg(&dcdbas_pdev->dev, "%s: invalid SMI type %u\n",
+			__func__, host_control_smi_type);
+		return -ENOSYS;
+	}
+
+	return 0;
+}
+
+/**
+ * dcdbas_host_control: initiate host control
+ *
+ * This function is called by the driver after the system has
+ * finished shutting down if the user application specified a
+ * host control action to perform on shutdown.  It is safe to
+ * use smi_data_buf at this point because the system has finished
+ * shutting down and no userspace apps are running.
+ */
+static void dcdbas_host_control(void)
+{
+	struct apm_cmd *apm_cmd;
+	u8 action;
+
+	if (host_control_action == HC_ACTION_NONE)
+		return;
+
+	action = host_control_action;
+	host_control_action = HC_ACTION_NONE;
+
+	if (!smi_data_buf) {
+		dev_dbg(&dcdbas_pdev->dev, "%s: no SMI buffer\n", __func__);
+		return;
+	}
+
+	if (smi_data_buf_size < sizeof(struct apm_cmd)) {
+		dev_dbg(&dcdbas_pdev->dev, "%s: SMI buffer too small\n",
+			__func__);
+		return;
+	}
+
+	apm_cmd = (struct apm_cmd *)smi_data_buf;
+
+	/* power off takes precedence */
+	if (action & HC_ACTION_HOST_CONTROL_POWEROFF) {
+		apm_cmd->command = ESM_APM_POWER_CYCLE;
+		apm_cmd->reserved = 0;
+		*((s16 *)&apm_cmd->parameters.shortreq.parm[0]) = (s16) 0;
+		host_control_smi();
+	} else if (action & HC_ACTION_HOST_CONTROL_POWERCYCLE) {
+		apm_cmd->command = ESM_APM_POWER_CYCLE;
+		apm_cmd->reserved = 0;
+		*((s16 *)&apm_cmd->parameters.shortreq.parm[0]) = (s16) 20;
+		host_control_smi();
+	}
+}
+
+/**
+ * dcdbas_reboot_notify: handle reboot notification for host control
+ */
+static int dcdbas_reboot_notify(struct notifier_block *nb, unsigned long code,
+				void *unused)
+{
+	switch (code) {
+	case SYS_DOWN:
+	case SYS_HALT:
+	case SYS_POWER_OFF:
+		if (host_control_on_shutdown) {
+			/* firmware is going to perform host control action */
+			printk(KERN_WARNING "Please wait for shutdown "
+			       "action to complete...\n");
+			dcdbas_host_control();
+		}
+		break;
+	}
+
+	return NOTIFY_DONE;
+}
+
+static struct notifier_block dcdbas_reboot_nb = {
+	.notifier_call = dcdbas_reboot_notify,
+	.next = NULL,
+	.priority = INT_MIN
+};
+
+static DCDBAS_BIN_ATTR_RW(smi_data);
+
+static struct bin_attribute *dcdbas_bin_attrs[] = {
+	&bin_attr_smi_data,
+	NULL
+};
+
+static DCDBAS_DEV_ATTR_RW(smi_data_buf_size);
+static DCDBAS_DEV_ATTR_RO(smi_data_buf_phys_addr);
+static DCDBAS_DEV_ATTR_WO(smi_request);
+static DCDBAS_DEV_ATTR_RW(host_control_action);
+static DCDBAS_DEV_ATTR_RW(host_control_smi_type);
+static DCDBAS_DEV_ATTR_RW(host_control_on_shutdown);
+
+static struct attribute *dcdbas_dev_attrs[] = {
+	&dev_attr_smi_data_buf_size.attr,
+	&dev_attr_smi_data_buf_phys_addr.attr,
+	&dev_attr_smi_request.attr,
+	&dev_attr_host_control_action.attr,
+	&dev_attr_host_control_smi_type.attr,
+	&dev_attr_host_control_on_shutdown.attr,
+	NULL
+};
+
+static const struct attribute_group dcdbas_attr_group = {
+	.attrs = dcdbas_dev_attrs,
+	.bin_attrs = dcdbas_bin_attrs,
+};
+
+static int dcdbas_probe(struct platform_device *dev)
+{
+	int error;
+
+	host_control_action = HC_ACTION_NONE;
+	host_control_smi_type = HC_SMITYPE_NONE;
+
+	dcdbas_pdev = dev;
+
+	/*
+	 * BIOS SMI calls require that buffer addresses be in the 32-bit
+	 * address space. This is done by setting the DMA mask below.
+	 */
+	error = dma_set_coherent_mask(&dcdbas_pdev->dev, DMA_BIT_MASK(32));
+	if (error)
+		return error;
+
+	error = sysfs_create_group(&dev->dev.kobj, &dcdbas_attr_group);
+	if (error)
+		return error;
+
+	register_reboot_notifier(&dcdbas_reboot_nb);
+
+	dev_info(&dev->dev, "%s (version %s)\n",
+		 DRIVER_DESCRIPTION, DRIVER_VERSION);
+
+	return 0;
+}
+
+static int dcdbas_remove(struct platform_device *dev)
+{
+	unregister_reboot_notifier(&dcdbas_reboot_nb);
+	sysfs_remove_group(&dev->dev.kobj, &dcdbas_attr_group);
+
+	return 0;
+}
+
+static struct platform_driver dcdbas_driver = {
+	.driver		= {
+		.name	= DRIVER_NAME,
+	},
+	.probe		= dcdbas_probe,
+	.remove		= dcdbas_remove,
+};
+
+static const struct platform_device_info dcdbas_dev_info __initconst = {
+	.name		= DRIVER_NAME,
+	.id		= -1,
+	.dma_mask	= DMA_BIT_MASK(32),
+};
+
+static struct platform_device *dcdbas_pdev_reg;
+
+/**
+ * dcdbas_init: initialize driver
+ */
+static int __init dcdbas_init(void)
+{
+	int error;
+
+	error = platform_driver_register(&dcdbas_driver);
+	if (error)
+		return error;
+
+	dcdbas_pdev_reg = platform_device_register_full(&dcdbas_dev_info);
+	if (IS_ERR(dcdbas_pdev_reg)) {
+		error = PTR_ERR(dcdbas_pdev_reg);
+		goto err_unregister_driver;
+	}
+
+	return 0;
+
+ err_unregister_driver:
+	platform_driver_unregister(&dcdbas_driver);
+	return error;
+}
+
+/**
+ * dcdbas_exit: perform driver cleanup
+ */
+static void __exit dcdbas_exit(void)
+{
+	/*
+	 * make sure functions that use dcdbas_pdev are called
+	 * before platform_device_unregister
+	 */
+	unregister_reboot_notifier(&dcdbas_reboot_nb);
+
+	/*
+	 * We have to free the buffer here instead of dcdbas_remove
+	 * because only in the module exit function can we be sure that
+	 * all sysfs attributes belonging to this module have been
+	 * released.
+	 */
+	if (dcdbas_pdev)
+		smi_data_buf_free();
+	platform_device_unregister(dcdbas_pdev_reg);
+	platform_driver_unregister(&dcdbas_driver);
+}
+
+subsys_initcall_sync(dcdbas_init);
+module_exit(dcdbas_exit);
+
+MODULE_DESCRIPTION(DRIVER_DESCRIPTION " (version " DRIVER_VERSION ")");
+MODULE_VERSION(DRIVER_VERSION);
+MODULE_AUTHOR("Dell Inc.");
+MODULE_LICENSE("GPL");
+/* Any System or BIOS claiming to be by Dell */
+MODULE_ALIAS("dmi:*:[bs]vnD[Ee][Ll][Ll]*:*");
diff --git a/drivers/firmware/dcdbas.h b/drivers/firmware/dcdbas.h
new file mode 100644
index 0000000..ca3cb0a
--- /dev/null
+++ b/drivers/firmware/dcdbas.h
@@ -0,0 +1,107 @@
+/*
+ *  dcdbas.h: Definitions for Dell Systems Management Base driver
+ *
+ *  Copyright (C) 1995-2005 Dell Inc.
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License v2.0 as published by
+ *  the Free Software Foundation.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ */
+
+#ifndef _DCDBAS_H_
+#define _DCDBAS_H_
+
+#include <linux/device.h>
+#include <linux/sysfs.h>
+#include <linux/types.h>
+
+#define MAX_SMI_DATA_BUF_SIZE			(256 * 1024)
+
+#define HC_ACTION_NONE				(0)
+#define HC_ACTION_HOST_CONTROL_POWEROFF		BIT(1)
+#define HC_ACTION_HOST_CONTROL_POWERCYCLE	BIT(2)
+
+#define HC_SMITYPE_NONE				(0)
+#define HC_SMITYPE_TYPE1			(1)
+#define HC_SMITYPE_TYPE2			(2)
+#define HC_SMITYPE_TYPE3			(3)
+
+#define ESM_APM_CMD				(0x0A0)
+#define ESM_APM_POWER_CYCLE			(0x10)
+#define ESM_STATUS_CMD_UNSUCCESSFUL		(-1)
+
+#define CMOS_BASE_PORT				(0x070)
+#define CMOS_PAGE1_INDEX_PORT			(0)
+#define CMOS_PAGE1_DATA_PORT			(1)
+#define CMOS_PAGE2_INDEX_PORT_PIIX4		(2)
+#define CMOS_PAGE2_DATA_PORT_PIIX4		(3)
+#define PE1400_APM_CONTROL_PORT			(0x0B0)
+#define PCAT_APM_CONTROL_PORT			(0x0B2)
+#define PCAT_APM_STATUS_PORT			(0x0B3)
+#define PE1300_CMOS_CMD_STRUCT_PTR		(0x38)
+#define PE1400_CMOS_CMD_STRUCT_PTR		(0x70)
+
+#define MAX_SYSMGMT_SHORTCMD_PARMBUF_LEN	(14)
+#define MAX_SYSMGMT_LONGCMD_SGENTRY_NUM		(16)
+
+#define TIMEOUT_USEC_SHORT_SEMA_BLOCKING	(10000)
+#define EXPIRED_TIMER				(0)
+
+#define SMI_CMD_MAGIC				(0x534D4931)
+
+#define DCDBAS_DEV_ATTR_RW(_name) \
+	DEVICE_ATTR(_name,0600,_name##_show,_name##_store);
+
+#define DCDBAS_DEV_ATTR_RO(_name) \
+	DEVICE_ATTR(_name,0400,_name##_show,NULL);
+
+#define DCDBAS_DEV_ATTR_WO(_name) \
+	DEVICE_ATTR(_name,0200,NULL,_name##_store);
+
+#define DCDBAS_BIN_ATTR_RW(_name) \
+struct bin_attribute bin_attr_##_name = { \
+	.attr =  { .name = __stringify(_name), \
+		   .mode = 0600 }, \
+	.read =  _name##_read, \
+	.write = _name##_write, \
+}
+
+struct smi_cmd {
+	__u32 magic;
+	__u32 ebx;
+	__u32 ecx;
+	__u16 command_address;
+	__u8 command_code;
+	__u8 reserved;
+	__u8 command_buffer[1];
+} __attribute__ ((packed));
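+
+/*
+ * Population sketch (hypothetical command values): a prepared
+ * calling-interface request, as it sits in the shared SMI buffer, looks
+ * roughly like
+ *
+ *	struct smi_cmd *cmd = (struct smi_cmd *)smi_data_buf;
+ *
+ *	cmd->magic = SMI_CMD_MAGIC;	=> "SMI1", checked by the driver
+ *	cmd->command_address = 0xb2;	=> e.g. PCAT_APM_CONTROL_PORT
+ *	cmd->command_code = 0xe4;	=> made-up vendor command code
+ *
+ * For calling-interface requests the driver itself overwrites ebx with the
+ * physical address of command_buffer (see smi_request_store() in dcdbas.c).
+ */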
+
+struct apm_cmd {
+	__u8 command;
+	__s8 status;
+	__u16 reserved;
+	union {
+		struct {
+			__u8 parm[MAX_SYSMGMT_SHORTCMD_PARMBUF_LEN];
+		} __attribute__ ((packed)) shortreq;
+
+		struct {
+			__u16 num_sg_entries;
+			struct {
+				__u32 size;
+				__u64 addr;
+			} __attribute__ ((packed))
+			    sglist[MAX_SYSMGMT_LONGCMD_SGENTRY_NUM];
+		} __attribute__ ((packed)) longreq;
+	} __attribute__ ((packed)) parameters;
+} __attribute__ ((packed));
+
+int dcdbas_smi_request(struct smi_cmd *smi_cmd);
+
+#endif /* _DCDBAS_H_ */
diff --git a/drivers/firmware/dell_rbu.c b/drivers/firmware/dell_rbu.c
new file mode 100644
index 0000000..fb8af5c
--- /dev/null
+++ b/drivers/firmware/dell_rbu.c
@@ -0,0 +1,745 @@
+/*
+ * dell_rbu.c
+ * Bios Update driver for Dell systems
+ * Author: Dell Inc
+ *         Abhay Salunke <abhay_salunke@dell.com>
+ *
+ * Copyright (C) 2005 Dell Inc.
+ *
+ * The Remote BIOS Update (rbu) driver is used for updating the BIOS on DELL
+ * systems by creating entries in the /sys file system on Linux 2.6 and
+ * higher kernels. The driver supports two mechanisms for updating the BIOS,
+ * namely contiguous and packetized. Both methods still require some
+ * application to set the CMOS bit telling the BIOS to update itself
+ * after a reboot.
+ *
+ * Contiguous method:
+ * This driver writes the incoming data into a monolithic image by allocating
+ * contiguous physical pages large enough to accommodate the incoming BIOS
+ * image size.
+ *
+ * Packetized method:
+ * The driver writes the incoming packet image by allocating a new packet
+ * every time packet data is written. This driver requires an application
+ * to break the BIOS image into fixed-size packet chunks.
+ *
+ * See Documentation/dell_rbu.txt for more info.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License v2.0 as published by
+ * the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/blkdev.h>
+#include <linux/platform_device.h>
+#include <linux/spinlock.h>
+#include <linux/moduleparam.h>
+#include <linux/firmware.h>
+#include <linux/dma-mapping.h>
+
+MODULE_AUTHOR("Abhay Salunke <abhay_salunke@dell.com>");
+MODULE_DESCRIPTION("Driver for updating BIOS image on DELL systems");
+MODULE_LICENSE("GPL");
+MODULE_VERSION("3.2");
+
+#define BIOS_SCAN_LIMIT 0xffffffff
+#define MAX_IMAGE_LENGTH 16
+static struct _rbu_data {
+	void *image_update_buffer;
+	unsigned long image_update_buffer_size;
+	unsigned long bios_image_size;
+	int image_update_ordernum;
+	int dma_alloc;
+	spinlock_t lock;
+	unsigned long packet_read_count;
+	unsigned long num_packets;
+	unsigned long packetsize;
+	unsigned long imagesize;
+	int entry_created;
+} rbu_data;
+
+static char image_type[MAX_IMAGE_LENGTH + 1] = "mono";
+module_param_string(image_type, image_type, sizeof (image_type), 0);
+MODULE_PARM_DESC(image_type,
+	"BIOS image type. choose- mono or packet or init");
+
+static unsigned long allocation_floor = 0x100000;
+module_param(allocation_floor, ulong, 0644);
+MODULE_PARM_DESC(allocation_floor,
+    "Minimum address for allocations when using Packet mode");
+
+struct packet_data {
+	struct list_head list;
+	size_t length;
+	void *data;
+	int ordernum;
+};
+
+static struct packet_data packet_data_head;
+
+static struct platform_device *rbu_device;
+static int context;
+static dma_addr_t dell_rbu_dmaaddr;
+
+static void init_packet_head(void)
+{
+	INIT_LIST_HEAD(&packet_data_head.list);
+	rbu_data.packet_read_count = 0;
+	rbu_data.num_packets = 0;
+	rbu_data.packetsize = 0;
+	rbu_data.imagesize = 0;
+}
+
+static int create_packet(void *data, size_t length)
+{
+	struct packet_data *newpacket;
+	int ordernum = 0;
+	int retval = 0;
+	unsigned int packet_array_size = 0;
+	void **invalid_addr_packet_array = NULL;
+	void *packet_data_temp_buf = NULL;
+	unsigned int idx = 0;
+
+	pr_debug("create_packet: entry \n");
+
+	if (!rbu_data.packetsize) {
+		pr_debug("create_packet: packetsize not specified\n");
+		retval = -EINVAL;
+		goto out_noalloc;
+	}
+
+	spin_unlock(&rbu_data.lock);
+
+	newpacket = kzalloc(sizeof (struct packet_data), GFP_KERNEL);
+
+	if (!newpacket) {
+		printk(KERN_WARNING
+			"dell_rbu:%s: failed to allocate new "
+			"packet\n", __func__);
+		retval = -ENOMEM;
+		spin_lock(&rbu_data.lock);
+		goto out_noalloc;
+	}
+
+	ordernum = get_order(length);
+
+	/*
+	 * BIOS errata mean we cannot allocate packets below 1MB or they will
+	 * be overwritten by the BIOS.
+	 *
+	 * invalid_addr_packet_array temporarily holds packets that land
+	 * below the allocation floor so the allocator cannot hand the same
+	 * pages back on retry.
+	 *
+	 * NOTE: very simplistic, because we only need the floor to be at 1MB
+	 *       due to the BIOS errata. This shouldn't be used for higher
+	 *       floors or you will run out of memory trying to allocate the
+	 *       array.
+	 */
+	packet_array_size = max_t(unsigned int,
+				  allocation_floor / rbu_data.packetsize, 1);
+	invalid_addr_packet_array = kcalloc(packet_array_size, sizeof(void *),
+						GFP_KERNEL);
+
+	if (!invalid_addr_packet_array) {
+		printk(KERN_WARNING
+			"dell_rbu:%s: failed to allocate "
+			"invalid_addr_packet_array \n",
+			__func__);
+		retval = -ENOMEM;
+		spin_lock(&rbu_data.lock);
+		goto out_alloc_packet;
+	}
+
+	while (!packet_data_temp_buf) {
+		packet_data_temp_buf = (unsigned char *)
+			__get_free_pages(GFP_KERNEL, ordernum);
+		if (!packet_data_temp_buf) {
+			printk(KERN_WARNING
+				"dell_rbu:%s: failed to allocate new "
+				"packet\n", __func__);
+			retval = -ENOMEM;
+			spin_lock(&rbu_data.lock);
+			goto out_alloc_packet_array;
+		}
+
+		if ((unsigned long)virt_to_phys(packet_data_temp_buf)
+				< allocation_floor) {
+			pr_debug("packet 0x%lx below floor at 0x%lx.\n",
+					(unsigned long)virt_to_phys(
+						packet_data_temp_buf),
+					allocation_floor);
+			invalid_addr_packet_array[idx++] = packet_data_temp_buf;
+			packet_data_temp_buf = NULL;
+		}
+	}
+	spin_lock(&rbu_data.lock);
+
+	newpacket->data = packet_data_temp_buf;
+
+	pr_debug("create_packet: newpacket at physical addr %lx\n",
+		(unsigned long)virt_to_phys(newpacket->data));
+
+	/* packets may not have fixed size */
+	newpacket->length = length;
+	newpacket->ordernum = ordernum;
+	++rbu_data.num_packets;
+
+	/* initialize the newly created packet headers */
+	INIT_LIST_HEAD(&newpacket->list);
+	list_add_tail(&newpacket->list, &packet_data_head.list);
+
+	memcpy(newpacket->data, data, length);
+
+	pr_debug("create_packet: exit \n");
+
+out_alloc_packet_array:
+	/* always free packet array */
+	for (; idx > 0; idx--) {
+		pr_debug("freeing unused packet below floor 0x%lx.\n",
+			(unsigned long)virt_to_phys(
+				invalid_addr_packet_array[idx-1]));
+		free_pages((unsigned long)invalid_addr_packet_array[idx-1],
+			ordernum);
+	}
+	kfree(invalid_addr_packet_array);
+
+out_alloc_packet:
+	/* if error, free data */
+	if (retval)
+		kfree(newpacket);
+
+out_noalloc:
+	return retval;
+}
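+
+/*
+ * Allocation-floor example (addresses are made up): with allocation_floor
+ * at its default of 0x100000, a __get_free_pages() result at physical
+ * 0x8f000 is parked in invalid_addr_packet_array[] and the loop retries;
+ * a later result at 0x1f40000 is accepted, after which all parked low
+ * pages are freed again.  Parking them is what stops the allocator from
+ * handing the same sub-1MB region straight back on the retry.
+ */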
+
+static int packetize_data(const u8 *data, size_t length)
+{
+	int rc = 0;
+	int done = 0;
+	int packet_length;
+	u8 *temp;
+	u8 *end = (u8 *) data + length;
+	pr_debug("packetize_data: data length %zd\n", length);
+	if (!rbu_data.packetsize) {
+		printk(KERN_WARNING
+			"dell_rbu: packetsize not specified\n");
+		return -EIO;
+	}
+
+	temp = (u8 *) data;
+
+	/* packetize the hunk */
+	while (!done) {
+		if ((temp + rbu_data.packetsize) < end)
+			packet_length = rbu_data.packetsize;
+		else {
+			/* this is the last packet */
+			packet_length = end - temp;
+			done = 1;
+		}
+
+		rc = create_packet(temp, packet_length);
+		if (rc)
+			return rc;
+
+		pr_debug("%p:%td\n", temp, (end - temp));
+		temp += packet_length;
+	}
+
+	rbu_data.imagesize = length;
+
+	return rc;
+}
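+
+/*
+ * Worked example (made-up sizes): with rbu_data.packetsize = 4096, a
+ * 10000-byte image is split by the loop above into packets of 4096,
+ * 4096 and 1808 bytes.  Only the final packet may be short, and
+ * rbu_data.imagesize records the full 10000 bytes for the readers below.
+ */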
+
+static int do_packet_read(char *data, struct list_head *ptemp_list,
+	int length, int bytes_read, int *list_read_count)
+{
+	void *ptemp_buf;
+	struct packet_data *newpacket = NULL;
+	int bytes_copied = 0;
+	int j = 0;
+
+	newpacket = list_entry(ptemp_list, struct packet_data, list);
+	*list_read_count += newpacket->length;
+
+	if (*list_read_count > bytes_read) {
+		/* point to the start of unread data */
+		j = newpacket->length - (*list_read_count - bytes_read);
+		/* point to the offset in the packet buffer */
+		ptemp_buf = (u8 *) newpacket->data + j;
+		/*
+		 * check if there is enough room in
+		 * the incoming buffer
+		 */
+		if (length > (*list_read_count - bytes_read))
+			/*
+			 * copy what ever is there in this
+			 * packet and move on
+			 */
+			bytes_copied = (*list_read_count - bytes_read);
+		else
+			/* copy the remaining */
+			bytes_copied = length;
+		memcpy(data, ptemp_buf, bytes_copied);
+	}
+	return bytes_copied;
+}
+
+static int packet_read_list(char *data, size_t *pread_length)
+{
+	struct list_head *ptemp_list;
+	int temp_count = 0;
+	int bytes_copied = 0;
+	int bytes_read = 0;
+	int remaining_bytes = 0;
+	char *pdest = data;
+
+	/* check if we have any packets */
+	if (rbu_data.num_packets == 0)
+		return -ENOMEM;
+
+	remaining_bytes = *pread_length;
+	bytes_read = rbu_data.packet_read_count;
+
+	ptemp_list = (&packet_data_head.list)->next;
+	while (!list_empty(ptemp_list)) {
+		bytes_copied = do_packet_read(pdest, ptemp_list,
+			remaining_bytes, bytes_read, &temp_count);
+		remaining_bytes -= bytes_copied;
+		bytes_read += bytes_copied;
+		pdest += bytes_copied;
+		/*
+		 * check if we reached end of buffer before reaching the
+		 * last packet
+		 */
+		if (remaining_bytes == 0)
+			break;
+
+		ptemp_list = ptemp_list->next;
+	}
+	/* finally set the bytes read */
+	*pread_length = bytes_read - rbu_data.packet_read_count;
+	rbu_data.packet_read_count = bytes_read;
+	return 0;
+}
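+
+/*
+ * Read-offset example (made-up sizes): with two 4096-byte packets and a
+ * caller reading 6000 bytes per call, the first call copies all of packet
+ * 0 plus 1904 bytes of packet 1 and leaves packet_read_count at 6000; the
+ * second call resumes 1904 bytes into packet 1, exactly the offset that
+ * the "*list_read_count > bytes_read" bookkeeping in do_packet_read()
+ * computes.
+ */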
+
+static void packet_empty_list(void)
+{
+	struct list_head *ptemp_list;
+	struct list_head *pnext_list;
+	struct packet_data *newpacket;
+
+	ptemp_list = (&packet_data_head.list)->next;
+	while (!list_empty(ptemp_list)) {
+		newpacket =
+			list_entry(ptemp_list, struct packet_data, list);
+		pnext_list = ptemp_list->next;
+		list_del(ptemp_list);
+		ptemp_list = pnext_list;
+		/*
+		 * zero out the RBU packet memory before freeing
+		 * to make sure there are no stale RBU packets left in memory
+		 */
+		memset(newpacket->data, 0, rbu_data.packetsize);
+		free_pages((unsigned long) newpacket->data,
+			newpacket->ordernum);
+		kfree(newpacket);
+	}
+	rbu_data.packet_read_count = 0;
+	rbu_data.num_packets = 0;
+	rbu_data.imagesize = 0;
+}
+
+/*
+ * img_update_free: Frees the buffer allocated for storing BIOS image
+ * Always called with lock held and returned with lock held
+ */
+static void img_update_free(void)
+{
+	if (!rbu_data.image_update_buffer)
+		return;
+	/*
+	 * zero out this buffer before freeing it to get rid of any stale
+	 * BIOS image copied in memory.
+	 */
+	memset(rbu_data.image_update_buffer, 0,
+		rbu_data.image_update_buffer_size);
+	if (rbu_data.dma_alloc == 1)
+		dma_free_coherent(NULL, rbu_data.bios_image_size,
+			rbu_data.image_update_buffer, dell_rbu_dmaaddr);
+	else
+		free_pages((unsigned long) rbu_data.image_update_buffer,
+			rbu_data.image_update_ordernum);
+
+	/*
+	 * Re-initialize the rbu_data variables after a free
+	 */
+	rbu_data.image_update_ordernum = -1;
+	rbu_data.image_update_buffer = NULL;
+	rbu_data.image_update_buffer_size = 0;
+	rbu_data.bios_image_size = 0;
+	rbu_data.dma_alloc = 0;
+}
+
+/*
+ * img_update_realloc: This function allocates the contiguous pages to
+ * accommodate the requested size of data. The memory address and size
+ * values are stored globally and on every call to this function the new
+ * size is checked to see if more data is required than the existing size.
+ * If true the previous memory is freed and new allocation is done to
+ * accommodate the new size. If the incoming size is less than the
+ * already allocated size, then that memory is reused. This function is
+ * called with lock held and returns with lock held.
+ */
+static int img_update_realloc(unsigned long size)
+{
+	unsigned char *image_update_buffer = NULL;
+	unsigned long rc;
+	unsigned long img_buf_phys_addr;
+	int ordernum;
+	int dma_alloc = 0;
+
+	/*
+	 * check if the buffer of sufficient size has been
+	 * already allocated
+	 */
+	if (rbu_data.image_update_buffer_size >= size) {
+		/*
+		 * check for corruption
+		 */
+		if ((size != 0) && (rbu_data.image_update_buffer == NULL)) {
+			printk(KERN_ERR "dell_rbu:%s: corruption "
+				"check failed\n", __func__);
+			return -EINVAL;
+		}
+		/*
+		 * we have a valid pre-allocated buffer with
+		 * sufficient size
+		 */
+		return 0;
+	}
+
+	/*
+	 * free any previously allocated buffer
+	 */
+	img_update_free();
+
+	spin_unlock(&rbu_data.lock);
+
+	ordernum = get_order(size);
+	image_update_buffer =
+		(unsigned char *) __get_free_pages(GFP_KERNEL, ordernum);
+
+	img_buf_phys_addr =
+		(unsigned long) virt_to_phys(image_update_buffer);
+
+	if (img_buf_phys_addr > BIOS_SCAN_LIMIT) {
+		free_pages((unsigned long) image_update_buffer, ordernum);
+		ordernum = -1;
+		image_update_buffer = dma_alloc_coherent(NULL, size,
+			&dell_rbu_dmaaddr, GFP_KERNEL);
+		dma_alloc = 1;
+	}
+
+	spin_lock(&rbu_data.lock);
+
+	if (image_update_buffer != NULL) {
+		rbu_data.image_update_buffer = image_update_buffer;
+		rbu_data.image_update_buffer_size = size;
+		rbu_data.bios_image_size =
+			rbu_data.image_update_buffer_size;
+		rbu_data.image_update_ordernum = ordernum;
+		rbu_data.dma_alloc = dma_alloc;
+		rc = 0;
+	} else {
+		pr_debug("Not enough memory for image update:"
+			"size = %ld\n", size);
+		rc = -ENOMEM;
+	}
+
+	return rc;
+}
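+
+/*
+ * A sketch of the fallback intent above: the BIOS scans for the monolithic
+ * image below BIOS_SCAN_LIMIT (4GB), so when __get_free_pages() happens to
+ * return memory whose physical address lies above that limit, the pages
+ * are released and the buffer is re-allocated with dma_alloc_coherent()
+ * so that dell_rbu_dmaaddr stays within reach of the BIOS;
+ * rbu_data.dma_alloc records which allocator the buffer must later be
+ * freed with (see img_update_free()).
+ */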
+
+static ssize_t read_packet_data(char *buffer, loff_t pos, size_t count)
+{
+	int retval;
+	size_t bytes_left;
+	size_t data_length;
+	char *ptempBuf = buffer;
+
+	/* check to see if we have something to return */
+	if (rbu_data.num_packets == 0) {
+		pr_debug("read_packet_data: no packets written\n");
+		retval = -ENOMEM;
+		goto read_rbu_data_exit;
+	}
+
+	if (pos > rbu_data.imagesize) {
+		retval = 0;
+		printk(KERN_WARNING "dell_rbu:read_packet_data: "
+			"data underrun\n");
+		goto read_rbu_data_exit;
+	}
+
+	bytes_left = rbu_data.imagesize - pos;
+	data_length = min(bytes_left, count);
+
+	if ((retval = packet_read_list(ptempBuf, &data_length)) < 0)
+		goto read_rbu_data_exit;
+
+	if ((pos + count) > rbu_data.imagesize) {
+		rbu_data.packet_read_count = 0;
+		/* this was the last copy */
+		retval = bytes_left;
+	} else
+		retval = count;
+
+read_rbu_data_exit:
+	return retval;
+}
+
+static ssize_t read_rbu_mono_data(char *buffer, loff_t pos, size_t count)
+{
+	/* check to see if we have something to return */
+	if ((rbu_data.image_update_buffer == NULL) ||
+		(rbu_data.bios_image_size == 0)) {
+		pr_debug("read_rbu_data_mono: image_update_buffer %p ,"
+			"bios_image_size %lu\n",
+			rbu_data.image_update_buffer,
+			rbu_data.bios_image_size);
+		return -ENOMEM;
+	}
+
+	return memory_read_from_buffer(buffer, count, &pos,
+			rbu_data.image_update_buffer, rbu_data.bios_image_size);
+}
+
+static ssize_t read_rbu_data(struct file *filp, struct kobject *kobj,
+			     struct bin_attribute *bin_attr,
+			     char *buffer, loff_t pos, size_t count)
+{
+	ssize_t ret_count = 0;
+
+	spin_lock(&rbu_data.lock);
+
+	if (!strcmp(image_type, "mono"))
+		ret_count = read_rbu_mono_data(buffer, pos, count);
+	else if (!strcmp(image_type, "packet"))
+		ret_count = read_packet_data(buffer, pos, count);
+	else
+		pr_debug("read_rbu_data: invalid image type specified\n");
+
+	spin_unlock(&rbu_data.lock);
+	return ret_count;
+}
+
+static void callbackfn_rbu(const struct firmware *fw, void *context)
+{
+	rbu_data.entry_created = 0;
+
+	if (!fw)
+		return;
+
+	if (!fw->size)
+		goto out;
+
+	spin_lock(&rbu_data.lock);
+	if (!strcmp(image_type, "mono")) {
+		if (!img_update_realloc(fw->size))
+			memcpy(rbu_data.image_update_buffer,
+				fw->data, fw->size);
+	} else if (!strcmp(image_type, "packet")) {
+		/*
+		 * we need to free previous packets if a
+		 * new hunk of packets needs to be downloaded
+		 */
+		packet_empty_list();
+		if (packetize_data(fw->data, fw->size))
+			/* In case something goes wrong while we are
+			 * in the middle of packetizing the data, we
+			 * need to free up whatever packets might
+			 * have been created before we quit.
+			 */
+			packet_empty_list();
+	} else
+		pr_debug("invalid image type specified.\n");
+	spin_unlock(&rbu_data.lock);
+ out:
+	release_firmware(fw);
+}
+
+static ssize_t read_rbu_image_type(struct file *filp, struct kobject *kobj,
+				   struct bin_attribute *bin_attr,
+				   char *buffer, loff_t pos, size_t count)
+{
+	int size = 0;
+	if (!pos)
+		size = scnprintf(buffer, count, "%s\n", image_type);
+	return size;
+}
+
+static ssize_t write_rbu_image_type(struct file *filp, struct kobject *kobj,
+				    struct bin_attribute *bin_attr,
+				    char *buffer, loff_t pos, size_t count)
+{
+	int rc = count;
+	int req_firm_rc = 0;
+	int i;
+	spin_lock(&rbu_data.lock);
+	/*
+	 * Find the first newline or space
+	 */
+	for (i = 0; i < count; ++i)
+		if (buffer[i] == '\n' || buffer[i] == ' ') {
+			buffer[i] = '\0';
+			break;
+		}
+	if (i == count)
+		buffer[count] = '\0';
+
+	if (strstr(buffer, "mono"))
+		strcpy(image_type, "mono");
+	else if (strstr(buffer, "packet"))
+		strcpy(image_type, "packet");
+	else if (strstr(buffer, "init")) {
+		/*
+		 * If, due to user error, the driver gets into a bad
+		 * state where it is loaded but the
+		 * /sys/class/firmware/dell_rbu entries are missing,
+		 * the user can recreate the entries by writing "init"
+		 * to image_type.
+		 */
+		if (!rbu_data.entry_created) {
+			spin_unlock(&rbu_data.lock);
+			req_firm_rc = request_firmware_nowait(THIS_MODULE,
+				FW_ACTION_NOHOTPLUG, "dell_rbu",
+				&rbu_device->dev, GFP_KERNEL, &context,
+				callbackfn_rbu);
+			if (req_firm_rc) {
+				printk(KERN_ERR
+					"dell_rbu:%s request_firmware_nowait"
+					" failed %d\n", __func__, rc);
+				rc = -EIO;
+			} else
+				rbu_data.entry_created = 1;
+
+			spin_lock(&rbu_data.lock);
+		}
+	} else {
+		printk(KERN_WARNING "dell_rbu: image_type is invalid\n");
+		spin_unlock(&rbu_data.lock);
+		return -EINVAL;
+	}
+
+	/* we must free all previous allocations */
+	packet_empty_list();
+	img_update_free();
+	spin_unlock(&rbu_data.lock);
+
+	return rc;
+}
+
+static ssize_t read_rbu_packet_size(struct file *filp, struct kobject *kobj,
+				    struct bin_attribute *bin_attr,
+				    char *buffer, loff_t pos, size_t count)
+{
+	int size = 0;
+	if (!pos) {
+		spin_lock(&rbu_data.lock);
+		size = scnprintf(buffer, count, "%lu\n", rbu_data.packetsize);
+		spin_unlock(&rbu_data.lock);
+	}
+	return size;
+}
+
+static ssize_t write_rbu_packet_size(struct file *filp, struct kobject *kobj,
+				     struct bin_attribute *bin_attr,
+				     char *buffer, loff_t pos, size_t count)
+{
+	unsigned long temp;
+	spin_lock(&rbu_data.lock);
+	packet_empty_list();
+	if (sscanf(buffer, "%lu", &temp) == 1 && temp < 0xffffffff)
+		rbu_data.packetsize = temp;
+
+	spin_unlock(&rbu_data.lock);
+	return count;
+}
+
+static struct bin_attribute rbu_data_attr = {
+	.attr = {.name = "data", .mode = 0444},
+	.read = read_rbu_data,
+};
+
+static struct bin_attribute rbu_image_type_attr = {
+	.attr = {.name = "image_type", .mode = 0644},
+	.read = read_rbu_image_type,
+	.write = write_rbu_image_type,
+};
+
+static struct bin_attribute rbu_packet_size_attr = {
+	.attr = {.name = "packet_size", .mode = 0644},
+	.read = read_rbu_packet_size,
+	.write = write_rbu_packet_size,
+};
+
+static int __init dcdrbu_init(void)
+{
+	int rc;
+	spin_lock_init(&rbu_data.lock);
+
+	init_packet_head();
+	rbu_device = platform_device_register_simple("dell_rbu", -1, NULL, 0);
+	if (IS_ERR(rbu_device)) {
+		printk(KERN_ERR
+			"dell_rbu:%s:platform_device_register_simple "
+			"failed\n", __func__);
+		return PTR_ERR(rbu_device);
+	}
+
+	rc = sysfs_create_bin_file(&rbu_device->dev.kobj, &rbu_data_attr);
+	if (rc)
+		goto out_devreg;
+	rc = sysfs_create_bin_file(&rbu_device->dev.kobj, &rbu_image_type_attr);
+	if (rc)
+		goto out_data;
+	rc = sysfs_create_bin_file(&rbu_device->dev.kobj,
+		&rbu_packet_size_attr);
+	if (rc)
+		goto out_imtype;
+
+	rbu_data.entry_created = 0;
+	return 0;
+
+out_imtype:
+	sysfs_remove_bin_file(&rbu_device->dev.kobj, &rbu_image_type_attr);
+out_data:
+	sysfs_remove_bin_file(&rbu_device->dev.kobj, &rbu_data_attr);
+out_devreg:
+	platform_device_unregister(rbu_device);
+	return rc;
+}
+
+static __exit void dcdrbu_exit(void)
+{
+	spin_lock(&rbu_data.lock);
+	packet_empty_list();
+	img_update_free();
+	spin_unlock(&rbu_data.lock);
+	platform_device_unregister(rbu_device);
+}
+
+module_exit(dcdrbu_exit);
+module_init(dcdrbu_init);
+
+/* vim:noet:ts=8:sw=8
+*/
diff --git a/drivers/firmware/dmi-id.c b/drivers/firmware/dmi-id.c
new file mode 100644
index 0000000..624a11c
--- /dev/null
+++ b/drivers/firmware/dmi-id.c
@@ -0,0 +1,249 @@
+/*
+ * Export SMBIOS/DMI info via sysfs to userspace
+ *
+ * Copyright 2007, Lennart Poettering
+ *
+ * Licensed under GPLv2
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/dmi.h>
+#include <linux/device.h>
+#include <linux/slab.h>
+
+struct dmi_device_attribute {
+	struct device_attribute dev_attr;
+	int field;
+};
+#define to_dmi_dev_attr(_dev_attr) \
+	container_of(_dev_attr, struct dmi_device_attribute, dev_attr)
+
+static ssize_t sys_dmi_field_show(struct device *dev,
+				  struct device_attribute *attr,
+				  char *page)
+{
+	int field = to_dmi_dev_attr(attr)->field;
+	ssize_t len;
+	len = scnprintf(page, PAGE_SIZE, "%s\n", dmi_get_system_info(field));
+	page[len-1] = '\n';
+	return len;
+}
+
+#define DMI_ATTR(_name, _mode, _show, _field)			\
+	{ .dev_attr = __ATTR(_name, _mode, _show, NULL),	\
+	  .field = _field }
+
+#define DEFINE_DMI_ATTR_WITH_SHOW(_name, _mode, _field)		\
+static struct dmi_device_attribute sys_dmi_##_name##_attr =	\
+	DMI_ATTR(_name, _mode, sys_dmi_field_show, _field);
+
+DEFINE_DMI_ATTR_WITH_SHOW(bios_vendor,		0444, DMI_BIOS_VENDOR);
+DEFINE_DMI_ATTR_WITH_SHOW(bios_version,		0444, DMI_BIOS_VERSION);
+DEFINE_DMI_ATTR_WITH_SHOW(bios_date,		0444, DMI_BIOS_DATE);
+DEFINE_DMI_ATTR_WITH_SHOW(sys_vendor,		0444, DMI_SYS_VENDOR);
+DEFINE_DMI_ATTR_WITH_SHOW(product_name,		0444, DMI_PRODUCT_NAME);
+DEFINE_DMI_ATTR_WITH_SHOW(product_version,	0444, DMI_PRODUCT_VERSION);
+DEFINE_DMI_ATTR_WITH_SHOW(product_serial,	0400, DMI_PRODUCT_SERIAL);
+DEFINE_DMI_ATTR_WITH_SHOW(product_uuid,		0400, DMI_PRODUCT_UUID);
+DEFINE_DMI_ATTR_WITH_SHOW(product_sku,		0444, DMI_PRODUCT_SKU);
+DEFINE_DMI_ATTR_WITH_SHOW(product_family,	0444, DMI_PRODUCT_FAMILY);
+DEFINE_DMI_ATTR_WITH_SHOW(board_vendor,		0444, DMI_BOARD_VENDOR);
+DEFINE_DMI_ATTR_WITH_SHOW(board_name,		0444, DMI_BOARD_NAME);
+DEFINE_DMI_ATTR_WITH_SHOW(board_version,	0444, DMI_BOARD_VERSION);
+DEFINE_DMI_ATTR_WITH_SHOW(board_serial,		0400, DMI_BOARD_SERIAL);
+DEFINE_DMI_ATTR_WITH_SHOW(board_asset_tag,	0444, DMI_BOARD_ASSET_TAG);
+DEFINE_DMI_ATTR_WITH_SHOW(chassis_vendor,	0444, DMI_CHASSIS_VENDOR);
+DEFINE_DMI_ATTR_WITH_SHOW(chassis_type,		0444, DMI_CHASSIS_TYPE);
+DEFINE_DMI_ATTR_WITH_SHOW(chassis_version,	0444, DMI_CHASSIS_VERSION);
+DEFINE_DMI_ATTR_WITH_SHOW(chassis_serial,	0400, DMI_CHASSIS_SERIAL);
+DEFINE_DMI_ATTR_WITH_SHOW(chassis_asset_tag,	0444, DMI_CHASSIS_ASSET_TAG);
+
+static void ascii_filter(char *d, const char *s)
+{
+	/* Filter out characters we don't want to see in the modalias string */
+	for (; *s; s++)
+		if (*s > ' ' && *s < 127 && *s != ':')
+			*(d++) = *s;
+
+	*d = 0;
+}
+
+static ssize_t get_modalias(char *buffer, size_t buffer_size)
+{
+	static const struct mafield {
+		const char *prefix;
+		int field;
+	} fields[] = {
+		{ "bvn", DMI_BIOS_VENDOR },
+		{ "bvr", DMI_BIOS_VERSION },
+		{ "bd",  DMI_BIOS_DATE },
+		{ "svn", DMI_SYS_VENDOR },
+		{ "pn",  DMI_PRODUCT_NAME },
+		{ "pvr", DMI_PRODUCT_VERSION },
+		{ "rvn", DMI_BOARD_VENDOR },
+		{ "rn",  DMI_BOARD_NAME },
+		{ "rvr", DMI_BOARD_VERSION },
+		{ "cvn", DMI_CHASSIS_VENDOR },
+		{ "ct",  DMI_CHASSIS_TYPE },
+		{ "cvr", DMI_CHASSIS_VERSION },
+		{ NULL,  DMI_NONE }
+	};
+
+	ssize_t l, left;
+	char *p;
+	const struct mafield *f;
+
+	strcpy(buffer, "dmi");
+	p = buffer + 3;
+	left = buffer_size - 4;
+
+	for (f = fields; f->prefix && left > 0; f++) {
+		const char *c;
+		char *t;
+
+		c = dmi_get_system_info(f->field);
+		if (!c)
+			continue;
+
+		t = kmalloc(strlen(c) + 1, GFP_KERNEL);
+		if (!t)
+			break;
+		ascii_filter(t, c);
+		l = scnprintf(p, left, ":%s%s", f->prefix, t);
+		kfree(t);
+
+		p += l;
+		left -= l;
+	}
+
+	p[0] = ':';
+	p[1] = 0;
+
+	return p - buffer + 1;
+}
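+
+/*
+ * Example output (field values are hypothetical): on a box reporting
+ * "Dell Inc.", BIOS "A12" dated 01/02/2006, the filtered, colon-delimited
+ * result looks like
+ *
+ *	dmi:bvnDellInc.:bvrA12:bd01/02/2006:svnDellInc.:pnPowerEdge1850:...:
+ *
+ * ascii_filter() has already stripped spaces and ':' from every field,
+ * which is what lets patterns such as the dcdbas alias
+ * "dmi:*:[bs]vnD[Ee][Ll][Ll]*:*" match reliably.
+ */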
+
+static ssize_t sys_dmi_modalias_show(struct device *dev,
+				     struct device_attribute *attr, char *page)
+{
+	ssize_t r;
+	r = get_modalias(page, PAGE_SIZE-1);
+	page[r] = '\n';
+	page[r+1] = 0;
+	return r+1;
+}
+
+static struct device_attribute sys_dmi_modalias_attr =
+	__ATTR(modalias, 0444, sys_dmi_modalias_show, NULL);
+
+static struct attribute *sys_dmi_attributes[DMI_STRING_MAX+2];
+
+static struct attribute_group sys_dmi_attribute_group = {
+	.attrs = sys_dmi_attributes,
+};
+
+static const struct attribute_group* sys_dmi_attribute_groups[] = {
+	&sys_dmi_attribute_group,
+	NULL
+};
+
+static int dmi_dev_uevent(struct device *dev, struct kobj_uevent_env *env)
+{
+	ssize_t len;
+
+	if (add_uevent_var(env, "MODALIAS="))
+		return -ENOMEM;
+	len = get_modalias(&env->buf[env->buflen - 1],
+			   sizeof(env->buf) - env->buflen);
+	if (len >= (sizeof(env->buf) - env->buflen))
+		return -ENOMEM;
+	env->buflen += len;
+	return 0;
+}
+
+static struct class dmi_class = {
+	.name = "dmi",
+	.dev_release = (void(*)(struct device *)) kfree,
+	.dev_uevent = dmi_dev_uevent,
+};
+
+static struct device *dmi_dev;
+
+/* Initialization */
+
+#define ADD_DMI_ATTR(_name, _field) \
+	if (dmi_get_system_info(_field)) \
+		sys_dmi_attributes[i++] = &sys_dmi_##_name##_attr.dev_attr.attr;
+
+/* In a separate function to keep gcc 3.2 happy - do NOT merge this into
+   dmi_id_init! */
+static void __init dmi_id_init_attr_table(void)
+{
+	int i;
+
+	/* Not necessarily all DMI fields are available on all
+	 * systems, hence let's build an attribute table of just
+	 * what's available */
+	i = 0;
+	ADD_DMI_ATTR(bios_vendor,       DMI_BIOS_VENDOR);
+	ADD_DMI_ATTR(bios_version,      DMI_BIOS_VERSION);
+	ADD_DMI_ATTR(bios_date,         DMI_BIOS_DATE);
+	ADD_DMI_ATTR(sys_vendor,        DMI_SYS_VENDOR);
+	ADD_DMI_ATTR(product_name,      DMI_PRODUCT_NAME);
+	ADD_DMI_ATTR(product_version,   DMI_PRODUCT_VERSION);
+	ADD_DMI_ATTR(product_serial,    DMI_PRODUCT_SERIAL);
+	ADD_DMI_ATTR(product_uuid,      DMI_PRODUCT_UUID);
+	ADD_DMI_ATTR(product_family,    DMI_PRODUCT_FAMILY);
+	ADD_DMI_ATTR(product_sku,       DMI_PRODUCT_SKU);
+	ADD_DMI_ATTR(board_vendor,      DMI_BOARD_VENDOR);
+	ADD_DMI_ATTR(board_name,        DMI_BOARD_NAME);
+	ADD_DMI_ATTR(board_version,     DMI_BOARD_VERSION);
+	ADD_DMI_ATTR(board_serial,      DMI_BOARD_SERIAL);
+	ADD_DMI_ATTR(board_asset_tag,   DMI_BOARD_ASSET_TAG);
+	ADD_DMI_ATTR(chassis_vendor,    DMI_CHASSIS_VENDOR);
+	ADD_DMI_ATTR(chassis_type,      DMI_CHASSIS_TYPE);
+	ADD_DMI_ATTR(chassis_version,   DMI_CHASSIS_VERSION);
+	ADD_DMI_ATTR(chassis_serial,    DMI_CHASSIS_SERIAL);
+	ADD_DMI_ATTR(chassis_asset_tag, DMI_CHASSIS_ASSET_TAG);
+	sys_dmi_attributes[i++] = &sys_dmi_modalias_attr.attr;
+}
+
+static int __init dmi_id_init(void)
+{
+	int ret;
+
+	if (!dmi_available)
+		return -ENODEV;
+
+	dmi_id_init_attr_table();
+
+	ret = class_register(&dmi_class);
+	if (ret)
+		return ret;
+
+	dmi_dev = kzalloc(sizeof(*dmi_dev), GFP_KERNEL);
+	if (!dmi_dev) {
+		ret = -ENOMEM;
+		goto fail_class_unregister;
+	}
+
+	dmi_dev->class = &dmi_class;
+	dev_set_name(dmi_dev, "id");
+	dmi_dev->groups = sys_dmi_attribute_groups;
+
+	ret = device_register(dmi_dev);
+	if (ret)
+		goto fail_put_dmi_dev;
+
+	return 0;
+
+fail_put_dmi_dev:
+	put_device(dmi_dev);
+
+fail_class_unregister:
+	class_unregister(&dmi_class);
+
+	return ret;
+}
+
+arch_initcall(dmi_id_init);
diff --git a/drivers/firmware/dmi-sysfs.c b/drivers/firmware/dmi-sysfs.c
new file mode 100644
index 0000000..ecf2eeb
--- /dev/null
+++ b/drivers/firmware/dmi-sysfs.c
@@ -0,0 +1,697 @@
+/*
+ * dmi-sysfs.c
+ *
+ * This module exports the DMI tables read-only to userspace through the
+ * sysfs file system.
+ *
+ * Data is currently found below
+ *    /sys/firmware/dmi/...
+ *
+ * DMI attributes are presented in attribute files with names
+ * formatted as %d-%d, where the first integer is the structure
+ * type (0-255) and the second is the instance of that entry.
+ *
+ * Copyright 2011 Google, Inc.
+ */
+
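+/*
+ * Illustrative userspace sketch (not part of this module): dump the raw
+ * bytes of DMI entry type 1, instance 0, assuming this module has
+ * created the layout described above.
+ *
+ *	#include <stdio.h>
+ *
+ *	int main(void)
+ *	{
+ *		unsigned char b[4096];
+ *		size_t n;
+ *		FILE *f = fopen("/sys/firmware/dmi/entries/1-0/raw", "r");
+ *
+ *		if (!f)
+ *			return 1;
+ *		n = fread(b, 1, sizeof(b), f);
+ *		fclose(f);
+ *		if (n >= 2)
+ *			printf("read %zu bytes, type %u, length %u\n",
+ *			       n, b[0], b[1]);
+ *		return 0;
+ *	}
+ */
+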
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/kobject.h>
+#include <linux/dmi.h>
+#include <linux/capability.h>
+#include <linux/slab.h>
+#include <linux/list.h>
+#include <linux/io.h>
+#include <asm/dmi.h>
+
+#define MAX_ENTRY_TYPE 255 /* Most of these aren't used, but an entry
+			      type is only 8 bits, so 255 is the max */
+
+struct dmi_sysfs_entry {
+	struct dmi_header dh;
+	struct kobject kobj;
+	int instance;
+	int position;
+	struct list_head list;
+	struct kobject *child;
+};
+
+/*
+ * Global list of dmi_sysfs_entry.  Even though this should only be
+ * manipulated at setup and teardown, the lazy nature of the kobject
+ * system means we get lazy removes.
+ */
+static LIST_HEAD(entry_list);
+static DEFINE_SPINLOCK(entry_list_lock);
+
+/* dmi_sysfs_attribute - Top level attribute, used by all entries. */
+struct dmi_sysfs_attribute {
+	struct attribute attr;
+	ssize_t (*show)(struct dmi_sysfs_entry *entry, char *buf);
+};
+
+#define DMI_SYSFS_ATTR(_entry, _name) \
+struct dmi_sysfs_attribute dmi_sysfs_attr_##_entry##_##_name = { \
+	.attr = {.name = __stringify(_name), .mode = 0400}, \
+	.show = dmi_sysfs_##_entry##_##_name, \
+}
+
+/*
+ * dmi_sysfs_mapped_attribute - Attribute where we require the entry be
+ * mapped in.  Use in conjunction with dmi_sysfs_specialize_attr_ops.
+ */
+struct dmi_sysfs_mapped_attribute {
+	struct attribute attr;
+	ssize_t (*show)(struct dmi_sysfs_entry *entry,
+			const struct dmi_header *dh,
+			char *buf);
+};
+
+#define DMI_SYSFS_MAPPED_ATTR(_entry, _name) \
+struct dmi_sysfs_mapped_attribute dmi_sysfs_attr_##_entry##_##_name = { \
+	.attr = {.name = __stringify(_name), .mode = 0400}, \
+	.show = dmi_sysfs_##_entry##_##_name, \
+}
+
+/*************************************************
+ * Generic DMI entry support.
+ *************************************************/
+static void dmi_entry_free(struct kobject *kobj)
+{
+	kfree(kobj);
+}
+
+static struct dmi_sysfs_entry *to_entry(struct kobject *kobj)
+{
+	return container_of(kobj, struct dmi_sysfs_entry, kobj);
+}
+
+static struct dmi_sysfs_attribute *to_attr(struct attribute *attr)
+{
+	return container_of(attr, struct dmi_sysfs_attribute, attr);
+}
+
+static ssize_t dmi_sysfs_attr_show(struct kobject *kobj,
+				   struct attribute *_attr, char *buf)
+{
+	struct dmi_sysfs_entry *entry = to_entry(kobj);
+	struct dmi_sysfs_attribute *attr = to_attr(_attr);
+
+	/* DMI stuff is only ever admin visible */
+	if (!capable(CAP_SYS_ADMIN))
+		return -EACCES;
+
+	return attr->show(entry, buf);
+}
+
+static const struct sysfs_ops dmi_sysfs_attr_ops = {
+	.show = dmi_sysfs_attr_show,
+};
+
+typedef ssize_t (*dmi_callback)(struct dmi_sysfs_entry *,
+				const struct dmi_header *dh, void *);
+
+struct find_dmi_data {
+	struct dmi_sysfs_entry	*entry;
+	dmi_callback		callback;
+	void			*private;
+	int			instance_countdown;
+	ssize_t			ret;
+};
+
+static void find_dmi_entry_helper(const struct dmi_header *dh,
+				  void *_data)
+{
+	struct find_dmi_data *data = _data;
+	struct dmi_sysfs_entry *entry = data->entry;
+
+	/* Is this the entry we want? */
+	if (dh->type != entry->dh.type)
+		return;
+
+	if (data->instance_countdown != 0) {
+		/* try the next instance? */
+		data->instance_countdown--;
+		return;
+	}
+
+	/*
+	 * Don't ever revisit the instance.  Short circuit later
+	 * instances by letting the instance_countdown run negative.
+	 */
+	data->instance_countdown--;
+
+	/* Found the entry */
+	data->ret = data->callback(entry, dh, data->private);
+}
+
+/* State for passing the read parameters through dmi_find_entry() */
+struct dmi_read_state {
+	char *buf;
+	loff_t pos;
+	size_t count;
+};
+
+static ssize_t find_dmi_entry(struct dmi_sysfs_entry *entry,
+			      dmi_callback callback, void *private)
+{
+	struct find_dmi_data data = {
+		.entry = entry,
+		.callback = callback,
+		.private = private,
+		.instance_countdown = entry->instance,
+		.ret = -EIO,  /* To signal the entry disappeared */
+	};
+	int ret;
+
+	ret = dmi_walk(find_dmi_entry_helper, &data);
+	/* This shouldn't happen, but just in case. */
+	if (ret)
+		return -EINVAL;
+	return data.ret;
+}
+
+/*
+ * Calculate and return the byte length of the dmi entry identified by
+ * dh.  This includes both the formatted portion as well as the
+ * unformatted string space, including the two trailing nul characters.
+ */
+static size_t dmi_entry_length(const struct dmi_header *dh)
+{
+	const char *p = (const char *)dh;
+
+	p += dh->length;
+
+	while (p[0] || p[1])
+		p++;
+
+	return 2 + p - (const char *)dh;
+}
+
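+/*
+ * Worked example (hypothetical entry): with dh->length == 0x0D and the
+ * string-set "Acme\0v1.0\0\0" following the formatted area, the loop
+ * above stops at the two trailing nul bytes and the total length is
+ * 0x0D + 9 + 2 == 24 bytes.
+ */
+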
+/*************************************************
+ * Support bits for specialized DMI entry support
+ *************************************************/
+struct dmi_entry_attr_show_data {
+	struct attribute *attr;
+	char *buf;
+};
+
+static ssize_t dmi_entry_attr_show_helper(struct dmi_sysfs_entry *entry,
+					  const struct dmi_header *dh,
+					  void *_data)
+{
+	struct dmi_entry_attr_show_data *data = _data;
+	struct dmi_sysfs_mapped_attribute *attr;
+
+	attr = container_of(data->attr,
+			    struct dmi_sysfs_mapped_attribute, attr);
+	return attr->show(entry, dh, data->buf);
+}
+
+static ssize_t dmi_entry_attr_show(struct kobject *kobj,
+				   struct attribute *attr,
+				   char *buf)
+{
+	struct dmi_entry_attr_show_data data = {
+		.attr = attr,
+		.buf  = buf,
+	};
+	/* Find the entry according to our parent and call the
+	 * normalized show method hanging off of the attribute */
+	return find_dmi_entry(to_entry(kobj->parent),
+			      dmi_entry_attr_show_helper, &data);
+}
+
+static const struct sysfs_ops dmi_sysfs_specialize_attr_ops = {
+	.show = dmi_entry_attr_show,
+};
+
+/*************************************************
+ * Specialized DMI entry support.
+ *************************************************/
+
+/*** Type 15 - System Event Log ***/
+
+#define DMI_SEL_ACCESS_METHOD_IO8	0x00
+#define DMI_SEL_ACCESS_METHOD_IO2x8	0x01
+#define DMI_SEL_ACCESS_METHOD_IO16	0x02
+#define DMI_SEL_ACCESS_METHOD_PHYS32	0x03
+#define DMI_SEL_ACCESS_METHOD_GPNV	0x04
+
+struct dmi_system_event_log {
+	struct dmi_header header;
+	u16	area_length;
+	u16	header_start_offset;
+	u16	data_start_offset;
+	u8	access_method;
+	u8	status;
+	u32	change_token;
+	union {
+		struct {
+			u16 index_addr;
+			u16 data_addr;
+		} io;
+		u32	phys_addr32;
+		u16	gpnv_handle;
+		u32	access_method_address;
+	};
+	u8	header_format;
+	u8	type_descriptors_supported_count;
+	u8	per_log_type_descriptor_length;
+	u8	supported_log_type_descriptors[0];
+} __packed;
+
+#define DMI_SYSFS_SEL_FIELD(_field) \
+static ssize_t dmi_sysfs_sel_##_field(struct dmi_sysfs_entry *entry, \
+				      const struct dmi_header *dh, \
+				      char *buf) \
+{ \
+	struct dmi_system_event_log sel; \
+	if (sizeof(sel) > dmi_entry_length(dh)) \
+		return -EIO; \
+	memcpy(&sel, dh, sizeof(sel)); \
+	return sprintf(buf, "%u\n", sel._field); \
+} \
+static DMI_SYSFS_MAPPED_ATTR(sel, _field)
+
+DMI_SYSFS_SEL_FIELD(area_length);
+DMI_SYSFS_SEL_FIELD(header_start_offset);
+DMI_SYSFS_SEL_FIELD(data_start_offset);
+DMI_SYSFS_SEL_FIELD(access_method);
+DMI_SYSFS_SEL_FIELD(status);
+DMI_SYSFS_SEL_FIELD(change_token);
+DMI_SYSFS_SEL_FIELD(access_method_address);
+DMI_SYSFS_SEL_FIELD(header_format);
+DMI_SYSFS_SEL_FIELD(type_descriptors_supported_count);
+DMI_SYSFS_SEL_FIELD(per_log_type_descriptor_length);
+
+static struct attribute *dmi_sysfs_sel_attrs[] = {
+	&dmi_sysfs_attr_sel_area_length.attr,
+	&dmi_sysfs_attr_sel_header_start_offset.attr,
+	&dmi_sysfs_attr_sel_data_start_offset.attr,
+	&dmi_sysfs_attr_sel_access_method.attr,
+	&dmi_sysfs_attr_sel_status.attr,
+	&dmi_sysfs_attr_sel_change_token.attr,
+	&dmi_sysfs_attr_sel_access_method_address.attr,
+	&dmi_sysfs_attr_sel_header_format.attr,
+	&dmi_sysfs_attr_sel_type_descriptors_supported_count.attr,
+	&dmi_sysfs_attr_sel_per_log_type_descriptor_length.attr,
+	NULL,
+};
+
+static struct kobj_type dmi_system_event_log_ktype = {
+	.release = dmi_entry_free,
+	.sysfs_ops = &dmi_sysfs_specialize_attr_ops,
+	.default_attrs = dmi_sysfs_sel_attrs,
+};
+
+typedef u8 (*sel_io_reader)(const struct dmi_system_event_log *sel,
+			    loff_t offset);
+
+static DEFINE_MUTEX(io_port_lock);
+
+static u8 read_sel_8bit_indexed_io(const struct dmi_system_event_log *sel,
+				   loff_t offset)
+{
+	u8 ret;
+
+	mutex_lock(&io_port_lock);
+	outb((u8)offset, sel->io.index_addr);
+	ret = inb(sel->io.data_addr);
+	mutex_unlock(&io_port_lock);
+	return ret;
+}
+
+static u8 read_sel_2x8bit_indexed_io(const struct dmi_system_event_log *sel,
+				     loff_t offset)
+{
+	u8 ret;
+
+	mutex_lock(&io_port_lock);
+	outb((u8)offset, sel->io.index_addr);
+	outb((u8)(offset >> 8), sel->io.index_addr + 1);
+	ret = inb(sel->io.data_addr);
+	mutex_unlock(&io_port_lock);
+	return ret;
+}
+
+static u8 read_sel_16bit_indexed_io(const struct dmi_system_event_log *sel,
+				    loff_t offset)
+{
+	u8 ret;
+
+	mutex_lock(&io_port_lock);
+	outw((u16)offset, sel->io.index_addr);
+	ret = inb(sel->io.data_addr);
+	mutex_unlock(&io_port_lock);
+	return ret;
+}
+
+static sel_io_reader sel_io_readers[] = {
+	[DMI_SEL_ACCESS_METHOD_IO8]	= read_sel_8bit_indexed_io,
+	[DMI_SEL_ACCESS_METHOD_IO2x8]	= read_sel_2x8bit_indexed_io,
+	[DMI_SEL_ACCESS_METHOD_IO16]	= read_sel_16bit_indexed_io,
+};
+
+static ssize_t dmi_sel_raw_read_io(struct dmi_sysfs_entry *entry,
+				   const struct dmi_system_event_log *sel,
+				   char *buf, loff_t pos, size_t count)
+{
+	ssize_t wrote = 0;
+
+	sel_io_reader io_reader = sel_io_readers[sel->access_method];
+
+	while (count && pos < sel->area_length) {
+		count--;
+		*(buf++) = io_reader(sel, pos++);
+		wrote++;
+	}
+
+	return wrote;
+}
+
+static ssize_t dmi_sel_raw_read_phys32(struct dmi_sysfs_entry *entry,
+				       const struct dmi_system_event_log *sel,
+				       char *buf, loff_t pos, size_t count)
+{
+	u8 __iomem *mapped;
+	ssize_t wrote = 0;
+
+	mapped = dmi_remap(sel->access_method_address, sel->area_length);
+	if (!mapped)
+		return -EIO;
+
+	while (count && pos < sel->area_length) {
+		count--;
+		*(buf++) = readb(mapped + pos++);
+		wrote++;
+	}
+
+	dmi_unmap(mapped);
+	return wrote;
+}
+
+static ssize_t dmi_sel_raw_read_helper(struct dmi_sysfs_entry *entry,
+				       const struct dmi_header *dh,
+				       void *_state)
+{
+	struct dmi_read_state *state = _state;
+	struct dmi_system_event_log sel;
+
+	if (sizeof(sel) > dmi_entry_length(dh))
+		return -EIO;
+
+	memcpy(&sel, dh, sizeof(sel));
+
+	switch (sel.access_method) {
+	case DMI_SEL_ACCESS_METHOD_IO8:
+	case DMI_SEL_ACCESS_METHOD_IO2x8:
+	case DMI_SEL_ACCESS_METHOD_IO16:
+		return dmi_sel_raw_read_io(entry, &sel, state->buf,
+					   state->pos, state->count);
+	case DMI_SEL_ACCESS_METHOD_PHYS32:
+		return dmi_sel_raw_read_phys32(entry, &sel, state->buf,
+					       state->pos, state->count);
+	case DMI_SEL_ACCESS_METHOD_GPNV:
+		pr_info("dmi-sysfs: GPNV support missing.\n");
+		return -EIO;
+	default:
+		pr_info("dmi-sysfs: Unknown access method %02x\n",
+			sel.access_method);
+		return -EIO;
+	}
+}
+
+static ssize_t dmi_sel_raw_read(struct file *filp, struct kobject *kobj,
+				struct bin_attribute *bin_attr,
+				char *buf, loff_t pos, size_t count)
+{
+	struct dmi_sysfs_entry *entry = to_entry(kobj->parent);
+	struct dmi_read_state state = {
+		.buf = buf,
+		.pos = pos,
+		.count = count,
+	};
+
+	return find_dmi_entry(entry, dmi_sel_raw_read_helper, &state);
+}
+
+static struct bin_attribute dmi_sel_raw_attr = {
+	.attr = {.name = "raw_event_log", .mode = 0400},
+	.read = dmi_sel_raw_read,
+};
+
+static int dmi_system_event_log(struct dmi_sysfs_entry *entry)
+{
+	int ret;
+
+	entry->child = kzalloc(sizeof(*entry->child), GFP_KERNEL);
+	if (!entry->child)
+		return -ENOMEM;
+	ret = kobject_init_and_add(entry->child,
+				   &dmi_system_event_log_ktype,
+				   &entry->kobj,
+				   "system_event_log");
+	if (ret)
+		goto out_free;
+
+	ret = sysfs_create_bin_file(entry->child, &dmi_sel_raw_attr);
+	if (ret)
+		goto out_del;
+
+	return 0;
+
+out_del:
+	kobject_del(entry->child);
+out_free:
+	kfree(entry->child);
+	return ret;
+}
+
+/*************************************************
+ * Generic DMI entry support.
+ *************************************************/
+
+static ssize_t dmi_sysfs_entry_length(struct dmi_sysfs_entry *entry, char *buf)
+{
+	return sprintf(buf, "%d\n", entry->dh.length);
+}
+
+static ssize_t dmi_sysfs_entry_handle(struct dmi_sysfs_entry *entry, char *buf)
+{
+	return sprintf(buf, "%d\n", entry->dh.handle);
+}
+
+static ssize_t dmi_sysfs_entry_type(struct dmi_sysfs_entry *entry, char *buf)
+{
+	return sprintf(buf, "%d\n", entry->dh.type);
+}
+
+static ssize_t dmi_sysfs_entry_instance(struct dmi_sysfs_entry *entry,
+					char *buf)
+{
+	return sprintf(buf, "%d\n", entry->instance);
+}
+
+static ssize_t dmi_sysfs_entry_position(struct dmi_sysfs_entry *entry,
+					char *buf)
+{
+	return sprintf(buf, "%d\n", entry->position);
+}
+
+static DMI_SYSFS_ATTR(entry, length);
+static DMI_SYSFS_ATTR(entry, handle);
+static DMI_SYSFS_ATTR(entry, type);
+static DMI_SYSFS_ATTR(entry, instance);
+static DMI_SYSFS_ATTR(entry, position);
+
+static struct attribute *dmi_sysfs_entry_attrs[] = {
+	&dmi_sysfs_attr_entry_length.attr,
+	&dmi_sysfs_attr_entry_handle.attr,
+	&dmi_sysfs_attr_entry_type.attr,
+	&dmi_sysfs_attr_entry_instance.attr,
+	&dmi_sysfs_attr_entry_position.attr,
+	NULL,
+};
+
+static ssize_t dmi_entry_raw_read_helper(struct dmi_sysfs_entry *entry,
+					 const struct dmi_header *dh,
+					 void *_state)
+{
+	struct dmi_read_state *state = _state;
+	size_t entry_length;
+
+	entry_length = dmi_entry_length(dh);
+
+	return memory_read_from_buffer(state->buf, state->count,
+				       &state->pos, dh, entry_length);
+}
+
+static ssize_t dmi_entry_raw_read(struct file *filp,
+				  struct kobject *kobj,
+				  struct bin_attribute *bin_attr,
+				  char *buf, loff_t pos, size_t count)
+{
+	struct dmi_sysfs_entry *entry = to_entry(kobj);
+	struct dmi_read_state state = {
+		.buf = buf,
+		.pos = pos,
+		.count = count,
+	};
+
+	return find_dmi_entry(entry, dmi_entry_raw_read_helper, &state);
+}
+
+static const struct bin_attribute dmi_entry_raw_attr = {
+	.attr = {.name = "raw", .mode = 0400},
+	.read = dmi_entry_raw_read,
+};
+
+static void dmi_sysfs_entry_release(struct kobject *kobj)
+{
+	struct dmi_sysfs_entry *entry = to_entry(kobj);
+
+	spin_lock(&entry_list_lock);
+	list_del(&entry->list);
+	spin_unlock(&entry_list_lock);
+	kfree(entry);
+}
+
+static struct kobj_type dmi_sysfs_entry_ktype = {
+	.release = dmi_sysfs_entry_release,
+	.sysfs_ops = &dmi_sysfs_attr_ops,
+	.default_attrs = dmi_sysfs_entry_attrs,
+};
+
+static struct kset *dmi_kset;
+
+/* Global count of all instances seen.  Only for setup */
+static int __initdata instance_counts[MAX_ENTRY_TYPE + 1];
+
+/* Global positional count of all entries seen.  Only for setup */
+static int __initdata position_count;
+
+static void __init dmi_sysfs_register_handle(const struct dmi_header *dh,
+					     void *_ret)
+{
+	struct dmi_sysfs_entry *entry;
+	int *ret = _ret;
+
+	/* If a previous entry saw an error, short circuit */
+	if (*ret)
+		return;
+
+	/* Allocate and register a new entry into the entries set */
+	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
+	if (!entry) {
+		*ret = -ENOMEM;
+		return;
+	}
+
+	/* Set the key */
+	memcpy(&entry->dh, dh, sizeof(*dh));
+	entry->instance = instance_counts[dh->type]++;
+	entry->position = position_count++;
+
+	entry->kobj.kset = dmi_kset;
+	*ret = kobject_init_and_add(&entry->kobj, &dmi_sysfs_entry_ktype, NULL,
+				    "%d-%d", dh->type, entry->instance);
+
+	if (*ret) {
+		kfree(entry);
+		return;
+	}
+
+	/* Thread on the global list for cleanup */
+	spin_lock(&entry_list_lock);
+	list_add_tail(&entry->list, &entry_list);
+	spin_unlock(&entry_list_lock);
+
+	/* Handle specializations by type */
+	switch (dh->type) {
+	case DMI_ENTRY_SYSTEM_EVENT_LOG:
+		*ret = dmi_system_event_log(entry);
+		break;
+	default:
+		/* No specialization */
+		break;
+	}
+	if (*ret)
+		goto out_err;
+
+	/* Create the raw binary file to access the entry */
+	*ret = sysfs_create_bin_file(&entry->kobj, &dmi_entry_raw_attr);
+	if (*ret)
+		goto out_err;
+
+	return;
+out_err:
+	kobject_put(entry->child);
+	kobject_put(&entry->kobj);
+	return;
+}
+
+static void cleanup_entry_list(void)
+{
+	struct dmi_sysfs_entry *entry, *next;
+
+	/* No locks, we are on our way out */
+	list_for_each_entry_safe(entry, next, &entry_list, list) {
+		kobject_put(entry->child);
+		kobject_put(&entry->kobj);
+	}
+}
+
+static int __init dmi_sysfs_init(void)
+{
+	int error;
+	int val;
+
+	if (!dmi_kobj) {
+		pr_debug("dmi-sysfs: dmi entry is absent.\n");
+		error = -ENODATA;
+		goto err;
+	}
+
+	dmi_kset = kset_create_and_add("entries", NULL, dmi_kobj);
+	if (!dmi_kset) {
+		error = -ENOMEM;
+		goto err;
+	}
+
+	val = 0;
+	error = dmi_walk(dmi_sysfs_register_handle, &val);
+	if (error)
+		goto err;
+	if (val) {
+		error = val;
+		goto err;
+	}
+
+	pr_debug("dmi-sysfs: loaded.\n");
+
+	return 0;
+err:
+	cleanup_entry_list();
+	kset_unregister(dmi_kset);
+	return error;
+}
+
+/* Clean up everything. */
+static void __exit dmi_sysfs_exit(void)
+{
+	pr_debug("dmi-sysfs: unloading.\n");
+	cleanup_entry_list();
+	kset_unregister(dmi_kset);
+}
+
+module_init(dmi_sysfs_init);
+module_exit(dmi_sysfs_exit);
+
+MODULE_AUTHOR("Mike Waychison <mikew@google.com>");
+MODULE_DESCRIPTION("DMI sysfs support");
+MODULE_LICENSE("GPL");
diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c
new file mode 100644
index 0000000..f248354
--- /dev/null
+++ b/drivers/firmware/dmi_scan.c
@@ -0,0 +1,1127 @@
+#include <linux/types.h>
+#include <linux/string.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/ctype.h>
+#include <linux/dmi.h>
+#include <linux/efi.h>
+#include <linux/bootmem.h>
+#include <linux/random.h>
+#include <asm/dmi.h>
+#include <asm/unaligned.h>
+
+struct kobject *dmi_kobj;
+EXPORT_SYMBOL_GPL(dmi_kobj);
+
+/*
+ * DMI stands for "Desktop Management Interface".  It is part
+ * of, and an antecedent to, SMBIOS, which stands for System
+ * Management BIOS.  See further: http://www.dmtf.org/standards
+ */
+static const char dmi_empty_string[] = "";
+
+static u32 dmi_ver __initdata;
+static u32 dmi_len;
+static u16 dmi_num;
+static u8 smbios_entry_point[32];
+static int smbios_entry_point_size;
+
+/* DMI system identification string used during boot */
+static char dmi_ids_string[128] __initdata;
+
+static struct dmi_memdev_info {
+	const char *device;
+	const char *bank;
+	u64 size;		/* bytes */
+	u16 handle;
+} *dmi_memdev;
+static int dmi_memdev_nr;
+
+static const char * __init dmi_string_nosave(const struct dmi_header *dm, u8 s)
+{
+	const u8 *bp = ((u8 *) dm) + dm->length;
+	const u8 *nsp;
+
+	if (s) {
+		while (--s > 0 && *bp)
+			bp += strlen(bp) + 1;
+
+		/* Strings containing only spaces are considered empty */
+		nsp = bp;
+		while (*nsp == ' ')
+			nsp++;
+		if (*nsp != '\0')
+			return bp;
+	}
+
+	return dmi_empty_string;
+}
+
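+/*
+ * Worked example (hypothetical entry): for a string-set of
+ * "First\0Second\0\0" after the formatted area, s == 1 returns "First"
+ * and s == 2 returns "Second"; s == 0 and a string consisting only of
+ * spaces both yield dmi_empty_string.
+ */
+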
+static const char * __init dmi_string(const struct dmi_header *dm, u8 s)
+{
+	const char *bp = dmi_string_nosave(dm, s);
+	char *str;
+	size_t len;
+
+	if (bp == dmi_empty_string)
+		return dmi_empty_string;
+
+	len = strlen(bp) + 1;
+	str = dmi_alloc(len);
+	if (str != NULL)
+		strcpy(str, bp);
+
+	return str;
+}
+
+/*
+ *	We have to be cautious here. We have seen BIOSes with DMI pointers
+ *	pointing to completely the wrong place, for example.
+ */
+static void dmi_decode_table(u8 *buf,
+			     void (*decode)(const struct dmi_header *, void *),
+			     void *private_data)
+{
+	u8 *data = buf;
+	int i = 0;
+
+	/*
+	 * Stop when we have seen all the items the table claimed to have
+	 * (SMBIOS < 3.0 only) OR we reach an end-of-table marker (SMBIOS
+	 * >= 3.0 only) OR we run off the end of the table (should never
+	 * happen but sometimes does on bogus implementations.)
+	 */
+	while ((!dmi_num || i < dmi_num) &&
+	       (data - buf + sizeof(struct dmi_header)) <= dmi_len) {
+		const struct dmi_header *dm = (const struct dmi_header *)data;
+
+		/*
+		 *  We want to know the total length (formatted area and
+		 *  strings) before decoding to make sure we won't run off the
+		 *  table in dmi_decode or dmi_string
+		 */
+		data += dm->length;
+		while ((data - buf < dmi_len - 1) && (data[0] || data[1]))
+			data++;
+		if (data - buf < dmi_len - 1)
+			decode(dm, private_data);
+
+		data += 2;
+		i++;
+
+		/*
+		 * 7.45 End-of-Table (Type 127) [SMBIOS reference spec v3.0.0]
+		 * For tables behind a 64-bit entry point, we have no item
+		 * count and no exact table length, so stop on end-of-table
+		 * marker. For tables behind a 32-bit entry point, we have
+		 * seen OEM structures behind the end-of-table marker on
+		 * some systems, so don't trust it.
+		 */
+		if (!dmi_num && dm->type == DMI_ENTRY_END_OF_TABLE)
+			break;
+	}
+
+	/* Trim DMI table length if needed */
+	if (dmi_len > data - buf)
+		dmi_len = data - buf;
+}
+
+static phys_addr_t dmi_base;
+
+static int __init dmi_walk_early(void (*decode)(const struct dmi_header *,
+		void *))
+{
+	u8 *buf;
+	u32 orig_dmi_len = dmi_len;
+
+	buf = dmi_early_remap(dmi_base, orig_dmi_len);
+	if (buf == NULL)
+		return -ENOMEM;
+
+	dmi_decode_table(buf, decode, NULL);
+
+	add_device_randomness(buf, dmi_len);
+
+	dmi_early_unmap(buf, orig_dmi_len);
+	return 0;
+}
+
+static int __init dmi_checksum(const u8 *buf, u8 len)
+{
+	u8 sum = 0;
+	int a;
+
+	for (a = 0; a < len; a++)
+		sum += buf[a];
+
+	return sum == 0;
+}
+
+static const char *dmi_ident[DMI_STRING_MAX];
+static LIST_HEAD(dmi_devices);
+int dmi_available;
+
+/*
+ *	Save a DMI string
+ */
+static void __init dmi_save_ident(const struct dmi_header *dm, int slot,
+		int string)
+{
+	const char *d = (const char *) dm;
+	const char *p;
+
+	if (dmi_ident[slot] || dm->length <= string)
+		return;
+
+	p = dmi_string(dm, d[string]);
+	if (p == NULL)
+		return;
+
+	dmi_ident[slot] = p;
+}
+
+static void __init dmi_save_uuid(const struct dmi_header *dm, int slot,
+		int index)
+{
+	const u8 *d;
+	char *s;
+	int is_ff = 1, is_00 = 1, i;
+
+	if (dmi_ident[slot] || dm->length < index + 16)
+		return;
+
+	d = (u8 *) dm + index;
+	for (i = 0; i < 16 && (is_ff || is_00); i++) {
+		if (d[i] != 0x00)
+			is_00 = 0;
+		if (d[i] != 0xFF)
+			is_ff = 0;
+	}
+
+	if (is_ff || is_00)
+		return;
+
+	s = dmi_alloc(16*2+4+1);
+	if (!s)
+		return;
+
+	/*
+	 * As of version 2.6 of the SMBIOS specification, the first 3 fields of
+	 * the UUID are supposed to be little-endian encoded.  The specification
+	 * says that this is the de facto standard.
+	 */
+	if (dmi_ver >= 0x020600)
+		sprintf(s, "%pUl", d);
+	else
+		sprintf(s, "%pUb", d);
+
+	dmi_ident[slot] = s;
+}
+
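+/*
+ * Worked example (hypothetical bytes): if the raw UUID field starts
+ * with 33 22 11 00 55 44 77 66, "%pUl" (SMBIOS >= 2.6) prints it as
+ * "00112233-4455-6677-...", while "%pUb" would print "33221100-5544-...".
+ */
+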
+static void __init dmi_save_type(const struct dmi_header *dm, int slot,
+		int index)
+{
+	const u8 *d;
+	char *s;
+
+	if (dmi_ident[slot] || dm->length <= index)
+		return;
+
+	s = dmi_alloc(4);
+	if (!s)
+		return;
+
+	d = (u8 *) dm + index;
+	sprintf(s, "%u", *d & 0x7F);
+	dmi_ident[slot] = s;
+}
+
+static void __init dmi_save_one_device(int type, const char *name)
+{
+	struct dmi_device *dev;
+
+	/* No duplicate device */
+	if (dmi_find_device(type, name, NULL))
+		return;
+
+	dev = dmi_alloc(sizeof(*dev) + strlen(name) + 1);
+	if (!dev)
+		return;
+
+	dev->type = type;
+	strcpy((char *)(dev + 1), name);
+	dev->name = (char *)(dev + 1);
+	dev->device_data = NULL;
+	list_add(&dev->list, &dmi_devices);
+}
+
+static void __init dmi_save_devices(const struct dmi_header *dm)
+{
+	int i, count = (dm->length - sizeof(struct dmi_header)) / 2;
+
+	for (i = 0; i < count; i++) {
+		const char *d = (char *)(dm + 1) + (i * 2);
+
+		/* Skip disabled device */
+		if ((*d & 0x80) == 0)
+			continue;
+
+		dmi_save_one_device(*d & 0x7f, dmi_string_nosave(dm, *(d + 1)));
+	}
+}
+
+static void __init dmi_save_oem_strings_devices(const struct dmi_header *dm)
+{
+	int i, count;
+	struct dmi_device *dev;
+
+	if (dm->length < 0x05)
+		return;
+
+	count = *(u8 *)(dm + 1);
+	for (i = 1; i <= count; i++) {
+		const char *devname = dmi_string(dm, i);
+
+		if (devname == dmi_empty_string)
+			continue;
+
+		dev = dmi_alloc(sizeof(*dev));
+		if (!dev)
+			break;
+
+		dev->type = DMI_DEV_TYPE_OEM_STRING;
+		dev->name = devname;
+		dev->device_data = NULL;
+
+		list_add(&dev->list, &dmi_devices);
+	}
+}
+
+static void __init dmi_save_ipmi_device(const struct dmi_header *dm)
+{
+	struct dmi_device *dev;
+	void *data;
+
+	data = dmi_alloc(dm->length);
+	if (data == NULL)
+		return;
+
+	memcpy(data, dm, dm->length);
+
+	dev = dmi_alloc(sizeof(*dev));
+	if (!dev)
+		return;
+
+	dev->type = DMI_DEV_TYPE_IPMI;
+	dev->name = "IPMI controller";
+	dev->device_data = data;
+
+	list_add_tail(&dev->list, &dmi_devices);
+}
+
+static void __init dmi_save_dev_pciaddr(int instance, int segment, int bus,
+					int devfn, const char *name, int type)
+{
+	struct dmi_dev_onboard *dev;
+
+	/* Ignore invalid values */
+	if (type == DMI_DEV_TYPE_DEV_SLOT &&
+	    segment == 0xFFFF && bus == 0xFF && devfn == 0xFF)
+		return;
+
+	dev = dmi_alloc(sizeof(*dev) + strlen(name) + 1);
+	if (!dev)
+		return;
+
+	dev->instance = instance;
+	dev->segment = segment;
+	dev->bus = bus;
+	dev->devfn = devfn;
+
+	strcpy((char *)&dev[1], name);
+	dev->dev.type = type;
+	dev->dev.name = (char *)&dev[1];
+	dev->dev.device_data = dev;
+
+	list_add(&dev->dev.list, &dmi_devices);
+}
+
+static void __init dmi_save_extended_devices(const struct dmi_header *dm)
+{
+	const char *name;
+	const u8 *d = (u8 *)dm;
+
+	if (dm->length < 0x0B)
+		return;
+
+	/* Skip disabled device */
+	if ((d[0x5] & 0x80) == 0)
+		return;
+
+	name = dmi_string_nosave(dm, d[0x4]);
+	dmi_save_dev_pciaddr(d[0x6], *(u16 *)(d + 0x7), d[0x9], d[0xA], name,
+			     DMI_DEV_TYPE_DEV_ONBOARD);
+	dmi_save_one_device(d[0x5] & 0x7f, name);
+}
+
+static void __init dmi_save_system_slot(const struct dmi_header *dm)
+{
+	const u8 *d = (u8 *)dm;
+
+	/* Need SMBIOS 2.6+ structure */
+	if (dm->length < 0x11)
+		return;
+	dmi_save_dev_pciaddr(*(u16 *)(d + 0x9), *(u16 *)(d + 0xD), d[0xF],
+			     d[0x10], dmi_string_nosave(dm, d[0x4]),
+			     DMI_DEV_TYPE_DEV_SLOT);
+}
+
+static void __init count_mem_devices(const struct dmi_header *dm, void *v)
+{
+	if (dm->type != DMI_ENTRY_MEM_DEVICE)
+		return;
+	dmi_memdev_nr++;
+}
+
+static void __init save_mem_devices(const struct dmi_header *dm, void *v)
+{
+	const char *d = (const char *)dm;
+	static int nr;
+	u64 bytes;
+	u16 size;
+
+	if (dm->type != DMI_ENTRY_MEM_DEVICE || dm->length < 0x12)
+		return;
+	if (nr >= dmi_memdev_nr) {
+		pr_warn(FW_BUG "Too many DIMM entries in SMBIOS table\n");
+		return;
+	}
+	dmi_memdev[nr].handle = get_unaligned(&dm->handle);
+	dmi_memdev[nr].device = dmi_string(dm, d[0x10]);
+	dmi_memdev[nr].bank = dmi_string(dm, d[0x11]);
+
+	size = get_unaligned((u16 *)&d[0xC]);
+	if (size == 0)
+		bytes = 0;
+	else if (size == 0xffff)
+		bytes = ~0ull;
+	else if (size & 0x8000)
+		bytes = (u64)(size & 0x7fff) << 10;
+	else if (size != 0x7fff)
+		bytes = (u64)size << 20;
+	else
+		bytes = (u64)get_unaligned((u32 *)&d[0x1C]) << 20;
+
+	dmi_memdev[nr].size = bytes;
+	nr++;
+}
+
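+/*
+ * Worked example (hypothetical values) for the size decoding above: a
+ * size word of 0x8200 has bit 15 set, so the unit is KiB and the result
+ * is (0x0200 << 10) bytes == 512 KiB; a size word of 0x0200 means 512
+ * MiB; 0x7fff defers to the 32-bit extended-size field at offset 0x1C,
+ * which is always in MiB.
+ */
+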
+void __init dmi_memdev_walk(void)
+{
+	if (!dmi_available)
+		return;
+
+	if (dmi_walk_early(count_mem_devices) == 0 && dmi_memdev_nr) {
+		dmi_memdev = dmi_alloc(sizeof(*dmi_memdev) * dmi_memdev_nr);
+		if (dmi_memdev)
+			dmi_walk_early(save_mem_devices);
+	}
+}
+
+/*
+ *	Process a DMI table entry. Right now all we care about are the BIOS
+ *	and machine entries. For 2.5 we should pull the smbus controller info
+ *	out of here.
+ */
+static void __init dmi_decode(const struct dmi_header *dm, void *dummy)
+{
+	switch (dm->type) {
+	case 0:		/* BIOS Information */
+		dmi_save_ident(dm, DMI_BIOS_VENDOR, 4);
+		dmi_save_ident(dm, DMI_BIOS_VERSION, 5);
+		dmi_save_ident(dm, DMI_BIOS_DATE, 8);
+		break;
+	case 1:		/* System Information */
+		dmi_save_ident(dm, DMI_SYS_VENDOR, 4);
+		dmi_save_ident(dm, DMI_PRODUCT_NAME, 5);
+		dmi_save_ident(dm, DMI_PRODUCT_VERSION, 6);
+		dmi_save_ident(dm, DMI_PRODUCT_SERIAL, 7);
+		dmi_save_uuid(dm, DMI_PRODUCT_UUID, 8);
+		dmi_save_ident(dm, DMI_PRODUCT_SKU, 25);
+		dmi_save_ident(dm, DMI_PRODUCT_FAMILY, 26);
+		break;
+	case 2:		/* Base Board Information */
+		dmi_save_ident(dm, DMI_BOARD_VENDOR, 4);
+		dmi_save_ident(dm, DMI_BOARD_NAME, 5);
+		dmi_save_ident(dm, DMI_BOARD_VERSION, 6);
+		dmi_save_ident(dm, DMI_BOARD_SERIAL, 7);
+		dmi_save_ident(dm, DMI_BOARD_ASSET_TAG, 8);
+		break;
+	case 3:		/* Chassis Information */
+		dmi_save_ident(dm, DMI_CHASSIS_VENDOR, 4);
+		dmi_save_type(dm, DMI_CHASSIS_TYPE, 5);
+		dmi_save_ident(dm, DMI_CHASSIS_VERSION, 6);
+		dmi_save_ident(dm, DMI_CHASSIS_SERIAL, 7);
+		dmi_save_ident(dm, DMI_CHASSIS_ASSET_TAG, 8);
+		break;
+	case 9:		/* System Slots */
+		dmi_save_system_slot(dm);
+		break;
+	case 10:	/* Onboard Devices Information */
+		dmi_save_devices(dm);
+		break;
+	case 11:	/* OEM Strings */
+		dmi_save_oem_strings_devices(dm);
+		break;
+	case 38:	/* IPMI Device Information */
+		dmi_save_ipmi_device(dm);
+		break;
+	case 41:	/* Onboard Devices Extended Information */
+		dmi_save_extended_devices(dm);
+	}
+}
+
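+/*
+ * Note on the magic offsets above (illustrative): each index passed to
+ * dmi_save_ident() is the byte offset of a string-number field in the
+ * formatted area, so for a hypothetical type 0 entry with d[4] == 0x01,
+ * DMI_BIOS_VENDOR is taken from the first string in the string-set.
+ */
+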
+static int __init print_filtered(char *buf, size_t len, const char *info)
+{
+	int c = 0;
+	const char *p;
+
+	if (!info)
+		return c;
+
+	for (p = info; *p; p++)
+		if (isprint(*p))
+			c += scnprintf(buf + c, len - c, "%c", *p);
+		else
+			c += scnprintf(buf + c, len - c, "\\x%02x", *p & 0xff);
+	return c;
+}
+
+static void __init dmi_format_ids(char *buf, size_t len)
+{
+	int c = 0;
+	const char *board;	/* Board Name is optional */
+
+	c += print_filtered(buf + c, len - c,
+			    dmi_get_system_info(DMI_SYS_VENDOR));
+	c += scnprintf(buf + c, len - c, " ");
+	c += print_filtered(buf + c, len - c,
+			    dmi_get_system_info(DMI_PRODUCT_NAME));
+
+	board = dmi_get_system_info(DMI_BOARD_NAME);
+	if (board) {
+		c += scnprintf(buf + c, len - c, "/");
+		c += print_filtered(buf + c, len - c, board);
+	}
+	c += scnprintf(buf + c, len - c, ", BIOS ");
+	c += print_filtered(buf + c, len - c,
+			    dmi_get_system_info(DMI_BIOS_VERSION));
+	c += scnprintf(buf + c, len - c, " ");
+	c += print_filtered(buf + c, len - c,
+			    dmi_get_system_info(DMI_BIOS_DATE));
+}
+
+/*
+ * Check for DMI/SMBIOS headers in the system firmware image.  Any
+ * SMBIOS header must start 16 bytes before the DMI header, so take a
+ * 32 byte buffer and check for DMI at offset 16 and SMBIOS at offset
+ * 0.  If the DMI header is present, set dmi_ver accordingly (SMBIOS
+ * takes precedence) and return 0.  Otherwise return 1.
+ */
+static int __init dmi_present(const u8 *buf)
+{
+	u32 smbios_ver;
+
+	if (memcmp(buf, "_SM_", 4) == 0 &&
+	    buf[5] < 32 && dmi_checksum(buf, buf[5])) {
+		smbios_ver = get_unaligned_be16(buf + 6);
+		smbios_entry_point_size = buf[5];
+		memcpy(smbios_entry_point, buf, smbios_entry_point_size);
+
+		/* Some BIOSes report a weird SMBIOS version; fix that up */
+		switch (smbios_ver) {
+		case 0x021F:
+		case 0x0221:
+			pr_debug("SMBIOS version fixup (2.%d->2.%d)\n",
+				 smbios_ver & 0xFF, 3);
+			smbios_ver = 0x0203;
+			break;
+		case 0x0233:
+			pr_debug("SMBIOS version fixup (2.%d->2.%d)\n", 51, 6);
+			smbios_ver = 0x0206;
+			break;
+		}
+	} else {
+		smbios_ver = 0;
+	}
+
+	buf += 16;
+
+	if (memcmp(buf, "_DMI_", 5) == 0 && dmi_checksum(buf, 15)) {
+		if (smbios_ver)
+			dmi_ver = smbios_ver;
+		else
+			dmi_ver = (buf[14] & 0xF0) << 4 | (buf[14] & 0x0F);
+		dmi_ver <<= 8;
+		dmi_num = get_unaligned_le16(buf + 12);
+		dmi_len = get_unaligned_le16(buf + 6);
+		dmi_base = get_unaligned_le32(buf + 8);
+
+		if (dmi_walk_early(dmi_decode) == 0) {
+			if (smbios_ver) {
+				pr_info("SMBIOS %d.%d present.\n",
+					dmi_ver >> 16, (dmi_ver >> 8) & 0xFF);
+			} else {
+				smbios_entry_point_size = 15;
+				memcpy(smbios_entry_point, buf,
+				       smbios_entry_point_size);
+				pr_info("Legacy DMI %d.%d present.\n",
+					dmi_ver >> 16, (dmi_ver >> 8) & 0xFF);
+			}
+			dmi_format_ids(dmi_ids_string, sizeof(dmi_ids_string));
+			pr_info("DMI: %s\n", dmi_ids_string);
+			return 0;
+		}
+	}
+
+	return 1;
+}
+
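+/*
+ * Worked example (hypothetical header): with no SMBIOS signature and a
+ * _DMI_ BCD revision byte buf[14] of 0x24, the code above computes
+ * dmi_ver = ((0x20 << 4) | 0x04) << 8 == 0x020400, reported as
+ * "Legacy DMI 2.4 present.".
+ */
+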
+/*
+ * Check for the SMBIOS 3.0 64-bit entry point signature. Unlike the legacy
+ * 32-bit entry point, there is no embedded DMI header (_DMI_) in here.
+ */
+static int __init dmi_smbios3_present(const u8 *buf)
+{
+	if (memcmp(buf, "_SM3_", 5) == 0 &&
+	    buf[6] < 32 && dmi_checksum(buf, buf[6])) {
+		dmi_ver = get_unaligned_be32(buf + 6) & 0xFFFFFF;
+		dmi_num = 0;			/* No longer specified */
+		dmi_len = get_unaligned_le32(buf + 12);
+		dmi_base = get_unaligned_le64(buf + 16);
+		smbios_entry_point_size = buf[6];
+		memcpy(smbios_entry_point, buf, smbios_entry_point_size);
+
+		if (dmi_walk_early(dmi_decode) == 0) {
+			pr_info("SMBIOS %d.%d.%d present.\n",
+				dmi_ver >> 16, (dmi_ver >> 8) & 0xFF,
+				dmi_ver & 0xFF);
+			dmi_format_ids(dmi_ids_string, sizeof(dmi_ids_string));
+			pr_info("DMI: %s\n", dmi_ids_string);
+			return 0;
+		}
+	}
+	return 1;
+}
+
+void __init dmi_scan_machine(void)
+{
+	char __iomem *p, *q;
+	char buf[32];
+
+	if (efi_enabled(EFI_CONFIG_TABLES)) {
+		/*
+		 * According to the DMTF SMBIOS reference spec v3.0.0, it is
+		 * allowed to define both the 64-bit entry point (smbios3) and
+		 * the 32-bit entry point (smbios), in which case they should
+		 * either both point to the same SMBIOS structure table, or the
+		 * table pointed to by the 64-bit entry point should contain a
+		 * superset of the table contents pointed to by the 32-bit entry
+ * point (section 5.2).
+		 * This implies that the 64-bit entry point should have
+		 * precedence if it is defined and supported by the OS. If we
+		 * have the 64-bit entry point, but fail to decode it, fall
+ * back to the legacy one (if available).
+		 */
+		if (efi.smbios3 != EFI_INVALID_TABLE_ADDR) {
+			p = dmi_early_remap(efi.smbios3, 32);
+			if (p == NULL)
+				goto error;
+			memcpy_fromio(buf, p, 32);
+			dmi_early_unmap(p, 32);
+
+			if (!dmi_smbios3_present(buf)) {
+				dmi_available = 1;
+				return;
+			}
+		}
+		if (efi.smbios == EFI_INVALID_TABLE_ADDR)
+			goto error;
+
+		/* This is called as a core_initcall() because it isn't
+		 * needed during early boot.  This also means we can
+		 * iounmap the space when we're done with it.
+		 */
+		p = dmi_early_remap(efi.smbios, 32);
+		if (p == NULL)
+			goto error;
+		memcpy_fromio(buf, p, 32);
+		dmi_early_unmap(p, 32);
+
+		if (!dmi_present(buf)) {
+			dmi_available = 1;
+			return;
+		}
+	} else if (IS_ENABLED(CONFIG_DMI_SCAN_MACHINE_NON_EFI_FALLBACK)) {
+		p = dmi_early_remap(0xF0000, 0x10000);
+		if (p == NULL)
+			goto error;
+
+		/*
+		 * Same logic as above, look for a 64-bit entry point
+		 * first, and if not found, fall back to 32-bit entry point.
+		 */
+		memcpy_fromio(buf, p, 16);
+		for (q = p + 16; q < p + 0x10000; q += 16) {
+			memcpy_fromio(buf + 16, q, 16);
+			if (!dmi_smbios3_present(buf)) {
+				dmi_available = 1;
+				dmi_early_unmap(p, 0x10000);
+				return;
+			}
+			memcpy(buf, buf + 16, 16);
+		}
+
+		/*
+		 * Iterate over all possible DMI header addresses q.
+		 * Maintain the 32 bytes around q in buf.  On the
+		 * first iteration, substitute zero for the
+		 * out-of-range bytes so there is no chance of falsely
+		 * detecting an SMBIOS header.
+		 */
+		memset(buf, 0, 16);
+		for (q = p; q < p + 0x10000; q += 16) {
+			memcpy_fromio(buf + 16, q, 16);
+			if (!dmi_present(buf)) {
+				dmi_available = 1;
+				dmi_early_unmap(p, 0x10000);
+				return;
+			}
+			memcpy(buf, buf + 16, 16);
+		}
+		dmi_early_unmap(p, 0x10000);
+	}
+ error:
+	pr_info("DMI not present or invalid.\n");
+}
+
+static ssize_t raw_table_read(struct file *file, struct kobject *kobj,
+			      struct bin_attribute *attr, char *buf,
+			      loff_t pos, size_t count)
+{
+	memcpy(buf, attr->private + pos, count);
+	return count;
+}
+
+static BIN_ATTR(smbios_entry_point, S_IRUSR, raw_table_read, NULL, 0);
+static BIN_ATTR(DMI, S_IRUSR, raw_table_read, NULL, 0);
+
+static int __init dmi_init(void)
+{
+	struct kobject *tables_kobj;
+	u8 *dmi_table;
+	int ret = -ENOMEM;
+
+	if (!dmi_available)
+		return 0;
+
+	/*
+	 * Set up dmi directory at /sys/firmware/dmi. This entry should stay
+	 * even after further errors, as it can be used by other modules like
+	 * dmi-sysfs.
+	 */
+	dmi_kobj = kobject_create_and_add("dmi", firmware_kobj);
+	if (!dmi_kobj)
+		goto err;
+
+	tables_kobj = kobject_create_and_add("tables", dmi_kobj);
+	if (!tables_kobj)
+		goto err;
+
+	dmi_table = dmi_remap(dmi_base, dmi_len);
+	if (!dmi_table)
+		goto err_tables;
+
+	bin_attr_smbios_entry_point.size = smbios_entry_point_size;
+	bin_attr_smbios_entry_point.private = smbios_entry_point;
+	ret = sysfs_create_bin_file(tables_kobj, &bin_attr_smbios_entry_point);
+	if (ret)
+		goto err_unmap;
+
+	bin_attr_DMI.size = dmi_len;
+	bin_attr_DMI.private = dmi_table;
+	ret = sysfs_create_bin_file(tables_kobj, &bin_attr_DMI);
+	if (!ret)
+		return 0;
+
+	sysfs_remove_bin_file(tables_kobj,
+			      &bin_attr_smbios_entry_point);
+ err_unmap:
+	dmi_unmap(dmi_table);
+ err_tables:
+	kobject_del(tables_kobj);
+	kobject_put(tables_kobj);
+ err:
+	pr_err("dmi: Firmware registration failed.\n");
+
+	return ret;
+}
+subsys_initcall(dmi_init);
+
+/**
+ * dmi_set_dump_stack_arch_desc - set arch description for dump_stack()
+ *
+ * Invoke dump_stack_set_arch_desc() with DMI system information so that
+ * DMI identifiers are printed out on task dumps.  Arch boot code should
+ * call this function after dmi_scan_machine() if it wants to print out DMI
+ * identifiers on task dumps.
+ */
+void __init dmi_set_dump_stack_arch_desc(void)
+{
+	dump_stack_set_arch_desc("%s", dmi_ids_string);
+}
+
+/**
+ *	dmi_matches - check if dmi_system_id structure matches system DMI data
+ *	@dmi: pointer to the dmi_system_id structure to check
+ */
+static bool dmi_matches(const struct dmi_system_id *dmi)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(dmi->matches); i++) {
+		int s = dmi->matches[i].slot;
+		if (s == DMI_NONE)
+			break;
+		if (s == DMI_OEM_STRING) {
+			/* DMI_OEM_STRING must be exact match */
+			const struct dmi_device *valid;
+
+			valid = dmi_find_device(DMI_DEV_TYPE_OEM_STRING,
+						dmi->matches[i].substr, NULL);
+			if (valid)
+				continue;
+		} else if (dmi_ident[s]) {
+			if (dmi->matches[i].exact_match) {
+				if (!strcmp(dmi_ident[s],
+					    dmi->matches[i].substr))
+					continue;
+			} else {
+				if (strstr(dmi_ident[s],
+					   dmi->matches[i].substr))
+					continue;
+			}
+		}
+
+		/* No match */
+		return false;
+	}
+	return true;
+}
+
+/**
+ *	dmi_is_end_of_table - check for end-of-table marker
+ *	@dmi: pointer to the dmi_system_id structure to check
+ */
+static bool dmi_is_end_of_table(const struct dmi_system_id *dmi)
+{
+	return dmi->matches[0].slot == DMI_NONE;
+}
+
+/**
+ *	dmi_check_system - check system DMI data
+ *	@list: array of dmi_system_id structures to match against
+ *		All non-null elements of the list must match
+ *		their slot's (field index's) data (i.e., each
+ *		list string must be a substring of the specified
+ *		DMI slot's string data) to be considered a
+ *		successful match.
+ *
+ *	Walk the blacklist table running matching functions until someone
+ *	returns non-zero or we hit the end. The callback function is called for
+ *	each successful match. Returns the number of matches.
+ *
+ *	dmi_scan_machine must be called before this function is called.
+ */
+int dmi_check_system(const struct dmi_system_id *list)
+{
+	int count = 0;
+	const struct dmi_system_id *d;
+
+	for (d = list; !dmi_is_end_of_table(d); d++)
+		if (dmi_matches(d)) {
+			count++;
+			if (d->callback && d->callback(d))
+				break;
+		}
+
+	return count;
+}
+EXPORT_SYMBOL(dmi_check_system);
+
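+/*
+ * Illustrative caller sketch (vendor/product strings are hypothetical):
+ * quirk tables are typically declared with the DMI_MATCH() helper and
+ * handed to dmi_check_system():
+ *
+ *	static int example_quirk(const struct dmi_system_id *d)
+ *	{
+ *		pr_info("applying quirk for %s\n", d->ident);
+ *		return 1;	// non-zero stops the walk
+ *	}
+ *
+ *	static const struct dmi_system_id example_table[] = {
+ *		{
+ *			.callback = example_quirk,
+ *			.ident = "Acme Example 1000",
+ *			.matches = {
+ *				DMI_MATCH(DMI_SYS_VENDOR, "Acme Inc."),
+ *				DMI_MATCH(DMI_PRODUCT_NAME, "Example 1000"),
+ *			},
+ *		},
+ *		{ }	// terminating entry: matches[0].slot == DMI_NONE
+ *	};
+ *
+ *	dmi_check_system(example_table);
+ */
+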
+/**
+ *	dmi_first_match - find dmi_system_id structure matching system DMI data
+ *	@list: array of dmi_system_id structures to match against
+ *		All non-null elements of the list must match
+ *		their slot's (field index's) data (i.e., each
+ *		list string must be a substring of the specified
+ *		DMI slot's string data) to be considered a
+ *		successful match.
+ *
+ *	Walk the blacklist table until the first match is found.  Return the
+ *	pointer to the matching entry or NULL if there's no match.
+ *
+ *	dmi_scan_machine must be called before this function is called.
+ */
+const struct dmi_system_id *dmi_first_match(const struct dmi_system_id *list)
+{
+	const struct dmi_system_id *d;
+
+	for (d = list; !dmi_is_end_of_table(d); d++)
+		if (dmi_matches(d))
+			return d;
+
+	return NULL;
+}
+EXPORT_SYMBOL(dmi_first_match);
+
+/**
+ *	dmi_get_system_info - return DMI data value
+ *	@field: data index (see enum dmi_field)
+ *
+ *	Returns one DMI data value, can be used to perform
+ *	complex DMI data checks.
+ */
+const char *dmi_get_system_info(int field)
+{
+	return dmi_ident[field];
+}
+EXPORT_SYMBOL(dmi_get_system_info);
+
+/**
+ * dmi_name_in_serial - Check if string is in the DMI product serial information
+ * @str: string to check for
+ */
+int dmi_name_in_serial(const char *str)
+{
+	int f = DMI_PRODUCT_SERIAL;
+	if (dmi_ident[f] && strstr(dmi_ident[f], str))
+		return 1;
+	return 0;
+}
+
+/**
+ *	dmi_name_in_vendors - Check if string is in the DMI system or board vendor name
+ *	@str: Case sensitive Name
+ */
+int dmi_name_in_vendors(const char *str)
+{
+	static int fields[] = { DMI_SYS_VENDOR, DMI_BOARD_VENDOR, DMI_NONE };
+	int i;
+	for (i = 0; fields[i] != DMI_NONE; i++) {
+		int f = fields[i];
+		if (dmi_ident[f] && strstr(dmi_ident[f], str))
+			return 1;
+	}
+	return 0;
+}
+EXPORT_SYMBOL(dmi_name_in_vendors);
+
+/**
+ *	dmi_find_device - find onboard device by type/name
+ *	@type: device type or %DMI_DEV_TYPE_ANY to match all device types
+ *	@name: device name string or %NULL to match all
+ *	@from: previous device found in search, or %NULL for new search.
+ *
+ *	Iterates through the list of known onboard devices. If a device is
+ *	found with a matching @type and @name, a pointer to its device
+ *	structure is returned.  Otherwise, %NULL is returned.
+ *	A new search is initiated by passing %NULL as the @from argument.
+ *	If @from is not %NULL, searches continue from next device.
+ */
+const struct dmi_device *dmi_find_device(int type, const char *name,
+				    const struct dmi_device *from)
+{
+	const struct list_head *head = from ? &from->list : &dmi_devices;
+	struct list_head *d;
+
+	for (d = head->next; d != &dmi_devices; d = d->next) {
+		const struct dmi_device *dev =
+			list_entry(d, struct dmi_device, list);
+
+		if (((type == DMI_DEV_TYPE_ANY) || (dev->type == type)) &&
+		    ((name == NULL) || (strcmp(dev->name, name) == 0)))
+			return dev;
+	}
+
+	return NULL;
+}
+EXPORT_SYMBOL(dmi_find_device);
+
+/**
+ *	dmi_get_date - parse a DMI date
+ *	@field:	data index (see enum dmi_field)
+ *	@yearp: optional out parameter for the year
+ *	@monthp: optional out parameter for the month
+ *	@dayp: optional out parameter for the day
+ *
+ *	The date field is assumed to be in the form resembling
+ *	[mm[/dd]]/yy[yy] and the result is stored in the out
+ *	parameters any or all of which can be omitted.
+ *
+ *	If the field doesn't exist, all out parameters are set to zero
+ *	and false is returned.  Otherwise, true is returned with any
+ *	invalid part of date set to zero.
+ *
+ *	On return, year, month and day are guaranteed to be in the
+ *	range of [0,9999], [0,12] and [0,31] respectively.
+ */
+bool dmi_get_date(int field, int *yearp, int *monthp, int *dayp)
+{
+	int year = 0, month = 0, day = 0;
+	bool exists;
+	const char *s, *y;
+	char *e;
+
+	s = dmi_get_system_info(field);
+	exists = s;
+	if (!exists)
+		goto out;
+
+	/*
+	 * Determine year first.  We assume the date string resembles
+	 * mm/dd/yy[yy] but the original code extracted only the year
+	 * from the end.  Keep the behavior in the spirit of no
+	 * surprises.
+	 */
+	y = strrchr(s, '/');
+	if (!y)
+		goto out;
+
+	y++;
+	year = simple_strtoul(y, &e, 10);
+	if (y != e && year < 100) {	/* 2-digit year */
+		year += 1900;
+		if (year < 1996)	/* no dates < spec 1.0 */
+			year += 100;
+	}
+	if (year > 9999)		/* year should fit in %04d */
+		year = 0;
+
+	/* parse the mm and dd */
+	month = simple_strtoul(s, &e, 10);
+	if (s == e || *e != '/' || !month || month > 12) {
+		month = 0;
+		goto out;
+	}
+
+	s = e + 1;
+	day = simple_strtoul(s, &e, 10);
+	if (s == y || s == e || *e != '/' || day > 31)
+		day = 0;
+out:
+	if (yearp)
+		*yearp = year;
+	if (monthp)
+		*monthp = month;
+	if (dayp)
+		*dayp = day;
+	return exists;
+}
+EXPORT_SYMBOL(dmi_get_date);
+
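+/*
+ * Usage sketch (hypothetical date strings): for a DMI_BIOS_DATE of
+ * "03/28/2014", dmi_get_date() stores year 2014, month 3, day 28 and
+ * returns true; for "04/01/98" the two-digit year is widened to 1998.
+ */
+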
+/**
+ *	dmi_get_bios_year - get a year out of DMI_BIOS_DATE field
+ *
+ *	Returns year on success, -ENXIO if DMI is not selected,
+ *	or a different negative error code if DMI field is not present
+ *	or not parseable.
+ */
+int dmi_get_bios_year(void)
+{
+	bool exists;
+	int year;
+
+	exists = dmi_get_date(DMI_BIOS_DATE, &year, NULL, NULL);
+	if (!exists)
+		return -ENODATA;
+
+	return year ? year : -ERANGE;
+}
+EXPORT_SYMBOL(dmi_get_bios_year);
+
+/**
+ *	dmi_walk - Walk the DMI table and get called back for every record
+ *	@decode: Callback function
+ *	@private_data: Private data to be passed to the callback function
+ *
+ *	Returns 0 on success, -ENXIO if DMI is not selected or not present,
+ *	or a different negative error code if DMI walking fails.
+ */
+int dmi_walk(void (*decode)(const struct dmi_header *, void *),
+	     void *private_data)
+{
+	u8 *buf;
+
+	if (!dmi_available)
+		return -ENXIO;
+
+	buf = dmi_remap(dmi_base, dmi_len);
+	if (buf == NULL)
+		return -ENOMEM;
+
+	dmi_decode_table(buf, decode, private_data);
+
+	dmi_unmap(buf);
+	return 0;
+}
+EXPORT_SYMBOL_GPL(dmi_walk);
+
+/**
+ * dmi_match - compare a string to the dmi field (if exists)
+ * @f: DMI field identifier
+ * @str: string to compare the DMI field to
+ *
+ * Returns true if the requested field equals the str (including NULL).
+ */
+bool dmi_match(enum dmi_field f, const char *str)
+{
+	const char *info = dmi_get_system_info(f);
+
+	if (info == NULL || str == NULL)
+		return info == str;
+
+	return !strcmp(info, str);
+}
+EXPORT_SYMBOL_GPL(dmi_match);
+
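+/*
+ * Usage sketch (hypothetical vendor string):
+ *
+ *	if (dmi_match(DMI_SYS_VENDOR, "Acme Inc."))
+ *		...
+ *
+ * Note this is an exact string comparison, unlike the substring
+ * matching done by dmi_check_system().
+ */
+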
+void dmi_memdev_name(u16 handle, const char **bank, const char **device)
+{
+	int n;
+
+	if (dmi_memdev == NULL)
+		return;
+
+	for (n = 0; n < dmi_memdev_nr; n++) {
+		if (handle == dmi_memdev[n].handle) {
+			*bank = dmi_memdev[n].bank;
+			*device = dmi_memdev[n].device;
+			break;
+		}
+	}
+}
+EXPORT_SYMBOL_GPL(dmi_memdev_name);
+
+u64 dmi_memdev_size(u16 handle)
+{
+	int n;
+
+	if (dmi_memdev) {
+		for (n = 0; n < dmi_memdev_nr; n++) {
+			if (handle == dmi_memdev[n].handle)
+				return dmi_memdev[n].size;
+		}
+	}
+	return ~0ull;
+}
+EXPORT_SYMBOL_GPL(dmi_memdev_size);
diff --git a/drivers/firmware/edd.c b/drivers/firmware/edd.c
new file mode 100644
index 0000000..1b82c89
--- /dev/null
+++ b/drivers/firmware/edd.c
@@ -0,0 +1,799 @@
+/*
+ * linux/drivers/firmware/edd.c
+ *  Copyright (C) 2002, 2003, 2004 Dell Inc.
+ *  by Matt Domsch <Matt_Domsch@dell.com>
+ *  disk signature by Matt Domsch, Andrew Wilks, and Sandeep K. Shandilya
+ *  legacy CHS by Patrick J. LoPresti <patl@users.sourceforge.net>
+ *
+ * BIOS Enhanced Disk Drive Services (EDD)
+ * conformant to T13 Committee www.t13.org
+ *   projects 1572D, 1484D, 1386D, 1226DT
+ *
+ * This code takes information provided by BIOS EDD calls
+ * fn41 - Check Extensions Present and
+ * fn48 - Get Device Parameters with EDD extensions
+ * made in setup.S, copied to safe structures in setup.c,
+ * and presents it in sysfs.
+ *
+ * Please see http://linux.dell.com/edd/results.html for
+ * the list of BIOSes which have been reported to implement EDD.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License v2.0 as published by
+ * the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/stat.h>
+#include <linux/err.h>
+#include <linux/ctype.h>
+#include <linux/slab.h>
+#include <linux/limits.h>
+#include <linux/device.h>
+#include <linux/pci.h>
+#include <linux/blkdev.h>
+#include <linux/edd.h>
+
+#define EDD_VERSION "0.16"
+#define EDD_DATE    "2004-Jun-25"
+
+MODULE_AUTHOR("Matt Domsch <Matt_Domsch@Dell.com>");
+MODULE_DESCRIPTION("sysfs interface to BIOS EDD information");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(EDD_VERSION);
+
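+/*
+ * Bytes still available in the PAGE_SIZE'd sysfs buffer, minus one for
+ * a trailing nul; relies on local variables p (write cursor) and buf
+ * (buffer start) in each show routine below.
+ */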
+#define left (PAGE_SIZE - (p - buf) - 1)
+
+struct edd_device {
+	unsigned int index;
+	unsigned int mbr_signature;
+	struct edd_info *info;
+	struct kobject kobj;
+};
+
+struct edd_attribute {
+	struct attribute attr;
+	ssize_t (*show)(struct edd_device *edev, char *buf);
+	int (*test)(struct edd_device *edev);
+};
+
+/* forward declarations */
+static int edd_dev_is_type(struct edd_device *edev, const char *type);
+static struct pci_dev *edd_get_pci_dev(struct edd_device *edev);
+
+static struct edd_device *edd_devices[EDD_MBR_SIG_MAX];
+
+#define EDD_DEVICE_ATTR(_name,_mode,_show,_test) \
+struct edd_attribute edd_attr_##_name = { 	\
+	.attr = {.name = __stringify(_name), .mode = _mode },	\
+	.show	= _show,				\
+	.test	= _test,				\
+};
+
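+/*
+ * Illustrative expansion (hypothetical use): EDD_DEVICE_ATTR(version,
+ * 0444, edd_show_version, edd_has_edd_info) defines a struct
+ * edd_attribute named edd_attr_version; the ->test hook is presumably
+ * used by registration code outside this excerpt to decide whether the
+ * sysfs file is created for a given device.
+ */
+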
+static int
+edd_has_mbr_signature(struct edd_device *edev)
+{
+	return edev->index < min_t(unsigned char, edd.mbr_signature_nr, EDD_MBR_SIG_MAX);
+}
+
+static int
+edd_has_edd_info(struct edd_device *edev)
+{
+	return edev->index < min_t(unsigned char, edd.edd_info_nr, EDDMAXNR);
+}
+
+static inline struct edd_info *
+edd_dev_get_info(struct edd_device *edev)
+{
+	return edev->info;
+}
+
+static inline void
+edd_dev_set_info(struct edd_device *edev, int i)
+{
+	edev->index = i;
+	if (edd_has_mbr_signature(edev))
+		edev->mbr_signature = edd.mbr_signature[i];
+	if (edd_has_edd_info(edev))
+		edev->info = &edd.edd_info[i];
+}
+
+#define to_edd_attr(_attr) container_of(_attr,struct edd_attribute,attr)
+#define to_edd_device(obj) container_of(obj,struct edd_device,kobj)
+
+static ssize_t
+edd_attr_show(struct kobject * kobj, struct attribute *attr, char *buf)
+{
+	struct edd_device *dev = to_edd_device(kobj);
+	struct edd_attribute *edd_attr = to_edd_attr(attr);
+	ssize_t ret = -EIO;
+
+	if (edd_attr->show)
+		ret = edd_attr->show(dev, buf);
+	return ret;
+}
+
+static const struct sysfs_ops edd_attr_ops = {
+	.show = edd_attr_show,
+};
+
+static ssize_t
+edd_show_host_bus(struct edd_device *edev, char *buf)
+{
+	struct edd_info *info;
+	char *p = buf;
+	int i;
+
+	if (!edev)
+		return -EINVAL;
+	info = edd_dev_get_info(edev);
+	if (!info || !buf)
+		return -EINVAL;
+
+	for (i = 0; i < 4; i++) {
+		if (isprint(info->params.host_bus_type[i])) {
+			p += scnprintf(p, left, "%c", info->params.host_bus_type[i]);
+		} else {
+			p += scnprintf(p, left, " ");
+		}
+	}
+
+	if (!strncmp(info->params.host_bus_type, "ISA", 3)) {
+		p += scnprintf(p, left, "\tbase_address: %x\n",
+			     info->params.interface_path.isa.base_address);
+	} else if (!strncmp(info->params.host_bus_type, "PCIX", 4) ||
+		   !strncmp(info->params.host_bus_type, "PCI", 3) ||
+		   !strncmp(info->params.host_bus_type, "XPRS", 4)) {
+		p += scnprintf(p, left,
+			     "\t%02x:%02x.%d  channel: %u\n",
+			     info->params.interface_path.pci.bus,
+			     info->params.interface_path.pci.slot,
+			     info->params.interface_path.pci.function,
+			     info->params.interface_path.pci.channel);
+	} else if (!strncmp(info->params.host_bus_type, "IBND", 4) ||
+		   !strncmp(info->params.host_bus_type, "HTPT", 4)) {
+		p += scnprintf(p, left,
+			     "\tTBD: %llx\n",
+			     info->params.interface_path.ibnd.reserved);
+
+	} else {
+		p += scnprintf(p, left, "\tunknown: %llx\n",
+			     info->params.interface_path.unknown.reserved);
+	}
+	return (p - buf);
+}
+
+static ssize_t
+edd_show_interface(struct edd_device *edev, char *buf)
+{
+	struct edd_info *info;
+	char *p = buf;
+	int i;
+
+	if (!edev)
+		return -EINVAL;
+	info = edd_dev_get_info(edev);
+	if (!info || !buf)
+		return -EINVAL;
+
+	for (i = 0; i < 8; i++) {
+		if (isprint(info->params.interface_type[i])) {
+			p += scnprintf(p, left, "%c", info->params.interface_type[i]);
+		} else {
+			p += scnprintf(p, left, " ");
+		}
+	}
+	if (!strncmp(info->params.interface_type, "ATAPI", 5)) {
+		p += scnprintf(p, left, "\tdevice: %u  lun: %u\n",
+			     info->params.device_path.atapi.device,
+			     info->params.device_path.atapi.lun);
+	} else if (!strncmp(info->params.interface_type, "ATA", 3)) {
+		p += scnprintf(p, left, "\tdevice: %u\n",
+			     info->params.device_path.ata.device);
+	} else if (!strncmp(info->params.interface_type, "SCSI", 4)) {
+		p += scnprintf(p, left, "\tid: %u  lun: %llu\n",
+			     info->params.device_path.scsi.id,
+			     info->params.device_path.scsi.lun);
+	} else if (!strncmp(info->params.interface_type, "USB", 3)) {
+		p += scnprintf(p, left, "\tserial_number: %llx\n",
+			     info->params.device_path.usb.serial_number);
+	} else if (!strncmp(info->params.interface_type, "1394", 4)) {
+		p += scnprintf(p, left, "\teui: %llx\n",
+			     info->params.device_path.i1394.eui);
+	} else if (!strncmp(info->params.interface_type, "FIBRE", 5)) {
+		p += scnprintf(p, left, "\twwid: %llx lun: %llx\n",
+			     info->params.device_path.fibre.wwid,
+			     info->params.device_path.fibre.lun);
+	} else if (!strncmp(info->params.interface_type, "I2O", 3)) {
+		p += scnprintf(p, left, "\tidentity_tag: %llx\n",
+			     info->params.device_path.i2o.identity_tag);
+	} else if (!strncmp(info->params.interface_type, "RAID", 4)) {
+		p += scnprintf(p, left, "\tidentity_tag: %x\n",
+			     info->params.device_path.raid.array_number);
+	} else if (!strncmp(info->params.interface_type, "SATA", 4)) {
+		p += scnprintf(p, left, "\tdevice: %u\n",
+			     info->params.device_path.sata.device);
+	} else {
+		p += scnprintf(p, left, "\tunknown: %llx %llx\n",
+			     info->params.device_path.unknown.reserved1,
+			     info->params.device_path.unknown.reserved2);
+	}
+
+	return (p - buf);
+}
+
+/**
+ * edd_show_raw_data() - copies raw data to buffer for userspace to parse
+ * @edev: target edd_device
+ * @buf: output buffer
+ *
+ * Returns: number of bytes written, or -EINVAL on failure
+ */
+static ssize_t
+edd_show_raw_data(struct edd_device *edev, char *buf)
+{
+	struct edd_info *info;
+	ssize_t len = sizeof (info->params);
+	if (!edev)
+		return -EINVAL;
+	info = edd_dev_get_info(edev);
+	if (!info || !buf)
+		return -EINVAL;
+
+	if (!(info->params.key == 0xBEDD || info->params.key == 0xDDBE))
+		len = info->params.length;
+
+	/* In case of buggy BIOSs */
+	if (len > (sizeof(info->params)))
+		len = sizeof(info->params);
+
+	memcpy(buf, &info->params, len);
+	return len;
+}
+
+static ssize_t
+edd_show_version(struct edd_device *edev, char *buf)
+{
+	struct edd_info *info;
+	char *p = buf;
+	if (!edev)
+		return -EINVAL;
+	info = edd_dev_get_info(edev);
+	if (!info || !buf)
+		return -EINVAL;
+
+	p += scnprintf(p, left, "0x%02x\n", info->version);
+	return (p - buf);
+}
+
+static ssize_t
+edd_show_mbr_signature(struct edd_device *edev, char *buf)
+{
+	char *p = buf;
+	p += scnprintf(p, left, "0x%08x\n", edev->mbr_signature);
+	return (p - buf);
+}
+
+static ssize_t
+edd_show_extensions(struct edd_device *edev, char *buf)
+{
+	struct edd_info *info;
+	char *p = buf;
+	if (!edev)
+		return -EINVAL;
+	info = edd_dev_get_info(edev);
+	if (!info || !buf)
+		return -EINVAL;
+
+	if (info->interface_support & EDD_EXT_FIXED_DISK_ACCESS) {
+		p += scnprintf(p, left, "Fixed disk access\n");
+	}
+	if (info->interface_support & EDD_EXT_DEVICE_LOCKING_AND_EJECTING) {
+		p += scnprintf(p, left, "Device locking and ejecting\n");
+	}
+	if (info->interface_support & EDD_EXT_ENHANCED_DISK_DRIVE_SUPPORT) {
+		p += scnprintf(p, left, "Enhanced Disk Drive support\n");
+	}
+	if (info->interface_support & EDD_EXT_64BIT_EXTENSIONS) {
+		p += scnprintf(p, left, "64-bit extensions\n");
+	}
+	return (p - buf);
+}
+
+static ssize_t
+edd_show_info_flags(struct edd_device *edev, char *buf)
+{
+	struct edd_info *info;
+	char *p = buf;
+	if (!edev)
+		return -EINVAL;
+	info = edd_dev_get_info(edev);
+	if (!info || !buf)
+		return -EINVAL;
+
+	if (info->params.info_flags & EDD_INFO_DMA_BOUNDARY_ERROR_TRANSPARENT)
+		p += scnprintf(p, left, "DMA boundary error transparent\n");
+	if (info->params.info_flags & EDD_INFO_GEOMETRY_VALID)
+		p += scnprintf(p, left, "geometry valid\n");
+	if (info->params.info_flags & EDD_INFO_REMOVABLE)
+		p += scnprintf(p, left, "removable\n");
+	if (info->params.info_flags & EDD_INFO_WRITE_VERIFY)
+		p += scnprintf(p, left, "write verify\n");
+	if (info->params.info_flags & EDD_INFO_MEDIA_CHANGE_NOTIFICATION)
+		p += scnprintf(p, left, "media change notification\n");
+	if (info->params.info_flags & EDD_INFO_LOCKABLE)
+		p += scnprintf(p, left, "lockable\n");
+	if (info->params.info_flags & EDD_INFO_NO_MEDIA_PRESENT)
+		p += scnprintf(p, left, "no media present\n");
+	if (info->params.info_flags & EDD_INFO_USE_INT13_FN50)
+		p += scnprintf(p, left, "use int13 fn50\n");
+	return (p - buf);
+}
+
+static ssize_t
+edd_show_legacy_max_cylinder(struct edd_device *edev, char *buf)
+{
+	struct edd_info *info;
+	char *p = buf;
+	if (!edev)
+		return -EINVAL;
+	info = edd_dev_get_info(edev);
+	if (!info || !buf)
+		return -EINVAL;
+
+	p += snprintf(p, left, "%u\n", info->legacy_max_cylinder);
+	return (p - buf);
+}
+
+static ssize_t
+edd_show_legacy_max_head(struct edd_device *edev, char *buf)
+{
+	struct edd_info *info;
+	char *p = buf;
+	if (!edev)
+		return -EINVAL;
+	info = edd_dev_get_info(edev);
+	if (!info || !buf)
+		return -EINVAL;
+
+	p += snprintf(p, left, "%u\n", info->legacy_max_head);
+	return (p - buf);
+}
+
+static ssize_t
+edd_show_legacy_sectors_per_track(struct edd_device *edev, char *buf)
+{
+	struct edd_info *info;
+	char *p = buf;
+	if (!edev)
+		return -EINVAL;
+	info = edd_dev_get_info(edev);
+	if (!info || !buf)
+		return -EINVAL;
+
+	p += snprintf(p, left, "%u\n", info->legacy_sectors_per_track);
+	return (p - buf);
+}
+
+static ssize_t
+edd_show_default_cylinders(struct edd_device *edev, char *buf)
+{
+	struct edd_info *info;
+	char *p = buf;
+	if (!edev)
+		return -EINVAL;
+	info = edd_dev_get_info(edev);
+	if (!info || !buf)
+		return -EINVAL;
+
+	p += scnprintf(p, left, "%u\n", info->params.num_default_cylinders);
+	return (p - buf);
+}
+
+static ssize_t
+edd_show_default_heads(struct edd_device *edev, char *buf)
+{
+	struct edd_info *info;
+	char *p = buf;
+	if (!edev)
+		return -EINVAL;
+	info = edd_dev_get_info(edev);
+	if (!info || !buf)
+		return -EINVAL;
+
+	p += scnprintf(p, left, "%u\n", info->params.num_default_heads);
+	return (p - buf);
+}
+
+static ssize_t
+edd_show_default_sectors_per_track(struct edd_device *edev, char *buf)
+{
+	struct edd_info *info;
+	char *p = buf;
+	if (!edev)
+		return -EINVAL;
+	info = edd_dev_get_info(edev);
+	if (!info || !buf)
+		return -EINVAL;
+
+	p += scnprintf(p, left, "%u\n", info->params.sectors_per_track);
+	return (p - buf);
+}
+
+static ssize_t
+edd_show_sectors(struct edd_device *edev, char *buf)
+{
+	struct edd_info *info;
+	char *p = buf;
+	if (!edev)
+		return -EINVAL;
+	info = edd_dev_get_info(edev);
+	if (!info || !buf)
+		return -EINVAL;
+
+	p += scnprintf(p, left, "%llu\n", info->params.number_of_sectors);
+	return (p - buf);
+}
+
+
+/*
+ * Some device instances may not have all the above attributes,
+ * or the attribute values may be meaningless (e.g. if
+ * the device is < EDD 3.0, it won't have host_bus and interface
+ * information), so don't bother making files for them.  Likewise,
+ * if the default_{cylinders,heads,sectors_per_track} values
+ * are zero, the BIOS doesn't provide sane values, so don't bother
+ * creating files for them either.
+ */
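+
+/*
+ * For a typical EDD 3.0 disk this yields (illustrative):
+ *
+ *	/sys/firmware/edd/int13_dev80/{raw_data,version,extensions,
+ *	info_flags,sectors,interface,host_bus,mbr_signature}
+ */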
+
+static int
+edd_has_legacy_max_cylinder(struct edd_device *edev)
+{
+	struct edd_info *info;
+	if (!edev)
+		return 0;
+	info = edd_dev_get_info(edev);
+	if (!info)
+		return 0;
+	return info->legacy_max_cylinder > 0;
+}
+
+static int
+edd_has_legacy_max_head(struct edd_device *edev)
+{
+	struct edd_info *info;
+	if (!edev)
+		return 0;
+	info = edd_dev_get_info(edev);
+	if (!info)
+		return 0;
+	return info->legacy_max_head > 0;
+}
+
+static int
+edd_has_legacy_sectors_per_track(struct edd_device *edev)
+{
+	struct edd_info *info;
+	if (!edev)
+		return 0;
+	info = edd_dev_get_info(edev);
+	if (!info)
+		return 0;
+	return info->legacy_sectors_per_track > 0;
+}
+
+static int
+edd_has_default_cylinders(struct edd_device *edev)
+{
+	struct edd_info *info;
+	if (!edev)
+		return 0;
+	info = edd_dev_get_info(edev);
+	if (!info)
+		return 0;
+	return info->params.num_default_cylinders > 0;
+}
+
+static int
+edd_has_default_heads(struct edd_device *edev)
+{
+	struct edd_info *info;
+	if (!edev)
+		return 0;
+	info = edd_dev_get_info(edev);
+	if (!info)
+		return 0;
+	return info->params.num_default_heads > 0;
+}
+
+static int
+edd_has_default_sectors_per_track(struct edd_device *edev)
+{
+	struct edd_info *info;
+	if (!edev)
+		return 0;
+	info = edd_dev_get_info(edev);
+	if (!info)
+		return 0;
+	return info->params.sectors_per_track > 0;
+}
+
+static int
+edd_has_edd30(struct edd_device *edev)
+{
+	struct edd_info *info;
+	int i;
+	u8 csum = 0;
+
+	if (!edev)
+		return 0;
+	info = edd_dev_get_info(edev);
+	if (!info)
+		return 0;
+
+	if (!(info->params.key == 0xBEDD || info->params.key == 0xDDBE)) {
+		return 0;
+	}
+
+
+	/* We support only T13 spec */
+	if (info->params.device_path_info_length != 44)
+		return 0;
+
+	for (i = 30; i < info->params.device_path_info_length + 30; i++)
+		csum += *(((u8 *)&info->params) + i);
+
+	if (csum)
+		return 0;
+
+	return 1;
+}
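+
+/*
+ * A sketch of the T13 rule checked above: the 44 bytes of device path
+ * information (offsets 30..73 of the parameter block) include their own
+ * checksum byte, chosen so that all 44 bytes sum to zero modulo 256:
+ *
+ *	u8 sum = 0;
+ *	for (i = 30; i < 74; i++)
+ *		sum += *(((u8 *)&info->params) + i);
+ *	// valid if and only if sum == 0
+ */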
+
+
+static EDD_DEVICE_ATTR(raw_data, 0444, edd_show_raw_data, edd_has_edd_info);
+static EDD_DEVICE_ATTR(version, 0444, edd_show_version, edd_has_edd_info);
+static EDD_DEVICE_ATTR(extensions, 0444, edd_show_extensions, edd_has_edd_info);
+static EDD_DEVICE_ATTR(info_flags, 0444, edd_show_info_flags, edd_has_edd_info);
+static EDD_DEVICE_ATTR(sectors, 0444, edd_show_sectors, edd_has_edd_info);
+static EDD_DEVICE_ATTR(legacy_max_cylinder, 0444,
+		       edd_show_legacy_max_cylinder,
+		       edd_has_legacy_max_cylinder);
+static EDD_DEVICE_ATTR(legacy_max_head, 0444, edd_show_legacy_max_head,
+		       edd_has_legacy_max_head);
+static EDD_DEVICE_ATTR(legacy_sectors_per_track, 0444,
+		       edd_show_legacy_sectors_per_track,
+		       edd_has_legacy_sectors_per_track);
+static EDD_DEVICE_ATTR(default_cylinders, 0444, edd_show_default_cylinders,
+		       edd_has_default_cylinders);
+static EDD_DEVICE_ATTR(default_heads, 0444, edd_show_default_heads,
+		       edd_has_default_heads);
+static EDD_DEVICE_ATTR(default_sectors_per_track, 0444,
+		       edd_show_default_sectors_per_track,
+		       edd_has_default_sectors_per_track);
+static EDD_DEVICE_ATTR(interface, 0444, edd_show_interface, edd_has_edd30);
+static EDD_DEVICE_ATTR(host_bus, 0444, edd_show_host_bus, edd_has_edd30);
+static EDD_DEVICE_ATTR(mbr_signature, 0444, edd_show_mbr_signature, edd_has_mbr_signature);
+
+
+/* These are default attributes that are added for every edd
+ * device discovered.  There are none.
+ */
+static struct attribute * def_attrs[] = {
+	NULL,
+};
+
+/* These attributes are conditional and only added for some devices. */
+static struct edd_attribute * edd_attrs[] = {
+	&edd_attr_raw_data,
+	&edd_attr_version,
+	&edd_attr_extensions,
+	&edd_attr_info_flags,
+	&edd_attr_sectors,
+	&edd_attr_legacy_max_cylinder,
+	&edd_attr_legacy_max_head,
+	&edd_attr_legacy_sectors_per_track,
+	&edd_attr_default_cylinders,
+	&edd_attr_default_heads,
+	&edd_attr_default_sectors_per_track,
+	&edd_attr_interface,
+	&edd_attr_host_bus,
+	&edd_attr_mbr_signature,
+	NULL,
+};
+
+/**
+ *	edd_release - free edd structure
+ *	@kobj:	kobject of edd structure
+ *
+ *	This is called when the refcount of the edd structure
+ *	reaches 0. This should happen right after we unregister,
+ *	but just in case, we use the release callback anyway.
+ */
+
+static void edd_release(struct kobject * kobj)
+{
+	struct edd_device * dev = to_edd_device(kobj);
+	kfree(dev);
+}
+
+static struct kobj_type edd_ktype = {
+	.release	= edd_release,
+	.sysfs_ops	= &edd_attr_ops,
+	.default_attrs	= def_attrs,
+};
+
+static struct kset *edd_kset;
+
+
+/**
+ * edd_dev_is_type() - is this EDD device a 'type' device?
+ * @edev: target edd_device
+ * @type: a host bus or interface identifier string per the EDD spec
+ *
+ * Returns 1 (TRUE) if it is a 'type' device, 0 otherwise.
+ */
+static int
+edd_dev_is_type(struct edd_device *edev, const char *type)
+{
+	struct edd_info *info;
+	if (!edev)
+		return 0;
+	info = edd_dev_get_info(edev);
+
+	if (type && info) {
+		if (!strncmp(info->params.host_bus_type, type, strlen(type)) ||
+		    !strncmp(info->params.interface_type, type, strlen(type)))
+			return 1;
+	}
+	return 0;
+}
+
+/**
+ * edd_get_pci_dev() - finds pci_dev that matches edev
+ * @edev: edd_device
+ *
+ * Returns pci_dev if found, or NULL
+ */
+static struct pci_dev *
+edd_get_pci_dev(struct edd_device *edev)
+{
+	struct edd_info *info = edd_dev_get_info(edev);
+
+	if (edd_dev_is_type(edev, "PCI") || edd_dev_is_type(edev, "XPRS")) {
+		return pci_get_domain_bus_and_slot(0,
+				info->params.interface_path.pci.bus,
+				PCI_DEVFN(info->params.interface_path.pci.slot,
+				info->params.interface_path.pci.function));
+	}
+	return NULL;
+}
+
+static int
+edd_create_symlink_to_pcidev(struct edd_device *edev)
+{
+
+	struct pci_dev *pci_dev = edd_get_pci_dev(edev);
+	int ret;
+	if (!pci_dev)
+		return 1;
+	ret = sysfs_create_link(&edev->kobj,&pci_dev->dev.kobj,"pci_dev");
+	pci_dev_put(pci_dev);
+	return ret;
+}
+
+static inline void
+edd_device_unregister(struct edd_device *edev)
+{
+	kobject_put(&edev->kobj);
+}
+
+static void edd_populate_dir(struct edd_device * edev)
+{
+	struct edd_attribute * attr;
+	int error = 0;
+	int i;
+
+	for (i = 0; (attr = edd_attrs[i]) && !error; i++) {
+		if (!attr->test || attr->test(edev))
+			error = sysfs_create_file(&edev->kobj, &attr->attr);
+	}
+
+	if (!error) {
+		edd_create_symlink_to_pcidev(edev);
+	}
+}
+
+static int
+edd_device_register(struct edd_device *edev, int i)
+{
+	int error;
+
+	if (!edev)
+		return 1;
+	edd_dev_set_info(edev, i);
+	edev->kobj.kset = edd_kset;
+	error = kobject_init_and_add(&edev->kobj, &edd_ktype, NULL,
+				     "int13_dev%02x", 0x80 + i);
+	if (!error) {
+		edd_populate_dir(edev);
+		kobject_uevent(&edev->kobj, KOBJ_ADD);
+	}
+	return error;
+}
+
+static inline int edd_num_devices(void)
+{
+	return max_t(unsigned char,
+		     min_t(unsigned char, EDD_MBR_SIG_MAX, edd.mbr_signature_nr),
+		     min_t(unsigned char, EDDMAXNR, edd.edd_info_nr));
+}
+
+/**
+ * edd_init() - creates sysfs tree of EDD data
+ */
+static int __init
+edd_init(void)
+{
+	int i;
+	int rc = 0;
+	struct edd_device *edev;
+
+	if (!edd_num_devices())
+		return -ENODEV;
+
+	printk(KERN_INFO "BIOS EDD facility v%s %s, %d devices found\n",
+	       EDD_VERSION, EDD_DATE, edd_num_devices());
+
+	edd_kset = kset_create_and_add("edd", NULL, firmware_kobj);
+	if (!edd_kset)
+		return -ENOMEM;
+
+	for (i = 0; i < edd_num_devices(); i++) {
+		edev = kzalloc(sizeof(*edev), GFP_KERNEL);
+		if (!edev) {
+			rc = -ENOMEM;
+			goto out;
+		}
+
+		rc = edd_device_register(edev, i);
+		if (rc) {
+			kfree(edev);
+			goto out;
+		}
+		edd_devices[i] = edev;
+	}
+
+	return 0;
+
+out:
+	while (--i >= 0)
+		edd_device_unregister(edd_devices[i]);
+	kset_unregister(edd_kset);
+	return rc;
+}
+
+static void __exit
+edd_exit(void)
+{
+	int i;
+	struct edd_device *edev;
+
+	for (i = 0; i < edd_num_devices(); i++) {
+		if ((edev = edd_devices[i]))
+			edd_device_unregister(edev);
+	}
+	kset_unregister(edd_kset);
+}
+
+late_initcall(edd_init);
+module_exit(edd_exit);
diff --git a/drivers/firmware/efi/Kconfig b/drivers/firmware/efi/Kconfig
new file mode 100644
index 0000000..89110df
--- /dev/null
+++ b/drivers/firmware/efi/Kconfig
@@ -0,0 +1,200 @@
+menu "EFI (Extensible Firmware Interface) Support"
+	depends on EFI
+
+config EFI_VARS
+	tristate "EFI Variable Support via sysfs"
+	depends on EFI
+	default n
+	help
+	  If you say Y here, you are able to get EFI (Extensible Firmware
+	  Interface) variable information via sysfs.  You may read,
+	  write, create, and destroy EFI variables through this interface.
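+	  The variables appear under /sys/firmware/efi/vars.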
+
+	  Note that using this driver in concert with efibootmgr requires
+	  test release version 0.5.0-test3 or later, which is
+	  available from:
+	  <http://linux.dell.com/efibootmgr/testing/efibootmgr-0.5.0-test3.tar.gz>
+
+	  Subsequent efibootmgr releases may be found at:
+	  <http://github.com/vathpela/efibootmgr>
+
+config EFI_ESRT
+	bool
+	depends on EFI && !IA64
+	default y
+
+config EFI_VARS_PSTORE
+	tristate "Register efivars backend for pstore"
+	depends on EFI_VARS && PSTORE
+	default y
+	help
+	  Say Y here to enable the use of efivars as a backend to pstore. This
+	  will allow writing console messages, crash dumps, or anything
+	  else supported by pstore to EFI variables.
+
+config EFI_VARS_PSTORE_DEFAULT_DISABLE
+	bool "Disable using efivars as a pstore backend by default"
+	depends on EFI_VARS_PSTORE
+	default n
+	help
+	  Saying Y here will disable the use of efivars as a storage
+	  backend for pstore by default. This setting can be overridden
+	  using the efivars module's pstore_disable parameter.
+
+config EFI_RUNTIME_MAP
+	bool "Export efi runtime maps to sysfs"
+	depends on X86 && EFI && KEXEC_CORE
+	default y
+	help
+	  Export efi runtime memory maps to /sys/firmware/efi/runtime-map.
+	  That memory map is used for example by kexec to set up efi virtual
+	  mapping the 2nd kernel, but can also be used for debugging purposes.
+
+	  See also Documentation/ABI/testing/sysfs-firmware-efi-runtime-map.
+
+config EFI_FAKE_MEMMAP
+	bool "Enable EFI fake memory map"
+	depends on EFI && X86
+	default n
+	help
+	  Saying Y here will enable "efi_fake_mem" boot option.
+	  By specifying this parameter, you can add arbitrary attribute
+	  to specific memory range by updating original (firmware provided)
+	  EFI memmap.
+	  This is useful for debugging of EFI memmap related feature.
+	  e.g. Address Range Mirroring feature.
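+	  For example, passing "efi_fake_mem=2G@4G:0x10000" marks the
+	  2G range starting at 4G with attribute 0x10000
+	  (EFI_MEMORY_MORE_RELIABLE).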
+
+config EFI_MAX_FAKE_MEM
+	int "maximum allowable number of ranges in efi_fake_mem boot option"
+	depends on EFI_FAKE_MEMMAP
+	range 1 128
+	default 8
+	help
+	  Maximum allowable number of ranges in the efi_fake_mem boot option.
+	  Ranges can be set up to this value using a comma-separated list.
+	  The default value is 8.
+
+config EFI_PARAMS_FROM_FDT
+	bool
+	help
+	  Select this config option from the architecture Kconfig if
+	  the EFI runtime support gets the system table address, memory
+	  map address, and other parameters from the device tree.
+
+config EFI_RUNTIME_WRAPPERS
+	bool
+
+config EFI_ARMSTUB
+	bool
+
+config EFI_ARMSTUB_DTB_LOADER
+	bool "Enable the DTB loader"
+	depends on EFI_ARMSTUB
+	default y
+	help
+	  Select this config option to add support for the dtb= command
+	  line parameter, allowing a device tree blob to be loaded into
+	  memory from the EFI System Partition by the stub.
+
+	  If the device tree is provided by the platform or by
+	  the bootloader, this option may not be needed. But for
+	  various development reasons, and to maintain existing
+	  functionality for bootloaders that lack such support,
+	  this option is necessary.
+
+config EFI_BOOTLOADER_CONTROL
+	tristate "EFI Bootloader Control"
+	depends on EFI_VARS
+	default n
+	---help---
+	  This module installs a reboot hook, such that if reboot() is
+	  invoked with a string argument NNN, "NNN" is copied to the
+	  "LoaderEntryOneShot" EFI variable, to be read by the
+	  bootloader. If the string matches one of the boot labels
+	  defined in its configuration, the bootloader will boot once
+	  to that label. The "LoaderEntryRebootReason" EFI variable is
+	  set with the reboot reason: "reboot" or "shutdown". The
+	  bootloader reads this reboot reason and takes particular
+	  action according to its policy.
+
+config EFI_CAPSULE_LOADER
+	tristate "EFI capsule loader"
+	depends on EFI
+	help
+	  This option exposes a loader interface "/dev/efi_capsule_loader" for
+	  users to load EFI capsules. This driver requires working runtime
+	  capsule support in the firmware, which many OEMs do not provide.
+
+	  Most users should say N.
+
+config EFI_CAPSULE_QUIRK_QUARK_CSH
+	bool "Add support for Quark capsules with non-standard headers"
+	depends on X86 && !64BIT
+	select EFI_CAPSULE_LOADER
+	default y
+	help
+	  Add support for processing Quark X1000 EFI capsules, whose header
+	  layout deviates from the layout mandated by the UEFI specification.
+
+config EFI_TEST
+	tristate "EFI Runtime Service Tests Support"
+	depends on EFI
+	default n
+	help
+	  This driver uses the efi.<service> function pointers directly instead
+	  of going through the efivar API, because it is not trying to test the
+	  kernel subsystem, just the UEFI runtime service interfaces which are
+	  provided by the firmware. This driver is used by the Firmware Test
+	  Suite (FWTS) to test the readiness of the firmware's UEFI runtime
+	  interfaces.
+	  Details for FWTS are available from:
+	  <https://wiki.ubuntu.com/FirmwareTestSuite>
+
+	  Say Y here to enable the runtime services support via /dev/efi_test.
+	  If unsure, say N.
+
+config APPLE_PROPERTIES
+	bool "Apple Device Properties"
+	depends on EFI_STUB && X86
+	select EFI_DEV_PATH_PARSER
+	select UCS2_STRING
+	help
+	  Retrieve properties from EFI on Apple Macs and assign them to
+	  devices, allowing for improved support of Apple hardware.
+	  Properties that would otherwise be missing include the
+	  Thunderbolt Device ROM and GPU configuration data.
+
+	  If unsure, say Y if you have a Mac.  Otherwise N.
+
+config RESET_ATTACK_MITIGATION
+	bool "Reset memory attack mitigation"
+	depends on EFI_STUB
+	help
+	  Request that the firmware clear the contents of RAM after a reboot
+	  using the TCG Platform Reset Attack Mitigation specification. This
+	  protects against an attacker forcibly rebooting the system while it
+	  still contains secrets in RAM, booting another OS and extracting the
+	  secrets. This should only be enabled when userland is configured to
+	  clear the MemoryOverwriteRequest flag on clean shutdown after secrets
+	  have been evicted, since otherwise it will trigger even on clean
+	  reboots.
+
+endmenu
+
+config UEFI_CPER
+	bool
+
+config UEFI_CPER_ARM
+	bool
+	depends on UEFI_CPER && ( ARM || ARM64 )
+	default y
+
+config UEFI_CPER_X86
+	bool
+	depends on UEFI_CPER && X86
+	default y
+
+config EFI_DEV_PATH_PARSER
+	bool
+	depends on ACPI
+	default n
diff --git a/drivers/firmware/efi/Makefile b/drivers/firmware/efi/Makefile
new file mode 100644
index 0000000..5f9f503
--- /dev/null
+++ b/drivers/firmware/efi/Makefile
@@ -0,0 +1,34 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for linux kernel
+#
+
+#
+# ARM64 maps efi runtime services in userspace addresses
+# which don't have a KASAN shadow. So dereferencing these addresses
+# in efi_call_virt() would crash if this code were instrumented.
+#
+KASAN_SANITIZE_runtime-wrappers.o	:= n
+
+obj-$(CONFIG_ACPI_BGRT) 		+= efi-bgrt.o
+obj-$(CONFIG_EFI)			+= efi.o vars.o reboot.o memattr.o tpm.o
+obj-$(CONFIG_EFI)			+= capsule.o memmap.o
+obj-$(CONFIG_EFI_VARS)			+= efivars.o
+obj-$(CONFIG_EFI_ESRT)			+= esrt.o
+obj-$(CONFIG_EFI_VARS_PSTORE)		+= efi-pstore.o
+obj-$(CONFIG_UEFI_CPER)			+= cper.o
+obj-$(CONFIG_EFI_RUNTIME_MAP)		+= runtime-map.o
+obj-$(CONFIG_EFI_RUNTIME_WRAPPERS)	+= runtime-wrappers.o
+obj-$(CONFIG_EFI_STUB)			+= libstub/
+obj-$(CONFIG_EFI_FAKE_MEMMAP)		+= fake_mem.o
+obj-$(CONFIG_EFI_BOOTLOADER_CONTROL)	+= efibc.o
+obj-$(CONFIG_EFI_TEST)			+= test/
+obj-$(CONFIG_EFI_DEV_PATH_PARSER)	+= dev-path-parser.o
+obj-$(CONFIG_APPLE_PROPERTIES)		+= apple-properties.o
+
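+# arm-obj-y is non-empty only when CONFIG_EFI=y, so the two objects below
+# are built just for EFI-enabled ARM/ARM64 kernels.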
+arm-obj-$(CONFIG_EFI)			:= arm-init.o arm-runtime.o
+obj-$(CONFIG_ARM)			+= $(arm-obj-y)
+obj-$(CONFIG_ARM64)			+= $(arm-obj-y)
+obj-$(CONFIG_EFI_CAPSULE_LOADER)	+= capsule-loader.o
+obj-$(CONFIG_UEFI_CPER_ARM)		+= cper-arm.o
+obj-$(CONFIG_UEFI_CPER_X86)		+= cper-x86.o
diff --git a/drivers/firmware/efi/apple-properties.c b/drivers/firmware/efi/apple-properties.c
new file mode 100644
index 0000000..60a9571
--- /dev/null
+++ b/drivers/firmware/efi/apple-properties.c
@@ -0,0 +1,245 @@
+/*
+ * apple-properties.c - EFI device properties on Macs
+ * Copyright (C) 2016 Lukas Wunner <lukas@wunner.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License (version 2) as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * Note: all properties are exposed as u8 arrays.
+ * To get the value of any of them, the caller must use device_property_read_u8_array().
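+ *
+ * For example (illustrative; the property name is hypothetical):
+ *
+ *	u8 blob[16];
+ *	int err = device_property_read_u8_array(dev, "example-property",
+ *						blob, sizeof(blob));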
+ */
+
+#define pr_fmt(fmt) "apple-properties: " fmt
+
+#include <linux/bootmem.h>
+#include <linux/efi.h>
+#include <linux/io.h>
+#include <linux/platform_data/x86/apple.h>
+#include <linux/property.h>
+#include <linux/slab.h>
+#include <linux/ucs2_string.h>
+#include <asm/setup.h>
+
+static bool dump_properties __initdata;
+
+static int __init dump_properties_enable(char *arg)
+{
+	dump_properties = true;
+	return 0;
+}
+
+__setup("dump_apple_properties", dump_properties_enable);
+
+struct dev_header {
+	u32 len;
+	u32 prop_count;
+	struct efi_dev_path path[0];
+	/*
+	 * followed by key/value pairs, each key and value preceded by u32 len,
+	 * len includes itself, value may be empty (in which case its len is 4)
+	 */
+};
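+
+/*
+ * Layout sketch (sizes illustrative): a property "backlight-level"
+ * carrying a two-byte value would follow the device path as
+ *
+ *	u32 key_len;		// 34 == 4 + 15 * sizeof(efi_char16_t)
+ *	efi_char16_t key[15];	// "backlight-level", no NUL terminator
+ *	u32 val_len;		// 6 == 4 + 2
+ *	u8 val[2];
+ */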
+
+struct properties_header {
+	u32 len;
+	u32 version;
+	u32 dev_count;
+	struct dev_header dev_header[0];
+};
+
+static void __init unmarshal_key_value_pairs(struct dev_header *dev_header,
+					     struct device *dev, void *ptr,
+					     struct property_entry entry[])
+{
+	int i;
+
+	for (i = 0; i < dev_header->prop_count; i++) {
+		int remaining = dev_header->len - (ptr - (void *)dev_header);
+		u32 key_len, val_len;
+		char *key;
+
+		if (sizeof(key_len) > remaining)
+			break;
+
+		key_len = *(typeof(key_len) *)ptr;
+		if (key_len + sizeof(val_len) > remaining ||
+		    key_len < sizeof(key_len) + sizeof(efi_char16_t) ||
+		    *(efi_char16_t *)(ptr + sizeof(key_len)) == 0) {
+			dev_err(dev, "invalid property name len at %#zx\n",
+				ptr - (void *)dev_header);
+			break;
+		}
+
+		val_len = *(typeof(val_len) *)(ptr + key_len);
+		if (key_len + val_len > remaining ||
+		    val_len < sizeof(val_len)) {
+			dev_err(dev, "invalid property val len at %#zx\n",
+				ptr - (void *)dev_header + key_len);
+			break;
+		}
+
+		/* 4 bytes to accommodate UTF-8 code points + null byte */
+		key = kzalloc((key_len - sizeof(key_len)) * 4 + 1, GFP_KERNEL);
+		if (!key) {
+			dev_err(dev, "cannot allocate property name\n");
+			break;
+		}
+		ucs2_as_utf8(key, ptr + sizeof(key_len),
+			     key_len - sizeof(key_len));
+
+		entry[i].name = key;
+		entry[i].length = val_len - sizeof(val_len);
+		entry[i].is_array = !!entry[i].length;
+		entry[i].type = DEV_PROP_U8;
+		entry[i].pointer.u8_data = ptr + key_len + sizeof(val_len);
+
+		if (dump_properties) {
+			dev_info(dev, "property: %s\n", entry[i].name);
+			print_hex_dump(KERN_INFO, pr_fmt(), DUMP_PREFIX_OFFSET,
+				16, 1, entry[i].pointer.u8_data,
+				entry[i].length, true);
+		}
+
+		ptr += key_len + val_len;
+	}
+
+	if (i != dev_header->prop_count) {
+		dev_err(dev, "got %d device properties, expected %u\n", i,
+			dev_header->prop_count);
+		print_hex_dump(KERN_ERR, pr_fmt(), DUMP_PREFIX_OFFSET,
+			16, 1, dev_header, dev_header->len, true);
+		return;
+	}
+
+	dev_info(dev, "assigning %d device properties\n", i);
+}
+
+static int __init unmarshal_devices(struct properties_header *properties)
+{
+	size_t offset = offsetof(struct properties_header, dev_header[0]);
+
+	while (offset + sizeof(struct dev_header) < properties->len) {
+		struct dev_header *dev_header = (void *)properties + offset;
+		struct property_entry *entry = NULL;
+		struct device *dev;
+		size_t len;
+		int ret, i;
+		void *ptr;
+
+		if (offset + dev_header->len > properties->len ||
+		    dev_header->len <= sizeof(*dev_header)) {
+			pr_err("invalid len in dev_header at %#zx\n", offset);
+			return -EINVAL;
+		}
+
+		ptr = dev_header->path;
+		len = dev_header->len - sizeof(*dev_header);
+
+		dev = efi_get_device_by_path((struct efi_dev_path **)&ptr, &len);
+		if (IS_ERR(dev)) {
+			pr_err("device path parse error %ld at %#zx:\n",
+			       PTR_ERR(dev), ptr - (void *)dev_header);
+			print_hex_dump(KERN_ERR, pr_fmt(), DUMP_PREFIX_OFFSET,
+			       16, 1, dev_header, dev_header->len, true);
+			dev = NULL;
+			goto skip_device;
+		}
+
+		entry = kcalloc(dev_header->prop_count + 1, sizeof(*entry),
+				GFP_KERNEL);
+		if (!entry) {
+			dev_err(dev, "cannot allocate properties\n");
+			goto skip_device;
+		}
+
+		unmarshal_key_value_pairs(dev_header, dev, ptr, entry);
+		if (!entry[0].name)
+			goto skip_device;
+
+		ret = device_add_properties(dev, entry); /* makes deep copy */
+		if (ret)
+			dev_err(dev, "error %d assigning properties\n", ret);
+
+		for (i = 0; entry[i].name; i++)
+			kfree(entry[i].name);
+
+skip_device:
+		kfree(entry);
+		put_device(dev);
+		offset += dev_header->len;
+	}
+
+	return 0;
+}
+
+static int __init map_properties(void)
+{
+	struct properties_header *properties;
+	struct setup_data *data;
+	u32 data_len;
+	u64 pa_data;
+	int ret;
+
+	if (!x86_apple_machine)
+		return 0;
+
+	pa_data = boot_params.hdr.setup_data;
+	while (pa_data) {
+		data = memremap(pa_data, sizeof(*data), MEMREMAP_WB);
+		if (!data) {
+			pr_err("cannot map setup_data header\n");
+			return -ENOMEM;
+		}
+
+		if (data->type != SETUP_APPLE_PROPERTIES) {
+			pa_data = data->next;
+			memunmap(data);
+			continue;
+		}
+
+		data_len = data->len;
+		memunmap(data);
+
+		data = memremap(pa_data, sizeof(*data) + data_len, MEMREMAP_WB);
+		if (!data) {
+			pr_err("cannot map setup_data payload\n");
+			return -ENOMEM;
+		}
+
+		properties = (struct properties_header *)data->data;
+		if (properties->version != 1) {
+			pr_err("unsupported version:\n");
+			print_hex_dump(KERN_ERR, pr_fmt(), DUMP_PREFIX_OFFSET,
+			       16, 1, properties, data_len, true);
+			ret = -ENOTSUPP;
+		} else if (properties->len != data_len) {
+			pr_err("length mismatch, expected %u\n", data_len);
+			print_hex_dump(KERN_ERR, pr_fmt(), DUMP_PREFIX_OFFSET,
+			       16, 1, properties, data_len, true);
+			ret = -EINVAL;
+		} else
+			ret = unmarshal_devices(properties);
+
+		/*
+		 * Can only free the setup_data payload but not its header
+		 * to avoid breaking the chain of ->next pointers.
+		 */
+		data->len = 0;
+		memunmap(data);
+		free_bootmem_late(pa_data + sizeof(*data), data_len);
+
+		return ret;
+	}
+	return 0;
+}
+
+fs_initcall(map_properties);
diff --git a/drivers/firmware/efi/arm-init.c b/drivers/firmware/efi/arm-init.c
new file mode 100644
index 0000000..1a6a77d
--- /dev/null
+++ b/drivers/firmware/efi/arm-init.c
@@ -0,0 +1,285 @@
+/*
+ * Extensible Firmware Interface
+ *
+ * Based on Extensible Firmware Interface Specification version 2.4
+ *
+ * Copyright (C) 2013 - 2015 Linaro Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#define pr_fmt(fmt)	"efi: " fmt
+
+#include <linux/efi.h>
+#include <linux/init.h>
+#include <linux/memblock.h>
+#include <linux/mm_types.h>
+#include <linux/of.h>
+#include <linux/of_fdt.h>
+#include <linux/platform_device.h>
+#include <linux/screen_info.h>
+
+#include <asm/efi.h>
+
+u64 efi_system_table;
+
+static int __init is_memory(efi_memory_desc_t *md)
+{
+	if (md->attribute & (EFI_MEMORY_WB|EFI_MEMORY_WT|EFI_MEMORY_WC))
+		return 1;
+	return 0;
+}
+
+/*
+ * Translate an EFI virtual address into a physical address: this is necessary,
+ * as some data members of the EFI system table are virtually remapped after
+ * SetVirtualAddressMap() has been called.
+ */
+static phys_addr_t efi_to_phys(unsigned long addr)
+{
+	efi_memory_desc_t *md;
+
+	for_each_efi_memory_desc(md) {
+		if (!(md->attribute & EFI_MEMORY_RUNTIME))
+			continue;
+		if (md->virt_addr == 0)
+			/* no virtual mapping has been installed by the stub */
+			break;
+		if (md->virt_addr <= addr &&
+		    (addr - md->virt_addr) < (md->num_pages << EFI_PAGE_SHIFT))
+			return md->phys_addr + addr - md->virt_addr;
+	}
+	return addr;
+}
+
+static __initdata unsigned long screen_info_table = EFI_INVALID_TABLE_ADDR;
+
+static __initdata efi_config_table_type_t arch_tables[] = {
+	{LINUX_EFI_ARM_SCREEN_INFO_TABLE_GUID, NULL, &screen_info_table},
+	{NULL_GUID, NULL, NULL}
+};
+
+static void __init init_screen_info(void)
+{
+	struct screen_info *si;
+
+	if (screen_info_table != EFI_INVALID_TABLE_ADDR) {
+		si = early_memremap_ro(screen_info_table, sizeof(*si));
+		if (!si) {
+			pr_err("Could not map screen_info config table\n");
+			return;
+		}
+		screen_info = *si;
+		early_memunmap(si, sizeof(*si));
+
+		/* dummycon on ARM needs non-zero values for columns/lines */
+		screen_info.orig_video_cols = 80;
+		screen_info.orig_video_lines = 25;
+	}
+
+	if (screen_info.orig_video_isVGA == VIDEO_TYPE_EFI &&
+	    memblock_is_map_memory(screen_info.lfb_base))
+		memblock_mark_nomap(screen_info.lfb_base, screen_info.lfb_size);
+}
+
+static int __init uefi_init(void)
+{
+	efi_char16_t *c16;
+	void *config_tables;
+	size_t table_size;
+	char vendor[100] = "unknown";
+	int i, retval;
+
+	efi.systab = early_memremap_ro(efi_system_table,
+				       sizeof(efi_system_table_t));
+	if (efi.systab == NULL) {
+		pr_warn("Unable to map EFI system table.\n");
+		return -ENOMEM;
+	}
+
+	set_bit(EFI_BOOT, &efi.flags);
+	if (IS_ENABLED(CONFIG_64BIT))
+		set_bit(EFI_64BIT, &efi.flags);
+
+	/*
+	 * Verify the EFI Table
+	 */
+	if (efi.systab->hdr.signature != EFI_SYSTEM_TABLE_SIGNATURE) {
+		pr_err("System table signature incorrect\n");
+		retval = -EINVAL;
+		goto out;
+	}
+	if ((efi.systab->hdr.revision >> 16) < 2)
+		pr_warn("Warning: EFI system table version %d.%02d, expected 2.00 or greater\n",
+			efi.systab->hdr.revision >> 16,
+			efi.systab->hdr.revision & 0xffff);
+
+	efi.runtime_version = efi.systab->hdr.revision;
+
+	/* Show what we know for posterity */
+	c16 = early_memremap_ro(efi_to_phys(efi.systab->fw_vendor),
+				sizeof(vendor) * sizeof(efi_char16_t));
+	if (c16) {
+		for (i = 0; i < (int) sizeof(vendor) - 1 && *c16; ++i)
+			vendor[i] = c16[i];
+		vendor[i] = '\0';
+		early_memunmap(c16, sizeof(vendor) * sizeof(efi_char16_t));
+	}
+
+	pr_info("EFI v%u.%.02u by %s\n",
+		efi.systab->hdr.revision >> 16,
+		efi.systab->hdr.revision & 0xffff, vendor);
+
+	table_size = sizeof(efi_config_table_64_t) * efi.systab->nr_tables;
+	config_tables = early_memremap_ro(efi_to_phys(efi.systab->tables),
+					  table_size);
+	if (config_tables == NULL) {
+		pr_warn("Unable to map EFI config table array.\n");
+		retval = -ENOMEM;
+		goto out;
+	}
+	retval = efi_config_parse_tables(config_tables, efi.systab->nr_tables,
+					 sizeof(efi_config_table_t),
+					 arch_tables);
+
+	if (!retval)
+		efi.config_table = (unsigned long)efi.systab->tables;
+
+	early_memunmap(config_tables, table_size);
+out:
+	early_memunmap(efi.systab, sizeof(efi_system_table_t));
+	return retval;
+}
+
+/*
+ * Return true for regions that can be used as System RAM.
+ */
+static __init int is_usable_memory(efi_memory_desc_t *md)
+{
+	switch (md->type) {
+	case EFI_LOADER_CODE:
+	case EFI_LOADER_DATA:
+	case EFI_ACPI_RECLAIM_MEMORY:
+	case EFI_BOOT_SERVICES_CODE:
+	case EFI_BOOT_SERVICES_DATA:
+	case EFI_CONVENTIONAL_MEMORY:
+	case EFI_PERSISTENT_MEMORY:
+		/*
+		 * According to the spec, these regions are no longer reserved
+		 * after calling ExitBootServices(). However, we can only use
+		 * them as System RAM if they can be mapped writeback cacheable.
+		 */
+		return (md->attribute & EFI_MEMORY_WB);
+	default:
+		break;
+	}
+	return false;
+}
+
+static __init void reserve_regions(void)
+{
+	efi_memory_desc_t *md;
+	u64 paddr, npages, size;
+
+	if (efi_enabled(EFI_DBG))
+		pr_info("Processing EFI memory map:\n");
+
+	/*
+	 * Discard memblocks discovered so far: if there are any at this
+	 * point, they originate from memory nodes in the DT, and UEFI
+	 * uses its own memory map instead.
+	 */
+	memblock_dump_all();
+	memblock_remove(0, PHYS_ADDR_MAX);
+
+	for_each_efi_memory_desc(md) {
+		paddr = md->phys_addr;
+		npages = md->num_pages;
+
+		if (efi_enabled(EFI_DBG)) {
+			char buf[64];
+
+			pr_info("  0x%012llx-0x%012llx %s\n",
+				paddr, paddr + (npages << EFI_PAGE_SHIFT) - 1,
+				efi_md_typeattr_format(buf, sizeof(buf), md));
+		}
+
+		memrange_efi_to_native(&paddr, &npages);
+		size = npages << PAGE_SHIFT;
+
+		if (is_memory(md)) {
+			early_init_dt_add_memory_arch(paddr, size);
+
+			if (!is_usable_memory(md))
+				memblock_mark_nomap(paddr, size);
+
+			/* keep ACPI reclaim memory intact for kexec etc. */
+			if (md->type == EFI_ACPI_RECLAIM_MEMORY)
+				memblock_reserve(paddr, size);
+		}
+	}
+}
+
+void __init efi_init(void)
+{
+	struct efi_memory_map_data data;
+	struct efi_fdt_params params;
+
+	/* Grab UEFI information placed in FDT by stub */
+	if (!efi_get_fdt_params(&params))
+		return;
+
+	efi_system_table = params.system_table;
+
+	data.desc_version = params.desc_ver;
+	data.desc_size = params.desc_size;
+	data.size = params.mmap_size;
+	data.phys_map = params.mmap;
+
+	if (efi_memmap_init_early(&data) < 0) {
+		/*
+		 * If we are booting via UEFI, the UEFI memory map is the only
+		 * description of memory we have, so there is little point in
+		 * proceeding if we cannot access it.
+		 */
+		panic("Unable to map EFI memory map.\n");
+	}
+
+	WARN(efi.memmap.desc_version != 1,
+	     "Unexpected EFI_MEMORY_DESCRIPTOR version %ld",
+	      efi.memmap.desc_version);
+
+	if (uefi_init() < 0) {
+		efi_memmap_unmap();
+		return;
+	}
+
+	reserve_regions();
+	efi_esrt_init();
+
+	memblock_reserve(params.mmap & PAGE_MASK,
+			 PAGE_ALIGN(params.mmap_size +
+				    (params.mmap & ~PAGE_MASK)));
+
+	init_screen_info();
+
+	/* ARM does not permit early mappings to persist across paging_init() */
+	if (IS_ENABLED(CONFIG_ARM))
+		efi_memmap_unmap();
+}
+
+static int __init register_gop_device(void)
+{
+	void *pd;
+
+	if (screen_info.orig_video_isVGA != VIDEO_TYPE_EFI)
+		return 0;
+
+	pd = platform_device_register_data(NULL, "efi-framebuffer", 0,
+					   &screen_info, sizeof(screen_info));
+	return PTR_ERR_OR_ZERO(pd);
+}
+subsys_initcall(register_gop_device);
diff --git a/drivers/firmware/efi/arm-runtime.c b/drivers/firmware/efi/arm-runtime.c
new file mode 100644
index 0000000..a00934d
--- /dev/null
+++ b/drivers/firmware/efi/arm-runtime.c
@@ -0,0 +1,177 @@
+/*
+ * Extensible Firmware Interface
+ *
+ * Based on Extensible Firmware Interface Specification version 2.4
+ *
+ * Copyright (C) 2013, 2014 Linaro Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/dmi.h>
+#include <linux/efi.h>
+#include <linux/io.h>
+#include <linux/memblock.h>
+#include <linux/mm_types.h>
+#include <linux/preempt.h>
+#include <linux/rbtree.h>
+#include <linux/rwsem.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+
+#include <asm/cacheflush.h>
+#include <asm/efi.h>
+#include <asm/mmu.h>
+#include <asm/pgalloc.h>
+#include <asm/pgtable.h>
+
+extern u64 efi_system_table;
+
+#ifdef CONFIG_ARM64_PTDUMP_DEBUGFS
+#include <asm/ptdump.h>
+
+static struct ptdump_info efi_ptdump_info = {
+	.mm		= &efi_mm,
+	.markers	= (struct addr_marker[]){
+		{ 0,		"UEFI runtime start" },
+		{ TASK_SIZE_64,	"UEFI runtime end" }
+	},
+	.base_addr	= 0,
+};
+
+static int __init ptdump_init(void)
+{
+	if (!efi_enabled(EFI_RUNTIME_SERVICES))
+		return 0;
+
+	return ptdump_debugfs_register(&efi_ptdump_info, "efi_page_tables");
+}
+device_initcall(ptdump_init);
+
+#endif
+
+static bool __init efi_virtmap_init(void)
+{
+	efi_memory_desc_t *md;
+	bool systab_found;
+
+	efi_mm.pgd = pgd_alloc(&efi_mm);
+	mm_init_cpumask(&efi_mm);
+	init_new_context(NULL, &efi_mm);
+
+	systab_found = false;
+	for_each_efi_memory_desc(md) {
+		phys_addr_t phys = md->phys_addr;
+		int ret;
+
+		if (!(md->attribute & EFI_MEMORY_RUNTIME))
+			continue;
+		if (md->virt_addr == 0)
+			return false;
+
+		ret = efi_create_mapping(&efi_mm, md);
+		if (ret) {
+			pr_warn("  EFI remap %pa: failed to create mapping (%d)\n",
+				&phys, ret);
+			return false;
+		}
+		/*
+		 * If this entry covers the address of the UEFI system table,
+		 * calculate and record its virtual address.
+		 */
+		if (efi_system_table >= phys &&
+		    efi_system_table < phys + (md->num_pages * EFI_PAGE_SIZE)) {
+			efi.systab = (void *)(unsigned long)(efi_system_table -
+							     phys + md->virt_addr);
+			systab_found = true;
+		}
+	}
+	if (!systab_found) {
+		pr_err("No virtual mapping found for the UEFI System Table\n");
+		return false;
+	}
+
+	if (efi_memattr_apply_permissions(&efi_mm, efi_set_mapping_permissions))
+		return false;
+
+	return true;
+}
+
+/*
+ * Enable the UEFI Runtime Services if all prerequisites are in place, i.e.,
+ * non-early mapping of the UEFI system table and virtual mappings for all
+ * EFI_MEMORY_RUNTIME regions.
+ */
+static int __init arm_enable_runtime_services(void)
+{
+	u64 mapsize;
+
+	if (!efi_enabled(EFI_BOOT)) {
+		pr_info("EFI services will not be available.\n");
+		return 0;
+	}
+
+	efi_memmap_unmap();
+
+	mapsize = efi.memmap.desc_size * efi.memmap.nr_map;
+
+	if (efi_memmap_init_late(efi.memmap.phys_map, mapsize)) {
+		pr_err("Failed to remap EFI memory map\n");
+		return 0;
+	}
+
+	if (efi_runtime_disabled()) {
+		pr_info("EFI runtime services will be disabled.\n");
+		return 0;
+	}
+
+	if (efi_enabled(EFI_RUNTIME_SERVICES)) {
+		pr_info("EFI runtime services access via paravirt.\n");
+		return 0;
+	}
+
+	pr_info("Remapping and enabling EFI services.\n");
+
+	if (!efi_virtmap_init()) {
+		pr_err("UEFI virtual mapping missing or invalid -- runtime services will not be available\n");
+		return -ENOMEM;
+	}
+
+	/* Set up runtime services function pointers */
+	efi_native_runtime_setup();
+	set_bit(EFI_RUNTIME_SERVICES, &efi.flags);
+
+	return 0;
+}
+early_initcall(arm_enable_runtime_services);
+
+void efi_virtmap_load(void)
+{
+	preempt_disable();
+	efi_set_pgd(&efi_mm);
+}
+
+void efi_virtmap_unload(void)
+{
+	efi_set_pgd(current->active_mm);
+	preempt_enable();
+}
+
+
+static int __init arm_dmi_init(void)
+{
+	/*
+	 * On arm64/ARM, DMI depends on UEFI, and dmi_scan_machine() needs to
+	 * be called early because dmi_id_init(), which is an arch_initcall
+	 * itself, depends on dmi_scan_machine() having been called already.
+	 */
+	dmi_scan_machine();
+	if (dmi_available)
+		dmi_set_dump_stack_arch_desc();
+	return 0;
+}
+core_initcall(arm_dmi_init);
diff --git a/drivers/firmware/efi/capsule-loader.c b/drivers/firmware/efi/capsule-loader.c
new file mode 100644
index 0000000..9668898
--- /dev/null
+++ b/drivers/firmware/efi/capsule-loader.c
@@ -0,0 +1,361 @@
+/*
+ * EFI capsule loader driver.
+ *
+ * Copyright 2015 Intel Corporation
+ *
+ * This file is part of the Linux kernel, and is made available under
+ * the terms of the GNU General Public License version 2.
+ */
+
+#define pr_fmt(fmt) "efi: " fmt
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/miscdevice.h>
+#include <linux/highmem.h>
+#include <linux/slab.h>
+#include <linux/mutex.h>
+#include <linux/efi.h>
+#include <linux/vmalloc.h>
+
+#define NO_FURTHER_WRITE_ACTION -1
+
+/**
+ * efi_free_all_buff_pages - free all previously allocated buffer pages
+ * @cap_info: pointer to current instance of capsule_info structure
+ *
+ *	In addition to freeing buffer pages, it flags NO_FURTHER_WRITE_ACTION
+ *	to cease processing data in subsequent write(2) calls until close(2)
+ *	is called.
+ **/
+static void efi_free_all_buff_pages(struct capsule_info *cap_info)
+{
+	while (cap_info->index > 0)
+		__free_page(cap_info->pages[--cap_info->index]);
+
+	cap_info->index = NO_FURTHER_WRITE_ACTION;
+}
+
+int __efi_capsule_setup_info(struct capsule_info *cap_info)
+{
+	size_t pages_needed;
+	int ret;
+	void *temp_page;
+
+	pages_needed = ALIGN(cap_info->total_size, PAGE_SIZE) / PAGE_SIZE;
+
+	if (pages_needed == 0) {
+		pr_err("invalid capsule size\n");
+		return -EINVAL;
+	}
+
+	/* Check if the capsule binary is supported */
+	ret = efi_capsule_supported(cap_info->header.guid,
+				    cap_info->header.flags,
+				    cap_info->header.imagesize,
+				    &cap_info->reset_type);
+	if (ret) {
+		pr_err("capsule not supported\n");
+		return ret;
+	}
+
+	temp_page = krealloc(cap_info->pages,
+			     pages_needed * sizeof(void *),
+			     GFP_KERNEL | __GFP_ZERO);
+	if (!temp_page)
+		return -ENOMEM;
+
+	cap_info->pages = temp_page;
+
+	temp_page = krealloc(cap_info->phys,
+			     pages_needed * sizeof(*cap_info->phys),
+			     GFP_KERNEL | __GFP_ZERO);
+	if (!temp_page)
+		return -ENOMEM;
+
+	cap_info->phys = temp_page;
+
+	return 0;
+}
+
+/**
+ * efi_capsule_setup_info - obtain the efi capsule header in the binary and
+ *			    set up the capsule_info structure
+ * @cap_info: pointer to current instance of capsule_info structure
+ * @kbuff: a mapped first page buffer pointer
+ * @hdr_bytes: the total number of bytes received so far for the efi header
+ *
+ * Platforms with non-standard capsule update mechanisms can override
+ * this __weak function so they can perform any required capsule
+ * image munging. See quark_quirk_function() for an example.
+ **/
+int __weak efi_capsule_setup_info(struct capsule_info *cap_info, void *kbuff,
+				  size_t hdr_bytes)
+{
+	/* Only process a data block that is larger than the efi header size */
+	if (hdr_bytes < sizeof(efi_capsule_header_t))
+		return 0;
+
+	memcpy(&cap_info->header, kbuff, sizeof(cap_info->header));
+	cap_info->total_size = cap_info->header.imagesize;
+
+	return __efi_capsule_setup_info(cap_info);
+}
+
+/**
+ * efi_capsule_submit_update - invoke the efi_capsule_update API once the
+ *			       binary upload is done
+ * @cap_info: pointer to current instance of capsule_info structure
+ **/
+static ssize_t efi_capsule_submit_update(struct capsule_info *cap_info)
+{
+	bool do_vunmap = false;
+	int ret;
+
+	/*
+	 * cap_info->capsule may have been assigned already by a quirk
+	 * handler, so only overwrite it if it is NULL
+	 */
+	if (!cap_info->capsule) {
+		cap_info->capsule = vmap(cap_info->pages, cap_info->index,
+					 VM_MAP, PAGE_KERNEL);
+		if (!cap_info->capsule)
+			return -ENOMEM;
+		do_vunmap = true;
+	}
+
+	ret = efi_capsule_update(cap_info->capsule, cap_info->phys);
+	if (do_vunmap)
+		vunmap(cap_info->capsule);
+	if (ret) {
+		pr_err("capsule update failed\n");
+		return ret;
+	}
+
+	/* Indicate capsule binary uploading is done */
+	cap_info->index = NO_FURTHER_WRITE_ACTION;
+
+	if (cap_info->header.flags & EFI_CAPSULE_PERSIST_ACROSS_RESET) {
+		pr_info("Successfully uploaded capsule file with reboot type '%s'\n",
+			!cap_info->reset_type ? "RESET_COLD" :
+			cap_info->reset_type == 1 ? "RESET_WARM" :
+			"RESET_SHUTDOWN");
+	} else {
+		pr_info("Successfully processed capsule file\n");
+	}
+
+	return 0;
+}
+
+/**
+ * efi_capsule_write - store the capsule binary and pass it to
+ *		       efi_capsule_update() API
+ * @file: file pointer
+ * @buff: buffer pointer
+ * @count: number of bytes in @buff
+ * @offp: not used
+ *
+ *	Expectations:
+ *	- A user space tool should start at the beginning of the capsule
+ *	  binary and pass data in sequentially.
+ *	- Users should close and re-open this file node in order to upload
+ *	  more capsules.
+ *	- After an error is returned, the user should close the file and
+ *	  restart the operation for the next try, otherwise -EIO will be
+ *	  returned until the file is closed.
+ *	- An EFI capsule header must be located at the beginning of the
+ *	  capsule binary file and passed in as the first block of data of
+ *	  the write operation.
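+ *
+ *	A minimal usage sketch from user space (illustrative):
+ *
+ *	  cat capsule.bin > /dev/efi_capsule_loader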
+ **/
+static ssize_t efi_capsule_write(struct file *file, const char __user *buff,
+				 size_t count, loff_t *offp)
+{
+	int ret = 0;
+	struct capsule_info *cap_info = file->private_data;
+	struct page *page;
+	void *kbuff = NULL;
+	size_t write_byte;
+
+	if (count == 0)
+		return 0;
+
+	/* Return error while NO_FURTHER_WRITE_ACTION is flagged */
+	if (cap_info->index < 0)
+		return -EIO;
+
+	/* Only alloc a new page when previous page is full */
+	if (!cap_info->page_bytes_remain) {
+		page = alloc_page(GFP_KERNEL);
+		if (!page) {
+			ret = -ENOMEM;
+			goto failed;
+		}
+
+		cap_info->pages[cap_info->index] = page;
+		cap_info->phys[cap_info->index] = page_to_phys(page);
+		cap_info->page_bytes_remain = PAGE_SIZE;
+		cap_info->index++;
+	} else {
+		page = cap_info->pages[cap_info->index - 1];
+	}
+
+	kbuff = kmap(page);
+	kbuff += PAGE_SIZE - cap_info->page_bytes_remain;
+
+	/* Copy capsule binary data from user space to kernel space buffer */
+	write_byte = min_t(size_t, count, cap_info->page_bytes_remain);
+	if (copy_from_user(kbuff, buff, write_byte)) {
+		ret = -EFAULT;
+		goto fail_unmap;
+	}
+	cap_info->page_bytes_remain -= write_byte;
+
+	/* Setup capsule binary info structure */
+	if (cap_info->header.headersize == 0) {
+		ret = efi_capsule_setup_info(cap_info, kbuff - cap_info->count,
+					     cap_info->count + write_byte);
+		if (ret)
+			goto fail_unmap;
+	}
+
+	cap_info->count += write_byte;
+	kunmap(page);
+
+	/* Submit the full binary to efi_capsule_update() API */
+	if (cap_info->header.headersize > 0 &&
+	    cap_info->count >= cap_info->total_size) {
+		if (cap_info->count > cap_info->total_size) {
+			pr_err("capsule upload size exceeded header defined size\n");
+			ret = -EINVAL;
+			goto failed;
+		}
+
+		ret = efi_capsule_submit_update(cap_info);
+		if (ret)
+			goto failed;
+	}
+
+	return write_byte;
+
+fail_unmap:
+	kunmap(page);
+failed:
+	efi_free_all_buff_pages(cap_info);
+	return ret;
+}
+
+/**
+ * efi_capsule_flush - called by file close or file flush
+ * @file: file pointer
+ * @id: not used
+ *
+ *	If a capsule is being partially uploaded, then calling this function
+ *	is treated as upload termination: the buffer pages accumulated so far
+ *	are freed and -ECANCELED is returned.
+ **/
+static int efi_capsule_flush(struct file *file, fl_owner_t id)
+{
+	int ret = 0;
+	struct capsule_info *cap_info = file->private_data;
+
+	if (cap_info->index > 0) {
+		pr_err("capsule upload not complete\n");
+		efi_free_all_buff_pages(cap_info);
+		ret = -ECANCELED;
+	}
+
+	return ret;
+}
+
+/**
+ * efi_capsule_release - called by file close
+ * @inode: not used
+ * @file: file pointer
+ *
+ *	We will not free successfully submitted pages since the EFI update
+ *	requires the data to be maintained across system reboot.
+ **/
+static int efi_capsule_release(struct inode *inode, struct file *file)
+{
+	struct capsule_info *cap_info = file->private_data;
+
+	kfree(cap_info->pages);
+	kfree(cap_info->phys);
+	kfree(file->private_data);
+	file->private_data = NULL;
+	return 0;
+}
+
+/**
+ * efi_capsule_open - called by file open
+ * @inode: not used
+ * @file: file pointer
+ *
+ *	Will allocate a capsule_info structure for each file open call.
+ *	This provides the capability to support multiple file opens, so
+ *	a user does not need to wait for others to finish in order to
+ *	upload their capsule binary.
+ **/
+static int efi_capsule_open(struct inode *inode, struct file *file)
+{
+	struct capsule_info *cap_info;
+
+	cap_info = kzalloc(sizeof(*cap_info), GFP_KERNEL);
+	if (!cap_info)
+		return -ENOMEM;
+
+	cap_info->pages = kzalloc(sizeof(void *), GFP_KERNEL);
+	if (!cap_info->pages) {
+		kfree(cap_info);
+		return -ENOMEM;
+	}
+
+	cap_info->phys = kzalloc(sizeof(phys_addr_t), GFP_KERNEL);
+	if (!cap_info->phys) {
+		kfree(cap_info->pages);
+		kfree(cap_info);
+		return -ENOMEM;
+	}
+
+	file->private_data = cap_info;
+
+	return 0;
+}
+
+static const struct file_operations efi_capsule_fops = {
+	.owner = THIS_MODULE,
+	.open = efi_capsule_open,
+	.write = efi_capsule_write,
+	.flush = efi_capsule_flush,
+	.release = efi_capsule_release,
+	.llseek = no_llseek,
+};
+
+static struct miscdevice efi_capsule_misc = {
+	.minor = MISC_DYNAMIC_MINOR,
+	.name = "efi_capsule_loader",
+	.fops = &efi_capsule_fops,
+};
+
+static int __init efi_capsule_loader_init(void)
+{
+	int ret;
+
+	if (!efi_enabled(EFI_RUNTIME_SERVICES))
+		return -ENODEV;
+
+	ret = misc_register(&efi_capsule_misc);
+	if (ret)
+		pr_err("Unable to register capsule loader device\n");
+
+	return ret;
+}
+module_init(efi_capsule_loader_init);
+
+static void __exit efi_capsule_loader_exit(void)
+{
+	misc_deregister(&efi_capsule_misc);
+}
+module_exit(efi_capsule_loader_exit);
+
+MODULE_DESCRIPTION("EFI capsule firmware binary loader");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/firmware/efi/capsule.c b/drivers/firmware/efi/capsule.c
new file mode 100644
index 0000000..4938c29
--- /dev/null
+++ b/drivers/firmware/efi/capsule.c
@@ -0,0 +1,305 @@
+/*
+ * EFI capsule support.
+ *
+ * Copyright 2013 Intel Corporation; author Matt Fleming
+ *
+ * This file is part of the Linux kernel, and is made available under
+ * the terms of the GNU General Public License version 2.
+ */
+
+#define pr_fmt(fmt) "efi: " fmt
+
+#include <linux/slab.h>
+#include <linux/mutex.h>
+#include <linux/highmem.h>
+#include <linux/efi.h>
+#include <linux/vmalloc.h>
+#include <asm/io.h>
+
+typedef struct {
+	u64 length;
+	u64 data;
+} efi_capsule_block_desc_t;
+
+static bool capsule_pending;
+static bool stop_capsules;
+static int efi_reset_type = -1;
+
+/*
+ * capsule_mutex serialises access to capsule_pending, efi_reset_type
+ * and stop_capsules.
+ */
+static DEFINE_MUTEX(capsule_mutex);
+
+/**
+ * efi_capsule_pending - has a capsule been passed to the firmware?
+ * @reset_type: store the type of EFI reset if capsule is pending
+ *
+ * To ensure that the registered capsule is processed correctly by the
+ * firmware we need to perform a specific type of reset. If a capsule is
+ * pending return the reset type in @reset_type.
+ *
+ * This function will race with callers of efi_capsule_update(): for
+ * example, calling this function while somebody else is in
+ * efi_capsule_update() but hasn't yet reached efi_capsule_update_locked()
+ * will miss the updates to capsule_pending and efi_reset_type made after
+ * efi_capsule_update_locked() completes.
+ *
+ * A non-racy use is from platform reboot code because we use
+ * system_state to ensure no capsules can be sent to the firmware once
+ * we're at SYSTEM_RESTART. See efi_capsule_update_locked().
+ */
+bool efi_capsule_pending(int *reset_type)
+{
+	if (!capsule_pending)
+		return false;
+
+	if (reset_type)
+		*reset_type = efi_reset_type;
+
+	return true;
+}
+
+/*
+ * Whitelist of EFI capsule flags that we support.
+ *
+ * We do not handle EFI_CAPSULE_INITIATE_RESET because that would
+ * require us to prepare the kernel for reboot. Refuse to load any
+ * capsules with that flag and any other flags that we do not know how
+ * to handle.
+ */
+#define EFI_CAPSULE_SUPPORTED_FLAG_MASK			\
+	(EFI_CAPSULE_PERSIST_ACROSS_RESET | EFI_CAPSULE_POPULATE_SYSTEM_TABLE)
+
+/**
+ * efi_capsule_supported - does the firmware support the capsule?
+ * @guid: vendor guid of capsule
+ * @flags: capsule flags
+ * @size: size of capsule data
+ * @reset: the reset type required for this capsule
+ *
+ * Check whether a capsule with @flags is supported by the firmware
+ * and that @size doesn't exceed the maximum size for a capsule.
+ *
+ * No attempt is made to check @reset against the reset type required
+ * by any pending capsules because of the races involved.
+ */
+int efi_capsule_supported(efi_guid_t guid, u32 flags, size_t size, int *reset)
+{
+	efi_capsule_header_t capsule;
+	efi_capsule_header_t *cap_list[] = { &capsule };
+	efi_status_t status;
+	u64 max_size;
+
+	if (flags & ~EFI_CAPSULE_SUPPORTED_FLAG_MASK)
+		return -EINVAL;
+
+	capsule.headersize = capsule.imagesize = sizeof(capsule);
+	memcpy(&capsule.guid, &guid, sizeof(efi_guid_t));
+	capsule.flags = flags;
+
+	status = efi.query_capsule_caps(cap_list, 1, &max_size, reset);
+	if (status != EFI_SUCCESS)
+		return efi_status_to_err(status);
+
+	if (size > max_size)
+		return -ENOSPC;
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(efi_capsule_supported);
+
+/*
+ * Every scatter gather list (block descriptor) page must end with a
+ * continuation pointer. The last continuation pointer of the last
+ * page must be zero to mark the end of the chain.
+ */
+#define SGLIST_PER_PAGE	((PAGE_SIZE / sizeof(efi_capsule_block_desc_t)) - 1)
+
+/*
+ * How many scatter gather list (block descriptor) pages do we need
+ * to map @count pages?
+ */
+static inline unsigned int sg_pages_num(unsigned int count)
+{
+	return DIV_ROUND_UP(count, SGLIST_PER_PAGE);
+}
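+
+/*
+ * Example, assuming 4 KiB pages: each scatter gather page holds 255
+ * block descriptors plus one continuation pointer (16 bytes each), so a
+ * 1 MiB capsule spanning 256 data pages needs sg_pages_num(256) == 2
+ * descriptor pages.
+ */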
+
+/**
+ * efi_capsule_update_locked - pass a single capsule to the firmware
+ * @capsule: capsule to send to the firmware
+ * @sg_pages: array of scatter gather (block descriptor) pages
+ * @reset: the reset type required for @capsule
+ *
+ * Since this function must be called under capsule_mutex, check
+ * whether efi_reset_type will conflict with @reset, and atomically
+ * set it and capsule_pending if a capsule was successfully sent to
+ * the firmware.
+ *
+ * We also check to see if the system is about to restart, and if so,
+ * abort. This avoids races between efi_capsule_update() and
+ * efi_capsule_pending().
+ */
+static int
+efi_capsule_update_locked(efi_capsule_header_t *capsule,
+			  struct page **sg_pages, int reset)
+{
+	efi_physical_addr_t sglist_phys;
+	efi_status_t status;
+
+	lockdep_assert_held(&capsule_mutex);
+
+	/*
+	 * If someone has already registered a capsule that requires a
+	 * different reset type, we're out of luck and must abort.
+	 */
+	if (efi_reset_type >= 0 && efi_reset_type != reset) {
+		pr_err("Conflicting capsule reset type %d (%d).\n",
+		       reset, efi_reset_type);
+		return -EINVAL;
+	}
+
+	/*
+	 * If the system is getting ready to restart it may have
+	 * called efi_capsule_pending() to make decisions (such as
+	 * whether to force an EFI reboot), and we're racing against
+	 * that call. Abort in that case.
+	 */
+	if (unlikely(stop_capsules)) {
+		pr_warn("Capsule update raced with reboot, aborting.\n");
+		return -EINVAL;
+	}
+
+	sglist_phys = page_to_phys(sg_pages[0]);
+
+	status = efi.update_capsule(&capsule, 1, sglist_phys);
+	if (status == EFI_SUCCESS) {
+		capsule_pending = true;
+		efi_reset_type = reset;
+	}
+
+	return efi_status_to_err(status);
+}
+
+/**
+ * efi_capsule_update - send a capsule to the firmware
+ * @capsule: capsule to send to firmware
+ * @pages: an array of capsule data pages
+ *
+ * Build a scatter gather list with EFI capsule block descriptors to
+ * map the capsule described by @capsule with its data in @pages and
+ * send it to the firmware via the UpdateCapsule() runtime service.
+ *
+ * @capsule must be a virtual mapping of the complete capsule update in the
+ * kernel address space, as the capsule can be consumed immediately.
+ * A capsule_header_t that describes the entire contents of the capsule
+ * must be at the start of the first data page.
+ *
+ * Even though this function validates that the firmware supports the
+ * capsule GUID, users will likely want to check that
+ * efi_capsule_supported() returns 0 before calling this function,
+ * because that makes it easier to print helpful error messages.
+ *
+ * If the capsule is successfully submitted to the firmware, any
+ * subsequent calls to efi_capsule_pending() will return true. @pages
+ * must not be released or modified if this function returns
+ * successfully.
+ *
+ * Callers must be prepared for this function to fail, which can
+ * happen if we raced with system reboot or if there is already a
+ * pending capsule that has a reset type that conflicts with the one
+ * required by @capsule. Do NOT use efi_capsule_pending() to detect
+ * this conflict since that would be racy. Instead, submit the capsule
+ * to efi_capsule_update() and check the return value.
+ *
+ * Return 0 on success, a converted EFI status code on failure.
+ */
+int efi_capsule_update(efi_capsule_header_t *capsule, phys_addr_t *pages)
+{
+	u32 imagesize = capsule->imagesize;
+	efi_guid_t guid = capsule->guid;
+	unsigned int count, sg_count;
+	u32 flags = capsule->flags;
+	struct page **sg_pages;
+	int rv, reset_type;
+	int i, j;
+
+	rv = efi_capsule_supported(guid, flags, imagesize, &reset_type);
+	if (rv)
+		return rv;
+
+	count = DIV_ROUND_UP(imagesize, PAGE_SIZE);
+	sg_count = sg_pages_num(count);
+
+	sg_pages = kcalloc(sg_count, sizeof(*sg_pages), GFP_KERNEL);
+	if (!sg_pages)
+		return -ENOMEM;
+
+	for (i = 0; i < sg_count; i++) {
+		sg_pages[i] = alloc_page(GFP_KERNEL);
+		if (!sg_pages[i]) {
+			rv = -ENOMEM;
+			goto out;
+		}
+	}
+
+	for (i = 0; i < sg_count; i++) {
+		efi_capsule_block_desc_t *sglist;
+
+		sglist = kmap(sg_pages[i]);
+
+		for (j = 0; j < SGLIST_PER_PAGE && count > 0; j++) {
+			u64 sz = min_t(u64, imagesize,
+				       PAGE_SIZE - (u64)*pages % PAGE_SIZE);
+
+			sglist[j].length = sz;
+			sglist[j].data = *pages++;
+
+			imagesize -= sz;
+			count--;
+		}
+
+		/* Continuation pointer */
+		sglist[j].length = 0;
+
+		if (i + 1 == sg_count)
+			sglist[j].data = 0;
+		else
+			sglist[j].data = page_to_phys(sg_pages[i + 1]);
+
+		kunmap(sg_pages[i]);
+	}
+
+	mutex_lock(&capsule_mutex);
+	rv = efi_capsule_update_locked(capsule, sg_pages, reset_type);
+	mutex_unlock(&capsule_mutex);
+
+out:
+	for (i = 0; rv && i < sg_count; i++) {
+		if (sg_pages[i])
+			__free_page(sg_pages[i]);
+	}
+
+	kfree(sg_pages);
+	return rv;
+}
+EXPORT_SYMBOL_GPL(efi_capsule_update);
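+
+/*
+ * A minimal usage sketch for efi_capsule_update(); the variable names
+ * are illustrative and all error handling is elided. The image is
+ * copied into freshly allocated pages, vmap() provides the contiguous
+ * virtual mapping of the complete capsule that @capsule requires, and
+ * the per-page physical addresses are passed in @pages:
+ *
+ *	unsigned int i, count = DIV_ROUND_UP(size, PAGE_SIZE);
+ *	struct page **pages = kcalloc(count, sizeof(*pages), GFP_KERNEL);
+ *	phys_addr_t *phys = kcalloc(count, sizeof(*phys), GFP_KERNEL);
+ *	efi_capsule_header_t *header;
+ *
+ *	for (i = 0; i < count; i++) {
+ *		pages[i] = alloc_page(GFP_KERNEL);
+ *		phys[i] = page_to_phys(pages[i]);
+ *	}
+ *	header = vmap(pages, count, 0, PAGE_KERNEL);
+ *	memcpy(header, image, size);
+ *	ret = efi_capsule_update(header, phys);
+ */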
+
+static int capsule_reboot_notify(struct notifier_block *nb, unsigned long event, void *cmd)
+{
+	mutex_lock(&capsule_mutex);
+	stop_capsules = true;
+	mutex_unlock(&capsule_mutex);
+
+	return NOTIFY_DONE;
+}
+
+static struct notifier_block capsule_reboot_nb = {
+	.notifier_call = capsule_reboot_notify,
+};
+
+static int __init capsule_reboot_register(void)
+{
+	return register_reboot_notifier(&capsule_reboot_nb);
+}
+core_initcall(capsule_reboot_register);
diff --git a/drivers/firmware/efi/cper-arm.c b/drivers/firmware/efi/cper-arm.c
new file mode 100644
index 0000000..5028113
--- /dev/null
+++ b/drivers/firmware/efi/cper-arm.c
@@ -0,0 +1,354 @@
+/*
+ * UEFI Common Platform Error Record (CPER) support
+ *
+ * Copyright (C) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/time.h>
+#include <linux/cper.h>
+#include <linux/dmi.h>
+#include <linux/acpi.h>
+#include <linux/pci.h>
+#include <linux/aer.h>
+#include <linux/printk.h>
+#include <linux/bcd.h>
+#include <acpi/ghes.h>
+#include <ras/ras_event.h>
+
+static const char * const arm_reg_ctx_strs[] = {
+	"AArch32 general purpose registers",
+	"AArch32 EL1 context registers",
+	"AArch32 EL2 context registers",
+	"AArch32 secure context registers",
+	"AArch64 general purpose registers",
+	"AArch64 EL1 context registers",
+	"AArch64 EL2 context registers",
+	"AArch64 EL3 context registers",
+	"Misc. system register structure",
+};
+
+static const char * const arm_err_trans_type_strs[] = {
+	"Instruction",
+	"Data Access",
+	"Generic",
+};
+
+static const char * const arm_bus_err_op_strs[] = {
+	"Generic error (type cannot be determined)",
+	"Generic read (type of instruction or data request cannot be determined)",
+	"Generic write (type of instruction of data request cannot be determined)",
+	"Data read",
+	"Data write",
+	"Instruction fetch",
+	"Prefetch",
+};
+
+static const char * const arm_cache_err_op_strs[] = {
+	"Generic error (type cannot be determined)",
+	"Generic read (type of instruction or data request cannot be determined)",
+	"Generic write (type of instruction of data request cannot be determined)",
+	"Data read",
+	"Data write",
+	"Instruction fetch",
+	"Prefetch",
+	"Eviction",
+	"Snooping (processor initiated a cache snoop that resulted in an error)",
+	"Snooped (processor raised a cache error caused by another processor or device snooping its cache)",
+	"Management",
+};
+
+static const char * const arm_tlb_err_op_strs[] = {
+	"Generic error (type cannot be determined)",
+	"Generic read (type of instruction or data request cannot be determined)",
+	"Generic write (type of instruction of data request cannot be determined)",
+	"Data read",
+	"Data write",
+	"Instruction fetch",
+	"Prefetch",
+	"Local management operation (processor initiated a TLB management operation that resulted in an error)",
+	"External management operation (processor raised a TLB error caused by another processor or device broadcasting TLB operations)",
+};
+
+static const char * const arm_bus_err_part_type_strs[] = {
+	"Local processor originated request",
+	"Local processor responded to request",
+	"Local processor observed",
+	"Generic",
+};
+
+static const char * const arm_bus_err_addr_space_strs[] = {
+	"External Memory Access",
+	"Internal Memory Access",
+	"Unknown",
+	"Device Memory Access",
+};
+
+static void cper_print_arm_err_info(const char *pfx, u32 type,
+				    u64 error_info)
+{
+	u8 trans_type, op_type, level, participation_type, address_space;
+	u16 mem_attributes;
+	bool proc_context_corrupt, corrected, precise_pc, restartable_pc;
+	bool time_out, access_mode;
+
+	/* If the type is unknown, bail. */
+	if (type > CPER_ARM_MAX_TYPE)
+		return;
+
+	/*
+	 * Vendor type errors have error information values that are vendor
+	 * specific.
+	 */
+	if (type == CPER_ARM_VENDOR_ERROR)
+		return;
+
+	if (error_info & CPER_ARM_ERR_VALID_TRANSACTION_TYPE) {
+		trans_type = ((error_info >> CPER_ARM_ERR_TRANSACTION_SHIFT)
+			      & CPER_ARM_ERR_TRANSACTION_MASK);
+		if (trans_type < ARRAY_SIZE(arm_err_trans_type_strs)) {
+			printk("%stransaction type: %s\n", pfx,
+			       arm_err_trans_type_strs[trans_type]);
+		}
+	}
+
+	if (error_info & CPER_ARM_ERR_VALID_OPERATION_TYPE) {
+		op_type = ((error_info >> CPER_ARM_ERR_OPERATION_SHIFT)
+			   & CPER_ARM_ERR_OPERATION_MASK);
+		switch (type) {
+		case CPER_ARM_CACHE_ERROR:
+			if (op_type < ARRAY_SIZE(arm_cache_err_op_strs)) {
+				printk("%soperation type: %s\n", pfx,
+				       arm_cache_err_op_strs[op_type]);
+			}
+			break;
+		case CPER_ARM_TLB_ERROR:
+			if (op_type < ARRAY_SIZE(arm_tlb_err_op_strs)) {
+				printk("%soperation type: %s\n", pfx,
+				       arm_tlb_err_op_strs[op_type]);
+			}
+			break;
+		case CPER_ARM_BUS_ERROR:
+			if (op_type < ARRAY_SIZE(arm_bus_err_op_strs)) {
+				printk("%soperation type: %s\n", pfx,
+				       arm_bus_err_op_strs[op_type]);
+			}
+			break;
+		}
+	}
+
+	if (error_info & CPER_ARM_ERR_VALID_LEVEL) {
+		level = ((error_info >> CPER_ARM_ERR_LEVEL_SHIFT)
+			 & CPER_ARM_ERR_LEVEL_MASK);
+		switch (type) {
+		case CPER_ARM_CACHE_ERROR:
+			printk("%scache level: %d\n", pfx, level);
+			break;
+		case CPER_ARM_TLB_ERROR:
+			printk("%sTLB level: %d\n", pfx, level);
+			break;
+		case CPER_ARM_BUS_ERROR:
+			printk("%saffinity level at which the bus error occurred: %d\n",
+			       pfx, level);
+			break;
+		}
+	}
+
+	if (error_info & CPER_ARM_ERR_VALID_PROC_CONTEXT_CORRUPT) {
+		proc_context_corrupt = ((error_info >> CPER_ARM_ERR_PC_CORRUPT_SHIFT)
+					& CPER_ARM_ERR_PC_CORRUPT_MASK);
+		if (proc_context_corrupt)
+			printk("%sprocessor context corrupted\n", pfx);
+		else
+			printk("%sprocessor context not corrupted\n", pfx);
+	}
+
+	if (error_info & CPER_ARM_ERR_VALID_CORRECTED) {
+		corrected = ((error_info >> CPER_ARM_ERR_CORRECTED_SHIFT)
+			     & CPER_ARM_ERR_CORRECTED_MASK);
+		if (corrected)
+			printk("%sthe error has been corrected\n", pfx);
+		else
+			printk("%sthe error has not been corrected\n", pfx);
+	}
+
+	if (error_info & CPER_ARM_ERR_VALID_PRECISE_PC) {
+		precise_pc = ((error_info >> CPER_ARM_ERR_PRECISE_PC_SHIFT)
+			      & CPER_ARM_ERR_PRECISE_PC_MASK);
+		if (precise_pc)
+			printk("%sPC is precise\n", pfx);
+		else
+			printk("%sPC is imprecise\n", pfx);
+	}
+
+	if (error_info & CPER_ARM_ERR_VALID_RESTARTABLE_PC) {
+		restartable_pc = ((error_info >> CPER_ARM_ERR_RESTARTABLE_PC_SHIFT)
+				  & CPER_ARM_ERR_RESTARTABLE_PC_MASK);
+		if (restartable_pc)
+			printk("%sProgram execution can be restarted reliably at the PC associated with the error.\n", pfx);
+	}
+
+	/* The rest of the fields are specific to bus errors */
+	if (type != CPER_ARM_BUS_ERROR)
+		return;
+
+	if (error_info & CPER_ARM_ERR_VALID_PARTICIPATION_TYPE) {
+		participation_type = ((error_info >> CPER_ARM_ERR_PARTICIPATION_TYPE_SHIFT)
+				      & CPER_ARM_ERR_PARTICIPATION_TYPE_MASK);
+		if (participation_type < ARRAY_SIZE(arm_bus_err_part_type_strs)) {
+			printk("%sparticipation type: %s\n", pfx,
+			       arm_bus_err_part_type_strs[participation_type]);
+		}
+	}
+
+	if (error_info & CPER_ARM_ERR_VALID_TIME_OUT) {
+		time_out = ((error_info >> CPER_ARM_ERR_TIME_OUT_SHIFT)
+			    & CPER_ARM_ERR_TIME_OUT_MASK);
+		if (time_out)
+			printk("%srequest timed out\n", pfx);
+	}
+
+	if (error_info & CPER_ARM_ERR_VALID_ADDRESS_SPACE) {
+		address_space = ((error_info >> CPER_ARM_ERR_ADDRESS_SPACE_SHIFT)
+				 & CPER_ARM_ERR_ADDRESS_SPACE_MASK);
+		if (address_space < ARRAY_SIZE(arm_bus_err_addr_space_strs)) {
+			printk("%saddress space: %s\n", pfx,
+			       arm_bus_err_addr_space_strs[address_space]);
+		}
+	}
+
+	if (error_info & CPER_ARM_ERR_VALID_MEM_ATTRIBUTES) {
+		mem_attributes = ((error_info >> CPER_ARM_ERR_MEM_ATTRIBUTES_SHIFT)
+				  & CPER_ARM_ERR_MEM_ATTRIBUTES_MASK);
+		printk("%smemory access attributes:0x%x\n", pfx, mem_attributes);
+	}
+
+	if (error_info & CPER_ARM_ERR_VALID_ACCESS_MODE) {
+		access_mode = ((error_info >> CPER_ARM_ERR_ACCESS_MODE_SHIFT)
+			       & CPER_ARM_ERR_ACCESS_MODE_MASK);
+		if (access_mode)
+			printk("%saccess mode: normal\n", pfx);
+		else
+			printk("%saccess mode: secure\n", pfx);
+	}
+}
+
+void cper_print_proc_arm(const char *pfx,
+			 const struct cper_sec_proc_arm *proc)
+{
+	int i, len, max_ctx_type;
+	struct cper_arm_err_info *err_info;
+	struct cper_arm_ctx_info *ctx_info;
+	char newpfx[64], infopfx[64];
+
+	printk("%sMIDR: 0x%016llx\n", pfx, proc->midr);
+
+	len = proc->section_length - (sizeof(*proc) +
+		proc->err_info_num * (sizeof(*err_info)));
+	if (len < 0) {
+		printk("%ssection length: %d\n", pfx, proc->section_length);
+		printk("%ssection length is too small\n", pfx);
+		printk("%sfirmware-generated error record is incorrect\n", pfx);
+		printk("%sERR_INFO_NUM is %d\n", pfx, proc->err_info_num);
+		return;
+	}
+
+	if (proc->validation_bits & CPER_ARM_VALID_MPIDR)
+		printk("%sMultiprocessor Affinity Register (MPIDR): 0x%016llx\n",
+			pfx, proc->mpidr);
+
+	if (proc->validation_bits & CPER_ARM_VALID_AFFINITY_LEVEL)
+		printk("%serror affinity level: %d\n", pfx,
+			proc->affinity_level);
+
+	if (proc->validation_bits & CPER_ARM_VALID_RUNNING_STATE) {
+		printk("%srunning state: 0x%x\n", pfx, proc->running_state);
+		printk("%sPower State Coordination Interface state: %d\n",
+			pfx, proc->psci_state);
+	}
+
+	snprintf(newpfx, sizeof(newpfx), "%s ", pfx);
+
+	err_info = (struct cper_arm_err_info *)(proc + 1);
+	for (i = 0; i < proc->err_info_num; i++) {
+		printk("%sError info structure %d:\n", pfx, i);
+
+		printk("%snum errors: %d\n", pfx, err_info->multiple_error + 1);
+
+		if (err_info->validation_bits & CPER_ARM_INFO_VALID_FLAGS) {
+			if (err_info->flags & CPER_ARM_INFO_FLAGS_FIRST)
+				printk("%sfirst error captured\n", newpfx);
+			if (err_info->flags & CPER_ARM_INFO_FLAGS_LAST)
+				printk("%slast error captured\n", newpfx);
+			if (err_info->flags & CPER_ARM_INFO_FLAGS_PROPAGATED)
+				printk("%spropagated error captured\n",
+				       newpfx);
+			if (err_info->flags & CPER_ARM_INFO_FLAGS_OVERFLOW)
+				printk("%soverflow occurred, error info is incomplete\n",
+				       newpfx);
+		}
+
+		printk("%serror_type: %d, %s\n", newpfx, err_info->type,
+			err_info->type < ARRAY_SIZE(cper_proc_error_type_strs) ?
+			cper_proc_error_type_strs[err_info->type] : "unknown");
+		if (err_info->validation_bits & CPER_ARM_INFO_VALID_ERR_INFO) {
+			printk("%serror_info: 0x%016llx\n", newpfx,
+			       err_info->error_info);
+			snprintf(infopfx, sizeof(infopfx), "%s ", newpfx);
+			cper_print_arm_err_info(infopfx, err_info->type,
+						err_info->error_info);
+		}
+		if (err_info->validation_bits & CPER_ARM_INFO_VALID_VIRT_ADDR)
+			printk("%svirtual fault address: 0x%016llx\n",
+				newpfx, err_info->virt_fault_addr);
+		if (err_info->validation_bits & CPER_ARM_INFO_VALID_PHYSICAL_ADDR)
+			printk("%sphysical fault address: 0x%016llx\n",
+				newpfx, err_info->physical_fault_addr);
+		err_info += 1;
+	}
+
+	ctx_info = (struct cper_arm_ctx_info *)err_info;
+	max_ctx_type = ARRAY_SIZE(arm_reg_ctx_strs) - 1;
+	for (i = 0; i < proc->context_info_num; i++) {
+		int size = sizeof(*ctx_info) + ctx_info->size;
+
+		printk("%sContext info structure %d:\n", pfx, i);
+		if (len < size) {
+			printk("%ssection length is too small\n", newpfx);
+			printk("%sfirmware-generated error record is incorrect\n", pfx);
+			return;
+		}
+		if (ctx_info->type > max_ctx_type) {
+			printk("%sInvalid context type: %d (max: %d)\n",
+				newpfx, ctx_info->type, max_ctx_type);
+			return;
+		}
+		printk("%sregister context type: %s\n", newpfx,
+			arm_reg_ctx_strs[ctx_info->type]);
+		print_hex_dump(newpfx, "", DUMP_PREFIX_OFFSET, 16, 4,
+				(ctx_info + 1), ctx_info->size, 0);
+		len -= size;
+		ctx_info = (struct cper_arm_ctx_info *)((long)ctx_info + size);
+	}
+
+	if (len > 0) {
+		printk("%sVendor specific error info has %u bytes:\n", pfx,
+		       len);
+		print_hex_dump(newpfx, "", DUMP_PREFIX_OFFSET, 16, 4, ctx_info,
+				len, true);
+	}
+}
diff --git a/drivers/firmware/efi/cper-x86.c b/drivers/firmware/efi/cper-x86.c
new file mode 100644
index 0000000..2531de4
--- /dev/null
+++ b/drivers/firmware/efi/cper-x86.c
@@ -0,0 +1,356 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2018, Advanced Micro Devices, Inc.
+
+#include <linux/cper.h>
+
+/*
+ * We don't need a "CPER_IA" prefix since these are all locally defined.
+ * This will save us a lot of line space.
+ */
+#define VALID_LAPIC_ID			BIT_ULL(0)
+#define VALID_CPUID_INFO		BIT_ULL(1)
+#define VALID_PROC_ERR_INFO_NUM(bits)	(((bits) & GENMASK_ULL(7, 2)) >> 2)
+#define VALID_PROC_CXT_INFO_NUM(bits)	(((bits) & GENMASK_ULL(13, 8)) >> 8)
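+
+/*
+ * For example, validation_bits == 0xd has bit 0 set (LAPIC ID valid),
+ * bit 1 clear (no CPUID info) and bits 7:2 == 3, so
+ * VALID_PROC_ERR_INFO_NUM(0xd) says three error info structures
+ * follow the section header.
+ */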
+
+#define INFO_ERR_STRUCT_TYPE_CACHE					\
+	GUID_INIT(0xA55701F5, 0xE3EF, 0x43DE, 0xAC, 0x72, 0x24, 0x9B,	\
+		  0x57, 0x3F, 0xAD, 0x2C)
+#define INFO_ERR_STRUCT_TYPE_TLB					\
+	GUID_INIT(0xFC06B535, 0x5E1F, 0x4562, 0x9F, 0x25, 0x0A, 0x3B,	\
+		  0x9A, 0xDB, 0x63, 0xC3)
+#define INFO_ERR_STRUCT_TYPE_BUS					\
+	GUID_INIT(0x1CF3F8B3, 0xC5B1, 0x49a2, 0xAA, 0x59, 0x5E, 0xEF,	\
+		  0x92, 0xFF, 0xA6, 0x3C)
+#define INFO_ERR_STRUCT_TYPE_MS						\
+	GUID_INIT(0x48AB7F57, 0xDC34, 0x4f6c, 0xA7, 0xD3, 0xB0, 0xB5,	\
+		  0xB0, 0xA7, 0x43, 0x14)
+
+#define INFO_VALID_CHECK_INFO		BIT_ULL(0)
+#define INFO_VALID_TARGET_ID		BIT_ULL(1)
+#define INFO_VALID_REQUESTOR_ID		BIT_ULL(2)
+#define INFO_VALID_RESPONDER_ID		BIT_ULL(3)
+#define INFO_VALID_IP			BIT_ULL(4)
+
+#define CHECK_VALID_TRANS_TYPE		BIT_ULL(0)
+#define CHECK_VALID_OPERATION		BIT_ULL(1)
+#define CHECK_VALID_LEVEL		BIT_ULL(2)
+#define CHECK_VALID_PCC			BIT_ULL(3)
+#define CHECK_VALID_UNCORRECTED		BIT_ULL(4)
+#define CHECK_VALID_PRECISE_IP		BIT_ULL(5)
+#define CHECK_VALID_RESTARTABLE_IP	BIT_ULL(6)
+#define CHECK_VALID_OVERFLOW		BIT_ULL(7)
+
+#define CHECK_VALID_BUS_PART_TYPE	BIT_ULL(8)
+#define CHECK_VALID_BUS_TIME_OUT	BIT_ULL(9)
+#define CHECK_VALID_BUS_ADDR_SPACE	BIT_ULL(10)
+
+#define CHECK_VALID_BITS(check)		(((check) & GENMASK_ULL(15, 0)))
+#define CHECK_TRANS_TYPE(check)		(((check) & GENMASK_ULL(17, 16)) >> 16)
+#define CHECK_OPERATION(check)		(((check) & GENMASK_ULL(21, 18)) >> 18)
+#define CHECK_LEVEL(check)		(((check) & GENMASK_ULL(24, 22)) >> 22)
+#define CHECK_PCC			BIT_ULL(25)
+#define CHECK_UNCORRECTED		BIT_ULL(26)
+#define CHECK_PRECISE_IP		BIT_ULL(27)
+#define CHECK_RESTARTABLE_IP		BIT_ULL(28)
+#define CHECK_OVERFLOW			BIT_ULL(29)
+
+#define CHECK_BUS_PART_TYPE(check)	(((check) & GENMASK_ULL(31, 30)) >> 30)
+#define CHECK_BUS_TIME_OUT		BIT_ULL(32)
+#define CHECK_BUS_ADDR_SPACE(check)	(((check) & GENMASK_ULL(34, 33)) >> 33)
+
+#define CHECK_VALID_MS_ERR_TYPE		BIT_ULL(0)
+#define CHECK_VALID_MS_PCC		BIT_ULL(1)
+#define CHECK_VALID_MS_UNCORRECTED	BIT_ULL(2)
+#define CHECK_VALID_MS_PRECISE_IP	BIT_ULL(3)
+#define CHECK_VALID_MS_RESTARTABLE_IP	BIT_ULL(4)
+#define CHECK_VALID_MS_OVERFLOW		BIT_ULL(5)
+
+#define CHECK_MS_ERR_TYPE(check)	(((check) & GENMASK_ULL(18, 16)) >> 16)
+#define CHECK_MS_PCC			BIT_ULL(19)
+#define CHECK_MS_UNCORRECTED		BIT_ULL(20)
+#define CHECK_MS_PRECISE_IP		BIT_ULL(21)
+#define CHECK_MS_RESTARTABLE_IP		BIT_ULL(22)
+#define CHECK_MS_OVERFLOW		BIT_ULL(23)
+
+#define CTX_TYPE_MSR			1
+#define CTX_TYPE_MMREG			7
+
+enum err_types {
+	ERR_TYPE_CACHE = 0,
+	ERR_TYPE_TLB,
+	ERR_TYPE_BUS,
+	ERR_TYPE_MS,
+	N_ERR_TYPES
+};
+
+static enum err_types cper_get_err_type(const guid_t *err_type)
+{
+	if (guid_equal(err_type, &INFO_ERR_STRUCT_TYPE_CACHE))
+		return ERR_TYPE_CACHE;
+	else if (guid_equal(err_type, &INFO_ERR_STRUCT_TYPE_TLB))
+		return ERR_TYPE_TLB;
+	else if (guid_equal(err_type, &INFO_ERR_STRUCT_TYPE_BUS))
+		return ERR_TYPE_BUS;
+	else if (guid_equal(err_type, &INFO_ERR_STRUCT_TYPE_MS))
+		return ERR_TYPE_MS;
+	else
+		return N_ERR_TYPES;
+}
+
+static const char * const ia_check_trans_type_strs[] = {
+	"Instruction",
+	"Data Access",
+	"Generic",
+};
+
+static const char * const ia_check_op_strs[] = {
+	"generic error",
+	"generic read",
+	"generic write",
+	"data read",
+	"data write",
+	"instruction fetch",
+	"prefetch",
+	"eviction",
+	"snoop",
+};
+
+static const char * const ia_check_bus_part_type_strs[] = {
+	"Local Processor originated request",
+	"Local Processor responded to request",
+	"Local Processor observed",
+	"Generic",
+};
+
+static const char * const ia_check_bus_addr_space_strs[] = {
+	"Memory Access",
+	"Reserved",
+	"I/O",
+	"Other Transaction",
+};
+
+static const char * const ia_check_ms_error_type_strs[] = {
+	"No Error",
+	"Unclassified",
+	"Microcode ROM Parity Error",
+	"External Error",
+	"FRC Error",
+	"Internal Unclassified",
+};
+
+static const char * const ia_reg_ctx_strs[] = {
+	"Unclassified Data",
+	"MSR Registers (Machine Check and other MSRs)",
+	"32-bit Mode Execution Context",
+	"64-bit Mode Execution Context",
+	"FXSAVE Context",
+	"32-bit Mode Debug Registers (DR0-DR7)",
+	"64-bit Mode Debug Registers (DR0-DR7)",
+	"Memory Mapped Registers",
+};
+
+static inline void print_bool(char *str, const char *pfx, u64 check, u64 bit)
+{
+	printk("%s%s: %s\n", pfx, str, (check & bit) ? "true" : "false");
+}
+
+static void print_err_info_ms(const char *pfx, u16 validation_bits, u64 check)
+{
+	if (validation_bits & CHECK_VALID_MS_ERR_TYPE) {
+		u8 err_type = CHECK_MS_ERR_TYPE(check);
+
+		printk("%sError Type: %u, %s\n", pfx, err_type,
+		       err_type < ARRAY_SIZE(ia_check_ms_error_type_strs) ?
+		       ia_check_ms_error_type_strs[err_type] : "unknown");
+	}
+
+	if (validation_bits & CHECK_VALID_MS_PCC)
+		print_bool("Processor Context Corrupt", pfx, check, CHECK_MS_PCC);
+
+	if (validation_bits & CHECK_VALID_MS_UNCORRECTED)
+		print_bool("Uncorrected", pfx, check, CHECK_MS_UNCORRECTED);
+
+	if (validation_bits & CHECK_VALID_MS_PRECISE_IP)
+		print_bool("Precise IP", pfx, check, CHECK_MS_PRECISE_IP);
+
+	if (validation_bits & CHECK_VALID_MS_RESTARTABLE_IP)
+		print_bool("Restartable IP", pfx, check, CHECK_MS_RESTARTABLE_IP);
+
+	if (validation_bits & CHECK_VALID_MS_OVERFLOW)
+		print_bool("Overflow", pfx, check, CHECK_MS_OVERFLOW);
+}
+
+static void print_err_info(const char *pfx, u8 err_type, u64 check)
+{
+	u16 validation_bits = CHECK_VALID_BITS(check);
+
+	/*
+	 * The MS Check structure varies a lot from the others, so use a
+	 * separate function for decoding.
+	 */
+	if (err_type == ERR_TYPE_MS)
+		return print_err_info_ms(pfx, validation_bits, check);
+
+	if (validation_bits & CHECK_VALID_TRANS_TYPE) {
+		u8 trans_type = CHECK_TRANS_TYPE(check);
+
+		printk("%sTransaction Type: %u, %s\n", pfx, trans_type,
+		       trans_type < ARRAY_SIZE(ia_check_trans_type_strs) ?
+		       ia_check_trans_type_strs[trans_type] : "unknown");
+	}
+
+	if (validation_bits & CHECK_VALID_OPERATION) {
+		u8 op = CHECK_OPERATION(check);
+
+		/*
+		 * CACHE has more operation types than TLB or BUS, though the
+		 * names and their order are the same.
+		 */
+		u8 max_ops = (err_type == ERR_TYPE_CACHE) ? 9 : 7;
+
+		printk("%sOperation: %u, %s\n", pfx, op,
+		       op < max_ops ? ia_check_op_strs[op] : "unknown");
+	}
+
+	if (validation_bits & CHECK_VALID_LEVEL)
+		printk("%sLevel: %llu\n", pfx, CHECK_LEVEL(check));
+
+	if (validation_bits & CHECK_VALID_PCC)
+		print_bool("Processor Context Corrupt", pfx, check, CHECK_PCC);
+
+	if (validation_bits & CHECK_VALID_UNCORRECTED)
+		print_bool("Uncorrected", pfx, check, CHECK_UNCORRECTED);
+
+	if (validation_bits & CHECK_VALID_PRECISE_IP)
+		print_bool("Precise IP", pfx, check, CHECK_PRECISE_IP);
+
+	if (validation_bits & CHECK_VALID_RESTARTABLE_IP)
+		print_bool("Restartable IP", pfx, check, CHECK_RESTARTABLE_IP);
+
+	if (validation_bits & CHECK_VALID_OVERFLOW)
+		print_bool("Overflow", pfx, check, CHECK_OVERFLOW);
+
+	if (err_type != ERR_TYPE_BUS)
+		return;
+
+	if (validation_bits & CHECK_VALID_BUS_PART_TYPE) {
+		u8 part_type = CHECK_BUS_PART_TYPE(check);
+
+		printk("%sParticipation Type: %u, %s\n", pfx, part_type,
+		       part_type < ARRAY_SIZE(ia_check_bus_part_type_strs) ?
+		       ia_check_bus_part_type_strs[part_type] : "unknown");
+	}
+
+	if (validation_bits & CHECK_VALID_BUS_TIME_OUT)
+		print_bool("Time Out", pfx, check, CHECK_BUS_TIME_OUT);
+
+	if (validation_bits & CHECK_VALID_BUS_ADDR_SPACE) {
+		u8 addr_space = CHECK_BUS_ADDR_SPACE(check);
+
+		printk("%sAddress Space: %u, %s\n", pfx, addr_space,
+		       addr_space < ARRAY_SIZE(ia_check_bus_addr_space_strs) ?
+		       ia_check_bus_addr_space_strs[addr_space] : "unknown");
+	}
+}
+
+void cper_print_proc_ia(const char *pfx, const struct cper_sec_proc_ia *proc)
+{
+	int i;
+	struct cper_ia_err_info *err_info;
+	struct cper_ia_proc_ctx *ctx_info;
+	char newpfx[64], infopfx[64];
+	u8 err_type;
+
+	if (proc->validation_bits & VALID_LAPIC_ID)
+		printk("%sLocal APIC_ID: 0x%llx\n", pfx, proc->lapic_id);
+
+	if (proc->validation_bits & VALID_CPUID_INFO) {
+		printk("%sCPUID Info:\n", pfx);
+		print_hex_dump(pfx, "", DUMP_PREFIX_OFFSET, 16, 4, proc->cpuid,
+			       sizeof(proc->cpuid), 0);
+	}
+
+	snprintf(newpfx, sizeof(newpfx), "%s ", pfx);
+
+	err_info = (struct cper_ia_err_info *)(proc + 1);
+	for (i = 0; i < VALID_PROC_ERR_INFO_NUM(proc->validation_bits); i++) {
+		printk("%sError Information Structure %d:\n", pfx, i);
+
+		err_type = cper_get_err_type(&err_info->err_type);
+		printk("%sError Structure Type: %s\n", newpfx,
+		       err_type < ARRAY_SIZE(cper_proc_error_type_strs) ?
+		       cper_proc_error_type_strs[err_type] : "unknown");
+
+		if (err_type >= N_ERR_TYPES) {
+			printk("%sError Structure Type: %pUl\n", newpfx,
+			       &err_info->err_type);
+		}
+
+		if (err_info->validation_bits & INFO_VALID_CHECK_INFO) {
+			printk("%sCheck Information: 0x%016llx\n", newpfx,
+			       err_info->check_info);
+
+			if (err_type < N_ERR_TYPES) {
+				snprintf(infopfx, sizeof(infopfx), "%s ",
+					 newpfx);
+
+				print_err_info(infopfx, err_type,
+					       err_info->check_info);
+			}
+		}
+
+		if (err_info->validation_bits & INFO_VALID_TARGET_ID) {
+			printk("%sTarget Identifier: 0x%016llx\n",
+			       newpfx, err_info->target_id);
+		}
+
+		if (err_info->validation_bits & INFO_VALID_REQUESTOR_ID) {
+			printk("%sRequestor Identifier: 0x%016llx\n",
+			       newpfx, err_info->requestor_id);
+		}
+
+		if (err_info->validation_bits & INFO_VALID_RESPONDER_ID) {
+			printk("%sResponder Identifier: 0x%016llx\n",
+			       newpfx, err_info->responder_id);
+		}
+
+		if (err_info->validation_bits & INFO_VALID_IP) {
+			printk("%sInstruction Pointer: 0x%016llx\n",
+			       newpfx, err_info->ip);
+		}
+
+		err_info++;
+	}
+
+	ctx_info = (struct cper_ia_proc_ctx *)err_info;
+	for (i = 0; i < VALID_PROC_CXT_INFO_NUM(proc->validation_bits); i++) {
+		int size = sizeof(*ctx_info) + ctx_info->reg_arr_size;
+		int groupsize = 4;
+
+		printk("%sContext Information Structure %d:\n", pfx, i);
+
+		printk("%sRegister Context Type: %s\n", newpfx,
+		       ctx_info->reg_ctx_type < ARRAY_SIZE(ia_reg_ctx_strs) ?
+		       ia_reg_ctx_strs[ctx_info->reg_ctx_type] : "unknown");
+
+		printk("%sRegister Array Size: 0x%04x\n", newpfx,
+		       ctx_info->reg_arr_size);
+
+		if (ctx_info->reg_ctx_type == CTX_TYPE_MSR) {
+			groupsize = 8; /* MSRs are 8 bytes wide. */
+			printk("%sMSR Address: 0x%08x\n", newpfx,
+			       ctx_info->msr_addr);
+		}
+
+		if (ctx_info->reg_ctx_type == CTX_TYPE_MMREG) {
+			printk("%sMM Register Address: 0x%016llx\n", newpfx,
+			       ctx_info->mm_reg_addr);
+		}
+
+		printk("%sRegister Array:\n", newpfx);
+		print_hex_dump(newpfx, "", DUMP_PREFIX_OFFSET, 16, groupsize,
+			       (ctx_info + 1), ctx_info->reg_arr_size, 0);
+
+		ctx_info = (struct cper_ia_proc_ctx *)((long)ctx_info + size);
+	}
+}
diff --git a/drivers/firmware/efi/cper.c b/drivers/firmware/efi/cper.c
new file mode 100644
index 0000000..a7902fc
--- /dev/null
+++ b/drivers/firmware/efi/cper.c
@@ -0,0 +1,568 @@
+/*
+ * UEFI Common Platform Error Record (CPER) support
+ *
+ * Copyright (C) 2010, Intel Corp.
+ *	Author: Huang Ying <ying.huang@intel.com>
+ *
+ * CPER is the format used to describe platform hardware errors in
+ * various tables, such as ERST, BERT and HEST.
+ *
+ * For more information about CPER, please refer to Appendix N of UEFI
+ * Specification version 2.4.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/time.h>
+#include <linux/cper.h>
+#include <linux/dmi.h>
+#include <linux/acpi.h>
+#include <linux/pci.h>
+#include <linux/aer.h>
+#include <linux/printk.h>
+#include <linux/bcd.h>
+#include <acpi/ghes.h>
+#include <ras/ras_event.h>
+
+static char rcd_decode_str[CPER_REC_LEN];
+
+/*
+ * CPER record IDs need to be unique even across reboots, because the
+ * record ID is used as an index into ERST storage, and CPER records
+ * from multiple boots may co-exist in ERST.
+ */
+u64 cper_next_record_id(void)
+{
+	static atomic64_t seq;
+
+	if (!atomic64_read(&seq)) {
+		time64_t time = ktime_get_real_seconds();
+
+		/*
+		 * This code is unlikely to still be needed in year 2106,
+		 * but just in case, let's use a few more bits for timestamps
+		 * after y2038 to be sure they keep increasing monotonically
+		 * for the next few hundred years...
+		 */
+		if (time < 0x80000000)
+			atomic64_set(&seq, (u64)time << 32);
+		else
+			atomic64_set(&seq, 0x8000000000000000ull |
+					   (u64)time << 24);
+	}
+
+	return atomic64_inc_return(&seq);
+}
+EXPORT_SYMBOL_GPL(cper_next_record_id);
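+
+/*
+ * For example, the first record of a boot at time 0x5b000000 gets ID
+ * 0x5b00000000000001, and later records in the same boot simply
+ * increment the low bits, so IDs from different boots occupy disjoint
+ * ranges.
+ */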
+
+static const char * const severity_strs[] = {
+	"recoverable",
+	"fatal",
+	"corrected",
+	"info",
+};
+
+const char *cper_severity_str(unsigned int severity)
+{
+	return severity < ARRAY_SIZE(severity_strs) ?
+		severity_strs[severity] : "unknown";
+}
+EXPORT_SYMBOL_GPL(cper_severity_str);
+
+/*
+ * cper_print_bits - print strings for set bits
+ * @pfx: prefix for each line, including log level and prefix string
+ * @bits: bit mask
+ * @strs: string array, indexed by bit position
+ * @strs_size: number of entries in the string array @strs
+ *
+ * For each set bit in @bits, print the corresponding string in @strs.
+ * If the output length would exceed 80 characters, multiple lines are
+ * printed, with @pfx at the beginning of each line.
+ */
+void cper_print_bits(const char *pfx, unsigned int bits,
+		     const char * const strs[], unsigned int strs_size)
+{
+	int i, len = 0;
+	const char *str;
+	char buf[84];
+
+	for (i = 0; i < strs_size; i++) {
+		if (!(bits & (1U << i)))
+			continue;
+		str = strs[i];
+		if (!str)
+			continue;
+		if (len && len + strlen(str) + 2 > 80) {
+			printk("%s\n", buf);
+			len = 0;
+		}
+		if (!len)
+			len = snprintf(buf, sizeof(buf), "%s%s", pfx, str);
+		else
+			len += snprintf(buf+len, sizeof(buf)-len, ", %s", str);
+	}
+	if (len)
+		printk("%s\n", buf);
+}
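+
+/*
+ * For example, with proc_flag_strs (defined below) and bits == 0x5,
+ *
+ *	cper_print_bits(pfx, 0x5, proc_flag_strs,
+ *			ARRAY_SIZE(proc_flag_strs));
+ *
+ * finds bits 0 and 2 set and prints the single line
+ * "restartable, overflow", prefixed with @pfx.
+ */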
+
+static const char * const proc_type_strs[] = {
+	"IA32/X64",
+	"IA64",
+	"ARM",
+};
+
+static const char * const proc_isa_strs[] = {
+	"IA32",
+	"IA64",
+	"X64",
+	"ARM A32/T32",
+	"ARM A64",
+};
+
+const char * const cper_proc_error_type_strs[] = {
+	"cache error",
+	"TLB error",
+	"bus error",
+	"micro-architectural error",
+};
+
+static const char * const proc_op_strs[] = {
+	"unknown or generic",
+	"data read",
+	"data write",
+	"instruction execution",
+};
+
+static const char * const proc_flag_strs[] = {
+	"restartable",
+	"precise IP",
+	"overflow",
+	"corrected",
+};
+
+static void cper_print_proc_generic(const char *pfx,
+				    const struct cper_sec_proc_generic *proc)
+{
+	if (proc->validation_bits & CPER_PROC_VALID_TYPE)
+		printk("%s""processor_type: %d, %s\n", pfx, proc->proc_type,
+		       proc->proc_type < ARRAY_SIZE(proc_type_strs) ?
+		       proc_type_strs[proc->proc_type] : "unknown");
+	if (proc->validation_bits & CPER_PROC_VALID_ISA)
+		printk("%s""processor_isa: %d, %s\n", pfx, proc->proc_isa,
+		       proc->proc_isa < ARRAY_SIZE(proc_isa_strs) ?
+		       proc_isa_strs[proc->proc_isa] : "unknown");
+	if (proc->validation_bits & CPER_PROC_VALID_ERROR_TYPE) {
+		printk("%s""error_type: 0x%02x\n", pfx, proc->proc_error_type);
+		cper_print_bits(pfx, proc->proc_error_type,
+				cper_proc_error_type_strs,
+				ARRAY_SIZE(cper_proc_error_type_strs));
+	}
+	if (proc->validation_bits & CPER_PROC_VALID_OPERATION)
+		printk("%s""operation: %d, %s\n", pfx, proc->operation,
+		       proc->operation < ARRAY_SIZE(proc_op_strs) ?
+		       proc_op_strs[proc->operation] : "unknown");
+	if (proc->validation_bits & CPER_PROC_VALID_FLAGS) {
+		printk("%s""flags: 0x%02x\n", pfx, proc->flags);
+		cper_print_bits(pfx, proc->flags, proc_flag_strs,
+				ARRAY_SIZE(proc_flag_strs));
+	}
+	if (proc->validation_bits & CPER_PROC_VALID_LEVEL)
+		printk("%s""level: %d\n", pfx, proc->level);
+	if (proc->validation_bits & CPER_PROC_VALID_VERSION)
+		printk("%s""version_info: 0x%016llx\n", pfx, proc->cpu_version);
+	if (proc->validation_bits & CPER_PROC_VALID_ID)
+		printk("%s""processor_id: 0x%016llx\n", pfx, proc->proc_id);
+	if (proc->validation_bits & CPER_PROC_VALID_TARGET_ADDRESS)
+		printk("%s""target_address: 0x%016llx\n",
+		       pfx, proc->target_addr);
+	if (proc->validation_bits & CPER_PROC_VALID_REQUESTOR_ID)
+		printk("%s""requestor_id: 0x%016llx\n",
+		       pfx, proc->requestor_id);
+	if (proc->validation_bits & CPER_PROC_VALID_RESPONDER_ID)
+		printk("%s""responder_id: 0x%016llx\n",
+		       pfx, proc->responder_id);
+	if (proc->validation_bits & CPER_PROC_VALID_IP)
+		printk("%s""IP: 0x%016llx\n", pfx, proc->ip);
+}
+
+static const char * const mem_err_type_strs[] = {
+	"unknown",
+	"no error",
+	"single-bit ECC",
+	"multi-bit ECC",
+	"single-symbol chipkill ECC",
+	"multi-symbol chipkill ECC",
+	"master abort",
+	"target abort",
+	"parity error",
+	"watchdog timeout",
+	"invalid address",
+	"mirror Broken",
+	"memory sparing",
+	"scrub corrected error",
+	"scrub uncorrected error",
+	"physical memory map-out event",
+};
+
+const char *cper_mem_err_type_str(unsigned int etype)
+{
+	return etype < ARRAY_SIZE(mem_err_type_strs) ?
+		mem_err_type_strs[etype] : "unknown";
+}
+EXPORT_SYMBOL_GPL(cper_mem_err_type_str);
+
+static int cper_mem_err_location(struct cper_mem_err_compact *mem, char *msg)
+{
+	u32 len, n;
+
+	if (!msg)
+		return 0;
+
+	n = 0;
+	len = CPER_REC_LEN - 1;
+	if (mem->validation_bits & CPER_MEM_VALID_NODE)
+		n += scnprintf(msg + n, len - n, "node: %d ", mem->node);
+	if (mem->validation_bits & CPER_MEM_VALID_CARD)
+		n += scnprintf(msg + n, len - n, "card: %d ", mem->card);
+	if (mem->validation_bits & CPER_MEM_VALID_MODULE)
+		n += scnprintf(msg + n, len - n, "module: %d ", mem->module);
+	if (mem->validation_bits & CPER_MEM_VALID_RANK_NUMBER)
+		n += scnprintf(msg + n, len - n, "rank: %d ", mem->rank);
+	if (mem->validation_bits & CPER_MEM_VALID_BANK)
+		n += scnprintf(msg + n, len - n, "bank: %d ", mem->bank);
+	if (mem->validation_bits & CPER_MEM_VALID_DEVICE)
+		n += scnprintf(msg + n, len - n, "device: %d ", mem->device);
+	if (mem->validation_bits & CPER_MEM_VALID_ROW)
+		n += scnprintf(msg + n, len - n, "row: %d ", mem->row);
+	if (mem->validation_bits & CPER_MEM_VALID_COLUMN)
+		n += scnprintf(msg + n, len - n, "column: %d ", mem->column);
+	if (mem->validation_bits & CPER_MEM_VALID_BIT_POSITION)
+		n += scnprintf(msg + n, len - n, "bit_position: %d ",
+			       mem->bit_pos);
+	if (mem->validation_bits & CPER_MEM_VALID_REQUESTOR_ID)
+		n += scnprintf(msg + n, len - n, "requestor_id: 0x%016llx ",
+			       mem->requestor_id);
+	if (mem->validation_bits & CPER_MEM_VALID_RESPONDER_ID)
+		n += scnprintf(msg + n, len - n, "responder_id: 0x%016llx ",
+			       mem->responder_id);
+	if (mem->validation_bits & CPER_MEM_VALID_TARGET_ID)
+		scnprintf(msg + n, len - n, "target_id: 0x%016llx ",
+			  mem->target_id);
+
+	msg[n] = '\0';
+	return n;
+}
+
+static int cper_dimm_err_location(struct cper_mem_err_compact *mem, char *msg)
+{
+	u32 len, n;
+	const char *bank = NULL, *device = NULL;
+
+	if (!msg || !(mem->validation_bits & CPER_MEM_VALID_MODULE_HANDLE))
+		return 0;
+
+	n = 0;
+	len = CPER_REC_LEN - 1;
+	dmi_memdev_name(mem->mem_dev_handle, &bank, &device);
+	if (bank && device)
+		n = snprintf(msg, len, "DIMM location: %s %s ", bank, device);
+	else
+		n = snprintf(msg, len,
+			     "DIMM location: not present. DMI handle: 0x%.4x ",
+			     mem->mem_dev_handle);
+
+	msg[n] = '\0';
+	return n;
+}
+
+void cper_mem_err_pack(const struct cper_sec_mem_err *mem,
+		       struct cper_mem_err_compact *cmem)
+{
+	cmem->validation_bits = mem->validation_bits;
+	cmem->node = mem->node;
+	cmem->card = mem->card;
+	cmem->module = mem->module;
+	cmem->bank = mem->bank;
+	cmem->device = mem->device;
+	cmem->row = mem->row;
+	cmem->column = mem->column;
+	cmem->bit_pos = mem->bit_pos;
+	cmem->requestor_id = mem->requestor_id;
+	cmem->responder_id = mem->responder_id;
+	cmem->target_id = mem->target_id;
+	cmem->rank = mem->rank;
+	cmem->mem_array_handle = mem->mem_array_handle;
+	cmem->mem_dev_handle = mem->mem_dev_handle;
+}
+
+const char *cper_mem_err_unpack(struct trace_seq *p,
+				struct cper_mem_err_compact *cmem)
+{
+	const char *ret = trace_seq_buffer_ptr(p);
+
+	if (cper_mem_err_location(cmem, rcd_decode_str))
+		trace_seq_printf(p, "%s", rcd_decode_str);
+	if (cper_dimm_err_location(cmem, rcd_decode_str))
+		trace_seq_printf(p, "%s", rcd_decode_str);
+	trace_seq_putc(p, '\0');
+
+	return ret;
+}
+
+static void cper_print_mem(const char *pfx, const struct cper_sec_mem_err *mem,
+	int len)
+{
+	struct cper_mem_err_compact cmem;
+
+	/* Don't trust UEFI 2.1/2.2 structure with bad validation bits */
+	if (len == sizeof(struct cper_sec_mem_err_old) &&
+	    (mem->validation_bits & ~(CPER_MEM_VALID_RANK_NUMBER - 1))) {
+		pr_err(FW_WARN "valid bits set for fields beyond structure\n");
+		return;
+	}
+	if (mem->validation_bits & CPER_MEM_VALID_ERROR_STATUS)
+		printk("%s""error_status: 0x%016llx\n", pfx, mem->error_status);
+	if (mem->validation_bits & CPER_MEM_VALID_PA)
+		printk("%s""physical_address: 0x%016llx\n",
+		       pfx, mem->physical_addr);
+	if (mem->validation_bits & CPER_MEM_VALID_PA_MASK)
+		printk("%s""physical_address_mask: 0x%016llx\n",
+		       pfx, mem->physical_addr_mask);
+	cper_mem_err_pack(mem, &cmem);
+	if (cper_mem_err_location(&cmem, rcd_decode_str))
+		printk("%s%s\n", pfx, rcd_decode_str);
+	if (mem->validation_bits & CPER_MEM_VALID_ERROR_TYPE) {
+		u8 etype = mem->error_type;
+		printk("%s""error_type: %d, %s\n", pfx, etype,
+		       cper_mem_err_type_str(etype));
+	}
+	if (cper_dimm_err_location(&cmem, rcd_decode_str))
+		printk("%s%s\n", pfx, rcd_decode_str);
+}
+
+static const char * const pcie_port_type_strs[] = {
+	"PCIe end point",
+	"legacy PCI end point",
+	"unknown",
+	"unknown",
+	"root port",
+	"upstream switch port",
+	"downstream switch port",
+	"PCIe to PCI/PCI-X bridge",
+	"PCI/PCI-X to PCIe bridge",
+	"root complex integrated endpoint device",
+	"root complex event collector",
+};
+
+static void cper_print_pcie(const char *pfx, const struct cper_sec_pcie *pcie,
+			    const struct acpi_hest_generic_data *gdata)
+{
+	if (pcie->validation_bits & CPER_PCIE_VALID_PORT_TYPE)
+		printk("%s""port_type: %d, %s\n", pfx, pcie->port_type,
+		       pcie->port_type < ARRAY_SIZE(pcie_port_type_strs) ?
+		       pcie_port_type_strs[pcie->port_type] : "unknown");
+	if (pcie->validation_bits & CPER_PCIE_VALID_VERSION)
+		printk("%s""version: %d.%d\n", pfx,
+		       pcie->version.major, pcie->version.minor);
+	if (pcie->validation_bits & CPER_PCIE_VALID_COMMAND_STATUS)
+		printk("%s""command: 0x%04x, status: 0x%04x\n", pfx,
+		       pcie->command, pcie->status);
+	if (pcie->validation_bits & CPER_PCIE_VALID_DEVICE_ID) {
+		const __u8 *p;
+		printk("%s""device_id: %04x:%02x:%02x.%x\n", pfx,
+		       pcie->device_id.segment, pcie->device_id.bus,
+		       pcie->device_id.device, pcie->device_id.function);
+		printk("%s""slot: %d\n", pfx,
+		       pcie->device_id.slot >> CPER_PCIE_SLOT_SHIFT);
+		printk("%s""secondary_bus: 0x%02x\n", pfx,
+		       pcie->device_id.secondary_bus);
+		printk("%s""vendor_id: 0x%04x, device_id: 0x%04x\n", pfx,
+		       pcie->device_id.vendor_id, pcie->device_id.device_id);
+		p = pcie->device_id.class_code;
+		printk("%s""class_code: %02x%02x%02x\n", pfx, p[0], p[1], p[2]);
+	}
+	if (pcie->validation_bits & CPER_PCIE_VALID_SERIAL_NUMBER)
+		printk("%s""serial number: 0x%04x, 0x%04x\n", pfx,
+		       pcie->serial_number.lower, pcie->serial_number.upper);
+	if (pcie->validation_bits & CPER_PCIE_VALID_BRIDGE_CONTROL_STATUS)
+		printk("%s""bridge: secondary_status: 0x%04x, control: 0x%04x\n",
+		       pfx, pcie->bridge.secondary_status,
+		       pcie->bridge.control);
+}
+
+static void cper_print_tstamp(const char *pfx,
+				   struct acpi_hest_generic_data_v300 *gdata)
+{
+	__u8 hour, min, sec, day, mon, year, century, *timestamp;
+
+	if (gdata->validation_bits & ACPI_HEST_GEN_VALID_TIMESTAMP) {
+		timestamp = (__u8 *)&(gdata->time_stamp);
+		sec       = bcd2bin(timestamp[0]);
+		min       = bcd2bin(timestamp[1]);
+		hour      = bcd2bin(timestamp[2]);
+		day       = bcd2bin(timestamp[4]);
+		mon       = bcd2bin(timestamp[5]);
+		year      = bcd2bin(timestamp[6]);
+		century   = bcd2bin(timestamp[7]);
+
+		printk("%s%ststamp: %02d%02d-%02d-%02d %02d:%02d:%02d\n", pfx,
+		       (timestamp[3] & 0x1 ? "precise " : "imprecise "),
+		       century, year, mon, day, hour, min, sec);
+	}
+}
+
+static void
+cper_estatus_print_section(const char *pfx, struct acpi_hest_generic_data *gdata,
+			   int sec_no)
+{
+	guid_t *sec_type = (guid_t *)gdata->section_type;
+	__u16 severity;
+	char newpfx[64];
+
+	if (acpi_hest_get_version(gdata) >= 3)
+		cper_print_tstamp(pfx, (struct acpi_hest_generic_data_v300 *)gdata);
+
+	severity = gdata->error_severity;
+	printk("%s""Error %d, type: %s\n", pfx, sec_no,
+	       cper_severity_str(severity));
+	if (gdata->validation_bits & CPER_SEC_VALID_FRU_ID)
+		printk("%s""fru_id: %pUl\n", pfx, gdata->fru_id);
+	if (gdata->validation_bits & CPER_SEC_VALID_FRU_TEXT)
+		printk("%s""fru_text: %.20s\n", pfx, gdata->fru_text);
+
+	snprintf(newpfx, sizeof(newpfx), "%s ", pfx);
+	if (guid_equal(sec_type, &CPER_SEC_PROC_GENERIC)) {
+		struct cper_sec_proc_generic *proc_err = acpi_hest_get_payload(gdata);
+
+		printk("%s""section_type: general processor error\n", newpfx);
+		if (gdata->error_data_length >= sizeof(*proc_err))
+			cper_print_proc_generic(newpfx, proc_err);
+		else
+			goto err_section_too_small;
+	} else if (guid_equal(sec_type, &CPER_SEC_PLATFORM_MEM)) {
+		struct cper_sec_mem_err *mem_err = acpi_hest_get_payload(gdata);
+
+		printk("%s""section_type: memory error\n", newpfx);
+		if (gdata->error_data_length >=
+		    sizeof(struct cper_sec_mem_err_old))
+			cper_print_mem(newpfx, mem_err,
+				       gdata->error_data_length);
+		else
+			goto err_section_too_small;
+	} else if (guid_equal(sec_type, &CPER_SEC_PCIE)) {
+		struct cper_sec_pcie *pcie = acpi_hest_get_payload(gdata);
+
+		printk("%s""section_type: PCIe error\n", newpfx);
+		if (gdata->error_data_length >= sizeof(*pcie))
+			cper_print_pcie(newpfx, pcie, gdata);
+		else
+			goto err_section_too_small;
+#if defined(CONFIG_ARM64) || defined(CONFIG_ARM)
+	} else if (guid_equal(sec_type, &CPER_SEC_PROC_ARM)) {
+		struct cper_sec_proc_arm *arm_err = acpi_hest_get_payload(gdata);
+
+		printk("%ssection_type: ARM processor error\n", newpfx);
+		if (gdata->error_data_length >= sizeof(*arm_err))
+			cper_print_proc_arm(newpfx, arm_err);
+		else
+			goto err_section_too_small;
+#endif
+#if defined(CONFIG_UEFI_CPER_X86)
+	} else if (guid_equal(sec_type, &CPER_SEC_PROC_IA)) {
+		struct cper_sec_proc_ia *ia_err = acpi_hest_get_payload(gdata);
+
+		printk("%ssection_type: IA32/X64 processor error\n", newpfx);
+		if (gdata->error_data_length >= sizeof(*ia_err))
+			cper_print_proc_ia(newpfx, ia_err);
+		else
+			goto err_section_too_small;
+#endif
+	} else {
+		const void *err = acpi_hest_get_payload(gdata);
+
+		printk("%ssection type: unknown, %pUl\n", newpfx, sec_type);
+		printk("%ssection length: %#x\n", newpfx,
+		       gdata->error_data_length);
+		print_hex_dump(newpfx, "", DUMP_PREFIX_OFFSET, 16, 4, err,
+			       gdata->error_data_length, true);
+	}
+
+	return;
+
+err_section_too_small:
+	pr_err(FW_WARN "error section length is too small\n");
+}
+
+void cper_estatus_print(const char *pfx,
+			const struct acpi_hest_generic_status *estatus)
+{
+	struct acpi_hest_generic_data *gdata;
+	int sec_no = 0;
+	char newpfx[64];
+	__u16 severity;
+
+	severity = estatus->error_severity;
+	if (severity == CPER_SEV_CORRECTED)
+		printk("%s%s\n", pfx,
+		       "It has been corrected by h/w "
+		       "and requires no further action");
+	printk("%s""event severity: %s\n", pfx, cper_severity_str(severity));
+	snprintf(newpfx, sizeof(newpfx), "%s ", pfx);
+
+	apei_estatus_for_each_section(estatus, gdata) {
+		cper_estatus_print_section(newpfx, gdata, sec_no);
+		sec_no++;
+	}
+}
+EXPORT_SYMBOL_GPL(cper_estatus_print);
+
+int cper_estatus_check_header(const struct acpi_hest_generic_status *estatus)
+{
+	if (estatus->data_length &&
+	    estatus->data_length < sizeof(struct acpi_hest_generic_data))
+		return -EINVAL;
+	if (estatus->raw_data_length &&
+	    estatus->raw_data_offset < sizeof(*estatus) + estatus->data_length)
+		return -EINVAL;
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(cper_estatus_check_header);
+
+int cper_estatus_check(const struct acpi_hest_generic_status *estatus)
+{
+	struct acpi_hest_generic_data *gdata;
+	unsigned int data_len, gedata_len;
+	int rc;
+
+	rc = cper_estatus_check_header(estatus);
+	if (rc)
+		return rc;
+	data_len = estatus->data_length;
+
+	apei_estatus_for_each_section(estatus, gdata) {
+		gedata_len = acpi_hest_get_error_length(gdata);
+		if (gedata_len > data_len - acpi_hest_get_size(gdata))
+			return -EINVAL;
+		data_len -= acpi_hest_get_record_size(gdata);
+	}
+	if (data_len)
+		return -EINVAL;
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(cper_estatus_check);
diff --git a/drivers/firmware/efi/dev-path-parser.c b/drivers/firmware/efi/dev-path-parser.c
new file mode 100644
index 0000000..85d1834
--- /dev/null
+++ b/drivers/firmware/efi/dev-path-parser.c
@@ -0,0 +1,203 @@
+/*
+ * dev-path-parser.c - EFI Device Path parser
+ * Copyright (C) 2016 Lukas Wunner <lukas@wunner.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License (version 2) as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/acpi.h>
+#include <linux/efi.h>
+#include <linux/pci.h>
+
+struct acpi_hid_uid {
+	struct acpi_device_id hid[2];
+	char uid[11]; /* UINT_MAX + null byte */
+};
+
+static int __init match_acpi_dev(struct device *dev, void *data)
+{
+	struct acpi_hid_uid hid_uid = *(struct acpi_hid_uid *)data;
+	struct acpi_device *adev = to_acpi_device(dev);
+
+	if (acpi_match_device_ids(adev, hid_uid.hid))
+		return 0;
+
+	if (adev->pnp.unique_id)
+		return !strcmp(adev->pnp.unique_id, hid_uid.uid);
+	else
+		return !strcmp("0", hid_uid.uid);
+}
+
+static long __init parse_acpi_path(struct efi_dev_path *node,
+				   struct device *parent, struct device **child)
+{
+	struct acpi_hid_uid hid_uid = {};
+	struct device *phys_dev;
+
+	if (node->length != 12)
+		return -EINVAL;
+
+	sprintf(hid_uid.hid[0].id, "%c%c%c%04X",
+		'A' + ((node->acpi.hid >> 10) & 0x1f) - 1,
+		'A' + ((node->acpi.hid >>  5) & 0x1f) - 1,
+		'A' + ((node->acpi.hid >>  0) & 0x1f) - 1,
+			node->acpi.hid >> 16);
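+	/*
+	 * For example, a compressed HID of 0x0a0341d0 yields 'P', 'N',
+	 * 'P' from the three 5-bit fields (values 16, 14, 16) and the
+	 * device number 0x0a03 from the upper 16 bits, i.e. "PNP0A03".
+	 */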
+	sprintf(hid_uid.uid, "%u", node->acpi.uid);
+
+	*child = bus_find_device(&acpi_bus_type, NULL, &hid_uid,
+				 match_acpi_dev);
+	if (!*child)
+		return -ENODEV;
+
+	phys_dev = acpi_get_first_physical_node(to_acpi_device(*child));
+	if (phys_dev) {
+		get_device(phys_dev);
+		put_device(*child);
+		*child = phys_dev;
+	}
+
+	return 0;
+}
+
+static int __init match_pci_dev(struct device *dev, void *data)
+{
+	unsigned int devfn = *(unsigned int *)data;
+
+	return dev_is_pci(dev) && to_pci_dev(dev)->devfn == devfn;
+}
+
+static long __init parse_pci_path(struct efi_dev_path *node,
+				  struct device *parent, struct device **child)
+{
+	unsigned int devfn;
+
+	if (node->length != 6)
+		return -EINVAL;
+	if (!parent)
+		return -EINVAL;
+
+	devfn = PCI_DEVFN(node->pci.dev, node->pci.fn);
+
+	*child = device_find_child(parent, &devfn, match_pci_dev);
+	if (!*child)
+		return -ENODEV;
+
+	return 0;
+}
+
+/*
+ * Insert parsers for further node types here.
+ *
+ * Each parser takes a pointer to the @node and to the @parent (will be NULL
+ * for the first device path node). If a device corresponding to @node was
+ * found below @parent, its reference count should be incremented and the
+ * device returned in @child.
+ *
+ * The return value should be 0 on success or a negative int on failure.
+ * The special return values 0x01 (EFI_DEV_END_INSTANCE) and 0xFF
+ * (EFI_DEV_END_ENTIRE) signal the end of the device path; only
+ * parse_end_path() is supposed to return these.
+ *
+ * Be sure to validate the node length and contents before commencing the
+ * search for a device.
+ */
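+
+/*
+ * A sketch of what a parser for a hypothetical "foo" node type might
+ * look like (FOO_NODE_LEN, foo_bus_type and match_foo_dev are
+ * placeholders, and efi_get_device_by_path() would need a matching
+ * branch):
+ *
+ *	static long __init parse_foo_path(struct efi_dev_path *node,
+ *					  struct device *parent,
+ *					  struct device **child)
+ *	{
+ *		if (node->length != FOO_NODE_LEN)
+ *			return -EINVAL;
+ *		*child = bus_find_device(&foo_bus_type, NULL, node,
+ *					 match_foo_dev);
+ *		return *child ? 0 : -ENODEV;
+ *	}
+ */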
+
+static long __init parse_end_path(struct efi_dev_path *node,
+				  struct device *parent, struct device **child)
+{
+	if (node->length != 4)
+		return -EINVAL;
+	if (node->sub_type != EFI_DEV_END_INSTANCE &&
+	    node->sub_type != EFI_DEV_END_ENTIRE)
+		return -EINVAL;
+	if (!parent)
+		return -ENODEV;
+
+	*child = get_device(parent);
+	return node->sub_type;
+}
+
+/**
+ * efi_get_device_by_path - find device by EFI Device Path
+ * @node: EFI Device Path
+ * @len: maximum length of EFI Device Path in bytes
+ *
+ * Parse a series of EFI Device Path nodes at @node and find the corresponding
+ * device.  If the device was found, its reference count is incremented and a
+ * pointer to it is returned.  The caller needs to drop the reference with
+ * put_device() after use.  The @node pointer is updated to point to the
+ * location immediately after the "End of Hardware Device Path" node.
+ *
+ * If another Device Path instance follows, @len is decremented by the number
+ * of bytes consumed.  Otherwise @len is set to %0.
+ *
+ * If a Device Path node is malformed or its corresponding device is not found,
+ * @node is updated to point to this offending node and an ERR_PTR is returned.
+ *
+ * If @len is initially %0, the function returns %NULL.  Thus, to iterate over
+ * all instances in a path, the following idiom may be used:
+ *
+ *	while (!IS_ERR_OR_NULL(dev = efi_get_device_by_path(&node, &len))) {
+ *		// do something with dev
+ *		put_device(dev);
+ *	}
+ *	if (IS_ERR(dev))
+ *		// report error
+ *
+ * Devices can only be found if they're already instantiated. Most buses
+ * instantiate devices in the "subsys" initcall level, hence the earliest
+ * initcall level in which this function should be called is "fs".
+ *
+ * Returns the device on success or
+ *	%ERR_PTR(-ENODEV) if no device was found,
+ *	%ERR_PTR(-EINVAL) if a node is malformed or exceeds @len,
+ *	%ERR_PTR(-ENOTSUPP) if support for a node type is not yet implemented.
+ */
+struct device * __init efi_get_device_by_path(struct efi_dev_path **node,
+					      size_t *len)
+{
+	struct device *parent = NULL, *child;
+	long ret = 0;
+
+	if (!*len)
+		return NULL;
+
+	while (!ret) {
+		if (*len < 4 || *len < (*node)->length)
+			ret = -EINVAL;
+		else if ((*node)->type     == EFI_DEV_ACPI &&
+			 (*node)->sub_type == EFI_DEV_BASIC_ACPI)
+			ret = parse_acpi_path(*node, parent, &child);
+		else if ((*node)->type     == EFI_DEV_HW &&
+			 (*node)->sub_type == EFI_DEV_PCI)
+			ret = parse_pci_path(*node, parent, &child);
+		else if (((*node)->type    == EFI_DEV_END_PATH ||
+			  (*node)->type    == EFI_DEV_END_PATH2))
+			ret = parse_end_path(*node, parent, &child);
+		else
+			ret = -ENOTSUPP;
+
+		put_device(parent);
+		if (ret < 0)
+			return ERR_PTR(ret);
+
+		parent = child;
+		*len  -= (*node)->length;
+		*node  = (void *)*node + (*node)->length;
+	}
+
+	if (ret == EFI_DEV_END_ENTIRE)
+		*len = 0;
+
+	return child;
+}
diff --git a/drivers/firmware/efi/efi-bgrt.c b/drivers/firmware/efi/efi-bgrt.c
new file mode 100644
index 0000000..b22ccfb
--- /dev/null
+++ b/drivers/firmware/efi/efi-bgrt.c
@@ -0,0 +1,91 @@
+/*
+ * Copyright 2012 Intel Corporation
+ * Author: Josh Triplett <josh@joshtriplett.org>
+ *
+ * Based on the bgrt driver:
+ * Copyright 2012 Red Hat, Inc <mjg@redhat.com>
+ * Author: Matthew Garrett
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/acpi.h>
+#include <linux/efi.h>
+#include <linux/efi-bgrt.h>
+
+struct acpi_table_bgrt bgrt_tab;
+size_t bgrt_image_size;
+
+struct bmp_header {
+	u16 id;
+	u32 size;
+} __packed;
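+
+/*
+ * The first two bytes of a BMP file are 'B' (0x42) and 'M' (0x4d),
+ * which read as a little-endian u16 give the 0x4d42 magic checked in
+ * efi_bgrt_init() below.
+ */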
+
+void __init efi_bgrt_init(struct acpi_table_header *table)
+{
+	void *image;
+	struct bmp_header bmp_header;
+	struct acpi_table_bgrt *bgrt = &bgrt_tab;
+
+	if (acpi_disabled)
+		return;
+
+	if (!efi_enabled(EFI_MEMMAP))
+		return;
+
+	if (table->length < sizeof(bgrt_tab)) {
+		pr_notice("Ignoring BGRT: invalid length %u (expected %zu)\n",
+		       table->length, sizeof(bgrt_tab));
+		return;
+	}
+	*bgrt = *(struct acpi_table_bgrt *)table;
+	if (bgrt->version != 1) {
+		pr_notice("Ignoring BGRT: invalid version %u (expected 1)\n",
+		       bgrt->version);
+		goto out;
+	}
+	if (bgrt->status & 0xfe) {
+		pr_notice("Ignoring BGRT: reserved status bits are non-zero %u\n",
+		       bgrt->status);
+		goto out;
+	}
+	if (bgrt->image_type != 0) {
+		pr_notice("Ignoring BGRT: invalid image type %u (expected 0)\n",
+		       bgrt->image_type);
+		goto out;
+	}
+	if (!bgrt->image_address) {
+		pr_notice("Ignoring BGRT: null image address\n");
+		goto out;
+	}
+
+	if (efi_mem_type(bgrt->image_address) != EFI_BOOT_SERVICES_DATA) {
+		pr_notice("Ignoring BGRT: invalid image address\n");
+		goto out;
+	}
+	image = early_memremap(bgrt->image_address, sizeof(bmp_header));
+	if (!image) {
+		pr_notice("Ignoring BGRT: failed to map image header memory\n");
+		goto out;
+	}
+
+	memcpy(&bmp_header, image, sizeof(bmp_header));
+	early_memunmap(image, sizeof(bmp_header));
+	if (bmp_header.id != 0x4d42) {
+		pr_notice("Ignoring BGRT: Incorrect BMP magic number 0x%x (expected 0x4d42)\n",
+			bmp_header.id);
+		goto out;
+	}
+	bgrt_image_size = bmp_header.size;
+	efi_mem_reserve(bgrt->image_address, bgrt_image_size);
+
+	return;
+out:
+	memset(bgrt, 0, sizeof(bgrt_tab));
+}
diff --git a/drivers/firmware/efi/efi-pstore.c b/drivers/firmware/efi/efi-pstore.c
new file mode 100644
index 0000000..cfe87b4
--- /dev/null
+++ b/drivers/firmware/efi/efi-pstore.c
@@ -0,0 +1,399 @@
+#include <linux/efi.h>
+#include <linux/module.h>
+#include <linux/pstore.h>
+#include <linux/slab.h>
+#include <linux/ucs2_string.h>
+
+#define DUMP_NAME_LEN 66
+
+static bool efivars_pstore_disable =
+	IS_ENABLED(CONFIG_EFI_VARS_PSTORE_DEFAULT_DISABLE);
+
+module_param_named(pstore_disable, efivars_pstore_disable, bool, 0644);
+
+#define PSTORE_EFI_ATTRIBUTES \
+	(EFI_VARIABLE_NON_VOLATILE | \
+	 EFI_VARIABLE_BOOTSERVICE_ACCESS | \
+	 EFI_VARIABLE_RUNTIME_ACCESS)
+
+static int efi_pstore_open(struct pstore_info *psi)
+{
+	psi->data = NULL;
+	return 0;
+}
+
+static int efi_pstore_close(struct pstore_info *psi)
+{
+	psi->data = NULL;
+	return 0;
+}
+
+static inline u64 generic_id(u64 timestamp, unsigned int part, int count)
+{
+	return (timestamp * 100 + part) * 1000 + count;
+}
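+
+/*
+ * For example, timestamp 1500000000, part 2 and count 3 yield
+ * (1500000000 * 100 + 2) * 1000 + 3 == 150000000002003, so the
+ * timestamp, part and count can all be recovered from the id.
+ */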
+
+static int efi_pstore_read_func(struct efivar_entry *entry,
+				struct pstore_record *record)
+{
+	efi_guid_t vendor = LINUX_EFI_CRASH_GUID;
+	char name[DUMP_NAME_LEN], data_type;
+	int i;
+	int cnt;
+	unsigned int part;
+	unsigned long size;
+	u64 time;
+
+	if (efi_guidcmp(entry->var.VendorGuid, vendor))
+		return 0;
+
+	for (i = 0; i < DUMP_NAME_LEN; i++)
+		name[i] = entry->var.VariableName[i];
+
+	if (sscanf(name, "dump-type%u-%u-%d-%llu-%c",
+		   &record->type, &part, &cnt, &time, &data_type) == 5) {
+		record->id = generic_id(time, part, cnt);
+		record->part = part;
+		record->count = cnt;
+		record->time.tv_sec = time;
+		record->time.tv_nsec = 0;
+		if (data_type == 'C')
+			record->compressed = true;
+		else
+			record->compressed = false;
+		record->ecc_notice_size = 0;
+	} else if (sscanf(name, "dump-type%u-%u-%d-%llu",
+		   &record->type, &part, &cnt, &time) == 4) {
+		record->id = generic_id(time, part, cnt);
+		record->part = part;
+		record->count = cnt;
+		record->time.tv_sec = time;
+		record->time.tv_nsec = 0;
+		record->compressed = false;
+		record->ecc_notice_size = 0;
+	} else if (sscanf(name, "dump-type%u-%u-%llu",
+			  &record->type, &part, &time) == 3) {
+		/*
+		 * Check whether an entry in the old format, which
+		 * doesn't support holding multiple logs, remains.
+		 */
+		record->id = generic_id(time, part, 0);
+		record->part = part;
+		record->count = 0;
+		record->time.tv_sec = time;
+		record->time.tv_nsec = 0;
+		record->compressed = false;
+		record->ecc_notice_size = 0;
+	} else
+		return 0;
+
+	entry->var.DataSize = 1024;
+	__efivar_entry_get(entry, &entry->var.Attributes,
+			   &entry->var.DataSize, entry->var.Data);
+	size = entry->var.DataSize;
+	memcpy(record->buf, entry->var.Data,
+	       (size_t)min_t(unsigned long, EFIVARS_DATA_SIZE_MAX, size));
+
+	return size;
+}
+
+/**
+ * efi_pstore_scan_sysfs_enter
+ * @pos: scanning entry
+ * @next: next entry
+ * @head: list head
+ */
+static void efi_pstore_scan_sysfs_enter(struct efivar_entry *pos,
+					struct efivar_entry *next,
+					struct list_head *head)
+{
+	pos->scanning = true;
+	if (&next->list != head)
+		next->scanning = true;
+}
+
+/**
+ * __efi_pstore_scan_sysfs_exit
+ * @entry: deleting entry
+ * @turn_off_scanning: whether the scanning flag should be cleared
+ */
+static inline int __efi_pstore_scan_sysfs_exit(struct efivar_entry *entry,
+						bool turn_off_scanning)
+{
+	if (entry->deleting) {
+		list_del(&entry->list);
+		efivar_entry_iter_end();
+		efivar_unregister(entry);
+		if (efivar_entry_iter_begin())
+			return -EINTR;
+	} else if (turn_off_scanning)
+		entry->scanning = false;
+
+	return 0;
+}
+
+/**
+ * efi_pstore_scan_sysfs_exit
+ * @pos: scanning entry
+ * @next: next entry
+ * @head: list head
+ * @stop: whether scanning should stop after this entry
+ */
+static int efi_pstore_scan_sysfs_exit(struct efivar_entry *pos,
+				       struct efivar_entry *next,
+				       struct list_head *head, bool stop)
+{
+	int ret = __efi_pstore_scan_sysfs_exit(pos, true);
+
+	if (ret)
+		return ret;
+
+	if (stop)
+		ret = __efi_pstore_scan_sysfs_exit(next, &next->list != head);
+	return ret;
+}
+
+/**
+ * efi_pstore_sysfs_entry_iter
+ *
+ * @record: pstore record to pass to callback
+ *
+ * You MUST call efivar_entry_iter_begin() before this function, and
+ * efivar_entry_iter_end() afterwards.
+ *
+ */
+static int efi_pstore_sysfs_entry_iter(struct pstore_record *record)
+{
+	struct efivar_entry **pos = (struct efivar_entry **)&record->psi->data;
+	struct efivar_entry *entry, *n;
+	struct list_head *head = &efivar_sysfs_list;
+	int size = 0;
+	int ret;
+
+	if (!*pos) {
+		list_for_each_entry_safe(entry, n, head, list) {
+			efi_pstore_scan_sysfs_enter(entry, n, head);
+
+			size = efi_pstore_read_func(entry, record);
+			ret = efi_pstore_scan_sysfs_exit(entry, n, head,
+							 size < 0);
+			if (ret)
+				return ret;
+			if (size)
+				break;
+		}
+		*pos = n;
+		return size;
+	}
+
+	list_for_each_entry_safe_from((*pos), n, head, list) {
+		efi_pstore_scan_sysfs_enter((*pos), n, head);
+
+		size = efi_pstore_read_func((*pos), record);
+		ret = efi_pstore_scan_sysfs_exit((*pos), n, head, size < 0);
+		if (ret)
+			return ret;
+		if (size)
+			break;
+	}
+	*pos = n;
+	return size;
+}
+
+/**
+ * efi_pstore_read
+ *
+ * This function returns the size of an NVRAM entry logged via
+ * efi_pstore_write(). The meaning and behavior of efi_pstore/pstore
+ * are as follows.
+ *
+ * size > 0:  Got the data of an entry logged via efi_pstore_write()
+ *            successfully, and the pstore filesystem will continue
+ *            reading subsequent entries.
+ * size == 0: The entry was not logged via efi_pstore_write(), and the
+ *            efi_pstore driver will continue reading subsequent entries.
+ * size < 0:  Failed to get the data of an entry logged via
+ *            efi_pstore_write(), and pstore will stop reading entries.
+ */
+static ssize_t efi_pstore_read(struct pstore_record *record)
+{
+	ssize_t size;
+
+	record->buf = kzalloc(EFIVARS_DATA_SIZE_MAX, GFP_KERNEL);
+	if (!record->buf)
+		return -ENOMEM;
+
+	if (efivar_entry_iter_begin()) {
+		size = -EINTR;
+		goto out;
+	}
+	size = efi_pstore_sysfs_entry_iter(record);
+	efivar_entry_iter_end();
+
+out:
+	if (size <= 0) {
+		kfree(record->buf);
+		record->buf = NULL;
+	}
+	return size;
+}
+
+static int efi_pstore_write(struct pstore_record *record)
+{
+	char name[DUMP_NAME_LEN];
+	efi_char16_t efi_name[DUMP_NAME_LEN];
+	efi_guid_t vendor = LINUX_EFI_CRASH_GUID;
+	int i, ret = 0;
+
+	record->id = generic_id(record->time.tv_sec, record->part,
+				record->count);
+
+	/* Since we copy the entire length of name, make sure it is wiped. */
+	memset(name, 0, sizeof(name));
+
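+	/*
+	 * e.g. type 0, part 1, count 0, timestamp 1500000000, compressed,
+	 * yields the variable name "dump-type0-1-0-1500000000-C"
+	 * (illustrative values).
+	 */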
+	snprintf(name, sizeof(name), "dump-type%u-%u-%d-%lld-%c",
+		 record->type, record->part, record->count,
+		 (long long)record->time.tv_sec,
+		 record->compressed ? 'C' : 'D');
+
+	for (i = 0; i < DUMP_NAME_LEN; i++)
+		efi_name[i] = name[i];
+
+	ret = efivar_entry_set_safe(efi_name, vendor, PSTORE_EFI_ATTRIBUTES,
+			      !pstore_cannot_block_path(record->reason),
+			      record->size, record->psi->buf);
+
+	if (record->reason == KMSG_DUMP_OOPS)
+		efivar_run_worker();
+
+	return ret;
+}
+
+/*
+ * Clean up an entry with the same name
+ */
+static int efi_pstore_erase_func(struct efivar_entry *entry, void *data)
+{
+	efi_char16_t *efi_name = data;
+	efi_guid_t vendor = LINUX_EFI_CRASH_GUID;
+	unsigned long ucs2_len = ucs2_strlen(efi_name);
+
+	if (efi_guidcmp(entry->var.VendorGuid, vendor))
+		return 0;
+
+	if (ucs2_strncmp(entry->var.VariableName, efi_name, (size_t)ucs2_len))
+		return 0;
+
+	if (entry->scanning) {
+		/*
+		 * Skip deletion because this entry will be deleted
+		 * after scanning is completed.
+		 */
+		entry->deleting = true;
+	} else
+		list_del(&entry->list);
+
+	/* found */
+	__efivar_entry_delete(entry);
+
+	return 1;
+}
+
+static int efi_pstore_erase_name(const char *name)
+{
+	struct efivar_entry *entry = NULL;
+	efi_char16_t efi_name[DUMP_NAME_LEN];
+	int found, i;
+
+	for (i = 0; i < DUMP_NAME_LEN; i++) {
+		efi_name[i] = name[i];
+		if (name[i] == '\0')
+			break;
+	}
+
+	if (efivar_entry_iter_begin())
+		return -EINTR;
+
+	found = __efivar_entry_iter(efi_pstore_erase_func, &efivar_sysfs_list,
+				    efi_name, &entry);
+	efivar_entry_iter_end();
+
+	if (found && !entry->scanning)
+		efivar_unregister(entry);
+
+	return found ? 0 : -ENOENT;
+}
+
+static int efi_pstore_erase(struct pstore_record *record)
+{
+	char name[DUMP_NAME_LEN];
+	int ret;
+
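+	/*
+	 * The prefix match in efi_pstore_erase_func() covers both the plain
+	 * name and the variant with the trailing compression flag; fall
+	 * back to the old count-less format if nothing matched.
+	 */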
+	snprintf(name, sizeof(name), "dump-type%u-%u-%d-%lld",
+		 record->type, record->part, record->count,
+		 (long long)record->time.tv_sec);
+	ret = efi_pstore_erase_name(name);
+	if (ret != -ENOENT)
+		return ret;
+
+	snprintf(name, sizeof(name), "dump-type%u-%u-%lld",
+		record->type, record->part, (long long)record->time.tv_sec);
+	ret = efi_pstore_erase_name(name);
+
+	return ret;
+}
+
+static struct pstore_info efi_pstore_info = {
+	.owner		= THIS_MODULE,
+	.name		= "efi",
+	.flags		= PSTORE_FLAGS_DMESG,
+	.open		= efi_pstore_open,
+	.close		= efi_pstore_close,
+	.read		= efi_pstore_read,
+	.write		= efi_pstore_write,
+	.erase		= efi_pstore_erase,
+};
+
+static __init int efivars_pstore_init(void)
+{
+	if (!efi_enabled(EFI_RUNTIME_SERVICES))
+		return 0;
+
+	if (!efivars_kobject())
+		return 0;
+
+	if (efivars_pstore_disable)
+		return 0;
+
+	efi_pstore_info.buf = kmalloc(4096, GFP_KERNEL);
+	if (!efi_pstore_info.buf)
+		return -ENOMEM;
+
+	efi_pstore_info.bufsize = 1024;
+	spin_lock_init(&efi_pstore_info.buf_lock);
+
+	if (pstore_register(&efi_pstore_info)) {
+		kfree(efi_pstore_info.buf);
+		efi_pstore_info.buf = NULL;
+		efi_pstore_info.bufsize = 0;
+	}
+
+	return 0;
+}
+
+static __exit void efivars_pstore_exit(void)
+{
+	if (!efi_pstore_info.bufsize)
+		return;
+
+	pstore_unregister(&efi_pstore_info);
+	kfree(efi_pstore_info.buf);
+	efi_pstore_info.buf = NULL;
+	efi_pstore_info.bufsize = 0;
+}
+
+module_init(efivars_pstore_init);
+module_exit(efivars_pstore_exit);
+
+MODULE_DESCRIPTION("EFI variable backend for pstore");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:efivars");
diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c
new file mode 100644
index 0000000..2a29dd9
--- /dev/null
+++ b/drivers/firmware/efi/efi.c
@@ -0,0 +1,982 @@
+/*
+ * efi.c - EFI subsystem
+ *
+ * Copyright (C) 2001,2003,2004 Dell <Matt_Domsch@dell.com>
+ * Copyright (C) 2004 Intel Corporation <matthew.e.tolentino@intel.com>
+ * Copyright (C) 2013 Tom Gundersen <teg@jklm.no>
+ *
+ * This code registers /sys/firmware/efi{,/efivars} when EFI is supported,
+ * allowing the efivarfs to be mounted or the efivars module to be loaded.
+ * The existence of /sys/firmware/efi may also be used by userspace to
+ * determine that the system supports EFI.
+ *
+ * This file is released under the GPLv2.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/kobject.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/device.h>
+#include <linux/efi.h>
+#include <linux/of.h>
+#include <linux/of_fdt.h>
+#include <linux/io.h>
+#include <linux/kexec.h>
+#include <linux/platform_device.h>
+#include <linux/random.h>
+#include <linux/reboot.h>
+#include <linux/slab.h>
+#include <linux/acpi.h>
+#include <linux/ucs2_string.h>
+#include <linux/memblock.h>
+
+#include <asm/early_ioremap.h>
+
+struct efi __read_mostly efi = {
+	.mps			= EFI_INVALID_TABLE_ADDR,
+	.acpi			= EFI_INVALID_TABLE_ADDR,
+	.acpi20			= EFI_INVALID_TABLE_ADDR,
+	.smbios			= EFI_INVALID_TABLE_ADDR,
+	.smbios3		= EFI_INVALID_TABLE_ADDR,
+	.sal_systab		= EFI_INVALID_TABLE_ADDR,
+	.boot_info		= EFI_INVALID_TABLE_ADDR,
+	.hcdp			= EFI_INVALID_TABLE_ADDR,
+	.uga			= EFI_INVALID_TABLE_ADDR,
+	.uv_systab		= EFI_INVALID_TABLE_ADDR,
+	.fw_vendor		= EFI_INVALID_TABLE_ADDR,
+	.runtime		= EFI_INVALID_TABLE_ADDR,
+	.config_table		= EFI_INVALID_TABLE_ADDR,
+	.esrt			= EFI_INVALID_TABLE_ADDR,
+	.properties_table	= EFI_INVALID_TABLE_ADDR,
+	.mem_attr_table		= EFI_INVALID_TABLE_ADDR,
+	.rng_seed		= EFI_INVALID_TABLE_ADDR,
+	.tpm_log		= EFI_INVALID_TABLE_ADDR
+};
+EXPORT_SYMBOL(efi);
+
+static unsigned long *efi_tables[] = {
+	&efi.mps,
+	&efi.acpi,
+	&efi.acpi20,
+	&efi.smbios,
+	&efi.smbios3,
+	&efi.sal_systab,
+	&efi.boot_info,
+	&efi.hcdp,
+	&efi.uga,
+	&efi.uv_systab,
+	&efi.fw_vendor,
+	&efi.runtime,
+	&efi.config_table,
+	&efi.esrt,
+	&efi.properties_table,
+	&efi.mem_attr_table,
+};
+
+struct mm_struct efi_mm = {
+	.mm_rb			= RB_ROOT,
+	.mm_users		= ATOMIC_INIT(2),
+	.mm_count		= ATOMIC_INIT(1),
+	.mmap_sem		= __RWSEM_INITIALIZER(efi_mm.mmap_sem),
+	.page_table_lock	= __SPIN_LOCK_UNLOCKED(efi_mm.page_table_lock),
+	.mmlist			= LIST_HEAD_INIT(efi_mm.mmlist),
+	.cpu_bitmap		= { [BITS_TO_LONGS(NR_CPUS)] = 0},
+};
+
+struct workqueue_struct *efi_rts_wq;
+
+static bool disable_runtime;
+static int __init setup_noefi(char *arg)
+{
+	disable_runtime = true;
+	return 0;
+}
+early_param("noefi", setup_noefi);
+
+bool efi_runtime_disabled(void)
+{
+	return disable_runtime;
+}
+
+static int __init parse_efi_cmdline(char *str)
+{
+	if (!str) {
+		pr_warn("need at least one option\n");
+		return -EINVAL;
+	}
+
+	if (parse_option_str(str, "debug"))
+		set_bit(EFI_DBG, &efi.flags);
+
+	if (parse_option_str(str, "noruntime"))
+		disable_runtime = true;
+
+	return 0;
+}
+early_param("efi", parse_efi_cmdline);
+
+struct kobject *efi_kobj;
+
+/*
+ * Let's not leave out the systab information that snuck into
+ * the efivars driver.
+ * Note: do not add more fields to the systab sysfs file, as that
+ * breaks the sysfs one-value-per-file rule!
+ */
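+/*
+ * Reading /sys/firmware/efi/systab produces one "NAME=0xADDR" line per
+ * table the firmware provided, e.g. (illustrative addresses only):
+ *
+ *	ACPI20=0x7ff00000
+ *	SMBIOS3=0x7f9c2000
+ */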
+static ssize_t systab_show(struct kobject *kobj,
+			   struct kobj_attribute *attr, char *buf)
+{
+	char *str = buf;
+
+	if (!kobj || !buf)
+		return -EINVAL;
+
+	if (efi.mps != EFI_INVALID_TABLE_ADDR)
+		str += sprintf(str, "MPS=0x%lx\n", efi.mps);
+	if (efi.acpi20 != EFI_INVALID_TABLE_ADDR)
+		str += sprintf(str, "ACPI20=0x%lx\n", efi.acpi20);
+	if (efi.acpi != EFI_INVALID_TABLE_ADDR)
+		str += sprintf(str, "ACPI=0x%lx\n", efi.acpi);
+	/*
+	 * If both SMBIOS and SMBIOS3 entry points are implemented, the
+	 * SMBIOS3 entry point shall be preferred, so we list it first to
+	 * let applications stop parsing after the first match.
+	 */
+	if (efi.smbios3 != EFI_INVALID_TABLE_ADDR)
+		str += sprintf(str, "SMBIOS3=0x%lx\n", efi.smbios3);
+	if (efi.smbios != EFI_INVALID_TABLE_ADDR)
+		str += sprintf(str, "SMBIOS=0x%lx\n", efi.smbios);
+	if (efi.hcdp != EFI_INVALID_TABLE_ADDR)
+		str += sprintf(str, "HCDP=0x%lx\n", efi.hcdp);
+	if (efi.boot_info != EFI_INVALID_TABLE_ADDR)
+		str += sprintf(str, "BOOTINFO=0x%lx\n", efi.boot_info);
+	if (efi.uga != EFI_INVALID_TABLE_ADDR)
+		str += sprintf(str, "UGA=0x%lx\n", efi.uga);
+
+	return str - buf;
+}
+
+static struct kobj_attribute efi_attr_systab = __ATTR_RO_MODE(systab, 0400);
+
+#define EFI_FIELD(var) efi.var
+
+#define EFI_ATTR_SHOW(name) \
+static ssize_t name##_show(struct kobject *kobj, \
+				struct kobj_attribute *attr, char *buf) \
+{ \
+	return sprintf(buf, "0x%lx\n", EFI_FIELD(name)); \
+}
+
+EFI_ATTR_SHOW(fw_vendor);
+EFI_ATTR_SHOW(runtime);
+EFI_ATTR_SHOW(config_table);
+
+static ssize_t fw_platform_size_show(struct kobject *kobj,
+				     struct kobj_attribute *attr, char *buf)
+{
+	return sprintf(buf, "%d\n", efi_enabled(EFI_64BIT) ? 64 : 32);
+}
+
+static struct kobj_attribute efi_attr_fw_vendor = __ATTR_RO(fw_vendor);
+static struct kobj_attribute efi_attr_runtime = __ATTR_RO(runtime);
+static struct kobj_attribute efi_attr_config_table = __ATTR_RO(config_table);
+static struct kobj_attribute efi_attr_fw_platform_size =
+	__ATTR_RO(fw_platform_size);
+
+static struct attribute *efi_subsys_attrs[] = {
+	&efi_attr_systab.attr,
+	&efi_attr_fw_vendor.attr,
+	&efi_attr_runtime.attr,
+	&efi_attr_config_table.attr,
+	&efi_attr_fw_platform_size.attr,
+	NULL,
+};
+
+static umode_t efi_attr_is_visible(struct kobject *kobj,
+				   struct attribute *attr, int n)
+{
+	if (attr == &efi_attr_fw_vendor.attr) {
+		if (efi_enabled(EFI_PARAVIRT) ||
+				efi.fw_vendor == EFI_INVALID_TABLE_ADDR)
+			return 0;
+	} else if (attr == &efi_attr_runtime.attr) {
+		if (efi.runtime == EFI_INVALID_TABLE_ADDR)
+			return 0;
+	} else if (attr == &efi_attr_config_table.attr) {
+		if (efi.config_table == EFI_INVALID_TABLE_ADDR)
+			return 0;
+	}
+
+	return attr->mode;
+}
+
+static const struct attribute_group efi_subsys_attr_group = {
+	.attrs = efi_subsys_attrs,
+	.is_visible = efi_attr_is_visible,
+};
+
+static struct efivars generic_efivars;
+static struct efivar_operations generic_ops;
+
+static int generic_ops_register(void)
+{
+	generic_ops.get_variable = efi.get_variable;
+	generic_ops.set_variable = efi.set_variable;
+	generic_ops.set_variable_nonblocking = efi.set_variable_nonblocking;
+	generic_ops.get_next_variable = efi.get_next_variable;
+	generic_ops.query_variable_store = efi_query_variable_store;
+
+	return efivars_register(&generic_efivars, &generic_ops, efi_kobj);
+}
+
+static void generic_ops_unregister(void)
+{
+	efivars_unregister(&generic_efivars);
+}
+
+#if IS_ENABLED(CONFIG_ACPI)
+#define EFIVAR_SSDT_NAME_MAX	16
+static char efivar_ssdt[EFIVAR_SSDT_NAME_MAX] __initdata;
+static int __init efivar_ssdt_setup(char *str)
+{
+	if (strlen(str) < sizeof(efivar_ssdt))
+		memcpy(efivar_ssdt, str, strlen(str));
+	else
+		pr_warn("efivar_ssdt: name too long: %s\n", str);
+	return 0;
+}
+__setup("efivar_ssdt=", efivar_ssdt_setup);
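+/*
+ * e.g. booting with efivar_ssdt=dsdtfix loads the contents of every EFI
+ * variable named "dsdtfix" (under any vendor GUID) as an ACPI SSDT via
+ * acpi_load_table(); "dsdtfix" is an illustrative name.
+ */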
+
+static __init int efivar_ssdt_iter(efi_char16_t *name, efi_guid_t vendor,
+				   unsigned long name_size, void *data)
+{
+	struct efivar_entry *entry;
+	struct list_head *list = data;
+	char utf8_name[EFIVAR_SSDT_NAME_MAX];
+	int limit = min_t(unsigned long, EFIVAR_SSDT_NAME_MAX, name_size);
+
+	ucs2_as_utf8(utf8_name, name, limit - 1);
+	if (strncmp(utf8_name, efivar_ssdt, limit) != 0)
+		return 0;
+
+	entry = kmalloc(sizeof(*entry), GFP_KERNEL);
+	if (!entry)
+		return 0;
+
+	memcpy(entry->var.VariableName, name, name_size);
+	memcpy(&entry->var.VendorGuid, &vendor, sizeof(efi_guid_t));
+
+	efivar_entry_add(entry, list);
+
+	return 0;
+}
+
+static __init int efivar_ssdt_load(void)
+{
+	LIST_HEAD(entries);
+	struct efivar_entry *entry, *aux;
+	unsigned long size;
+	void *data;
+	int ret;
+
+	ret = efivar_init(efivar_ssdt_iter, &entries, true, &entries);
+
+	list_for_each_entry_safe(entry, aux, &entries, list) {
+		pr_info("loading SSDT from variable %s-%pUl\n", efivar_ssdt,
+			&entry->var.VendorGuid);
+
+		list_del(&entry->list);
+
+		ret = efivar_entry_size(entry, &size);
+		if (ret) {
+			pr_err("failed to get var size\n");
+			goto free_entry;
+		}
+
+		data = kmalloc(size, GFP_KERNEL);
+		if (!data) {
+			ret = -ENOMEM;
+			goto free_entry;
+		}
+
+		ret = efivar_entry_get(entry, NULL, &size, data);
+		if (ret) {
+			pr_err("failed to get var data\n");
+			goto free_data;
+		}
+
+		ret = acpi_load_table(data);
+		if (ret) {
+			pr_err("failed to load table: %d\n", ret);
+			goto free_data;
+		}
+
+		goto free_entry;
+
+free_data:
+		kfree(data);
+
+free_entry:
+		kfree(entry);
+	}
+
+	return ret;
+}
+#else
+static inline int efivar_ssdt_load(void) { return 0; }
+#endif
+
+/*
+ * We register the efi subsystem with the firmware subsystem and the
+ * efivars subsystem with the efi subsystem, if the system was booted with
+ * EFI.
+ */
+static int __init efisubsys_init(void)
+{
+	int error;
+
+	if (!efi_enabled(EFI_BOOT))
+		return 0;
+
+	/*
+	 * Since we process only one efi_runtime_service() at a time, an
+	 * ordered workqueue (which creates only one execution context)
+	 * should suffice for all our needs.
+	 */
+	efi_rts_wq = alloc_ordered_workqueue("efi_rts_wq", 0);
+	if (!efi_rts_wq) {
+		pr_err("Creating efi_rts_wq failed, EFI runtime services disabled.\n");
+		clear_bit(EFI_RUNTIME_SERVICES, &efi.flags);
+		return 0;
+	}
+
+	/* We register the efi directory at /sys/firmware/efi */
+	efi_kobj = kobject_create_and_add("efi", firmware_kobj);
+	if (!efi_kobj) {
+		pr_err("efi: Firmware registration failed.\n");
+		return -ENOMEM;
+	}
+
+	error = generic_ops_register();
+	if (error)
+		goto err_put;
+
+	if (efi_enabled(EFI_RUNTIME_SERVICES))
+		efivar_ssdt_load();
+
+	error = sysfs_create_group(efi_kobj, &efi_subsys_attr_group);
+	if (error) {
+		pr_err("efi: Sysfs attribute export failed with error %d.\n",
+		       error);
+		goto err_unregister;
+	}
+
+	error = efi_runtime_map_init(efi_kobj);
+	if (error)
+		goto err_remove_group;
+
+	/* and the standard mountpoint for efivarfs */
+	error = sysfs_create_mount_point(efi_kobj, "efivars");
+	if (error) {
+		pr_err("efivars: Subsystem registration failed.\n");
+		goto err_remove_group;
+	}
+
+	return 0;
+
+err_remove_group:
+	sysfs_remove_group(efi_kobj, &efi_subsys_attr_group);
+err_unregister:
+	generic_ops_unregister();
+err_put:
+	kobject_put(efi_kobj);
+	return error;
+}
+
+subsys_initcall(efisubsys_init);
+
+/*
+ * Find the efi memory descriptor for a given physical address.  Given a
+ * physical address, determine if it exists within an EFI Memory Map entry,
+ * and if so, populate the supplied memory descriptor with the appropriate
+ * data.
+ */
+int efi_mem_desc_lookup(u64 phys_addr, efi_memory_desc_t *out_md)
+{
+	efi_memory_desc_t *md;
+
+	if (!efi_enabled(EFI_MEMMAP)) {
+		pr_err_once("EFI_MEMMAP is not enabled.\n");
+		return -EINVAL;
+	}
+
+	if (!out_md) {
+		pr_err_once("out_md is null.\n");
+		return -EINVAL;
+	}
+
+	for_each_efi_memory_desc(md) {
+		u64 size;
+		u64 end;
+
+		size = md->num_pages << EFI_PAGE_SHIFT;
+		end = md->phys_addr + size;
+		if (phys_addr >= md->phys_addr && phys_addr < end) {
+			memcpy(out_md, md, sizeof(*out_md));
+			return 0;
+		}
+	}
+	return -ENOENT;
+}
+
+/*
+ * Calculate the highest address of an efi memory descriptor.
+ */
+u64 __init efi_mem_desc_end(efi_memory_desc_t *md)
+{
+	u64 size = md->num_pages << EFI_PAGE_SHIFT;
+	u64 end = md->phys_addr + size;
+	return end;
+}
+
+void __init __weak efi_arch_mem_reserve(phys_addr_t addr, u64 size) {}
+
+/**
+ * efi_mem_reserve - Reserve an EFI memory region
+ * @addr: Physical address to reserve
+ * @size: Size of reservation
+ *
+ * Mark a region as reserved from general kernel allocation and
+ * prevent it from being released by efi_free_boot_services().
+ *
+ * This function should be called by drivers once they've parsed EFI
+ * configuration tables to figure out where their data lives, e.g.
+ * efi_esrt_init().
+ */
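+/*
+ * Illustrative call site (see efi_bgrt_init() above):
+ *
+ *	efi_mem_reserve(bgrt->image_address, bgrt_image_size);
+ */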
+void __init efi_mem_reserve(phys_addr_t addr, u64 size)
+{
+	if (!memblock_is_region_reserved(addr, size))
+		memblock_reserve(addr, size);
+
+	/*
+	 * Some architectures (x86) reserve all boot services ranges
+	 * until efi_free_boot_services() because of buggy firmware
+	 * implementations. This means the above memblock_reserve() is
+	 * superfluous on x86, and what it instead needs to do is ensure
+	 * that the @addr, @size range is not freed.
+	 */
+	efi_arch_mem_reserve(addr, size);
+}
+
+static __initdata efi_config_table_type_t common_tables[] = {
+	{ACPI_20_TABLE_GUID, "ACPI 2.0", &efi.acpi20},
+	{ACPI_TABLE_GUID, "ACPI", &efi.acpi},
+	{HCDP_TABLE_GUID, "HCDP", &efi.hcdp},
+	{MPS_TABLE_GUID, "MPS", &efi.mps},
+	{SAL_SYSTEM_TABLE_GUID, "SALsystab", &efi.sal_systab},
+	{SMBIOS_TABLE_GUID, "SMBIOS", &efi.smbios},
+	{SMBIOS3_TABLE_GUID, "SMBIOS 3.0", &efi.smbios3},
+	{UGA_IO_PROTOCOL_GUID, "UGA", &efi.uga},
+	{EFI_SYSTEM_RESOURCE_TABLE_GUID, "ESRT", &efi.esrt},
+	{EFI_PROPERTIES_TABLE_GUID, "PROP", &efi.properties_table},
+	{EFI_MEMORY_ATTRIBUTES_TABLE_GUID, "MEMATTR", &efi.mem_attr_table},
+	{LINUX_EFI_RANDOM_SEED_TABLE_GUID, "RNG", &efi.rng_seed},
+	{LINUX_EFI_TPM_EVENT_LOG_GUID, "TPMEventLog", &efi.tpm_log},
+	{NULL_GUID, NULL, NULL},
+};
+
+static __init int match_config_table(efi_guid_t *guid,
+				     unsigned long table,
+				     efi_config_table_type_t *table_types)
+{
+	int i;
+
+	if (table_types) {
+		for (i = 0; efi_guidcmp(table_types[i].guid, NULL_GUID); i++) {
+			if (!efi_guidcmp(*guid, table_types[i].guid)) {
+				*(table_types[i].ptr) = table;
+				if (table_types[i].name)
+					pr_cont(" %s=0x%lx ",
+						table_types[i].name, table);
+				return 1;
+			}
+		}
+	}
+
+	return 0;
+}
+
+int __init efi_config_parse_tables(void *config_tables, int count, int sz,
+				   efi_config_table_type_t *arch_tables)
+{
+	void *tablep;
+	int i;
+
+	tablep = config_tables;
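+	/*
+	 * pr_info("") below emits the log prefix once; each matched table
+	 * is then appended to the same line with pr_cont() in
+	 * match_config_table(), and pr_cont("\n") terminates it.
+	 */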
+	pr_info("");
+	for (i = 0; i < count; i++) {
+		efi_guid_t guid;
+		unsigned long table;
+
+		if (efi_enabled(EFI_64BIT)) {
+			u64 table64;
+			guid = ((efi_config_table_64_t *)tablep)->guid;
+			table64 = ((efi_config_table_64_t *)tablep)->table;
+			table = table64;
+#ifndef CONFIG_64BIT
+			if (table64 >> 32) {
+				pr_cont("\n");
+				pr_err("Table located above 4GB, disabling EFI.\n");
+				return -EINVAL;
+			}
+#endif
+		} else {
+			guid = ((efi_config_table_32_t *)tablep)->guid;
+			table = ((efi_config_table_32_t *)tablep)->table;
+		}
+
+		if (!match_config_table(&guid, table, common_tables))
+			match_config_table(&guid, table, arch_tables);
+
+		tablep += sz;
+	}
+	pr_cont("\n");
+	set_bit(EFI_CONFIG_TABLES, &efi.flags);
+
+	if (efi.rng_seed != EFI_INVALID_TABLE_ADDR) {
+		struct linux_efi_random_seed *seed;
+		u32 size = 0;
+
+		seed = early_memremap(efi.rng_seed, sizeof(*seed));
+		if (seed != NULL) {
+			size = seed->size;
+			early_memunmap(seed, sizeof(*seed));
+		} else {
+			pr_err("Could not map UEFI random seed!\n");
+		}
+		if (size > 0) {
+			seed = early_memremap(efi.rng_seed,
+					      sizeof(*seed) + size);
+			if (seed != NULL) {
+				pr_notice("seeding entropy pool\n");
+				add_device_randomness(seed->bits, seed->size);
+				early_memunmap(seed, sizeof(*seed) + size);
+			} else {
+				pr_err("Could not map UEFI random seed!\n");
+			}
+		}
+	}
+
+	if (efi_enabled(EFI_MEMMAP))
+		efi_memattr_init();
+
+	efi_tpm_eventlog_init();
+
+	/* Parse the EFI Properties table if it exists */
+	if (efi.properties_table != EFI_INVALID_TABLE_ADDR) {
+		efi_properties_table_t *tbl;
+
+		tbl = early_memremap(efi.properties_table, sizeof(*tbl));
+		if (tbl == NULL) {
+			pr_err("Could not map Properties table!\n");
+			return -ENOMEM;
+		}
+
+		if (tbl->memory_protection_attribute &
+		    EFI_PROPERTIES_RUNTIME_MEMORY_PROTECTION_NON_EXECUTABLE_PE_DATA)
+			set_bit(EFI_NX_PE_DATA, &efi.flags);
+
+		early_memunmap(tbl, sizeof(*tbl));
+	}
+
+	return 0;
+}
+
+int __init efi_config_init(efi_config_table_type_t *arch_tables)
+{
+	void *config_tables;
+	int sz, ret;
+
+	if (efi_enabled(EFI_64BIT))
+		sz = sizeof(efi_config_table_64_t);
+	else
+		sz = sizeof(efi_config_table_32_t);
+
+	/*
+	 * Let's see what config tables the firmware passed to us.
+	 */
+	config_tables = early_memremap(efi.systab->tables,
+				       efi.systab->nr_tables * sz);
+	if (config_tables == NULL) {
+		pr_err("Could not map Configuration table!\n");
+		return -ENOMEM;
+	}
+
+	ret = efi_config_parse_tables(config_tables, efi.systab->nr_tables, sz,
+				      arch_tables);
+
+	early_memunmap(config_tables, efi.systab->nr_tables * sz);
+	return ret;
+}
+
+#ifdef CONFIG_EFI_VARS_MODULE
+static int __init efi_load_efivars(void)
+{
+	struct platform_device *pdev;
+
+	if (!efi_enabled(EFI_RUNTIME_SERVICES))
+		return 0;
+
+	pdev = platform_device_register_simple("efivars", 0, NULL, 0);
+	return PTR_ERR_OR_ZERO(pdev);
+}
+device_initcall(efi_load_efivars);
+#endif
+
+#ifdef CONFIG_EFI_PARAMS_FROM_FDT
+
+#define UEFI_PARAM(name, prop, field)			   \
+	{						   \
+		{ name },				   \
+		{ prop },				   \
+		offsetof(struct efi_fdt_params, field),    \
+		FIELD_SIZEOF(struct efi_fdt_params, field) \
+	}
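+
+/*
+ * e.g. UEFI_PARAM("System Table", "linux,uefi-system-table", system_table)
+ * maps the "linux,uefi-system-table" FDT property onto the system_table
+ * field of struct efi_fdt_params.
+ */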
+
+struct params {
+	const char name[32];
+	const char propname[32];
+	int offset;
+	int size;
+};
+
+static __initdata struct params fdt_params[] = {
+	UEFI_PARAM("System Table", "linux,uefi-system-table", system_table),
+	UEFI_PARAM("MemMap Address", "linux,uefi-mmap-start", mmap),
+	UEFI_PARAM("MemMap Size", "linux,uefi-mmap-size", mmap_size),
+	UEFI_PARAM("MemMap Desc. Size", "linux,uefi-mmap-desc-size", desc_size),
+	UEFI_PARAM("MemMap Desc. Version", "linux,uefi-mmap-desc-ver", desc_ver)
+};
+
+static __initdata struct params xen_fdt_params[] = {
+	UEFI_PARAM("System Table", "xen,uefi-system-table", system_table),
+	UEFI_PARAM("MemMap Address", "xen,uefi-mmap-start", mmap),
+	UEFI_PARAM("MemMap Size", "xen,uefi-mmap-size", mmap_size),
+	UEFI_PARAM("MemMap Desc. Size", "xen,uefi-mmap-desc-size", desc_size),
+	UEFI_PARAM("MemMap Desc. Version", "xen,uefi-mmap-desc-ver", desc_ver)
+};
+
+#define EFI_FDT_PARAMS_SIZE	ARRAY_SIZE(fdt_params)
+
+static __initdata struct {
+	const char *uname;
+	const char *subnode;
+	struct params *params;
+} dt_params[] = {
+	{ "hypervisor", "uefi", xen_fdt_params },
+	{ "chosen", NULL, fdt_params },
+};
+
+struct param_info {
+	int found;
+	void *params;
+	const char *missing;
+};
+
+static int __init __find_uefi_params(unsigned long node,
+				     struct param_info *info,
+				     struct params *params)
+{
+	const void *prop;
+	void *dest;
+	u64 val;
+	int i, len;
+
+	for (i = 0; i < EFI_FDT_PARAMS_SIZE; i++) {
+		prop = of_get_flat_dt_prop(node, params[i].propname, &len);
+		if (!prop) {
+			info->missing = params[i].name;
+			return 0;
+		}
+
+		dest = info->params + params[i].offset;
+		info->found++;
+
+		val = of_read_number(prop, len / sizeof(u32));
+
+		if (params[i].size == sizeof(u32))
+			*(u32 *)dest = val;
+		else
+			*(u64 *)dest = val;
+
+		if (efi_enabled(EFI_DBG))
+			pr_info("  %s: 0x%0*llx\n", params[i].name,
+				params[i].size * 2, val);
+	}
+
+	return 1;
+}
+
+static int __init fdt_find_uefi_params(unsigned long node, const char *uname,
+				       int depth, void *data)
+{
+	struct param_info *info = data;
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(dt_params); i++) {
+		const char *subnode = dt_params[i].subnode;
+
+		if (depth != 1 || strcmp(uname, dt_params[i].uname) != 0) {
+			info->missing = dt_params[i].params[0].name;
+			continue;
+		}
+
+		if (subnode) {
+			int err = of_get_flat_dt_subnode_by_name(node, subnode);
+
+			if (err < 0)
+				return 0;
+
+			node = err;
+		}
+
+		return __find_uefi_params(node, info, dt_params[i].params);
+	}
+
+	return 0;
+}
+
+int __init efi_get_fdt_params(struct efi_fdt_params *params)
+{
+	struct param_info info;
+	int ret;
+
+	pr_info("Getting EFI parameters from FDT:\n");
+
+	info.found = 0;
+	info.params = params;
+
+	ret = of_scan_flat_dt(fdt_find_uefi_params, &info);
+	if (!info.found)
+		pr_info("UEFI not found.\n");
+	else if (!ret)
+		pr_err("Can't find '%s' in device tree!\n",
+		       info.missing);
+
+	return ret;
+}
+#endif /* CONFIG_EFI_PARAMS_FROM_FDT */
+
+static __initdata char memory_type_name[][20] = {
+	"Reserved",
+	"Loader Code",
+	"Loader Data",
+	"Boot Code",
+	"Boot Data",
+	"Runtime Code",
+	"Runtime Data",
+	"Conventional Memory",
+	"Unusable Memory",
+	"ACPI Reclaim Memory",
+	"ACPI Memory NVS",
+	"Memory Mapped I/O",
+	"MMIO Port Space",
+	"PAL Code",
+	"Persistent Memory",
+};
+
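+/*
+ * Render one descriptor as, e.g. (illustrative spacing),
+ *   "[Runtime Data       |RUN| |  |  |  |  |  |   |WB|WT|WC|UC]"
+ * or fall back to "[type=N" / "|attr=0x...]" for unknown types or
+ * attribute bits.
+ */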
+char * __init efi_md_typeattr_format(char *buf, size_t size,
+				     const efi_memory_desc_t *md)
+{
+	char *pos;
+	int type_len;
+	u64 attr;
+
+	pos = buf;
+	if (md->type >= ARRAY_SIZE(memory_type_name))
+		type_len = snprintf(pos, size, "[type=%u", md->type);
+	else
+		type_len = snprintf(pos, size, "[%-*s",
+				    (int)(sizeof(memory_type_name[0]) - 1),
+				    memory_type_name[md->type]);
+	if (type_len >= size)
+		return buf;
+
+	pos += type_len;
+	size -= type_len;
+
+	attr = md->attribute;
+	if (attr & ~(EFI_MEMORY_UC | EFI_MEMORY_WC | EFI_MEMORY_WT |
+		     EFI_MEMORY_WB | EFI_MEMORY_UCE | EFI_MEMORY_RO |
+		     EFI_MEMORY_WP | EFI_MEMORY_RP | EFI_MEMORY_XP |
+		     EFI_MEMORY_NV |
+		     EFI_MEMORY_RUNTIME | EFI_MEMORY_MORE_RELIABLE))
+		snprintf(pos, size, "|attr=0x%016llx]",
+			 (unsigned long long)attr);
+	else
+		snprintf(pos, size,
+			 "|%3s|%2s|%2s|%2s|%2s|%2s|%2s|%3s|%2s|%2s|%2s|%2s]",
+			 attr & EFI_MEMORY_RUNTIME ? "RUN" : "",
+			 attr & EFI_MEMORY_MORE_RELIABLE ? "MR" : "",
+			 attr & EFI_MEMORY_NV      ? "NV"  : "",
+			 attr & EFI_MEMORY_XP      ? "XP"  : "",
+			 attr & EFI_MEMORY_RP      ? "RP"  : "",
+			 attr & EFI_MEMORY_WP      ? "WP"  : "",
+			 attr & EFI_MEMORY_RO      ? "RO"  : "",
+			 attr & EFI_MEMORY_UCE     ? "UCE" : "",
+			 attr & EFI_MEMORY_WB      ? "WB"  : "",
+			 attr & EFI_MEMORY_WT      ? "WT"  : "",
+			 attr & EFI_MEMORY_WC      ? "WC"  : "",
+			 attr & EFI_MEMORY_UC      ? "UC"  : "");
+	return buf;
+}
+
+/*
+ * IA64 has a funky EFI memory map that doesn't work the same way as
+ * other architectures.
+ */
+#ifndef CONFIG_IA64
+/*
+ * efi_mem_attributes - lookup memmap attributes for physical address
+ * @phys_addr: the physical address to lookup
+ *
+ * Search in the EFI memory map for the region covering
+ * @phys_addr. Returns the EFI memory attributes if the region
+ * was found in the memory map, 0 otherwise.
+ */
+u64 efi_mem_attributes(unsigned long phys_addr)
+{
+	efi_memory_desc_t *md;
+
+	if (!efi_enabled(EFI_MEMMAP))
+		return 0;
+
+	for_each_efi_memory_desc(md) {
+		if ((md->phys_addr <= phys_addr) &&
+		    (phys_addr < (md->phys_addr +
+		    (md->num_pages << EFI_PAGE_SHIFT))))
+			return md->attribute;
+	}
+	return 0;
+}
+
+/*
+ * efi_mem_type - lookup memmap type for physical address
+ * @phys_addr: the physical address to lookup
+ *
+ * Search in the EFI memory map for the region covering @phys_addr.
+ * Returns the EFI memory type if the region was found in the memory
+ * map, -EINVAL if it was not, or -ENOTSUPP if there is no EFI memory map.
+ */
+int efi_mem_type(unsigned long phys_addr)
+{
+	const efi_memory_desc_t *md;
+
+	if (!efi_enabled(EFI_MEMMAP))
+		return -ENOTSUPP;
+
+	for_each_efi_memory_desc(md) {
+		if ((md->phys_addr <= phys_addr) &&
+		    (phys_addr < (md->phys_addr +
+				  (md->num_pages << EFI_PAGE_SHIFT))))
+			return md->type;
+	}
+	return -EINVAL;
+}
+#endif
+
+int efi_status_to_err(efi_status_t status)
+{
+	int err;
+
+	switch (status) {
+	case EFI_SUCCESS:
+		err = 0;
+		break;
+	case EFI_INVALID_PARAMETER:
+		err = -EINVAL;
+		break;
+	case EFI_OUT_OF_RESOURCES:
+		err = -ENOSPC;
+		break;
+	case EFI_DEVICE_ERROR:
+		err = -EIO;
+		break;
+	case EFI_WRITE_PROTECTED:
+		err = -EROFS;
+		break;
+	case EFI_SECURITY_VIOLATION:
+		err = -EACCES;
+		break;
+	case EFI_NOT_FOUND:
+		err = -ENOENT;
+		break;
+	case EFI_ABORTED:
+		err = -EINTR;
+		break;
+	default:
+		err = -EINVAL;
+	}
+
+	return err;
+}
+
+bool efi_is_table_address(unsigned long phys_addr)
+{
+	unsigned int i;
+
+	if (phys_addr == EFI_INVALID_TABLE_ADDR)
+		return false;
+
+	for (i = 0; i < ARRAY_SIZE(efi_tables); i++)
+		if (*(efi_tables[i]) == phys_addr)
+			return true;
+
+	return false;
+}
+
+#ifdef CONFIG_KEXEC
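+/*
+ * On kexec, refresh the RNG seed handed to the next kernel so that the
+ * new kernel does not reuse the seed material this one already consumed.
+ */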
+static int update_efi_random_seed(struct notifier_block *nb,
+				  unsigned long code, void *unused)
+{
+	struct linux_efi_random_seed *seed;
+	u32 size = 0;
+
+	if (!kexec_in_progress)
+		return NOTIFY_DONE;
+
+	seed = memremap(efi.rng_seed, sizeof(*seed), MEMREMAP_WB);
+	if (seed != NULL) {
+		size = min(seed->size, EFI_RANDOM_SEED_SIZE);
+		memunmap(seed);
+	} else {
+		pr_err("Could not map UEFI random seed!\n");
+	}
+	if (size > 0) {
+		seed = memremap(efi.rng_seed, sizeof(*seed) + size,
+				MEMREMAP_WB);
+		if (seed != NULL) {
+			seed->size = size;
+			get_random_bytes(seed->bits, seed->size);
+			memunmap(seed);
+		} else {
+			pr_err("Could not map UEFI random seed!\n");
+		}
+	}
+	return NOTIFY_DONE;
+}
+
+static struct notifier_block efi_random_seed_nb = {
+	.notifier_call = update_efi_random_seed,
+};
+
+static int register_update_efi_random_seed(void)
+{
+	if (efi.rng_seed == EFI_INVALID_TABLE_ADDR)
+		return 0;
+	return register_reboot_notifier(&efi_random_seed_nb);
+}
+late_initcall(register_update_efi_random_seed);
+#endif
diff --git a/drivers/firmware/efi/efibc.c b/drivers/firmware/efi/efibc.c
new file mode 100644
index 0000000..503bbe2
--- /dev/null
+++ b/drivers/firmware/efi/efibc.c
@@ -0,0 +1,113 @@
+/*
+ * efibc: control EFI bootloaders which obey LoaderEntryOneShot var
+ * Copyright (c) 2013-2016, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ */
+
+#define pr_fmt(fmt) "efibc: " fmt
+
+#include <linux/efi.h>
+#include <linux/module.h>
+#include <linux/reboot.h>
+#include <linux/slab.h>
+
+static void efibc_str_to_str16(const char *str, efi_char16_t *str16)
+{
+	size_t i;
+
+	for (i = 0; i < strlen(str); i++)
+		str16[i] = str[i];
+
+	str16[i] = '\0';
+}
+
+static int efibc_set_variable(const char *name, const char *value)
+{
+	int ret;
+	efi_guid_t guid = LINUX_EFI_LOADER_ENTRY_GUID;
+	struct efivar_entry *entry;
+	size_t size = (strlen(value) + 1) * sizeof(efi_char16_t);
+
+	if (size > sizeof(entry->var.Data)) {
+		pr_err("value is too large (%zu bytes) for '%s' EFI variable\n", size, name);
+		return -EINVAL;
+	}
+
+	entry = kmalloc(sizeof(*entry), GFP_KERNEL);
+	if (!entry) {
+		pr_err("failed to allocate efivar entry for '%s' EFI variable\n", name);
+		return -ENOMEM;
+	}
+
+	efibc_str_to_str16(name, entry->var.VariableName);
+	efibc_str_to_str16(value, (efi_char16_t *)entry->var.Data);
+	memcpy(&entry->var.VendorGuid, &guid, sizeof(guid));
+
+	ret = efivar_entry_set(entry,
+			       EFI_VARIABLE_NON_VOLATILE
+			       | EFI_VARIABLE_BOOTSERVICE_ACCESS
+			       | EFI_VARIABLE_RUNTIME_ACCESS,
+			       size, entry->var.Data, NULL);
+	if (ret)
+		pr_err("failed to set %s EFI variable: 0x%x\n",
+		       name, ret);
+
+	kfree(entry);
+	return ret;
+}
+
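+/*
+ * The reboot notifier records the reboot reason and, when userspace
+ * supplies a reboot argument (e.g. "reboot bootloader"), forwards it to
+ * the bootloader via the one-shot LoaderEntryOneShot variable.
+ */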
+static int efibc_reboot_notifier_call(struct notifier_block *notifier,
+				      unsigned long event, void *data)
+{
+	const char *reason = "shutdown";
+	int ret;
+
+	if (event == SYS_RESTART)
+		reason = "reboot";
+
+	ret = efibc_set_variable("LoaderEntryRebootReason", reason);
+	if (ret || !data)
+		return NOTIFY_DONE;
+
+	efibc_set_variable("LoaderEntryOneShot", (char *)data);
+
+	return NOTIFY_DONE;
+}
+
+static struct notifier_block efibc_reboot_notifier = {
+	.notifier_call = efibc_reboot_notifier_call,
+};
+
+static int __init efibc_init(void)
+{
+	int ret;
+
+	if (!efi_enabled(EFI_RUNTIME_SERVICES))
+		return -ENODEV;
+
+	ret = register_reboot_notifier(&efibc_reboot_notifier);
+	if (ret)
+		pr_err("unable to register reboot notifier\n");
+
+	return ret;
+}
+module_init(efibc_init);
+
+static void __exit efibc_exit(void)
+{
+	unregister_reboot_notifier(&efibc_reboot_notifier);
+}
+module_exit(efibc_exit);
+
+MODULE_AUTHOR("Jeremy Compostella <jeremy.compostella@intel.com>");
+MODULE_AUTHOR("Matt Gumbel <matthew.k.gumbel@intel.com");
+MODULE_DESCRIPTION("EFI Bootloader Control");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/firmware/efi/efivars.c b/drivers/firmware/efi/efivars.c
new file mode 100644
index 0000000..3e626fd
--- /dev/null
+++ b/drivers/firmware/efi/efivars.c
@@ -0,0 +1,762 @@
+/*
+ * Originally from efivars.c,
+ *
+ * Copyright (C) 2001,2003,2004 Dell <Matt_Domsch@dell.com>
+ * Copyright (C) 2004 Intel Corporation <matthew.e.tolentino@intel.com>
+ *
+ * This code takes all variables accessible from EFI runtime and
+ *  exports them via sysfs
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ *
+ * Changelog:
+ *
+ *  17 May 2004 - Matt Domsch <Matt_Domsch@dell.com>
+ *   remove check for efi_enabled in exit
+ *   add MODULE_VERSION
+ *
+ *  26 Apr 2004 - Matt Domsch <Matt_Domsch@dell.com>
+ *   minor bug fixes
+ *
+ *  21 Apr 2004 - Matt Tolentino <matthew.e.tolentino@intel.com>
+ *   converted driver to export variable information via sysfs
+ *   and moved to drivers/firmware directory
+ *   bumped revision number to v0.07 to reflect conversion & move
+ *
+ *  10 Dec 2002 - Matt Domsch <Matt_Domsch@dell.com>
+ *   fix locking per Peter Chubb's findings
+ *
+ *  25 Mar 2002 - Matt Domsch <Matt_Domsch@dell.com>
+ *   move uuid_unparse() to include/asm-ia64/efi.h:efi_guid_to_str()
+ *
+ *  12 Feb 2002 - Matt Domsch <Matt_Domsch@dell.com>
+ *   use list_for_each_safe when deleting vars.
+ *   remove ifdef CONFIG_SMP around include <linux/smp.h>
+ *   v0.04 release to linux-ia64@linuxia64.org
+ *
+ *  20 April 2001 - Matt Domsch <Matt_Domsch@dell.com>
+ *   Moved vars from /proc/efi to /proc/efi/vars, and made
+ *   efi.c own the /proc/efi directory.
+ *   v0.03 release to linux-ia64@linuxia64.org
+ *
+ *  26 March 2001 - Matt Domsch <Matt_Domsch@dell.com>
+ *   At the request of Stephane, moved ownership of /proc/efi
+ *   to efi.c, and now efivars lives under /proc/efi/vars.
+ *
+ *  12 March 2001 - Matt Domsch <Matt_Domsch@dell.com>
+ *   Feedback received from Stephane Eranian incorporated.
+ *   efivar_write() checks copy_from_user() return value.
+ *   efivar_read/write() returns proper errno.
+ *   v0.02 release to linux-ia64@linuxia64.org
+ *
+ *  26 February 2001 - Matt Domsch <Matt_Domsch@dell.com>
+ *   v0.01 release to linux-ia64@linuxia64.org
+ */
+
+#include <linux/efi.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/ucs2_string.h>
+#include <linux/compat.h>
+
+#define EFIVARS_VERSION "0.08"
+#define EFIVARS_DATE "2004-May-17"
+
+MODULE_AUTHOR("Matt Domsch <Matt_Domsch@Dell.com>");
+MODULE_DESCRIPTION("sysfs interface to EFI Variables");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(EFIVARS_VERSION);
+MODULE_ALIAS("platform:efivars");
+
+LIST_HEAD(efivar_sysfs_list);
+EXPORT_SYMBOL_GPL(efivar_sysfs_list);
+
+static struct kset *efivars_kset;
+
+static struct bin_attribute *efivars_new_var;
+static struct bin_attribute *efivars_del_var;
+
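+/*
+ * Layout of struct efi_variable as seen by 32-bit userspace: Status is
+ * a fixed 32 bits here (it is an unsigned long in struct efi_variable),
+ * so the two structures differ in size on 64-bit kernels.
+ */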
+struct compat_efi_variable {
+	efi_char16_t  VariableName[EFI_VAR_NAME_LEN/sizeof(efi_char16_t)];
+	efi_guid_t    VendorGuid;
+	__u32         DataSize;
+	__u8          Data[1024];
+	__u32         Status;
+	__u32         Attributes;
+} __packed;
+
+struct efivar_attribute {
+	struct attribute attr;
+	ssize_t (*show) (struct efivar_entry *entry, char *buf);
+	ssize_t (*store)(struct efivar_entry *entry, const char *buf, size_t count);
+};
+
+#define EFIVAR_ATTR(_name, _mode, _show, _store) \
+struct efivar_attribute efivar_attr_##_name = { \
+	.attr = {.name = __stringify(_name), .mode = _mode}, \
+	.show = _show, \
+	.store = _store, \
+};
+
+#define to_efivar_attr(_attr) container_of(_attr, struct efivar_attribute, attr)
+#define to_efivar_entry(obj)  container_of(obj, struct efivar_entry, kobj)
+
+/*
+ * Prototype for sysfs creation function
+ */
+static int
+efivar_create_sysfs_entry(struct efivar_entry *new_var);
+
+static ssize_t
+efivar_guid_read(struct efivar_entry *entry, char *buf)
+{
+	struct efi_variable *var = &entry->var;
+	char *str = buf;
+
+	if (!entry || !buf)
+		return 0;
+
+	efi_guid_to_str(&var->VendorGuid, str);
+	str += strlen(str);
+	str += sprintf(str, "\n");
+
+	return str - buf;
+}
+
+static ssize_t
+efivar_attr_read(struct efivar_entry *entry, char *buf)
+{
+	struct efi_variable *var = &entry->var;
+	char *str = buf;
+
+	if (!entry || !buf)
+		return -EINVAL;
+
+	var->DataSize = 1024;
+	if (efivar_entry_get(entry, &var->Attributes, &var->DataSize, var->Data))
+		return -EIO;
+
+	if (var->Attributes & EFI_VARIABLE_NON_VOLATILE)
+		str += sprintf(str, "EFI_VARIABLE_NON_VOLATILE\n");
+	if (var->Attributes & EFI_VARIABLE_BOOTSERVICE_ACCESS)
+		str += sprintf(str, "EFI_VARIABLE_BOOTSERVICE_ACCESS\n");
+	if (var->Attributes & EFI_VARIABLE_RUNTIME_ACCESS)
+		str += sprintf(str, "EFI_VARIABLE_RUNTIME_ACCESS\n");
+	if (var->Attributes & EFI_VARIABLE_HARDWARE_ERROR_RECORD)
+		str += sprintf(str, "EFI_VARIABLE_HARDWARE_ERROR_RECORD\n");
+	if (var->Attributes & EFI_VARIABLE_AUTHENTICATED_WRITE_ACCESS)
+		str += sprintf(str,
+			"EFI_VARIABLE_AUTHENTICATED_WRITE_ACCESS\n");
+	if (var->Attributes &
+			EFI_VARIABLE_TIME_BASED_AUTHENTICATED_WRITE_ACCESS)
+		str += sprintf(str,
+			"EFI_VARIABLE_TIME_BASED_AUTHENTICATED_WRITE_ACCESS\n");
+	if (var->Attributes & EFI_VARIABLE_APPEND_WRITE)
+		str += sprintf(str, "EFI_VARIABLE_APPEND_WRITE\n");
+	return str - buf;
+}
+
+static ssize_t
+efivar_size_read(struct efivar_entry *entry, char *buf)
+{
+	struct efi_variable *var = &entry->var;
+	char *str = buf;
+
+	if (!entry || !buf)
+		return -EINVAL;
+
+	var->DataSize = 1024;
+	if (efivar_entry_get(entry, &var->Attributes, &var->DataSize, var->Data))
+		return -EIO;
+
+	str += sprintf(str, "0x%lx\n", var->DataSize);
+	return str - buf;
+}
+
+static ssize_t
+efivar_data_read(struct efivar_entry *entry, char *buf)
+{
+	struct efi_variable *var = &entry->var;
+
+	if (!entry || !buf)
+		return -EINVAL;
+
+	var->DataSize = 1024;
+	if (efivar_entry_get(entry, &var->Attributes, &var->DataSize, var->Data))
+		return -EIO;
+
+	memcpy(buf, var->Data, var->DataSize);
+	return var->DataSize;
+}
+
+static inline int
+sanity_check(struct efi_variable *var, efi_char16_t *name, efi_guid_t vendor,
+	     unsigned long size, u32 attributes, u8 *data)
+{
+	/*
+	 * If only updating the variable data, then the name
+	 * and guid should remain the same
+	 */
+	if (memcmp(name, var->VariableName, sizeof(var->VariableName)) ||
+		efi_guidcmp(vendor, var->VendorGuid)) {
+		printk(KERN_ERR "efivars: Cannot edit the wrong variable!\n");
+		return -EINVAL;
+	}
+
+	if ((size <= 0) || (attributes == 0)){
+		printk(KERN_ERR "efivars: DataSize & Attributes must be valid!\n");
+		return -EINVAL;
+	}
+
+	if ((attributes & ~EFI_VARIABLE_MASK) != 0 ||
+	    efivar_validate(vendor, name, data, size) == false) {
+		printk(KERN_ERR "efivars: Malformed variable content\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static inline bool is_compat(void)
+{
+	if (IS_ENABLED(CONFIG_COMPAT) && in_compat_syscall())
+		return true;
+
+	return false;
+}
+
+static void
+copy_out_compat(struct efi_variable *dst, struct compat_efi_variable *src)
+{
+	memcpy(dst->VariableName, src->VariableName, EFI_VAR_NAME_LEN);
+	memcpy(dst->Data, src->Data, sizeof(src->Data));
+
+	dst->VendorGuid = src->VendorGuid;
+	dst->DataSize = src->DataSize;
+	dst->Attributes = src->Attributes;
+}
+
+/*
+ * We allow each variable to be edited via rewriting the
+ * entire efi variable structure.
+ */
+static ssize_t
+efivar_store_raw(struct efivar_entry *entry, const char *buf, size_t count)
+{
+	struct efi_variable *new_var, *var = &entry->var;
+	efi_char16_t *name;
+	unsigned long size;
+	efi_guid_t vendor;
+	u32 attributes;
+	u8 *data;
+	int err;
+
+	if (is_compat()) {
+		struct compat_efi_variable *compat;
+
+		if (count != sizeof(*compat))
+			return -EINVAL;
+
+		compat = (struct compat_efi_variable *)buf;
+		attributes = compat->Attributes;
+		vendor = compat->VendorGuid;
+		name = compat->VariableName;
+		size = compat->DataSize;
+		data = compat->Data;
+
+		err = sanity_check(var, name, vendor, size, attributes, data);
+		if (err)
+			return err;
+
+		copy_out_compat(&entry->var, compat);
+	} else {
+		if (count != sizeof(struct efi_variable))
+			return -EINVAL;
+
+		new_var = (struct efi_variable *)buf;
+
+		attributes = new_var->Attributes;
+		vendor = new_var->VendorGuid;
+		name = new_var->VariableName;
+		size = new_var->DataSize;
+		data = new_var->Data;
+
+		err = sanity_check(var, name, vendor, size, attributes, data);
+		if (err)
+			return err;
+
+		memcpy(&entry->var, new_var, count);
+	}
+
+	err = efivar_entry_set(entry, attributes, size, data, NULL);
+	if (err) {
+		printk(KERN_WARNING "efivars: set_variable() failed: status=%d\n", err);
+		return -EIO;
+	}
+
+	return count;
+}
+
+static ssize_t
+efivar_show_raw(struct efivar_entry *entry, char *buf)
+{
+	struct efi_variable *var = &entry->var;
+	struct compat_efi_variable *compat;
+	size_t size;
+
+	if (!entry || !buf)
+		return 0;
+
+	var->DataSize = 1024;
+	if (efivar_entry_get(entry, &entry->var.Attributes,
+			     &entry->var.DataSize, entry->var.Data))
+		return -EIO;
+
+	if (is_compat()) {
+		compat = (struct compat_efi_variable *)buf;
+
+		size = sizeof(*compat);
+		memcpy(compat->VariableName, var->VariableName,
+			EFI_VAR_NAME_LEN);
+		memcpy(compat->Data, var->Data, sizeof(compat->Data));
+
+		compat->VendorGuid = var->VendorGuid;
+		compat->DataSize = var->DataSize;
+		compat->Attributes = var->Attributes;
+	} else {
+		size = sizeof(*var);
+		memcpy(buf, var, size);
+	}
+
+	return size;
+}
+
+/*
+ * Generic read/write functions that call the specific functions of
+ * the attributes...
+ */
+static ssize_t efivar_attr_show(struct kobject *kobj, struct attribute *attr,
+				char *buf)
+{
+	struct efivar_entry *var = to_efivar_entry(kobj);
+	struct efivar_attribute *efivar_attr = to_efivar_attr(attr);
+	ssize_t ret = -EIO;
+
+	if (!capable(CAP_SYS_ADMIN))
+		return -EACCES;
+
+	if (efivar_attr->show) {
+		ret = efivar_attr->show(var, buf);
+	}
+	return ret;
+}
+
+static ssize_t efivar_attr_store(struct kobject *kobj, struct attribute *attr,
+				const char *buf, size_t count)
+{
+	struct efivar_entry *var = to_efivar_entry(kobj);
+	struct efivar_attribute *efivar_attr = to_efivar_attr(attr);
+	ssize_t ret = -EIO;
+
+	if (!capable(CAP_SYS_ADMIN))
+		return -EACCES;
+
+	if (efivar_attr->store)
+		ret = efivar_attr->store(var, buf, count);
+
+	return ret;
+}
+
+static const struct sysfs_ops efivar_attr_ops = {
+	.show = efivar_attr_show,
+	.store = efivar_attr_store,
+};
+
+static void efivar_release(struct kobject *kobj)
+{
+	struct efivar_entry *var = to_efivar_entry(kobj);
+	kfree(var);
+}
+
+static EFIVAR_ATTR(guid, 0400, efivar_guid_read, NULL);
+static EFIVAR_ATTR(attributes, 0400, efivar_attr_read, NULL);
+static EFIVAR_ATTR(size, 0400, efivar_size_read, NULL);
+static EFIVAR_ATTR(data, 0400, efivar_data_read, NULL);
+static EFIVAR_ATTR(raw_var, 0600, efivar_show_raw, efivar_store_raw);
+
+static struct attribute *def_attrs[] = {
+	&efivar_attr_guid.attr,
+	&efivar_attr_size.attr,
+	&efivar_attr_attributes.attr,
+	&efivar_attr_data.attr,
+	&efivar_attr_raw_var.attr,
+	NULL,
+};
+
+static struct kobj_type efivar_ktype = {
+	.release = efivar_release,
+	.sysfs_ops = &efivar_attr_ops,
+	.default_attrs = def_attrs,
+};
+
+static ssize_t efivar_create(struct file *filp, struct kobject *kobj,
+			     struct bin_attribute *bin_attr,
+			     char *buf, loff_t pos, size_t count)
+{
+	struct compat_efi_variable *compat = (struct compat_efi_variable *)buf;
+	struct efi_variable *new_var = (struct efi_variable *)buf;
+	struct efivar_entry *new_entry;
+	bool need_compat = is_compat();
+	efi_char16_t *name;
+	unsigned long size;
+	u32 attributes;
+	u8 *data;
+	int err;
+
+	if (!capable(CAP_SYS_ADMIN))
+		return -EACCES;
+
+	if (need_compat) {
+		if (count != sizeof(*compat))
+			return -EINVAL;
+
+		attributes = compat->Attributes;
+		name = compat->VariableName;
+		size = compat->DataSize;
+		data = compat->Data;
+	} else {
+		if (count != sizeof(*new_var))
+			return -EINVAL;
+
+		attributes = new_var->Attributes;
+		name = new_var->VariableName;
+		size = new_var->DataSize;
+		data = new_var->Data;
+	}
+
+	if ((attributes & ~EFI_VARIABLE_MASK) != 0 ||
+	    efivar_validate(new_var->VendorGuid, name, data,
+			    size) == false) {
+		printk(KERN_ERR "efivars: Malformed variable content\n");
+		return -EINVAL;
+	}
+
+	new_entry = kzalloc(sizeof(*new_entry), GFP_KERNEL);
+	if (!new_entry)
+		return -ENOMEM;
+
+	if (need_compat)
+		copy_out_compat(&new_entry->var, compat);
+	else
+		memcpy(&new_entry->var, new_var, sizeof(*new_var));
+
+	err = efivar_entry_set(new_entry, attributes, size,
+			       data, &efivar_sysfs_list);
+	if (err) {
+		if (err == -EEXIST)
+			err = -EINVAL;
+		goto out;
+	}
+
+	if (efivar_create_sysfs_entry(new_entry)) {
+		printk(KERN_WARNING "efivars: failed to create sysfs entry.\n");
+		kfree(new_entry);
+	}
+	return count;
+
+out:
+	kfree(new_entry);
+	return err;
+}
+
+static ssize_t efivar_delete(struct file *filp, struct kobject *kobj,
+			     struct bin_attribute *bin_attr,
+			     char *buf, loff_t pos, size_t count)
+{
+	struct efi_variable *del_var = (struct efi_variable *)buf;
+	struct compat_efi_variable *compat;
+	struct efivar_entry *entry;
+	efi_char16_t *name;
+	efi_guid_t vendor;
+	int err = 0;
+
+	if (!capable(CAP_SYS_ADMIN))
+		return -EACCES;
+
+	if (is_compat()) {
+		if (count != sizeof(*compat))
+			return -EINVAL;
+
+		compat = (struct compat_efi_variable *)buf;
+		name = compat->VariableName;
+		vendor = compat->VendorGuid;
+	} else {
+		if (count != sizeof(*del_var))
+			return -EINVAL;
+
+		name = del_var->VariableName;
+		vendor = del_var->VendorGuid;
+	}
+
+	if (efivar_entry_iter_begin())
+		return -EINTR;
+	entry = efivar_entry_find(name, vendor, &efivar_sysfs_list, true);
+	if (!entry)
+		err = -EINVAL;
+	else if (__efivar_entry_delete(entry))
+		err = -EIO;
+
+	if (err) {
+		efivar_entry_iter_end();
+		return err;
+	}
+
+	if (!entry->scanning) {
+		efivar_entry_iter_end();
+		efivar_unregister(entry);
+	} else
+		efivar_entry_iter_end();
+
+	/* It's dead Jim.... */
+	return count;
+}
+
+/**
+ * efivar_create_sysfs_entry - create a new entry in sysfs
+ * @new_var: efivar entry to create
+ *
+ * Returns 0 on success, negative error code on failure
+ */
+static int
+efivar_create_sysfs_entry(struct efivar_entry *new_var)
+{
+	int short_name_size;
+	char *short_name;
+	unsigned long utf8_name_size;
+	efi_char16_t *variable_name = new_var->var.VariableName;
+	int ret;
+
+	/*
+	 * Length of the variable bytes in UTF8, plus the '-' separator,
+	 * plus the GUID, plus trailing NUL
+	 */
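+	/*
+	 * e.g. "Boot0000" under the EFI global variable GUID becomes the
+	 * sysfs entry "Boot0000-8be4df61-93ca-11d2-aa0d-00e098032b8c".
+	 */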
+	utf8_name_size = ucs2_utf8size(variable_name);
+	short_name_size = utf8_name_size + 1 + EFI_VARIABLE_GUID_LEN + 1;
+
+	short_name = kmalloc(short_name_size, GFP_KERNEL);
+	if (!short_name)
+		return -ENOMEM;
+
+	ucs2_as_utf8(short_name, variable_name, short_name_size);
+
+	/*
+	 * This is ugly, but necessary to separate one vendor's
+	 * private variables from another's.
+	 */
+	short_name[utf8_name_size] = '-';
+	efi_guid_to_str(&new_var->var.VendorGuid,
+			 short_name + utf8_name_size + 1);
+
+	new_var->kobj.kset = efivars_kset;
+
+	ret = kobject_init_and_add(&new_var->kobj, &efivar_ktype,
+				   NULL, "%s", short_name);
+	kfree(short_name);
+	if (ret)
+		return ret;
+
+	kobject_uevent(&new_var->kobj, KOBJ_ADD);
+	if (efivar_entry_add(new_var, &efivar_sysfs_list)) {
+		efivar_unregister(new_var);
+		return -EINTR;
+	}
+
+	return 0;
+}
+
+static int
+create_efivars_bin_attributes(void)
+{
+	struct bin_attribute *attr;
+	int error;
+
+	/* new_var */
+	attr = kzalloc(sizeof(*attr), GFP_KERNEL);
+	if (!attr)
+		return -ENOMEM;
+
+	attr->attr.name = "new_var";
+	attr->attr.mode = 0200;
+	attr->write = efivar_create;
+	efivars_new_var = attr;
+
+	/* del_var */
+	attr = kzalloc(sizeof(*attr), GFP_KERNEL);
+	if (!attr) {
+		error = -ENOMEM;
+		goto out_free;
+	}
+	attr->attr.name = "del_var";
+	attr->attr.mode = 0200;
+	attr->write = efivar_delete;
+	efivars_del_var = attr;
+
+	sysfs_bin_attr_init(efivars_new_var);
+	sysfs_bin_attr_init(efivars_del_var);
+
+	/* Register */
+	error = sysfs_create_bin_file(&efivars_kset->kobj, efivars_new_var);
+	if (error) {
+		printk(KERN_ERR "efivars: unable to create new_var sysfs file"
+			" due to error %d\n", error);
+		goto out_free;
+	}
+
+	error = sysfs_create_bin_file(&efivars_kset->kobj, efivars_del_var);
+	if (error) {
+		printk(KERN_ERR "efivars: unable to create del_var sysfs file"
+			" due to error %d\n", error);
+		sysfs_remove_bin_file(&efivars_kset->kobj, efivars_new_var);
+		goto out_free;
+	}
+
+	return 0;
+out_free:
+	kfree(efivars_del_var);
+	efivars_del_var = NULL;
+	kfree(efivars_new_var);
+	efivars_new_var = NULL;
+	return error;
+}
+
+static int efivar_update_sysfs_entry(efi_char16_t *name, efi_guid_t vendor,
+				     unsigned long name_size, void *data)
+{
+	struct efivar_entry *entry = data;
+
+	if (efivar_entry_find(name, vendor, &efivar_sysfs_list, false))
+		return 0;
+
+	memcpy(entry->var.VariableName, name, name_size);
+	memcpy(&(entry->var.VendorGuid), &vendor, sizeof(efi_guid_t));
+
+	return 1;
+}
+
+static void efivar_update_sysfs_entries(struct work_struct *work)
+{
+	struct efivar_entry *entry;
+	int err;
+
+	/* Add new sysfs entries */
+	while (1) {
+		entry = kzalloc(sizeof(*entry), GFP_KERNEL);
+		if (!entry)
+			return;
+
+		err = efivar_init(efivar_update_sysfs_entry, entry,
+				  false, &efivar_sysfs_list);
+		if (!err)
+			break;
+
+		efivar_create_sysfs_entry(entry);
+	}
+
+	kfree(entry);
+}
+
+static int efivars_sysfs_callback(efi_char16_t *name, efi_guid_t vendor,
+				  unsigned long name_size, void *data)
+{
+	struct efivar_entry *entry;
+
+	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
+	if (!entry)
+		return -ENOMEM;
+
+	memcpy(entry->var.VariableName, name, name_size);
+	memcpy(&(entry->var.VendorGuid), &vendor, sizeof(efi_guid_t));
+
+	efivar_create_sysfs_entry(entry);
+
+	return 0;
+}
+
+static int efivar_sysfs_destroy(struct efivar_entry *entry, void *data)
+{
+	int err = efivar_entry_remove(entry);
+
+	if (err)
+		return err;
+	efivar_unregister(entry);
+	return 0;
+}
+
+static void efivars_sysfs_exit(void)
+{
+	/* Remove all entries and destroy */
+	int err;
+
+	err = __efivar_entry_iter(efivar_sysfs_destroy, &efivar_sysfs_list,
+				  NULL, NULL);
+	if (err) {
+		pr_err("efivars: Failed to destroy sysfs entries\n");
+		return;
+	}
+
+	if (efivars_new_var)
+		sysfs_remove_bin_file(&efivars_kset->kobj, efivars_new_var);
+	if (efivars_del_var)
+		sysfs_remove_bin_file(&efivars_kset->kobj, efivars_del_var);
+	kfree(efivars_new_var);
+	kfree(efivars_del_var);
+	kset_unregister(efivars_kset);
+}
+
+int efivars_sysfs_init(void)
+{
+	struct kobject *parent_kobj = efivars_kobject();
+	int error = 0;
+
+	if (!efi_enabled(EFI_RUNTIME_SERVICES))
+		return -ENODEV;
+
+	/* No efivars have been registered yet */
+	if (!parent_kobj)
+		return 0;
+
+	printk(KERN_INFO "EFI Variables Facility v%s %s\n", EFIVARS_VERSION,
+	       EFIVARS_DATE);
+
+	efivars_kset = kset_create_and_add("vars", NULL, parent_kobj);
+	if (!efivars_kset) {
+		printk(KERN_ERR "efivars: Subsystem registration failed.\n");
+		return -ENOMEM;
+	}
+
+	efivar_init(efivars_sysfs_callback, NULL, true, &efivar_sysfs_list);
+
+	error = create_efivars_bin_attributes();
+	if (error) {
+		efivars_sysfs_exit();
+		return error;
+	}
+
+	INIT_WORK(&efivar_work, efivar_update_sysfs_entries);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(efivars_sysfs_init);
+
+module_init(efivars_sysfs_init);
+module_exit(efivars_sysfs_exit);
diff --git a/drivers/firmware/efi/esrt.c b/drivers/firmware/efi/esrt.c
new file mode 100644
index 0000000..5d06bd2
--- /dev/null
+++ b/drivers/firmware/efi/esrt.c
@@ -0,0 +1,436 @@
+/*
+ * esrt.c
+ *
+ * This module exports EFI System Resource Table (ESRT) entries into userspace
+ * through the sysfs file system. The ESRT provides a read-only catalog of
+ * system components for which the system accepts firmware upgrades via UEFI's
+ * "Capsule Update" feature. This module allows userland utilities to evaluate
+ * what firmware updates can be applied to this system, and potentially arrange
+ * for those updates to occur.
+ *
+ * Data is currently found below /sys/firmware/efi/esrt/...
+ */
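+/*
+ * An illustrative layout for a machine with a single updatable component
+ * (entry names and count vary per system):
+ *
+ *   /sys/firmware/efi/esrt/fw_resource_count
+ *   /sys/firmware/efi/esrt/fw_resource_version
+ *   /sys/firmware/efi/esrt/entries/entry0/fw_class
+ *   /sys/firmware/efi/esrt/entries/entry0/fw_version
+ *   /sys/firmware/efi/esrt/entries/entry0/last_attempt_status
+ *   ...
+ */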
+#define pr_fmt(fmt) "esrt: " fmt
+
+#include <linux/capability.h>
+#include <linux/device.h>
+#include <linux/efi.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/kobject.h>
+#include <linux/list.h>
+#include <linux/memblock.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+
+#include <asm/io.h>
+#include <asm/early_ioremap.h>
+
+struct efi_system_resource_entry_v1 {
+	efi_guid_t	fw_class;
+	u32		fw_type;
+	u32		fw_version;
+	u32		lowest_supported_fw_version;
+	u32		capsule_flags;
+	u32		last_attempt_version;
+	u32		last_attempt_status;
+};
+
+/*
+ * _count and _version are what they seem like.  _max is actually just
+ * accounting info for the firmware when creating the table; it should never
+ * have been exposed to us.  To wit, the spec says:
+ *   "The maximum number of resource array entries that can be within the
+ *   table without reallocating the table, must not be zero."
+ * Since there's no guidance about what that means in terms of memory layout,
+ * it means nothing to us.
+ */
+struct efi_system_resource_table {
+	u32	fw_resource_count;
+	u32	fw_resource_count_max;
+	u64	fw_resource_version;
+	u8	entries[];
+};
+
+static phys_addr_t esrt_data;
+static size_t esrt_data_size;
+
+static struct efi_system_resource_table *esrt;
+
+struct esre_entry {
+	union {
+		struct efi_system_resource_entry_v1 *esre1;
+	} esre;
+
+	struct kobject kobj;
+	struct list_head list;
+};
+
+/* global list of esre_entry. */
+static LIST_HEAD(entry_list);
+
+/* entry attribute */
+struct esre_attribute {
+	struct attribute attr;
+	ssize_t (*show)(struct esre_entry *entry, char *buf);
+	ssize_t (*store)(struct esre_entry *entry,
+			 const char *buf, size_t count);
+};
+
+static struct esre_entry *to_entry(struct kobject *kobj)
+{
+	return container_of(kobj, struct esre_entry, kobj);
+}
+
+static struct esre_attribute *to_attr(struct attribute *attr)
+{
+	return container_of(attr, struct esre_attribute, attr);
+}
+
+static ssize_t esre_attr_show(struct kobject *kobj,
+			      struct attribute *_attr, char *buf)
+{
+	struct esre_entry *entry = to_entry(kobj);
+	struct esre_attribute *attr = to_attr(_attr);
+
+	/* Don't tell normal users what firmware versions we've got... */
+	if (!capable(CAP_SYS_ADMIN))
+		return -EACCES;
+
+	return attr->show(entry, buf);
+}
+
+static const struct sysfs_ops esre_attr_ops = {
+	.show = esre_attr_show,
+};
+
+/* Generic ESRT Entry ("ESRE") support. */
+static ssize_t fw_class_show(struct esre_entry *entry, char *buf)
+{
+	char *str = buf;
+
+	efi_guid_to_str(&entry->esre.esre1->fw_class, str);
+	str += strlen(str);
+	str += sprintf(str, "\n");
+
+	return str - buf;
+}
+
+static struct esre_attribute esre_fw_class = __ATTR_RO_MODE(fw_class, 0400);
+
+#define esre_attr_decl(name, size, fmt) \
+static ssize_t name##_show(struct esre_entry *entry, char *buf) \
+{ \
+	return sprintf(buf, fmt "\n", \
+		       le##size##_to_cpu(entry->esre.esre1->name)); \
+} \
+\
+static struct esre_attribute esre_##name = __ATTR_RO_MODE(name, 0400)
+
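+/*
+ * For example, esre_attr_decl(fw_type, 32, "%u") expands to:
+ *
+ *   static ssize_t fw_type_show(struct esre_entry *entry, char *buf)
+ *   {
+ *           return sprintf(buf, "%u\n",
+ *                          le32_to_cpu(entry->esre.esre1->fw_type));
+ *   }
+ *
+ *   static struct esre_attribute esre_fw_type = __ATTR_RO_MODE(fw_type, 0400);
+ */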
+esre_attr_decl(fw_type, 32, "%u");
+esre_attr_decl(fw_version, 32, "%u");
+esre_attr_decl(lowest_supported_fw_version, 32, "%u");
+esre_attr_decl(capsule_flags, 32, "0x%x");
+esre_attr_decl(last_attempt_version, 32, "%u");
+esre_attr_decl(last_attempt_status, 32, "%u");
+
+static struct attribute *esre1_attrs[] = {
+	&esre_fw_class.attr,
+	&esre_fw_type.attr,
+	&esre_fw_version.attr,
+	&esre_lowest_supported_fw_version.attr,
+	&esre_capsule_flags.attr,
+	&esre_last_attempt_version.attr,
+	&esre_last_attempt_status.attr,
+	NULL
+};
+static void esre_release(struct kobject *kobj)
+{
+	struct esre_entry *entry = to_entry(kobj);
+
+	list_del(&entry->list);
+	kfree(entry);
+}
+
+static struct kobj_type esre1_ktype = {
+	.release = esre_release,
+	.sysfs_ops = &esre_attr_ops,
+	.default_attrs = esre1_attrs,
+};
+
+
+static struct kobject *esrt_kobj;
+static struct kset *esrt_kset;
+
+static int esre_create_sysfs_entry(void *esre, int entry_num)
+{
+	struct esre_entry *entry;
+
+	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
+	if (!entry)
+		return -ENOMEM;
+
+	entry->kobj.kset = esrt_kset;
+
+	if (esrt->fw_resource_version == 1) {
+		int rc = 0;
+
+		entry->esre.esre1 = esre;
+		rc = kobject_init_and_add(&entry->kobj, &esre1_ktype, NULL,
+					  "entry%d", entry_num);
+		if (rc) {
+			kfree(entry);
+			return rc;
+		}
+	}
+
+	list_add_tail(&entry->list, &entry_list);
+	return 0;
+}
+
+/* support for displaying ESRT fields at the top level */
+#define esrt_attr_decl(name, size, fmt) \
+static ssize_t name##_show(struct kobject *kobj, \
+				  struct kobj_attribute *attr, char *buf)\
+{ \
+	return sprintf(buf, fmt "\n", le##size##_to_cpu(esrt->name)); \
+} \
+\
+static struct kobj_attribute esrt_##name = __ATTR_RO_MODE(name, 0400)
+
+esrt_attr_decl(fw_resource_count, 32, "%u");
+esrt_attr_decl(fw_resource_count_max, 32, "%u");
+esrt_attr_decl(fw_resource_version, 64, "%llu");
+
+static struct attribute *esrt_attrs[] = {
+	&esrt_fw_resource_count.attr,
+	&esrt_fw_resource_count_max.attr,
+	&esrt_fw_resource_version.attr,
+	NULL,
+};
+
+static inline int esrt_table_exists(void)
+{
+	if (!efi_enabled(EFI_CONFIG_TABLES))
+		return 0;
+	if (efi.esrt == EFI_INVALID_TABLE_ADDR)
+		return 0;
+	return 1;
+}
+
+static umode_t esrt_attr_is_visible(struct kobject *kobj,
+				    struct attribute *attr, int n)
+{
+	if (!esrt_table_exists())
+		return 0;
+	return attr->mode;
+}
+
+static const struct attribute_group esrt_attr_group = {
+	.attrs = esrt_attrs,
+	.is_visible = esrt_attr_is_visible,
+};
+
+/*
+ * remap the table, validate it, mark it reserved and unmap it.
+ */
+void __init efi_esrt_init(void)
+{
+	void *va;
+	struct efi_system_resource_table tmpesrt;
+	struct efi_system_resource_entry_v1 *v1_entries;
+	size_t size, max, entry_size, entries_size;
+	efi_memory_desc_t md;
+	int rc;
+	phys_addr_t end;
+
+	pr_debug("esrt-init: loading.\n");
+	if (!esrt_table_exists())
+		return;
+
+	rc = efi_mem_desc_lookup(efi.esrt, &md);
+	if (rc < 0 ||
+	    (!(md.attribute & EFI_MEMORY_RUNTIME) &&
+	     md.type != EFI_BOOT_SERVICES_DATA &&
+	     md.type != EFI_RUNTIME_SERVICES_DATA)) {
+		pr_warn("ESRT header is not in the memory map.\n");
+		return;
+	}
+
+	max = efi_mem_desc_end(&md);
+	if (max < efi.esrt) {
+		pr_err("EFI memory descriptor is invalid. (esrt: %p max: %p)\n",
+		       (void *)efi.esrt, (void *)max);
+		return;
+	}
+
+	size = sizeof(*esrt);
+	max -= efi.esrt;
+
+	if (max < size) {
+		pr_err("ESRT header doesn't fit on single memory map entry. (size: %zu max: %zu)\n",
+		       size, max);
+		return;
+	}
+
+	va = early_memremap(efi.esrt, size);
+	if (!va) {
+		pr_err("early_memremap(%p, %zu) failed.\n", (void *)efi.esrt,
+		       size);
+		return;
+	}
+
+	memcpy(&tmpesrt, va, sizeof(tmpesrt));
+	early_memunmap(va, size);
+
+	if (tmpesrt.fw_resource_version == 1) {
+		entry_size = sizeof(*v1_entries);
+	} else {
+		pr_err("Unsupported ESRT version %lld.\n",
+		       tmpesrt.fw_resource_version);
+		return;
+	}
+
+	if (tmpesrt.fw_resource_count > 0 && max - size < entry_size) {
+		pr_err("ESRT memory map entry can only hold the header. (max: %zu size: %zu)\n",
+		       max - size, entry_size);
+		return;
+	}
+
+	/*
+	 * The format doesn't really give us any boundary to test here,
+	 * so I'm making up 128 as the max number of individually updatable
+	 * components we support.
+	 * 128 should be pretty excessive, but there's still some chance
+	 * somebody will do that someday and we'll need to raise this.
+	 */
+	if (tmpesrt.fw_resource_count > 128) {
+		pr_err("ESRT says fw_resource_count has very large value %d.\n",
+		       tmpesrt.fw_resource_count);
+		return;
+	}
+
+	/*
+	 * We know it can't be larger than N * sizeof() here, and N is limited
+	 * by the previous test to a small number, so there's no overflow.
+	 */
+	entries_size = tmpesrt.fw_resource_count * entry_size;
+	if (max < size + entries_size) {
+		pr_err("ESRT does not fit on single memory map entry (size: %zu max: %zu)\n",
+		       size, max);
+		return;
+	}
+
+	size += entries_size;
+
+	esrt_data = (phys_addr_t)efi.esrt;
+	esrt_data_size = size;
+
+	end = esrt_data + size;
+	pr_info("Reserving ESRT space from %pa to %pa.\n", &esrt_data, &end);
+	if (md.type == EFI_BOOT_SERVICES_DATA)
+		efi_mem_reserve(esrt_data, esrt_data_size);
+
+	pr_debug("esrt-init: loaded.\n");
+}
+
+static int __init register_entries(void)
+{
+	struct efi_system_resource_entry_v1 *v1_entries = (void *)esrt->entries;
+	int i, rc;
+
+	if (!esrt_table_exists())
+		return 0;
+
+	for (i = 0; i < le32_to_cpu(esrt->fw_resource_count); i++) {
+		void *esre = NULL;
+		if (esrt->fw_resource_version == 1) {
+			esre = &v1_entries[i];
+		} else {
+			pr_err("Unsupported ESRT version %lld.\n",
+			       esrt->fw_resource_version);
+			return -EINVAL;
+		}
+
+		rc = esre_create_sysfs_entry(esre, i);
+		if (rc < 0) {
+			pr_err("ESRT entry creation failed with error %d.\n",
+			       rc);
+			return rc;
+		}
+	}
+	return 0;
+}
+
+static void cleanup_entry_list(void)
+{
+	struct esre_entry *entry, *next;
+
+	list_for_each_entry_safe(entry, next, &entry_list, list) {
+		kobject_put(&entry->kobj);
+	}
+}
+
+static int __init esrt_sysfs_init(void)
+{
+	int error;
+
+	pr_debug("esrt-sysfs: loading.\n");
+	if (!esrt_data || !esrt_data_size)
+		return -ENOSYS;
+
+	esrt = memremap(esrt_data, esrt_data_size, MEMREMAP_WB);
+	if (!esrt) {
+		pr_err("memremap(%pa, %zu) failed.\n", &esrt_data,
+		       esrt_data_size);
+		return -ENOMEM;
+	}
+
+	esrt_kobj = kobject_create_and_add("esrt", efi_kobj);
+	if (!esrt_kobj) {
+		pr_err("Firmware table registration failed.\n");
+		error = -ENOMEM;
+		goto err;
+	}
+
+	error = sysfs_create_group(esrt_kobj, &esrt_attr_group);
+	if (error) {
+		pr_err("Sysfs attribute export failed with error %d.\n",
+		       error);
+		goto err_remove_esrt;
+	}
+
+	esrt_kset = kset_create_and_add("entries", NULL, esrt_kobj);
+	if (!esrt_kset) {
+		pr_err("kset creation failed.\n");
+		error = -ENOMEM;
+		goto err_remove_group;
+	}
+
+	error = register_entries();
+	if (error)
+		goto err_cleanup_list;
+
+	pr_debug("esrt-sysfs: loaded.\n");
+
+	return 0;
+err_cleanup_list:
+	cleanup_entry_list();
+	kset_unregister(esrt_kset);
+err_remove_group:
+	sysfs_remove_group(esrt_kobj, &esrt_attr_group);
+err_remove_esrt:
+	kobject_put(esrt_kobj);
+err:
+	memunmap(esrt);
+	esrt = NULL;
+	return error;
+}
+device_initcall(esrt_sysfs_init);
+
+/*
+MODULE_AUTHOR("Peter Jones <pjones@redhat.com>");
+MODULE_DESCRIPTION("EFI System Resource Table support");
+MODULE_LICENSE("GPL");
+*/
diff --git a/drivers/firmware/efi/fake_mem.c b/drivers/firmware/efi/fake_mem.c
new file mode 100644
index 0000000..6c7d60c
--- /dev/null
+++ b/drivers/firmware/efi/fake_mem.c
@@ -0,0 +1,141 @@
+/*
+ * fake_mem.c
+ *
+ * Copyright (C) 2015 FUJITSU LIMITED
+ * Author: Taku Izumi <izumi.taku@jp.fujitsu.com>
+ *
+ * This code introduces a new boot option named "efi_fake_mem".
+ * By specifying this parameter, you can add arbitrary attributes to
+ * specific memory ranges by updating the original (firmware-provided)
+ * EFI memmap.
+ *
+ *  This program is free software; you can redistribute it and/or modify it
+ *  under the terms and conditions of the GNU General Public License,
+ *  version 2, as published by the Free Software Foundation.
+ *
+ *  This program is distributed in the hope it will be useful, but WITHOUT
+ *  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ *  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ *  more details.
+ *
+ *  You should have received a copy of the GNU General Public License along with
+ *  this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ *  The full GNU General Public License is included in this distribution in
+ *  the file called "COPYING".
+ */
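+/*
+ * Example (illustrative): mark 2 GiB starting at 4 GiB as
+ * EFI_MEMORY_MORE_RELIABLE (attribute bit 0x10000):
+ *
+ *   efi_fake_mem=2G@4G:0x10000
+ *
+ * Multiple ranges may be given, separated by commas.
+ */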
+
+#include <linux/kernel.h>
+#include <linux/efi.h>
+#include <linux/init.h>
+#include <linux/memblock.h>
+#include <linux/types.h>
+#include <linux/sort.h>
+#include <asm/efi.h>
+
+#define EFI_MAX_FAKEMEM CONFIG_EFI_MAX_FAKE_MEM
+
+static struct efi_mem_range fake_mems[EFI_MAX_FAKEMEM];
+static int nr_fake_mem;
+
+static int __init cmp_fake_mem(const void *x1, const void *x2)
+{
+	const struct efi_mem_range *m1 = x1;
+	const struct efi_mem_range *m2 = x2;
+
+	if (m1->range.start < m2->range.start)
+		return -1;
+	if (m1->range.start > m2->range.start)
+		return 1;
+	return 0;
+}
+
+void __init efi_fake_memmap(void)
+{
+	int new_nr_map = efi.memmap.nr_map;
+	efi_memory_desc_t *md;
+	phys_addr_t new_memmap_phy;
+	void *new_memmap;
+	int i;
+
+	if (!nr_fake_mem)
+		return;
+
+	/* count up the number of EFI memory descriptors */
+	for (i = 0; i < nr_fake_mem; i++) {
+		for_each_efi_memory_desc(md) {
+			struct range *r = &fake_mems[i].range;
+
+			new_nr_map += efi_memmap_split_count(md, r);
+		}
+	}
+
+	/* allocate memory for new EFI memmap */
+	new_memmap_phy = efi_memmap_alloc(new_nr_map);
+	if (!new_memmap_phy)
+		return;
+
+	/* create new EFI memmap */
+	new_memmap = early_memremap(new_memmap_phy,
+				    efi.memmap.desc_size * new_nr_map);
+	if (!new_memmap) {
+		memblock_free(new_memmap_phy, efi.memmap.desc_size * new_nr_map);
+		return;
+	}
+
+	for (i = 0; i < nr_fake_mem; i++)
+		efi_memmap_insert(&efi.memmap, new_memmap, &fake_mems[i]);
+
+	/* swap into new EFI memmap */
+	early_memunmap(new_memmap, efi.memmap.desc_size * new_nr_map);
+
+	efi_memmap_install(new_memmap_phy, new_nr_map);
+
+	/* print new EFI memmap */
+	efi_print_memmap();
+}
+
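+/*
+ * Parse "efi_fake_mem=nn[KMG]@ss[KMG]:aa[,nn[KMG]@ss[KMG]:aa,..]", where nn
+ * is the size of the region, ss its start address and aa the attribute
+ * value to apply, e.g. "efi_fake_mem=2G@4G:0x10000".
+ */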
+static int __init setup_fake_mem(char *p)
+{
+	u64 start = 0, mem_size = 0, attribute = 0;
+	int i;
+
+	if (!p)
+		return -EINVAL;
+
+	while (*p != '\0') {
+		mem_size = memparse(p, &p);
+		if (*p == '@')
+			start = memparse(p+1, &p);
+		else
+			break;
+
+		if (*p == ':')
+			attribute = simple_strtoull(p+1, &p, 0);
+		else
+			break;
+
+		if (nr_fake_mem >= EFI_MAX_FAKEMEM)
+			break;
+
+		fake_mems[nr_fake_mem].range.start = start;
+		fake_mems[nr_fake_mem].range.end = start + mem_size - 1;
+		fake_mems[nr_fake_mem].attribute = attribute;
+		nr_fake_mem++;
+
+		if (*p == ',')
+			p++;
+	}
+
+	sort(fake_mems, nr_fake_mem, sizeof(struct efi_mem_range),
+	     cmp_fake_mem, NULL);
+
+	for (i = 0; i < nr_fake_mem; i++)
+		pr_info("efi_fake_mem: add attr=0x%016llx to [mem 0x%016llx-0x%016llx]",
+			fake_mems[i].attribute, fake_mems[i].range.start,
+			fake_mems[i].range.end);
+
+	return *p == '\0' ? 0 : -EINVAL;
+}
+
+early_param("efi_fake_mem", setup_fake_mem);
diff --git a/drivers/firmware/efi/libstub/Makefile b/drivers/firmware/efi/libstub/Makefile
new file mode 100644
index 0000000..c516276
--- /dev/null
+++ b/drivers/firmware/efi/libstub/Makefile
@@ -0,0 +1,99 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# The stub may be linked into the kernel proper or into a separate boot binary,
+# but in either case, it executes before the kernel does (with MMU disabled) so
+# things like ftrace and stack-protector are likely to cause trouble if left
+# enabled, even if doing so doesn't break the build.
+#
+cflags-$(CONFIG_X86_32)		:= -march=i386
+cflags-$(CONFIG_X86_64)		:= -mcmodel=small
+cflags-$(CONFIG_X86)		+= -m$(BITS) -D__KERNEL__ -O2 \
+				   -fPIC -fno-strict-aliasing -mno-red-zone \
+				   -mno-mmx -mno-sse -fshort-wchar
+
+# arm64 uses the full KBUILD_CFLAGS so it's necessary to explicitly
+# disable the stackleak plugin
+cflags-$(CONFIG_ARM64)		:= $(subst -pg,,$(KBUILD_CFLAGS)) -fpie \
+				   $(DISABLE_STACKLEAK_PLUGIN)
+cflags-$(CONFIG_ARM)		:= $(subst -pg,,$(KBUILD_CFLAGS)) \
+				   -fno-builtin -fpic \
+				   $(call cc-option,-mno-single-pic-base)
+
+cflags-$(CONFIG_EFI_ARMSTUB)	+= -I$(srctree)/scripts/dtc/libfdt
+
+KBUILD_CFLAGS			:= $(cflags-y) -DDISABLE_BRANCH_PROFILING \
+				   -D__NO_FORTIFY \
+				   $(call cc-option,-ffreestanding) \
+				   $(call cc-option,-fno-stack-protector) \
+				   -D__DISABLE_EXPORTS
+
+GCOV_PROFILE			:= n
+KASAN_SANITIZE			:= n
+UBSAN_SANITIZE			:= n
+OBJECT_FILES_NON_STANDARD	:= y
+
+# Prevents link failures: __sanitizer_cov_trace_pc() is not linked in.
+KCOV_INSTRUMENT			:= n
+
+lib-y				:= efi-stub-helper.o gop.o secureboot.o tpm.o
+
+# include the stub's generic dependencies from lib/ when building for ARM/arm64
+arm-deps-y := fdt_rw.c fdt_ro.c fdt_wip.c fdt.c fdt_empty_tree.c fdt_sw.c
+arm-deps-$(CONFIG_ARM64) += sort.c
+
+$(obj)/lib-%.o: $(srctree)/lib/%.c FORCE
+	$(call if_changed_rule,cc_o_c)
+
+lib-$(CONFIG_EFI_ARMSTUB)	+= arm-stub.o fdt.o string.o random.o \
+				   $(patsubst %.c,lib-%.o,$(arm-deps-y))
+
+lib-$(CONFIG_ARM)		+= arm32-stub.o
+lib-$(CONFIG_ARM64)		+= arm64-stub.o
+CFLAGS_arm64-stub.o 		:= -DTEXT_OFFSET=$(TEXT_OFFSET)
+
+#
+# arm64 puts the stub in the kernel proper, which will unnecessarily retain all
+# code indefinitely unless it is annotated as __init/__initdata/__initconst etc.
+# So let's apply the __init annotations at the section level, by prefixing
+# the section names directly. This will ensure that even all the inline string
+# literals are covered.
+# The fact that the stub and the kernel proper are essentially the same binary
+# also means that we need to be extra careful to make sure that the stub does
+# not rely on any absolute symbol references, considering that the virtual
+# kernel mapping that the linker uses is not active yet when the stub is
+# executing. So build all C dependencies of the EFI stub into libstub, and do
+# a verification pass to see if any absolute relocations exist in any of the
+# object files.
+#
+extra-$(CONFIG_EFI_ARMSTUB)	:= $(lib-y)
+lib-$(CONFIG_EFI_ARMSTUB)	:= $(patsubst %.o,%.stub.o,$(lib-y))
+
+STUBCOPY_RM-y			:= -R *ksymtab* -R *kcrctab*
+STUBCOPY_FLAGS-$(CONFIG_ARM64)	+= --prefix-alloc-sections=.init \
+				   --prefix-symbols=__efistub_
+STUBCOPY_RELOC-$(CONFIG_ARM64)	:= R_AARCH64_ABS
+
+$(obj)/%.stub.o: $(obj)/%.o FORCE
+	$(call if_changed,stubcopy)
+
+#
+# Strip debug sections and some other sections that may legally contain
+# absolute relocations, so that we can inspect the remaining sections for
+# such relocations. If none are found, regenerate the output object, but
+# this time, use objcopy and leave all sections in place.
+#
+quiet_cmd_stubcopy = STUBCPY $@
+      cmd_stubcopy = if $(STRIP) --strip-debug $(STUBCOPY_RM-y) -o $@ $<; \
+		     then if $(OBJDUMP) -r $@ | grep $(STUBCOPY_RELOC-y); \
+		     then (echo >&2 "$@: absolute symbol references not allowed in the EFI stub"; \
+			   rm -f $@; /bin/false); 			  \
+		     else $(OBJCOPY) $(STUBCOPY_FLAGS-y) $< $@; fi	  \
+		     else /bin/false; fi
+
+#
+# ARM discards the .data section because it disallows r/w data in the
+# decompressor. So move our .data to .data.efistub, which is preserved
+# explicitly by the decompressor linker script.
+#
+STUBCOPY_FLAGS-$(CONFIG_ARM)	+= --rename-section .data=.data.efistub
+STUBCOPY_RELOC-$(CONFIG_ARM)	:= R_ARM_ABS
diff --git a/drivers/firmware/efi/libstub/arm-stub.c b/drivers/firmware/efi/libstub/arm-stub.c
new file mode 100644
index 0000000..6920033
--- /dev/null
+++ b/drivers/firmware/efi/libstub/arm-stub.c
@@ -0,0 +1,374 @@
+/*
+ * EFI stub implementation that is shared by arm and arm64 architectures.
+ * This should be #included by the EFI stub implementation files.
+ *
+ * Copyright (C) 2013,2014 Linaro Limited
+ *     Roy Franz <roy.franz@linaro.org>
+ * Copyright (C) 2013 Red Hat, Inc.
+ *     Mark Salter <msalter@redhat.com>
+ *
+ * This file is part of the Linux kernel, and is made available under the
+ * terms of the GNU General Public License version 2.
+ *
+ */
+
+#include <linux/efi.h>
+#include <linux/sort.h>
+#include <asm/efi.h>
+
+#include "efistub.h"
+
+/*
+ * This is the base address at which to start allocating virtual memory ranges
+ * for UEFI Runtime Services. This is in the low TTBR0 range so that we can use
+ * any allocation we choose, and eliminate the risk of a conflict after kexec.
+ * The value chosen is the largest non-zero power of 2 suitable for this purpose
+ * both on 32-bit and 64-bit ARM CPUs, to maximize the likelihood that it can
+ * be mapped efficiently.
+ * Since 32-bit ARM could potentially execute with a 1G/3G user/kernel split,
+ * map everything below 1 GB. (512 MB is a reasonable upper bound for the
+ * entire footprint of the UEFI runtime services memory regions)
+ */
+#define EFI_RT_VIRTUAL_BASE	SZ_512M
+#define EFI_RT_VIRTUAL_SIZE	SZ_512M
+
+#ifdef CONFIG_ARM64
+# define EFI_RT_VIRTUAL_LIMIT	TASK_SIZE_64
+#else
+# define EFI_RT_VIRTUAL_LIMIT	TASK_SIZE
+#endif
+
+static u64 virtmap_base = EFI_RT_VIRTUAL_BASE;
+
+void efi_char16_printk(efi_system_table_t *sys_table_arg,
+			      efi_char16_t *str)
+{
+	struct efi_simple_text_output_protocol *out;
+
+	out = (struct efi_simple_text_output_protocol *)sys_table_arg->con_out;
+	out->output_string(out, str);
+}
+
+static struct screen_info *setup_graphics(efi_system_table_t *sys_table_arg)
+{
+	efi_guid_t gop_proto = EFI_GRAPHICS_OUTPUT_PROTOCOL_GUID;
+	efi_status_t status;
+	unsigned long size;
+	void **gop_handle = NULL;
+	struct screen_info *si = NULL;
+
+	size = 0;
+	status = efi_call_early(locate_handle, EFI_LOCATE_BY_PROTOCOL,
+				&gop_proto, NULL, &size, gop_handle);
+	if (status == EFI_BUFFER_TOO_SMALL) {
+		si = alloc_screen_info(sys_table_arg);
+		if (!si)
+			return NULL;
+		efi_setup_gop(sys_table_arg, si, &gop_proto, size);
+	}
+	return si;
+}
+
+/*
+ * This function handles the architecture-specific differences between arm and
+ * arm64 regarding where the kernel image must be loaded and any memory that
+ * must be reserved. On failure it is required to free all allocations it
+ * has made.
+ */
+efi_status_t handle_kernel_image(efi_system_table_t *sys_table,
+				 unsigned long *image_addr,
+				 unsigned long *image_size,
+				 unsigned long *reserve_addr,
+				 unsigned long *reserve_size,
+				 unsigned long dram_base,
+				 efi_loaded_image_t *image);
+/*
+ * EFI entry point for the arm/arm64 EFI stubs.  This is the entrypoint
+ * that is described in the PE/COFF header.  Most of the code is the same
+ * for both architectures, with the arch-specific code provided in the
+ * handle_kernel_image() function.
+ */
+unsigned long efi_entry(void *handle, efi_system_table_t *sys_table,
+			       unsigned long *image_addr)
+{
+	efi_loaded_image_t *image;
+	efi_status_t status;
+	unsigned long image_size = 0;
+	unsigned long dram_base;
+	/* address and size pairs for memory management */
+	unsigned long initrd_addr;
+	u64 initrd_size = 0;
+	unsigned long fdt_addr = 0;  /* Original DTB */
+	unsigned long fdt_size = 0;
+	char *cmdline_ptr = NULL;
+	int cmdline_size = 0;
+	unsigned long new_fdt_addr;
+	efi_guid_t loaded_image_proto = LOADED_IMAGE_PROTOCOL_GUID;
+	unsigned long reserve_addr = 0;
+	unsigned long reserve_size = 0;
+	enum efi_secureboot_mode secure_boot;
+	struct screen_info *si;
+
+	/* Check if we were booted by the EFI firmware */
+	if (sys_table->hdr.signature != EFI_SYSTEM_TABLE_SIGNATURE)
+		goto fail;
+
+	status = check_platform_features(sys_table);
+	if (status != EFI_SUCCESS)
+		goto fail;
+
+	/*
+	 * Get a handle to the loaded image protocol.  This is used to get
+	 * information about the running image, such as size and the command
+	 * line.
+	 */
+	status = sys_table->boottime->handle_protocol(handle,
+					&loaded_image_proto, (void *)&image);
+	if (status != EFI_SUCCESS) {
+		pr_efi_err(sys_table, "Failed to get loaded image protocol\n");
+		goto fail;
+	}
+
+	dram_base = get_dram_base(sys_table);
+	if (dram_base == EFI_ERROR) {
+		pr_efi_err(sys_table, "Failed to find DRAM base\n");
+		goto fail;
+	}
+
+	/*
+	 * Get the command line from EFI, using the LOADED_IMAGE
+	 * protocol. We are going to copy the command line into the
+	 * device tree, so this can be allocated anywhere.
+	 */
+	cmdline_ptr = efi_convert_cmdline(sys_table, image, &cmdline_size);
+	if (!cmdline_ptr) {
+		pr_efi_err(sys_table, "getting command line via LOADED_IMAGE_PROTOCOL\n");
+		goto fail;
+	}
+
+	if (IS_ENABLED(CONFIG_CMDLINE_EXTEND) ||
+	    IS_ENABLED(CONFIG_CMDLINE_FORCE) ||
+	    cmdline_size == 0)
+		efi_parse_options(CONFIG_CMDLINE);
+
+	if (!IS_ENABLED(CONFIG_CMDLINE_FORCE) && cmdline_size > 0)
+		efi_parse_options(cmdline_ptr);
+
+	pr_efi(sys_table, "Booting Linux Kernel...\n");
+
+	si = setup_graphics(sys_table);
+
+	status = handle_kernel_image(sys_table, image_addr, &image_size,
+				     &reserve_addr,
+				     &reserve_size,
+				     dram_base, image);
+	if (status != EFI_SUCCESS) {
+		pr_efi_err(sys_table, "Failed to relocate kernel\n");
+		goto fail_free_cmdline;
+	}
+
+	/* Ask the firmware to clear memory on unclean shutdown */
+	efi_enable_reset_attack_mitigation(sys_table);
+
+	secure_boot = efi_get_secureboot(sys_table);
+
+	/*
+	 * Unauthenticated device tree data is a security hazard, so ignore
+	 * 'dtb=' unless UEFI Secure Boot is disabled.  We assume that secure
+	 * boot is enabled if we can't determine its state.
+	 */
+	if (!IS_ENABLED(CONFIG_EFI_ARMSTUB_DTB_LOADER) ||
+	     secure_boot != efi_secureboot_mode_disabled) {
+		if (strstr(cmdline_ptr, "dtb="))
+			pr_efi(sys_table, "Ignoring DTB from command line.\n");
+	} else {
+		status = handle_cmdline_files(sys_table, image, cmdline_ptr,
+					      "dtb=",
+					      ~0UL, &fdt_addr, &fdt_size);
+
+		if (status != EFI_SUCCESS) {
+			pr_efi_err(sys_table, "Failed to load device tree!\n");
+			goto fail_free_image;
+		}
+	}
+
+	if (fdt_addr) {
+		pr_efi(sys_table, "Using DTB from command line\n");
+	} else {
+		/* Look for a device tree configuration table entry. */
+		fdt_addr = (uintptr_t)get_fdt(sys_table, &fdt_size);
+		if (fdt_addr)
+			pr_efi(sys_table, "Using DTB from configuration table\n");
+	}
+
+	if (!fdt_addr)
+		pr_efi(sys_table, "Generating empty DTB\n");
+
+	status = handle_cmdline_files(sys_table, image, cmdline_ptr, "initrd=",
+				      efi_get_max_initrd_addr(dram_base,
+							      *image_addr),
+				      (unsigned long *)&initrd_addr,
+				      (unsigned long *)&initrd_size);
+	if (status != EFI_SUCCESS)
+		pr_efi_err(sys_table, "Failed initrd from command line!\n");
+
+	efi_random_get_seed(sys_table);
+
+	/* hibernation expects the runtime regions to stay in the same place */
+	if (!IS_ENABLED(CONFIG_HIBERNATION) && !nokaslr()) {
+		/*
+		 * Randomize the base of the UEFI runtime services region.
+		 * Preserve the 2 MB alignment of the region by taking a
+		 * shift of 21 bit positions into account when scaling
+		 * the headroom value using a 32-bit random value.
+		 */
+		static const u64 headroom = EFI_RT_VIRTUAL_LIMIT -
+					    EFI_RT_VIRTUAL_BASE -
+					    EFI_RT_VIRTUAL_SIZE;
+		u32 rnd;
+
+		status = efi_get_random_bytes(sys_table, sizeof(rnd),
+					      (u8 *)&rnd);
+		if (status == EFI_SUCCESS) {
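+			/*
+			 * This scales the 32-bit random value into
+			 * [0, headroom): virtmap_base becomes roughly
+			 * EFI_RT_VIRTUAL_BASE + headroom * rnd / 2^32, with
+			 * the headroom pre-shifted into 2 MB units so the
+			 * multiplication cannot overflow 64 bits.
+			 */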
+			virtmap_base = EFI_RT_VIRTUAL_BASE +
+				       (((headroom >> 21) * rnd) >> (32 - 21));
+		}
+	}
+
+	new_fdt_addr = fdt_addr;
+	status = allocate_new_fdt_and_exit_boot(sys_table, handle,
+				&new_fdt_addr, efi_get_max_fdt_addr(dram_base),
+				initrd_addr, initrd_size, cmdline_ptr,
+				fdt_addr, fdt_size);
+
+	/*
+	 * If all went well, we need to return the FDT address to the
+	 * calling function so it can be passed to kernel as part of
+	 * the kernel boot protocol.
+	 */
+	if (status == EFI_SUCCESS)
+		return new_fdt_addr;
+
+	pr_efi_err(sys_table, "Failed to update FDT and exit boot services\n");
+
+	efi_free(sys_table, initrd_size, initrd_addr);
+	efi_free(sys_table, fdt_size, fdt_addr);
+
+fail_free_image:
+	efi_free(sys_table, image_size, *image_addr);
+	efi_free(sys_table, reserve_size, reserve_addr);
+fail_free_cmdline:
+	free_screen_info(sys_table, si);
+	efi_free(sys_table, cmdline_size, (unsigned long)cmdline_ptr);
+fail:
+	return EFI_ERROR;
+}
+
+static int cmp_mem_desc(const void *l, const void *r)
+{
+	const efi_memory_desc_t *left = l, *right = r;
+
+	return (left->phys_addr > right->phys_addr) ? 1 : -1;
+}
+
+/*
+ * Returns whether region @left ends exactly where region @right starts,
+ * or false if either argument is NULL.
+ */
+static bool regions_are_adjacent(efi_memory_desc_t *left,
+				 efi_memory_desc_t *right)
+{
+	u64 left_end;
+
+	if (left == NULL || right == NULL)
+		return false;
+
+	left_end = left->phys_addr + left->num_pages * EFI_PAGE_SIZE;
+
+	return left_end == right->phys_addr;
+}
+
+/*
+ * Returns whether region @left and region @right have compatible memory type
+ * mapping attributes, and are both EFI_MEMORY_RUNTIME regions.
+ */
+static bool regions_have_compatible_memory_type_attrs(efi_memory_desc_t *left,
+						      efi_memory_desc_t *right)
+{
+	static const u64 mem_type_mask = EFI_MEMORY_WB | EFI_MEMORY_WT |
+					 EFI_MEMORY_WC | EFI_MEMORY_UC |
+					 EFI_MEMORY_RUNTIME;
+
+	return ((left->attribute ^ right->attribute) & mem_type_mask) == 0;
+}
+
+/*
+ * efi_get_virtmap() - create a virtual mapping for the EFI memory map
+ *
+ * This function populates the virt_addr fields of all memory region descriptors
+ * in @memory_map whose EFI_MEMORY_RUNTIME attribute is set. Those descriptors
+ * are also copied to @runtime_map, and their total count is returned in @count.
+ */
+void efi_get_virtmap(efi_memory_desc_t *memory_map, unsigned long map_size,
+		     unsigned long desc_size, efi_memory_desc_t *runtime_map,
+		     int *count)
+{
+	u64 efi_virt_base = virtmap_base;
+	efi_memory_desc_t *in, *prev = NULL, *out = runtime_map;
+	int l;
+
+	/*
+	 * To work around potential issues with the Properties Table feature
+	 * introduced in UEFI 2.5, which may split PE/COFF executable images
+	 * in memory into several RuntimeServicesCode and RuntimeServicesData
+	 * regions, we need to preserve the relative offsets between adjacent
+	 * EFI_MEMORY_RUNTIME regions with the same memory type attributes.
+	 * The easiest way to find adjacent regions is to sort the memory map
+	 * before traversing it.
+	 */
+	if (IS_ENABLED(CONFIG_ARM64))
+		sort(memory_map, map_size / desc_size, desc_size, cmp_mem_desc,
+		     NULL);
+
+	for (l = 0; l < map_size; l += desc_size, prev = in) {
+		u64 paddr, size;
+
+		in = (void *)memory_map + l;
+		if (!(in->attribute & EFI_MEMORY_RUNTIME))
+			continue;
+
+		paddr = in->phys_addr;
+		size = in->num_pages * EFI_PAGE_SIZE;
+
+		/*
+		 * Make the mapping compatible with 64k pages: this allows
+		 * a 4k page size kernel to kexec a 64k page size kernel and
+		 * vice versa.
+		 */
+		if ((IS_ENABLED(CONFIG_ARM64) &&
+		     !regions_are_adjacent(prev, in)) ||
+		    !regions_have_compatible_memory_type_attrs(prev, in)) {
+
+			paddr = round_down(in->phys_addr, SZ_64K);
+			size += in->phys_addr - paddr;
+
+			/*
+			 * Avoid wasting memory on PTEs by choosing a virtual
+			 * base that is compatible with section mappings if this
+			 * region has the appropriate size and physical
+			 * alignment. (Sections are 2 MB on 4k granule kernels)
+			 */
+			if (IS_ALIGNED(in->phys_addr, SZ_2M) && size >= SZ_2M)
+				efi_virt_base = round_up(efi_virt_base, SZ_2M);
+			else
+				efi_virt_base = round_up(efi_virt_base, SZ_64K);
+		}
+
+		in->virt_addr = efi_virt_base + in->phys_addr - paddr;
+		efi_virt_base += size;
+
+		memcpy(out, in, desc_size);
+		out = (void *)out + desc_size;
+		++*count;
+	}
+}
diff --git a/drivers/firmware/efi/libstub/arm32-stub.c b/drivers/firmware/efi/libstub/arm32-stub.c
new file mode 100644
index 0000000..becbda4
--- /dev/null
+++ b/drivers/firmware/efi/libstub/arm32-stub.c
@@ -0,0 +1,249 @@
+/*
+ * Copyright (C) 2013 Linaro Ltd;  <roy.franz@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+#include <linux/efi.h>
+#include <asm/efi.h>
+
+#include "efistub.h"
+
+efi_status_t check_platform_features(efi_system_table_t *sys_table_arg)
+{
+	int block;
+
+	/* non-LPAE kernels can run anywhere */
+	if (!IS_ENABLED(CONFIG_ARM_LPAE))
+		return EFI_SUCCESS;
+
+	/* LPAE kernels need compatible hardware */
+	block = cpuid_feature_extract(CPUID_EXT_MMFR0, 0);
+	if (block < 5) {
+		pr_efi_err(sys_table_arg, "This LPAE kernel is not supported by your CPU\n");
+		return EFI_UNSUPPORTED;
+	}
+	return EFI_SUCCESS;
+}
+
+static efi_guid_t screen_info_guid = LINUX_EFI_ARM_SCREEN_INFO_TABLE_GUID;
+
+struct screen_info *alloc_screen_info(efi_system_table_t *sys_table_arg)
+{
+	struct screen_info *si;
+	efi_status_t status;
+
+	/*
+	 * Unlike on arm64, where we can directly fill out the screen_info
+	 * structure from the stub, we need to allocate a buffer to hold
+	 * its contents while we hand over to the kernel proper from the
+	 * decompressor.
+	 */
+	status = efi_call_early(allocate_pool, EFI_RUNTIME_SERVICES_DATA,
+				sizeof(*si), (void **)&si);
+
+	if (status != EFI_SUCCESS)
+		return NULL;
+
+	status = efi_call_early(install_configuration_table,
+				&screen_info_guid, si);
+	if (status == EFI_SUCCESS)
+		return si;
+
+	efi_call_early(free_pool, si);
+	return NULL;
+}
+
+void free_screen_info(efi_system_table_t *sys_table_arg, struct screen_info *si)
+{
+	if (!si)
+		return;
+
+	efi_call_early(install_configuration_table, &screen_info_guid, NULL);
+	efi_call_early(free_pool, si);
+}
+
+static efi_status_t reserve_kernel_base(efi_system_table_t *sys_table_arg,
+					unsigned long dram_base,
+					unsigned long *reserve_addr,
+					unsigned long *reserve_size)
+{
+	efi_physical_addr_t alloc_addr;
+	efi_memory_desc_t *memory_map;
+	unsigned long nr_pages, map_size, desc_size, buff_size;
+	efi_status_t status;
+	unsigned long l;
+
+	struct efi_boot_memmap map = {
+		.map		= &memory_map,
+		.map_size	= &map_size,
+		.desc_size	= &desc_size,
+		.desc_ver	= NULL,
+		.key_ptr	= NULL,
+		.buff_size	= &buff_size,
+	};
+
+	/*
+	 * Reserve memory for the uncompressed kernel image. This is
+	 * all that prevents any future allocations from conflicting
+	 * with the kernel. Since we can't tell from the compressed
+	 * image how much DRAM the kernel actually uses (due to BSS
+	 * size uncertainty) we allocate the maximum possible size.
+	 * Do this very early, as prints can cause memory allocations
+	 * that may conflict with this.
+	 */
+	alloc_addr = dram_base + MAX_UNCOMP_KERNEL_SIZE;
+	nr_pages = MAX_UNCOMP_KERNEL_SIZE / EFI_PAGE_SIZE;
+	status = efi_call_early(allocate_pages, EFI_ALLOCATE_MAX_ADDRESS,
+				EFI_BOOT_SERVICES_DATA, nr_pages, &alloc_addr);
+	if (status == EFI_SUCCESS) {
+		if (alloc_addr == dram_base) {
+			*reserve_addr = alloc_addr;
+			*reserve_size = MAX_UNCOMP_KERNEL_SIZE;
+			return EFI_SUCCESS;
+		}
+		/*
+		 * If we end up here, the allocation succeeded but starts below
+		 * dram_base. This can only occur if the real base of DRAM is
+		 * not a multiple of 128 MB, in which case dram_base will have
+		 * been rounded up. Since this implies that a part of the region
+		 * was already occupied, we need to fall through to the code
+		 * below to ensure that the existing allocations don't conflict.
+		 * For this reason, we use EFI_BOOT_SERVICES_DATA above and not
+		 * EFI_LOADER_DATA, which we wouldn't be able to distinguish from
+		 * allocations that we want to disallow.
+		 */
+	}
+
+	/*
+	 * If the allocation above failed, we may still be able to proceed:
+	 * if the only allocations in the region are of types that will be
+	 * released to the OS after ExitBootServices(), the decompressor can
+	 * safely overwrite them.
+	 */
+	status = efi_get_memory_map(sys_table_arg, &map);
+	if (status != EFI_SUCCESS) {
+		pr_efi_err(sys_table_arg,
+			   "reserve_kernel_base(): Unable to retrieve memory map.\n");
+		return status;
+	}
+
+	for (l = 0; l < map_size; l += desc_size) {
+		efi_memory_desc_t *desc;
+		u64 start, end;
+
+		desc = (void *)memory_map + l;
+		start = desc->phys_addr;
+		end = start + desc->num_pages * EFI_PAGE_SIZE;
+
+		/* Skip if entry does not intersect with region */
+		if (start >= dram_base + MAX_UNCOMP_KERNEL_SIZE ||
+		    end <= dram_base)
+			continue;
+
+		switch (desc->type) {
+		case EFI_BOOT_SERVICES_CODE:
+		case EFI_BOOT_SERVICES_DATA:
+			/* Ignore types that are released to the OS anyway */
+			continue;
+
+		case EFI_CONVENTIONAL_MEMORY:
+			/*
+			 * Reserve the intersection between this entry and the
+			 * region.
+			 */
+			start = max(start, (u64)dram_base);
+			end = min(end, (u64)dram_base + MAX_UNCOMP_KERNEL_SIZE);
+
+			status = efi_call_early(allocate_pages,
+						EFI_ALLOCATE_ADDRESS,
+						EFI_LOADER_DATA,
+						(end - start) / EFI_PAGE_SIZE,
+						&start);
+			if (status != EFI_SUCCESS) {
+				pr_efi_err(sys_table_arg,
+					"reserve_kernel_base(): alloc failed.\n");
+				goto out;
+			}
+			break;
+
+		case EFI_LOADER_CODE:
+		case EFI_LOADER_DATA:
+			/*
+			 * These regions may be released and reallocated for
+			 * another purpose (including EFI_RUNTIME_SERVICE_DATA)
+			 * at any time during the execution of the OS loader,
+			 * so we cannot consider them as safe.
+			 */
+		default:
+			/*
+			 * Treat any other allocation in the region as
+			 * unsafe.
+			 */
+			status = EFI_OUT_OF_RESOURCES;
+			goto out;
+		}
+	}
+
+	status = EFI_SUCCESS;
+out:
+	efi_call_early(free_pool, memory_map);
+	return status;
+}
+
+efi_status_t handle_kernel_image(efi_system_table_t *sys_table,
+				 unsigned long *image_addr,
+				 unsigned long *image_size,
+				 unsigned long *reserve_addr,
+				 unsigned long *reserve_size,
+				 unsigned long dram_base,
+				 efi_loaded_image_t *image)
+{
+	efi_status_t status;
+
+	/*
+	 * Verify that the DRAM base address is compatible with the ARM
+	 * boot protocol, which determines the base of DRAM by masking
+	 * off the low 27 bits of the address at which the zImage is
+	 * loaded. These assumptions are made by the decompressor,
+	 * before any memory map is available.
+	 */
+	dram_base = round_up(dram_base, SZ_128M);
+
+	status = reserve_kernel_base(sys_table, dram_base, reserve_addr,
+				     reserve_size);
+	if (status != EFI_SUCCESS) {
+		pr_efi_err(sys_table, "Unable to allocate memory for uncompressed kernel.\n");
+		return status;
+	}
+
+	/*
+	 * Relocate the zImage, so that it appears in the lowest 128 MB
+	 * memory window.
+	 */
+	*image_size = image->image_size;
+	status = efi_relocate_kernel(sys_table, image_addr, *image_size,
+				     *image_size,
+				     dram_base + MAX_UNCOMP_KERNEL_SIZE, 0);
+	if (status != EFI_SUCCESS) {
+		pr_efi_err(sys_table, "Failed to relocate kernel.\n");
+		efi_free(sys_table, *reserve_size, *reserve_addr);
+		*reserve_size = 0;
+		return status;
+	}
+
+	/*
+	 * Check to see if the allocation landed low enough in memory.
+	 * The kernel determines the base of DRAM from the
+	 * address at which the zImage is loaded.
+	 */
+	if (*image_addr + *image_size > dram_base + ZIMAGE_OFFSET_LIMIT) {
+		pr_efi_err(sys_table, "Failed to relocate kernel, no low memory available.\n");
+		efi_free(sys_table, *reserve_size, *reserve_addr);
+		*reserve_size = 0;
+		efi_free(sys_table, *image_size, *image_addr);
+		*image_size = 0;
+		return EFI_LOAD_ERROR;
+	}
+	return EFI_SUCCESS;
+}
diff --git a/drivers/firmware/efi/libstub/arm64-stub.c b/drivers/firmware/efi/libstub/arm64-stub.c
new file mode 100644
index 0000000..1b4d465
--- /dev/null
+++ b/drivers/firmware/efi/libstub/arm64-stub.c
@@ -0,0 +1,159 @@
+/*
+ * Copyright (C) 2013, 2014 Linaro Ltd;  <roy.franz@linaro.org>
+ *
+ * This file implements the EFI boot stub for the arm64 kernel.
+ * Adapted from ARM version by Mark Salter <msalter@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+/*
+ * To prevent the compiler from emitting GOT-indirected (and thus absolute)
+ * references to the section markers, override their visibility as 'hidden'
+ */
+#pragma GCC visibility push(hidden)
+#include <asm/sections.h>
+#pragma GCC visibility pop
+
+#include <linux/efi.h>
+#include <asm/efi.h>
+#include <asm/memory.h>
+#include <asm/sysreg.h>
+
+#include "efistub.h"
+
+efi_status_t check_platform_features(efi_system_table_t *sys_table_arg)
+{
+	u64 tg;
+
+	/* UEFI mandates support for 4 KB granularity, no need to check */
+	if (IS_ENABLED(CONFIG_ARM64_4K_PAGES))
+		return EFI_SUCCESS;
+
+	tg = (read_cpuid(ID_AA64MMFR0_EL1) >> ID_AA64MMFR0_TGRAN_SHIFT) & 0xf;
+	if (tg != ID_AA64MMFR0_TGRAN_SUPPORTED) {
+		if (IS_ENABLED(CONFIG_ARM64_64K_PAGES))
+			pr_efi_err(sys_table_arg, "This 64 KB granular kernel is not supported by your CPU\n");
+		else
+			pr_efi_err(sys_table_arg, "This 16 KB granular kernel is not supported by your CPU\n");
+		return EFI_UNSUPPORTED;
+	}
+	return EFI_SUCCESS;
+}
+
+efi_status_t handle_kernel_image(efi_system_table_t *sys_table_arg,
+				 unsigned long *image_addr,
+				 unsigned long *image_size,
+				 unsigned long *reserve_addr,
+				 unsigned long *reserve_size,
+				 unsigned long dram_base,
+				 efi_loaded_image_t *image)
+{
+	efi_status_t status;
+	unsigned long kernel_size, kernel_memsize = 0;
+	void *old_image_addr = (void *)*image_addr;
+	unsigned long preferred_offset;
+	u64 phys_seed = 0;
+
+	if (IS_ENABLED(CONFIG_RANDOMIZE_BASE)) {
+		if (!nokaslr()) {
+			status = efi_get_random_bytes(sys_table_arg,
+						      sizeof(phys_seed),
+						      (u8 *)&phys_seed);
+			if (status == EFI_NOT_FOUND) {
+				pr_efi(sys_table_arg, "EFI_RNG_PROTOCOL unavailable, no randomness supplied\n");
+			} else if (status != EFI_SUCCESS) {
+				pr_efi_err(sys_table_arg, "efi_get_random_bytes() failed\n");
+				return status;
+			}
+		} else {
+			pr_efi(sys_table_arg, "KASLR disabled on kernel command line\n");
+		}
+	}
+
+	/*
+	 * The preferred offset of the kernel Image is TEXT_OFFSET bytes beyond
+	 * a 2 MB aligned base, which itself may be lower than dram_base, as
+	 * long as the resulting offset equals or exceeds it.
+	 */
+	preferred_offset = round_down(dram_base, MIN_KIMG_ALIGN) + TEXT_OFFSET;
+	if (preferred_offset < dram_base)
+		preferred_offset += MIN_KIMG_ALIGN;
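+	/*
+	 * E.g. (illustrative): with the default TEXT_OFFSET of 0x80000 and a
+	 * 2 MB aligned dram_base of 0x80000000, preferred_offset is
+	 * 0x80080000.
+	 */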
+
+	kernel_size = _edata - _text;
+	kernel_memsize = kernel_size + (_end - _edata);
+
+	if (IS_ENABLED(CONFIG_RANDOMIZE_BASE) && phys_seed != 0) {
+		/*
+		 * If CONFIG_DEBUG_ALIGN_RODATA is not set, produce a
+		 * displacement in the interval [0, MIN_KIMG_ALIGN) that
+		 * doesn't violate this kernel's de-facto alignment
+		 * constraints.
+		 */
+		u32 mask = (MIN_KIMG_ALIGN - 1) & ~(EFI_KIMG_ALIGN - 1);
+		u32 offset = !IS_ENABLED(CONFIG_DEBUG_ALIGN_RODATA) ?
+			     (phys_seed >> 32) & mask : TEXT_OFFSET;
+
+		/*
+		 * With CONFIG_RANDOMIZE_TEXT_OFFSET=y, TEXT_OFFSET may not
+		 * be a multiple of EFI_KIMG_ALIGN, and we must ensure that
+		 * we preserve the misalignment of 'offset' relative to
+		 * EFI_KIMG_ALIGN so that statically allocated objects whose
+		 * alignment exceeds PAGE_SIZE appear correctly aligned in
+		 * memory.
+		 */
+		offset |= TEXT_OFFSET % EFI_KIMG_ALIGN;
+
+		/*
+		 * If KASLR is enabled, and we have some randomness available,
+		 * locate the kernel at a randomized offset in physical memory.
+		 */
+		*reserve_size = kernel_memsize + offset;
+		status = efi_random_alloc(sys_table_arg, *reserve_size,
+					  MIN_KIMG_ALIGN, reserve_addr,
+					  (u32)phys_seed);
+
+		*image_addr = *reserve_addr + offset;
+	} else {
+		/*
+		 * Else, try a straight allocation at the preferred offset.
+		 * This will work around the issue where, if dram_base == 0x0,
+		 * efi_low_alloc() refuses to allocate at 0x0 (to prevent the
+		 * address of the allocation from being mistaken for a FAIL
+		 * return value or a NULL pointer). It will also ensure that, on
+		 * platforms where the [dram_base, dram_base + TEXT_OFFSET)
+		 * interval is partially occupied by the firmware (like on APM
+		 * Mustang), we can still place the kernel at the address
+		 * 'dram_base + TEXT_OFFSET'.
+		 */
+		if (*image_addr == preferred_offset)
+			return EFI_SUCCESS;
+
+		*image_addr = *reserve_addr = preferred_offset;
+		*reserve_size = round_up(kernel_memsize, EFI_ALLOC_ALIGN);
+
+		status = efi_call_early(allocate_pages, EFI_ALLOCATE_ADDRESS,
+					EFI_LOADER_DATA,
+					*reserve_size / EFI_PAGE_SIZE,
+					(efi_physical_addr_t *)reserve_addr);
+	}
+
+	if (status != EFI_SUCCESS) {
+		*reserve_size = kernel_memsize + TEXT_OFFSET;
+		status = efi_low_alloc(sys_table_arg, *reserve_size,
+				       MIN_KIMG_ALIGN, reserve_addr);
+
+		if (status != EFI_SUCCESS) {
+			pr_efi_err(sys_table_arg, "Failed to relocate kernel\n");
+			*reserve_size = 0;
+			return status;
+		}
+		*image_addr = *reserve_addr + TEXT_OFFSET;
+	}
+	memcpy((void *)*image_addr, old_image_addr, kernel_size);
+
+	return EFI_SUCCESS;
+}
diff --git a/drivers/firmware/efi/libstub/efi-stub-helper.c b/drivers/firmware/efi/libstub/efi-stub-helper.c
new file mode 100644
index 0000000..e94975f
--- /dev/null
+++ b/drivers/firmware/efi/libstub/efi-stub-helper.c
@@ -0,0 +1,921 @@
+/*
+ * Helper functions used by the EFI stub on multiple
+ * architectures. This should be #included by the EFI stub
+ * implementation files.
+ *
+ * Copyright 2011 Intel Corporation; author Matt Fleming
+ *
+ * This file is part of the Linux kernel, and is made available
+ * under the terms of the GNU General Public License version 2.
+ *
+ */
+
+#include <linux/efi.h>
+#include <asm/efi.h>
+
+#include "efistub.h"
+
+/*
+ * Some firmware implementations have problems reading files in one go.
+ * A read chunk size of 1MB seems to work for most platforms.
+ *
+ * Unfortunately, reading files in chunks triggers *other* bugs on some
+ * platforms, so we provide a way to disable this workaround, which can
+ * be done by passing "efi=nochunk" on the EFI boot stub command line.
+ *
+ * If you experience issues with initrd images being corrupt it's worth
+ * trying efi=nochunk, but chunking is enabled by default because there
+ * are far more machines that require the workaround than those that
+ * break with it enabled.
+ */
+#define EFI_READ_CHUNK_SIZE	(1024 * 1024)
+
+static unsigned long __chunk_size = EFI_READ_CHUNK_SIZE;
+
+static int __section(.data) __nokaslr;
+static int __section(.data) __quiet;
+
+int __pure nokaslr(void)
+{
+	return __nokaslr;
+}
+int __pure is_quiet(void)
+{
+	return __quiet;
+}
+
+#define EFI_MMAP_NR_SLACK_SLOTS	8
+
+struct file_info {
+	efi_file_handle_t *handle;
+	u64 size;
+};
+
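+/*
+ * Print an ASCII string on the UEFI console, one character at a time,
+ * expanding '\n' to "\r\n" as the UEFI text output protocol expects.
+ */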
+void efi_printk(efi_system_table_t *sys_table_arg, char *str)
+{
+	char *s8;
+
+	for (s8 = str; *s8; s8++) {
+		efi_char16_t ch[2] = { 0 };
+
+		ch[0] = *s8;
+		if (*s8 == '\n') {
+			efi_char16_t nl[2] = { '\r', 0 };
+			efi_char16_printk(sys_table_arg, nl);
+		}
+
+		efi_char16_printk(sys_table_arg, ch);
+	}
+}
+
+static inline bool mmap_has_headroom(unsigned long buff_size,
+				     unsigned long map_size,
+				     unsigned long desc_size)
+{
+	unsigned long slack = buff_size - map_size;
+
+	return slack / desc_size >= EFI_MMAP_NR_SLACK_SLOTS;
+}
+
+efi_status_t efi_get_memory_map(efi_system_table_t *sys_table_arg,
+				struct efi_boot_memmap *map)
+{
+	efi_memory_desc_t *m = NULL;
+	efi_status_t status;
+	unsigned long key;
+	u32 desc_version;
+
+	*map->desc_size =	sizeof(*m);
+	*map->map_size =	*map->desc_size * 32;
+	*map->buff_size =	*map->map_size;
+again:
+	status = efi_call_early(allocate_pool, EFI_LOADER_DATA,
+				*map->map_size, (void **)&m);
+	if (status != EFI_SUCCESS)
+		goto fail;
+
+	*map->desc_size = 0;
+	key = 0;
+	status = efi_call_early(get_memory_map, map->map_size, m,
+				&key, map->desc_size, &desc_version);
+	if (status == EFI_BUFFER_TOO_SMALL ||
+	    !mmap_has_headroom(*map->buff_size, *map->map_size,
+			       *map->desc_size)) {
+		efi_call_early(free_pool, m);
+		/*
+		 * Make sure there are a few entries of headroom so that the
+		 * buffer can be reused for a new map after allocations are
+		 * no longer permitted.  It's unlikely that the map will grow to
+		 * exceed this headroom once we are ready to trigger
+		 * ExitBootServices().
+		 */
+		*map->map_size += *map->desc_size * EFI_MMAP_NR_SLACK_SLOTS;
+		*map->buff_size = *map->map_size;
+		goto again;
+	}
+
+	if (status != EFI_SUCCESS)
+		efi_call_early(free_pool, m);
+
+	if (map->key_ptr && status == EFI_SUCCESS)
+		*map->key_ptr = key;
+	if (map->desc_ver && status == EFI_SUCCESS)
+		*map->desc_ver = desc_version;
+
+fail:
+	*map->map = m;
+	return status;
+}
+
+
+unsigned long get_dram_base(efi_system_table_t *sys_table_arg)
+{
+	efi_status_t status;
+	unsigned long map_size, buff_size;
+	unsigned long membase  = EFI_ERROR;
+	struct efi_memory_map map;
+	efi_memory_desc_t *md;
+	struct efi_boot_memmap boot_map;
+
+	boot_map.map =		(efi_memory_desc_t **)&map.map;
+	boot_map.map_size =	&map_size;
+	boot_map.desc_size =	&map.desc_size;
+	boot_map.desc_ver =	NULL;
+	boot_map.key_ptr =	NULL;
+	boot_map.buff_size =	&buff_size;
+
+	status = efi_get_memory_map(sys_table_arg, &boot_map);
+	if (status != EFI_SUCCESS)
+		return membase;
+
+	map.map_end = map.map + map_size;
+
+	for_each_efi_memory_desc_in_map(&map, md) {
+		if (md->attribute & EFI_MEMORY_WB) {
+			if (membase > md->phys_addr)
+				membase = md->phys_addr;
+		}
+	}
+
+	efi_call_early(free_pool, map.map);
+
+	return membase;
+}
+
+/*
+ * Allocate at the highest possible address that is not above 'max'.
+ */
+efi_status_t efi_high_alloc(efi_system_table_t *sys_table_arg,
+			    unsigned long size, unsigned long align,
+			    unsigned long *addr, unsigned long max)
+{
+	unsigned long map_size, desc_size, buff_size;
+	efi_memory_desc_t *map;
+	efi_status_t status;
+	unsigned long nr_pages;
+	u64 max_addr = 0;
+	int i;
+	struct efi_boot_memmap boot_map;
+
+	boot_map.map =		&map;
+	boot_map.map_size =	&map_size;
+	boot_map.desc_size =	&desc_size;
+	boot_map.desc_ver =	NULL;
+	boot_map.key_ptr =	NULL;
+	boot_map.buff_size =	&buff_size;
+
+	status = efi_get_memory_map(sys_table_arg, &boot_map);
+	if (status != EFI_SUCCESS)
+		goto fail;
+
+	/*
+	 * Enforce minimum alignment that EFI or Linux requires when
+	 * requesting a specific address.  We are doing page-based (or
+	 * larger) allocations, and both the address and size must meet
+	 * alignment constraints.
+	 */
+	if (align < EFI_ALLOC_ALIGN)
+		align = EFI_ALLOC_ALIGN;
+
+	size = round_up(size, EFI_ALLOC_ALIGN);
+	nr_pages = size / EFI_PAGE_SIZE;
+again:
+	for (i = 0; i < map_size / desc_size; i++) {
+		efi_memory_desc_t *desc;
+		unsigned long m = (unsigned long)map;
+		u64 start, end;
+
+		desc = efi_early_memdesc_ptr(m, desc_size, i);
+		if (desc->type != EFI_CONVENTIONAL_MEMORY)
+			continue;
+
+		if (desc->num_pages < nr_pages)
+			continue;
+
+		start = desc->phys_addr;
+		end = start + desc->num_pages * EFI_PAGE_SIZE;
+
+		if (end > max)
+			end = max;
+
+		if ((start + size) > end)
+			continue;
+
+		if (round_down(end - size, align) < start)
+			continue;
+
+		start = round_down(end - size, align);
+
+		/*
+		 * Don't allocate at 0x0. It will confuse code that
+		 * checks pointers against NULL.
+		 */
+		if (start == 0x0)
+			continue;
+
+		if (start > max_addr)
+			max_addr = start;
+	}
+
+	if (!max_addr)
+		status = EFI_NOT_FOUND;
+	else {
+		status = efi_call_early(allocate_pages,
+					EFI_ALLOCATE_ADDRESS, EFI_LOADER_DATA,
+					nr_pages, &max_addr);
+		if (status != EFI_SUCCESS) {
+			max = max_addr;
+			max_addr = 0;
+			goto again;
+		}
+
+		*addr = max_addr;
+	}
+
+	efi_call_early(free_pool, map);
+fail:
+	return status;
+}
+
+/*
+ * Allocate at the lowest possible address.
+ */
+efi_status_t efi_low_alloc(efi_system_table_t *sys_table_arg,
+			   unsigned long size, unsigned long align,
+			   unsigned long *addr)
+{
+	unsigned long map_size, desc_size, buff_size;
+	efi_memory_desc_t *map;
+	efi_status_t status;
+	unsigned long nr_pages;
+	int i;
+	struct efi_boot_memmap boot_map;
+
+	boot_map.map =		&map;
+	boot_map.map_size =	&map_size;
+	boot_map.desc_size =	&desc_size;
+	boot_map.desc_ver =	NULL;
+	boot_map.key_ptr =	NULL;
+	boot_map.buff_size =	&buff_size;
+
+	status = efi_get_memory_map(sys_table_arg, &boot_map);
+	if (status != EFI_SUCCESS)
+		goto fail;
+
+	/*
+	 * Enforce minimum alignment that EFI or Linux requires when
+	 * requesting a specific address.  We are doing page-based (or
+	 * larger) allocations, and both the address and size must meet
+	 * alignment constraints.
+	 */
+	if (align < EFI_ALLOC_ALIGN)
+		align = EFI_ALLOC_ALIGN;
+
+	size = round_up(size, EFI_ALLOC_ALIGN);
+	nr_pages = size / EFI_PAGE_SIZE;
+	for (i = 0; i < map_size / desc_size; i++) {
+		efi_memory_desc_t *desc;
+		unsigned long m = (unsigned long)map;
+		u64 start, end;
+
+		desc = efi_early_memdesc_ptr(m, desc_size, i);
+
+		if (desc->type != EFI_CONVENTIONAL_MEMORY)
+			continue;
+
+		if (desc->num_pages < nr_pages)
+			continue;
+
+		start = desc->phys_addr;
+		end = start + desc->num_pages * EFI_PAGE_SIZE;
+
+		/*
+		 * Don't allocate at 0x0. It will confuse code that
+		 * checks pointers against NULL. Skip the first 8
+		 * bytes so we start at a nice even number.
+		 */
+		if (start == 0x0)
+			start += 8;
+
+		start = round_up(start, align);
+		if ((start + size) > end)
+			continue;
+
+		status = efi_call_early(allocate_pages,
+					EFI_ALLOCATE_ADDRESS, EFI_LOADER_DATA,
+					nr_pages, &start);
+		if (status == EFI_SUCCESS) {
+			*addr = start;
+			break;
+		}
+	}
+
+	if (i == map_size / desc_size)
+		status = EFI_NOT_FOUND;
+
+	efi_call_early(free_pool, map);
+fail:
+	return status;
+}
+
+void efi_free(efi_system_table_t *sys_table_arg, unsigned long size,
+	      unsigned long addr)
+{
+	unsigned long nr_pages;
+
+	if (!size)
+		return;
+
+	nr_pages = round_up(size, EFI_ALLOC_ALIGN) / EFI_PAGE_SIZE;
+	efi_call_early(free_pages, addr, nr_pages);
+}
+
+static efi_status_t efi_file_size(efi_system_table_t *sys_table_arg, void *__fh,
+				  efi_char16_t *filename_16, void **handle,
+				  u64 *file_sz)
+{
+	efi_file_handle_t *h, *fh = __fh;
+	efi_file_info_t *info;
+	efi_status_t status;
+	efi_guid_t info_guid = EFI_FILE_INFO_ID;
+	unsigned long info_sz;
+
+	status = efi_call_proto(efi_file_handle, open, fh, &h, filename_16,
+				EFI_FILE_MODE_READ, (u64)0);
+	if (status != EFI_SUCCESS) {
+		efi_printk(sys_table_arg, "Failed to open file: ");
+		efi_char16_printk(sys_table_arg, filename_16);
+		efi_printk(sys_table_arg, "\n");
+		return status;
+	}
+
+	*handle = h;
+
+	info_sz = 0;
+	status = efi_call_proto(efi_file_handle, get_info, h, &info_guid,
+				&info_sz, NULL);
+	if (status != EFI_BUFFER_TOO_SMALL) {
+		efi_printk(sys_table_arg, "Failed to get file info size\n");
+		return status;
+	}
+
+grow:
+	status = efi_call_early(allocate_pool, EFI_LOADER_DATA,
+				info_sz, (void **)&info);
+	if (status != EFI_SUCCESS) {
+		efi_printk(sys_table_arg, "Failed to alloc mem for file info\n");
+		return status;
+	}
+
+	status = efi_call_proto(efi_file_handle, get_info, h, &info_guid,
+				&info_sz, info);
+	if (status == EFI_BUFFER_TOO_SMALL) {
+		efi_call_early(free_pool, info);
+		goto grow;
+	}
+
+	*file_sz = info->file_size;
+	efi_call_early(free_pool, info);
+
+	if (status != EFI_SUCCESS)
+		efi_printk(sys_table_arg, "Failed to get initrd info\n");
+
+	return status;
+}
+
+static efi_status_t efi_file_read(void *handle, unsigned long *size, void *addr)
+{
+	return efi_call_proto(efi_file_handle, read, handle, size, addr);
+}
+
+static efi_status_t efi_file_close(void *handle)
+{
+	return efi_call_proto(efi_file_handle, close, handle);
+}
+
+static efi_status_t efi_open_volume(efi_system_table_t *sys_table_arg,
+				    efi_loaded_image_t *image,
+				    efi_file_handle_t **__fh)
+{
+	efi_file_io_interface_t *io;
+	efi_file_handle_t *fh;
+	efi_guid_t fs_proto = EFI_FILE_SYSTEM_GUID;
+	efi_status_t status;
+	void *handle = (void *)(unsigned long)efi_table_attr(efi_loaded_image,
+							     device_handle,
+							     image);
+
+	status = efi_call_early(handle_protocol, handle,
+				&fs_proto, (void **)&io);
+	if (status != EFI_SUCCESS) {
+		efi_printk(sys_table_arg, "Failed to handle fs_proto\n");
+		return status;
+	}
+
+	status = efi_call_proto(efi_file_io_interface, open_volume, io, &fh);
+	if (status != EFI_SUCCESS)
+		efi_printk(sys_table_arg, "Failed to open volume\n");
+	else
+		*__fh = fh;
+
+	return status;
+}
+
+/*
+ * Parse the ASCII string 'cmdline' for EFI options, denoted by the efi=
+ * option, e.g. efi=nochunk.
+ *
+ * It should be noted that efi= is parsed in two very different
+ * environments, first in the early boot environment of the EFI boot
+ * stub, and subsequently during the kernel boot.
+ */
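+/*
+ * For example, "efi=nochunk" disables the chunked-read workaround above;
+ * efi= arguments that this early parser does not recognize are skipped so
+ * that the kernel proper can handle them later.
+ */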
+efi_status_t efi_parse_options(char const *cmdline)
+{
+	char *str;
+
+	str = strstr(cmdline, "nokaslr");
+	if (str == cmdline || (str && str > cmdline && *(str - 1) == ' '))
+		__nokaslr = 1;
+
+	str = strstr(cmdline, "quiet");
+	if (str == cmdline || (str && str > cmdline && *(str - 1) == ' '))
+		__quiet = 1;
+
+	/*
+	 * If no EFI parameters were specified on the cmdline we've got
+	 * nothing to do.
+	 */
+	str = strstr(cmdline, "efi=");
+	if (!str)
+		return EFI_SUCCESS;
+
+	/* Skip ahead to first argument */
+	str += strlen("efi=");
+
+	/*
+	 * Remember, because efi= is also used by the kernel we need to
+	 * skip over arguments we don't understand.
+	 */
+	while (*str && *str != ' ') {
+		if (!strncmp(str, "nochunk", 7)) {
+			str += strlen("nochunk");
+			__chunk_size = -1UL;
+		}
+
+		/* Group words together, delimited by "," */
+		while (*str && *str != ' ' && *str != ',')
+			str++;
+
+		if (*str == ',')
+			str++;
+	}
+
+	return EFI_SUCCESS;
+}
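
The loop above groups comma-separated words and silently skips sub-options the
stub does not know, since the same efi= string is also consumed by the kernel
proper. A standalone model of that scan (parse_efi_options() here is a
hypothetical userspace re-implementation, recognising only "nochunk"):

    #include <stdio.h>
    #include <string.h>

    static void parse_efi_options(const char *cmdline, int *nochunk)
    {
            const char *str = strstr(cmdline, "efi=");

            if (!str)
                    return;
            str += strlen("efi=");

            while (*str && *str != ' ') {
                    if (!strncmp(str, "nochunk", 7))
                            *nochunk = 1;
                    /* skip to the next ","-delimited word */
                    while (*str && *str != ' ' && *str != ',')
                            str++;
                    if (*str == ',')
                            str++;
            }
    }

    int main(void)
    {
            int nochunk = 0;

            parse_efi_options("root=/dev/vda efi=debug,nochunk quiet", &nochunk);
            printf("nochunk=%d\n", nochunk);        /* prints nochunk=1 */
            return 0;
    }
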
+
+/*
+ * Check the cmdline for LILO-style file= arguments.
+ *
+ * We only support loading a file from the same filesystem as
+ * the kernel image.
+ */
+efi_status_t handle_cmdline_files(efi_system_table_t *sys_table_arg,
+				  efi_loaded_image_t *image,
+				  char *cmd_line, char *option_string,
+				  unsigned long max_addr,
+				  unsigned long *load_addr,
+				  unsigned long *load_size)
+{
+	struct file_info *files;
+	unsigned long file_addr;
+	u64 file_size_total;
+	efi_file_handle_t *fh = NULL;
+	efi_status_t status;
+	int nr_files;
+	char *str;
+	int i, j, k;
+
+	file_addr = 0;
+	file_size_total = 0;
+
+	str = cmd_line;
+
+	j = 0;			/* See close_handles */
+
+	if (!load_addr || !load_size)
+		return EFI_INVALID_PARAMETER;
+
+	*load_addr = 0;
+	*load_size = 0;
+
+	if (!str || !*str)
+		return EFI_SUCCESS;
+
+	for (nr_files = 0; *str; nr_files++) {
+		str = strstr(str, option_string);
+		if (!str)
+			break;
+
+		str += strlen(option_string);
+
+		/* Skip any leading slashes */
+		while (*str == '/' || *str == '\\')
+			str++;
+
+		while (*str && *str != ' ' && *str != '\n')
+			str++;
+	}
+
+	if (!nr_files)
+		return EFI_SUCCESS;
+
+	status = efi_call_early(allocate_pool, EFI_LOADER_DATA,
+				nr_files * sizeof(*files), (void **)&files);
+	if (status != EFI_SUCCESS) {
+		pr_efi_err(sys_table_arg, "Failed to alloc mem for file handle list\n");
+		goto fail;
+	}
+
+	str = cmd_line;
+	for (i = 0; i < nr_files; i++) {
+		struct file_info *file;
+		efi_char16_t filename_16[256];
+		efi_char16_t *p;
+
+		str = strstr(str, option_string);
+		if (!str)
+			break;
+
+		str += strlen(option_string);
+
+		file = &files[i];
+		p = filename_16;
+
+		/* Skip any leading slashes */
+		while (*str == '/' || *str == '\\')
+			str++;
+
+		while (*str && *str != ' ' && *str != '\n') {
+			if ((u8 *)p >= (u8 *)filename_16 + sizeof(filename_16))
+				break;
+
+			if (*str == '/') {
+				*p++ = '\\';
+				str++;
+			} else {
+				*p++ = *str++;
+			}
+		}
+
+		*p = '\0';
+
+		/* Only open the volume once. */
+		if (!i) {
+			status = efi_open_volume(sys_table_arg, image, &fh);
+			if (status != EFI_SUCCESS)
+				goto free_files;
+		}
+
+		status = efi_file_size(sys_table_arg, fh, filename_16,
+				       (void **)&file->handle, &file->size);
+		if (status != EFI_SUCCESS)
+			goto close_handles;
+
+		file_size_total += file->size;
+	}
+
+	if (file_size_total) {
+		unsigned long addr;
+
+		/*
+		 * Multiple files need to be at consecutive addresses in memory,
+		 * so allocate enough memory for all the files.
+		 */
+		status = efi_high_alloc(sys_table_arg, file_size_total, 0x1000,
+				    &file_addr, max_addr);
+		if (status != EFI_SUCCESS) {
+			pr_efi_err(sys_table_arg, "Failed to alloc highmem for files\n");
+			goto close_handles;
+		}
+
+		/* We've run out of free low memory. */
+		if (file_addr > max_addr) {
+			pr_efi_err(sys_table_arg, "We've run out of free low memory\n");
+			status = EFI_INVALID_PARAMETER;
+			goto free_file_total;
+		}
+
+		addr = file_addr;
+		for (j = 0; j < nr_files; j++) {
+			unsigned long size;
+
+			size = files[j].size;
+			while (size) {
+				unsigned long chunksize;
+
+				if (IS_ENABLED(CONFIG_X86) && size > __chunk_size)
+					chunksize = __chunk_size;
+				else
+					chunksize = size;
+
+				status = efi_file_read(files[j].handle,
+						       &chunksize,
+						       (void *)addr);
+				if (status != EFI_SUCCESS) {
+					pr_efi_err(sys_table_arg, "Failed to read file\n");
+					goto free_file_total;
+				}
+				addr += chunksize;
+				size -= chunksize;
+			}
+
+			efi_file_close(files[j].handle);
+		}
+
+	}
+
+	efi_call_early(free_pool, files);
+
+	*load_addr = file_addr;
+	*load_size = file_size_total;
+
+	return status;
+
+free_file_total:
+	efi_free(sys_table_arg, file_size_total, file_addr);
+
+close_handles:
+	for (k = j; k < i; k++)
+		efi_file_close(files[k].handle);
+free_files:
+	efi_call_early(free_pool, files);
+fail:
+	*load_addr = 0;
+	*load_size = 0;
+
+	return status;
+}
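
The function makes two passes: first it counts the option occurrences, then it
converts each name to UTF-16 with '/' rewritten to '\', since UEFI file paths
are backslash-separated. A userspace sketch of both passes (byte strings are
used here instead of efi_char16_t for brevity):

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
            const char *cmd = "initrd=/boot/initrd.img initrd=\\extra.img";
            const char *opt = "initrd=";
            const char *str = cmd;
            char name[256];
            size_t i;
            int nr_files = 0;

            /* Pass 1: count occurrences of the option string. */
            while ((str = strstr(str, opt))) {
                    str += strlen(opt);
                    while (*str == '/' || *str == '\\')  /* leading slashes */
                            str++;
                    while (*str && *str != ' ' && *str != '\n')
                            str++;
                    nr_files++;
            }

            /* Pass 2: convert the first name for the UEFI file protocol. */
            str = strstr(cmd, opt) + strlen(opt);
            while (*str == '/' || *str == '\\')
                    str++;
            for (i = 0; i + 1 < sizeof(name) && str[i] && str[i] != ' '; i++)
                    name[i] = (str[i] == '/') ? '\\' : str[i];
            name[i] = '\0';

            /* prints: 2 file(s), first: boot\initrd.img */
            printf("%d file(s), first: %s\n", nr_files, name);
            return 0;
    }
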
+/*
+ * Relocate a kernel image, either compressed or uncompressed.
+ * In the ARM64 case, all kernel images are currently
+ * uncompressed, and as such when we relocate it we need to
+ * allocate additional space for the BSS segment. Any low
+ * memory that this function should avoid needs to be
+ * unavailable in the EFI memory map, as if the preferred
+ * address is not available the lowest available address will
+ * be used.
+ */
+efi_status_t efi_relocate_kernel(efi_system_table_t *sys_table_arg,
+				 unsigned long *image_addr,
+				 unsigned long image_size,
+				 unsigned long alloc_size,
+				 unsigned long preferred_addr,
+				 unsigned long alignment)
+{
+	unsigned long cur_image_addr;
+	unsigned long new_addr = 0;
+	efi_status_t status;
+	unsigned long nr_pages;
+	efi_physical_addr_t efi_addr = preferred_addr;
+
+	if (!image_addr || !image_size || !alloc_size)
+		return EFI_INVALID_PARAMETER;
+	if (alloc_size < image_size)
+		return EFI_INVALID_PARAMETER;
+
+	cur_image_addr = *image_addr;
+
+	/*
+	 * The EFI firmware loader could have placed the kernel image
+	 * anywhere in memory, but the kernel has restrictions on the
+	 * max physical address it can run at.  Some architectures
+	 * also have a preferred address, so first try to relocate
+	 * to the preferred address.  If that fails, allocate as low
+	 * as possible while respecting the required alignment.
+	 */
+	nr_pages = round_up(alloc_size, EFI_ALLOC_ALIGN) / EFI_PAGE_SIZE;
+	status = efi_call_early(allocate_pages,
+				EFI_ALLOCATE_ADDRESS, EFI_LOADER_DATA,
+				nr_pages, &efi_addr);
+	new_addr = efi_addr;
+	/*
+	 * If preferred address allocation failed allocate as low as
+	 * possible.
+	 */
+	if (status != EFI_SUCCESS) {
+		status = efi_low_alloc(sys_table_arg, alloc_size, alignment,
+				       &new_addr);
+	}
+	if (status != EFI_SUCCESS) {
+		pr_efi_err(sys_table_arg, "Failed to allocate usable memory for kernel.\n");
+		return status;
+	}
+
+	/*
+	 * We know source/dest won't overlap since both memory ranges
+	 * have been allocated by UEFI, so we can safely use memcpy.
+	 */
+	memcpy((void *)new_addr, (void *)cur_image_addr, image_size);
+
+	/* Return the new address of the relocated image. */
+	*image_addr = new_addr;
+
+	return status;
+}
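
Note the unit mix above: sizes are rounded to EFI_ALLOC_ALIGN, but the count
handed to allocate_pages() is in fixed 4 KiB EFI pages. A worked example,
assuming the 64 KiB EFI_ALLOC_ALIGN that arm64 uses:

    #include <stdio.h>

    #define EFI_PAGE_SIZE   4096UL
    #define EFI_ALLOC_ALIGN (16 * 4096UL)           /* assumption: 64 KiB */
    #define round_up(x, a)  ((((x) + (a) - 1) / (a)) * (a))

    int main(void)
    {
            /* 8 MiB + 1 byte rounds up to the next 64 KiB boundary, then
             * is expressed in 4 KiB EFI pages. */
            unsigned long alloc_size = (8UL << 20) + 1;
            unsigned long nr_pages =
                    round_up(alloc_size, EFI_ALLOC_ALIGN) / EFI_PAGE_SIZE;

            printf("nr_pages = %lu\n", nr_pages);   /* 2064 (8 MiB + 64 KiB) */
            return 0;
    }
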
+
+/*
+ * Get the number of UTF-8 bytes corresponding to a UTF-16 character.
+ * This overestimates for surrogates, but that is okay.
+ */
+static int efi_utf8_bytes(u16 c)
+{
+	return 1 + (c >= 0x80) + (c >= 0x800);
+}
+
+/*
+ * Convert a UTF-16 string, not necessarily null terminated, to UTF-8.
+ */
+static u8 *efi_utf16_to_utf8(u8 *dst, const u16 *src, int n)
+{
+	unsigned int c;
+
+	while (n--) {
+		c = *src++;
+		if (n && c >= 0xd800 && c <= 0xdbff &&
+		    *src >= 0xdc00 && *src <= 0xdfff) {
+			c = 0x10000 + ((c & 0x3ff) << 10) + (*src & 0x3ff);
+			src++;
+			n--;
+		}
+		if (c >= 0xd800 && c <= 0xdfff)
+			c = 0xfffd; /* Unmatched surrogate */
+		if (c < 0x80) {
+			*dst++ = c;
+			continue;
+		}
+		if (c < 0x800) {
+			*dst++ = 0xc0 + (c >> 6);
+			goto t1;
+		}
+		if (c < 0x10000) {
+			*dst++ = 0xe0 + (c >> 12);
+			goto t2;
+		}
+		*dst++ = 0xf0 + (c >> 18);
+		*dst++ = 0x80 + ((c >> 12) & 0x3f);
+	t2:
+		*dst++ = 0x80 + ((c >> 6) & 0x3f);
+	t1:
+		*dst++ = 0x80 + (c & 0x3f);
+	}
+
+	return dst;
+}
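
The surrogate-pair arithmetic above can be checked by hand. This short program
performs the same decode for the UTF-16 pair 0xD83D 0xDE00 (U+1F600) and
prints the expected 4-byte UTF-8 sequence f0 9f 98 80:

    #include <stdio.h>

    int main(void)
    {
            unsigned int hi = 0xd83d, lo = 0xde00;
            /* same combination step as the converter above */
            unsigned int c = 0x10000 + ((hi & 0x3ff) << 10) + (lo & 0x3ff);

            printf("U+%X -> %02x %02x %02x %02x\n", c,
                   0xf0 + (c >> 18),
                   0x80 + ((c >> 12) & 0x3f),
                   0x80 + ((c >> 6) & 0x3f),
                   0x80 + (c & 0x3f));
            return 0;
    }
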
+
+#ifndef MAX_CMDLINE_ADDRESS
+#define MAX_CMDLINE_ADDRESS	ULONG_MAX
+#endif
+
+/*
+ * Convert the UTF-16 UEFI command line to UTF-8 to pass to the kernel.
+ * The size of the allocated memory is returned in *cmd_line_len.
+ * Returns NULL on error.
+ */
+char *efi_convert_cmdline(efi_system_table_t *sys_table_arg,
+			  efi_loaded_image_t *image,
+			  int *cmd_line_len)
+{
+	const u16 *s2;
+	u8 *s1 = NULL;
+	unsigned long cmdline_addr = 0;
+	int load_options_chars = image->load_options_size / 2; /* UTF-16 */
+	const u16 *options = image->load_options;
+	int options_bytes = 0;  /* UTF-8 bytes */
+	int options_chars = 0;  /* UTF-16 chars */
+	efi_status_t status;
+	u16 zero = 0;
+
+	if (options) {
+		s2 = options;
+		while (*s2 && *s2 != '\n'
+		       && options_chars < load_options_chars) {
+			options_bytes += efi_utf8_bytes(*s2++);
+			options_chars++;
+		}
+	}
+
+	if (!options_chars) {
+		/* No command line options, so return an empty string */
+		options = &zero;
+	}
+
+	options_bytes++;	/* NUL termination */
+
+	status = efi_high_alloc(sys_table_arg, options_bytes, 0,
+				&cmdline_addr, MAX_CMDLINE_ADDRESS);
+	if (status != EFI_SUCCESS)
+		return NULL;
+
+	s1 = (u8 *)cmdline_addr;
+	s2 = (const u16 *)options;
+
+	s1 = efi_utf16_to_utf8(s1, s2, options_chars);
+	*s1 = '\0';
+
+	*cmd_line_len = options_bytes;
+	return (char *)cmdline_addr;
+}
+
+/*
+ * Handle calling ExitBootServices according to the requirements set out by the
+ * spec.  Obtains the current memory map, and returns that info after calling
+ * ExitBootServices.  The client must specify a function to perform any
+ * processing of the memory map data prior to ExitBootServices.  A client
+ * specific structure may be passed to the function via priv.  The client
+ * function may be called multiple times.
+ */
+efi_status_t efi_exit_boot_services(efi_system_table_t *sys_table_arg,
+				    void *handle,
+				    struct efi_boot_memmap *map,
+				    void *priv,
+				    efi_exit_boot_map_processing priv_func)
+{
+	efi_status_t status;
+
+	status = efi_get_memory_map(sys_table_arg, map);
+
+	if (status != EFI_SUCCESS)
+		goto fail;
+
+	status = priv_func(sys_table_arg, map, priv);
+	if (status != EFI_SUCCESS)
+		goto free_map;
+
+	status = efi_call_early(exit_boot_services, handle, *map->key_ptr);
+
+	if (status == EFI_INVALID_PARAMETER) {
+		/*
+		 * The memory map changed between efi_get_memory_map() and
+		 * exit_boot_services().  Per the UEFI Spec v2.6, Section 6.4:
+		 * EFI_BOOT_SERVICES.ExitBootServices we need to get the
+		 * updated map, and try again.  The spec implies one retry
+		 * should be sufficient, which is confirmed against the EDK2
+		 * implementation.  Per the spec, we can only invoke
+		 * get_memory_map() and exit_boot_services() - we cannot allocate,
+		 * so efi_get_memory_map() cannot be used, and we must reuse
+		 * the buffer.  For all practical purposes, the headroom in the
+		 * buffer should account for any changes in the map so the call
+		 * to get_memory_map() is expected to succeed here.
+		 */
+		*map->map_size = *map->buff_size;
+		status = efi_call_early(get_memory_map,
+					map->map_size,
+					*map->map,
+					map->key_ptr,
+					map->desc_size,
+					map->desc_ver);
+
+		/* exit_boot_services() was called, thus cannot free */
+		if (status != EFI_SUCCESS)
+			goto fail;
+
+		status = priv_func(sys_table_arg, map, priv);
+		/* exit_boot_services() was called, thus cannot free */
+		if (status != EFI_SUCCESS)
+			goto fail;
+
+		status = efi_call_early(exit_boot_services, handle, *map->key_ptr);
+	}
+
+	/* exit_boot_services() was called, thus cannot free */
+	if (status != EFI_SUCCESS)
+		goto fail;
+
+	return EFI_SUCCESS;
+
+free_map:
+	efi_call_early(free_pool, *map->map);
+fail:
+	return status;
+}
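
Because the EFI_INVALID_PARAMETER retry path re-invokes priv_func with a
refreshed memory map, the callback must be idempotent: it should derive its
results from @map on every call rather than accumulate state. A sketch of such
a callback (count_descs() is hypothetical; exit_boot_func() in fdt.c below is
the in-tree instance):

    static efi_status_t count_descs(efi_system_table_t *sys_table_arg,
                                    struct efi_boot_memmap *map, void *priv)
    {
            unsigned long *nr_descs = priv;         /* caller-owned result */

            /* overwrite, never add, so a second invocation is harmless */
            *nr_descs = *map->map_size / *map->desc_size;
            return EFI_SUCCESS;
    }
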
diff --git a/drivers/firmware/efi/libstub/efistub.h b/drivers/firmware/efi/libstub/efistub.h
new file mode 100644
index 0000000..32799cf
--- /dev/null
+++ b/drivers/firmware/efi/libstub/efistub.h
@@ -0,0 +1,67 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef _DRIVERS_FIRMWARE_EFI_EFISTUB_H
+#define _DRIVERS_FIRMWARE_EFI_EFISTUB_H
+
+/* error code which can't be mistaken for valid address */
+#define EFI_ERROR	(~0UL)
+
+/*
+ * __init annotations should not be used in the EFI stub, since the code is
+ * either included in the decompressor (x86, ARM) where they have no effect,
+ * or the whole stub is __init annotated at the section level (arm64), by
+ * renaming the sections, in which case the __init annotation will be
+ * redundant, and will result in section names like .init.init.text, and our
+ * linker script does not expect that.
+ */
+#undef __init
+
+/*
+ * Allow the platform to override the allocation granularity: this allows
+ * systems that have the capability to run with a larger page size to deal
+ * with the allocations for initrd and fdt more efficiently.
+ */
+#ifndef EFI_ALLOC_ALIGN
+#define EFI_ALLOC_ALIGN		EFI_PAGE_SIZE
+#endif
+
+extern int __pure nokaslr(void);
+extern int __pure is_quiet(void);
+
+#define pr_efi(sys_table, msg)		do {				\
+	if (!is_quiet()) efi_printk(sys_table, "EFI stub: "msg);	\
+} while (0)
+
+#define pr_efi_err(sys_table, msg) efi_printk(sys_table, "EFI stub: ERROR: "msg)
+
+void efi_char16_printk(efi_system_table_t *, efi_char16_t *);
+
+unsigned long get_dram_base(efi_system_table_t *sys_table_arg);
+
+efi_status_t allocate_new_fdt_and_exit_boot(efi_system_table_t *sys_table,
+					    void *handle,
+					    unsigned long *new_fdt_addr,
+					    unsigned long max_addr,
+					    u64 initrd_addr, u64 initrd_size,
+					    char *cmdline_ptr,
+					    unsigned long fdt_addr,
+					    unsigned long fdt_size);
+
+void *get_fdt(efi_system_table_t *sys_table, unsigned long *fdt_size);
+
+void efi_get_virtmap(efi_memory_desc_t *memory_map, unsigned long map_size,
+		     unsigned long desc_size, efi_memory_desc_t *runtime_map,
+		     int *count);
+
+efi_status_t efi_get_random_bytes(efi_system_table_t *sys_table,
+				  unsigned long size, u8 *out);
+
+efi_status_t efi_random_alloc(efi_system_table_t *sys_table_arg,
+			      unsigned long size, unsigned long align,
+			      unsigned long *addr, unsigned long random_seed);
+
+efi_status_t check_platform_features(efi_system_table_t *sys_table_arg);
+
+efi_status_t efi_random_get_seed(efi_system_table_t *sys_table_arg);
+
+#endif
diff --git a/drivers/firmware/efi/libstub/fdt.c b/drivers/firmware/efi/libstub/fdt.c
new file mode 100644
index 0000000..0c0d231
--- /dev/null
+++ b/drivers/firmware/efi/libstub/fdt.c
@@ -0,0 +1,391 @@
+/*
+ * FDT related Helper functions used by the EFI stub on multiple
+ * architectures. This should be #included by the EFI stub
+ * implementation files.
+ *
+ * Copyright 2013 Linaro Limited; author Roy Franz
+ *
+ * This file is part of the Linux kernel, and is made available
+ * under the terms of the GNU General Public License version 2.
+ *
+ */
+
+#include <linux/efi.h>
+#include <linux/libfdt.h>
+#include <asm/efi.h>
+
+#include "efistub.h"
+
+#define EFI_DT_ADDR_CELLS_DEFAULT 2
+#define EFI_DT_SIZE_CELLS_DEFAULT 2
+
+static void fdt_update_cell_size(efi_system_table_t *sys_table, void *fdt)
+{
+	int offset;
+
+	offset = fdt_path_offset(fdt, "/");
+	/* Set the #address-cells and #size-cells values for an empty tree */
+
+	fdt_setprop_u32(fdt, offset, "#address-cells",
+			EFI_DT_ADDR_CELLS_DEFAULT);
+
+	fdt_setprop_u32(fdt, offset, "#size-cells", EFI_DT_SIZE_CELLS_DEFAULT);
+}
+
+static efi_status_t update_fdt(efi_system_table_t *sys_table, void *orig_fdt,
+			       unsigned long orig_fdt_size,
+			       void *fdt, int new_fdt_size, char *cmdline_ptr,
+			       u64 initrd_addr, u64 initrd_size)
+{
+	int node, num_rsv;
+	int status;
+	u32 fdt_val32;
+	u64 fdt_val64;
+
+	/* Do some checks on the provided FDT, if it exists */
+	if (orig_fdt) {
+		if (fdt_check_header(orig_fdt)) {
+			pr_efi_err(sys_table, "Device Tree header not valid!\n");
+			return EFI_LOAD_ERROR;
+		}
+		/*
+		 * We don't get the size of the FDT if we get it from a
+		 * configuration table.
+		 */
+		if (orig_fdt_size && fdt_totalsize(orig_fdt) > orig_fdt_size) {
+			pr_efi_err(sys_table, "Truncated device tree! foo!\n");
+			return EFI_LOAD_ERROR;
+		}
+	}
+
+	if (orig_fdt) {
+		status = fdt_open_into(orig_fdt, fdt, new_fdt_size);
+	} else {
+		status = fdt_create_empty_tree(fdt, new_fdt_size);
+		if (status == 0) {
+			/*
+			 * Any failure from the following function is
+			 * non-critical.
+			 */
+			fdt_update_cell_size(sys_table, fdt);
+		}
+	}
+
+	if (status != 0)
+		goto fdt_set_fail;
+
+	/*
+	 * Delete all memory reserve map entries. When booting via UEFI,
+	 * kernel will use the UEFI memory map to find reserved regions.
+	 */
+	num_rsv = fdt_num_mem_rsv(fdt);
+	while (num_rsv-- > 0)
+		fdt_del_mem_rsv(fdt, num_rsv);
+
+	node = fdt_subnode_offset(fdt, 0, "chosen");
+	if (node < 0) {
+		node = fdt_add_subnode(fdt, 0, "chosen");
+		if (node < 0) {
+			status = node; /* node is error code when negative */
+			goto fdt_set_fail;
+		}
+	}
+
+	if ((cmdline_ptr != NULL) && (strlen(cmdline_ptr) > 0)) {
+		status = fdt_setprop(fdt, node, "bootargs", cmdline_ptr,
+				     strlen(cmdline_ptr) + 1);
+		if (status)
+			goto fdt_set_fail;
+	}
+
+	/* Set initrd address/end in device tree, if present */
+	if (initrd_size != 0) {
+		u64 initrd_image_end;
+		u64 initrd_image_start = cpu_to_fdt64(initrd_addr);
+
+		status = fdt_setprop(fdt, node, "linux,initrd-start",
+				     &initrd_image_start, sizeof(u64));
+		if (status)
+			goto fdt_set_fail;
+		initrd_image_end = cpu_to_fdt64(initrd_addr + initrd_size);
+		status = fdt_setprop(fdt, node, "linux,initrd-end",
+				     &initrd_image_end, sizeof(u64));
+		if (status)
+			goto fdt_set_fail;
+	}
+
+	/* Add FDT entries for EFI runtime services in chosen node. */
+	node = fdt_subnode_offset(fdt, 0, "chosen");
+	fdt_val64 = cpu_to_fdt64((u64)(unsigned long)sys_table);
+	status = fdt_setprop(fdt, node, "linux,uefi-system-table",
+			     &fdt_val64, sizeof(fdt_val64));
+	if (status)
+		goto fdt_set_fail;
+
+	fdt_val64 = U64_MAX; /* placeholder */
+	status = fdt_setprop(fdt, node, "linux,uefi-mmap-start",
+			     &fdt_val64,  sizeof(fdt_val64));
+	if (status)
+		goto fdt_set_fail;
+
+	fdt_val32 = U32_MAX; /* placeholder */
+	status = fdt_setprop(fdt, node, "linux,uefi-mmap-size",
+			     &fdt_val32,  sizeof(fdt_val32));
+	if (status)
+		goto fdt_set_fail;
+
+	status = fdt_setprop(fdt, node, "linux,uefi-mmap-desc-size",
+			     &fdt_val32, sizeof(fdt_val32));
+	if (status)
+		goto fdt_set_fail;
+
+	status = fdt_setprop(fdt, node, "linux,uefi-mmap-desc-ver",
+			     &fdt_val32, sizeof(fdt_val32));
+	if (status)
+		goto fdt_set_fail;
+
+	if (IS_ENABLED(CONFIG_RANDOMIZE_BASE)) {
+		efi_status_t efi_status;
+
+		efi_status = efi_get_random_bytes(sys_table, sizeof(fdt_val64),
+						  (u8 *)&fdt_val64);
+		if (efi_status == EFI_SUCCESS) {
+			status = fdt_setprop(fdt, node, "kaslr-seed",
+					     &fdt_val64, sizeof(fdt_val64));
+			if (status)
+				goto fdt_set_fail;
+		} else if (efi_status != EFI_NOT_FOUND) {
+			return efi_status;
+		}
+	}
+
+	/* shrink the FDT back to its minimum size */
+	fdt_pack(fdt);
+
+	return EFI_SUCCESS;
+
+fdt_set_fail:
+	if (status == -FDT_ERR_NOSPACE)
+		return EFI_BUFFER_TOO_SMALL;
+
+	return EFI_LOAD_ERROR;
+}
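
For reference, a sketch of how the kernel side could read back one of the
properties written above, using the same libfdt API (read_initrd_start() is a
hypothetical consumer; fdt64_to_cpu() is needed because FDT values are stored
big-endian):

    static u64 read_initrd_start(const void *fdt)
    {
            const fdt64_t *prop;
            int node, len;

            node = fdt_path_offset(fdt, "/chosen");
            if (node < 0)
                    return 0;

            prop = fdt_getprop(fdt, node, "linux,initrd-start", &len);
            if (!prop || len != sizeof(*prop))
                    return 0;

            return fdt64_to_cpu(*prop);
    }
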
+
+static efi_status_t update_fdt_memmap(void *fdt, struct efi_boot_memmap *map)
+{
+	int node = fdt_path_offset(fdt, "/chosen");
+	u64 fdt_val64;
+	u32 fdt_val32;
+	int err;
+
+	if (node < 0)
+		return EFI_LOAD_ERROR;
+
+	fdt_val64 = cpu_to_fdt64((unsigned long)*map->map);
+	err = fdt_setprop_inplace(fdt, node, "linux,uefi-mmap-start",
+				  &fdt_val64, sizeof(fdt_val64));
+	if (err)
+		return EFI_LOAD_ERROR;
+
+	fdt_val32 = cpu_to_fdt32(*map->map_size);
+	err = fdt_setprop_inplace(fdt, node, "linux,uefi-mmap-size",
+				  &fdt_val32, sizeof(fdt_val32));
+	if (err)
+		return EFI_LOAD_ERROR;
+
+	fdt_val32 = cpu_to_fdt32(*map->desc_size);
+	err = fdt_setprop_inplace(fdt, node, "linux,uefi-mmap-desc-size",
+				  &fdt_val32, sizeof(fdt_val32));
+	if (err)
+		return EFI_LOAD_ERROR;
+
+	fdt_val32 = cpu_to_fdt32(*map->desc_ver);
+	err = fdt_setprop_inplace(fdt, node, "linux,uefi-mmap-desc-ver",
+				  &fdt_val32, sizeof(fdt_val32));
+	if (err)
+		return EFI_LOAD_ERROR;
+
+	return EFI_SUCCESS;
+}
+
+#ifndef EFI_FDT_ALIGN
+#define EFI_FDT_ALIGN EFI_PAGE_SIZE
+#endif
+
+struct exit_boot_struct {
+	efi_memory_desc_t *runtime_map;
+	int *runtime_entry_count;
+	void *new_fdt_addr;
+};
+
+static efi_status_t exit_boot_func(efi_system_table_t *sys_table_arg,
+				   struct efi_boot_memmap *map,
+				   void *priv)
+{
+	struct exit_boot_struct *p = priv;
+	/*
+	 * Update the memory map with virtual addresses. The function will also
+	 * populate @runtime_map with copies of just the EFI_MEMORY_RUNTIME
+	 * entries so that we can pass it straight to SetVirtualAddressMap()
+	 */
+	efi_get_virtmap(*map->map, *map->map_size, *map->desc_size,
+			p->runtime_map, p->runtime_entry_count);
+
+	return update_fdt_memmap(p->new_fdt_addr, map);
+}
+
+#ifndef MAX_FDT_SIZE
+#define MAX_FDT_SIZE	SZ_2M
+#endif
+
+/*
+ * Allocate memory for a new FDT, then add EFI, commandline, and
+ * initrd related fields to the FDT.  The new FDT is allocated
+ * once, with a worst-case size of MAX_FDT_SIZE, so the update
+ * step cannot run out of space.
+ * EFI boot services are exited at the end of this function.
+ * There must be no allocations between the get_memory_map()
+ * call and the exit_boot_services() call, so the exiting of
+ * boot services is very tightly tied to the creation of the FDT
+ * with the final memory map in it.
+ */
+
+efi_status_t allocate_new_fdt_and_exit_boot(efi_system_table_t *sys_table,
+					    void *handle,
+					    unsigned long *new_fdt_addr,
+					    unsigned long max_addr,
+					    u64 initrd_addr, u64 initrd_size,
+					    char *cmdline_ptr,
+					    unsigned long fdt_addr,
+					    unsigned long fdt_size)
+{
+	unsigned long map_size, desc_size, buff_size;
+	u32 desc_ver;
+	unsigned long mmap_key;
+	efi_memory_desc_t *memory_map, *runtime_map;
+	efi_status_t status;
+	int runtime_entry_count = 0;
+	struct efi_boot_memmap map;
+	struct exit_boot_struct priv;
+
+	map.map =	&runtime_map;
+	map.map_size =	&map_size;
+	map.desc_size =	&desc_size;
+	map.desc_ver =	&desc_ver;
+	map.key_ptr =	&mmap_key;
+	map.buff_size =	&buff_size;
+
+	/*
+	 * Get a copy of the current memory map that we will use to prepare
+	 * the input for SetVirtualAddressMap(). We don't have to worry about
+	 * subsequent allocations adding entries, since they could not affect
+	 * the number of EFI_MEMORY_RUNTIME regions.
+	 */
+	status = efi_get_memory_map(sys_table, &map);
+	if (status != EFI_SUCCESS) {
+		pr_efi_err(sys_table, "Unable to retrieve UEFI memory map.\n");
+		return status;
+	}
+
+	pr_efi(sys_table,
+	       "Exiting boot services and installing virtual address map...\n");
+
+	map.map = &memory_map;
+	status = efi_high_alloc(sys_table, MAX_FDT_SIZE, EFI_FDT_ALIGN,
+				new_fdt_addr, max_addr);
+	if (status != EFI_SUCCESS) {
+		pr_efi_err(sys_table,
+			   "Unable to allocate memory for new device tree.\n");
+		goto fail;
+	}
+
+	/*
+	 * Now that we have done our final memory allocation (and free)
+	 * we can get the memory map key needed for exit_boot_services().
+	 */
+	status = efi_get_memory_map(sys_table, &map);
+	if (status != EFI_SUCCESS)
+		goto fail_free_new_fdt;
+
+	status = update_fdt(sys_table, (void *)fdt_addr, fdt_size,
+			    (void *)*new_fdt_addr, MAX_FDT_SIZE, cmdline_ptr,
+			    initrd_addr, initrd_size);
+
+	if (status != EFI_SUCCESS) {
+		pr_efi_err(sys_table, "Unable to construct new device tree.\n");
+		goto fail_free_new_fdt;
+	}
+
+	priv.runtime_map = runtime_map;
+	priv.runtime_entry_count = &runtime_entry_count;
+	priv.new_fdt_addr = (void *)*new_fdt_addr;
+	status = efi_exit_boot_services(sys_table, handle, &map, &priv,
+					exit_boot_func);
+
+	if (status == EFI_SUCCESS) {
+		efi_set_virtual_address_map_t *svam;
+
+		/* Install the new virtual address map */
+		svam = sys_table->runtime->set_virtual_address_map;
+		status = svam(runtime_entry_count * desc_size, desc_size,
+			      desc_ver, runtime_map);
+
+		/*
+		 * We are beyond the point of no return here, so if the call to
+		 * SetVirtualAddressMap() failed, we need to signal that to the
+		 * incoming kernel but proceed normally otherwise.
+		 */
+		if (status != EFI_SUCCESS) {
+			int l;
+
+			/*
+			 * Set the virtual address field of all
+			 * EFI_MEMORY_RUNTIME entries to 0. This will signal
+			 * the incoming kernel that no virtual translation has
+			 * been installed.
+			 */
+			for (l = 0; l < map_size; l += desc_size) {
+				efi_memory_desc_t *p = (void *)memory_map + l;
+
+				if (p->attribute & EFI_MEMORY_RUNTIME)
+					p->virt_addr = 0;
+			}
+		}
+		return EFI_SUCCESS;
+	}
+
+	pr_efi_err(sys_table, "Exit boot services failed.\n");
+
+fail_free_new_fdt:
+	efi_free(sys_table, MAX_FDT_SIZE, *new_fdt_addr);
+
+fail:
+	sys_table->boottime->free_pool(runtime_map);
+	return EFI_LOAD_ERROR;
+}
+
+void *get_fdt(efi_system_table_t *sys_table, unsigned long *fdt_size)
+{
+	efi_guid_t fdt_guid = DEVICE_TREE_GUID;
+	efi_config_table_t *tables;
+	void *fdt;
+	int i;
+
+	tables = (efi_config_table_t *) sys_table->tables;
+	fdt = NULL;
+
+	for (i = 0; i < sys_table->nr_tables; i++)
+		if (efi_guidcmp(tables[i].guid, fdt_guid) == 0) {
+			fdt = (void *) tables[i].table;
+			if (fdt_check_header(fdt) != 0) {
+				pr_efi_err(sys_table, "Invalid header detected on UEFI supplied FDT, ignoring ...\n");
+				return NULL;
+			}
+			*fdt_size = fdt_totalsize(fdt);
+			break;
+		}
+
+	return fdt;
+}
diff --git a/drivers/firmware/efi/libstub/gop.c b/drivers/firmware/efi/libstub/gop.c
new file mode 100644
index 0000000..24c461d
--- /dev/null
+++ b/drivers/firmware/efi/libstub/gop.c
@@ -0,0 +1,356 @@
+/* -----------------------------------------------------------------------
+ *
+ *   Copyright 2011 Intel Corporation; author Matt Fleming
+ *
+ *   This file is part of the Linux kernel, and is made available under
+ *   the terms of the GNU General Public License version 2.
+ *
+ * ----------------------------------------------------------------------- */
+
+#include <linux/efi.h>
+#include <linux/screen_info.h>
+#include <asm/efi.h>
+#include <asm/setup.h>
+
+static void find_bits(unsigned long mask, u8 *pos, u8 *size)
+{
+	u8 first, len;
+
+	first = 0;
+	len = 0;
+
+	if (mask) {
+		while (!(mask & 0x1)) {
+			mask = mask >> 1;
+			first++;
+		}
+
+		while (mask & 0x1) {
+			mask = mask >> 1;
+			len++;
+		}
+	}
+
+	*pos = first;
+	*size = len;
+}
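
find_bits() turns a contiguous channel mask into a bit position and width. A
standalone check using a classic XRGB8888 red mask, where 0x00ff0000 should
yield pos=16 and size=8:

    #include <stdio.h>

    typedef unsigned char u8;

    /* same scan as find_bits() above */
    static void find_bits(unsigned long mask, u8 *pos, u8 *size)
    {
            u8 first = 0, len = 0;

            if (mask) {
                    while (!(mask & 0x1)) {         /* skip trailing zeros */
                            mask >>= 1;
                            first++;
                    }
                    while (mask & 0x1) {            /* count the set run */
                            mask >>= 1;
                            len++;
                    }
            }
            *pos = first;
            *size = len;
    }

    int main(void)
    {
            u8 pos, size;

            find_bits(0x00ff0000UL, &pos, &size);
            printf("pos=%u size=%u\n", pos, size);  /* pos=16 size=8 */
            return 0;
    }
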
+
+static void
+setup_pixel_info(struct screen_info *si, u32 pixels_per_scan_line,
+		 struct efi_pixel_bitmask pixel_info, int pixel_format)
+{
+	if (pixel_format == PIXEL_RGB_RESERVED_8BIT_PER_COLOR) {
+		si->lfb_depth = 32;
+		si->lfb_linelength = pixels_per_scan_line * 4;
+		si->red_size = 8;
+		si->red_pos = 0;
+		si->green_size = 8;
+		si->green_pos = 8;
+		si->blue_size = 8;
+		si->blue_pos = 16;
+		si->rsvd_size = 8;
+		si->rsvd_pos = 24;
+	} else if (pixel_format == PIXEL_BGR_RESERVED_8BIT_PER_COLOR) {
+		si->lfb_depth = 32;
+		si->lfb_linelength = pixels_per_scan_line * 4;
+		si->red_size = 8;
+		si->red_pos = 16;
+		si->green_size = 8;
+		si->green_pos = 8;
+		si->blue_size = 8;
+		si->blue_pos = 0;
+		si->rsvd_size = 8;
+		si->rsvd_pos = 24;
+	} else if (pixel_format == PIXEL_BIT_MASK) {
+		find_bits(pixel_info.red_mask, &si->red_pos, &si->red_size);
+		find_bits(pixel_info.green_mask, &si->green_pos,
+			  &si->green_size);
+		find_bits(pixel_info.blue_mask, &si->blue_pos, &si->blue_size);
+		find_bits(pixel_info.reserved_mask, &si->rsvd_pos,
+			  &si->rsvd_size);
+		si->lfb_depth = si->red_size + si->green_size +
+			si->blue_size + si->rsvd_size;
+		si->lfb_linelength = (pixels_per_scan_line * si->lfb_depth) / 8;
+	} else {
+		si->lfb_depth = 4;
+		si->lfb_linelength = si->lfb_width / 2;
+		si->red_size = 0;
+		si->red_pos = 0;
+		si->green_size = 0;
+		si->green_pos = 0;
+		si->blue_size = 0;
+		si->blue_pos = 0;
+		si->rsvd_size = 0;
+		si->rsvd_pos = 0;
+	}
+}
+
+static efi_status_t
+__gop_query32(efi_system_table_t *sys_table_arg,
+	      struct efi_graphics_output_protocol_32 *gop32,
+	      struct efi_graphics_output_mode_info **info,
+	      unsigned long *size, u64 *fb_base)
+{
+	struct efi_graphics_output_protocol_mode_32 *mode;
+	efi_graphics_output_protocol_query_mode query_mode;
+	efi_status_t status;
+	unsigned long m;
+
+	m = gop32->mode;
+	mode = (struct efi_graphics_output_protocol_mode_32 *)m;
+	query_mode = (void *)(unsigned long)gop32->query_mode;
+
+	status = __efi_call_early(query_mode, (void *)gop32, mode->mode, size,
+				  info);
+	if (status != EFI_SUCCESS)
+		return status;
+
+	*fb_base = mode->frame_buffer_base;
+	return status;
+}
+
+static efi_status_t
+setup_gop32(efi_system_table_t *sys_table_arg, struct screen_info *si,
+	    efi_guid_t *proto, unsigned long size, void **gop_handle)
+{
+	struct efi_graphics_output_protocol_32 *gop32, *first_gop;
+	unsigned long nr_gops;
+	u16 width, height;
+	u32 pixels_per_scan_line;
+	u32 ext_lfb_base;
+	u64 fb_base;
+	struct efi_pixel_bitmask pixel_info;
+	int pixel_format;
+	efi_status_t status = EFI_NOT_FOUND;
+	u32 *handles = (u32 *)(unsigned long)gop_handle;
+	int i;
+
+	first_gop = NULL;
+	gop32 = NULL;
+
+	nr_gops = size / sizeof(u32);
+	for (i = 0; i < nr_gops; i++) {
+		struct efi_graphics_output_mode_info *info = NULL;
+		efi_guid_t conout_proto = EFI_CONSOLE_OUT_DEVICE_GUID;
+		bool conout_found = false;
+		void *dummy = NULL;
+		efi_handle_t h = (efi_handle_t)(unsigned long)handles[i];
+		u64 current_fb_base;
+
+		status = efi_call_early(handle_protocol, h,
+					proto, (void **)&gop32);
+		if (status != EFI_SUCCESS)
+			continue;
+
+		status = efi_call_early(handle_protocol, h,
+					&conout_proto, &dummy);
+		if (status == EFI_SUCCESS)
+			conout_found = true;
+
+		status = __gop_query32(sys_table_arg, gop32, &info, &size,
+				       &current_fb_base);
+		if (status == EFI_SUCCESS && (!first_gop || conout_found) &&
+		    info->pixel_format != PIXEL_BLT_ONLY) {
+			/*
+			 * Systems that use the UEFI Console Splitter may
+			 * provide multiple GOP devices, not all of which are
+			 * backed by real hardware. The workaround is to search
+			 * for a GOP implementing the ConOut protocol, and if
+			 * one isn't found, to just fall back to the first GOP.
+			 */
+			width = info->horizontal_resolution;
+			height = info->vertical_resolution;
+			pixel_format = info->pixel_format;
+			pixel_info = info->pixel_information;
+			pixels_per_scan_line = info->pixels_per_scan_line;
+			fb_base = current_fb_base;
+
+			/*
+			 * Once we've found a GOP supporting ConOut,
+			 * don't bother looking any further.
+			 */
+			first_gop = gop32;
+			if (conout_found)
+				break;
+		}
+	}
+
+	/* Did we find any GOPs? */
+	if (!first_gop)
+		goto out;
+
+	/* EFI framebuffer */
+	si->orig_video_isVGA = VIDEO_TYPE_EFI;
+
+	si->lfb_width = width;
+	si->lfb_height = height;
+	si->lfb_base = fb_base;
+
+	ext_lfb_base = fb_base >> 32;
+	if (ext_lfb_base) {
+		si->capabilities |= VIDEO_CAPABILITY_64BIT_BASE;
+		si->ext_lfb_base = ext_lfb_base;
+	}
+
+	si->pages = 1;
+
+	setup_pixel_info(si, pixels_per_scan_line, pixel_info, pixel_format);
+
+	si->lfb_size = si->lfb_linelength * si->lfb_height;
+
+	si->capabilities |= VIDEO_CAPABILITY_SKIP_QUIRKS;
+out:
+	return status;
+}
+
+static efi_status_t
+__gop_query64(efi_system_table_t *sys_table_arg,
+	      struct efi_graphics_output_protocol_64 *gop64,
+	      struct efi_graphics_output_mode_info **info,
+	      unsigned long *size, u64 *fb_base)
+{
+	struct efi_graphics_output_protocol_mode_64 *mode;
+	efi_graphics_output_protocol_query_mode query_mode;
+	efi_status_t status;
+	unsigned long m;
+
+	m = gop64->mode;
+	mode = (struct efi_graphics_output_protocol_mode_64 *)m;
+	query_mode = (void *)(unsigned long)gop64->query_mode;
+
+	status = __efi_call_early(query_mode, (void *)gop64, mode->mode, size,
+				  info);
+	if (status != EFI_SUCCESS)
+		return status;
+
+	*fb_base = mode->frame_buffer_base;
+	return status;
+}
+
+static efi_status_t
+setup_gop64(efi_system_table_t *sys_table_arg, struct screen_info *si,
+	    efi_guid_t *proto, unsigned long size, void **gop_handle)
+{
+	struct efi_graphics_output_protocol_64 *gop64, *first_gop;
+	unsigned long nr_gops;
+	u16 width, height;
+	u32 pixels_per_scan_line;
+	u32 ext_lfb_base;
+	u64 fb_base;
+	struct efi_pixel_bitmask pixel_info;
+	int pixel_format;
+	efi_status_t status = EFI_NOT_FOUND;
+	u64 *handles = (u64 *)(unsigned long)gop_handle;
+	int i;
+
+	first_gop = NULL;
+	gop64 = NULL;
+
+	nr_gops = size / sizeof(u64);
+	for (i = 0; i < nr_gops; i++) {
+		struct efi_graphics_output_mode_info *info = NULL;
+		efi_guid_t conout_proto = EFI_CONSOLE_OUT_DEVICE_GUID;
+		bool conout_found = false;
+		void *dummy = NULL;
+		efi_handle_t h = (efi_handle_t)(unsigned long)handles[i];
+		u64 current_fb_base;
+
+		status = efi_call_early(handle_protocol, h,
+					proto, (void **)&gop64);
+		if (status != EFI_SUCCESS)
+			continue;
+
+		status = efi_call_early(handle_protocol, h,
+					&conout_proto, &dummy);
+		if (status == EFI_SUCCESS)
+			conout_found = true;
+
+		status = __gop_query64(sys_table_arg, gop64, &info, &size,
+				       &current_fb_base);
+		if (status == EFI_SUCCESS && (!first_gop || conout_found) &&
+		    info->pixel_format != PIXEL_BLT_ONLY) {
+			/*
+			 * Systems that use the UEFI Console Splitter may
+			 * provide multiple GOP devices, not all of which are
+			 * backed by real hardware. The workaround is to search
+			 * for a GOP implementing the ConOut protocol, and if
+			 * one isn't found, to just fall back to the first GOP.
+			 */
+			width = info->horizontal_resolution;
+			height = info->vertical_resolution;
+			pixel_format = info->pixel_format;
+			pixel_info = info->pixel_information;
+			pixels_per_scan_line = info->pixels_per_scan_line;
+			fb_base = current_fb_base;
+
+			/*
+			 * Once we've found a GOP supporting ConOut,
+			 * don't bother looking any further.
+			 */
+			first_gop = gop64;
+			if (conout_found)
+				break;
+		}
+	}
+
+	/* Did we find any GOPs? */
+	if (!first_gop)
+		goto out;
+
+	/* EFI framebuffer */
+	si->orig_video_isVGA = VIDEO_TYPE_EFI;
+
+	si->lfb_width = width;
+	si->lfb_height = height;
+	si->lfb_base = fb_base;
+
+	ext_lfb_base = fb_base >> 32;
+	if (ext_lfb_base) {
+		si->capabilities |= VIDEO_CAPABILITY_64BIT_BASE;
+		si->ext_lfb_base = ext_lfb_base;
+	}
+
+	si->pages = 1;
+
+	setup_pixel_info(si, pixels_per_scan_line, pixel_info, pixel_format);
+
+	si->lfb_size = si->lfb_linelength * si->lfb_height;
+
+	si->capabilities |= VIDEO_CAPABILITY_SKIP_QUIRKS;
+out:
+	return status;
+}
+
+/*
+ * See if we have Graphics Output Protocol
+ */
+efi_status_t efi_setup_gop(efi_system_table_t *sys_table_arg,
+			   struct screen_info *si, efi_guid_t *proto,
+			   unsigned long size)
+{
+	efi_status_t status;
+	void **gop_handle = NULL;
+
+	status = efi_call_early(allocate_pool, EFI_LOADER_DATA,
+				size, (void **)&gop_handle);
+	if (status != EFI_SUCCESS)
+		return status;
+
+	status = efi_call_early(locate_handle,
+				EFI_LOCATE_BY_PROTOCOL,
+				proto, NULL, &size, gop_handle);
+	if (status != EFI_SUCCESS)
+		goto free_handle;
+
+	if (efi_is_64bit()) {
+		status = setup_gop64(sys_table_arg, si, proto, size,
+				     gop_handle);
+	} else {
+		status = setup_gop32(sys_table_arg, si, proto, size,
+				     gop_handle);
+	}
+
+free_handle:
+	efi_call_early(free_pool, gop_handle);
+	return status;
+}
diff --git a/drivers/firmware/efi/libstub/random.c b/drivers/firmware/efi/libstub/random.c
new file mode 100644
index 0000000..e0e603a
--- /dev/null
+++ b/drivers/firmware/efi/libstub/random.c
@@ -0,0 +1,192 @@
+/*
+ * Copyright (C) 2016 Linaro Ltd;  <ard.biesheuvel@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/efi.h>
+#include <linux/log2.h>
+#include <asm/efi.h>
+
+#include "efistub.h"
+
+struct efi_rng_protocol {
+	efi_status_t (*get_info)(struct efi_rng_protocol *,
+				 unsigned long *, efi_guid_t *);
+	efi_status_t (*get_rng)(struct efi_rng_protocol *,
+				efi_guid_t *, unsigned long, u8 *out);
+};
+
+efi_status_t efi_get_random_bytes(efi_system_table_t *sys_table_arg,
+				  unsigned long size, u8 *out)
+{
+	efi_guid_t rng_proto = EFI_RNG_PROTOCOL_GUID;
+	efi_status_t status;
+	struct efi_rng_protocol *rng;
+
+	status = efi_call_early(locate_protocol, &rng_proto, NULL,
+				(void **)&rng);
+	if (status != EFI_SUCCESS)
+		return status;
+
+	return rng->get_rng(rng, NULL, size, out);
+}
+
+/*
+ * Return the number of slots covered by this entry, i.e., the number of
+ * addresses it covers that are suitably aligned and supply enough room
+ * for the allocation.
+ */
+static unsigned long get_entry_num_slots(efi_memory_desc_t *md,
+					 unsigned long size,
+					 unsigned long align_shift)
+{
+	unsigned long align = 1UL << align_shift;
+	u64 first_slot, last_slot, region_end;
+
+	if (md->type != EFI_CONVENTIONAL_MEMORY)
+		return 0;
+
+	region_end = min((u64)ULONG_MAX,
+			 md->phys_addr + md->num_pages * EFI_PAGE_SIZE - 1);
+
+	first_slot = round_up(md->phys_addr, align);
+	last_slot = round_down(region_end - size + 1, align);
+
+	if (first_slot > last_slot)
+		return 0;
+
+	return ((unsigned long)(last_slot - first_slot) >> align_shift) + 1;
+}
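
A worked example of the slot arithmetic: a 1 MiB conventional-memory region
holds sixteen 64 KiB-aligned candidate slots for a 64 KiB allocation
(align_shift = 16):

    #include <stdio.h>

    #define round_up(x, a)   ((((x) + (a) - 1) / (a)) * (a))
    #define round_down(x, a) (((x) / (a)) * (a))

    int main(void)
    {
            unsigned long long base = 0x40000000ULL, size = 1ULL << 20;
            unsigned long long alloc = 64 * 1024, align = 64 * 1024;
            unsigned long long region_end = base + size - 1;

            unsigned long long first = round_up(base, align);
            unsigned long long last  = round_down(region_end - alloc + 1, align);

            printf("slots = %llu\n", (last - first) / align + 1);  /* 16 */
            return 0;
    }
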
+
+/*
+ * The UEFI memory descriptors have a virtual address field that is only used
+ * when installing the virtual mapping using SetVirtualAddressMap(). Since it
+ * is unused here, we can reuse it to keep track of each descriptor's slot
+ * count.
+ */
+#define MD_NUM_SLOTS(md)	((md)->virt_addr)
+
+efi_status_t efi_random_alloc(efi_system_table_t *sys_table_arg,
+			      unsigned long size,
+			      unsigned long align,
+			      unsigned long *addr,
+			      unsigned long random_seed)
+{
+	unsigned long map_size, desc_size, total_slots = 0, target_slot;
+	unsigned long buff_size;
+	efi_status_t status;
+	efi_memory_desc_t *memory_map;
+	int map_offset;
+	struct efi_boot_memmap map;
+
+	map.map =	&memory_map;
+	map.map_size =	&map_size;
+	map.desc_size =	&desc_size;
+	map.desc_ver =	NULL;
+	map.key_ptr =	NULL;
+	map.buff_size =	&buff_size;
+
+	status = efi_get_memory_map(sys_table_arg, &map);
+	if (status != EFI_SUCCESS)
+		return status;
+
+	if (align < EFI_ALLOC_ALIGN)
+		align = EFI_ALLOC_ALIGN;
+
+	/* count the suitable slots in each memory map entry */
+	for (map_offset = 0; map_offset < map_size; map_offset += desc_size) {
+		efi_memory_desc_t *md = (void *)memory_map + map_offset;
+		unsigned long slots;
+
+		slots = get_entry_num_slots(md, size, ilog2(align));
+		MD_NUM_SLOTS(md) = slots;
+		total_slots += slots;
+	}
+
+	/* pick a random slot index in the range [0, total_slots) */
+	target_slot = (total_slots * (u16)random_seed) >> 16;
+
+	/*
+	 * target_slot is now a value in the range [0, total_slots), and so
+	 * it corresponds with exactly one of the suitable slots we recorded
+	 * when iterating over the memory map the first time around.
+	 *
+	 * So iterate over the memory map again, subtracting the number of
+	 * slots of each entry at each iteration, until we have found the entry
+	 * that covers our chosen slot. Use the residual value of target_slot
+	 * to calculate the randomly chosen address, and allocate it directly
+	 * using EFI_ALLOCATE_ADDRESS.
+	 */
+	for (map_offset = 0; map_offset < map_size; map_offset += desc_size) {
+		efi_memory_desc_t *md = (void *)memory_map + map_offset;
+		efi_physical_addr_t target;
+		unsigned long pages;
+
+		if (target_slot >= MD_NUM_SLOTS(md)) {
+			target_slot -= MD_NUM_SLOTS(md);
+			continue;
+		}
+
+		target = round_up(md->phys_addr, align) + target_slot * align;
+		pages = round_up(size, EFI_PAGE_SIZE) / EFI_PAGE_SIZE;
+
+		status = efi_call_early(allocate_pages, EFI_ALLOCATE_ADDRESS,
+					EFI_LOADER_DATA, pages, &target);
+		if (status == EFI_SUCCESS)
+			*addr = target;
+		break;
+	}
+
+	efi_call_early(free_pool, memory_map);
+
+	return status;
+}
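
The slot-picking step scales the low 16 bits of the seed by the slot count, so
the result always lands in [0, total_slots) without a modulo. For example:

    #include <stdio.h>

    int main(void)
    {
            unsigned long total_slots = 1000;
            unsigned long seed = 0xdeadbeef;
            /* (1000 * 0xbeef) >> 16 = 745 */
            unsigned long target = (total_slots * (unsigned short)seed) >> 16;

            printf("target_slot = %lu\n", target);
            return 0;
    }
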
+
+efi_status_t efi_random_get_seed(efi_system_table_t *sys_table_arg)
+{
+	efi_guid_t rng_proto = EFI_RNG_PROTOCOL_GUID;
+	efi_guid_t rng_algo_raw = EFI_RNG_ALGORITHM_RAW;
+	efi_guid_t rng_table_guid = LINUX_EFI_RANDOM_SEED_TABLE_GUID;
+	struct efi_rng_protocol *rng;
+	struct linux_efi_random_seed *seed;
+	efi_status_t status;
+
+	status = efi_call_early(locate_protocol, &rng_proto, NULL,
+				(void **)&rng);
+	if (status != EFI_SUCCESS)
+		return status;
+
+	status = efi_call_early(allocate_pool, EFI_RUNTIME_SERVICES_DATA,
+				sizeof(*seed) + EFI_RANDOM_SEED_SIZE,
+				(void **)&seed);
+	if (status != EFI_SUCCESS)
+		return status;
+
+	status = rng->get_rng(rng, &rng_algo_raw, EFI_RANDOM_SEED_SIZE,
+			      seed->bits);
+	if (status == EFI_UNSUPPORTED)
+		/*
+		 * Use whatever algorithm we have available if the raw algorithm
+		 * is not implemented.
+		 */
+		status = rng->get_rng(rng, NULL, EFI_RANDOM_SEED_SIZE,
+				      seed->bits);
+
+	if (status != EFI_SUCCESS)
+		goto err_freepool;
+
+	seed->size = EFI_RANDOM_SEED_SIZE;
+	status = efi_call_early(install_configuration_table, &rng_table_guid,
+				seed);
+	if (status != EFI_SUCCESS)
+		goto err_freepool;
+
+	return EFI_SUCCESS;
+
+err_freepool:
+	efi_call_early(free_pool, seed);
+	return status;
+}
diff --git a/drivers/firmware/efi/libstub/secureboot.c b/drivers/firmware/efi/libstub/secureboot.c
new file mode 100644
index 0000000..72d9dfb
--- /dev/null
+++ b/drivers/firmware/efi/libstub/secureboot.c
@@ -0,0 +1,83 @@
+/*
+ * Secure boot handling.
+ *
+ * Copyright (C) 2013,2014 Linaro Limited
+ *     Roy Franz <roy.franz@linaro.org>
+ * Copyright (C) 2013 Red Hat, Inc.
+ *     Mark Salter <msalter@redhat.com>
+ *
+ * This file is part of the Linux kernel, and is made available under the
+ * terms of the GNU General Public License version 2.
+ */
+#include <linux/efi.h>
+#include <asm/efi.h>
+
+#include "efistub.h"
+
+/* BIOS variables */
+static const efi_guid_t efi_variable_guid = EFI_GLOBAL_VARIABLE_GUID;
+static const efi_char16_t efi_SecureBoot_name[] = L"SecureBoot";
+static const efi_char16_t efi_SetupMode_name[] = L"SetupMode";
+
+/* SHIM variables */
+static const efi_guid_t shim_guid = EFI_SHIM_LOCK_GUID;
+static const efi_char16_t shim_MokSBState_name[] = L"MokSBState";
+
+#define get_efi_var(name, vendor, ...) \
+	efi_call_runtime(get_variable, \
+			 (efi_char16_t *)(name), (efi_guid_t *)(vendor), \
+			 __VA_ARGS__)
+
+/*
+ * Determine whether we're in secure boot mode.
+ *
+ * Please keep the logic in sync with
+ * arch/x86/xen/efi.c:xen_efi_get_secureboot().
+ */
+enum efi_secureboot_mode efi_get_secureboot(efi_system_table_t *sys_table_arg)
+{
+	u32 attr;
+	u8 secboot, setupmode, moksbstate;
+	unsigned long size;
+	efi_status_t status;
+
+	size = sizeof(secboot);
+	status = get_efi_var(efi_SecureBoot_name, &efi_variable_guid,
+			     NULL, &size, &secboot);
+	if (status == EFI_NOT_FOUND)
+		return efi_secureboot_mode_disabled;
+	if (status != EFI_SUCCESS)
+		goto out_efi_err;
+
+	size = sizeof(setupmode);
+	status = get_efi_var(efi_SetupMode_name, &efi_variable_guid,
+			     NULL, &size, &setupmode);
+	if (status != EFI_SUCCESS)
+		goto out_efi_err;
+
+	if (secboot == 0 || setupmode == 1)
+		return efi_secureboot_mode_disabled;
+
+	/*
+	 * See if a user has put the shim into insecure mode. If so, and if the
+	 * variable doesn't have the runtime attribute set, we might as well
+	 * honor that.
+	 */
+	size = sizeof(moksbstate);
+	status = get_efi_var(shim_MokSBState_name, &shim_guid,
+			     &attr, &size, &moksbstate);
+
+	/* If it fails, we don't care why. Default to secure */
+	if (status != EFI_SUCCESS)
+		goto secure_boot_enabled;
+	if (!(attr & EFI_VARIABLE_RUNTIME_ACCESS) && moksbstate == 1)
+		return efi_secureboot_mode_disabled;
+
+secure_boot_enabled:
+	pr_efi(sys_table_arg, "UEFI Secure Boot is enabled.\n");
+	return efi_secureboot_mode_enabled;
+
+out_efi_err:
+	pr_efi_err(sys_table_arg, "Could not determine UEFI Secure Boot status.\n");
+	return efi_secureboot_mode_unknown;
+}
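
The variable checks above reduce to a small predicate: SecureBoot and
SetupMode gate the feature, and shim's MokSBState may only disable it when the
variable lacks the runtime-access attribute (i.e. it was set by trusted
boot-services code). A userspace model (secure_boot_enabled() is hypothetical;
status handling is elided):

    #include <stdio.h>

    static int secure_boot_enabled(int secboot, int setupmode,
                                   int moksb_found, int moksb_runtime,
                                   int moksbstate)
    {
            if (secboot == 0 || setupmode == 1)
                    return 0;
            if (moksb_found && !moksb_runtime && moksbstate == 1)
                    return 0;
            return 1;
    }

    int main(void)
    {
            /* SecureBoot=1, SetupMode=0, no MokSBState -> enabled */
            printf("%d\n", secure_boot_enabled(1, 0, 0, 0, 0));
            return 0;
    }
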
diff --git a/drivers/firmware/efi/libstub/string.c b/drivers/firmware/efi/libstub/string.c
new file mode 100644
index 0000000..ed10e3f
--- /dev/null
+++ b/drivers/firmware/efi/libstub/string.c
@@ -0,0 +1,58 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Taken from:
+ *  linux/lib/string.c
+ *
+ *  Copyright (C) 1991, 1992  Linus Torvalds
+ */
+
+#include <linux/types.h>
+#include <linux/string.h>
+
+#ifndef __HAVE_ARCH_STRSTR
+/**
+ * strstr - Find the first substring in a %NUL terminated string
+ * @s1: The string to be searched
+ * @s2: The string to search for
+ */
+char *strstr(const char *s1, const char *s2)
+{
+	size_t l1, l2;
+
+	l2 = strlen(s2);
+	if (!l2)
+		return (char *)s1;
+	l1 = strlen(s1);
+	while (l1 >= l2) {
+		l1--;
+		if (!memcmp(s1, s2, l2))
+			return (char *)s1;
+		s1++;
+	}
+	return NULL;
+}
+#endif
+
+#ifndef __HAVE_ARCH_STRNCMP
+/**
+ * strncmp - Compare two length-limited strings
+ * @cs: One string
+ * @ct: Another string
+ * @count: The maximum number of bytes to compare
+ */
+int strncmp(const char *cs, const char *ct, size_t count)
+{
+	unsigned char c1, c2;
+
+	while (count) {
+		c1 = *cs++;
+		c2 = *ct++;
+		if (c1 != c2)
+			return c1 < c2 ? -1 : 1;
+		if (!c1)
+			break;
+		count--;
+	}
+	return 0;
+}
+#endif
diff --git a/drivers/firmware/efi/libstub/tpm.c b/drivers/firmware/efi/libstub/tpm.c
new file mode 100644
index 0000000..a90b0b8
--- /dev/null
+++ b/drivers/firmware/efi/libstub/tpm.c
@@ -0,0 +1,136 @@
+/*
+ * TPM handling.
+ *
+ * Copyright (C) 2016 CoreOS, Inc
+ * Copyright (C) 2017 Google, Inc.
+ *     Matthew Garrett <mjg59@google.com>
+ *     Thiebaud Weksteen <tweek@google.com>
+ *
+ * This file is part of the Linux kernel, and is made available under the
+ * terms of the GNU General Public License version 2.
+ */
+#include <linux/efi.h>
+#include <linux/tpm_eventlog.h>
+#include <asm/efi.h>
+
+#include "efistub.h"
+
+#ifdef CONFIG_RESET_ATTACK_MITIGATION
+static const efi_char16_t efi_MemoryOverWriteRequest_name[] =
+	L"MemoryOverwriteRequestControl";
+
+#define MEMORY_ONLY_RESET_CONTROL_GUID \
+	EFI_GUID(0xe20939be, 0x32d4, 0x41be, 0xa1, 0x50, 0x89, 0x7f, 0x85, 0xd4, 0x98, 0x29)
+
+#define get_efi_var(name, vendor, ...) \
+	efi_call_runtime(get_variable, \
+			 (efi_char16_t *)(name), (efi_guid_t *)(vendor), \
+			 __VA_ARGS__)
+
+#define set_efi_var(name, vendor, ...) \
+	efi_call_runtime(set_variable, \
+			 (efi_char16_t *)(name), (efi_guid_t *)(vendor), \
+			 __VA_ARGS__)
+
+/*
+ * Enable reboot attack mitigation. This requests that the firmware clear the
+ * RAM on next reboot before proceeding with boot, ensuring that any secrets
+ * are cleared. If userland has ensured that all secrets have been removed
+ * from RAM before reboot it can simply reset this variable.
+ */
+void efi_enable_reset_attack_mitigation(efi_system_table_t *sys_table_arg)
+{
+	u8 val = 1;
+	efi_guid_t var_guid = MEMORY_ONLY_RESET_CONTROL_GUID;
+	efi_status_t status;
+	unsigned long datasize = 0;
+
+	status = get_efi_var(efi_MemoryOverWriteRequest_name, &var_guid,
+			     NULL, &datasize, NULL);
+
+	if (status == EFI_NOT_FOUND)
+		return;
+
+	set_efi_var(efi_MemoryOverWriteRequest_name, &var_guid,
+		    EFI_VARIABLE_NON_VOLATILE |
+		    EFI_VARIABLE_BOOTSERVICE_ACCESS |
+		    EFI_VARIABLE_RUNTIME_ACCESS, sizeof(val), &val);
+}
+
+#endif
+
+static void efi_retrieve_tpm2_eventlog_1_2(efi_system_table_t *sys_table_arg)
+{
+	efi_guid_t tcg2_guid = EFI_TCG2_PROTOCOL_GUID;
+	efi_guid_t linux_eventlog_guid = LINUX_EFI_TPM_EVENT_LOG_GUID;
+	efi_status_t status;
+	efi_physical_addr_t log_location = 0, log_last_entry = 0;
+	struct linux_efi_tpm_eventlog *log_tbl = NULL;
+	unsigned long first_entry_addr, last_entry_addr;
+	size_t log_size, last_entry_size;
+	efi_bool_t truncated;
+	void *tcg2_protocol = NULL;
+
+	status = efi_call_early(locate_protocol, &tcg2_guid, NULL,
+				&tcg2_protocol);
+	if (status != EFI_SUCCESS)
+		return;
+
+	status = efi_call_proto(efi_tcg2_protocol, get_event_log, tcg2_protocol,
+				EFI_TCG2_EVENT_LOG_FORMAT_TCG_1_2,
+				&log_location, &log_last_entry, &truncated);
+	if (status != EFI_SUCCESS)
+		return;
+
+	if (!log_location)
+		return;
+	first_entry_addr = (unsigned long) log_location;
+
+	/*
+	 * We populate the EFI table even if the logs are empty.
+	 */
+	if (!log_last_entry) {
+		log_size = 0;
+	} else {
+		last_entry_addr = (unsigned long) log_last_entry;
+		/*
+		 * get_event_log only returns the address of the last entry.
+		 * We need to calculate its size to deduce the full size of
+		 * the logs.
+		 */
+		last_entry_size = sizeof(struct tcpa_event) +
+			((struct tcpa_event *) last_entry_addr)->event_size;
+		log_size = log_last_entry - log_location + last_entry_size;
+	}
+
+	/* Allocate space for the logs and copy them. */
+	status = efi_call_early(allocate_pool, EFI_LOADER_DATA,
+				sizeof(*log_tbl) + log_size,
+				(void **) &log_tbl);
+
+	if (status != EFI_SUCCESS) {
+		efi_printk(sys_table_arg,
+			   "Unable to allocate memory for event log\n");
+		return;
+	}
+
+	memset(log_tbl, 0, sizeof(*log_tbl) + log_size);
+	log_tbl->size = log_size;
+	log_tbl->version = EFI_TCG2_EVENT_LOG_FORMAT_TCG_1_2;
+	memcpy(log_tbl->log, (void *) first_entry_addr, log_size);
+
+	status = efi_call_early(install_configuration_table,
+				&linux_eventlog_guid, log_tbl);
+	if (status != EFI_SUCCESS)
+		goto err_free;
+	return;
+
+err_free:
+	efi_call_early(free_pool, log_tbl);
+}
+
+void efi_retrieve_tpm2_eventlog(efi_system_table_t *sys_table_arg)
+{
+	/* Only try to retrieve the logs in 1.2 format. */
+	efi_retrieve_tpm2_eventlog_1_2(sys_table_arg);
+}
diff --git a/drivers/firmware/efi/memattr.c b/drivers/firmware/efi/memattr.c
new file mode 100644
index 0000000..8986757
--- /dev/null
+++ b/drivers/firmware/efi/memattr.c
@@ -0,0 +1,186 @@
+/*
+ * Copyright (C) 2016 Linaro Ltd. <ard.biesheuvel@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#define pr_fmt(fmt)	"efi: memattr: " fmt
+
+#include <linux/efi.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/memblock.h>
+
+#include <asm/early_ioremap.h>
+
+static int __initdata tbl_size;
+
+/*
+ * Reserve the memory associated with the Memory Attributes configuration
+ * table, if it exists.
+ */
+int __init efi_memattr_init(void)
+{
+	efi_memory_attributes_table_t *tbl;
+
+	if (efi.mem_attr_table == EFI_INVALID_TABLE_ADDR)
+		return 0;
+
+	tbl = early_memremap(efi.mem_attr_table, sizeof(*tbl));
+	if (!tbl) {
+		pr_err("Failed to map EFI Memory Attributes table @ 0x%lx\n",
+		       efi.mem_attr_table);
+		return -ENOMEM;
+	}
+
+	if (tbl->version > 1) {
+		pr_warn("Unexpected EFI Memory Attributes table version %d\n",
+			tbl->version);
+		goto unmap;
+	}
+
+	tbl_size = sizeof(*tbl) + tbl->num_entries * tbl->desc_size;
+	memblock_reserve(efi.mem_attr_table, tbl_size);
+	set_bit(EFI_MEM_ATTR, &efi.flags);
+
+unmap:
+	early_memunmap(tbl, sizeof(*tbl));
+	return 0;
+}
+
+/*
+ * Validates the UEFI memory descriptor @in and, if it is covered entirely
+ * by a UEFI memory map entry with matching attributes, returns true with a
+ * copy placed in @out. The virtual address of @out is set according to the
+ * matching entry that was found.
+ */
+static bool entry_is_valid(const efi_memory_desc_t *in, efi_memory_desc_t *out)
+{
+	u64 in_paddr = in->phys_addr;
+	u64 in_size = in->num_pages << EFI_PAGE_SHIFT;
+	efi_memory_desc_t *md;
+
+	*out = *in;
+
+	if (in->type != EFI_RUNTIME_SERVICES_CODE &&
+	    in->type != EFI_RUNTIME_SERVICES_DATA) {
+		pr_warn("Entry type should be RuntimeServiceCode/Data\n");
+		return false;
+	}
+
+	if (!(in->attribute & (EFI_MEMORY_RO | EFI_MEMORY_XP))) {
+		pr_warn("Entry attributes invalid: RO and XP bits both cleared\n");
+		return false;
+	}
+
+	if (PAGE_SIZE > EFI_PAGE_SIZE &&
+	    (!PAGE_ALIGNED(in->phys_addr) ||
+	     !PAGE_ALIGNED(in->num_pages << EFI_PAGE_SHIFT))) {
+		/*
+		 * Since arm64 may execute with page sizes of up to 64 KB, the
+		 * UEFI spec mandates that RuntimeServices memory regions must
+		 * be 64 KB aligned. We need to validate this here since we will
+		 * not be able to tighten permissions on such regions without
+		 * affecting adjacent regions.
+		 */
+		pr_warn("Entry address region misaligned\n");
+		return false;
+	}
+
+	for_each_efi_memory_desc(md) {
+		u64 md_paddr = md->phys_addr;
+		u64 md_size = md->num_pages << EFI_PAGE_SHIFT;
+
+		if (!(md->attribute & EFI_MEMORY_RUNTIME))
+			continue;
+		if (md->virt_addr == 0) {
+			/* no virtual mapping has been installed by the stub */
+			break;
+		}
+
+		if (md_paddr > in_paddr || (in_paddr - md_paddr) >= md_size)
+			continue;
+
+		/*
+		 * This entry covers the start of @in, check whether
+		 * it covers the end as well.
+		 */
+		if (md_paddr + md_size < in_paddr + in_size) {
+			pr_warn("Entry covers multiple EFI memory map regions\n");
+			return false;
+		}
+
+		if (md->type != in->type) {
+			pr_warn("Entry type deviates from EFI memory map region type\n");
+			return false;
+		}
+
+		out->virt_addr = in_paddr + (md->virt_addr - md_paddr);
+
+		return true;
+	}
+
+	pr_warn("No matching entry found in the EFI memory map\n");
+	return false;
+}
+
+/*
+ * To be called after the EFI page tables have been populated. If a memory
+ * attributes table is available, its contents will be used to update the
+ * mappings with tightened permissions as described by the table.
+ * This requires the UEFI memory map to have already been populated with
+ * virtual addresses.
+ */
+int __init efi_memattr_apply_permissions(struct mm_struct *mm,
+					 efi_memattr_perm_setter fn)
+{
+	efi_memory_attributes_table_t *tbl;
+	int i, ret;
+
+	if (tbl_size <= sizeof(*tbl))
+		return 0;
+
+	/*
+	 * We need the EFI memory map to be set up so we can use it to
+	 * look up the virtual addresses of all entries in the EFI
+	 * Memory Attributes table. If it isn't available, this
+	 * function should not be called.
+	 */
+	if (WARN_ON(!efi_enabled(EFI_MEMMAP)))
+		return 0;
+
+	tbl = memremap(efi.mem_attr_table, tbl_size, MEMREMAP_WB);
+	if (!tbl) {
+		pr_err("Failed to map EFI Memory Attributes table @ 0x%lx\n",
+		       efi.mem_attr_table);
+		return -ENOMEM;
+	}
+
+	if (efi_enabled(EFI_DBG))
+		pr_info("Processing EFI Memory Attributes table:\n");
+
+	for (i = ret = 0; ret == 0 && i < tbl->num_entries; i++) {
+		efi_memory_desc_t md;
+		unsigned long size;
+		bool valid;
+		char buf[64];
+
+		valid = entry_is_valid((void *)tbl->entry + i * tbl->desc_size,
+				       &md);
+		size = md.num_pages << EFI_PAGE_SHIFT;
+		if (efi_enabled(EFI_DBG) || !valid)
+			pr_info("%s 0x%012llx-0x%012llx %s\n",
+				valid ? "" : "!", md.phys_addr,
+				md.phys_addr + size - 1,
+				efi_md_typeattr_format(buf, sizeof(buf), &md));
+
+		if (valid) {
+			ret = fn(mm, &md);
+			if (ret)
+				pr_err("Error updating mappings, skipping subsequent md's\n");
+		}
+	}
+	memunmap(tbl);
+	return ret;
+}
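
For reference, a sketch of the shape an efi_memattr_perm_setter callback takes
(set_permissions() is hypothetical; the real arm64 and x86 callbacks walk the
page tables for the region and apply the computed permissions):

    static int __init set_permissions(struct mm_struct *mm,
                                      efi_memory_desc_t *md)
    {
            bool want_ro = md->attribute & EFI_MEMORY_RO;
            bool want_nx = md->attribute & EFI_MEMORY_XP;
            u64 size = md->num_pages << EFI_PAGE_SHIFT;

            /* Update the mappings of [md->virt_addr, md->virt_addr + size)
             * in @mm here: drop write access if want_ro, drop execute
             * permission if want_nx. */
            (void)mm; (void)size; (void)want_ro; (void)want_nx;
            return 0;
    }
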
diff --git a/drivers/firmware/efi/memmap.c b/drivers/firmware/efi/memmap.c
new file mode 100644
index 0000000..1907db2
--- /dev/null
+++ b/drivers/firmware/efi/memmap.c
@@ -0,0 +1,345 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Common EFI memory map functions.
+ */
+
+#define pr_fmt(fmt) "efi: " fmt
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/efi.h>
+#include <linux/io.h>
+#include <asm/early_ioremap.h>
+#include <linux/memblock.h>
+#include <linux/slab.h>
+
+static phys_addr_t __init __efi_memmap_alloc_early(unsigned long size)
+{
+	return memblock_alloc(size, 0);
+}
+
+static phys_addr_t __init __efi_memmap_alloc_late(unsigned long size)
+{
+	unsigned int order = get_order(size);
+	struct page *p = alloc_pages(GFP_KERNEL, order);
+
+	if (!p)
+		return 0;
+
+	return PFN_PHYS(page_to_pfn(p));
+}
+
+/**
+ * efi_memmap_alloc - Allocate memory for the EFI memory map
+ * @num_entries: Number of entries in the allocated map.
+ *
+ * Depending on whether mm_init() has already been invoked or not,
+ * either memblock or "normal" page allocation is used.
+ *
+ * Returns the physical address of the allocated memory map on
+ * success, zero on failure.
+ */
+phys_addr_t __init efi_memmap_alloc(unsigned int num_entries)
+{
+	unsigned long size = num_entries * efi.memmap.desc_size;
+
+	if (slab_is_available())
+		return __efi_memmap_alloc_late(size);
+
+	return __efi_memmap_alloc_early(size);
+}
+
+/**
+ * __efi_memmap_init - Common code for mapping the EFI memory map
+ * @data: EFI memory map data
+ * @late: Use early or late mapping function?
+ *
+ * This function takes care of figuring out which function to use to
+ * map the EFI memory map in efi.memmap based on how far into the boot
+ * we are.
+ *
+ * During bootup @late should be %false since we only have access to
+ * the early_memremap*() functions as the vmalloc space hasn't been
+ * set up yet. Once the kernel is fully booted we can fall back to the
+ * more robust memremap*() API.
+ *
+ * Returns zero on success, a negative error code on failure.
+ */
+static int __init
+__efi_memmap_init(struct efi_memory_map_data *data, bool late)
+{
+	struct efi_memory_map map;
+	phys_addr_t phys_map;
+
+	if (efi_enabled(EFI_PARAVIRT))
+		return 0;
+
+	phys_map = data->phys_map;
+
+	if (late)
+		map.map = memremap(phys_map, data->size, MEMREMAP_WB);
+	else
+		map.map = early_memremap(phys_map, data->size);
+
+	if (!map.map) {
+		pr_err("Could not map the memory map!\n");
+		return -ENOMEM;
+	}
+
+	map.phys_map = data->phys_map;
+	map.nr_map = data->size / data->desc_size;
+	map.map_end = map.map + data->size;
+
+	map.desc_version = data->desc_version;
+	map.desc_size = data->desc_size;
+	map.late = late;
+
+	set_bit(EFI_MEMMAP, &efi.flags);
+
+	efi.memmap = map;
+
+	return 0;
+}
+
+/**
+ * efi_memmap_init_early - Map the EFI memory map data structure
+ * @data: EFI memory map data
+ *
+ * Use early_memremap() to map the passed in EFI memory map and assign
+ * it to efi.memmap.
+ */
+int __init efi_memmap_init_early(struct efi_memory_map_data *data)
+{
+	/* Cannot go backwards */
+	WARN_ON(efi.memmap.late);
+
+	return __efi_memmap_init(data, false);
+}
+
+void __init efi_memmap_unmap(void)
+{
+	if (!efi_enabled(EFI_MEMMAP))
+		return;
+
+	if (!efi.memmap.late) {
+		unsigned long size;
+
+		size = efi.memmap.desc_size * efi.memmap.nr_map;
+		early_memunmap(efi.memmap.map, size);
+	} else {
+		memunmap(efi.memmap.map);
+	}
+
+	efi.memmap.map = NULL;
+	clear_bit(EFI_MEMMAP, &efi.flags);
+}
+
+/**
+ * efi_memmap_init_late - Map efi.memmap with memremap()
+ * @addr: Physical address of the new EFI memory map
+ * @size: Size in bytes of the new EFI memory map
+ *
+ * Set up a mapping of the EFI memory map using memremap(). This
+ * function should only be called once the vmalloc space has been
+ * set up and is therefore not suitable for calling during early EFI
+ * initialisation, e.g. in efi_init(). Additionally, it expects
+ * efi_memmap_init_early() to have already been called.
+ *
+ * The reason there are two EFI memmap initialisation functions
+ * (efi_memmap_init_early() and this late version) is because the
+ * early EFI memmap should be explicitly unmapped once EFI
+ * initialisation is complete as the fixmap space used to map the EFI
+ * memmap (via early_memremap()) is a scarce resource.
+ *
+ * This late mapping is intended to persist for the duration of
+ * runtime so that things like efi_mem_desc_lookup() and
+ * efi_mem_attributes() always work.
+ *
+ * Returns zero on success, a negative error code on failure.
+ */
+int __init efi_memmap_init_late(phys_addr_t addr, unsigned long size)
+{
+	struct efi_memory_map_data data = {
+		.phys_map = addr,
+		.size = size,
+	};
+
+	/* Did we forget to unmap the early EFI memmap? */
+	WARN_ON(efi.memmap.map);
+
+	/* Were we already called? */
+	WARN_ON(efi.memmap.late);
+
+	/*
+	 * It makes no sense to allow callers to register different
+	 * values for the following fields. Copy them out of the
+	 * existing early EFI memmap.
+	 */
+	data.desc_version = efi.memmap.desc_version;
+	data.desc_size = efi.memmap.desc_size;
+
+	return __efi_memmap_init(&data, true);
+}
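+
+/*
+ * Illustrative boot-time sequence, pieced together from the comments
+ * above (a sketch, not a guarantee of what every caller does):
+ *
+ *	efi_memmap_init_early(&data);     <- scarce fixmap-based mapping
+ *	... early EFI initialisation ...
+ *	efi_memmap_unmap();               <- release the fixmap space
+ *	efi_memmap_init_late(addr, size); <- persistent memremap() mapping
+ */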
+
+/**
+ * efi_memmap_install - Install a new EFI memory map in efi.memmap
+ * @addr: Physical address of the memory map
+ * @nr_map: Number of entries in the memory map
+ *
+ * Unlike efi_memmap_init_*(), this function does not allow the caller
+ * to switch from early to late mappings. It simply uses the existing
+ * mapping function and installs the new memmap.
+ *
+ * Returns zero on success, a negative error code on failure.
+ */
+int __init efi_memmap_install(phys_addr_t addr, unsigned int nr_map)
+{
+	struct efi_memory_map_data data;
+
+	efi_memmap_unmap();
+
+	data.phys_map = addr;
+	data.size = efi.memmap.desc_size * nr_map;
+	data.desc_version = efi.memmap.desc_version;
+	data.desc_size = efi.memmap.desc_size;
+
+	return __efi_memmap_init(&data, efi.memmap.late);
+}
+
+/**
+ * efi_memmap_split_count - Count number of additional EFI memmap entries
+ * @md: EFI memory descriptor to split
+ * @range: Address range (start, end) to split around
+ *
+ * Returns the number of additional EFI memmap entries required to
+ * accommodate @range.
+ */
+int __init efi_memmap_split_count(efi_memory_desc_t *md, struct range *range)
+{
+	u64 m_start, m_end;
+	u64 start, end;
+	int count = 0;
+
+	start = md->phys_addr;
+	end = start + (md->num_pages << EFI_PAGE_SHIFT) - 1;
+
+	/* modifying range */
+	m_start = range->start;
+	m_end = range->end;
+
+	if (m_start <= start) {
+		/* split into 2 parts */
+		if (start < m_end && m_end < end)
+			count++;
+	}
+
+	if (start < m_start && m_start < end) {
+		/* split into 3 parts */
+		if (m_end < end)
+			count += 2;
+		/* split into 2 parts */
+		if (end <= m_end)
+			count++;
+	}
+
+	return count;
+}
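+
+/*
+ * Worked example (illustrative): for a descriptor covering
+ * [0x1000, 0x8fff] and a range of [0x3000, 0x4fff], the range lies
+ * strictly inside the descriptor, so it splits into three parts and
+ * two additional entries are needed (count == 2). If the range instead
+ * starts at 0x1000 and ends at 0x4fff, only the tail beyond the range
+ * remains separate and a single additional entry suffices (count == 1).
+ */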
+
+/**
+ * efi_memmap_insert - Insert a memory region in an EFI memmap
+ * @old_memmap: The existing EFI memory map structure
+ * @buf: Address of buffer to store new map
+ * @mem: Memory map entry to insert
+ *
+ * It is suggested that you call efi_memmap_split_count() first
+ * to see how large @buf needs to be.
+ */
+void __init efi_memmap_insert(struct efi_memory_map *old_memmap, void *buf,
+			      struct efi_mem_range *mem)
+{
+	u64 m_start, m_end, m_attr;
+	efi_memory_desc_t *md;
+	u64 start, end;
+	void *old, *new;
+
+	/* modifying range */
+	m_start = mem->range.start;
+	m_end = mem->range.end;
+	m_attr = mem->attribute;
+
+	/*
+	 * The EFI memory map deals with regions in EFI_PAGE_SIZE
+	 * units. Ensure that the region described by 'mem' is aligned
+	 * correctly.
+	 */
+	if (!IS_ALIGNED(m_start, EFI_PAGE_SIZE) ||
+	    !IS_ALIGNED(m_end + 1, EFI_PAGE_SIZE)) {
+		WARN_ON(1);
+		return;
+	}
+
+	for (old = old_memmap->map, new = buf;
+	     old < old_memmap->map_end;
+	     old += old_memmap->desc_size, new += old_memmap->desc_size) {
+
+		/* copy original EFI memory descriptor */
+		memcpy(new, old, old_memmap->desc_size);
+		md = new;
+		start = md->phys_addr;
+		end = md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT) - 1;
+
+		if (m_start <= start && end <= m_end)
+			md->attribute |= m_attr;
+
+		if (m_start <= start &&
+		    (start < m_end && m_end < end)) {
+			/* first part */
+			md->attribute |= m_attr;
+			md->num_pages = (m_end - md->phys_addr + 1) >>
+				EFI_PAGE_SHIFT;
+			/* latter part */
+			new += old_memmap->desc_size;
+			memcpy(new, old, old_memmap->desc_size);
+			md = new;
+			md->phys_addr = m_end + 1;
+			md->num_pages = (end - md->phys_addr + 1) >>
+				EFI_PAGE_SHIFT;
+		}
+
+		if ((start < m_start && m_start < end) && m_end < end) {
+			/* first part */
+			md->num_pages = (m_start - md->phys_addr) >>
+				EFI_PAGE_SHIFT;
+			/* middle part */
+			new += old_memmap->desc_size;
+			memcpy(new, old, old_memmap->desc_size);
+			md = new;
+			md->attribute |= m_attr;
+			md->phys_addr = m_start;
+			md->num_pages = (m_end - m_start + 1) >>
+				EFI_PAGE_SHIFT;
+			/* last part */
+			new += old_memmap->desc_size;
+			memcpy(new, old, old_memmap->desc_size);
+			md = new;
+			md->phys_addr = m_end + 1;
+			md->num_pages = (end - m_end) >>
+				EFI_PAGE_SHIFT;
+		}
+
+		if ((start < m_start && m_start < end) &&
+		    (end <= m_end)) {
+			/* first part */
+			md->num_pages = (m_start - md->phys_addr) >>
+				EFI_PAGE_SHIFT;
+			/* latter part */
+			new += old_memmap->desc_size;
+			memcpy(new, old, old_memmap->desc_size);
+			md = new;
+			md->phys_addr = m_start;
+			md->num_pages = (end - md->phys_addr + 1) >>
+				EFI_PAGE_SHIFT;
+			md->attribute |= m_attr;
+		}
+	}
+}
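+
+/*
+ * Hypothetical sketch of how the helpers above are meant to be
+ * combined (error handling and the exact mapping calls elided; see
+ * the arch code for real callers):
+ *
+ *	int extra = efi_memmap_split_count(md, &mem.range);
+ *	phys_addr_t new_phys = efi_memmap_alloc(efi.memmap.nr_map + extra);
+ *	void *new = memremap(new_phys, new_size, MEMREMAP_WB);
+ *	efi_memmap_insert(&efi.memmap, new, &mem);
+ *	efi_memmap_install(new_phys, efi.memmap.nr_map + extra);
+ */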
diff --git a/drivers/firmware/efi/reboot.c b/drivers/firmware/efi/reboot.c
new file mode 100644
index 0000000..7effff9
--- /dev/null
+++ b/drivers/firmware/efi/reboot.c
@@ -0,0 +1,77 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2014 Intel Corporation; author Matt Fleming
+ * Copyright (c) 2014 Red Hat, Inc., Mark Salter <msalter@redhat.com>
+ */
+#include <linux/efi.h>
+#include <linux/reboot.h>
+
+static void (*orig_pm_power_off)(void);
+
+int efi_reboot_quirk_mode = -1;
+
+void efi_reboot(enum reboot_mode reboot_mode, const char *__unused)
+{
+	const char *str[] = { "cold", "warm", "shutdown", "platform" };
+	int efi_mode, cap_reset_mode;
+
+	if (!efi_enabled(EFI_RUNTIME_SERVICES))
+		return;
+
+	switch (reboot_mode) {
+	case REBOOT_WARM:
+	case REBOOT_SOFT:
+		efi_mode = EFI_RESET_WARM;
+		break;
+	default:
+		efi_mode = EFI_RESET_COLD;
+		break;
+	}
+
+	/*
+	 * If a quirk forced an EFI reset mode, always use that.
+	 */
+	if (efi_reboot_quirk_mode != -1)
+		efi_mode = efi_reboot_quirk_mode;
+
+	if (efi_capsule_pending(&cap_reset_mode)) {
+		if (efi_mode != cap_reset_mode)
+			printk(KERN_CRIT "efi: %s reset requested but pending "
+			       "capsule update requires %s reset... Performing "
+			       "%s reset.\n", str[efi_mode], str[cap_reset_mode],
+			       str[cap_reset_mode]);
+		efi_mode = cap_reset_mode;
+	}
+
+	efi.reset_system(efi_mode, EFI_SUCCESS, 0, NULL);
+}
+
+bool __weak efi_poweroff_required(void)
+{
+	return false;
+}
+
+static void efi_power_off(void)
+{
+	efi.reset_system(EFI_RESET_SHUTDOWN, EFI_SUCCESS, 0, NULL);
+	/*
+	 * The above call should not return, if it does fall back to
+	 * the original power off method (typically ACPI poweroff).
+	 */
+	if (orig_pm_power_off)
+		orig_pm_power_off();
+}
+
+static int __init efi_shutdown_init(void)
+{
+	if (!efi_enabled(EFI_RUNTIME_SERVICES))
+		return -ENODEV;
+
+	if (efi_poweroff_required()) {
+		orig_pm_power_off = pm_power_off;
+		pm_power_off = efi_power_off;
+	}
+
+	return 0;
+}
+late_initcall(efi_shutdown_init);
diff --git a/drivers/firmware/efi/runtime-map.c b/drivers/firmware/efi/runtime-map.c
new file mode 100644
index 0000000..84a11d0
--- /dev/null
+++ b/drivers/firmware/efi/runtime-map.c
@@ -0,0 +1,193 @@
+/*
+ * linux/drivers/efi/runtime-map.c
+ * Copyright (C) 2013 Red Hat, Inc., Dave Young <dyoung@redhat.com>
+ *
+ * This file is released under the GPLv2.
+ */
+
+#include <linux/string.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/efi.h>
+#include <linux/slab.h>
+
+#include <asm/setup.h>
+
+struct efi_runtime_map_entry {
+	efi_memory_desc_t md;
+	struct kobject kobj;   /* kobject for each entry */
+};
+
+static struct efi_runtime_map_entry **map_entries;
+
+struct map_attribute {
+	struct attribute attr;
+	ssize_t (*show)(struct efi_runtime_map_entry *entry, char *buf);
+};
+
+static inline struct map_attribute *to_map_attr(struct attribute *attr)
+{
+	return container_of(attr, struct map_attribute, attr);
+}
+
+static ssize_t type_show(struct efi_runtime_map_entry *entry, char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "0x%x\n", entry->md.type);
+}
+
+#define EFI_RUNTIME_FIELD(var) entry->md.var
+
+#define EFI_RUNTIME_U64_ATTR_SHOW(name) \
+static ssize_t name##_show(struct efi_runtime_map_entry *entry, char *buf) \
+{ \
+	return snprintf(buf, PAGE_SIZE, "0x%llx\n", EFI_RUNTIME_FIELD(name)); \
+}
+
+EFI_RUNTIME_U64_ATTR_SHOW(phys_addr);
+EFI_RUNTIME_U64_ATTR_SHOW(virt_addr);
+EFI_RUNTIME_U64_ATTR_SHOW(num_pages);
+EFI_RUNTIME_U64_ATTR_SHOW(attribute);
+
+static inline struct efi_runtime_map_entry *to_map_entry(struct kobject *kobj)
+{
+	return container_of(kobj, struct efi_runtime_map_entry, kobj);
+}
+
+static ssize_t map_attr_show(struct kobject *kobj, struct attribute *attr,
+			      char *buf)
+{
+	struct efi_runtime_map_entry *entry = to_map_entry(kobj);
+	struct map_attribute *map_attr = to_map_attr(attr);
+
+	return map_attr->show(entry, buf);
+}
+
+static struct map_attribute map_type_attr = __ATTR_RO_MODE(type, 0400);
+static struct map_attribute map_phys_addr_attr = __ATTR_RO_MODE(phys_addr, 0400);
+static struct map_attribute map_virt_addr_attr = __ATTR_RO_MODE(virt_addr, 0400);
+static struct map_attribute map_num_pages_attr = __ATTR_RO_MODE(num_pages, 0400);
+static struct map_attribute map_attribute_attr = __ATTR_RO_MODE(attribute, 0400);
+
+/*
+ * These are default attributes that are added for every memmap entry.
+ */
+static struct attribute *def_attrs[] = {
+	&map_type_attr.attr,
+	&map_phys_addr_attr.attr,
+	&map_virt_addr_attr.attr,
+	&map_num_pages_attr.attr,
+	&map_attribute_attr.attr,
+	NULL
+};
+
+static const struct sysfs_ops map_attr_ops = {
+	.show = map_attr_show,
+};
+
+static void map_release(struct kobject *kobj)
+{
+	struct efi_runtime_map_entry *entry;
+
+	entry = to_map_entry(kobj);
+	kfree(entry);
+}
+
+static struct kobj_type __refdata map_ktype = {
+	.sysfs_ops	= &map_attr_ops,
+	.default_attrs	= def_attrs,
+	.release	= map_release,
+};
+
+static struct kset *map_kset;
+
+static struct efi_runtime_map_entry *
+add_sysfs_runtime_map_entry(struct kobject *kobj, int nr,
+			    efi_memory_desc_t *md)
+{
+	int ret;
+	struct efi_runtime_map_entry *entry;
+
+	if (!map_kset) {
+		map_kset = kset_create_and_add("runtime-map", NULL, kobj);
+		if (!map_kset)
+			return ERR_PTR(-ENOMEM);
+	}
+
+	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
+	if (!entry) {
+		kset_unregister(map_kset);
+		map_kset = NULL;
+		return ERR_PTR(-ENOMEM);
+	}
+
+	memcpy(&entry->md, md, sizeof(efi_memory_desc_t));
+
+	kobject_init(&entry->kobj, &map_ktype);
+	entry->kobj.kset = map_kset;
+	ret = kobject_add(&entry->kobj, NULL, "%d", nr);
+	if (ret) {
+		kobject_put(&entry->kobj);
+		kset_unregister(map_kset);
+		map_kset = NULL;
+		return ERR_PTR(ret);
+	}
+
+	return entry;
+}
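+
+/*
+ * Each entry registered above appears as a numbered kobject in the
+ * "runtime-map" kset. Assuming @kobj is the EFI kobject (typically
+ * exposed at /sys/firmware/efi), the resulting layout looks like:
+ *
+ *	/sys/firmware/efi/runtime-map/0/type
+ *	/sys/firmware/efi/runtime-map/0/phys_addr
+ *	/sys/firmware/efi/runtime-map/0/virt_addr
+ *	/sys/firmware/efi/runtime-map/0/num_pages
+ *	/sys/firmware/efi/runtime-map/0/attribute
+ */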
+
+int efi_get_runtime_map_size(void)
+{
+	return efi.memmap.nr_map * efi.memmap.desc_size;
+}
+
+int efi_get_runtime_map_desc_size(void)
+{
+	return efi.memmap.desc_size;
+}
+
+int efi_runtime_map_copy(void *buf, size_t bufsz)
+{
+	size_t sz = efi_get_runtime_map_size();
+
+	if (sz > bufsz)
+		sz = bufsz;
+
+	memcpy(buf, efi.memmap.map, sz);
+	return 0;
+}
+
+int __init efi_runtime_map_init(struct kobject *efi_kobj)
+{
+	int i, j, ret = 0;
+	struct efi_runtime_map_entry *entry;
+	efi_memory_desc_t *md;
+
+	if (!efi_enabled(EFI_MEMMAP))
+		return 0;
+
+	map_entries = kcalloc(efi.memmap.nr_map, sizeof(entry), GFP_KERNEL);
+	if (!map_entries) {
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	i = 0;
+	for_each_efi_memory_desc(md) {
+		entry = add_sysfs_runtime_map_entry(efi_kobj, i, md);
+		if (IS_ERR(entry)) {
+			ret = PTR_ERR(entry);
+			goto out_add_entry;
+		}
+		*(map_entries + i++) = entry;
+	}
+
+	return 0;
+out_add_entry:
+	for (j = i - 1; j >= 0; j--) {
+		entry = *(map_entries + j);
+		kobject_put(&entry->kobj);
+	}
+out:
+	return ret;
+}
diff --git a/drivers/firmware/efi/runtime-wrappers.c b/drivers/firmware/efi/runtime-wrappers.c
new file mode 100644
index 0000000..aa66cbf
--- /dev/null
+++ b/drivers/firmware/efi/runtime-wrappers.c
@@ -0,0 +1,486 @@
+/*
+ * runtime-wrappers.c - Runtime Services function call wrappers
+ *
+ * Implementation summary:
+ * -----------------------
+ * 1. When a user/kernel thread requests execution of an
+ * efi_runtime_service(), the work is enqueued to efi_rts_wq.
+ * 2. The caller thread waits until the work is finished because it
+ * depends on the returned status and the effects of executing the
+ * efi_runtime_service(), e.g. get_variable() and get_next_variable().
+ *
+ * Copyright (C) 2014 Linaro Ltd. <ard.biesheuvel@linaro.org>
+ *
+ * Split off from arch/x86/platform/efi/efi.c
+ *
+ * Copyright (C) 1999 VA Linux Systems
+ * Copyright (C) 1999 Walt Drummond <drummond@valinux.com>
+ * Copyright (C) 1999-2002 Hewlett-Packard Co.
+ * Copyright (C) 2005-2008 Intel Co.
+ * Copyright (C) 2013 SuSE Labs
+ *
+ * This file is released under the GPLv2.
+ */
+
+#define pr_fmt(fmt)	"efi: " fmt
+
+#include <linux/bug.h>
+#include <linux/efi.h>
+#include <linux/irqflags.h>
+#include <linux/mutex.h>
+#include <linux/semaphore.h>
+#include <linux/stringify.h>
+#include <linux/workqueue.h>
+#include <linux/completion.h>
+
+#include <asm/efi.h>
+
+/*
+ * Wrap around the new efi_call_virt_generic() macros so that the
+ * code doesn't get too cluttered:
+ */
+#define efi_call_virt(f, args...)   \
+	efi_call_virt_pointer(efi.systab->runtime, f, args)
+#define __efi_call_virt(f, args...) \
+	__efi_call_virt_pointer(efi.systab->runtime, f, args)
+
+/* efi_runtime_service() function identifiers */
+enum efi_rts_ids {
+	GET_TIME,
+	SET_TIME,
+	GET_WAKEUP_TIME,
+	SET_WAKEUP_TIME,
+	GET_VARIABLE,
+	GET_NEXT_VARIABLE,
+	SET_VARIABLE,
+	QUERY_VARIABLE_INFO,
+	GET_NEXT_HIGH_MONO_COUNT,
+	UPDATE_CAPSULE,
+	QUERY_CAPSULE_CAPS,
+};
+
+/*
+ * efi_runtime_work:	Details of EFI Runtime Service work
+ * @arg<1-5>:		EFI Runtime Service function arguments
+ * @status:		Status of executing EFI Runtime Service
+ * @efi_rts_id:		EFI Runtime Service function identifier
+ * @efi_rts_comp:	Struct used for handling completions
+ */
+struct efi_runtime_work {
+	void *arg1;
+	void *arg2;
+	void *arg3;
+	void *arg4;
+	void *arg5;
+	efi_status_t status;
+	struct work_struct work;
+	enum efi_rts_ids efi_rts_id;
+	struct completion efi_rts_comp;
+};
+
+/*
+ * efi_queue_work:	Queue efi_runtime_service() and wait until it's done
+ * @rts:		efi_runtime_service() function identifier
+ * @rts_arg<1-5>:	efi_runtime_service() function arguments
+ *
+ * Accesses to efi_runtime_services() are serialized by a binary
+ * semaphore (efi_runtime_lock) and caller waits until the work is
+ * finished, hence _only_ one work is queued at a time and the caller
+ * thread waits for completion.
+ */
+#define efi_queue_work(_rts, _arg1, _arg2, _arg3, _arg4, _arg5)		\
+({									\
+	struct efi_runtime_work efi_rts_work;				\
+	efi_rts_work.status = EFI_ABORTED;				\
+									\
+	init_completion(&efi_rts_work.efi_rts_comp);			\
+	INIT_WORK_ONSTACK(&efi_rts_work.work, efi_call_rts);		\
+	efi_rts_work.arg1 = _arg1;					\
+	efi_rts_work.arg2 = _arg2;					\
+	efi_rts_work.arg3 = _arg3;					\
+	efi_rts_work.arg4 = _arg4;					\
+	efi_rts_work.arg5 = _arg5;					\
+	efi_rts_work.efi_rts_id = _rts;					\
+									\
+	/*								\
+	 * queue_work() returns 0 if the work was already on the queue; \
+	 * _ideally_ this should never happen.                          \
+	 */								\
+	if (queue_work(efi_rts_wq, &efi_rts_work.work))			\
+		wait_for_completion(&efi_rts_work.efi_rts_comp);	\
+	else								\
+		pr_err("Failed to queue work to efi_rts_wq.\n");	\
+									\
+	efi_rts_work.status;						\
+})
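+
+/*
+ * Every virt_efi_*() wrapper below follows the same pattern: take
+ * efi_runtime_lock, funnel the call through efi_queue_work() (or call
+ * the firmware directly in the *_nonblocking variants), then release
+ * the lock. See virt_efi_get_time() for the simplest instance.
+ */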
+
+void efi_call_virt_check_flags(unsigned long flags, const char *call)
+{
+	unsigned long cur_flags, mismatch;
+
+	local_save_flags(cur_flags);
+
+	mismatch = flags ^ cur_flags;
+	if (!WARN_ON_ONCE(mismatch & ARCH_EFI_IRQ_FLAGS_MASK))
+		return;
+
+	add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_NOW_UNRELIABLE);
+	pr_err_ratelimited(FW_BUG "IRQ flags corrupted (0x%08lx=>0x%08lx) by EFI %s\n",
+			   flags, cur_flags, call);
+	local_irq_restore(flags);
+}
+
+/*
+ * According to section 7.1 of the UEFI spec, Runtime Services are not fully
+ * reentrant, and there are particular combinations of calls that need to be
+ * serialized. (source: UEFI Specification v2.4A)
+ *
+ * Table 31. Rules for Reentry Into Runtime Services
+ * +------------------------------------+-------------------------------+
+ * | If previous call is busy in	| Forbidden to call		|
+ * +------------------------------------+-------------------------------+
+ * | Any				| SetVirtualAddressMap()	|
+ * +------------------------------------+-------------------------------+
+ * | ConvertPointer()			| ConvertPointer()		|
+ * +------------------------------------+-------------------------------+
+ * | SetVariable()			| ResetSystem()			|
+ * | UpdateCapsule()			|				|
+ * | SetTime()				|				|
+ * | SetWakeupTime()			|				|
+ * | GetNextHighMonotonicCount()	|				|
+ * +------------------------------------+-------------------------------+
+ * | GetVariable()			| GetVariable()			|
+ * | GetNextVariableName()		| GetNextVariableName()		|
+ * | SetVariable()			| SetVariable()			|
+ * | QueryVariableInfo()		| QueryVariableInfo()		|
+ * | UpdateCapsule()			| UpdateCapsule()		|
+ * | QueryCapsuleCapabilities()		| QueryCapsuleCapabilities()	|
+ * | GetNextHighMonotonicCount()	| GetNextHighMonotonicCount()	|
+ * +------------------------------------+-------------------------------+
+ * | GetTime()				| GetTime()			|
+ * | SetTime()				| SetTime()			|
+ * | GetWakeupTime()			| GetWakeupTime()		|
+ * | SetWakeupTime()			| SetWakeupTime()		|
+ * +------------------------------------+-------------------------------+
+ *
+ * Due to the fact that the EFI pstore may write to the variable store in
+ * interrupt context, we need to use a lock for at least the groups that
+ * contain SetVariable() and QueryVariableInfo(). That leaves little else, as
+ * none of the remaining functions are actually ever called at runtime.
+ * So let's just use a single lock to serialize all Runtime Services calls.
+ */
+static DEFINE_SEMAPHORE(efi_runtime_lock);
+
+/*
+ * Calls the appropriate efi_runtime_service() with the appropriate
+ * arguments.
+ *
+ * How efi_call_rts() interprets the arguments in efi_runtime_work:
+ * 1. If the argument was a pointer, cast it from void pointer back to
+ * the original pointer type.
+ * 2. If the argument was a value, cast it from void pointer to a
+ * pointer of the original type and dereference it.
+ */
+static void efi_call_rts(struct work_struct *work)
+{
+	struct efi_runtime_work *efi_rts_work;
+	void *arg1, *arg2, *arg3, *arg4, *arg5;
+	efi_status_t status = EFI_NOT_FOUND;
+
+	efi_rts_work = container_of(work, struct efi_runtime_work, work);
+	arg1 = efi_rts_work->arg1;
+	arg2 = efi_rts_work->arg2;
+	arg3 = efi_rts_work->arg3;
+	arg4 = efi_rts_work->arg4;
+	arg5 = efi_rts_work->arg5;
+
+	switch (efi_rts_work->efi_rts_id) {
+	case GET_TIME:
+		status = efi_call_virt(get_time, (efi_time_t *)arg1,
+				       (efi_time_cap_t *)arg2);
+		break;
+	case SET_TIME:
+		status = efi_call_virt(set_time, (efi_time_t *)arg1);
+		break;
+	case GET_WAKEUP_TIME:
+		status = efi_call_virt(get_wakeup_time, (efi_bool_t *)arg1,
+				       (efi_bool_t *)arg2, (efi_time_t *)arg3);
+		break;
+	case SET_WAKEUP_TIME:
+		status = efi_call_virt(set_wakeup_time, *(efi_bool_t *)arg1,
+				       (efi_time_t *)arg2);
+		break;
+	case GET_VARIABLE:
+		status = efi_call_virt(get_variable, (efi_char16_t *)arg1,
+				       (efi_guid_t *)arg2, (u32 *)arg3,
+				       (unsigned long *)arg4, (void *)arg5);
+		break;
+	case GET_NEXT_VARIABLE:
+		status = efi_call_virt(get_next_variable, (unsigned long *)arg1,
+				       (efi_char16_t *)arg2,
+				       (efi_guid_t *)arg3);
+		break;
+	case SET_VARIABLE:
+		status = efi_call_virt(set_variable, (efi_char16_t *)arg1,
+				       (efi_guid_t *)arg2, *(u32 *)arg3,
+				       *(unsigned long *)arg4, (void *)arg5);
+		break;
+	case QUERY_VARIABLE_INFO:
+		status = efi_call_virt(query_variable_info, *(u32 *)arg1,
+				       (u64 *)arg2, (u64 *)arg3, (u64 *)arg4);
+		break;
+	case GET_NEXT_HIGH_MONO_COUNT:
+		status = efi_call_virt(get_next_high_mono_count, (u32 *)arg1);
+		break;
+	case UPDATE_CAPSULE:
+		status = efi_call_virt(update_capsule,
+				       (efi_capsule_header_t **)arg1,
+				       *(unsigned long *)arg2,
+				       *(unsigned long *)arg3);
+		break;
+	case QUERY_CAPSULE_CAPS:
+		status = efi_call_virt(query_capsule_caps,
+				       (efi_capsule_header_t **)arg1,
+				       *(unsigned long *)arg2, (u64 *)arg3,
+				       (int *)arg4);
+		break;
+	default:
+		/*
+		 * Ideally, we should never reach here because a caller of this
+		 * function should have put the right efi_runtime_service()
+		 * function identifier into efi_rts_work->efi_rts_id
+		 */
+		pr_err("Requested execution of an invalid EFI Runtime Service.\n");
+	}
+	efi_rts_work->status = status;
+	complete(&efi_rts_work->efi_rts_comp);
+}
+
+static efi_status_t virt_efi_get_time(efi_time_t *tm, efi_time_cap_t *tc)
+{
+	efi_status_t status;
+
+	if (down_interruptible(&efi_runtime_lock))
+		return EFI_ABORTED;
+	status = efi_queue_work(GET_TIME, tm, tc, NULL, NULL, NULL);
+	up(&efi_runtime_lock);
+	return status;
+}
+
+static efi_status_t virt_efi_set_time(efi_time_t *tm)
+{
+	efi_status_t status;
+
+	if (down_interruptible(&efi_runtime_lock))
+		return EFI_ABORTED;
+	status = efi_queue_work(SET_TIME, tm, NULL, NULL, NULL, NULL);
+	up(&efi_runtime_lock);
+	return status;
+}
+
+static efi_status_t virt_efi_get_wakeup_time(efi_bool_t *enabled,
+					     efi_bool_t *pending,
+					     efi_time_t *tm)
+{
+	efi_status_t status;
+
+	if (down_interruptible(&efi_runtime_lock))
+		return EFI_ABORTED;
+	status = efi_queue_work(GET_WAKEUP_TIME, enabled, pending, tm, NULL,
+				NULL);
+	up(&efi_runtime_lock);
+	return status;
+}
+
+static efi_status_t virt_efi_set_wakeup_time(efi_bool_t enabled, efi_time_t *tm)
+{
+	efi_status_t status;
+
+	if (down_interruptible(&efi_runtime_lock))
+		return EFI_ABORTED;
+	status = efi_queue_work(SET_WAKEUP_TIME, &enabled, tm, NULL, NULL,
+				NULL);
+	up(&efi_runtime_lock);
+	return status;
+}
+
+static efi_status_t virt_efi_get_variable(efi_char16_t *name,
+					  efi_guid_t *vendor,
+					  u32 *attr,
+					  unsigned long *data_size,
+					  void *data)
+{
+	efi_status_t status;
+
+	if (down_interruptible(&efi_runtime_lock))
+		return EFI_ABORTED;
+	status = efi_queue_work(GET_VARIABLE, name, vendor, attr, data_size,
+				data);
+	up(&efi_runtime_lock);
+	return status;
+}
+
+static efi_status_t virt_efi_get_next_variable(unsigned long *name_size,
+					       efi_char16_t *name,
+					       efi_guid_t *vendor)
+{
+	efi_status_t status;
+
+	if (down_interruptible(&efi_runtime_lock))
+		return EFI_ABORTED;
+	status = efi_queue_work(GET_NEXT_VARIABLE, name_size, name, vendor,
+				NULL, NULL);
+	up(&efi_runtime_lock);
+	return status;
+}
+
+static efi_status_t virt_efi_set_variable(efi_char16_t *name,
+					  efi_guid_t *vendor,
+					  u32 attr,
+					  unsigned long data_size,
+					  void *data)
+{
+	efi_status_t status;
+
+	if (down_interruptible(&efi_runtime_lock))
+		return EFI_ABORTED;
+	status = efi_queue_work(SET_VARIABLE, name, vendor, &attr, &data_size,
+				data);
+	up(&efi_runtime_lock);
+	return status;
+}
+
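+/*
+ * Nonblocking variant of the wrapper pattern above: take the lock with
+ * down_trylock() instead of a sleeping down_interruptible() and call
+ * the firmware directly rather than queueing work, so that callers
+ * which cannot sleep (e.g. the EFI pstore path mentioned in the
+ * reentrancy comment above) can still attempt the call.
+ */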
+static efi_status_t
+virt_efi_set_variable_nonblocking(efi_char16_t *name, efi_guid_t *vendor,
+				  u32 attr, unsigned long data_size,
+				  void *data)
+{
+	efi_status_t status;
+
+	if (down_trylock(&efi_runtime_lock))
+		return EFI_NOT_READY;
+
+	status = efi_call_virt(set_variable, name, vendor, attr, data_size,
+			       data);
+	up(&efi_runtime_lock);
+	return status;
+}
+
+
+static efi_status_t virt_efi_query_variable_info(u32 attr,
+						 u64 *storage_space,
+						 u64 *remaining_space,
+						 u64 *max_variable_size)
+{
+	efi_status_t status;
+
+	if (efi.runtime_version < EFI_2_00_SYSTEM_TABLE_REVISION)
+		return EFI_UNSUPPORTED;
+
+	if (down_interruptible(&efi_runtime_lock))
+		return EFI_ABORTED;
+	status = efi_queue_work(QUERY_VARIABLE_INFO, &attr, storage_space,
+				remaining_space, max_variable_size, NULL);
+	up(&efi_runtime_lock);
+	return status;
+}
+
+static efi_status_t
+virt_efi_query_variable_info_nonblocking(u32 attr,
+					 u64 *storage_space,
+					 u64 *remaining_space,
+					 u64 *max_variable_size)
+{
+	efi_status_t status;
+
+	if (efi.runtime_version < EFI_2_00_SYSTEM_TABLE_REVISION)
+		return EFI_UNSUPPORTED;
+
+	if (down_trylock(&efi_runtime_lock))
+		return EFI_NOT_READY;
+
+	status = efi_call_virt(query_variable_info, attr, storage_space,
+			       remaining_space, max_variable_size);
+	up(&efi_runtime_lock);
+	return status;
+}
+
+static efi_status_t virt_efi_get_next_high_mono_count(u32 *count)
+{
+	efi_status_t status;
+
+	if (down_interruptible(&efi_runtime_lock))
+		return EFI_ABORTED;
+	status = efi_queue_work(GET_NEXT_HIGH_MONO_COUNT, count, NULL, NULL,
+				NULL, NULL);
+	up(&efi_runtime_lock);
+	return status;
+}
+
+static void virt_efi_reset_system(int reset_type,
+				  efi_status_t status,
+				  unsigned long data_size,
+				  efi_char16_t *data)
+{
+	if (down_interruptible(&efi_runtime_lock)) {
+		pr_warn("failed to invoke the reset_system() runtime service:\n"
+			"could not get exclusive access to the firmware\n");
+		return;
+	}
+	__efi_call_virt(reset_system, reset_type, status, data_size, data);
+	up(&efi_runtime_lock);
+}
+
+static efi_status_t virt_efi_update_capsule(efi_capsule_header_t **capsules,
+					    unsigned long count,
+					    unsigned long sg_list)
+{
+	efi_status_t status;
+
+	if (efi.runtime_version < EFI_2_00_SYSTEM_TABLE_REVISION)
+		return EFI_UNSUPPORTED;
+
+	if (down_interruptible(&efi_runtime_lock))
+		return EFI_ABORTED;
+	status = efi_queue_work(UPDATE_CAPSULE, capsules, &count, &sg_list,
+				NULL, NULL);
+	up(&efi_runtime_lock);
+	return status;
+}
+
+static efi_status_t virt_efi_query_capsule_caps(efi_capsule_header_t **capsules,
+						unsigned long count,
+						u64 *max_size,
+						int *reset_type)
+{
+	efi_status_t status;
+
+	if (efi.runtime_version < EFI_2_00_SYSTEM_TABLE_REVISION)
+		return EFI_UNSUPPORTED;
+
+	if (down_interruptible(&efi_runtime_lock))
+		return EFI_ABORTED;
+	status = efi_queue_work(QUERY_CAPSULE_CAPS, capsules, &count,
+				max_size, reset_type, NULL);
+	up(&efi_runtime_lock);
+	return status;
+}
+
+void efi_native_runtime_setup(void)
+{
+	efi.get_time = virt_efi_get_time;
+	efi.set_time = virt_efi_set_time;
+	efi.get_wakeup_time = virt_efi_get_wakeup_time;
+	efi.set_wakeup_time = virt_efi_set_wakeup_time;
+	efi.get_variable = virt_efi_get_variable;
+	efi.get_next_variable = virt_efi_get_next_variable;
+	efi.set_variable = virt_efi_set_variable;
+	efi.set_variable_nonblocking = virt_efi_set_variable_nonblocking;
+	efi.get_next_high_mono_count = virt_efi_get_next_high_mono_count;
+	efi.reset_system = virt_efi_reset_system;
+	efi.query_variable_info = virt_efi_query_variable_info;
+	efi.query_variable_info_nonblocking = virt_efi_query_variable_info_nonblocking;
+	efi.update_capsule = virt_efi_update_capsule;
+	efi.query_capsule_caps = virt_efi_query_capsule_caps;
+}
diff --git a/drivers/firmware/efi/test/Makefile b/drivers/firmware/efi/test/Makefile
new file mode 100644
index 0000000..bcd4577
--- /dev/null
+++ b/drivers/firmware/efi/test/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_EFI_TEST)			+= efi_test.o
diff --git a/drivers/firmware/efi/test/efi_test.c b/drivers/firmware/efi/test/efi_test.c
new file mode 100644
index 0000000..41c48a1
--- /dev/null
+++ b/drivers/firmware/efi/test/efi_test.c
@@ -0,0 +1,742 @@
+/*
+ * EFI Test Driver for Runtime Services
+ *
+ * Copyright(C) 2012-2016 Canonical Ltd.
+ *
+ * This driver exports the EFI runtime services interfaces to userspace,
+ * which allows the UEFI runtime services provided by firmware to be
+ * used and tested.
+ *
+ */
+
+#include <linux/miscdevice.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/proc_fs.h>
+#include <linux/efi.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+
+#include "efi_test.h"
+
+MODULE_AUTHOR("Ivan Hu <ivan.hu@canonical.com>");
+MODULE_DESCRIPTION("EFI Test Driver");
+MODULE_LICENSE("GPL");
+
+/*
+ * Count the bytes in 'str', including the terminating NULL.
+ *
+ * Note this function returns the number of *bytes*, not the number of
+ * ucs2 characters.
+ */
+static inline size_t user_ucs2_strsize(efi_char16_t  __user *str)
+{
+	efi_char16_t *s = str, c;
+	size_t len;
+
+	if (!str)
+		return 0;
+
+	/* Include terminating NULL */
+	len = sizeof(efi_char16_t);
+
+	if (get_user(c, s++)) {
+		/* Can't read userspace memory for size */
+		return 0;
+	}
+
+	while (c != 0) {
+		if (get_user(c, s++)) {
+			/* Can't read userspace memory for size */
+			return 0;
+		}
+		len += sizeof(efi_char16_t);
+	}
+	return len;
+}
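+
+/*
+ * For example, user_ucs2_strsize() on the UCS-2 string L"Boot" yields
+ * 10: four 2-byte characters plus the 2-byte terminating NULL.
+ */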
+
+/*
+ * Allocate a buffer and copy a ucs2 string from user space into it.
+ */
+static inline int
+copy_ucs2_from_user_len(efi_char16_t **dst, efi_char16_t __user *src,
+			size_t len)
+{
+	efi_char16_t *buf;
+
+	if (!src) {
+		*dst = NULL;
+		return 0;
+	}
+
+	if (!access_ok(VERIFY_READ, src, 1))
+		return -EFAULT;
+
+	buf = memdup_user(src, len);
+	if (IS_ERR(buf)) {
+		*dst = NULL;
+		return PTR_ERR(buf);
+	}
+	*dst = buf;
+
+	return 0;
+}
+
+/*
+ * Count the bytes in 'str', including the terminating NULL.
+ *
+ * Just a wrapper around user_ucs2_strsize().
+ */
+static inline int
+get_ucs2_strsize_from_user(efi_char16_t __user *src, size_t *len)
+{
+	if (!access_ok(VERIFY_READ, src, 1))
+		return -EFAULT;
+
+	*len = user_ucs2_strsize(src);
+	if (*len == 0)
+		return -EFAULT;
+
+	return 0;
+}
+
+/*
+ * Calculate the required buffer allocation size and copy a ucs2 string
+ * from user space into it.
+ *
+ * This function differs from copy_ucs2_from_user_len() because it
+ * calculates the size of the buffer to allocate by taking the length of
+ * the string 'src'.
+ *
+ * If a non-zero value is returned, the caller MUST NOT access 'dst'.
+ *
+ * It is the caller's responsibility to free 'dst'.
+ */
+static inline int
+copy_ucs2_from_user(efi_char16_t **dst, efi_char16_t __user *src)
+{
+	size_t len;
+
+	if (!access_ok(VERIFY_READ, src, 1))
+		return -EFAULT;
+
+	len = user_ucs2_strsize(src);
+	if (len == 0)
+		return -EFAULT;
+	return copy_ucs2_from_user_len(dst, src, len);
+}
+
+/*
+ * Copy a ucs2 string to a user buffer.
+ *
+ * This function is a simple wrapper around copy_to_user() that does
+ * nothing if 'src' is NULL, which is useful for reducing the amount of
+ * NULL checking the caller has to do.
+ *
+ * 'len' specifies the number of bytes to copy.
+ */
+static inline int
+copy_ucs2_to_user_len(efi_char16_t __user *dst, efi_char16_t *src, size_t len)
+{
+	if (!src)
+		return 0;
+
+	if (!access_ok(VERIFY_WRITE, dst, 1))
+		return -EFAULT;
+
+	return copy_to_user(dst, src, len);
+}
+
+static long efi_runtime_get_variable(unsigned long arg)
+{
+	struct efi_getvariable __user *getvariable_user;
+	struct efi_getvariable getvariable;
+	unsigned long datasize = 0, prev_datasize, *dz;
+	efi_guid_t vendor_guid, *vd = NULL;
+	efi_status_t status;
+	efi_char16_t *name = NULL;
+	u32 attr, *at;
+	void *data = NULL;
+	int rv = 0;
+
+	getvariable_user = (struct efi_getvariable __user *)arg;
+
+	if (copy_from_user(&getvariable, getvariable_user,
+			   sizeof(getvariable)))
+		return -EFAULT;
+	if (getvariable.data_size &&
+	    get_user(datasize, getvariable.data_size))
+		return -EFAULT;
+	if (getvariable.vendor_guid) {
+		if (copy_from_user(&vendor_guid, getvariable.vendor_guid,
+					sizeof(vendor_guid)))
+			return -EFAULT;
+		vd = &vendor_guid;
+	}
+
+	if (getvariable.variable_name) {
+		rv = copy_ucs2_from_user(&name, getvariable.variable_name);
+		if (rv)
+			return rv;
+	}
+
+	at = getvariable.attributes ? &attr : NULL;
+	dz = getvariable.data_size ? &datasize : NULL;
+
+	if (getvariable.data_size && getvariable.data) {
+		data = kmalloc(datasize, GFP_KERNEL);
+		if (!data) {
+			kfree(name);
+			return -ENOMEM;
+		}
+	}
+
+	prev_datasize = datasize;
+	status = efi.get_variable(name, vd, at, dz, data);
+	kfree(name);
+
+	if (put_user(status, getvariable.status)) {
+		rv = -EFAULT;
+		goto out;
+	}
+
+	if (status != EFI_SUCCESS) {
+		if (status == EFI_BUFFER_TOO_SMALL) {
+			if (dz && put_user(datasize, getvariable.data_size)) {
+				rv = -EFAULT;
+				goto out;
+			}
+		}
+		rv = -EINVAL;
+		goto out;
+	}
+
+	if (prev_datasize < datasize) {
+		rv = -EINVAL;
+		goto out;
+	}
+
+	if (data) {
+		if (copy_to_user(getvariable.data, data, datasize)) {
+			rv = -EFAULT;
+			goto out;
+		}
+	}
+
+	if (at && put_user(attr, getvariable.attributes)) {
+		rv = -EFAULT;
+		goto out;
+	}
+
+	if (dz && put_user(datasize, getvariable.data_size))
+		rv = -EFAULT;
+
+out:
+	kfree(data);
+	return rv;
+
+}
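+
+/*
+ * Note the usual UEFI two-call pattern surfaces above: when the
+ * firmware returns EFI_BUFFER_TOO_SMALL, the updated data_size is
+ * copied back to userspace so the caller can retry with a buffer of
+ * sufficient size.
+ */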
+
+static long efi_runtime_set_variable(unsigned long arg)
+{
+	struct efi_setvariable __user *setvariable_user;
+	struct efi_setvariable setvariable;
+	efi_guid_t vendor_guid;
+	efi_status_t status;
+	efi_char16_t *name = NULL;
+	void *data;
+	int rv = 0;
+
+	setvariable_user = (struct efi_setvariable __user *)arg;
+
+	if (copy_from_user(&setvariable, setvariable_user, sizeof(setvariable)))
+		return -EFAULT;
+	if (copy_from_user(&vendor_guid, setvariable.vendor_guid,
+				sizeof(vendor_guid)))
+		return -EFAULT;
+
+	if (setvariable.variable_name) {
+		rv = copy_ucs2_from_user(&name, setvariable.variable_name);
+		if (rv)
+			return rv;
+	}
+
+	data = memdup_user(setvariable.data, setvariable.data_size);
+	if (IS_ERR(data)) {
+		kfree(name);
+		return PTR_ERR(data);
+	}
+
+	status = efi.set_variable(name, &vendor_guid,
+				setvariable.attributes,
+				setvariable.data_size, data);
+
+	if (put_user(status, setvariable.status)) {
+		rv = -EFAULT;
+		goto out;
+	}
+
+	rv = status == EFI_SUCCESS ? 0 : -EINVAL;
+
+out:
+	kfree(data);
+	kfree(name);
+
+	return rv;
+}
+
+static long efi_runtime_get_time(unsigned long arg)
+{
+	struct efi_gettime __user *gettime_user;
+	struct efi_gettime  gettime;
+	efi_status_t status;
+	efi_time_cap_t cap;
+	efi_time_t efi_time;
+
+	gettime_user = (struct efi_gettime __user *)arg;
+	if (copy_from_user(&gettime, gettime_user, sizeof(gettime)))
+		return -EFAULT;
+
+	status = efi.get_time(gettime.time ? &efi_time : NULL,
+			      gettime.capabilities ? &cap : NULL);
+
+	if (put_user(status, gettime.status))
+		return -EFAULT;
+
+	if (status != EFI_SUCCESS)
+		return -EINVAL;
+
+	if (gettime.capabilities) {
+		efi_time_cap_t __user *cap_local;
+
+		cap_local = (efi_time_cap_t *)gettime.capabilities;
+		if (put_user(cap.resolution, &(cap_local->resolution)) ||
+			put_user(cap.accuracy, &(cap_local->accuracy)) ||
+			put_user(cap.sets_to_zero, &(cap_local->sets_to_zero)))
+			return -EFAULT;
+	}
+	if (gettime.time) {
+		if (copy_to_user(gettime.time, &efi_time, sizeof(efi_time_t)))
+			return -EFAULT;
+	}
+
+	return 0;
+}
+
+static long efi_runtime_set_time(unsigned long arg)
+{
+	struct efi_settime __user *settime_user;
+	struct efi_settime settime;
+	efi_status_t status;
+	efi_time_t efi_time;
+
+	settime_user = (struct efi_settime __user *)arg;
+	if (copy_from_user(&settime, settime_user, sizeof(settime)))
+		return -EFAULT;
+	if (copy_from_user(&efi_time, settime.time,
+					sizeof(efi_time_t)))
+		return -EFAULT;
+	status = efi.set_time(&efi_time);
+
+	if (put_user(status, settime.status))
+		return -EFAULT;
+
+	return status == EFI_SUCCESS ? 0 : -EINVAL;
+}
+
+static long efi_runtime_get_waketime(unsigned long arg)
+{
+	struct efi_getwakeuptime __user *getwakeuptime_user;
+	struct efi_getwakeuptime getwakeuptime;
+	efi_bool_t enabled, pending;
+	efi_status_t status;
+	efi_time_t efi_time;
+
+	getwakeuptime_user = (struct efi_getwakeuptime __user *)arg;
+	if (copy_from_user(&getwakeuptime, getwakeuptime_user,
+				sizeof(getwakeuptime)))
+		return -EFAULT;
+
+	status = efi.get_wakeup_time(
+		getwakeuptime.enabled ? (efi_bool_t *)&enabled : NULL,
+		getwakeuptime.pending ? (efi_bool_t *)&pending : NULL,
+		getwakeuptime.time ? &efi_time : NULL);
+
+	if (put_user(status, getwakeuptime.status))
+		return -EFAULT;
+
+	if (status != EFI_SUCCESS)
+		return -EINVAL;
+
+	if (getwakeuptime.enabled && put_user(enabled,
+						getwakeuptime.enabled))
+		return -EFAULT;
+
+	if (getwakeuptime.time) {
+		if (copy_to_user(getwakeuptime.time, &efi_time,
+				sizeof(efi_time_t)))
+			return -EFAULT;
+	}
+
+	return 0;
+}
+
+static long efi_runtime_set_waketime(unsigned long arg)
+{
+	struct efi_setwakeuptime __user *setwakeuptime_user;
+	struct efi_setwakeuptime setwakeuptime;
+	efi_bool_t enabled;
+	efi_status_t status;
+	efi_time_t efi_time;
+
+	setwakeuptime_user = (struct efi_setwakeuptime __user *)arg;
+
+	if (copy_from_user(&setwakeuptime, setwakeuptime_user,
+				sizeof(setwakeuptime)))
+		return -EFAULT;
+
+	enabled = setwakeuptime.enabled;
+	if (setwakeuptime.time) {
+		if (copy_from_user(&efi_time, setwakeuptime.time,
+					sizeof(efi_time_t)))
+			return -EFAULT;
+
+		status = efi.set_wakeup_time(enabled, &efi_time);
+	} else
+		status = efi.set_wakeup_time(enabled, NULL);
+
+	if (put_user(status, setwakeuptime.status))
+		return -EFAULT;
+
+	return status == EFI_SUCCESS ? 0 : -EINVAL;
+}
+
+static long efi_runtime_get_nextvariablename(unsigned long arg)
+{
+	struct efi_getnextvariablename __user *getnextvariablename_user;
+	struct efi_getnextvariablename getnextvariablename;
+	unsigned long name_size, prev_name_size = 0, *ns = NULL;
+	efi_status_t status;
+	efi_guid_t *vd = NULL;
+	efi_guid_t vendor_guid;
+	efi_char16_t *name = NULL;
+	int rv = 0;
+
+	getnextvariablename_user = (struct efi_getnextvariablename __user *)arg;
+
+	if (copy_from_user(&getnextvariablename, getnextvariablename_user,
+			   sizeof(getnextvariablename)))
+		return -EFAULT;
+
+	if (getnextvariablename.variable_name_size) {
+		if (get_user(name_size, getnextvariablename.variable_name_size))
+			return -EFAULT;
+		ns = &name_size;
+		prev_name_size = name_size;
+	}
+
+	if (getnextvariablename.vendor_guid) {
+		if (copy_from_user(&vendor_guid,
+				getnextvariablename.vendor_guid,
+				sizeof(vendor_guid)))
+			return -EFAULT;
+		vd = &vendor_guid;
+	}
+
+	if (getnextvariablename.variable_name) {
+		size_t name_string_size = 0;
+
+		rv = get_ucs2_strsize_from_user(
+				getnextvariablename.variable_name,
+				&name_string_size);
+		if (rv)
+			return rv;
+		/*
+		 * In some cases name_size may be smaller than the real size
+		 * of the buffer holding the variable name. The most typical
+		 * case is passing 0 on the first call to learn the required
+		 * buffer size. So we need to copy at least the string size of
+		 * the variable name from user space, or else the name passed
+		 * to UEFI may not be terminated as we expect.
+		 */
+		rv = copy_ucs2_from_user_len(&name,
+				getnextvariablename.variable_name,
+				prev_name_size > name_string_size ?
+				prev_name_size : name_string_size);
+		if (rv)
+			return rv;
+	}
+
+	status = efi.get_next_variable(ns, name, vd);
+
+	if (put_user(status, getnextvariablename.status)) {
+		rv = -EFAULT;
+		goto out;
+	}
+
+	if (status != EFI_SUCCESS) {
+		if (status == EFI_BUFFER_TOO_SMALL) {
+			if (ns && put_user(*ns,
+				getnextvariablename.variable_name_size)) {
+				rv = -EFAULT;
+				goto out;
+			}
+		}
+		rv = -EINVAL;
+		goto out;
+	}
+
+	if (name) {
+		if (copy_ucs2_to_user_len(getnextvariablename.variable_name,
+						name, prev_name_size)) {
+			rv = -EFAULT;
+			goto out;
+		}
+	}
+
+	if (ns) {
+		if (put_user(*ns, getnextvariablename.variable_name_size)) {
+			rv = -EFAULT;
+			goto out;
+		}
+	}
+
+	if (vd) {
+		if (copy_to_user(getnextvariablename.vendor_guid, vd,
+							sizeof(efi_guid_t)))
+			rv = -EFAULT;
+	}
+
+out:
+	kfree(name);
+	return rv;
+}
+
+static long efi_runtime_get_nexthighmonocount(unsigned long arg)
+{
+	struct efi_getnexthighmonotoniccount __user *getnexthighmonocount_user;
+	struct efi_getnexthighmonotoniccount getnexthighmonocount;
+	efi_status_t status;
+	u32 count;
+
+	getnexthighmonocount_user = (struct
+			efi_getnexthighmonotoniccount __user *)arg;
+
+	if (copy_from_user(&getnexthighmonocount,
+			   getnexthighmonocount_user,
+			   sizeof(getnexthighmonocount)))
+		return -EFAULT;
+
+	status = efi.get_next_high_mono_count(
+		getnexthighmonocount.high_count ? &count : NULL);
+
+	if (put_user(status, getnexthighmonocount.status))
+		return -EFAULT;
+
+	if (status != EFI_SUCCESS)
+		return -EINVAL;
+
+	if (getnexthighmonocount.high_count &&
+	    put_user(count, getnexthighmonocount.high_count))
+		return -EFAULT;
+
+	return 0;
+}
+
+static long efi_runtime_query_variableinfo(unsigned long arg)
+{
+	struct efi_queryvariableinfo __user *queryvariableinfo_user;
+	struct efi_queryvariableinfo queryvariableinfo;
+	efi_status_t status;
+	u64 max_storage, remaining, max_size;
+
+	queryvariableinfo_user = (struct efi_queryvariableinfo __user *)arg;
+
+	if (copy_from_user(&queryvariableinfo, queryvariableinfo_user,
+			   sizeof(queryvariableinfo)))
+		return -EFAULT;
+
+	status = efi.query_variable_info(queryvariableinfo.attributes,
+					 &max_storage, &remaining, &max_size);
+
+	if (put_user(status, queryvariableinfo.status))
+		return -EFAULT;
+
+	if (status != EFI_SUCCESS)
+		return -EINVAL;
+
+	if (put_user(max_storage,
+		     queryvariableinfo.maximum_variable_storage_size))
+		return -EFAULT;
+
+	if (put_user(remaining,
+		     queryvariableinfo.remaining_variable_storage_size))
+		return -EFAULT;
+
+	if (put_user(max_size, queryvariableinfo.maximum_variable_size))
+		return -EFAULT;
+
+	return 0;
+}
+
+static long efi_runtime_query_capsulecaps(unsigned long arg)
+{
+	struct efi_querycapsulecapabilities __user *qcaps_user;
+	struct efi_querycapsulecapabilities qcaps;
+	efi_capsule_header_t *capsules;
+	efi_status_t status;
+	u64 max_size;
+	int i, reset_type;
+	int rv = 0;
+
+	qcaps_user = (struct efi_querycapsulecapabilities __user *)arg;
+
+	if (copy_from_user(&qcaps, qcaps_user, sizeof(qcaps)))
+		return -EFAULT;
+
+	if (qcaps.capsule_count == ULONG_MAX)
+		return -EINVAL;
+
+	capsules = kcalloc(qcaps.capsule_count + 1,
+			   sizeof(efi_capsule_header_t), GFP_KERNEL);
+	if (!capsules)
+		return -ENOMEM;
+
+	for (i = 0; i < qcaps.capsule_count; i++) {
+		efi_capsule_header_t *c;
+		/*
+		 * We cannot dereference qcaps.capsule_header_array directly to
+		 * obtain the address of the capsule as it resides in
+		 * user space.
+		 */
+		if (get_user(c, qcaps.capsule_header_array + i)) {
+			rv = -EFAULT;
+			goto out;
+		}
+		if (copy_from_user(&capsules[i], c,
+				sizeof(efi_capsule_header_t))) {
+			rv = -EFAULT;
+			goto out;
+		}
+	}
+
+	qcaps.capsule_header_array = &capsules;
+
+	status = efi.query_capsule_caps((efi_capsule_header_t **)
+					qcaps.capsule_header_array,
+					qcaps.capsule_count,
+					&max_size, &reset_type);
+
+	if (put_user(status, qcaps.status)) {
+		rv = -EFAULT;
+		goto out;
+	}
+
+	if (status != EFI_SUCCESS) {
+		rv = -EINVAL;
+		goto out;
+	}
+
+	if (put_user(max_size, qcaps.maximum_capsule_size)) {
+		rv = -EFAULT;
+		goto out;
+	}
+
+	if (put_user(reset_type, qcaps.reset_type))
+		rv = -EFAULT;
+
+out:
+	kfree(capsules);
+	return rv;
+}
+
+static long efi_test_ioctl(struct file *file, unsigned int cmd,
+							unsigned long arg)
+{
+	switch (cmd) {
+	case EFI_RUNTIME_GET_VARIABLE:
+		return efi_runtime_get_variable(arg);
+
+	case EFI_RUNTIME_SET_VARIABLE:
+		return efi_runtime_set_variable(arg);
+
+	case EFI_RUNTIME_GET_TIME:
+		return efi_runtime_get_time(arg);
+
+	case EFI_RUNTIME_SET_TIME:
+		return efi_runtime_set_time(arg);
+
+	case EFI_RUNTIME_GET_WAKETIME:
+		return efi_runtime_get_waketime(arg);
+
+	case EFI_RUNTIME_SET_WAKETIME:
+		return efi_runtime_set_waketime(arg);
+
+	case EFI_RUNTIME_GET_NEXTVARIABLENAME:
+		return efi_runtime_get_nextvariablename(arg);
+
+	case EFI_RUNTIME_GET_NEXTHIGHMONOTONICCOUNT:
+		return efi_runtime_get_nexthighmonocount(arg);
+
+	case EFI_RUNTIME_QUERY_VARIABLEINFO:
+		return efi_runtime_query_variableinfo(arg);
+
+	case EFI_RUNTIME_QUERY_CAPSULECAPABILITIES:
+		return efi_runtime_query_capsulecaps(arg);
+	}
+
+	return -ENOTTY;
+}
+
+static int efi_test_open(struct inode *inode, struct file *file)
+{
+	/*
+	 * Nothing special to do here.
+	 * We do accept multiple open files at the same time as we
+	 * synchronize on a per-call basis.
+	 */
+	return 0;
+}
+
+static int efi_test_close(struct inode *inode, struct file *file)
+{
+	return 0;
+}
+
+/*
+ *	The various file operations we support.
+ */
+static const struct file_operations efi_test_fops = {
+	.owner		= THIS_MODULE,
+	.unlocked_ioctl	= efi_test_ioctl,
+	.open		= efi_test_open,
+	.release	= efi_test_close,
+	.llseek		= no_llseek,
+};
+
+static struct miscdevice efi_test_dev = {
+	MISC_DYNAMIC_MINOR,
+	"efi_test",
+	&efi_test_fops
+};
+
+static int __init efi_test_init(void)
+{
+	int ret;
+
+	ret = misc_register(&efi_test_dev);
+	if (ret) {
+		pr_err("efi_test: can't misc_register on minor=%d\n",
+			MISC_DYNAMIC_MINOR);
+		return ret;
+	}
+
+	return 0;
+}
+
+static void __exit efi_test_exit(void)
+{
+	misc_deregister(&efi_test_dev);
+}
+
+module_init(efi_test_init);
+module_exit(efi_test_exit);
diff --git a/drivers/firmware/efi/test/efi_test.h b/drivers/firmware/efi/test/efi_test.h
new file mode 100644
index 0000000..9812c6a
--- /dev/null
+++ b/drivers/firmware/efi/test/efi_test.h
@@ -0,0 +1,111 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * EFI Test driver Header
+ *
+ * Copyright(C) 2012-2016 Canonical Ltd.
+ *
+ */
+
+#ifndef _DRIVERS_FIRMWARE_EFI_TEST_H_
+#define _DRIVERS_FIRMWARE_EFI_TEST_H_
+
+#include <linux/efi.h>
+
+struct efi_getvariable {
+	efi_char16_t	*variable_name;
+	efi_guid_t	*vendor_guid;
+	u32		*attributes;
+	unsigned long	*data_size;
+	void		*data;
+	efi_status_t	*status;
+} __packed;
+
+struct efi_setvariable {
+	efi_char16_t	*variable_name;
+	efi_guid_t	*vendor_guid;
+	u32		attributes;
+	unsigned long	data_size;
+	void		*data;
+	efi_status_t	*status;
+} __packed;
+
+struct efi_getnextvariablename {
+	unsigned long	*variable_name_size;
+	efi_char16_t	*variable_name;
+	efi_guid_t	*vendor_guid;
+	efi_status_t	*status;
+} __packed;
+
+struct efi_queryvariableinfo {
+	u32		attributes;
+	u64		*maximum_variable_storage_size;
+	u64		*remaining_variable_storage_size;
+	u64		*maximum_variable_size;
+	efi_status_t	*status;
+} __packed;
+
+struct efi_gettime {
+	efi_time_t	*time;
+	efi_time_cap_t	*capabilities;
+	efi_status_t	*status;
+} __packed;
+
+struct efi_settime {
+	efi_time_t	*time;
+	efi_status_t	*status;
+} __packed;
+
+struct efi_getwakeuptime {
+	efi_bool_t	*enabled;
+	efi_bool_t	*pending;
+	efi_time_t	*time;
+	efi_status_t	*status;
+} __packed;
+
+struct efi_setwakeuptime {
+	efi_bool_t	enabled;
+	efi_time_t	*time;
+	efi_status_t	*status;
+} __packed;
+
+struct efi_getnexthighmonotoniccount {
+	u32		*high_count;
+	efi_status_t	*status;
+} __packed;
+
+struct efi_querycapsulecapabilities {
+	efi_capsule_header_t	**capsule_header_array;
+	unsigned long		capsule_count;
+	u64			*maximum_capsule_size;
+	int			*reset_type;
+	efi_status_t		*status;
+} __packed;
+
+#define EFI_RUNTIME_GET_VARIABLE \
+	_IOWR('p', 0x01, struct efi_getvariable)
+#define EFI_RUNTIME_SET_VARIABLE \
+	_IOW('p', 0x02, struct efi_setvariable)
+
+#define EFI_RUNTIME_GET_TIME \
+	_IOR('p', 0x03, struct efi_gettime)
+#define EFI_RUNTIME_SET_TIME \
+	_IOW('p', 0x04, struct efi_settime)
+
+#define EFI_RUNTIME_GET_WAKETIME \
+	_IOR('p', 0x05, struct efi_getwakeuptime)
+#define EFI_RUNTIME_SET_WAKETIME \
+	_IOW('p', 0x06, struct efi_setwakeuptime)
+
+#define EFI_RUNTIME_GET_NEXTVARIABLENAME \
+	_IOWR('p', 0x07, struct efi_getnextvariablename)
+
+#define EFI_RUNTIME_QUERY_VARIABLEINFO \
+	_IOR('p', 0x08, struct efi_queryvariableinfo)
+
+#define EFI_RUNTIME_GET_NEXTHIGHMONOTONICCOUNT \
+	_IOR('p', 0x09, struct efi_getnexthighmonotoniccount)
+
+#define EFI_RUNTIME_QUERY_CAPSULECAPABILITIES \
+	_IOR('p', 0x0A, struct efi_querycapsulecapabilities)
+
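+/*
+ * Illustrative userspace usage (assuming the driver's misc device is
+ * exposed as /dev/efi_test, matching the miscdevice name; tm and
+ * status are locals of the appropriate types):
+ *
+ *	int fd = open("/dev/efi_test", O_RDWR);
+ *	struct efi_gettime gt = {
+ *		.time = &tm, .capabilities = NULL, .status = &status,
+ *	};
+ *	ioctl(fd, EFI_RUNTIME_GET_TIME, &gt);
+ */
+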
+#endif /* _DRIVERS_FIRMWARE_EFI_TEST_H_ */
diff --git a/drivers/firmware/efi/tpm.c b/drivers/firmware/efi/tpm.c
new file mode 100644
index 0000000..0cbeb3d
--- /dev/null
+++ b/drivers/firmware/efi/tpm.c
@@ -0,0 +1,40 @@
+/*
+ * Copyright (C) 2017 Google, Inc.
+ *     Thiebaud Weksteen <tweek@google.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/efi.h>
+#include <linux/init.h>
+#include <linux/memblock.h>
+
+#include <asm/early_ioremap.h>
+
+/*
+ * Reserve the memory associated with the TPM Event Log configuration table.
+ */
+int __init efi_tpm_eventlog_init(void)
+{
+	struct linux_efi_tpm_eventlog *log_tbl;
+	unsigned int tbl_size;
+
+	if (efi.tpm_log == EFI_INVALID_TABLE_ADDR)
+		return 0;
+
+	log_tbl = early_memremap(efi.tpm_log, sizeof(*log_tbl));
+	if (!log_tbl) {
+		pr_err("Failed to map TPM Event Log table @ 0x%lx\n",
+			efi.tpm_log);
+		efi.tpm_log = EFI_INVALID_TABLE_ADDR;
+		return -ENOMEM;
+	}
+
+	tbl_size = sizeof(*log_tbl) + log_tbl->size;
+	memblock_reserve(efi.tpm_log, tbl_size);
+	early_memunmap(log_tbl, sizeof(*log_tbl));
+	return 0;
+}
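+
+/*
+ * Only the table header is mapped above; log_tbl->size then gives the
+ * length of the event log proper, so the header and the log are
+ * reserved together.
+ */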
diff --git a/drivers/firmware/efi/vars.c b/drivers/firmware/efi/vars.c
new file mode 100644
index 0000000..9336ffd
--- /dev/null
+++ b/drivers/firmware/efi/vars.c
@@ -0,0 +1,1187 @@
+/*
+ * Originally from efivars.c
+ *
+ * Copyright (C) 2001,2003,2004 Dell <Matt_Domsch@dell.com>
+ * Copyright (C) 2004 Intel Corporation <matthew.e.tolentino@intel.com>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+
+#include <linux/capability.h>
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/string.h>
+#include <linux/smp.h>
+#include <linux/efi.h>
+#include <linux/sysfs.h>
+#include <linux/device.h>
+#include <linux/slab.h>
+#include <linux/ctype.h>
+#include <linux/ucs2_string.h>
+
+/* Private pointer to registered efivars */
+static struct efivars *__efivars;
+
+/*
+ * efivars_lock protects three things:
+ * 1) efivarfs_list and efivars_sysfs_list
+ * 2) ->ops calls
+ * 3) (un)registration of __efivars
+ */
+static DEFINE_SEMAPHORE(efivars_lock);
+
+static bool efivar_wq_enabled = true;
+DECLARE_WORK(efivar_work, NULL);
+EXPORT_SYMBOL_GPL(efivar_work);
+
+static bool
+validate_device_path(efi_char16_t *var_name, int match, u8 *buffer,
+		     unsigned long len)
+{
+	struct efi_generic_dev_path *node;
+	int offset = 0;
+
+	node = (struct efi_generic_dev_path *)buffer;
+
+	if (len < sizeof(*node))
+		return false;
+
+	while (offset <= len - sizeof(*node) &&
+	       node->length >= sizeof(*node) &&
+	       node->length <= len - offset) {
+		offset += node->length;
+
+		if ((node->type == EFI_DEV_END_PATH ||
+		     node->type == EFI_DEV_END_PATH2) &&
+		    node->sub_type == EFI_DEV_END_ENTIRE)
+			return true;
+
+		node = (struct efi_generic_dev_path *)(buffer + offset);
+	}
+
+	/*
+	 * If we're here then either node->length pointed past the end
+	 * of the buffer or we reached the end of the buffer without
+	 * finding a device path end node.
+	 */
+	return false;
+}
+
+static bool
+validate_boot_order(efi_char16_t *var_name, int match, u8 *buffer,
+		    unsigned long len)
+{
+	/* An array of 16-bit integers */
+	if ((len % 2) != 0)
+		return false;
+
+	return true;
+}
+
+static bool
+validate_load_option(efi_char16_t *var_name, int match, u8 *buffer,
+		     unsigned long len)
+{
+	u16 filepathlength;
+	int i, desclength = 0, namelen;
+
+	namelen = ucs2_strnlen(var_name, EFI_VAR_NAME_LEN);
+
+	/* Either "Boot" or "Driver" followed by four digits of hex */
+	for (i = match; i < match+4; i++) {
+		if (var_name[i] > 127 ||
+		    hex_to_bin(var_name[i] & 0xff) < 0)
+			return true;
+	}
+
+	/* Reject it if there's 4 digits of hex and then further content */
+	if (namelen > match + 4)
+		return false;
+
+	/* A valid entry must be at least 8 bytes */
+	if (len < 8)
+		return false;
+
+	filepathlength = buffer[4] | buffer[5] << 8;
+
+	/*
+	 * There's no stored length for the description, so it has to be
+	 * found by hand
+	 */
+	desclength = ucs2_strsize((efi_char16_t *)(buffer + 6), len - 6) + 2;
+
+	/* Each boot entry must have a descriptor */
+	if (!desclength)
+		return false;
+
+	/*
+	 * If the sum of the length of the description, the claimed filepath
+	 * length and the original header are greater than the length of the
+	 * variable, it's malformed
+	 */
+	if ((desclength + filepathlength + 6) > len)
+		return false;
+
+	/*
+	 * And, finally, check the filepath
+	 */
+	return validate_device_path(var_name, match, buffer + desclength + 6,
+				    filepathlength);
+}
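+
+/*
+ * For reference, the EFI_LOAD_OPTION layout walked above (per the
+ * UEFI specification):
+ *
+ *	u32 attributes;                  bytes 0-3
+ *	u16 file_path_list_length;       bytes 4-5
+ *	efi_char16_t description[];      NUL-terminated, from byte 6
+ *	<device path list of file_path_list_length bytes follows>
+ */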
+
+static bool
+validate_uint16(efi_char16_t *var_name, int match, u8 *buffer,
+		unsigned long len)
+{
+	/* A single 16-bit integer */
+	if (len != 2)
+		return false;
+
+	return true;
+}
+
+static bool
+validate_ascii_string(efi_char16_t *var_name, int match, u8 *buffer,
+		      unsigned long len)
+{
+	int i;
+
+	for (i = 0; i < len; i++) {
+		if (buffer[i] > 127)
+			return false;
+
+		if (buffer[i] == 0)
+			return true;
+	}
+
+	return false;
+}
+
+struct variable_validate {
+	efi_guid_t vendor;
+	char *name;
+	bool (*validate)(efi_char16_t *var_name, int match, u8 *data,
+			 unsigned long len);
+};
+
+/*
+ * This is the list of variables we need to validate, as well as the
+ * whitelist for what we think is safe not to default to immutable.
+ *
+ * If it has a validate() method that's not NULL, it'll go into the
+ * validation routine.  If not, it is assumed valid, but still used for
+ * whitelisting.
+ *
+ * Note that it's sorted by {vendor,name}, but globbed names must come after
+ * any other name with the same prefix.
+ */
+static const struct variable_validate variable_validate[] = {
+	{ EFI_GLOBAL_VARIABLE_GUID, "BootNext", validate_uint16 },
+	{ EFI_GLOBAL_VARIABLE_GUID, "BootOrder", validate_boot_order },
+	{ EFI_GLOBAL_VARIABLE_GUID, "Boot*", validate_load_option },
+	{ EFI_GLOBAL_VARIABLE_GUID, "DriverOrder", validate_boot_order },
+	{ EFI_GLOBAL_VARIABLE_GUID, "Driver*", validate_load_option },
+	{ EFI_GLOBAL_VARIABLE_GUID, "ConIn", validate_device_path },
+	{ EFI_GLOBAL_VARIABLE_GUID, "ConInDev", validate_device_path },
+	{ EFI_GLOBAL_VARIABLE_GUID, "ConOut", validate_device_path },
+	{ EFI_GLOBAL_VARIABLE_GUID, "ConOutDev", validate_device_path },
+	{ EFI_GLOBAL_VARIABLE_GUID, "ErrOut", validate_device_path },
+	{ EFI_GLOBAL_VARIABLE_GUID, "ErrOutDev", validate_device_path },
+	{ EFI_GLOBAL_VARIABLE_GUID, "Lang", validate_ascii_string },
+	{ EFI_GLOBAL_VARIABLE_GUID, "OsIndications", NULL },
+	{ EFI_GLOBAL_VARIABLE_GUID, "PlatformLang", validate_ascii_string },
+	{ EFI_GLOBAL_VARIABLE_GUID, "Timeout", validate_uint16 },
+	{ LINUX_EFI_CRASH_GUID, "*", NULL },
+	{ NULL_GUID, "", NULL },
+};
+
+/*
+ * Check if @var_name matches the pattern given in @match_name.
+ *
+ * @var_name: an array of @len non-NUL characters.
+ * @match_name: a NUL-terminated pattern string, optionally ending in "*". A
+ *              final "*" character matches any trailing characters @var_name,
+ *              including the case when there are none left in @var_name.
+ * @match: on output, the number of non-wildcard characters in @match_name
+ *         that @var_name matches, regardless of the return value.
+ * @return: whether @var_name fully matches @match_name.
+ */
+static bool
+variable_matches(const char *var_name, size_t len, const char *match_name,
+		 int *match)
+{
+	for (*match = 0; ; (*match)++) {
+		char c = match_name[*match];
+
+		switch (c) {
+		case '*':
+			/* Wildcard in @match_name means we've matched. */
+			return true;
+
+		case '\0':
+			/* @match_name has ended. Has @var_name too? */
+			return (*match == len);
+
+		default:
+			/*
+			 * We've reached a non-wildcard char in @match_name.
+			 * Continue only if there's an identical character in
+			 * @var_name.
+			 */
+			if (*match < len && c == var_name[*match])
+				continue;
+			return false;
+		}
+	}
+}
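+
+/*
+ * Editor's illustration (a sketch, not part of the original file): matching
+ * the UTF-8 name "Boot0001" plus its trailing NUL (len == 9) against the
+ * "Boot*" pattern from variable_validate[] returns true with *match == 4,
+ * so validate_load_option() then inspects var_name[4..7] as hex digits:
+ *
+ *	int match;
+ *	bool hit = variable_matches("Boot0001", 9, "Boot*", &match);
+ *	// hit == true, match == 4 (characters matched before the wildcard)
+ */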
+
+bool
+efivar_validate(efi_guid_t vendor, efi_char16_t *var_name, u8 *data,
+		unsigned long data_size)
+{
+	int i;
+	unsigned long utf8_size;
+	u8 *utf8_name;
+
+	utf8_size = ucs2_utf8size(var_name);
+	utf8_name = kmalloc(utf8_size + 1, GFP_KERNEL);
+	if (!utf8_name)
+		return false;
+
+	ucs2_as_utf8(utf8_name, var_name, utf8_size);
+	utf8_name[utf8_size] = '\0';
+
+	for (i = 0; variable_validate[i].name[0] != '\0'; i++) {
+		const char *name = variable_validate[i].name;
+		int match = 0;
+
+		if (efi_guidcmp(vendor, variable_validate[i].vendor))
+			continue;
+
+		if (variable_matches(utf8_name, utf8_size+1, name, &match)) {
+			if (variable_validate[i].validate == NULL)
+				break;
+			kfree(utf8_name);
+			return variable_validate[i].validate(var_name, match,
+							     data, data_size);
+		}
+	}
+	kfree(utf8_name);
+	return true;
+}
+EXPORT_SYMBOL_GPL(efivar_validate);
+
+bool
+efivar_variable_is_removable(efi_guid_t vendor, const char *var_name,
+			     size_t len)
+{
+	int i;
+	bool found = false;
+	int match = 0;
+
+	/*
+	 * Check if our variable is in the validated variables list
+	 */
+	for (i = 0; variable_validate[i].name[0] != '\0'; i++) {
+		if (efi_guidcmp(variable_validate[i].vendor, vendor))
+			continue;
+
+		if (variable_matches(var_name, len,
+				     variable_validate[i].name, &match)) {
+			found = true;
+			break;
+		}
+	}
+
+	/*
+	 * If it's in our list, it is removable.
+	 */
+	return found;
+}
+EXPORT_SYMBOL_GPL(efivar_variable_is_removable);
+
+static efi_status_t
+check_var_size(u32 attributes, unsigned long size)
+{
+	const struct efivar_operations *fops = __efivars->ops;
+
+	if (!fops->query_variable_store)
+		return EFI_UNSUPPORTED;
+
+	return fops->query_variable_store(attributes, size, false);
+}
+
+static efi_status_t
+check_var_size_nonblocking(u32 attributes, unsigned long size)
+{
+	const struct efivar_operations *fops = __efivars->ops;
+
+	if (!fops->query_variable_store)
+		return EFI_UNSUPPORTED;
+
+	return fops->query_variable_store(attributes, size, true);
+}
+
+static bool variable_is_present(efi_char16_t *variable_name, efi_guid_t *vendor,
+				struct list_head *head)
+{
+	struct efivar_entry *entry, *n;
+	unsigned long strsize1, strsize2;
+	bool found = false;
+
+	strsize1 = ucs2_strsize(variable_name, 1024);
+	list_for_each_entry_safe(entry, n, head, list) {
+		strsize2 = ucs2_strsize(entry->var.VariableName, 1024);
+		if (strsize1 == strsize2 &&
+			!memcmp(variable_name, &(entry->var.VariableName),
+				strsize2) &&
+			!efi_guidcmp(entry->var.VendorGuid,
+				*vendor)) {
+			found = true;
+			break;
+		}
+	}
+	return found;
+}
+
+/*
+ * Returns the size of variable_name, in bytes, including the
+ * terminating NULL character, or variable_name_size if no NULL
+ * character is found among the first variable_name_size bytes.
+ */
+static unsigned long var_name_strnsize(efi_char16_t *variable_name,
+				       unsigned long variable_name_size)
+{
+	unsigned long len;
+	efi_char16_t c;
+
+	/*
+	 * The variable name is, by definition, a NULL-terminated
+	 * string, so make absolutely sure that variable_name_size is
+	 * the value we expect it to be. If not, return the real size.
+	 */
+	for (len = 2; len <= variable_name_size; len += sizeof(c)) {
+		c = variable_name[(len / sizeof(c)) - 1];
+		if (!c)
+			break;
+	}
+
+	return min(len, variable_name_size);
+}
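+
+/*
+ * Editor's worked example: for the UCS-2 name L"BootOrder" in a 1024-byte
+ * buffer, the NUL is the tenth character, so the loop stops at
+ * len == 10 * sizeof(efi_char16_t) == 20, and 20 is returned instead of
+ * the oversized 1024 that buggy firmware may report.
+ */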
+
+/*
+ * Print a warning when duplicate EFI variables are encountered and
+ * disable the sysfs workqueue since the firmware is buggy.
+ */
+static void dup_variable_bug(efi_char16_t *str16, efi_guid_t *vendor_guid,
+			     unsigned long len16)
+{
+	size_t i, len8 = len16 / sizeof(efi_char16_t);
+	char *str8;
+
+	/*
+	 * Disable the workqueue since the algorithm it uses for
+	 * detecting new variables won't work with this buggy
+	 * implementation of GetNextVariableName().
+	 */
+	efivar_wq_enabled = false;
+
+	str8 = kzalloc(len8, GFP_KERNEL);
+	if (!str8)
+		return;
+
+	for (i = 0; i < len8; i++)
+		str8[i] = str16[i];
+
+	printk(KERN_WARNING "efivars: duplicate variable: %s-%pUl\n",
+	       str8, vendor_guid);
+	kfree(str8);
+}
+
+/**
+ * efivar_init - build the initial list of EFI variables
+ * @func: callback function to invoke for every variable
+ * @data: function-specific data to pass to @func
+ * @duplicates: detect and warn about duplicate variable names on @head?
+ * @head: initialised head of variable list
+ *
+ * Get every EFI variable from the firmware and invoke @func. @func
+ * should call efivar_entry_add() to build the list of variables.
+ *
+ * Returns 0 on success, or a kernel error code on failure.
+ */
+int efivar_init(int (*func)(efi_char16_t *, efi_guid_t, unsigned long, void *),
+		void *data, bool duplicates, struct list_head *head)
+{
+	const struct efivar_operations *ops = __efivars->ops;
+	unsigned long variable_name_size = 1024;
+	efi_char16_t *variable_name;
+	efi_status_t status;
+	efi_guid_t vendor_guid;
+	int err = 0;
+
+	variable_name = kzalloc(variable_name_size, GFP_KERNEL);
+	if (!variable_name) {
+		printk(KERN_ERR "efivars: Memory allocation failed.\n");
+		return -ENOMEM;
+	}
+
+	if (down_interruptible(&efivars_lock)) {
+		err = -EINTR;
+		goto free;
+	}
+
+	/*
+	 * Per EFI spec, the maximum storage allocated for both
+	 * the variable name and variable data is 1024 bytes.
+	 */
+
+	do {
+		variable_name_size = 1024;
+
+		status = ops->get_next_variable(&variable_name_size,
+						variable_name,
+						&vendor_guid);
+		switch (status) {
+		case EFI_SUCCESS:
+			if (duplicates)
+				up(&efivars_lock);
+
+			variable_name_size = var_name_strnsize(variable_name,
+							       variable_name_size);
+
+			/*
+			 * Some firmware implementations return the
+			 * same variable name on multiple calls to
+			 * get_next_variable(). Terminate the loop
+			 * immediately as there is no guarantee that
+			 * we'll ever see a different variable name,
+			 * and may end up looping here forever.
+			 */
+			if (duplicates &&
+			    variable_is_present(variable_name, &vendor_guid,
+						head)) {
+				dup_variable_bug(variable_name, &vendor_guid,
+						 variable_name_size);
+				status = EFI_NOT_FOUND;
+			} else {
+				err = func(variable_name, vendor_guid,
+					   variable_name_size, data);
+				if (err)
+					status = EFI_NOT_FOUND;
+			}
+
+			if (duplicates) {
+				if (down_interruptible(&efivars_lock)) {
+					err = -EINTR;
+					goto free;
+				}
+			}
+
+			break;
+		case EFI_NOT_FOUND:
+			break;
+		default:
+			printk(KERN_WARNING "efivars: get_next_variable: status=%lx\n",
+				status);
+			status = EFI_NOT_FOUND;
+			break;
+		}
+
+	} while (status != EFI_NOT_FOUND);
+
+	up(&efivars_lock);
+free:
+	kfree(variable_name);
+
+	return err;
+}
+EXPORT_SYMBOL_GPL(efivar_init);
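+
+/*
+ * Editor's usage sketch (example_callback and my_list are hypothetical;
+ * the real users live in efivarfs and the sysfs frontend). With
+ * @duplicates == true the lock is dropped around @func, so the callback
+ * may safely take it again via efivar_entry_add():
+ *
+ *	static int example_callback(efi_char16_t *name, efi_guid_t vendor,
+ *				    unsigned long name_size, void *data)
+ *	{
+ *		struct list_head *head = data;
+ *		struct efivar_entry *entry;
+ *
+ *		entry = kzalloc(sizeof(*entry), GFP_KERNEL);
+ *		if (!entry)
+ *			return -ENOMEM;
+ *		memcpy(entry->var.VariableName, name, name_size);
+ *		entry->var.VendorGuid = vendor;
+ *		return efivar_entry_add(entry, head);
+ *	}
+ *
+ *	err = efivar_init(example_callback, &my_list, true, &my_list);
+ */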
+
+/**
+ * efivar_entry_add - add entry to variable list
+ * @entry: entry to add to list
+ * @head: list head
+ *
+ * Returns 0 on success, or a kernel error code on failure.
+ */
+int efivar_entry_add(struct efivar_entry *entry, struct list_head *head)
+{
+	if (down_interruptible(&efivars_lock))
+		return -EINTR;
+	list_add(&entry->list, head);
+	up(&efivars_lock);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(efivar_entry_add);
+
+/**
+ * efivar_entry_remove - remove entry from variable list
+ * @entry: entry to remove from list
+ *
+ * Returns 0 on success, or a kernel error code on failure.
+ */
+int efivar_entry_remove(struct efivar_entry *entry)
+{
+	if (down_interruptible(&efivars_lock))
+		return -EINTR;
+	list_del(&entry->list);
+	up(&efivars_lock);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(efivar_entry_remove);
+
+/*
+ * efivar_entry_list_del_unlock - remove entry from variable list
+ * @entry: entry to remove
+ *
+ * Remove @entry from the variable list and release the list lock.
+ *
+ * NOTE: slightly weird locking semantics here - we expect to be
+ * called with the efivars lock already held, and we release it before
+ * returning. This is because this function is usually called after
+ * set_variable() while the lock is still held.
+ */
+static void efivar_entry_list_del_unlock(struct efivar_entry *entry)
+{
+	list_del(&entry->list);
+	up(&efivars_lock);
+}
+
+/**
+ * __efivar_entry_delete - delete an EFI variable
+ * @entry: entry containing EFI variable to delete
+ *
+ * Delete the variable from the firmware but leave @entry on the
+ * variable list.
+ *
+ * This function differs from efivar_entry_delete() because it does
+ * not remove @entry from the variable list. Also, it is safe to be
+ * called from within an efivar_entry_iter_begin() and
+ * efivar_entry_iter_end() region, unlike efivar_entry_delete().
+ *
+ * Returns 0 on success, or a converted EFI status code if
+ * set_variable() fails.
+ */
+int __efivar_entry_delete(struct efivar_entry *entry)
+{
+	const struct efivar_operations *ops = __efivars->ops;
+	efi_status_t status;
+
+	status = ops->set_variable(entry->var.VariableName,
+				   &entry->var.VendorGuid,
+				   0, 0, NULL);
+
+	return efi_status_to_err(status);
+}
+EXPORT_SYMBOL_GPL(__efivar_entry_delete);
+
+/**
+ * efivar_entry_delete - delete variable and remove entry from list
+ * @entry: entry containing variable to delete
+ *
+ * Delete the variable from the firmware and remove @entry from the
+ * variable list. It is the caller's responsibility to free @entry
+ * once we return.
+ *
+ * Returns 0 on success, -EINTR if we can't grab the semaphore,
+ * or a converted EFI status code if set_variable() fails.
+ */
+int efivar_entry_delete(struct efivar_entry *entry)
+{
+	const struct efivar_operations *ops = __efivars->ops;
+	efi_status_t status;
+
+	if (down_interruptible(&efivars_lock))
+		return -EINTR;
+
+	status = ops->set_variable(entry->var.VariableName,
+				   &entry->var.VendorGuid,
+				   0, 0, NULL);
+	if (!(status == EFI_SUCCESS || status == EFI_NOT_FOUND)) {
+		up(&efivars_lock);
+		return efi_status_to_err(status);
+	}
+
+	efivar_entry_list_del_unlock(entry);
+	return 0;
+}
+EXPORT_SYMBOL_GPL(efivar_entry_delete);
+
+/**
+ * efivar_entry_set - call set_variable()
+ * @entry: entry containing the EFI variable to write
+ * @attributes: variable attributes
+ * @size: size of @data buffer
+ * @data: buffer containing variable data
+ * @head: head of variable list
+ *
+ * Calls set_variable() for an EFI variable. If creating a new EFI
+ * variable, this function is usually followed by efivar_entry_add().
+ *
+ * Before writing the variable, the remaining EFI variable storage
+ * space is checked to ensure there is enough room available.
+ *
+ * If @head is not NULL a lookup is performed to determine whether
+ * the entry is already on the list.
+ *
+ * Returns 0 on success, -EINTR if we can't grab the semaphore,
+ * -EEXIST if a lookup is performed and the entry already exists on
+ * the list, or a converted EFI status code if set_variable() fails.
+ */
+int efivar_entry_set(struct efivar_entry *entry, u32 attributes,
+		     unsigned long size, void *data, struct list_head *head)
+{
+	const struct efivar_operations *ops = __efivars->ops;
+	efi_status_t status;
+	efi_char16_t *name = entry->var.VariableName;
+	efi_guid_t vendor = entry->var.VendorGuid;
+
+	if (down_interruptible(&efivars_lock))
+		return -EINTR;
+	if (head && efivar_entry_find(name, vendor, head, false)) {
+		up(&efivars_lock);
+		return -EEXIST;
+	}
+
+	status = check_var_size(attributes, size + ucs2_strsize(name, 1024));
+	if (status == EFI_SUCCESS || status == EFI_UNSUPPORTED)
+		status = ops->set_variable(name, &vendor,
+					   attributes, size, data);
+
+	up(&efivars_lock);
+
+	return efi_status_to_err(status);
+}
+EXPORT_SYMBOL_GPL(efivar_entry_set);
+
+/*
+ * efivar_entry_set_nonblocking - call set_variable_nonblocking()
+ *
+ * This function is guaranteed to not block and is suitable for calling
+ * from crash/panic handlers.
+ *
+ * Crucially, this function will not block if it cannot acquire
+ * efivars_lock. Instead, it returns -EBUSY.
+ */
+static int
+efivar_entry_set_nonblocking(efi_char16_t *name, efi_guid_t vendor,
+			     u32 attributes, unsigned long size, void *data)
+{
+	const struct efivar_operations *ops = __efivars->ops;
+	efi_status_t status;
+
+	if (down_trylock(&efivars_lock))
+		return -EBUSY;
+
+	status = check_var_size_nonblocking(attributes,
+					    size + ucs2_strsize(name, 1024));
+	if (status != EFI_SUCCESS) {
+		up(&efivars_lock);
+		return -ENOSPC;
+	}
+
+	status = ops->set_variable_nonblocking(name, &vendor, attributes,
+					       size, data);
+
+	up(&efivars_lock);
+	return efi_status_to_err(status);
+}
+
+/**
+ * efivar_entry_set_safe - call set_variable() if enough space in firmware
+ * @name: buffer containing the variable name
+ * @vendor: variable vendor guid
+ * @attributes: variable attributes
+ * @block: can we block in this context?
+ * @size: size of @data buffer
+ * @data: buffer containing variable data
+ *
+ * Ensures there is enough free storage in the firmware for this variable, and
+ * if so, calls set_variable(). If creating a new EFI variable, this function
+ * is usually followed by efivar_entry_add().
+ *
+ * Returns 0 on success, -ENOSYS if the firmware doesn't implement
+ * query_variable_store(), -EBUSY or -EINTR if the lock cannot be taken,
+ * -ENOSPC if the firmware does not have enough space for set_variable()
+ * to succeed, or a converted EFI status code if set_variable() fails.
+ */
+int efivar_entry_set_safe(efi_char16_t *name, efi_guid_t vendor, u32 attributes,
+			  bool block, unsigned long size, void *data)
+{
+	const struct efivar_operations *ops = __efivars->ops;
+	efi_status_t status;
+
+	if (!ops->query_variable_store)
+		return -ENOSYS;
+
+	/*
+	 * If the EFI variable backend provides a non-blocking
+	 * ->set_variable() operation and we're in a context where we
+	 * cannot block, then we need to use it to avoid live-locks,
+	 * since the implication is that the regular ->set_variable()
+	 * will block.
+	 *
+	 * If no ->set_variable_nonblocking() is provided then
+	 * ->set_variable() is assumed to be non-blocking.
+	 */
+	if (!block && ops->set_variable_nonblocking)
+		return efivar_entry_set_nonblocking(name, vendor, attributes,
+						    size, data);
+
+	if (!block) {
+		if (down_trylock(&efivars_lock))
+			return -EBUSY;
+	} else {
+		if (down_interruptible(&efivars_lock))
+			return -EINTR;
+	}
+
+	status = check_var_size(attributes, size + ucs2_strsize(name, 1024));
+	if (status != EFI_SUCCESS) {
+		up(&efivars_lock);
+		return -ENOSPC;
+	}
+
+	status = ops->set_variable(name, &vendor, attributes, size, data);
+
+	up(&efivars_lock);
+
+	return efi_status_to_err(status);
+}
+EXPORT_SYMBOL_GPL(efivar_entry_set_safe);
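+
+/*
+ * Editor's usage sketch: a caller running in panic context (for example a
+ * pstore-style backend) passes block == false, so the semaphore is only
+ * trylock'ed and the non-blocking set_variable path is preferred:
+ *
+ *	ret = efivar_entry_set_safe(name, vendor,
+ *				    EFI_VARIABLE_NON_VOLATILE |
+ *				    EFI_VARIABLE_BOOTSERVICE_ACCESS |
+ *				    EFI_VARIABLE_RUNTIME_ACCESS,
+ *				    false, size, data);  // block == false
+ */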
+
+/**
+ * efivar_entry_find - search for an entry
+ * @name: the EFI variable name
+ * @guid: the EFI variable vendor's guid
+ * @head: head of the variable list
+ * @remove: should we remove the entry from the list?
+ *
+ * Search for an entry on the variable list that has the EFI variable
+ * name @name and vendor guid @guid. If an entry is found on the list
+ * and @remove is true, the entry is removed from the list.
+ *
+ * The caller MUST call efivar_entry_iter_begin() and
+ * efivar_entry_iter_end() before and after the invocation of this
+ * function, respectively.
+ *
+ * Returns the entry if found on the list, %NULL otherwise.
+ */
+struct efivar_entry *efivar_entry_find(efi_char16_t *name, efi_guid_t guid,
+				       struct list_head *head, bool remove)
+{
+	struct efivar_entry *entry, *n;
+	int strsize1, strsize2;
+	bool found = false;
+
+	list_for_each_entry_safe(entry, n, head, list) {
+		strsize1 = ucs2_strsize(name, 1024);
+		strsize2 = ucs2_strsize(entry->var.VariableName, 1024);
+		if (strsize1 == strsize2 &&
+		    !memcmp(name, &(entry->var.VariableName), strsize1) &&
+		    !efi_guidcmp(guid, entry->var.VendorGuid)) {
+			found = true;
+			break;
+		}
+	}
+
+	if (!found)
+		return NULL;
+
+	if (remove) {
+		if (entry->scanning) {
+			/*
+			 * The entry will be deleted
+			 * after scanning is completed.
+			 */
+			entry->deleting = true;
+		} else {
+			list_del(&entry->list);
+		}
+	}
+
+	return entry;
+}
+EXPORT_SYMBOL_GPL(efivar_entry_find);
+
+/**
+ * efivar_entry_size - obtain the size of a variable
+ * @entry: entry for this variable
+ * @size: location to store the variable's size
+ */
+int efivar_entry_size(struct efivar_entry *entry, unsigned long *size)
+{
+	const struct efivar_operations *ops = __efivars->ops;
+	efi_status_t status;
+
+	*size = 0;
+
+	if (down_interruptible(&efivars_lock))
+		return -EINTR;
+	status = ops->get_variable(entry->var.VariableName,
+				   &entry->var.VendorGuid, NULL, size, NULL);
+	up(&efivars_lock);
+
+	if (status != EFI_BUFFER_TOO_SMALL)
+		return efi_status_to_err(status);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(efivar_entry_size);
+
+/**
+ * __efivar_entry_get - call get_variable()
+ * @entry: read data for this variable
+ * @attributes: variable attributes
+ * @size: size of @data buffer
+ * @data: buffer to store variable data
+ *
+ * The caller MUST call efivar_entry_iter_begin() and
+ * efivar_entry_iter_end() before and after the invocation of this
+ * function, respectively.
+ */
+int __efivar_entry_get(struct efivar_entry *entry, u32 *attributes,
+		       unsigned long *size, void *data)
+{
+	const struct efivar_operations *ops = __efivars->ops;
+	efi_status_t status;
+
+	status = ops->get_variable(entry->var.VariableName,
+				   &entry->var.VendorGuid,
+				   attributes, size, data);
+
+	return efi_status_to_err(status);
+}
+EXPORT_SYMBOL_GPL(__efivar_entry_get);
+
+/**
+ * efivar_entry_get - call get_variable()
+ * @entry: read data for this variable
+ * @attributes: variable attributes
+ * @size: size of @data buffer
+ * @data: buffer to store variable data
+ */
+int efivar_entry_get(struct efivar_entry *entry, u32 *attributes,
+		     unsigned long *size, void *data)
+{
+	const struct efivar_operations *ops = __efivars->ops;
+	efi_status_t status;
+
+	if (down_interruptible(&efivars_lock))
+		return -EINTR;
+	status = ops->get_variable(entry->var.VariableName,
+				   &entry->var.VendorGuid,
+				   attributes, size, data);
+	up(&efivars_lock);
+
+	return efi_status_to_err(status);
+}
+EXPORT_SYMBOL_GPL(efivar_entry_get);
+
+/**
+ * efivar_entry_set_get_size - call set_variable() and get new size (atomic)
+ * @entry: entry containing variable to set and get
+ * @attributes: attributes of variable to be written
+ * @size: size of data buffer
+ * @data: buffer containing data to write
+ * @set: did the set_variable() call succeed?
+ *
+ * This is a pretty special (complex) function. See efivarfs_file_write().
+ *
+ * Atomically call set_variable() for @entry and if the call is
+ * successful, return the new size of the variable from get_variable()
+ * in @size. The success of set_variable() is indicated by @set.
+ *
+ * Returns 0 on success, -EINVAL if the variable data is invalid,
+ * -ENOSPC if the firmware does not have enough available space, or a
+ * converted EFI status code if either of set_variable() or
+ * get_variable() fail.
+ *
+ * If the EFI variable does not exist when calling set_variable()
+ * (EFI_NOT_FOUND), @entry is removed from the variable list.
+ */
+int efivar_entry_set_get_size(struct efivar_entry *entry, u32 attributes,
+			      unsigned long *size, void *data, bool *set)
+{
+	const struct efivar_operations *ops = __efivars->ops;
+	efi_char16_t *name = entry->var.VariableName;
+	efi_guid_t *vendor = &entry->var.VendorGuid;
+	efi_status_t status;
+	int err;
+
+	*set = false;
+
+	if (efivar_validate(*vendor, name, data, *size) == false)
+		return -EINVAL;
+
+	/*
+	 * The lock here protects the get_variable call, the conditional
+	 * set_variable call, and removal of the variable from the efivars
+	 * list (in the case of an authenticated delete).
+	 */
+	if (down_interruptible(&efivars_lock))
+		return -EINTR;
+
+	/*
+	 * Ensure that the available space hasn't shrunk below the safe level
+	 */
+	status = check_var_size(attributes, *size + ucs2_strsize(name, 1024));
+	if (status != EFI_SUCCESS) {
+		if (status != EFI_UNSUPPORTED) {
+			err = efi_status_to_err(status);
+			goto out;
+		}
+
+		if (*size > 65536) {
+			err = -ENOSPC;
+			goto out;
+		}
+	}
+
+	status = ops->set_variable(name, vendor, attributes, *size, data);
+	if (status != EFI_SUCCESS) {
+		err = efi_status_to_err(status);
+		goto out;
+	}
+
+	*set = true;
+
+	/*
+	 * Writing to the variable may have caused a change in size (which
+	 * could either be an append or an overwrite), or the variable to be
+	 * deleted. Perform a GetVariable() so we can tell what actually
+	 * happened.
+	 */
+	*size = 0;
+	status = ops->get_variable(entry->var.VariableName,
+				   &entry->var.VendorGuid,
+				   NULL, size, NULL);
+
+	if (status == EFI_NOT_FOUND)
+		efivar_entry_list_del_unlock(entry);
+	else
+		up(&efivars_lock);
+
+	if (status && status != EFI_BUFFER_TOO_SMALL)
+		return efi_status_to_err(status);
+
+	return 0;
+
+out:
+	up(&efivars_lock);
+	return err;
+}
+EXPORT_SYMBOL_GPL(efivar_entry_set_get_size);
+
+/**
+ * efivar_entry_iter_begin - begin iterating the variable list
+ *
+ * Lock the variable list to prevent entry insertion and removal until
+ * efivar_entry_iter_end() is called. This function is usually used in
+ * conjunction with __efivar_entry_iter() or efivar_entry_iter().
+ *
+ * Returns 0 on success, or -EINTR if acquiring the lock was interrupted.
+ */
+int efivar_entry_iter_begin(void)
+{
+	return down_interruptible(&efivars_lock);
+}
+EXPORT_SYMBOL_GPL(efivar_entry_iter_begin);
+
+/**
+ * efivar_entry_iter_end - finish iterating the variable list
+ *
+ * Unlock the variable list and allow modifications to the list again.
+ */
+void efivar_entry_iter_end(void)
+{
+	up(&efivars_lock);
+}
+EXPORT_SYMBOL_GPL(efivar_entry_iter_end);
+
+/**
+ * __efivar_entry_iter - iterate over variable list
+ * @func: callback function
+ * @head: head of the variable list
+ * @data: function-specific data to pass to callback
+ * @prev: entry to begin iterating from
+ *
+ * Iterate over the list of EFI variables and call @func with every
+ * entry on the list. It is safe for @func to remove entries in the
+ * list via efivar_entry_delete().
+ *
+ * You MUST call efivar_entry_iter_begin() before this function, and
+ * efivar_entry_iter_end() afterwards.
+ *
+ * It is possible to begin iteration from an arbitrary entry within
+ * the list by passing @prev. @prev is updated on return to point to
+ * the last entry passed to @func. To begin iterating from the
+ * beginning of the list @prev must be %NULL.
+ *
+ * The restrictions for @func are the same as documented for
+ * efivar_entry_iter().
+ */
+int __efivar_entry_iter(int (*func)(struct efivar_entry *, void *),
+			struct list_head *head, void *data,
+			struct efivar_entry **prev)
+{
+	struct efivar_entry *entry, *n;
+	int err = 0;
+
+	if (!prev || !*prev) {
+		list_for_each_entry_safe(entry, n, head, list) {
+			err = func(entry, data);
+			if (err)
+				break;
+		}
+
+		if (prev)
+			*prev = entry;
+
+		return err;
+	}
+
+	list_for_each_entry_safe_continue((*prev), n, head, list) {
+		err = func(*prev, data);
+		if (err)
+			break;
+	}
+
+	return err;
+}
+EXPORT_SYMBOL_GPL(__efivar_entry_iter);
+
+/**
+ * efivar_entry_iter - iterate over variable list
+ * @func: callback function
+ * @head: head of variable list
+ * @data: function-specific data to pass to callback
+ *
+ * Iterate over the list of EFI variables and call @func with every
+ * entry on the list. It is safe for @func to remove entries in the
+ * list via efivar_entry_delete() while iterating.
+ *
+ * Some notes for the callback function:
+ *  - a non-zero return value indicates an error and terminates the loop
+ *  - @func is called from atomic context
+ */
+int efivar_entry_iter(int (*func)(struct efivar_entry *, void *),
+		      struct list_head *head, void *data)
+{
+	int err = 0;
+
+	err = efivar_entry_iter_begin();
+	if (err)
+		return err;
+	err = __efivar_entry_iter(func, head, data, NULL);
+	efivar_entry_iter_end();
+
+	return err;
+}
+EXPORT_SYMBOL_GPL(efivar_entry_iter);
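+
+/*
+ * Editor's iteration sketch (count_entries and my_list are hypothetical):
+ *
+ *	static int count_entries(struct efivar_entry *entry, void *data)
+ *	{
+ *		unsigned long *count = data;
+ *
+ *		(*count)++;
+ *		return 0;	// non-zero would terminate the walk
+ *	}
+ *
+ *	unsigned long count = 0;
+ *	err = efivar_entry_iter(count_entries, &my_list, &count);
+ */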
+
+/**
+ * efivars_kobject - get the kobject for the registered efivars
+ *
+ * If efivars_register() has not been called we return NULL,
+ * otherwise return the kobject used at registration time.
+ */
+struct kobject *efivars_kobject(void)
+{
+	if (!__efivars)
+		return NULL;
+
+	return __efivars->kobject;
+}
+EXPORT_SYMBOL_GPL(efivars_kobject);
+
+/**
+ * efivar_run_worker - schedule the efivar worker thread
+ */
+void efivar_run_worker(void)
+{
+	if (efivar_wq_enabled)
+		schedule_work(&efivar_work);
+}
+EXPORT_SYMBOL_GPL(efivar_run_worker);
+
+/**
+ * efivars_register - register an efivars
+ * @efivars: efivars to register
+ * @ops: efivars operations
+ * @kobject: @efivars-specific kobject
+ *
+ * Only a single efivars can be registered at any time.
+ */
+int efivars_register(struct efivars *efivars,
+		     const struct efivar_operations *ops,
+		     struct kobject *kobject)
+{
+	if (down_interruptible(&efivars_lock))
+		return -EINTR;
+
+	efivars->ops = ops;
+	efivars->kobject = kobject;
+
+	__efivars = efivars;
+
+	pr_info("Registered efivars operations\n");
+
+	up(&efivars_lock);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(efivars_register);
+
+/**
+ * efivars_unregister - unregister an efivars
+ * @efivars: efivars to unregister
+ *
+ * The caller must have already removed every entry from the list;
+ * failure to do so is an error.
+ */
+int efivars_unregister(struct efivars *efivars)
+{
+	int rv;
+
+	if (down_interruptible(&efivars_lock))
+		return -EINTR;
+
+	if (!__efivars) {
+		printk(KERN_ERR "efivars not registered\n");
+		rv = -EINVAL;
+		goto out;
+	}
+
+	if (__efivars != efivars) {
+		rv = -EINVAL;
+		goto out;
+	}
+
+	pr_info("Unregistered efivars operations\n");
+	__efivars = NULL;
+
+	rv = 0;
+out:
+	up(&efivars_lock);
+	return rv;
+}
+EXPORT_SYMBOL_GPL(efivars_unregister);
diff --git a/drivers/firmware/google/Kconfig b/drivers/firmware/google/Kconfig
new file mode 100644
index 0000000..a456a00
--- /dev/null
+++ b/drivers/firmware/google/Kconfig
@@ -0,0 +1,82 @@
+menuconfig GOOGLE_FIRMWARE
+	bool "Google Firmware Drivers"
+	default n
+	help
+	  These firmware drivers are used by Google's servers.  They are
+	  only useful if you are working directly on one of their
+	  proprietary servers.  If in doubt, say "N".
+
+if GOOGLE_FIRMWARE
+
+config GOOGLE_SMI
+	tristate "SMI interface for Google platforms"
+	depends on X86 && ACPI && DMI && EFI
+	select EFI_VARS
+	help
+	  Say Y here if you want to enable SMI callbacks for Google
+	  platforms.  This provides an interface for writing to and
+	  clearing the EFI event log and reading and writing NVRAM
+	  variables.
+
+config GOOGLE_COREBOOT_TABLE
+	tristate
+	depends on GOOGLE_COREBOOT_TABLE_ACPI || GOOGLE_COREBOOT_TABLE_OF
+
+config GOOGLE_COREBOOT_TABLE_ACPI
+	tristate "Coreboot Table Access - ACPI"
+	depends on ACPI
+	select GOOGLE_COREBOOT_TABLE
+	help
+	  This option enables the coreboot_table module, which provides other
+	  firmware modules with access to the coreboot table. The coreboot
+	  table pointer is accessed through the ACPI "GOOGCB00" object.
+	  If unsure say N.
+
+config GOOGLE_COREBOOT_TABLE_OF
+	tristate "Coreboot Table Access - Device Tree"
+	depends on OF
+	select GOOGLE_COREBOOT_TABLE
+	help
+	  This option enables the coreboot_table module, which provides other
+	  firmware modules with access to the coreboot table. The coreboot
+	  table pointer is accessed through the device tree node
+	  /firmware/coreboot.
+	  If unsure say N.
+
+config GOOGLE_MEMCONSOLE
+	tristate
+	depends on GOOGLE_MEMCONSOLE_X86_LEGACY || GOOGLE_MEMCONSOLE_COREBOOT
+
+config GOOGLE_MEMCONSOLE_X86_LEGACY
+	tristate "Firmware Memory Console - X86 Legacy support"
+	depends on X86 && ACPI && DMI
+	select GOOGLE_MEMCONSOLE
+	help
+	  This option enables the kernel to search for a firmware log in
+	  the EBDA on Google servers.  If found, this log is exported to
+	  userland in the file /sys/firmware/log.
+
+config GOOGLE_FRAMEBUFFER_COREBOOT
+	tristate "Coreboot Framebuffer"
+	depends on FB_SIMPLE
+	depends on GOOGLE_COREBOOT_TABLE
+	help
+	  This option enables the kernel to search for a framebuffer in
+	  the coreboot table.  If found, it is registered with simplefb.
+
+config GOOGLE_MEMCONSOLE_COREBOOT
+	tristate "Firmware Memory Console"
+	depends on GOOGLE_COREBOOT_TABLE
+	select GOOGLE_MEMCONSOLE
+	help
+	  This option enables the kernel to search for a firmware log in
+	  the coreboot table.  If found, this log is exported to userland
+	  in the file /sys/firmware/log.
+
+config GOOGLE_VPD
+	tristate "Vital Product Data"
+	depends on GOOGLE_COREBOOT_TABLE
+	help
+	  This option enables the kernel to expose the content of Google VPD
+	  under /sys/firmware/vpd.
+
+endif # GOOGLE_FIRMWARE
diff --git a/drivers/firmware/google/Makefile b/drivers/firmware/google/Makefile
new file mode 100644
index 0000000..d0b3fba
--- /dev/null
+++ b/drivers/firmware/google/Makefile
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-2.0
+
+obj-$(CONFIG_GOOGLE_SMI)		+= gsmi.o
+obj-$(CONFIG_GOOGLE_COREBOOT_TABLE)        += coreboot_table.o
+obj-$(CONFIG_GOOGLE_COREBOOT_TABLE_ACPI)   += coreboot_table-acpi.o
+obj-$(CONFIG_GOOGLE_COREBOOT_TABLE_OF)     += coreboot_table-of.o
+obj-$(CONFIG_GOOGLE_FRAMEBUFFER_COREBOOT)  += framebuffer-coreboot.o
+obj-$(CONFIG_GOOGLE_MEMCONSOLE)            += memconsole.o
+obj-$(CONFIG_GOOGLE_MEMCONSOLE_COREBOOT)   += memconsole-coreboot.o
+obj-$(CONFIG_GOOGLE_MEMCONSOLE_X86_LEGACY) += memconsole-x86-legacy.o
+
+vpd-sysfs-y := vpd.o vpd_decode.o
+obj-$(CONFIG_GOOGLE_VPD)		+= vpd-sysfs.o
diff --git a/drivers/firmware/google/coreboot_table-acpi.c b/drivers/firmware/google/coreboot_table-acpi.c
new file mode 100644
index 0000000..77197fe
--- /dev/null
+++ b/drivers/firmware/google/coreboot_table-acpi.c
@@ -0,0 +1,88 @@
+/*
+ * coreboot_table-acpi.c
+ *
+ * Using ACPI to locate the coreboot table and provide access to it.
+ *
+ * Copyright 2017 Google Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License v2.0 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/acpi.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+
+#include "coreboot_table.h"
+
+static int coreboot_table_acpi_probe(struct platform_device *pdev)
+{
+	phys_addr_t phyaddr;
+	resource_size_t len;
+	struct coreboot_table_header __iomem *header = NULL;
+	struct resource *res;
+	void __iomem *ptr = NULL;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!res)
+		return -EINVAL;
+
+	len = resource_size(res);
+	if (!res->start || !len)
+		return -EINVAL;
+
+	phyaddr = res->start;
+	header = ioremap_cache(phyaddr, sizeof(*header));
+	if (header == NULL)
+		return -ENOMEM;
+
+	ptr = ioremap_cache(phyaddr,
+			    header->header_bytes + header->table_bytes);
+	iounmap(header);
+	if (!ptr)
+		return -ENOMEM;
+
+	return coreboot_table_init(&pdev->dev, ptr);
+}
+
+static int coreboot_table_acpi_remove(struct platform_device *pdev)
+{
+	return coreboot_table_exit();
+}
+
+static const struct acpi_device_id cros_coreboot_acpi_match[] = {
+	{ "GOOGCB00", 0 },
+	{ "BOOT0000", 0 },
+	{ }
+};
+MODULE_DEVICE_TABLE(acpi, cros_coreboot_acpi_match);
+
+static struct platform_driver coreboot_table_acpi_driver = {
+	.probe = coreboot_table_acpi_probe,
+	.remove = coreboot_table_acpi_remove,
+	.driver = {
+		.name = "coreboot_table_acpi",
+		.acpi_match_table = ACPI_PTR(cros_coreboot_acpi_match),
+	},
+};
+
+static int __init coreboot_table_acpi_init(void)
+{
+	return platform_driver_register(&coreboot_table_acpi_driver);
+}
+
+module_init(coreboot_table_acpi_init);
+
+MODULE_AUTHOR("Google, Inc.");
+MODULE_LICENSE("GPL");
diff --git a/drivers/firmware/google/coreboot_table-of.c b/drivers/firmware/google/coreboot_table-of.c
new file mode 100644
index 0000000..f15bf40
--- /dev/null
+++ b/drivers/firmware/google/coreboot_table-of.c
@@ -0,0 +1,82 @@
+/*
+ * coreboot_table-of.c
+ *
+ * Coreboot table access through open firmware.
+ *
+ * Copyright 2017 Google Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License v2.0 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/device.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+
+#include "coreboot_table.h"
+
+static int coreboot_table_of_probe(struct platform_device *pdev)
+{
+	struct device_node *fw_dn = pdev->dev.of_node;
+	void __iomem *ptr;
+
+	ptr = of_iomap(fw_dn, 0);
+	of_node_put(fw_dn);
+	if (!ptr)
+		return -ENOMEM;
+
+	return coreboot_table_init(&pdev->dev, ptr);
+}
+
+static int coreboot_table_of_remove(struct platform_device *pdev)
+{
+	return coreboot_table_exit();
+}
+
+static const struct of_device_id coreboot_of_match[] = {
+	{ .compatible = "coreboot" },
+	{},
+};
+
+static struct platform_driver coreboot_table_of_driver = {
+	.probe = coreboot_table_of_probe,
+	.remove = coreboot_table_of_remove,
+	.driver = {
+		.name = "coreboot_table_of",
+		.of_match_table = coreboot_of_match,
+	},
+};
+
+static int __init platform_coreboot_table_of_init(void)
+{
+	struct platform_device *pdev;
+	struct device_node *of_node;
+
+	/* Limit device creation to the presence of the /firmware/coreboot node */
+	of_node = of_find_node_by_path("/firmware/coreboot");
+	if (!of_node)
+		return -ENODEV;
+
+	if (!of_match_node(coreboot_of_match, of_node))
+		return -ENODEV;
+
+	pdev = of_platform_device_create(of_node, "coreboot_table_of", NULL);
+	if (!pdev)
+		return -ENODEV;
+
+	return platform_driver_register(&coreboot_table_of_driver);
+}
+
+module_init(platform_coreboot_table_of_init);
+
+MODULE_AUTHOR("Google, Inc.");
+MODULE_LICENSE("GPL");
diff --git a/drivers/firmware/google/coreboot_table.c b/drivers/firmware/google/coreboot_table.c
new file mode 100644
index 0000000..898bb9a
--- /dev/null
+++ b/drivers/firmware/google/coreboot_table.c
@@ -0,0 +1,159 @@
+/*
+ * coreboot_table.c
+ *
+ * Module providing coreboot table access.
+ *
+ * Copyright 2017 Google Inc.
+ * Copyright 2017 Samuel Holland <samuel@sholland.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License v2.0 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+
+#include "coreboot_table.h"
+
+#define CB_DEV(d) container_of(d, struct coreboot_device, dev)
+#define CB_DRV(d) container_of(d, struct coreboot_driver, drv)
+
+static struct coreboot_table_header __iomem *ptr_header;
+
+static int coreboot_bus_match(struct device *dev, struct device_driver *drv)
+{
+	struct coreboot_device *device = CB_DEV(dev);
+	struct coreboot_driver *driver = CB_DRV(drv);
+
+	return device->entry.tag == driver->tag;
+}
+
+static int coreboot_bus_probe(struct device *dev)
+{
+	int ret = -ENODEV;
+	struct coreboot_device *device = CB_DEV(dev);
+	struct coreboot_driver *driver = CB_DRV(dev->driver);
+
+	if (driver->probe)
+		ret = driver->probe(device);
+
+	return ret;
+}
+
+static int coreboot_bus_remove(struct device *dev)
+{
+	int ret = 0;
+	struct coreboot_device *device = CB_DEV(dev);
+	struct coreboot_driver *driver = CB_DRV(dev->driver);
+
+	if (driver->remove)
+		ret = driver->remove(device);
+
+	return ret;
+}
+
+static struct bus_type coreboot_bus_type = {
+	.name		= "coreboot",
+	.match		= coreboot_bus_match,
+	.probe		= coreboot_bus_probe,
+	.remove		= coreboot_bus_remove,
+};
+
+static int __init coreboot_bus_init(void)
+{
+	return bus_register(&coreboot_bus_type);
+}
+module_init(coreboot_bus_init);
+
+static void coreboot_device_release(struct device *dev)
+{
+	struct coreboot_device *device = CB_DEV(dev);
+
+	kfree(device);
+}
+
+int coreboot_driver_register(struct coreboot_driver *driver)
+{
+	driver->drv.bus = &coreboot_bus_type;
+
+	return driver_register(&driver->drv);
+}
+EXPORT_SYMBOL(coreboot_driver_register);
+
+void coreboot_driver_unregister(struct coreboot_driver *driver)
+{
+	driver_unregister(&driver->drv);
+}
+EXPORT_SYMBOL(coreboot_driver_unregister);
+
+int coreboot_table_init(struct device *dev, void __iomem *ptr)
+{
+	int i, ret;
+	void *ptr_entry;
+	struct coreboot_device *device;
+	struct coreboot_table_entry entry;
+	struct coreboot_table_header header;
+
+	ptr_header = ptr;
+	memcpy_fromio(&header, ptr_header, sizeof(header));
+
+	if (strncmp(header.signature, "LBIO", sizeof(header.signature))) {
+		pr_warn("coreboot_table: coreboot table missing or corrupt!\n");
+		ret = -ENODEV;
+		goto out;
+	}
+
+	ptr_entry = (void *)ptr_header + header.header_bytes;
+	for (i = 0; i < header.table_entries; i++) {
+		memcpy_fromio(&entry, ptr_entry, sizeof(entry));
+
+		device = kzalloc(sizeof(struct device) + entry.size, GFP_KERNEL);
+		if (!device) {
+			ret = -ENOMEM;
+			break;
+		}
+
+		dev_set_name(&device->dev, "coreboot%d", i);
+		device->dev.parent = dev;
+		device->dev.bus = &coreboot_bus_type;
+		device->dev.release = coreboot_device_release;
+		memcpy_fromio(&device->entry, ptr_entry, entry.size);
+
+		ret = device_register(&device->dev);
+		if (ret) {
+			put_device(&device->dev);
+			break;
+		}
+
+		ptr_entry += entry.size;
+	}
+out:
+	iounmap(ptr);
+	return ret;
+}
+EXPORT_SYMBOL(coreboot_table_init);
+
+int coreboot_table_exit(void)
+{
+	if (ptr_header) {
+		bus_unregister(&coreboot_bus_type);
+		ptr_header = NULL;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL(coreboot_table_exit);
+
+MODULE_AUTHOR("Google, Inc.");
+MODULE_LICENSE("GPL");
diff --git a/drivers/firmware/google/coreboot_table.h b/drivers/firmware/google/coreboot_table.h
new file mode 100644
index 0000000..8ad95a9
--- /dev/null
+++ b/drivers/firmware/google/coreboot_table.h
@@ -0,0 +1,100 @@
+/*
+ * coreboot_table.h
+ *
+ * Internal header for coreboot table access.
+ *
+ * Copyright 2014 Gerd Hoffmann <kraxel@redhat.com>
+ * Copyright 2017 Google Inc.
+ * Copyright 2017 Samuel Holland <samuel@sholland.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License v2.0 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __COREBOOT_TABLE_H
+#define __COREBOOT_TABLE_H
+
+#include <linux/io.h>
+
+/* Coreboot table header structure */
+struct coreboot_table_header {
+	char signature[4];
+	u32 header_bytes;
+	u32 header_checksum;
+	u32 table_bytes;
+	u32 table_checksum;
+	u32 table_entries;
+};
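+
+/*
+ * Editor's note: as consumed by coreboot_table_init(), the table is laid
+ * out in memory as
+ *
+ *	header ("LBIO", header_bytes, ...)
+ *	entry 0: { tag, size, payload of size - 8 bytes }
+ *	entry 1: { tag, size, ... }
+ *	...
+ *	entry table_entries - 1
+ *
+ * where header_bytes is the offset of entry 0 from the header, and each
+ * entry's size field includes its own tag/size pair.
+ */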
+
+/* List of the coreboot entry structures that are used */
+/* Generic */
+struct coreboot_table_entry {
+	u32 tag;
+	u32 size;
+};
+
+/* Points to a CBMEM entry */
+struct lb_cbmem_ref {
+	u32 tag;
+	u32 size;
+
+	u64 cbmem_addr;
+};
+
+/* Describes framebuffer setup by coreboot */
+struct lb_framebuffer {
+	u32 tag;
+	u32 size;
+
+	u64 physical_address;
+	u32 x_resolution;
+	u32 y_resolution;
+	u32 bytes_per_line;
+	u8  bits_per_pixel;
+	u8  red_mask_pos;
+	u8  red_mask_size;
+	u8  green_mask_pos;
+	u8  green_mask_size;
+	u8  blue_mask_pos;
+	u8  blue_mask_size;
+	u8  reserved_mask_pos;
+	u8  reserved_mask_size;
+};
+
+/* A device, together with the coreboot table entry that describes it. */
+struct coreboot_device {
+	struct device dev;
+	union {
+		struct coreboot_table_entry entry;
+		struct lb_cbmem_ref cbmem_ref;
+		struct lb_framebuffer framebuffer;
+	};
+};
+
+/* A driver for handling devices described in coreboot tables. */
+struct coreboot_driver {
+	int (*probe)(struct coreboot_device *);
+	int (*remove)(struct coreboot_device *);
+	struct device_driver drv;
+	u32 tag;
+};
+
+/* Register a driver that uses the data from a coreboot table. */
+int coreboot_driver_register(struct coreboot_driver *driver);
+
+/* Unregister a driver that uses the data from a coreboot table. */
+void coreboot_driver_unregister(struct coreboot_driver *driver);
+
+/* Initialize coreboot table module given a pointer to iomem */
+int coreboot_table_init(struct device *dev, void __iomem *ptr);
+
+/* Cleanup coreboot table module */
+int coreboot_table_exit(void);
+
+#endif /* __COREBOOT_TABLE_H */
diff --git a/drivers/firmware/google/framebuffer-coreboot.c b/drivers/firmware/google/framebuffer-coreboot.c
new file mode 100644
index 0000000..b8b49c0
--- /dev/null
+++ b/drivers/firmware/google/framebuffer-coreboot.c
@@ -0,0 +1,115 @@
+/*
+ * framebuffer-coreboot.c
+ *
+ * Memory based framebuffer accessed through coreboot table.
+ *
+ * Copyright 2012-2013 David Herrmann <dh.herrmann@gmail.com>
+ * Copyright 2017 Google Inc.
+ * Copyright 2017 Samuel Holland <samuel@sholland.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License v2.0 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/platform_data/simplefb.h>
+#include <linux/platform_device.h>
+
+#include "coreboot_table.h"
+
+#define CB_TAG_FRAMEBUFFER 0x12
+
+static const struct simplefb_format formats[] = SIMPLEFB_FORMATS;
+
+static int framebuffer_probe(struct coreboot_device *dev)
+{
+	int i;
+	u32 length;
+	struct lb_framebuffer *fb = &dev->framebuffer;
+	struct platform_device *pdev;
+	struct resource res;
+	struct simplefb_platform_data pdata = {
+		.width = fb->x_resolution,
+		.height = fb->y_resolution,
+		.stride = fb->bytes_per_line,
+		.format = NULL,
+	};
+
+	for (i = 0; i < ARRAY_SIZE(formats); ++i) {
+		if (fb->bits_per_pixel     == formats[i].bits_per_pixel &&
+		    fb->red_mask_pos       == formats[i].red.offset &&
+		    fb->red_mask_size      == formats[i].red.length &&
+		    fb->green_mask_pos     == formats[i].green.offset &&
+		    fb->green_mask_size    == formats[i].green.length &&
+		    fb->blue_mask_pos      == formats[i].blue.offset &&
+		    fb->blue_mask_size     == formats[i].blue.length &&
+		    fb->reserved_mask_pos  == formats[i].transp.offset &&
+		    fb->reserved_mask_size == formats[i].transp.length)
+			pdata.format = formats[i].name;
+	}
+	if (!pdata.format)
+		return -ENODEV;
+
+	memset(&res, 0, sizeof(res));
+	res.flags = IORESOURCE_MEM | IORESOURCE_BUSY;
+	res.name = "Coreboot Framebuffer";
+	res.start = fb->physical_address;
+	length = PAGE_ALIGN(fb->y_resolution * fb->bytes_per_line);
+	res.end = res.start + length - 1;
+	if (res.end <= res.start)
+		return -EINVAL;
+
+	pdev = platform_device_register_resndata(&dev->dev,
+						 "simple-framebuffer", 0,
+						 &res, 1, &pdata,
+						 sizeof(pdata));
+	if (IS_ERR(pdev))
+		pr_warn("coreboot: could not register framebuffer\n");
+	else
+		dev_set_drvdata(&dev->dev, pdev);
+
+	return PTR_ERR_OR_ZERO(pdev);
+}
+
+static int framebuffer_remove(struct coreboot_device *dev)
+{
+	struct platform_device *pdev = dev_get_drvdata(&dev->dev);
+
+	platform_device_unregister(pdev);
+
+	return 0;
+}
+
+static struct coreboot_driver framebuffer_driver = {
+	.probe = framebuffer_probe,
+	.remove = framebuffer_remove,
+	.drv = {
+		.name = "framebuffer",
+	},
+	.tag = CB_TAG_FRAMEBUFFER,
+};
+
+static int __init coreboot_framebuffer_init(void)
+{
+	return coreboot_driver_register(&framebuffer_driver);
+}
+
+static void coreboot_framebuffer_exit(void)
+{
+	coreboot_driver_unregister(&framebuffer_driver);
+}
+
+module_init(coreboot_framebuffer_init);
+module_exit(coreboot_framebuffer_exit);
+
+MODULE_AUTHOR("Samuel Holland <samuel@sholland.org>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/firmware/google/gsmi.c b/drivers/firmware/google/gsmi.c
new file mode 100644
index 0000000..c8f169b
--- /dev/null
+++ b/drivers/firmware/google/gsmi.c
@@ -0,0 +1,941 @@
+/*
+ * Copyright 2010 Google Inc. All Rights Reserved.
+ * Author: dlaurie@google.com (Duncan Laurie)
+ *
+ * Re-worked to expose sysfs APIs by mikew@google.com (Mike Waychison)
+ *
+ * EFI SMI interface for Google platforms
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/spinlock.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmapool.h>
+#include <linux/fs.h>
+#include <linux/slab.h>
+#include <linux/ioctl.h>
+#include <linux/acpi.h>
+#include <linux/io.h>
+#include <linux/uaccess.h>
+#include <linux/dmi.h>
+#include <linux/kdebug.h>
+#include <linux/reboot.h>
+#include <linux/efi.h>
+#include <linux/module.h>
+#include <linux/ucs2_string.h>
+
+#define GSMI_SHUTDOWN_CLEAN	0	/* Clean Shutdown */
+/* TODO(mikew@google.com): Tie in HARDLOCKUP_DETECTOR with NMIWDT */
+#define GSMI_SHUTDOWN_NMIWDT	1	/* NMI Watchdog */
+#define GSMI_SHUTDOWN_PANIC	2	/* Panic */
+#define GSMI_SHUTDOWN_OOPS	3	/* Oops */
+#define GSMI_SHUTDOWN_DIE	4	/* Die -- No longer meaningful */
+#define GSMI_SHUTDOWN_MCE	5	/* Machine Check */
+#define GSMI_SHUTDOWN_SOFTWDT	6	/* Software Watchdog */
+#define GSMI_SHUTDOWN_MBE	7	/* Uncorrected ECC */
+#define GSMI_SHUTDOWN_TRIPLE	8	/* Triple Fault */
+
+#define DRIVER_VERSION		"1.0"
+#define GSMI_GUID_SIZE		16
+#define GSMI_BUF_SIZE		1024
+#define GSMI_BUF_ALIGN		sizeof(u64)
+#define GSMI_CALLBACK		0xef
+
+/* SMI return codes */
+#define GSMI_SUCCESS		0x00
+#define GSMI_UNSUPPORTED2	0x03
+#define GSMI_LOG_FULL		0x0b
+#define GSMI_VAR_NOT_FOUND	0x0e
+#define GSMI_HANDSHAKE_SPIN	0x7d
+#define GSMI_HANDSHAKE_CF	0x7e
+#define GSMI_HANDSHAKE_NONE	0x7f
+#define GSMI_INVALID_PARAMETER	0x82
+#define GSMI_UNSUPPORTED	0x83
+#define GSMI_BUFFER_TOO_SMALL	0x85
+#define GSMI_NOT_READY		0x86
+#define GSMI_DEVICE_ERROR	0x87
+#define GSMI_NOT_FOUND		0x8e
+
+#define QUIRKY_BOARD_HASH 0x78a30a50
+
+/* Internally used commands passed to the firmware */
+#define GSMI_CMD_GET_NVRAM_VAR		0x01
+#define GSMI_CMD_GET_NEXT_VAR		0x02
+#define GSMI_CMD_SET_NVRAM_VAR		0x03
+#define GSMI_CMD_SET_EVENT_LOG		0x08
+#define GSMI_CMD_CLEAR_EVENT_LOG	0x09
+#define GSMI_CMD_CLEAR_CONFIG		0x20
+#define GSMI_CMD_HANDSHAKE_TYPE		0xC1
+
+/* Magic entry type for kernel events */
+#define GSMI_LOG_ENTRY_TYPE_KERNEL     0xDEAD
+
+/* SMI buffers must be in 32bit physical address space */
+struct gsmi_buf {
+	u8 *start;			/* start of buffer */
+	size_t length;			/* length of buffer */
+	dma_addr_t handle;		/* dma allocation handle */
+	u32 address;			/* physical address of buffer */
+};
+
+struct gsmi_device {
+	struct platform_device *pdev;	/* platform device */
+	struct gsmi_buf *name_buf;	/* variable name buffer */
+	struct gsmi_buf *data_buf;	/* generic data buffer */
+	struct gsmi_buf *param_buf;	/* parameter buffer */
+	spinlock_t lock;		/* serialize access to SMIs */
+	u16 smi_cmd;			/* SMI command port */
+	int handshake_type;		/* firmware handler interlock type */
+	struct dma_pool *dma_pool;	/* DMA buffer pool */
+} gsmi_dev;
+
+/* Packed structures for communicating with the firmware */
+struct gsmi_nvram_var_param {
+	efi_guid_t	guid;
+	u32		name_ptr;
+	u32		attributes;
+	u32		data_len;
+	u32		data_ptr;
+} __packed;
+
+struct gsmi_get_next_var_param {
+	u8	guid[GSMI_GUID_SIZE];
+	u32	name_ptr;
+	u32	name_len;
+} __packed;
+
+struct gsmi_set_eventlog_param {
+	u32	data_ptr;
+	u32	data_len;
+	u32	type;
+} __packed;
+
+/* Event log formats */
+struct gsmi_log_entry_type_1 {
+	u16	type;
+	u32	instance;
+} __packed;
+
+
+/*
+ * Some platforms don't have an explicit SMI handshake
+ * and need to spin-wait for the SMI to complete.
+ */
+#define GSMI_DEFAULT_SPINCOUNT	0x10000
+static unsigned int spincount = GSMI_DEFAULT_SPINCOUNT;
+module_param(spincount, uint, 0600);
+MODULE_PARM_DESC(spincount,
+	"The number of loop iterations to use when using the spin handshake.");
+
+static struct gsmi_buf *gsmi_buf_alloc(void)
+{
+	struct gsmi_buf *smibuf;
+
+	smibuf = kzalloc(sizeof(*smibuf), GFP_KERNEL);
+	if (!smibuf) {
+		printk(KERN_ERR "gsmi: out of memory\n");
+		return NULL;
+	}
+
+	/* allocate buffer in 32bit address space */
+	smibuf->start = dma_pool_alloc(gsmi_dev.dma_pool, GFP_KERNEL,
+				       &smibuf->handle);
+	if (!smibuf->start) {
+		printk(KERN_ERR "gsmi: failed to allocate name buffer\n");
+		kfree(smibuf);
+		return NULL;
+	}
+
+	/* record the buffer length and its 32-bit physical address */
+	smibuf->length = GSMI_BUF_SIZE;
+	smibuf->address = (u32)virt_to_phys(smibuf->start);
+
+	return smibuf;
+}
+
+static void gsmi_buf_free(struct gsmi_buf *smibuf)
+{
+	if (smibuf) {
+		if (smibuf->start)
+			dma_pool_free(gsmi_dev.dma_pool, smibuf->start,
+				      smibuf->handle);
+		kfree(smibuf);
+	}
+}
+
+/*
+ * Make a call to gsmi func(sub).  GSMI error codes are translated to
+ * in-kernel errnos (0 on success, -ERRNO on error).
+ */
+static int gsmi_exec(u8 func, u8 sub)
+{
+	u16 cmd = (sub << 8) | func;
+	u16 result = 0;
+	int rc = 0;
+
+	/*
+	 * AH  : Subfunction number
+	 * AL  : Function number
+	 * EBX : Parameter block address
+	 * DX  : SMI command port
+	 *
+	 * Three protocols here. See also the comment in gsmi_init().
+	 */
+	if (gsmi_dev.handshake_type == GSMI_HANDSHAKE_CF) {
+		/*
+		 * If handshake_type == HANDSHAKE_CF then set CF on the
+		 * way in and wait for the handler to clear it; this avoids
+		 * corrupting register state on those chipsets which have
+		 * a delay between writing the SMI trigger register and
+		 * entering SMM.
+		 */
+		asm volatile (
+			"stc\n"
+			"outb %%al, %%dx\n"
+		"1:      jc 1b\n"
+			: "=a" (result)
+			: "0" (cmd),
+			  "d" (gsmi_dev.smi_cmd),
+			  "b" (gsmi_dev.param_buf->address)
+			: "memory", "cc"
+		);
+	} else if (gsmi_dev.handshake_type == GSMI_HANDSHAKE_SPIN) {
+		/*
+		 * If handshake_type == HANDSHAKE_SPIN we spin a
+		 * hundred-ish usecs to ensure the SMI has triggered.
+		 */
+		asm volatile (
+			"outb %%al, %%dx\n"
+		"1:      loop 1b\n"
+			: "=a" (result)
+			: "0" (cmd),
+			  "d" (gsmi_dev.smi_cmd),
+			  "b" (gsmi_dev.param_buf->address),
+			  "c" (spincount)
+			: "memory", "cc"
+		);
+	} else {
+		/*
+		 * If handshake_type == HANDSHAKE_NONE we do nothing;
+		 * either we don't need to or it's legacy firmware that
+		 * doesn't understand the CF protocol.
+		 */
+		asm volatile (
+			"outb %%al, %%dx\n\t"
+			: "=a" (result)
+			: "0" (cmd),
+			  "d" (gsmi_dev.smi_cmd),
+			  "b" (gsmi_dev.param_buf->address)
+			: "memory", "cc"
+		);
+	}
+
+	/* check return code from SMI handler */
+	switch (result) {
+	case GSMI_SUCCESS:
+		break;
+	case GSMI_VAR_NOT_FOUND:
+		/* not really an error, but let the caller know */
+		rc = 1;
+		break;
+	case GSMI_INVALID_PARAMETER:
+		printk(KERN_ERR "gsmi: exec 0x%04x: Invalid parameter\n", cmd);
+		rc = -EINVAL;
+		break;
+	case GSMI_BUFFER_TOO_SMALL:
+		printk(KERN_ERR "gsmi: exec 0x%04x: Buffer too small\n", cmd);
+		rc = -ENOMEM;
+		break;
+	case GSMI_UNSUPPORTED:
+	case GSMI_UNSUPPORTED2:
+		if (sub != GSMI_CMD_HANDSHAKE_TYPE)
+			printk(KERN_ERR "gsmi: exec 0x%04x: Not supported\n",
+			       cmd);
+		rc = -ENOSYS;
+		break;
+	case GSMI_NOT_READY:
+		printk(KERN_ERR "gsmi: exec 0x%04x: Not ready\n", cmd);
+		rc = -EBUSY;
+		break;
+	case GSMI_DEVICE_ERROR:
+		printk(KERN_ERR "gsmi: exec 0x%04x: Device error\n", cmd);
+		rc = -EFAULT;
+		break;
+	case GSMI_NOT_FOUND:
+		printk(KERN_ERR "gsmi: exec 0x%04x: Data not found\n", cmd);
+		rc = -ENOENT;
+		break;
+	case GSMI_LOG_FULL:
+		printk(KERN_ERR "gsmi: exec 0x%04x: Log full\n", cmd);
+		rc = -ENOSPC;
+		break;
+	case GSMI_HANDSHAKE_CF:
+	case GSMI_HANDSHAKE_SPIN:
+	case GSMI_HANDSHAKE_NONE:
+		rc = result;
+		break;
+	default:
+		printk(KERN_ERR "gsmi: exec 0x%04x: Unknown error 0x%04x\n",
+		       cmd, result);
+		rc = -ENXIO;
+	}
+
+	return rc;
+}
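+
+/*
+ * Editor's note: callers below invoke this as, e.g.,
+ *
+ *	rc = gsmi_exec(GSMI_CALLBACK, GSMI_CMD_GET_NVRAM_VAR);
+ *
+ * after staging the packed parameter block in gsmi_dev.param_buf; rc == 1
+ * is the special "variable not found" case, negative values are errnos.
+ */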
+
+static efi_status_t gsmi_get_variable(efi_char16_t *name,
+				      efi_guid_t *vendor, u32 *attr,
+				      unsigned long *data_size,
+				      void *data)
+{
+	struct gsmi_nvram_var_param param = {
+		.name_ptr = gsmi_dev.name_buf->address,
+		.data_ptr = gsmi_dev.data_buf->address,
+		.data_len = (u32)*data_size,
+	};
+	efi_status_t ret = EFI_SUCCESS;
+	unsigned long flags;
+	size_t name_len = ucs2_strnlen(name, GSMI_BUF_SIZE / 2);
+	int rc;
+
+	if (name_len >= GSMI_BUF_SIZE / 2)
+		return EFI_BAD_BUFFER_SIZE;
+
+	spin_lock_irqsave(&gsmi_dev.lock, flags);
+
+	/* Vendor guid */
+	memcpy(&param.guid, vendor, sizeof(param.guid));
+
+	/* variable name, already in UTF-16 */
+	memset(gsmi_dev.name_buf->start, 0, gsmi_dev.name_buf->length);
+	memcpy(gsmi_dev.name_buf->start, name, name_len * 2);
+
+	/* data pointer */
+	memset(gsmi_dev.data_buf->start, 0, gsmi_dev.data_buf->length);
+
+	/* parameter buffer */
+	memset(gsmi_dev.param_buf->start, 0, gsmi_dev.param_buf->length);
+	memcpy(gsmi_dev.param_buf->start, &param, sizeof(param));
+
+	rc = gsmi_exec(GSMI_CALLBACK, GSMI_CMD_GET_NVRAM_VAR);
+	if (rc < 0) {
+		printk(KERN_ERR "gsmi: Get Variable failed\n");
+		ret = EFI_LOAD_ERROR;
+	} else if (rc == 1) {
+		/* variable was not found */
+		ret = EFI_NOT_FOUND;
+	} else {
+		/* Get the arguments back */
+		memcpy(&param, gsmi_dev.param_buf->start, sizeof(param));
+
+		/* The size reported is the min of all of our buffers */
+		*data_size = min_t(unsigned long, *data_size,
+						gsmi_dev.data_buf->length);
+		*data_size = min_t(unsigned long, *data_size, param.data_len);
+
+		/* Copy data back to return buffer. */
+		memcpy(data, gsmi_dev.data_buf->start, *data_size);
+
+		/* All variables have the following attributes */
+		*attr = EFI_VARIABLE_NON_VOLATILE |
+			EFI_VARIABLE_BOOTSERVICE_ACCESS |
+			EFI_VARIABLE_RUNTIME_ACCESS;
+	}
+
+	spin_unlock_irqrestore(&gsmi_dev.lock, flags);
+
+	return ret;
+}
+
+static efi_status_t gsmi_get_next_variable(unsigned long *name_size,
+					   efi_char16_t *name,
+					   efi_guid_t *vendor)
+{
+	struct gsmi_get_next_var_param param = {
+		.name_ptr = gsmi_dev.name_buf->address,
+		.name_len = gsmi_dev.name_buf->length,
+	};
+	efi_status_t ret = EFI_SUCCESS;
+	int rc;
+	unsigned long flags;
+
+	/* For the moment, only support buffers that exactly match in size */
+	if (*name_size != GSMI_BUF_SIZE)
+		return EFI_BAD_BUFFER_SIZE;
+
+	/* Let's make sure the thing is at least null-terminated */
+	if (ucs2_strnlen(name, GSMI_BUF_SIZE / 2) == GSMI_BUF_SIZE / 2)
+		return EFI_INVALID_PARAMETER;
+
+	spin_lock_irqsave(&gsmi_dev.lock, flags);
+
+	/* guid */
+	memcpy(&param.guid, vendor, sizeof(param.guid));
+
+	/* variable name, already in UTF-16 */
+	memcpy(gsmi_dev.name_buf->start, name, *name_size);
+
+	/* parameter buffer */
+	memset(gsmi_dev.param_buf->start, 0, gsmi_dev.param_buf->length);
+	memcpy(gsmi_dev.param_buf->start, &param, sizeof(param));
+
+	rc = gsmi_exec(GSMI_CALLBACK, GSMI_CMD_GET_NEXT_VAR);
+	if (rc < 0) {
+		printk(KERN_ERR "gsmi: Get Next Variable Name failed\n");
+		ret = EFI_LOAD_ERROR;
+	} else if (rc == 1) {
+		/* variable not found -- end of list */
+		ret = EFI_NOT_FOUND;
+	} else {
+		/* copy variable data back to return buffer */
+		memcpy(&param, gsmi_dev.param_buf->start, sizeof(param));
+
+		/* Copy the name back */
+		memcpy(name, gsmi_dev.name_buf->start, GSMI_BUF_SIZE);
+		*name_size = ucs2_strnlen(name, GSMI_BUF_SIZE / 2) * 2;
+
+		/* copy guid to return buffer */
+		memcpy(vendor, &param.guid, sizeof(param.guid));
+		ret = EFI_SUCCESS;
+	}
+
+	spin_unlock_irqrestore(&gsmi_dev.lock, flags);
+
+	return ret;
+}
+
+static efi_status_t gsmi_set_variable(efi_char16_t *name,
+				      efi_guid_t *vendor,
+				      u32 attr,
+				      unsigned long data_size,
+				      void *data)
+{
+	struct gsmi_nvram_var_param param = {
+		.name_ptr = gsmi_dev.name_buf->address,
+		.data_ptr = gsmi_dev.data_buf->address,
+		.data_len = (u32)data_size,
+		.attributes = EFI_VARIABLE_NON_VOLATILE |
+			      EFI_VARIABLE_BOOTSERVICE_ACCESS |
+			      EFI_VARIABLE_RUNTIME_ACCESS,
+	};
+	size_t name_len = ucs2_strnlen(name, GSMI_BUF_SIZE / 2);
+	efi_status_t ret = EFI_SUCCESS;
+	int rc;
+	unsigned long flags;
+
+	if (name_len >= GSMI_BUF_SIZE / 2)
+		return EFI_BAD_BUFFER_SIZE;
+
+	spin_lock_irqsave(&gsmi_dev.lock, flags);
+
+	/* guid */
+	memcpy(&param.guid, vendor, sizeof(param.guid));
+
+	/* variable name, already in UTF-16 */
+	memset(gsmi_dev.name_buf->start, 0, gsmi_dev.name_buf->length);
+	memcpy(gsmi_dev.name_buf->start, name, name_len * 2);
+
+	/* data pointer */
+	memset(gsmi_dev.data_buf->start, 0, gsmi_dev.data_buf->length);
+	memcpy(gsmi_dev.data_buf->start, data, data_size);
+
+	/* parameter buffer */
+	memset(gsmi_dev.param_buf->start, 0, gsmi_dev.param_buf->length);
+	memcpy(gsmi_dev.param_buf->start, &param, sizeof(param));
+
+	rc = gsmi_exec(GSMI_CALLBACK, GSMI_CMD_SET_NVRAM_VAR);
+	if (rc < 0) {
+		printk(KERN_ERR "gsmi: Set Variable failed\n");
+		ret = EFI_INVALID_PARAMETER;
+	}
+
+	spin_unlock_irqrestore(&gsmi_dev.lock, flags);
+
+	return ret;
+}
+
+static const struct efivar_operations efivar_ops = {
+	.get_variable = gsmi_get_variable,
+	.set_variable = gsmi_set_variable,
+	.get_next_variable = gsmi_get_next_variable,
+};
+
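+/*
+ * sysfs write handler for the append_to_eventlog node created below under
+ * /sys/firmware/gsmi/. The input is a binary blob: a leading u32 selects
+ * the event type and the remainder is the raw payload handed to the
+ * firmware, e.g. (illustrative only; type 1 assumed valid here):
+ *
+ *   printf '\x01\x00\x00\x00payload' > /sys/firmware/gsmi/append_to_eventlog
+ */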
+static ssize_t eventlog_write(struct file *filp, struct kobject *kobj,
+			       struct bin_attribute *bin_attr,
+			       char *buf, loff_t pos, size_t count)
+{
+	struct gsmi_set_eventlog_param param = {
+		.data_ptr = gsmi_dev.data_buf->address,
+	};
+	int rc = 0;
+	unsigned long flags;
+
+	/* Pull the type out */
+	if (count < sizeof(u32))
+		return -EINVAL;
+	param.type = *(u32 *)buf;
+	buf += sizeof(u32);
+
+	/*
+	 * The remaining buffer is the data payload; |count| still includes
+	 * the type word, so subtract it exactly once.
+	 */
+	if (count - sizeof(u32) > gsmi_dev.data_buf->length)
+		return -EINVAL;
+	param.data_len = count - sizeof(u32);
+
+	spin_lock_irqsave(&gsmi_dev.lock, flags);
+
+	/* data pointer */
+	memset(gsmi_dev.data_buf->start, 0, gsmi_dev.data_buf->length);
+	memcpy(gsmi_dev.data_buf->start, buf, param.data_len);
+
+	/* parameter buffer */
+	memset(gsmi_dev.param_buf->start, 0, gsmi_dev.param_buf->length);
+	memcpy(gsmi_dev.param_buf->start, &param, sizeof(param));
+
+	rc = gsmi_exec(GSMI_CALLBACK, GSMI_CMD_SET_EVENT_LOG);
+	if (rc < 0)
+		printk(KERN_ERR "gsmi: Set Event Log failed\n");
+
+	spin_unlock_irqrestore(&gsmi_dev.lock, flags);
+
+	/* A write handler must report bytes consumed, not zero, on success. */
+	return (rc < 0) ? rc : count;
+}
+
+static struct bin_attribute eventlog_bin_attr = {
+	.attr = {.name = "append_to_eventlog", .mode = 0200},
+	.write = eventlog_write,
+};
+
+static ssize_t gsmi_clear_eventlog_store(struct kobject *kobj,
+					 struct kobj_attribute *attr,
+					 const char *buf, size_t count)
+{
+	int rc;
+	unsigned long flags;
+	unsigned long val;
+	struct {
+		u32 percentage;
+		u32 data_type;
+	} param;
+
+	rc = kstrtoul(buf, 0, &val);
+	if (rc)
+		return rc;
+
+	/*
+	 * Value entered is a percentage, 0 through 100, anything else
+	 * is invalid.
+	 */
+	if (val > 100)
+		return -EINVAL;
+
+	/* data_type here selects the smbios event log. */
+	param.percentage = val;
+	param.data_type = 0;
+
+	spin_lock_irqsave(&gsmi_dev.lock, flags);
+
+	/* parameter buffer */
+	memset(gsmi_dev.param_buf->start, 0, gsmi_dev.param_buf->length);
+	memcpy(gsmi_dev.param_buf->start, &param, sizeof(param));
+
+	rc = gsmi_exec(GSMI_CALLBACK, GSMI_CMD_CLEAR_EVENT_LOG);
+
+	spin_unlock_irqrestore(&gsmi_dev.lock, flags);
+
+	if (rc)
+		return rc;
+	return count;
+}
+
+static struct kobj_attribute gsmi_clear_eventlog_attr = {
+	.attr = {.name = "clear_eventlog", .mode = 0200},
+	.store = gsmi_clear_eventlog_store,
+};
+
+static ssize_t gsmi_clear_config_store(struct kobject *kobj,
+				       struct kobj_attribute *attr,
+				       const char *buf, size_t count)
+{
+	int rc;
+	unsigned long flags;
+
+	spin_lock_irqsave(&gsmi_dev.lock, flags);
+
+	/* clear parameter buffer */
+	memset(gsmi_dev.param_buf->start, 0, gsmi_dev.param_buf->length);
+
+	rc = gsmi_exec(GSMI_CALLBACK, GSMI_CMD_CLEAR_CONFIG);
+
+	spin_unlock_irqrestore(&gsmi_dev.lock, flags);
+
+	if (rc)
+		return rc;
+	return count;
+}
+
+static struct kobj_attribute gsmi_clear_config_attr = {
+	.attr = {.name = "clear_config", .mode = 0200},
+	.store = gsmi_clear_config_store,
+};
+
+static const struct attribute *gsmi_attrs[] = {
+	&gsmi_clear_config_attr.attr,
+	&gsmi_clear_eventlog_attr.attr,
+	NULL,
+};
+
+static int gsmi_shutdown_reason(int reason)
+{
+	struct gsmi_log_entry_type_1 entry = {
+		.type     = GSMI_LOG_ENTRY_TYPE_KERNEL,
+		.instance = reason,
+	};
+	struct gsmi_set_eventlog_param param = {
+		.data_len = sizeof(entry),
+		.type     = 1,
+	};
+	static int saved_reason;
+	int rc = 0;
+	unsigned long flags;
+
+	/* avoid duplicate entries in the log */
+	if (saved_reason & (1 << reason))
+		return 0;
+
+	spin_lock_irqsave(&gsmi_dev.lock, flags);
+
+	saved_reason |= (1 << reason);
+
+	/* data pointer */
+	memset(gsmi_dev.data_buf->start, 0, gsmi_dev.data_buf->length);
+	memcpy(gsmi_dev.data_buf->start, &entry, sizeof(entry));
+
+	/* parameter buffer */
+	param.data_ptr = gsmi_dev.data_buf->address;
+	memset(gsmi_dev.param_buf->start, 0, gsmi_dev.param_buf->length);
+	memcpy(gsmi_dev.param_buf->start, &param, sizeof(param));
+
+	rc = gsmi_exec(GSMI_CALLBACK, GSMI_CMD_SET_EVENT_LOG);
+
+	spin_unlock_irqrestore(&gsmi_dev.lock, flags);
+
+	if (rc < 0)
+		printk(KERN_ERR "gsmi: Log Shutdown Reason failed\n");
+	else
+		printk(KERN_EMERG "gsmi: Log Shutdown Reason 0x%02x\n",
+		       reason);
+
+	return rc;
+}
+
+static int gsmi_reboot_callback(struct notifier_block *nb,
+				unsigned long reason, void *arg)
+{
+	gsmi_shutdown_reason(GSMI_SHUTDOWN_CLEAN);
+	return NOTIFY_DONE;
+}
+
+static struct notifier_block gsmi_reboot_notifier = {
+	.notifier_call = gsmi_reboot_callback
+};
+
+static int gsmi_die_callback(struct notifier_block *nb,
+			     unsigned long reason, void *arg)
+{
+	if (reason == DIE_OOPS)
+		gsmi_shutdown_reason(GSMI_SHUTDOWN_OOPS);
+	return NOTIFY_DONE;
+}
+
+static struct notifier_block gsmi_die_notifier = {
+	.notifier_call = gsmi_die_callback
+};
+
+static int gsmi_panic_callback(struct notifier_block *nb,
+			       unsigned long reason, void *arg)
+{
+	gsmi_shutdown_reason(GSMI_SHUTDOWN_PANIC);
+	return NOTIFY_DONE;
+}
+
+static struct notifier_block gsmi_panic_notifier = {
+	.notifier_call = gsmi_panic_callback,
+};
+
+/*
+ * This hash function was blatantly copied from include/linux/hash.h.
+ * It is used by this driver to obfuscate a board name that requires a
+ * quirk within this driver.
+ *
+ * Please do not remove this copy of the function as any changes to the
+ * global utility hash_64() function would break this driver's ability
+ * to identify a board and provide the appropriate quirk -- mikew@google.com
+ */
+static u64 __init local_hash_64(u64 val, unsigned bits)
+{
+	u64 hash = val;
+
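+	/*
+	 * The shift/add chain below appears to multiply by the historical
+	 * GOLDEN_RATIO_PRIME_64 constant (0x9e37fffffffc0001) without
+	 * requiring a 64-bit multiply instruction.
+	 */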
+	/*  Sigh, gcc can't optimise this alone like it does for 32 bits. */
+	u64 n = hash;
+	n <<= 18;
+	hash -= n;
+	n <<= 33;
+	hash -= n;
+	n <<= 3;
+	hash += n;
+	n <<= 3;
+	hash -= n;
+	n <<= 4;
+	hash += n;
+	n <<= 2;
+	hash += n;
+
+	/* High bits are more random, so use them. */
+	return hash >> (64 - bits);
+}
+
+static u32 __init hash_oem_table_id(char s[8])
+{
+	u64 input;
+	memcpy(&input, s, 8);
+	return local_hash_64(input, 32);
+}
+
+static const struct dmi_system_id gsmi_dmi_table[] __initconst = {
+	{
+		.ident = "Google Board",
+		.matches = {
+			DMI_MATCH(DMI_BOARD_VENDOR, "Google, Inc."),
+		},
+	},
+	{}
+};
+MODULE_DEVICE_TABLE(dmi, gsmi_dmi_table);
+
+static __init int gsmi_system_valid(void)
+{
+	u32 hash;
+
+	if (!dmi_check_system(gsmi_dmi_table))
+		return -ENODEV;
+
+	/*
+	 * Only newer firmware supports the gsmi interface.  All older
+	 * firmware that didn't support this interface used to plug the
+	 * table name in the first four bytes of the oem_table_id field.
+	 * Newer firmware doesn't do that though, so use that as the
+	 * discriminant factor.  We have to do this in order to
+	 * whitewash our board names out of the public driver.
+	 */
+	if (!strncmp(acpi_gbl_FADT.header.oem_table_id, "FACP", 4)) {
+		printk(KERN_INFO "gsmi: Board is too old\n");
+		return -ENODEV;
+	}
+
+	/* Disable on board with 1.0 BIOS due to Google bug 2602657 */
+	hash = hash_oem_table_id(acpi_gbl_FADT.header.oem_table_id);
+	if (hash == QUIRKY_BOARD_HASH) {
+		const char *bios_ver = dmi_get_system_info(DMI_BIOS_VERSION);
+		if (strncmp(bios_ver, "1.0", 3) == 0) {
+			pr_info("gsmi: disabled on this board's BIOS %s\n",
+				bios_ver);
+			return -ENODEV;
+		}
+	}
+
+	/* check for valid SMI command port in ACPI FADT */
+	if (acpi_gbl_FADT.smi_command == 0) {
+		pr_info("gsmi: missing smi_command\n");
+		return -ENODEV;
+	}
+
+	/* Found */
+	return 0;
+}
+
+static struct kobject *gsmi_kobj;
+static struct efivars efivars;
+
+static const struct platform_device_info gsmi_dev_info = {
+	.name		= "gsmi",
+	.id		= -1,
+	/* SMI callbacks require 32bit addresses */
+	.dma_mask	= DMA_BIT_MASK(32),
+};
+
+static __init int gsmi_init(void)
+{
+	unsigned long flags;
+	int ret;
+
+	ret = gsmi_system_valid();
+	if (ret)
+		return ret;
+
+	gsmi_dev.smi_cmd = acpi_gbl_FADT.smi_command;
+
+	/* register device */
+	gsmi_dev.pdev = platform_device_register_full(&gsmi_dev_info);
+	if (IS_ERR(gsmi_dev.pdev)) {
+		printk(KERN_ERR "gsmi: unable to register platform device\n");
+		return PTR_ERR(gsmi_dev.pdev);
+	}
+
+	/* SMI access needs to be serialized */
+	spin_lock_init(&gsmi_dev.lock);
+
+	ret = -ENOMEM;
+	gsmi_dev.dma_pool = dma_pool_create("gsmi", &gsmi_dev.pdev->dev,
+					     GSMI_BUF_SIZE, GSMI_BUF_ALIGN, 0);
+	if (!gsmi_dev.dma_pool)
+		goto out_err;
+
+	/*
+	 * Pre-allocate buffers now because we may be called from contexts
+	 * where allocation is impossible: oops, panic, die, MCE, etc.
+	 */
+	gsmi_dev.name_buf = gsmi_buf_alloc();
+	if (!gsmi_dev.name_buf) {
+		printk(KERN_ERR "gsmi: failed to allocate name buffer\n");
+		goto out_err;
+	}
+
+	gsmi_dev.data_buf = gsmi_buf_alloc();
+	if (!gsmi_dev.data_buf) {
+		printk(KERN_ERR "gsmi: failed to allocate data buffer\n");
+		goto out_err;
+	}
+
+	gsmi_dev.param_buf = gsmi_buf_alloc();
+	if (!gsmi_dev.param_buf) {
+		printk(KERN_ERR "gsmi: failed to allocate param buffer\n");
+		goto out_err;
+	}
+
+	/*
+	 * Determine type of handshake used to serialize the SMI
+	 * entry. See also gsmi_exec().
+	 *
+	 * There's a "behavior" present on some chipsets where writing the
+	 * SMI trigger register in the southbridge doesn't result in an
+	 * immediate SMI. Rather, the processor can execute "a few" more
+	 * instructions before the SMI takes effect. To ensure synchronous
+	 * behavior, implement a handshake between the kernel driver and the
+	 * firmware handler to spin until released. The GSMI_CMD_HANDSHAKE_TYPE
+	 * callback reports which handshake the firmware implements.
+	 *
+	 * NONE: The firmware handler does not implement any
+	 * handshake. Either it doesn't need to, or it's legacy firmware
+	 * that doesn't know it needs to and never will.
+	 *
+	 * CF: The firmware handler will clear the CF in the saved
+	 * state before returning. The driver may set the CF and test for
+	 * it to clear before proceeding.
+	 *
+	 * SPIN: The firmware handler does not implement any handshake
+	 * but the driver should spin for a hundred or so microseconds
+	 * to ensure the SMI has triggered.
+	 *
+	 * Finally, the handler will return -ENOSYS if
+	 * GSMI_CMD_HANDSHAKE_TYPE is unimplemented, which implies
+	 * HANDSHAKE_NONE.
+	 */
+	spin_lock_irqsave(&gsmi_dev.lock, flags);
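+	/*
+	 * Start from SPIN while probing: the handshake-type query itself
+	 * runs through gsmi_exec(), and spinning is harmless regardless of
+	 * which protocol the firmware actually implements.
+	 */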
+	gsmi_dev.handshake_type = GSMI_HANDSHAKE_SPIN;
+	gsmi_dev.handshake_type =
+	    gsmi_exec(GSMI_CALLBACK, GSMI_CMD_HANDSHAKE_TYPE);
+	if (gsmi_dev.handshake_type == -ENOSYS)
+		gsmi_dev.handshake_type = GSMI_HANDSHAKE_NONE;
+	spin_unlock_irqrestore(&gsmi_dev.lock, flags);
+
+	/* Remove and clean up gsmi if the handshake could not complete. */
+	if (gsmi_dev.handshake_type == -ENXIO) {
+		printk(KERN_INFO "gsmi version " DRIVER_VERSION
+		       " failed to load\n");
+		ret = -ENODEV;
+		goto out_err;
+	}
+
+	/* Register in the firmware directory */
+	ret = -ENOMEM;
+	gsmi_kobj = kobject_create_and_add("gsmi", firmware_kobj);
+	if (!gsmi_kobj) {
+		printk(KERN_INFO "gsmi: Failed to create firmware kobj\n");
+		goto out_err;
+	}
+
+	/* Setup eventlog access */
+	ret = sysfs_create_bin_file(gsmi_kobj, &eventlog_bin_attr);
+	if (ret) {
+		printk(KERN_INFO "gsmi: Failed to setup eventlog\n");
+		goto out_err;
+	}
+
+	/* Other attributes */
+	ret = sysfs_create_files(gsmi_kobj, gsmi_attrs);
+	if (ret) {
+		printk(KERN_INFO "gsmi: Failed to add attrs\n");
+		goto out_remove_bin_file;
+	}
+
+	ret = efivars_register(&efivars, &efivar_ops, gsmi_kobj);
+	if (ret) {
+		printk(KERN_INFO "gsmi: Failed to register efivars\n");
+		goto out_remove_sysfs_files;
+	}
+
+	register_reboot_notifier(&gsmi_reboot_notifier);
+	register_die_notifier(&gsmi_die_notifier);
+	atomic_notifier_chain_register(&panic_notifier_list,
+				       &gsmi_panic_notifier);
+
+	printk(KERN_INFO "gsmi version " DRIVER_VERSION " loaded\n");
+
+	return 0;
+
+out_remove_sysfs_files:
+	sysfs_remove_files(gsmi_kobj, gsmi_attrs);
+out_remove_bin_file:
+	sysfs_remove_bin_file(gsmi_kobj, &eventlog_bin_attr);
+out_err:
+	kobject_put(gsmi_kobj);
+	gsmi_buf_free(gsmi_dev.param_buf);
+	gsmi_buf_free(gsmi_dev.data_buf);
+	gsmi_buf_free(gsmi_dev.name_buf);
+	dma_pool_destroy(gsmi_dev.dma_pool);
+	platform_device_unregister(gsmi_dev.pdev);
+	pr_info("gsmi: failed to load: %d\n", ret);
+	return ret;
+}
+
+static void __exit gsmi_exit(void)
+{
+	unregister_reboot_notifier(&gsmi_reboot_notifier);
+	unregister_die_notifier(&gsmi_die_notifier);
+	atomic_notifier_chain_unregister(&panic_notifier_list,
+					 &gsmi_panic_notifier);
+	efivars_unregister(&efivars);
+
+	sysfs_remove_files(gsmi_kobj, gsmi_attrs);
+	sysfs_remove_bin_file(gsmi_kobj, &eventlog_bin_attr);
+	kobject_put(gsmi_kobj);
+	gsmi_buf_free(gsmi_dev.param_buf);
+	gsmi_buf_free(gsmi_dev.data_buf);
+	gsmi_buf_free(gsmi_dev.name_buf);
+	dma_pool_destroy(gsmi_dev.dma_pool);
+	platform_device_unregister(gsmi_dev.pdev);
+}
+
+module_init(gsmi_init);
+module_exit(gsmi_exit);
+
+MODULE_AUTHOR("Google, Inc.");
+MODULE_LICENSE("GPL");
diff --git a/drivers/firmware/google/memconsole-coreboot.c b/drivers/firmware/google/memconsole-coreboot.c
new file mode 100644
index 0000000..b29e107
--- /dev/null
+++ b/drivers/firmware/google/memconsole-coreboot.c
@@ -0,0 +1,134 @@
+/*
+ * memconsole-coreboot.c
+ *
+ * Memory based BIOS console accessed through coreboot table.
+ *
+ * Copyright 2017 Google Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License v2.0 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+
+#include "memconsole.h"
+#include "coreboot_table.h"
+
+#define CB_TAG_CBMEM_CONSOLE	0x17
+
+/* CBMEM firmware console log descriptor. */
+struct cbmem_cons {
+	u32 size_dont_access_after_boot;
+	u32 cursor;
+	u8  body[0];
+} __packed;
+
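+/* Bit 31 of |cursor| flags a wrapped ring; the low 28 bits index writes. */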
+#define CURSOR_MASK ((1 << 28) - 1)
+#define OVERFLOW (1 << 31)
+
+static struct cbmem_cons __iomem *cbmem_console;
+static u32 cbmem_console_size;
+
+/*
+ * The cbmem_console structure is read again on every access because it may
+ * change at any time if runtime firmware logs new messages. This may rarely
+ * lead to race conditions where the firmware overwrites the beginning of the
+ * ring buffer with more lines after we have already read |cursor|. It should be
+ * rare and harmless enough that we don't spend extra effort working around it.
+ */
+static ssize_t memconsole_coreboot_read(char *buf, loff_t pos, size_t count)
+{
+	u32 cursor = cbmem_console->cursor & CURSOR_MASK;
+	u32 flags = cbmem_console->cursor & ~CURSOR_MASK;
+	u32 size = cbmem_console_size;
+	struct seg {	/* describes ring buffer segments in logical order */
+		u32 phys;	/* physical offset from start of mem buffer */
+		u32 len;	/* length of segment */
+	} seg[2] = { {0}, {0} };
+	size_t done = 0;
+	int i;
+
+	if (flags & OVERFLOW) {
+		if (cursor > size)	/* Shouldn't really happen, but... */
+			cursor = 0;
+		seg[0] = (struct seg){.phys = cursor, .len = size - cursor};
+		seg[1] = (struct seg){.phys = 0, .len = cursor};
+	} else {
+		seg[0] = (struct seg){.phys = 0, .len = min(cursor, size)};
+	}
+
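+	/*
+	 * memory_read_from_buffer() interprets |pos| relative to the start
+	 * of the segment it is handed, so rebase pos after each segment to
+	 * let a single logical offset span both halves of a wrapped ring.
+	 */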
+	for (i = 0; i < ARRAY_SIZE(seg) && count > done; i++) {
+		done += memory_read_from_buffer(buf + done, count - done, &pos,
+			cbmem_console->body + seg[i].phys, seg[i].len);
+		pos -= seg[i].len;
+	}
+	return done;
+}
+
+static int memconsole_probe(struct coreboot_device *dev)
+{
+	struct cbmem_cons __iomem *tmp_cbmc;
+
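+	/*
+	 * Map only the descriptor first to learn the body size, then remap
+	 * the descriptor plus body as one region below.
+	 */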
+	tmp_cbmc = memremap(dev->cbmem_ref.cbmem_addr,
+			    sizeof(*tmp_cbmc), MEMREMAP_WB);
+
+	if (!tmp_cbmc)
+		return -ENOMEM;
+
+	/* Read size only once to prevent overrun attack through /dev/mem. */
+	cbmem_console_size = tmp_cbmc->size_dont_access_after_boot;
+	cbmem_console = memremap(dev->cbmem_ref.cbmem_addr,
+				 cbmem_console_size + sizeof(*cbmem_console),
+				 MEMREMAP_WB);
+	memunmap(tmp_cbmc);
+
+	if (!cbmem_console)
+		return -ENOMEM;
+
+	memconsole_setup(memconsole_coreboot_read);
+
+	return memconsole_sysfs_init();
+}
+
+static int memconsole_remove(struct coreboot_device *dev)
+{
+	memconsole_exit();
+
+	if (cbmem_console)
+		memunmap(cbmem_console);
+
+	return 0;
+}
+
+static struct coreboot_driver memconsole_driver = {
+	.probe = memconsole_probe,
+	.remove = memconsole_remove,
+	.drv = {
+		.name = "memconsole",
+	},
+	.tag = CB_TAG_CBMEM_CONSOLE,
+};
+
+static void coreboot_memconsole_exit(void)
+{
+	coreboot_driver_unregister(&memconsole_driver);
+}
+
+static int __init coreboot_memconsole_init(void)
+{
+	return coreboot_driver_register(&memconsole_driver);
+}
+
+module_exit(coreboot_memconsole_exit);
+module_init(coreboot_memconsole_init);
+
+MODULE_AUTHOR("Google, Inc.");
+MODULE_LICENSE("GPL");
diff --git a/drivers/firmware/google/memconsole-x86-legacy.c b/drivers/firmware/google/memconsole-x86-legacy.c
new file mode 100644
index 0000000..19bcbd1
--- /dev/null
+++ b/drivers/firmware/google/memconsole-x86-legacy.c
@@ -0,0 +1,165 @@
+/*
+ * memconsole-x86-legacy.c
+ *
+ * EBDA specific parts of the memory based BIOS console.
+ *
+ * Copyright 2017 Google Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License v2.0 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/dmi.h>
+#include <linux/mm.h>
+#include <asm/bios_ebda.h>
+#include <linux/acpi.h>
+
+#include "memconsole.h"
+
+#define BIOS_MEMCONSOLE_V1_MAGIC	0xDEADBABE
+#define BIOS_MEMCONSOLE_V2_MAGIC	(('M')|('C'<<8)|('O'<<16)|('N'<<24))
+
+struct biosmemcon_ebda {
+	u32 signature;
+	union {
+		struct {
+			u8  enabled;
+			u32 buffer_addr;
+			u16 start;
+			u16 end;
+			u16 num_chars;
+			u8  wrapped;
+		} __packed v1;
+		struct {
+			u32 buffer_addr;
+			/* Misdocumented as number of pages! */
+			u16 num_bytes;
+			u16 start;
+			u16 end;
+		} __packed v2;
+	};
+} __packed;
+
+static char *memconsole_baseaddr;
+static size_t memconsole_length;
+
+static ssize_t memconsole_read(char *buf, loff_t pos, size_t count)
+{
+	return memory_read_from_buffer(buf, count, &pos, memconsole_baseaddr,
+				       memconsole_length);
+}
+
+static void found_v1_header(struct biosmemcon_ebda *hdr)
+{
+	pr_info("memconsole: BIOS console v1 EBDA structure found at %p\n",
+		hdr);
+	pr_info("memconsole: BIOS console buffer at 0x%.8x, start = %d, end = %d, num = %d\n",
+		hdr->v1.buffer_addr, hdr->v1.start,
+		hdr->v1.end, hdr->v1.num_chars);
+
+	memconsole_baseaddr = phys_to_virt(hdr->v1.buffer_addr);
+	memconsole_length = hdr->v1.num_chars;
+	memconsole_setup(memconsole_read);
+}
+
+static void found_v2_header(struct biosmemcon_ebda *hdr)
+{
+	pr_info("memconsole: BIOS console v2 EBDA structure found at %p\n",
+		hdr);
+	pr_info("memconsole: BIOS console buffer at 0x%.8x, start = %d, end = %d, num_bytes = %d\n",
+		hdr->v2.buffer_addr, hdr->v2.start,
+		hdr->v2.end, hdr->v2.num_bytes);
+
+	memconsole_baseaddr = phys_to_virt(hdr->v2.buffer_addr + hdr->v2.start);
+	memconsole_length = hdr->v2.end - hdr->v2.start;
+	memconsole_setup(memconsole_read);
+}
+
+/*
+ * Search through the EBDA for the BIOS Memory Console, and
+ * set the global variables to point to it.  Return true if found.
+ */
+static bool memconsole_ebda_init(void)
+{
+	unsigned int address;
+	size_t length, cur;
+
+	address = get_bios_ebda();
+	if (!address) {
+		pr_info("memconsole: BIOS EBDA non-existent.\n");
+		return false;
+	}
+
+	/* EBDA length is byte 0 of EBDA (in KB) */
+	length = *(u8 *)phys_to_virt(address);
+	length <<= 10; /* convert to bytes */
+
+	/*
+	 * Search through EBDA for BIOS memory console structure
+	 * note: signature is not necessarily dword-aligned
+	 */
+	for (cur = 0; cur < length; cur++) {
+		struct biosmemcon_ebda *hdr = phys_to_virt(address + cur);
+
+		/* memconsole v1 */
+		if (hdr->signature == BIOS_MEMCONSOLE_V1_MAGIC) {
+			found_v1_header(hdr);
+			return true;
+		}
+
+		/* memconsole v2 */
+		if (hdr->signature == BIOS_MEMCONSOLE_V2_MAGIC) {
+			found_v2_header(hdr);
+			return true;
+		}
+	}
+
+	pr_info("memconsole: BIOS console EBDA structure not found!\n");
+	return false;
+}
+
+static const struct dmi_system_id memconsole_dmi_table[] __initconst = {
+	{
+		.ident = "Google Board",
+		.matches = {
+			DMI_MATCH(DMI_BOARD_VENDOR, "Google, Inc."),
+		},
+	},
+	{}
+};
+MODULE_DEVICE_TABLE(dmi, memconsole_dmi_table);
+
+static bool __init memconsole_find(void)
+{
+	if (!dmi_check_system(memconsole_dmi_table))
+		return false;
+
+	return memconsole_ebda_init();
+}
+
+static int __init memconsole_x86_init(void)
+{
+	if (!memconsole_find())
+		return -ENODEV;
+
+	return memconsole_sysfs_init();
+}
+
+static void __exit memconsole_x86_exit(void)
+{
+	memconsole_exit();
+}
+
+module_init(memconsole_x86_init);
+module_exit(memconsole_x86_exit);
+
+MODULE_AUTHOR("Google, Inc.");
+MODULE_LICENSE("GPL");
diff --git a/drivers/firmware/google/memconsole.c b/drivers/firmware/google/memconsole.c
new file mode 100644
index 0000000..166f07c
--- /dev/null
+++ b/drivers/firmware/google/memconsole.c
@@ -0,0 +1,60 @@
+/*
+ * memconsole.c
+ *
+ * Architecture-independent parts of the memory based BIOS console.
+ *
+ * Copyright 2017 Google Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License v2.0 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/init.h>
+#include <linux/sysfs.h>
+#include <linux/kobject.h>
+#include <linux/module.h>
+
+#include "memconsole.h"
+
+static ssize_t (*memconsole_read_func)(char *, loff_t, size_t);
+
+static ssize_t memconsole_read(struct file *filp, struct kobject *kobp,
+			       struct bin_attribute *bin_attr, char *buf,
+			       loff_t pos, size_t count)
+{
+	if (WARN_ON_ONCE(!memconsole_read_func))
+		return -EIO;
+	return memconsole_read_func(buf, pos, count);
+}
+
+static struct bin_attribute memconsole_bin_attr = {
+	.attr = {.name = "log", .mode = 0444},
+	.read = memconsole_read,
+};
+
+void memconsole_setup(ssize_t (*read_func)(char *, loff_t, size_t))
+{
+	memconsole_read_func = read_func;
+}
+EXPORT_SYMBOL(memconsole_setup);
+
+int memconsole_sysfs_init(void)
+{
+	return sysfs_create_bin_file(firmware_kobj, &memconsole_bin_attr);
+}
+EXPORT_SYMBOL(memconsole_sysfs_init);
+
+void memconsole_exit(void)
+{
+	sysfs_remove_bin_file(firmware_kobj, &memconsole_bin_attr);
+}
+EXPORT_SYMBOL(memconsole_exit);
+
+MODULE_AUTHOR("Google, Inc.");
+MODULE_LICENSE("GPL");
diff --git a/drivers/firmware/google/memconsole.h b/drivers/firmware/google/memconsole.h
new file mode 100644
index 0000000..ff1592d
--- /dev/null
+++ b/drivers/firmware/google/memconsole.h
@@ -0,0 +1,44 @@
+/*
+ * memconsole.h
+ *
+ * Internal headers of the memory based BIOS console.
+ *
+ * Copyright 2017 Google Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License v2.0 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __FIRMWARE_GOOGLE_MEMCONSOLE_H
+#define __FIRMWARE_GOOGLE_MEMCONSOLE_H
+
+#include <linux/types.h>
+
+/*
+ * memconsole_setup
+ *
+ * Initialize the memory console, passing the function to handle read accesses.
+ */
+void memconsole_setup(ssize_t (*read_func)(char *, loff_t, size_t));
+
+/*
+ * memconsole_sysfs_init
+ *
+ * Update memory console length and create binary file
+ * for firmware object.
+ */
+int memconsole_sysfs_init(void);
+
+/* memconsole_exit
+ *
+ * Unmap the console buffer.
+ */
+void memconsole_exit(void);
+
+#endif /* __FIRMWARE_GOOGLE_MEMCONSOLE_H */
diff --git a/drivers/firmware/google/vpd.c b/drivers/firmware/google/vpd.c
new file mode 100644
index 0000000..1aa67bb
--- /dev/null
+++ b/drivers/firmware/google/vpd.c
@@ -0,0 +1,342 @@
+/*
+ * vpd.c
+ *
+ * Driver for exporting VPD content to sysfs.
+ *
+ * Copyright 2017 Google Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License v2.0 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/ctype.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/kobject.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/sysfs.h>
+
+#include "coreboot_table.h"
+#include "vpd_decode.h"
+
+#define CB_TAG_VPD      0x2c
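+/* The magic value spells "CROS" when its bytes are read MSB-first. */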
+#define VPD_CBMEM_MAGIC 0x43524f53
+
+static struct kobject *vpd_kobj;
+
+struct vpd_cbmem {
+	u32 magic;
+	u32 version;
+	u32 ro_size;
+	u32 rw_size;
+	u8  blob[0];
+};
+
+struct vpd_section {
+	bool enabled;
+	const char *name;
+	char *raw_name;                /* the string name_raw */
+	struct kobject *kobj;          /* vpd/name directory */
+	char *baseaddr;
+	struct bin_attribute bin_attr; /* vpd/name_raw bin_attribute */
+	struct list_head attribs;      /* key/value in vpd_attrib_info list */
+};
+
+struct vpd_attrib_info {
+	char *key;
+	const char *value;
+	struct bin_attribute bin_attr;
+	struct list_head list;
+};
+
+static struct vpd_section ro_vpd;
+static struct vpd_section rw_vpd;
+
+static ssize_t vpd_attrib_read(struct file *filp, struct kobject *kobp,
+			       struct bin_attribute *bin_attr, char *buf,
+			       loff_t pos, size_t count)
+{
+	struct vpd_attrib_info *info = bin_attr->private;
+
+	return memory_read_from_buffer(buf, count, &pos, info->value,
+				       info->bin_attr.size);
+}
+
+/*
+ * vpd_section_check_key_name()
+ *
+ * The VPD specification supports only [a-zA-Z0-9_]+ characters in key names but
+ * old firmware versions may have entries like "S/N" which are problematic when
+ * exporting them as sysfs attributes. Such keys found in old firmware
+ * are ignored.
+ *
+ * Returns VPD_OK for a valid key name, VPD_FAIL otherwise.
+ *
+ * @key: The key name to check
+ * @key_len: key name length
+ */
+static int vpd_section_check_key_name(const u8 *key, s32 key_len)
+{
+	int c;
+
+	while (key_len-- > 0) {
+		c = *key++;
+
+		if (!isalnum(c) && c != '_')
+			return VPD_FAIL;
+	}
+
+	return VPD_OK;
+}
+
+static int vpd_section_attrib_add(const u8 *key, s32 key_len,
+				  const u8 *value, s32 value_len,
+				  void *arg)
+{
+	int ret;
+	struct vpd_section *sec = arg;
+	struct vpd_attrib_info *info;
+
+	/*
+	 * Return VPD_OK immediately to decode next entry if the current key
+	 * name contains invalid characters.
+	 */
+	if (vpd_section_check_key_name(key, key_len) != VPD_OK)
+		return VPD_OK;
+
+	info = kzalloc(sizeof(*info), GFP_KERNEL);
+	if (!info)
+		return -ENOMEM;
+
+	info->key = kstrndup(key, key_len, GFP_KERNEL);
+	if (!info->key) {
+		ret = -ENOMEM;
+		goto free_info;
+	}
+
+	sysfs_bin_attr_init(&info->bin_attr);
+	info->bin_attr.attr.name = info->key;
+	info->bin_attr.attr.mode = 0444;
+	info->bin_attr.size = value_len;
+	info->bin_attr.read = vpd_attrib_read;
+	info->bin_attr.private = info;
+
+	info->value = value;
+
+	INIT_LIST_HEAD(&info->list);
+
+	ret = sysfs_create_bin_file(sec->kobj, &info->bin_attr);
+	if (ret)
+		goto free_info_key;
+
+	list_add_tail(&info->list, &sec->attribs);
+	return 0;
+
+free_info_key:
+	kfree(info->key);
+free_info:
+	kfree(info);
+
+	return ret;
+}
+
+static void vpd_section_attrib_destroy(struct vpd_section *sec)
+{
+	struct vpd_attrib_info *info;
+	struct vpd_attrib_info *temp;
+
+	list_for_each_entry_safe(info, temp, &sec->attribs, list) {
+		sysfs_remove_bin_file(sec->kobj, &info->bin_attr);
+		kfree(info->key);
+		kfree(info);
+	}
+}
+
+static ssize_t vpd_section_read(struct file *filp, struct kobject *kobp,
+				struct bin_attribute *bin_attr, char *buf,
+				loff_t pos, size_t count)
+{
+	struct vpd_section *sec = bin_attr->private;
+
+	return memory_read_from_buffer(buf, count, &pos, sec->baseaddr,
+				       sec->bin_attr.size);
+}
+
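+/*
+ * Walk the section blob one entry at a time: vpd_decode_string() advances
+ * |consumed| past each (key, value) pair and calls vpd_section_attrib_add()
+ * for every string entry until it hits a terminator or a decode failure.
+ */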
+static int vpd_section_create_attribs(struct vpd_section *sec)
+{
+	s32 consumed;
+	int ret;
+
+	consumed = 0;
+	do {
+		ret = vpd_decode_string(sec->bin_attr.size, sec->baseaddr,
+					&consumed, vpd_section_attrib_add, sec);
+	} while (ret == VPD_OK);
+
+	return 0;
+}
+
+static int vpd_section_init(const char *name, struct vpd_section *sec,
+			    phys_addr_t physaddr, size_t size)
+{
+	int err;
+
+	sec->baseaddr = memremap(physaddr, size, MEMREMAP_WB);
+	if (!sec->baseaddr)
+		return -ENOMEM;
+
+	sec->name = name;
+
+	/* We want to export the raw partition with name ${name}_raw */
+	sec->raw_name = kasprintf(GFP_KERNEL, "%s_raw", name);
+	if (!sec->raw_name) {
+		err = -ENOMEM;
+		goto err_memunmap;
+	}
+
+	sysfs_bin_attr_init(&sec->bin_attr);
+	sec->bin_attr.attr.name = sec->raw_name;
+	sec->bin_attr.attr.mode = 0444;
+	sec->bin_attr.size = size;
+	sec->bin_attr.read = vpd_section_read;
+	sec->bin_attr.private = sec;
+
+	err = sysfs_create_bin_file(vpd_kobj, &sec->bin_attr);
+	if (err)
+		goto err_free_raw_name;
+
+	sec->kobj = kobject_create_and_add(name, vpd_kobj);
+	if (!sec->kobj) {
+		err = -EINVAL;
+		goto err_sysfs_remove;
+	}
+
+	INIT_LIST_HEAD(&sec->attribs);
+	vpd_section_create_attribs(sec);
+
+	sec->enabled = true;
+
+	return 0;
+
+err_sysfs_remove:
+	sysfs_remove_bin_file(vpd_kobj, &sec->bin_attr);
+err_free_raw_name:
+	kfree(sec->raw_name);
+err_memunmap:
+	memunmap(sec->baseaddr);
+	return err;
+}
+
+static int vpd_section_destroy(struct vpd_section *sec)
+{
+	if (sec->enabled) {
+		vpd_section_attrib_destroy(sec);
+		kobject_put(sec->kobj);
+		sysfs_remove_bin_file(vpd_kobj, &sec->bin_attr);
+		kfree(sec->raw_name);
+		memunmap(sec->baseaddr);
+		sec->enabled = false;
+	}
+
+	return 0;
+}
+
+static int vpd_sections_init(phys_addr_t physaddr)
+{
+	struct vpd_cbmem __iomem *temp;
+	struct vpd_cbmem header;
+	int ret = 0;
+
+	temp = memremap(physaddr, sizeof(struct vpd_cbmem), MEMREMAP_WB);
+	if (!temp)
+		return -ENOMEM;
+
+	memcpy_fromio(&header, temp, sizeof(struct vpd_cbmem));
+	memunmap(temp);
+
+	if (header.magic != VPD_CBMEM_MAGIC)
+		return -ENODEV;
+
+	if (header.ro_size) {
+		ret = vpd_section_init("ro", &ro_vpd,
+				       physaddr + sizeof(struct vpd_cbmem),
+				       header.ro_size);
+		if (ret)
+			return ret;
+	}
+
+	if (header.rw_size) {
+		ret = vpd_section_init("rw", &rw_vpd,
+				       physaddr + sizeof(struct vpd_cbmem) +
+				       header.ro_size, header.rw_size);
+		if (ret) {
+			vpd_section_destroy(&ro_vpd);
+			return ret;
+		}
+	}
+
+	return 0;
+}
+
+static int vpd_probe(struct coreboot_device *dev)
+{
+	int ret;
+
+	vpd_kobj = kobject_create_and_add("vpd", firmware_kobj);
+	if (!vpd_kobj)
+		return -ENOMEM;
+
+	ret = vpd_sections_init(dev->cbmem_ref.cbmem_addr);
+	if (ret) {
+		kobject_put(vpd_kobj);
+		return ret;
+	}
+
+	return 0;
+}
+
+static int vpd_remove(struct coreboot_device *dev)
+{
+	vpd_section_destroy(&ro_vpd);
+	vpd_section_destroy(&rw_vpd);
+
+	kobject_put(vpd_kobj);
+
+	return 0;
+}
+
+static struct coreboot_driver vpd_driver = {
+	.probe = vpd_probe,
+	.remove = vpd_remove,
+	.drv = {
+		.name = "vpd",
+	},
+	.tag = CB_TAG_VPD,
+};
+
+static int __init coreboot_vpd_init(void)
+{
+	return coreboot_driver_register(&vpd_driver);
+}
+
+static void __exit coreboot_vpd_exit(void)
+{
+	coreboot_driver_unregister(&vpd_driver);
+}
+
+module_init(coreboot_vpd_init);
+module_exit(coreboot_vpd_exit);
+
+MODULE_AUTHOR("Google, Inc.");
+MODULE_LICENSE("GPL");
diff --git a/drivers/firmware/google/vpd_decode.c b/drivers/firmware/google/vpd_decode.c
new file mode 100644
index 0000000..943acaa
--- /dev/null
+++ b/drivers/firmware/google/vpd_decode.c
@@ -0,0 +1,99 @@
+/*
+ * vpd_decode.c
+ *
+ * Google VPD decoding routines.
+ *
+ * Copyright 2017 Google Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License v2.0 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/export.h>
+
+#include "vpd_decode.h"
+
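+/*
+ * VPD lengths are encoded base-128, most significant group first: each
+ * byte contributes its low 7 bits and a set bit 7 means another byte
+ * follows. For example, the two bytes 0x81 0x05 decode to
+ * (1 << 7) | 5 = 133.
+ */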
+static int vpd_decode_len(const s32 max_len, const u8 *in,
+			  s32 *length, s32 *decoded_len)
+{
+	u8 more;
+	int i = 0;
+
+	if (!length || !decoded_len)
+		return VPD_FAIL;
+
+	*length = 0;
+	do {
+		if (i >= max_len)
+			return VPD_FAIL;
+
+		more = in[i] & 0x80;
+		*length <<= 7;
+		*length |= in[i] & 0x7f;
+		++i;
+	} while (more);
+
+	*decoded_len = i;
+
+	return VPD_OK;
+}
+
+int vpd_decode_string(const s32 max_len, const u8 *input_buf, s32 *consumed,
+		      vpd_decode_callback callback, void *callback_arg)
+{
+	int type;
+	int res;
+	s32 key_len;
+	s32 value_len;
+	s32 decoded_len;
+	const u8 *key;
+	const u8 *value;
+
+	/* type */
+	if (*consumed >= max_len)
+		return VPD_FAIL;
+
+	type = input_buf[*consumed];
+
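+	/*
+	 * INFO and STRING entries share the same (key, value) wire format,
+	 * but only STRING entries are handed to the callback; INFO entries
+	 * are decoded and skipped.
+	 */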
+	switch (type) {
+	case VPD_TYPE_INFO:
+	case VPD_TYPE_STRING:
+		(*consumed)++;
+
+		/* key */
+		res = vpd_decode_len(max_len - *consumed, &input_buf[*consumed],
+				     &key_len, &decoded_len);
+		if (res != VPD_OK || *consumed + decoded_len >= max_len)
+			return VPD_FAIL;
+
+		*consumed += decoded_len;
+		key = &input_buf[*consumed];
+		*consumed += key_len;
+
+		/* key_len is untrusted data; keep the cursor in bounds. */
+		if (*consumed >= max_len)
+			return VPD_FAIL;
+
+		/* value */
+		res = vpd_decode_len(max_len - *consumed, &input_buf[*consumed],
+				     &value_len, &decoded_len);
+		if (res != VPD_OK || *consumed + decoded_len > max_len)
+			return VPD_FAIL;
+
+		*consumed += decoded_len;
+		value = &input_buf[*consumed];
+		*consumed += value_len;
+
+		/* value_len is untrusted as well; re-check before use. */
+		if (*consumed > max_len)
+			return VPD_FAIL;
+
+		if (type == VPD_TYPE_STRING)
+			return callback(key, key_len, value, value_len,
+					callback_arg);
+		break;
+
+	default:
+		return VPD_FAIL;
+	}
+
+	return VPD_OK;
+}
diff --git a/drivers/firmware/google/vpd_decode.h b/drivers/firmware/google/vpd_decode.h
new file mode 100644
index 0000000..be3d62c
--- /dev/null
+++ b/drivers/firmware/google/vpd_decode.h
@@ -0,0 +1,58 @@
+/*
+ * vpd_decode.h
+ *
+ * Google VPD decoding routines.
+ *
+ * Copyright 2017 Google Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License v2.0 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __VPD_DECODE_H
+#define __VPD_DECODE_H
+
+#include <linux/types.h>
+
+enum {
+	VPD_OK = 0,
+	VPD_FAIL,
+};
+
+enum {
+	VPD_TYPE_TERMINATOR = 0,
+	VPD_TYPE_STRING,
+	VPD_TYPE_INFO                = 0xfe,
+	VPD_TYPE_IMPLICIT_TERMINATOR = 0xff,
+};
+
+/* Callback for vpd_decode_string to invoke. */
+typedef int vpd_decode_callback(const u8 *key, s32 key_len,
+				const u8 *value, s32 value_len,
+				void *arg);
+
+/*
+ * vpd_decode_string
+ *
+ * Given the encoded buffer, this function extracts one (key, value) pair
+ * and invokes callback on it. *consumed is incremented by the number of
+ * bytes consumed by this call.
+ *
+ * input_buf points to the first byte of the input buffer.
+ *
+ * *consumed starts at 0 and always indexes the next byte to be decoded;
+ * it may be non-zero so decoding can resume across multiple calls.
+ *
+ * If one entry is successfully decoded, sends it to callback and returns the
+ * result.
+ */
+int vpd_decode_string(const s32 max_len, const u8 *input_buf, s32 *consumed,
+		      vpd_decode_callback callback, void *callback_arg);
+
+#endif  /* __VPD_DECODE_H */
diff --git a/drivers/firmware/iscsi_ibft.c b/drivers/firmware/iscsi_ibft.c
new file mode 100644
index 0000000..6bc8e66
--- /dev/null
+++ b/drivers/firmware/iscsi_ibft.c
@@ -0,0 +1,897 @@
+/*
+ *  Copyright 2007-2010 Red Hat, Inc.
+ *  by Peter Jones <pjones@redhat.com>
+ *  Copyright 2008 IBM, Inc.
+ *  by Konrad Rzeszutek <konradr@linux.vnet.ibm.com>
+ *  Copyright 2008
+ *  by Konrad Rzeszutek <ketuzsezr@darnok.org>
+ *
+ * This code exposes the iSCSI Boot Format Table to userland via sysfs.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License v2.0 as published by
+ * the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * Changelog:
+ *
+ *  06 Jan 2010 - Peter Jones <pjones@redhat.com>
+ *    New changelog entries are in the git log from now on.  Not here.
+ *
+ *  14 Mar 2008 - Konrad Rzeszutek <ketuzsezr@darnok.org>
+ *    Updated comments and copyrights. (v0.4.9)
+ *
+ *  11 Feb 2008 - Konrad Rzeszutek <konradr@linux.vnet.ibm.com>
+ *    Converted to using ibft_addr. (v0.4.8)
+ *
+ *   8 Feb 2008 - Konrad Rzeszutek <konradr@linux.vnet.ibm.com>
+ *    Combined two functions in one: reserve_ibft_region. (v0.4.7)
+ *
+ *  30 Jan 2008 - Konrad Rzeszutek <konradr@linux.vnet.ibm.com>
+ *   Added logic to handle IPv6 addresses. (v0.4.6)
+ *
+ *  25 Jan 2008 - Konrad Rzeszutek <konradr@linux.vnet.ibm.com>
+ *   Added logic to handle badly not-to-spec iBFT. (v0.4.5)
+ *
+ *   4 Jan 2008 - Konrad Rzeszutek <konradr@linux.vnet.ibm.com>
+ *   Added __init to function declarations. (v0.4.4)
+ *
+ *  21 Dec 2007 - Konrad Rzeszutek <konradr@linux.vnet.ibm.com>
+ *   Updated kobject registration, combined unregister functions in one
+ *   and code and style cleanup. (v0.4.3)
+ *
+ *   5 Dec 2007 - Konrad Rzeszutek <konradr@linux.vnet.ibm.com>
+ *   Added end-markers to enums and re-organized kobject registration. (v0.4.2)
+ *
+ *   4 Dec 2007 - Konrad Rzeszutek <konradr@linux.vnet.ibm.com>
+ *   Created 'device' sysfs link to the NIC and style cleanup. (v0.4.1)
+ *
+ *  28 Nov 2007 - Konrad Rzeszutek <konradr@linux.vnet.ibm.com>
+ *   Added sysfs-ibft documentation, moved 'find_ibft' function to
+ *   in its own file and added text attributes for every struct field.  (v0.4)
+ *
+ *  21 Nov 2007 - Konrad Rzeszutek <konradr@linux.vnet.ibm.com>
+ *   Added text attributes emulating OpenFirmware /proc/device-tree naming.
+ *   Removed binary /sysfs interface (v0.3)
+ *
+ *  29 Aug 2007 - Konrad Rzeszutek <konradr@linux.vnet.ibm.com>
+ *   Added functionality in setup.c to reserve iBFT region. (v0.2)
+ *
+ *  27 Aug 2007 - Konrad Rzeszutek <konradr@linux.vnet.ibm.com>
+ *   First version exposing iBFT data via a binary /sysfs. (v0.1)
+ *
+ */
+
+
+#include <linux/blkdev.h>
+#include <linux/capability.h>
+#include <linux/ctype.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/iscsi_ibft.h>
+#include <linux/limits.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/slab.h>
+#include <linux/stat.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/acpi.h>
+#include <linux/iscsi_boot_sysfs.h>
+
+#define IBFT_ISCSI_VERSION "0.5.0"
+#define IBFT_ISCSI_DATE "2010-Feb-25"
+
+MODULE_AUTHOR("Peter Jones <pjones@redhat.com> and "
+	      "Konrad Rzeszutek <ketuzsezr@darnok.org>");
+MODULE_DESCRIPTION("sysfs interface to BIOS iBFT information");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(IBFT_ISCSI_VERSION);
+
+struct ibft_hdr {
+	u8 id;
+	u8 version;
+	u16 length;
+	u8 index;
+	u8 flags;
+} __attribute__((__packed__));
+
+struct ibft_control {
+	struct ibft_hdr hdr;
+	u16 extensions;
+	u16 initiator_off;
+	u16 nic0_off;
+	u16 tgt0_off;
+	u16 nic1_off;
+	u16 tgt1_off;
+} __attribute__((__packed__));
+
+struct ibft_initiator {
+	struct ibft_hdr hdr;
+	char isns_server[16];
+	char slp_server[16];
+	char pri_radius_server[16];
+	char sec_radius_server[16];
+	u16 initiator_name_len;
+	u16 initiator_name_off;
+} __attribute__((__packed__));
+
+struct ibft_nic {
+	struct ibft_hdr hdr;
+	char ip_addr[16];
+	u8 subnet_mask_prefix;
+	u8 origin;
+	char gateway[16];
+	char primary_dns[16];
+	char secondary_dns[16];
+	char dhcp[16];
+	u16 vlan;
+	char mac[6];
+	u16 pci_bdf;
+	u16 hostname_len;
+	u16 hostname_off;
+} __attribute__((__packed__));
+
+struct ibft_tgt {
+	struct ibft_hdr hdr;
+	char ip_addr[16];
+	u16 port;
+	char lun[8];
+	u8 chap_type;
+	u8 nic_assoc;
+	u16 tgt_name_len;
+	u16 tgt_name_off;
+	u16 chap_name_len;
+	u16 chap_name_off;
+	u16 chap_secret_len;
+	u16 chap_secret_off;
+	u16 rev_chap_name_len;
+	u16 rev_chap_name_off;
+	u16 rev_chap_secret_len;
+	u16 rev_chap_secret_off;
+} __attribute__((__packed__));
+
+/*
+ * The different kobject types and their names.
+ */
+enum ibft_id {
+	id_reserved = 0, /* We don't support. */
+	id_control = 1, /* Should show up only once and is not exported. */
+	id_initiator = 2,
+	id_nic = 3,
+	id_target = 4,
+	id_extensions = 5, /* We don't support. */
+	id_end_marker,
+};
+
+/*
+ * The kobject and attribute structures.
+ */
+
+struct ibft_kobject {
+	struct acpi_table_ibft *header;
+	union {
+		struct ibft_initiator *initiator;
+		struct ibft_nic *nic;
+		struct ibft_tgt *tgt;
+		struct ibft_hdr *hdr;
+	};
+};
+
+static struct iscsi_boot_kset *boot_kset;
+
+/* fully null address */
+static const char nulls[16];
+
+/* IPv4-mapped IPv6 ::ffff:0.0.0.0 */
+static const char mapped_nulls[16] = { 0x00, 0x00, 0x00, 0x00,
+                                       0x00, 0x00, 0x00, 0x00,
+                                       0x00, 0x00, 0xff, 0xff,
+                                       0x00, 0x00, 0x00, 0x00 };
+
+static int address_not_null(u8 *ip)
+{
+	return (memcmp(ip, nulls, 16) && memcmp(ip, mapped_nulls, 16));
+}
+
+/*
+ * Helper functions to parse data properly.
+ */
+static ssize_t sprintf_ipaddr(char *buf, u8 *ip)
+{
+	char *str = buf;
+
+	if (ip[0] == 0 && ip[1] == 0 && ip[2] == 0 && ip[3] == 0 &&
+	    ip[4] == 0 && ip[5] == 0 && ip[6] == 0 && ip[7] == 0 &&
+	    ip[8] == 0 && ip[9] == 0 && ip[10] == 0xff && ip[11] == 0xff) {
+		/*
+		 * IPv4 (mapped): print only the trailing dotted quad
+		 */
+		str += sprintf(str, "%pI4", ip + 12);
+	} else {
+		/*
+		 * IPv6
+		 */
+		str += sprintf(str, "%pI6", ip);
+	}
+	str += sprintf(str, "\n");
+	return str - buf;
+}
+
+static ssize_t sprintf_string(char *str, int len, char *buf)
+{
+	return sprintf(str, "%.*s\n", len, buf);
+}
+
+/*
+ * Helper function to verify the IBFT header.
+ */
+static int ibft_verify_hdr(char *t, struct ibft_hdr *hdr, int id, int length)
+{
+	if (hdr->id != id) {
+		printk(KERN_ERR "iBFT error: We expected the %s " \
+				"field header.id to have %d but " \
+				"found %d instead!\n", t, id, hdr->id);
+		return -ENODEV;
+	}
+	if (hdr->length != length) {
+		printk(KERN_ERR "iBFT error: We expected the %s " \
+				"field header.length to have %d but " \
+				"found %d instead!\n", t, length, hdr->length);
+		return -ENODEV;
+	}
+
+	return 0;
+}
+
+/*
+ * Routines for rendering the iBFT data in human-readable form.
+ */
+static ssize_t ibft_attr_show_initiator(void *data, int type, char *buf)
+{
+	struct ibft_kobject *entry = data;
+	struct ibft_initiator *initiator = entry->initiator;
+	void *ibft_loc = entry->header;
+	char *str = buf;
+
+	if (!initiator)
+		return 0;
+
+	switch (type) {
+	case ISCSI_BOOT_INI_INDEX:
+		str += sprintf(str, "%d\n", initiator->hdr.index);
+		break;
+	case ISCSI_BOOT_INI_FLAGS:
+		str += sprintf(str, "%d\n", initiator->hdr.flags);
+		break;
+	case ISCSI_BOOT_INI_ISNS_SERVER:
+		str += sprintf_ipaddr(str, initiator->isns_server);
+		break;
+	case ISCSI_BOOT_INI_SLP_SERVER:
+		str += sprintf_ipaddr(str, initiator->slp_server);
+		break;
+	case ISCSI_BOOT_INI_PRI_RADIUS_SERVER:
+		str += sprintf_ipaddr(str, initiator->pri_radius_server);
+		break;
+	case ISCSI_BOOT_INI_SEC_RADIUS_SERVER:
+		str += sprintf_ipaddr(str, initiator->sec_radius_server);
+		break;
+	case ISCSI_BOOT_INI_INITIATOR_NAME:
+		str += sprintf_string(str, initiator->initiator_name_len,
+				      (char *)ibft_loc +
+				      initiator->initiator_name_off);
+		break;
+	default:
+		break;
+	}
+
+	return str - buf;
+}
+
+static ssize_t ibft_attr_show_nic(void *data, int type, char *buf)
+{
+	struct ibft_kobject *entry = data;
+	struct ibft_nic *nic = entry->nic;
+	void *ibft_loc = entry->header;
+	char *str = buf;
+	__be32 val;
+
+	if (!nic)
+		return 0;
+
+	switch (type) {
+	case ISCSI_BOOT_ETH_INDEX:
+		str += sprintf(str, "%d\n", nic->hdr.index);
+		break;
+	case ISCSI_BOOT_ETH_FLAGS:
+		str += sprintf(str, "%d\n", nic->hdr.flags);
+		break;
+	case ISCSI_BOOT_ETH_IP_ADDR:
+		str += sprintf_ipaddr(str, nic->ip_addr);
+		break;
+	case ISCSI_BOOT_ETH_SUBNET_MASK:
+		val = cpu_to_be32(~((1 << (32-nic->subnet_mask_prefix))-1));
+		str += sprintf(str, "%pI4", &val);
+		break;
+	case ISCSI_BOOT_ETH_PREFIX_LEN:
+		str += sprintf(str, "%d\n", nic->subnet_mask_prefix);
+		break;
+	case ISCSI_BOOT_ETH_ORIGIN:
+		str += sprintf(str, "%d\n", nic->origin);
+		break;
+	case ISCSI_BOOT_ETH_GATEWAY:
+		str += sprintf_ipaddr(str, nic->gateway);
+		break;
+	case ISCSI_BOOT_ETH_PRIMARY_DNS:
+		str += sprintf_ipaddr(str, nic->primary_dns);
+		break;
+	case ISCSI_BOOT_ETH_SECONDARY_DNS:
+		str += sprintf_ipaddr(str, nic->secondary_dns);
+		break;
+	case ISCSI_BOOT_ETH_DHCP:
+		str += sprintf_ipaddr(str, nic->dhcp);
+		break;
+	case ISCSI_BOOT_ETH_VLAN:
+		str += sprintf(str, "%d\n", nic->vlan);
+		break;
+	case ISCSI_BOOT_ETH_MAC:
+		str += sprintf(str, "%pM\n", nic->mac);
+		break;
+	case ISCSI_BOOT_ETH_HOSTNAME:
+		str += sprintf_string(str, nic->hostname_len,
+				      (char *)ibft_loc + nic->hostname_off);
+		break;
+	default:
+		break;
+	}
+
+	return str - buf;
+}
+
+static ssize_t ibft_attr_show_target(void *data, int type, char *buf)
+{
+	struct ibft_kobject *entry = data;
+	struct ibft_tgt *tgt = entry->tgt;
+	void *ibft_loc = entry->header;
+	char *str = buf;
+	int i;
+
+	if (!tgt)
+		return 0;
+
+	switch (type) {
+	case ISCSI_BOOT_TGT_INDEX:
+		str += sprintf(str, "%d\n", tgt->hdr.index);
+		break;
+	case ISCSI_BOOT_TGT_FLAGS:
+		str += sprintf(str, "%d\n", tgt->hdr.flags);
+		break;
+	case ISCSI_BOOT_TGT_IP_ADDR:
+		str += sprintf_ipaddr(str, tgt->ip_addr);
+		break;
+	case ISCSI_BOOT_TGT_PORT:
+		str += sprintf(str, "%d\n", tgt->port);
+		break;
+	case ISCSI_BOOT_TGT_LUN:
+		for (i = 0; i < 8; i++)
+			str += sprintf(str, "%x", (u8)tgt->lun[i]);
+		str += sprintf(str, "\n");
+		break;
+	case ISCSI_BOOT_TGT_NIC_ASSOC:
+		str += sprintf(str, "%d\n", tgt->nic_assoc);
+		break;
+	case ISCSI_BOOT_TGT_CHAP_TYPE:
+		str += sprintf(str, "%d\n", tgt->chap_type);
+		break;
+	case ISCSI_BOOT_TGT_NAME:
+		str += sprintf_string(str, tgt->tgt_name_len,
+				      (char *)ibft_loc + tgt->tgt_name_off);
+		break;
+	case ISCSI_BOOT_TGT_CHAP_NAME:
+		str += sprintf_string(str, tgt->chap_name_len,
+				      (char *)ibft_loc + tgt->chap_name_off);
+		break;
+	case ISCSI_BOOT_TGT_CHAP_SECRET:
+		str += sprintf_string(str, tgt->chap_secret_len,
+				      (char *)ibft_loc + tgt->chap_secret_off);
+		break;
+	case ISCSI_BOOT_TGT_REV_CHAP_NAME:
+		str += sprintf_string(str, tgt->rev_chap_name_len,
+				      (char *)ibft_loc +
+				      tgt->rev_chap_name_off);
+		break;
+	case ISCSI_BOOT_TGT_REV_CHAP_SECRET:
+		str += sprintf_string(str, tgt->rev_chap_secret_len,
+				      (char *)ibft_loc +
+				      tgt->rev_chap_secret_off);
+		break;
+	default:
+		break;
+	}
+
+	return str - buf;
+}
+
+static ssize_t ibft_attr_show_acpitbl(void *data, int type, char *buf)
+{
+	struct ibft_kobject *entry = data;
+	char *str = buf;
+
+	switch (type) {
+	case ISCSI_BOOT_ACPITBL_SIGNATURE:
+		str += sprintf_string(str, ACPI_NAME_SIZE,
+				      entry->header->header.signature);
+		break;
+	case ISCSI_BOOT_ACPITBL_OEM_ID:
+		str += sprintf_string(str, ACPI_OEM_ID_SIZE,
+				      entry->header->header.oem_id);
+		break;
+	case ISCSI_BOOT_ACPITBL_OEM_TABLE_ID:
+		str += sprintf_string(str, ACPI_OEM_TABLE_ID_SIZE,
+				      entry->header->header.oem_table_id);
+		break;
+	default:
+		break;
+	}
+
+	return str - buf;
+}
+
+static int __init ibft_check_device(void)
+{
+	int len;
+	u8 *pos;
+	u8 csum = 0;
+
+	len = ibft_addr->header.length;
+
+	/* Sanity checking of iBFT. */
+	if (ibft_addr->header.revision != 1) {
+		printk(KERN_ERR "iBFT module supports only revision 1, " \
+				"while this is %d.\n",
+				ibft_addr->header.revision);
+		return -ENOENT;
+	}
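+	/* ACPI-style checksum: every byte of the table must sum to zero. */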
+	for (pos = (u8 *)ibft_addr; pos < (u8 *)ibft_addr + len; pos++)
+		csum += *pos;
+
+	if (csum) {
+		printk(KERN_ERR "iBFT has incorrect checksum (0x%x)!\n", csum);
+		return -ENOENT;
+	}
+
+	return 0;
+}
+
+/*
+ * Helper routines to determine whether an entry is valid
+ * in its iBFT structure.
+ */
+static umode_t ibft_check_nic_for(void *data, int type)
+{
+	struct ibft_kobject *entry = data;
+	struct ibft_nic *nic = entry->nic;
+	umode_t rc = 0;
+
+	switch (type) {
+	case ISCSI_BOOT_ETH_INDEX:
+	case ISCSI_BOOT_ETH_FLAGS:
+		rc = S_IRUGO;
+		break;
+	case ISCSI_BOOT_ETH_IP_ADDR:
+		if (address_not_null(nic->ip_addr))
+			rc = S_IRUGO;
+		break;
+	case ISCSI_BOOT_ETH_PREFIX_LEN:
+	case ISCSI_BOOT_ETH_SUBNET_MASK:
+		if (nic->subnet_mask_prefix)
+			rc = S_IRUGO;
+		break;
+	case ISCSI_BOOT_ETH_ORIGIN:
+		rc = S_IRUGO;
+		break;
+	case ISCSI_BOOT_ETH_GATEWAY:
+		if (address_not_null(nic->gateway))
+			rc = S_IRUGO;
+		break;
+	case ISCSI_BOOT_ETH_PRIMARY_DNS:
+		if (address_not_null(nic->primary_dns))
+			rc = S_IRUGO;
+		break;
+	case ISCSI_BOOT_ETH_SECONDARY_DNS:
+		if (address_not_null(nic->secondary_dns))
+			rc = S_IRUGO;
+		break;
+	case ISCSI_BOOT_ETH_DHCP:
+		if (address_not_null(nic->dhcp))
+			rc = S_IRUGO;
+		break;
+	case ISCSI_BOOT_ETH_VLAN:
+	case ISCSI_BOOT_ETH_MAC:
+		rc = S_IRUGO;
+		break;
+	case ISCSI_BOOT_ETH_HOSTNAME:
+		if (nic->hostname_off)
+			rc = S_IRUGO;
+		break;
+	default:
+		break;
+	}
+
+	return rc;
+}
+
+static umode_t __init ibft_check_tgt_for(void *data, int type)
+{
+	struct ibft_kobject *entry = data;
+	struct ibft_tgt *tgt = entry->tgt;
+	umode_t rc = 0;
+
+	switch (type) {
+	case ISCSI_BOOT_TGT_INDEX:
+	case ISCSI_BOOT_TGT_FLAGS:
+	case ISCSI_BOOT_TGT_IP_ADDR:
+	case ISCSI_BOOT_TGT_PORT:
+	case ISCSI_BOOT_TGT_LUN:
+	case ISCSI_BOOT_TGT_NIC_ASSOC:
+	case ISCSI_BOOT_TGT_CHAP_TYPE:
+		rc = S_IRUGO;
+		break;
+	case ISCSI_BOOT_TGT_NAME:
+		if (tgt->tgt_name_len)
+			rc = S_IRUGO;
+		break;
+	case ISCSI_BOOT_TGT_CHAP_NAME:
+	case ISCSI_BOOT_TGT_CHAP_SECRET:
+		if (tgt->chap_name_len)
+			rc = S_IRUGO;
+		break;
+	case ISCSI_BOOT_TGT_REV_CHAP_NAME:
+	case ISCSI_BOOT_TGT_REV_CHAP_SECRET:
+		if (tgt->rev_chap_name_len)
+			rc = S_IRUGO;
+		break;
+	default:
+		break;
+	}
+
+	return rc;
+}
+
+static umode_t __init ibft_check_initiator_for(void *data, int type)
+{
+	struct ibft_kobject *entry = data;
+	struct ibft_initiator *init = entry->initiator;
+	umode_t rc = 0;
+
+	switch (type) {
+	case ISCSI_BOOT_INI_INDEX:
+	case ISCSI_BOOT_INI_FLAGS:
+		rc = S_IRUGO;
+		break;
+	case ISCSI_BOOT_INI_ISNS_SERVER:
+		if (address_not_null(init->isns_server))
+			rc = S_IRUGO;
+		break;
+	case ISCSI_BOOT_INI_SLP_SERVER:
+		if (address_not_null(init->slp_server))
+			rc = S_IRUGO;
+		break;
+	case ISCSI_BOOT_INI_PRI_RADIUS_SERVER:
+		if (address_not_null(init->pri_radius_server))
+			rc = S_IRUGO;
+		break;
+	case ISCSI_BOOT_INI_SEC_RADIUS_SERVER:
+		if (address_not_null(init->sec_radius_server))
+			rc = S_IRUGO;
+		break;
+	case ISCSI_BOOT_INI_INITIATOR_NAME:
+		if (init->initiator_name_len)
+			rc = S_IRUGO;
+		break;
+	default:
+		break;
+	}
+
+	return rc;
+}
+
+static umode_t __init ibft_check_acpitbl_for(void *data, int type)
+{
+
+	umode_t rc = 0;
+
+	switch (type) {
+	case ISCSI_BOOT_ACPITBL_SIGNATURE:
+	case ISCSI_BOOT_ACPITBL_OEM_ID:
+	case ISCSI_BOOT_ACPITBL_OEM_TABLE_ID:
+		rc = S_IRUGO;
+		break;
+	default:
+		break;
+	}
+
+	return rc;
+}
+
+static void ibft_kobj_release(void *data)
+{
+	kfree(data);
+}
+
+/*
+ * Helper function for ibft_register_kobjects.
+ */
+static int __init ibft_create_kobject(struct acpi_table_ibft *header,
+				      struct ibft_hdr *hdr)
+{
+	struct iscsi_boot_kobj *boot_kobj = NULL;
+	struct ibft_kobject *ibft_kobj = NULL;
+	struct ibft_nic *nic = (struct ibft_nic *)hdr;
+	struct pci_dev *pci_dev;
+	int rc = 0;
+
+	ibft_kobj = kzalloc(sizeof(*ibft_kobj), GFP_KERNEL);
+	if (!ibft_kobj)
+		return -ENOMEM;
+
+	ibft_kobj->header = header;
+	ibft_kobj->hdr = hdr;
+
+	switch (hdr->id) {
+	case id_initiator:
+		rc = ibft_verify_hdr("initiator", hdr, id_initiator,
+				     sizeof(*ibft_kobj->initiator));
+		if (rc)
+			break;
+
+		boot_kobj = iscsi_boot_create_initiator(boot_kset, hdr->index,
+						ibft_kobj,
+						ibft_attr_show_initiator,
+						ibft_check_initiator_for,
+						ibft_kobj_release);
+		if (!boot_kobj) {
+			rc = -ENOMEM;
+			goto free_ibft_obj;
+		}
+		break;
+	case id_nic:
+		rc = ibft_verify_hdr("ethernet", hdr, id_nic,
+				     sizeof(*ibft_kobj->nic));
+		if (rc)
+			break;
+
+		boot_kobj = iscsi_boot_create_ethernet(boot_kset, hdr->index,
+						       ibft_kobj,
+						       ibft_attr_show_nic,
+						       ibft_check_nic_for,
+						       ibft_kobj_release);
+		if (!boot_kobj) {
+			rc = -ENOMEM;
+			goto free_ibft_obj;
+		}
+		break;
+	case id_target:
+		rc = ibft_verify_hdr("target", hdr, id_target,
+				     sizeof(*ibft_kobj->tgt));
+		if (rc)
+			break;
+
+		boot_kobj = iscsi_boot_create_target(boot_kset, hdr->index,
+						     ibft_kobj,
+						     ibft_attr_show_target,
+						     ibft_check_tgt_for,
+						     ibft_kobj_release);
+		if (!boot_kobj) {
+			rc = -ENOMEM;
+			goto free_ibft_obj;
+		}
+		break;
+	case id_reserved:
+	case id_control:
+	case id_extensions:
+		/* Fields which we don't support. Ignore them */
+		rc = 1;
+		break;
+	default:
+		printk(KERN_ERR "iBFT has unknown structure type (%d). " \
+				"Report this bug to %.6s!\n", hdr->id,
+				header->header.oem_id);
+		rc = 1;
+		break;
+	}
+
+	if (rc) {
+		/* Skip adding this kobject, but exit with non-fatal error. */
+		rc = 0;
+		goto free_ibft_obj;
+	}
+
+	if (hdr->id == id_nic) {
+		/*
+		 * We don't search for the device in domains other than
+		 * zero, because on x86 platforms the BIOS executes only
+		 * devices that are in domain 0. Furthermore, the iBFT
+		 * spec doesn't have a domain id field :-(
+		 */
+		pci_dev = pci_get_domain_bus_and_slot(0,
+						(nic->pci_bdf & 0xff00) >> 8,
+						(nic->pci_bdf & 0xff));
+		if (pci_dev) {
+			rc = sysfs_create_link(&boot_kobj->kobj,
+					       &pci_dev->dev.kobj, "device");
+			pci_dev_put(pci_dev);
+		}
+	}
+	return 0;
+
+free_ibft_obj:
+	kfree(ibft_kobj);
+	return rc;
+}
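+
+/*
+ * Editor's sketch of the BDF decoding used above (illustrative local
+ * names only):
+ *
+ *	u8 bus   = (nic->pci_bdf >> 8) & 0xff;	// bits 15..8
+ *	u8 devfn = nic->pci_bdf & 0xff;		// device 7..3, function 2..0
+ *
+ * which is what pci_get_domain_bus_and_slot(0, bus, devfn) receives;
+ * the domain is hard-wired to 0 for the reason given in the comment.
+ */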
+
+/*
+ * Scan the IBFT table structure for the NIC and Target fields. When
+ * found add them on the passed-in list. We do not support the other
+ * fields at this point, so they are skipped.
+ */
+static int __init ibft_register_kobjects(struct acpi_table_ibft *header)
+{
+	struct ibft_control *control = NULL;
+	struct iscsi_boot_kobj *boot_kobj;
+	struct ibft_kobject *ibft_kobj;
+	void *ptr, *end;
+	int rc = 0;
+	u16 offset;
+	u16 eot_offset;
+
+	control = (void *)header + sizeof(*header);
+	end = (void *)control + control->hdr.length;
+	eot_offset = (void *)header + header->header.length - (void *)control;
+	rc = ibft_verify_hdr("control", (struct ibft_hdr *)control, id_control,
+			     sizeof(*control));
+
+	/* iBFT table safety checking */
+	rc |= ((control->hdr.index) ? -ENODEV : 0);
+	if (rc) {
+		printk(KERN_ERR "iBFT error: Control header is invalid!\n");
+		return rc;
+	}
+	for (ptr = &control->initiator_off; ptr < end; ptr += sizeof(u16)) {
+		offset = *(u16 *)ptr;
+		if (offset && offset < header->header.length &&
+						offset < eot_offset) {
+			rc = ibft_create_kobject(header,
+						 (void *)header + offset);
+			if (rc)
+				break;
+		}
+	}
+	if (rc)
+		return rc;
+
+	ibft_kobj = kzalloc(sizeof(*ibft_kobj), GFP_KERNEL);
+	if (!ibft_kobj)
+		return -ENOMEM;
+
+	ibft_kobj->header = header;
+	ibft_kobj->hdr = NULL; /* for ibft_unregister */
+
+	boot_kobj = iscsi_boot_create_acpitbl(boot_kset, 0,
+					ibft_kobj,
+					ibft_attr_show_acpitbl,
+					ibft_check_acpitbl_for,
+					ibft_kobj_release);
+	if (!boot_kobj)  {
+		kfree(ibft_kobj);
+		rc = -ENOMEM;
+	}
+
+	return rc;
+}
+
+static void ibft_unregister(void)
+{
+	struct iscsi_boot_kobj *boot_kobj, *tmp_kobj;
+	struct ibft_kobject *ibft_kobj;
+
+	list_for_each_entry_safe(boot_kobj, tmp_kobj,
+				 &boot_kset->kobj_list, list) {
+		ibft_kobj = boot_kobj->data;
+		if (ibft_kobj->hdr && ibft_kobj->hdr->id == id_nic)
+			sysfs_remove_link(&boot_kobj->kobj, "device");
+	}
+}
+
+static void ibft_cleanup(void)
+{
+	if (boot_kset) {
+		ibft_unregister();
+		iscsi_boot_destroy_kset(boot_kset);
+	}
+}
+
+static void __exit ibft_exit(void)
+{
+	ibft_cleanup();
+}
+
+#ifdef CONFIG_ACPI
+static const struct {
+	char *sign;
+} ibft_signs[] = {
+	/*
+	 * One spec says "IBFT", the other says "iBFT". We have to check
+	 * for both.
+	 */
+	{ ACPI_SIG_IBFT },
+	{ "iBFT" },
+	{ "BIFT" },	/* Broadcom iSCSI Offload */
+};
+
+static void __init acpi_find_ibft_region(void)
+{
+	int i;
+	struct acpi_table_header *table = NULL;
+
+	if (acpi_disabled)
+		return;
+
+	for (i = 0; i < ARRAY_SIZE(ibft_signs) && !ibft_addr; i++) {
+		acpi_get_table(ibft_signs[i].sign, 0, &table);
+		ibft_addr = (struct acpi_table_ibft *)table;
+	}
+}
+#else
+static void __init acpi_find_ibft_region(void)
+{
+}
+#endif
+
+/*
+ * ibft_init() - creates sysfs tree entries for the iBFT data.
+ */
+static int __init ibft_init(void)
+{
+	int rc = 0;
+
+	/*
+	 * On UEFI systems setup_arch()/find_ibft_region() runs before
+	 * the ACPI tables are parsed, and it only does the legacy
+	 * low-memory scan, so check ACPI here as well.
+	 */
+	if (!ibft_addr)
+		acpi_find_ibft_region();
+
+	if (ibft_addr) {
+		pr_info("iBFT detected.\n");
+
+		rc = ibft_check_device();
+		if (rc)
+			return rc;
+
+		boot_kset = iscsi_boot_create_kset("ibft");
+		if (!boot_kset)
+			return -ENOMEM;
+
+		/* Scan the IBFT for data and register the kobjects. */
+		rc = ibft_register_kobjects(ibft_addr);
+		if (rc)
+			goto out_free;
+	} else
+		pr_info("No iBFT detected.\n");
+
+	return 0;
+
+out_free:
+	ibft_cleanup();
+	return rc;
+}
+
+module_init(ibft_init);
+module_exit(ibft_exit);
diff --git a/drivers/firmware/iscsi_ibft_find.c b/drivers/firmware/iscsi_ibft_find.c
new file mode 100644
index 0000000..2224f1d
--- /dev/null
+++ b/drivers/firmware/iscsi_ibft_find.c
@@ -0,0 +1,112 @@
+/*
+ *  Copyright 2007-2010 Red Hat, Inc.
+ *  by Peter Jones <pjones@redhat.com>
+ *  Copyright 2007 IBM, Inc.
+ *  by Konrad Rzeszutek <konradr@linux.vnet.ibm.com>
+ *  Copyright 2008
+ *  by Konrad Rzeszutek <ketuzsezr@darnok.org>
+ *
+ * This code finds the iSCSI Boot Format Table.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License v2.0 as published by
+ * the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/bootmem.h>
+#include <linux/blkdev.h>
+#include <linux/ctype.h>
+#include <linux/device.h>
+#include <linux/efi.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/limits.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/stat.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/acpi.h>
+#include <linux/iscsi_ibft.h>
+
+#include <asm/mmzone.h>
+
+/*
+ * Physical location of iSCSI Boot Format Table.
+ */
+struct acpi_table_ibft *ibft_addr;
+EXPORT_SYMBOL_GPL(ibft_addr);
+
+static const struct {
+	char *sign;
+} ibft_signs[] = {
+	{ "iBFT" },
+	{ "BIFT" },	/* Broadcom iSCSI Offload */
+};
+
+#define IBFT_SIGN_LEN 4
+#define IBFT_START 0x80000 /* 512kB */
+#define IBFT_END 0x100000 /* 1MB */
+#define VGA_MEM 0xA0000 /* VGA buffer */
+#define VGA_SIZE 0x20000 /* 128kB */
+
+static int __init find_ibft_in_mem(void)
+{
+	unsigned long pos;
+	unsigned int len = 0;
+	void *virt;
+	int i;
+
+	for (pos = IBFT_START; pos < IBFT_END; pos += 16) {
+		/* The table can't be inside the VGA BIOS reserved space,
+		 * so skip that area */
+		if (pos == VGA_MEM)
+			pos += VGA_SIZE;
+		virt = isa_bus_to_virt(pos);
+
+		for (i = 0; i < ARRAY_SIZE(ibft_signs); i++) {
+			if (memcmp(virt, ibft_signs[i].sign, IBFT_SIGN_LEN) ==
+			    0) {
+				unsigned long *addr =
+				    (unsigned long *)isa_bus_to_virt(pos + 4);
+				len = *addr;
+				/* if the length of the table extends past 1M,
+				 * the table cannot be valid. */
+				if (pos + len <= (IBFT_END-1)) {
+					ibft_addr = (struct acpi_table_ibft *)virt;
+					pr_info("iBFT found at 0x%lx.\n", pos);
+					goto done;
+				}
+			}
+		}
+	}
+done:
+	return len;
+}
+
+/*
+ * Routine used to find the iSCSI Boot Format Table. The logical
+ * kernel address is set in the ibft_addr global variable.
+ */
+unsigned long __init find_ibft_region(unsigned long *sizep)
+{
+	ibft_addr = NULL;
+
+	/* iBFT 1.03 section 1.4.3.1 mandates that UEFI machines will
+	 * only use ACPI for this */
+
+	if (!efi_enabled(EFI_BOOT))
+		find_ibft_in_mem();
+
+	if (ibft_addr) {
+		*sizep = PAGE_ALIGN(ibft_addr->header.length);
+		return (u64)isa_virt_to_bus(ibft_addr);
+	}
+
+	*sizep = 0;
+	return 0;
+}
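+
+/*
+ * Editor's sketch of the expected caller pattern (hedged: the actual
+ * call site is in the arch setup code, not in this file):
+ *
+ *	unsigned long size = 0;
+ *	unsigned long addr = find_ibft_region(&size);
+ *
+ *	if (size)
+ *		memblock_reserve(addr, size);	// keep the table intact
+ *
+ * so the table located above survives until iscsi_ibft.c exports it
+ * through sysfs.
+ */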
diff --git a/drivers/firmware/memmap.c b/drivers/firmware/memmap.c
new file mode 100644
index 0000000..5de3ed2
--- /dev/null
+++ b/drivers/firmware/memmap.c
@@ -0,0 +1,426 @@
+/*
+ * linux/drivers/firmware/memmap.c
+ *  Copyright (C) 2008 SUSE LINUX Products GmbH
+ *  by Bernhard Walle <bernhard.walle@gmx.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License v2.0 as published by
+ * the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/string.h>
+#include <linux/firmware-map.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/bootmem.h>
+#include <linux/slab.h>
+#include <linux/mm.h>
+
+/*
+ * Data types ------------------------------------------------------------------
+ */
+
+/*
+ * Firmware map entry. Because firmware memory maps are flat and not
+ * hierarchical, it's ok to organise them in a linked list. No parent
+ * information is necessary, unlike in the resource tree.
+ */
+struct firmware_map_entry {
+	/*
+	 * start and end must be u64 rather than resource_size_t, because e820
+	 * resources can lie at addresses above 4G.
+	 */
+	u64			start;	/* start of the memory range */
+	u64			end;	/* end of the memory range (incl.) */
+	const char		*type;	/* type of the memory range */
+	struct list_head	list;	/* entry for the linked list */
+	struct kobject		kobj;   /* kobject for each entry */
+};
+
+/*
+ * Forward declarations --------------------------------------------------------
+ */
+static ssize_t memmap_attr_show(struct kobject *kobj,
+				struct attribute *attr, char *buf);
+static ssize_t start_show(struct firmware_map_entry *entry, char *buf);
+static ssize_t end_show(struct firmware_map_entry *entry, char *buf);
+static ssize_t type_show(struct firmware_map_entry *entry, char *buf);
+
+static struct firmware_map_entry * __meminit
+firmware_map_find_entry(u64 start, u64 end, const char *type);
+
+/*
+ * Static data -----------------------------------------------------------------
+ */
+
+struct memmap_attribute {
+	struct attribute attr;
+	ssize_t (*show)(struct firmware_map_entry *entry, char *buf);
+};
+
+static struct memmap_attribute memmap_start_attr = __ATTR_RO(start);
+static struct memmap_attribute memmap_end_attr   = __ATTR_RO(end);
+static struct memmap_attribute memmap_type_attr  = __ATTR_RO(type);
+
+/*
+ * These are default attributes that are added for every memmap entry.
+ */
+static struct attribute *def_attrs[] = {
+	&memmap_start_attr.attr,
+	&memmap_end_attr.attr,
+	&memmap_type_attr.attr,
+	NULL
+};
+
+static const struct sysfs_ops memmap_attr_ops = {
+	.show = memmap_attr_show,
+};
+
+/* Firmware memory map entries. */
+static LIST_HEAD(map_entries);
+static DEFINE_SPINLOCK(map_entries_lock);
+
+/*
+ * For memory hotplug, there is no way to free memory map entries allocated
+ * by boot mem after the system is up. So when we hot-remove memory whose
+ * map entry is allocated by bootmem, we need to remember the storage and
+ * reuse it when the memory is hot-added again.
+ */
+static LIST_HEAD(map_entries_bootmem);
+static DEFINE_SPINLOCK(map_entries_bootmem_lock);
+
+
+static inline struct firmware_map_entry *
+to_memmap_entry(struct kobject *kobj)
+{
+	return container_of(kobj, struct firmware_map_entry, kobj);
+}
+
+static void __meminit release_firmware_map_entry(struct kobject *kobj)
+{
+	struct firmware_map_entry *entry = to_memmap_entry(kobj);
+
+	if (PageReserved(virt_to_page(entry))) {
+		/*
+		 * Remember the storage allocated by bootmem, and reuse it when
+		 * the memory is hot-added again. The entry will be added to
+		 * map_entries_bootmem here, and deleted from &map_entries in
+		 * firmware_map_remove_entry().
+		 */
+		spin_lock(&map_entries_bootmem_lock);
+		list_add(&entry->list, &map_entries_bootmem);
+		spin_unlock(&map_entries_bootmem_lock);
+
+		return;
+	}
+
+	kfree(entry);
+}
+
+static struct kobj_type __refdata memmap_ktype = {
+	.release	= release_firmware_map_entry,
+	.sysfs_ops	= &memmap_attr_ops,
+	.default_attrs	= def_attrs,
+};
+
+/*
+ * Registration functions ------------------------------------------------------
+ */
+
+/**
+ * firmware_map_add_entry() - Does the real work to add a firmware memmap entry.
+ * @start: Start of the memory range.
+ * @end:   End of the memory range (exclusive).
+ * @type:  Type of the memory range.
+ * @entry: Pre-allocated (either kmalloc() or bootmem allocator), uninitialised
+ *         entry.
+ *
+ * Common implementation of firmware_map_add() and firmware_map_add_early()
+ * which expects a pre-allocated struct firmware_map_entry.
+ *
+ * Return: 0 always
+ */
+static int firmware_map_add_entry(u64 start, u64 end,
+				  const char *type,
+				  struct firmware_map_entry *entry)
+{
+	BUG_ON(start > end);
+
+	entry->start = start;
+	entry->end = end - 1;
+	entry->type = type;
+	INIT_LIST_HEAD(&entry->list);
+	kobject_init(&entry->kobj, &memmap_ktype);
+
+	spin_lock(&map_entries_lock);
+	list_add_tail(&entry->list, &map_entries);
+	spin_unlock(&map_entries_lock);
+
+	return 0;
+}
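+
+/*
+ * Editor's example: callers pass an exclusive end, the entry stores it
+ * inclusively. A range added as [0x0, 0xa0000) therefore appears in
+ * sysfs as start=0x0, end=0x9ffff.
+ */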
+
+/**
+ * firmware_map_remove_entry() - Does the real work to remove a firmware
+ * memmap entry.
+ * @entry: removed entry.
+ *
+ * The caller must hold map_entries_lock, and release it properly.
+ */
+static inline void firmware_map_remove_entry(struct firmware_map_entry *entry)
+{
+	list_del(&entry->list);
+}
+
+/*
+ * Add memmap entry on sysfs
+ */
+static int add_sysfs_fw_map_entry(struct firmware_map_entry *entry)
+{
+	static int map_entries_nr;
+	static struct kset *mmap_kset;
+
+	if (entry->kobj.state_in_sysfs)
+		return -EEXIST;
+
+	if (!mmap_kset) {
+		mmap_kset = kset_create_and_add("memmap", NULL, firmware_kobj);
+		if (!mmap_kset)
+			return -ENOMEM;
+	}
+
+	entry->kobj.kset = mmap_kset;
+	if (kobject_add(&entry->kobj, NULL, "%d", map_entries_nr++))
+		kobject_put(&entry->kobj);
+
+	return 0;
+}
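+
+/*
+ * Editor's illustration of the resulting layout: entries are numbered
+ * by map_entries_nr in registration order, giving
+ *
+ *	/sys/firmware/memmap/0/{start,end,type}
+ *	/sys/firmware/memmap/1/{start,end,type}
+ *	...
+ *
+ * one numbered directory per firmware memory map entry.
+ */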
+
+/*
+ * Remove memmap entry on sysfs
+ */
+static inline void remove_sysfs_fw_map_entry(struct firmware_map_entry *entry)
+{
+	kobject_put(&entry->kobj);
+}
+
+/**
+ * firmware_map_find_entry_in_list() - Search memmap entry in a given list.
+ * @start: Start of the memory range.
+ * @end:   End of the memory range (exclusive).
+ * @type:  Type of the memory range.
+ * @list:  In which to find the entry.
+ *
+ * This function finds the memmap entry of a given memory range in a
+ * given list. The caller must hold map_entries_lock, and must not release
+ * the lock until the processing of the returned entry has completed.
+ *
+ * Return: Pointer to the entry to be found on success, or NULL on failure.
+ */
+static struct firmware_map_entry * __meminit
+firmware_map_find_entry_in_list(u64 start, u64 end, const char *type,
+				struct list_head *list)
+{
+	struct firmware_map_entry *entry;
+
+	list_for_each_entry(entry, list, list)
+		if ((entry->start == start) && (entry->end == end) &&
+		    (!strcmp(entry->type, type))) {
+			return entry;
+		}
+
+	return NULL;
+}
+
+/**
+ * firmware_map_find_entry() - Search memmap entry in map_entries.
+ * @start: Start of the memory range.
+ * @end:   End of the memory range (exclusive).
+ * @type:  Type of the memory range.
+ *
+ * This function finds the memmap entry of a given memory range.
+ * The caller must hold map_entries_lock, and must not release the lock
+ * until the processing of the returned entry has completed.
+ *
+ * Return: Pointer to the entry to be found on success, or NULL on failure.
+ */
+static struct firmware_map_entry * __meminit
+firmware_map_find_entry(u64 start, u64 end, const char *type)
+{
+	return firmware_map_find_entry_in_list(start, end, type, &map_entries);
+}
+
+/**
+ * firmware_map_find_entry_bootmem() - Search memmap entry in map_entries_bootmem.
+ * @start: Start of the memory range.
+ * @end:   End of the memory range (exclusive).
+ * @type:  Type of the memory range.
+ *
+ * This function is similar to firmware_map_find_entry() except that it finds
+ * the given entry in map_entries_bootmem.
+ *
+ * Return: Pointer to the entry to be found on success, or NULL on failure.
+ */
+static struct firmware_map_entry * __meminit
+firmware_map_find_entry_bootmem(u64 start, u64 end, const char *type)
+{
+	return firmware_map_find_entry_in_list(start, end, type,
+					       &map_entries_bootmem);
+}
+
+/**
+ * firmware_map_add_hotplug() - Adds a firmware mapping entry when we do
+ * memory hotplug.
+ * @start: Start of the memory range.
+ * @end:   End of the memory range (exclusive)
+ * @type:  Type of the memory range.
+ *
+ * Adds a firmware mapping entry. This function is for memory hotplug; it is
+ * similar to firmware_map_add_early(). The only difference is that
+ * it will create the sysfs entry dynamically.
+ *
+ * Return: 0 on success, or -ENOMEM if no memory could be allocated.
+ */
+int __meminit firmware_map_add_hotplug(u64 start, u64 end, const char *type)
+{
+	struct firmware_map_entry *entry;
+
+	entry = firmware_map_find_entry(start, end - 1, type);
+	if (entry)
+		return 0;
+
+	entry = firmware_map_find_entry_bootmem(start, end - 1, type);
+	if (!entry) {
+		entry = kzalloc(sizeof(struct firmware_map_entry), GFP_ATOMIC);
+		if (!entry)
+			return -ENOMEM;
+	} else {
+		/* Reuse storage allocated by bootmem. */
+		spin_lock(&map_entries_bootmem_lock);
+		list_del(&entry->list);
+		spin_unlock(&map_entries_bootmem_lock);
+
+		memset(entry, 0, sizeof(*entry));
+	}
+
+	firmware_map_add_entry(start, end, type, entry);
+	/* create the memmap entry */
+	add_sysfs_fw_map_entry(entry);
+
+	return 0;
+}
+
+/**
+ * firmware_map_add_early() - Adds a firmware mapping entry.
+ * @start: Start of the memory range.
+ * @end:   End of the memory range.
+ * @type:  Type of the memory range.
+ *
+ * Adds a firmware mapping entry. This function uses the bootmem allocator
+ * for memory allocation.
+ *
+ * This function must be called before late_initcall().
+ *
+ * Return: 0 on success, or -ENOMEM if no memory could be allocated.
+ */
+int __init firmware_map_add_early(u64 start, u64 end, const char *type)
+{
+	struct firmware_map_entry *entry;
+
+	entry = memblock_virt_alloc(sizeof(struct firmware_map_entry), 0);
+	if (WARN_ON(!entry))
+		return -ENOMEM;
+
+	return firmware_map_add_entry(start, end, type, entry);
+}
+
+/**
+ * firmware_map_remove() - remove a firmware mapping entry
+ * @start: Start of the memory range.
+ * @end:   End of the memory range.
+ * @type:  Type of the memory range.
+ *
+ * removes a firmware mapping entry.
+ *
+ * Return: 0 on success, or -EINVAL if no entry.
+ */
+int __meminit firmware_map_remove(u64 start, u64 end, const char *type)
+{
+	struct firmware_map_entry *entry;
+
+	spin_lock(&map_entries_lock);
+	entry = firmware_map_find_entry(start, end - 1, type);
+	if (!entry) {
+		spin_unlock(&map_entries_lock);
+		return -EINVAL;
+	}
+
+	firmware_map_remove_entry(entry);
+	spin_unlock(&map_entries_lock);
+
+	/* remove the memmap entry */
+	remove_sysfs_fw_map_entry(entry);
+
+	return 0;
+}
+
+/*
+ * Sysfs functions -------------------------------------------------------------
+ */
+
+static ssize_t start_show(struct firmware_map_entry *entry, char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "0x%llx\n",
+		(unsigned long long)entry->start);
+}
+
+static ssize_t end_show(struct firmware_map_entry *entry, char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "0x%llx\n",
+		(unsigned long long)entry->end);
+}
+
+static ssize_t type_show(struct firmware_map_entry *entry, char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "%s\n", entry->type);
+}
+
+static inline struct memmap_attribute *to_memmap_attr(struct attribute *attr)
+{
+	return container_of(attr, struct memmap_attribute, attr);
+}
+
+static ssize_t memmap_attr_show(struct kobject *kobj,
+				struct attribute *attr, char *buf)
+{
+	struct firmware_map_entry *entry = to_memmap_entry(kobj);
+	struct memmap_attribute *memmap_attr = to_memmap_attr(attr);
+
+	return memmap_attr->show(entry, buf);
+}
+
+/*
+ * Initialises the sysfs representation and adds the entries in the
+ * map_entries list to sysfs. Note that firmware_map_add() and
+ * firmware_map_add_early() must be called before late_initcall: this
+ * function itself runs as a late_initcall(), so entries added afterwards
+ * are never added to sysfs.
+ */
+static int __init firmware_memmap_init(void)
+{
+	struct firmware_map_entry *entry;
+
+	list_for_each_entry(entry, &map_entries, list)
+		add_sysfs_fw_map_entry(entry);
+
+	return 0;
+}
+late_initcall(firmware_memmap_init);
+
diff --git a/drivers/firmware/meson/Kconfig b/drivers/firmware/meson/Kconfig
new file mode 100644
index 0000000..170d7e8
--- /dev/null
+++ b/drivers/firmware/meson/Kconfig
@@ -0,0 +1,9 @@
+#
+# Amlogic Secure Monitor driver
+#
+config MESON_SM
+	bool
+	default ARCH_MESON
+	depends on ARM64_4K_PAGES
+	help
+	  Say y here to enable the Amlogic secure monitor driver.
diff --git a/drivers/firmware/meson/Makefile b/drivers/firmware/meson/Makefile
new file mode 100644
index 0000000..9ab3884
--- /dev/null
+++ b/drivers/firmware/meson/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_MESON_SM) +=	meson_sm.o
diff --git a/drivers/firmware/meson/meson_sm.c b/drivers/firmware/meson/meson_sm.c
new file mode 100644
index 0000000..0ec2ca8
--- /dev/null
+++ b/drivers/firmware/meson/meson_sm.c
@@ -0,0 +1,259 @@
+/*
+ * Amlogic Secure Monitor driver
+ *
+ * Copyright (C) 2016 Endless Mobile, Inc.
+ * Author: Carlo Caione <carlo@endlessm.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#define pr_fmt(fmt) "meson-sm: " fmt
+
+#include <linux/arm-smccc.h>
+#include <linux/bug.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/printk.h>
+#include <linux/types.h>
+#include <linux/sizes.h>
+
+#include <linux/firmware/meson/meson_sm.h>
+
+struct meson_sm_cmd {
+	unsigned int index;
+	u32 smc_id;
+};
+#define CMD(d, s) { .index = (d), .smc_id = (s), }
+
+struct meson_sm_chip {
+	unsigned int shmem_size;
+	u32 cmd_shmem_in_base;
+	u32 cmd_shmem_out_base;
+	struct meson_sm_cmd cmd[];
+};
+
+struct meson_sm_chip gxbb_chip = {
+	.shmem_size		= SZ_4K,
+	.cmd_shmem_in_base	= 0x82000020,
+	.cmd_shmem_out_base	= 0x82000021,
+	.cmd = {
+		CMD(SM_EFUSE_READ,	0x82000030),
+		CMD(SM_EFUSE_WRITE,	0x82000031),
+		CMD(SM_EFUSE_USER_MAX,	0x82000033),
+		{ /* sentinel */ },
+	},
+};
+
+struct meson_sm_firmware {
+	const struct meson_sm_chip *chip;
+	void __iomem *sm_shmem_in_base;
+	void __iomem *sm_shmem_out_base;
+};
+
+static struct meson_sm_firmware fw;
+
+static u32 meson_sm_get_cmd(const struct meson_sm_chip *chip,
+			    unsigned int cmd_index)
+{
+	const struct meson_sm_cmd *cmd = chip->cmd;
+
+	while (cmd->smc_id && cmd->index != cmd_index)
+		cmd++;
+
+	return cmd->smc_id;
+}
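+
+/*
+ * Editor's note: the lookup above relies on the zero-filled sentinel
+ * entry terminating the chip's cmd[] table. An unknown index walks to
+ * the sentinel and returns its smc_id of 0, which meson_sm_call()
+ * below turns into -EINVAL.
+ */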
+
+static u32 __meson_sm_call(u32 cmd, u32 arg0, u32 arg1, u32 arg2,
+			   u32 arg3, u32 arg4)
+{
+	struct arm_smccc_res res;
+
+	arm_smccc_smc(cmd, arg0, arg1, arg2, arg3, arg4, 0, 0, &res);
+	return res.a0;
+}
+
+static void __iomem *meson_sm_map_shmem(u32 cmd_shmem, unsigned int size)
+{
+	u32 sm_phy_base;
+
+	sm_phy_base = __meson_sm_call(cmd_shmem, 0, 0, 0, 0, 0);
+	if (!sm_phy_base)
+		return NULL;
+
+	return ioremap_cache(sm_phy_base, size);
+}
+
+/**
+ * meson_sm_call - generic SMC32 call to the secure-monitor
+ *
+ * @cmd_index:	Index of the SMC32 function ID
+ * @ret:	Returned value
+ * @arg0:	SMC32 Argument 0
+ * @arg1:	SMC32 Argument 1
+ * @arg2:	SMC32 Argument 2
+ * @arg3:	SMC32 Argument 3
+ * @arg4:	SMC32 Argument 4
+ *
+ * Return:	0 on success, a negative value on error
+ */
+int meson_sm_call(unsigned int cmd_index, u32 *ret, u32 arg0,
+		  u32 arg1, u32 arg2, u32 arg3, u32 arg4)
+{
+	u32 cmd, lret;
+
+	if (!fw.chip)
+		return -ENOENT;
+
+	cmd = meson_sm_get_cmd(fw.chip, cmd_index);
+	if (!cmd)
+		return -EINVAL;
+
+	lret = __meson_sm_call(cmd, arg0, arg1, arg2, arg3, arg4);
+
+	if (ret)
+		*ret = lret;
+
+	return 0;
+}
+EXPORT_SYMBOL(meson_sm_call);
+
+/**
+ * meson_sm_call_read - retrieve data from secure-monitor
+ *
+ * @buffer:	Buffer to store the retrieved data
+ * @bsize:	Size of the buffer
+ * @cmd_index:	Index of the SMC32 function ID
+ * @arg0:	SMC32 Argument 0
+ * @arg1:	SMC32 Argument 1
+ * @arg2:	SMC32 Argument 2
+ * @arg3:	SMC32 Argument 3
+ * @arg4:	SMC32 Argument 4
+ *
+ * Return:	size of read data on success, a negative value on error
+ *		When 0 is returned there is no guarantee about the amount of
+ *		data read; bsize bytes are copied into buffer.
+ */
+int meson_sm_call_read(void *buffer, unsigned int bsize, unsigned int cmd_index,
+		       u32 arg0, u32 arg1, u32 arg2, u32 arg3, u32 arg4)
+{
+	u32 size;
+	int ret;
+
+	if (!fw.chip)
+		return -ENOENT;
+
+	if (!fw.chip->cmd_shmem_out_base)
+		return -EINVAL;
+
+	if (bsize > fw.chip->shmem_size)
+		return -EINVAL;
+
+	if (meson_sm_call(cmd_index, &size, arg0, arg1, arg2, arg3, arg4) < 0)
+		return -EINVAL;
+
+	if (size > bsize)
+		return -EINVAL;
+
+	ret = size;
+
+	if (!size)
+		size = bsize;
+
+	if (buffer)
+		memcpy(buffer, fw.sm_shmem_out_base, size);
+
+	return ret;
+}
+EXPORT_SYMBOL(meson_sm_call_read);
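+
+/*
+ * Editor's sketch of a hypothetical caller (the in-tree consumer is the
+ * Meson eFuse nvmem driver; "offset" here is an assumed caller-chosen
+ * eFuse offset):
+ *
+ *	u8 buf[16];
+ *	int n = meson_sm_call_read(buf, sizeof(buf), SM_EFUSE_READ,
+ *				   offset, sizeof(buf), 0, 0, 0);
+ *	if (n >= 0)
+ *		;	// up to sizeof(buf) bytes copied from shmem_out
+ *
+ * SM_EFUSE_READ is resolved to its SMC function ID through the chip's
+ * cmd[] table.
+ */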
+
+/**
+ * meson_sm_call_write - send data to secure-monitor
+ *
+ * @buffer:	Buffer containing data to send
+ * @size:	Size of the data to send
+ * @cmd_index:	Index of the SMC32 function ID
+ * @arg0:	SMC32 Argument 0
+ * @arg1:	SMC32 Argument 1
+ * @arg2:	SMC32 Argument 2
+ * @arg3:	SMC32 Argument 3
+ * @arg4:	SMC32 Argument 4
+ *
+ * Return:	size of sent data on success, a negative value on error
+ */
+int meson_sm_call_write(void *buffer, unsigned int size, unsigned int cmd_index,
+			u32 arg0, u32 arg1, u32 arg2, u32 arg3, u32 arg4)
+{
+	u32 written;
+
+	if (!fw.chip)
+		return -ENOENT;
+
+	if (size > fw.chip->shmem_size)
+		return -EINVAL;
+
+	if (!fw.chip->cmd_shmem_in_base)
+		return -EINVAL;
+
+	memcpy(fw.sm_shmem_in_base, buffer, size);
+
+	if (meson_sm_call(cmd_index, &written, arg0, arg1, arg2, arg3, arg4) < 0)
+		return -EINVAL;
+
+	if (!written)
+		return -EINVAL;
+
+	return written;
+}
+EXPORT_SYMBOL(meson_sm_call_write);
+
+static const struct of_device_id meson_sm_ids[] = {
+	{ .compatible = "amlogic,meson-gxbb-sm", .data = &gxbb_chip },
+	{ /* sentinel */ },
+};
+
+static int __init meson_sm_probe(struct platform_device *pdev)
+{
+	const struct meson_sm_chip *chip;
+
+	chip = of_match_device(meson_sm_ids, &pdev->dev)->data;
+
+	if (chip->cmd_shmem_in_base) {
+		fw.sm_shmem_in_base = meson_sm_map_shmem(chip->cmd_shmem_in_base,
+							 chip->shmem_size);
+		if (WARN_ON(!fw.sm_shmem_in_base))
+			goto out;
+	}
+
+	if (chip->cmd_shmem_out_base) {
+		fw.sm_shmem_out_base = meson_sm_map_shmem(chip->cmd_shmem_out_base,
+							  chip->shmem_size);
+		if (WARN_ON(!fw.sm_shmem_out_base))
+			goto out_in_base;
+	}
+
+	fw.chip = chip;
+	pr_info("secure-monitor enabled\n");
+
+	return 0;
+
+out_in_base:
+	iounmap(fw.sm_shmem_in_base);
+out:
+	return -EINVAL;
+}
+
+static struct platform_driver meson_sm_driver = {
+	.driver = {
+		.name = "meson-sm",
+		.of_match_table = of_match_ptr(meson_sm_ids),
+	},
+};
+module_platform_driver_probe(meson_sm_driver, meson_sm_probe);
diff --git a/drivers/firmware/pcdp.c b/drivers/firmware/pcdp.c
new file mode 100644
index 0000000..e83d6ae
--- /dev/null
+++ b/drivers/firmware/pcdp.c
@@ -0,0 +1,136 @@
+/*
+ * Parse the EFI PCDP table to locate the console device.
+ *
+ * (c) Copyright 2002, 2003, 2004 Hewlett-Packard Development Company, L.P.
+ *	Khalid Aziz <khalid.aziz@hp.com>
+ *	Alex Williamson <alex.williamson@hp.com>
+ *	Bjorn Helgaas <bjorn.helgaas@hp.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/acpi.h>
+#include <linux/console.h>
+#include <linux/efi.h>
+#include <linux/serial.h>
+#include <linux/serial_core.h>
+#include <asm/vga.h>
+#include "pcdp.h"
+
+static int __init
+setup_serial_console(struct pcdp_uart *uart)
+{
+#ifdef CONFIG_SERIAL_8250_CONSOLE
+	int mmio;
+	static char options[64], *p = options;
+	char parity;
+
+	mmio = (uart->addr.space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY);
+	p += sprintf(p, "uart8250,%s,0x%llx",
+		mmio ? "mmio" : "io", uart->addr.address);
+	if (uart->baud) {
+		p += sprintf(p, ",%llu", uart->baud);
+		if (uart->bits) {
+			switch (uart->parity) {
+			    case 0x2: parity = 'e'; break;
+			    case 0x3: parity = 'o'; break;
+			    default:  parity = 'n';
+			}
+			p += sprintf(p, "%c%d", parity, uart->bits);
+		}
+	}
+
+	add_preferred_console("uart", 8250, &options[9]);
+	return setup_earlycon(options);
+#else
+	return -ENODEV;
+#endif
+}
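+
+/*
+ * Editor's example: for an MMIO UART at 0xf8030000 running 115200n8 the
+ * code above builds
+ *
+ *	options = "uart8250,mmio,0xf8030000,115200n8"
+ *
+ * passes the full string to setup_earlycon(), and registers &options[9]
+ * (i.e. "mmio,0xf8030000,115200n8") as the preferred "uart" console.
+ */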
+
+static int __init
+setup_vga_console(struct pcdp_device *dev)
+{
+#if defined(CONFIG_VT) && defined(CONFIG_VGA_CONSOLE)
+	u8 *if_ptr;
+
+	if_ptr = ((u8 *)dev + sizeof(struct pcdp_device));
+	if (if_ptr[0] == PCDP_IF_PCI) {
+		struct pcdp_if_pci if_pci;
+
+		/* struct copy since if_ptr might not be correctly aligned */
+
+		memcpy(&if_pci, if_ptr, sizeof(if_pci));
+
+		if (if_pci.trans & PCDP_PCI_TRANS_IOPORT)
+			vga_console_iobase = if_pci.ioport_tra;
+
+		if (if_pci.trans & PCDP_PCI_TRANS_MMIO)
+			vga_console_membase = if_pci.mmio_tra;
+	}
+
+	if (efi_mem_type(vga_console_membase + 0xA0000) == EFI_CONVENTIONAL_MEMORY) {
+		printk(KERN_ERR "PCDP: VGA selected, but frame buffer is not MMIO!\n");
+		return -ENODEV;
+	}
+
+	conswitchp = &vga_con;
+	printk(KERN_INFO "PCDP: VGA console\n");
+	return 0;
+#else
+	return -ENODEV;
+#endif
+}
+
+int __init
+efi_setup_pcdp_console(char *cmdline)
+{
+	struct pcdp *pcdp;
+	struct pcdp_uart *uart;
+	struct pcdp_device *dev, *end;
+	int i, serial = 0;
+	int rc = -ENODEV;
+
+	if (efi.hcdp == EFI_INVALID_TABLE_ADDR)
+		return -ENODEV;
+
+	pcdp = early_memremap(efi.hcdp, 4096);
+	printk(KERN_INFO "PCDP: v%d at 0x%lx\n", pcdp->rev, efi.hcdp);
+
+	if (strstr(cmdline, "console=hcdp")) {
+		if (pcdp->rev < 3)
+			serial = 1;
+	} else if (strstr(cmdline, "console=")) {
+		printk(KERN_INFO "Explicit \"console=\"; ignoring PCDP\n");
+		goto out;
+	}
+
+	if (pcdp->rev < 3 && efi_uart_console_only())
+		serial = 1;
+
+	for (i = 0, uart = pcdp->uart; i < pcdp->num_uarts; i++, uart++) {
+		if (uart->flags & PCDP_UART_PRIMARY_CONSOLE || serial) {
+			if (uart->type == PCDP_CONSOLE_UART) {
+				rc = setup_serial_console(uart);
+				goto out;
+			}
+		}
+	}
+
+	end = (struct pcdp_device *) ((u8 *) pcdp + pcdp->length);
+	for (dev = (struct pcdp_device *) (pcdp->uart + pcdp->num_uarts);
+	     dev < end;
+	     dev = (struct pcdp_device *) ((u8 *) dev + dev->length)) {
+		if (dev->flags & PCDP_PRIMARY_CONSOLE) {
+			if (dev->type == PCDP_CONSOLE_VGA) {
+				rc = setup_vga_console(dev);
+				goto out;
+			}
+		}
+	}
+
+out:
+	early_memunmap(pcdp, 4096);
+	return rc;
+}
diff --git a/drivers/firmware/pcdp.h b/drivers/firmware/pcdp.h
new file mode 100644
index 0000000..e553060
--- /dev/null
+++ b/drivers/firmware/pcdp.h
@@ -0,0 +1,111 @@
+/*
+ * Definitions for PCDP-defined console devices
+ *
+ * For DIG64_HCDPv10a_01.pdf and DIG64_PCDPv20.pdf (v1.0a and v2.0 resp.),
+ * please see <http://www.dig64.org/specifications/>
+ *
+ * (c) Copyright 2002, 2004 Hewlett-Packard Development Company, L.P.
+ *	Khalid Aziz <khalid.aziz@hp.com>
+ *	Bjorn Helgaas <bjorn.helgaas@hp.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#define PCDP_CONSOLE			0
+#define PCDP_DEBUG			1
+#define PCDP_CONSOLE_OUTPUT		2
+#define PCDP_CONSOLE_INPUT		3
+
+#define PCDP_UART			(0 << 3)
+#define PCDP_VGA			(1 << 3)
+#define PCDP_USB			(2 << 3)
+
+/* pcdp_uart.type and pcdp_device.type */
+#define PCDP_CONSOLE_UART		(PCDP_UART | PCDP_CONSOLE)
+#define PCDP_DEBUG_UART			(PCDP_UART | PCDP_DEBUG)
+#define PCDP_CONSOLE_VGA		(PCDP_VGA  | PCDP_CONSOLE_OUTPUT)
+#define PCDP_CONSOLE_USB		(PCDP_USB  | PCDP_CONSOLE_INPUT)
+
+/* pcdp_uart.flags */
+#define PCDP_UART_EDGE_SENSITIVE	(1 << 0)
+#define PCDP_UART_ACTIVE_LOW		(1 << 1)
+#define PCDP_UART_PRIMARY_CONSOLE	(1 << 2)
+#define PCDP_UART_IRQ			(1 << 6) /* in pci_func for rev < 3 */
+#define PCDP_UART_PCI			(1 << 7) /* in pci_func for rev < 3 */
+
+struct pcdp_uart {
+	u8				type;
+	u8				bits;
+	u8				parity;
+	u8				stop_bits;
+	u8				pci_seg;
+	u8				pci_bus;
+	u8				pci_dev;
+	u8				pci_func;
+	u64				baud;
+	struct acpi_generic_address	addr;
+	u16				pci_dev_id;
+	u16				pci_vendor_id;
+	u32				gsi;
+	u32				clock_rate;
+	u8				pci_prog_intfc;
+	u8				flags;
+	u16				conout_index;
+	u32				reserved;
+} __attribute__((packed));
+
+#define PCDP_IF_PCI	1
+
+/* pcdp_if_pci.trans */
+#define PCDP_PCI_TRANS_IOPORT	0x02
+#define PCDP_PCI_TRANS_MMIO	0x01
+
+struct pcdp_if_pci {
+	u8			interconnect;
+	u8			reserved;
+	u16			length;
+	u8			segment;
+	u8			bus;
+	u8			dev;
+	u8			fun;
+	u16			dev_id;
+	u16			vendor_id;
+	u32			acpi_interrupt;
+	u64			mmio_tra;
+	u64			ioport_tra;
+	u8			flags;
+	u8			trans;
+} __attribute__((packed));
+
+struct pcdp_vga {
+	u8			count;		/* address space descriptors */
+} __attribute__((packed));
+
+/* pcdp_device.flags */
+#define PCDP_PRIMARY_CONSOLE	1
+
+struct pcdp_device {
+	u8			type;
+	u8			flags;
+	u16			length;
+	u16			efi_index;
+	/* next data is pcdp_if_pci or pcdp_if_acpi (not yet supported) */
+	/* next data is device specific type (currently only pcdp_vga) */
+} __attribute__((packed));
+
+struct pcdp {
+	u8			signature[4];
+	u32			length;
+	u8			rev;		/* PCDP v2.0 is rev 3 */
+	u8			chksum;
+	u8			oemid[6];
+	u8			oem_tabid[8];
+	u32			oem_rev;
+	u8			creator_id[4];
+	u32			creator_rev;
+	u32			num_uarts;
+	struct pcdp_uart	uart[0];	/* actual size is num_uarts */
+	/* remainder of table is pcdp_device structures */
+} __attribute__((packed));
diff --git a/drivers/firmware/psci.c b/drivers/firmware/psci.c
new file mode 100644
index 0000000..c80ec1d
--- /dev/null
+++ b/drivers/firmware/psci.c
@@ -0,0 +1,708 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * Copyright (C) 2015 ARM Limited
+ */
+
+#define pr_fmt(fmt) "psci: " fmt
+
+#include <linux/acpi.h>
+#include <linux/arm-smccc.h>
+#include <linux/cpuidle.h>
+#include <linux/errno.h>
+#include <linux/linkage.h>
+#include <linux/of.h>
+#include <linux/pm.h>
+#include <linux/printk.h>
+#include <linux/psci.h>
+#include <linux/reboot.h>
+#include <linux/slab.h>
+#include <linux/suspend.h>
+
+#include <uapi/linux/psci.h>
+
+#include <asm/cpuidle.h>
+#include <asm/cputype.h>
+#include <asm/system_misc.h>
+#include <asm/smp_plat.h>
+#include <asm/suspend.h>
+
+/*
+ * While a 64-bit OS can make calls with SMC32 calling conventions, for some
+ * calls it is necessary to use SMC64 to pass or return 64-bit values.
+ * For such calls PSCI_FN_NATIVE(version, name) will choose the appropriate
+ * (native-width) function ID.
+ */
+#ifdef CONFIG_64BIT
+#define PSCI_FN_NATIVE(version, name)	PSCI_##version##_FN64_##name
+#else
+#define PSCI_FN_NATIVE(version, name)	PSCI_##version##_FN_##name
+#endif
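+
+/*
+ * Editor's example: PSCI_FN_NATIVE(0_2, CPU_ON) expands to
+ * PSCI_0_2_FN64_CPU_ON on a 64-bit kernel and to PSCI_0_2_FN_CPU_ON on
+ * a 32-bit one, so the callers below stay register-width agnostic.
+ */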
+
+/*
+ * The CPU any Trusted OS is resident on. The trusted OS may reject CPU_OFF
+ * calls to its resident CPU, so we must avoid issuing those. We never migrate
+ * a Trusted OS even if it claims to be capable of migration -- doing so will
+ * require cooperation with a Trusted OS driver.
+ */
+static int resident_cpu = -1;
+
+bool psci_tos_resident_on(int cpu)
+{
+	return cpu == resident_cpu;
+}
+
+struct psci_operations psci_ops = {
+	.conduit = PSCI_CONDUIT_NONE,
+	.smccc_version = SMCCC_VERSION_1_0,
+};
+
+typedef unsigned long (psci_fn)(unsigned long, unsigned long,
+				unsigned long, unsigned long);
+static psci_fn *invoke_psci_fn;
+
+enum psci_function {
+	PSCI_FN_CPU_SUSPEND,
+	PSCI_FN_CPU_ON,
+	PSCI_FN_CPU_OFF,
+	PSCI_FN_MIGRATE,
+	PSCI_FN_MAX,
+};
+
+static u32 psci_function_id[PSCI_FN_MAX];
+
+#define PSCI_0_2_POWER_STATE_MASK		\
+				(PSCI_0_2_POWER_STATE_ID_MASK | \
+				PSCI_0_2_POWER_STATE_TYPE_MASK | \
+				PSCI_0_2_POWER_STATE_AFFL_MASK)
+
+#define PSCI_1_0_EXT_POWER_STATE_MASK		\
+				(PSCI_1_0_EXT_POWER_STATE_ID_MASK | \
+				PSCI_1_0_EXT_POWER_STATE_TYPE_MASK)
+
+static u32 psci_cpu_suspend_feature;
+
+static inline bool psci_has_ext_power_state(void)
+{
+	return psci_cpu_suspend_feature &
+				PSCI_1_0_FEATURES_CPU_SUSPEND_PF_MASK;
+}
+
+static inline bool psci_power_state_loses_context(u32 state)
+{
+	const u32 mask = psci_has_ext_power_state() ?
+					PSCI_1_0_EXT_POWER_STATE_TYPE_MASK :
+					PSCI_0_2_POWER_STATE_TYPE_MASK;
+
+	return state & mask;
+}
+
+static inline bool psci_power_state_is_valid(u32 state)
+{
+	const u32 valid_mask = psci_has_ext_power_state() ?
+			       PSCI_1_0_EXT_POWER_STATE_MASK :
+			       PSCI_0_2_POWER_STATE_MASK;
+
+	return !(state & ~valid_mask);
+}
+
+static unsigned long __invoke_psci_fn_hvc(unsigned long function_id,
+			unsigned long arg0, unsigned long arg1,
+			unsigned long arg2)
+{
+	struct arm_smccc_res res;
+
+	arm_smccc_hvc(function_id, arg0, arg1, arg2, 0, 0, 0, 0, &res);
+	return res.a0;
+}
+
+static unsigned long __invoke_psci_fn_smc(unsigned long function_id,
+			unsigned long arg0, unsigned long arg1,
+			unsigned long arg2)
+{
+	struct arm_smccc_res res;
+
+	arm_smccc_smc(function_id, arg0, arg1, arg2, 0, 0, 0, 0, &res);
+	return res.a0;
+}
+
+static int psci_to_linux_errno(int errno)
+{
+	switch (errno) {
+	case PSCI_RET_SUCCESS:
+		return 0;
+	case PSCI_RET_NOT_SUPPORTED:
+		return -EOPNOTSUPP;
+	case PSCI_RET_INVALID_PARAMS:
+	case PSCI_RET_INVALID_ADDRESS:
+		return -EINVAL;
+	case PSCI_RET_DENIED:
+		return -EPERM;
+	}
+
+	return -EINVAL;
+}
+
+static u32 psci_get_version(void)
+{
+	return invoke_psci_fn(PSCI_0_2_FN_PSCI_VERSION, 0, 0, 0);
+}
+
+static int psci_cpu_suspend(u32 state, unsigned long entry_point)
+{
+	int err;
+	u32 fn;
+
+	fn = psci_function_id[PSCI_FN_CPU_SUSPEND];
+	err = invoke_psci_fn(fn, state, entry_point, 0);
+	return psci_to_linux_errno(err);
+}
+
+static int psci_cpu_off(u32 state)
+{
+	int err;
+	u32 fn;
+
+	fn = psci_function_id[PSCI_FN_CPU_OFF];
+	err = invoke_psci_fn(fn, state, 0, 0);
+	return psci_to_linux_errno(err);
+}
+
+static int psci_cpu_on(unsigned long cpuid, unsigned long entry_point)
+{
+	int err;
+	u32 fn;
+
+	fn = psci_function_id[PSCI_FN_CPU_ON];
+	err = invoke_psci_fn(fn, cpuid, entry_point, 0);
+	return psci_to_linux_errno(err);
+}
+
+static int psci_migrate(unsigned long cpuid)
+{
+	int err;
+	u32 fn;
+
+	fn = psci_function_id[PSCI_FN_MIGRATE];
+	err = invoke_psci_fn(fn, cpuid, 0, 0);
+	return psci_to_linux_errno(err);
+}
+
+static int psci_affinity_info(unsigned long target_affinity,
+		unsigned long lowest_affinity_level)
+{
+	return invoke_psci_fn(PSCI_FN_NATIVE(0_2, AFFINITY_INFO),
+			      target_affinity, lowest_affinity_level, 0);
+}
+
+static int psci_migrate_info_type(void)
+{
+	return invoke_psci_fn(PSCI_0_2_FN_MIGRATE_INFO_TYPE, 0, 0, 0);
+}
+
+static unsigned long psci_migrate_info_up_cpu(void)
+{
+	return invoke_psci_fn(PSCI_FN_NATIVE(0_2, MIGRATE_INFO_UP_CPU),
+			      0, 0, 0);
+}
+
+static void set_conduit(enum psci_conduit conduit)
+{
+	switch (conduit) {
+	case PSCI_CONDUIT_HVC:
+		invoke_psci_fn = __invoke_psci_fn_hvc;
+		break;
+	case PSCI_CONDUIT_SMC:
+		invoke_psci_fn = __invoke_psci_fn_smc;
+		break;
+	default:
+		WARN(1, "Unexpected PSCI conduit %d\n", conduit);
+	}
+
+	psci_ops.conduit = conduit;
+}
+
+static int get_set_conduit_method(struct device_node *np)
+{
+	const char *method;
+
+	pr_info("probing for conduit method from DT.\n");
+
+	if (of_property_read_string(np, "method", &method)) {
+		pr_warn("missing \"method\" property\n");
+		return -ENXIO;
+	}
+
+	if (!strcmp("hvc", method)) {
+		set_conduit(PSCI_CONDUIT_HVC);
+	} else if (!strcmp("smc", method)) {
+		set_conduit(PSCI_CONDUIT_SMC);
+	} else {
+		pr_warn("invalid \"method\" property: %s\n", method);
+		return -EINVAL;
+	}
+	return 0;
+}
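+
+/*
+ * Editor's illustration: a minimal matching device-tree node (assumed
+ * example, per the arm,psci binding) looks like
+ *
+ *	psci {
+ *		compatible = "arm,psci-1.0";
+ *		method = "smc";
+ *	};
+ *
+ * where "method" selects the conduit parsed above.
+ */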
+
+static void psci_sys_reset(enum reboot_mode reboot_mode, const char *cmd)
+{
+	invoke_psci_fn(PSCI_0_2_FN_SYSTEM_RESET, 0, 0, 0);
+}
+
+static void psci_sys_poweroff(void)
+{
+	invoke_psci_fn(PSCI_0_2_FN_SYSTEM_OFF, 0, 0, 0);
+}
+
+static int __init psci_features(u32 psci_func_id)
+{
+	return invoke_psci_fn(PSCI_1_0_FN_PSCI_FEATURES,
+			      psci_func_id, 0, 0);
+}
+
+#ifdef CONFIG_CPU_IDLE
+static DEFINE_PER_CPU_READ_MOSTLY(u32 *, psci_power_state);
+
+static int psci_dt_cpu_init_idle(struct device_node *cpu_node, int cpu)
+{
+	int i, ret, count = 0;
+	u32 *psci_states;
+	struct device_node *state_node;
+
+	/* Count idle states */
+	while ((state_node = of_parse_phandle(cpu_node, "cpu-idle-states",
+					      count))) {
+		count++;
+		of_node_put(state_node);
+	}
+
+	if (!count)
+		return -ENODEV;
+
+	psci_states = kcalloc(count, sizeof(*psci_states), GFP_KERNEL);
+	if (!psci_states)
+		return -ENOMEM;
+
+	for (i = 0; i < count; i++) {
+		u32 state;
+
+		state_node = of_parse_phandle(cpu_node, "cpu-idle-states", i);
+
+		ret = of_property_read_u32(state_node,
+					   "arm,psci-suspend-param",
+					   &state);
+		if (ret) {
+			pr_warn(" * %pOF missing arm,psci-suspend-param property\n",
+				state_node);
+			of_node_put(state_node);
+			goto free_mem;
+		}
+
+		of_node_put(state_node);
+		pr_debug("psci-power-state %#x index %d\n", state, i);
+		if (!psci_power_state_is_valid(state)) {
+			pr_warn("Invalid PSCI power state %#x\n", state);
+			ret = -EINVAL;
+			goto free_mem;
+		}
+		psci_states[i] = state;
+	}
+	/* Idle states parsed correctly, initialize per-cpu pointer */
+	per_cpu(psci_power_state, cpu) = psci_states;
+	return 0;
+
+free_mem:
+	kfree(psci_states);
+	return ret;
+}
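+
+/*
+ * Editor's illustration: an idle state referenced via "cpu-idle-states"
+ * carries the parameter parsed above (assumed example, following the
+ * arm,idle-state binding):
+ *
+ *	CPU_SLEEP: cpu-sleep {
+ *		compatible = "arm,idle-state";
+ *		arm,psci-suspend-param = <0x0010000>;
+ *		entry-latency-us = <40>;
+ *		exit-latency-us = <100>;
+ *		min-residency-us = <300>;
+ *	};
+ */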
+
+#ifdef CONFIG_ACPI
+#include <acpi/processor.h>
+
+static int __maybe_unused psci_acpi_cpu_init_idle(unsigned int cpu)
+{
+	int i, count;
+	u32 *psci_states;
+	struct acpi_lpi_state *lpi;
+	struct acpi_processor *pr = per_cpu(processors, cpu);
+
+	if (unlikely(!pr || !pr->flags.has_lpi))
+		return -EINVAL;
+
+	count = pr->power.count - 1;
+	if (count <= 0)
+		return -ENODEV;
+
+	psci_states = kcalloc(count, sizeof(*psci_states), GFP_KERNEL);
+	if (!psci_states)
+		return -ENOMEM;
+
+	for (i = 0; i < count; i++) {
+		u32 state;
+
+		lpi = &pr->power.lpi_states[i + 1];
+		/*
+		 * Only bits[31:0] represent a PSCI power_state while
+		 * bits[63:32] must be 0x0 as per ARM ACPI FFH Specification
+		 */
+		state = lpi->address;
+		if (!psci_power_state_is_valid(state)) {
+			pr_warn("Invalid PSCI power state %#x\n", state);
+			kfree(psci_states);
+			return -EINVAL;
+		}
+		psci_states[i] = state;
+	}
+	/* Idle states parsed correctly, initialize per-cpu pointer */
+	per_cpu(psci_power_state, cpu) = psci_states;
+	return 0;
+}
+#else
+static int __maybe_unused psci_acpi_cpu_init_idle(unsigned int cpu)
+{
+	return -EINVAL;
+}
+#endif
+
+int psci_cpu_init_idle(unsigned int cpu)
+{
+	struct device_node *cpu_node;
+	int ret;
+
+	/*
+	 * If the PSCI cpu_suspend function hook has not been initialized
+	 * idle states must not be enabled, so bail out
+	 */
+	if (!psci_ops.cpu_suspend)
+		return -EOPNOTSUPP;
+
+	if (!acpi_disabled)
+		return psci_acpi_cpu_init_idle(cpu);
+
+	cpu_node = of_get_cpu_node(cpu, NULL);
+	if (!cpu_node)
+		return -ENODEV;
+
+	ret = psci_dt_cpu_init_idle(cpu_node, cpu);
+
+	of_node_put(cpu_node);
+
+	return ret;
+}
+
+static int psci_suspend_finisher(unsigned long index)
+{
+	u32 *state = __this_cpu_read(psci_power_state);
+
+	return psci_ops.cpu_suspend(state[index - 1],
+				    __pa_symbol(cpu_resume));
+}
+
+int psci_cpu_suspend_enter(unsigned long index)
+{
+	int ret;
+	u32 *state = __this_cpu_read(psci_power_state);
+	/*
+	 * idle state index 0 corresponds to wfi and should never be
+	 * called from the cpu_suspend operations
+	 */
+	if (WARN_ON_ONCE(!index))
+		return -EINVAL;
+
+	if (!psci_power_state_loses_context(state[index - 1]))
+		ret = psci_ops.cpu_suspend(state[index - 1], 0);
+	else
+		ret = cpu_suspend(index, psci_suspend_finisher);
+
+	return ret;
+}
+
+/* ARM specific CPU idle operations */
+#ifdef CONFIG_ARM
+static const struct cpuidle_ops psci_cpuidle_ops __initconst = {
+	.suspend = psci_cpu_suspend_enter,
+	.init = psci_dt_cpu_init_idle,
+};
+
+CPUIDLE_METHOD_OF_DECLARE(psci, "psci", &psci_cpuidle_ops);
+#endif
+#endif
+
+static int psci_system_suspend(unsigned long unused)
+{
+	return invoke_psci_fn(PSCI_FN_NATIVE(1_0, SYSTEM_SUSPEND),
+			      __pa_symbol(cpu_resume), 0, 0);
+}
+
+static int psci_system_suspend_enter(suspend_state_t state)
+{
+	return cpu_suspend(0, psci_system_suspend);
+}
+
+static const struct platform_suspend_ops psci_suspend_ops = {
+	.valid          = suspend_valid_only_mem,
+	.enter          = psci_system_suspend_enter,
+};
+
+static void __init psci_init_system_suspend(void)
+{
+	int ret;
+
+	if (!IS_ENABLED(CONFIG_SUSPEND))
+		return;
+
+	ret = psci_features(PSCI_FN_NATIVE(1_0, SYSTEM_SUSPEND));
+
+	if (ret != PSCI_RET_NOT_SUPPORTED)
+		suspend_set_ops(&psci_suspend_ops);
+}
+
+static void __init psci_init_cpu_suspend(void)
+{
+	int feature = psci_features(psci_function_id[PSCI_FN_CPU_SUSPEND]);
+
+	if (feature != PSCI_RET_NOT_SUPPORTED)
+		psci_cpu_suspend_feature = feature;
+}
+
+/*
+ * Detect the presence of a resident Trusted OS which may cause CPU_OFF to
+ * return DENIED (which would be fatal).
+ */
+static void __init psci_init_migrate(void)
+{
+	unsigned long cpuid;
+	int type, cpu = -1;
+
+	type = psci_ops.migrate_info_type();
+
+	if (type == PSCI_0_2_TOS_MP) {
+		pr_info("Trusted OS migration not required\n");
+		return;
+	}
+
+	if (type == PSCI_RET_NOT_SUPPORTED) {
+		pr_info("MIGRATE_INFO_TYPE not supported.\n");
+		return;
+	}
+
+	if (type != PSCI_0_2_TOS_UP_MIGRATE &&
+	    type != PSCI_0_2_TOS_UP_NO_MIGRATE) {
+		pr_err("MIGRATE_INFO_TYPE returned unknown type (%d)\n", type);
+		return;
+	}
+
+	cpuid = psci_migrate_info_up_cpu();
+	if (cpuid & ~MPIDR_HWID_BITMASK) {
+		pr_warn("MIGRATE_INFO_UP_CPU reported invalid physical ID (0x%lx)\n",
+			cpuid);
+		return;
+	}
+
+	cpu = get_logical_index(cpuid);
+	resident_cpu = cpu >= 0 ? cpu : -1;
+
+	pr_info("Trusted OS resident on physical CPU 0x%lx\n", cpuid);
+}
+
+static void __init psci_init_smccc(void)
+{
+	u32 ver = ARM_SMCCC_VERSION_1_0;
+	int feature;
+
+	feature = psci_features(ARM_SMCCC_VERSION_FUNC_ID);
+
+	if (feature != PSCI_RET_NOT_SUPPORTED) {
+		u32 ret;
+		ret = invoke_psci_fn(ARM_SMCCC_VERSION_FUNC_ID, 0, 0, 0);
+		if (ret == ARM_SMCCC_VERSION_1_1) {
+			psci_ops.smccc_version = SMCCC_VERSION_1_1;
+			ver = ret;
+		}
+	}
+
+	/*
+	 * Conveniently, the SMCCC and PSCI versions are encoded the
+	 * same way. No, this isn't accidental.
+	 */
+	pr_info("SMC Calling Convention v%d.%d\n",
+		PSCI_VERSION_MAJOR(ver), PSCI_VERSION_MINOR(ver));
+}
+
+static void __init psci_0_2_set_functions(void)
+{
+	pr_info("Using standard PSCI v0.2 function IDs\n");
+	psci_ops.get_version = psci_get_version;
+
+	psci_function_id[PSCI_FN_CPU_SUSPEND] =
+					PSCI_FN_NATIVE(0_2, CPU_SUSPEND);
+	psci_ops.cpu_suspend = psci_cpu_suspend;
+
+	psci_function_id[PSCI_FN_CPU_OFF] = PSCI_0_2_FN_CPU_OFF;
+	psci_ops.cpu_off = psci_cpu_off;
+
+	psci_function_id[PSCI_FN_CPU_ON] = PSCI_FN_NATIVE(0_2, CPU_ON);
+	psci_ops.cpu_on = psci_cpu_on;
+
+	psci_function_id[PSCI_FN_MIGRATE] = PSCI_FN_NATIVE(0_2, MIGRATE);
+	psci_ops.migrate = psci_migrate;
+
+	psci_ops.affinity_info = psci_affinity_info;
+
+	psci_ops.migrate_info_type = psci_migrate_info_type;
+
+	arm_pm_restart = psci_sys_reset;
+
+	pm_power_off = psci_sys_poweroff;
+}
+
+/*
+ * Probe function for PSCI firmware versions >= 0.2
+ */
+static int __init psci_probe(void)
+{
+	u32 ver = psci_get_version();
+
+	pr_info("PSCIv%d.%d detected in firmware.\n",
+			PSCI_VERSION_MAJOR(ver),
+			PSCI_VERSION_MINOR(ver));
+
+	if (PSCI_VERSION_MAJOR(ver) == 0 && PSCI_VERSION_MINOR(ver) < 2) {
+		pr_err("Conflicting PSCI version detected.\n");
+		return -EINVAL;
+	}
+
+	psci_0_2_set_functions();
+
+	psci_init_migrate();
+
+	if (PSCI_VERSION_MAJOR(ver) >= 1) {
+		psci_init_smccc();
+		psci_init_cpu_suspend();
+		psci_init_system_suspend();
+	}
+
+	return 0;
+}
+
+typedef int (*psci_initcall_t)(const struct device_node *);
+
+/*
+ * PSCI init function for PSCI versions >= 0.2
+ *
+ * Probe based on the PSCI PSCI_VERSION call
+ */
+static int __init psci_0_2_init(struct device_node *np)
+{
+	int err;
+
+	err = get_set_conduit_method(np);
+
+	if (err)
+		goto out_put_node;
+	/*
+	 * Starting with v0.2, the PSCI specification introduced a call
+	 * (PSCI_VERSION) that allows probing the firmware version, so
+	 * that PSCI function IDs and version-specific initialization
+	 * can be carried out according to the specific version reported
+	 * by firmware.
+	 */
+	err = psci_probe();
+
+out_put_node:
+	of_node_put(np);
+	return err;
+}
+
+/*
+ * PSCI < v0.2 gets PSCI Function IDs via DT.
+ */
+static int __init psci_0_1_init(struct device_node *np)
+{
+	u32 id;
+	int err;
+
+	err = get_set_conduit_method(np);
+
+	if (err)
+		goto out_put_node;
+
+	pr_info("Using PSCI v0.1 Function IDs from DT\n");
+
+	if (!of_property_read_u32(np, "cpu_suspend", &id)) {
+		psci_function_id[PSCI_FN_CPU_SUSPEND] = id;
+		psci_ops.cpu_suspend = psci_cpu_suspend;
+	}
+
+	if (!of_property_read_u32(np, "cpu_off", &id)) {
+		psci_function_id[PSCI_FN_CPU_OFF] = id;
+		psci_ops.cpu_off = psci_cpu_off;
+	}
+
+	if (!of_property_read_u32(np, "cpu_on", &id)) {
+		psci_function_id[PSCI_FN_CPU_ON] = id;
+		psci_ops.cpu_on = psci_cpu_on;
+	}
+
+	if (!of_property_read_u32(np, "migrate", &id)) {
+		psci_function_id[PSCI_FN_MIGRATE] = id;
+		psci_ops.migrate = psci_migrate;
+	}
+
+out_put_node:
+	of_node_put(np);
+	return err;
+}
+
+static const struct of_device_id psci_of_match[] __initconst = {
+	{ .compatible = "arm,psci",	.data = psci_0_1_init},
+	{ .compatible = "arm,psci-0.2",	.data = psci_0_2_init},
+	{ .compatible = "arm,psci-1.0",	.data = psci_0_2_init},
+	{},
+};
+
+int __init psci_dt_init(void)
+{
+	struct device_node *np;
+	const struct of_device_id *matched_np;
+	psci_initcall_t init_fn;
+
+	np = of_find_matching_node_and_match(NULL, psci_of_match, &matched_np);
+
+	if (!np || !of_device_is_available(np))
+		return -ENODEV;
+
+	init_fn = (psci_initcall_t)matched_np->data;
+	return init_fn(np);
+}
+
+#ifdef CONFIG_ACPI
+/*
+ * We use PSCI 0.2+ when ACPI is deployed on ARM64 and it's
+ * explicitly clarified in SBBR
+ */
+int __init psci_acpi_init(void)
+{
+	if (!acpi_psci_present()) {
+		pr_info("is not implemented in ACPI.\n");
+		return -EOPNOTSUPP;
+	}
+
+	pr_info("probing for conduit method from ACPI.\n");
+
+	if (acpi_psci_use_hvc())
+		set_conduit(PSCI_CONDUIT_HVC);
+	else
+		set_conduit(PSCI_CONDUIT_SMC);
+
+	return psci_probe();
+}
+#endif
diff --git a/drivers/firmware/psci_checker.c b/drivers/firmware/psci_checker.c
new file mode 100644
index 0000000..3469436
--- /dev/null
+++ b/drivers/firmware/psci_checker.c
@@ -0,0 +1,505 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * Copyright (C) 2016 ARM Limited
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/atomic.h>
+#include <linux/completion.h>
+#include <linux/cpu.h>
+#include <linux/cpuidle.h>
+#include <linux/cpu_pm.h>
+#include <linux/kernel.h>
+#include <linux/kthread.h>
+#include <uapi/linux/sched/types.h>
+#include <linux/module.h>
+#include <linux/preempt.h>
+#include <linux/psci.h>
+#include <linux/slab.h>
+#include <linux/tick.h>
+#include <linux/topology.h>
+
+#include <asm/cpuidle.h>
+
+#include <uapi/linux/psci.h>
+
+#define NUM_SUSPEND_CYCLE (10)
+
+static unsigned int nb_available_cpus;
+static int tos_resident_cpu = -1;
+
+static atomic_t nb_active_threads;
+static struct completion suspend_threads_started =
+	COMPLETION_INITIALIZER(suspend_threads_started);
+static struct completion suspend_threads_done =
+	COMPLETION_INITIALIZER(suspend_threads_done);
+
+/*
+ * We assume that PSCI operations are used if they are available. This is not
+ * necessarily true on arm64, since the decision is based on the
+ * "enable-method" property of each CPU in the DT, but given that there is no
+ * arch-specific way to check this, we assume that the DT is sensible.
+ */
+static int psci_ops_check(void)
+{
+	int migrate_type = -1;
+	int cpu;
+
+	if (!(psci_ops.cpu_off && psci_ops.cpu_on && psci_ops.cpu_suspend)) {
+		pr_warn("Missing PSCI operations, aborting tests\n");
+		return -EOPNOTSUPP;
+	}
+
+	if (psci_ops.migrate_info_type)
+		migrate_type = psci_ops.migrate_info_type();
+
+	if (migrate_type == PSCI_0_2_TOS_UP_MIGRATE ||
+	    migrate_type == PSCI_0_2_TOS_UP_NO_MIGRATE) {
+		/* There is a UP Trusted OS, find on which core it resides. */
+		for_each_online_cpu(cpu)
+			if (psci_tos_resident_on(cpu)) {
+				tos_resident_cpu = cpu;
+				break;
+			}
+		if (tos_resident_cpu == -1)
+			pr_warn("UP Trusted OS resides on no online CPU\n");
+	}
+
+	return 0;
+}
+
+/*
+ * offlined_cpus is a temporary array but passing it as an argument avoids
+ * multiple allocations.
+ */
+static unsigned int down_and_up_cpus(const struct cpumask *cpus,
+				     struct cpumask *offlined_cpus)
+{
+	int cpu;
+	int err = 0;
+
+	cpumask_clear(offlined_cpus);
+
+	/* Try to power down all CPUs in the mask. */
+	for_each_cpu(cpu, cpus) {
+		int ret = cpu_down(cpu);
+
+		/*
+		 * cpu_down() checks whether this is the last online CPU
+		 * before it checks for the TOS resident CPU, so the
+		 * last-CPU error takes precedence.
+		 */
+		if (cpumask_weight(offlined_cpus) + 1 == nb_available_cpus) {
+			if (ret != -EBUSY) {
+				pr_err("Unexpected return code %d while trying "
+				       "to power down last online CPU %d\n",
+				       ret, cpu);
+				++err;
+			}
+		} else if (cpu == tos_resident_cpu) {
+			if (ret != -EPERM) {
+				pr_err("Unexpected return code %d while trying "
+				       "to power down TOS resident CPU %d\n",
+				       ret, cpu);
+				++err;
+			}
+		} else if (ret != 0) {
+			pr_err("Error occurred (%d) while trying "
+			       "to power down CPU %d\n", ret, cpu);
+			++err;
+		}
+
+		if (ret == 0)
+			cpumask_set_cpu(cpu, offlined_cpus);
+	}
+
+	/* Try to power up all the CPUs that have been offlined. */
+	for_each_cpu(cpu, offlined_cpus) {
+		int ret = cpu_up(cpu);
+
+		if (ret != 0) {
+			pr_err("Error occurred (%d) while trying "
+			       "to power up CPU %d\n", ret, cpu);
+			++err;
+		} else {
+			cpumask_clear_cpu(cpu, offlined_cpus);
+		}
+	}
+
+	/*
+	 * Something went bad at some point and some CPUs could not be turned
+	 * back on.
+	 */
+	WARN_ON(!cpumask_empty(offlined_cpus) ||
+		num_online_cpus() != nb_available_cpus);
+
+	return err;
+}
+
+static void free_cpu_groups(int num, cpumask_var_t **pcpu_groups)
+{
+	int i;
+	cpumask_var_t *cpu_groups = *pcpu_groups;
+
+	for (i = 0; i < num; ++i)
+		free_cpumask_var(cpu_groups[i]);
+	kfree(cpu_groups);
+}
+
+static int alloc_init_cpu_groups(cpumask_var_t **pcpu_groups)
+{
+	int num_groups = 0;
+	cpumask_var_t tmp, *cpu_groups;
+
+	if (!alloc_cpumask_var(&tmp, GFP_KERNEL))
+		return -ENOMEM;
+
+	cpu_groups = kcalloc(nb_available_cpus, sizeof(*cpu_groups),
+			     GFP_KERNEL);
+	if (!cpu_groups) {
+		free_cpumask_var(tmp);
+		return -ENOMEM;
+	}
+
+	cpumask_copy(tmp, cpu_online_mask);
+
+	while (!cpumask_empty(tmp)) {
+		const struct cpumask *cpu_group =
+			topology_core_cpumask(cpumask_any(tmp));
+
+		if (!alloc_cpumask_var(&cpu_groups[num_groups], GFP_KERNEL)) {
+			free_cpu_groups(num_groups, &cpu_groups);
+			return -ENOMEM;
+		}
+		cpumask_copy(cpu_groups[num_groups++], cpu_group);
+		cpumask_andnot(tmp, tmp, cpu_group);
+	}
+
+	free_cpumask_var(tmp);
+	*pcpu_groups = cpu_groups;
+
+	return num_groups;
+}
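
The helper above partitions cpu_online_mask into groups of core siblings, one
cpumask per group. A minimal usage sketch (the grouping is platform-dependent;
the two-cluster layout assumed here is purely hypothetical):

	cpumask_var_t *groups;
	int nb = alloc_init_cpu_groups(&groups);

	/*
	 * On a hypothetical two-cluster system where topology_core_cpumask()
	 * groups CPUs 0-3 and 4-7, nb would be 2, with groups[0] spanning
	 * CPUs 0-3 and groups[1] spanning CPUs 4-7.
	 */
	if (nb >= 0)
		free_cpu_groups(nb, &groups);
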
+
+static int hotplug_tests(void)
+{
+	int i, nb_cpu_group, err = -ENOMEM;
+	cpumask_var_t offlined_cpus, *cpu_groups;
+	char *page_buf;
+
+	if (!alloc_cpumask_var(&offlined_cpus, GFP_KERNEL))
+		return err;
+
+	nb_cpu_group = alloc_init_cpu_groups(&cpu_groups);
+	if (nb_cpu_group < 0)
+		goto out_free_cpus;
+	page_buf = (char *)__get_free_page(GFP_KERNEL);
+	if (!page_buf)
+		goto out_free_cpu_groups;
+
+	err = 0;
+	/*
+	 * The last CPU cannot be powered down, and cpu_down() is expected
+	 * to refuse to do so.
+	 */
+	pr_info("Trying to turn off and on again all CPUs\n");
+	err += down_and_up_cpus(cpu_online_mask, offlined_cpus);
+
+	/*
+	 * Take down CPUs by cpu group this time. When the last CPU is turned
+	 * off, the cpu group itself should shut down.
+	 */
+	for (i = 0; i < nb_cpu_group; ++i) {
+		ssize_t len = cpumap_print_to_pagebuf(true, page_buf,
+						      cpu_groups[i]);
+		/* Remove trailing newline. */
+		page_buf[len - 1] = '\0';
+		pr_info("Trying to turn off and on again group %d (CPUs %s)\n",
+			i, page_buf);
+		err += down_and_up_cpus(cpu_groups[i], offlined_cpus);
+	}
+
+	free_page((unsigned long)page_buf);
+out_free_cpu_groups:
+	free_cpu_groups(nb_cpu_group, &cpu_groups);
+out_free_cpus:
+	free_cpumask_var(offlined_cpus);
+	return err;
+}
+
+static void dummy_callback(struct timer_list *unused) {}
+
+static int suspend_cpu(int index, bool broadcast)
+{
+	int ret;
+
+	arch_cpu_idle_enter();
+
+	if (broadcast) {
+		/*
+		 * The local timer will be shut down, we need to enter tick
+		 * broadcast.
+		 */
+		ret = tick_broadcast_enter();
+		if (ret) {
+			/*
+			 * In the absence of a hardware broadcast mechanism,
+			 * this CPU might be used to broadcast wakeups, which
+			 * may be why entering tick broadcast has failed.
+			 * There is little the kernel can do to work around
+			 * that, so enter WFI instead (idle state 0).
+			 */
+			cpu_do_idle();
+			ret = 0;
+			goto out_arch_exit;
+		}
+	}
+
+	/*
+	 * Replicate the common ARM cpuidle enter function
+	 * (arm_enter_idle_state).
+	 */
+	ret = CPU_PM_CPU_IDLE_ENTER(arm_cpuidle_suspend, index);
+
+	if (broadcast)
+		tick_broadcast_exit();
+
+out_arch_exit:
+	arch_cpu_idle_exit();
+
+	return ret;
+}
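
If the target state sets CPUIDLE_FLAG_TIMER_STOP, the caller passes
broadcast=true and the CPU switches to the broadcast tick before suspending,
so the wakeup timer armed by the test thread can still fire; when entering
broadcast mode fails (typically because this CPU is acting as the broadcast
source), the helper falls back to a plain WFI via cpu_do_idle().
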
+
+static int suspend_test_thread(void *arg)
+{
+	int cpu = (long)arg;
+	int i, nb_suspend = 0, nb_shallow_sleep = 0, nb_err = 0;
+	struct sched_param sched_priority = { .sched_priority = MAX_RT_PRIO-1 };
+	struct cpuidle_device *dev;
+	struct cpuidle_driver *drv;
+	/* No need for an actual callback, we just want to wake up the CPU. */
+	struct timer_list wakeup_timer;
+
+	/* Wait for the main thread to give the start signal. */
+	wait_for_completion(&suspend_threads_started);
+
+	/* Set maximum priority to preempt all other threads on this CPU. */
+	if (sched_setscheduler_nocheck(current, SCHED_FIFO, &sched_priority))
+		pr_warn("Failed to set suspend thread scheduler on CPU %d\n",
+			cpu);
+
+	dev = this_cpu_read(cpuidle_devices);
+	drv = cpuidle_get_cpu_driver(dev);
+
+	pr_info("CPU %d entering suspend cycles, states 1 through %d\n",
+		cpu, drv->state_count - 1);
+
+	timer_setup_on_stack(&wakeup_timer, dummy_callback, 0);
+	for (i = 0; i < NUM_SUSPEND_CYCLE; ++i) {
+		int index;
+		/*
+		 * Test all possible states, except 0 (which is usually WFI and
+		 * doesn't use PSCI).
+		 */
+		for (index = 1; index < drv->state_count; ++index) {
+			struct cpuidle_state *state = &drv->states[index];
+			bool broadcast = state->flags & CPUIDLE_FLAG_TIMER_STOP;
+			int ret;
+
+			/*
+			 * Set the timer to wake this CPU up after the state's
+			 * target residency, which should be ample time for
+			 * entering suspend.
+			 * If the local tick is disabled when entering suspend,
+			 * suspend_cpu() takes care of switching to a broadcast
+			 * tick, so the timer will still wake us up.
+			 */
+			mod_timer(&wakeup_timer, jiffies +
+				  usecs_to_jiffies(state->target_residency));
+
+			/* IRQs must be disabled during suspend operations. */
+			local_irq_disable();
+
+			ret = suspend_cpu(index, broadcast);
+
+			/*
+			 * We have woken up. Re-enable IRQs to handle any
+			 * pending interrupt, do not wait until the end of the
+			 * loop.
+			 */
+			local_irq_enable();
+
+			if (ret == index) {
+				++nb_suspend;
+			} else if (ret >= 0) {
+				/* We did not enter the expected state. */
+				++nb_shallow_sleep;
+			} else {
+				pr_err("Failed to suspend CPU %d: error %d "
+				       "(requested state %d, cycle %d)\n",
+				       cpu, ret, index, i);
+				++nb_err;
+			}
+		}
+	}
+
+	/*
+	 * Delete the timer to make sure that it cannot fire after the test
+	 * loop has finished.
+	 */
+	del_timer(&wakeup_timer);
+	destroy_timer_on_stack(&wakeup_timer);
+
+	if (atomic_dec_return_relaxed(&nb_active_threads) == 0)
+		complete(&suspend_threads_done);
+
+	/* Give up on RT scheduling and wait for termination. */
+	sched_priority.sched_priority = 0;
+	if (sched_setscheduler_nocheck(current, SCHED_NORMAL, &sched_priority))
+		pr_warn("Failed to set suspend thread scheduler on CPU %d\n",
+			cpu);
+	for (;;) {
+		/* Needs to be set first to avoid missing a wakeup. */
+		set_current_state(TASK_INTERRUPTIBLE);
+		if (kthread_should_stop()) {
+			__set_current_state(TASK_RUNNING);
+			break;
+		}
+		schedule();
+	}
+
+	pr_info("CPU %d suspend test results: success %d, shallow states %d, errors %d\n",
+		cpu, nb_suspend, nb_shallow_sleep, nb_err);
+
+	return nb_err;
+}
+
+static int suspend_tests(void)
+{
+	int i, cpu, err = 0;
+	struct task_struct **threads;
+	int nb_threads = 0;
+
+	threads = kmalloc_array(nb_available_cpus, sizeof(*threads),
+				GFP_KERNEL);
+	if (!threads)
+		return -ENOMEM;
+
+	/*
+	 * Stop cpuidle to prevent the idle tasks from entering a deep sleep
+	 * mode, as it might interfere with the suspend threads on other CPUs.
+	 * This does not prevent the suspend threads from using cpuidle (only
+	 * the idle tasks check this status). Take the idle lock so that
+	 * the cpuidle driver and device look-up can be carried out safely.
+	 */
+	cpuidle_pause_and_lock();
+
+	for_each_online_cpu(cpu) {
+		struct task_struct *thread;
+		/* Check that cpuidle is available on that CPU. */
+		struct cpuidle_device *dev = per_cpu(cpuidle_devices, cpu);
+		struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev);
+
+		if (!dev || !drv) {
+			pr_warn("cpuidle not available on CPU %d, ignoring\n",
+				cpu);
+			continue;
+		}
+
+		thread = kthread_create_on_cpu(suspend_test_thread,
+					       (void *)(long)cpu, cpu,
+					       "psci_suspend_test");
+		if (IS_ERR(thread))
+			pr_err("Failed to create kthread on CPU %d\n", cpu);
+		else
+			threads[nb_threads++] = thread;
+	}
+
+	if (nb_threads < 1) {
+		err = -ENODEV;
+		goto out;
+	}
+
+	atomic_set(&nb_active_threads, nb_threads);
+
+	/*
+	 * Wake up the suspend threads. To avoid the main thread being preempted
+	 * before all the threads have been unparked, the suspend threads will
+	 * wait for the completion of suspend_threads_started.
+	 */
+	for (i = 0; i < nb_threads; ++i)
+		wake_up_process(threads[i]);
+	complete_all(&suspend_threads_started);
+
+	wait_for_completion(&suspend_threads_done);
+
+	/* Stop and destroy all threads, get return status. */
+	for (i = 0; i < nb_threads; ++i)
+		err += kthread_stop(threads[i]);
+out:
+	cpuidle_resume_and_unlock();
+	kfree(threads);
+	return err;
+}
+
+static int __init psci_checker(void)
+{
+	int ret;
+
+	/*
+	 * Since we're in an initcall, we assume that all the CPUs that can
+	 * be onlined have been onlined.
+	 *
+	 * The tests assume that hotplug is enabled but nobody else is using it,
+	 * otherwise the results will be unpredictable. However, since there
+	 * is no userspace yet in initcalls, that should be fine, as long as
+	 * no torture test is running at the same time (see Kconfig).
+	 */
+	nb_available_cpus = num_online_cpus();
+
+	/* Check PSCI operations are set up and working. */
+	ret = psci_ops_check();
+	if (ret)
+		return ret;
+
+	pr_info("PSCI checker started using %u CPUs\n", nb_available_cpus);
+
+	pr_info("Starting hotplug tests\n");
+	ret = hotplug_tests();
+	if (ret == 0)
+		pr_info("Hotplug tests passed OK\n");
+	else if (ret > 0)
+		pr_err("%d error(s) encountered in hotplug tests\n", ret);
+	else {
+		pr_err("Out of memory\n");
+		return ret;
+	}
+
+	pr_info("Starting suspend tests (%d cycles per state)\n",
+		NUM_SUSPEND_CYCLE);
+	ret = suspend_tests();
+	if (ret == 0)
+		pr_info("Suspend tests passed OK\n");
+	else if (ret > 0)
+		pr_err("%d error(s) encountered in suspend tests\n", ret);
+	else {
+		switch (ret) {
+		case -ENOMEM:
+			pr_err("Out of memory\n");
+			break;
+		case -ENODEV:
+			pr_warn("Could not start suspend tests on any CPU\n");
+			break;
+		}
+	}
+
+	pr_info("PSCI checker completed\n");
+	return ret < 0 ? ret : 0;
+}
+late_initcall(psci_checker);
diff --git a/drivers/firmware/qcom_scm-32.c b/drivers/firmware/qcom_scm-32.c
new file mode 100644
index 0000000..4e24e59
--- /dev/null
+++ b/drivers/firmware/qcom_scm-32.c
@@ -0,0 +1,629 @@
+/* Copyright (c) 2010,2015, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2015 Linaro Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ */
+
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/errno.h>
+#include <linux/err.h>
+#include <linux/qcom_scm.h>
+#include <linux/dma-mapping.h>
+
+#include "qcom_scm.h"
+
+#define QCOM_SCM_FLAG_COLDBOOT_CPU0	0x00
+#define QCOM_SCM_FLAG_COLDBOOT_CPU1	0x01
+#define QCOM_SCM_FLAG_COLDBOOT_CPU2	0x08
+#define QCOM_SCM_FLAG_COLDBOOT_CPU3	0x20
+
+#define QCOM_SCM_FLAG_WARMBOOT_CPU0	0x04
+#define QCOM_SCM_FLAG_WARMBOOT_CPU1	0x02
+#define QCOM_SCM_FLAG_WARMBOOT_CPU2	0x10
+#define QCOM_SCM_FLAG_WARMBOOT_CPU3	0x40
+
+struct qcom_scm_entry {
+	int flag;
+	void *entry;
+};
+
+static struct qcom_scm_entry qcom_scm_wb[] = {
+	{ .flag = QCOM_SCM_FLAG_WARMBOOT_CPU0 },
+	{ .flag = QCOM_SCM_FLAG_WARMBOOT_CPU1 },
+	{ .flag = QCOM_SCM_FLAG_WARMBOOT_CPU2 },
+	{ .flag = QCOM_SCM_FLAG_WARMBOOT_CPU3 },
+};
+
+static DEFINE_MUTEX(qcom_scm_lock);
+
+/**
+ * struct qcom_scm_command - one SCM command buffer
+ * @len: total available memory for command and response
+ * @buf_offset: start of command buffer
+ * @resp_hdr_offset: start of response buffer
+ * @id: command to be executed
+ * @buf: buffer returned from qcom_scm_get_command_buffer()
+ *
+ * An SCM command is laid out in memory as follows:
+ *
+ *	------------------- <--- struct qcom_scm_command
+ *	| command header  |
+ *	------------------- <--- qcom_scm_get_command_buffer()
+ *	| command buffer  |
+ *	------------------- <--- struct qcom_scm_response and
+ *	| response header |      qcom_scm_command_to_response()
+ *	------------------- <--- qcom_scm_get_response_buffer()
+ *	| response buffer |
+ *	-------------------
+ *
+ * There can be arbitrary padding between the headers and buffers so
+ * you should always use the appropriate qcom_scm_get_*_buffer() routines
+ * to access the buffers in a safe manner.
+ */
+struct qcom_scm_command {
+	__le32 len;
+	__le32 buf_offset;
+	__le32 resp_hdr_offset;
+	__le32 id;
+	__le32 buf[0];
+};
+
+/**
+ * struct qcom_scm_response - one SCM response buffer
+ * @len: total available memory for response
+ * @buf_offset: start of response data relative to start of qcom_scm_response
+ * @is_complete: indicates if the command has finished processing
+ */
+struct qcom_scm_response {
+	__le32 len;
+	__le32 buf_offset;
+	__le32 is_complete;
+};
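
As a worked example of the layout described above: for a hypothetical call
with an 8-byte command payload and a 4-byte response payload, qcom_scm_call()
below fills in the header as follows (both headers consist only of __le32
fields, so sizeof(struct qcom_scm_command) == 16 and
sizeof(struct qcom_scm_response) == 12):

	cmd->len             = cpu_to_le32(16 + 8 + 12 + 4); /* 40 bytes total  */
	cmd->buf_offset      = cpu_to_le32(16);              /* command data    */
	cmd->resp_hdr_offset = cpu_to_le32(16 + 8);          /* response header */

The response data itself then lives at resp_hdr_offset plus the buf_offset
that the secure world writes into struct qcom_scm_response.
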
+
+/**
+ * qcom_scm_command_to_response() - Get a pointer to a qcom_scm_response
+ * @cmd: command
+ *
+ * Returns a pointer to a response for a command.
+ */
+static inline struct qcom_scm_response *qcom_scm_command_to_response(
+		const struct qcom_scm_command *cmd)
+{
+	return (void *)cmd + le32_to_cpu(cmd->resp_hdr_offset);
+}
+
+/**
+ * qcom_scm_get_command_buffer() - Get a pointer to a command buffer
+ * @cmd: command
+ *
+ * Returns a pointer to the command buffer of a command.
+ */
+static inline void *qcom_scm_get_command_buffer(const struct qcom_scm_command *cmd)
+{
+	return (void *)cmd->buf;
+}
+
+/**
+ * qcom_scm_get_response_buffer() - Get a pointer to a response buffer
+ * @rsp: response
+ *
+ * Returns a pointer to a response buffer of a response.
+ */
+static inline void *qcom_scm_get_response_buffer(const struct qcom_scm_response *rsp)
+{
+	return (void *)rsp + le32_to_cpu(rsp->buf_offset);
+}
+
+static u32 smc(u32 cmd_addr)
+{
+	int context_id;
+	register u32 r0 asm("r0") = 1;
+	register u32 r1 asm("r1") = (u32)&context_id;
+	register u32 r2 asm("r2") = cmd_addr;
+	do {
+		asm volatile(
+			__asmeq("%0", "r0")
+			__asmeq("%1", "r0")
+			__asmeq("%2", "r1")
+			__asmeq("%3", "r2")
+#ifdef REQUIRES_SEC
+			".arch_extension sec\n"
+#endif
+			"smc	#0	@ switch to secure world\n"
+			: "=r" (r0)
+			: "r" (r0), "r" (r1), "r" (r2)
+			: "r3", "r12");
+	} while (r0 == QCOM_SCM_INTERRUPTED);
+
+	return r0;
+}
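
In this legacy calling convention, r0 = 1 selects the SCM "command" call,
r1 passes a pointer where the secure world stores a context identifier, and
r2 carries the physical address of the command buffer; the call is simply
reissued for as long as the secure world returns QCOM_SCM_INTERRUPTED.
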
+
+/**
+ * qcom_scm_call() - Send an SCM command
+ * @dev: struct device
+ * @svc_id: service identifier
+ * @cmd_id: command identifier
+ * @cmd_buf: command buffer
+ * @cmd_len: length of the command buffer
+ * @resp_buf: response buffer
+ * @resp_len: length of the response buffer
+ *
+ * Sends a command to the SCM and waits for the command to finish processing.
+ *
+ * A note on cache maintenance:
+ * Any buffers that are expected to be accessed by the secure world
+ * must be flushed before invoking qcom_scm_call and invalidated in the cache
+ * immediately after qcom_scm_call returns. Cache maintenance on the command
+ * and response buffers is taken care of by qcom_scm_call; however, callers are
+ * responsible for any other cached buffers passed over to the secure world.
+ */
+static int qcom_scm_call(struct device *dev, u32 svc_id, u32 cmd_id,
+			 const void *cmd_buf, size_t cmd_len, void *resp_buf,
+			 size_t resp_len)
+{
+	int ret;
+	struct qcom_scm_command *cmd;
+	struct qcom_scm_response *rsp;
+	size_t alloc_len = sizeof(*cmd) + cmd_len + sizeof(*rsp) + resp_len;
+	dma_addr_t cmd_phys;
+
+	cmd = kzalloc(PAGE_ALIGN(alloc_len), GFP_KERNEL);
+	if (!cmd)
+		return -ENOMEM;
+
+	cmd->len = cpu_to_le32(alloc_len);
+	cmd->buf_offset = cpu_to_le32(sizeof(*cmd));
+	cmd->resp_hdr_offset = cpu_to_le32(sizeof(*cmd) + cmd_len);
+
+	cmd->id = cpu_to_le32((svc_id << 10) | cmd_id);
+	if (cmd_buf)
+		memcpy(qcom_scm_get_command_buffer(cmd), cmd_buf, cmd_len);
+
+	rsp = qcom_scm_command_to_response(cmd);
+
+	cmd_phys = dma_map_single(dev, cmd, alloc_len, DMA_TO_DEVICE);
+	if (dma_mapping_error(dev, cmd_phys)) {
+		kfree(cmd);
+		return -ENOMEM;
+	}
+
+	mutex_lock(&qcom_scm_lock);
+	ret = smc(cmd_phys);
+	if (ret < 0)
+		ret = qcom_scm_remap_error(ret);
+	mutex_unlock(&qcom_scm_lock);
+	if (ret)
+		goto out;
+
+	do {
+		dma_sync_single_for_cpu(dev, cmd_phys + sizeof(*cmd) + cmd_len,
+					sizeof(*rsp), DMA_FROM_DEVICE);
+	} while (!rsp->is_complete);
+
+	if (resp_buf) {
+		dma_sync_single_for_cpu(dev, cmd_phys + sizeof(*cmd) + cmd_len +
+					le32_to_cpu(rsp->buf_offset),
+					resp_len, DMA_FROM_DEVICE);
+		memcpy(resp_buf, qcom_scm_get_response_buffer(rsp),
+		       resp_len);
+	}
+out:
+	dma_unmap_single(dev, cmd_phys, alloc_len, DMA_TO_DEVICE);
+	kfree(cmd);
+	return ret;
+}
+
+#define SCM_CLASS_REGISTER	(0x2 << 8)
+#define SCM_MASK_IRQS		BIT(5)
+#define SCM_ATOMIC(svc, cmd, n) (((((svc) << 10)|((cmd) & 0x3ff)) << 12) | \
+				SCM_CLASS_REGISTER | \
+				SCM_MASK_IRQS | \
+				((n) & 0xf))
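
As a worked example, the atomic IO-read call used by __qcom_scm_io_readl()
below expands as follows (service and command values taken from qcom_scm.h):

	/* SCM_ATOMIC(QCOM_SCM_SVC_IO, QCOM_SCM_IO_READ, 1), svc = 0x5, cmd = 0x1 */
	((0x5 << 10) | 0x1) << 12	/* = 0x1401000, function id         */
	| SCM_CLASS_REGISTER		/* = 0x0000200, register-based call */
	| SCM_MASK_IRQS			/* = 0x0000020, IRQs stay masked    */
	| (1 & 0xf)			/* = 0x0000001, one argument        */
					/* = 0x1401221                      */
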
+
+/**
+ * qcom_scm_call_atomic1() - Send an atomic SCM command with one argument
+ * @svc_id: service identifier
+ * @cmd_id: command identifier
+ * @arg1: first argument
+ *
+ * This shall only be used with commands that are guaranteed to be
+ * uninterruptable, atomic and SMP safe.
+ */
+static s32 qcom_scm_call_atomic1(u32 svc, u32 cmd, u32 arg1)
+{
+	int context_id;
+
+	register u32 r0 asm("r0") = SCM_ATOMIC(svc, cmd, 1);
+	register u32 r1 asm("r1") = (u32)&context_id;
+	register u32 r2 asm("r2") = arg1;
+
+	asm volatile(
+			__asmeq("%0", "r0")
+			__asmeq("%1", "r0")
+			__asmeq("%2", "r1")
+			__asmeq("%3", "r2")
+#ifdef REQUIRES_SEC
+			".arch_extension sec\n"
+#endif
+			"smc    #0      @ switch to secure world\n"
+			: "=r" (r0)
+			: "r" (r0), "r" (r1), "r" (r2)
+			: "r3", "r12");
+	return r0;
+}
+
+/**
+ * qcom_scm_call_atomic2() - Send an atomic SCM command with two arguments
+ * @svc_id:	service identifier
+ * @cmd_id:	command identifier
+ * @arg1:	first argument
+ * @arg2:	second argument
+ *
+ * This shall only be used with commands that are guaranteed to be
+ * uninterruptable, atomic and SMP safe.
+ */
+static s32 qcom_scm_call_atomic2(u32 svc, u32 cmd, u32 arg1, u32 arg2)
+{
+	int context_id;
+
+	register u32 r0 asm("r0") = SCM_ATOMIC(svc, cmd, 2);
+	register u32 r1 asm("r1") = (u32)&context_id;
+	register u32 r2 asm("r2") = arg1;
+	register u32 r3 asm("r3") = arg2;
+
+	asm volatile(
+			__asmeq("%0", "r0")
+			__asmeq("%1", "r0")
+			__asmeq("%2", "r1")
+			__asmeq("%3", "r2")
+			__asmeq("%4", "r3")
+#ifdef REQUIRES_SEC
+			".arch_extension sec\n"
+#endif
+			"smc    #0      @ switch to secure world\n"
+			: "=r" (r0)
+			: "r" (r0), "r" (r1), "r" (r2), "r" (r3)
+			: "r12");
+	return r0;
+}
+
+u32 qcom_scm_get_version(void)
+{
+	int context_id;
+	static u32 version = -1;
+	register u32 r0 asm("r0");
+	register u32 r1 asm("r1");
+
+	if (version != -1)
+		return version;
+
+	mutex_lock(&qcom_scm_lock);
+
+	r0 = 0x1 << 8;
+	r1 = (u32)&context_id;
+	do {
+		asm volatile(
+			__asmeq("%0", "r0")
+			__asmeq("%1", "r1")
+			__asmeq("%2", "r0")
+			__asmeq("%3", "r1")
+#ifdef REQUIRES_SEC
+			".arch_extension sec\n"
+#endif
+			"smc	#0	@ switch to secure world\n"
+			: "=r" (r0), "=r" (r1)
+			: "r" (r0), "r" (r1)
+			: "r2", "r3", "r12");
+	} while (r0 == QCOM_SCM_INTERRUPTED);
+
+	version = r1;
+	mutex_unlock(&qcom_scm_lock);
+
+	return version;
+}
+EXPORT_SYMBOL(qcom_scm_get_version);
+
+/**
+ * __qcom_scm_set_cold_boot_addr() - Set the cold boot address for cpus
+ * @entry: Entry point function for the cpus
+ * @cpus: The cpumask of cpus that will use the entry point
+ *
+ * Set the cold boot address of the cpus. Any cpu outside the supported
+ * range would be removed from the cpu present mask.
+ */
+int __qcom_scm_set_cold_boot_addr(void *entry, const cpumask_t *cpus)
+{
+	int flags = 0;
+	int cpu;
+	int scm_cb_flags[] = {
+		QCOM_SCM_FLAG_COLDBOOT_CPU0,
+		QCOM_SCM_FLAG_COLDBOOT_CPU1,
+		QCOM_SCM_FLAG_COLDBOOT_CPU2,
+		QCOM_SCM_FLAG_COLDBOOT_CPU3,
+	};
+
+	if (!cpus || cpumask_empty(cpus))
+		return -EINVAL;
+
+	for_each_cpu(cpu, cpus) {
+		if (cpu < ARRAY_SIZE(scm_cb_flags))
+			flags |= scm_cb_flags[cpu];
+		else
+			set_cpu_present(cpu, false);
+	}
+
+	return qcom_scm_call_atomic2(QCOM_SCM_SVC_BOOT, QCOM_SCM_BOOT_ADDR,
+				    flags, virt_to_phys(entry));
+}
+
+/**
+ * __qcom_scm_set_warm_boot_addr() - Set the warm boot address for cpus
+ * @dev: Device pointer
+ * @entry: Entry point function for the cpus
+ * @cpus: The cpumask of cpus that will use the entry point
+ *
+ * Set the Linux entry point for the SCM to transfer control to when coming
+ * out of a power down. CPU power down may be executed on cpuidle or hotplug.
+ */
+int __qcom_scm_set_warm_boot_addr(struct device *dev, void *entry,
+				  const cpumask_t *cpus)
+{
+	int ret;
+	int flags = 0;
+	int cpu;
+	struct {
+		__le32 flags;
+		__le32 addr;
+	} cmd;
+
+	/*
+	 * Reassign only if we are switching from hotplug entry point
+	 * to cpuidle entry point or vice versa.
+	 */
+	for_each_cpu(cpu, cpus) {
+		if (entry == qcom_scm_wb[cpu].entry)
+			continue;
+		flags |= qcom_scm_wb[cpu].flag;
+	}
+
+	/* No change in entry function */
+	if (!flags)
+		return 0;
+
+	cmd.addr = cpu_to_le32(virt_to_phys(entry));
+	cmd.flags = cpu_to_le32(flags);
+	ret = qcom_scm_call(dev, QCOM_SCM_SVC_BOOT, QCOM_SCM_BOOT_ADDR,
+			    &cmd, sizeof(cmd), NULL, 0);
+	if (!ret) {
+		for_each_cpu(cpu, cpus)
+			qcom_scm_wb[cpu].entry = entry;
+	}
+
+	return ret;
+}
+
+/**
+ * __qcom_scm_cpu_power_down() - Power down the cpu
+ * @flags: Flags to flush cache
+ *
+ * This is an end point to power down the cpu. If there was a pending
+ * interrupt, control returns from this function; otherwise, the cpu jumps
+ * to the warm boot entry point set for this cpu upon reset.
+ */
+void __qcom_scm_cpu_power_down(u32 flags)
+{
+	qcom_scm_call_atomic1(QCOM_SCM_SVC_BOOT, QCOM_SCM_CMD_TERMINATE_PC,
+			flags & QCOM_SCM_FLUSH_FLAG_MASK);
+}
+
+int __qcom_scm_is_call_available(struct device *dev, u32 svc_id, u32 cmd_id)
+{
+	int ret;
+	__le32 svc_cmd = cpu_to_le32((svc_id << 10) | cmd_id);
+	__le32 ret_val = 0;
+
+	ret = qcom_scm_call(dev, QCOM_SCM_SVC_INFO, QCOM_IS_CALL_AVAIL_CMD,
+			    &svc_cmd, sizeof(svc_cmd), &ret_val,
+			    sizeof(ret_val));
+	if (ret)
+		return ret;
+
+	return le32_to_cpu(ret_val);
+}
+
+int __qcom_scm_hdcp_req(struct device *dev, struct qcom_scm_hdcp_req *req,
+			u32 req_cnt, u32 *resp)
+{
+	if (req_cnt > QCOM_SCM_HDCP_MAX_REQ_CNT)
+		return -ERANGE;
+
+	return qcom_scm_call(dev, QCOM_SCM_SVC_HDCP, QCOM_SCM_CMD_HDCP,
+		req, req_cnt * sizeof(*req), resp, sizeof(*resp));
+}
+
+void __qcom_scm_init(void)
+{
+}
+
+bool __qcom_scm_pas_supported(struct device *dev, u32 peripheral)
+{
+	__le32 out;
+	__le32 in;
+	int ret;
+
+	in = cpu_to_le32(peripheral);
+	ret = qcom_scm_call(dev, QCOM_SCM_SVC_PIL,
+			    QCOM_SCM_PAS_IS_SUPPORTED_CMD,
+			    &in, sizeof(in),
+			    &out, sizeof(out));
+
+	return ret ? false : !!out;
+}
+
+int __qcom_scm_pas_init_image(struct device *dev, u32 peripheral,
+			      dma_addr_t metadata_phys)
+{
+	__le32 scm_ret;
+	int ret;
+	struct {
+		__le32 proc;
+		__le32 image_addr;
+	} request;
+
+	request.proc = cpu_to_le32(peripheral);
+	request.image_addr = cpu_to_le32(metadata_phys);
+
+	ret = qcom_scm_call(dev, QCOM_SCM_SVC_PIL,
+			    QCOM_SCM_PAS_INIT_IMAGE_CMD,
+			    &request, sizeof(request),
+			    &scm_ret, sizeof(scm_ret));
+
+	return ret ? : le32_to_cpu(scm_ret);
+}
+
+int __qcom_scm_pas_mem_setup(struct device *dev, u32 peripheral,
+			     phys_addr_t addr, phys_addr_t size)
+{
+	__le32 scm_ret;
+	int ret;
+	struct {
+		__le32 proc;
+		__le32 addr;
+		__le32 len;
+	} request;
+
+	request.proc = cpu_to_le32(peripheral);
+	request.addr = cpu_to_le32(addr);
+	request.len = cpu_to_le32(size);
+
+	ret = qcom_scm_call(dev, QCOM_SCM_SVC_PIL,
+			    QCOM_SCM_PAS_MEM_SETUP_CMD,
+			    &request, sizeof(request),
+			    &scm_ret, sizeof(scm_ret));
+
+	return ret ? : le32_to_cpu(scm_ret);
+}
+
+int __qcom_scm_pas_auth_and_reset(struct device *dev, u32 peripheral)
+{
+	__le32 out;
+	__le32 in;
+	int ret;
+
+	in = cpu_to_le32(peripheral);
+	ret = qcom_scm_call(dev, QCOM_SCM_SVC_PIL,
+			    QCOM_SCM_PAS_AUTH_AND_RESET_CMD,
+			    &in, sizeof(in),
+			    &out, sizeof(out));
+
+	return ret ? : le32_to_cpu(out);
+}
+
+int __qcom_scm_pas_shutdown(struct device *dev, u32 peripheral)
+{
+	__le32 out;
+	__le32 in;
+	int ret;
+
+	in = cpu_to_le32(peripheral);
+	ret = qcom_scm_call(dev, QCOM_SCM_SVC_PIL,
+			    QCOM_SCM_PAS_SHUTDOWN_CMD,
+			    &in, sizeof(in),
+			    &out, sizeof(out));
+
+	return ret ? : le32_to_cpu(out);
+}
+
+int __qcom_scm_pas_mss_reset(struct device *dev, bool reset)
+{
+	__le32 out;
+	__le32 in = cpu_to_le32(reset);
+	int ret;
+
+	ret = qcom_scm_call(dev, QCOM_SCM_SVC_PIL, QCOM_SCM_PAS_MSS_RESET,
+			&in, sizeof(in),
+			&out, sizeof(out));
+
+	return ret ? : le32_to_cpu(out);
+}
+
+int __qcom_scm_set_dload_mode(struct device *dev, bool enable)
+{
+	return qcom_scm_call_atomic2(QCOM_SCM_SVC_BOOT, QCOM_SCM_SET_DLOAD_MODE,
+				     enable ? QCOM_SCM_SET_DLOAD_MODE : 0, 0);
+}
+
+int __qcom_scm_set_remote_state(struct device *dev, u32 state, u32 id)
+{
+	struct {
+		__le32 state;
+		__le32 id;
+	} req;
+	__le32 scm_ret = 0;
+	int ret;
+
+	req.state = cpu_to_le32(state);
+	req.id = cpu_to_le32(id);
+
+	ret = qcom_scm_call(dev, QCOM_SCM_SVC_BOOT, QCOM_SCM_SET_REMOTE_STATE,
+			    &req, sizeof(req), &scm_ret, sizeof(scm_ret));
+
+	return ret ? : le32_to_cpu(scm_ret);
+}
+
+int __qcom_scm_assign_mem(struct device *dev, phys_addr_t mem_region,
+			  size_t mem_sz, phys_addr_t src, size_t src_sz,
+			  phys_addr_t dest, size_t dest_sz)
+{
+	return -ENODEV;
+}
+
+int __qcom_scm_restore_sec_cfg(struct device *dev, u32 device_id,
+			       u32 spare)
+{
+	return -ENODEV;
+}
+
+int __qcom_scm_iommu_secure_ptbl_size(struct device *dev, u32 spare,
+				      size_t *size)
+{
+	return -ENODEV;
+}
+
+int __qcom_scm_iommu_secure_ptbl_init(struct device *dev, u64 addr, u32 size,
+				      u32 spare)
+{
+	return -ENODEV;
+}
+
+int __qcom_scm_io_readl(struct device *dev, phys_addr_t addr,
+			unsigned int *val)
+{
+	int ret;
+
+	ret = qcom_scm_call_atomic1(QCOM_SCM_SVC_IO, QCOM_SCM_IO_READ, addr);
+	if (ret >= 0)
+		*val = ret;
+
+	return ret < 0 ? ret : 0;
+}
+
+int __qcom_scm_io_writel(struct device *dev, phys_addr_t addr, unsigned int val)
+{
+	return qcom_scm_call_atomic2(QCOM_SCM_SVC_IO, QCOM_SCM_IO_WRITE,
+				     addr, val);
+}
diff --git a/drivers/firmware/qcom_scm-64.c b/drivers/firmware/qcom_scm-64.c
new file mode 100644
index 0000000..688525d
--- /dev/null
+++ b/drivers/firmware/qcom_scm-64.c
@@ -0,0 +1,512 @@
+/* Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/io.h>
+#include <linux/errno.h>
+#include <linux/delay.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/qcom_scm.h>
+#include <linux/arm-smccc.h>
+#include <linux/dma-mapping.h>
+
+#include "qcom_scm.h"
+
+#define QCOM_SCM_FNID(s, c) ((((s) & 0xFF) << 8) | ((c) & 0xFF))
+
+#define MAX_QCOM_SCM_ARGS 10
+#define MAX_QCOM_SCM_RETS 3
+
+enum qcom_scm_arg_types {
+	QCOM_SCM_VAL,
+	QCOM_SCM_RO,
+	QCOM_SCM_RW,
+	QCOM_SCM_BUFVAL,
+};
+
+#define QCOM_SCM_ARGS_IMPL(num, a, b, c, d, e, f, g, h, i, j, ...) (\
+			   (((a) & 0x3) << 4) | \
+			   (((b) & 0x3) << 6) | \
+			   (((c) & 0x3) << 8) | \
+			   (((d) & 0x3) << 10) | \
+			   (((e) & 0x3) << 12) | \
+			   (((f) & 0x3) << 14) | \
+			   (((g) & 0x3) << 16) | \
+			   (((h) & 0x3) << 18) | \
+			   (((i) & 0x3) << 20) | \
+			   (((j) & 0x3) << 22) | \
+			   ((num) & 0xf))
+
+#define QCOM_SCM_ARGS(...) QCOM_SCM_ARGS_IMPL(__VA_ARGS__, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)
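
For example, the descriptor built by __qcom_scm_pas_init_image() below uses
QCOM_SCM_ARGS(2, QCOM_SCM_VAL, QCOM_SCM_RW), which packs as:

	(2 & 0xf)			/* num  = 2, two arguments     */
	| ((QCOM_SCM_VAL & 0x3) << 4)	/* arg0 = 0, passed by value   */
	| ((QCOM_SCM_RW & 0x3) << 6)	/* arg1 = 2, read-write buffer */
					/* = 0x82                      */
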
+
+/**
+ * struct qcom_scm_desc
+ * @arginfo:	Metadata describing the arguments in args[]
+ * @args:	The array of arguments for the secure syscall
+ */
+struct qcom_scm_desc {
+	u32 arginfo;
+	u64 args[MAX_QCOM_SCM_ARGS];
+};
+
+static u64 qcom_smccc_convention = -1;
+static DEFINE_MUTEX(qcom_scm_lock);
+
+#define QCOM_SCM_EBUSY_WAIT_MS 30
+#define QCOM_SCM_EBUSY_MAX_RETRY 20
+
+#define N_EXT_QCOM_SCM_ARGS 7
+#define FIRST_EXT_ARG_IDX 3
+#define N_REGISTER_ARGS (MAX_QCOM_SCM_ARGS - N_EXT_QCOM_SCM_ARGS + 1)
+
+/**
+ * qcom_scm_call() - Invoke a syscall in the secure world
+ * @dev:	device
+ * @svc_id:	service identifier
+ * @cmd_id:	command identifier
+ * @desc:	Descriptor structure containing arguments and return values
+ *
+ * Sends a command to the SCM and waits for the command to finish processing.
+ * This should *only* be called in pre-emptible context.
+ */
+static int qcom_scm_call(struct device *dev, u32 svc_id, u32 cmd_id,
+			 const struct qcom_scm_desc *desc,
+			 struct arm_smccc_res *res)
+{
+	int arglen = desc->arginfo & 0xf;
+	int retry_count = 0, i;
+	u32 fn_id = QCOM_SCM_FNID(svc_id, cmd_id);
+	u64 cmd, x5 = desc->args[FIRST_EXT_ARG_IDX];
+	dma_addr_t args_phys = 0;
+	void *args_virt = NULL;
+	size_t alloc_len;
+	struct arm_smccc_quirk quirk = {.id = ARM_SMCCC_QUIRK_QCOM_A6};
+
+	if (unlikely(arglen > N_REGISTER_ARGS)) {
+		alloc_len = N_EXT_QCOM_SCM_ARGS * sizeof(u64);
+		args_virt = kzalloc(PAGE_ALIGN(alloc_len), GFP_KERNEL);
+
+		if (!args_virt)
+			return -ENOMEM;
+
+		if (qcom_smccc_convention == ARM_SMCCC_SMC_32) {
+			__le32 *args = args_virt;
+
+			for (i = 0; i < N_EXT_QCOM_SCM_ARGS; i++)
+				args[i] = cpu_to_le32(desc->args[i +
+						      FIRST_EXT_ARG_IDX]);
+		} else {
+			__le64 *args = args_virt;
+
+			for (i = 0; i < N_EXT_QCOM_SCM_ARGS; i++)
+				args[i] = cpu_to_le64(desc->args[i +
+						      FIRST_EXT_ARG_IDX]);
+		}
+
+		args_phys = dma_map_single(dev, args_virt, alloc_len,
+					   DMA_TO_DEVICE);
+
+		if (dma_mapping_error(dev, args_phys)) {
+			kfree(args_virt);
+			return -ENOMEM;
+		}
+
+		x5 = args_phys;
+	}
+
+	do {
+		mutex_lock(&qcom_scm_lock);
+
+		cmd = ARM_SMCCC_CALL_VAL(ARM_SMCCC_STD_CALL,
+					 qcom_smccc_convention,
+					 ARM_SMCCC_OWNER_SIP, fn_id);
+
+		quirk.state.a6 = 0;
+
+		do {
+			arm_smccc_smc_quirk(cmd, desc->arginfo, desc->args[0],
+				      desc->args[1], desc->args[2], x5,
+				      quirk.state.a6, 0, res, &quirk);
+
+			if (res->a0 == QCOM_SCM_INTERRUPTED)
+				cmd = res->a0;
+
+		} while (res->a0 == QCOM_SCM_INTERRUPTED);
+
+		mutex_unlock(&qcom_scm_lock);
+
+		if (res->a0 == QCOM_SCM_V2_EBUSY) {
+			if (retry_count++ > QCOM_SCM_EBUSY_MAX_RETRY)
+				break;
+			msleep(QCOM_SCM_EBUSY_WAIT_MS);
+		}
+	} while (res->a0 == QCOM_SCM_V2_EBUSY);
+
+	if (args_virt) {
+		dma_unmap_single(dev, args_phys, alloc_len, DMA_TO_DEVICE);
+		kfree(args_virt);
+	}
+
+	if (res->a0 < 0)
+		return qcom_scm_remap_error(res->a0);
+
+	return 0;
+}
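
The register layout here is: x0 carries the function id, built by
ARM_SMCCC_CALL_VAL() from the owner (SIP), the probed calling convention and
QCOM_SCM_FNID() — for instance QCOM_SCM_FNID(QCOM_SCM_SVC_PIL,
QCOM_SCM_PAS_INIT_IMAGE_CMD) yields (0x2 << 8) | 0x1 = 0x201; x1 carries
arginfo, x2-x4 the first three arguments, and x5 either the fourth argument
or, for calls with more than four arguments, the physical address of the
DMA-mapped extended-argument buffer.
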
+
+/**
+ * __qcom_scm_set_cold_boot_addr() - Set the cold boot address for cpus
+ * @entry: Entry point function for the cpus
+ * @cpus: The cpumask of cpus that will use the entry point
+ *
+ * Set the cold boot address of the cpus. Any cpu outside the supported
+ * range would be removed from the cpu present mask.
+ */
+int __qcom_scm_set_cold_boot_addr(void *entry, const cpumask_t *cpus)
+{
+	return -ENOTSUPP;
+}
+
+/**
+ * __qcom_scm_set_warm_boot_addr() - Set the warm boot address for cpus
+ * @dev: Device pointer
+ * @entry: Entry point function for the cpus
+ * @cpus: The cpumask of cpus that will use the entry point
+ *
+ * Set the Linux entry point for the SCM to transfer control to when coming
+ * out of a power down. CPU power down may be executed on cpuidle or hotplug.
+ */
+int __qcom_scm_set_warm_boot_addr(struct device *dev, void *entry,
+				  const cpumask_t *cpus)
+{
+	return -ENOTSUPP;
+}
+
+/**
+ * __qcom_scm_cpu_power_down() - Power down the cpu
+ * @flags: Flags to flush cache
+ *
+ * This is an end point to power down the cpu. If there was a pending
+ * interrupt, control returns from this function; otherwise, the cpu jumps
+ * to the warm boot entry point set for this cpu upon reset.
+ */
+void __qcom_scm_cpu_power_down(u32 flags)
+{
+}
+
+int __qcom_scm_is_call_available(struct device *dev, u32 svc_id, u32 cmd_id)
+{
+	int ret;
+	struct qcom_scm_desc desc = {0};
+	struct arm_smccc_res res;
+
+	desc.arginfo = QCOM_SCM_ARGS(1);
+	desc.args[0] = QCOM_SCM_FNID(svc_id, cmd_id) |
+			(ARM_SMCCC_OWNER_SIP << ARM_SMCCC_OWNER_SHIFT);
+
+	ret = qcom_scm_call(dev, QCOM_SCM_SVC_INFO, QCOM_IS_CALL_AVAIL_CMD,
+			    &desc, &res);
+
+	return ret ? : res.a1;
+}
+
+int __qcom_scm_hdcp_req(struct device *dev, struct qcom_scm_hdcp_req *req,
+			u32 req_cnt, u32 *resp)
+{
+	int ret;
+	struct qcom_scm_desc desc = {0};
+	struct arm_smccc_res res;
+
+	if (req_cnt > QCOM_SCM_HDCP_MAX_REQ_CNT)
+		return -ERANGE;
+
+	desc.args[0] = req[0].addr;
+	desc.args[1] = req[0].val;
+	desc.args[2] = req[1].addr;
+	desc.args[3] = req[1].val;
+	desc.args[4] = req[2].addr;
+	desc.args[5] = req[2].val;
+	desc.args[6] = req[3].addr;
+	desc.args[7] = req[3].val;
+	desc.args[8] = req[4].addr;
+	desc.args[9] = req[4].val;
+	desc.arginfo = QCOM_SCM_ARGS(10);
+
+	ret = qcom_scm_call(dev, QCOM_SCM_SVC_HDCP, QCOM_SCM_CMD_HDCP, &desc,
+			    &res);
+	*resp = res.a1;
+
+	return ret;
+}
+
+void __qcom_scm_init(void)
+{
+	u64 cmd;
+	struct arm_smccc_res res;
+	u32 function = QCOM_SCM_FNID(QCOM_SCM_SVC_INFO, QCOM_IS_CALL_AVAIL_CMD);
+
+	/* First try a SMC64 call */
+	cmd = ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, ARM_SMCCC_SMC_64,
+				 ARM_SMCCC_OWNER_SIP, function);
+
+	arm_smccc_smc(cmd, QCOM_SCM_ARGS(1), cmd & (~BIT(ARM_SMCCC_TYPE_SHIFT)),
+		      0, 0, 0, 0, 0, &res);
+
+	if (!res.a0 && res.a1)
+		qcom_smccc_convention = ARM_SMCCC_SMC_64;
+	else
+		qcom_smccc_convention = ARM_SMCCC_SMC_32;
+}
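
The probe works by issuing a fast SMC64 "is this call available" query about
the standard-call variant of itself (the fast-call bit is masked off of the
function id passed as the argument). If the call returns success in a0 with a
non-zero result in a1, the secure world understands the 64-bit convention and
all subsequent calls use SMC64; otherwise the driver falls back to SMC32.
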
+
+bool __qcom_scm_pas_supported(struct device *dev, u32 peripheral)
+{
+	int ret;
+	struct qcom_scm_desc desc = {0};
+	struct arm_smccc_res res;
+
+	desc.args[0] = peripheral;
+	desc.arginfo = QCOM_SCM_ARGS(1);
+
+	ret = qcom_scm_call(dev, QCOM_SCM_SVC_PIL,
+				QCOM_SCM_PAS_IS_SUPPORTED_CMD,
+				&desc, &res);
+
+	return ret ? false : !!res.a1;
+}
+
+int __qcom_scm_pas_init_image(struct device *dev, u32 peripheral,
+			      dma_addr_t metadata_phys)
+{
+	int ret;
+	struct qcom_scm_desc desc = {0};
+	struct arm_smccc_res res;
+
+	desc.args[0] = peripheral;
+	desc.args[1] = metadata_phys;
+	desc.arginfo = QCOM_SCM_ARGS(2, QCOM_SCM_VAL, QCOM_SCM_RW);
+
+	ret = qcom_scm_call(dev, QCOM_SCM_SVC_PIL, QCOM_SCM_PAS_INIT_IMAGE_CMD,
+				&desc, &res);
+
+	return ret ? : res.a1;
+}
+
+int __qcom_scm_pas_mem_setup(struct device *dev, u32 peripheral,
+			      phys_addr_t addr, phys_addr_t size)
+{
+	int ret;
+	struct qcom_scm_desc desc = {0};
+	struct arm_smccc_res res;
+
+	desc.args[0] = peripheral;
+	desc.args[1] = addr;
+	desc.args[2] = size;
+	desc.arginfo = QCOM_SCM_ARGS(3);
+
+	ret = qcom_scm_call(dev, QCOM_SCM_SVC_PIL, QCOM_SCM_PAS_MEM_SETUP_CMD,
+				&desc, &res);
+
+	return ret ? : res.a1;
+}
+
+int __qcom_scm_pas_auth_and_reset(struct device *dev, u32 peripheral)
+{
+	int ret;
+	struct qcom_scm_desc desc = {0};
+	struct arm_smccc_res res;
+
+	desc.args[0] = peripheral;
+	desc.arginfo = QCOM_SCM_ARGS(1);
+
+	ret = qcom_scm_call(dev, QCOM_SCM_SVC_PIL,
+				QCOM_SCM_PAS_AUTH_AND_RESET_CMD,
+				&desc, &res);
+
+	return ret ? : res.a1;
+}
+
+int __qcom_scm_pas_shutdown(struct device *dev, u32 peripheral)
+{
+	int ret;
+	struct qcom_scm_desc desc = {0};
+	struct arm_smccc_res res;
+
+	desc.args[0] = peripheral;
+	desc.arginfo = QCOM_SCM_ARGS(1);
+
+	ret = qcom_scm_call(dev, QCOM_SCM_SVC_PIL, QCOM_SCM_PAS_SHUTDOWN_CMD,
+			&desc, &res);
+
+	return ret ? : res.a1;
+}
+
+int __qcom_scm_pas_mss_reset(struct device *dev, bool reset)
+{
+	struct qcom_scm_desc desc = {0};
+	struct arm_smccc_res res;
+	int ret;
+
+	desc.args[0] = reset;
+	desc.args[1] = 0;
+	desc.arginfo = QCOM_SCM_ARGS(2);
+
+	ret = qcom_scm_call(dev, QCOM_SCM_SVC_PIL, QCOM_SCM_PAS_MSS_RESET, &desc,
+			    &res);
+
+	return ret ? : res.a1;
+}
+
+int __qcom_scm_set_remote_state(struct device *dev, u32 state, u32 id)
+{
+	struct qcom_scm_desc desc = {0};
+	struct arm_smccc_res res;
+	int ret;
+
+	desc.args[0] = state;
+	desc.args[1] = id;
+	desc.arginfo = QCOM_SCM_ARGS(2);
+
+	ret = qcom_scm_call(dev, QCOM_SCM_SVC_BOOT, QCOM_SCM_SET_REMOTE_STATE,
+			    &desc, &res);
+
+	return ret ? : res.a1;
+}
+
+int __qcom_scm_assign_mem(struct device *dev, phys_addr_t mem_region,
+			  size_t mem_sz, phys_addr_t src, size_t src_sz,
+			  phys_addr_t dest, size_t dest_sz)
+{
+	int ret;
+	struct qcom_scm_desc desc = {0};
+	struct arm_smccc_res res;
+
+	desc.args[0] = mem_region;
+	desc.args[1] = mem_sz;
+	desc.args[2] = src;
+	desc.args[3] = src_sz;
+	desc.args[4] = dest;
+	desc.args[5] = dest_sz;
+	desc.args[6] = 0;
+
+	desc.arginfo = QCOM_SCM_ARGS(7, QCOM_SCM_RO, QCOM_SCM_VAL,
+				     QCOM_SCM_RO, QCOM_SCM_VAL, QCOM_SCM_RO,
+				     QCOM_SCM_VAL, QCOM_SCM_VAL);
+
+	ret = qcom_scm_call(dev, QCOM_SCM_SVC_MP,
+			    QCOM_MEM_PROT_ASSIGN_ID,
+			    &desc, &res);
+
+	return ret ? : res.a1;
+}
+
+int __qcom_scm_restore_sec_cfg(struct device *dev, u32 device_id, u32 spare)
+{
+	struct qcom_scm_desc desc = {0};
+	struct arm_smccc_res res;
+	int ret;
+
+	desc.args[0] = device_id;
+	desc.args[1] = spare;
+	desc.arginfo = QCOM_SCM_ARGS(2);
+
+	ret = qcom_scm_call(dev, QCOM_SCM_SVC_MP, QCOM_SCM_RESTORE_SEC_CFG,
+			    &desc, &res);
+
+	return ret ? : res.a1;
+}
+
+int __qcom_scm_iommu_secure_ptbl_size(struct device *dev, u32 spare,
+				      size_t *size)
+{
+	struct qcom_scm_desc desc = {0};
+	struct arm_smccc_res res;
+	int ret;
+
+	desc.args[0] = spare;
+	desc.arginfo = QCOM_SCM_ARGS(1);
+
+	ret = qcom_scm_call(dev, QCOM_SCM_SVC_MP,
+			    QCOM_SCM_IOMMU_SECURE_PTBL_SIZE, &desc, &res);
+
+	if (size)
+		*size = res.a1;
+
+	return ret ? : res.a2;
+}
+
+int __qcom_scm_iommu_secure_ptbl_init(struct device *dev, u64 addr, u32 size,
+				      u32 spare)
+{
+	struct qcom_scm_desc desc = {0};
+	struct arm_smccc_res res;
+	int ret;
+
+	desc.args[0] = addr;
+	desc.args[1] = size;
+	desc.args[2] = spare;
+	desc.arginfo = QCOM_SCM_ARGS(3, QCOM_SCM_RW, QCOM_SCM_VAL,
+				     QCOM_SCM_VAL);
+
+	ret = qcom_scm_call(dev, QCOM_SCM_SVC_MP,
+			    QCOM_SCM_IOMMU_SECURE_PTBL_INIT, &desc, &res);
+
+	/* the pg table has been initialized already, ignore the error */
+	if (ret == -EPERM)
+		ret = 0;
+
+	return ret;
+}
+
+int __qcom_scm_set_dload_mode(struct device *dev, bool enable)
+{
+	struct qcom_scm_desc desc = {0};
+	struct arm_smccc_res res;
+
+	desc.args[0] = QCOM_SCM_SET_DLOAD_MODE;
+	desc.args[1] = enable ? QCOM_SCM_SET_DLOAD_MODE : 0;
+	desc.arginfo = QCOM_SCM_ARGS(2);
+
+	return qcom_scm_call(dev, QCOM_SCM_SVC_BOOT, QCOM_SCM_SET_DLOAD_MODE,
+			     &desc, &res);
+}
+
+int __qcom_scm_io_readl(struct device *dev, phys_addr_t addr,
+			unsigned int *val)
+{
+	struct qcom_scm_desc desc = {0};
+	struct arm_smccc_res res;
+	int ret;
+
+	desc.args[0] = addr;
+	desc.arginfo = QCOM_SCM_ARGS(1);
+
+	ret = qcom_scm_call(dev, QCOM_SCM_SVC_IO, QCOM_SCM_IO_READ,
+			    &desc, &res);
+	if (ret >= 0)
+		*val = res.a1;
+
+	return ret < 0 ? ret : 0;
+}
+
+int __qcom_scm_io_writel(struct device *dev, phys_addr_t addr, unsigned int val)
+{
+	struct qcom_scm_desc desc = {0};
+	struct arm_smccc_res res;
+
+	desc.args[0] = addr;
+	desc.args[1] = val;
+	desc.arginfo = QCOM_SCM_ARGS(2);
+
+	return qcom_scm_call(dev, QCOM_SCM_SVC_IO, QCOM_SCM_IO_WRITE,
+			     &desc, &res);
+}
diff --git a/drivers/firmware/qcom_scm.c b/drivers/firmware/qcom_scm.c
new file mode 100644
index 0000000..e778af7
--- /dev/null
+++ b/drivers/firmware/qcom_scm.c
@@ -0,0 +1,630 @@
+/*
+ * Qualcomm SCM driver
+ *
+ * Copyright (c) 2010,2015, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2015 Linaro Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+#include <linux/platform_device.h>
+#include <linux/init.h>
+#include <linux/cpumask.h>
+#include <linux/export.h>
+#include <linux/dma-mapping.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/qcom_scm.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_platform.h>
+#include <linux/clk.h>
+#include <linux/reset-controller.h>
+
+#include "qcom_scm.h"
+
+static bool download_mode = IS_ENABLED(CONFIG_QCOM_SCM_DOWNLOAD_MODE_DEFAULT);
+module_param(download_mode, bool, 0);
+
+#define SCM_HAS_CORE_CLK	BIT(0)
+#define SCM_HAS_IFACE_CLK	BIT(1)
+#define SCM_HAS_BUS_CLK		BIT(2)
+
+struct qcom_scm {
+	struct device *dev;
+	struct clk *core_clk;
+	struct clk *iface_clk;
+	struct clk *bus_clk;
+	struct reset_controller_dev reset;
+
+	u64 dload_mode_addr;
+};
+
+struct qcom_scm_current_perm_info {
+	__le32 vmid;
+	__le32 perm;
+	__le64 ctx;
+	__le32 ctx_size;
+	__le32 unused;
+};
+
+struct qcom_scm_mem_map_info {
+	__le64 mem_addr;
+	__le64 mem_size;
+};
+
+static struct qcom_scm *__scm;
+
+static int qcom_scm_clk_enable(void)
+{
+	int ret;
+
+	ret = clk_prepare_enable(__scm->core_clk);
+	if (ret)
+		goto bail;
+
+	ret = clk_prepare_enable(__scm->iface_clk);
+	if (ret)
+		goto disable_core;
+
+	ret = clk_prepare_enable(__scm->bus_clk);
+	if (ret)
+		goto disable_iface;
+
+	return 0;
+
+disable_iface:
+	clk_disable_unprepare(__scm->iface_clk);
+disable_core:
+	clk_disable_unprepare(__scm->core_clk);
+bail:
+	return ret;
+}
+
+static void qcom_scm_clk_disable(void)
+{
+	clk_disable_unprepare(__scm->core_clk);
+	clk_disable_unprepare(__scm->iface_clk);
+	clk_disable_unprepare(__scm->bus_clk);
+}
+
+/**
+ * qcom_scm_set_cold_boot_addr() - Set the cold boot address for cpus
+ * @entry: Entry point function for the cpus
+ * @cpus: The cpumask of cpus that will use the entry point
+ *
+ * Set the cold boot address of the cpus. Any cpu outside the supported
+ * range would be removed from the cpu present mask.
+ */
+int qcom_scm_set_cold_boot_addr(void *entry, const cpumask_t *cpus)
+{
+	return __qcom_scm_set_cold_boot_addr(entry, cpus);
+}
+EXPORT_SYMBOL(qcom_scm_set_cold_boot_addr);
+
+/**
+ * qcom_scm_set_warm_boot_addr() - Set the warm boot address for cpus
+ * @entry: Entry point function for the cpus
+ * @cpus: The cpumask of cpus that will use the entry point
+ *
+ * Set the Linux entry point for the SCM to transfer control to when coming
+ * out of a power down. CPU power down may be executed on cpuidle or hotplug.
+ */
+int qcom_scm_set_warm_boot_addr(void *entry, const cpumask_t *cpus)
+{
+	return __qcom_scm_set_warm_boot_addr(__scm->dev, entry, cpus);
+}
+EXPORT_SYMBOL(qcom_scm_set_warm_boot_addr);
+
+/**
+ * qcom_scm_cpu_power_down() - Power down the cpu
+ * @flags: Flags to flush cache
+ *
+ * This is an end point to power down the cpu. If there was a pending
+ * interrupt, control returns from this function; otherwise, the cpu jumps
+ * to the warm boot entry point set for this cpu upon reset.
+ */
+void qcom_scm_cpu_power_down(u32 flags)
+{
+	__qcom_scm_cpu_power_down(flags);
+}
+EXPORT_SYMBOL(qcom_scm_cpu_power_down);
+
+/**
+ * qcom_scm_hdcp_available() - Check if secure environment supports HDCP.
+ *
+ * Return true if HDCP is supported, false if not.
+ */
+bool qcom_scm_hdcp_available(void)
+{
+	int ret = qcom_scm_clk_enable();
+
+	if (ret)
+		return false;
+
+	ret = __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_HDCP,
+						QCOM_SCM_CMD_HDCP);
+
+	qcom_scm_clk_disable();
+
+	return ret > 0;
+}
+EXPORT_SYMBOL(qcom_scm_hdcp_available);
+
+/**
+ * qcom_scm_hdcp_req() - Send HDCP request.
+ * @req: HDCP request array
+ * @req_cnt: HDCP request array count
+ * @resp: response buffer passed to SCM
+ *
+ * Write HDCP register(s) through SCM.
+ */
+int qcom_scm_hdcp_req(struct qcom_scm_hdcp_req *req, u32 req_cnt, u32 *resp)
+{
+	int ret = qcom_scm_clk_enable();
+
+	if (ret)
+		return ret;
+
+	ret = __qcom_scm_hdcp_req(__scm->dev, req, req_cnt, resp);
+	qcom_scm_clk_disable();
+	return ret;
+}
+EXPORT_SYMBOL(qcom_scm_hdcp_req);
+
+/**
+ * qcom_scm_pas_supported() - Check if the peripheral authentication service is
+ *			      available for the given peripheral
+ * @peripheral:	peripheral id
+ *
+ * Returns true if PAS is supported for this peripheral, otherwise false.
+ */
+bool qcom_scm_pas_supported(u32 peripheral)
+{
+	int ret;
+
+	ret = __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_PIL,
+					   QCOM_SCM_PAS_IS_SUPPORTED_CMD);
+	if (ret <= 0)
+		return false;
+
+	return __qcom_scm_pas_supported(__scm->dev, peripheral);
+}
+EXPORT_SYMBOL(qcom_scm_pas_supported);
+
+/**
+ * qcom_scm_pas_init_image() - Initialize peripheral authentication service
+ *			       state machine for a given peripheral, using the
+ *			       metadata
+ * @peripheral: peripheral id
+ * @metadata:	pointer to memory containing ELF header, program header table
+ *		and optional blob of data used for authenticating the metadata
+ *		and the rest of the firmware
+ * @size:	size of the metadata
+ *
+ * Returns 0 on success.
+ */
+int qcom_scm_pas_init_image(u32 peripheral, const void *metadata, size_t size)
+{
+	dma_addr_t mdata_phys;
+	void *mdata_buf;
+	int ret;
+
+	/*
+	 * During the SCM call, memory protection will be enabled for the
+	 * metadata blob, so make sure it's physically contiguous, 4K-aligned
+	 * and non-cacheable to avoid XPU violations.
+	 */
+	mdata_buf = dma_alloc_coherent(__scm->dev, size, &mdata_phys,
+				       GFP_KERNEL);
+	if (!mdata_buf) {
+		dev_err(__scm->dev, "Allocation of metadata buffer failed.\n");
+		return -ENOMEM;
+	}
+	memcpy(mdata_buf, metadata, size);
+
+	ret = qcom_scm_clk_enable();
+	if (ret)
+		goto free_metadata;
+
+	ret = __qcom_scm_pas_init_image(__scm->dev, peripheral, mdata_phys);
+
+	qcom_scm_clk_disable();
+
+free_metadata:
+	dma_free_coherent(__scm->dev, size, mdata_buf, mdata_phys);
+
+	return ret;
+}
+EXPORT_SYMBOL(qcom_scm_pas_init_image);
+
+/**
+ * qcom_scm_pas_mem_setup() - Prepare the memory related to a given peripheral
+ *			      for firmware loading
+ * @peripheral:	peripheral id
+ * @addr:	start address of memory area to prepare
+ * @size:	size of the memory area to prepare
+ *
+ * Returns 0 on success.
+ */
+int qcom_scm_pas_mem_setup(u32 peripheral, phys_addr_t addr, phys_addr_t size)
+{
+	int ret;
+
+	ret = qcom_scm_clk_enable();
+	if (ret)
+		return ret;
+
+	ret = __qcom_scm_pas_mem_setup(__scm->dev, peripheral, addr, size);
+	qcom_scm_clk_disable();
+
+	return ret;
+}
+EXPORT_SYMBOL(qcom_scm_pas_mem_setup);
+
+/**
+ * qcom_scm_pas_auth_and_reset() - Authenticate the given peripheral firmware
+ *				   and reset the remote processor
+ * @peripheral:	peripheral id
+ *
+ * Return 0 on success.
+ */
+int qcom_scm_pas_auth_and_reset(u32 peripheral)
+{
+	int ret;
+
+	ret = qcom_scm_clk_enable();
+	if (ret)
+		return ret;
+
+	ret = __qcom_scm_pas_auth_and_reset(__scm->dev, peripheral);
+	qcom_scm_clk_disable();
+
+	return ret;
+}
+EXPORT_SYMBOL(qcom_scm_pas_auth_and_reset);
+
+/**
+ * qcom_scm_pas_shutdown() - Shut down the remote processor
+ * @peripheral: peripheral id
+ *
+ * Returns 0 on success.
+ */
+int qcom_scm_pas_shutdown(u32 peripheral)
+{
+	int ret;
+
+	ret = qcom_scm_clk_enable();
+	if (ret)
+		return ret;
+
+	ret = __qcom_scm_pas_shutdown(__scm->dev, peripheral);
+	qcom_scm_clk_disable();
+
+	return ret;
+}
+EXPORT_SYMBOL(qcom_scm_pas_shutdown);
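
Taken together, these exports implement the usual PAS firmware-boot sequence.
A minimal sketch of a caller, with error handling elided and a hypothetical
peripheral id of 1 (in mainline, the Qualcomm remoteproc drivers are the real
users):

	/* metadata/mem_phys/mem_size are supplied by the firmware loader */
	ret = qcom_scm_pas_init_image(1, metadata, metadata_len);
	if (!ret)
		ret = qcom_scm_pas_mem_setup(1, mem_phys, mem_size);
	if (!ret)
		ret = qcom_scm_pas_auth_and_reset(1); /* boots the remote core */

	/* and later, to stop the remote processor again: */
	qcom_scm_pas_shutdown(1);
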
+
+static int qcom_scm_pas_reset_assert(struct reset_controller_dev *rcdev,
+				     unsigned long idx)
+{
+	if (idx != 0)
+		return -EINVAL;
+
+	return __qcom_scm_pas_mss_reset(__scm->dev, 1);
+}
+
+static int qcom_scm_pas_reset_deassert(struct reset_controller_dev *rcdev,
+				       unsigned long idx)
+{
+	if (idx != 0)
+		return -EINVAL;
+
+	return __qcom_scm_pas_mss_reset(__scm->dev, 0);
+}
+
+static const struct reset_control_ops qcom_scm_pas_reset_ops = {
+	.assert = qcom_scm_pas_reset_assert,
+	.deassert = qcom_scm_pas_reset_deassert,
+};
+
+int qcom_scm_restore_sec_cfg(u32 device_id, u32 spare)
+{
+	return __qcom_scm_restore_sec_cfg(__scm->dev, device_id, spare);
+}
+EXPORT_SYMBOL(qcom_scm_restore_sec_cfg);
+
+int qcom_scm_iommu_secure_ptbl_size(u32 spare, size_t *size)
+{
+	return __qcom_scm_iommu_secure_ptbl_size(__scm->dev, spare, size);
+}
+EXPORT_SYMBOL(qcom_scm_iommu_secure_ptbl_size);
+
+int qcom_scm_iommu_secure_ptbl_init(u64 addr, u32 size, u32 spare)
+{
+	return __qcom_scm_iommu_secure_ptbl_init(__scm->dev, addr, size, spare);
+}
+EXPORT_SYMBOL(qcom_scm_iommu_secure_ptbl_init);
+
+int qcom_scm_io_readl(phys_addr_t addr, unsigned int *val)
+{
+	return __qcom_scm_io_readl(__scm->dev, addr, val);
+}
+EXPORT_SYMBOL(qcom_scm_io_readl);
+
+int qcom_scm_io_writel(phys_addr_t addr, unsigned int val)
+{
+	return __qcom_scm_io_writel(__scm->dev, addr, val);
+}
+EXPORT_SYMBOL(qcom_scm_io_writel);
+
+static void qcom_scm_set_download_mode(bool enable)
+{
+	bool avail;
+	int ret = 0;
+
+	avail = __qcom_scm_is_call_available(__scm->dev,
+					     QCOM_SCM_SVC_BOOT,
+					     QCOM_SCM_SET_DLOAD_MODE);
+	if (avail) {
+		ret = __qcom_scm_set_dload_mode(__scm->dev, enable);
+	} else if (__scm->dload_mode_addr) {
+		ret = __qcom_scm_io_writel(__scm->dev, __scm->dload_mode_addr,
+					   enable ? QCOM_SCM_SET_DLOAD_MODE : 0);
+	} else {
+		dev_err(__scm->dev,
+			"No available mechanism for setting download mode\n");
+	}
+
+	if (ret)
+		dev_err(__scm->dev, "failed to set download mode: %d\n", ret);
+}
+
+static int qcom_scm_find_dload_address(struct device *dev, u64 *addr)
+{
+	struct device_node *tcsr;
+	struct device_node *np = dev->of_node;
+	struct resource res;
+	u32 offset;
+	int ret;
+
+	tcsr = of_parse_phandle(np, "qcom,dload-mode", 0);
+	if (!tcsr)
+		return 0;
+
+	ret = of_address_to_resource(tcsr, 0, &res);
+	of_node_put(tcsr);
+	if (ret)
+		return ret;
+
+	ret = of_property_read_u32_index(np, "qcom,dload-mode", 1, &offset);
+	if (ret < 0)
+		return ret;
+
+	*addr = res.start + offset;
+
+	return 0;
+}
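
This expects a two-cell property such as qcom,dload-mode = <&tcsr 0x13000>;
(the offset shown is hypothetical): the first cell is a phandle to the TCSR
node and the second an offset within it, and the resulting download-mode
address is the TCSR base plus that offset.
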
+
+/**
+ * qcom_scm_is_available() - Checks if SCM is available
+ */
+bool qcom_scm_is_available(void)
+{
+	return !!__scm;
+}
+EXPORT_SYMBOL(qcom_scm_is_available);
+
+int qcom_scm_set_remote_state(u32 state, u32 id)
+{
+	return __qcom_scm_set_remote_state(__scm->dev, state, id);
+}
+EXPORT_SYMBOL(qcom_scm_set_remote_state);
+
+/**
+ * qcom_scm_assign_mem() - Make a secure call to reassign memory ownership
+ * @mem_addr: mem region whose ownership needs to be reassigned
+ * @mem_sz:   size of the region
+ * @srcvm:    vmid bitmap of the current set of owners; each set bit
+ *            indicates a unique owner
+ * @newvm:    array of new owners and their corresponding permission
+ *            flags
+ * @dest_cnt: number of owners in the next set
+ *
+ * Return negative errno on failure, 0 on success, with @srcvm updated.
+ */
+int qcom_scm_assign_mem(phys_addr_t mem_addr, size_t mem_sz,
+			unsigned int *srcvm,
+			struct qcom_scm_vmperm *newvm, int dest_cnt)
+{
+	struct qcom_scm_current_perm_info *destvm;
+	struct qcom_scm_mem_map_info *mem_to_map;
+	phys_addr_t mem_to_map_phys;
+	phys_addr_t dest_phys;
+	phys_addr_t ptr_phys;
+	size_t mem_to_map_sz;
+	size_t dest_sz;
+	size_t src_sz;
+	size_t ptr_sz;
+	int next_vm;
+	__le32 *src;
+	void *ptr;
+	int ret;
+	int len;
+	int i;
+
+	src_sz = hweight_long(*srcvm) * sizeof(*src);
+	mem_to_map_sz = sizeof(*mem_to_map);
+	dest_sz = dest_cnt * sizeof(*destvm);
+	ptr_sz = ALIGN(src_sz, SZ_64) + ALIGN(mem_to_map_sz, SZ_64) +
+			ALIGN(dest_sz, SZ_64);
+
+	ptr = dma_alloc_coherent(__scm->dev, ptr_sz, &ptr_phys, GFP_KERNEL);
+	if (!ptr)
+		return -ENOMEM;
+
+	/* Fill source vmid detail */
+	src = ptr;
+	len = hweight_long(*srcvm);
+	for (i = 0; i < len; i++) {
+		src[i] = cpu_to_le32(ffs(*srcvm) - 1);
+		*srcvm ^= 1 << (ffs(*srcvm) - 1);
+	}
+
+	/* Fill details of mem buff to map */
+	mem_to_map = ptr + ALIGN(src_sz, SZ_64);
+	mem_to_map_phys = ptr_phys + ALIGN(src_sz, SZ_64);
+	mem_to_map[0].mem_addr = cpu_to_le64(mem_addr);
+	mem_to_map[0].mem_size = cpu_to_le64(mem_sz);
+
+	next_vm = 0;
+	/* Fill details of next vmid detail */
+	destvm = ptr + ALIGN(mem_to_map_sz, SZ_64) + ALIGN(src_sz, SZ_64);
+	dest_phys = ptr_phys + ALIGN(mem_to_map_sz, SZ_64) + ALIGN(src_sz, SZ_64);
+	for (i = 0; i < dest_cnt; i++) {
+		destvm[i].vmid = cpu_to_le32(newvm[i].vmid);
+		destvm[i].perm = cpu_to_le32(newvm[i].perm);
+		destvm[i].ctx = 0;
+		destvm[i].ctx_size = 0;
+		next_vm |= BIT(newvm[i].vmid);
+	}
+
+	ret = __qcom_scm_assign_mem(__scm->dev, mem_to_map_phys, mem_to_map_sz,
+				    ptr_phys, src_sz, dest_phys, dest_sz);
+	dma_free_coherent(__scm->dev, ptr_sz, ptr, ptr_phys);
+	if (ret) {
+		dev_err(__scm->dev,
+			"Assign memory protection call failed %d.\n", ret);
+		return -EINVAL;
+	}
+
+	*srcvm = next_vm;
+	return 0;
+}
+EXPORT_SYMBOL(qcom_scm_assign_mem);
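
A minimal usage sketch, assuming the qcom_scm_vmperm layout and the
VMID/permission constants from include/linux/qcom_scm.h (handing a
hypothetical region from HLOS to the modem, much as the remoteproc driver
does):

	unsigned int src = BIT(QCOM_SCM_VMID_HLOS);
	struct qcom_scm_vmperm next = {
		.vmid = QCOM_SCM_VMID_MSS_MSA,
		.perm = QCOM_SCM_PERM_RW,
	};
	int ret;

	ret = qcom_scm_assign_mem(mem_phys, mem_size, &src, &next, 1);
	/* on success, src now reads as BIT(QCOM_SCM_VMID_MSS_MSA) */
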
+
+static int qcom_scm_probe(struct platform_device *pdev)
+{
+	struct qcom_scm *scm;
+	unsigned long clks;
+	int ret;
+
+	scm = devm_kzalloc(&pdev->dev, sizeof(*scm), GFP_KERNEL);
+	if (!scm)
+		return -ENOMEM;
+
+	ret = qcom_scm_find_dload_address(&pdev->dev, &scm->dload_mode_addr);
+	if (ret < 0)
+		return ret;
+
+	clks = (unsigned long)of_device_get_match_data(&pdev->dev);
+	if (clks & SCM_HAS_CORE_CLK) {
+		scm->core_clk = devm_clk_get(&pdev->dev, "core");
+		if (IS_ERR(scm->core_clk)) {
+			if (PTR_ERR(scm->core_clk) != -EPROBE_DEFER)
+				dev_err(&pdev->dev,
+					"failed to acquire core clk\n");
+			return PTR_ERR(scm->core_clk);
+		}
+	}
+
+	if (clks & SCM_HAS_IFACE_CLK) {
+		scm->iface_clk = devm_clk_get(&pdev->dev, "iface");
+		if (IS_ERR(scm->iface_clk)) {
+			if (PTR_ERR(scm->iface_clk) != -EPROBE_DEFER)
+				dev_err(&pdev->dev,
+					"failed to acquire iface clk\n");
+			return PTR_ERR(scm->iface_clk);
+		}
+	}
+
+	if (clks & SCM_HAS_BUS_CLK) {
+		scm->bus_clk = devm_clk_get(&pdev->dev, "bus");
+		if (IS_ERR(scm->bus_clk)) {
+			if (PTR_ERR(scm->bus_clk) != -EPROBE_DEFER)
+				dev_err(&pdev->dev,
+					"failed to acquire bus clk\n");
+			return PTR_ERR(scm->bus_clk);
+		}
+	}
+
+	scm->reset.ops = &qcom_scm_pas_reset_ops;
+	scm->reset.nr_resets = 1;
+	scm->reset.of_node = pdev->dev.of_node;
+	ret = devm_reset_controller_register(&pdev->dev, &scm->reset);
+	if (ret)
+		return ret;
+
+	/* vote for max clk rate for highest performance */
+	ret = clk_set_rate(scm->core_clk, INT_MAX);
+	if (ret)
+		return ret;
+
+	__scm = scm;
+	__scm->dev = &pdev->dev;
+
+	__qcom_scm_init();
+
+	/*
+	 * If requested, enable "download mode". From this point on, a warm
+	 * boot will cause the boot stages to enter download mode, unless
+	 * this is disabled below by a clean shutdown/reboot.
+	 */
+	if (download_mode)
+		qcom_scm_set_download_mode(true);
+
+	return 0;
+}
+
+static void qcom_scm_shutdown(struct platform_device *pdev)
+{
+	/* Clean shutdown, disable download mode to allow normal restart */
+	if (download_mode)
+		qcom_scm_set_download_mode(false);
+}
+
+static const struct of_device_id qcom_scm_dt_match[] = {
+	{ .compatible = "qcom,scm-apq8064",
+	  /* FIXME: This should have .data = (void *) SCM_HAS_CORE_CLK */
+	},
+	{ .compatible = "qcom,scm-msm8660",
+	  .data = (void *) SCM_HAS_CORE_CLK,
+	},
+	{ .compatible = "qcom,scm-msm8960",
+	  .data = (void *) SCM_HAS_CORE_CLK,
+	},
+	{ .compatible = "qcom,scm-msm8996",
+	  .data = NULL, /* no clocks */
+	},
+	{ .compatible = "qcom,scm-ipq4019",
+	  .data = NULL, /* no clocks */
+	},
+	{ .compatible = "qcom,scm",
+	  .data = (void *)(SCM_HAS_CORE_CLK
+			   | SCM_HAS_IFACE_CLK
+			   | SCM_HAS_BUS_CLK),
+	},
+	{}
+};
+
+static struct platform_driver qcom_scm_driver = {
+	.driver = {
+		.name	= "qcom_scm",
+		.of_match_table = qcom_scm_dt_match,
+	},
+	.probe = qcom_scm_probe,
+	.shutdown = qcom_scm_shutdown,
+};
+
+static int __init qcom_scm_init(void)
+{
+	return platform_driver_register(&qcom_scm_driver);
+}
+subsys_initcall(qcom_scm_init);
diff --git a/drivers/firmware/qcom_scm.h b/drivers/firmware/qcom_scm.h
new file mode 100644
index 0000000..dcd7f79
--- /dev/null
+++ b/drivers/firmware/qcom_scm.h
@@ -0,0 +1,112 @@
+/* Copyright (c) 2010-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#ifndef __QCOM_SCM_INT_H
+#define __QCOM_SCM_INT_H
+
+#define QCOM_SCM_SVC_BOOT		0x1
+#define QCOM_SCM_BOOT_ADDR		0x1
+#define QCOM_SCM_SET_DLOAD_MODE		0x10
+#define QCOM_SCM_BOOT_ADDR_MC		0x11
+#define QCOM_SCM_SET_REMOTE_STATE	0xa
+extern int __qcom_scm_set_remote_state(struct device *dev, u32 state, u32 id);
+extern int __qcom_scm_set_dload_mode(struct device *dev, bool enable);
+
+#define QCOM_SCM_FLAG_HLOS		0x01
+#define QCOM_SCM_FLAG_COLDBOOT_MC	0x02
+#define QCOM_SCM_FLAG_WARMBOOT_MC	0x04
+extern int __qcom_scm_set_warm_boot_addr(struct device *dev, void *entry,
+		const cpumask_t *cpus);
+extern int __qcom_scm_set_cold_boot_addr(void *entry, const cpumask_t *cpus);
+
+#define QCOM_SCM_CMD_TERMINATE_PC	0x2
+#define QCOM_SCM_FLUSH_FLAG_MASK	0x3
+#define QCOM_SCM_CMD_CORE_HOTPLUGGED	0x10
+extern void __qcom_scm_cpu_power_down(u32 flags);
+
+#define QCOM_SCM_SVC_IO			0x5
+#define QCOM_SCM_IO_READ		0x1
+#define QCOM_SCM_IO_WRITE		0x2
+extern int __qcom_scm_io_readl(struct device *dev, phys_addr_t addr, unsigned int *val);
+extern int __qcom_scm_io_writel(struct device *dev, phys_addr_t addr, unsigned int val);
+
+#define QCOM_SCM_SVC_INFO		0x6
+#define QCOM_IS_CALL_AVAIL_CMD		0x1
+extern int __qcom_scm_is_call_available(struct device *dev, u32 svc_id,
+		u32 cmd_id);
+
+#define QCOM_SCM_SVC_HDCP		0x11
+#define QCOM_SCM_CMD_HDCP		0x01
+extern int __qcom_scm_hdcp_req(struct device *dev,
+		struct qcom_scm_hdcp_req *req, u32 req_cnt, u32 *resp);
+
+extern void __qcom_scm_init(void);
+
+#define QCOM_SCM_SVC_PIL		0x2
+#define QCOM_SCM_PAS_INIT_IMAGE_CMD	0x1
+#define QCOM_SCM_PAS_MEM_SETUP_CMD	0x2
+#define QCOM_SCM_PAS_AUTH_AND_RESET_CMD	0x5
+#define QCOM_SCM_PAS_SHUTDOWN_CMD	0x6
+#define QCOM_SCM_PAS_IS_SUPPORTED_CMD	0x7
+#define QCOM_SCM_PAS_MSS_RESET		0xa
+extern bool __qcom_scm_pas_supported(struct device *dev, u32 peripheral);
+extern int  __qcom_scm_pas_init_image(struct device *dev, u32 peripheral,
+		dma_addr_t metadata_phys);
+extern int  __qcom_scm_pas_mem_setup(struct device *dev, u32 peripheral,
+		phys_addr_t addr, phys_addr_t size);
+extern int  __qcom_scm_pas_auth_and_reset(struct device *dev, u32 peripheral);
+extern int  __qcom_scm_pas_shutdown(struct device *dev, u32 peripheral);
+extern int  __qcom_scm_pas_mss_reset(struct device *dev, bool reset);
+
+/* common error codes */
+#define QCOM_SCM_V2_EBUSY	-12
+#define QCOM_SCM_ENOMEM		-5
+#define QCOM_SCM_EOPNOTSUPP	-4
+#define QCOM_SCM_EINVAL_ADDR	-3
+#define QCOM_SCM_EINVAL_ARG	-2
+#define QCOM_SCM_ERROR		-1
+#define QCOM_SCM_INTERRUPTED	1
+
+static inline int qcom_scm_remap_error(int err)
+{
+	switch (err) {
+	case QCOM_SCM_ERROR:
+		return -EIO;
+	case QCOM_SCM_EINVAL_ADDR:
+	case QCOM_SCM_EINVAL_ARG:
+		return -EINVAL;
+	case QCOM_SCM_EOPNOTSUPP:
+		return -EOPNOTSUPP;
+	case QCOM_SCM_ENOMEM:
+		return -ENOMEM;
+	case QCOM_SCM_V2_EBUSY:
+		return -EBUSY;
+	}
+	return -EINVAL;
+}
+
+#define QCOM_SCM_SVC_MP			0xc
+#define QCOM_SCM_RESTORE_SEC_CFG	2
+extern int __qcom_scm_restore_sec_cfg(struct device *dev, u32 device_id,
+				      u32 spare);
+#define QCOM_SCM_IOMMU_SECURE_PTBL_SIZE	3
+#define QCOM_SCM_IOMMU_SECURE_PTBL_INIT	4
+extern int __qcom_scm_iommu_secure_ptbl_size(struct device *dev, u32 spare,
+					     size_t *size);
+extern int __qcom_scm_iommu_secure_ptbl_init(struct device *dev, u64 addr,
+					     u32 size, u32 spare);
+#define QCOM_MEM_PROT_ASSIGN_ID	0x16
+extern int  __qcom_scm_assign_mem(struct device *dev,
+				  phys_addr_t mem_region, size_t mem_sz,
+				  phys_addr_t src, size_t src_sz,
+				  phys_addr_t dest, size_t dest_sz);
+
+#endif
diff --git a/drivers/firmware/qemu_fw_cfg.c b/drivers/firmware/qemu_fw_cfg.c
new file mode 100644
index 0000000..039e0f9
--- /dev/null
+++ b/drivers/firmware/qemu_fw_cfg.c
@@ -0,0 +1,940 @@
+/*
+ * drivers/firmware/qemu_fw_cfg.c
+ *
+ * Copyright 2015 Carnegie Mellon University
+ *
+ * Expose entries from QEMU's firmware configuration (fw_cfg) device in
+ * sysfs (read-only, under "/sys/firmware/qemu_fw_cfg/...").
+ *
+ * The fw_cfg device may be instantiated via either an ACPI node (on x86
+ * and select subsets of aarch64), a Device Tree node (on arm), or using
+ * a kernel module (or command line) parameter with the following syntax:
+ *
+ *      [qemu_fw_cfg.]ioport=<size>@<base>[:<ctrl_off>:<data_off>[:<dma_off>]]
+ * or
+ *      [qemu_fw_cfg.]mmio=<size>@<base>[:<ctrl_off>:<data_off>[:<dma_off>]]
+ *
+ * where:
+ *      <size>     := size of ioport or mmio range
+ *      <base>     := physical base address of ioport or mmio range
+ *      <ctrl_off> := (optional) offset of control register
+ *      <data_off> := (optional) offset of data register
+ *      <dma_off>  := (optional) offset of dma register
+ *
+ * e.g.:
+ *      qemu_fw_cfg.ioport=12@0x510:0:1:4	(the default on x86)
+ * or
+ *      qemu_fw_cfg.mmio=16@0x9020000:8:0:16	(the default on arm)
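+ *
+ * (the first example describes a 12-byte i/o range based at 0x510 with
+ * the control register at offset 0, the data register at offset 1 and
+ * the dma register at offset 4)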
+ */
+
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/platform_device.h>
+#include <linux/acpi.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/ioport.h>
+#include <uapi/linux/qemu_fw_cfg.h>
+#include <linux/delay.h>
+#include <linux/crash_dump.h>
+#include <linux/crash_core.h>
+
+MODULE_AUTHOR("Gabriel L. Somlo <somlo@cmu.edu>");
+MODULE_DESCRIPTION("QEMU fw_cfg sysfs support");
+MODULE_LICENSE("GPL");
+
+/* fw_cfg revision attribute, in /sys/firmware/qemu_fw_cfg top-level dir. */
+static u32 fw_cfg_rev;
+
+/* fw_cfg device i/o register addresses */
+static bool fw_cfg_is_mmio;
+static phys_addr_t fw_cfg_p_base;
+static resource_size_t fw_cfg_p_size;
+static void __iomem *fw_cfg_dev_base;
+static void __iomem *fw_cfg_reg_ctrl;
+static void __iomem *fw_cfg_reg_data;
+static void __iomem *fw_cfg_reg_dma;
+
+/* atomic access to fw_cfg device (potentially slow i/o, so using mutex) */
+static DEFINE_MUTEX(fw_cfg_dev_lock);
+
+/* pick appropriate endianness for selector key */
+static void fw_cfg_sel_endianness(u16 key)
+{
+	if (fw_cfg_is_mmio)
+		iowrite16be(key, fw_cfg_reg_ctrl);
+	else
+		iowrite16(key, fw_cfg_reg_ctrl);
+}
+
+#ifdef CONFIG_CRASH_CORE
+static inline bool fw_cfg_dma_enabled(void)
+{
+	return (fw_cfg_rev & FW_CFG_VERSION_DMA) && fw_cfg_reg_dma;
+}
+
+/* qemu fw_cfg device is sync today, but spec says it may become async */
+static void fw_cfg_wait_for_control(struct fw_cfg_dma_access *d)
+{
+	for (;;) {
+		u32 ctrl = be32_to_cpu(READ_ONCE(d->control));
+
+		/* do not reorder the read to d->control */
+		rmb();
+		if ((ctrl & ~FW_CFG_DMA_CTL_ERROR) == 0)
+			return;
+
+		cpu_relax();
+	}
+}
+
+static ssize_t fw_cfg_dma_transfer(void *address, u32 length, u32 control)
+{
+	phys_addr_t dma;
+	struct fw_cfg_dma_access *d = NULL;
+	ssize_t ret = length;
+
+	d = kmalloc(sizeof(*d), GFP_KERNEL);
+	if (!d) {
+		ret = -ENOMEM;
+		goto end;
+	}
+
+	/* fw_cfg device does not need IOMMU protection, so use physical addresses */
+	*d = (struct fw_cfg_dma_access) {
+		.address = cpu_to_be64(address ? virt_to_phys(address) : 0),
+		.length = cpu_to_be32(length),
+		.control = cpu_to_be32(control)
+	};
+
+	dma = virt_to_phys(d);
+
+	iowrite32be((u64)dma >> 32, fw_cfg_reg_dma);
+	/* force memory to sync before notifying device via MMIO */
+	wmb();
+	iowrite32be(dma, fw_cfg_reg_dma + 4);
+
+	fw_cfg_wait_for_control(d);
+
+	if (be32_to_cpu(READ_ONCE(d->control)) & FW_CFG_DMA_CTL_ERROR)
+		ret = -EIO;
+
+end:
+	kfree(d);
+
+	return ret;
+}
+#endif
+
+/* read chunk of given fw_cfg blob (caller responsible for sanity-check) */
+static ssize_t fw_cfg_read_blob(u16 key,
+				void *buf, loff_t pos, size_t count)
+{
+	u32 glk = -1U;
+	acpi_status status;
+
+	/* If we have ACPI, ensure mutual exclusion against any potential
+	 * device access by the firmware, e.g. via AML methods:
+	 */
+	status = acpi_acquire_global_lock(ACPI_WAIT_FOREVER, &glk);
+	if (ACPI_FAILURE(status) && status != AE_NOT_CONFIGURED) {
+		/* Should never get here */
+		WARN(1, "fw_cfg_read_blob: Failed to lock ACPI!\n");
+		memset(buf, 0, count);
+		return -EINVAL;
+	}
+
+	mutex_lock(&fw_cfg_dev_lock);
+	fw_cfg_sel_endianness(key);
+	while (pos-- > 0)
+		ioread8(fw_cfg_reg_data);
+	ioread8_rep(fw_cfg_reg_data, buf, count);
+	mutex_unlock(&fw_cfg_dev_lock);
+
+	acpi_release_global_lock(glk);
+	return count;
+}
+
+#ifdef CONFIG_CRASH_CORE
+/* write chunk of given fw_cfg blob (caller responsible for sanity-check) */
+static ssize_t fw_cfg_write_blob(u16 key,
+				 void *buf, loff_t pos, size_t count)
+{
+	u32 glk = -1U;
+	acpi_status status;
+	ssize_t ret = count;
+
+	/* If we have ACPI, ensure mutual exclusion against any potential
+	 * device access by the firmware, e.g. via AML methods:
+	 */
+	status = acpi_acquire_global_lock(ACPI_WAIT_FOREVER, &glk);
+	if (ACPI_FAILURE(status) && status != AE_NOT_CONFIGURED) {
+		/* Should never get here */
+		WARN(1, "%s: Failed to lock ACPI!\n", __func__);
+		return -EINVAL;
+	}
+
+	mutex_lock(&fw_cfg_dev_lock);
+	if (pos == 0) {
+		ret = fw_cfg_dma_transfer(buf, count, key << 16
+					  | FW_CFG_DMA_CTL_SELECT
+					  | FW_CFG_DMA_CTL_WRITE);
+	} else {
+		fw_cfg_sel_endianness(key);
+		ret = fw_cfg_dma_transfer(NULL, pos, FW_CFG_DMA_CTL_SKIP);
+		if (ret < 0)
+			goto end;
+		ret = fw_cfg_dma_transfer(buf, count, FW_CFG_DMA_CTL_WRITE);
+	}
+
+end:
+	mutex_unlock(&fw_cfg_dev_lock);
+
+	acpi_release_global_lock(glk);
+
+	return ret;
+}
+#endif /* CONFIG_CRASH_CORE */
+
+/* clean up fw_cfg device i/o */
+static void fw_cfg_io_cleanup(void)
+{
+	if (fw_cfg_is_mmio) {
+		iounmap(fw_cfg_dev_base);
+		release_mem_region(fw_cfg_p_base, fw_cfg_p_size);
+	} else {
+		ioport_unmap(fw_cfg_dev_base);
+		release_region(fw_cfg_p_base, fw_cfg_p_size);
+	}
+}
+
+/* arch-specific ctrl & data register offsets are not available in ACPI, DT */
+#if !(defined(FW_CFG_CTRL_OFF) && defined(FW_CFG_DATA_OFF))
+# if (defined(CONFIG_ARM) || defined(CONFIG_ARM64))
+#  define FW_CFG_CTRL_OFF 0x08
+#  define FW_CFG_DATA_OFF 0x00
+#  define FW_CFG_DMA_OFF 0x10
+# elif (defined(CONFIG_PPC_PMAC) || defined(CONFIG_SPARC32)) /* ppc/mac,sun4m */
+#  define FW_CFG_CTRL_OFF 0x00
+#  define FW_CFG_DATA_OFF 0x02
+# elif (defined(CONFIG_X86) || defined(CONFIG_SPARC64)) /* x86, sun4u */
+#  define FW_CFG_CTRL_OFF 0x00
+#  define FW_CFG_DATA_OFF 0x01
+#  define FW_CFG_DMA_OFF 0x04
+# else
+#  error "QEMU FW_CFG not available on this architecture!"
+# endif
+#endif
+
+/* initialize fw_cfg device i/o from platform data */
+static int fw_cfg_do_platform_probe(struct platform_device *pdev)
+{
+	char sig[FW_CFG_SIG_SIZE];
+	struct resource *range, *ctrl, *data, *dma;
+
+	/* acquire i/o range details */
+	fw_cfg_is_mmio = false;
+	range = platform_get_resource(pdev, IORESOURCE_IO, 0);
+	if (!range) {
+		fw_cfg_is_mmio = true;
+		range = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+		if (!range)
+			return -EINVAL;
+	}
+	fw_cfg_p_base = range->start;
+	fw_cfg_p_size = resource_size(range);
+
+	if (fw_cfg_is_mmio) {
+		if (!request_mem_region(fw_cfg_p_base,
+					fw_cfg_p_size, "fw_cfg_mem"))
+			return -EBUSY;
+		fw_cfg_dev_base = ioremap(fw_cfg_p_base, fw_cfg_p_size);
+		if (!fw_cfg_dev_base) {
+			release_mem_region(fw_cfg_p_base, fw_cfg_p_size);
+			return -EFAULT;
+		}
+	} else {
+		if (!request_region(fw_cfg_p_base,
+				    fw_cfg_p_size, "fw_cfg_io"))
+			return -EBUSY;
+		fw_cfg_dev_base = ioport_map(fw_cfg_p_base, fw_cfg_p_size);
+		if (!fw_cfg_dev_base) {
+			release_region(fw_cfg_p_base, fw_cfg_p_size);
+			return -EFAULT;
+		}
+	}
+
+	/* were custom register offsets provided (e.g. on the command line)? */
+	ctrl = platform_get_resource_byname(pdev, IORESOURCE_REG, "ctrl");
+	data = platform_get_resource_byname(pdev, IORESOURCE_REG, "data");
+	dma = platform_get_resource_byname(pdev, IORESOURCE_REG, "dma");
+	if (ctrl && data) {
+		fw_cfg_reg_ctrl = fw_cfg_dev_base + ctrl->start;
+		fw_cfg_reg_data = fw_cfg_dev_base + data->start;
+	} else {
+		/* use architecture-specific offsets */
+		fw_cfg_reg_ctrl = fw_cfg_dev_base + FW_CFG_CTRL_OFF;
+		fw_cfg_reg_data = fw_cfg_dev_base + FW_CFG_DATA_OFF;
+	}
+
+	if (dma)
+		fw_cfg_reg_dma = fw_cfg_dev_base + dma->start;
+#ifdef FW_CFG_DMA_OFF
+	else
+		fw_cfg_reg_dma = fw_cfg_dev_base + FW_CFG_DMA_OFF;
+#endif
+
+	/* verify fw_cfg device signature */
+	if (fw_cfg_read_blob(FW_CFG_SIGNATURE, sig,
+				0, FW_CFG_SIG_SIZE) < 0 ||
+		memcmp(sig, "QEMU", FW_CFG_SIG_SIZE) != 0) {
+		fw_cfg_io_cleanup();
+		return -ENODEV;
+	}
+
+	return 0;
+}
+
+static ssize_t fw_cfg_showrev(struct kobject *k, struct attribute *a, char *buf)
+{
+	return sprintf(buf, "%u\n", fw_cfg_rev);
+}
+
+static const struct {
+	struct attribute attr;
+	ssize_t (*show)(struct kobject *k, struct attribute *a, char *buf);
+} fw_cfg_rev_attr = {
+	.attr = { .name = "rev", .mode = S_IRUSR },
+	.show = fw_cfg_showrev,
+};
+
+/* fw_cfg_sysfs_entry type */
+struct fw_cfg_sysfs_entry {
+	struct kobject kobj;
+	u32 size;
+	u16 select;
+	char name[FW_CFG_MAX_FILE_PATH];
+	struct list_head list;
+};
+
+#ifdef CONFIG_CRASH_CORE
+static ssize_t fw_cfg_write_vmcoreinfo(const struct fw_cfg_file *f)
+{
+	static struct fw_cfg_vmcoreinfo *data;
+	ssize_t ret;
+
+	data = kmalloc(sizeof(struct fw_cfg_vmcoreinfo), GFP_KERNEL);
+	if (!data)
+		return -ENOMEM;
+
+	*data = (struct fw_cfg_vmcoreinfo) {
+		.guest_format = cpu_to_le16(FW_CFG_VMCOREINFO_FORMAT_ELF),
+		.size = cpu_to_le32(VMCOREINFO_NOTE_SIZE),
+		.paddr = cpu_to_le64(paddr_vmcoreinfo_note())
+	};
+	/* spare ourselves reading the host format support for now, since
+	 * ELF is the only format we can provide - the host may ignore ours
+	 */
+	ret = fw_cfg_write_blob(be16_to_cpu(f->select), data,
+				0, sizeof(struct fw_cfg_vmcoreinfo));
+
+	kfree(data);
+	return ret;
+}
+#endif /* CONFIG_CRASH_CORE */
+
+/* get fw_cfg_sysfs_entry from kobject member */
+static inline struct fw_cfg_sysfs_entry *to_entry(struct kobject *kobj)
+{
+	return container_of(kobj, struct fw_cfg_sysfs_entry, kobj);
+}
+
+/* fw_cfg_sysfs_attribute type */
+struct fw_cfg_sysfs_attribute {
+	struct attribute attr;
+	ssize_t (*show)(struct fw_cfg_sysfs_entry *entry, char *buf);
+};
+
+/* get fw_cfg_sysfs_attribute from attribute member */
+static inline struct fw_cfg_sysfs_attribute *to_attr(struct attribute *attr)
+{
+	return container_of(attr, struct fw_cfg_sysfs_attribute, attr);
+}
+
+/* global cache of fw_cfg_sysfs_entry objects */
+static LIST_HEAD(fw_cfg_entry_cache);
+
+/* kobjects removed lazily by kernel, mutual exclusion needed */
+static DEFINE_SPINLOCK(fw_cfg_cache_lock);
+
+static inline void fw_cfg_sysfs_cache_enlist(struct fw_cfg_sysfs_entry *entry)
+{
+	spin_lock(&fw_cfg_cache_lock);
+	list_add_tail(&entry->list, &fw_cfg_entry_cache);
+	spin_unlock(&fw_cfg_cache_lock);
+}
+
+static inline void fw_cfg_sysfs_cache_delist(struct fw_cfg_sysfs_entry *entry)
+{
+	spin_lock(&fw_cfg_cache_lock);
+	list_del(&entry->list);
+	spin_unlock(&fw_cfg_cache_lock);
+}
+
+static void fw_cfg_sysfs_cache_cleanup(void)
+{
+	struct fw_cfg_sysfs_entry *entry, *next;
+
+	list_for_each_entry_safe(entry, next, &fw_cfg_entry_cache, list) {
+		/* will end up invoking fw_cfg_sysfs_cache_delist()
+		 * via each object's release() method (i.e. destructor)
+		 */
+		kobject_put(&entry->kobj);
+	}
+}
+
+/* default_attrs: per-entry attributes and show methods */
+
+#define FW_CFG_SYSFS_ATTR(_attr) \
+struct fw_cfg_sysfs_attribute fw_cfg_sysfs_attr_##_attr = { \
+	.attr = { .name = __stringify(_attr), .mode = S_IRUSR }, \
+	.show = fw_cfg_sysfs_show_##_attr, \
+}
+
+static ssize_t fw_cfg_sysfs_show_size(struct fw_cfg_sysfs_entry *e, char *buf)
+{
+	return sprintf(buf, "%u\n", e->size);
+}
+
+static ssize_t fw_cfg_sysfs_show_key(struct fw_cfg_sysfs_entry *e, char *buf)
+{
+	return sprintf(buf, "%u\n", e->select);
+}
+
+static ssize_t fw_cfg_sysfs_show_name(struct fw_cfg_sysfs_entry *e, char *buf)
+{
+	return sprintf(buf, "%s\n", e->name);
+}
+
+static FW_CFG_SYSFS_ATTR(size);
+static FW_CFG_SYSFS_ATTR(key);
+static FW_CFG_SYSFS_ATTR(name);
+
+static struct attribute *fw_cfg_sysfs_entry_attrs[] = {
+	&fw_cfg_sysfs_attr_size.attr,
+	&fw_cfg_sysfs_attr_key.attr,
+	&fw_cfg_sysfs_attr_name.attr,
+	NULL,
+};
+
+/* sysfs_ops: find fw_cfg_[entry, attribute] and call appropriate show method */
+static ssize_t fw_cfg_sysfs_attr_show(struct kobject *kobj, struct attribute *a,
+				      char *buf)
+{
+	struct fw_cfg_sysfs_entry *entry = to_entry(kobj);
+	struct fw_cfg_sysfs_attribute *attr = to_attr(a);
+
+	return attr->show(entry, buf);
+}
+
+static const struct sysfs_ops fw_cfg_sysfs_attr_ops = {
+	.show = fw_cfg_sysfs_attr_show,
+};
+
+/* release: destructor, to be called via kobject_put() */
+static void fw_cfg_sysfs_release_entry(struct kobject *kobj)
+{
+	struct fw_cfg_sysfs_entry *entry = to_entry(kobj);
+
+	fw_cfg_sysfs_cache_delist(entry);
+	kfree(entry);
+}
+
+/* kobj_type: ties together all properties required to register an entry */
+static struct kobj_type fw_cfg_sysfs_entry_ktype = {
+	.default_attrs = fw_cfg_sysfs_entry_attrs,
+	.sysfs_ops = &fw_cfg_sysfs_attr_ops,
+	.release = fw_cfg_sysfs_release_entry,
+};
+
+/* raw-read method and attribute */
+static ssize_t fw_cfg_sysfs_read_raw(struct file *filp, struct kobject *kobj,
+				     struct bin_attribute *bin_attr,
+				     char *buf, loff_t pos, size_t count)
+{
+	struct fw_cfg_sysfs_entry *entry = to_entry(kobj);
+
+	if (pos > entry->size)
+		return -EINVAL;
+
+	if (count > entry->size - pos)
+		count = entry->size - pos;
+
+	return fw_cfg_read_blob(entry->select, buf, pos, count);
+}
+
+static struct bin_attribute fw_cfg_sysfs_attr_raw = {
+	.attr = { .name = "raw", .mode = S_IRUSR },
+	.read = fw_cfg_sysfs_read_raw,
+};
+
+/*
+ * Create a kset subdirectory matching each '/' delimited dirname token
+ * in 'name', starting with sysfs kset/folder 'dir'; At the end, create
+ * a symlink directed at the given 'target'.
+ * NOTE: We do this on a best-effort basis, since 'name' is not guaranteed
+ * to be a well-behaved path name. Whenever a symlink vs. kset directory
+ * name collision occurs, the kernel will issue big scary warnings while
+ * refusing to add the offending link or directory. We follow up with our
+ * own, slightly less scary error messages explaining the situation :)
+ */
+static int fw_cfg_build_symlink(struct kset *dir,
+				struct kobject *target, const char *name)
+{
+	int ret;
+	struct kset *subdir;
+	struct kobject *ko;
+	char *name_copy, *p, *tok;
+
+	if (!dir || !target || !name || !*name)
+		return -EINVAL;
+
+	/* clone a copy of name for parsing */
+	name_copy = p = kstrdup(name, GFP_KERNEL);
+	if (!name_copy)
+		return -ENOMEM;
+
+	/* create folders for each dirname token, then symlink for basename */
+	while ((tok = strsep(&p, "/")) && *tok) {
+
+		/* last (basename) token? If so, add symlink here */
+		if (!p || !*p) {
+			ret = sysfs_create_link(&dir->kobj, target, tok);
+			break;
+		}
+
+		/* does the current dir contain an item named after tok ? */
+		ko = kset_find_obj(dir, tok);
+		if (ko) {
+			/* drop reference added by kset_find_obj */
+			kobject_put(ko);
+
+			/* ko MUST be a kset - we're about to use it as one ! */
+			if (ko->ktype != dir->kobj.ktype) {
+				ret = -EINVAL;
+				break;
+			}
+
+			/* descend into already existing subdirectory */
+			dir = to_kset(ko);
+		} else {
+			/* create new subdirectory kset */
+			subdir = kzalloc(sizeof(struct kset), GFP_KERNEL);
+			if (!subdir) {
+				ret = -ENOMEM;
+				break;
+			}
+			subdir->kobj.kset = dir;
+			subdir->kobj.ktype = dir->kobj.ktype;
+			ret = kobject_set_name(&subdir->kobj, "%s", tok);
+			if (ret) {
+				kfree(subdir);
+				break;
+			}
+			ret = kset_register(subdir);
+			if (ret) {
+				kfree(subdir);
+				break;
+			}
+
+			/* descend into newly created subdirectory */
+			dir = subdir;
+		}
+	}
+
+	/* we're done with cloned copy of name */
+	kfree(name_copy);
+	return ret;
+}
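+
+/*
+ * A worked example (the file name is illustrative): for an fw_cfg file
+ * named "etc/ramfb" registered under by_key/<N>, the loop above creates
+ * the kset directory "etc" under by_name and then places a symlink
+ * "ramfb" inside it, pointing at the by_key/<N> entry:
+ *
+ *   /sys/firmware/qemu_fw_cfg/by_name/etc/ramfb -> .../by_key/<N>
+ */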
+
+/* recursively unregister fw_cfg/by_name/ kset directory tree */
+static void fw_cfg_kset_unregister_recursive(struct kset *kset)
+{
+	struct kobject *k, *next;
+
+	list_for_each_entry_safe(k, next, &kset->list, entry)
+		/* all set members are ksets too, but check just in case... */
+		if (k->ktype == kset->kobj.ktype)
+			fw_cfg_kset_unregister_recursive(to_kset(k));
+
+	/* symlinks are cleanly and automatically removed with the directory */
+	kset_unregister(kset);
+}
+
+/* kobjects & kset representing top-level, by_key, and by_name folders */
+static struct kobject *fw_cfg_top_ko;
+static struct kobject *fw_cfg_sel_ko;
+static struct kset *fw_cfg_fname_kset;
+
+/* register an individual fw_cfg file */
+static int fw_cfg_register_file(const struct fw_cfg_file *f)
+{
+	int err;
+	struct fw_cfg_sysfs_entry *entry;
+
+#ifdef CONFIG_CRASH_CORE
+	if (fw_cfg_dma_enabled() &&
+		strcmp(f->name, FW_CFG_VMCOREINFO_FILENAME) == 0 &&
+		!is_kdump_kernel()) {
+		if (fw_cfg_write_vmcoreinfo(f) < 0)
+			pr_warn("fw_cfg: failed to write vmcoreinfo\n");
+	}
+#endif
+
+	/* allocate new entry */
+	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
+	if (!entry)
+		return -ENOMEM;
+
+	/* set file entry information */
+	entry->size = be32_to_cpu(f->size);
+	entry->select = be16_to_cpu(f->select);
+	memcpy(entry->name, f->name, FW_CFG_MAX_FILE_PATH);
+
+	/* register entry under "/sys/firmware/qemu_fw_cfg/by_key/" */
+	err = kobject_init_and_add(&entry->kobj, &fw_cfg_sysfs_entry_ktype,
+				   fw_cfg_sel_ko, "%d", entry->select);
+	if (err)
+		goto err_register;
+
+	/* add raw binary content access */
+	err = sysfs_create_bin_file(&entry->kobj, &fw_cfg_sysfs_attr_raw);
+	if (err)
+		goto err_add_raw;
+
+	/* try adding "/sys/firmware/qemu_fw_cfg/by_name/" symlink */
+	fw_cfg_build_symlink(fw_cfg_fname_kset, &entry->kobj, entry->name);
+
+	/* success, add entry to global cache */
+	fw_cfg_sysfs_cache_enlist(entry);
+	return 0;
+
+err_add_raw:
+	kobject_del(&entry->kobj);
+err_register:
+	kfree(entry);
+	return err;
+}
+
+/* iterate over all fw_cfg directory entries, registering each one */
+static int fw_cfg_register_dir_entries(void)
+{
+	int ret = 0;
+	__be32 files_count;
+	u32 count, i;
+	struct fw_cfg_file *dir;
+	size_t dir_size;
+
+	ret = fw_cfg_read_blob(FW_CFG_FILE_DIR, &files_count,
+			0, sizeof(files_count));
+	if (ret < 0)
+		return ret;
+
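+	/*
+	 * Blob layout, as consumed here: a be32 file count followed by
+	 * 'count' packed struct fw_cfg_file records.
+	 */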
+	count = be32_to_cpu(files_count);
+	dir_size = count * sizeof(struct fw_cfg_file);
+
+	dir = kmalloc(dir_size, GFP_KERNEL);
+	if (!dir)
+		return -ENOMEM;
+
+	ret = fw_cfg_read_blob(FW_CFG_FILE_DIR, dir,
+			sizeof(files_count), dir_size);
+	if (ret < 0)
+		goto end;
+
+	for (i = 0; i < count; i++) {
+		ret = fw_cfg_register_file(&dir[i]);
+		if (ret)
+			break;
+	}
+
+end:
+	kfree(dir);
+	return ret;
+}
+
+/* unregister top-level or by_key folder */
+static inline void fw_cfg_kobj_cleanup(struct kobject *kobj)
+{
+	kobject_del(kobj);
+	kobject_put(kobj);
+}
+
+static int fw_cfg_sysfs_probe(struct platform_device *pdev)
+{
+	int err;
+	__le32 rev;
+
+	/* NOTE: If we supported multiple fw_cfg devices, we'd first create
+	 * a subdirectory named after e.g. pdev->id, then hang per-device
+	 * by_key (and by_name) subdirectories underneath it. However, only
+	 * one fw_cfg device exists system-wide, so if one was already found
+	 * earlier, we might as well stop here.
+	 */
+	if (fw_cfg_sel_ko)
+		return -EBUSY;
+
+	/* create by_key and by_name subdirs of /sys/firmware/qemu_fw_cfg/ */
+	err = -ENOMEM;
+	fw_cfg_sel_ko = kobject_create_and_add("by_key", fw_cfg_top_ko);
+	if (!fw_cfg_sel_ko)
+		goto err_sel;
+	fw_cfg_fname_kset = kset_create_and_add("by_name", NULL, fw_cfg_top_ko);
+	if (!fw_cfg_fname_kset)
+		goto err_name;
+
+	/* initialize fw_cfg device i/o from platform data */
+	err = fw_cfg_do_platform_probe(pdev);
+	if (err)
+		goto err_probe;
+
+	/* get revision number, add matching top-level attribute */
+	err = fw_cfg_read_blob(FW_CFG_ID, &rev, 0, sizeof(rev));
+	if (err < 0)
+		goto err_probe;
+
+	fw_cfg_rev = le32_to_cpu(rev);
+	err = sysfs_create_file(fw_cfg_top_ko, &fw_cfg_rev_attr.attr);
+	if (err)
+		goto err_rev;
+
+	/* process fw_cfg file directory entry, registering each file */
+	err = fw_cfg_register_dir_entries();
+	if (err)
+		goto err_dir;
+
+	/* success */
+	pr_debug("fw_cfg: loaded.\n");
+	return 0;
+
+err_dir:
+	fw_cfg_sysfs_cache_cleanup();
+	sysfs_remove_file(fw_cfg_top_ko, &fw_cfg_rev_attr.attr);
+err_rev:
+	fw_cfg_io_cleanup();
+err_probe:
+	fw_cfg_kset_unregister_recursive(fw_cfg_fname_kset);
+err_name:
+	fw_cfg_kobj_cleanup(fw_cfg_sel_ko);
+err_sel:
+	return err;
+}
+
+static int fw_cfg_sysfs_remove(struct platform_device *pdev)
+{
+	pr_debug("fw_cfg: unloading.\n");
+	fw_cfg_sysfs_cache_cleanup();
+	sysfs_remove_file(fw_cfg_top_ko, &fw_cfg_rev_attr.attr);
+	fw_cfg_io_cleanup();
+	fw_cfg_kset_unregister_recursive(fw_cfg_fname_kset);
+	fw_cfg_kobj_cleanup(fw_cfg_sel_ko);
+	return 0;
+}
+
+static const struct of_device_id fw_cfg_sysfs_mmio_match[] = {
+	{ .compatible = "qemu,fw-cfg-mmio", },
+	{},
+};
+MODULE_DEVICE_TABLE(of, fw_cfg_sysfs_mmio_match);
+
+#ifdef CONFIG_ACPI
+static const struct acpi_device_id fw_cfg_sysfs_acpi_match[] = {
+	{ FW_CFG_ACPI_DEVICE_ID, },
+	{},
+};
+MODULE_DEVICE_TABLE(acpi, fw_cfg_sysfs_acpi_match);
+#endif
+
+static struct platform_driver fw_cfg_sysfs_driver = {
+	.probe = fw_cfg_sysfs_probe,
+	.remove = fw_cfg_sysfs_remove,
+	.driver = {
+		.name = "fw_cfg",
+		.of_match_table = fw_cfg_sysfs_mmio_match,
+		.acpi_match_table = ACPI_PTR(fw_cfg_sysfs_acpi_match),
+	},
+};
+
+#ifdef CONFIG_FW_CFG_SYSFS_CMDLINE
+
+static struct platform_device *fw_cfg_cmdline_dev;
+
+/* this probably belongs in e.g. include/linux/types.h,
+ * but right now we are the only ones doing it...
+ */
+#ifdef CONFIG_PHYS_ADDR_T_64BIT
+#define __PHYS_ADDR_PREFIX "ll"
+#else
+#define __PHYS_ADDR_PREFIX ""
+#endif
+
+/* use special scanf/printf modifier for phys_addr_t, resource_size_t */
+#define PH_ADDR_SCAN_FMT "@%" __PHYS_ADDR_PREFIX "i%n" \
+			 ":%" __PHYS_ADDR_PREFIX "i" \
+			 ":%" __PHYS_ADDR_PREFIX "i%n" \
+			 ":%" __PHYS_ADDR_PREFIX "i%n"
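+
+/*
+ * e.g. scanning "@0x510:0:1:4" with PH_ADDR_SCAN_FMT yields base=0x510,
+ * ctrl_off=0, data_off=1 and dma_off=4; the %n conversions record how
+ * far scanning progressed, so fw_cfg_cmdline_set() can reject trailing
+ * garbage via str[consumed].
+ */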
+
+#define PH_ADDR_PR_1_FMT "0x%" __PHYS_ADDR_PREFIX "x@" \
+			 "0x%" __PHYS_ADDR_PREFIX "x"
+
+#define PH_ADDR_PR_3_FMT PH_ADDR_PR_1_FMT \
+			 ":%" __PHYS_ADDR_PREFIX "u" \
+			 ":%" __PHYS_ADDR_PREFIX "u"
+
+#define PH_ADDR_PR_4_FMT PH_ADDR_PR_3_FMT \
+			 ":%" __PHYS_ADDR_PREFIX "u"
+
+static int fw_cfg_cmdline_set(const char *arg, const struct kernel_param *kp)
+{
+	struct resource res[4] = {};
+	char *str;
+	phys_addr_t base;
+	resource_size_t size, ctrl_off, data_off, dma_off;
+	int processed, consumed = 0;
+
+	/* only one fw_cfg device can exist system-wide, so if one
+	 * was processed on the command line already, we might as
+	 * well stop here.
+	 */
+	if (fw_cfg_cmdline_dev) {
+		/* avoid leaking previously registered device */
+		platform_device_unregister(fw_cfg_cmdline_dev);
+		return -EINVAL;
+	}
+
+	/* consume "<size>" portion of command line argument */
+	size = memparse(arg, &str);
+
+	/* get "@<base>[:<ctrl_off>:<data_off>[:<dma_off>]]" chunks */
+	processed = sscanf(str, PH_ADDR_SCAN_FMT,
+			   &base, &consumed,
+			   &ctrl_off, &data_off, &consumed,
+			   &dma_off, &consumed);
+
+	/* sscanf() must process precisely 1, 3 or 4 chunks:
+	 * <base> is mandatory, optionally followed by <ctrl_off>
+	 * and <data_off>, and <dma_off>;
+	 * there must be no extra characters after the last chunk,
+	 * so str[consumed] must be '\0'.
+	 */
+	if (str[consumed] ||
+	    (processed != 1 && processed != 3 && processed != 4))
+		return -EINVAL;
+
+	res[0].start = base;
+	res[0].end = base + size - 1;
+	res[0].flags = !strcmp(kp->name, "mmio") ? IORESOURCE_MEM :
+						   IORESOURCE_IO;
+
+	/* insert register offsets, if provided */
+	if (processed > 1) {
+		res[1].name = "ctrl";
+		res[1].start = ctrl_off;
+		res[1].flags = IORESOURCE_REG;
+		res[2].name = "data";
+		res[2].start = data_off;
+		res[2].flags = IORESOURCE_REG;
+	}
+	if (processed > 3) {
+		res[3].name = "dma";
+		res[3].start = dma_off;
+		res[3].flags = IORESOURCE_REG;
+	}
+
+	/* "processed" happens to nicely match the number of resources
+	 * we need to pass in to this platform device.
+	 */
+	fw_cfg_cmdline_dev = platform_device_register_simple("fw_cfg",
+					PLATFORM_DEVID_NONE, res, processed);
+
+	return PTR_ERR_OR_ZERO(fw_cfg_cmdline_dev);
+}
+
+static int fw_cfg_cmdline_get(char *buf, const struct kernel_param *kp)
+{
+	/* stay silent if device was not configured via the command
+	 * line, or if the parameter name (ioport/mmio) doesn't match
+	 * the device setting
+	 */
+	if (!fw_cfg_cmdline_dev ||
+	    (!strcmp(kp->name, "mmio") ^
+	     (fw_cfg_cmdline_dev->resource[0].flags == IORESOURCE_MEM)))
+		return 0;
+
+	switch (fw_cfg_cmdline_dev->num_resources) {
+	case 1:
+		return snprintf(buf, PAGE_SIZE, PH_ADDR_PR_1_FMT,
+				resource_size(&fw_cfg_cmdline_dev->resource[0]),
+				fw_cfg_cmdline_dev->resource[0].start);
+	case 3:
+		return snprintf(buf, PAGE_SIZE, PH_ADDR_PR_3_FMT,
+				resource_size(&fw_cfg_cmdline_dev->resource[0]),
+				fw_cfg_cmdline_dev->resource[0].start,
+				fw_cfg_cmdline_dev->resource[1].start,
+				fw_cfg_cmdline_dev->resource[2].start);
+	case 4:
+		return snprintf(buf, PAGE_SIZE, PH_ADDR_PR_4_FMT,
+				resource_size(&fw_cfg_cmdline_dev->resource[0]),
+				fw_cfg_cmdline_dev->resource[0].start,
+				fw_cfg_cmdline_dev->resource[1].start,
+				fw_cfg_cmdline_dev->resource[2].start,
+				fw_cfg_cmdline_dev->resource[3].start);
+	}
+
+	/* Should never get here */
+	WARN(1, "Unexpected number of resources: %d\n",
+		fw_cfg_cmdline_dev->num_resources);
+	return 0;
+}
+
+static const struct kernel_param_ops fw_cfg_cmdline_param_ops = {
+	.set = fw_cfg_cmdline_set,
+	.get = fw_cfg_cmdline_get,
+};
+
+device_param_cb(ioport, &fw_cfg_cmdline_param_ops, NULL, S_IRUSR);
+device_param_cb(mmio, &fw_cfg_cmdline_param_ops, NULL, S_IRUSR);
+
+#endif /* CONFIG_FW_CFG_SYSFS_CMDLINE */
+
+static int __init fw_cfg_sysfs_init(void)
+{
+	int ret;
+
+	/* create /sys/firmware/qemu_fw_cfg/ top level directory */
+	fw_cfg_top_ko = kobject_create_and_add("qemu_fw_cfg", firmware_kobj);
+	if (!fw_cfg_top_ko)
+		return -ENOMEM;
+
+	ret = platform_driver_register(&fw_cfg_sysfs_driver);
+	if (ret)
+		fw_cfg_kobj_cleanup(fw_cfg_top_ko);
+
+	return ret;
+}
+
+static void __exit fw_cfg_sysfs_exit(void)
+{
+	platform_driver_unregister(&fw_cfg_sysfs_driver);
+
+#ifdef CONFIG_FW_CFG_SYSFS_CMDLINE
+	platform_device_unregister(fw_cfg_cmdline_dev);
+#endif
+
+	/* clean up /sys/firmware/qemu_fw_cfg/ */
+	fw_cfg_kobj_cleanup(fw_cfg_top_ko);
+}
+
+module_init(fw_cfg_sysfs_init);
+module_exit(fw_cfg_sysfs_exit);
diff --git a/drivers/firmware/raspberrypi.c b/drivers/firmware/raspberrypi.c
new file mode 100644
index 0000000..a200a21
--- /dev/null
+++ b/drivers/firmware/raspberrypi.c
@@ -0,0 +1,285 @@
+/*
+ * Defines interfaces for interacting with the Raspberry Pi firmware's
+ * property channel.
+ *
+ * Copyright © 2015 Broadcom
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/dma-mapping.h>
+#include <linux/mailbox_client.h>
+#include <linux/module.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <soc/bcm2835/raspberrypi-firmware.h>
+
+#define MBOX_MSG(chan, data28)		(((data28) & ~0xf) | ((chan) & 0xf))
+#define MBOX_CHAN(msg)			((msg) & 0xf)
+#define MBOX_DATA28(msg)		((msg) & ~0xf)
+#define MBOX_CHAN_PROPERTY		8
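+
+/*
+ * A mailbox word carries 28 bits of data (here a 16-byte aligned bus
+ * address) in bits 31:4 and the channel number in bits 3:0, e.g.
+ * MBOX_MSG(MBOX_CHAN_PROPERTY, 0x40007000) == 0x40007008.
+ */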
+
+#define MAX_RPI_FW_PROP_BUF_SIZE	32
+
+static struct platform_device *rpi_hwmon;
+
+struct rpi_firmware {
+	struct mbox_client cl;
+	struct mbox_chan *chan; /* The property channel. */
+	struct completion c;
+	u32 enabled;
+};
+
+static DEFINE_MUTEX(transaction_lock);
+
+static void response_callback(struct mbox_client *cl, void *msg)
+{
+	struct rpi_firmware *fw = container_of(cl, struct rpi_firmware, cl);
+	complete(&fw->c);
+}
+
+/*
+ * Sends a request to the firmware through the BCM2835 mailbox driver,
+ * and synchronously waits for the reply.
+ */
+static int
+rpi_firmware_transaction(struct rpi_firmware *fw, u32 chan, u32 data)
+{
+	u32 message = MBOX_MSG(chan, data);
+	int ret;
+
+	WARN_ON(data & 0xf);
+
+	mutex_lock(&transaction_lock);
+	reinit_completion(&fw->c);
+	ret = mbox_send_message(fw->chan, &message);
+	if (ret >= 0) {
+		wait_for_completion(&fw->c);
+		ret = 0;
+	} else {
+		dev_err(fw->cl.dev, "mbox_send_message returned %d\n", ret);
+	}
+	mutex_unlock(&transaction_lock);
+
+	return ret;
+}
+
+/**
+ * rpi_firmware_property_list - Submit firmware property list
+ * @fw:		Pointer to firmware structure from rpi_firmware_get().
+ * @data:	Buffer holding tags.
+ * @tag_size:	Size of tags buffer.
+ *
+ * Submits a set of concatenated tags to the VPU firmware through the
+ * mailbox property interface.
+ *
+ * The buffer header and the ending tag are added by this function and
+ * don't need to be supplied, just the actual tags for your operation.
+ * See struct rpi_firmware_property_tag_header for the per-tag
+ * structure.
+ */
+int rpi_firmware_property_list(struct rpi_firmware *fw,
+			       void *data, size_t tag_size)
+{
+	size_t size = tag_size + 12;
+	u32 *buf;
+	dma_addr_t bus_addr;
+	int ret;
+
+	/* Packets are processed a dword at a time. */
+	if (size & 3)
+		return -EINVAL;
+
+	buf = dma_alloc_coherent(fw->cl.dev, PAGE_ALIGN(size), &bus_addr,
+				 GFP_ATOMIC);
+	if (!buf)
+		return -ENOMEM;
+
+	/* The firmware will error out without parsing in this case. */
+	WARN_ON(size >= 1024 * 1024);
+
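+	/*
+	 * Wire format, as assembled below: buf[0] holds the total buffer
+	 * size in bytes, buf[1] the request status code, then the caller's
+	 * concatenated tags, with an end tag in the final dword.
+	 */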
+	buf[0] = size;
+	buf[1] = RPI_FIRMWARE_STATUS_REQUEST;
+	memcpy(&buf[2], data, tag_size);
+	buf[size / 4 - 1] = RPI_FIRMWARE_PROPERTY_END;
+	wmb();
+
+	ret = rpi_firmware_transaction(fw, MBOX_CHAN_PROPERTY, bus_addr);
+
+	rmb();
+	memcpy(data, &buf[2], tag_size);
+	if (ret == 0 && buf[1] != RPI_FIRMWARE_STATUS_SUCCESS) {
+		/*
+		 * The tag name here might not be the one causing the
+		 * error, if there were multiple tags in the request.
+		 * But single-tag is the most common, so go with it.
+		 */
+		dev_err(fw->cl.dev, "Request 0x%08x returned status 0x%08x\n",
+			buf[2], buf[1]);
+		ret = -EINVAL;
+	}
+
+	dma_free_coherent(fw->cl.dev, PAGE_ALIGN(size), buf, bus_addr);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(rpi_firmware_property_list);
+
+/**
+ * rpi_firmware_property - Submit single firmware property
+ * @fw:		Pointer to firmware structure from rpi_firmware_get().
+ * @tag:	One of enum_mbox_property_tag.
+ * @tag_data:	Tag data buffer.
+ * @buf_size:	Buffer size.
+ *
+ * Submits a single tag to the VPU firmware through the mailbox
+ * property interface.
+ *
+ * This is a convenience wrapper around
+ * rpi_firmware_property_list() to avoid some of the
+ * boilerplate in property calls.
+ */
+int rpi_firmware_property(struct rpi_firmware *fw,
+			  u32 tag, void *tag_data, size_t buf_size)
+{
+	/* Single tags are very small (generally 8 bytes), so the
+	 * stack should be safe.
+	 */
+	u8 data[sizeof(struct rpi_firmware_property_tag_header) +
+		MAX_RPI_FW_PROP_BUF_SIZE];
+	struct rpi_firmware_property_tag_header *header =
+		(struct rpi_firmware_property_tag_header *)data;
+	int ret;
+
+	if (WARN_ON(buf_size > sizeof(data) - sizeof(*header)))
+		return -EINVAL;
+
+	header->tag = tag;
+	header->buf_size = buf_size;
+	header->req_resp_size = 0;
+	memcpy(data + sizeof(struct rpi_firmware_property_tag_header),
+	       tag_data, buf_size);
+
+	ret = rpi_firmware_property_list(fw, &data, buf_size + sizeof(*header));
+	memcpy(tag_data,
+	       data + sizeof(struct rpi_firmware_property_tag_header),
+	       buf_size);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(rpi_firmware_property);
+
+static void
+rpi_firmware_print_firmware_revision(struct rpi_firmware *fw)
+{
+	u32 packet;
+	int ret = rpi_firmware_property(fw,
+					RPI_FIRMWARE_GET_FIRMWARE_REVISION,
+					&packet, sizeof(packet));
+
+	if (ret == 0) {
+		struct tm tm;
+
+		time64_to_tm(packet, 0, &tm);
+
+		dev_info(fw->cl.dev,
+			 "Attached to firmware from %04ld-%02d-%02d %02d:%02d\n",
+			 tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday,
+			 tm.tm_hour, tm.tm_min);
+	}
+}
+
+static void
+rpi_register_hwmon_driver(struct device *dev, struct rpi_firmware *fw)
+{
+	u32 packet;
+	int ret = rpi_firmware_property(fw, RPI_FIRMWARE_GET_THROTTLED,
+					&packet, sizeof(packet));
+
+	if (ret)
+		return;
+
+	rpi_hwmon = platform_device_register_data(dev, "raspberrypi-hwmon",
+						  -1, NULL, 0);
+}
+
+static int rpi_firmware_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct rpi_firmware *fw;
+
+	fw = devm_kzalloc(dev, sizeof(*fw), GFP_KERNEL);
+	if (!fw)
+		return -ENOMEM;
+
+	fw->cl.dev = dev;
+	fw->cl.rx_callback = response_callback;
+	fw->cl.tx_block = true;
+
+	fw->chan = mbox_request_channel(&fw->cl, 0);
+	if (IS_ERR(fw->chan)) {
+		int ret = PTR_ERR(fw->chan);
+		if (ret != -EPROBE_DEFER)
+			dev_err(dev, "Failed to get mbox channel: %d\n", ret);
+		return ret;
+	}
+
+	init_completion(&fw->c);
+
+	platform_set_drvdata(pdev, fw);
+
+	rpi_firmware_print_firmware_revision(fw);
+	rpi_register_hwmon_driver(dev, fw);
+
+	return 0;
+}
+
+static int rpi_firmware_remove(struct platform_device *pdev)
+{
+	struct rpi_firmware *fw = platform_get_drvdata(pdev);
+
+	platform_device_unregister(rpi_hwmon);
+	rpi_hwmon = NULL;
+	mbox_free_channel(fw->chan);
+
+	return 0;
+}
+
+/**
+ * rpi_firmware_get - Get pointer to rpi_firmware structure.
+ * @firmware_node:    Pointer to the firmware Device Tree node.
+ *
+ * Returns NULL if the firmware device is not ready.
+ */
+struct rpi_firmware *rpi_firmware_get(struct device_node *firmware_node)
+{
+	struct platform_device *pdev = of_find_device_by_node(firmware_node);
+
+	if (!pdev)
+		return NULL;
+
+	return platform_get_drvdata(pdev);
+}
+EXPORT_SYMBOL_GPL(rpi_firmware_get);
+
+static const struct of_device_id rpi_firmware_of_match[] = {
+	{ .compatible = "raspberrypi,bcm2835-firmware", },
+	{},
+};
+MODULE_DEVICE_TABLE(of, rpi_firmware_of_match);
+
+static struct platform_driver rpi_firmware_driver = {
+	.driver = {
+		.name = "raspberrypi-firmware",
+		.of_match_table = rpi_firmware_of_match,
+	},
+	.probe		= rpi_firmware_probe,
+	.remove		= rpi_firmware_remove,
+};
+module_platform_driver(rpi_firmware_driver);
+
+MODULE_AUTHOR("Eric Anholt <eric@anholt.net>");
+MODULE_DESCRIPTION("Raspberry Pi firmware driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/firmware/scpi_pm_domain.c b/drivers/firmware/scpi_pm_domain.c
new file mode 100644
index 0000000..f395dec
--- /dev/null
+++ b/drivers/firmware/scpi_pm_domain.c
@@ -0,0 +1,163 @@
+/*
+ * SCPI Generic power domain support.
+ *
+ * Copyright (C) 2016 ARM Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of_platform.h>
+#include <linux/pm_domain.h>
+#include <linux/scpi_protocol.h>
+
+struct scpi_pm_domain {
+	struct generic_pm_domain genpd;
+	struct scpi_ops *ops;
+	u32 domain;
+	char name[30];
+};
+
+/*
+ * These device power state values are not well-defined in the specification.
+ * In case different implementations use different values, we can make these
+ * specific to compatibles rather than getting the values from the device tree.
+ */
+enum scpi_power_domain_state {
+	SCPI_PD_STATE_ON = 0,
+	SCPI_PD_STATE_OFF = 3,
+};
+
+#define to_scpi_pd(gpd) container_of(gpd, struct scpi_pm_domain, genpd)
+
+static int scpi_pd_power(struct scpi_pm_domain *pd, bool power_on)
+{
+	int ret;
+	enum scpi_power_domain_state state;
+
+	if (power_on)
+		state = SCPI_PD_STATE_ON;
+	else
+		state = SCPI_PD_STATE_OFF;
+
+	ret = pd->ops->device_set_power_state(pd->domain, state);
+	if (ret)
+		return ret;
+
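+	/* genpd expects 0 on success: confirm the firmware reached 'state' */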
+	return !(state == pd->ops->device_get_power_state(pd->domain));
+}
+
+static int scpi_pd_power_on(struct generic_pm_domain *domain)
+{
+	struct scpi_pm_domain *pd = to_scpi_pd(domain);
+
+	return scpi_pd_power(pd, true);
+}
+
+static int scpi_pd_power_off(struct generic_pm_domain *domain)
+{
+	struct scpi_pm_domain *pd = to_scpi_pd(domain);
+
+	return scpi_pd_power(pd, false);
+}
+
+static int scpi_pm_domain_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct device_node *np = dev->of_node;
+	struct scpi_pm_domain *scpi_pd;
+	struct genpd_onecell_data *scpi_pd_data;
+	struct generic_pm_domain **domains;
+	struct scpi_ops *scpi_ops;
+	int ret, num_domains, i;
+
+	scpi_ops = get_scpi_ops();
+	if (!scpi_ops)
+		return -EPROBE_DEFER;
+
+	if (!np) {
+		dev_err(dev, "device tree node not found\n");
+		return -ENODEV;
+	}
+
+	if (!scpi_ops->device_set_power_state ||
+	    !scpi_ops->device_get_power_state) {
+		dev_err(dev, "power domains not supported in the firmware\n");
+		return -ENODEV;
+	}
+
+	ret = of_property_read_u32(np, "num-domains", &num_domains);
+	if (ret) {
+		dev_err(dev, "number of domains not found\n");
+		return -EINVAL;
+	}
+
+	scpi_pd = devm_kcalloc(dev, num_domains, sizeof(*scpi_pd), GFP_KERNEL);
+	if (!scpi_pd)
+		return -ENOMEM;
+
+	scpi_pd_data = devm_kzalloc(dev, sizeof(*scpi_pd_data), GFP_KERNEL);
+	if (!scpi_pd_data)
+		return -ENOMEM;
+
+	domains = devm_kcalloc(dev, num_domains, sizeof(*domains), GFP_KERNEL);
+	if (!domains)
+		return -ENOMEM;
+
+	for (i = 0; i < num_domains; i++, scpi_pd++) {
+		domains[i] = &scpi_pd->genpd;
+
+		scpi_pd->domain = i;
+		scpi_pd->ops = scpi_ops;
+		sprintf(scpi_pd->name, "%s.%d", np->name, i);
+		scpi_pd->genpd.name = scpi_pd->name;
+		scpi_pd->genpd.power_off = scpi_pd_power_off;
+		scpi_pd->genpd.power_on = scpi_pd_power_on;
+
+		/*
+		 * Treat all power domains as off at boot.
+		 *
+		 * The SCP firmware itself may have switched on some domains,
+		 * but for reference counting purposes, keep it this way.
+		 */
+		pm_genpd_init(&scpi_pd->genpd, NULL, true);
+	}
+
+	scpi_pd_data->domains = domains;
+	scpi_pd_data->num_domains = num_domains;
+
+	of_genpd_add_provider_onecell(np, scpi_pd_data);
+
+	return 0;
+}
+
+static const struct of_device_id scpi_power_domain_ids[] = {
+	{ .compatible = "arm,scpi-power-domains", },
+	{ /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, scpi_power_domain_ids);
+
+static struct platform_driver scpi_power_domain_driver = {
+	.driver	= {
+		.name = "scpi_power_domain",
+		.of_match_table = scpi_power_domain_ids,
+	},
+	.probe = scpi_pm_domain_probe,
+};
+module_platform_driver(scpi_power_domain_driver);
+
+MODULE_AUTHOR("Sudeep Holla <sudeep.holla@arm.com>");
+MODULE_DESCRIPTION("ARM SCPI power domain driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/firmware/tegra/Kconfig b/drivers/firmware/tegra/Kconfig
new file mode 100644
index 0000000..ff2730d
--- /dev/null
+++ b/drivers/firmware/tegra/Kconfig
@@ -0,0 +1,25 @@
+menu "Tegra firmware driver"
+
+config TEGRA_IVC
+	bool "Tegra IVC protocol"
+	depends on ARCH_TEGRA
+	help
+	  IVC (Inter-VM Communication) protocol is part of the IPC
+	  (Inter Processor Communication) framework on Tegra. It maintains the
+	  data and the different communication channels in SysRAM or RAM and
+	  keeps the content in synchronization between the host CPU and remote
+	  processors.
+
+config TEGRA_BPMP
+	bool "Tegra BPMP driver"
+	depends on ARCH_TEGRA && TEGRA_HSP_MBOX && TEGRA_IVC
+	help
+	  BPMP (Boot and Power Management Processor) is designed to off-load
+	  PM functions, including clock/DVFS/thermal/power management, from
+	  the CPU. It uses HSP as the HW synchronization and notification
+	  module and the IVC module as the message communication protocol.
+
+	  This driver manages the IPC interface between host CPU and the
+	  firmware running on BPMP.
+
+endmenu
diff --git a/drivers/firmware/tegra/Makefile b/drivers/firmware/tegra/Makefile
new file mode 100644
index 0000000..1b826dc
--- /dev/null
+++ b/drivers/firmware/tegra/Makefile
@@ -0,0 +1,4 @@
+tegra-bpmp-y			= bpmp.o
+tegra-bpmp-$(CONFIG_DEBUG_FS)	+= bpmp-debugfs.o
+obj-$(CONFIG_TEGRA_BPMP)	+= tegra-bpmp.o
+obj-$(CONFIG_TEGRA_IVC)		+= ivc.o
diff --git a/drivers/firmware/tegra/bpmp-debugfs.c b/drivers/firmware/tegra/bpmp-debugfs.c
new file mode 100644
index 0000000..f7f6a0a
--- /dev/null
+++ b/drivers/firmware/tegra/bpmp-debugfs.c
@@ -0,0 +1,444 @@
+/*
+ * Copyright (c) 2017, NVIDIA CORPORATION.  All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ */
+#include <linux/debugfs.h>
+#include <linux/dma-mapping.h>
+#include <linux/uaccess.h>
+
+#include <soc/tegra/bpmp.h>
+#include <soc/tegra/bpmp-abi.h>
+
+struct seqbuf {
+	char *buf;
+	size_t pos;
+	size_t size;
+};
+
+static void seqbuf_init(struct seqbuf *seqbuf, void *buf, size_t size)
+{
+	seqbuf->buf = buf;
+	seqbuf->size = size;
+	seqbuf->pos = 0;
+}
+
+static size_t seqbuf_avail(struct seqbuf *seqbuf)
+{
+	return seqbuf->pos < seqbuf->size ? seqbuf->size - seqbuf->pos : 0;
+}
+
+static int seqbuf_status(struct seqbuf *seqbuf)
+{
+	return seqbuf->pos <= seqbuf->size ? 0 : -EOVERFLOW;
+}
+
+static int seqbuf_eof(struct seqbuf *seqbuf)
+{
+	return seqbuf->pos >= seqbuf->size;
+}
+
+static int seqbuf_read(struct seqbuf *seqbuf, void *buf, size_t nbyte)
+{
+	nbyte = min(nbyte, seqbuf_avail(seqbuf));
+	memcpy(buf, seqbuf->buf + seqbuf->pos, nbyte);
+	seqbuf->pos += nbyte;
+	return seqbuf_status(seqbuf);
+}
+
+static int seqbuf_read_u32(struct seqbuf *seqbuf, uint32_t *v)
+{
+	int err;
+
+	err = seqbuf_read(seqbuf, v, 4);
+	*v = le32_to_cpu(*v);
+	return err;
+}
+
+static int seqbuf_read_str(struct seqbuf *seqbuf, const char **str)
+{
+	*str = seqbuf->buf + seqbuf->pos;
+	seqbuf->pos += strnlen(*str, seqbuf_avail(seqbuf));
+	seqbuf->pos++;
+	return seqbuf_status(seqbuf);
+}
+
+static void seqbuf_seek(struct seqbuf *seqbuf, ssize_t offset)
+{
+	seqbuf->pos += offset;
+}
+
+/* map filename in Linux debugfs to corresponding entry in BPMP */
+static const char *get_filename(struct tegra_bpmp *bpmp,
+				const struct file *file, char *buf, int size)
+{
+	char root_path_buf[512];
+	const char *root_path;
+	const char *filename;
+	size_t root_len;
+
+	root_path = dentry_path(bpmp->debugfs_mirror, root_path_buf,
+				sizeof(root_path_buf));
+	if (IS_ERR(root_path))
+		return NULL;
+
+	root_len = strlen(root_path);
+
+	filename = dentry_path(file->f_path.dentry, buf, size);
+	if (IS_ERR(filename))
+		return NULL;
+
+	if (strlen(filename) < root_len ||
+			strncmp(filename, root_path, root_len))
+		return NULL;
+
+	filename += root_len;
+
+	return filename;
+}
+
+static int mrq_debugfs_read(struct tegra_bpmp *bpmp,
+			    dma_addr_t name, size_t sz_name,
+			    dma_addr_t data, size_t sz_data,
+			    size_t *nbytes)
+{
+	struct mrq_debugfs_request req = {
+		.cmd = cpu_to_le32(CMD_DEBUGFS_READ),
+		.fop = {
+			.fnameaddr = cpu_to_le32((uint32_t)name),
+			.fnamelen = cpu_to_le32((uint32_t)sz_name),
+			.dataaddr = cpu_to_le32((uint32_t)data),
+			.datalen = cpu_to_le32((uint32_t)sz_data),
+		},
+	};
+	struct mrq_debugfs_response resp;
+	struct tegra_bpmp_message msg = {
+		.mrq = MRQ_DEBUGFS,
+		.tx = {
+			.data = &req,
+			.size = sizeof(req),
+		},
+		.rx = {
+			.data = &resp,
+			.size = sizeof(resp),
+		},
+	};
+	int err;
+
+	err = tegra_bpmp_transfer(bpmp, &msg);
+	if (err < 0)
+		return err;
+
+	*nbytes = (size_t)resp.fop.nbytes;
+
+	return 0;
+}
+
+static int mrq_debugfs_write(struct tegra_bpmp *bpmp,
+			     dma_addr_t name, size_t sz_name,
+			     dma_addr_t data, size_t sz_data)
+{
+	const struct mrq_debugfs_request req = {
+		.cmd = cpu_to_le32(CMD_DEBUGFS_WRITE),
+		.fop = {
+			.fnameaddr = cpu_to_le32((uint32_t)name),
+			.fnamelen = cpu_to_le32((uint32_t)sz_name),
+			.dataaddr = cpu_to_le32((uint32_t)data),
+			.datalen = cpu_to_le32((uint32_t)sz_data),
+		},
+	};
+	struct tegra_bpmp_message msg = {
+		.mrq = MRQ_DEBUGFS,
+		.tx = {
+			.data = &req,
+			.size = sizeof(req),
+		},
+	};
+
+	return tegra_bpmp_transfer(bpmp, &msg);
+}
+
+static int mrq_debugfs_dumpdir(struct tegra_bpmp *bpmp, dma_addr_t addr,
+			       size_t size, size_t *nbytes)
+{
+	const struct mrq_debugfs_request req = {
+		.cmd = cpu_to_le32(CMD_DEBUGFS_DUMPDIR),
+		.dumpdir = {
+			.dataaddr = cpu_to_le32((uint32_t)addr),
+			.datalen = cpu_to_le32((uint32_t)size),
+		},
+	};
+	struct mrq_debugfs_response resp;
+	struct tegra_bpmp_message msg = {
+		.mrq = MRQ_DEBUGFS,
+		.tx = {
+			.data = &req,
+			.size = sizeof(req),
+		},
+		.rx = {
+			.data = &resp,
+			.size = sizeof(resp),
+		},
+	};
+	int err;
+
+	err = tegra_bpmp_transfer(bpmp, &msg);
+	if (err < 0)
+		return err;
+
+	*nbytes = (size_t)resp.dumpdir.nbytes;
+
+	return 0;
+}
+
+static int debugfs_show(struct seq_file *m, void *p)
+{
+	struct file *file = m->private;
+	struct inode *inode = file_inode(file);
+	struct tegra_bpmp *bpmp = inode->i_private;
+	const size_t datasize = m->size;
+	const size_t namesize = SZ_256;
+	void *datavirt, *namevirt;
+	dma_addr_t dataphys, namephys;
+	char buf[256];
+	const char *filename;
+	size_t len, nbytes;
+	int ret;
+
+	filename = get_filename(bpmp, file, buf, sizeof(buf));
+	if (!filename)
+		return -ENOENT;
+
+	namevirt = dma_alloc_coherent(bpmp->dev, namesize, &namephys,
+				      GFP_KERNEL | GFP_DMA32);
+	if (!namevirt)
+		return -ENOMEM;
+
+	datavirt = dma_alloc_coherent(bpmp->dev, datasize, &dataphys,
+				      GFP_KERNEL | GFP_DMA32);
+	if (!datavirt) {
+		ret = -ENOMEM;
+		goto free_namebuf;
+	}
+
+	len = strlen(filename);
+	strncpy(namevirt, filename, namesize);
+
+	ret = mrq_debugfs_read(bpmp, namephys, len, dataphys, datasize,
+			       &nbytes);
+
+	if (!ret)
+		seq_write(m, datavirt, nbytes);
+
+	dma_free_coherent(bpmp->dev, datasize, datavirt, dataphys);
+free_namebuf:
+	dma_free_coherent(bpmp->dev, namesize, namevirt, namephys);
+
+	return ret;
+}
+
+static int debugfs_open(struct inode *inode, struct file *file)
+{
+	return single_open_size(file, debugfs_show, file, SZ_128K);
+}
+
+static ssize_t debugfs_store(struct file *file, const char __user *buf,
+		size_t count, loff_t *f_pos)
+{
+	struct inode *inode = file_inode(file);
+	struct tegra_bpmp *bpmp = inode->i_private;
+	const size_t datasize = count;
+	const size_t namesize = SZ_256;
+	void *datavirt, *namevirt;
+	dma_addr_t dataphys, namephys;
+	char fnamebuf[256];
+	const char *filename;
+	size_t len;
+	int ret;
+
+	filename = get_filename(bpmp, file, fnamebuf, sizeof(fnamebuf));
+	if (!filename)
+		return -ENOENT;
+
+	namevirt = dma_alloc_coherent(bpmp->dev, namesize, &namephys,
+				      GFP_KERNEL | GFP_DMA32);
+	if (!namevirt)
+		return -ENOMEM;
+
+	datavirt = dma_alloc_coherent(bpmp->dev, datasize, &dataphys,
+				      GFP_KERNEL | GFP_DMA32);
+	if (!datavirt) {
+		ret = -ENOMEM;
+		goto free_namebuf;
+	}
+
+	len = strlen(filename);
+	strncpy(namevirt, filename, namesize);
+
+	if (copy_from_user(datavirt, buf, count)) {
+		ret = -EFAULT;
+		goto free_databuf;
+	}
+
+	ret = mrq_debugfs_write(bpmp, namephys, len, dataphys,
+				count);
+
+free_databuf:
+	dma_free_coherent(bpmp->dev, datasize, datavirt, dataphys);
+free_namebuf:
+	dma_free_coherent(bpmp->dev, namesize, namevirt, namephys);
+
+	return ret ?: count;
+}
+
+static const struct file_operations debugfs_fops = {
+	.open		= debugfs_open,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.write		= debugfs_store,
+	.release	= single_release,
+};
+
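+
+/*
+ * The dumpdir blob is parsed as a flat sequence of records: a u32 depth,
+ * a u32 type/mode word and a NUL-terminated name. For example, the
+ * (illustrative, not actual BPMP output) stream
+ *
+ *   (0, ISDIR, "clk") (1, IRUSR|IWUSR, "rate") (0, IRUSR, "version")
+ *
+ * would be mirrored as debug/clk/rate and debug/version.
+ */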
+static int bpmp_populate_dir(struct tegra_bpmp *bpmp, struct seqbuf *seqbuf,
+			     struct dentry *parent, uint32_t depth)
+{
+	int err;
+	uint32_t d, t;
+	const char *name;
+	struct dentry *dentry;
+
+	while (!seqbuf_eof(seqbuf)) {
+		err = seqbuf_read_u32(seqbuf, &d);
+		if (err < 0)
+			return err;
+
+		if (d < depth) {
+			seqbuf_seek(seqbuf, -4);
+			/* go up a level */
+			return 0;
+		} else if (d != depth) {
+			/* malformed data received from BPMP */
+			return -EIO;
+		}
+
+		err = seqbuf_read_u32(seqbuf, &t);
+		if (err < 0)
+			return err;
+		err = seqbuf_read_str(seqbuf, &name);
+		if (err < 0)
+			return err;
+
+		if (t & DEBUGFS_S_ISDIR) {
+			dentry = debugfs_create_dir(name, parent);
+			if (!dentry)
+				return -ENOMEM;
+			err = bpmp_populate_dir(bpmp, seqbuf, dentry, depth + 1);
+			if (err < 0)
+				return err;
+		} else {
+			umode_t mode;
+
+			mode = t & DEBUGFS_S_IRUSR ? S_IRUSR : 0;
+			mode |= t & DEBUGFS_S_IWUSR ? S_IWUSR : 0;
+			dentry = debugfs_create_file(name, mode,
+						     parent, bpmp,
+						     &debugfs_fops);
+			if (!dentry)
+				return -ENOMEM;
+		}
+	}
+
+	return 0;
+}
+
+static int create_debugfs_mirror(struct tegra_bpmp *bpmp, void *buf,
+				 size_t bufsize, struct dentry *root)
+{
+	struct seqbuf seqbuf;
+	int err;
+
+	bpmp->debugfs_mirror = debugfs_create_dir("debug", root);
+	if (!bpmp->debugfs_mirror)
+		return -ENOMEM;
+
+	seqbuf_init(&seqbuf, buf, bufsize);
+	err = bpmp_populate_dir(bpmp, &seqbuf, bpmp->debugfs_mirror, 0);
+	if (err < 0) {
+		debugfs_remove_recursive(bpmp->debugfs_mirror);
+		bpmp->debugfs_mirror = NULL;
+	}
+
+	return err;
+}
+
+static int mrq_is_supported(struct tegra_bpmp *bpmp, unsigned int mrq)
+{
+	struct mrq_query_abi_request req = { .mrq = cpu_to_le32(mrq) };
+	struct mrq_query_abi_response resp;
+	struct tegra_bpmp_message msg = {
+		.mrq = MRQ_QUERY_ABI,
+		.tx = {
+			.data = &req,
+			.size = sizeof(req),
+		},
+		.rx = {
+			.data = &resp,
+			.size = sizeof(resp),
+		},
+	};
+	int ret;
+
+	ret = tegra_bpmp_transfer(bpmp, &msg);
+	if (ret < 0) {
+		/* something went wrong; assume not supported */
+		dev_warn(bpmp->dev, "tegra_bpmp_transfer failed (%d)\n", ret);
+		return 0;
+	}
+
+	return resp.status ? 0 : 1;
+}
+
+int tegra_bpmp_init_debugfs(struct tegra_bpmp *bpmp)
+{
+	dma_addr_t phys;
+	void *virt;
+	const size_t sz = SZ_256K;
+	size_t nbytes;
+	int ret;
+	struct dentry *root;
+
+	if (!mrq_is_supported(bpmp, MRQ_DEBUGFS))
+		return 0;
+
+	root = debugfs_create_dir("bpmp", NULL);
+	if (!root)
+		return -ENOMEM;
+
+	virt = dma_alloc_coherent(bpmp->dev, sz, &phys,
+				  GFP_KERNEL | GFP_DMA32);
+	if (!virt) {
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	ret = mrq_debugfs_dumpdir(bpmp, phys, sz, &nbytes);
+	if (ret < 0)
+		goto free;
+
+	ret = create_debugfs_mirror(bpmp, virt, nbytes, root);
+free:
+	dma_free_coherent(bpmp->dev, sz, virt, phys);
+out:
+	if (ret < 0)
+		debugfs_remove(root);
+
+	return ret;
+}
diff --git a/drivers/firmware/tegra/bpmp.c b/drivers/firmware/tegra/bpmp.c
new file mode 100644
index 0000000..14a456a
--- /dev/null
+++ b/drivers/firmware/tegra/bpmp.c
@@ -0,0 +1,882 @@
+/*
+ * Copyright (c) 2016, NVIDIA CORPORATION.  All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ */
+
+#include <linux/clk/tegra.h>
+#include <linux/genalloc.h>
+#include <linux/mailbox_client.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/semaphore.h>
+#include <linux/sched/clock.h>
+
+#include <soc/tegra/bpmp.h>
+#include <soc/tegra/bpmp-abi.h>
+#include <soc/tegra/ivc.h>
+
+#define MSG_ACK		BIT(0)
+#define MSG_RING	BIT(1)
+
+static inline struct tegra_bpmp *
+mbox_client_to_bpmp(struct mbox_client *client)
+{
+	return container_of(client, struct tegra_bpmp, mbox.client);
+}
+
+struct tegra_bpmp *tegra_bpmp_get(struct device *dev)
+{
+	struct platform_device *pdev;
+	struct tegra_bpmp *bpmp;
+	struct device_node *np;
+
+	np = of_parse_phandle(dev->of_node, "nvidia,bpmp", 0);
+	if (!np)
+		return ERR_PTR(-ENOENT);
+
+	pdev = of_find_device_by_node(np);
+	if (!pdev) {
+		bpmp = ERR_PTR(-ENODEV);
+		goto put;
+	}
+
+	bpmp = platform_get_drvdata(pdev);
+	if (!bpmp) {
+		bpmp = ERR_PTR(-EPROBE_DEFER);
+		put_device(&pdev->dev);
+		goto put;
+	}
+
+put:
+	of_node_put(np);
+	return bpmp;
+}
+EXPORT_SYMBOL_GPL(tegra_bpmp_get);
+
+void tegra_bpmp_put(struct tegra_bpmp *bpmp)
+{
+	if (bpmp)
+		put_device(bpmp->dev);
+}
+EXPORT_SYMBOL_GPL(tegra_bpmp_put);
+
+static int
+tegra_bpmp_channel_get_thread_index(struct tegra_bpmp_channel *channel)
+{
+	struct tegra_bpmp *bpmp = channel->bpmp;
+	unsigned int count;
+	int index;
+
+	count = bpmp->soc->channels.thread.count;
+
+	index = channel - channel->bpmp->threaded_channels;
+	if (index < 0 || index >= count)
+		return -EINVAL;
+
+	return index;
+}
+
+static bool tegra_bpmp_message_valid(const struct tegra_bpmp_message *msg)
+{
+	return (msg->tx.size <= MSG_DATA_MIN_SZ) &&
+	       (msg->rx.size <= MSG_DATA_MIN_SZ) &&
+	       (msg->tx.size == 0 || msg->tx.data) &&
+	       (msg->rx.size == 0 || msg->rx.data);
+}
+
+static bool tegra_bpmp_master_acked(struct tegra_bpmp_channel *channel)
+{
+	void *frame;
+
+	frame = tegra_ivc_read_get_next_frame(channel->ivc);
+	if (IS_ERR(frame)) {
+		channel->ib = NULL;
+		return false;
+	}
+
+	channel->ib = frame;
+
+	return true;
+}
+
+static int tegra_bpmp_wait_ack(struct tegra_bpmp_channel *channel)
+{
+	unsigned long timeout = channel->bpmp->soc->channels.cpu_tx.timeout;
+	ktime_t end;
+
+	end = ktime_add_us(ktime_get(), timeout);
+
+	do {
+		if (tegra_bpmp_master_acked(channel))
+			return 0;
+	} while (ktime_before(ktime_get(), end));
+
+	return -ETIMEDOUT;
+}
+
+static bool tegra_bpmp_master_free(struct tegra_bpmp_channel *channel)
+{
+	void *frame;
+
+	frame = tegra_ivc_write_get_next_frame(channel->ivc);
+	if (IS_ERR(frame)) {
+		channel->ob = NULL;
+		return false;
+	}
+
+	channel->ob = frame;
+
+	return true;
+}
+
+static int tegra_bpmp_wait_master_free(struct tegra_bpmp_channel *channel)
+{
+	unsigned long timeout = channel->bpmp->soc->channels.cpu_tx.timeout;
+	ktime_t start, now;
+
+	start = ns_to_ktime(local_clock());
+
+	do {
+		if (tegra_bpmp_master_free(channel))
+			return 0;
+
+		now = ns_to_ktime(local_clock());
+	} while (ktime_us_delta(now, start) < timeout);
+
+	return -ETIMEDOUT;
+}
+
+static ssize_t __tegra_bpmp_channel_read(struct tegra_bpmp_channel *channel,
+					 void *data, size_t size, int *ret)
+{
+	int err;
+
+	if (data && size > 0)
+		memcpy(data, channel->ib->data, size);
+
+	err = tegra_ivc_read_advance(channel->ivc);
+	if (err < 0)
+		return err;
+
+	*ret = channel->ib->code;
+
+	return 0;
+}
+
+static ssize_t tegra_bpmp_channel_read(struct tegra_bpmp_channel *channel,
+				       void *data, size_t size, int *ret)
+{
+	struct tegra_bpmp *bpmp = channel->bpmp;
+	unsigned long flags;
+	ssize_t err;
+	int index;
+
+	index = tegra_bpmp_channel_get_thread_index(channel);
+	if (index < 0) {
+		err = index;
+		goto unlock;
+	}
+
+	spin_lock_irqsave(&bpmp->lock, flags);
+	err = __tegra_bpmp_channel_read(channel, data, size, ret);
+	clear_bit(index, bpmp->threaded.allocated);
+	spin_unlock_irqrestore(&bpmp->lock, flags);
+
+unlock:
+	up(&bpmp->threaded.lock);
+
+	return err;
+}
+
+static ssize_t __tegra_bpmp_channel_write(struct tegra_bpmp_channel *channel,
+					  unsigned int mrq, unsigned long flags,
+					  const void *data, size_t size)
+{
+	channel->ob->code = mrq;
+	channel->ob->flags = flags;
+
+	if (data && size > 0)
+		memcpy(channel->ob->data, data, size);
+
+	return tegra_ivc_write_advance(channel->ivc);
+}
+
+static struct tegra_bpmp_channel *
+tegra_bpmp_write_threaded(struct tegra_bpmp *bpmp, unsigned int mrq,
+			  const void *data, size_t size)
+{
+	unsigned long timeout = bpmp->soc->channels.thread.timeout;
+	unsigned int count = bpmp->soc->channels.thread.count;
+	struct tegra_bpmp_channel *channel;
+	unsigned long flags;
+	unsigned int index;
+	int err;
+
+	err = down_timeout(&bpmp->threaded.lock, usecs_to_jiffies(timeout));
+	if (err < 0)
+		return ERR_PTR(err);
+
+	spin_lock_irqsave(&bpmp->lock, flags);
+
+	index = find_first_zero_bit(bpmp->threaded.allocated, count);
+	if (index == count) {
+		err = -EBUSY;
+		goto unlock;
+	}
+
+	channel = &bpmp->threaded_channels[index];
+
+	if (!tegra_bpmp_master_free(channel)) {
+		err = -EBUSY;
+		goto unlock;
+	}
+
+	set_bit(index, bpmp->threaded.allocated);
+
+	err = __tegra_bpmp_channel_write(channel, mrq, MSG_ACK | MSG_RING,
+					 data, size);
+	if (err < 0)
+		goto clear_allocated;
+
+	set_bit(index, bpmp->threaded.busy);
+
+	spin_unlock_irqrestore(&bpmp->lock, flags);
+	return channel;
+
+clear_allocated:
+	clear_bit(index, bpmp->threaded.allocated);
+unlock:
+	spin_unlock_irqrestore(&bpmp->lock, flags);
+	up(&bpmp->threaded.lock);
+
+	return ERR_PTR(err);
+}
+
+static ssize_t tegra_bpmp_channel_write(struct tegra_bpmp_channel *channel,
+					unsigned int mrq, unsigned long flags,
+					const void *data, size_t size)
+{
+	int err;
+
+	err = tegra_bpmp_wait_master_free(channel);
+	if (err < 0)
+		return err;
+
+	return __tegra_bpmp_channel_write(channel, mrq, flags, data, size);
+}
+
+int tegra_bpmp_transfer_atomic(struct tegra_bpmp *bpmp,
+			       struct tegra_bpmp_message *msg)
+{
+	struct tegra_bpmp_channel *channel;
+	int err;
+
+	if (WARN_ON(!irqs_disabled()))
+		return -EPERM;
+
+	if (!tegra_bpmp_message_valid(msg))
+		return -EINVAL;
+
+	channel = bpmp->tx_channel;
+
+	spin_lock(&bpmp->atomic_tx_lock);
+
+	err = tegra_bpmp_channel_write(channel, msg->mrq, MSG_ACK,
+				       msg->tx.data, msg->tx.size);
+	if (err < 0) {
+		spin_unlock(&bpmp->atomic_tx_lock);
+		return err;
+	}
+
+	spin_unlock(&bpmp->atomic_tx_lock);
+
+	err = mbox_send_message(bpmp->mbox.channel, NULL);
+	if (err < 0)
+		return err;
+
+	mbox_client_txdone(bpmp->mbox.channel, 0);
+
+	err = tegra_bpmp_wait_ack(channel);
+	if (err < 0)
+		return err;
+
+	return __tegra_bpmp_channel_read(channel, msg->rx.data, msg->rx.size,
+					 &msg->rx.ret);
+}
+EXPORT_SYMBOL_GPL(tegra_bpmp_transfer_atomic);
+
+int tegra_bpmp_transfer(struct tegra_bpmp *bpmp,
+			struct tegra_bpmp_message *msg)
+{
+	struct tegra_bpmp_channel *channel;
+	unsigned long timeout;
+	int err;
+
+	if (WARN_ON(irqs_disabled()))
+		return -EPERM;
+
+	if (!tegra_bpmp_message_valid(msg))
+		return -EINVAL;
+
+	channel = tegra_bpmp_write_threaded(bpmp, msg->mrq, msg->tx.data,
+					    msg->tx.size);
+	if (IS_ERR(channel))
+		return PTR_ERR(channel);
+
+	err = mbox_send_message(bpmp->mbox.channel, NULL);
+	if (err < 0)
+		return err;
+
+	mbox_client_txdone(bpmp->mbox.channel, 0);
+
+	timeout = usecs_to_jiffies(bpmp->soc->channels.thread.timeout);
+
+	err = wait_for_completion_timeout(&channel->completion, timeout);
+	if (err == 0)
+		return -ETIMEDOUT;
+
+	return tegra_bpmp_channel_read(channel, msg->rx.data, msg->rx.size,
+				       &msg->rx.ret);
+}
+EXPORT_SYMBOL_GPL(tegra_bpmp_transfer);
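+
+/*
+ * Editor's usage sketch (hypothetical client, not part of this driver):
+ * a consumer that references the BPMP via the "nvidia,bpmp" phandle
+ * would typically issue a request/response pair like the following,
+ * where MRQ_EXAMPLE and the request/response structures are
+ * placeholders:
+ *
+ *	struct mrq_example_request request = { 0 };
+ *	struct mrq_example_response response;
+ *	struct tegra_bpmp_message msg = {
+ *		.mrq = MRQ_EXAMPLE,
+ *		.tx = { .data = &request, .size = sizeof(request) },
+ *		.rx = { .data = &response, .size = sizeof(response) },
+ *	};
+ *	struct tegra_bpmp *bpmp;
+ *	int err;
+ *
+ *	bpmp = tegra_bpmp_get(dev);
+ *	if (IS_ERR(bpmp))
+ *		return PTR_ERR(bpmp);
+ *
+ *	err = tegra_bpmp_transfer(bpmp, &msg);
+ *	tegra_bpmp_put(bpmp);
+ *
+ * mrq_is_supported() in debugfs.c follows exactly this pattern with
+ * MRQ_QUERY_ABI.
+ */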
+
+static struct tegra_bpmp_mrq *tegra_bpmp_find_mrq(struct tegra_bpmp *bpmp,
+						  unsigned int mrq)
+{
+	struct tegra_bpmp_mrq *entry;
+
+	list_for_each_entry(entry, &bpmp->mrqs, list)
+		if (entry->mrq == mrq)
+			return entry;
+
+	return NULL;
+}
+
+void tegra_bpmp_mrq_return(struct tegra_bpmp_channel *channel, int code,
+			   const void *data, size_t size)
+{
+	unsigned long flags = channel->ib->flags;
+	struct tegra_bpmp *bpmp = channel->bpmp;
+	struct tegra_bpmp_mb_data *frame;
+	int err;
+
+	if (WARN_ON(size > MSG_DATA_MIN_SZ))
+		return;
+
+	err = tegra_ivc_read_advance(channel->ivc);
+	if (WARN_ON(err < 0))
+		return;
+
+	if ((flags & MSG_ACK) == 0)
+		return;
+
+	frame = tegra_ivc_write_get_next_frame(channel->ivc);
+	if (WARN_ON(IS_ERR(frame)))
+		return;
+
+	frame->code = code;
+
+	if (data && size > 0)
+		memcpy(frame->data, data, size);
+
+	err = tegra_ivc_write_advance(channel->ivc);
+	if (WARN_ON(err < 0))
+		return;
+
+	if (flags & MSG_RING) {
+		err = mbox_send_message(bpmp->mbox.channel, NULL);
+		if (WARN_ON(err < 0))
+			return;
+
+		mbox_client_txdone(bpmp->mbox.channel, 0);
+	}
+}
+EXPORT_SYMBOL_GPL(tegra_bpmp_mrq_return);
+
+static void tegra_bpmp_handle_mrq(struct tegra_bpmp *bpmp,
+				  unsigned int mrq,
+				  struct tegra_bpmp_channel *channel)
+{
+	struct tegra_bpmp_mrq *entry;
+	u32 zero = 0;
+
+	spin_lock(&bpmp->lock);
+
+	entry = tegra_bpmp_find_mrq(bpmp, mrq);
+	if (!entry) {
+		spin_unlock(&bpmp->lock);
+		tegra_bpmp_mrq_return(channel, -EINVAL, &zero, sizeof(zero));
+		return;
+	}
+
+	entry->handler(mrq, channel, entry->data);
+
+	spin_unlock(&bpmp->lock);
+}
+
+int tegra_bpmp_request_mrq(struct tegra_bpmp *bpmp, unsigned int mrq,
+			   tegra_bpmp_mrq_handler_t handler, void *data)
+{
+	struct tegra_bpmp_mrq *entry;
+	unsigned long flags;
+
+	if (!handler)
+		return -EINVAL;
+
+	entry = devm_kzalloc(bpmp->dev, sizeof(*entry), GFP_KERNEL);
+	if (!entry)
+		return -ENOMEM;
+
+	spin_lock_irqsave(&bpmp->lock, flags);
+
+	entry->mrq = mrq;
+	entry->handler = handler;
+	entry->data = data;
+	list_add(&entry->list, &bpmp->mrqs);
+
+	spin_unlock_irqrestore(&bpmp->lock, flags);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(tegra_bpmp_request_mrq);
+
+void tegra_bpmp_free_mrq(struct tegra_bpmp *bpmp, unsigned int mrq, void *data)
+{
+	struct tegra_bpmp_mrq *entry;
+	unsigned long flags;
+
+	spin_lock_irqsave(&bpmp->lock, flags);
+
+	entry = tegra_bpmp_find_mrq(bpmp, mrq);
+	if (!entry)
+		goto unlock;
+
+	list_del(&entry->list);
+	devm_kfree(bpmp->dev, entry);
+
+unlock:
+	spin_unlock_irqrestore(&bpmp->lock, flags);
+}
+EXPORT_SYMBOL_GPL(tegra_bpmp_free_mrq);
+
+static void tegra_bpmp_mrq_handle_ping(unsigned int mrq,
+				       struct tegra_bpmp_channel *channel,
+				       void *data)
+{
+	struct mrq_ping_request *request;
+	struct mrq_ping_response response;
+
+	request = (struct mrq_ping_request *)channel->ib->data;
+
+	memset(&response, 0, sizeof(response));
+	response.reply = request->challenge << 1;
+
+	tegra_bpmp_mrq_return(channel, 0, &response, sizeof(response));
+}
+
+static int tegra_bpmp_ping(struct tegra_bpmp *bpmp)
+{
+	struct mrq_ping_response response;
+	struct mrq_ping_request request;
+	struct tegra_bpmp_message msg;
+	unsigned long flags;
+	ktime_t start, end;
+	int err;
+
+	memset(&request, 0, sizeof(request));
+	request.challenge = 1;
+
+	memset(&response, 0, sizeof(response));
+
+	memset(&msg, 0, sizeof(msg));
+	msg.mrq = MRQ_PING;
+	msg.tx.data = &request;
+	msg.tx.size = sizeof(request);
+	msg.rx.data = &response;
+	msg.rx.size = sizeof(response);
+
+	local_irq_save(flags);
+	start = ktime_get();
+	err = tegra_bpmp_transfer_atomic(bpmp, &msg);
+	end = ktime_get();
+	local_irq_restore(flags);
+
+	if (!err)
+		dev_dbg(bpmp->dev,
+			"ping ok: challenge: %u, response: %u, time: %lld\n",
+			request.challenge, response.reply,
+			ktime_to_us(ktime_sub(end, start)));
+
+	return err;
+}
+
+static int tegra_bpmp_get_firmware_tag(struct tegra_bpmp *bpmp, char *tag,
+				       size_t size)
+{
+	struct mrq_query_tag_request request;
+	struct tegra_bpmp_message msg;
+	unsigned long flags;
+	dma_addr_t phys;
+	void *virt;
+	int err;
+
+	virt = dma_alloc_coherent(bpmp->dev, MSG_DATA_MIN_SZ, &phys,
+				  GFP_KERNEL | GFP_DMA32);
+	if (!virt)
+		return -ENOMEM;
+
+	memset(&request, 0, sizeof(request));
+	request.addr = phys;
+
+	memset(&msg, 0, sizeof(msg));
+	msg.mrq = MRQ_QUERY_TAG;
+	msg.tx.data = &request;
+	msg.tx.size = sizeof(request);
+
+	local_irq_save(flags);
+	err = tegra_bpmp_transfer_atomic(bpmp, &msg);
+	local_irq_restore(flags);
+
+	if (err == 0)
+		strlcpy(tag, virt, size);
+
+	dma_free_coherent(bpmp->dev, MSG_DATA_MIN_SZ, virt, phys);
+
+	return err;
+}
+
+static void tegra_bpmp_channel_signal(struct tegra_bpmp_channel *channel)
+{
+	unsigned long flags = channel->ob->flags;
+
+	if ((flags & MSG_RING) == 0)
+		return;
+
+	complete(&channel->completion);
+}
+
+static void tegra_bpmp_handle_rx(struct mbox_client *client, void *data)
+{
+	struct tegra_bpmp *bpmp = mbox_client_to_bpmp(client);
+	struct tegra_bpmp_channel *channel;
+	unsigned int i, count;
+	unsigned long *busy;
+
+	channel = bpmp->rx_channel;
+	count = bpmp->soc->channels.thread.count;
+	busy = bpmp->threaded.busy;
+
+	if (tegra_bpmp_master_acked(channel))
+		tegra_bpmp_handle_mrq(bpmp, channel->ib->code, channel);
+
+	spin_lock(&bpmp->lock);
+
+	for_each_set_bit(i, busy, count) {
+		struct tegra_bpmp_channel *channel;
+
+		channel = &bpmp->threaded_channels[i];
+
+		if (tegra_bpmp_master_acked(channel)) {
+			tegra_bpmp_channel_signal(channel);
+			clear_bit(i, busy);
+		}
+	}
+
+	spin_unlock(&bpmp->lock);
+}
+
+static void tegra_bpmp_ivc_notify(struct tegra_ivc *ivc, void *data)
+{
+	struct tegra_bpmp *bpmp = data;
+	int err;
+
+	if (WARN_ON(bpmp->mbox.channel == NULL))
+		return;
+
+	err = mbox_send_message(bpmp->mbox.channel, NULL);
+	if (err < 0)
+		return;
+
+	mbox_client_txdone(bpmp->mbox.channel, 0);
+}
+
+static int tegra_bpmp_channel_init(struct tegra_bpmp_channel *channel,
+				   struct tegra_bpmp *bpmp,
+				   unsigned int index)
+{
+	size_t message_size, queue_size;
+	unsigned int offset;
+	int err;
+
+	channel->ivc = devm_kzalloc(bpmp->dev, sizeof(*channel->ivc),
+				    GFP_KERNEL);
+	if (!channel->ivc)
+		return -ENOMEM;
+
+	message_size = tegra_ivc_align(MSG_MIN_SZ);
+	queue_size = tegra_ivc_total_queue_size(message_size);
+	offset = queue_size * index;
+
+	err = tegra_ivc_init(channel->ivc, NULL,
+			     bpmp->rx.virt + offset, bpmp->rx.phys + offset,
+			     bpmp->tx.virt + offset, bpmp->tx.phys + offset,
+			     1, message_size, tegra_bpmp_ivc_notify,
+			     bpmp);
+	if (err < 0) {
+		dev_err(bpmp->dev, "failed to setup IVC for channel %u: %d\n",
+			index, err);
+		return err;
+	}
+
+	init_completion(&channel->completion);
+	channel->bpmp = bpmp;
+
+	return 0;
+}
+
+static void tegra_bpmp_channel_reset(struct tegra_bpmp_channel *channel)
+{
+	/* reset the channel state */
+	tegra_ivc_reset(channel->ivc);
+
+	/* sync the channel state with BPMP */
+	while (tegra_ivc_notified(channel->ivc))
+		;
+}
+
+static void tegra_bpmp_channel_cleanup(struct tegra_bpmp_channel *channel)
+{
+	tegra_ivc_cleanup(channel->ivc);
+}
+
+static int tegra_bpmp_probe(struct platform_device *pdev)
+{
+	struct tegra_bpmp *bpmp;
+	unsigned int i;
+	char tag[32];
+	size_t size;
+	int err;
+
+	bpmp = devm_kzalloc(&pdev->dev, sizeof(*bpmp), GFP_KERNEL);
+	if (!bpmp)
+		return -ENOMEM;
+
+	bpmp->soc = of_device_get_match_data(&pdev->dev);
+	bpmp->dev = &pdev->dev;
+
+	bpmp->tx.pool = of_gen_pool_get(pdev->dev.of_node, "shmem", 0);
+	if (!bpmp->tx.pool) {
+		dev_err(&pdev->dev, "TX shmem pool not found\n");
+		return -ENOMEM;
+	}
+
+	bpmp->tx.virt = gen_pool_dma_alloc(bpmp->tx.pool, 4096, &bpmp->tx.phys);
+	if (!bpmp->tx.virt) {
+		dev_err(&pdev->dev, "failed to allocate from TX pool\n");
+		return -ENOMEM;
+	}
+
+	bpmp->rx.pool = of_gen_pool_get(pdev->dev.of_node, "shmem", 1);
+	if (!bpmp->rx.pool) {
+		dev_err(&pdev->dev, "RX shmem pool not found\n");
+		err = -ENOMEM;
+		goto free_tx;
+	}
+
+	bpmp->rx.virt = gen_pool_dma_alloc(bpmp->rx.pool, 4096, &bpmp->rx.phys);
+	if (!bpmp->rx.virt) {
+		dev_err(&pdev->dev, "failed to allocate from RX pool\n");
+		err = -ENOMEM;
+		goto free_tx;
+	}
+
+	INIT_LIST_HEAD(&bpmp->mrqs);
+	spin_lock_init(&bpmp->lock);
+
+	bpmp->threaded.count = bpmp->soc->channels.thread.count;
+	sema_init(&bpmp->threaded.lock, bpmp->threaded.count);
+
+	size = BITS_TO_LONGS(bpmp->threaded.count) * sizeof(long);
+
+	bpmp->threaded.allocated = devm_kzalloc(&pdev->dev, size, GFP_KERNEL);
+	if (!bpmp->threaded.allocated) {
+		err = -ENOMEM;
+		goto free_rx;
+	}
+
+	bpmp->threaded.busy = devm_kzalloc(&pdev->dev, size, GFP_KERNEL);
+	if (!bpmp->threaded.busy) {
+		err = -ENOMEM;
+		goto free_rx;
+	}
+
+	spin_lock_init(&bpmp->atomic_tx_lock);
+	bpmp->tx_channel = devm_kzalloc(&pdev->dev, sizeof(*bpmp->tx_channel),
+					GFP_KERNEL);
+	if (!bpmp->tx_channel) {
+		err = -ENOMEM;
+		goto free_rx;
+	}
+
+	bpmp->rx_channel = devm_kzalloc(&pdev->dev, sizeof(*bpmp->rx_channel),
+	                                GFP_KERNEL);
+	if (!bpmp->rx_channel) {
+		err = -ENOMEM;
+		goto free_rx;
+	}
+
+	bpmp->threaded_channels = devm_kcalloc(&pdev->dev, bpmp->threaded.count,
+					       sizeof(*bpmp->threaded_channels),
+					       GFP_KERNEL);
+	if (!bpmp->threaded_channels) {
+		err = -ENOMEM;
+		goto free_rx;
+	}
+
+	err = tegra_bpmp_channel_init(bpmp->tx_channel, bpmp,
+				      bpmp->soc->channels.cpu_tx.offset);
+	if (err < 0)
+		goto free_rx;
+
+	err = tegra_bpmp_channel_init(bpmp->rx_channel, bpmp,
+				      bpmp->soc->channels.cpu_rx.offset);
+	if (err < 0)
+		goto cleanup_tx_channel;
+
+	for (i = 0; i < bpmp->threaded.count; i++) {
+		err = tegra_bpmp_channel_init(
+			&bpmp->threaded_channels[i], bpmp,
+			bpmp->soc->channels.thread.offset + i);
+		if (err < 0)
+			goto cleanup_threaded_channels;
+	}
+
+	/* mbox registration */
+	bpmp->mbox.client.dev = &pdev->dev;
+	bpmp->mbox.client.rx_callback = tegra_bpmp_handle_rx;
+	bpmp->mbox.client.tx_block = false;
+	bpmp->mbox.client.knows_txdone = false;
+
+	bpmp->mbox.channel = mbox_request_channel(&bpmp->mbox.client, 0);
+	if (IS_ERR(bpmp->mbox.channel)) {
+		err = PTR_ERR(bpmp->mbox.channel);
+		dev_err(&pdev->dev, "failed to get HSP mailbox: %d\n", err);
+		goto cleanup_threaded_channels;
+	}
+
+	/* reset message channels */
+	tegra_bpmp_channel_reset(bpmp->tx_channel);
+	tegra_bpmp_channel_reset(bpmp->rx_channel);
+	for (i = 0; i < bpmp->threaded.count; i++)
+		tegra_bpmp_channel_reset(&bpmp->threaded_channels[i]);
+
+	err = tegra_bpmp_request_mrq(bpmp, MRQ_PING,
+				     tegra_bpmp_mrq_handle_ping, bpmp);
+	if (err < 0)
+		goto free_mbox;
+
+	err = tegra_bpmp_ping(bpmp);
+	if (err < 0) {
+		dev_err(&pdev->dev, "failed to ping BPMP: %d\n", err);
+		goto free_mrq;
+	}
+
+	err = tegra_bpmp_get_firmware_tag(bpmp, tag, sizeof(tag) - 1);
+	if (err < 0) {
+		dev_err(&pdev->dev, "failed to get firmware tag: %d\n", err);
+		goto free_mrq;
+	}
+
+	dev_info(&pdev->dev, "firmware: %s\n", tag);
+
+	platform_set_drvdata(pdev, bpmp);
+
+	err = of_platform_default_populate(pdev->dev.of_node, NULL, &pdev->dev);
+	if (err < 0)
+		goto free_mrq;
+
+	err = tegra_bpmp_init_clocks(bpmp);
+	if (err < 0)
+		goto free_mrq;
+
+	err = tegra_bpmp_init_resets(bpmp);
+	if (err < 0)
+		goto free_mrq;
+
+	err = tegra_bpmp_init_powergates(bpmp);
+	if (err < 0)
+		goto free_mrq;
+
+	err = tegra_bpmp_init_debugfs(bpmp);
+	if (err < 0)
+		dev_err(&pdev->dev, "debugfs initialization failed: %d\n", err);
+
+	return 0;
+
+free_mrq:
+	tegra_bpmp_free_mrq(bpmp, MRQ_PING, bpmp);
+free_mbox:
+	mbox_free_channel(bpmp->mbox.channel);
+cleanup_threaded_channels:
+	for (i = 0; i < bpmp->threaded.count; i++) {
+		if (bpmp->threaded_channels[i].bpmp)
+			tegra_bpmp_channel_cleanup(&bpmp->threaded_channels[i]);
+	}
+
+	tegra_bpmp_channel_cleanup(bpmp->rx_channel);
+cleanup_tx_channel:
+	tegra_bpmp_channel_cleanup(bpmp->tx_channel);
+free_rx:
+	gen_pool_free(bpmp->rx.pool, (unsigned long)bpmp->rx.virt, 4096);
+free_tx:
+	gen_pool_free(bpmp->tx.pool, (unsigned long)bpmp->tx.virt, 4096);
+	return err;
+}
+
+static const struct tegra_bpmp_soc tegra186_soc = {
+	.channels = {
+		.cpu_tx = {
+			.offset = 3,
+			.timeout = 60 * USEC_PER_SEC,
+		},
+		.thread = {
+			.offset = 0,
+			.count = 3,
+			.timeout = 600 * USEC_PER_SEC,
+		},
+		.cpu_rx = {
+			.offset = 13,
+			.timeout = 0,
+		},
+	},
+	.num_resets = 193,
+};
+
+static const struct of_device_id tegra_bpmp_match[] = {
+	{ .compatible = "nvidia,tegra186-bpmp", .data = &tegra186_soc },
+	{ }
+};
+
+static struct platform_driver tegra_bpmp_driver = {
+	.driver = {
+		.name = "tegra-bpmp",
+		.of_match_table = tegra_bpmp_match,
+	},
+	.probe = tegra_bpmp_probe,
+};
+
+static int __init tegra_bpmp_init(void)
+{
+	return platform_driver_register(&tegra_bpmp_driver);
+}
+core_initcall(tegra_bpmp_init);
diff --git a/drivers/firmware/tegra/ivc.c b/drivers/firmware/tegra/ivc.c
new file mode 100644
index 0000000..00de793
--- /dev/null
+++ b/drivers/firmware/tegra/ivc.c
@@ -0,0 +1,695 @@
+/*
+ * Copyright (c) 2014-2016, NVIDIA CORPORATION.  All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ */
+
+#include <soc/tegra/ivc.h>
+
+#define TEGRA_IVC_ALIGN 64
+
+/*
+ * IVC channel reset protocol.
+ *
+ * Each end uses its tx_channel.state to indicate its synchronization state.
+ */
+enum tegra_ivc_state {
+	/*
+	 * This value is zero for backwards compatibility with services that
+	 * assume channels to be initially zeroed. Such channels are in an
+	 * initially valid state, but cannot be asynchronously reset, and must
+	 * maintain a valid state at all times.
+	 *
+	 * The transmitting end can enter the established state from the sync or
+	 * ack state when it observes the receiving endpoint in the ack or
+	 * established state, indicating that it has cleared the counters in our
+	 * rx_channel.
+	 */
+	TEGRA_IVC_STATE_ESTABLISHED = 0,
+
+	/*
+	 * If an endpoint is observed in the sync state, the remote endpoint is
+	 * allowed to clear the counters it owns asynchronously with respect to
+	 * the current endpoint. Therefore, the current endpoint is no longer
+	 * allowed to communicate.
+	 */
+	TEGRA_IVC_STATE_SYNC,
+
+	/*
+	 * When the transmitting end observes the receiving end in the sync
+	 * state, it can clear the w_count and r_count and transition to the ack
+	 * state. If the remote endpoint observes us in the ack state, it can
+	 * return to the established state once it has cleared its counters.
+	 */
+	TEGRA_IVC_STATE_ACK
+};
+
+/*
+ * This structure is divided into two cache-aligned parts: the first is only
+ * written through the tx.channel pointer, while the second is only written
+ * through the rx.channel pointer. This delineates ownership of the cache
+ * lines, which is critical to performance and necessary in non-cache coherent
+ * implementations.
+ */
+struct tegra_ivc_header {
+	union {
+		struct {
+			/* fields owned by the transmitting end */
+			u32 count;
+			u32 state;
+		};
+
+		u8 pad[TEGRA_IVC_ALIGN];
+	} tx;
+
+	union {
+		/* fields owned by the receiving end */
+		u32 count;
+		u8 pad[TEGRA_IVC_ALIGN];
+	} rx;
+};
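+
+/*
+ * Editor's layout sketch, inferred from tegra_ivc_frame_virt() and
+ * tegra_ivc_frame_phys() below (an assumption drawn from the code, not
+ * a documented ABI diagram):
+ *
+ *	+----------------------+ <- queue base
+ *	| tx.count, tx.state   |   one TEGRA_IVC_ALIGN (64-byte) line,
+ *	+----------------------+   written only by the transmitter
+ *	| rx.count             |   one 64-byte line, written only by
+ *	+----------------------+   the receiver
+ *	| frame 0              |
+ *	| ...                  |   num_frames frames of frame_size
+ *	| frame num_frames - 1 |   bytes each
+ *	+----------------------+
+ */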
+
+static inline void tegra_ivc_invalidate(struct tegra_ivc *ivc, dma_addr_t phys)
+{
+	if (!ivc->peer)
+		return;
+
+	dma_sync_single_for_cpu(ivc->peer, phys, TEGRA_IVC_ALIGN,
+				DMA_FROM_DEVICE);
+}
+
+static inline void tegra_ivc_flush(struct tegra_ivc *ivc, dma_addr_t phys)
+{
+	if (!ivc->peer)
+		return;
+
+	dma_sync_single_for_device(ivc->peer, phys, TEGRA_IVC_ALIGN,
+				   DMA_TO_DEVICE);
+}
+
+static inline bool tegra_ivc_empty(struct tegra_ivc *ivc,
+				   struct tegra_ivc_header *header)
+{
+	/*
+	 * This function performs multiple checks on the same values with
+	 * security implications, so create snapshots with READ_ONCE() to
+	 * ensure that these checks use the same values.
+	 */
+	u32 tx = READ_ONCE(header->tx.count);
+	u32 rx = READ_ONCE(header->rx.count);
+
+	/*
+	 * Perform an over-full check to prevent denial of service attacks
+	 * where a server could be easily fooled into believing that there's
+	 * an extremely large number of frames ready, since receivers are not
+	 * expected to check for full or over-full conditions.
+	 *
+	 * Although the channel isn't empty, this is an invalid case caused by
+	 * a potentially malicious peer, so returning empty is safer, because
+	 * it gives the impression that the channel has gone silent.
+	 */
+	if (tx - rx > ivc->num_frames)
+		return true;
+
+	return tx == rx;
+}
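+
+/*
+ * Editor's worked example for the check above: with num_frames == 2,
+ * tx == 3 and rx == 1 give tx - rx == 2, i.e. exactly full but valid.
+ * A peer advertising tx == 100 with rx == 1 gives tx - rx == 99 >
+ * num_frames, which is reported as empty instead of as a huge backlog.
+ * Because the subtraction is unsigned, the result also stays correct
+ * across u32 counter wraparound: tx == 1, rx == 0xffffffff yields 2.
+ */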
+
+static inline bool tegra_ivc_full(struct tegra_ivc *ivc,
+				  struct tegra_ivc_header *header)
+{
+	u32 tx = READ_ONCE(header->tx.count);
+	u32 rx = READ_ONCE(header->rx.count);
+
+	/*
+	 * Invalid cases where the counters indicate that the queue is over
+	 * capacity also appear full.
+	 */
+	return tx - rx >= ivc->num_frames;
+}
+
+static inline u32 tegra_ivc_available(struct tegra_ivc *ivc,
+				      struct tegra_ivc_header *header)
+{
+	u32 tx = READ_ONCE(header->tx.count);
+	u32 rx = READ_ONCE(header->rx.count);
+
+	/*
+	 * This function isn't expected to be used in scenarios where an
+	 * over-full situation can lead to denial of service attacks. See the
+	 * comment in tegra_ivc_empty() for an explanation about special
+	 * over-full considerations.
+	 */
+	return tx - rx;
+}
+
+static inline void tegra_ivc_advance_tx(struct tegra_ivc *ivc)
+{
+	WRITE_ONCE(ivc->tx.channel->tx.count,
+		   READ_ONCE(ivc->tx.channel->tx.count) + 1);
+
+	if (ivc->tx.position == ivc->num_frames - 1)
+		ivc->tx.position = 0;
+	else
+		ivc->tx.position++;
+}
+
+static inline void tegra_ivc_advance_rx(struct tegra_ivc *ivc)
+{
+	WRITE_ONCE(ivc->rx.channel->rx.count,
+		   READ_ONCE(ivc->rx.channel->rx.count) + 1);
+
+	if (ivc->rx.position == ivc->num_frames - 1)
+		ivc->rx.position = 0;
+	else
+		ivc->rx.position++;
+}
+
+static inline int tegra_ivc_check_read(struct tegra_ivc *ivc)
+{
+	unsigned int offset = offsetof(struct tegra_ivc_header, tx.count);
+
+	/*
+	 * tx.channel->state is set locally, so it is not synchronized with
+	 * state from the remote peer. The remote peer cannot reset its
+	 * transmit counters until we've acknowledged its synchronization
+	 * request, so no additional synchronization is required because an
+	 * asynchronous transition of rx.channel->state to
+	 * TEGRA_IVC_STATE_ACK is not allowed.
+	 */
+	if (ivc->tx.channel->tx.state != TEGRA_IVC_STATE_ESTABLISHED)
+		return -ECONNRESET;
+
+	/*
+	 * Avoid unnecessary invalidations when performing repeated accesses
+	 * to an IVC channel by checking the old queue pointers first.
+	 *
+	 * Synchronization is only necessary when these pointers indicate
+	 * empty or full.
+	 */
+	if (!tegra_ivc_empty(ivc, ivc->rx.channel))
+		return 0;
+
+	tegra_ivc_invalidate(ivc, ivc->rx.phys + offset);
+
+	if (tegra_ivc_empty(ivc, ivc->rx.channel))
+		return -ENOSPC;
+
+	return 0;
+}
+
+static inline int tegra_ivc_check_write(struct tegra_ivc *ivc)
+{
+	unsigned int offset = offsetof(struct tegra_ivc_header, rx.count);
+
+	if (ivc->tx.channel->tx.state != TEGRA_IVC_STATE_ESTABLISHED)
+		return -ECONNRESET;
+
+	if (!tegra_ivc_full(ivc, ivc->tx.channel))
+		return 0;
+
+	tegra_ivc_invalidate(ivc, ivc->tx.phys + offset);
+
+	if (tegra_ivc_full(ivc, ivc->tx.channel))
+		return -ENOSPC;
+
+	return 0;
+}
+
+static void *tegra_ivc_frame_virt(struct tegra_ivc *ivc,
+				  struct tegra_ivc_header *header,
+				  unsigned int frame)
+{
+	if (WARN_ON(frame >= ivc->num_frames))
+		return ERR_PTR(-EINVAL);
+
+	return (void *)(header + 1) + ivc->frame_size * frame;
+}
+
+static inline dma_addr_t tegra_ivc_frame_phys(struct tegra_ivc *ivc,
+					      dma_addr_t phys,
+					      unsigned int frame)
+{
+	unsigned long offset;
+
+	offset = sizeof(struct tegra_ivc_header) + ivc->frame_size * frame;
+
+	return phys + offset;
+}
+
+static inline void tegra_ivc_invalidate_frame(struct tegra_ivc *ivc,
+					      dma_addr_t phys,
+					      unsigned int frame,
+					      unsigned int offset,
+					      size_t size)
+{
+	if (!ivc->peer || WARN_ON(frame >= ivc->num_frames))
+		return;
+
+	phys = tegra_ivc_frame_phys(ivc, phys, frame) + offset;
+
+	dma_sync_single_for_cpu(ivc->peer, phys, size, DMA_FROM_DEVICE);
+}
+
+static inline void tegra_ivc_flush_frame(struct tegra_ivc *ivc,
+					 dma_addr_t phys,
+					 unsigned int frame,
+					 unsigned int offset,
+					 size_t size)
+{
+	if (!ivc->peer || WARN_ON(frame >= ivc->num_frames))
+		return;
+
+	phys = tegra_ivc_frame_phys(ivc, phys, frame) + offset;
+
+	dma_sync_single_for_device(ivc->peer, phys, size, DMA_TO_DEVICE);
+}
+
+/* directly peek at the next frame rx'ed */
+void *tegra_ivc_read_get_next_frame(struct tegra_ivc *ivc)
+{
+	int err;
+
+	if (WARN_ON(ivc == NULL))
+		return ERR_PTR(-EINVAL);
+
+	err = tegra_ivc_check_read(ivc);
+	if (err < 0)
+		return ERR_PTR(err);
+
+	/*
+	 * Order observation of ivc->rx.position potentially indicating new
+	 * data before data read.
+	 */
+	smp_rmb();
+
+	tegra_ivc_invalidate_frame(ivc, ivc->rx.phys, ivc->rx.position, 0,
+				   ivc->frame_size);
+
+	return tegra_ivc_frame_virt(ivc, ivc->rx.channel, ivc->rx.position);
+}
+EXPORT_SYMBOL(tegra_ivc_read_get_next_frame);
+
+int tegra_ivc_read_advance(struct tegra_ivc *ivc)
+{
+	unsigned int rx = offsetof(struct tegra_ivc_header, rx.count);
+	unsigned int tx = offsetof(struct tegra_ivc_header, tx.count);
+	int err;
+
+	/*
+	 * No read barriers or synchronization here: the caller is expected to
+	 * have already observed the channel non-empty. This check is just to
+	 * catch programming errors.
+	 */
+	err = tegra_ivc_check_read(ivc);
+	if (err < 0)
+		return err;
+
+	tegra_ivc_advance_rx(ivc);
+
+	tegra_ivc_flush(ivc, ivc->rx.phys + rx);
+
+	/*
+	 * Ensure our write to ivc->rx.position occurs before our read from
+	 * ivc->tx.position.
+	 */
+	smp_mb();
+
+	/*
+	 * Notify only upon transition from full to non-full. The available
+	 * count can only asynchronously increase, so the worst possible
+	 * side-effect will be a spurious notification.
+	 */
+	tegra_ivc_invalidate(ivc, ivc->rx.phys + tx);
+
+	if (tegra_ivc_available(ivc, ivc->rx.channel) == ivc->num_frames - 1)
+		ivc->notify(ivc, ivc->notify_data);
+
+	return 0;
+}
+EXPORT_SYMBOL(tegra_ivc_read_advance);
+
+/* directly poke at the next frame to be tx'ed */
+void *tegra_ivc_write_get_next_frame(struct tegra_ivc *ivc)
+{
+	int err;
+
+	err = tegra_ivc_check_write(ivc);
+	if (err < 0)
+		return ERR_PTR(err);
+
+	return tegra_ivc_frame_virt(ivc, ivc->tx.channel, ivc->tx.position);
+}
+EXPORT_SYMBOL(tegra_ivc_write_get_next_frame);
+
+/* advance the tx buffer */
+int tegra_ivc_write_advance(struct tegra_ivc *ivc)
+{
+	unsigned int tx = offsetof(struct tegra_ivc_header, tx.count);
+	unsigned int rx = offsetof(struct tegra_ivc_header, rx.count);
+	int err;
+
+	err = tegra_ivc_check_write(ivc);
+	if (err < 0)
+		return err;
+
+	tegra_ivc_flush_frame(ivc, ivc->tx.phys, ivc->tx.position, 0,
+			      ivc->frame_size);
+
+	/*
+	 * Order any possible stores to the frame before update of
+	 * ivc->tx.position.
+	 */
+	smp_wmb();
+
+	tegra_ivc_advance_tx(ivc);
+	tegra_ivc_flush(ivc, ivc->tx.phys + tx);
+
+	/*
+	 * Ensure our write to ivc->tx.position occurs before our read from
+	 * ivc->rx.position.
+	 */
+	smp_mb();
+
+	/*
+	 * Notify only upon transition from empty to non-empty. The available
+	 * count can only asynchronously decrease, so the worst possible
+	 * side-effect will be a spurious notification.
+	 */
+	tegra_ivc_invalidate(ivc, ivc->tx.phys + rx);
+
+	if (tegra_ivc_available(ivc, ivc->tx.channel) == 1)
+		ivc->notify(ivc, ivc->notify_data);
+
+	return 0;
+}
+EXPORT_SYMBOL(tegra_ivc_write_advance);
+
+void tegra_ivc_reset(struct tegra_ivc *ivc)
+{
+	unsigned int offset = offsetof(struct tegra_ivc_header, tx.count);
+
+	ivc->tx.channel->tx.state = TEGRA_IVC_STATE_SYNC;
+	tegra_ivc_flush(ivc, ivc->tx.phys + offset);
+	ivc->notify(ivc, ivc->notify_data);
+}
+EXPORT_SYMBOL(tegra_ivc_reset);
+
+/*
+ * =======================================================
+ *  IVC State Transition Table - see tegra_ivc_notified()
+ * =======================================================
+ *
+ *	local	remote	action
+ *	-----	------	-----------------------------------
+ *	SYNC	EST	<none>
+ *	SYNC	ACK	reset counters; move to EST; notify
+ *	SYNC	SYNC	reset counters; move to ACK; notify
+ *	ACK	EST	move to EST; notify
+ *	ACK	ACK	move to EST; notify
+ *	ACK	SYNC	reset counters; move to ACK; notify
+ *	EST	EST	<none>
+ *	EST	ACK	<none>
+ *	EST	SYNC	reset counters; move to ACK; notify
+ *
+ * =======================================================
+ */
+
+int tegra_ivc_notified(struct tegra_ivc *ivc)
+{
+	unsigned int offset = offsetof(struct tegra_ivc_header, tx.count);
+	enum tegra_ivc_state state;
+
+	/* Copy the receiver's state out of shared memory. */
+	tegra_ivc_invalidate(ivc, ivc->rx.phys + offset);
+	state = READ_ONCE(ivc->rx.channel->tx.state);
+
+	if (state == TEGRA_IVC_STATE_SYNC) {
+		offset = offsetof(struct tegra_ivc_header, tx.count);
+
+		/*
+		 * Order observation of TEGRA_IVC_STATE_SYNC before stores
+		 * clearing tx.channel.
+		 */
+		smp_rmb();
+
+		/*
+		 * Reset tx.channel counters. The remote end is in the SYNC
+		 * state and won't make progress until we change our state,
+		 * so the counters are not in use at this time.
+		 */
+		ivc->tx.channel->tx.count = 0;
+		ivc->rx.channel->rx.count = 0;
+
+		ivc->tx.position = 0;
+		ivc->rx.position = 0;
+
+		/*
+		 * Ensure that counters appear cleared before new state can be
+		 * observed.
+		 */
+		smp_wmb();
+
+		/*
+		 * Move to ACK state. We have just cleared our counters, so it
+		 * is now safe for the remote end to start using these values.
+		 */
+		ivc->tx.channel->tx.state = TEGRA_IVC_STATE_ACK;
+		tegra_ivc_flush(ivc, ivc->tx.phys + offset);
+
+		/*
+		 * Notify remote end to observe state transition.
+		 */
+		ivc->notify(ivc, ivc->notify_data);
+
+	} else if (ivc->tx.channel->tx.state == TEGRA_IVC_STATE_SYNC &&
+		   state == TEGRA_IVC_STATE_ACK) {
+		offset = offsetof(struct tegra_ivc_header, tx.count);
+
+		/*
+		 * Order observation of TEGRA_IVC_STATE_ACK before stores
+		 * clearing tx.channel.
+		 */
+		smp_rmb();
+
+		/*
+		 * Reset tx.channel counters. The remote end is in the ACK
+		 * state and won't make progress until we change our state,
+		 * so the counters are not in use at this time.
+		 */
+		ivc->tx.channel->tx.count = 0;
+		ivc->rx.channel->rx.count = 0;
+
+		ivc->tx.position = 0;
+		ivc->rx.position = 0;
+
+		/*
+		 * Ensure that counters appear cleared before new state can be
+		 * observed.
+		 */
+		smp_wmb();
+
+		/*
+		 * Move to ESTABLISHED state. We know that the remote end has
+		 * already cleared its counters, so it is safe to start
+		 * writing/reading on this channel.
+		 */
+		ivc->tx.channel->tx.state = TEGRA_IVC_STATE_ESTABLISHED;
+		tegra_ivc_flush(ivc, ivc->tx.phys + offset);
+
+		/*
+		 * Notify remote end to observe state transition.
+		 */
+		ivc->notify(ivc, ivc->notify_data);
+
+	} else if (ivc->tx.channel->tx.state == TEGRA_IVC_STATE_ACK) {
+		offset = offsetof(struct tegra_ivc_header, tx.count);
+
+		/*
+		 * At this point, we have observed the peer to be in either
+		 * the ACK or ESTABLISHED state. Next, order observation of
+		 * peer state before storing to tx.channel.
+		 */
+		smp_rmb();
+
+		/*
+		 * Move to ESTABLISHED state. We know that we have previously
+		 * cleared our counters, and we know that the remote end has
+		 * cleared its counters, so it is safe to start writing/reading
+		 * on this channel.
+		 */
+		ivc->tx.channel->tx.state = TEGRA_IVC_STATE_ESTABLISHED;
+		tegra_ivc_flush(ivc, ivc->tx.phys + offset);
+
+		/*
+		 * Notify remote end to observe state transition.
+		 */
+		ivc->notify(ivc, ivc->notify_data);
+
+	} else {
+		/*
+		 * There is no need to handle any further action. Either the
+		 * channel is already fully established, or we are waiting for
+		 * the remote end to catch up with our current state. Refer
+		 * to the diagram in "IVC State Transition Table" above.
+		 */
+	}
+
+	if (ivc->tx.channel->tx.state != TEGRA_IVC_STATE_ESTABLISHED)
+		return -EAGAIN;
+
+	return 0;
+}
+EXPORT_SYMBOL(tegra_ivc_notified);
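+
+/*
+ * Editor's usage sketch: after tegra_ivc_reset(), a client is expected
+ * to keep calling tegra_ivc_notified() until it returns 0, at which
+ * point the channel is established. tegra_bpmp_channel_reset() in
+ * bpmp.c does exactly this:
+ *
+ *	tegra_ivc_reset(channel->ivc);
+ *	while (tegra_ivc_notified(channel->ivc))
+ *		;
+ */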
+
+size_t tegra_ivc_align(size_t size)
+{
+	return ALIGN(size, TEGRA_IVC_ALIGN);
+}
+EXPORT_SYMBOL(tegra_ivc_align);
+
+unsigned tegra_ivc_total_queue_size(unsigned queue_size)
+{
+	if (!IS_ALIGNED(queue_size, TEGRA_IVC_ALIGN)) {
+		pr_err("%s: queue_size (%u) must be %u-byte aligned\n",
+		       __func__, queue_size, TEGRA_IVC_ALIGN);
+		return 0;
+	}
+
+	return queue_size + sizeof(struct tegra_ivc_header);
+}
+EXPORT_SYMBOL(tegra_ivc_total_queue_size);
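+
+/*
+ * Editor's example: the header is two 64-byte cache lines, so a
+ * 64-byte-aligned queue of, say, 256 bytes needs
+ * 256 + sizeof(struct tegra_ivc_header) == 256 + 128 == 384 bytes of
+ * shared memory in total.
+ */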
+
+static int tegra_ivc_check_params(unsigned long rx, unsigned long tx,
+				  unsigned int num_frames, size_t frame_size)
+{
+	BUILD_BUG_ON(!IS_ALIGNED(offsetof(struct tegra_ivc_header, tx.count),
+				 TEGRA_IVC_ALIGN));
+	BUILD_BUG_ON(!IS_ALIGNED(offsetof(struct tegra_ivc_header, rx.count),
+				 TEGRA_IVC_ALIGN));
+	BUILD_BUG_ON(!IS_ALIGNED(sizeof(struct tegra_ivc_header),
+				 TEGRA_IVC_ALIGN));
+
+	if ((uint64_t)num_frames * (uint64_t)frame_size >= 0x100000000UL) {
+		pr_err("num_frames * frame_size overflows\n");
+		return -EINVAL;
+	}
+
+	if (!IS_ALIGNED(frame_size, TEGRA_IVC_ALIGN)) {
+		pr_err("frame size not adequately aligned: %zu\n", frame_size);
+		return -EINVAL;
+	}
+
+	/*
+	 * The headers must at least be aligned enough for counters
+	 * to be accessed atomically.
+	 */
+	if (!IS_ALIGNED(rx, TEGRA_IVC_ALIGN)) {
+		pr_err("IVC channel start not aligned: %#lx\n", rx);
+		return -EINVAL;
+	}
+
+	if (!IS_ALIGNED(tx, TEGRA_IVC_ALIGN)) {
+		pr_err("IVC channel start not aligned: %#lx\n", tx);
+		return -EINVAL;
+	}
+
+	if (rx < tx) {
+		if (rx + frame_size * num_frames > tx) {
+			pr_err("queue regions overlap: %#lx + %zx > %#lx\n",
+			       rx, frame_size * num_frames, tx);
+			return -EINVAL;
+		}
+	} else {
+		if (tx + frame_size * num_frames > rx) {
+			pr_err("queue regions overlap: %#lx + %zx > %#lx\n",
+			       tx, frame_size * num_frames, rx);
+			return -EINVAL;
+		}
+	}
+
+	return 0;
+}
+
+int tegra_ivc_init(struct tegra_ivc *ivc, struct device *peer, void *rx,
+		   dma_addr_t rx_phys, void *tx, dma_addr_t tx_phys,
+		   unsigned int num_frames, size_t frame_size,
+		   void (*notify)(struct tegra_ivc *ivc, void *data),
+		   void *data)
+{
+	size_t queue_size;
+	int err;
+
+	if (WARN_ON(!ivc || !notify))
+		return -EINVAL;
+
+	/*
+	 * All sizes that can be returned by communication functions should
+	 * fit in an int.
+	 */
+	if (frame_size > INT_MAX)
+		return -E2BIG;
+
+	err = tegra_ivc_check_params((unsigned long)rx, (unsigned long)tx,
+				     num_frames, frame_size);
+	if (err < 0)
+		return err;
+
+	queue_size = tegra_ivc_total_queue_size(num_frames * frame_size);
+
+	if (peer) {
+		ivc->rx.phys = dma_map_single(peer, rx, queue_size,
+					      DMA_BIDIRECTIONAL);
+		if (dma_mapping_error(peer, ivc->rx.phys))
+			return -ENOMEM;
+
+		ivc->tx.phys = dma_map_single(peer, tx, queue_size,
+					      DMA_BIDIRECTIONAL);
+		if (dma_mapping_error(peer, ivc->tx.phys)) {
+			dma_unmap_single(peer, ivc->rx.phys, queue_size,
+					 DMA_BIDIRECTIONAL);
+			return -ENOMEM;
+		}
+	} else {
+		ivc->rx.phys = rx_phys;
+		ivc->tx.phys = tx_phys;
+	}
+
+	ivc->rx.channel = rx;
+	ivc->tx.channel = tx;
+	ivc->peer = peer;
+	ivc->notify = notify;
+	ivc->notify_data = data;
+	ivc->frame_size = frame_size;
+	ivc->num_frames = num_frames;
+
+	/*
+	 * These values aren't necessarily correct until the channel has been
+	 * reset.
+	 */
+	ivc->tx.position = 0;
+	ivc->rx.position = 0;
+
+	return 0;
+}
+EXPORT_SYMBOL(tegra_ivc_init);
+
+void tegra_ivc_cleanup(struct tegra_ivc *ivc)
+{
+	if (ivc->peer) {
+		size_t size = tegra_ivc_total_queue_size(ivc->num_frames *
+							 ivc->frame_size);
+
+		dma_unmap_single(ivc->peer, ivc->rx.phys, size,
+				 DMA_BIDIRECTIONAL);
+		dma_unmap_single(ivc->peer, ivc->tx.phys, size,
+				 DMA_BIDIRECTIONAL);
+	}
+}
+EXPORT_SYMBOL(tegra_ivc_cleanup);
diff --git a/drivers/firmware/ti_sci.c b/drivers/firmware/ti_sci.c
new file mode 100644
index 0000000..7fa7447
--- /dev/null
+++ b/drivers/firmware/ti_sci.c
@@ -0,0 +1,1984 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Texas Instruments System Control Interface Protocol Driver
+ *
+ * Copyright (C) 2015-2016 Texas Instruments Incorporated - http://www.ti.com/
+ *	Nishanth Menon
+ */
+
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/bitmap.h>
+#include <linux/debugfs.h>
+#include <linux/export.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/mailbox_client.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/semaphore.h>
+#include <linux/slab.h>
+#include <linux/soc/ti/ti-msgmgr.h>
+#include <linux/soc/ti/ti_sci_protocol.h>
+#include <linux/reboot.h>
+
+#include "ti_sci.h"
+
+/* List of all TI SCI devices active in system */
+static LIST_HEAD(ti_sci_list);
+/* Protection for the entire list */
+static DEFINE_MUTEX(ti_sci_list_mutex);
+
+/**
+ * struct ti_sci_xfer - Structure representing a message flow
+ * @tx_message:	Transmit message
+ * @rx_len:	Receive message length
+ * @xfer_buf:	Preallocated buffer to store receive message
+ *		Since we work with request-ACK protocol, we can
+ *		reuse the same buffer for the rx path as we
+ *		use for the tx path.
+ * @done:	completion event
+ */
+struct ti_sci_xfer {
+	struct ti_msgmgr_message tx_message;
+	u8 rx_len;
+	u8 *xfer_buf;
+	struct completion done;
+};
+
+/**
+ * struct ti_sci_xfers_info - Structure to manage transfer information
+ * @sem_xfer_count:	Counting semaphore for managing the maximum number of
+ *			simultaneous messages.
+ * @xfer_block:		Preallocated Message array
+ * @xfer_alloc_table:	Bitmap table for allocated messages.
+ *			Index of this bitmap table is also used for message
+ *			sequence identifier.
+ * @xfer_lock:		Protection for message allocation
+ */
+struct ti_sci_xfers_info {
+	struct semaphore sem_xfer_count;
+	struct ti_sci_xfer *xfer_block;
+	unsigned long *xfer_alloc_table;
+	/* protect transfer allocation */
+	spinlock_t xfer_lock;
+};
+
+/**
+ * struct ti_sci_desc - Description of SoC integration
+ * @host_id:		Host identifier representing the compute entity
+ * @max_rx_timeout_ms:	Timeout for communication with the SoC (in milliseconds)
+ * @max_msgs: Maximum number of messages that can be pending
+ *		  simultaneously in the system
+ * @max_msg_size: Maximum size of data per message that can be handled.
+ */
+struct ti_sci_desc {
+	u8 host_id;
+	int max_rx_timeout_ms;
+	int max_msgs;
+	int max_msg_size;
+};
+
+/**
+ * struct ti_sci_info - Structure representing a TI SCI instance
+ * @dev:	Device pointer
+ * @desc:	SoC description for this instance
+ * @nb:	Reboot Notifier block
+ * @d:		Debugfs file entry
+ * @debug_region: Memory region where the debug messages are available
+ * @debug_region_size: Debug region size
+ * @debug_buffer: Buffer allocated to copy debug messages.
+ * @handle:	Instance of TI SCI handle to send to clients.
+ * @cl:		Mailbox Client
+ * @chan_tx:	Transmit mailbox channel
+ * @chan_rx:	Receive mailbox channel
+ * @minfo:	Message info
+ * @node:	list head
+ * @users:	Number of users of this instance
+ */
+struct ti_sci_info {
+	struct device *dev;
+	struct notifier_block nb;
+	const struct ti_sci_desc *desc;
+	struct dentry *d;
+	void __iomem *debug_region;
+	char *debug_buffer;
+	size_t debug_region_size;
+	struct ti_sci_handle handle;
+	struct mbox_client cl;
+	struct mbox_chan *chan_tx;
+	struct mbox_chan *chan_rx;
+	struct ti_sci_xfers_info minfo;
+	struct list_head node;
+	/* protected by ti_sci_list_mutex */
+	int users;
+};
+
+#define cl_to_ti_sci_info(c)	container_of(c, struct ti_sci_info, cl)
+#define handle_to_ti_sci_info(h) container_of(h, struct ti_sci_info, handle)
+#define reboot_to_ti_sci_info(n) container_of(n, struct ti_sci_info, nb)
+
+#ifdef CONFIG_DEBUG_FS
+
+/**
+ * ti_sci_debug_show() - Helper to dump the debug log
+ * @s:	sequence file pointer
+ * @unused:	unused.
+ *
+ * Return: 0
+ */
+static int ti_sci_debug_show(struct seq_file *s, void *unused)
+{
+	struct ti_sci_info *info = s->private;
+
+	memcpy_fromio(info->debug_buffer, info->debug_region,
+		      info->debug_region_size);
+	/*
+	 * We don't trust the firmware to NUL-terminate the last byte (hence
+	 * we have allocated 1 extra zeroed byte). Since we cannot guarantee
+	 * any specific data format for debug messages, we just present the
+	 * data in the buffer as is - we expect the messages to be
+	 * self-explanatory.
+	 */
+	seq_puts(s, info->debug_buffer);
+	return 0;
+}
+
+/**
+ * ti_sci_debug_open() - debug file open
+ * @inode:	inode pointer
+ * @file:	file pointer
+ *
+ * Return: result of single_open
+ */
+static int ti_sci_debug_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, ti_sci_debug_show, inode->i_private);
+}
+
+/* log file operations */
+static const struct file_operations ti_sci_debug_fops = {
+	.open = ti_sci_debug_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release,
+};
+
+/**
+ * ti_sci_debugfs_create() - Create log debug file
+ * @pdev:	platform device pointer
+ * @info:	Pointer to SCI entity information
+ *
+ * Return: 0 if all went fine, else corresponding error.
+ */
+static int ti_sci_debugfs_create(struct platform_device *pdev,
+				 struct ti_sci_info *info)
+{
+	struct device *dev = &pdev->dev;
+	struct resource *res;
+	char debug_name[50] = "ti_sci_debug@";
+
+	/* Debug region is optional */
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+					   "debug_messages");
+	info->debug_region = devm_ioremap_resource(dev, res);
+	if (IS_ERR(info->debug_region))
+		return 0;
+	info->debug_region_size = resource_size(res);
+
+	info->debug_buffer = devm_kcalloc(dev, info->debug_region_size + 1,
+					  sizeof(char), GFP_KERNEL);
+	if (!info->debug_buffer)
+		return -ENOMEM;
+	/* Setup NULL termination */
+	info->debug_buffer[info->debug_region_size] = 0;
+
+	info->d = debugfs_create_file(strncat(debug_name, dev_name(dev),
+					      sizeof(debug_name) -
+					      sizeof("ti_sci_debug@")),
+				      0444, NULL, info, &ti_sci_debug_fops);
+	if (IS_ERR(info->d))
+		return PTR_ERR(info->d);
+
+	dev_dbg(dev, "Debug region => %p, size = %zu bytes, resource: %pr\n",
+		info->debug_region, info->debug_region_size, res);
+	return 0;
+}
+
+/**
+ * ti_sci_debugfs_destroy() - clean up log debug file
+ * @pdev:	platform device pointer
+ * @info:	Pointer to SCI entity information
+ */
+static void ti_sci_debugfs_destroy(struct platform_device *pdev,
+				   struct ti_sci_info *info)
+{
+	if (IS_ERR(info->debug_region))
+		return;
+
+	debugfs_remove(info->d);
+}
+#else /* CONFIG_DEBUG_FS */
+static inline int ti_sci_debugfs_create(struct platform_device *dev,
+					struct ti_sci_info *info)
+{
+	return 0;
+}
+
+static inline void ti_sci_debugfs_destroy(struct platform_device *dev,
+					  struct ti_sci_info *info)
+{
+}
+#endif /* CONFIG_DEBUG_FS */
+
+/**
+ * ti_sci_dump_header_dbg() - Helper to dump a message header.
+ * @dev:	Device pointer corresponding to the SCI entity
+ * @hdr:	pointer to header.
+ */
+static inline void ti_sci_dump_header_dbg(struct device *dev,
+					  struct ti_sci_msg_hdr *hdr)
+{
+	dev_dbg(dev, "MSGHDR:type=0x%04x host=0x%02x seq=0x%02x flags=0x%08x\n",
+		hdr->type, hdr->host, hdr->seq, hdr->flags);
+}
+
+/**
+ * ti_sci_rx_callback() - mailbox client callback for receive messages
+ * @cl:	client pointer
+ * @m:	mailbox message
+ *
+ * Processes one received message to appropriate transfer information and
+ * signals completion of the transfer.
+ *
+ * NOTE: This function will be invoked in IRQ context, hence it should do
+ * as little work as possible.
+ */
+static void ti_sci_rx_callback(struct mbox_client *cl, void *m)
+{
+	struct ti_sci_info *info = cl_to_ti_sci_info(cl);
+	struct device *dev = info->dev;
+	struct ti_sci_xfers_info *minfo = &info->minfo;
+	struct ti_msgmgr_message *mbox_msg = m;
+	struct ti_sci_msg_hdr *hdr = (struct ti_sci_msg_hdr *)mbox_msg->buf;
+	struct ti_sci_xfer *xfer;
+	u8 xfer_id;
+
+	xfer_id = hdr->seq;
+
+	/*
+	 * Are we even expecting this?
+	 * NOTE: barriers were implicit in locks used for modifying the bitmap
+	 */
+	if (!test_bit(xfer_id, minfo->xfer_alloc_table)) {
+		dev_err(dev, "Message for %d is not expected!\n", xfer_id);
+		return;
+	}
+
+	xfer = &minfo->xfer_block[xfer_id];
+
+	/* Is the message of valid length? */
+	if (mbox_msg->len > info->desc->max_msg_size) {
+		dev_err(dev, "Unable to handle %zu xfer(max %d)\n",
+			mbox_msg->len, info->desc->max_msg_size);
+		ti_sci_dump_header_dbg(dev, hdr);
+		return;
+	}
+	if (mbox_msg->len < xfer->rx_len) {
+		dev_err(dev, "Recv xfer %zu < expected %d length\n",
+			mbox_msg->len, xfer->rx_len);
+		ti_sci_dump_header_dbg(dev, hdr);
+		return;
+	}
+
+	ti_sci_dump_header_dbg(dev, hdr);
+	/* Take a copy into the rx buffer. */
+	memcpy(xfer->xfer_buf, mbox_msg->buf, xfer->rx_len);
+	complete(&xfer->done);
+}
+
+/**
+ * ti_sci_get_one_xfer() - Allocate one message
+ * @info:	Pointer to SCI entity information
+ * @msg_type:	Message type
+ * @msg_flags:	Flag to set for the message
+ * @tx_message_size: transmit message size
+ * @rx_message_size: receive message size
+ *
+ * Helper function which is used by various command functions that are
+ * exposed to clients of this driver for allocating a message traffic event.
+ *
+ * This function can sleep depending on pending requests already in the system
+ * for the SCI entity. Further, this also holds a spinlock to maintain integrity
+ * of internal data structures.
+ *
+ * Return: Valid &struct ti_sci_xfer on success, else corresponding ERR_PTR().
+ */
+static struct ti_sci_xfer *ti_sci_get_one_xfer(struct ti_sci_info *info,
+					       u16 msg_type, u32 msg_flags,
+					       size_t tx_message_size,
+					       size_t rx_message_size)
+{
+	struct ti_sci_xfers_info *minfo = &info->minfo;
+	struct ti_sci_xfer *xfer;
+	struct ti_sci_msg_hdr *hdr;
+	unsigned long flags;
+	unsigned long bit_pos;
+	u8 xfer_id;
+	int ret;
+	int timeout;
+
+	/* Ensure we have sane transfer sizes */
+	if (rx_message_size > info->desc->max_msg_size ||
+	    tx_message_size > info->desc->max_msg_size ||
+	    rx_message_size < sizeof(*hdr) || tx_message_size < sizeof(*hdr))
+		return ERR_PTR(-ERANGE);
+
+	/*
+	 * Ensure we have only a controlled number of pending messages.
+	 * Ideally, we might just have to wait for a single message; be
+	 * conservative and wait 5 times that.
+	 */
+	timeout = msecs_to_jiffies(info->desc->max_rx_timeout_ms) * 5;
+	ret = down_timeout(&minfo->sem_xfer_count, timeout);
+	if (ret < 0)
+		return ERR_PTR(ret);
+
+	/* Keep the locked section as small as possible */
+	spin_lock_irqsave(&minfo->xfer_lock, flags);
+	bit_pos = find_first_zero_bit(minfo->xfer_alloc_table,
+				      info->desc->max_msgs);
+	set_bit(bit_pos, minfo->xfer_alloc_table);
+	spin_unlock_irqrestore(&minfo->xfer_lock, flags);
+
+	/*
+	 * We already ensured in probe that the maximum number of messages
+	 * fits in hdr.seq - NOTE: this gives predictable O(1) access
+	 * latencies, BUT it opens us to risk if the remote misbehaves with
+	 * corrupted message sequence responses. If that happens, we are
+	 * going to be messed up anyway.
+	 */
+	xfer_id = (u8)bit_pos;
+
+	xfer = &minfo->xfer_block[xfer_id];
+
+	hdr = (struct ti_sci_msg_hdr *)xfer->tx_message.buf;
+	xfer->tx_message.len = tx_message_size;
+	xfer->rx_len = (u8)rx_message_size;
+
+	reinit_completion(&xfer->done);
+
+	hdr->seq = xfer_id;
+	hdr->type = msg_type;
+	hdr->host = info->desc->host_id;
+	hdr->flags = msg_flags;
+
+	return xfer;
+}
+
+/**
+ * ti_sci_put_one_xfer() - Release a message
+ * @minfo:	transfer info pointer
+ * @xfer:	message that was reserved by ti_sci_get_one_xfer
+ *
+ * This holds a spinlock to maintain integrity of internal data structures.
+ */
+static void ti_sci_put_one_xfer(struct ti_sci_xfers_info *minfo,
+				struct ti_sci_xfer *xfer)
+{
+	unsigned long flags;
+	struct ti_sci_msg_hdr *hdr;
+	u8 xfer_id;
+
+	hdr = (struct ti_sci_msg_hdr *)xfer->tx_message.buf;
+	xfer_id = hdr->seq;
+
+	/*
+	 * Keep the locked section as small as possible
+	 * NOTE: we might escape with smp_mb and no lock here..
+	 * but just be conservative and symmetric.
+	 */
+	spin_lock_irqsave(&minfo->xfer_lock, flags);
+	clear_bit(xfer_id, minfo->xfer_alloc_table);
+	spin_unlock_irqrestore(&minfo->xfer_lock, flags);
+
+	/* Increment the count for the next user to get through */
+	up(&minfo->sem_xfer_count);
+}
+
+/**
+ * ti_sci_do_xfer() - Do one transfer
+ * @info:	Pointer to SCI entity information
+ * @xfer:	Transfer to initiate and wait for response
+ *
+ * Return: -ETIMEDOUT in case of no response, if transmit error,
+ *	   return corresponding error, else if all goes well,
+ *	   return 0.
+ */
+static inline int ti_sci_do_xfer(struct ti_sci_info *info,
+				 struct ti_sci_xfer *xfer)
+{
+	int ret;
+	int timeout;
+	struct device *dev = info->dev;
+
+	ret = mbox_send_message(info->chan_tx, &xfer->tx_message);
+	if (ret < 0)
+		return ret;
+
+	ret = 0;
+
+	/* And we wait for the response. */
+	timeout = msecs_to_jiffies(info->desc->max_rx_timeout_ms);
+	if (!wait_for_completion_timeout(&xfer->done, timeout)) {
+		dev_err(dev, "Mbox timed out in resp (caller: %pS)\n",
+			(void *)_RET_IP_);
+		ret = -ETIMEDOUT;
+	}
+	/*
+	 * NOTE: we might prefer not to need the mailbox ticker to manage the
+	 * transfer queueing since the protocol layer queues things by itself.
+	 * Unfortunately, we have to kick the mailbox framework after we have
+	 * received our message.
+	 */
+	mbox_client_txdone(info->chan_tx, ret);
+
+	return ret;
+}
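+
+/*
+ * Editor's note: every command helper below follows the same pattern -
+ * allocate a transfer slot with ti_sci_get_one_xfer(), fill the request
+ * at xfer->tx_message.buf, send it with ti_sci_do_xfer(), check the
+ * response (e.g. via ti_sci_is_response_ack()), then always release the
+ * slot with ti_sci_put_one_xfer(). ti_sci_cmd_get_revision() below is
+ * the simplest complete instance of this sequence.
+ */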
+
+/**
+ * ti_sci_cmd_get_revision() - command to get the revision of the SCI entity
+ * @info:	Pointer to SCI entity information
+ *
+ * Updates the SCI information in the internal data structure.
+ *
+ * Return: 0 if all went fine, else return appropriate error.
+ */
+static int ti_sci_cmd_get_revision(struct ti_sci_info *info)
+{
+	struct device *dev = info->dev;
+	struct ti_sci_handle *handle = &info->handle;
+	struct ti_sci_version_info *ver = &handle->version;
+	struct ti_sci_msg_resp_version *rev_info;
+	struct ti_sci_xfer *xfer;
+	int ret;
+
+	/* No need to set up flags since it is expected to respond */
+	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_VERSION,
+				   0x0, sizeof(struct ti_sci_msg_hdr),
+				   sizeof(*rev_info));
+	if (IS_ERR(xfer)) {
+		ret = PTR_ERR(xfer);
+		dev_err(dev, "Message alloc failed(%d)\n", ret);
+		return ret;
+	}
+
+	rev_info = (struct ti_sci_msg_resp_version *)xfer->xfer_buf;
+
+	ret = ti_sci_do_xfer(info, xfer);
+	if (ret) {
+		dev_err(dev, "Mbox send fail %d\n", ret);
+		goto fail;
+	}
+
+	ver->abi_major = rev_info->abi_major;
+	ver->abi_minor = rev_info->abi_minor;
+	ver->firmware_revision = rev_info->firmware_revision;
+	strncpy(ver->firmware_description, rev_info->firmware_description,
+		sizeof(ver->firmware_description));
+
+fail:
+	ti_sci_put_one_xfer(&info->minfo, xfer);
+	return ret;
+}
+
+/**
+ * ti_sci_is_response_ack() - Generic ACK/NACK message checkup
+ * @r:	pointer to response buffer
+ *
+ * Return: true if the response was an ACK, else returns false.
+ */
+static inline bool ti_sci_is_response_ack(void *r)
+{
+	struct ti_sci_msg_hdr *hdr = r;
+
+	return hdr->flags & TI_SCI_FLAG_RESP_GENERIC_ACK ? true : false;
+}
+
+/**
+ * ti_sci_set_device_state() - Set device state helper
+ * @handle:	pointer to TI SCI handle
+ * @id:		Device identifier
+ * @flags:	flags to setup for the device
+ * @state:	State to move the device to
+ *
+ * Return: 0 if all went well, else returns appropriate error value.
+ */
+static int ti_sci_set_device_state(const struct ti_sci_handle *handle,
+				   u32 id, u32 flags, u8 state)
+{
+	struct ti_sci_info *info;
+	struct ti_sci_msg_req_set_device_state *req;
+	struct ti_sci_msg_hdr *resp;
+	struct ti_sci_xfer *xfer;
+	struct device *dev;
+	int ret = 0;
+
+	if (IS_ERR(handle))
+		return PTR_ERR(handle);
+	if (!handle)
+		return -EINVAL;
+
+	info = handle_to_ti_sci_info(handle);
+	dev = info->dev;
+
+	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_SET_DEVICE_STATE,
+				   flags | TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
+				   sizeof(*req), sizeof(*resp));
+	if (IS_ERR(xfer)) {
+		ret = PTR_ERR(xfer);
+		dev_err(dev, "Message alloc failed(%d)\n", ret);
+		return ret;
+	}
+	req = (struct ti_sci_msg_req_set_device_state *)xfer->xfer_buf;
+	req->id = id;
+	req->state = state;
+
+	ret = ti_sci_do_xfer(info, xfer);
+	if (ret) {
+		dev_err(dev, "Mbox send fail %d\n", ret);
+		goto fail;
+	}
+
+	resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
+
+	ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;
+
+fail:
+	ti_sci_put_one_xfer(&info->minfo, xfer);
+
+	return ret;
+}
+
+/**
+ * ti_sci_get_device_state() - Get device state helper
+ * @handle:	Handle to the device
+ * @id:		Device Identifier
+ * @clcnt:	Pointer to Context Loss Count
+ * @resets:	pointer to resets
+ * @p_state:	pointer to p_state
+ * @c_state:	pointer to c_state
+ *
+ * Return: 0 if all went fine, else return appropriate error.
+ */
+static int ti_sci_get_device_state(const struct ti_sci_handle *handle,
+				   u32 id, u32 *clcnt, u32 *resets,
+				   u8 *p_state, u8 *c_state)
+{
+	struct ti_sci_info *info;
+	struct ti_sci_msg_req_get_device_state *req;
+	struct ti_sci_msg_resp_get_device_state *resp;
+	struct ti_sci_xfer *xfer;
+	struct device *dev;
+	int ret = 0;
+
+	if (IS_ERR(handle))
+		return PTR_ERR(handle);
+	if (!handle)
+		return -EINVAL;
+
+	if (!clcnt && !resets && !p_state && !c_state)
+		return -EINVAL;
+
+	info = handle_to_ti_sci_info(handle);
+	dev = info->dev;
+
+	/* Response is expected, so no need of any flags */
+	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_GET_DEVICE_STATE,
+				   0, sizeof(*req), sizeof(*resp));
+	if (IS_ERR(xfer)) {
+		ret = PTR_ERR(xfer);
+		dev_err(dev, "Message alloc failed(%d)\n", ret);
+		return ret;
+	}
+	req = (struct ti_sci_msg_req_get_device_state *)xfer->xfer_buf;
+	req->id = id;
+
+	ret = ti_sci_do_xfer(info, xfer);
+	if (ret) {
+		dev_err(dev, "Mbox send fail %d\n", ret);
+		goto fail;
+	}
+
+	resp = (struct ti_sci_msg_resp_get_device_state *)xfer->xfer_buf;
+	if (!ti_sci_is_response_ack(resp)) {
+		ret = -ENODEV;
+		goto fail;
+	}
+
+	if (clcnt)
+		*clcnt = resp->context_loss_count;
+	if (resets)
+		*resets = resp->resets;
+	if (p_state)
+		*p_state = resp->programmed_state;
+	if (c_state)
+		*c_state = resp->current_state;
+fail:
+	ti_sci_put_one_xfer(&info->minfo, xfer);
+
+	return ret;
+}
+
+/**
+ * ti_sci_cmd_get_device() - command to request for device managed by TISCI
+ * @handle:	Pointer to TISCI handle as retrieved by *ti_sci_get_handle
+ * @id:		Device Identifier
+ *
+ * Request for the device - NOTE: the client MUST maintain integrity of
+ * usage count by balancing get_device with put_device. No refcounting is
+ * managed by the driver for that purpose.
+ *
+ * NOTE: The request is for exclusive access for the processor.
+ *
+ * Return: 0 if all went fine, else return appropriate error.
+ */
+static int ti_sci_cmd_get_device(const struct ti_sci_handle *handle, u32 id)
+{
+	return ti_sci_set_device_state(handle, id,
+				       MSG_FLAG_DEVICE_EXCLUSIVE,
+				       MSG_DEVICE_SW_STATE_ON);
+}
+
+/**
+ * ti_sci_cmd_idle_device() - Command to idle a device managed by TISCI
+ * @handle:	Pointer to TISCI handle as retrieved by *ti_sci_get_handle
+ * @id:		Device Identifier
+ *
+ * Request to idle the device - NOTE: the client MUST maintain integrity of
+ * usage count by balancing get_device with put_device. No refcounting is
+ * managed by the driver for that purpose.
+ *
+ * Return: 0 if all went fine, else return appropriate error.
+ */
+static int ti_sci_cmd_idle_device(const struct ti_sci_handle *handle, u32 id)
+{
+	return ti_sci_set_device_state(handle, id,
+				       MSG_FLAG_DEVICE_EXCLUSIVE,
+				       MSG_DEVICE_SW_STATE_RETENTION);
+}
+
+/**
+ * ti_sci_cmd_put_device() - command to release a device managed by TISCI
+ * @handle:	Pointer to TISCI handle as retrieved by *ti_sci_get_handle
+ * @id:		Device Identifier
+ *
+ * Request to release the device - NOTE: the client MUST maintain integrity
+ * of usage count by balancing get_device with put_device. No refcounting is
+ * managed by the driver for that purpose.
+ *
+ * Return: 0 if all went fine, else return appropriate error.
+ */
+static int ti_sci_cmd_put_device(const struct ti_sci_handle *handle, u32 id)
+{
+	return ti_sci_set_device_state(handle, id,
+				       0, MSG_DEVICE_SW_STATE_AUTO_OFF);
+}
+
+/**
+ * ti_sci_cmd_dev_is_valid() - Is the device valid
+ * @handle:	Pointer to TISCI handle as retrieved by *ti_sci_get_handle
+ * @id:		Device Identifier
+ *
+ * Return: 0 if all went fine and the device ID is valid, else return
+ * appropriate error.
+ */
+static int ti_sci_cmd_dev_is_valid(const struct ti_sci_handle *handle, u32 id)
+{
+	u8 unused;
+
+	/* check the device state which will also tell us if the ID is valid */
+	return ti_sci_get_device_state(handle, id, NULL, NULL, NULL, &unused);
+}
+
+/**
+ * ti_sci_cmd_dev_get_clcnt() - Get context loss counter
+ * @handle:	Pointer to TISCI handle
+ * @id:		Device Identifier
+ * @count:	Pointer to Context Loss counter to populate
+ *
+ * Return: 0 if all went fine, else return appropriate error.
+ */
+static int ti_sci_cmd_dev_get_clcnt(const struct ti_sci_handle *handle, u32 id,
+				    u32 *count)
+{
+	return ti_sci_get_device_state(handle, id, count, NULL, NULL, NULL);
+}
+
+/**
+ * ti_sci_cmd_dev_is_idle() - Check if the device is requested to be idle
+ * @handle:	Pointer to TISCI handle
+ * @id:		Device Identifier
+ * @r_state:	true if requested to be idle
+ *
+ * Return: 0 if all went fine, else return appropriate error.
+ */
+static int ti_sci_cmd_dev_is_idle(const struct ti_sci_handle *handle, u32 id,
+				  bool *r_state)
+{
+	int ret;
+	u8 state;
+
+	if (!r_state)
+		return -EINVAL;
+
+	ret = ti_sci_get_device_state(handle, id, NULL, NULL, &state, NULL);
+	if (ret)
+		return ret;
+
+	*r_state = (state == MSG_DEVICE_SW_STATE_RETENTION);
+
+	return 0;
+}
+
+/**
+ * ti_sci_cmd_dev_is_stop() - Check if the device is requested to be stopped
+ * @handle:	Pointer to TISCI handle
+ * @id:		Device Identifier
+ * @r_state:	true if requested to be stopped
+ * @curr_state:	true if currently stopped.
+ *
+ * Return: 0 if all went fine, else return appropriate error.
+ */
+static int ti_sci_cmd_dev_is_stop(const struct ti_sci_handle *handle, u32 id,
+				  bool *r_state, bool *curr_state)
+{
+	int ret;
+	u8 p_state, c_state;
+
+	if (!r_state && !curr_state)
+		return -EINVAL;
+
+	ret = ti_sci_get_device_state(handle, id, NULL, NULL,
+				      &p_state, &c_state);
+	if (ret)
+		return ret;
+
+	if (r_state)
+		*r_state = (p_state == MSG_DEVICE_SW_STATE_AUTO_OFF);
+	if (curr_state)
+		*curr_state = (c_state == MSG_DEVICE_HW_STATE_OFF);
+
+	return 0;
+}
+
+/**
+ * ti_sci_cmd_dev_is_on() - Check if the device is requested to be ON
+ * @handle:	Pointer to TISCI handle
+ * @id:		Device Identifier
+ * @r_state:	true if requested to be ON
+ * @curr_state:	true if currently ON and active
+ *
+ * Return: 0 if all went fine, else return appropriate error.
+ */
+static int ti_sci_cmd_dev_is_on(const struct ti_sci_handle *handle, u32 id,
+				bool *r_state, bool *curr_state)
+{
+	int ret;
+	u8 p_state, c_state;
+
+	if (!r_state && !curr_state)
+		return -EINVAL;
+
+	ret = ti_sci_get_device_state(handle, id, NULL, NULL,
+				      &p_state, &c_state);
+	if (ret)
+		return ret;
+
+	if (r_state)
+		*r_state = (p_state == MSG_DEVICE_SW_STATE_ON);
+	if (curr_state)
+		*curr_state = (c_state == MSG_DEVICE_HW_STATE_ON);
+
+	return 0;
+}
+
+/**
+ * ti_sci_cmd_dev_is_trans() - Check if the device is currently transitioning
+ * @handle:	Pointer to TISCI handle
+ * @id:		Device Identifier
+ * @curr_state:	true if currently transitioning.
+ *
+ * Return: 0 if all went fine, else return appropriate error.
+ */
+static int ti_sci_cmd_dev_is_trans(const struct ti_sci_handle *handle, u32 id,
+				   bool *curr_state)
+{
+	int ret;
+	u8 state;
+
+	if (!curr_state)
+		return -EINVAL;
+
+	ret = ti_sci_get_device_state(handle, id, NULL, NULL, NULL, &state);
+	if (ret)
+		return ret;
+
+	*curr_state = (state == MSG_DEVICE_HW_STATE_TRANS);
+
+	return 0;
+}
+
+/**
+ * ti_sci_cmd_set_device_resets() - command to set resets for device managed
+ *				    by TISCI
+ * @handle:	Pointer to TISCI handle as retrieved by *ti_sci_get_handle
+ * @id:		Device Identifier
+ * @reset_state: Device specific reset bit field
+ *
+ * Return: 0 if all went fine, else return appropriate error.
+ */
+static int ti_sci_cmd_set_device_resets(const struct ti_sci_handle *handle,
+					u32 id, u32 reset_state)
+{
+	struct ti_sci_info *info;
+	struct ti_sci_msg_req_set_device_resets *req;
+	struct ti_sci_msg_hdr *resp;
+	struct ti_sci_xfer *xfer;
+	struct device *dev;
+	int ret = 0;
+
+	if (IS_ERR(handle))
+		return PTR_ERR(handle);
+	if (!handle)
+		return -EINVAL;
+
+	info = handle_to_ti_sci_info(handle);
+	dev = info->dev;
+
+	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_SET_DEVICE_RESETS,
+				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
+				   sizeof(*req), sizeof(*resp));
+	if (IS_ERR(xfer)) {
+		ret = PTR_ERR(xfer);
+		dev_err(dev, "Message alloc failed(%d)\n", ret);
+		return ret;
+	}
+	req = (struct ti_sci_msg_req_set_device_resets *)xfer->xfer_buf;
+	req->id = id;
+	req->resets = reset_state;
+
+	ret = ti_sci_do_xfer(info, xfer);
+	if (ret) {
+		dev_err(dev, "Mbox send fail %d\n", ret);
+		goto fail;
+	}
+
+	resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
+
+	ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;
+
+fail:
+	ti_sci_put_one_xfer(&info->minfo, xfer);
+
+	return ret;
+}
+
+/**
+ * ti_sci_cmd_get_device_resets() - Get reset state for device managed
+ *				    by TISCI
+ * @handle:		Pointer to TISCI handle
+ * @id:			Device Identifier
+ * @reset_state:	Pointer to reset state to populate
+ *
+ * Return: 0 if all went fine, else return appropriate error.
+ */
+static int ti_sci_cmd_get_device_resets(const struct ti_sci_handle *handle,
+					u32 id, u32 *reset_state)
+{
+	return ti_sci_get_device_state(handle, id, NULL, reset_state, NULL,
+				       NULL);
+}
+
+/**
+ * ti_sci_set_clock_state() - Set clock state helper
+ * @handle:	pointer to TI SCI handle
+ * @dev_id:	Device identifier this request is for
+ * @clk_id:	Clock identifier for the device for this request.
+ *		Each device has its own set of clock inputs. This indexes
+ *		which clock input to modify.
+ * @flags:	Header flags as needed
+ * @state:	State to request for the clock.
+ *
+ * Return: 0 if all went well, else returns appropriate error value.
+ */
+static int ti_sci_set_clock_state(const struct ti_sci_handle *handle,
+				  u32 dev_id, u8 clk_id,
+				  u32 flags, u8 state)
+{
+	struct ti_sci_info *info;
+	struct ti_sci_msg_req_set_clock_state *req;
+	struct ti_sci_msg_hdr *resp;
+	struct ti_sci_xfer *xfer;
+	struct device *dev;
+	int ret = 0;
+
+	if (IS_ERR(handle))
+		return PTR_ERR(handle);
+	if (!handle)
+		return -EINVAL;
+
+	info = handle_to_ti_sci_info(handle);
+	dev = info->dev;
+
+	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_SET_CLOCK_STATE,
+				   flags | TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
+				   sizeof(*req), sizeof(*resp));
+	if (IS_ERR(xfer)) {
+		ret = PTR_ERR(xfer);
+		dev_err(dev, "Message alloc failed(%d)\n", ret);
+		return ret;
+	}
+	req = (struct ti_sci_msg_req_set_clock_state *)xfer->xfer_buf;
+	req->dev_id = dev_id;
+	req->clk_id = clk_id;
+	req->request_state = state;
+
+	ret = ti_sci_do_xfer(info, xfer);
+	if (ret) {
+		dev_err(dev, "Mbox send fail %d\n", ret);
+		goto fail;
+	}
+
+	resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
+
+	ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;
+
+fail:
+	ti_sci_put_one_xfer(&info->minfo, xfer);
+
+	return ret;
+}
+
+/**
+ * ti_sci_cmd_get_clock_state() - Get clock state helper
+ * @handle:	pointer to TI SCI handle
+ * @dev_id:	Device identifier this request is for
+ * @clk_id:	Clock identifier for the device for this request.
+ *		Each device has its own set of clock inputs. This indexes
+ *		which clock input to query.
+ * @programmed_state:	State requested for clock to move to
+ * @current_state:	State that the clock is currently in
+ *
+ * Return: 0 if all went well, else returns appropriate error value.
+ */
+static int ti_sci_cmd_get_clock_state(const struct ti_sci_handle *handle,
+				      u32 dev_id, u8 clk_id,
+				      u8 *programmed_state, u8 *current_state)
+{
+	struct ti_sci_info *info;
+	struct ti_sci_msg_req_get_clock_state *req;
+	struct ti_sci_msg_resp_get_clock_state *resp;
+	struct ti_sci_xfer *xfer;
+	struct device *dev;
+	int ret = 0;
+
+	if (IS_ERR(handle))
+		return PTR_ERR(handle);
+	if (!handle)
+		return -EINVAL;
+
+	if (!programmed_state && !current_state)
+		return -EINVAL;
+
+	info = handle_to_ti_sci_info(handle);
+	dev = info->dev;
+
+	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_GET_CLOCK_STATE,
+				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
+				   sizeof(*req), sizeof(*resp));
+	if (IS_ERR(xfer)) {
+		ret = PTR_ERR(xfer);
+		dev_err(dev, "Message alloc failed(%d)\n", ret);
+		return ret;
+	}
+	req = (struct ti_sci_msg_req_get_clock_state *)xfer->xfer_buf;
+	req->dev_id = dev_id;
+	req->clk_id = clk_id;
+
+	ret = ti_sci_do_xfer(info, xfer);
+	if (ret) {
+		dev_err(dev, "Mbox send fail %d\n", ret);
+		goto fail;
+	}
+
+	resp = (struct ti_sci_msg_resp_get_clock_state *)xfer->xfer_buf;
+
+	if (!ti_sci_is_response_ack(resp)) {
+		ret = -ENODEV;
+		goto fail;
+	}
+
+	if (programmed_state)
+		*programmed_state = resp->programmed_state;
+	if (current_state)
+		*current_state = resp->current_state;
+
+fail:
+	ti_sci_put_one_xfer(&info->minfo, xfer);
+
+	return ret;
+}
+
+/**
+ * ti_sci_cmd_get_clock() - Get control of a clock from TI SCI
+ * @handle:	pointer to TI SCI handle
+ * @dev_id:	Device identifier this request is for
+ * @clk_id:	Clock identifier for the device for this request.
+ *		Each device has its own set of clock inputs. This indexes
+ *		which clock input to modify.
+ * @needs_ssc: 'true' if Spread Spectrum clock is desired, else 'false'
+ * @can_change_freq: 'true' if frequency change is desired, else 'false'
+ * @enable_input_term: 'true' if input termination is desired, else 'false'
+ *
+ * Return: 0 if all went well, else returns appropriate error value.
+ */
+static int ti_sci_cmd_get_clock(const struct ti_sci_handle *handle, u32 dev_id,
+				u8 clk_id, bool needs_ssc, bool can_change_freq,
+				bool enable_input_term)
+{
+	u32 flags = 0;
+
+	flags |= needs_ssc ? MSG_FLAG_CLOCK_ALLOW_SSC : 0;
+	flags |= can_change_freq ? MSG_FLAG_CLOCK_ALLOW_FREQ_CHANGE : 0;
+	flags |= enable_input_term ? MSG_FLAG_CLOCK_INPUT_TERM : 0;
+
+	return ti_sci_set_clock_state(handle, dev_id, clk_id, flags,
+				      MSG_CLOCK_SW_STATE_REQ);
+}
+
+/**
+ * ti_sci_cmd_idle_clock() - Idle a clock which is in our control
+ * @handle:	pointer to TI SCI handle
+ * @dev_id:	Device identifier this request is for
+ * @clk_id:	Clock identifier for the device for this request.
+ *		Each device has its own set of clock inputs. This indexes
+ *		which clock input to modify.
+ *
+ * NOTE: This clock must have been requested by get_clock previously.
+ *
+ * Return: 0 if all went well, else returns appropriate error value.
+ */
+static int ti_sci_cmd_idle_clock(const struct ti_sci_handle *handle,
+				 u32 dev_id, u8 clk_id)
+{
+	return ti_sci_set_clock_state(handle, dev_id, clk_id, 0,
+				      MSG_CLOCK_SW_STATE_UNREQ);
+}
+
+/**
+ * ti_sci_cmd_put_clock() - Release a clock from our control back to TISCI
+ * @handle:	pointer to TI SCI handle
+ * @dev_id:	Device identifier this request is for
+ * @clk_id:	Clock identifier for the device for this request.
+ *		Each device has its own set of clock inputs. This indexes
+ *		which clock input to modify.
+ *
+ * NOTE: This clock must have been requested by get_clock previously.
+ *
+ * Return: 0 if all went well, else returns appropriate error value.
+ */
+static int ti_sci_cmd_put_clock(const struct ti_sci_handle *handle,
+				u32 dev_id, u8 clk_id)
+{
+	return ti_sci_set_clock_state(handle, dev_id, clk_id, 0,
+				      MSG_CLOCK_SW_STATE_AUTO);
+}
+
+/**
+ * ti_sci_cmd_clk_is_auto() - Is the clock being auto managed
+ * @handle:	pointer to TI SCI handle
+ * @dev_id:	Device identifier this request is for
+ * @clk_id:	Clock identifier for the device for this request.
+ *		Each device has its own set of clock inputs. This indexes
+ *		which clock input to query.
+ * @req_state: state indicating if the clock is auto managed
+ *
+ * Return: 0 if all went well, else returns appropriate error value.
+ */
+static int ti_sci_cmd_clk_is_auto(const struct ti_sci_handle *handle,
+				  u32 dev_id, u8 clk_id, bool *req_state)
+{
+	u8 state = 0;
+	int ret;
+
+	if (!req_state)
+		return -EINVAL;
+
+	ret = ti_sci_cmd_get_clock_state(handle, dev_id, clk_id, &state, NULL);
+	if (ret)
+		return ret;
+
+	*req_state = (state == MSG_CLOCK_SW_STATE_AUTO);
+	return 0;
+}
+
+/**
+ * ti_sci_cmd_clk_is_on() - Is the clock ON
+ * @handle:	pointer to TI SCI handle
+ * @dev_id:	Device identifier this request is for
+ * @clk_id:	Clock identifier for the device for this request.
+ *		Each device has its own set of clock inputs. This indexes
+ *		which clock input to query.
+ * @req_state: state indicating if the clock is managed by us and enabled
+ * @curr_state: state indicating if the clock is ready for operation
+ *
+ * Return: 0 if all went well, else returns appropriate error value.
+ */
+static int ti_sci_cmd_clk_is_on(const struct ti_sci_handle *handle, u32 dev_id,
+				u8 clk_id, bool *req_state, bool *curr_state)
+{
+	u8 c_state = 0, r_state = 0;
+	int ret;
+
+	if (!req_state && !curr_state)
+		return -EINVAL;
+
+	ret = ti_sci_cmd_get_clock_state(handle, dev_id, clk_id,
+					 &r_state, &c_state);
+	if (ret)
+		return ret;
+
+	if (req_state)
+		*req_state = (r_state == MSG_CLOCK_SW_STATE_REQ);
+	if (curr_state)
+		*curr_state = (c_state == MSG_CLOCK_HW_STATE_READY);
+	return 0;
+}
+
+/**
+ * ti_sci_cmd_clk_is_off() - Is the clock OFF
+ * @handle:	pointer to TI SCI handle
+ * @dev_id:	Device identifier this request is for
+ * @clk_id:	Clock identifier for the device for this request.
+ *		Each device has its own set of clock inputs. This indexes
+ *		which clock input to query.
+ * @req_state: state indicating if the clock is managed by us and disabled
+ * @curr_state: state indicating if the clock is NOT ready for operation
+ *
+ * Return: 0 if all went well, else returns appropriate error value.
+ */
+static int ti_sci_cmd_clk_is_off(const struct ti_sci_handle *handle, u32 dev_id,
+				 u8 clk_id, bool *req_state, bool *curr_state)
+{
+	u8 c_state = 0, r_state = 0;
+	int ret;
+
+	if (!req_state && !curr_state)
+		return -EINVAL;
+
+	ret = ti_sci_cmd_get_clock_state(handle, dev_id, clk_id,
+					 &r_state, &c_state);
+	if (ret)
+		return ret;
+
+	if (req_state)
+		*req_state = (r_state == MSG_CLOCK_SW_STATE_UNREQ);
+	if (curr_state)
+		*curr_state = (c_state == MSG_CLOCK_HW_STATE_NOT_READY);
+	return 0;
+}
+
+/**
+ * ti_sci_cmd_clk_set_parent() - Set the clock source of a specific device clock
+ * @handle:	pointer to TI SCI handle
+ * @dev_id:	Device identifier this request is for
+ * @clk_id:	Clock identifier for the device for this request.
+ *		Each device has its own set of clock inputs. This indexes
+ *		which clock input to modify.
+ * @parent_id:	Parent clock identifier to set
+ *
+ * Return: 0 if all went well, else returns appropriate error value.
+ */
+static int ti_sci_cmd_clk_set_parent(const struct ti_sci_handle *handle,
+				     u32 dev_id, u8 clk_id, u8 parent_id)
+{
+	struct ti_sci_info *info;
+	struct ti_sci_msg_req_set_clock_parent *req;
+	struct ti_sci_msg_hdr *resp;
+	struct ti_sci_xfer *xfer;
+	struct device *dev;
+	int ret = 0;
+
+	if (IS_ERR(handle))
+		return PTR_ERR(handle);
+	if (!handle)
+		return -EINVAL;
+
+	info = handle_to_ti_sci_info(handle);
+	dev = info->dev;
+
+	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_SET_CLOCK_PARENT,
+				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
+				   sizeof(*req), sizeof(*resp));
+	if (IS_ERR(xfer)) {
+		ret = PTR_ERR(xfer);
+		dev_err(dev, "Message alloc failed(%d)\n", ret);
+		return ret;
+	}
+	req = (struct ti_sci_msg_req_set_clock_parent *)xfer->xfer_buf;
+	req->dev_id = dev_id;
+	req->clk_id = clk_id;
+	req->parent_id = parent_id;
+
+	ret = ti_sci_do_xfer(info, xfer);
+	if (ret) {
+		dev_err(dev, "Mbox send fail %d\n", ret);
+		goto fail;
+	}
+
+	resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
+
+	ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;
+
+fail:
+	ti_sci_put_one_xfer(&info->minfo, xfer);
+
+	return ret;
+}
+
+/**
+ * ti_sci_cmd_clk_get_parent() - Get current parent clock source
+ * @handle:	pointer to TI SCI handle
+ * @dev_id:	Device identifier this request is for
+ * @clk_id:	Clock identifier for the device for this request.
+ *		Each device has its own set of clock inputs. This indexes
+ *		which clock input to query.
+ * @parent_id:	Current clock parent
+ *
+ * Return: 0 if all went well, else returns appropriate error value.
+ */
+static int ti_sci_cmd_clk_get_parent(const struct ti_sci_handle *handle,
+				     u32 dev_id, u8 clk_id, u8 *parent_id)
+{
+	struct ti_sci_info *info;
+	struct ti_sci_msg_req_get_clock_parent *req;
+	struct ti_sci_msg_resp_get_clock_parent *resp;
+	struct ti_sci_xfer *xfer;
+	struct device *dev;
+	int ret = 0;
+
+	if (IS_ERR(handle))
+		return PTR_ERR(handle);
+	if (!handle || !parent_id)
+		return -EINVAL;
+
+	info = handle_to_ti_sci_info(handle);
+	dev = info->dev;
+
+	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_GET_CLOCK_PARENT,
+				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
+				   sizeof(*req), sizeof(*resp));
+	if (IS_ERR(xfer)) {
+		ret = PTR_ERR(xfer);
+		dev_err(dev, "Message alloc failed(%d)\n", ret);
+		return ret;
+	}
+	req = (struct ti_sci_msg_req_get_clock_parent *)xfer->xfer_buf;
+	req->dev_id = dev_id;
+	req->clk_id = clk_id;
+
+	ret = ti_sci_do_xfer(info, xfer);
+	if (ret) {
+		dev_err(dev, "Mbox send fail %d\n", ret);
+		goto fail;
+	}
+
+	resp = (struct ti_sci_msg_resp_get_clock_parent *)xfer->xfer_buf;
+
+	if (!ti_sci_is_response_ack(resp))
+		ret = -ENODEV;
+	else
+		*parent_id = resp->parent_id;
+
+fail:
+	ti_sci_put_one_xfer(&info->minfo, xfer);
+
+	return ret;
+}
+
+/**
+ * ti_sci_cmd_clk_get_num_parents() - Get num parents of the current clk source
+ * @handle:	pointer to TI SCI handle
+ * @dev_id:	Device identifier this request is for
+ * @clk_id:	Clock identifier for the device for this request.
+ *		Each device has its own set of clock inputs. This indexes
+ *		which clock input to query.
+ * @num_parents: Returns the number of parents of the current clock.
+ *
+ * Return: 0 if all went well, else returns appropriate error value.
+ */
+static int ti_sci_cmd_clk_get_num_parents(const struct ti_sci_handle *handle,
+					  u32 dev_id, u8 clk_id,
+					  u8 *num_parents)
+{
+	struct ti_sci_info *info;
+	struct ti_sci_msg_req_get_clock_num_parents *req;
+	struct ti_sci_msg_resp_get_clock_num_parents *resp;
+	struct ti_sci_xfer *xfer;
+	struct device *dev;
+	int ret = 0;
+
+	if (IS_ERR(handle))
+		return PTR_ERR(handle);
+	if (!handle || !num_parents)
+		return -EINVAL;
+
+	info = handle_to_ti_sci_info(handle);
+	dev = info->dev;
+
+	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_GET_NUM_CLOCK_PARENTS,
+				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
+				   sizeof(*req), sizeof(*resp));
+	if (IS_ERR(xfer)) {
+		ret = PTR_ERR(xfer);
+		dev_err(dev, "Message alloc failed(%d)\n", ret);
+		return ret;
+	}
+	req = (struct ti_sci_msg_req_get_clock_num_parents *)xfer->xfer_buf;
+	req->dev_id = dev_id;
+	req->clk_id = clk_id;
+
+	ret = ti_sci_do_xfer(info, xfer);
+	if (ret) {
+		dev_err(dev, "Mbox send fail %d\n", ret);
+		goto fail;
+	}
+
+	resp = (struct ti_sci_msg_resp_get_clock_num_parents *)xfer->xfer_buf;
+
+	if (!ti_sci_is_response_ack(resp))
+		ret = -ENODEV;
+	else
+		*num_parents = resp->num_parents;
+
+fail:
+	ti_sci_put_one_xfer(&info->minfo, xfer);
+
+	return ret;
+}
+
+/**
+ * ti_sci_cmd_clk_get_match_freq() - Find a good match for frequency
+ * @handle:	pointer to TI SCI handle
+ * @dev_id:	Device identifier this request is for
+ * @clk_id:	Clock identifier for the device for this request.
+ *		Each device has its own set of clock inputs. This indexes
+ *		which clock input to query.
+ * @min_freq:	The minimum allowable frequency in Hz. This is the minimum
+ *		allowable programmed frequency and does not account for clock
+ *		tolerances and jitter.
+ * @target_freq: The target clock frequency in Hz. A frequency will be
+ *		found as close to this target frequency as possible.
+ * @max_freq:	The maximum allowable frequency in Hz. This is the maximum
+ *		allowable programmed frequency and does not account for clock
+ *		tolerances and jitter.
+ * @match_freq:	Matched frequency in Hz, returned in the response.
+ *
+ * Return: 0 if all went well, else returns appropriate error value.
+ */
+static int ti_sci_cmd_clk_get_match_freq(const struct ti_sci_handle *handle,
+					 u32 dev_id, u8 clk_id, u64 min_freq,
+					 u64 target_freq, u64 max_freq,
+					 u64 *match_freq)
+{
+	struct ti_sci_info *info;
+	struct ti_sci_msg_req_query_clock_freq *req;
+	struct ti_sci_msg_resp_query_clock_freq *resp;
+	struct ti_sci_xfer *xfer;
+	struct device *dev;
+	int ret = 0;
+
+	if (IS_ERR(handle))
+		return PTR_ERR(handle);
+	if (!handle || !match_freq)
+		return -EINVAL;
+
+	info = handle_to_ti_sci_info(handle);
+	dev = info->dev;
+
+	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_QUERY_CLOCK_FREQ,
+				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
+				   sizeof(*req), sizeof(*resp));
+	if (IS_ERR(xfer)) {
+		ret = PTR_ERR(xfer);
+		dev_err(dev, "Message alloc failed(%d)\n", ret);
+		return ret;
+	}
+	req = (struct ti_sci_msg_req_query_clock_freq *)xfer->xfer_buf;
+	req->dev_id = dev_id;
+	req->clk_id = clk_id;
+	req->min_freq_hz = min_freq;
+	req->target_freq_hz = target_freq;
+	req->max_freq_hz = max_freq;
+
+	ret = ti_sci_do_xfer(info, xfer);
+	if (ret) {
+		dev_err(dev, "Mbox send fail %d\n", ret);
+		goto fail;
+	}
+
+	resp = (struct ti_sci_msg_resp_query_clock_freq *)xfer->xfer_buf;
+
+	if (!ti_sci_is_response_ack(resp))
+		ret = -ENODEV;
+	else
+		*match_freq = resp->freq_hz;
+
+fail:
+	ti_sci_put_one_xfer(&info->minfo, xfer);
+
+	return ret;
+}
+
+/**
+ * ti_sci_cmd_clk_set_freq() - Set a frequency for clock
+ * @handle:	pointer to TI SCI handle
+ * @dev_id:	Device identifier this request is for
+ * @clk_id:	Clock identifier for the device for this request.
+ *		Each device has its own set of clock inputs. This indexes
+ *		which clock input to modify.
+ * @min_freq:	The minimum allowable frequency in Hz. This is the minimum
+ *		allowable programmed frequency and does not account for clock
+ *		tolerances and jitter.
+ * @target_freq: The target clock frequency in Hz. A frequency will be
+ *		found as close to this target frequency as possible.
+ * @max_freq:	The maximum allowable frequency in Hz. This is the maximum
+ *		allowable programmed frequency and does not account for clock
+ *		tolerances and jitter.
+ *
+ * Return: 0 if all went well, else returns appropriate error value.
+ */
+static int ti_sci_cmd_clk_set_freq(const struct ti_sci_handle *handle,
+				   u32 dev_id, u8 clk_id, u64 min_freq,
+				   u64 target_freq, u64 max_freq)
+{
+	struct ti_sci_info *info;
+	struct ti_sci_msg_req_set_clock_freq *req;
+	struct ti_sci_msg_hdr *resp;
+	struct ti_sci_xfer *xfer;
+	struct device *dev;
+	int ret = 0;
+
+	if (IS_ERR(handle))
+		return PTR_ERR(handle);
+	if (!handle)
+		return -EINVAL;
+
+	info = handle_to_ti_sci_info(handle);
+	dev = info->dev;
+
+	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_SET_CLOCK_FREQ,
+				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
+				   sizeof(*req), sizeof(*resp));
+	if (IS_ERR(xfer)) {
+		ret = PTR_ERR(xfer);
+		dev_err(dev, "Message alloc failed(%d)\n", ret);
+		return ret;
+	}
+	req = (struct ti_sci_msg_req_set_clock_freq *)xfer->xfer_buf;
+	req->dev_id = dev_id;
+	req->clk_id = clk_id;
+	req->min_freq_hz = min_freq;
+	req->target_freq_hz = target_freq;
+	req->max_freq_hz = max_freq;
+
+	ret = ti_sci_do_xfer(info, xfer);
+	if (ret) {
+		dev_err(dev, "Mbox send fail %d\n", ret);
+		goto fail;
+	}
+
+	resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
+
+	ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;
+
+fail:
+	ti_sci_put_one_xfer(&info->minfo, xfer);
+
+	return ret;
+}
+
+/**
+ * ti_sci_cmd_clk_get_freq() - Get current frequency
+ * @handle:	pointer to TI SCI handle
+ * @dev_id:	Device identifier this request is for
+ * @clk_id:	Clock identifier for the device for this request.
+ *		Each device has its own set of clock inputs. This indexes
+ *		which clock input to query.
+ * @freq:	Current frequency in Hz
+ *
+ * Return: 0 if all went well, else returns appropriate error value.
+ */
+static int ti_sci_cmd_clk_get_freq(const struct ti_sci_handle *handle,
+				   u32 dev_id, u8 clk_id, u64 *freq)
+{
+	struct ti_sci_info *info;
+	struct ti_sci_msg_req_get_clock_freq *req;
+	struct ti_sci_msg_resp_get_clock_freq *resp;
+	struct ti_sci_xfer *xfer;
+	struct device *dev;
+	int ret = 0;
+
+	if (IS_ERR(handle))
+		return PTR_ERR(handle);
+	if (!handle || !freq)
+		return -EINVAL;
+
+	info = handle_to_ti_sci_info(handle);
+	dev = info->dev;
+
+	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_GET_CLOCK_FREQ,
+				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
+				   sizeof(*req), sizeof(*resp));
+	if (IS_ERR(xfer)) {
+		ret = PTR_ERR(xfer);
+		dev_err(dev, "Message alloc failed(%d)\n", ret);
+		return ret;
+	}
+	req = (struct ti_sci_msg_req_get_clock_freq *)xfer->xfer_buf;
+	req->dev_id = dev_id;
+	req->clk_id = clk_id;
+
+	ret = ti_sci_do_xfer(info, xfer);
+	if (ret) {
+		dev_err(dev, "Mbox send fail %d\n", ret);
+		goto fail;
+	}
+
+	resp = (struct ti_sci_msg_resp_get_clock_freq *)xfer->xfer_buf;
+
+	if (!ti_sci_is_response_ack(resp))
+		ret = -ENODEV;
+	else
+		*freq = resp->freq_hz;
+
+fail:
+	ti_sci_put_one_xfer(&info->minfo, xfer);
+
+	return ret;
+}
+
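+/**
+ * ti_sci_cmd_core_reboot() - Command to request a system reset
+ * @handle:	pointer to TI SCI handle
+ *
+ * Return: 0 if all went well, else returns appropriate error value.
+ */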
+static int ti_sci_cmd_core_reboot(const struct ti_sci_handle *handle)
+{
+	struct ti_sci_info *info;
+	struct ti_sci_msg_req_reboot *req;
+	struct ti_sci_msg_hdr *resp;
+	struct ti_sci_xfer *xfer;
+	struct device *dev;
+	int ret = 0;
+
+	if (IS_ERR(handle))
+		return PTR_ERR(handle);
+	if (!handle)
+		return -EINVAL;
+
+	info = handle_to_ti_sci_info(handle);
+	dev = info->dev;
+
+	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_SYS_RESET,
+				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
+				   sizeof(*req), sizeof(*resp));
+	if (IS_ERR(xfer)) {
+		ret = PTR_ERR(xfer);
+		dev_err(dev, "Message alloc failed(%d)\n", ret);
+		return ret;
+	}
+	req = (struct ti_sci_msg_req_reboot *)xfer->xfer_buf;
+
+	ret = ti_sci_do_xfer(info, xfer);
+	if (ret) {
+		dev_err(dev, "Mbox send fail %d\n", ret);
+		goto fail;
+	}
+
+	resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
+
+	ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;
+
+fail:
+	ti_sci_put_one_xfer(&info->minfo, xfer);
+
+	return ret;
+}
+
+/*
+ * ti_sci_setup_ops() - Setup the operations structures
+ * @info:	pointer to SCI entity information
+ */
+static void ti_sci_setup_ops(struct ti_sci_info *info)
+{
+	struct ti_sci_ops *ops = &info->handle.ops;
+	struct ti_sci_core_ops *core_ops = &ops->core_ops;
+	struct ti_sci_dev_ops *dops = &ops->dev_ops;
+	struct ti_sci_clk_ops *cops = &ops->clk_ops;
+
+	core_ops->reboot_device = ti_sci_cmd_core_reboot;
+
+	dops->get_device = ti_sci_cmd_get_device;
+	dops->idle_device = ti_sci_cmd_idle_device;
+	dops->put_device = ti_sci_cmd_put_device;
+
+	dops->is_valid = ti_sci_cmd_dev_is_valid;
+	dops->get_context_loss_count = ti_sci_cmd_dev_get_clcnt;
+	dops->is_idle = ti_sci_cmd_dev_is_idle;
+	dops->is_stop = ti_sci_cmd_dev_is_stop;
+	dops->is_on = ti_sci_cmd_dev_is_on;
+	dops->is_transitioning = ti_sci_cmd_dev_is_trans;
+	dops->set_device_resets = ti_sci_cmd_set_device_resets;
+	dops->get_device_resets = ti_sci_cmd_get_device_resets;
+
+	cops->get_clock = ti_sci_cmd_get_clock;
+	cops->idle_clock = ti_sci_cmd_idle_clock;
+	cops->put_clock = ti_sci_cmd_put_clock;
+	cops->is_auto = ti_sci_cmd_clk_is_auto;
+	cops->is_on = ti_sci_cmd_clk_is_on;
+	cops->is_off = ti_sci_cmd_clk_is_off;
+
+	cops->set_parent = ti_sci_cmd_clk_set_parent;
+	cops->get_parent = ti_sci_cmd_clk_get_parent;
+	cops->get_num_parents = ti_sci_cmd_clk_get_num_parents;
+
+	cops->get_best_match_freq = ti_sci_cmd_clk_get_match_freq;
+	cops->set_freq = ti_sci_cmd_clk_set_freq;
+	cops->get_freq = ti_sci_cmd_clk_get_freq;
+}
+
+/**
+ * ti_sci_get_handle() - Get the TI SCI handle for a device
+ * @dev:	Pointer to device for which we want SCI handle
+ *
+ * NOTE: The function does not track individual clients of the framework
+ * and is expected to be maintained by caller of TI SCI protocol library.
+ * ti_sci_put_handle must be balanced with successful ti_sci_get_handle.
+ * Return: pointer to handle if successful, else:
+ * -EPROBE_DEFER if the instance is not ready
+ * -ENODEV if the required node handler is missing
+ * -EINVAL if invalid conditions are encountered.
+ */
+const struct ti_sci_handle *ti_sci_get_handle(struct device *dev)
+{
+	struct device_node *ti_sci_np;
+	struct list_head *p;
+	struct ti_sci_handle *handle = NULL;
+	struct ti_sci_info *info;
+
+	if (!dev) {
+		pr_err("I need a device pointer\n");
+		return ERR_PTR(-EINVAL);
+	}
+	ti_sci_np = of_get_parent(dev->of_node);
+	if (!ti_sci_np) {
+		dev_err(dev, "No OF information\n");
+		return ERR_PTR(-EINVAL);
+	}
+
+	mutex_lock(&ti_sci_list_mutex);
+	list_for_each(p, &ti_sci_list) {
+		info = list_entry(p, struct ti_sci_info, node);
+		if (ti_sci_np == info->dev->of_node) {
+			handle = &info->handle;
+			info->users++;
+			break;
+		}
+	}
+	mutex_unlock(&ti_sci_list_mutex);
+	of_node_put(ti_sci_np);
+
+	if (!handle)
+		return ERR_PTR(-EPROBE_DEFER);
+
+	return handle;
+}
+EXPORT_SYMBOL_GPL(ti_sci_get_handle);
+
+/**
+ * ti_sci_put_handle() - Release the handle acquired by ti_sci_get_handle
+ * @handle:	Handle acquired by ti_sci_get_handle
+ *
+ * NOTE: The function does not track individual clients of the framework
+ * and is expected to be maintained by caller of TI SCI protocol library.
+ * ti_sci_put_handle must be balanced with successful ti_sci_get_handle.
+ *
+ * Return: 0 if successfully released;
+ * if an error pointer was passed, it returns the error value back,
+ * if null was passed, it returns -EINVAL;
+ */
+int ti_sci_put_handle(const struct ti_sci_handle *handle)
+{
+	struct ti_sci_info *info;
+
+	if (IS_ERR(handle))
+		return PTR_ERR(handle);
+	if (!handle)
+		return -EINVAL;
+
+	info = handle_to_ti_sci_info(handle);
+	mutex_lock(&ti_sci_list_mutex);
+	if (!WARN_ON(!info->users))
+		info->users--;
+	mutex_unlock(&ti_sci_list_mutex);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(ti_sci_put_handle);
+
+static void devm_ti_sci_release(struct device *dev, void *res)
+{
+	const struct ti_sci_handle **ptr = res;
+	const struct ti_sci_handle *handle = *ptr;
+	int ret;
+
+	ret = ti_sci_put_handle(handle);
+	if (ret)
+		dev_err(dev, "failed to put handle %d\n", ret);
+}
+
+/**
+ * devm_ti_sci_get_handle() - Managed get handle
+ * @dev:	device for which we want the SCI handle
+ *
+ * NOTE: This releases the handle once the device resources are
+ * no longer needed. MUST NOT BE released with ti_sci_put_handle.
+ * The function does not track individual clients of the framework
+ * and is expected to be maintained by caller of TI SCI protocol library.
+ *
+ * Return: pointer to handle if successful, else corresponding error pointer.
+ */
+const struct ti_sci_handle *devm_ti_sci_get_handle(struct device *dev)
+{
+	const struct ti_sci_handle **ptr;
+	const struct ti_sci_handle *handle;
+
+	ptr = devres_alloc(devm_ti_sci_release, sizeof(*ptr), GFP_KERNEL);
+	if (!ptr)
+		return ERR_PTR(-ENOMEM);
+	handle = ti_sci_get_handle(dev);
+
+	if (!IS_ERR(handle)) {
+		*ptr = handle;
+		devres_add(dev, ptr);
+	} else {
+		devres_free(ptr);
+	}
+
+	return handle;
+}
+EXPORT_SYMBOL_GPL(devm_ti_sci_get_handle);
+
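+/*
+ * Illustrative client usage (example only, not part of this driver; the
+ * device identifier 42 is hypothetical):
+ *
+ *	const struct ti_sci_handle *h = devm_ti_sci_get_handle(dev);
+ *
+ *	if (IS_ERR(h))
+ *		return PTR_ERR(h);	(-EPROBE_DEFER until ti-sci probes)
+ *	ret = h->ops.dev_ops.get_device(h, 42);	(power on device 42)
+ *	...
+ *	ret = h->ops.dev_ops.put_device(h, 42);	(release it again)
+ */
+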
+static int tisci_reboot_handler(struct notifier_block *nb, unsigned long mode,
+				void *cmd)
+{
+	struct ti_sci_info *info = reboot_to_ti_sci_info(nb);
+	const struct ti_sci_handle *handle = &info->handle;
+
+	ti_sci_cmd_core_reboot(handle);
+
+	/* Whether the call fails or passes, we should not be here at all */
+	return NOTIFY_BAD;
+}
+
+/* Description for K2G */
+static const struct ti_sci_desc ti_sci_pmmc_k2g_desc = {
+	.host_id = 2,
+	/* Conservative duration */
+	.max_rx_timeout_ms = 1000,
+	/* Limited by MBOX_TX_QUEUE_LEN. K2G can handle up to 128 messages! */
+	.max_msgs = 20,
+	.max_msg_size = 64,
+};
+
+static const struct of_device_id ti_sci_of_match[] = {
+	{.compatible = "ti,k2g-sci", .data = &ti_sci_pmmc_k2g_desc},
+	{ /* Sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, ti_sci_of_match);
+
+static int ti_sci_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	const struct of_device_id *of_id;
+	const struct ti_sci_desc *desc;
+	struct ti_sci_xfer *xfer;
+	struct ti_sci_info *info = NULL;
+	struct ti_sci_xfers_info *minfo;
+	struct mbox_client *cl;
+	int ret = -EINVAL;
+	int i;
+	int reboot = 0;
+
+	of_id = of_match_device(ti_sci_of_match, dev);
+	if (!of_id) {
+		dev_err(dev, "OF data missing\n");
+		return -EINVAL;
+	}
+	desc = of_id->data;
+
+	info = devm_kzalloc(dev, sizeof(*info), GFP_KERNEL);
+	if (!info)
+		return -ENOMEM;
+
+	info->dev = dev;
+	info->desc = desc;
+	reboot = of_property_read_bool(dev->of_node,
+				       "ti,system-reboot-controller");
+	INIT_LIST_HEAD(&info->node);
+	minfo = &info->minfo;
+
+	/*
+	 * Pre-allocate messages.
+	 * NEVER allocate more than what we can indicate in hdr.seq (a u8,
+	 * so at most 256 messages); if we have a data description bug,
+	 * force a fix.
+	 */
+	if (WARN_ON(desc->max_msgs >=
+		    1 << 8 * sizeof(((struct ti_sci_msg_hdr *)0)->seq)))
+		return -EINVAL;
+
+	minfo->xfer_block = devm_kcalloc(dev,
+					 desc->max_msgs,
+					 sizeof(*minfo->xfer_block),
+					 GFP_KERNEL);
+	if (!minfo->xfer_block)
+		return -ENOMEM;
+
+	minfo->xfer_alloc_table = devm_kcalloc(dev,
+					       BITS_TO_LONGS(desc->max_msgs),
+					       sizeof(unsigned long),
+					       GFP_KERNEL);
+	if (!minfo->xfer_alloc_table)
+		return -ENOMEM;
+	bitmap_zero(minfo->xfer_alloc_table, desc->max_msgs);
+
+	/* Pre-initialize the buffer pointer to pre-allocated buffers */
+	for (i = 0, xfer = minfo->xfer_block; i < desc->max_msgs; i++, xfer++) {
+		xfer->xfer_buf = devm_kcalloc(dev, 1, desc->max_msg_size,
+					      GFP_KERNEL);
+		if (!xfer->xfer_buf)
+			return -ENOMEM;
+
+		xfer->tx_message.buf = xfer->xfer_buf;
+		init_completion(&xfer->done);
+	}
+
+	ret = ti_sci_debugfs_create(pdev, info);
+	if (ret)
+		dev_warn(dev, "Failed to create debug file\n");
+
+	platform_set_drvdata(pdev, info);
+
+	cl = &info->cl;
+	cl->dev = dev;
+	cl->tx_block = false;
+	cl->rx_callback = ti_sci_rx_callback;
+	cl->knows_txdone = true;
+
+	spin_lock_init(&minfo->xfer_lock);
+	sema_init(&minfo->sem_xfer_count, desc->max_msgs);
+
+	info->chan_rx = mbox_request_channel_byname(cl, "rx");
+	if (IS_ERR(info->chan_rx)) {
+		ret = PTR_ERR(info->chan_rx);
+		goto out;
+	}
+
+	info->chan_tx = mbox_request_channel_byname(cl, "tx");
+	if (IS_ERR(info->chan_tx)) {
+		ret = PTR_ERR(info->chan_tx);
+		goto out;
+	}
+	ret = ti_sci_cmd_get_revision(info);
+	if (ret) {
+		dev_err(dev, "Unable to communicate with TISCI(%d)\n", ret);
+		goto out;
+	}
+
+	ti_sci_setup_ops(info);
+
+	if (reboot) {
+		info->nb.notifier_call = tisci_reboot_handler;
+		info->nb.priority = 128;
+
+		ret = register_restart_handler(&info->nb);
+		if (ret) {
+			dev_err(dev, "reboot registration fail(%d)\n", ret);
+			return ret;
+		}
+	}
+
+	dev_info(dev, "ABI: %d.%d (firmware rev 0x%04x '%s')\n",
+		 info->handle.version.abi_major, info->handle.version.abi_minor,
+		 info->handle.version.firmware_revision,
+		 info->handle.version.firmware_description);
+
+	mutex_lock(&ti_sci_list_mutex);
+	list_add_tail(&info->node, &ti_sci_list);
+	mutex_unlock(&ti_sci_list_mutex);
+
+	return of_platform_populate(dev->of_node, NULL, NULL, dev);
+out:
+	if (!IS_ERR(info->chan_tx))
+		mbox_free_channel(info->chan_tx);
+	if (!IS_ERR(info->chan_rx))
+		mbox_free_channel(info->chan_rx);
+	debugfs_remove(info->d);
+	return ret;
+}
+
+static int ti_sci_remove(struct platform_device *pdev)
+{
+	struct ti_sci_info *info;
+	struct device *dev = &pdev->dev;
+	int ret = 0;
+
+	of_platform_depopulate(dev);
+
+	info = platform_get_drvdata(pdev);
+
+	if (info->nb.notifier_call)
+		unregister_restart_handler(&info->nb);
+
+	mutex_lock(&ti_sci_list_mutex);
+	if (info->users)
+		ret = -EBUSY;
+	else
+		list_del(&info->node);
+	mutex_unlock(&ti_sci_list_mutex);
+
+	if (!ret) {
+		ti_sci_debugfs_destroy(pdev, info);
+
+		/* Safe to free channels since no more users */
+		mbox_free_channel(info->chan_tx);
+		mbox_free_channel(info->chan_rx);
+	}
+
+	return ret;
+}
+
+static struct platform_driver ti_sci_driver = {
+	.probe = ti_sci_probe,
+	.remove = ti_sci_remove,
+	.driver = {
+		   .name = "ti-sci",
+		   .of_match_table = of_match_ptr(ti_sci_of_match),
+	},
+};
+module_platform_driver(ti_sci_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("TI System Control Interface (SCI) driver");
+MODULE_AUTHOR("Nishanth Menon");
+MODULE_ALIAS("platform:ti-sci");
diff --git a/drivers/firmware/ti_sci.h b/drivers/firmware/ti_sci.h
new file mode 100644
index 0000000..12bf316
--- /dev/null
+++ b/drivers/firmware/ti_sci.h
@@ -0,0 +1,464 @@
+/* SPDX-License-Identifier: BSD-3-Clause */
+/*
+ * Texas Instruments System Control Interface (TISCI) Protocol
+ *
+ * Communication protocol with TI SCI hardware
+ * The system works in a message response protocol
+ * See: http://processors.wiki.ti.com/index.php/TISCI for details
+ *
+ * Copyright (C)  2015-2016 Texas Instruments Incorporated - http://www.ti.com/
+ */
+
+#ifndef __TI_SCI_H
+#define __TI_SCI_H
+
+/* Generic Messages */
+#define TI_SCI_MSG_ENABLE_WDT	0x0000
+#define TI_SCI_MSG_WAKE_RESET	0x0001
+#define TI_SCI_MSG_VERSION	0x0002
+#define TI_SCI_MSG_WAKE_REASON	0x0003
+#define TI_SCI_MSG_GOODBYE	0x0004
+#define TI_SCI_MSG_SYS_RESET	0x0005
+
+/* Device requests */
+#define TI_SCI_MSG_SET_DEVICE_STATE	0x0200
+#define TI_SCI_MSG_GET_DEVICE_STATE	0x0201
+#define TI_SCI_MSG_SET_DEVICE_RESETS	0x0202
+
+/* Clock requests */
+#define TI_SCI_MSG_SET_CLOCK_STATE	0x0100
+#define TI_SCI_MSG_GET_CLOCK_STATE	0x0101
+#define TI_SCI_MSG_SET_CLOCK_PARENT	0x0102
+#define TI_SCI_MSG_GET_CLOCK_PARENT	0x0103
+#define TI_SCI_MSG_GET_NUM_CLOCK_PARENTS 0x0104
+#define TI_SCI_MSG_SET_CLOCK_FREQ	0x010c
+#define TI_SCI_MSG_QUERY_CLOCK_FREQ	0x010d
+#define TI_SCI_MSG_GET_CLOCK_FREQ	0x010e
+
+/**
+ * struct ti_sci_msg_hdr - Generic Message Header for all messages and responses
+ * @type:	Type of messages: One of TI_SCI_MSG* values
+ * @host:	Host of the message
+ * @seq:	Message identifier indicating a transfer sequence
+ * @flags:	Flag for the message
+ */
+struct ti_sci_msg_hdr {
+	u16 type;
+	u8 host;
+	u8 seq;
+#define TI_SCI_MSG_FLAG(val)			(1 << (val))
+#define TI_SCI_FLAG_REQ_GENERIC_NORESPONSE	0x0
+#define TI_SCI_FLAG_REQ_ACK_ON_RECEIVED		TI_SCI_MSG_FLAG(0)
+#define TI_SCI_FLAG_REQ_ACK_ON_PROCESSED	TI_SCI_MSG_FLAG(1)
+#define TI_SCI_FLAG_RESP_GENERIC_NACK		0x0
+#define TI_SCI_FLAG_RESP_GENERIC_ACK		TI_SCI_MSG_FLAG(1)
+	/* Additional Flags */
+	u32 flags;
+} __packed;
+
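+/*
+ * Example (illustrative): a request that wants confirmation once the
+ * message has been acted upon sets TI_SCI_FLAG_REQ_ACK_ON_PROCESSED in
+ * hdr.flags, and the response is then checked for the ACK bit:
+ *
+ *	hdr->flags = TI_SCI_FLAG_REQ_ACK_ON_PROCESSED;
+ *	...
+ *	if (!(resp_hdr->flags & TI_SCI_FLAG_RESP_GENERIC_ACK))
+ *		return -ENODEV;
+ *
+ * This is exactly what ti_sci_is_response_ack() in ti_sci.c tests.
+ */
+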
+/**
+ * struct ti_sci_msg_resp_version - Response for a message
+ * @hdr:		Generic header
+ * @firmware_description: String describing the firmware
+ * @firmware_revision:	Firmware revision
+ * @abi_major:		Major version of the ABI that firmware supports
+ * @abi_minor:		Minor version of the ABI that firmware supports
+ *
+ * In general, ABI version changes follow the rule that minor version increments
+ * are backward compatible. Major revision changes in ABI may not be
+ * backward compatible.
+ *
+ * Response to a generic message with message type TI_SCI_MSG_VERSION
+ */
+struct ti_sci_msg_resp_version {
+	struct ti_sci_msg_hdr hdr;
+	char firmware_description[32];
+	u16 firmware_revision;
+	u8 abi_major;
+	u8 abi_minor;
+} __packed;
+
+/**
+ * struct ti_sci_msg_req_reboot - Reboot the SoC
+ * @hdr:	Generic Header
+ *
+ * Request type is TI_SCI_MSG_SYS_RESET, responded with a generic
+ * ACK/NACK message.
+ */
+struct ti_sci_msg_req_reboot {
+	struct ti_sci_msg_hdr hdr;
+} __packed;
+
+/**
+ * struct ti_sci_msg_req_set_device_state - Set the desired state of the device
+ * @hdr:		Generic header
+ * @id:	Indicates which device to modify
+ * @reserved: Reserved space in message, must be 0 for backward compatibility
+ * @state: The desired state of the device.
+ *
+ * Certain flags can also be set to alter the device state:
+ * + MSG_FLAG_DEVICE_WAKE_ENABLED - Configure the device to be a wake source.
+ * The meaning of this flag will vary slightly from device to device and from
+ * SoC to SoC but it generally allows the device to wake the SoC out of deep
+ * suspend states.
+ * + MSG_FLAG_DEVICE_RESET_ISO - Enable reset isolation for this device.
+ * + MSG_FLAG_DEVICE_EXCLUSIVE - Claim this device exclusively. When passed
+ * with STATE_RETENTION or STATE_ON, it will claim the device exclusively.
+ * If another host already has this device set to STATE_RETENTION or STATE_ON,
+ * the message will fail. Once successful, other hosts attempting to set
+ * STATE_RETENTION or STATE_ON will fail.
+ *
+ * Request type is TI_SCI_MSG_SET_DEVICE_STATE, responded with a generic
+ * ACK/NACK message.
+ */
+struct ti_sci_msg_req_set_device_state {
+	/* Additional hdr->flags options */
+#define MSG_FLAG_DEVICE_WAKE_ENABLED	TI_SCI_MSG_FLAG(8)
+#define MSG_FLAG_DEVICE_RESET_ISO	TI_SCI_MSG_FLAG(9)
+#define MSG_FLAG_DEVICE_EXCLUSIVE	TI_SCI_MSG_FLAG(10)
+	struct ti_sci_msg_hdr hdr;
+	u32 id;
+	u32 reserved;
+
+#define MSG_DEVICE_SW_STATE_AUTO_OFF	0
+#define MSG_DEVICE_SW_STATE_RETENTION	1
+#define MSG_DEVICE_SW_STATE_ON		2
+	u8 state;
+} __packed;
+
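+/*
+ * Example (illustrative): claiming a device exclusively and powering it
+ * on, as ti_sci_cmd_get_device() in ti_sci.c does, amounts to:
+ *
+ *	hdr.flags: MSG_FLAG_DEVICE_EXCLUSIVE | TI_SCI_FLAG_REQ_ACK_ON_PROCESSED
+ *	state:     MSG_DEVICE_SW_STATE_ON
+ */
+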
+/**
+ * struct ti_sci_msg_req_get_device_state - Request to get device state.
+ * @hdr:		Generic header
+ * @id:		Device Identifier
+ *
+ * Request type is TI_SCI_MSG_GET_DEVICE_STATE, responded with device state
+ * information.
+ */
+struct ti_sci_msg_req_get_device_state {
+	struct ti_sci_msg_hdr hdr;
+	u32 id;
+} __packed;
+
+/**
+ * struct ti_sci_msg_resp_get_device_state - Response to get device request.
+ * @hdr:		Generic header
+ * @context_loss_count: Indicates how many times the device has lost context. A
+ *	driver can use this monotonic counter to determine if the device has
+ *	lost context since the last time this message was exchanged.
+ * @resets: Programmed state of the reset lines.
+ * @programmed_state:	The state as programmed by set_device.
+ *			- Uses the MSG_DEVICE_SW_* macros
+ * @current_state:	The actual state of the hardware.
+ *
+ * Response to request TI_SCI_MSG_GET_DEVICE_STATE.
+ */
+struct ti_sci_msg_resp_get_device_state {
+	struct ti_sci_msg_hdr hdr;
+	u32 context_loss_count;
+	u32 resets;
+	u8 programmed_state;
+#define MSG_DEVICE_HW_STATE_OFF		0
+#define MSG_DEVICE_HW_STATE_ON		1
+#define MSG_DEVICE_HW_STATE_TRANS	2
+	u8 current_state;
+} __packed;
+
+/**
+ * struct ti_sci_msg_req_set_device_resets - Set the desired resets
+ *				configuration of the device
+ * @hdr:		Generic header
+ * @id:	Indicates which device to modify
+ * @resets: A bit field of resets for the device. The meaning, behavior,
+ *	and usage of the reset flags are device specific. 0 for a bit
+ *	indicates releasing the reset represented by that bit while 1
+ *	indicates keeping it held.
+ *
+ * Request type is TI_SCI_MSG_SET_DEVICE_RESETS, responded with a generic
+ * ACK/NACK message.
+ */
+struct ti_sci_msg_req_set_device_resets {
+	struct ti_sci_msg_hdr hdr;
+	u32 id;
+	u32 resets;
+} __packed;
+
+/**
+ * struct ti_sci_msg_req_set_clock_state - Request to setup a Clock state
+ * @hdr:	Generic Header, Certain flags can be set specific to the clocks:
+ *		MSG_FLAG_CLOCK_ALLOW_SSC: Allow this clock to be modified
+ *		via spread spectrum clocking.
+ *		MSG_FLAG_CLOCK_ALLOW_FREQ_CHANGE: Allow this clock's
+ *		frequency to be changed while it is running so long as it
+ *		is within the min/max limits.
+ *		MSG_FLAG_CLOCK_INPUT_TERM: Enable input termination, this
+ *		is only applicable to clock inputs on the SoC pseudo-device.
+ * @dev_id:	Device identifier this request is for
+ * @clk_id:	Clock identifier for the device for this request.
+ *		Each device has its own set of clock inputs. This indexes
+ *		which clock input to modify.
+ * @request_state: Request the state for the clock to be set to.
+ *		MSG_CLOCK_SW_STATE_UNREQ: The IP does not require this clock,
+ *		it can be disabled, regardless of the state of the device
+ *		MSG_CLOCK_SW_STATE_AUTO: Allow the System Controller to
+ *		automatically manage the state of this clock. If the device
+ *		is enabled, then the clock is enabled. If the device is set
+ *		to off or retention, then the clock is internally set as not
+ *		being required by the device (default).
+ *		MSG_CLOCK_SW_STATE_REQ:  Configure the clock to be enabled,
+ *		regardless of the state of the device.
+ *
+ * Normally, all required clocks are managed by the TISCI entity; this is
+ * used only for specific control *IF* required. The auto managed state is
+ * MSG_CLOCK_SW_STATE_AUTO; in the other states, the TISCI entity assumes
+ * that the remote host will control the clock explicitly.
+ *
+ * Request type is TI_SCI_MSG_SET_CLOCK_STATE, response is a generic
+ * ACK or NACK message.
+ */
+struct ti_sci_msg_req_set_clock_state {
+	/* Additional hdr->flags options */
+#define MSG_FLAG_CLOCK_ALLOW_SSC		TI_SCI_MSG_FLAG(8)
+#define MSG_FLAG_CLOCK_ALLOW_FREQ_CHANGE	TI_SCI_MSG_FLAG(9)
+#define MSG_FLAG_CLOCK_INPUT_TERM		TI_SCI_MSG_FLAG(10)
+	struct ti_sci_msg_hdr hdr;
+	u32 dev_id;
+	u8 clk_id;
+#define MSG_CLOCK_SW_STATE_UNREQ	0
+#define MSG_CLOCK_SW_STATE_AUTO		1
+#define MSG_CLOCK_SW_STATE_REQ		2
+	u8 request_state;
+} __packed;
+
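+/*
+ * Cross-reference (for orientation): the three request states map onto
+ * the clock ops in ti_sci.c as follows:
+ *
+ *	get_clock()  -> MSG_CLOCK_SW_STATE_REQ   (keep the clock enabled)
+ *	idle_clock() -> MSG_CLOCK_SW_STATE_UNREQ (clock no longer needed)
+ *	put_clock()  -> MSG_CLOCK_SW_STATE_AUTO  (back to firmware control)
+ */
+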
+/**
+ * struct ti_sci_msg_req_get_clock_state - Request for clock state
+ * @hdr:	Generic Header
+ * @dev_id:	Device identifier this request is for
+ * @clk_id:	Clock identifier for the device for this request.
+ *		Each device has its own set of clock inputs. This indexes
+ *		which clock input to get state of.
+ *
+ * Request type is TI_SCI_MSG_GET_CLOCK_STATE, response is state
+ * of the clock
+ */
+struct ti_sci_msg_req_get_clock_state {
+	struct ti_sci_msg_hdr hdr;
+	u32 dev_id;
+	u8 clk_id;
+} __packed;
+
+/**
+ * struct ti_sci_msg_resp_get_clock_state - Response to get clock state
+ * @hdr:	Generic Header
+ * @programmed_state: Any programmed state of the clock. This is one of
+ *		MSG_CLOCK_SW_STATE* values.
+ * @current_state: Current state of the clock. This is one of:
+ *		MSG_CLOCK_HW_STATE_NOT_READY: Clock is not ready
+ *		MSG_CLOCK_HW_STATE_READY: Clock is ready
+ *
+ * Response to TI_SCI_MSG_GET_CLOCK_STATE.
+ */
+struct ti_sci_msg_resp_get_clock_state {
+	struct ti_sci_msg_hdr hdr;
+	u8 programmed_state;
+#define MSG_CLOCK_HW_STATE_NOT_READY	0
+#define MSG_CLOCK_HW_STATE_READY	1
+	u8 current_state;
+} __packed;
+
+/**
+ * struct ti_sci_msg_req_set_clock_parent - Set the clock parent
+ * @hdr:	Generic Header
+ * @dev_id:	Device identifier this request is for
+ * @clk_id:	Clock identifier for the device for this request.
+ *		Each device has its own set of clock inputs. This indexes
+ *		which clock input to modify.
+ * @parent_id:	The new clock parent is selectable by an index via this
+ *		parameter.
+ *
+ * Request type is TI_SCI_MSG_SET_CLOCK_PARENT, response is generic
+ * ACK / NACK message.
+ */
+struct ti_sci_msg_req_set_clock_parent {
+	struct ti_sci_msg_hdr hdr;
+	u32 dev_id;
+	u8 clk_id;
+	u8 parent_id;
+} __packed;
+
+/**
+ * struct ti_sci_msg_req_get_clock_parent - Get the clock parent
+ * @hdr:	Generic Header
+ * @dev_id:	Device identifier this request is for
+ * @clk_id:	Clock identifier for the device for this request.
+ *		Each device has its own set of clock inputs. This indexes
+ *		which clock input to get the parent for.
+ *
+ * Request type is TI_SCI_MSG_GET_CLOCK_PARENT, response is parent information
+ */
+struct ti_sci_msg_req_get_clock_parent {
+	struct ti_sci_msg_hdr hdr;
+	u32 dev_id;
+	u8 clk_id;
+} __packed;
+
+/**
+ * struct ti_sci_msg_resp_get_clock_parent - Response with clock parent
+ * @hdr:	Generic Header
+ * @parent_id:	The current clock parent
+ *
+ * Response to TI_SCI_MSG_GET_CLOCK_PARENT.
+ */
+struct ti_sci_msg_resp_get_clock_parent {
+	struct ti_sci_msg_hdr hdr;
+	u8 parent_id;
+} __packed;
+
+/**
+ * struct ti_sci_msg_req_get_clock_num_parents - Request to get clock parents
+ * @hdr:	Generic header
+ * @dev_id:	Device identifier this request is for
+ * @clk_id:	Clock identifier for the device for this request.
+ *
+ * This request provides information about how many clock parent options
+ * are available for a given clock to a device. This is typically used
+ * for input clocks.
+ *
+ * Request type is TI_SCI_MSG_GET_NUM_CLOCK_PARENTS, response is appropriate
+ * message, or NACK in case of inability to satisfy request.
+ */
+struct ti_sci_msg_req_get_clock_num_parents {
+	struct ti_sci_msg_hdr hdr;
+	u32 dev_id;
+	u8 clk_id;
+} __packed;
+
+/**
+ * struct ti_sci_msg_resp_get_clock_num_parents - Response for get clk parents
+ * @hdr:		Generic header
+ * @num_parents:	Number of clock parents
+ *
+ * Response to TI_SCI_MSG_GET_NUM_CLOCK_PARENTS
+ */
+struct ti_sci_msg_resp_get_clock_num_parents {
+	struct ti_sci_msg_hdr hdr;
+	u8 num_parents;
+} __packed;
+
+/**
+ * struct ti_sci_msg_req_query_clock_freq - Request to query a frequency
+ * @hdr:	Generic Header
+ * @dev_id:	Device identifier this request is for
+ * @min_freq_hz: The minimum allowable frequency in Hz. This is the minimum
+ *		allowable programmed frequency and does not account for clock
+ *		tolerances and jitter.
+ * @target_freq_hz: The target clock frequency. A frequency will be found
+ *		as close to this target frequency as possible.
+ * @max_freq_hz: The maximum allowable frequency in Hz. This is the maximum
+ *		allowable programmed frequency and does not account for clock
+ *		tolerances and jitter.
+ * @clk_id:	Clock identifier for the device for this request.
+ *
+ * NOTE: Normally clock frequency management is automatically done by TISCI
+ * entity. In case of specific requests, TISCI evaluates capability to achieve
+ * requested frequency within provided range and responds with
+ * result message.
+ *
+ * Request type is TI_SCI_MSG_QUERY_CLOCK_FREQ, response is appropriate message,
+ * or NACK in case of inability to satisfy request.
+ */
+struct ti_sci_msg_req_query_clock_freq {
+	struct ti_sci_msg_hdr hdr;
+	u32 dev_id;
+	u64 min_freq_hz;
+	u64 target_freq_hz;
+	u64 max_freq_hz;
+	u8 clk_id;
+} __packed;
+
+/**
+ * struct ti_sci_msg_resp_query_clock_freq - Response to a clock frequency query
+ * @hdr:	Generic Header
+ * @freq_hz:	Frequency that is the best match in Hz.
+ *
+ * Response to request type TI_SCI_MSG_QUERY_CLOCK_FREQ. NOTE: if the request
+ * cannot be satisfied, the message will be of type NACK.
+ */
+struct ti_sci_msg_resp_query_clock_freq {
+	struct ti_sci_msg_hdr hdr;
+	u64 freq_hz;
+} __packed;
+
+/**
+ * struct ti_sci_msg_req_set_clock_freq - Request to setup a clock frequency
+ * @hdr:	Generic Header
+ * @dev_id:	Device identifier this request is for
+ * @min_freq_hz: The minimum allowable frequency in Hz. This is the minimum
+ *		allowable programmed frequency and does not account for clock
+ *		tolerances and jitter.
+ * @target_freq_hz: The target clock frequency. The clock will be programmed
+ *		at a rate as close to this target frequency as possible.
+ * @max_freq_hz: The maximum allowable frequency in Hz. This is the maximum
+ *		allowable programmed frequency and does not account for clock
+ *		tolerances and jitter.
+ * @clk_id:	Clock identifier for the device for this request.
+ *
+ * NOTE: Normally clock frequency management is automatically done by TISCI
+ * entity. In case of specific requests, TISCI evaluates capability to achieve
+ * requested range and responds with success/failure message.
+ *
+ * This sets the desired frequency for a clock within an allowable
+ * range. This message will fail on an enabled clock unless
+ * MSG_FLAG_CLOCK_ALLOW_FREQ_CHANGE is set for the clock. Additionally,
+ * if other clocks have their frequency modified due to this message,
+ * they also must have the MSG_FLAG_CLOCK_ALLOW_FREQ_CHANGE or be disabled.
+ *
+ * Calling set frequency on a clock input to the SoC pseudo-device will
+ * inform the PMMC of that clock's frequency. Setting a frequency of
+ * zero will indicate the clock is disabled.
+ *
+ * Calling set frequency on clock outputs from the SoC pseudo-device will
+ * function similarly to setting the clock frequency on a device.
+ *
+ * Request type is TI_SCI_MSG_SET_CLOCK_FREQ, response is a generic ACK/NACK
+ * message.
+ */
+struct ti_sci_msg_req_set_clock_freq {
+	struct ti_sci_msg_hdr hdr;
+	u32 dev_id;
+	u64 min_freq_hz;
+	u64 target_freq_hz;
+	u64 max_freq_hz;
+	u8 clk_id;
+} __packed;
+
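+/*
+ * Example (illustrative): requesting roughly 100 MHz with a 10% window;
+ * the firmware programs the closest achievable rate within [min, max]:
+ *
+ *	req->min_freq_hz    =  90000000;
+ *	req->target_freq_hz = 100000000;
+ *	req->max_freq_hz    = 110000000;
+ */
+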
+/**
+ * struct ti_sci_msg_req_get_clock_freq - Request to get the clock frequency
+ * @hdr:	Generic Header
+ * @dev_id:	Device identifier this request is for
+ * @clk_id:	Clock identifier for the device for this request.
+ *
+ * NOTE: Normally clock frequency management is automatically done by TISCI
+ * entity. In some cases, clock frequencies are configured by host.
+ *
+ * Request type is TI_SCI_MSG_GET_CLOCK_FREQ, responded with clock frequency
+ * that the clock is currently at.
+ */
+struct ti_sci_msg_req_get_clock_freq {
+	struct ti_sci_msg_hdr hdr;
+	u32 dev_id;
+	u8 clk_id;
+} __packed;
+
+/**
+ * struct ti_sci_msg_resp_get_clock_freq - Response of clock frequency request
+ * @hdr:	Generic Header
+ * @freq_hz:	Frequency that the clock is currently on, in Hz.
+ *
+ * Response to request type TI_SCI_MSG_GET_CLOCK_FREQ.
+ */
+struct ti_sci_msg_resp_get_clock_freq {
+	struct ti_sci_msg_hdr hdr;
+	u64 freq_hz;
+} __packed;
+
+#endif /* __TI_SCI_H */