Update Linux to v5.10.109

Sourced from [1]

[1] https://cdn.kernel.org/pub/linux/kernel/v5.x/linux-5.10.109.tar.xz

Change-Id: I19bca9fc6762d4e63bcf3e4cba88bbe560d9c76c
Signed-off-by: Olivier Deprez <olivier.deprez@arm.com>
diff --git a/drivers/firmware/arm_scmi/Makefile b/drivers/firmware/arm_scmi/Makefile
index 5f298f0..bc0d54f 100644
--- a/drivers/firmware/arm_scmi/Makefile
+++ b/drivers/firmware/arm_scmi/Makefile
@@ -1,6 +1,11 @@
 # SPDX-License-Identifier: GPL-2.0-only
-obj-y	= scmi-bus.o scmi-driver.o scmi-protocols.o
 scmi-bus-y = bus.o
-scmi-driver-y = driver.o
-scmi-protocols-y = base.o clock.o perf.o power.o reset.o sensors.o
+scmi-driver-y = driver.o notify.o
+scmi-transport-y = shmem.o
+scmi-transport-$(CONFIG_MAILBOX) += mailbox.o
+scmi-transport-$(CONFIG_HAVE_ARM_SMCCC_DISCOVERY) += smc.o
+scmi-protocols-y = base.o clock.o perf.o power.o reset.o sensors.o system.o
+scmi-module-objs := $(scmi-bus-y) $(scmi-driver-y) $(scmi-protocols-y) \
+		    $(scmi-transport-y)
+obj-$(CONFIG_ARM_SCMI_PROTOCOL) += scmi-module.o
 obj-$(CONFIG_ARM_SCMI_POWER_DOMAIN) += scmi_pm_domain.o
diff --git a/drivers/firmware/arm_scmi/base.c b/drivers/firmware/arm_scmi/base.c
index f986ee8..017e5d8 100644
--- a/drivers/firmware/arm_scmi/base.c
+++ b/drivers/firmware/arm_scmi/base.c
@@ -5,7 +5,15 @@
  * Copyright (C) 2018 ARM Ltd.
  */
 
+#define pr_fmt(fmt) "SCMI Notifications BASE - " fmt
+
+#include <linux/scmi_protocol.h>
+
 #include "common.h"
+#include "notify.h"
+
+#define SCMI_BASE_NUM_SOURCES		1
+#define SCMI_BASE_MAX_CMD_ERR_COUNT	1024
 
 enum scmi_base_protocol_cmd {
 	BASE_DISCOVER_VENDOR = 0x3,
@@ -14,6 +22,9 @@
 	BASE_DISCOVER_LIST_PROTOCOLS = 0x6,
 	BASE_DISCOVER_AGENT = 0x7,
 	BASE_NOTIFY_ERRORS = 0x8,
+	BASE_SET_DEVICE_PERMISSIONS = 0x9,
+	BASE_SET_PROTOCOL_PERMISSIONS = 0xa,
+	BASE_RESET_AGENT_CONFIGURATION = 0xb,
 };
 
 struct scmi_msg_resp_base_attributes {
@@ -22,6 +33,19 @@
 	__le16 reserved;
 };
 
+struct scmi_msg_base_error_notify {
+	__le32 event_control;
+#define BASE_TP_NOTIFY_ALL	BIT(0)
+};
+
+struct scmi_base_error_notify_payld {
+	__le32 agent_id;
+	__le32 error_status;
+#define IS_FATAL_ERROR(x)	((x) & BIT(31))
+#define ERROR_CMD_COUNT(x)	FIELD_GET(GENMASK(9, 0), (x))
+	__le64 msg_reports[SCMI_BASE_MAX_CMD_ERR_COUNT];
+};
+
 /**
  * scmi_base_attributes_get() - gets the implementation details
  *	that are associated with the base protocol.
@@ -217,6 +241,83 @@
 	return ret;
 }
 
+static int scmi_base_error_notify(const struct scmi_handle *handle, bool enable)
+{
+	int ret;
+	u32 evt_cntl = enable ? BASE_TP_NOTIFY_ALL : 0;
+	struct scmi_xfer *t;
+	struct scmi_msg_base_error_notify *cfg;
+
+	ret = scmi_xfer_get_init(handle, BASE_NOTIFY_ERRORS,
+				 SCMI_PROTOCOL_BASE, sizeof(*cfg), 0, &t);
+	if (ret)
+		return ret;
+
+	cfg = t->tx.buf;
+	cfg->event_control = cpu_to_le32(evt_cntl);
+
+	ret = scmi_do_xfer(handle, t);
+
+	scmi_xfer_put(handle, t);
+	return ret;
+}
+
+static int scmi_base_set_notify_enabled(const struct scmi_handle *handle,
+					u8 evt_id, u32 src_id, bool enable)
+{
+	int ret;
+
+	ret = scmi_base_error_notify(handle, enable);
+	if (ret)
+		pr_debug("FAIL_ENABLED - evt[%X] ret:%d\n", evt_id, ret);
+
+	return ret;
+}
+
+static void *scmi_base_fill_custom_report(const struct scmi_handle *handle,
+					  u8 evt_id, ktime_t timestamp,
+					  const void *payld, size_t payld_sz,
+					  void *report, u32 *src_id)
+{
+	int i;
+	const struct scmi_base_error_notify_payld *p = payld;
+	struct scmi_base_error_report *r = report;
+
+	/*
+	 * The BaseError notification payload is variable in size, up to
+	 * a maximum length determined by the struct pointed to by p.
+	 * payld_sz is the effective length of this notification payload,
+	 * so it cannot be greater than the maximum size allowed by the
+	 * struct pointed to by p.
+	 */
+	if (evt_id != SCMI_EVENT_BASE_ERROR_EVENT || sizeof(*p) < payld_sz)
+		return NULL;
+
+	r->timestamp = timestamp;
+	r->agent_id = le32_to_cpu(p->agent_id);
+	r->fatal = IS_FATAL_ERROR(le32_to_cpu(p->error_status));
+	r->cmd_count = ERROR_CMD_COUNT(le32_to_cpu(p->error_status));
+	for (i = 0; i < r->cmd_count; i++)
+		r->reports[i] = le64_to_cpu(p->msg_reports[i]);
+	*src_id = 0;
+
+	return r;
+}
+
+static const struct scmi_event base_events[] = {
+	{
+		.id = SCMI_EVENT_BASE_ERROR_EVENT,
+		.max_payld_sz = sizeof(struct scmi_base_error_notify_payld),
+		.max_report_sz = sizeof(struct scmi_base_error_report) +
+				  SCMI_BASE_MAX_CMD_ERR_COUNT * sizeof(u64),
+	},
+};
+
+static const struct scmi_event_ops base_event_ops = {
+	.set_notify_enabled = scmi_base_set_notify_enabled,
+	.fill_custom_report = scmi_base_fill_custom_report,
+};
+
 int scmi_base_protocol_init(struct scmi_handle *h)
 {
 	int id, ret;
@@ -251,6 +352,12 @@
 	dev_dbg(dev, "Found %d protocol(s) %d agent(s)\n", rev->num_protocols,
 		rev->num_agents);
 
+	scmi_register_protocol_events(handle, SCMI_PROTOCOL_BASE,
+				      (4 * SCMI_PROTO_QUEUE_SZ),
+				      &base_event_ops, base_events,
+				      ARRAY_SIZE(base_events),
+				      SCMI_BASE_NUM_SOURCES);
+
 	for (id = 0; id < rev->num_agents; id++) {
 		scmi_base_discover_agent_get(handle, id, name);
 		dev_dbg(dev, "Agent %d: %s\n", id, name);
diff --git a/drivers/firmware/arm_scmi/bus.c b/drivers/firmware/arm_scmi/bus.c
index 66d445b..def8a84 100644
--- a/drivers/firmware/arm_scmi/bus.c
+++ b/drivers/firmware/arm_scmi/bus.c
@@ -28,8 +28,12 @@
 		return NULL;
 
 	for (; id->protocol_id; id++)
-		if (id->protocol_id == scmi_dev->protocol_id)
-			return id;
+		if (id->protocol_id == scmi_dev->protocol_id) {
+			if (!id->name)
+				return id;
+			else if (!strcmp(id->name, scmi_dev->name))
+				return id;
+		}
 
 	return NULL;
 }
@@ -56,6 +60,11 @@
 	return fn(handle);
 }
 
+static int scmi_protocol_dummy_init(struct scmi_handle *handle)
+{
+	return 0;
+}
+
 static int scmi_dev_probe(struct device *dev)
 {
 	struct scmi_driver *scmi_drv = to_scmi_driver(dev->driver);
@@ -74,6 +83,10 @@
 	if (ret)
 		return ret;
 
+	/* Skip protocol initialisation for additional devices */
+	idr_replace(&scmi_protocols, &scmi_protocol_dummy_init,
+		    scmi_dev->protocol_id);
+
 	return scmi_drv->probe(scmi_dev);
 }
 
@@ -128,7 +141,8 @@
 }
 
 struct scmi_device *
-scmi_device_create(struct device_node *np, struct device *parent, int protocol)
+scmi_device_create(struct device_node *np, struct device *parent, int protocol,
+		   const char *name)
 {
 	int id, retval;
 	struct scmi_device *scmi_dev;
@@ -137,8 +151,15 @@
 	if (!scmi_dev)
 		return NULL;
 
+	scmi_dev->name = kstrdup_const(name ?: "unknown", GFP_KERNEL);
+	if (!scmi_dev->name) {
+		kfree(scmi_dev);
+		return NULL;
+	}
+
 	id = ida_simple_get(&scmi_bus_id, 1, 0, GFP_KERNEL);
 	if (id < 0) {
+		kfree_const(scmi_dev->name);
 		kfree(scmi_dev);
 		return NULL;
 	}
@@ -157,6 +178,7 @@
 
 	return scmi_dev;
 put_dev:
+	kfree_const(scmi_dev->name);
 	put_device(&scmi_dev->dev);
 	ida_simple_remove(&scmi_bus_id, id);
 	return NULL;
@@ -164,6 +186,7 @@
 
 void scmi_device_destroy(struct scmi_device *scmi_dev)
 {
+	kfree_const(scmi_dev->name);
 	scmi_handle_put(scmi_dev->handle);
 	ida_simple_remove(&scmi_bus_id, scmi_dev->id);
 	device_unregister(&scmi_dev->dev);
@@ -210,7 +233,7 @@
 	bus_for_each_dev(&scmi_bus_type, NULL, NULL, __scmi_devices_unregister);
 }
 
-static int __init scmi_bus_init(void)
+int __init scmi_bus_init(void)
 {
 	int retval;
 
@@ -220,12 +243,10 @@
 
 	return retval;
 }
-subsys_initcall(scmi_bus_init);
 
-static void __exit scmi_bus_exit(void)
+void __exit scmi_bus_exit(void)
 {
 	scmi_devices_unregister();
 	bus_unregister(&scmi_bus_type);
 	ida_destroy(&scmi_bus_id);
 }
-module_exit(scmi_bus_exit);
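
With the match-by-name support added to scmi_dev_match_id() above, a protocol
driver can bind to one of the named per-protocol devices rather than to the bare
protocol id alone. A minimal, illustrative id-table sketch (the "clocks" device
name for SCMI_PROTOCOL_CLOCK comes from the devnames table added to driver.c
later in this patch; the surrounding scmi_driver wiring is assumed, not shown):

	#include <linux/module.h>
	#include <linux/scmi_protocol.h>

	static const struct scmi_device_id scmi_id_table[] = {
		{ SCMI_PROTOCOL_CLOCK, "clocks" },
		{ },
	};
	MODULE_DEVICE_TABLE(scmi, scmi_id_table);

	/* Referenced from a struct scmi_driver as .id_table = scmi_id_table */
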
diff --git a/drivers/firmware/arm_scmi/clock.c b/drivers/firmware/arm_scmi/clock.c
index 38400a8..4645677 100644
--- a/drivers/firmware/arm_scmi/clock.c
+++ b/drivers/firmware/arm_scmi/clock.c
@@ -5,6 +5,8 @@
  * Copyright (C) 2018 ARM Ltd.
  */
 
+#include <linux/sort.h>
+
 #include "common.h"
 
 enum scmi_clock_protocol_cmd {
@@ -65,6 +67,7 @@
 };
 
 struct clock_info {
+	u32 version;
 	int num_clocks;
 	int max_async_req;
 	atomic_t cur_async_req;
@@ -120,11 +123,23 @@
 	return ret;
 }
 
+static int rate_cmp_func(const void *_r1, const void *_r2)
+{
+	const u64 *r1 = _r1, *r2 = _r2;
+
+	if (*r1 < *r2)
+		return -1;
+	else if (*r1 == *r2)
+		return 0;
+	else
+		return 1;
+}
+
 static int
 scmi_clock_describe_rates_get(const struct scmi_handle *handle, u32 clk_id,
 			      struct scmi_clock_info *clk)
 {
-	u64 *rate;
+	u64 *rate = NULL;
 	int ret, cnt;
 	bool rate_discrete = false;
 	u32 tot_rate_cnt = 0, rates_flag;
@@ -185,8 +200,10 @@
 		 */
 	} while (num_returned && num_remaining);
 
-	if (rate_discrete)
+	if (rate_discrete && rate) {
 		clk->list.num_rates = tot_rate_cnt;
+		sort(rate, tot_rate_cnt, sizeof(*rate), rate_cmp_func, NULL);
+	}
 
 	clk->rate_discrete = rate_discrete;
 
@@ -303,7 +320,7 @@
 	return clk;
 }
 
-static struct scmi_clk_ops clk_ops = {
+static const struct scmi_clk_ops clk_ops = {
 	.count_get = scmi_clock_count_get,
 	.info_get = scmi_clock_info_get,
 	.rate_get = scmi_clock_rate_get,
@@ -342,15 +359,11 @@
 			scmi_clock_describe_rates_get(handle, clkid, clk);
 	}
 
+	cinfo->version = version;
 	handle->clk_ops = &clk_ops;
 	handle->clk_priv = cinfo;
 
 	return 0;
 }
 
-static int __init scmi_clock_init(void)
-{
-	return scmi_protocol_register(SCMI_PROTOCOL_CLOCK,
-				      &scmi_clock_protocol_init);
-}
-subsys_initcall(scmi_clock_init);
+DEFINE_SCMI_PROTOCOL_REGISTER_UNREGISTER(SCMI_PROTOCOL_CLOCK, clock)
diff --git a/drivers/firmware/arm_scmi/common.h b/drivers/firmware/arm_scmi/common.h
index 9a680b9..34b7ae7 100644
--- a/drivers/firmware/arm_scmi/common.h
+++ b/drivers/firmware/arm_scmi/common.h
@@ -6,6 +6,8 @@
  *
  * Copyright (C) 2018 ARM Ltd.
  */
+#ifndef _SCMI_COMMON_H
+#define _SCMI_COMMON_H
 
 #include <linux/bitfield.h>
 #include <linux/completion.h>
@@ -33,8 +35,8 @@
 /**
  * struct scmi_msg_resp_prot_version - Response for a message
  *
- * @major_version: Major version of the ABI that firmware supports
  * @minor_version: Minor version of the ABI that firmware supports
+ * @major_version: Major version of the ABI that firmware supports
  *
  * In general, ABI version changes follow the rule that minor version increments
  * are backward compatible. Major revision changes in ABI may not be
@@ -47,6 +49,19 @@
 	__le16 major_version;
 };
 
+#define MSG_ID_MASK		GENMASK(7, 0)
+#define MSG_XTRACT_ID(hdr)	FIELD_GET(MSG_ID_MASK, (hdr))
+#define MSG_TYPE_MASK		GENMASK(9, 8)
+#define MSG_XTRACT_TYPE(hdr)	FIELD_GET(MSG_TYPE_MASK, (hdr))
+#define MSG_TYPE_COMMAND	0
+#define MSG_TYPE_DELAYED_RESP	2
+#define MSG_TYPE_NOTIFICATION	3
+#define MSG_PROTOCOL_ID_MASK	GENMASK(17, 10)
+#define MSG_XTRACT_PROT_ID(hdr)	FIELD_GET(MSG_PROTOCOL_ID_MASK, (hdr))
+#define MSG_TOKEN_ID_MASK	GENMASK(27, 18)
+#define MSG_XTRACT_TOKEN(hdr)	FIELD_GET(MSG_TOKEN_ID_MASK, (hdr))
+#define MSG_TOKEN_MAX		(MSG_XTRACT_TOKEN(MSG_TOKEN_ID_MASK) + 1)
+
 /**
  * struct scmi_msg_hdr - Message(Tx/Rx) header
  *
@@ -68,6 +83,33 @@
 };
 
 /**
+ * pack_scmi_header() - packs and returns 32-bit header
+ *
+ * @hdr: pointer to header containing all the information on message id,
+ *	protocol id and sequence id.
+ *
+ * Return: 32-bit packed message header to be sent to the platform.
+ */
+static inline u32 pack_scmi_header(struct scmi_msg_hdr *hdr)
+{
+	return FIELD_PREP(MSG_ID_MASK, hdr->id) |
+		FIELD_PREP(MSG_TOKEN_ID_MASK, hdr->seq) |
+		FIELD_PREP(MSG_PROTOCOL_ID_MASK, hdr->protocol_id);
+}
+
+/**
+ * unpack_scmi_header() - unpacks and records message and protocol id
+ *
+ * @msg_hdr: 32-bit packed message header sent from the platform
+ * @hdr: pointer to header to fetch message and protocol id.
+ */
+static inline void unpack_scmi_header(u32 msg_hdr, struct scmi_msg_hdr *hdr)
+{
+	hdr->id = MSG_XTRACT_ID(msg_hdr);
+	hdr->protocol_id = MSG_XTRACT_PROT_ID(msg_hdr);
+}
+
+/**
  * struct scmi_msg - Message(Tx/Rx) structure
  *
  * @buf: Buffer pointer
@@ -81,15 +123,17 @@
 /**
  * struct scmi_xfer - Structure representing a message flow
  *
+ * @transfer_id: Unique ID for debug & profiling purpose
  * @hdr: Transmit message header
  * @tx: Transmit message
  * @rx: Receive message, the buffer should be pre-allocated to store
  *	message. If request-ACK protocol is used, we can reuse the same
  *	buffer for the rx path as we use for the tx path.
  * @done: command message transmit completion event
- * @async: pointer to delayed response message received event completion
+ * @async_done: pointer to delayed response message received event completion
  */
 struct scmi_xfer {
+	int transfer_id;
 	struct scmi_msg_hdr hdr;
 	struct scmi_msg tx;
 	struct scmi_msg rx;
@@ -113,3 +157,111 @@
 				     u8 *prot_imp);
 
 int scmi_base_protocol_init(struct scmi_handle *h);
+
+int __init scmi_bus_init(void);
+void __exit scmi_bus_exit(void);
+
+#define DECLARE_SCMI_REGISTER_UNREGISTER(func)		\
+	int __init scmi_##func##_register(void);	\
+	void __exit scmi_##func##_unregister(void)
+DECLARE_SCMI_REGISTER_UNREGISTER(clock);
+DECLARE_SCMI_REGISTER_UNREGISTER(perf);
+DECLARE_SCMI_REGISTER_UNREGISTER(power);
+DECLARE_SCMI_REGISTER_UNREGISTER(reset);
+DECLARE_SCMI_REGISTER_UNREGISTER(sensors);
+DECLARE_SCMI_REGISTER_UNREGISTER(system);
+
+#define DEFINE_SCMI_PROTOCOL_REGISTER_UNREGISTER(id, name) \
+int __init scmi_##name##_register(void) \
+{ \
+	return scmi_protocol_register((id), &scmi_##name##_protocol_init); \
+} \
+\
+void __exit scmi_##name##_unregister(void) \
+{ \
+	scmi_protocol_unregister((id)); \
+}
+
+/* SCMI Transport */
+/**
+ * struct scmi_chan_info - Structure representing a SCMI channel information
+ *
+ * @dev: Reference to device in the SCMI hierarchy corresponding to this
+ *	 channel
+ * @handle: Pointer to SCMI entity handle
+ * @transport_info: Transport layer related information
+ */
+struct scmi_chan_info {
+	struct device *dev;
+	struct scmi_handle *handle;
+	void *transport_info;
+};
+
+/**
+ * struct scmi_transport_ops - Structure representing a SCMI transport ops
+ *
+ * @chan_available: Callback to check if channel is available or not
+ * @chan_setup: Callback to allocate and setup a channel
+ * @chan_free: Callback to free a channel
+ * @send_message: Callback to send a message
+ * @mark_txdone: Callback to mark tx as done
+ * @fetch_response: Callback to fetch response
+ * @fetch_notification: Callback to fetch notification
+ * @clear_channel: Callback to clear a channel
+ * @poll_done: Callback to poll transfer status
+ */
+struct scmi_transport_ops {
+	bool (*chan_available)(struct device *dev, int idx);
+	int (*chan_setup)(struct scmi_chan_info *cinfo, struct device *dev,
+			  bool tx);
+	int (*chan_free)(int id, void *p, void *data);
+	int (*send_message)(struct scmi_chan_info *cinfo,
+			    struct scmi_xfer *xfer);
+	void (*mark_txdone)(struct scmi_chan_info *cinfo, int ret);
+	void (*fetch_response)(struct scmi_chan_info *cinfo,
+			       struct scmi_xfer *xfer);
+	void (*fetch_notification)(struct scmi_chan_info *cinfo,
+				   size_t max_len, struct scmi_xfer *xfer);
+	void (*clear_channel)(struct scmi_chan_info *cinfo);
+	bool (*poll_done)(struct scmi_chan_info *cinfo, struct scmi_xfer *xfer);
+};
+
+/**
+ * struct scmi_desc - Description of SoC integration
+ *
+ * @ops: Pointer to the transport specific ops structure
+ * @max_rx_timeout_ms: Timeout for communication with SoC (in Milliseconds)
+ * @max_msg: Maximum number of messages that can be pending
+ *	simultaneously in the system
+ * @max_msg_size: Maximum size of data per message that can be handled.
+ */
+struct scmi_desc {
+	const struct scmi_transport_ops *ops;
+	int max_rx_timeout_ms;
+	int max_msg;
+	int max_msg_size;
+};
+
+extern const struct scmi_desc scmi_mailbox_desc;
+#ifdef CONFIG_HAVE_ARM_SMCCC_DISCOVERY
+extern const struct scmi_desc scmi_smc_desc;
+#endif
+
+void scmi_rx_callback(struct scmi_chan_info *cinfo, u32 msg_hdr);
+void scmi_free_channel(struct scmi_chan_info *cinfo, struct idr *idr, int id);
+
+/* shmem related declarations */
+struct scmi_shared_mem;
+
+void shmem_tx_prepare(struct scmi_shared_mem __iomem *shmem,
+		      struct scmi_xfer *xfer);
+u32 shmem_read_header(struct scmi_shared_mem __iomem *shmem);
+void shmem_fetch_response(struct scmi_shared_mem __iomem *shmem,
+			  struct scmi_xfer *xfer);
+void shmem_fetch_notification(struct scmi_shared_mem __iomem *shmem,
+			      size_t max_len, struct scmi_xfer *xfer);
+void shmem_clear_channel(struct scmi_shared_mem __iomem *shmem);
+bool shmem_poll_done(struct scmi_shared_mem __iomem *shmem,
+		     struct scmi_xfer *xfer);
+
+#endif /* _SCMI_COMMON_H */
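
For reference, a worked example of the 32-bit header layout now encoded by the
masks and the pack_scmi_header()/unpack_scmi_header() helpers above (illustrative
values: message id 0x6 and protocol id 0x10 correspond to
BASE_DISCOVER_LIST_PROTOCOLS and SCMI_PROTOCOL_BASE in this patch, while the
sequence number 0x12 is made up):

	  FIELD_PREP(MSG_ID_MASK, 0x06)			/* bits [7:0]   -> 0x000006 */
	| FIELD_PREP(MSG_PROTOCOL_ID_MASK, 0x10)	/* bits [17:10] -> 0x004000 */
	| FIELD_PREP(MSG_TOKEN_ID_MASK, 0x12)		/* bits [27:18] -> 0x480000 */
							/* packed header = 0x484006 */

	/* MSG_XTRACT_ID(0x484006) == 0x6, MSG_XTRACT_PROT_ID(0x484006) == 0x10,
	 * MSG_XTRACT_TOKEN(0x484006) == 0x12 */
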
diff --git a/drivers/firmware/arm_scmi/driver.c b/drivers/firmware/arm_scmi/driver.c
index 4e43bdf..745b7f9 100644
--- a/drivers/firmware/arm_scmi/driver.c
+++ b/drivers/firmware/arm_scmi/driver.c
@@ -19,28 +19,17 @@
 #include <linux/io.h>
 #include <linux/kernel.h>
 #include <linux/ktime.h>
-#include <linux/mailbox_client.h>
 #include <linux/module.h>
 #include <linux/of_address.h>
 #include <linux/of_device.h>
 #include <linux/processor.h>
-#include <linux/semaphore.h>
 #include <linux/slab.h>
 
 #include "common.h"
+#include "notify.h"
 
-#define MSG_ID_MASK		GENMASK(7, 0)
-#define MSG_XTRACT_ID(hdr)	FIELD_GET(MSG_ID_MASK, (hdr))
-#define MSG_TYPE_MASK		GENMASK(9, 8)
-#define MSG_XTRACT_TYPE(hdr)	FIELD_GET(MSG_TYPE_MASK, (hdr))
-#define MSG_TYPE_COMMAND	0
-#define MSG_TYPE_DELAYED_RESP	2
-#define MSG_TYPE_NOTIFICATION	3
-#define MSG_PROTOCOL_ID_MASK	GENMASK(17, 10)
-#define MSG_XTRACT_PROT_ID(hdr)	FIELD_GET(MSG_PROTOCOL_ID_MASK, (hdr))
-#define MSG_TOKEN_ID_MASK	GENMASK(27, 18)
-#define MSG_XTRACT_TOKEN(hdr)	FIELD_GET(MSG_TOKEN_ID_MASK, (hdr))
-#define MSG_TOKEN_MAX		(MSG_XTRACT_TOKEN(MSG_TOKEN_ID_MASK) + 1)
+#define CREATE_TRACE_POINTS
+#include <trace/events/scmi.h>
 
 enum scmi_error_codes {
 	SCMI_SUCCESS = 0,	/* Success */
@@ -60,6 +49,8 @@
 static LIST_HEAD(scmi_list);
 /* Protection for the entire list */
 static DEFINE_MUTEX(scmi_list_mutex);
+/* Track the unique id for the transfers for debug & profiling purpose */
+static atomic_t transfer_last_id;
 
 /**
  * struct scmi_xfers_info - Structure to manage transfer information
@@ -77,46 +68,15 @@
 };
 
 /**
- * struct scmi_desc - Description of SoC integration
- *
- * @max_rx_timeout_ms: Timeout for communication with SoC (in Milliseconds)
- * @max_msg: Maximum number of messages that can be pending
- *	simultaneously in the system
- * @max_msg_size: Maximum size of data per message that can be handled.
- */
-struct scmi_desc {
-	int max_rx_timeout_ms;
-	int max_msg;
-	int max_msg_size;
-};
-
-/**
- * struct scmi_chan_info - Structure representing a SCMI channel information
- *
- * @cl: Mailbox Client
- * @chan: Transmit/Receive mailbox channel
- * @payload: Transmit/Receive mailbox channel payload area
- * @dev: Reference to device in the SCMI hierarchy corresponding to this
- *	 channel
- * @handle: Pointer to SCMI entity handle
- */
-struct scmi_chan_info {
-	struct mbox_client cl;
-	struct mbox_chan *chan;
-	void __iomem *payload;
-	struct device *dev;
-	struct scmi_handle *handle;
-};
-
-/**
  * struct scmi_info - Structure representing a SCMI instance
  *
  * @dev: Device pointer
  * @desc: SoC description for this instance
- * @handle: Instance of SCMI handle to send to clients
  * @version: SCMI revision information containing protocol version,
  *	implementation version and (sub-)vendor identification.
+ * @handle: Instance of SCMI handle to send to clients
  * @tx_minfo: Universal Transmit Message management info
+ * @rx_minfo: Universal Receive Message management info
  * @tx_idr: IDR object to map protocol id to Tx channel info pointer
  * @rx_idr: IDR object to map protocol id to Rx channel info pointer
  * @protocols_imp: List of protocols implemented, currently maximum of
@@ -130,6 +90,7 @@
 	struct scmi_revision_info version;
 	struct scmi_handle handle;
 	struct scmi_xfers_info tx_minfo;
+	struct scmi_xfers_info rx_minfo;
 	struct idr tx_idr;
 	struct idr rx_idr;
 	u8 *protocols_imp;
@@ -137,27 +98,8 @@
 	int users;
 };
 
-#define client_to_scmi_chan_info(c) container_of(c, struct scmi_chan_info, cl)
 #define handle_to_scmi_info(h)	container_of(h, struct scmi_info, handle)
 
-/*
- * SCMI specification requires all parameters, message headers, return
- * arguments or any protocol data to be expressed in little endian
- * format only.
- */
-struct scmi_shared_mem {
-	__le32 reserved;
-	__le32 channel_status;
-#define SCMI_SHMEM_CHAN_STAT_CHANNEL_ERROR	BIT(1)
-#define SCMI_SHMEM_CHAN_STAT_CHANNEL_FREE	BIT(0)
-	__le32 reserved1[2];
-	__le32 flags;
-#define SCMI_SHMEM_FLAG_INTR_ENABLED	BIT(0)
-	__le32 length;
-	__le32 msg_header;
-	u8 msg_payload[0];
-};
-
 static const int scmi_linux_errmap[] = {
 	/* better than switch case as long as return value is continuous */
 	0,			/* SCMI_SUCCESS */
@@ -195,77 +137,6 @@
 		hdr->id, hdr->seq, hdr->protocol_id);
 }
 
-static void scmi_fetch_response(struct scmi_xfer *xfer,
-				struct scmi_shared_mem __iomem *mem)
-{
-	xfer->hdr.status = ioread32(mem->msg_payload);
-	/* Skip the length of header and status in payload area i.e 8 bytes */
-	xfer->rx.len = min_t(size_t, xfer->rx.len, ioread32(&mem->length) - 8);
-
-	/* Take a copy to the rx buffer.. */
-	memcpy_fromio(xfer->rx.buf, mem->msg_payload + 4, xfer->rx.len);
-}
-
-/**
- * pack_scmi_header() - packs and returns 32-bit header
- *
- * @hdr: pointer to header containing all the information on message id,
- *	protocol id and sequence id.
- *
- * Return: 32-bit packed message header to be sent to the platform.
- */
-static inline u32 pack_scmi_header(struct scmi_msg_hdr *hdr)
-{
-	return FIELD_PREP(MSG_ID_MASK, hdr->id) |
-		FIELD_PREP(MSG_TOKEN_ID_MASK, hdr->seq) |
-		FIELD_PREP(MSG_PROTOCOL_ID_MASK, hdr->protocol_id);
-}
-
-/**
- * unpack_scmi_header() - unpacks and records message and protocol id
- *
- * @msg_hdr: 32-bit packed message header sent from the platform
- * @hdr: pointer to header to fetch message and protocol id.
- */
-static inline void unpack_scmi_header(u32 msg_hdr, struct scmi_msg_hdr *hdr)
-{
-	hdr->id = MSG_XTRACT_ID(msg_hdr);
-	hdr->protocol_id = MSG_XTRACT_PROT_ID(msg_hdr);
-}
-
-/**
- * scmi_tx_prepare() - mailbox client callback to prepare for the transfer
- *
- * @cl: client pointer
- * @m: mailbox message
- *
- * This function prepares the shared memory which contains the header and the
- * payload.
- */
-static void scmi_tx_prepare(struct mbox_client *cl, void *m)
-{
-	struct scmi_xfer *t = m;
-	struct scmi_chan_info *cinfo = client_to_scmi_chan_info(cl);
-	struct scmi_shared_mem __iomem *mem = cinfo->payload;
-
-	/*
-	 * Ideally channel must be free by now unless OS timeout last
-	 * request and platform continued to process the same, wait
-	 * until it releases the shared memory, otherwise we may endup
-	 * overwriting its response with new message payload or vice-versa
-	 */
-	spin_until_cond(ioread32(&mem->channel_status) &
-			SCMI_SHMEM_CHAN_STAT_CHANNEL_FREE);
-	/* Mark channel busy + clear error */
-	iowrite32(0x0, &mem->channel_status);
-	iowrite32(t->hdr.poll_completion ? 0 : SCMI_SHMEM_FLAG_INTR_ENABLED,
-		  &mem->flags);
-	iowrite32(sizeof(mem->msg_header) + t->tx.len, &mem->length);
-	iowrite32(pack_scmi_header(&t->hdr), &mem->msg_header);
-	if (t->tx.buf)
-		memcpy_toio(mem->msg_payload, t->tx.buf, t->tx.len);
-}
-
 /**
  * scmi_xfer_get() - Allocate one message
  *
@@ -305,6 +176,7 @@
 	xfer = &minfo->xfer_block[xfer_id];
 	xfer->hdr.seq = xfer_id;
 	reinit_completion(&xfer->done);
+	xfer->transfer_id = atomic_inc_return(&transfer_last_id);
 
 	return xfer;
 }
@@ -332,44 +204,70 @@
 	spin_unlock_irqrestore(&minfo->xfer_lock, flags);
 }
 
-/**
- * scmi_rx_callback() - mailbox client callback for receive messages
- *
- * @cl: client pointer
- * @m: mailbox message
- *
- * Processes one received message to appropriate transfer information and
- * signals completion of the transfer.
- *
- * NOTE: This function will be invoked in IRQ context, hence should be
- * as optimal as possible.
- */
-static void scmi_rx_callback(struct mbox_client *cl, void *m)
+static void scmi_handle_notification(struct scmi_chan_info *cinfo, u32 msg_hdr)
 {
-	u8 msg_type;
-	u32 msg_hdr;
-	u16 xfer_id;
 	struct scmi_xfer *xfer;
-	struct scmi_chan_info *cinfo = client_to_scmi_chan_info(cl);
+	struct device *dev = cinfo->dev;
+	struct scmi_info *info = handle_to_scmi_info(cinfo->handle);
+	struct scmi_xfers_info *minfo = &info->rx_minfo;
+	ktime_t ts;
+
+	ts = ktime_get_boottime();
+	xfer = scmi_xfer_get(cinfo->handle, minfo);
+	if (IS_ERR(xfer)) {
+		dev_err(dev, "failed to get free message slot (%ld)\n",
+			PTR_ERR(xfer));
+		info->desc->ops->clear_channel(cinfo);
+		return;
+	}
+
+	unpack_scmi_header(msg_hdr, &xfer->hdr);
+	scmi_dump_header_dbg(dev, &xfer->hdr);
+	info->desc->ops->fetch_notification(cinfo, info->desc->max_msg_size,
+					    xfer);
+	scmi_notify(cinfo->handle, xfer->hdr.protocol_id,
+		    xfer->hdr.id, xfer->rx.buf, xfer->rx.len, ts);
+
+	trace_scmi_rx_done(xfer->transfer_id, xfer->hdr.id,
+			   xfer->hdr.protocol_id, xfer->hdr.seq,
+			   MSG_TYPE_NOTIFICATION);
+
+	__scmi_xfer_put(minfo, xfer);
+
+	info->desc->ops->clear_channel(cinfo);
+}
+
+static void scmi_handle_response(struct scmi_chan_info *cinfo,
+				 u16 xfer_id, u8 msg_type)
+{
+	struct scmi_xfer *xfer;
 	struct device *dev = cinfo->dev;
 	struct scmi_info *info = handle_to_scmi_info(cinfo->handle);
 	struct scmi_xfers_info *minfo = &info->tx_minfo;
-	struct scmi_shared_mem __iomem *mem = cinfo->payload;
-
-	msg_hdr = ioread32(&mem->msg_header);
-	msg_type = MSG_XTRACT_TYPE(msg_hdr);
-	xfer_id = MSG_XTRACT_TOKEN(msg_hdr);
-
-	if (msg_type == MSG_TYPE_NOTIFICATION)
-		return; /* Notifications not yet supported */
 
 	/* Are we even expecting this? */
 	if (!test_bit(xfer_id, minfo->xfer_alloc_table)) {
 		dev_err(dev, "message for %d is not expected!\n", xfer_id);
+		info->desc->ops->clear_channel(cinfo);
 		return;
 	}
 
 	xfer = &minfo->xfer_block[xfer_id];
+	/*
+	 * Even if a response was indeed expected on this slot at this point,
+	 * a buggy platform could wrongly reply feeding us an unexpected
+	 * delayed response we're not prepared to handle: bail-out safely
+	 * blaming firmware.
+	 */
+	if (unlikely(msg_type == MSG_TYPE_DELAYED_RESP && !xfer->async_done)) {
+		dev_err(dev,
+			"Delayed Response for %d not expected! Buggy F/W ?\n",
+			xfer_id);
+		info->desc->ops->clear_channel(cinfo);
+		/* It was unexpected, so nobody will clear the xfer if not us */
+		__scmi_xfer_put(minfo, xfer);
+		return;
+	}
 
 	/* rx.len could be shrunk in the sync do_xfer, so reset to maxsz */
 	if (msg_type == MSG_TYPE_DELAYED_RESP)
@@ -377,12 +275,49 @@
 
 	scmi_dump_header_dbg(dev, &xfer->hdr);
 
-	scmi_fetch_response(xfer, mem);
+	info->desc->ops->fetch_response(cinfo, xfer);
 
-	if (msg_type == MSG_TYPE_DELAYED_RESP)
+	trace_scmi_rx_done(xfer->transfer_id, xfer->hdr.id,
+			   xfer->hdr.protocol_id, xfer->hdr.seq,
+			   msg_type);
+
+	if (msg_type == MSG_TYPE_DELAYED_RESP) {
+		info->desc->ops->clear_channel(cinfo);
 		complete(xfer->async_done);
-	else
+	} else {
 		complete(&xfer->done);
+	}
+}
+
+/**
+ * scmi_rx_callback() - callback for receiving messages
+ *
+ * @cinfo: SCMI channel info
+ * @msg_hdr: Message header
+ *
+ * Processes one received message to appropriate transfer information and
+ * signals completion of the transfer.
+ *
+ * NOTE: This function will be invoked in IRQ context, hence should be
+ * as optimal as possible.
+ */
+void scmi_rx_callback(struct scmi_chan_info *cinfo, u32 msg_hdr)
+{
+	u16 xfer_id = MSG_XTRACT_TOKEN(msg_hdr);
+	u8 msg_type = MSG_XTRACT_TYPE(msg_hdr);
+
+	switch (msg_type) {
+	case MSG_TYPE_NOTIFICATION:
+		scmi_handle_notification(cinfo, msg_hdr);
+		break;
+	case MSG_TYPE_COMMAND:
+	case MSG_TYPE_DELAYED_RESP:
+		scmi_handle_response(cinfo, xfer_id, msg_type);
+		break;
+	default:
+		WARN_ONCE(1, "received unknown msg_type:%d\n", msg_type);
+		break;
+	}
 }
 
 /**
@@ -398,28 +333,15 @@
 	__scmi_xfer_put(&info->tx_minfo, xfer);
 }
 
-static bool
-scmi_xfer_poll_done(const struct scmi_chan_info *cinfo, struct scmi_xfer *xfer)
-{
-	struct scmi_shared_mem __iomem *mem = cinfo->payload;
-	u16 xfer_id = MSG_XTRACT_TOKEN(ioread32(&mem->msg_header));
-
-	if (xfer->hdr.seq != xfer_id)
-		return false;
-
-	return ioread32(&mem->channel_status) &
-		(SCMI_SHMEM_CHAN_STAT_CHANNEL_ERROR |
-		SCMI_SHMEM_CHAN_STAT_CHANNEL_FREE);
-}
-
 #define SCMI_MAX_POLL_TO_NS	(100 * NSEC_PER_USEC)
 
-static bool scmi_xfer_done_no_timeout(const struct scmi_chan_info *cinfo,
+static bool scmi_xfer_done_no_timeout(struct scmi_chan_info *cinfo,
 				      struct scmi_xfer *xfer, ktime_t stop)
 {
-	ktime_t __cur = ktime_get();
+	struct scmi_info *info = handle_to_scmi_info(cinfo->handle);
 
-	return scmi_xfer_poll_done(cinfo, xfer) || ktime_after(__cur, stop);
+	return info->desc->ops->poll_done(cinfo, xfer) ||
+	       ktime_after(ktime_get(), stop);
 }
 
 /**
@@ -444,29 +366,30 @@
 	if (unlikely(!cinfo))
 		return -EINVAL;
 
-	ret = mbox_send_message(cinfo->chan, xfer);
+	trace_scmi_xfer_begin(xfer->transfer_id, xfer->hdr.id,
+			      xfer->hdr.protocol_id, xfer->hdr.seq,
+			      xfer->hdr.poll_completion);
+
+	ret = info->desc->ops->send_message(cinfo, xfer);
 	if (ret < 0) {
-		dev_dbg(dev, "mbox send fail %d\n", ret);
+		dev_dbg(dev, "Failed to send message %d\n", ret);
 		return ret;
 	}
 
-	/* mbox_send_message returns non-negative value on success, so reset */
-	ret = 0;
-
 	if (xfer->hdr.poll_completion) {
 		ktime_t stop = ktime_add_ns(ktime_get(), SCMI_MAX_POLL_TO_NS);
 
 		spin_until_cond(scmi_xfer_done_no_timeout(cinfo, xfer, stop));
 
 		if (ktime_before(ktime_get(), stop))
-			scmi_fetch_response(xfer, cinfo->payload);
+			info->desc->ops->fetch_response(cinfo, xfer);
 		else
 			ret = -ETIMEDOUT;
 	} else {
 		/* And we wait for the response. */
 		timeout = msecs_to_jiffies(info->desc->max_rx_timeout_ms);
 		if (!wait_for_completion_timeout(&xfer->done, timeout)) {
-			dev_err(dev, "mbox timed out in resp(caller: %pS)\n",
+			dev_err(dev, "timed out in resp(caller: %pS)\n",
 				(void *)_RET_IP_);
 			ret = -ETIMEDOUT;
 		}
@@ -475,13 +398,11 @@
 	if (!ret && xfer->hdr.status)
 		ret = scmi_to_linux_errno(xfer->hdr.status);
 
-	/*
-	 * NOTE: we might prefer not to need the mailbox ticker to manage the
-	 * transfer queueing since the protocol layer queues things by itself.
-	 * Unfortunately, we have to kick the mailbox framework after we have
-	 * received our message.
-	 */
-	mbox_client_txdone(cinfo->chan, ret);
+	if (info->desc->ops->mark_txdone)
+		info->desc->ops->mark_txdone(cinfo, ret);
+
+	trace_scmi_xfer_end(xfer->transfer_id, xfer->hdr.id,
+			    xfer->hdr.protocol_id, xfer->hdr.seq, ret);
 
 	return ret;
 }
@@ -689,13 +610,13 @@
 	return 0;
 }
 
-static int scmi_xfer_info_init(struct scmi_info *sinfo)
+static int __scmi_xfer_info_init(struct scmi_info *sinfo,
+				 struct scmi_xfers_info *info)
 {
 	int i;
 	struct scmi_xfer *xfer;
 	struct device *dev = sinfo->dev;
 	const struct scmi_desc *desc = sinfo->desc;
-	struct scmi_xfers_info *info = &sinfo->tx_minfo;
 
 	/* Pre-allocated messages, no more than what hdr.seq can support */
 	if (WARN_ON(!desc->max_msg || desc->max_msg > MSG_TOKEN_MAX)) {
@@ -731,29 +652,33 @@
 	return 0;
 }
 
-static int scmi_mailbox_check(struct device_node *np, int idx)
+static int scmi_xfer_info_init(struct scmi_info *sinfo)
 {
-	return of_parse_phandle_with_args(np, "mboxes", "#mbox-cells",
-					  idx, NULL);
+	int ret = __scmi_xfer_info_init(sinfo, &sinfo->tx_minfo);
+
+	if (!ret && idr_find(&sinfo->rx_idr, SCMI_PROTOCOL_BASE))
+		ret = __scmi_xfer_info_init(sinfo, &sinfo->rx_minfo);
+
+	return ret;
 }
 
-static int scmi_mbox_chan_setup(struct scmi_info *info, struct device *dev,
-				int prot_id, bool tx)
+static int scmi_chan_setup(struct scmi_info *info, struct device *dev,
+			   int prot_id, bool tx)
 {
 	int ret, idx;
-	struct resource res;
-	resource_size_t size;
-	struct device_node *shmem, *np = dev->of_node;
 	struct scmi_chan_info *cinfo;
-	struct mbox_client *cl;
 	struct idr *idr;
-	const char *desc = tx ? "Tx" : "Rx";
 
 	/* Transmit channel is first entry i.e. index 0 */
 	idx = tx ? 0 : 1;
 	idr = tx ? &info->tx_idr : &info->rx_idr;
 
-	if (scmi_mailbox_check(np, idx)) {
+	/* Check if already allocated, used for multiple devices per protocol */
+	cinfo = idr_find(idr, prot_id);
+	if (cinfo)
+		return 0;
+
+	if (!info->desc->ops->chan_available(dev, idx)) {
 		cinfo = idr_find(idr, SCMI_PROTOCOL_BASE);
 		if (unlikely(!cinfo)) /* Possible only if platform has no Rx */
 			return -EINVAL;
@@ -766,36 +691,9 @@
 
 	cinfo->dev = dev;
 
-	cl = &cinfo->cl;
-	cl->dev = dev;
-	cl->rx_callback = scmi_rx_callback;
-	cl->tx_prepare = tx ? scmi_tx_prepare : NULL;
-	cl->tx_block = false;
-	cl->knows_txdone = tx;
-
-	shmem = of_parse_phandle(np, "shmem", idx);
-	ret = of_address_to_resource(shmem, 0, &res);
-	of_node_put(shmem);
-	if (ret) {
-		dev_err(dev, "failed to get SCMI %s payload memory\n", desc);
+	ret = info->desc->ops->chan_setup(cinfo, info->dev, tx);
+	if (ret)
 		return ret;
-	}
-
-	size = resource_size(&res);
-	cinfo->payload = devm_ioremap(info->dev, res.start, size);
-	if (!cinfo->payload) {
-		dev_err(dev, "failed to ioremap SCMI %s payload\n", desc);
-		return -EADDRNOTAVAIL;
-	}
-
-	cinfo->chan = mbox_request_channel(cl, idx);
-	if (IS_ERR(cinfo->chan)) {
-		ret = PTR_ERR(cinfo->chan);
-		if (ret != -EPROBE_DEFER)
-			dev_err(dev, "failed to request SCMI %s mailbox\n",
-				desc);
-		return ret;
-	}
 
 idr_alloc:
 	ret = idr_alloc(idr, cinfo, prot_id, prot_id + 1, GFP_KERNEL);
@@ -809,30 +707,30 @@
 }
 
 static inline int
-scmi_mbox_txrx_setup(struct scmi_info *info, struct device *dev, int prot_id)
+scmi_txrx_setup(struct scmi_info *info, struct device *dev, int prot_id)
 {
-	int ret = scmi_mbox_chan_setup(info, dev, prot_id, true);
+	int ret = scmi_chan_setup(info, dev, prot_id, true);
 
 	if (!ret) /* Rx is optional, hence no error check */
-		scmi_mbox_chan_setup(info, dev, prot_id, false);
+		scmi_chan_setup(info, dev, prot_id, false);
 
 	return ret;
 }
 
 static inline void
 scmi_create_protocol_device(struct device_node *np, struct scmi_info *info,
-			    int prot_id)
+			    int prot_id, const char *name)
 {
 	struct scmi_device *sdev;
 
-	sdev = scmi_device_create(np, info->dev, prot_id);
+	sdev = scmi_device_create(np, info->dev, prot_id, name);
 	if (!sdev) {
 		dev_err(info->dev, "failed to create %d protocol device\n",
 			prot_id);
 		return;
 	}
 
-	if (scmi_mbox_txrx_setup(info, &sdev->dev, prot_id)) {
+	if (scmi_txrx_setup(info, &sdev->dev, prot_id)) {
 		dev_err(&sdev->dev, "failed to setup transport\n");
 		scmi_device_destroy(sdev);
 		return;
@@ -842,6 +740,41 @@
 	scmi_set_handle(sdev);
 }
 
+#define MAX_SCMI_DEV_PER_PROTOCOL	2
+struct scmi_prot_devnames {
+	int protocol_id;
+	char *names[MAX_SCMI_DEV_PER_PROTOCOL];
+};
+
+static struct scmi_prot_devnames devnames[] = {
+	{ SCMI_PROTOCOL_POWER,  { "genpd" },},
+	{ SCMI_PROTOCOL_SYSTEM, { "syspower" },},
+	{ SCMI_PROTOCOL_PERF,   { "cpufreq" },},
+	{ SCMI_PROTOCOL_CLOCK,  { "clocks" },},
+	{ SCMI_PROTOCOL_SENSOR, { "hwmon" },},
+	{ SCMI_PROTOCOL_RESET,  { "reset" },},
+};
+
+static inline void
+scmi_create_protocol_devices(struct device_node *np, struct scmi_info *info,
+			     int prot_id)
+{
+	int loop, cnt;
+
+	for (loop = 0; loop < ARRAY_SIZE(devnames); loop++) {
+		if (devnames[loop].protocol_id != prot_id)
+			continue;
+
+		for (cnt = 0; cnt < ARRAY_SIZE(devnames[loop].names); cnt++) {
+			const char *name = devnames[loop].names[cnt];
+
+			if (name)
+				scmi_create_protocol_device(np, info, prot_id,
+							    name);
+		}
+	}
+}
+
 static int scmi_probe(struct platform_device *pdev)
 {
 	int ret;
@@ -851,12 +784,6 @@
 	struct device *dev = &pdev->dev;
 	struct device_node *child, *np = dev->of_node;
 
-	/* Only mailbox method supported, check for the presence of one */
-	if (scmi_mailbox_check(np, 0)) {
-		dev_err(dev, "no mailbox found in %pOF\n", np);
-		return -EINVAL;
-	}
-
 	desc = of_device_get_match_data(dev);
 	if (!desc)
 		return -EINVAL;
@@ -869,10 +796,6 @@
 	info->desc = desc;
 	INIT_LIST_HEAD(&info->node);
 
-	ret = scmi_xfer_info_init(info);
-	if (ret)
-		return ret;
-
 	platform_set_drvdata(pdev, info);
 	idr_init(&info->tx_idr);
 	idr_init(&info->rx_idr);
@@ -881,10 +804,17 @@
 	handle->dev = info->dev;
 	handle->version = &info->version;
 
-	ret = scmi_mbox_txrx_setup(info, dev, SCMI_PROTOCOL_BASE);
+	ret = scmi_txrx_setup(info, dev, SCMI_PROTOCOL_BASE);
 	if (ret)
 		return ret;
 
+	ret = scmi_xfer_info_init(info);
+	if (ret)
+		return ret;
+
+	if (scmi_notification_init(handle))
+		dev_err(dev, "SCMI Notifications NOT available.\n");
+
 	ret = scmi_base_protocol_init(handle);
 	if (ret) {
 		dev_err(dev, "unable to communicate with SCMI(%d)\n", ret);
@@ -910,25 +840,15 @@
 			continue;
 		}
 
-		scmi_create_protocol_device(child, info, prot_id);
+		scmi_create_protocol_devices(child, info, prot_id);
 	}
 
 	return 0;
 }
 
-static int scmi_mbox_free_channel(int id, void *p, void *data)
+void scmi_free_channel(struct scmi_chan_info *cinfo, struct idr *idr, int id)
 {
-	struct scmi_chan_info *cinfo = p;
-	struct idr *idr = data;
-
-	if (!IS_ERR_OR_NULL(cinfo->chan)) {
-		mbox_free_channel(cinfo->chan);
-		cinfo->chan = NULL;
-	}
-
 	idr_remove(idr, id);
-
-	return 0;
 }
 
 static int scmi_remove(struct platform_device *pdev)
@@ -947,26 +867,73 @@
 	if (ret)
 		return ret;
 
+	scmi_notification_exit(&info->handle);
+
 	/* Safe to free channels since no more users */
-	ret = idr_for_each(idr, scmi_mbox_free_channel, idr);
+	ret = idr_for_each(idr, info->desc->ops->chan_free, idr);
 	idr_destroy(&info->tx_idr);
 
 	idr = &info->rx_idr;
-	ret = idr_for_each(idr, scmi_mbox_free_channel, idr);
+	ret = idr_for_each(idr, info->desc->ops->chan_free, idr);
 	idr_destroy(&info->rx_idr);
 
 	return ret;
 }
 
-static const struct scmi_desc scmi_generic_desc = {
-	.max_rx_timeout_ms = 30,	/* We may increase this if required */
-	.max_msg = 20,		/* Limited by MBOX_TX_QUEUE_LEN */
-	.max_msg_size = 128,
+static ssize_t protocol_version_show(struct device *dev,
+				     struct device_attribute *attr, char *buf)
+{
+	struct scmi_info *info = dev_get_drvdata(dev);
+
+	return sprintf(buf, "%u.%u\n", info->version.major_ver,
+		       info->version.minor_ver);
+}
+static DEVICE_ATTR_RO(protocol_version);
+
+static ssize_t firmware_version_show(struct device *dev,
+				     struct device_attribute *attr, char *buf)
+{
+	struct scmi_info *info = dev_get_drvdata(dev);
+
+	return sprintf(buf, "0x%x\n", info->version.impl_ver);
+}
+static DEVICE_ATTR_RO(firmware_version);
+
+static ssize_t vendor_id_show(struct device *dev,
+			      struct device_attribute *attr, char *buf)
+{
+	struct scmi_info *info = dev_get_drvdata(dev);
+
+	return sprintf(buf, "%s\n", info->version.vendor_id);
+}
+static DEVICE_ATTR_RO(vendor_id);
+
+static ssize_t sub_vendor_id_show(struct device *dev,
+				  struct device_attribute *attr, char *buf)
+{
+	struct scmi_info *info = dev_get_drvdata(dev);
+
+	return sprintf(buf, "%s\n", info->version.sub_vendor_id);
+}
+static DEVICE_ATTR_RO(sub_vendor_id);
+
+static struct attribute *versions_attrs[] = {
+	&dev_attr_firmware_version.attr,
+	&dev_attr_protocol_version.attr,
+	&dev_attr_vendor_id.attr,
+	&dev_attr_sub_vendor_id.attr,
+	NULL,
 };
+ATTRIBUTE_GROUPS(versions);
 
 /* Each compatible listed below must have descriptor associated with it */
 static const struct of_device_id scmi_of_match[] = {
-	{ .compatible = "arm,scmi", .data = &scmi_generic_desc },
+#ifdef CONFIG_MAILBOX
+	{ .compatible = "arm,scmi", .data = &scmi_mailbox_desc },
+#endif
+#ifdef CONFIG_HAVE_ARM_SMCCC_DISCOVERY
+	{ .compatible = "arm,scmi-smc", .data = &scmi_smc_desc},
+#endif
 	{ /* Sentinel */ },
 };
 
@@ -976,14 +943,43 @@
 	.driver = {
 		   .name = "arm-scmi",
 		   .of_match_table = scmi_of_match,
+		   .dev_groups = versions_groups,
 		   },
 	.probe = scmi_probe,
 	.remove = scmi_remove,
 };
 
-module_platform_driver(scmi_driver);
+static int __init scmi_driver_init(void)
+{
+	scmi_bus_init();
 
-MODULE_ALIAS("platform: arm-scmi");
+	scmi_clock_register();
+	scmi_perf_register();
+	scmi_power_register();
+	scmi_reset_register();
+	scmi_sensors_register();
+	scmi_system_register();
+
+	return platform_driver_register(&scmi_driver);
+}
+subsys_initcall(scmi_driver_init);
+
+static void __exit scmi_driver_exit(void)
+{
+	scmi_bus_exit();
+
+	scmi_clock_unregister();
+	scmi_perf_unregister();
+	scmi_power_unregister();
+	scmi_reset_unregister();
+	scmi_sensors_unregister();
+	scmi_system_unregister();
+
+	platform_driver_unregister(&scmi_driver);
+}
+module_exit(scmi_driver_exit);
+
+MODULE_ALIAS("platform:arm-scmi");
 MODULE_AUTHOR("Sudeep Holla <sudeep.holla@arm.com>");
 MODULE_DESCRIPTION("ARM SCMI protocol driver");
 MODULE_LICENSE("GPL v2");
diff --git a/drivers/firmware/arm_scmi/mailbox.c b/drivers/firmware/arm_scmi/mailbox.c
new file mode 100644
index 0000000..4626404
--- /dev/null
+++ b/drivers/firmware/arm_scmi/mailbox.c
@@ -0,0 +1,201 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * System Control and Management Interface (SCMI) Message Mailbox Transport
+ * driver.
+ *
+ * Copyright (C) 2019 ARM Ltd.
+ */
+
+#include <linux/err.h>
+#include <linux/device.h>
+#include <linux/mailbox_client.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/slab.h>
+
+#include "common.h"
+
+/**
+ * struct scmi_mailbox - Structure representing a SCMI mailbox transport
+ *
+ * @cl: Mailbox Client
+ * @chan: Transmit/Receive mailbox channel
+ * @cinfo: SCMI channel info
+ * @shmem: Transmit/Receive shared memory area
+ */
+struct scmi_mailbox {
+	struct mbox_client cl;
+	struct mbox_chan *chan;
+	struct scmi_chan_info *cinfo;
+	struct scmi_shared_mem __iomem *shmem;
+};
+
+#define client_to_scmi_mailbox(c) container_of(c, struct scmi_mailbox, cl)
+
+static void tx_prepare(struct mbox_client *cl, void *m)
+{
+	struct scmi_mailbox *smbox = client_to_scmi_mailbox(cl);
+
+	shmem_tx_prepare(smbox->shmem, m);
+}
+
+static void rx_callback(struct mbox_client *cl, void *m)
+{
+	struct scmi_mailbox *smbox = client_to_scmi_mailbox(cl);
+
+	scmi_rx_callback(smbox->cinfo, shmem_read_header(smbox->shmem));
+}
+
+static bool mailbox_chan_available(struct device *dev, int idx)
+{
+	return !of_parse_phandle_with_args(dev->of_node, "mboxes",
+					   "#mbox-cells", idx, NULL);
+}
+
+static int mailbox_chan_setup(struct scmi_chan_info *cinfo, struct device *dev,
+			      bool tx)
+{
+	const char *desc = tx ? "Tx" : "Rx";
+	struct device *cdev = cinfo->dev;
+	struct scmi_mailbox *smbox;
+	struct device_node *shmem;
+	int ret, idx = tx ? 0 : 1;
+	struct mbox_client *cl;
+	resource_size_t size;
+	struct resource res;
+
+	smbox = devm_kzalloc(dev, sizeof(*smbox), GFP_KERNEL);
+	if (!smbox)
+		return -ENOMEM;
+
+	shmem = of_parse_phandle(cdev->of_node, "shmem", idx);
+	ret = of_address_to_resource(shmem, 0, &res);
+	of_node_put(shmem);
+	if (ret) {
+		dev_err(cdev, "failed to get SCMI %s shared memory\n", desc);
+		return ret;
+	}
+
+	size = resource_size(&res);
+	smbox->shmem = devm_ioremap(dev, res.start, size);
+	if (!smbox->shmem) {
+		dev_err(dev, "failed to ioremap SCMI %s shared memory\n", desc);
+		return -EADDRNOTAVAIL;
+	}
+
+	cl = &smbox->cl;
+	cl->dev = cdev;
+	cl->tx_prepare = tx ? tx_prepare : NULL;
+	cl->rx_callback = rx_callback;
+	cl->tx_block = false;
+	cl->knows_txdone = tx;
+
+	smbox->chan = mbox_request_channel(cl, tx ? 0 : 1);
+	if (IS_ERR(smbox->chan)) {
+		ret = PTR_ERR(smbox->chan);
+		if (ret != -EPROBE_DEFER)
+			dev_err(cdev, "failed to request SCMI %s mailbox\n",
+				tx ? "Tx" : "Rx");
+		return ret;
+	}
+
+	cinfo->transport_info = smbox;
+	smbox->cinfo = cinfo;
+
+	return 0;
+}
+
+static int mailbox_chan_free(int id, void *p, void *data)
+{
+	struct scmi_chan_info *cinfo = p;
+	struct scmi_mailbox *smbox = cinfo->transport_info;
+
+	if (smbox && !IS_ERR(smbox->chan)) {
+		mbox_free_channel(smbox->chan);
+		cinfo->transport_info = NULL;
+		smbox->chan = NULL;
+		smbox->cinfo = NULL;
+	}
+
+	scmi_free_channel(cinfo, data, id);
+
+	return 0;
+}
+
+static int mailbox_send_message(struct scmi_chan_info *cinfo,
+				struct scmi_xfer *xfer)
+{
+	struct scmi_mailbox *smbox = cinfo->transport_info;
+	int ret;
+
+	ret = mbox_send_message(smbox->chan, xfer);
+
+	/* mbox_send_message returns non-negative value on success, so reset */
+	if (ret > 0)
+		ret = 0;
+
+	return ret;
+}
+
+static void mailbox_mark_txdone(struct scmi_chan_info *cinfo, int ret)
+{
+	struct scmi_mailbox *smbox = cinfo->transport_info;
+
+	/*
+	 * NOTE: we might prefer not to need the mailbox ticker to manage the
+	 * transfer queueing since the protocol layer queues things by itself.
+	 * Unfortunately, we have to kick the mailbox framework after we have
+	 * received our message.
+	 */
+	mbox_client_txdone(smbox->chan, ret);
+}
+
+static void mailbox_fetch_response(struct scmi_chan_info *cinfo,
+				   struct scmi_xfer *xfer)
+{
+	struct scmi_mailbox *smbox = cinfo->transport_info;
+
+	shmem_fetch_response(smbox->shmem, xfer);
+}
+
+static void mailbox_fetch_notification(struct scmi_chan_info *cinfo,
+				       size_t max_len, struct scmi_xfer *xfer)
+{
+	struct scmi_mailbox *smbox = cinfo->transport_info;
+
+	shmem_fetch_notification(smbox->shmem, max_len, xfer);
+}
+
+static void mailbox_clear_channel(struct scmi_chan_info *cinfo)
+{
+	struct scmi_mailbox *smbox = cinfo->transport_info;
+
+	shmem_clear_channel(smbox->shmem);
+}
+
+static bool
+mailbox_poll_done(struct scmi_chan_info *cinfo, struct scmi_xfer *xfer)
+{
+	struct scmi_mailbox *smbox = cinfo->transport_info;
+
+	return shmem_poll_done(smbox->shmem, xfer);
+}
+
+static const struct scmi_transport_ops scmi_mailbox_ops = {
+	.chan_available = mailbox_chan_available,
+	.chan_setup = mailbox_chan_setup,
+	.chan_free = mailbox_chan_free,
+	.send_message = mailbox_send_message,
+	.mark_txdone = mailbox_mark_txdone,
+	.fetch_response = mailbox_fetch_response,
+	.fetch_notification = mailbox_fetch_notification,
+	.clear_channel = mailbox_clear_channel,
+	.poll_done = mailbox_poll_done,
+};
+
+const struct scmi_desc scmi_mailbox_desc = {
+	.ops = &scmi_mailbox_ops,
+	.max_rx_timeout_ms = 30, /* We may increase this if required */
+	.max_msg = 20, /* Limited by MBOX_TX_QUEUE_LEN */
+	.max_msg_size = 128,
+};
diff --git a/drivers/firmware/arm_scmi/notify.c b/drivers/firmware/arm_scmi/notify.c
new file mode 100644
index 0000000..66196b2
--- /dev/null
+++ b/drivers/firmware/arm_scmi/notify.c
@@ -0,0 +1,1532 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * System Control and Management Interface (SCMI) Notification support
+ *
+ * Copyright (C) 2020 ARM Ltd.
+ */
+/**
+ * DOC: Theory of operation
+ *
+ * SCMI Protocol specification allows the platform to signal events to
+ * interested agents via notification messages: this is an implementation
+ * of the dispatch and delivery of such notifications to the interested users
+ * inside the Linux kernel.
+ *
+ * An SCMI Notification core instance is initialized for each active platform
+ * instance, identified by means of the usual &struct scmi_handle.
+ *
+ * Each SCMI Protocol implementation, during its initialization, registers with
+ * this core its set of supported events using scmi_register_protocol_events():
+ * all the needed descriptors are stored in the &struct registered_protocols and
+ * &struct registered_events arrays.
+ *
+ * Kernel users interested in some specific event can register their callbacks
+ * providing the usual notifier_block descriptor, since this core implements
+ * events' delivery using the standard Kernel notification chains machinery.
+ *
+ * Given the number of possible events defined by SCMI and the extensibility
+ * of the SCMI Protocol itself, the underlying notification chains are created
+ * and destroyed dynamically on demand depending on the number of users
+ * effectively registered for an event, so that no support structures or chains
+ * are allocated until at least one user has registered a notifier_block for
+ * such event. Similarly, events' generation itself is enabled at the platform
+ * level only after at least one user has registered, and it is shutdown after
+ * the last user for that event has gone.
+ *
+ * All user-provided callbacks and allocated notification chains are stored in
+ * the @registered_events_handlers hashtable. Callback registration requests
+ * for not-yet-registered events are instead kept in the dedicated common
+ * hashtable @pending_events_handlers.
+ *
+ * An event is identified uniquely by the tuple (proto_id, evt_id, src_id)
+ * and is served by its own dedicated notification chain; information contained
+ * in such tuples is used, in a few different ways, to generate the needed
+ * hash-keys.
+ *
+ * Here proto_id and evt_id are simply the protocol_id and message_id numbers
+ * as described in the SCMI Protocol specification, while src_id represents an
+ * optional, protocol dependent, source identifier (like domain_id, perf_id
+ * or sensor_id and so forth).
+ *
+ * Upon reception of a notification message from the platform the SCMI RX ISR
+ * passes the received message payload and some ancillary information (including
+ * an arrival timestamp in nanoseconds) to the core via @scmi_notify() which
+ * pushes the event-data itself on a protocol-dedicated kfifo queue for further
+ * deferred processing as specified in @scmi_events_dispatcher().
+ *
+ * Each protocol has its own dedicated work_struct and worker which, once kicked
+ * by the ISR, takes care to empty its own dedicated queue, delivering the
+ * queued items into the proper notification chain: notification processing can
+ * proceed concurrently on distinct workers only for events belonging to
+ * different protocols, while delivery of events within the same protocol is
+ * still strictly ordered by time of arrival.
+ *
+ * Event information is then extracted from the SCMI notification message,
+ * converted into a custom per-event report struct, and passed as the void *data
+ * parameter to the user callback provided by the registered notifier_block, so
+ * that from the user's perspective the callback is invoked as:
+ *
+ * int user_cb(struct notifier_block *nb, unsigned long event_id, void *report)
+ *
+ */
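
A minimal usage sketch of the flow described above (not part of this patch: the
register_event_notifier entry point is assumed to be the one exported through
handle->notify_ops in this kernel version, while the callback signature and the
Base error report layout are taken from this patch):

	#include <linux/kernel.h>
	#include <linux/notifier.h>
	#include <linux/scmi_protocol.h>

	static int base_err_cb(struct notifier_block *nb, unsigned long event_id,
			       void *data)
	{
		struct scmi_base_error_report *r = data;

		pr_info("SCMI base error: agent %u fatal %d cmd_count %u\n",
			r->agent_id, r->fatal, r->cmd_count);
		return NOTIFY_OK;
	}

	static struct notifier_block base_err_nb = { .notifier_call = base_err_cb };

	/* Called with a handle obtained from an SCMI driver probe;
	 * a NULL src_id means "all sources" for this event.
	 */
	static int watch_base_errors(const struct scmi_handle *handle)
	{
		return handle->notify_ops->register_event_notifier(handle,
				SCMI_PROTOCOL_BASE, SCMI_EVENT_BASE_ERROR_EVENT,
				NULL, &base_err_nb);
	}
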
+
+#define dev_fmt(fmt) "SCMI Notifications - " fmt
+#define pr_fmt(fmt) "SCMI Notifications - " fmt
+
+#include <linux/bitfield.h>
+#include <linux/bug.h>
+#include <linux/compiler.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/hashtable.h>
+#include <linux/kernel.h>
+#include <linux/ktime.h>
+#include <linux/kfifo.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/notifier.h>
+#include <linux/refcount.h>
+#include <linux/scmi_protocol.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/workqueue.h>
+
+#include "notify.h"
+
+#define SCMI_MAX_PROTO		256
+
+#define PROTO_ID_MASK		GENMASK(31, 24)
+#define EVT_ID_MASK		GENMASK(23, 16)
+#define SRC_ID_MASK		GENMASK(15, 0)
+
+/*
+ * Builds an unsigned 32bit key from the given input tuple to be used
+ * as a key in hashtables.
+ */
+#define MAKE_HASH_KEY(p, e, s)			\
+	(FIELD_PREP(PROTO_ID_MASK, (p)) |	\
+	   FIELD_PREP(EVT_ID_MASK, (e)) |	\
+	   FIELD_PREP(SRC_ID_MASK, (s)))
+
+#define MAKE_ALL_SRCS_KEY(p, e)		MAKE_HASH_KEY((p), (e), SRC_ID_MASK)
+
+/*
+ * Assumes that the stored obj includes its own hash-key in a field named 'key':
+ * with this simplification this macro can be equally used for all the objects'
+ * types hashed by this implementation.
+ *
+ * @__ht: The hashtable name
+ * @__obj: A pointer to the object type to be retrieved from the hashtable;
+ *	   it will be used as a cursor while scanning the hashtable and it will
+ *	   possibly be left as NULL when @__k is not found
+ * @__k: The key to search for
+ */
+#define KEY_FIND(__ht, __obj, __k)				\
+({								\
+	typeof(__k) k_ = __k;					\
+	typeof(__obj) obj_;					\
+								\
+	hash_for_each_possible((__ht), obj_, hash, k_)		\
+		if (obj_->key == k_)				\
+			break;					\
+	__obj = obj_;						\
+})
+
+#define KEY_XTRACT_PROTO_ID(key)	FIELD_GET(PROTO_ID_MASK, (key))
+#define KEY_XTRACT_EVT_ID(key)		FIELD_GET(EVT_ID_MASK, (key))
+#define KEY_XTRACT_SRC_ID(key)		FIELD_GET(SRC_ID_MASK, (key))
+
+/*
+ * A set of macros used to access safely @registered_protocols and
+ * @registered_events arrays; these are fixed in size and each entry is possibly
+ * populated at protocols' registration time and then only read but NEVER
+ * modified or removed.
+ */
+#define SCMI_GET_PROTO(__ni, __pid)					\
+({									\
+	typeof(__ni) ni_ = __ni;					\
+	struct scmi_registered_events_desc *__pd = NULL;		\
+									\
+	if (ni_)							\
+		__pd = READ_ONCE(ni_->registered_protocols[(__pid)]);	\
+	__pd;								\
+})
+
+#define SCMI_GET_REVT_FROM_PD(__pd, __eid)				\
+({									\
+	typeof(__pd) pd_ = __pd;					\
+	typeof(__eid) eid_ = __eid;					\
+	struct scmi_registered_event *__revt = NULL;			\
+									\
+	if (pd_ && eid_ < pd_->num_events)				\
+		__revt = READ_ONCE(pd_->registered_events[eid_]);	\
+	__revt;								\
+})
+
+#define SCMI_GET_REVT(__ni, __pid, __eid)				\
+({									\
+	struct scmi_registered_event *__revt;				\
+	struct scmi_registered_events_desc *__pd;			\
+									\
+	__pd = SCMI_GET_PROTO((__ni), (__pid));				\
+	__revt = SCMI_GET_REVT_FROM_PD(__pd, (__eid));			\
+	__revt;								\
+})
+
+/* A couple of utility macros to limit cruft when calling protocols' helpers */
+#define REVT_NOTIFY_SET_STATUS(revt, eid, sid, state)		\
+({								\
+	typeof(revt) r = revt;					\
+	r->proto->ops->set_notify_enabled(r->proto->ni->handle,	\
+					(eid), (sid), (state));	\
+})
+
+#define REVT_NOTIFY_ENABLE(revt, eid, sid)			\
+	REVT_NOTIFY_SET_STATUS((revt), (eid), (sid), true)
+
+#define REVT_NOTIFY_DISABLE(revt, eid, sid)			\
+	REVT_NOTIFY_SET_STATUS((revt), (eid), (sid), false)
+
+#define REVT_FILL_REPORT(revt, ...)				\
+({								\
+	typeof(revt) r = revt;					\
+	r->proto->ops->fill_custom_report(r->proto->ni->handle,	\
+					  __VA_ARGS__);		\
+})
+
+#define SCMI_PENDING_HASH_SZ		4
+#define SCMI_REGISTERED_HASH_SZ		6
+
+struct scmi_registered_events_desc;
+
+/**
+ * struct scmi_notify_instance  - Represents an instance of the notification
+ * core
+ * @gid: GroupID used for devres
+ * @handle: A reference to the platform instance
+ * @init_work: A work item to perform final initializations of pending handlers
+ * @notify_wq: A reference to the allocated Kernel cmwq
+ * @pending_mtx: A mutex to protect @pending_events_handlers
+ * @registered_protocols: A statically allocated array containing pointers to
+ *			  all the registered protocol-level specific information
+ *			  related to events' handling
+ * @pending_events_handlers: A hashtable containing all pending events'
+ *			     handlers descriptors
+ *
+ * Each platform instance, represented by a handle, has its own instance of
+ * the notification subsystem represented by this structure.
+ */
+struct scmi_notify_instance {
+	void			*gid;
+	struct scmi_handle	*handle;
+	struct work_struct	init_work;
+	struct workqueue_struct	*notify_wq;
+	/* lock to protect pending_events_handlers */
+	struct mutex		pending_mtx;
+	struct scmi_registered_events_desc	**registered_protocols;
+	DECLARE_HASHTABLE(pending_events_handlers, SCMI_PENDING_HASH_SZ);
+};
+
+/**
+ * struct events_queue  - Describes a queue and its associated worker
+ * @sz: Size in bytes of the related kfifo
+ * @kfifo: A dedicated Kernel kfifo descriptor
+ * @notify_work: A custom work item bound to this queue
+ * @wq: A reference to the associated workqueue
+ *
+ * Each protocol has its own dedicated events_queue descriptor.
+ */
+struct events_queue {
+	size_t			sz;
+	struct kfifo		kfifo;
+	struct work_struct	notify_work;
+	struct workqueue_struct	*wq;
+};
+
+/**
+ * struct scmi_event_header  - A utility header
+ * @timestamp: The timestamp, in nanoseconds (boottime), which was associated
+ *	       to this event as soon as it entered the SCMI RX ISR
+ * @payld_sz: Effective size of the embedded message payload which follows
+ * @evt_id: Event ID (corresponds to the Event MsgID for this Protocol)
+ * @payld: A reference to the embedded event payload
+ *
+ * This header is prepended to each received event message payload before
+ * queueing it on the related &struct events_queue.
+ */
+struct scmi_event_header {
+	ktime_t timestamp;
+	size_t payld_sz;
+	unsigned char evt_id;
+	unsigned char payld[];
+};
+
+struct scmi_registered_event;
+
+/**
+ * struct scmi_registered_events_desc  - Protocol Specific information
+ * @id: Protocol ID
+ * @ops: Protocol specific and event-related operations
+ * @equeue: The embedded per-protocol events_queue
+ * @ni: A reference to the initialized instance descriptor
+ * @eh: A reference to pre-allocated buffer to be used as a scratch area by the
+ *	deferred worker when fetching data from the kfifo
+ * @eh_sz: Size of the pre-allocated buffer @eh
+ * @in_flight: A reference to an in flight &struct scmi_registered_event
+ * @num_events: Number of events in @registered_events
+ * @registered_events: A dynamically allocated array holding all the registered
+ *		       events' descriptors, whose fixed size is determined at
+ *		       compile time.
+ * @registered_mtx: A mutex to protect @registered_events_handlers
+ * @registered_events_handlers: A hashtable containing all events' handlers
+ *				descriptors registered for this protocol
+ *
+ * All protocols that register at least one event have their protocol-specific
+ * information stored here, together with the embedded allocated events_queue.
+ * These descriptors are stored in the @registered_protocols array at protocol
+ * registration time.
+ *
+ * Once these descriptors are successfully registered, they are NEVER again
+ * removed or modified since protocols do not unregister ever, so that, once
+ * we safely grab a NON-NULL reference from the array, we can keep it and use it.
+ */
+struct scmi_registered_events_desc {
+	u8				id;
+	const struct scmi_event_ops	*ops;
+	struct events_queue		equeue;
+	struct scmi_notify_instance	*ni;
+	struct scmi_event_header	*eh;
+	size_t				eh_sz;
+	void				*in_flight;
+	int				num_events;
+	struct scmi_registered_event	**registered_events;
+	/* mutex to protect registered_events_handlers */
+	struct mutex			registered_mtx;
+	DECLARE_HASHTABLE(registered_events_handlers, SCMI_REGISTERED_HASH_SZ);
+};
+
+/**
+ * struct scmi_registered_event  - Event Specific Information
+ * @proto: A reference to the associated protocol descriptor
+ * @evt: A reference to the associated event descriptor (as provided at
+ *       registration time)
+ * @report: A pre-allocated buffer used by the deferred worker to fill a
+ *	    customized event report
+ * @num_sources: The number of possible sources for this event as stated at
+ *		 events' registration time
+ * @sources: A reference to a dynamically allocated array used to refcount the
+ *	     events' enable requests for all the existing sources
+ * @sources_mtx: A mutex to serialize the access to @sources
+ *
+ * All registered events are represented by one of these structures that are
+ * stored in the @registered_events array at protocol registration time.
+ *
+ * Once these descriptors are successfully registered, they are NEVER again
+ * removed or modified since protocols do not unregister ever, so that once we
+ * safely grab a NON-NULL reference from the table, we can keep it and use it.
+ */
+struct scmi_registered_event {
+	struct scmi_registered_events_desc *proto;
+	const struct scmi_event	*evt;
+	void		*report;
+	u32		num_sources;
+	refcount_t	*sources;
+	/* locking to serialize the access to sources */
+	struct mutex	sources_mtx;
+};
+
+/**
+ * struct scmi_event_handler  - Event handler information
+ * @key: The used hashkey
+ * @users: A reference count for number of active users for this handler
+ * @r_evt: A reference to the associated registered event; when this is NULL
+ *	   this handler is pending, which means that it identifies a set of
+ *	   callbacks intended to be attached to an event which is not yet
+ *	   known or registered by any protocol at that point in time
+ * @chain: The notification chain dedicated to this specific event tuple
+ * @hash: The hlist_node used for collision handling
+ * @enabled: A boolean which records whether events' generation has already
+ *	     been enabled for this handler as a whole
+ *
+ * This structure collects all the information needed to process a received
+ * event identified by the tuple (proto_id, evt_id, src_id).
+ * These descriptors are stored in a per-protocol @registered_events_handlers
+ * table using as a key a value derived from that tuple.
+ */
+struct scmi_event_handler {
+	u32				key;
+	refcount_t			users;
+	struct scmi_registered_event	*r_evt;
+	struct blocking_notifier_head	chain;
+	struct hlist_node		hash;
+	bool				enabled;
+};
+
+#define IS_HNDL_PENDING(hndl)	(!(hndl)->r_evt)
+
+static struct scmi_event_handler *
+scmi_get_active_handler(struct scmi_notify_instance *ni, u32 evt_key);
+static void scmi_put_active_handler(struct scmi_notify_instance *ni,
+				    struct scmi_event_handler *hndl);
+static void scmi_put_handler_unlocked(struct scmi_notify_instance *ni,
+				      struct scmi_event_handler *hndl);
+
+/**
+ * scmi_lookup_and_call_event_chain()  - Lookup the proper chain and call it
+ * @ni: A reference to the notification instance to use
+ * @evt_key: The key to use to lookup the related notification chain
+ * @report: The customized event-specific report to pass down to the callbacks
+ *	    as their *data parameter.
+ */
+static inline void
+scmi_lookup_and_call_event_chain(struct scmi_notify_instance *ni,
+				 u32 evt_key, void *report)
+{
+	int ret;
+	struct scmi_event_handler *hndl;
+
+	/*
+	 * Here ensure the event handler cannot vanish while using it.
+	 * It is legitimate, though, for a handler not to be found at all here,
+	 * e.g. when it has been unregistered by the user after some events had
+	 * already been queued.
+	 */
+	hndl = scmi_get_active_handler(ni, evt_key);
+	if (!hndl)
+		return;
+
+	ret = blocking_notifier_call_chain(&hndl->chain,
+					   KEY_XTRACT_EVT_ID(evt_key),
+					   report);
+	/* Notifiers are NOT supposed to cut the chain ... */
+	WARN_ON_ONCE(ret & NOTIFY_STOP_MASK);
+
+	scmi_put_active_handler(ni, hndl);
+}
+
+/**
+ * scmi_process_event_header()  - Dequeue and process an event header
+ * @eq: The queue to use
+ * @pd: The protocol descriptor to use
+ *
+ * Read an event header from the protocol queue into the dedicated scratch
+ * buffer and look for a matching registered event; if an anomalously
+ * sized read is detected, just flush the queue.
+ *
+ * Return:
+ * * a reference to the matching registered event when found
+ * * ERR_PTR(-EINVAL) when NO registered event could be found
+ * * NULL when the queue is empty
+ */
+static inline struct scmi_registered_event *
+scmi_process_event_header(struct events_queue *eq,
+			  struct scmi_registered_events_desc *pd)
+{
+	unsigned int outs;
+	struct scmi_registered_event *r_evt;
+
+	outs = kfifo_out(&eq->kfifo, pd->eh,
+			 sizeof(struct scmi_event_header));
+	if (!outs)
+		return NULL;
+	if (outs != sizeof(struct scmi_event_header)) {
+		dev_err(pd->ni->handle->dev, "corrupted EVT header. Flush.\n");
+		kfifo_reset_out(&eq->kfifo);
+		return NULL;
+	}
+
+	r_evt = SCMI_GET_REVT_FROM_PD(pd, pd->eh->evt_id);
+	if (!r_evt)
+		r_evt = ERR_PTR(-EINVAL);
+
+	return r_evt;
+}
+
+/**
+ * scmi_process_event_payload()  - Dequeue and process an event payload
+ * @eq: The queue to use
+ * @pd: The protocol descriptor to use
+ * @r_evt: The registered event descriptor to use
+ *
+ * Read an event payload from the protocol queue into the dedicated scratch
+ * buffer, fill a custom report and then look for matching event handlers and
+ * call them; skip any unknown event (as marked by scmi_process_event_header())
+ * and, if an anomalously sized read is detected, just flush the queue.
+ *
+ * Return: False when the queue is empty
+ */
+static inline bool
+scmi_process_event_payload(struct events_queue *eq,
+			   struct scmi_registered_events_desc *pd,
+			   struct scmi_registered_event *r_evt)
+{
+	u32 src_id, key;
+	unsigned int outs;
+	void *report = NULL;
+
+	outs = kfifo_out(&eq->kfifo, pd->eh->payld, pd->eh->payld_sz);
+	if (!outs)
+		return false;
+
+	/* Any in-flight event has now been officially processed */
+	pd->in_flight = NULL;
+
+	if (outs != pd->eh->payld_sz) {
+		dev_err(pd->ni->handle->dev, "corrupted EVT Payload. Flush.\n");
+		kfifo_reset_out(&eq->kfifo);
+		return false;
+	}
+
+	if (IS_ERR(r_evt)) {
+		dev_warn(pd->ni->handle->dev,
+			 "SKIP UNKNOWN EVT - proto:%X  evt:%d\n",
+			 pd->id, pd->eh->evt_id);
+		return true;
+	}
+
+	report = REVT_FILL_REPORT(r_evt, pd->eh->evt_id, pd->eh->timestamp,
+				  pd->eh->payld, pd->eh->payld_sz,
+				  r_evt->report, &src_id);
+	if (!report) {
+		dev_err(pd->ni->handle->dev,
+			"report not available - proto:%X  evt:%d\n",
+			pd->id, pd->eh->evt_id);
+		return true;
+	}
+
+	/* At first search for a generic ALL src_ids handler... */
+	key = MAKE_ALL_SRCS_KEY(pd->id, pd->eh->evt_id);
+	scmi_lookup_and_call_event_chain(pd->ni, key, report);
+
+	/* ...then search for any specific src_id */
+	key = MAKE_HASH_KEY(pd->id, pd->eh->evt_id, src_id);
+	scmi_lookup_and_call_event_chain(pd->ni, key, report);
+
+	return true;
+}
+
+/**
+ * scmi_events_dispatcher()  - Common worker logic for all work items.
+ * @work: The work item to use, which is associated to a dedicated events_queue
+ *
+ * Logic:
+ *  1. dequeue one pending RX notification (queued in SCMI RX ISR context)
+ *  2. generate a custom event report from the received event message
+ *  3. look up any registered ALL_SRC_IDs handler:
+ *    -> call the related notification chain passing in the report
+ *  4. look up any registered specific SRC_ID handler:
+ *    -> call the related notification chain passing in the report
+ *
+ * Note that:
+ * * a dedicated per-protocol kfifo queue is used: in this way an anomalous
+ *   flood of events cannot saturate other protocols' queues.
+ * * each per-protocol queue is associated to a distinct work_item, which
+ *   means, in turn, that:
+ *   + all protocols can process their dedicated queues concurrently
+ *     (since notify_wq:max_active != 1)
+ *   + anyway at most one worker instance is allowed to run on the same queue
+ *     concurrently: this ensures that we can have only one concurrent
+ *     reader/writer on the associated kfifo, so that we can use it lock-less
+ *
+ * Context: Process context.
+ */
+static void scmi_events_dispatcher(struct work_struct *work)
+{
+	struct events_queue *eq;
+	struct scmi_registered_events_desc *pd;
+	struct scmi_registered_event *r_evt;
+
+	eq = container_of(work, struct events_queue, notify_work);
+	pd = container_of(eq, struct scmi_registered_events_desc, equeue);
+	/*
+	 * In order to keep the queue lock-less and the number of memcopies
+	 * to the bare minimum needed, the dispatcher accounts for the
+	 * possibility of per-protocol in-flight events: i.e. an event whose
+	 * reception could end up being split across two subsequent runs of this
+	 * worker, first the header, then the payload.
+	 */
+	do {
+		if (!pd->in_flight) {
+			r_evt = scmi_process_event_header(eq, pd);
+			if (!r_evt)
+				break;
+			pd->in_flight = r_evt;
+		} else {
+			r_evt = pd->in_flight;
+		}
+	} while (scmi_process_event_payload(eq, pd, r_evt));
+}
+
+/**
+ * scmi_notify()  - Queues a notification for further deferred processing
+ * @handle: The handle identifying the platform instance from which the
+ *	    dispatched event is generated
+ * @proto_id: Protocol ID
+ * @evt_id: Event ID (msgID)
+ * @buf: Event Message Payload (without the header)
+ * @len: Event Message Payload size
+ * @ts: RX Timestamp in nanoseconds (boottime)
+ *
+ * Context: Called in interrupt context to queue a received event for
+ * deferred processing.
+ *
+ * Return: 0 on Success
+ */
+int scmi_notify(const struct scmi_handle *handle, u8 proto_id, u8 evt_id,
+		const void *buf, size_t len, ktime_t ts)
+{
+	struct scmi_registered_event *r_evt;
+	struct scmi_event_header eh;
+	struct scmi_notify_instance *ni;
+
+	/* Ensure notify_priv is updated */
+	smp_rmb();
+	if (!handle->notify_priv)
+		return 0;
+	ni = handle->notify_priv;
+
+	r_evt = SCMI_GET_REVT(ni, proto_id, evt_id);
+	if (!r_evt)
+		return -EINVAL;
+
+	if (len > r_evt->evt->max_payld_sz) {
+		dev_err(handle->dev, "discard badly sized message\n");
+		return -EINVAL;
+	}
+	if (kfifo_avail(&r_evt->proto->equeue.kfifo) < sizeof(eh) + len) {
+		dev_warn(handle->dev,
+			 "queue full, dropping proto_id:%d  evt_id:%d  ts:%lld\n",
+			 proto_id, evt_id, ktime_to_ns(ts));
+		return -ENOMEM;
+	}
+
+	eh.timestamp = ts;
+	eh.evt_id = evt_id;
+	eh.payld_sz = len;
+	/*
+	 * Header and payload are enqueued with two distinct kfifo_in() (so non
+	 * atomic), but this situation is handled properly on the consumer side
+	 * with in-flight events tracking.
+	 */
+	kfifo_in(&r_evt->proto->equeue.kfifo, &eh, sizeof(eh));
+	kfifo_in(&r_evt->proto->equeue.kfifo, buf, len);
+	/*
+	 * Don't care about the return value here since we just want to ensure
+	 * that a work item is queued every time some items have been pushed
+	 * onto the kfifo:
+	 * - if work was already queued it will simply fail to queue a new one
+	 *   since it is not needed
+	 * - if work was not queued already it will be now, even if the work
+	 *   was in fact already running: this behavior avoids any possible race
+	 *   when this function pushes new items onto the kfifo after the
+	 *   related executing worker has already determined the kfifo to be
+	 *   empty and is terminating.
+	 */
+	queue_work(r_evt->proto->equeue.wq,
+		   &r_evt->proto->equeue.notify_work);
+
+	return 0;
+}
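
For context, a minimal sketch (not part of this patch) of how a transport/driver
RX path is expected to feed scmi_notify(): timestamp the event as early as
possible and pass along only the message payload. The function and the
header-unpacking helpers used here are hypothetical placeholders.

static void example_rx_notification(const struct scmi_handle *handle,
				    u32 msg_hdr, const void *payld, size_t len)
{
	/* Timestamp as close as possible to the actual reception */
	ktime_t ts = ktime_get_boottime();

	/* Hypothetical helpers extracting protocol and message IDs */
	u8 proto_id = example_hdr_proto_id(msg_hdr);
	u8 evt_id = example_hdr_msg_id(msg_hdr);

	/* Payload only (no SCMI header); processing is deferred to a worker */
	scmi_notify(handle, proto_id, evt_id, payld, len, ts);
}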
+
+/**
+ * scmi_kfifo_free()  - Devres action helper to free the kfifo
+ * @kfifo: The kfifo to free
+ */
+static void scmi_kfifo_free(void *kfifo)
+{
+	kfifo_free((struct kfifo *)kfifo);
+}
+
+/**
+ * scmi_initialize_events_queue()  - Allocate/Initialize a kfifo buffer
+ * @ni: A reference to the notification instance to use
+ * @equeue: The events_queue to initialize
+ * @sz: Size of the kfifo buffer to allocate
+ *
+ * Allocate a buffer for the kfifo and initialize it.
+ *
+ * Return: 0 on Success
+ */
+static int scmi_initialize_events_queue(struct scmi_notify_instance *ni,
+					struct events_queue *equeue, size_t sz)
+{
+	int ret;
+
+	if (kfifo_alloc(&equeue->kfifo, sz, GFP_KERNEL))
+		return -ENOMEM;
+	/* Size could have been rounded up to a power of two */
+	equeue->sz = kfifo_size(&equeue->kfifo);
+
+	ret = devm_add_action_or_reset(ni->handle->dev, scmi_kfifo_free,
+				       &equeue->kfifo);
+	if (ret)
+		return ret;
+
+	INIT_WORK(&equeue->notify_work, scmi_events_dispatcher);
+	equeue->wq = ni->notify_wq;
+
+	return ret;
+}
+
+/**
+ * scmi_allocate_registered_events_desc()  - Allocate a registered events'
+ * descriptor
+ * @ni: A reference to the &struct scmi_notify_instance notification instance
+ *	to use
+ * @proto_id: Protocol ID
+ * @queue_sz: Size of the associated queue to allocate
+ * @eh_sz: Size of the event header scratch area to pre-allocate
+ * @num_events: Number of events to support (size of @registered_events)
+ * @ops: Pointer to a struct holding references to protocol specific helpers
+ *	 needed during events handling
+ *
+ * It is supposed to be called only once for each protocol at protocol
+ * initialization time, so it warns if the requested protocol is found already
+ * registered.
+ *
+ * Return: The allocated and registered descriptor on Success
+ */
+static struct scmi_registered_events_desc *
+scmi_allocate_registered_events_desc(struct scmi_notify_instance *ni,
+				     u8 proto_id, size_t queue_sz, size_t eh_sz,
+				     int num_events,
+				     const struct scmi_event_ops *ops)
+{
+	int ret;
+	struct scmi_registered_events_desc *pd;
+
+	/* Ensure protocols are up to date */
+	smp_rmb();
+	if (WARN_ON(ni->registered_protocols[proto_id]))
+		return ERR_PTR(-EINVAL);
+
+	pd = devm_kzalloc(ni->handle->dev, sizeof(*pd), GFP_KERNEL);
+	if (!pd)
+		return ERR_PTR(-ENOMEM);
+	pd->id = proto_id;
+	pd->ops = ops;
+	pd->ni = ni;
+
+	ret = scmi_initialize_events_queue(ni, &pd->equeue, queue_sz);
+	if (ret)
+		return ERR_PTR(ret);
+
+	pd->eh = devm_kzalloc(ni->handle->dev, eh_sz, GFP_KERNEL);
+	if (!pd->eh)
+		return ERR_PTR(-ENOMEM);
+	pd->eh_sz = eh_sz;
+
+	pd->registered_events = devm_kcalloc(ni->handle->dev, num_events,
+					     sizeof(char *), GFP_KERNEL);
+	if (!pd->registered_events)
+		return ERR_PTR(-ENOMEM);
+	pd->num_events = num_events;
+
+	/* Initialize per protocol handlers table */
+	mutex_init(&pd->registered_mtx);
+	hash_init(pd->registered_events_handlers);
+
+	return pd;
+}
+
+/**
+ * scmi_register_protocol_events()  - Register Protocol Events with the core
+ * @handle: The handle identifying the platform instance against which the
+ *	    protocol's events are registered
+ * @proto_id: Protocol ID
+ * @queue_sz: Size in bytes of the associated queue to be allocated
+ * @ops: Protocol specific event-related operations
+ * @evt: Event descriptor array
+ * @num_events: Number of events in @evt array
+ * @num_sources: Number of possible sources for this protocol on this
+ *		 platform.
+ *
+ * Used by SCMI Protocols initialization code to register with the notification
+ * core the list of supported events and their descriptors: takes care to
+ * pre-allocate and store all needed descriptors, scratch buffers and event
+ * queues.
+ *
+ * Return: 0 on Success
+ */
+int scmi_register_protocol_events(const struct scmi_handle *handle,
+				  u8 proto_id, size_t queue_sz,
+				  const struct scmi_event_ops *ops,
+				  const struct scmi_event *evt, int num_events,
+				  int num_sources)
+{
+	int i;
+	size_t payld_sz = 0;
+	struct scmi_registered_events_desc *pd;
+	struct scmi_notify_instance *ni;
+
+	if (!ops || !evt)
+		return -EINVAL;
+
+	/* Ensure notify_priv is updated */
+	smp_rmb();
+	if (!handle->notify_priv)
+		return -ENOMEM;
+	ni = handle->notify_priv;
+
+	/* Attach to the notification main devres group */
+	if (!devres_open_group(ni->handle->dev, ni->gid, GFP_KERNEL))
+		return -ENOMEM;
+
+	for (i = 0; i < num_events; i++)
+		payld_sz = max_t(size_t, payld_sz, evt[i].max_payld_sz);
+	payld_sz += sizeof(struct scmi_event_header);
+
+	pd = scmi_allocate_registered_events_desc(ni, proto_id, queue_sz,
+						  payld_sz, num_events, ops);
+	if (IS_ERR(pd))
+		goto err;
+
+	for (i = 0; i < num_events; i++, evt++) {
+		struct scmi_registered_event *r_evt;
+
+		r_evt = devm_kzalloc(ni->handle->dev, sizeof(*r_evt),
+				     GFP_KERNEL);
+		if (!r_evt)
+			goto err;
+		r_evt->proto = pd;
+		r_evt->evt = evt;
+
+		r_evt->sources = devm_kcalloc(ni->handle->dev, num_sources,
+					      sizeof(refcount_t), GFP_KERNEL);
+		if (!r_evt->sources)
+			goto err;
+		r_evt->num_sources = num_sources;
+		mutex_init(&r_evt->sources_mtx);
+
+		r_evt->report = devm_kzalloc(ni->handle->dev,
+					     evt->max_report_sz, GFP_KERNEL);
+		if (!r_evt->report)
+			goto err;
+
+		pd->registered_events[i] = r_evt;
+		/* Ensure events are updated */
+		smp_wmb();
+		dev_dbg(handle->dev, "registered event - %lX\n",
+			MAKE_ALL_SRCS_KEY(r_evt->proto->id, r_evt->evt->id));
+	}
+
+	/* Register protocol and events...it will never be removed */
+	ni->registered_protocols[proto_id] = pd;
+	/* Ensure protocols are updated */
+	smp_wmb();
+
+	devres_close_group(ni->handle->dev, ni->gid);
+
+	/*
+	 * Finalize any pending events' handler which could have been waiting
+	 * for this protocol's events registration.
+	 */
+	schedule_work(&ni->init_work);
+
+	return 0;
+
+err:
+	dev_warn(handle->dev, "Proto:%X - Registration Failed !\n", proto_id);
+	/* A failing protocol registration does not trigger full failure */
+	devres_close_group(ni->handle->dev, ni->gid);
+
+	return -ENOMEM;
+}
+
+/**
+ * scmi_allocate_event_handler()  - Allocate Event handler
+ * @ni: A reference to the notification instance to use
+ * @evt_key: 32bit key uniquely bound to the event identified by the tuple
+ *	     (proto_id, evt_id, src_id)
+ *
+ * Allocate an event handler and related notification chain associated with
+ * the provided event handler key.
+ * Note that, at this point, a related registered_event is still to be
+ * associated with this handler descriptor (hndl->r_evt == NULL), so the handler
+ * is initialized as pending.
+ *
+ * Context: Assumes to be called with @pending_mtx already acquired.
+ * Return: the freshly allocated structure on Success
+ */
+static struct scmi_event_handler *
+scmi_allocate_event_handler(struct scmi_notify_instance *ni, u32 evt_key)
+{
+	struct scmi_event_handler *hndl;
+
+	hndl = kzalloc(sizeof(*hndl), GFP_KERNEL);
+	if (!hndl)
+		return NULL;
+	hndl->key = evt_key;
+	BLOCKING_INIT_NOTIFIER_HEAD(&hndl->chain);
+	refcount_set(&hndl->users, 1);
+	/* New handlers are created pending */
+	hash_add(ni->pending_events_handlers, &hndl->hash, hndl->key);
+
+	return hndl;
+}
+
+/**
+ * scmi_free_event_handler()  - Free the provided Event handler
+ * @hndl: The event handler structure to free
+ *
+ * Context: Assumes to be called with proper locking acquired depending
+ *	    on the situation.
+ */
+static void scmi_free_event_handler(struct scmi_event_handler *hndl)
+{
+	hash_del(&hndl->hash);
+	kfree(hndl);
+}
+
+/**
+ * scmi_bind_event_handler()  - Helper to attempt binding a handler to an event
+ * @ni: A reference to the notification instance to use
+ * @hndl: The event handler to bind
+ *
+ * If an associated registered event is found, move the handler from the pending
+ * into the registered table.
+ *
+ * Context: Assumes to be called with @pending_mtx already acquired.
+ *
+ * Return: 0 on Success
+ */
+static inline int scmi_bind_event_handler(struct scmi_notify_instance *ni,
+					  struct scmi_event_handler *hndl)
+{
+	struct scmi_registered_event *r_evt;
+
+	r_evt = SCMI_GET_REVT(ni, KEY_XTRACT_PROTO_ID(hndl->key),
+			      KEY_XTRACT_EVT_ID(hndl->key));
+	if (!r_evt)
+		return -EINVAL;
+
+	/* Remove from pending and insert into registered */
+	hash_del(&hndl->hash);
+	hndl->r_evt = r_evt;
+	mutex_lock(&r_evt->proto->registered_mtx);
+	hash_add(r_evt->proto->registered_events_handlers,
+		 &hndl->hash, hndl->key);
+	mutex_unlock(&r_evt->proto->registered_mtx);
+
+	return 0;
+}
+
+/**
+ * scmi_valid_pending_handler()  - Helper to check pending status of handlers
+ * @ni: A reference to the notification instance to use
+ * @hndl: The event handler to check
+ *
+ * A handler is considered pending when its r_evt == NULL, because the related
+ * event was still unknown at handler's registration time; however, since all
+ * protocols register their supported events once and for all at protocol
+ * initialization time, a pending handler can no longer be considered valid if
+ * the underlying event it is waiting for belongs to an already initialized
+ * and registered protocol.
+ *
+ * Return: 0 on Success
+ */
+static inline int scmi_valid_pending_handler(struct scmi_notify_instance *ni,
+					     struct scmi_event_handler *hndl)
+{
+	struct scmi_registered_events_desc *pd;
+
+	if (!IS_HNDL_PENDING(hndl))
+		return -EINVAL;
+
+	pd = SCMI_GET_PROTO(ni, KEY_XTRACT_PROTO_ID(hndl->key));
+	if (pd)
+		return -EINVAL;
+
+	return 0;
+}
+
+/**
+ * scmi_register_event_handler()  - Register whenever possible an Event handler
+ * @ni: A reference to the notification instance to use
+ * @hndl: The event handler to register
+ *
+ * At first try to bind an event handler to its associated event, then check if
+ * it was at least a valid pending handler: if it was neither bound nor valid,
+ * return an error.
+ *
+ * Valid pending incomplete bindings will be retried later by a dedicated
+ * worker which is kicked each time a new protocol completes its own
+ * registration phase.
+ *
+ * Context: Assumes to be called with @pending_mtx acquired.
+ *
+ * Return: 0 on Success
+ */
+static int scmi_register_event_handler(struct scmi_notify_instance *ni,
+				       struct scmi_event_handler *hndl)
+{
+	int ret;
+
+	ret = scmi_bind_event_handler(ni, hndl);
+	if (!ret) {
+		dev_dbg(ni->handle->dev, "registered NEW handler - key:%X\n",
+			hndl->key);
+	} else {
+		ret = scmi_valid_pending_handler(ni, hndl);
+		if (!ret)
+			dev_dbg(ni->handle->dev,
+				"registered PENDING handler - key:%X\n",
+				hndl->key);
+	}
+
+	return ret;
+}
+
+/**
+ * __scmi_event_handler_get_ops()  - Utility to get or create an event handler
+ * @ni: A reference to the notification instance to use
+ * @evt_key: The event key to use
+ * @create: A boolean flag to specify if a handler must be created when
+ *	    not already existent
+ *
+ * Search for the desired handler matching the key in both the per-protocol
+ * registered table and the common pending table:
+ * * if found adjust users refcount
+ * * if not found and @create is true, create and register the new handler:
+ *   handler could end up being registered as pending if no matching event
+ *   could be found.
+ *
+ * A handler is guaranteed to reside in one and only one of the tables at
+ * any one time; to ensure this the whole search and create is performed
+ * holding the @pending_mtx lock, with @registered_mtx additionally acquired
+ * if needed.
+ *
+ * Note that when a nested acquisition of these mutexes is needed the locking
+ * order is always (same as in @init_work):
+ * 1. pending_mtx
+ * 2. registered_mtx
+ *
+ * Events generation is NOT enabled right after creation within this routine
+ * since at creation time we usually want to have everything set up and ready
+ * before events really start flowing.
+ *
+ * Return: A properly refcounted handler on Success, NULL on Failure
+ */
+static inline struct scmi_event_handler *
+__scmi_event_handler_get_ops(struct scmi_notify_instance *ni,
+			     u32 evt_key, bool create)
+{
+	struct scmi_registered_event *r_evt;
+	struct scmi_event_handler *hndl = NULL;
+
+	r_evt = SCMI_GET_REVT(ni, KEY_XTRACT_PROTO_ID(evt_key),
+			      KEY_XTRACT_EVT_ID(evt_key));
+
+	mutex_lock(&ni->pending_mtx);
+	/* Search registered events at first ... if possible at all */
+	if (r_evt) {
+		mutex_lock(&r_evt->proto->registered_mtx);
+		hndl = KEY_FIND(r_evt->proto->registered_events_handlers,
+				hndl, evt_key);
+		if (hndl)
+			refcount_inc(&hndl->users);
+		mutex_unlock(&r_evt->proto->registered_mtx);
+	}
+
+	/* ...then amongst pending. */
+	if (!hndl) {
+		hndl = KEY_FIND(ni->pending_events_handlers, hndl, evt_key);
+		if (hndl)
+			refcount_inc(&hndl->users);
+	}
+
+	/* Create if still not found and required */
+	if (!hndl && create) {
+		hndl = scmi_allocate_event_handler(ni, evt_key);
+		if (hndl && scmi_register_event_handler(ni, hndl)) {
+			dev_dbg(ni->handle->dev,
+				"purging UNKNOWN handler - key:%X\n",
+				hndl->key);
+			/* this hndl can be only a pending one */
+			scmi_put_handler_unlocked(ni, hndl);
+			hndl = NULL;
+		}
+	}
+	mutex_unlock(&ni->pending_mtx);
+
+	return hndl;
+}
+
+static struct scmi_event_handler *
+scmi_get_handler(struct scmi_notify_instance *ni, u32 evt_key)
+{
+	return __scmi_event_handler_get_ops(ni, evt_key, false);
+}
+
+static struct scmi_event_handler *
+scmi_get_or_create_handler(struct scmi_notify_instance *ni, u32 evt_key)
+{
+	return __scmi_event_handler_get_ops(ni, evt_key, true);
+}
+
+/**
+ * scmi_get_active_handler()  - Helper to get active handlers only
+ * @ni: A reference to the notification instance to use
+ * @evt_key: The event key to use
+ *
+ * Search for the desired handler matching the key only in the per-protocol
+ * table of registered handlers: this is called only from the dispatching path
+ * so we want to be as quick as possible and do not care about pending handlers.
+ *
+ * Return: A properly refcounted active handler
+ */
+static struct scmi_event_handler *
+scmi_get_active_handler(struct scmi_notify_instance *ni, u32 evt_key)
+{
+	struct scmi_registered_event *r_evt;
+	struct scmi_event_handler *hndl = NULL;
+
+	r_evt = SCMI_GET_REVT(ni, KEY_XTRACT_PROTO_ID(evt_key),
+			      KEY_XTRACT_EVT_ID(evt_key));
+	if (r_evt) {
+		mutex_lock(&r_evt->proto->registered_mtx);
+		hndl = KEY_FIND(r_evt->proto->registered_events_handlers,
+				hndl, evt_key);
+		if (hndl)
+			refcount_inc(&hndl->users);
+		mutex_unlock(&r_evt->proto->registered_mtx);
+	}
+
+	return hndl;
+}
+
+/**
+ * __scmi_enable_evt()  - Enable/disable events generation
+ * @r_evt: The registered event to act upon
+ * @src_id: The src_id to act upon
+ * @enable: The action to perform: true->Enable, false->Disable
+ *
+ * Takes care of proper refcounting while performing enable/disable: handles
+ * the special case of ALL sources requests by itself.
+ * Returns 0 if at least one of the required src_ids has been successfully
+ * enabled/disabled.
+ *
+ * Return: 0 on Success
+ */
+static inline int __scmi_enable_evt(struct scmi_registered_event *r_evt,
+				    u32 src_id, bool enable)
+{
+	int retvals = 0;
+	u32 num_sources;
+	refcount_t *sid;
+
+	if (src_id == SRC_ID_MASK) {
+		src_id = 0;
+		num_sources = r_evt->num_sources;
+	} else if (src_id < r_evt->num_sources) {
+		num_sources = 1;
+	} else {
+		return -EINVAL;
+	}
+
+	mutex_lock(&r_evt->sources_mtx);
+	if (enable) {
+		for (; num_sources; src_id++, num_sources--) {
+			int ret = 0;
+
+			sid = &r_evt->sources[src_id];
+			if (refcount_read(sid) == 0) {
+				ret = REVT_NOTIFY_ENABLE(r_evt, r_evt->evt->id,
+							 src_id);
+				if (!ret)
+					refcount_set(sid, 1);
+			} else {
+				refcount_inc(sid);
+			}
+			retvals += !ret;
+		}
+	} else {
+		for (; num_sources; src_id++, num_sources--) {
+			sid = &r_evt->sources[src_id];
+			if (refcount_dec_and_test(sid))
+				REVT_NOTIFY_DISABLE(r_evt,
+						    r_evt->evt->id, src_id);
+		}
+		retvals = 1;
+	}
+	mutex_unlock(&r_evt->sources_mtx);
+
+	return retvals ? 0 : -EINVAL;
+}
+
+static int scmi_enable_events(struct scmi_event_handler *hndl)
+{
+	int ret = 0;
+
+	if (!hndl->enabled) {
+		ret = __scmi_enable_evt(hndl->r_evt,
+					KEY_XTRACT_SRC_ID(hndl->key), true);
+		if (!ret)
+			hndl->enabled = true;
+	}
+
+	return ret;
+}
+
+static int scmi_disable_events(struct scmi_event_handler *hndl)
+{
+	int ret = 0;
+
+	if (hndl->enabled) {
+		ret = __scmi_enable_evt(hndl->r_evt,
+					KEY_XTRACT_SRC_ID(hndl->key), false);
+		if (!ret)
+			hndl->enabled = false;
+	}
+
+	return ret;
+}
+
+/**
+ * scmi_put_handler_unlocked()  - Put an event handler
+ * @ni: A reference to the notification instance to use
+ * @hndl: The event handler to act upon
+ *
+ * After exclusive access to the registered handlers hashtable has been
+ * obtained, update the refcount and, if @hndl is no longer in use by anyone:
+ * * ask for events' generation disabling
+ * * unregister and free the handler itself
+ *
+ * Context: Assumes all the proper locking has been managed by the caller.
+ */
+static void scmi_put_handler_unlocked(struct scmi_notify_instance *ni,
+				      struct scmi_event_handler *hndl)
+{
+	if (refcount_dec_and_test(&hndl->users)) {
+		if (!IS_HNDL_PENDING(hndl))
+			scmi_disable_events(hndl);
+		scmi_free_event_handler(hndl);
+	}
+}
+
+static void scmi_put_handler(struct scmi_notify_instance *ni,
+			     struct scmi_event_handler *hndl)
+{
+	struct scmi_registered_event *r_evt = hndl->r_evt;
+
+	mutex_lock(&ni->pending_mtx);
+	if (r_evt)
+		mutex_lock(&r_evt->proto->registered_mtx);
+
+	scmi_put_handler_unlocked(ni, hndl);
+
+	if (r_evt)
+		mutex_unlock(&r_evt->proto->registered_mtx);
+	mutex_unlock(&ni->pending_mtx);
+}
+
+static void scmi_put_active_handler(struct scmi_notify_instance *ni,
+				    struct scmi_event_handler *hndl)
+{
+	struct scmi_registered_event *r_evt = hndl->r_evt;
+
+	mutex_lock(&r_evt->proto->registered_mtx);
+	scmi_put_handler_unlocked(ni, hndl);
+	mutex_unlock(&r_evt->proto->registered_mtx);
+}
+
+/**
+ * scmi_event_handler_enable_events()  - Enable events associated with a handler
+ * @hndl: The Event handler to act upon
+ *
+ * Return: 0 on Success
+ */
+static int scmi_event_handler_enable_events(struct scmi_event_handler *hndl)
+{
+	if (scmi_enable_events(hndl)) {
+		pr_err("Failed to ENABLE events for key:%X !\n", hndl->key);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/**
+ * scmi_register_notifier()  - Register a notifier_block for an event
+ * @handle: The handle identifying the platform instance against which the
+ *	    callback is registered
+ * @proto_id: Protocol ID
+ * @evt_id: Event ID
+ * @src_id: Source ID, when NULL register for events coming from ALL possible
+ *	    sources
+ * @nb: A standard notifier block to register for the specified event
+ *
+ * Generic helper to register a notifier_block against a protocol event.
+ *
+ * A notifier_block @nb will be registered for each distinct event identified
+ * by the tuple (proto_id, evt_id, src_id) on a dedicated notification chain
+ * so that:
+ *
+ *	(proto_X, evt_Y, src_Z) --> chain_X_Y_Z
+ *
+ * @src_id meaning is protocol specific and identifies the origin of the event
+ * (like domain_id, sensor_id and so forth).
+ *
+ * @src_id can be NULL to signify that the caller is interested in receiving
+ * notifications from ALL the available sources for that protocol OR simply that
+ * the protocol does not support distinct sources.
+ *
+ * As soon as one user for the specified tuple appears, a handler is created,
+ * and that specific event's generation is enabled at the platform level, unless
+ * an associated registered event is found missing, meaning that the needed
+ * protocol is still to be initialized and the handler has just been registered
+ * as still pending.
+ *
+ * Return: 0 on Success
+ */
+static int scmi_register_notifier(const struct scmi_handle *handle,
+				  u8 proto_id, u8 evt_id, u32 *src_id,
+				  struct notifier_block *nb)
+{
+	int ret = 0;
+	u32 evt_key;
+	struct scmi_event_handler *hndl;
+	struct scmi_notify_instance *ni;
+
+	/* Ensure notify_priv is updated */
+	smp_rmb();
+	if (!handle->notify_priv)
+		return -ENODEV;
+	ni = handle->notify_priv;
+
+	evt_key = MAKE_HASH_KEY(proto_id, evt_id,
+				src_id ? *src_id : SRC_ID_MASK);
+	hndl = scmi_get_or_create_handler(ni, evt_key);
+	if (!hndl)
+		return -EINVAL;
+
+	blocking_notifier_chain_register(&hndl->chain, nb);
+
+	/* Enable events for not pending handlers */
+	if (!IS_HNDL_PENDING(hndl)) {
+		ret = scmi_event_handler_enable_events(hndl);
+		if (ret)
+			scmi_put_handler(ni, hndl);
+	}
+
+	return ret;
+}
+
+/**
+ * scmi_unregister_notifier()  - Unregister a notifier_block for an event
+ * @handle: The handle identifying the platform instance against which the
+ *	    callback is unregistered
+ * @proto_id: Protocol ID
+ * @evt_id: Event ID
+ * @src_id: Source ID
+ * @nb: The notifier_block to unregister
+ *
+ * Takes care to unregister the provided @nb from the notification chain
+ * associated with the specified event and, if there are no more users for the
+ * event handler, also frees the associated event handler structures (this
+ * could possibly cause events' generation to be disabled at the platform
+ * level).
+ *
+ * Return: 0 on Success
+ */
+static int scmi_unregister_notifier(const struct scmi_handle *handle,
+				    u8 proto_id, u8 evt_id, u32 *src_id,
+				    struct notifier_block *nb)
+{
+	u32 evt_key;
+	struct scmi_event_handler *hndl;
+	struct scmi_notify_instance *ni;
+
+	/* Ensure notify_priv is updated */
+	smp_rmb();
+	if (!handle->notify_priv)
+		return -ENODEV;
+	ni = handle->notify_priv;
+
+	evt_key = MAKE_HASH_KEY(proto_id, evt_id,
+				src_id ? *src_id : SRC_ID_MASK);
+	hndl = scmi_get_handler(ni, evt_key);
+	if (!hndl)
+		return -EINVAL;
+
+	/*
+	 * Note that this chain unregistration call is safe on its own
+	 * being internally protected by an rwsem.
+	 */
+	blocking_notifier_chain_unregister(&hndl->chain, nb);
+	scmi_put_handler(ni, hndl);
+
+	/*
+	 * This balances the initial get issued in @scmi_register_notifier.
+	 * If this notifier_block happened to be the last known user callback
+	 * for this event, the handler is here freed and the event's generation
+	 * stopped.
+	 *
+	 * Note that an ongoing concurrent lookup on the delivery workqueue
+	 * path could still hold the refcount at 1 even after this routine
+	 * completes: in such a case it will be the final put on the delivery
+	 * path which will finally free this unused handler.
+	 */
+	scmi_put_handler(ni, hndl);
+
+	return 0;
+}
+
+/**
+ * scmi_protocols_late_init()  - Worker for late initialization
+ * @work: The work item to use associated to the proper SCMI instance
+ *
+ * This kicks in whenever a new protocol has completed its own registration via
+ * scmi_register_protocol_events(): it is in charge of scanning the table of
+ * pending handlers (registered by users while the related protocol was still
+ * not initialized) and finalizing their initialization whenever possible;
+ * invalid pending handlers are purged at this point in time.
+ */
+static void scmi_protocols_late_init(struct work_struct *work)
+{
+	int bkt;
+	struct scmi_event_handler *hndl;
+	struct scmi_notify_instance *ni;
+	struct hlist_node *tmp;
+
+	ni = container_of(work, struct scmi_notify_instance, init_work);
+
+	/* Ensure protocols and events are up to date */
+	smp_rmb();
+
+	mutex_lock(&ni->pending_mtx);
+	hash_for_each_safe(ni->pending_events_handlers, bkt, tmp, hndl, hash) {
+		int ret;
+
+		ret = scmi_bind_event_handler(ni, hndl);
+		if (!ret) {
+			dev_dbg(ni->handle->dev,
+				"finalized PENDING handler - key:%X\n",
+				hndl->key);
+			ret = scmi_event_handler_enable_events(hndl);
+			if (ret) {
+				dev_dbg(ni->handle->dev,
+					"purging INVALID handler - key:%X\n",
+					hndl->key);
+				scmi_put_active_handler(ni, hndl);
+			}
+		} else {
+			ret = scmi_valid_pending_handler(ni, hndl);
+			if (ret) {
+				dev_dbg(ni->handle->dev,
+					"purging PENDING handler - key:%X\n",
+					hndl->key);
+				/* this hndl can be only a pending one */
+				scmi_put_handler_unlocked(ni, hndl);
+			}
+		}
+	}
+	mutex_unlock(&ni->pending_mtx);
+}
+
+/*
+ * notify_ops are attached to the handle so that they can be accessed
+ * directly from an scmi_driver to register its own notifiers.
+ */
+static const struct scmi_notify_ops notify_ops = {
+	.register_event_notifier = scmi_register_notifier,
+	.unregister_event_notifier = scmi_unregister_notifier,
+};
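
To illustrate the consumer side of these ops, here is a minimal sketch (not
part of this patch) of an SCMI driver registering and unregistering a callback
for performance-limits events. The callback body, the choice of event and the
example_* names are illustrative only; the ops, the event ID and the report
structure are the ones this patch introduces, and a NULL src_id requests
notifications from ALL sources.

static int example_limits_cb(struct notifier_block *nb,
			     unsigned long event, void *data)
{
	struct scmi_perf_limits_report *r = data;

	pr_info("limits changed on domain %u\n", r->domain_id);
	return NOTIFY_OK;
}

static struct notifier_block example_limits_nb = {
	.notifier_call = example_limits_cb,
};

static int example_setup(const struct scmi_handle *handle)
{
	/* NULL src_id: events from ALL performance domains */
	return handle->notify_ops->register_event_notifier(handle,
			SCMI_PROTOCOL_PERF,
			SCMI_EVENT_PERFORMANCE_LIMITS_CHANGED,
			NULL, &example_limits_nb);
}

static void example_teardown(const struct scmi_handle *handle)
{
	handle->notify_ops->unregister_event_notifier(handle,
			SCMI_PROTOCOL_PERF,
			SCMI_EVENT_PERFORMANCE_LIMITS_CHANGED,
			NULL, &example_limits_nb);
}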
+
+/**
+ * scmi_notification_init()  - Initializes Notification Core Support
+ * @handle: The handle identifying the platform instance to initialize
+ *
+ * This function lays out all the basic resources needed by the notification
+ * core instance identified by the provided handle: once done, all of the
+ * SCMI Protocols can register their events with the core during their own
+ * initializations.
+ *
+ * Note that failing to initialize the core notifications support does not
+ * cause the whole SCMI Protocols stack to fail its initialization.
+ *
+ * SCMI Notification Initialization happens in 2 steps:
+ * * initialization: basic common allocations (this function)
+ * * registration: protocols asynchronously come to life and register their
+ *		   own supported list of events with the core; this causes
+ *		   further per-protocol allocations
+ *
+ * Any user's callback registration attempt that refers to a still-unregistered
+ * event will be registered as pending and finalized later (if possible)
+ * by the scmi_protocols_late_init() work.
+ * This allows for lazy initialization of SCMI Protocols due to late (or
+ * missing) SCMI drivers' modules loading.
+ *
+ * Return: 0 on Success
+ */
+int scmi_notification_init(struct scmi_handle *handle)
+{
+	void *gid;
+	struct scmi_notify_instance *ni;
+
+	gid = devres_open_group(handle->dev, NULL, GFP_KERNEL);
+	if (!gid)
+		return -ENOMEM;
+
+	ni = devm_kzalloc(handle->dev, sizeof(*ni), GFP_KERNEL);
+	if (!ni)
+		goto err;
+
+	ni->gid = gid;
+	ni->handle = handle;
+
+	ni->registered_protocols = devm_kcalloc(handle->dev, SCMI_MAX_PROTO,
+						sizeof(char *), GFP_KERNEL);
+	if (!ni->registered_protocols)
+		goto err;
+
+	ni->notify_wq = alloc_workqueue(dev_name(handle->dev),
+					WQ_UNBOUND | WQ_FREEZABLE | WQ_SYSFS,
+					0);
+	if (!ni->notify_wq)
+		goto err;
+
+	mutex_init(&ni->pending_mtx);
+	hash_init(ni->pending_events_handlers);
+
+	INIT_WORK(&ni->init_work, scmi_protocols_late_init);
+
+	handle->notify_ops = &notify_ops;
+	handle->notify_priv = ni;
+	/* Ensure handle is up to date */
+	smp_wmb();
+
+	dev_info(handle->dev, "Core Enabled.\n");
+
+	devres_close_group(handle->dev, ni->gid);
+
+	return 0;
+
+err:
+	dev_warn(handle->dev, "Initialization Failed.\n");
+	devres_release_group(handle->dev, NULL);
+	return -ENOMEM;
+}
+
+/**
+ * scmi_notification_exit()  - Shutdown and clean Notification core
+ * @handle: The handle identifying the platform instance to shutdown
+ */
+void scmi_notification_exit(struct scmi_handle *handle)
+{
+	struct scmi_notify_instance *ni;
+
+	/* Ensure notify_priv is updated */
+	smp_rmb();
+	if (!handle->notify_priv)
+		return;
+	ni = handle->notify_priv;
+
+	handle->notify_priv = NULL;
+	/* Ensure handle is up to date */
+	smp_wmb();
+
+	/* Destroy while letting pending work complete */
+	destroy_workqueue(ni->notify_wq);
+
+	devres_release_group(ni->handle->dev, ni->gid);
+}
diff --git a/drivers/firmware/arm_scmi/notify.h b/drivers/firmware/arm_scmi/notify.h
new file mode 100644
index 0000000..3485f20
--- /dev/null
+++ b/drivers/firmware/arm_scmi/notify.h
@@ -0,0 +1,68 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * System Control and Management Interface (SCMI) Message Protocol
+ * notification header file containing some definitions, structures
+ * and function prototypes related to SCMI Notification handling.
+ *
+ * Copyright (C) 2020 ARM Ltd.
+ */
+#ifndef _SCMI_NOTIFY_H
+#define _SCMI_NOTIFY_H
+
+#include <linux/device.h>
+#include <linux/ktime.h>
+#include <linux/types.h>
+
+#define SCMI_PROTO_QUEUE_SZ	4096
+
+/**
+ * struct scmi_event  - Describes an event to be supported
+ * @id: Event ID
+ * @max_payld_sz: Max possible size for the payload of a notification message
+ * @max_report_sz: Max possible size for the report of a notification message
+ *
+ * Each SCMI protocol, during its initialization phase, can describe the events
+ * it wishes to support in an array of struct scmi_event entries and pass them
+ * to the core using scmi_register_protocol_events().
+ */
+struct scmi_event {
+	u8	id;
+	size_t	max_payld_sz;
+	size_t	max_report_sz;
+};
+
+/**
+ * struct scmi_event_ops  - Protocol helpers called by the notification core.
+ * @set_notify_enabled: Enable/disable the required evt_id/src_id notifications
+ *			using the proper custom protocol commands.
+ *			Return 0 on Success
+ * @fill_custom_report: Fills a custom event report from the provided
+ *			event message payload and identifies the event
+ *			specific src_id.
+ *			Return NULL on failure, otherwise the now fully
+ *			populated @report
+ *
+ * Context: Helpers described in &struct scmi_event_ops are called only in
+ *	    process context.
+ */
+struct scmi_event_ops {
+	int (*set_notify_enabled)(const struct scmi_handle *handle,
+				  u8 evt_id, u32 src_id, bool enabled);
+	void *(*fill_custom_report)(const struct scmi_handle *handle,
+				    u8 evt_id, ktime_t timestamp,
+				    const void *payld, size_t payld_sz,
+				    void *report, u32 *src_id);
+};
+
+int scmi_notification_init(struct scmi_handle *handle);
+void scmi_notification_exit(struct scmi_handle *handle);
+
+int scmi_register_protocol_events(const struct scmi_handle *handle,
+				  u8 proto_id, size_t queue_sz,
+				  const struct scmi_event_ops *ops,
+				  const struct scmi_event *evt, int num_events,
+				  int num_sources);
+int scmi_notify(const struct scmi_handle *handle, u8 proto_id, u8 evt_id,
+		const void *buf, size_t len, ktime_t ts);
+
+#endif /* _SCMI_NOTIFY_H */
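
The perf, power, reset and sensors hunks that follow all wire into this header
in the same way; condensed to a skeleton with illustrative example_*/EXAMPLE_*
placeholders (the real definitions live in the hunks below), the per-protocol
pattern looks roughly like this:

static const struct scmi_event example_events[] = {
	{
		.id = EXAMPLE_EVENT_ID,
		.max_payld_sz = sizeof(struct example_notify_payld),
		.max_report_sz = sizeof(struct example_report),
	},
};

static const struct scmi_event_ops example_event_ops = {
	.set_notify_enabled = example_set_notify_enabled,
	.fill_custom_report = example_fill_custom_report,
};

static int example_protocol_init(struct scmi_handle *handle)
{
	int num_sources = 1;	/* e.g. number of domains discovered earlier */

	/* Called once per protocol, from its init routine */
	return scmi_register_protocol_events(handle, SCMI_PROTOCOL_EXAMPLE,
					     SCMI_PROTO_QUEUE_SZ,
					     &example_event_ops, example_events,
					     ARRAY_SIZE(example_events),
					     num_sources);
}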
diff --git a/drivers/firmware/arm_scmi/perf.c b/drivers/firmware/arm_scmi/perf.c
index 129a288..82fb3ba 100644
--- a/drivers/firmware/arm_scmi/perf.c
+++ b/drivers/firmware/arm_scmi/perf.c
@@ -5,15 +5,19 @@
  * Copyright (C) 2018 ARM Ltd.
  */
 
+#define pr_fmt(fmt) "SCMI Notifications PERF - " fmt
+
 #include <linux/bits.h>
 #include <linux/of.h>
 #include <linux/io.h>
 #include <linux/io-64-nonatomic-hi-lo.h>
 #include <linux/platform_device.h>
 #include <linux/pm_opp.h>
+#include <linux/scmi_protocol.h>
 #include <linux/sort.h>
 
 #include "common.h"
+#include "notify.h"
 
 enum scmi_performance_protocol_cmd {
 	PERF_DOMAIN_ATTRIBUTES = 0x3,
@@ -81,6 +85,19 @@
 	__le32 notify_enable;
 };
 
+struct scmi_perf_limits_notify_payld {
+	__le32 agent_id;
+	__le32 domain_id;
+	__le32 range_max;
+	__le32 range_min;
+};
+
+struct scmi_perf_level_notify_payld {
+	__le32 agent_id;
+	__le32 domain_id;
+	__le32 performance_level;
+};
+
 struct scmi_msg_resp_perf_describe_levels {
 	__le16 num_returned;
 	__le16 num_remaining;
@@ -89,7 +106,7 @@
 		__le32 power;
 		__le16 transition_latency_us;
 		__le16 reserved;
-	} opp[0];
+	} opp[];
 };
 
 struct scmi_perf_get_fc_info {
@@ -145,6 +162,7 @@
 };
 
 struct scmi_perf_info {
+	u32 version;
 	int num_domains;
 	bool power_scale_mw;
 	u64 stats_addr;
@@ -152,6 +170,11 @@
 	struct perf_dom_info *dom_info;
 };
 
+static enum scmi_performance_protocol_cmd evt_2_cmd[] = {
+	PERF_NOTIFY_LIMITS,
+	PERF_NOTIFY_LEVEL,
+};
+
 static int scmi_perf_attributes_get(const struct scmi_handle *handle,
 				    struct scmi_perf_info *pi)
 {
@@ -484,6 +507,29 @@
 	return scmi_perf_mb_level_get(handle, domain, level, poll);
 }
 
+static int scmi_perf_level_limits_notify(const struct scmi_handle *handle,
+					 u32 domain, int message_id,
+					 bool enable)
+{
+	int ret;
+	struct scmi_xfer *t;
+	struct scmi_perf_notify_level_or_limits *notify;
+
+	ret = scmi_xfer_get_init(handle, message_id, SCMI_PROTOCOL_PERF,
+				 sizeof(*notify), 0, &t);
+	if (ret)
+		return ret;
+
+	notify = t->tx.buf;
+	notify->domain = cpu_to_le32(domain);
+	notify->notify_enable = enable ? cpu_to_le32(BIT(0)) : 0;
+
+	ret = scmi_do_xfer(handle, t);
+
+	scmi_xfer_put(handle, t);
+	return ret;
+}
+
 static bool scmi_perf_fc_size_is_valid(u32 msg, u32 size)
 {
 	if ((msg == PERF_LEVEL_GET || msg == PERF_LEVEL_SET) && size == 4)
@@ -693,7 +739,18 @@
 	return ret;
 }
 
-static struct scmi_perf_ops perf_ops = {
+static bool scmi_fast_switch_possible(const struct scmi_handle *handle,
+				      struct device *dev)
+{
+	struct perf_dom_info *dom;
+	struct scmi_perf_info *pi = handle->perf_priv;
+
+	dom = pi->dom_info + scmi_dev_domain_id(dev);
+
+	return dom->fc_info && dom->fc_info->level_set_addr;
+}
+
+static const struct scmi_perf_ops perf_ops = {
 	.limits_set = scmi_perf_limits_set,
 	.limits_get = scmi_perf_limits_get,
 	.level_set = scmi_perf_level_set,
@@ -704,6 +761,90 @@
 	.freq_set = scmi_dvfs_freq_set,
 	.freq_get = scmi_dvfs_freq_get,
 	.est_power_get = scmi_dvfs_est_power_get,
+	.fast_switch_possible = scmi_fast_switch_possible,
+};
+
+static int scmi_perf_set_notify_enabled(const struct scmi_handle *handle,
+					u8 evt_id, u32 src_id, bool enable)
+{
+	int ret, cmd_id;
+
+	if (evt_id >= ARRAY_SIZE(evt_2_cmd))
+		return -EINVAL;
+
+	cmd_id = evt_2_cmd[evt_id];
+	ret = scmi_perf_level_limits_notify(handle, src_id, cmd_id, enable);
+	if (ret)
+		pr_debug("FAIL_ENABLED - evt[%X] dom[%d] - ret:%d\n",
+			 evt_id, src_id, ret);
+
+	return ret;
+}
+
+static void *scmi_perf_fill_custom_report(const struct scmi_handle *handle,
+					  u8 evt_id, ktime_t timestamp,
+					  const void *payld, size_t payld_sz,
+					  void *report, u32 *src_id)
+{
+	void *rep = NULL;
+
+	switch (evt_id) {
+	case SCMI_EVENT_PERFORMANCE_LIMITS_CHANGED:
+	{
+		const struct scmi_perf_limits_notify_payld *p = payld;
+		struct scmi_perf_limits_report *r = report;
+
+		if (sizeof(*p) != payld_sz)
+			break;
+
+		r->timestamp = timestamp;
+		r->agent_id = le32_to_cpu(p->agent_id);
+		r->domain_id = le32_to_cpu(p->domain_id);
+		r->range_max = le32_to_cpu(p->range_max);
+		r->range_min = le32_to_cpu(p->range_min);
+		*src_id = r->domain_id;
+		rep = r;
+		break;
+	}
+	case SCMI_EVENT_PERFORMANCE_LEVEL_CHANGED:
+	{
+		const struct scmi_perf_level_notify_payld *p = payld;
+		struct scmi_perf_level_report *r = report;
+
+		if (sizeof(*p) != payld_sz)
+			break;
+
+		r->timestamp = timestamp;
+		r->agent_id = le32_to_cpu(p->agent_id);
+		r->domain_id = le32_to_cpu(p->domain_id);
+		r->performance_level = le32_to_cpu(p->performance_level);
+		*src_id = r->domain_id;
+		rep = r;
+		break;
+	}
+	default:
+		break;
+	}
+
+	return rep;
+}
+
+static const struct scmi_event perf_events[] = {
+	{
+		.id = SCMI_EVENT_PERFORMANCE_LIMITS_CHANGED,
+		.max_payld_sz = sizeof(struct scmi_perf_limits_notify_payld),
+		.max_report_sz = sizeof(struct scmi_perf_limits_report),
+	},
+	{
+		.id = SCMI_EVENT_PERFORMANCE_LEVEL_CHANGED,
+		.max_payld_sz = sizeof(struct scmi_perf_level_notify_payld),
+		.max_report_sz = sizeof(struct scmi_perf_level_report),
+	},
+};
+
+static const struct scmi_event_ops perf_event_ops = {
+	.set_notify_enabled = scmi_perf_set_notify_enabled,
+	.fill_custom_report = scmi_perf_fill_custom_report,
 };
 
 static int scmi_perf_protocol_init(struct scmi_handle *handle)
@@ -738,15 +879,17 @@
 			scmi_perf_domain_init_fc(handle, domain, &dom->fc_info);
 	}
 
+	scmi_register_protocol_events(handle,
+				      SCMI_PROTOCOL_PERF, SCMI_PROTO_QUEUE_SZ,
+				      &perf_event_ops, perf_events,
+				      ARRAY_SIZE(perf_events),
+				      pinfo->num_domains);
+
+	pinfo->version = version;
 	handle->perf_ops = &perf_ops;
 	handle->perf_priv = pinfo;
 
 	return 0;
 }
 
-static int __init scmi_perf_init(void)
-{
-	return scmi_protocol_register(SCMI_PROTOCOL_PERF,
-				      &scmi_perf_protocol_init);
-}
-subsys_initcall(scmi_perf_init);
+DEFINE_SCMI_PROTOCOL_REGISTER_UNREGISTER(SCMI_PROTOCOL_PERF, perf)
diff --git a/drivers/firmware/arm_scmi/power.c b/drivers/firmware/arm_scmi/power.c
index 5abef70..1f37258 100644
--- a/drivers/firmware/arm_scmi/power.c
+++ b/drivers/firmware/arm_scmi/power.c
@@ -5,7 +5,12 @@
  * Copyright (C) 2018 ARM Ltd.
  */
 
+#define pr_fmt(fmt) "SCMI Notifications POWER - " fmt
+
+#include <linux/scmi_protocol.h>
+
 #include "common.h"
+#include "notify.h"
 
 enum scmi_power_protocol_cmd {
 	POWER_DOMAIN_ATTRIBUTES = 0x3,
@@ -42,6 +47,12 @@
 	__le32 notify_enable;
 };
 
+struct scmi_power_state_notify_payld {
+	__le32 agent_id;
+	__le32 domain_id;
+	__le32 power_state;
+};
+
 struct power_dom_info {
 	bool state_set_sync;
 	bool state_set_async;
@@ -50,6 +61,7 @@
 };
 
 struct scmi_power_info {
+	u32 version;
 	int num_domains;
 	u64 stats_addr;
 	u32 stats_size;
@@ -172,13 +184,82 @@
 	return dom->name;
 }
 
-static struct scmi_power_ops power_ops = {
+static const struct scmi_power_ops power_ops = {
 	.num_domains_get = scmi_power_num_domains_get,
 	.name_get = scmi_power_name_get,
 	.state_set = scmi_power_state_set,
 	.state_get = scmi_power_state_get,
 };
 
+static int scmi_power_request_notify(const struct scmi_handle *handle,
+				     u32 domain, bool enable)
+{
+	int ret;
+	struct scmi_xfer *t;
+	struct scmi_power_state_notify *notify;
+
+	ret = scmi_xfer_get_init(handle, POWER_STATE_NOTIFY,
+				 SCMI_PROTOCOL_POWER, sizeof(*notify), 0, &t);
+	if (ret)
+		return ret;
+
+	notify = t->tx.buf;
+	notify->domain = cpu_to_le32(domain);
+	notify->notify_enable = enable ? cpu_to_le32(BIT(0)) : 0;
+
+	ret = scmi_do_xfer(handle, t);
+
+	scmi_xfer_put(handle, t);
+	return ret;
+}
+
+static int scmi_power_set_notify_enabled(const struct scmi_handle *handle,
+					 u8 evt_id, u32 src_id, bool enable)
+{
+	int ret;
+
+	ret = scmi_power_request_notify(handle, src_id, enable);
+	if (ret)
+		pr_debug("FAIL_ENABLE - evt[%X] dom[%d] - ret:%d\n",
+			 evt_id, src_id, ret);
+
+	return ret;
+}
+
+static void *scmi_power_fill_custom_report(const struct scmi_handle *handle,
+					   u8 evt_id, ktime_t timestamp,
+					   const void *payld, size_t payld_sz,
+					   void *report, u32 *src_id)
+{
+	const struct scmi_power_state_notify_payld *p = payld;
+	struct scmi_power_state_changed_report *r = report;
+
+	if (evt_id != SCMI_EVENT_POWER_STATE_CHANGED || sizeof(*p) != payld_sz)
+		return NULL;
+
+	r->timestamp = timestamp;
+	r->agent_id = le32_to_cpu(p->agent_id);
+	r->domain_id = le32_to_cpu(p->domain_id);
+	r->power_state = le32_to_cpu(p->power_state);
+	*src_id = r->domain_id;
+
+	return r;
+}
+
+static const struct scmi_event power_events[] = {
+	{
+		.id = SCMI_EVENT_POWER_STATE_CHANGED,
+		.max_payld_sz = sizeof(struct scmi_power_state_notify_payld),
+		.max_report_sz =
+			sizeof(struct scmi_power_state_changed_report),
+	},
+};
+
+static const struct scmi_event_ops power_event_ops = {
+	.set_notify_enabled = scmi_power_set_notify_enabled,
+	.fill_custom_report = scmi_power_fill_custom_report,
+};
+
 static int scmi_power_protocol_init(struct scmi_handle *handle)
 {
 	int domain;
@@ -207,15 +288,17 @@
 		scmi_power_domain_attributes_get(handle, domain, dom);
 	}
 
+	scmi_register_protocol_events(handle,
+				      SCMI_PROTOCOL_POWER, SCMI_PROTO_QUEUE_SZ,
+				      &power_event_ops, power_events,
+				      ARRAY_SIZE(power_events),
+				      pinfo->num_domains);
+
+	pinfo->version = version;
 	handle->power_ops = &power_ops;
 	handle->power_priv = pinfo;
 
 	return 0;
 }
 
-static int __init scmi_power_init(void)
-{
-	return scmi_protocol_register(SCMI_PROTOCOL_POWER,
-				      &scmi_power_protocol_init);
-}
-subsys_initcall(scmi_power_init);
+DEFINE_SCMI_PROTOCOL_REGISTER_UNREGISTER(SCMI_PROTOCOL_POWER, power)
diff --git a/drivers/firmware/arm_scmi/reset.c b/drivers/firmware/arm_scmi/reset.c
index 6d223f3..a981a22 100644
--- a/drivers/firmware/arm_scmi/reset.c
+++ b/drivers/firmware/arm_scmi/reset.c
@@ -5,7 +5,12 @@
  * Copyright (C) 2019 ARM Ltd.
  */
 
+#define pr_fmt(fmt) "SCMI Notifications RESET - " fmt
+
+#include <linux/scmi_protocol.h>
+
 #include "common.h"
+#include "notify.h"
 
 enum scmi_reset_protocol_cmd {
 	RESET_DOMAIN_ATTRIBUTES = 0x3,
@@ -13,10 +18,6 @@
 	RESET_NOTIFY = 0x5,
 };
 
-enum scmi_reset_protocol_notify {
-	RESET_ISSUED = 0x0,
-};
-
 #define NUM_RESET_DOMAIN_MASK	0xffff
 #define RESET_NOTIFY_ENABLE	BIT(0)
 
@@ -38,6 +39,18 @@
 #define ARCH_COLD_RESET		0
 };
 
+struct scmi_msg_reset_notify {
+	__le32 id;
+	__le32 event_control;
+#define RESET_TP_NOTIFY_ALL	BIT(0)
+};
+
+struct scmi_reset_issued_notify_payld {
+	__le32 agent_id;
+	__le32 domain_id;
+	__le32 reset_state;
+};
+
 struct reset_dom_info {
 	bool async_reset;
 	bool reset_notify;
@@ -46,6 +59,7 @@
 };
 
 struct scmi_reset_info {
+	u32 version;
 	int num_domains;
 	struct reset_dom_info *dom_info;
 };
@@ -178,7 +192,7 @@
 	return scmi_domain_reset(handle, domain, 0, ARCH_COLD_RESET);
 }
 
-static struct scmi_reset_ops reset_ops = {
+static const struct scmi_reset_ops reset_ops = {
 	.num_domains_get = scmi_reset_num_domains_get,
 	.name_get = scmi_reset_name_get,
 	.latency_get = scmi_reset_latency_get,
@@ -187,6 +201,75 @@
 	.deassert = scmi_reset_domain_deassert,
 };
 
+static int scmi_reset_notify(const struct scmi_handle *handle, u32 domain_id,
+			     bool enable)
+{
+	int ret;
+	u32 evt_cntl = enable ? RESET_TP_NOTIFY_ALL : 0;
+	struct scmi_xfer *t;
+	struct scmi_msg_reset_notify *cfg;
+
+	ret = scmi_xfer_get_init(handle, RESET_NOTIFY,
+				 SCMI_PROTOCOL_RESET, sizeof(*cfg), 0, &t);
+	if (ret)
+		return ret;
+
+	cfg = t->tx.buf;
+	cfg->id = cpu_to_le32(domain_id);
+	cfg->event_control = cpu_to_le32(evt_cntl);
+
+	ret = scmi_do_xfer(handle, t);
+
+	scmi_xfer_put(handle, t);
+	return ret;
+}
+
+static int scmi_reset_set_notify_enabled(const struct scmi_handle *handle,
+					 u8 evt_id, u32 src_id, bool enable)
+{
+	int ret;
+
+	ret = scmi_reset_notify(handle, src_id, enable);
+	if (ret)
+		pr_debug("FAIL_ENABLED - evt[%X] dom[%d] - ret:%d\n",
+			 evt_id, src_id, ret);
+
+	return ret;
+}
+
+static void *scmi_reset_fill_custom_report(const struct scmi_handle *handle,
+					   u8 evt_id, ktime_t timestamp,
+					   const void *payld, size_t payld_sz,
+					   void *report, u32 *src_id)
+{
+	const struct scmi_reset_issued_notify_payld *p = payld;
+	struct scmi_reset_issued_report *r = report;
+
+	if (evt_id != SCMI_EVENT_RESET_ISSUED || sizeof(*p) != payld_sz)
+		return NULL;
+
+	r->timestamp = timestamp;
+	r->agent_id = le32_to_cpu(p->agent_id);
+	r->domain_id = le32_to_cpu(p->domain_id);
+	r->reset_state = le32_to_cpu(p->reset_state);
+	*src_id = r->domain_id;
+
+	return r;
+}
+
+static const struct scmi_event reset_events[] = {
+	{
+		.id = SCMI_EVENT_RESET_ISSUED,
+		.max_payld_sz = sizeof(struct scmi_reset_issued_notify_payld),
+		.max_report_sz = sizeof(struct scmi_reset_issued_report),
+	},
+};
+
+static const struct scmi_event_ops reset_event_ops = {
+	.set_notify_enabled = scmi_reset_set_notify_enabled,
+	.fill_custom_report = scmi_reset_fill_custom_report,
+};
+
 static int scmi_reset_protocol_init(struct scmi_handle *handle)
 {
 	int domain;
@@ -215,15 +298,17 @@
 		scmi_reset_domain_attributes_get(handle, domain, dom);
 	}
 
+	scmi_register_protocol_events(handle,
+				      SCMI_PROTOCOL_RESET, SCMI_PROTO_QUEUE_SZ,
+				      &reset_event_ops, reset_events,
+				      ARRAY_SIZE(reset_events),
+				      pinfo->num_domains);
+
+	pinfo->version = version;
 	handle->reset_ops = &reset_ops;
 	handle->reset_priv = pinfo;
 
 	return 0;
 }
 
-static int __init scmi_reset_init(void)
-{
-	return scmi_protocol_register(SCMI_PROTOCOL_RESET,
-				      &scmi_reset_protocol_init);
-}
-subsys_initcall(scmi_reset_init);
+DEFINE_SCMI_PROTOCOL_REGISTER_UNREGISTER(SCMI_PROTOCOL_RESET, reset)
diff --git a/drivers/firmware/arm_scmi/scmi_pm_domain.c b/drivers/firmware/arm_scmi/scmi_pm_domain.c
index 041f815..a4e4aa9 100644
--- a/drivers/firmware/arm_scmi/scmi_pm_domain.c
+++ b/drivers/firmware/arm_scmi/scmi_pm_domain.c
@@ -106,13 +106,11 @@
 	scmi_pd_data->domains = domains;
 	scmi_pd_data->num_domains = num_domains;
 
-	of_genpd_add_provider_onecell(np, scmi_pd_data);
-
-	return 0;
+	return of_genpd_add_provider_onecell(np, scmi_pd_data);
 }
 
 static const struct scmi_device_id scmi_id_table[] = {
-	{ SCMI_PROTOCOL_POWER },
+	{ SCMI_PROTOCOL_POWER, "genpd" },
 	{ },
 };
 MODULE_DEVICE_TABLE(scmi, scmi_id_table);
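
SCMI device matching is now keyed on both a protocol id and a name string (here "genpd"), so several drivers can bind to devices spawned for the same protocol. A minimal, hypothetical driver following the same convention could be declared roughly as below; the my-* names are invented.

#include <linux/module.h>
#include <linux/scmi_protocol.h>

static int my_probe(struct scmi_device *sdev)
{
	/* Bound against the reset protocol instance named "my-reset-user" */
	return 0;
}

static const struct scmi_device_id my_id_table[] = {
	{ SCMI_PROTOCOL_RESET, "my-reset-user" },
	{ },
};
MODULE_DEVICE_TABLE(scmi, my_id_table);

static struct scmi_driver my_scmi_driver = {
	.name = "my-scmi-reset-user",
	.probe = my_probe,
	.id_table = my_id_table,
};
module_scmi_driver(my_scmi_driver);
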
diff --git a/drivers/firmware/arm_scmi/sensors.c b/drivers/firmware/arm_scmi/sensors.c
index 931208b..b4232d6 100644
--- a/drivers/firmware/arm_scmi/sensors.c
+++ b/drivers/firmware/arm_scmi/sensors.c
@@ -5,7 +5,12 @@
  * Copyright (C) 2018 ARM Ltd.
  */
 
+#define pr_fmt(fmt) "SCMI Notifications SENSOR - " fmt
+
+#include <linux/scmi_protocol.h>
+
 #include "common.h"
+#include "notify.h"
 
 enum scmi_sensor_protocol_cmd {
 	SENSOR_DESCRIPTION_GET = 0x3,
@@ -67,7 +72,14 @@
 #define SENSOR_READ_ASYNC	BIT(0)
 };
 
+struct scmi_sensor_trip_notify_payld {
+	__le32 agent_id;
+	__le32 sensor_id;
+	__le32 trip_point_desc;
+};
+
 struct sensors_info {
+	u32 version;
 	int num_sensors;
 	int max_requests;
 	u64 reg_addr;
@@ -265,14 +277,60 @@
 	return si->num_sensors;
 }
 
-static struct scmi_sensor_ops sensor_ops = {
+static const struct scmi_sensor_ops sensor_ops = {
 	.count_get = scmi_sensor_count_get,
 	.info_get = scmi_sensor_info_get,
-	.trip_point_notify = scmi_sensor_trip_point_notify,
 	.trip_point_config = scmi_sensor_trip_point_config,
 	.reading_get = scmi_sensor_reading_get,
 };
 
+static int scmi_sensor_set_notify_enabled(const struct scmi_handle *handle,
+					  u8 evt_id, u32 src_id, bool enable)
+{
+	int ret;
+
+	ret = scmi_sensor_trip_point_notify(handle, src_id, enable);
+	if (ret)
+		pr_debug("FAIL_ENABLED - evt[%X] dom[%d] - ret:%d\n",
+			 evt_id, src_id, ret);
+
+	return ret;
+}
+
+static void *scmi_sensor_fill_custom_report(const struct scmi_handle *handle,
+					    u8 evt_id, ktime_t timestamp,
+					    const void *payld, size_t payld_sz,
+					    void *report, u32 *src_id)
+{
+	const struct scmi_sensor_trip_notify_payld *p = payld;
+	struct scmi_sensor_trip_point_report *r = report;
+
+	if (evt_id != SCMI_EVENT_SENSOR_TRIP_POINT_EVENT ||
+	    sizeof(*p) != payld_sz)
+		return NULL;
+
+	r->timestamp = timestamp;
+	r->agent_id = le32_to_cpu(p->agent_id);
+	r->sensor_id = le32_to_cpu(p->sensor_id);
+	r->trip_point_desc = le32_to_cpu(p->trip_point_desc);
+	*src_id = r->sensor_id;
+
+	return r;
+}
+
+static const struct scmi_event sensor_events[] = {
+	{
+		.id = SCMI_EVENT_SENSOR_TRIP_POINT_EVENT,
+		.max_payld_sz = sizeof(struct scmi_sensor_trip_notify_payld),
+		.max_report_sz = sizeof(struct scmi_sensor_trip_point_report),
+	},
+};
+
+static const struct scmi_event_ops sensor_event_ops = {
+	.set_notify_enabled = scmi_sensor_set_notify_enabled,
+	.fill_custom_report = scmi_sensor_fill_custom_report,
+};
+
 static int scmi_sensors_protocol_init(struct scmi_handle *handle)
 {
 	u32 version;
@@ -296,15 +354,17 @@
 
 	scmi_sensor_description_get(handle, sinfo);
 
+	scmi_register_protocol_events(handle,
+				      SCMI_PROTOCOL_SENSOR, SCMI_PROTO_QUEUE_SZ,
+				      &sensor_event_ops, sensor_events,
+				      ARRAY_SIZE(sensor_events),
+				      sinfo->num_sensors);
+
+	sinfo->version = version;
 	handle->sensor_ops = &sensor_ops;
 	handle->sensor_priv = sinfo;
 
 	return 0;
 }
 
-static int __init scmi_sensors_init(void)
-{
-	return scmi_protocol_register(SCMI_PROTOCOL_SENSOR,
-				      &scmi_sensors_protocol_init);
-}
-subsys_initcall(scmi_sensors_init);
+DEFINE_SCMI_PROTOCOL_REGISTER_UNREGISTER(SCMI_PROTOCOL_SENSOR, sensors)
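
Note that trip_point_notify has disappeared from sensor_ops: the platform-side notification is now enabled on demand by the notification core through scmi_sensor_set_notify_enabled() whenever someone registers for SCMI_EVENT_SENSOR_TRIP_POINT_EVENT. A consumer's notifier callback receives the scmi_sensor_trip_point_report built above; a hypothetical callback is sketched below (registration works as in the reset example earlier).

#include <linux/notifier.h>
#include <linux/scmi_protocol.h>

static int my_trip_point_cb(struct notifier_block *nb,
			    unsigned long event, void *data)
{
	/* 'data' is the report built by scmi_sensor_fill_custom_report() */
	const struct scmi_sensor_trip_point_report *r = data;

	pr_info("sensor %u crossed trip point %u (agent %u)\n",
		r->sensor_id, r->trip_point_desc, r->agent_id);

	return NOTIFY_OK;
}
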
diff --git a/drivers/firmware/arm_scmi/shmem.c b/drivers/firmware/arm_scmi/shmem.c
new file mode 100644
index 0000000..0e3eaea
--- /dev/null
+++ b/drivers/firmware/arm_scmi/shmem.c
@@ -0,0 +1,98 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * For transport using shared mem structure.
+ *
+ * Copyright (C) 2019 ARM Ltd.
+ */
+
+#include <linux/io.h>
+#include <linux/processor.h>
+#include <linux/types.h>
+
+#include "common.h"
+
+/*
+ * SCMI specification requires all parameters, message headers, return
+ * arguments or any protocol data to be expressed in little endian
+ * format only.
+ */
+struct scmi_shared_mem {
+	__le32 reserved;
+	__le32 channel_status;
+#define SCMI_SHMEM_CHAN_STAT_CHANNEL_ERROR	BIT(1)
+#define SCMI_SHMEM_CHAN_STAT_CHANNEL_FREE	BIT(0)
+	__le32 reserved1[2];
+	__le32 flags;
+#define SCMI_SHMEM_FLAG_INTR_ENABLED	BIT(0)
+	__le32 length;
+	__le32 msg_header;
+	u8 msg_payload[];
+};
+
+void shmem_tx_prepare(struct scmi_shared_mem __iomem *shmem,
+		      struct scmi_xfer *xfer)
+{
+	/*
+	 * Ideally the channel must be free by now, unless the OS timed
+	 * out the last request while the platform kept processing it.
+	 * Wait until the platform releases the shared memory, or the new
+	 * message payload and the pending response may overwrite each other.
+	 */
+	spin_until_cond(ioread32(&shmem->channel_status) &
+			SCMI_SHMEM_CHAN_STAT_CHANNEL_FREE);
+	/* Mark channel busy + clear error */
+	iowrite32(0x0, &shmem->channel_status);
+	iowrite32(xfer->hdr.poll_completion ? 0 : SCMI_SHMEM_FLAG_INTR_ENABLED,
+		  &shmem->flags);
+	iowrite32(sizeof(shmem->msg_header) + xfer->tx.len, &shmem->length);
+	iowrite32(pack_scmi_header(&xfer->hdr), &shmem->msg_header);
+	if (xfer->tx.buf)
+		memcpy_toio(shmem->msg_payload, xfer->tx.buf, xfer->tx.len);
+}
+
+u32 shmem_read_header(struct scmi_shared_mem __iomem *shmem)
+{
+	return ioread32(&shmem->msg_header);
+}
+
+void shmem_fetch_response(struct scmi_shared_mem __iomem *shmem,
+			  struct scmi_xfer *xfer)
+{
+	xfer->hdr.status = ioread32(shmem->msg_payload);
+	/* Skip the length of the header and status in the shmem area, i.e. 8 bytes */
+	xfer->rx.len = min_t(size_t, xfer->rx.len,
+			     ioread32(&shmem->length) - 8);
+
+	/* Take a copy into the rx buffer */
+	memcpy_fromio(xfer->rx.buf, shmem->msg_payload + 4, xfer->rx.len);
+}
+
+void shmem_fetch_notification(struct scmi_shared_mem __iomem *shmem,
+			      size_t max_len, struct scmi_xfer *xfer)
+{
+	/* Skip only the length of the header in the shmem area, i.e. 4 bytes */
+	xfer->rx.len = min_t(size_t, max_len, ioread32(&shmem->length) - 4);
+
+	/* Take a copy into the rx buffer */
+	memcpy_fromio(xfer->rx.buf, shmem->msg_payload, xfer->rx.len);
+}
+
+void shmem_clear_channel(struct scmi_shared_mem __iomem *shmem)
+{
+	iowrite32(SCMI_SHMEM_CHAN_STAT_CHANNEL_FREE, &shmem->channel_status);
+}
+
+bool shmem_poll_done(struct scmi_shared_mem __iomem *shmem,
+		     struct scmi_xfer *xfer)
+{
+	u16 xfer_id;
+
+	xfer_id = MSG_XTRACT_TOKEN(ioread32(&shmem->msg_header));
+
+	if (xfer->hdr.seq != xfer_id)
+		return false;
+
+	return ioread32(&shmem->channel_status) &
+		(SCMI_SHMEM_CHAN_STAT_CHANNEL_ERROR |
+		 SCMI_SHMEM_CHAN_STAT_CHANNEL_FREE);
+}
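
These helpers encapsulate the SCMI shared-memory transfer-area layout so that every shared-memory based transport (the existing mailbox one and the new SMC/HVC one below) can reuse the same marshalling code. A transport typically pairs them roughly as in the sketch below; the my_* names are hypothetical, and the real mailbox glue lives in mailbox.c.

#include "common.h"

/* Hypothetical per-channel state for a shared-memory based transport */
struct my_chan {
	struct scmi_shared_mem __iomem *shmem;
};

static int my_send_message(struct scmi_chan_info *cinfo,
			   struct scmi_xfer *xfer)
{
	struct my_chan *ch = cinfo->transport_info;

	/* Mark the channel busy and serialise header + payload */
	shmem_tx_prepare(ch->shmem, xfer);

	/* ...ring the doorbell here: mailbox, SMC/HVC, ... */

	return 0;
}

static void my_fetch_response(struct scmi_chan_info *cinfo,
			      struct scmi_xfer *xfer)
{
	struct my_chan *ch = cinfo->transport_info;

	/* Pull the status and return payload back out of the shared area */
	shmem_fetch_response(ch->shmem, xfer);
}
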
diff --git a/drivers/firmware/arm_scmi/smc.c b/drivers/firmware/arm_scmi/smc.c
new file mode 100644
index 0000000..82a82a5
--- /dev/null
+++ b/drivers/firmware/arm_scmi/smc.c
@@ -0,0 +1,154 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * System Control and Management Interface (SCMI) Message SMC/HVC
+ * Transport driver
+ *
+ * Copyright 2020 NXP
+ */
+
+#include <linux/arm-smccc.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/mutex.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/slab.h>
+
+#include "common.h"
+
+/**
+ * struct scmi_smc - Structure representing a SCMI smc transport
+ *
+ * @cinfo: SCMI channel info
+ * @shmem: Transmit/Receive shared memory area
+ * @shmem_lock: Lock to protect access to Tx/Rx shared memory area
+ * @func_id: smc/hvc call function id
+ */
+
+struct scmi_smc {
+	struct scmi_chan_info *cinfo;
+	struct scmi_shared_mem __iomem *shmem;
+	struct mutex shmem_lock;
+	u32 func_id;
+};
+
+static bool smc_chan_available(struct device *dev, int idx)
+{
+	struct device_node *np = of_parse_phandle(dev->of_node, "shmem", 0);
+	if (!np)
+		return false;
+
+	of_node_put(np);
+	return true;
+}
+
+static int smc_chan_setup(struct scmi_chan_info *cinfo, struct device *dev,
+			  bool tx)
+{
+	struct device *cdev = cinfo->dev;
+	struct scmi_smc *scmi_info;
+	resource_size_t size;
+	struct resource res;
+	struct device_node *np;
+	u32 func_id;
+	int ret;
+
+	if (!tx)
+		return -ENODEV;
+
+	scmi_info = devm_kzalloc(dev, sizeof(*scmi_info), GFP_KERNEL);
+	if (!scmi_info)
+		return -ENOMEM;
+
+	np = of_parse_phandle(cdev->of_node, "shmem", 0);
+	ret = of_address_to_resource(np, 0, &res);
+	of_node_put(np);
+	if (ret) {
+		dev_err(cdev, "failed to get SCMI Tx shared memory\n");
+		return ret;
+	}
+
+	size = resource_size(&res);
+	scmi_info->shmem = devm_ioremap(dev, res.start, size);
+	if (!scmi_info->shmem) {
+		dev_err(dev, "failed to ioremap SCMI Tx shared memory\n");
+		return -EADDRNOTAVAIL;
+	}
+
+	ret = of_property_read_u32(dev->of_node, "arm,smc-id", &func_id);
+	if (ret < 0)
+		return ret;
+
+	scmi_info->func_id = func_id;
+	scmi_info->cinfo = cinfo;
+	mutex_init(&scmi_info->shmem_lock);
+	cinfo->transport_info = scmi_info;
+
+	return 0;
+}
+
+static int smc_chan_free(int id, void *p, void *data)
+{
+	struct scmi_chan_info *cinfo = p;
+	struct scmi_smc *scmi_info = cinfo->transport_info;
+
+	cinfo->transport_info = NULL;
+	scmi_info->cinfo = NULL;
+
+	scmi_free_channel(cinfo, data, id);
+
+	return 0;
+}
+
+static int smc_send_message(struct scmi_chan_info *cinfo,
+			    struct scmi_xfer *xfer)
+{
+	struct scmi_smc *scmi_info = cinfo->transport_info;
+	struct arm_smccc_res res;
+
+	mutex_lock(&scmi_info->shmem_lock);
+
+	shmem_tx_prepare(scmi_info->shmem, xfer);
+
+	arm_smccc_1_1_invoke(scmi_info->func_id, 0, 0, 0, 0, 0, 0, 0, &res);
+	scmi_rx_callback(scmi_info->cinfo, shmem_read_header(scmi_info->shmem));
+
+	mutex_unlock(&scmi_info->shmem_lock);
+
+	/* Only SMCCC_RET_NOT_SUPPORTED is a valid error code */
+	if (res.a0)
+		return -EOPNOTSUPP;
+	return 0;
+}
+
+static void smc_fetch_response(struct scmi_chan_info *cinfo,
+			       struct scmi_xfer *xfer)
+{
+	struct scmi_smc *scmi_info = cinfo->transport_info;
+
+	shmem_fetch_response(scmi_info->shmem, xfer);
+}
+
+static bool
+smc_poll_done(struct scmi_chan_info *cinfo, struct scmi_xfer *xfer)
+{
+	struct scmi_smc *scmi_info = cinfo->transport_info;
+
+	return shmem_poll_done(scmi_info->shmem, xfer);
+}
+
+static const struct scmi_transport_ops scmi_smc_ops = {
+	.chan_available = smc_chan_available,
+	.chan_setup = smc_chan_setup,
+	.chan_free = smc_chan_free,
+	.send_message = smc_send_message,
+	.fetch_response = smc_fetch_response,
+	.poll_done = smc_poll_done,
+};
+
+const struct scmi_desc scmi_smc_desc = {
+	.ops = &scmi_smc_ops,
+	.max_rx_timeout_ms = 30,
+	.max_msg = 20,
+	.max_msg_size = 128,
+};
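
Unlike the mailbox transport, the SMC/HVC call only returns once the platform has processed the request, which is why smc_send_message() can invoke scmi_rx_callback() immediately after arm_smccc_1_1_invoke(). The scmi_smc_desc descriptor is expected to be picked up by the transport table in driver.c for the "arm,scmi-smc" compatible, roughly as sketched below (the exact table and its config guards live in driver.c and may differ in detail).

#include <linux/of.h>

#include "common.h"

static const struct of_device_id scmi_of_match[] = {
#ifdef CONFIG_MAILBOX
	{ .compatible = "arm,scmi", .data = &scmi_mailbox_desc },
#endif
#ifdef CONFIG_HAVE_ARM_SMCCC_DISCOVERY
	{ .compatible = "arm,scmi-smc", .data = &scmi_smc_desc },
#endif
	{ /* Sentinel */ },
};
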
diff --git a/drivers/firmware/arm_scmi/system.c b/drivers/firmware/arm_scmi/system.c
new file mode 100644
index 0000000..283e12d
--- /dev/null
+++ b/drivers/firmware/arm_scmi/system.c
@@ -0,0 +1,131 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * System Control and Management Interface (SCMI) System Power Protocol
+ *
+ * Copyright (C) 2020 ARM Ltd.
+ */
+
+#define pr_fmt(fmt) "SCMI Notifications SYSTEM - " fmt
+
+#include <linux/scmi_protocol.h>
+
+#include "common.h"
+#include "notify.h"
+
+#define SCMI_SYSTEM_NUM_SOURCES		1
+
+enum scmi_system_protocol_cmd {
+	SYSTEM_POWER_STATE_NOTIFY = 0x5,
+};
+
+struct scmi_system_power_state_notify {
+	__le32 notify_enable;
+};
+
+struct scmi_system_power_state_notifier_payld {
+	__le32 agent_id;
+	__le32 flags;
+	__le32 system_state;
+};
+
+struct scmi_system_info {
+	u32 version;
+};
+
+static int scmi_system_request_notify(const struct scmi_handle *handle,
+				      bool enable)
+{
+	int ret;
+	struct scmi_xfer *t;
+	struct scmi_system_power_state_notify *notify;
+
+	ret = scmi_xfer_get_init(handle, SYSTEM_POWER_STATE_NOTIFY,
+				 SCMI_PROTOCOL_SYSTEM, sizeof(*notify), 0, &t);
+	if (ret)
+		return ret;
+
+	notify = t->tx.buf;
+	notify->notify_enable = enable ? cpu_to_le32(BIT(0)) : 0;
+
+	ret = scmi_do_xfer(handle, t);
+
+	scmi_xfer_put(handle, t);
+	return ret;
+}
+
+static int scmi_system_set_notify_enabled(const struct scmi_handle *handle,
+					  u8 evt_id, u32 src_id, bool enable)
+{
+	int ret;
+
+	ret = scmi_system_request_notify(handle, enable);
+	if (ret)
+		pr_debug("FAIL_ENABLE - evt[%X] - ret:%d\n", evt_id, ret);
+
+	return ret;
+}
+
+static void *scmi_system_fill_custom_report(const struct scmi_handle *handle,
+					    u8 evt_id, ktime_t timestamp,
+					    const void *payld, size_t payld_sz,
+					    void *report, u32 *src_id)
+{
+	const struct scmi_system_power_state_notifier_payld *p = payld;
+	struct scmi_system_power_state_notifier_report *r = report;
+
+	if (evt_id != SCMI_EVENT_SYSTEM_POWER_STATE_NOTIFIER ||
+	    sizeof(*p) != payld_sz)
+		return NULL;
+
+	r->timestamp = timestamp;
+	r->agent_id = le32_to_cpu(p->agent_id);
+	r->flags = le32_to_cpu(p->flags);
+	r->system_state = le32_to_cpu(p->system_state);
+	*src_id = 0;
+
+	return r;
+}
+
+static const struct scmi_event system_events[] = {
+	{
+		.id = SCMI_EVENT_SYSTEM_POWER_STATE_NOTIFIER,
+		.max_payld_sz =
+			sizeof(struct scmi_system_power_state_notifier_payld),
+		.max_report_sz =
+			sizeof(struct scmi_system_power_state_notifier_report),
+	},
+};
+
+static const struct scmi_event_ops system_event_ops = {
+	.set_notify_enabled = scmi_system_set_notify_enabled,
+	.fill_custom_report = scmi_system_fill_custom_report,
+};
+
+static int scmi_system_protocol_init(struct scmi_handle *handle)
+{
+	u32 version;
+	struct scmi_system_info *pinfo;
+
+	scmi_version_get(handle, SCMI_PROTOCOL_SYSTEM, &version);
+
+	dev_dbg(handle->dev, "System Power Version %d.%d\n",
+		PROTOCOL_REV_MAJOR(version), PROTOCOL_REV_MINOR(version));
+
+	pinfo = devm_kzalloc(handle->dev, sizeof(*pinfo), GFP_KERNEL);
+	if (!pinfo)
+		return -ENOMEM;
+
+	scmi_register_protocol_events(handle,
+				      SCMI_PROTOCOL_SYSTEM, SCMI_PROTO_QUEUE_SZ,
+				      &system_event_ops,
+				      system_events,
+				      ARRAY_SIZE(system_events),
+				      SCMI_SYSTEM_NUM_SOURCES);
+
+	pinfo->version = version;
+	handle->system_priv = pinfo;
+
+	return 0;
+}
+
+DEFINE_SCMI_PROTOCOL_REGISTER_UNREGISTER(SCMI_PROTOCOL_SYSTEM, system)
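
All protocols now register and unregister through DEFINE_SCMI_PROTOCOL_REGISTER_UNREGISTER (defined in common.h as part of this series) instead of per-protocol subsys_initcall()s, so the core can control initialisation order and support module unload. For the system protocol the macro is expected to expand to roughly the following sketch:

/* Roughly what the macro above is expected to generate */
int __init scmi_system_register(void)
{
	return scmi_protocol_register(SCMI_PROTOCOL_SYSTEM,
				      &scmi_system_protocol_init);
}

void __exit scmi_system_unregister(void)
{
	scmi_protocol_unregister(SCMI_PROTOCOL_SYSTEM);
}

The generated register/unregister pair is then invoked from the core driver's module init/exit path in driver.c.
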