feat(runtime/rmi): initial implementation of RMI_PDEV_COMMUNICATE

Implements the RMI_PDEV_COMMUNICATE handler. This part of the implementation
covers:
- Validating the device communication data passed by the NS host
- Mapping the PDEV aux granules and heap
- Dispatching the SPDM_INIT_CONNECTION command, which sends the SPDM
  version, capabilities and algorithms to the device and initializes the
  connection as per the PCI CMA SPDM specification (see the host-side
  sketch below).
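
For illustration, below is a minimal sketch of the NS Host loop that drives
this handler for a PDEV in the NEW state. It is not part of this patch:
rmi_pdev_communicate(), rmi_pdev_get_state(), dev_send()/dev_recv() and the
struct rmi_dev_comm_data layout are assumed host-side helpers; only the
RMI_DEV_COMM_* and RMI_PDEV_STATE_* names are taken from this change.

    /*
     * Hypothetical NS Host loop. All buffers are NS granules; *_pa are the
     * physical addresses passed to the RMM, *_va are the Host mappings.
     */
    struct rmi_dev_comm_data *comm = comm_va;
    unsigned long state = RMI_PDEV_STATE_NEW;

    comm->enter.status = RMI_DEV_COMM_ENTER_STATUS_NONE;
    comm->enter.req_addr = req_pa;
    comm->enter.resp_addr = resp_pa;

    do {
        if (rmi_pdev_communicate(pdev_pa, comm_pa) != RMI_SUCCESS) {
            break;
        }

        if ((comm->exit.flags & RMI_DEV_COMM_EXIT_FLAGS_SEND_BIT) != 0U) {
            /*
             * Forward the SPDM request to the device (e.g. over a DOE
             * mailbox) and place the device response in the resp granule.
             */
            dev_send(req_va, comm->exit.req_len);
            comm->enter.resp_len = dev_recv(resp_va);
            comm->enter.status = RMI_DEV_COMM_ENTER_STATUS_RESPONSE;
        }

        rmi_pdev_get_state(pdev_pa, &state);
    } while (state == RMI_PDEV_STATE_NEW);

On success the PDEV moves from NEW to NEEDS_KEY once the SPDM connection is
established; a communication error moves it to ERROR.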

Signed-off-by: Arunachalam Ganapathy <arunachalam.ganapathy@arm.com>
Signed-off-by: Soby Mathew <soby.mathew@arm.com>
Change-Id: Ic49a14c6c10fc9e15403ce29d1fbe2ff7ba6c83b
diff --git a/app/common/framework/src/no_app_support/app.c b/app/common/framework/src/no_app_support/app.c
index c6e9c8d..f296cf0 100644
--- a/app/common/framework/src/no_app_support/app.c
+++ b/app/common/framework/src/no_app_support/app.c
@@ -87,6 +87,12 @@
 	return 0;
 }
 
+void *app_get_heap_ptr(struct app_data_cfg *app_data)
+{
+	(void)app_data;
+	return NULL;
+}
+
 size_t app_get_required_granule_count(unsigned long app_id)
 {
 	(void)app_id;
diff --git a/app/device_assignment/el0_app/CMakeLists.txt b/app/device_assignment/el0_app/CMakeLists.txt
index 9d22f91..3195151 100644
--- a/app/device_assignment/el0_app/CMakeLists.txt
+++ b/app/device_assignment/el0_app/CMakeLists.txt
@@ -88,7 +88,8 @@
 
 target_sources(rmm-app-dev-assign-elf
     PRIVATE
-        "src/dev_assign_el0_app.c")
+        "src/dev_assign_el0_app.c"
+        "src/dev_assign_cmds.c")
 
 if (RMM_ARCH STREQUAL fake_host)
     target_sources(rmm-app-dev-assign-elf
diff --git a/app/device_assignment/el0_app/src/dev_assign_cmds.c b/app/device_assignment/el0_app/src/dev_assign_cmds.c
new file mode 100644
index 0000000..28c091a
--- /dev/null
+++ b/app/device_assignment/el0_app/src/dev_assign_cmds.c
@@ -0,0 +1,77 @@
+/*
+ * SPDX-License-Identifier: BSD-3-Clause
+ * SPDX-FileCopyrightText: Copyright TF-RMM Contributors.
+ */
+
+#include <debug.h>
+#include <dev_assign_private.h>
+#include <string.h>
+
+/*
+ * Process the libspdm connection state. Check if the responder (DSM) supports
+ * the required capabilities in the connection state.
+ */
+static int process_spdm_init_connection(void *spdm_context)
+{
+	libspdm_data_parameter_t parameter;
+	libspdm_return_t status;
+	uint32_t data32 = 0;
+	size_t data_size;
+
+	/*
+	 * Check the responder transmit buffer size. RMM retrieves the device
+	 * certificate in parts. The maximum size of a single part is controlled
+	 * by the config LIBSPDM_MAX_CERT_CHAIN_BLOCK_LEN. Fail if the responder
+	 * data transfer size is less than the requester's MAX_CERT_CHAIN_BLOCK_LEN.
+	 * Each part of a certificate can in turn be retrieved in chunks, but
+	 * this requires both RMM and the responder to support the CHUNK capability.
+	 */
+	(void)memset(&parameter, 0, sizeof(parameter));
+	parameter.location = LIBSPDM_DATA_LOCATION_CONNECTION;
+	data_size = sizeof(uint32_t);
+	status = libspdm_get_data(spdm_context,
+				  LIBSPDM_DATA_CAPABILITY_DATA_TRANSFER_SIZE,
+				  &parameter, &data32, &data_size);
+	if (status != LIBSPDM_STATUS_SUCCESS) {
+		return DEV_ASSIGN_STATUS_ERROR;
+	}
+
+	if (data32 < (uint32_t)LIBSPDM_MAX_CERT_CHAIN_BLOCK_LEN) {
+		ERROR("Responder data transfer size %u < %u\n", data32,
+		      (uint32_t)LIBSPDM_MAX_CERT_CHAIN_BLOCK_LEN);
+		return DEV_ASSIGN_STATUS_ERROR;
+	}
+
+	return DEV_ASSIGN_STATUS_SUCCESS;
+}
+
+/* Initialize the SPDM connection with the device (DSM) */
+int dev_assign_cmd_init_connection_main(struct dev_assign_info *info)
+{
+	void *spdm_context = info->libspdm_ctx;
+	libspdm_return_t status;
+
+	assert(spdm_context != NULL);
+
+	/*
+	 * Below is the list of SPDM requester commands sent to the device to
+	 * init the connection and get the certificate. These commands result
+	 * in multiple async IOs from the libspdm layer, which yield the app.
+	 */
+
+	/*
+	 * Initialize the connection. This does GET_VERSION, GET_CAPABILITIES
+	 * and NEGOTIATE_ALGORITHMS.
+	 */
+	status = libspdm_init_connection(spdm_context, false);
+	if (status != LIBSPDM_STATUS_SUCCESS) {
+		return DEV_ASSIGN_STATUS_ERROR;
+	}
+
+	if (process_spdm_init_connection(spdm_context) !=
+			DEV_ASSIGN_STATUS_SUCCESS) {
+		return DEV_ASSIGN_STATUS_ERROR;
+	}
+
+	return DEV_ASSIGN_STATUS_SUCCESS;
+}
diff --git a/app/device_assignment/el0_app/src/dev_assign_el0_app.c b/app/device_assignment/el0_app/src/dev_assign_el0_app.c
index e4fe1c3..a1af487 100644
--- a/app/device_assignment/el0_app/src/dev_assign_el0_app.c
+++ b/app/device_assignment/el0_app/src/dev_assign_el0_app.c
@@ -17,10 +17,79 @@
 					      const void *request,
 					      uint64_t timeout)
 {
-	(void)spdm_context;
-	(void)request_size;
-	(void)request;
-	(void)timeout;
+	struct dev_assign_info *info;
+	int rc;
+	uintptr_t buf_offset;
+
+	info = spdm_to_dev_assign_info(spdm_context);
+
+	if ((uintptr_t)info->send_recv_buffer > (uintptr_t)request) {
+		/* cppcheck-suppress misra-c2012-12.2 */
+		/* cppcheck-suppress misra-c2012-10.1 */
+		/* coverity[misra_c_2012_rule_10_1_violation:SUPPRESS] */
+		/* coverity[misra_c_2012_rule_12_2_violation:SUPPRESS] */
+		return LIBSPDM_STATUS_SEND_FAIL;
+	}
+
+	/*
+	 * The request buffer can be allocated from either the send_recv buffer
+	 * or the scratch buffer. Since these buffers are adjacent to each
+	 * other, we can perform a sanity check on the offset.
+	 */
+	buf_offset = (uintptr_t)request - (uintptr_t)info->send_recv_buffer;
+
+	size_t request_size_align = round_up(request_size, 8U);
+
+	if ((buf_offset + request_size_align)
+		> (PRIV_LIBSPDM_SEND_RECV_BUF_SIZE + PRIV_LIBSPDM_SCRATCH_BUF_SIZE)) {
+		/* cppcheck-suppress misra-c2012-12.2 */
+		/* cppcheck-suppress misra-c2012-10.1 */
+		/* coverity[misra_c_2012_rule_10_1_violation:SUPPRESS] */
+		/* coverity[misra_c_2012_rule_12_2_violation:SUPPRESS] */
+		return LIBSPDM_STATUS_SEND_FAIL;
+	}
+
+	/* Clear out any extra bytes due to rounding up of request_size */
+	(void)memset((void *)((uintptr_t)request + request_size),
+		0, request_size_align - request_size);
+
+	/*
+	 * Send the message. The device communication request is written to
+	 * the NS buffer.
+	 */
+	rc = (int)el0_app_service_call(APP_SERVICE_WRITE_TO_NS_BUF,
+		APP_SERVICE_RW_NS_BUF_HEAP, (unsigned long)buf_offset,
+		info->enter_args.req_addr, request_size_align);
+
+	if (rc != 0) {
+		/* cppcheck-suppress misra-c2012-12.2 */
+		/* cppcheck-suppress misra-c2012-10.1 */
+		/* coverity[misra_c_2012_rule_10_1_violation:SUPPRESS] */
+		/* coverity[misra_c_2012_rule_12_2_violation:SUPPRESS] */
+		return LIBSPDM_STATUS_SEND_FAIL;
+	}
+
+	info->exit_args.flags |= RMI_DEV_COMM_EXIT_FLAGS_SEND_BIT;
+	info->exit_args.timeout = timeout;
+	info->exit_args.protocol = (unsigned char)RMI_DEV_COMM_PROTOCOL_SPDM;
+	info->exit_args.req_len = request_size;
+
+	/* Copy back the exit args to shared buf */
+	*(struct rmi_dev_comm_exit *)info->shared_buf = info->exit_args;
+
+	el0_app_yield();
+
+	info->enter_args = *((struct rmi_dev_comm_enter *)info->shared_buf);
+	(void)memset(&info->exit_args, 0, sizeof(info->exit_args));
+
+	if (info->enter_args.status == RMI_DEV_COMM_ENTER_STATUS_ERROR) {
+		/* cppcheck-suppress misra-c2012-12.2 */
+		/* cppcheck-suppress misra-c2012-10.1 */
+		/* coverity[misra_c_2012_rule_10_1_violation:SUPPRESS] */
+		/* coverity[misra_c_2012_rule_12_2_violation:SUPPRESS] */
+		return LIBSPDM_STATUS_SEND_FAIL;
+	}
+
 	return LIBSPDM_STATUS_SUCCESS;
 }
 
@@ -29,10 +98,56 @@
 						 void **response,
 						 uint64_t timeout)
 {
-	(void)spdm_context;
-	(void)response_size;
-	(void)response;
+	struct dev_assign_info *info = spdm_to_dev_assign_info(spdm_context);
+	int rc;
+	uintptr_t buf_offset;
+	unsigned long resp_len = info->enter_args.resp_len;
+
 	(void)timeout;
+
+	info = spdm_to_dev_assign_info(spdm_context);
+
+	if ((uintptr_t)info->send_recv_buffer > (uintptr_t)*response) {
+		/* cppcheck-suppress misra-c2012-12.2 */
+		/* cppcheck-suppress misra-c2012-10.1 */
+		/* coverity[misra_c_2012_rule_10_1_violation:SUPPRESS] */
+		/* coverity[misra_c_2012_rule_12_2_violation:SUPPRESS] */
+		return LIBSPDM_STATUS_RECEIVE_FAIL;
+	}
+
+	buf_offset = (uintptr_t)*response - (uintptr_t)info->send_recv_buffer;
+
+	size_t resp_len_align = round_up(resp_len, 8U);
+
+	if ((buf_offset + resp_len_align)
+			> (PRIV_LIBSPDM_SEND_RECV_BUF_SIZE + PRIV_LIBSPDM_SCRATCH_BUF_SIZE)) {
+		/* cppcheck-suppress misra-c2012-12.2 */
+		/* cppcheck-suppress misra-c2012-10.1 */
+		/* coverity[misra_c_2012_rule_10_1_violation:SUPPRESS] */
+		/* coverity[misra_c_2012_rule_12_2_violation:SUPPRESS] */
+		return LIBSPDM_STATUS_RECEIVE_FAIL;
+	}
+
+	assert(info->enter_args.status == RMI_DEV_COMM_ENTER_STATUS_RESPONSE);
+
+	/*
+	 * Receive the message. The device communication response is read
+	 * from the NS buffer.
+	 */
+	rc = (int)el0_app_service_call(APP_SERVICE_READ_FROM_NS_BUF,
+		APP_SERVICE_RW_NS_BUF_HEAP, (unsigned long)buf_offset,
+		info->enter_args.resp_addr, resp_len_align);
+
+	if (rc != 0) {
+		/* cppcheck-suppress misra-c2012-12.2 */
+		/* cppcheck-suppress misra-c2012-10.1 */
+		/* coverity[misra_c_2012_rule_10_1_violation:SUPPRESS] */
+		/* coverity[misra_c_2012_rule_12_2_violation:SUPPRESS] */
+		return LIBSPDM_STATUS_RECEIVE_FAIL;
+	}
+
+	*response_size = resp_len;
+
 	return LIBSPDM_STATUS_SUCCESS;
 }
 
@@ -47,10 +162,10 @@
 	(void)session_id;
 	(void)is_app_message;
 	(void)is_request_message;
-	(void)message_size;
-	(void)message;
-	(void)transport_message_size;
-	(void)transport_message;
+
+	*transport_message_size = message_size;
+	*transport_message = message;
+
 	return LIBSPDM_STATUS_SUCCESS;
 }
 
@@ -62,13 +177,13 @@
 				  size_t *message_size, void **message)
 {
 	(void)spdm_context;
-	(void)session_id;
 	(void)is_app_message;
 	(void)is_request_message;
-	(void)transport_message_size;
-	(void)transport_message;
-	(void)message_size;
-	(void)message;
+
+	*session_id = NULL;
+	*message_size = transport_message_size;
+	*message = transport_message;
+
 	return LIBSPDM_STATUS_SUCCESS;
 }
 
@@ -91,12 +206,13 @@
 	(void)msg_buf_ptr;
 	info = spdm_to_dev_assign_info(spdm_context);
 	assert(info->send_recv_buffer == msg_buf_ptr);
+	/* Nothing to do */
 }
 
 static libspdm_return_t spdm_acquire_receiver_buffer(void *spdm_context,
 							 void **msg_buf_ptr)
 {
-	struct dev_assign_info *info __unused;
+	struct dev_assign_info *info;
 
 	info = spdm_to_dev_assign_info(spdm_context);
 	*msg_buf_ptr = info->send_recv_buffer;
@@ -112,6 +228,7 @@
 	(void)msg_buf_ptr;
 	info = spdm_to_dev_assign_info(spdm_context);
 	assert(info->send_recv_buffer == msg_buf_ptr);
+	/* Nothing to do */
 }
 
 /*
@@ -402,6 +519,40 @@
 	return DEV_ASSIGN_STATUS_SUCCESS;
 }
 
+static unsigned long dev_assign_communicate_cmd_cmn(unsigned long func_id, uintptr_t heap)
+{
+	struct dev_assign_info *info = heap_start_to_dev_assign_info(heap);
+	unsigned long ret;
+
+	void *shared_buf = info->shared_buf;
+
+	if (shared_buf == NULL) {
+		ERROR("Dev_assign cmds called without init\n");
+		return (unsigned long)DEV_ASSIGN_STATUS_ERROR;
+	}
+
+	/* Initialize the entry and exit args */
+	info->enter_args = *(struct rmi_dev_comm_enter *)shared_buf;
+	(void)memset(&info->exit_args, 0, sizeof(info->exit_args));
+
+	switch (func_id) {
+	case DEVICE_ASSIGN_APP_FUNC_ID_CONNECT_INIT:
+		ret = (unsigned long)dev_assign_cmd_init_connection_main(info);
+		break;
+	default:
+		assert(false);
+		return (unsigned long)DEV_ASSIGN_STATUS_ERROR;
+	}
+
+	/* Reset the exit flag on error */
+	if (ret != 0UL) {
+		info->exit_args.flags = 0;
+	}
+
+	/* Copy back the exit args to shared buf */
+	*(struct rmi_dev_comm_exit *)shared_buf = info->exit_args;
+	return ret;
+}
 
 /* coverity[misra_c_2012_rule_5_8_violation:SUPPRESS] */
 unsigned long el0_app_entry_func(
@@ -425,6 +576,8 @@
 		return (unsigned long)dev_assign_init(heap, arg_0,
 			(struct dev_assign_params *)shared);
 	}
+	case DEVICE_ASSIGN_APP_FUNC_ID_CONNECT_INIT:
+		return dev_assign_communicate_cmd_cmn(func_id, heap);
 	case DEVICE_ASSIGN_APP_FUNC_ID_DEINIT:
 		return (unsigned long)dev_assign_deinit(heap);
 	default:
diff --git a/app/device_assignment/rmm_stub/include/dev_assign_app.h b/app/device_assignment/rmm_stub/include/dev_assign_app.h
index 8f124af..af4fdf5 100644
--- a/app/device_assignment/rmm_stub/include/dev_assign_app.h
+++ b/app/device_assignment/rmm_stub/include/dev_assign_app.h
@@ -26,4 +26,26 @@
 	size_t granule_pa_count, void *granule_va_start,
 	struct dev_assign_params *params);
 
+
+/*
+ * Communicate with the device and continue the device command as part of
+ * the device assignment sequence.
+ * Arguments:
+ * app_data - Pointer to app_data_cfg. This is opaque to the caller.
+ * comm_enter_args - Entry arguments to the app
+ * comm_exit_args - Exit arguments from the app
+ * dev_cmd - Valid device communication command.
+ *
+ * Note that when this function indicates that the app has yielded,
+ * the only valid dev_cmd is DEVICE_ASSIGN_APP_FUNC_ID_RESUME.
+ *
+ * Returns DEV_ASSIGN_STATUS_SUCCESS if the cmd is successful.
+ *         DEV_ASSIGN_STATUS_ERROR if the cmd is unsuccessful.
+ *         DEV_ASSIGN_STATUS_COMM_BLOCKED if the app is yielded.
+ */
+int dev_assign_dev_communicate(struct app_data_cfg *app_data,
+	struct rmi_dev_comm_enter *comm_enter_args,
+	struct rmi_dev_comm_exit *comm_exit_args,
+	int dev_cmd);
+
 #endif /* DEV_ASSIGN_APP_H */
diff --git a/app/device_assignment/rmm_stub/include/dev_assign_structs.h b/app/device_assignment/rmm_stub/include/dev_assign_structs.h
index 5d8d9bb..1411393 100644
--- a/app/device_assignment/rmm_stub/include/dev_assign_structs.h
+++ b/app/device_assignment/rmm_stub/include/dev_assign_structs.h
@@ -50,6 +50,44 @@
 };
 
 /*
+ * App functions for device communication. App uses heap available via tpidrro_el0.
+ * The function execution can yield and return back to RMM. In this case
+ * the return is via the APP_YIELD_CALL svc. Callers need to check
+ * `app_data->exit_flag` for APP_EXIT_SVC_YIELD_FLAG. `rmi_dev_comm_enter`
+ * is expected to be populated in the shared buf on entry into the app and
+ * `rmi_dev_comm_exit` is expected to be populated on exit from the app.
+ * This entry and exit data is expected to be populated in the yield case
+ * as well.
+ *
+ * Shared app buf == `struct dev_assign_comm_params`
+ *
+ * ret0 == DEV_ASSIGN_STATUS_SUCCESS if connection is successful.
+ *         DEV_ASSIGN_STATUS_ERROR if error on connection.
+ *         NA if app is yielded.
+ *
+ */
+#define DEVICE_ASSIGN_APP_FUNC_ID_CONNECT_INIT		2
+
+/*
+ * Pseudo App function ID for device communication resume. App uses heap available via
+ * tpidrro_el0. The cmd should only be issued to dev_assign_dev_communicate() if the
+ * app was yielded. `rmi_dev_comm_enter` is expected to be populated in the shared
+ * buf on entry into the app and `rmi_dev_comm_exit` is expected to be populated on
+ * exit from the app. The app can yield again and callers need to check `app_data->exit_flag`
+ * for APP_EXIT_SVC_YIELD_FLAG.
+ *
+ * Note that this function ID is not passed to the app but is used in the stub to
+ * handle resume after a yield (hence pseudo).
+ *
+ * Shared app buf == `struct dev_assign_comm_params`
+ *
+ * ret0 == DEV_ASSIGN_STATUS_SUCCESS if connection is successful.
+ *         DEV_ASSIGN_STATUS_ERROR if error on connection.
+ *         NA if app is yielded.
+ */
+#define DEVICE_ASSIGN_APP_FUNC_ID_RESUME		10
+
+/*
  * App function ID to de-initialise. App uses heap available via
  * tpidrro_el0.
  *
diff --git a/app/device_assignment/rmm_stub/src/dev_assign_app_stub.c b/app/device_assignment/rmm_stub/src/dev_assign_app_stub.c
index de73e22..7861b23 100644
--- a/app/device_assignment/rmm_stub/src/dev_assign_app_stub.c
+++ b/app/device_assignment/rmm_stub/src/dev_assign_app_stub.c
@@ -45,3 +45,37 @@
 
 	return rc;
 }
+
+int dev_assign_dev_communicate(struct app_data_cfg *app_data,
+			struct rmi_dev_comm_enter *comm_enter_args,
+			struct rmi_dev_comm_exit *comm_exit_args,
+			int dev_cmd)
+{
+	int rc;
+	struct rmi_dev_comm_enter *shared;
+	struct rmi_dev_comm_exit *shared_ret;
+
+	assert((dev_cmd == DEVICE_ASSIGN_APP_FUNC_ID_RESUME) ||
+		(dev_cmd == DEVICE_ASSIGN_APP_FUNC_ID_CONNECT_INIT));
+
+	app_map_shared_page(app_data);
+	shared = app_data->el2_shared_page;
+	*shared = *comm_enter_args;
+
+	if (dev_cmd != DEVICE_ASSIGN_APP_FUNC_ID_RESUME) {
+		rc = (int)app_run(app_data, (unsigned long)dev_cmd,
+				0, 0, 0, 0);
+	} else {
+		rc = (int)app_resume(app_data);
+	}
+
+	if (app_data->exit_flag == APP_EXIT_SVC_YIELD_FLAG) {
+		rc = DEV_ASSIGN_STATUS_COMM_BLOCKED;
+	}
+
+	shared_ret = app_data->el2_shared_page;
+	*comm_exit_args = *shared_ret;
+	app_unmap_shared_page(app_data);
+
+	return rc;
+}
diff --git a/runtime/core/handler.c b/runtime/core/handler.c
index 2684d89..24947e1 100644
--- a/runtime/core/handler.c
+++ b/runtime/core/handler.c
@@ -162,7 +162,7 @@
 	HANDLER(DEV_MEM_MAP,		4, 0, smc_dev_mem_map,		 false, true),
 	HANDLER(DEV_MEM_UNMAP,		3, 2, smc_dev_mem_unmap,	 false, true),
 	HANDLER(PDEV_ABORT,		0, 0, NULL,			 true, true),
-	HANDLER(PDEV_COMMUNICATE,	2, 0, NULL,			 true, true),
+	HANDLER(PDEV_COMMUNICATE,	2, 0, smc_pdev_communicate,	 true, true),
 	HANDLER(PDEV_CREATE,		2, 0, smc_pdev_create,		 true, true),
 	HANDLER(PDEV_DESTROY,		1, 0, smc_pdev_destroy,		 true, true),
 	HANDLER(PDEV_GET_STATE,		1, 1, smc_pdev_get_state,	 true, true),
diff --git a/runtime/include/smc-handler.h b/runtime/include/smc-handler.h
index 222c5bf..fc919ff 100644
--- a/runtime/include/smc-handler.h
+++ b/runtime/include/smc-handler.h
@@ -111,6 +111,9 @@
 
 void smc_pdev_aux_count(unsigned long flags, struct smc_result *res);
 
+unsigned long smc_pdev_communicate(unsigned long pdev_ptr,
+				   unsigned long dev_comm_data_ptr);
+
 void smc_pdev_get_state(unsigned long pdev_ptr, struct smc_result *res);
 
 unsigned long smc_pdev_destroy(unsigned long pdev_ptr);
diff --git a/runtime/rmi/pdev.c b/runtime/rmi/pdev.c
index 6284e7e..210eb76 100644
--- a/runtime/rmi/pdev.c
+++ b/runtime/rmi/pdev.c
@@ -331,6 +331,244 @@
 	return smc_rc;
 }
 
+
+/* Validate RmiDevCommData.RmiDevCommEnter argument passed by Host */
+static unsigned long copyin_and_validate_dev_comm_enter(
+				  unsigned long dev_comm_data_ptr,
+				  struct rmi_dev_comm_enter *enter_args,
+				  unsigned int dev_comm_state)
+{
+	struct granule *g_dev_comm_data;
+	struct granule *g_buf;
+	bool ns_access_ok;
+
+	g_dev_comm_data = find_granule(dev_comm_data_ptr);
+	if ((g_dev_comm_data == NULL) ||
+	    (granule_unlocked_state(g_dev_comm_data) != GRANULE_STATE_NS)) {
+		return RMI_ERROR_INPUT;
+	}
+
+	ns_access_ok = ns_buffer_read(SLOT_NS, g_dev_comm_data,
+				      RMI_DEV_COMM_ENTER_OFFSET,
+				      sizeof(struct rmi_dev_comm_enter),
+				      enter_args);
+	if (!ns_access_ok) {
+		return RMI_ERROR_INPUT;
+	}
+
+	if (!GRANULE_ALIGNED(enter_args->req_addr) ||
+	    !GRANULE_ALIGNED(enter_args->resp_addr) ||
+	    (enter_args->resp_len > GRANULE_SIZE)) {
+		return RMI_ERROR_INPUT;
+	}
+
+	if ((enter_args->status == RMI_DEV_COMM_ENTER_STATUS_RESPONSE) &&
+		(enter_args->resp_len == 0U)) {
+		return RMI_ERROR_INPUT;
+	}
+
+	/* Check if request and response buffers are in NS PAS */
+	g_buf = find_granule(enter_args->req_addr);
+	if ((g_buf == NULL) ||
+	    (granule_unlocked_state(g_buf) != GRANULE_STATE_NS)) {
+		return RMI_ERROR_INPUT;
+	}
+
+	g_buf = find_granule(enter_args->resp_addr);
+	if ((g_buf == NULL) ||
+	    (granule_unlocked_state(g_buf) != GRANULE_STATE_NS)) {
+		return RMI_ERROR_INPUT;
+	}
+
+	if ((dev_comm_state == DEV_COMM_ACTIVE) &&
+	    ((enter_args->status != RMI_DEV_COMM_ENTER_STATUS_RESPONSE) &&
+	    (enter_args->status != RMI_DEV_COMM_ENTER_STATUS_ERROR))) {
+		return RMI_ERROR_DEVICE;
+	}
+
+	if ((dev_comm_state == DEV_COMM_PENDING) &&
+	    (enter_args->status != RMI_DEV_COMM_ENTER_STATUS_NONE)) {
+		return RMI_ERROR_DEVICE;
+	}
+	return RMI_SUCCESS;
+}
+
+/*
+ * Copy out RmiDevCommData.RmiDevCommExit arguments to the Host
+ */
+static unsigned long copyout_dev_comm_exit(unsigned long dev_comm_data_ptr,
+				   struct rmi_dev_comm_exit *exit_args)
+{
+	struct granule *g_dev_comm_data;
+	bool ns_access_ok;
+
+	g_dev_comm_data = find_granule(dev_comm_data_ptr);
+	if ((g_dev_comm_data == NULL) ||
+	    (granule_unlocked_state(g_dev_comm_data) != GRANULE_STATE_NS)) {
+		return RMI_ERROR_INPUT;
+	}
+
+	ns_access_ok = ns_buffer_write(SLOT_NS, g_dev_comm_data,
+				       RMI_DEV_COMM_EXIT_OFFSET,
+				       sizeof(struct rmi_dev_comm_exit),
+				       exit_args);
+	if (!ns_access_ok) {
+		return RMI_ERROR_INPUT;
+	}
+
+	return RMI_SUCCESS;
+}
+
+static int pdev_dispatch_cmd(struct pdev *pd, struct rmi_dev_comm_enter *enter_args,
+		struct rmi_dev_comm_exit *exit_args)
+{
+	int rc;
+
+	if (pd->dev_comm_state == DEV_COMM_ACTIVE) {
+		return dev_assign_dev_communicate(&pd->da_app_data, enter_args,
+			exit_args, DEVICE_ASSIGN_APP_FUNC_ID_RESUME);
+	}
+
+	switch (pd->rmi_state) {
+	case RMI_PDEV_STATE_NEW:
+		rc = dev_assign_dev_communicate(&pd->da_app_data, enter_args,
+			exit_args, DEVICE_ASSIGN_APP_FUNC_ID_CONNECT_INIT);
+		break;
+	default:
+		assert(false);
+		rc = -1;
+	}
+
+	return rc;
+}
+
+static unsigned long dev_communicate(struct pdev *pd,
+				     unsigned long dev_comm_data_ptr)
+{
+	struct rmi_dev_comm_enter enter_args;
+	struct rmi_dev_comm_exit exit_args;
+	void *aux_mapped_addr;
+	unsigned long comm_rc;
+	int rc;
+
+	assert(pd != NULL);
+
+	if ((pd->dev_comm_state == DEV_COMM_IDLE) ||
+			(pd->dev_comm_state == DEV_COMM_ERROR)) {
+		return RMI_ERROR_DEVICE;
+	}
+
+	/* Validate RmiDevCommEnter arguments in DevCommData */
+	/* coverity[uninit_use_in_call:SUPPRESS] */
+	comm_rc = copyin_and_validate_dev_comm_enter(dev_comm_data_ptr, &enter_args,
+		pd->dev_comm_state);
+	if (comm_rc != RMI_SUCCESS) {
+		return comm_rc;
+	}
+
+	/* Map PDEV aux granules */
+	aux_mapped_addr = buffer_pdev_aux_granules_map(pd->g_aux, pd->num_aux);
+	assert(aux_mapped_addr != NULL);
+
+	rc = pdev_dispatch_cmd(pd, &enter_args, &exit_args);
+
+	/* Unmap all PDEV aux granules */
+	buffer_pdev_aux_unmap(aux_mapped_addr, pd->num_aux);
+
+	comm_rc = copyout_dev_comm_exit(dev_comm_data_ptr,
+				       &exit_args);
+	if (comm_rc != RMI_SUCCESS) {
+		/* todo: device status is updated but copyout data failed? */
+		return RMI_ERROR_INPUT;
+	}
+
+	/*
+	 * Based on the device communication result, update the device
+	 * communication state.
+	 */
+	switch (rc) {
+	case DEV_ASSIGN_STATUS_COMM_BLOCKED:
+		pd->dev_comm_state = DEV_COMM_ACTIVE;
+		break;
+	case DEV_ASSIGN_STATUS_ERROR:
+		pd->dev_comm_state = DEV_COMM_ERROR;
+		break;
+	case DEV_ASSIGN_STATUS_SUCCESS:
+		pd->dev_comm_state = DEV_COMM_IDLE;
+		break;
+	default:
+		assert(false);
+	}
+
+	return RMI_SUCCESS;
+}
+
+/*
+ * smc_pdev_communicate
+ *
+ * pdev_ptr	- PA of the PDEV
+ * dev_comm_data_ptr	- PA of the communication data structure
+ */
+unsigned long smc_pdev_communicate(unsigned long pdev_ptr,
+				   unsigned long dev_comm_data_ptr)
+{
+	struct granule *g_pdev;
+	struct pdev *pd;
+	unsigned long rmi_rc;
+
+	if (!is_rmi_feat_da_enabled()) {
+		return SMC_NOT_SUPPORTED;
+	}
+
+	if (!GRANULE_ALIGNED(pdev_ptr) || !GRANULE_ALIGNED(dev_comm_data_ptr)) {
+		return RMI_ERROR_INPUT;
+	}
+
+	/* Lock pdev granule and map it */
+	g_pdev = find_lock_granule(pdev_ptr, GRANULE_STATE_PDEV);
+	if (g_pdev == NULL) {
+		return RMI_ERROR_INPUT;
+	}
+
+	pd = buffer_granule_map(g_pdev, SLOT_PDEV);
+	if (pd == NULL) {
+		granule_unlock(g_pdev);
+		return RMI_ERROR_INPUT;
+	}
+
+	assert(pd->g_pdev == g_pdev);
+
+	rmi_rc = dev_communicate(pd, dev_comm_data_ptr);
+
+	/*
+	 * Based on the resulting device communication state, update the
+	 * PDEV state.
+	 */
+	switch (pd->dev_comm_state) {
+	case DEV_COMM_ERROR:
+		pd->rmi_state = RMI_PDEV_STATE_ERROR;
+		break;
+	case DEV_COMM_IDLE:
+		if (pd->rmi_state == RMI_PDEV_STATE_NEW) {
+			pd->rmi_state = RMI_PDEV_STATE_NEEDS_KEY;
+		} else {
+			pd->rmi_state = RMI_PDEV_STATE_ERROR;
+		}
+		break;
+	case DEV_COMM_ACTIVE:
+		/* No state change required */
+		break;
+	case DEV_COMM_PENDING:
+	default:
+		assert(false);
+	}
+
+	buffer_unmap(pd);
+	granule_unlock(g_pdev);
+
+	return rmi_rc;
+}
+
 /*
  * smc_pdev_get_state
  *