feat(ff-a): validate FFA_RXTX_MAP/UNMAP against NWd page tables

Check in the FFA_RXTX_MAP/FFA_RXTX_UNMAP interfaces that the rxtx
buffers are mapped in the NWd page tables.

Unmap the rxtx buffers from the NWd page tables during FFA_RXTX_MAP, so
that the buffers cannot be used for memory sharing operations from the
NWd, or for FFA_RXTX_MAP in another instance.
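
As a rough sketch, the new arch_other_world_vm_configure_rxtx_map()
helper validates and retires each buffer along these lines (recv-buffer
handling, locking and logging elided):

    const uint32_t expected_mode =
        MM_MODE_R | MM_MODE_W | MM_MODE_X | MM_MODE_NS;
    uint32_t send_mode;

    /* The buffer must already be mapped RWX and non-secure in NWd. */
    if (!vm_mem_get_mode(other_world_locked, ipa_from_pa(pa_send_begin),
                         ipa_from_pa(pa_send_end), &send_mode) ||
        (send_mode & expected_mode) != expected_mode) {
        return ffa_error(FFA_DENIED);
    }

    /* Retire the buffer from the NWd tables while it is in use. */
    if (!vm_unmap(other_world_locked, pa_send_begin, pa_send_end,
                  local_page_pool)) {
        return ffa_error(FFA_ABORTED);
    }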

Remap the rxtx buffers to the NWd page tables during FFA_RXTX_UNMAP, so
that the buffers can be used for memory sharing operations again.
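
Conversely, arch_other_world_vm_configure_rxtx_unmap() restores the NWd
identity mapping, roughly as follows (send buffer only, locking and
error recovery elided):

    if (!vm_identity_map(other_world_locked, pa_send_begin, pa_send_end,
                         MM_MODE_R | MM_MODE_W | MM_MODE_X | MM_MODE_NS,
                         local_page_pool, NULL)) {
        return ffa_error(FFA_ABORTED);
    }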

Signed-off-by: Karl Meakin <karl.meakin@arm.com>
Change-Id: I3419b1dcd97edcce18db59f3de66fcc850ea655d
diff --git a/src/api.c b/src/api.c
index 0899697..f642f0c 100644
--- a/src/api.c
+++ b/src/api.c
@@ -1496,35 +1496,38 @@
 /**
  * Configures the hypervisor's stage-1 view of the send and receive pages.
  */
-static bool api_vm_configure_stage1(struct mm_stage1_locked mm_stage1_locked,
-				    struct vm_locked vm_locked,
-				    paddr_t pa_send_begin, paddr_t pa_send_end,
-				    paddr_t pa_recv_begin, paddr_t pa_recv_end,
-				    uint32_t extra_attributes,
-				    struct mpool *local_page_pool)
+static struct ffa_value api_vm_configure_stage1(
+	struct mm_stage1_locked mm_stage1_locked, struct vm_locked vm_locked,
+	paddr_t pa_send_begin, paddr_t pa_send_end, paddr_t pa_recv_begin,
+	paddr_t pa_recv_end, uint32_t extra_attributes,
+	struct mpool *local_page_pool)
 {
-	bool ret;
+	struct ffa_value ret;
 
-	/* Map the send page as read-only in the hypervisor address space. */
+	/*
+	 * Map the send page as read-only in the SPMC/hypervisor address space.
+	 */
 	vm_locked.vm->mailbox.send =
 		mm_identity_map(mm_stage1_locked, pa_send_begin, pa_send_end,
 				MM_MODE_R | extra_attributes, local_page_pool);
 	if (!vm_locked.vm->mailbox.send) {
-		goto fail;
+		ret = ffa_error(FFA_NO_MEMORY);
+		goto out;
 	}
 
 	/*
-	 * Map the receive page as writable in the hypervisor address space. On
-	 * failure, unmap the send page before returning.
+	 * Map the receive page as writable in the SPMC/hypervisor address
+	 * space. On failure, unmap the send page before returning.
 	 */
 	vm_locked.vm->mailbox.recv =
 		mm_identity_map(mm_stage1_locked, pa_recv_begin, pa_recv_end,
 				MM_MODE_W | extra_attributes, local_page_pool);
 	if (!vm_locked.vm->mailbox.recv) {
+		ret = ffa_error(FFA_NO_MEMORY);
 		goto fail_undo_send;
 	}
 
-	ret = true;
+	ret = (struct ffa_value){.func = FFA_SUCCESS_32};
 	goto out;
 
 	/*
@@ -1536,9 +1539,6 @@
 	CHECK(mm_unmap(mm_stage1_locked, pa_send_begin, pa_send_end,
 		       local_page_pool));
 
-fail:
-	ret = false;
-
 out:
 	return ret;
 }
@@ -1572,12 +1572,16 @@
 
 	/* We only allow these to be setup once. */
 	if (vm_locked.vm->mailbox.send || vm_locked.vm->mailbox.recv) {
+		dlog_error("%s: Mailboxes already set up for VM %#x\n",
+			   __func__, vm_locked.vm->id);
 		ret = ffa_error(FFA_DENIED);
 		goto out;
 	}
 
 	/* Hafnium only supports a fixed size of RX/TX buffers. */
 	if (page_count != HF_MAILBOX_SIZE / FFA_PAGE_SIZE) {
+		dlog_error("%s: Page count must be %d, but is %d\n", __func__,
+			   HF_MAILBOX_SIZE / FFA_PAGE_SIZE, page_count);
 		ret = ffa_error(FFA_INVALID_PARAMETERS);
 		goto out;
 	}
@@ -1585,6 +1589,7 @@
 	/* Fail if addresses are not page-aligned. */
 	if (!is_aligned(ipa_addr(send), PAGE_SIZE) ||
 	    !is_aligned(ipa_addr(recv), PAGE_SIZE)) {
+		dlog_error("%s: Mailbox buffers not page-aligned\n", __func__);
 		ret = ffa_error(FFA_INVALID_PARAMETERS);
 		goto out;
 	}
@@ -1597,6 +1602,7 @@
 
 	/* Fail if the same page is used for the send and receive pages. */
 	if (pa_addr(pa_send_begin) == pa_addr(pa_recv_begin)) {
+		dlog_error("%s: Mailbox buffers overlap\n", __func__);
 		ret = ffa_error(FFA_INVALID_PARAMETERS);
 		goto out;
 	}
@@ -1655,9 +1661,18 @@
 				     mode, local_page_pool, NULL)) {
 			/* TODO: partial defrag of failed range. */
 			/* Recover any memory consumed in failed mapping. */
+			dlog_error("%s: cannot map recv page\n", __func__);
 			vm_ptable_defrag(vm_locked, local_page_pool);
+			ret = ffa_error(FFA_NO_MEMORY);
 			goto fail_undo_send;
 		}
+	} else {
+		ret = arch_other_world_vm_configure_rxtx_map(
+			vm_locked, local_page_pool, pa_send_begin, pa_send_end,
+			pa_recv_begin, pa_recv_end);
+		if (ret.func != FFA_SUCCESS_32) {
+			goto out;
+		}
 	}
 
 	/* Get extra send/recv pages mapping attributes for the given VM ID. */
@@ -1678,9 +1693,10 @@
 		extra_attributes |= MM_MODE_NG;
 	}
 
-	if (!api_vm_configure_stage1(mm_stage1_locked, vm_locked, pa_send_begin,
-				     pa_send_end, pa_recv_begin, pa_recv_end,
-				     extra_attributes, local_page_pool)) {
+	ret = api_vm_configure_stage1(
+		mm_stage1_locked, vm_locked, pa_send_begin, pa_send_end,
+		pa_recv_begin, pa_recv_end, extra_attributes, local_page_pool);
+	if (ret.func != FFA_SUCCESS_32) {
 		goto fail_undo_send_and_recv;
 	}
 
@@ -1694,7 +1710,6 @@
 fail_undo_send:
 	CHECK(vm_identity_map(vm_locked, pa_send_begin, pa_send_end,
 			      orig_send_mode, local_page_pool, NULL));
-	ret = ffa_error(FFA_NO_MEMORY);
 
 out:
 	return ret;
@@ -1908,6 +1923,13 @@
 		CHECK(vm_identity_map(vm_locked, recv_pa_begin, recv_pa_end,
 				      MM_MODE_R | MM_MODE_W | MM_MODE_X,
 				      &api_page_pool, NULL));
+	} else {
+		ret = arch_other_world_vm_configure_rxtx_unmap(
+			vm_locked, &api_page_pool, send_pa_begin, send_pa_end,
+			recv_pa_begin, recv_pa_end);
+		if (ret.func != FFA_SUCCESS_32) {
+			goto out;
+		}
 	}
 
 	/* Unmap the buffers in the partition manager. */
diff --git a/src/arch/aarch64/hypervisor/other_world.c b/src/arch/aarch64/hypervisor/other_world.c
index 1457148..7b14b0b 100644
--- a/src/arch/aarch64/hypervisor/other_world.c
+++ b/src/arch/aarch64/hypervisor/other_world.c
@@ -9,9 +9,12 @@
 #include "hf/arch/other_world.h"
 
 #include "hf/arch/mmu.h"
+#include "hf/arch/plat/ffa.h"
 
+#include "hf/check.h"
 #include "hf/dlog.h"
 #include "hf/ffa.h"
+#include "hf/ffa_internal.h"
 #include "hf/vcpu.h"
 #include "hf/vm.h"
 
@@ -81,3 +84,161 @@
 {
 	return smc_ffa_call_ext(args);
 }
+
+/**
+ * Obtain a lock on the other world VM, making sure it is
+ * locked in the correct order relative to the owner VM in order to avoid a
+ * deadlock.
+ */
+static struct vm_locked lock_other_world(struct vm_locked owner_vm_locked)
+{
+	struct vm *other_world_vm;
+	struct two_vm_locked both;
+
+	if (owner_vm_locked.vm->id == HF_OTHER_WORLD_ID) {
+		return owner_vm_locked;
+	}
+
+	other_world_vm = vm_find(HF_OTHER_WORLD_ID);
+	both = vm_lock_both_in_order(owner_vm_locked, other_world_vm);
+
+	return both.vm2;
+}
+
+static void unlock_other_world(struct vm_locked owner_vm_locked,
+			       struct vm_locked other_world_locked)
+{
+	if (owner_vm_locked.vm->id != other_world_locked.vm->id) {
+		vm_unlock(&other_world_locked);
+	}
+}
+
+/**
+ * Unmap rxtx buffers from the other world so that they cannot be used for
+ * memory sharing operations from NWd, or for FFA_RXTX_MAP in another instance.
+ *
+ * Fails if the given addresses are not already mapped in the other world page
+ * tables.
+ *
+ * Returns `FFA_DENIED` if the send/recv pages are not mapped in the normal
+ * world page tables, or are mapped with incorrect permissions.
+ *
+ * Returns `FFA_ABORTED` if unmapping the send/recv pages from the normal world
+ * page tables fails.
+ */
+struct ffa_value arch_other_world_vm_configure_rxtx_map(
+	struct vm_locked vm_locked, struct mpool *local_page_pool,
+	paddr_t pa_send_begin, paddr_t pa_send_end, paddr_t pa_recv_begin,
+	paddr_t pa_recv_end)
+{
+	struct ffa_value ret;
+	uint32_t send_mode;
+	uint32_t recv_mode;
+	struct vm_locked other_world_locked;
+	const uint32_t expected_mode =
+		MM_MODE_R | MM_MODE_W | MM_MODE_X | MM_MODE_NS;
+
+	other_world_locked = lock_other_world(vm_locked);
+	assert(other_world_locked.vm != NULL);
+
+	/*
+	 * Check that the memory is mapped in the NWd set of page
+	 * tables.
+	 */
+	if (!vm_mem_get_mode(other_world_locked, ipa_from_pa(pa_send_begin),
+			     ipa_from_pa(pa_send_end), &send_mode)) {
+		ret = ffa_error(FFA_DENIED);
+		dlog_error("%s: send page not mapped in NWd VM\n", __func__);
+		goto out_unlock;
+	}
+	if (!vm_mem_get_mode(other_world_locked, ipa_from_pa(pa_recv_begin),
+			     ipa_from_pa(pa_recv_end), &recv_mode)) {
+		ret = ffa_error(FFA_DENIED);
+		dlog_error("%s: recv page not mapped in NWd VM\n", __func__);
+		goto out_unlock;
+	}
+
+	if ((send_mode & expected_mode) != expected_mode) {
+		ret = ffa_error(FFA_DENIED);
+		dlog_error("%s: send page is invalid (expected %#x, got %#x)\n",
+			   __func__, expected_mode, send_mode);
+		goto out_unlock;
+	}
+	if ((recv_mode & expected_mode) != expected_mode) {
+		ret = ffa_error(FFA_DENIED);
+		dlog_error("%s: recv page is invalid (expected %#x, got %#x)\n",
+			   __func__, expected_mode, recv_mode);
+		goto out_unlock;
+	}
+
+	/*
+	 * Unmap the memory from the NWd page tables, to prevent that memory
+	 * being used in memory sharing operations from the NWd, or in further
+	 * `FFA_RXTX_MAP` calls.
+	 */
+	if (!vm_unmap(other_world_locked, pa_send_begin, pa_send_end,
+		      local_page_pool)) {
+		dlog_error("%s: cannot unmap send page from NWd VM\n",
+			   __func__);
+		ret = ffa_error(FFA_ABORTED);
+		goto out_unlock;
+	}
+	if (!vm_unmap(other_world_locked, pa_recv_begin, pa_recv_end,
+		      local_page_pool)) {
+		ret = ffa_error(FFA_ABORTED);
+		dlog_error("%s: cannot unmap recv page from NWd VM\n",
+			   __func__);
+		goto out_unlock;
+	}
+
+	ret = (struct ffa_value){.func = FFA_SUCCESS_32};
+
+out_unlock:
+	unlock_other_world(vm_locked, other_world_locked);
+	return ret;
+}
+
+/**
+ * Remap rxtx buffers to the other world so that they can be used for memory
+ * sharing operations from NWd, or for FFA_RXTX_MAP in another instance.
+ *
+ * Returns `FFA_ABORTED` if mapping the send/recv pages in the normal world
+ * page tables fails.
+ */
+struct ffa_value arch_other_world_vm_configure_rxtx_unmap(
+	struct vm_locked vm_locked, struct mpool *local_page_pool,
+	paddr_t pa_send_begin, paddr_t pa_send_end, paddr_t pa_recv_begin,
+	paddr_t pa_recv_end)
+{
+	struct vm_locked other_world_locked = lock_other_world(vm_locked);
+
+	if (other_world_locked.vm == NULL) {
+		return ffa_error(FFA_ABORTED);
+	}
+
+	/* Remap to other world page tables. */
+	if (!vm_identity_map(other_world_locked, pa_send_begin, pa_send_end,
+			     MM_MODE_R | MM_MODE_W | MM_MODE_X | MM_MODE_NS,
+			     local_page_pool, NULL)) {
+		dlog_error(
+			"%s: unable to remap send page to other world page "
+			"tables\n",
+			__func__);
+		return ffa_error(FFA_ABORTED);
+	}
+
+	if (!vm_identity_map(other_world_locked, pa_recv_begin, pa_recv_end,
+			     MM_MODE_R | MM_MODE_W | MM_MODE_X | MM_MODE_NS,
+			     local_page_pool, NULL)) {
+		dlog_error(
+			"%s: unable to remap recv page to other world page "
+			"tables\n",
+			__func__);
+		CHECK(vm_unmap(other_world_locked, pa_send_begin, pa_send_end,
+			       local_page_pool));
+		return ffa_error(FFA_ABORTED);
+	}
+
+	unlock_other_world(vm_locked, other_world_locked);
+	return (struct ffa_value){.func = FFA_SUCCESS_32};
+}
diff --git a/src/arch/aarch64/plat/ffa/hypervisor.c b/src/arch/aarch64/plat/ffa/hypervisor.c
index fe73343..3c7db40 100644
--- a/src/arch/aarch64/plat/ffa/hypervisor.c
+++ b/src/arch/aarch64/plat/ffa/hypervisor.c
@@ -28,9 +28,6 @@
 
 static bool ffa_tee_enabled;
 
-alignas(FFA_PAGE_SIZE) static uint8_t other_world_send_buffer[HF_MAILBOX_SIZE];
-alignas(FFA_PAGE_SIZE) static uint8_t other_world_recv_buffer[HF_MAILBOX_SIZE];
-
 bool vm_supports_indirect_messages(struct vm *vm)
 {
 	return vm->ffa_version >= MAKE_FFA_VERSION(1, 1) &&
@@ -93,6 +90,18 @@
 {
 	struct vm *other_world_vm = vm_find(HF_OTHER_WORLD_ID);
 	struct ffa_value ret;
+	struct mm_stage1_locked mm_stage1_locked;
+
+	/*
+	 * This is a segment from TDRAM for the NS memory in the FVP platform.
+	 * TODO: We ought to provide a better way to do this, if porting the
+	 * hypervisor to other platforms. One option would be to provide this
+	 * via DTS.
+	 */
+	const uint64_t start = 0x90000000;
+	const uint64_t len = 0x60000000;
+	const paddr_t send_addr = pa_init(start + len - PAGE_SIZE * 1);
+	const paddr_t recv_addr = pa_init(start + len - PAGE_SIZE * 2);
 
 	(void)ppool;
 
@@ -115,9 +124,19 @@
 		panic("Hypervisor and SPMC versions are not compatible.\n");
 	}
 
-	/* Setup TEE VM RX/TX buffers */
-	other_world_vm->mailbox.send = &other_world_send_buffer;
-	other_world_vm->mailbox.recv = &other_world_recv_buffer;
+	/*
+	 * Set up the TEE VM RX/TX buffers at the following hard-coded
+	 * addresses, as they must be within the NS memory node in the SPMC
+	 * manifest. The hypervisor's address space should be excluded from
+	 * that region, to prevent SPs from using it in memory region nodes,
+	 * and to prevent the NWd from misusing it at runtime via the memory
+	 * sharing interfaces.
+	 */
+
+	// NOLINTNEXTLINE(performance-no-int-to-ptr)
+	other_world_vm->mailbox.send = (void *)pa_addr(send_addr);
+	// NOLINTNEXTLINE(performance-no-int-to-ptr)
+	other_world_vm->mailbox.recv = (void *)pa_addr(recv_addr);
 
 	/*
 	 * Note that send and recv are swapped around, as the send buffer from
@@ -132,6 +151,22 @@
 
 	ffa_tee_enabled = true;
 
+	/*
+	 * The hypervisor will write to the secure world's receive buffer, and
+	 * will read from the secure world's send buffer.
+	 *
+	 * The mapping operation is necessary because these ranges lie outside
+	 * of the hypervisor's binary image.
+	 */
+	mm_stage1_locked = mm_lock_stage1();
+	CHECK(mm_identity_map(mm_stage1_locked, send_addr,
+			      pa_add(send_addr, PAGE_SIZE),
+			      MM_MODE_R | MM_MODE_SHARED, ppool) != NULL);
+	CHECK(mm_identity_map(
+		      mm_stage1_locked, recv_addr, pa_add(recv_addr, PAGE_SIZE),
+		      MM_MODE_R | MM_MODE_W | MM_MODE_SHARED, ppool) != NULL);
+	mm_unlock_stage1(&mm_stage1_locked);
+
 	dlog_verbose("TEE finished setting up buffers.\n");
 }