feat: initialize page tables for enforcing dma isolation

SPMC maintains two sets of dedicated page tables for each partition
to enforce dma isolation.

Depending on the number of DMA devices assigned to a specific
partition, the corresponding set of page tables are initialized.

Signed-off-by: Madhukar Pappireddy <madhukar.pappireddy@arm.com>
Change-Id: I1f97876cc80de29cb938e62fbe695c29a8af104b
diff --git a/inc/hf/arch/vm.h b/inc/hf/arch/vm.h
index 7bc13a2..64e0783 100644
--- a/inc/hf/arch/vm.h
+++ b/inc/hf/arch/vm.h
@@ -15,6 +15,7 @@
  */
 void arch_vm_features_set(struct vm *vm);
 bool arch_vm_init_mm(struct vm *vm, struct mpool *ppool);
+bool arch_vm_iommu_init_mm(struct vm *vm, struct mpool *ppool);
 bool arch_vm_identity_prepare(struct vm_locked vm_locked, paddr_t begin,
 			      paddr_t end, uint32_t mode, struct mpool *ppool);
 void arch_vm_identity_commit(struct vm_locked vm_locked, paddr_t begin,
diff --git a/inc/hf/vm.h b/inc/hf/vm.h
index 47ca9d5..be8c011 100644
--- a/inc/hf/vm.h
+++ b/inc/hf/vm.h
@@ -13,6 +13,7 @@
 #include "hf/arch/vm/vm.h"
 
 #include "hf/cpu.h"
+#include "hf/ffa_partition_manifest.h"
 #include "hf/interrupt_desc.h"
 #include "hf/list.h"
 #include "hf/mm.h"
@@ -209,6 +210,14 @@
 	ffa_vcpu_count_t vcpu_count;
 	struct vcpu *vcpus;
 	struct mm_ptable ptable;
+
+	/**
+	 * Set of page tables used for defining the peripheral's secure
+	 * IPA space, in the context of SPMC.
+	 */
+	struct mm_ptable iommu_ptables[PARTITION_MAX_DMA_DEVICES];
+	/** Count of DMA devices assigned to this VM. */
+	uint8_t dma_device_count;
 	struct mailbox mailbox;
 
 	struct {
@@ -294,9 +303,11 @@
 };
 
 struct vm *vm_init(ffa_id_t id, ffa_vcpu_count_t vcpu_count,
-		   struct mpool *ppool, bool el0_partition);
+		   struct mpool *ppool, bool el0_partition,
+		   uint8_t dma_device_count);
 bool vm_init_next(ffa_vcpu_count_t vcpu_count, struct mpool *ppool,
-		  struct vm **new_vm, bool el0_partition);
+		  struct vm **new_vm, bool el0_partition,
+		  uint8_t dma_device_count);
 ffa_vm_count_t vm_get_count(void);
 struct vm *vm_find(ffa_id_t id);
 struct vm_locked vm_find_locked(ffa_id_t id);
diff --git a/src/arch/aarch64/hypervisor/vm.c b/src/arch/aarch64/hypervisor/vm.c
index 879a99b..328c38a 100644
--- a/src/arch/aarch64/hypervisor/vm.c
+++ b/src/arch/aarch64/hypervisor/vm.c
@@ -10,6 +10,7 @@
 
 #include "hf/arch/mmu.h"
 
+#include "hf/dlog.h"
 #include "hf/plat/iommu.h"
 
 #include "hypervisor/feature_id.h"
@@ -64,6 +65,48 @@
 	}
 }
 
+/*
+ * Allow the partition manager to perform necessary steps to enforce access
+ * control, with the help of IOMMU, for DMA accesses on behalf of a given
+ * partition.
+ */
+bool arch_vm_iommu_init_mm(struct vm *vm, struct mpool *ppool)
+{
+	bool ret = true;
+
+	/*
+	 * No support to enforce access control through (stage 1) address
+	 * translation for memory accesses by DMA device on behalf of an
+	 * EL0/S-EL0 partition.
+	 */
+	if (vm->el0_partition) {
+		return true;
+	}
+
+	for (uint8_t k = 0; k < vm->dma_device_count; k++) {
+		/*
+		 * Hafnium maintains an independent set of page tables for each
+		 * DMA device that is upstream of given VM. This is necessary
+		 * to enforce static DMA isolation.
+		 */
+		ret = ret &&
+		      mm_ptable_init(&vm->iommu_ptables[k], vm->id, 0, ppool);
+#if SECURE_WORLD == 1
+		ret = ret && mm_ptable_init(&vm->arch.iommu_ptables_ns[k],
+					    vm->id, 0, ppool);
+#endif
+		if (!ret) {
+			dlog_error(
+				"Failed to allocate entries for DMA page "
+				"tables. Consider increasing heap page "
+				"count.\n");
+			return ret;
+		}
+	}
+
+	return ret;
+}
+
 bool arch_vm_init_mm(struct vm *vm, struct mpool *ppool)
 {
 	bool ret;
@@ -79,7 +122,8 @@
 	ret = ret && mm_vm_init(&vm->arch.ptable_ns, vm->id, ppool);
 #endif
 
-	return ret;
+	/* DMA/IOMMU page tables are initialized by arch_vm_iommu_init_mm(). */
+	return ret;
 }
 
 bool arch_vm_identity_prepare(struct vm_locked vm_locked, paddr_t begin,
diff --git a/src/arch/aarch64/inc/hf/arch/vm/vm.h b/src/arch/aarch64/inc/hf/arch/vm/vm.h
index faa9ace..21aca9e 100644
--- a/src/arch/aarch64/inc/hf/arch/vm/vm.h
+++ b/src/arch/aarch64/inc/hf/arch/vm/vm.h
@@ -40,5 +40,11 @@
 	 * the SPMC defining the SP non-secure IPA space.
 	 */
 	struct mm_ptable ptable_ns;
+
+	/**
+	 * Set of page tables used for defining the peripheral's non-secure
+	 * IPA space, in the context of SPMC.
+	 */
+	struct mm_ptable iommu_ptables_ns[PARTITION_MAX_DMA_DEVICES];
 #endif
 };
diff --git a/src/arch/fake/hypervisor/ffa.c b/src/arch/fake/hypervisor/ffa.c
index 5cf2c06..79fd9eb 100644
--- a/src/arch/fake/hypervisor/ffa.c
+++ b/src/arch/fake/hypervisor/ffa.c
@@ -562,6 +562,14 @@
 	return true;
 }
 
+bool arch_vm_iommu_init_mm(struct vm *vm, struct mpool *ppool)
+{
+	(void)vm;
+	(void)ppool;
+
+	return true;
+}
+
 bool arch_vm_identity_prepare(struct vm_locked vm_locked, paddr_t begin,
 			      paddr_t end, uint32_t mode, struct mpool *ppool)
 {
diff --git a/src/load.c b/src/load.c
index 59c3f1c..352bab4 100644
--- a/src/load.c
+++ b/src/load.c
@@ -322,7 +322,8 @@
 		}
 	}
 
-	if (!vm_init_next(MAX_CPUS, ppool, &vm, false)) {
+	if (!vm_init_next(MAX_CPUS, ppool, &vm, false,
+			  manifest_vm->partition.dma_device_count)) {
 		dlog_error("Unable to initialise primary VM.\n");
 		return false;
 	}
@@ -706,7 +707,8 @@
 	CHECK(!is_el0_partition || manifest_vm->secondary.vcpu_count == 1);
 
 	if (!vm_init_next(manifest_vm->secondary.vcpu_count, ppool, &vm,
-			  is_el0_partition)) {
+			  is_el0_partition,
+			  manifest_vm->partition.dma_device_count)) {
 		dlog_error("Unable to initialise VM.\n");
 		return false;
 	}
@@ -913,7 +915,7 @@
 	 * -TrustZone (or the SPMC) when running the Hypervisor
 	 * -the Hypervisor when running TZ/SPMC
 	 */
-	other_world_vm = vm_init(HF_OTHER_WORLD_ID, MAX_CPUS, ppool, false);
+	other_world_vm = vm_init(HF_OTHER_WORLD_ID, MAX_CPUS, ppool, false, 0);
 	CHECK(other_world_vm != NULL);
 
 	for (i = 0; i < MAX_CPUS; i++) {
diff --git a/src/vm.c b/src/vm.c
index d2f6ce6..57353a3 100644
--- a/src/vm.c
+++ b/src/vm.c
@@ -42,11 +42,12 @@
 
 static bool vm_init_mm(struct vm *vm, struct mpool *ppool)
 {
-	return arch_vm_init_mm(vm, ppool);
+	return arch_vm_init_mm(vm, ppool) && arch_vm_iommu_init_mm(vm, ppool);
 }
 
 struct vm *vm_init(ffa_id_t id, ffa_vcpu_count_t vcpu_count,
-		   struct mpool *ppool, bool el0_partition)
+		   struct mpool *ppool, bool el0_partition,
+		   uint8_t dma_device_count)
 {
 	uint32_t i;
 	struct vm *vm;
@@ -81,6 +82,7 @@
 	vm->mailbox.state = MAILBOX_STATE_EMPTY;
 	atomic_init(&vm->aborting, false);
 	vm->el0_partition = el0_partition;
+	vm->dma_device_count = dma_device_count;
 
 	if (!vm_init_mm(vm, ppool)) {
 		return NULL;
@@ -103,7 +105,8 @@
 }
 
 bool vm_init_next(ffa_vcpu_count_t vcpu_count, struct mpool *ppool,
-		  struct vm **new_vm, bool el0_partition)
+		  struct vm **new_vm, bool el0_partition,
+		  uint8_t dma_device_count)
 {
 	if (vm_count >= MAX_VMS) {
 		return false;
@@ -111,7 +114,7 @@
 
 	/* Generate IDs based on an offset, as low IDs e.g., 0, are reserved */
 	*new_vm = vm_init(vm_count + HF_VM_ID_OFFSET, vcpu_count, ppool,
-			  el0_partition);
+			  el0_partition, dma_device_count);
 	if (*new_vm == NULL) {
 		return false;
 	}
diff --git a/src/vm_test.cc b/src/vm_test.cc
index d39e8b2..40b0e60 100644
--- a/src/vm_test.cc
+++ b/src/vm_test.cc
@@ -75,7 +75,7 @@
 	struct vm_locked vm_locked;
 
 	/* TODO: check ptable usage (security state?) */
-	EXPECT_TRUE(vm_init_next(1, &ppool, &vm, false));
+	EXPECT_TRUE(vm_init_next(1, &ppool, &vm, false, 0));
 	vm_locked = vm_lock(vm);
 	ASSERT_TRUE(mm_vm_init(&vm->ptable, vm->id, &ppool));
 	EXPECT_TRUE(vm_unmap_hypervisor(vm_locked, &ppool));
@@ -102,7 +102,7 @@
 	 * Insertion when no call to "vcpu_update_boot" has been made yet.
 	 * The "boot_list" is expected to be empty.
 	 */
-	EXPECT_TRUE(vm_init_next(1, &ppool, &vm_cur, false));
+	EXPECT_TRUE(vm_init_next(1, &ppool, &vm_cur, false, 0));
 	vm_cur->boot_order = 3;
 	vcpu = vm_get_vcpu(vm_cur, 0);
 	vcpu_update_boot(vcpu);
@@ -111,7 +111,7 @@
 	EXPECT_EQ(vcpu_get_boot_vcpu()->vm->id, vm_cur->id);
 
 	/* Insertion at the head of the boot list */
-	EXPECT_TRUE(vm_init_next(1, &ppool, &vm_cur, false));
+	EXPECT_TRUE(vm_init_next(1, &ppool, &vm_cur, false, 0));
 	vm_cur->boot_order = 1;
 	vcpu = vm_get_vcpu(vm_cur, 0);
 	vcpu_update_boot(vcpu);
@@ -121,7 +121,7 @@
 
 	/* Insertion of two in the middle of the boot list */
 	for (uint32_t i = 0; i < 2; i++) {
-		EXPECT_TRUE(vm_init_next(1, &ppool, &vm_cur, false));
+		EXPECT_TRUE(vm_init_next(1, &ppool, &vm_cur, false, 0));
 		vm_cur->boot_order = 2;
 		vcpu = vm_get_vcpu(vm_cur, 0);
 		vcpu_update_boot(vcpu);
@@ -514,7 +514,7 @@
 	uint32_t lists_count = 0;
 	enum notifications_info_get_state current_state = INIT;
 
-	EXPECT_TRUE(vm_init_next(vcpu_count, &ppool, &current_vm, false));
+	EXPECT_TRUE(vm_init_next(vcpu_count, &ppool, &current_vm, false, 0));
 	current_vm_locked = vm_lock(current_vm);
 	notifications = &current_vm->notifications.from_sp;