feat(mm): add check on address width before mapping a page

NWd and SWd can be configured with different PA ranges.
Currently Hafnium has a hardcoded 39-bit address space.

It is possible that the NWd wants to share a buffer with the SWd in an
address range beyond SWd PA limit.

In theory this should be safely handled by the architecture, that
expects:
- an address size fault if the OA of a translation is wider
  than the configured TCR_ELx.{I}PS;
- a translation fault if the IA is wider than the configured TCR_ELx.TnSZ.

Nevertheless, debugging such a failure would be tricky; hence, adding a
check on the address width of each page before it is mapped should
prevent it.

Change-Id: I53d99eb099c91ff349c0e08aa1e3601b78154ffe
Signed-off-by: Federico Recanati <federico.recanati@arm.com>
diff --git a/src/arch/aarch64/hftest/mm.c b/src/arch/aarch64/hftest/mm.c
index 7406126..121a32c 100644
--- a/src/arch/aarch64/hftest/mm.c
+++ b/src/arch/aarch64/hftest/mm.c
@@ -32,9 +32,8 @@
  */
 bool arch_vm_mm_init(void)
 {
-	static const int pa_bits_table[16] = {32, 36, 40, 42, 44, 48, 52};
 	uint64_t features = read_msr(id_aa64mmfr0_el1);
-	int pa_bits = pa_bits_table[features & 0xf];
+	uint32_t pa_bits = arch_mm_get_pa_range();
 
 	/* Check that 4KB granules are supported. */
 	if (((features >> 28) & 0xf) == 0xf) {
diff --git a/src/arch/aarch64/mm.c b/src/arch/aarch64/mm.c
index 73700a5..95c21e0 100644
--- a/src/arch/aarch64/mm.c
+++ b/src/arch/aarch64/mm.c
@@ -741,13 +741,12 @@
  */
 bool arch_mm_init(paddr_t table)
 {
-	static const int pa_bits_table[16] = {32, 36, 40, 42, 44, 48, 52};
 	uint64_t features = read_msr(id_aa64mmfr0_el1);
 	uint64_t pe_features = read_msr(id_aa64pfr0_el1);
 	unsigned int nsa_nsw;
-	int pa_bits = pa_bits_table[features & 0xf];
-	int extend_bits;
-	int sl0;
+	uint32_t pa_bits = arch_mm_get_pa_range();
+	uint32_t extend_bits;
+	uint32_t sl0;
 
 	/* Check that 4KB granules are supported. */
 	if (((features >> 28) & 0xf) == 0xf) {
@@ -918,3 +917,13 @@
 {
 	return (id == HF_HYPERVISOR_VM_ID) ? MM_MODE_NS : 0;
 }
+
+/**
+ * Returns the maximum supported PA Range in bits.
+ */
+uint32_t arch_mm_get_pa_range(void)
+{
+	static const uint32_t pa_bits_table[16] = {32, 36, 40, 42, 44, 48, 52};
+	uint64_t features = read_msr(id_aa64mmfr0_el1);
+	return pa_bits_table[features & 0xf];
+}
diff --git a/src/arch/fake/mm.c b/src/arch/fake/mm.c
index e06a8b7..c1f6e98 100644
--- a/src/arch/fake/mm.c
+++ b/src/arch/fake/mm.c
@@ -178,3 +178,11 @@
 void arch_mm_sync_table_writes(void)
 {
 }
+
+/**
+ * Returns the maximum supported PA Range in bits.
+ */
+uint32_t arch_mm_get_pa_range(void)
+{
+	return 40;
+}
diff --git a/src/ffa_memory.c b/src/ffa_memory.c
index d68636c..c7ae984 100644
--- a/src/ffa_memory.c
+++ b/src/ffa_memory.c
@@ -8,6 +8,7 @@
 
 #include "hf/ffa_memory.h"
 
+#include "hf/arch/mm.h"
 #include "hf/arch/other_world.h"
 #include "hf/arch/plat/ffa.h"
 
@@ -751,6 +752,17 @@
 			paddr_t pa_begin =
 				pa_from_ipa(ipa_init(fragments[i][j].address));
 			paddr_t pa_end = pa_add(pa_begin, size);
+			uint32_t pa_range = arch_mm_get_pa_range();
+
+			/*
+			 * Ensure the requested region falls within the
+			 * system's PA range.
+			 */
+			if (((pa_addr(pa_begin) >> pa_range) > 0) ||
+			    ((pa_addr(pa_end) >> pa_range) > 0)) {
+				dlog_error("Region is outside of PA Range\n");
+				return false;
+			}
 
 			if (commit) {
 				vm_identity_commit(vm_locked, pa_begin, pa_end,