refactor: manifest data allocation from page pool
As a follow-up to [1], allocate the (large) struct manifest object used
during manifest parsing from the page pool, and release it once
manifest parsing completes.
Clear the manifest data memory after allocation and before deallocation
to prevent undesired data leakage.
[1] https://review.trustedfirmware.org/c/hafnium/hafnium/+/15741/10/src/init.c#45
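To illustrate the intended lifecycle, a minimal sketch of the pattern is
given below. It mirrors the calls added to src/manifest.c; the mpool API,
memset_s, align_up, MM_PPOOL_ENTRY_SIZE and the local struct manifest_data
definition are assumed to be in scope as in that file:

    /* Whole page-pool entries needed to cover the manifest data. */
    static size_t manifest_data_ppool_entries =
        (align_up(sizeof(struct manifest_data), MM_PPOOL_ENTRY_SIZE) /
         MM_PPOOL_ENTRY_SIZE);
    static struct manifest_data *manifest_data;

    static bool manifest_data_init(struct mpool *ppool)
    {
        /* Allocate contiguous pool entries for the parsing state. */
        manifest_data = (struct manifest_data *)mpool_alloc_contiguous(
            ppool, manifest_data_ppool_entries, 1);
        if (manifest_data == NULL) {
            return false;
        }
        /* Clear after allocation so stale pool contents cannot leak in. */
        memset_s(manifest_data, sizeof(struct manifest_data), 0,
                 sizeof(struct manifest_data));
        return true;
    }

    static void manifest_data_deinit(struct mpool *ppool)
    {
        /* Clear before release so parsed manifest data cannot leak out. */
        memset_s(manifest_data, sizeof(struct manifest_data), 0,
                 sizeof(struct manifest_data));
        /* Return the entries to the page pool once parsing is done. */
        mpool_add_chunk(ppool, manifest_data, manifest_data_ppool_entries);
    }

The caller in one_time_init() then only holds a struct manifest * into this
allocation and invokes manifest_deinit() once the VMs are loaded.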
Signed-off-by: Olivier Deprez <olivier.deprez@arm.com>
Change-Id: Iff9d2b9f0fd7ac234d18621ee78c56f4cb36f500
diff --git a/inc/hf/manifest.h b/inc/hf/manifest.h
index 60c6f76..d2f9428 100644
--- a/inc/hf/manifest.h
+++ b/inc/hf/manifest.h
@@ -234,7 +234,7 @@
};
enum manifest_return_code manifest_init(struct mm_stage1_locked stage1_locked,
- struct manifest *manifest,
+ struct manifest **manifest,
struct memiter *manifest_fdt,
struct mpool *ppool);
void manifest_deinit(struct mpool *ppool);
diff --git a/src/init.c b/src/init.c
index 83de0b4..a97fca1 100644
--- a/src/init.c
+++ b/src/init.c
@@ -38,12 +38,6 @@
static struct mpool ppool;
-/*
- * Hafnium manifest declaring all VMs. Notice this structure is large hence kept
- * in BSS rather than directly allocated to the stack in one_time_init.
- */
-static struct manifest manifest;
-
/**
* Performs one-time initialisation of memory management for the hypervisor.
*
@@ -81,6 +75,7 @@
void *initrd;
size_t i;
struct mm_stage1_locked mm_stage1_locked;
+ struct manifest *manifest;
arch_one_time_init();
@@ -143,7 +138,7 @@
manifest_strerror(manifest_ret));
}
- plat_ffa_set_tee_enabled(manifest.ffa_tee_enabled);
+ plat_ffa_set_tee_enabled(manifest->ffa_tee_enabled);
if (!plat_iommu_init(&fdt, mm_stage1_locked, &ppool)) {
panic("Could not initialize IOMMUs.");
@@ -162,19 +157,19 @@
/* Load all VMs. */
update.reserved_ranges_count = 0;
-	if (!load_vms(mm_stage1_locked, &manifest, &cpio, &params, &update,
+	if (!load_vms(mm_stage1_locked, manifest, &cpio, &params, &update,
&ppool)) {
panic("Unable to load VMs.");
}
- /* Now manifest parsing has completed free the resourses used. */
- manifest_deinit(&ppool);
-
- if (!boot_flow_update(mm_stage1_locked, &manifest, &update, &cpio,
+ if (!boot_flow_update(mm_stage1_locked, manifest, &update, &cpio,
&ppool)) {
panic("Unable to update boot flow.");
}
+	/* Manifest parsing has completed; free the resources used. */
+ manifest_deinit(&ppool);
+
mm_unlock_stage1(&mm_stage1_locked);
/* Enable TLB invalidation for VM page table updates. */
diff --git a/src/manifest.c b/src/manifest.c
index 90aefd7..8570148 100644
--- a/src/manifest.c
+++ b/src/manifest.c
@@ -39,53 +39,57 @@
"TrustZone VM ID clashes with normal VM range.");
/**
- * A struct to keep track of fields that are allocated by partitions
- * in the manifest.
+ * A struct to keep track of the manifest and the fields allocated by
+ * partitions during early boot manifest parsing.
*/
-struct allocated_fields {
+struct manifest_data {
+ struct manifest manifest;
struct interrupt_bitmap intids;
struct {
uintptr_t base;
uintptr_t limit;
} mem_regions[PARTITION_MAX_MEMORY_REGIONS * MAX_VMS];
};
+
/**
* Calculate the number of entries in the ppool that are required to
- * store the allocated_fields struct.
+ * store the manifest_data struct.
*/
-static size_t allocated_fields_ppool_entries =
- (align_up(sizeof(struct allocated_fields), MM_PPOOL_ENTRY_SIZE) /
+static size_t manifest_data_ppool_entries =
+ (align_up(sizeof(struct manifest_data), MM_PPOOL_ENTRY_SIZE) /
MM_PPOOL_ENTRY_SIZE);
-static struct allocated_fields *allocated_fields;
-/* Index used the track the number of memory regions allocted. */
+static struct manifest_data *manifest_data;
+/* Index used to track the number of memory regions allocated. */
static size_t allocated_mem_regions_index = 0;
/**
- * Allocates memory for the allocated fields struct in the given memory
- * pool.
+ * Allocates and clears memory for the manifest data in the given memory pool.
* Returns true if the memory is successfully allocated.
*/
-static bool manifest_allocated_fields_init(struct mpool *ppool)
+static bool manifest_data_init(struct mpool *ppool)
{
- allocated_fields = (struct allocated_fields *)mpool_alloc_contiguous(
- ppool, allocated_fields_ppool_entries, 1);
+	manifest_data = (struct manifest_data *)mpool_alloc_contiguous(
+		ppool, manifest_data_ppool_entries, 1);
+	if (manifest_data == NULL) {
+		return false;
+	}
+	memset_s(manifest_data, sizeof(struct manifest_data), 0,
+		 sizeof(struct manifest_data));
-	return allocated_fields != NULL;
+	return true;
}
/**
- * Frees the memory used for the allocated field struct in the given
- * memory pool.
+ * Frees the memory used for the manifest data in the given memory pool.
*/
-static void manifest_allocated_fields_deinit(struct mpool *ppool)
+static void manifest_data_deinit(struct mpool *ppool)
{
/**
- * Return the memory used for the allocated_fields struct to the
- * memory pool
+ * Clear and return the memory used for the manifest_data struct to the
+ * memory pool.
*/
- mpool_add_chunk(ppool, allocated_fields,
- allocated_fields_ppool_entries);
+ memset_s(manifest_data, sizeof(struct manifest_data), 0,
+ sizeof(struct manifest_data));
+ mpool_add_chunk(ppool, manifest_data, manifest_data_ppool_entries);
+
/**
* Reset the index used for tracking the number of memory regions
* allocated.
@@ -416,10 +420,9 @@
uintptr_t limit = base_address + page_count * PAGE_SIZE;
for (size_t i = 0; i < allocated_mem_regions_index; i++) {
- uintptr_t mem_region_base =
- allocated_fields->mem_regions[i].base;
+ uintptr_t mem_region_base = manifest_data->mem_regions[i].base;
uintptr_t mem_region_limit =
- allocated_fields->mem_regions[i].limit;
+ manifest_data->mem_regions[i].limit;
if ((base_address >= mem_region_base &&
base_address < mem_region_limit) ||
@@ -434,10 +437,9 @@
}
}
- allocated_fields->mem_regions[allocated_mem_regions_index].base =
+ manifest_data->mem_regions[allocated_mem_regions_index].base =
base_address;
- allocated_fields->mem_regions[allocated_mem_regions_index].limit =
- limit;
+ manifest_data->mem_regions[allocated_mem_regions_index].limit = limit;
allocated_mem_regions_index++;
return true;
@@ -543,7 +545,7 @@
struct uint32list_iter list;
uint16_t i = 0;
uint32_t j = 0;
- struct interrupt_bitmap allocated_intids = allocated_fields->intids;
+ struct interrupt_bitmap allocated_intids = manifest_data->intids;
dlog_verbose(" Partition Device Regions\n");
@@ -1037,24 +1039,25 @@
* Parse manifest from FDT.
*/
enum manifest_return_code manifest_init(struct mm_stage1_locked stage1_locked,
- struct manifest *manifest,
+ struct manifest **manifest_ret,
struct memiter *manifest_fdt,
struct mpool *ppool)
{
+ struct manifest *manifest;
struct string vm_name;
struct fdt fdt;
struct fdt_node hyp_node;
size_t i = 0;
bool found_primary_vm = false;
- memset_s(manifest, sizeof(*manifest), 0, sizeof(*manifest));
-
- /* Allocate space in the ppool for tracking the allocated fields. */
- if (!manifest_allocated_fields_init(ppool)) {
- panic("Unable to allocated space for allocated fields "
- "struct.\n");
+ /* Allocate space in the ppool for the manifest data. */
+ if (!manifest_data_init(ppool)) {
+ panic("Unable to allocate manifest data.\n");
}
+ manifest = &manifest_data->manifest;
+ *manifest_ret = manifest;
+
if (!fdt_init_from_memiter(&fdt, manifest_fdt)) {
return MANIFEST_ERROR_FILE_SIZE; /* TODO */
}
@@ -1126,10 +1129,13 @@
return MANIFEST_SUCCESS;
}
-/* Free resources used when parsing the manifest. */
+/**
+ * Free manifest data resources once manifest parsing has completed and
+ * the VMs have been loaded.
+ */
void manifest_deinit(struct mpool *ppool)
{
- manifest_allocated_fields_deinit(ppool);
+ manifest_data_deinit(ppool);
}
const char *manifest_strerror(enum manifest_return_code ret_code)
diff --git a/src/manifest_test.cc b/src/manifest_test.cc
index 0a71f8b..77e69ff 100644
--- a/src/manifest_test.cc
+++ b/src/manifest_test.cc
@@ -321,11 +321,21 @@
mpool_add_chunk(&ppool, test_heap.get(), TEST_HEAP_SIZE);
}
+ void TearDown() override
+ {
+ manifest_dealloc();
+ }
+
std::unique_ptr<uint8_t[]> test_heap;
protected:
struct mpool ppool;
+ void manifest_dealloc(void)
+ {
+ manifest_deinit(&ppool);
+ }
+
public:
/**
* Class for programatically building a Partition package.
@@ -354,26 +364,21 @@
};
enum manifest_return_code manifest_from_vec(
- struct_manifest *m, const std::vector<char> &vec)
+ struct_manifest **m, const std::vector<char> &vec)
{
struct memiter it;
struct mm_stage1_locked mm_stage1_locked;
- enum manifest_return_code ret;
memiter_init(&it, vec.data(), vec.size());
- ret = manifest_init(mm_stage1_locked, m, &it, &ppool);
- manifest_deinit(&ppool);
- return ret;
+ return manifest_init(mm_stage1_locked, m, &it, &ppool);
}
enum manifest_return_code ffa_manifest_from_vec(
- struct_manifest *m, const std::vector<char> &vec)
+ struct_manifest **m, const std::vector<char> &vec)
{
struct memiter it;
struct mm_stage1_locked mm_stage1_locked;
- enum manifest_return_code ret;
-
Partition_package spkg(vec);
/* clang-format off */
@@ -389,16 +394,14 @@
.Build();
/* clang-format on */
memiter_init(&it, core_dtb.data(), core_dtb.size());
- ret = manifest_init(mm_stage1_locked, m, &it, &ppool);
- manifest_deinit(&ppool);
- return ret;
+ return manifest_init(mm_stage1_locked, m, &it, &ppool);
}
};
TEST_F(manifest, no_hypervisor_node)
{
- struct_manifest m;
+ struct_manifest *m;
std::vector<char> dtb = ManifestDtBuilder().Build();
ASSERT_EQ(manifest_from_vec(&m, dtb),
@@ -407,7 +410,7 @@
TEST_F(manifest, no_compatible_property)
{
- struct_manifest m;
+ struct_manifest *m;
/* clang-format off */
std::vector<char> dtb = ManifestDtBuilder()
@@ -421,7 +424,7 @@
TEST_F(manifest, not_compatible)
{
- struct_manifest m;
+ struct_manifest *m;
/* clang-format off */
std::vector<char> dtb = ManifestDtBuilder()
@@ -436,7 +439,7 @@
TEST_F(manifest, compatible_one_of_many)
{
- struct_manifest m;
+ struct_manifest *m;
/* clang-format off */
std::vector<char> dtb = ManifestDtBuilder()
@@ -454,7 +457,7 @@
TEST_F(manifest, no_vm_nodes)
{
- struct_manifest m;
+ struct_manifest *m;
/* clang-format off */
std::vector<char> dtb = ManifestDtBuilder()
@@ -488,18 +491,20 @@
TEST_F(manifest, long_string)
{
- struct_manifest m;
+ struct_manifest *m;
std::vector<char> dtb_last_valid = gen_long_string_dtb(true);
std::vector<char> dtb_first_invalid = gen_long_string_dtb(false);
ASSERT_EQ(manifest_from_vec(&m, dtb_last_valid), MANIFEST_SUCCESS);
+ manifest_dealloc();
+
ASSERT_EQ(manifest_from_vec(&m, dtb_first_invalid),
MANIFEST_ERROR_STRING_TOO_LONG);
}
TEST_F(manifest, reserved_vm_id)
{
- struct_manifest m;
+ struct_manifest *m;
/* clang-format off */
std::vector<char> dtb = ManifestDtBuilder()
@@ -543,14 +548,15 @@
TEST_F(manifest, vcpu_count_limit)
{
- struct_manifest m;
+ struct_manifest *m;
std::vector<char> dtb_last_valid = gen_vcpu_count_limit_dtb(UINT16_MAX);
std::vector<char> dtb_first_invalid =
gen_vcpu_count_limit_dtb(UINT16_MAX + 1);
ASSERT_EQ(manifest_from_vec(&m, dtb_last_valid), MANIFEST_SUCCESS);
- ASSERT_EQ(m.vm_count, 2);
- ASSERT_EQ(m.vm[1].secondary.vcpu_count, UINT16_MAX);
+ ASSERT_EQ(m->vm_count, 2);
+ ASSERT_EQ(m->vm[1].secondary.vcpu_count, UINT16_MAX);
+ manifest_dealloc();
ASSERT_EQ(manifest_from_vec(&m, dtb_first_invalid),
MANIFEST_ERROR_INTEGER_OVERFLOW);
@@ -558,7 +564,7 @@
TEST_F(manifest, no_ramdisk_primary)
{
- struct_manifest m;
+ struct_manifest *m;
/* clang-format off */
std::vector<char> dtb = ManifestDtBuilder()
@@ -572,14 +578,14 @@
/* clang-format on */
ASSERT_EQ(manifest_from_vec(&m, dtb), MANIFEST_SUCCESS);
- ASSERT_EQ(m.vm_count, 1);
- ASSERT_STREQ(string_data(&m.vm[0].debug_name), "primary_vm");
- ASSERT_STREQ(string_data(&m.vm[0].primary.ramdisk_filename), "");
+ ASSERT_EQ(m->vm_count, 1);
+ ASSERT_STREQ(string_data(&m->vm[0].debug_name), "primary_vm");
+ ASSERT_STREQ(string_data(&m->vm[0].primary.ramdisk_filename), "");
}
TEST_F(manifest, no_boot_address_primary)
{
- struct_manifest m;
+ struct_manifest *m;
/* clang-format off */
std::vector<char> dtb = ManifestDtBuilder()
@@ -593,14 +599,14 @@
/* clang-format on */
ASSERT_EQ(manifest_from_vec(&m, dtb), MANIFEST_SUCCESS);
- ASSERT_EQ(m.vm_count, 1);
- ASSERT_STREQ(string_data(&m.vm[0].debug_name), "primary_vm");
- ASSERT_EQ(m.vm[0].primary.boot_address, MANIFEST_INVALID_ADDRESS);
+ ASSERT_EQ(m->vm_count, 1);
+ ASSERT_STREQ(string_data(&m->vm[0].debug_name), "primary_vm");
+ ASSERT_EQ(m->vm[0].primary.boot_address, MANIFEST_INVALID_ADDRESS);
}
TEST_F(manifest, boot_address_primary)
{
- struct_manifest m;
+ struct_manifest *m;
const uint64_t addr = UINT64_C(0x12345678ABCDEFEF);
/* clang-format off */
@@ -616,9 +622,9 @@
/* clang-format on */
ASSERT_EQ(manifest_from_vec(&m, dtb), MANIFEST_SUCCESS);
- ASSERT_EQ(m.vm_count, 1);
- ASSERT_STREQ(string_data(&m.vm[0].debug_name), "primary_vm");
- ASSERT_EQ(m.vm[0].primary.boot_address, addr);
+ ASSERT_EQ(m->vm_count, 1);
+ ASSERT_STREQ(string_data(&m->vm[0].debug_name), "primary_vm");
+ ASSERT_EQ(m->vm[0].primary.boot_address, addr);
}
static std::vector<char> gen_malformed_boolean_dtb(
@@ -639,7 +645,7 @@
TEST_F(manifest, malformed_booleans)
{
- struct_manifest m;
+ struct_manifest *m;
std::vector<char> dtb_false = gen_malformed_boolean_dtb("\"false\"");
std::vector<char> dtb_true = gen_malformed_boolean_dtb("\"true\"");
@@ -648,17 +654,23 @@
ASSERT_EQ(manifest_from_vec(&m, dtb_false),
MANIFEST_ERROR_MALFORMED_BOOLEAN);
+ manifest_dealloc();
+
ASSERT_EQ(manifest_from_vec(&m, dtb_true),
MANIFEST_ERROR_MALFORMED_BOOLEAN);
+ manifest_dealloc();
+
ASSERT_EQ(manifest_from_vec(&m, dtb_0),
MANIFEST_ERROR_MALFORMED_BOOLEAN);
+ manifest_dealloc();
+
ASSERT_EQ(manifest_from_vec(&m, dtb_1),
MANIFEST_ERROR_MALFORMED_BOOLEAN);
}
TEST_F(manifest, valid)
{
- struct_manifest m;
+ struct_manifest *m;
struct manifest_vm *vm;
/* clang-format off */
@@ -689,9 +701,9 @@
/* clang-format on */
ASSERT_EQ(manifest_from_vec(&m, dtb), MANIFEST_SUCCESS);
- ASSERT_EQ(m.vm_count, 3);
+ ASSERT_EQ(m->vm_count, 3);
- vm = &m.vm[0];
+ vm = &m->vm[0];
ASSERT_STREQ(string_data(&vm->debug_name), "primary_vm");
ASSERT_STREQ(string_data(&vm->kernel_filename), "primary_kernel");
ASSERT_STREQ(string_data(&vm->primary.ramdisk_filename),
@@ -701,7 +713,7 @@
ElementsAre(0x32000000, 0x33001111));
ASSERT_FALSE(vm->smc_whitelist.permissive);
- vm = &m.vm[1];
+ vm = &m->vm[1];
ASSERT_STREQ(string_data(&vm->debug_name), "first_secondary_vm");
ASSERT_STREQ(string_data(&vm->kernel_filename), "");
ASSERT_EQ(vm->secondary.vcpu_count, 42);
@@ -711,7 +723,7 @@
ElementsAre(0x04000000, 0x30002222, 0x31445566));
ASSERT_TRUE(vm->smc_whitelist.permissive);
- vm = &m.vm[2];
+ vm = &m->vm[2];
ASSERT_STREQ(string_data(&vm->debug_name), "second_secondary_vm");
ASSERT_STREQ(string_data(&vm->kernel_filename),
"second_secondary_kernel");
@@ -725,7 +737,7 @@
TEST_F(manifest, ffa_not_compatible)
{
- struct_manifest m;
+ struct_manifest *m;
/* clang-format off */
std::vector<char> dtb = ManifestDtBuilder()
@@ -748,7 +760,7 @@
TEST_F(manifest, ffa_missing_property)
{
- struct_manifest m;
+ struct_manifest *m;
/* clang-format off */
std::vector<char> dtb = ManifestDtBuilder()
@@ -767,7 +779,7 @@
* TODO: write test excluding all optional fields of the manifest, in
* accordance with specification.
*/
- struct_manifest m;
+ struct_manifest *m;
/* Incompatible version */
/* clang-format off */
@@ -787,6 +799,7 @@
/* clang-format on */
ASSERT_EQ(ffa_manifest_from_vec(&m, dtb),
MANIFEST_ERROR_NOT_COMPATIBLE);
+ manifest_dealloc();
/* Incompatible translation granule */
/* clang-format off */
@@ -806,6 +819,7 @@
/* clang-format on */
ASSERT_EQ(ffa_manifest_from_vec(&m, dtb),
MANIFEST_ERROR_NOT_COMPATIBLE);
+ manifest_dealloc();
/* Incompatible exeption level */
/* clang-format off */
@@ -825,6 +839,7 @@
/* clang-format on */
ASSERT_EQ(ffa_manifest_from_vec(&m, dtb),
MANIFEST_ERROR_NOT_COMPATIBLE);
+ manifest_dealloc();
/* Incompatible execution state */
/* clang-format off */
@@ -844,6 +859,7 @@
/* clang-format on */
ASSERT_EQ(ffa_manifest_from_vec(&m, dtb),
MANIFEST_ERROR_NOT_COMPATIBLE);
+ manifest_dealloc();
/* Incompatible messaging method */
/* clang-format off */
@@ -863,6 +879,7 @@
/* clang-format on */
ASSERT_EQ(ffa_manifest_from_vec(&m, dtb),
MANIFEST_ERROR_NOT_COMPATIBLE);
+ manifest_dealloc();
/* Incompatible NS interrupt action */
/* clang-format off */
@@ -885,7 +902,7 @@
TEST_F(manifest, ffa_validate_rxtx_info)
{
- struct_manifest m;
+ struct_manifest *m;
/* Not Compatible */
/* clang-format off */
@@ -898,6 +915,7 @@
/* clang-format on */
ASSERT_EQ(ffa_manifest_from_vec(&m, dtb),
MANIFEST_ERROR_NOT_COMPATIBLE);
+ manifest_dealloc();
/* Missing Properties */
/* clang-format off */
@@ -914,7 +932,7 @@
TEST_F(manifest, ffa_validate_mem_regions)
{
- struct_manifest m;
+ struct_manifest *m;
/* Not Compatible */
/* clang-format off */
@@ -927,6 +945,7 @@
/* clang-format on */
ASSERT_EQ(ffa_manifest_from_vec(&m, dtb),
MANIFEST_ERROR_NOT_COMPATIBLE);
+ manifest_dealloc();
/* Memory regions unavailable */
/* clang-format off */
@@ -939,6 +958,7 @@
/* clang-format on */
ASSERT_EQ(ffa_manifest_from_vec(&m, dtb),
MANIFEST_ERROR_MEMORY_REGION_NODE_EMPTY);
+ manifest_dealloc();
/* Missing Properties */
/* clang-format off */
@@ -954,6 +974,7 @@
/* clang-format on */
ASSERT_EQ(ffa_manifest_from_vec(&m, dtb),
MANIFEST_ERROR_PROPERTY_NOT_FOUND);
+ manifest_dealloc();
/* Overlapping memory regions */
/* clang-format off */
@@ -980,6 +1001,7 @@
/* clang-format on */
ASSERT_EQ(ffa_manifest_from_vec(&m, dtb),
MANIFEST_ERROR_MEM_REGION_OVERLAP);
+ manifest_dealloc();
/* clang-format off */
dtb = ManifestDtBuilder()
@@ -1005,6 +1027,7 @@
/* clang-format on */
ASSERT_EQ(ffa_manifest_from_vec(&m, dtb),
MANIFEST_ERROR_MEM_REGION_OVERLAP);
+ manifest_dealloc();
/* clang-format off */
dtb = ManifestDtBuilder()
@@ -1030,6 +1053,7 @@
/* clang-format on */
ASSERT_EQ(ffa_manifest_from_vec(&m, dtb),
MANIFEST_ERROR_MEM_REGION_OVERLAP);
+ manifest_dealloc();
/* Different RXTX buffer sizes */
/* clang-format off */
@@ -1065,7 +1089,7 @@
TEST_F(manifest, ffa_validate_dev_regions)
{
- struct_manifest m;
+ struct_manifest *m;
/* Not Compatible */
/* clang-format off */
@@ -1078,6 +1102,7 @@
/* clang-format on */
ASSERT_EQ(ffa_manifest_from_vec(&m, dtb),
MANIFEST_ERROR_NOT_COMPATIBLE);
+ manifest_dealloc();
/* Memory regions unavailable */
/* clang-format off */
@@ -1090,6 +1115,7 @@
/* clang-format on */
ASSERT_EQ(ffa_manifest_from_vec(&m, dtb),
MANIFEST_ERROR_DEVICE_REGION_NODE_EMPTY);
+ manifest_dealloc();
/* Missing Properties */
/* clang-format off */
@@ -1105,6 +1131,7 @@
/* clang-format on */
ASSERT_EQ(ffa_manifest_from_vec(&m, dtb),
MANIFEST_ERROR_PROPERTY_NOT_FOUND);
+ manifest_dealloc();
/* Malformed interrupt list pair */
/* clang-format off */
@@ -1126,6 +1153,7 @@
/* clang-format on */
ASSERT_EQ(ffa_manifest_from_vec(&m, dtb),
MANIFEST_ERROR_MALFORMED_INTEGER_LIST);
+ manifest_dealloc();
/* Non-unique interrupt IDs */
/* clang-format off */
@@ -1153,15 +1181,17 @@
ASSERT_EQ(ffa_manifest_from_vec(&m, dtb),
MANIFEST_ERROR_INTERRUPT_ID_REPEATED);
/* Check valid interrupts were still mapped */
- ASSERT_EQ(m.vm[0].partition.dev_regions[0].interrupts[0].id, 2);
- ASSERT_EQ(m.vm[0].partition.dev_regions[0].interrupts[0].attributes, 3);
- ASSERT_EQ(m.vm[0].partition.dev_regions[1].interrupts[0].id, 1);
- ASSERT_EQ(m.vm[0].partition.dev_regions[1].interrupts[0].attributes, 3);
+ ASSERT_EQ(m->vm[0].partition.dev_regions[0].interrupts[0].id, 2);
+ ASSERT_EQ(m->vm[0].partition.dev_regions[0].interrupts[0].attributes,
+ 3);
+ ASSERT_EQ(m->vm[0].partition.dev_regions[1].interrupts[0].id, 1);
+ ASSERT_EQ(m->vm[0].partition.dev_regions[1].interrupts[0].attributes,
+ 3);
}
TEST_F(manifest, ffa_invalid_memory_region_attributes)
{
- struct_manifest m;
+ struct_manifest *m;
/* clang-format off */
std::vector<char> dtb = ManifestDtBuilder()
@@ -1203,7 +1233,7 @@
TEST_F(manifest, ffa_invalid_device_region_attributes)
{
- struct_manifest m;
+ struct_manifest *m;
/* clang-format off */
std::vector<char> dtb = ManifestDtBuilder()
@@ -1257,7 +1287,8 @@
TEST_F(manifest, ffa_valid)
{
- struct_manifest m;
+ struct manifest_vm *vm;
+ struct_manifest *m;
/* clang-format off */
std::vector<char> dtb = ManifestDtBuilder()
@@ -1319,42 +1350,43 @@
ASSERT_EQ(ffa_manifest_from_vec(&m, dtb), MANIFEST_SUCCESS);
- ASSERT_EQ(m.vm[0].partition.ffa_version, 0x10000);
+ vm = &m->vm[0];
+ ASSERT_EQ(vm->partition.ffa_version, 0x10000);
ASSERT_THAT(
- std::span(m.vm[0].partition.uuid.uuid, 4),
+ std::span(vm->partition.uuid.uuid, 4),
ElementsAre(0xb4b5671e, 0x4a904fe1, 0xb81ffb13, 0xdae1dacb));
- ASSERT_EQ(m.vm[0].partition.execution_ctx_count, 1);
- ASSERT_EQ(m.vm[0].partition.run_time_el, S_EL1);
- ASSERT_EQ(m.vm[0].partition.execution_state, AARCH64);
- ASSERT_EQ(m.vm[0].partition.ep_offset, 0x00002000);
- ASSERT_EQ(m.vm[0].partition.xlat_granule, PAGE_4KB);
- ASSERT_EQ(m.vm[0].partition.boot_order, 0);
- ASSERT_EQ(m.vm[0].partition.messaging_method,
- FFA_PARTITION_INDIRECT_MSG);
- ASSERT_EQ(m.vm[0].partition.ns_interrupts_action, NS_ACTION_ME);
- ASSERT_EQ(m.vm[0].partition.mem_regions[0].base_address, 0x7100000);
- ASSERT_EQ(m.vm[0].partition.mem_regions[0].page_count, 4);
- ASSERT_EQ(m.vm[0].partition.mem_regions[0].attributes, 3);
- ASSERT_EQ(m.vm[0].partition.mem_regions[1].attributes, (8 | 3));
- ASSERT_EQ(m.vm[0].partition.rxtx.available, true);
- ASSERT_EQ(m.vm[0].partition.rxtx.rx_buffer->base_address, 0x7300000);
- ASSERT_EQ(m.vm[0].partition.rxtx.rx_buffer->page_count, 1);
- ASSERT_EQ(m.vm[0].partition.rxtx.rx_buffer->attributes, 1);
- ASSERT_EQ(m.vm[0].partition.rxtx.tx_buffer->base_address, 0x7301000);
- ASSERT_EQ(m.vm[0].partition.rxtx.tx_buffer->page_count, 1);
- ASSERT_EQ(m.vm[0].partition.rxtx.tx_buffer->attributes, 3);
- ASSERT_EQ(m.vm[0].partition.dev_regions[0].base_address, 0x7400000);
- ASSERT_EQ(m.vm[0].partition.dev_regions[0].page_count, 16);
+ ASSERT_EQ(vm->partition.execution_ctx_count, 1);
+ ASSERT_EQ(vm->partition.run_time_el, S_EL1);
+ ASSERT_EQ(vm->partition.execution_state, AARCH64);
+ ASSERT_EQ(vm->partition.ep_offset, 0x00002000);
+ ASSERT_EQ(vm->partition.xlat_granule, PAGE_4KB);
+ ASSERT_EQ(vm->partition.boot_order, 0);
+ ASSERT_EQ(vm->partition.messaging_method, FFA_PARTITION_INDIRECT_MSG);
+ ASSERT_EQ(vm->partition.ns_interrupts_action, NS_ACTION_ME);
+ ASSERT_EQ(vm->partition.mem_regions[0].base_address, 0x7100000);
+ ASSERT_EQ(vm->partition.mem_regions[0].page_count, 4);
+ ASSERT_EQ(vm->partition.mem_regions[0].attributes, 3);
+ ASSERT_EQ(vm->partition.mem_regions[1].attributes, (8 | 3));
- ASSERT_EQ(m.vm[0].partition.dev_regions[0].attributes, 3);
- ASSERT_EQ(m.vm[0].partition.dev_regions[0].smmu_id, 1);
- ASSERT_EQ(m.vm[0].partition.dev_regions[0].stream_ids[0], 0);
- ASSERT_EQ(m.vm[0].partition.dev_regions[0].stream_ids[1], 1);
- ASSERT_EQ(m.vm[0].partition.dev_regions[0].interrupts[0].id, 2);
- ASSERT_EQ(m.vm[0].partition.dev_regions[0].interrupts[0].attributes, 3);
- ASSERT_EQ(m.vm[0].partition.dev_regions[0].interrupts[1].id, 4);
- ASSERT_EQ(m.vm[0].partition.dev_regions[0].interrupts[1].attributes, 5);
- ASSERT_EQ(m.vm[0].partition.dev_regions[1].attributes, (8 | 1));
+ ASSERT_EQ(vm->partition.rxtx.available, true);
+ ASSERT_EQ(vm->partition.rxtx.rx_buffer->base_address, 0x7300000);
+ ASSERT_EQ(vm->partition.rxtx.rx_buffer->page_count, 1);
+ ASSERT_EQ(vm->partition.rxtx.rx_buffer->attributes, 1);
+ ASSERT_EQ(vm->partition.rxtx.tx_buffer->base_address, 0x7301000);
+ ASSERT_EQ(vm->partition.rxtx.tx_buffer->page_count, 1);
+ ASSERT_EQ(vm->partition.rxtx.tx_buffer->attributes, 3);
+
+ ASSERT_EQ(vm->partition.dev_regions[0].base_address, 0x7400000);
+ ASSERT_EQ(vm->partition.dev_regions[0].page_count, 16);
+ ASSERT_EQ(vm->partition.dev_regions[0].attributes, 3);
+ ASSERT_EQ(vm->partition.dev_regions[0].smmu_id, 1);
+ ASSERT_EQ(vm->partition.dev_regions[0].stream_ids[0], 0);
+ ASSERT_EQ(vm->partition.dev_regions[0].stream_ids[1], 1);
+ ASSERT_EQ(vm->partition.dev_regions[0].interrupts[0].id, 2);
+ ASSERT_EQ(vm->partition.dev_regions[0].interrupts[0].attributes, 3);
+ ASSERT_EQ(vm->partition.dev_regions[0].interrupts[1].id, 4);
+ ASSERT_EQ(vm->partition.dev_regions[0].interrupts[1].attributes, 5);
+ ASSERT_EQ(vm->partition.dev_regions[1].attributes, (8 | 1));
}
} /* namespace */