refactor: dynamically allocate per-vCPU notifications
This patch allocates the per-vCPU notification structures dynamically,
based on the VM's vCPU count, and removes the dependency on the
MAX_CPUS macro. This should further help reduce Hafnium's memory
footprint.
Signed-off-by: Raghu Krishnamurthy <raghu.ncstate@gmail.com>
Change-Id: I1f6df87e328e80ebfb9c8a0ac8cb07cef03c9714
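For reference, below is a minimal standalone sketch of the sizing
computation that the vm.c change uses to carve each per-vCPU array out
of the memory pool. The MM_PPOOL_ENTRY_SIZE value and the
notifications_state layout are placeholders chosen for illustration
only; the real definitions come from the Hafnium headers.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define MM_PPOOL_ENTRY_SIZE 4096 /* assumed: one mpool entry per 4 KiB page */

/* Stand-in for the real notifications_state; layout is illustrative. */
struct notifications_state {
	uint64_t pending;
	uint64_t info_get_retrieved;
};

static size_t align_up(size_t value, size_t alignment)
{
	return (value + alignment - 1) / alignment * alignment;
}

int main(void)
{
	for (uint32_t vcpu_count = 1; vcpu_count <= 8; vcpu_count *= 2) {
		/* Number of pool entries needed for one per-vCPU array. */
		size_t entries =
			align_up(sizeof(struct notifications_state) *
					 vcpu_count,
				 MM_PPOOL_ENTRY_SIZE) /
			MM_PPOOL_ENTRY_SIZE;
		printf("%u vCPUs -> %zu mpool entries per per-vCPU array\n",
		       (unsigned)vcpu_count, entries);
	}
	return 0;
}

Each VM's from_sp and from_vm per-vCPU arrays get their own allocation,
sized by that VM's vcpu_count rather than by MAX_CPUS.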
diff --git a/src/arch/aarch64/plat/ffa/absent.c b/src/arch/aarch64/plat/ffa/absent.c
index abab7a4..bb8b9e8 100644
--- a/src/arch/aarch64/plat/ffa/absent.c
+++ b/src/arch/aarch64/plat/ffa/absent.c
@@ -41,8 +41,9 @@
(void)tee_enabled;
}
-void plat_ffa_init(void)
+void plat_ffa_init(struct mpool *ppool)
{
+ (void)ppool;
}
/**
diff --git a/src/arch/aarch64/plat/ffa/hypervisor.c b/src/arch/aarch64/plat/ffa/hypervisor.c
index 9629a61..1668ac1 100644
--- a/src/arch/aarch64/plat/ffa/hypervisor.c
+++ b/src/arch/aarch64/plat/ffa/hypervisor.c
@@ -79,11 +79,13 @@
CHECK(ret.func == FFA_SUCCESS_32);
}
-void plat_ffa_init(void)
+void plat_ffa_init(struct mpool *ppool)
{
struct vm *other_world_vm = vm_find(HF_OTHER_WORLD_ID);
struct ffa_value ret;
+ (void)ppool;
+
if (!ffa_tee_enabled) {
return;
}
diff --git a/src/arch/aarch64/plat/ffa/spmc.c b/src/arch/aarch64/plat/ffa/spmc.c
index 2df8fc7..0fb61f6 100644
--- a/src/arch/aarch64/plat/ffa/spmc.c
+++ b/src/arch/aarch64/plat/ffa/spmc.c
@@ -206,17 +206,21 @@
return smc_ffa_call((struct ffa_value){.func = FFA_ID_GET_32});
}
-static void plat_ffa_vm_init(void)
+static void plat_ffa_vm_init(struct mpool *ppool)
{
/* Init NWd VMs structures for use of Notifications interfaces. */
for (uint32_t i = 0; i < nwd_vms_size; i++) {
/*
- * A slot in 'nwd_vms' is considered available if its id
- * is HF_INVALID_VM_ID.
+ * Note that vm_init() is not called on nwd_vms. This means that
+ * dynamically allocated structures, such as vcpus, are left
+ * as NULL in the nwd_vms structures. This is okay, since as of
+ * today, the vcpu structures are not used. This also helps
+ * reduce the memory footprint. A slot in 'nwd_vms' is considered
+ * available if its id is HF_INVALID_VM_ID.
*/
nwd_vms[i].id = HF_INVALID_VM_ID;
- vm_notifications_init_bindings(
- &nwd_vms[i].notifications.from_sp);
+ nwd_vms[i].vcpu_count = MAX_CPUS;
+ vm_notifications_init(&nwd_vms[i], MAX_CPUS, ppool);
}
}
@@ -225,10 +229,10 @@
(void)tee_enabled;
}
-void plat_ffa_init(void)
+void plat_ffa_init(struct mpool *ppool)
{
arch_ffa_init();
- plat_ffa_vm_init();
+ plat_ffa_vm_init(ppool);
}
bool plat_ffa_run_forward(ffa_vm_id_t vm_id, ffa_vcpu_index_t vcpu_idx,
@@ -944,8 +948,8 @@
}
to_destroy_locked.vm->notifications.enabled = false;
- vm_notifications_init_bindings(
- &to_destroy_locked.vm->notifications.from_sp);
+ vm_notifications_init(to_destroy_locked.vm,
+ to_destroy_locked.vm->vcpu_count, NULL);
if (vm_id != HF_OTHER_WORLD_ID) {
plat_ffa_vm_destroy(to_destroy_locked);
}
diff --git a/src/init.c b/src/init.c
index b391d50..83de0b4 100644
--- a/src/init.c
+++ b/src/init.c
@@ -177,14 +177,14 @@
mm_unlock_stage1(&mm_stage1_locked);
- /* Initialise the API page pool. ppool will be empty from now on. */
- api_init(&ppool);
-
/* Enable TLB invalidation for VM page table updates. */
mm_vm_enable_invalidation();
/* Perform platform specific FF-A initialization. */
- plat_ffa_init();
+ plat_ffa_init(&ppool);
+
+ /* Initialise the API page pool. ppool will be empty from now on. */
+ api_init(&ppool);
dlog_info("Hafnium initialisation completed\n");
}
diff --git a/src/vm.c b/src/vm.c
index 896b0ef..7f52cec 100644
--- a/src/vm.c
+++ b/src/vm.c
@@ -81,6 +81,7 @@
vm->vcpus = (struct vcpu *)mpool_alloc_contiguous(
ppool, vcpu_ppool_entries, 1);
CHECK(vm->vcpus != NULL);
+
vm->mailbox.state = MAILBOX_STATE_EMPTY;
atomic_init(&vm->aborting, false);
vm->el0_partition = el0_partition;
@@ -101,10 +102,7 @@
vcpu_init(vm_get_vcpu(vm, i), vm);
}
- /* Basic initialization of the notifications structure. */
- vm_notifications_init_bindings(&vm->notifications.from_sp);
- vm_notifications_init_bindings(&vm->notifications.from_vm);
-
+ vm_notifications_init(vm, vcpu_count, ppool);
return vm;
}
@@ -445,15 +443,68 @@
}
/*
+ * Dynamically allocate the per-vCPU notifications structures for a given VM.
+ */
+static void vm_notifications_init_per_vcpu_notifications(
+ struct vm *vm, ffa_vcpu_count_t vcpu_count, struct mpool *ppool)
+{
+ size_t notif_ppool_entries =
+ (align_up(sizeof(struct notifications_state) * vcpu_count,
+ MM_PPOOL_ENTRY_SIZE) /
+ MM_PPOOL_ENTRY_SIZE);
+
+ /*
+ * Allow this function to be called on already initialized VMs whose
+ * notification structures only need to be cleared.
+ */
+ if (vm->notifications.from_sp.per_vcpu == NULL) {
+ assert(vm->notifications.from_vm.per_vcpu == NULL);
+ assert(vcpu_count != 0);
+ CHECK(ppool != NULL);
+ vm->notifications.from_sp.per_vcpu =
+ (struct notifications_state *)mpool_alloc_contiguous(
+ ppool, notif_ppool_entries, 1);
+ CHECK(vm->notifications.from_sp.per_vcpu != NULL);
+
+ vm->notifications.from_vm.per_vcpu =
+ (struct notifications_state *)mpool_alloc_contiguous(
+ ppool, notif_ppool_entries, 1);
+ CHECK(vm->notifications.from_vm.per_vcpu != NULL);
+ } else {
+ assert(vm->notifications.from_vm.per_vcpu != NULL);
+ }
+
+ memset_s(vm->notifications.from_sp.per_vcpu,
+ sizeof(*(vm->notifications.from_sp.per_vcpu)) * vcpu_count, 0,
+ sizeof(*(vm->notifications.from_sp.per_vcpu)) * vcpu_count);
+ memset_s(vm->notifications.from_vm.per_vcpu,
+ sizeof(*(vm->notifications.from_vm.per_vcpu)) * vcpu_count, 0,
+ sizeof(*(vm->notifications.from_vm.per_vcpu)) * vcpu_count);
+}
+
+/*
* Initializes the notifications structure.
*/
-void vm_notifications_init_bindings(struct notifications *notifications)
+static void vm_notifications_init_bindings(struct notifications *notifications)
{
for (uint32_t i = 0U; i < MAX_FFA_NOTIFICATIONS; i++) {
notifications->bindings_sender_id[i] = HF_INVALID_VM_ID;
}
}
+/*
+ * Initialize notification related structures for a VM.
+ */
+void vm_notifications_init(struct vm *vm, ffa_vcpu_count_t vcpu_count,
+ struct mpool *ppool)
+{
+ vm_notifications_init_per_vcpu_notifications(vm, vcpu_count, ppool);
+
+ /* Basic initialization of the notifications structure. */
+ vm_notifications_init_bindings(&vm->notifications.from_sp);
+ vm_notifications_init_bindings(&vm->notifications.from_vm);
+}
+
/**
* Checks if there are pending notifications.
*/