feat: helper to reset and free partition notification bindings

The helper utility `vm_reset_notifications` added in this patch is
needed when the partition manager reclaims resources from an aborted
partition.

The `ffa_vm_free_resources` function is extended to accept a new
parameter that tracks the associated memory page pool. This allows any
memory released by the partition manager to be returned to that page
pool for reallocation.

Moreover, for better clarity, the function `ffa_vm_destroy` has been
renamed to `ffa_vm_nwd_free`, and the function `ffa_vm_nwd_create` has
been renamed to `ffa_vm_nwd_alloc`.

Change-Id: I868bef6bdbbde468f2bf2ce5597a55c6ae95d777
Signed-off-by: Madhukar Pappireddy <madhukar.pappireddy@arm.com>
diff --git a/src/api.c b/src/api.c
index 3bc2f9c..3ba1993 100644
--- a/src/api.c
+++ b/src/api.c
@@ -98,6 +98,11 @@
 	mpool_init_from(&api_page_pool, ppool);
 }
 
+struct mpool *api_get_ppool(void)
+{
+	return &api_page_pool;
+}
+
 /**
  * Get target VM vCPU:
  * If VM is UP then return first vCPU.
@@ -1992,7 +1997,7 @@
 
 	vm->mailbox.send = NULL;
 	vm->mailbox.recv = NULL;
-	ffa_vm_destroy(vm_locked);
+	ffa_vm_nwd_free(vm_locked);
 
 	/* Forward buffer unmapping to SPMC if coming from a VM. */
 	ffa_setup_rxtx_unmap_forward(vm_locked);
diff --git a/src/arch/aarch64/hypervisor/handler.c b/src/arch/aarch64/hypervisor/handler.c
index bf5155a..f3c290e 100644
--- a/src/arch/aarch64/hypervisor/handler.c
+++ b/src/arch/aarch64/hypervisor/handler.c
@@ -583,7 +583,8 @@
 		*args = api_ffa_console_log(*args, current);
 		return true;
 	case FFA_ERROR_32:
-		*args = ffa_cpu_cycles_error_32(current, next, args->arg2);
+		*args = ffa_cpu_cycles_error_32(current, next, args->arg2,
+						api_get_ppool());
 		return true;
 	case FFA_ABORT_32:
 	case FFA_ABORT_64:
@@ -1141,7 +1142,7 @@
 	 * SPMC de-allocates and/or uninitializes all the resources allocated
 	 * to the partition.
 	 */
-	ffa_vm_free_resources(vm_locked);
+	ffa_vm_free_resources(vm_locked, api_get_ppool());
 	vm_unlock(&vm_locked);
 
 	/*
diff --git a/src/arch/fake/hypervisor/ffa.c b/src/arch/fake/hypervisor/ffa.c
index 9a191fa..e437b3f 100644
--- a/src/arch/fake/hypervisor/ffa.c
+++ b/src/arch/fake/hypervisor/ffa.c
@@ -76,7 +76,7 @@
 	return false;
 }
 
-void ffa_vm_destroy(struct vm_locked to_destroy_locked)
+void ffa_vm_nwd_free(struct vm_locked to_destroy_locked)
 {
 	(void)to_destroy_locked;
 }
@@ -604,11 +604,13 @@
 
 struct ffa_value ffa_cpu_cycles_error_32(struct vcpu *current,
 					 struct vcpu **next,
-					 enum ffa_error error_code)
+					 enum ffa_error error_code,
+					 struct mpool *ppool)
 {
 	(void)current;
 	(void)next;
 	(void)error_code;
+	(void)ppool;
 
 	return ffa_error(FFA_NOT_SUPPORTED);
 }
@@ -618,9 +620,10 @@
 	return false;
 }
 
-void ffa_vm_free_resources(struct vm_locked vm_locked)
+void ffa_vm_free_resources(struct vm_locked vm_locked, struct mpool *ppool)
 {
 	(void)vm_locked;
+	(void)ppool;
 }
 
 bool arch_vm_iommu_mm_identity_map(struct vm_locked vm_locked, paddr_t begin,
diff --git a/src/ffa/absent.c b/src/ffa/absent.c
index 63c9a31..cbe0618 100644
--- a/src/ffa/absent.c
+++ b/src/ffa/absent.c
@@ -105,7 +105,7 @@
 	(void)vm_locked;
 }
 
-void ffa_vm_destroy(struct vm_locked to_destroy_locked)
+void ffa_vm_nwd_free(struct vm_locked to_destroy_locked)
 {
 	(void)to_destroy_locked;
 }
@@ -571,9 +571,10 @@
 	return false;
 }
 
-void ffa_vm_free_resources(struct vm_locked vm_locked)
+void ffa_vm_free_resources(struct vm_locked vm_locked, struct mpool *ppool)
 {
 	(void)vm_locked;
+	(void)ppool;
 }
 
 bool ffa_direct_msg_handle_framework_msg(struct ffa_value args,
diff --git a/src/ffa/hypervisor/vm.c b/src/ffa/hypervisor/vm.c
index 08b1cff..8d6e075 100644
--- a/src/ffa/hypervisor/vm.c
+++ b/src/ffa/hypervisor/vm.c
@@ -49,13 +49,14 @@
 	return false;
 }
 
-void ffa_vm_destroy(struct vm_locked to_destroy_locked)
+void ffa_vm_nwd_free(struct vm_locked to_destroy_locked)
 {
 	/* Hypervisor never frees VM structs. */
 	(void)to_destroy_locked;
 }
 
-void ffa_vm_free_resources(struct vm_locked vm_locked)
+void ffa_vm_free_resources(struct vm_locked vm_locked, struct mpool *ppool)
 {
 	(void)vm_locked;
+	(void)ppool;
 }
diff --git a/src/ffa/spmc/cpu_cycles.c b/src/ffa/spmc/cpu_cycles.c
index d9cab9c..0ddbc30 100644
--- a/src/ffa/spmc/cpu_cycles.c
+++ b/src/ffa/spmc/cpu_cycles.c
@@ -843,7 +843,8 @@
  */
 struct ffa_value ffa_cpu_cycles_error_32(struct vcpu *current,
 					 struct vcpu **next,
-					 enum ffa_error error_code)
+					 enum ffa_error error_code,
+					 struct mpool *ppool)
 {
 	struct vcpu_locked current_locked;
 	struct vm_locked vm_locked;
@@ -859,7 +860,7 @@
 			   vcpu_index(current));
 
 		CHECK(vm_set_state(vm_locked, VM_STATE_ABORTING));
-		ffa_vm_free_resources(vm_locked);
+		ffa_vm_free_resources(vm_locked, ppool);
 
 		if (sp_boot_next(current_locked, next)) {
 			goto out;
diff --git a/src/ffa/spmc/notifications.c b/src/ffa/spmc/notifications.c
index ff9b2b6..0f95ec3 100644
--- a/src/ffa/spmc/notifications.c
+++ b/src/ffa/spmc/notifications.c
@@ -20,8 +20,6 @@
 #include "hf/types.h"
 #include "hf/vm.h"
 
-#include "./vm.h"
-
 /** Interrupt priority for the Schedule Receiver Interrupt. */
 #define SRI_PRIORITY UINT32_C(0xf0)
 
@@ -252,7 +250,7 @@
 		vm_locked.vm->notifications.enabled = true;
 	} else {
 		/* Else should regard with NWd VM ID. */
-		vm_locked = ffa_vm_nwd_create(vm_id);
+		vm_locked = ffa_vm_nwd_alloc(vm_id);
 
 		/* If received NULL, there are no slots for VM creation. */
 		if (vm_locked.vm == NULL) {
@@ -313,7 +311,7 @@
 	vm_notifications_init(to_destroy_locked.vm,
 			      to_destroy_locked.vm->vcpu_count, NULL);
 	if (vm_id != HF_OTHER_WORLD_ID) {
-		ffa_vm_destroy(to_destroy_locked);
+		ffa_vm_nwd_free(to_destroy_locked);
 	}
 
 out:
diff --git a/src/ffa/spmc/vm.c b/src/ffa/spmc/vm.c
index 876b636..2df7135 100644
--- a/src/ffa/spmc/vm.c
+++ b/src/ffa/spmc/vm.c
@@ -10,6 +10,7 @@
 
 #include "hf/arch/std.h"
 
+#include "hf/api.h"
 #include "hf/check.h"
 #include "hf/ffa/vm.h"
 #include "hf/plat/interrupts.h"
@@ -73,7 +74,7 @@
  * If a VM with the ID already exists return it.
  * Return NULL if it can't allocate a new VM.
  */
-struct vm_locked ffa_vm_nwd_create(ffa_id_t vm_id)
+struct vm_locked ffa_vm_nwd_alloc(ffa_id_t vm_id)
 {
 	struct vm_locked vm_locked;
 	struct nwd_vms_locked nwd_vms_locked = nwd_vms_lock();
@@ -105,7 +106,7 @@
 	return vm_locked;
 }
 
-void ffa_vm_destroy(struct vm_locked to_destroy_locked)
+void ffa_vm_nwd_free(struct vm_locked to_destroy_locked)
 {
 	struct vm *vm = to_destroy_locked.vm;
 	/*
@@ -171,7 +172,7 @@
 		return vm_find_locked(vm_id);
 	}
 
-	return ffa_vm_nwd_create(vm_id);
+	return ffa_vm_nwd_alloc(vm_id);
 }
 
 bool ffa_vm_notifications_info_get(uint16_t *ids, uint32_t *ids_count,
@@ -235,10 +236,16 @@
 /**
  * Reclaim all resources belonging to VM in aborted state.
  */
-void ffa_vm_free_resources(struct vm_locked vm_locked)
+void ffa_vm_free_resources(struct vm_locked vm_locked, struct mpool *ppool)
 {
 	/*
 	 * Gracefully disable all interrupts belonging to SP.
 	 */
 	ffa_vm_disable_interrupts(vm_locked);
+
+	/*
+	 * Reset all notifications for this partition i.e. clear and unbind
+	 * them.
+	 */
+	vm_reset_notifications(vm_locked, ppool);
 }
diff --git a/src/vm.c b/src/vm.c
index fa05c8a..2e22bf9 100644
--- a/src/vm.c
+++ b/src/vm.c
@@ -480,6 +480,30 @@
 	vm_notifications_init_bindings(&vm->notifications.from_vm);
 }
 
+void vm_reset_notifications(struct vm_locked vm_locked, struct mpool *ppool)
+{
+	struct vm *vm = vm_locked.vm;
+
+	/* Clear from_vm notifications. */
+	struct notifications *from_vm = &vm->notifications.from_vm;
+
+	/* Clear from_sp notifications. */
+	struct notifications *from_sp = &vm->notifications.from_sp;
+
+	size_t notif_ppool_entries =
+		(align_up(sizeof(struct notifications_state) * (vm->vcpu_count),
+			  MM_PPOOL_ENTRY_SIZE) /
+		 MM_PPOOL_ENTRY_SIZE);
+
+	/*
+	 * Free the memory allocated to per_vcpu notifications state.
+	 * The other fields related to notifications need not be cleared
+	 * explicitly here as they will be zeroed during vm reinitialization.
+	 */
+	mpool_add_chunk(ppool, from_vm->per_vcpu, notif_ppool_entries);
+	mpool_add_chunk(ppool, from_sp->per_vcpu, notif_ppool_entries);
+}
+
 /**
  * Checks if there are pending notifications.
  */