feat(notifications): bitmap create and destroy

Handle FF-A calls FFA_NOTIFICATION_BITMAP_CREATE and
FFA_NOTIFICATION_BITMAP_DESTROY.
These interfaces are to be used by the NWd (Hypervisor or a single OS
kernel).
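
For reference, the register layout expected by the handler (a minimal
sketch of a NWd-side invocation; `ffa_call` is assumed to be the usual
SMC wrapper from the VM API, and `vm_id`/`vcpu_count` are placeholders):

  /* Hypothetical caller-side usage of the new ABI. */
  struct ffa_value ret = ffa_call((struct ffa_value){
          .func = FFA_NOTIFICATION_BITMAP_CREATE_32,
          .arg1 = vm_id,        /* w1: ID of the VM owning the bitmap. */
          .arg2 = vcpu_count}); /* w2: number of vCPUs of that VM. */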

Change-Id: I0d805875ae6c7f8c080bda04e9e496d9e0d79b00
Signed-off-by: J-Alves <joao.alves@arm.com>
diff --git a/src/api.c b/src/api.c
index 369e40b..94cd8a6 100644
--- a/src/api.c
+++ b/src/api.c
@@ -2550,3 +2550,31 @@
 
 	return (struct ffa_value){.func = FFA_SUCCESS_32};
 }
+
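+/**
+ * Handles the FFA_NOTIFICATION_BITMAP_CREATE call: checks that the caller
+ * is allowed to request the bitmap creation and defers to the
+ * platform-specific implementation.
+ */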
+struct ffa_value api_ffa_notification_bitmap_create(ffa_vm_id_t vm_id,
+						    ffa_vcpu_count_t vcpu_count,
+						    struct vcpu *current)
+{
+	if (!plat_ffa_is_notifications_create_valid(current, vm_id)) {
+		dlog_verbose("Bitmap create for NWd VM IDs only (%x).\n",
+			     vm_id);
+		return ffa_error(FFA_NOT_SUPPORTED);
+	}
+
+	return plat_ffa_notifications_bitmap_create(vm_id, vcpu_count);
+}
+
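+/**
+ * Handles the FFA_NOTIFICATION_BITMAP_DESTROY call.
+ */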
+struct ffa_value api_ffa_notification_bitmap_destroy(ffa_vm_id_t vm_id,
+						     struct vcpu *current)
+{
+	/*
+	 * The validity check for this interface is the same as for bitmap
+	 * create.
+	 */
+	if (!plat_ffa_is_notifications_create_valid(current, vm_id)) {
+		dlog_verbose("Bitmap destroy for NWd VM IDs only (%x).\n",
+			     vm_id);
+		return ffa_error(FFA_NOT_SUPPORTED);
+	}
+
+	return plat_ffa_notifications_bitmap_destroy(vm_id);
+}
diff --git a/src/arch/aarch64/hypervisor/handler.c b/src/arch/aarch64/hypervisor/handler.c
index 0e6acd9..26dd59c 100644
--- a/src/arch/aarch64/hypervisor/handler.c
+++ b/src/arch/aarch64/hypervisor/handler.c
@@ -594,6 +594,15 @@
 		*args = api_ffa_secondary_ep_register(ipa_init(args->arg1),
 						      current);
 		return true;
+	case FFA_NOTIFICATION_BITMAP_CREATE_32:
+		*args = api_ffa_notification_bitmap_create(
+			(ffa_vm_id_t)args->arg1, (ffa_vcpu_count_t)args->arg2,
+			current);
+		return true;
+	case FFA_NOTIFICATION_BITMAP_DESTROY_32:
+		*args = api_ffa_notification_bitmap_destroy(
+			(ffa_vm_id_t)args->arg1, current);
+		return true;
 	}
 
 	return false;
diff --git a/src/arch/aarch64/plat/ffa/absent.c b/src/arch/aarch64/plat/ffa/absent.c
index 23f1f64..d699dfe 100644
--- a/src/arch/aarch64/plat/ffa/absent.c
+++ b/src/arch/aarch64/plat/ffa/absent.c
@@ -7,6 +7,7 @@
  */
 
 #include "hf/ffa.h"
+#include "hf/ffa_internal.h"
 #include "hf/vcpu.h"
 #include "hf/vm.h"
 
@@ -61,6 +62,15 @@
 	return false;
 }
 
+bool plat_ffa_is_notifications_create_valid(struct vcpu *current,
+					    ffa_vm_id_t vm_id)
+{
+	(void)current;
+	(void)vm_id;
+
+	return false;
+}
+
 bool plat_ffa_direct_request_forward(ffa_vm_id_t receiver_vm_id,
 				     struct ffa_value args,
 				     struct ffa_value *ret)
@@ -88,3 +98,19 @@
 void plat_ffa_vm_init(void)
 {
 }
+
+struct ffa_value plat_ffa_notifications_bitmap_create(
+	ffa_vm_id_t vm_id, ffa_vcpu_count_t vcpu_count)
+{
+	(void)vm_id;
+	(void)vcpu_count;
+
+	return ffa_error(FFA_NOT_SUPPORTED);
+}
+
+struct ffa_value plat_ffa_notifications_bitmap_destroy(ffa_vm_id_t vm_id)
+{
+	(void)vm_id;
+
+	return ffa_error(FFA_NOT_SUPPORTED);
+}
diff --git a/src/arch/aarch64/plat/ffa/hypervisor.c b/src/arch/aarch64/plat/ffa/hypervisor.c
index 3e50294..b65e434 100644
--- a/src/arch/aarch64/plat/ffa/hypervisor.c
+++ b/src/arch/aarch64/plat/ffa/hypervisor.c
@@ -11,6 +11,8 @@
 
 #include "hf/dlog.h"
 #include "hf/ffa.h"
+#include "hf/ffa_internal.h"
+#include "hf/vcpu.h"
 #include "hf/vm.h"
 
 #include "smc.h"
@@ -108,6 +110,22 @@
 }
 
 /**
+ * Check validity of a FF-A notifications bitmap create.
+ */
+bool plat_ffa_is_notifications_create_valid(struct vcpu *current,
+					    ffa_vm_id_t vm_id)
+{
+	/*
+	 * This call should only be issued by the Hypervisor, so any attempt
+	 * at invocation from NWd FF-A endpoints should fail.
+	 */
+	(void)current;
+	(void)vm_id;
+
+	return false;
+}
+
+/**
  * Check validity of a FF-A direct message response.
  */
 bool plat_ffa_is_direct_response_valid(struct vcpu *current,
@@ -188,3 +206,21 @@
 void plat_ffa_vm_init(void)
 {
 }
+
+struct ffa_value plat_ffa_notifications_bitmap_create(
+	ffa_vm_id_t vm_id, ffa_vcpu_count_t vcpu_count)
+{
+	/* TODO: Forward call to the SPMC */
+	(void)vm_id;
+	(void)vcpu_count;
+
+	return ffa_error(FFA_NOT_SUPPORTED);
+}
+
+struct ffa_value plat_ffa_notifications_bitmap_destroy(ffa_vm_id_t vm_id)
+{
+	/* TODO: Forward call to the SPMC */
+	(void)vm_id;
+
+	return ffa_error(FFA_NOT_SUPPORTED);
+}
diff --git a/src/arch/aarch64/plat/ffa/spmc.c b/src/arch/aarch64/plat/ffa/spmc.c
index 8efae8f..b92fcec 100644
--- a/src/arch/aarch64/plat/ffa/spmc.c
+++ b/src/arch/aarch64/plat/ffa/spmc.c
@@ -11,6 +11,7 @@
 
 #include "hf/dlog.h"
 #include "hf/ffa.h"
+#include "hf/ffa_internal.h"
 #include "hf/std.h"
 #include "hf/vm.h"
 
@@ -28,8 +29,37 @@
  */
 static struct vm nwd_vms[MAX_VMS];
 
+/**
+ * All accesses to `nwd_vms` need to be guarded by this lock.
+ */
+static struct spinlock nwd_vms_lock_instance = SPINLOCK_INIT;
+
+/**
+ * Encapsulates the set of NWd VMs while the `nwd_vms_lock_instance` is
+ * held.
+ */
+struct nwd_vms_locked {
+	struct vm *nwd_vms;
+};
+
 const uint32_t nwd_vms_size = ARRAY_SIZE(nwd_vms);
 
+/** Acquires the lock guarding the NWd VMs (`nwd_vms`). */
+static struct nwd_vms_locked nwd_vms_lock(void)
+{
+	sl_lock(&nwd_vms_lock_instance);
+
+	return (struct nwd_vms_locked){.nwd_vms = nwd_vms};
+}
+
+/** Releases the lock guarding the NWd VMs (`nwd_vms`). */
+static void nwd_vms_unlock(struct nwd_vms_locked *vms)
+{
+	CHECK(vms->nwd_vms == nwd_vms);
+	vms->nwd_vms = NULL;
+	sl_unlock(&nwd_vms_lock_instance);
+}
+
 void plat_ffa_log_init(void)
 {
 	dlog_info("Initializing Hafnium (SPMC)\n");
@@ -119,6 +149,17 @@
 	return false;
 }
 
+bool plat_ffa_is_notifications_create_valid(struct vcpu *current,
+					    ffa_vm_id_t vm_id)
+{
+	/*
+	 * The create/destroy interfaces are meant to be issued by the
+	 * Hypervisor to the SPMC, on behalf of a NWd VM.
+	 */
+	return current->vm->id == HF_HYPERVISOR_VM_ID &&
+	       !vm_id_is_current_world(vm_id);
+}
+
 ffa_memory_handle_t plat_ffa_memory_handle_make(uint64_t index)
 {
 	return (index & ~FFA_MEMORY_HANDLE_ALLOCATOR_MASK) |
@@ -152,3 +193,176 @@
 {
 	return vm->managed_exit;
 }
+
+/** Assigns a free `nwd_vms` entry to the VM with the given ID. */
+static void plat_ffa_vm_create(struct nwd_vms_locked nwd_vms_locked,
+			       struct vm_locked to_create_locked,
+			       ffa_vm_id_t vm_id, ffa_vcpu_count_t vcpu_count)
+{
+	CHECK(nwd_vms_locked.nwd_vms != NULL);
+	CHECK(to_create_locked.vm != NULL &&
+	      to_create_locked.vm->id == HF_INVALID_VM_ID);
+
+	to_create_locked.vm->id = vm_id;
+	to_create_locked.vm->vcpu_count = vcpu_count;
+	to_create_locked.vm->notifications.enabled = true;
+}
+
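+/**
+ * Resets a `nwd_vms` entry: invalidates its ID, clears the bindings of
+ * notifications from SPs and disables notifications support.
+ */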
+static void plat_ffa_vm_destroy(struct vm_locked to_destroy_locked)
+{
+	to_destroy_locked.vm->id = HF_INVALID_VM_ID;
+	to_destroy_locked.vm->vcpu_count = 0U;
+	vm_notifications_init_bindings(
+		&to_destroy_locked.vm->notifications.from_sp);
+	to_destroy_locked.vm->notifications.enabled = false;
+}
+
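+/**
+ * Returns the `nwd_vms` entry with the given ID, locked, or a vm_locked
+ * structure with a NULL `vm` pointer if no such entry exists. The caller
+ * must hold the `nwd_vms` lock.
+ */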
+static struct vm_locked plat_ffa_nwd_vm_find_locked(
+	struct nwd_vms_locked nwd_vms_locked, ffa_vm_id_t vm_id)
+{
+	CHECK(nwd_vms_locked.nwd_vms != NULL);
+
+	for (unsigned int i = 0U; i < nwd_vms_size; i++) {
+		if (nwd_vms[i].id == vm_id) {
+			return vm_lock(&nwd_vms[i]);
+		}
+	}
+
+	return (struct vm_locked){.vm = NULL};
+}
+
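+/**
+ * Finds a NWd VM by ID, acquiring and releasing the `nwd_vms` lock around
+ * the search. Returns the VM locked, or a NULL `vm` pointer if not found.
+ */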
+struct vm_locked plat_ffa_vm_find_locked(ffa_vm_id_t vm_id)
+{
+	struct vm_locked to_ret_locked;
+
+	struct nwd_vms_locked nwd_vms_locked = nwd_vms_lock();
+
+	to_ret_locked = plat_ffa_nwd_vm_find_locked(nwd_vms_locked, vm_id);
+
+	nwd_vms_unlock(&nwd_vms_locked);
+
+	return to_ret_locked;
+}
+
+struct ffa_value plat_ffa_notifications_bitmap_create(
+	ffa_vm_id_t vm_id, ffa_vcpu_count_t vcpu_count)
+{
+	struct ffa_value ret = (struct ffa_value){.func = FFA_SUCCESS_32};
+	struct vm_locked vm_locked;
+	const char *error_string = "Notification bitmap already created.";
+	struct nwd_vms_locked nwd_vms_locked = nwd_vms_lock();
+
+	if (vm_id == HF_OTHER_WORLD_ID) {
+		/*
+		 * If the provided VM ID refers to the Hypervisor, represented
+		 * by the other world VM with ID HF_OTHER_WORLD_ID, check
+		 * whether notifications have already been enabled.
+		 */
+
+		vm_locked = vm_find_locked(vm_id);
+
+		CHECK(vm_locked.vm != NULL);
+
+		/* The call has already been used for the other world VM. */
+		if (vm_locked.vm->notifications.enabled != false) {
+			dlog_error("%s\n", error_string);
+			ret = ffa_error(FFA_DENIED);
+			goto out;
+		}
+
+		/* Enable notifications for `other_world_vm`. */
+		vm_locked.vm->notifications.enabled = true;
+
+	} else {
+		/* Otherwise the ID must refer to a NWd VM. */
+
+		/* If the VM already exists, the bitmap has been created. */
+		vm_locked = plat_ffa_nwd_vm_find_locked(nwd_vms_locked, vm_id);
+		if (vm_locked.vm != NULL) {
+			dlog_error("%s\n", error_string);
+			ret = ffa_error(FFA_DENIED);
+			goto out;
+		}
+
+		/* Get first empty slot in `nwd_vms` to create VM. */
+		vm_locked = plat_ffa_nwd_vm_find_locked(nwd_vms_locked,
+							HF_INVALID_VM_ID);
+
+		/*
+		 * A NULL return means there are no free slots in `nwd_vms`
+		 * for VM creation.
+		 */
+		if (vm_locked.vm == NULL) {
+			dlog_error("No memory to create.\n");
+			ret = ffa_error(FFA_NO_MEMORY);
+			goto out;
+		}
+
+		plat_ffa_vm_create(nwd_vms_locked, vm_locked, vm_id,
+				   vcpu_count);
+	}
+
+out:
+	/* The no-free-slot path reaches here without a locked VM. */
+	if (vm_locked.vm != NULL) {
+		vm_unlock(&vm_locked);
+	}
+	nwd_vms_unlock(&nwd_vms_locked);
+
+	return ret;
+}
+
+struct ffa_value plat_ffa_notifications_bitmap_destroy(ffa_vm_id_t vm_id)
+{
+	struct ffa_value ret = {.func = FFA_SUCCESS_32};
+	struct vm_locked to_destroy_locked;
+	const char *error_not_created_string = "Bitmap not created for vm:";
+
+	if (vm_id == HF_OTHER_WORLD_ID) {
+		/*
+		 * The bitmap is part of `other_world_vm`; destroying it
+		 * resets the bindings and disables notifications.
+		 */
+
+		to_destroy_locked = vm_find_locked(vm_id);
+
+		CHECK(to_destroy_locked.vm != NULL);
+
+		if (to_destroy_locked.vm->notifications.enabled == false) {
+			dlog_error("%s %u\n", error_not_created_string, vm_id);
+			ret = ffa_error(FFA_DENIED);
+			goto out;
+		}
+
+		/* Check if there is any notification pending. */
+		if (vm_are_notifications_pending(to_destroy_locked, false,
+						 ~0x0ULL)) {
+			dlog_verbose("VM has notifications pending.\n");
+			ret = ffa_error(FFA_DENIED);
+			goto out;
+		}
+
+		to_destroy_locked.vm->notifications.enabled = false;
+		vm_notifications_init_bindings(
+			&to_destroy_locked.vm->notifications.from_sp);
+	} else {
+		to_destroy_locked = plat_ffa_vm_find_locked(vm_id);
+
+		/* If VM doesn't exist, bitmap hasn't been created. */
+		if (to_destroy_locked.vm == NULL) {
+			dlog_verbose("%s %u.\n", error_not_created_string,
+				     vm_id);
+			return ffa_error(FFA_DENIED);
+		}
+
+		/* Check if there is any notification pending. */
+		if (vm_are_notifications_pending(to_destroy_locked, false,
+						 ~0x0ULL)) {
+			dlog_verbose("VM has notifications pending.\n");
+			ret = ffa_error(FFA_DENIED);
+			goto out;
+		}
+
+		plat_ffa_vm_destroy(to_destroy_locked);
+	}
+out:
+	vm_unlock(&to_destroy_locked);
+	return ret;
+}
diff --git a/src/arch/fake/hypervisor/ffa.c b/src/arch/fake/hypervisor/ffa.c
index 288b256..5a5a13b 100644
--- a/src/arch/fake/hypervisor/ffa.c
+++ b/src/arch/fake/hypervisor/ffa.c
@@ -8,6 +8,7 @@
 
 #include "hf/ffa.h"
 
+#include "hf/ffa_internal.h"
 #include "hf/vcpu.h"
 
 ffa_vm_id_t arch_ffa_spmc_id_get(void)
@@ -76,3 +77,28 @@
 	(void)vm;
 	return false;
 }
+
+bool plat_ffa_is_notifications_create_valid(struct vcpu *current,
+					    ffa_vm_id_t vm_id)
+{
+	(void)current;
+	(void)vm_id;
+
+	return false;
+}
+
+struct ffa_value plat_ffa_notifications_bitmap_create(
+	ffa_vm_id_t vm_id, ffa_vcpu_count_t vcpu_count)
+{
+	(void)vm_id;
+	(void)vcpu_count;
+
+	return ffa_error(FFA_NOT_SUPPORTED);
+}
+
+struct ffa_value plat_ffa_notifications_bitmap_destroy(ffa_vm_id_t vm_id)
+{
+	(void)vm_id;
+
+	return ffa_error(FFA_NOT_SUPPORTED);
+}
diff --git a/src/load.c b/src/load.c
index a69880d..5501e12 100644
--- a/src/load.c
+++ b/src/load.c
@@ -157,6 +157,9 @@
 		vm_locked.vm->boot_order = manifest_vm->sp.boot_order;
 		/* Updating boot list according to boot_order */
 		vm_update_boot(vm_locked.vm);
+
+		/* TODO: Enable in accordance with the VM's manifest. */
+		vm_locked.vm->notifications.enabled = true;
 	}
 
 	/* Initialize architecture-specific features. */
diff --git a/src/vm.c b/src/vm.c
index bb9d7f1..ff74763 100644
--- a/src/vm.c
+++ b/src/vm.c
@@ -82,9 +82,6 @@
 	vm_notifications_init_bindings(&vm->notifications.from_sp);
 	vm_notifications_init_bindings(&vm->notifications.from_vm);
 
-	/* TODO: Enable in accordance to VM's manifest. */
-	vm->notifications.enabled = true;
-
 	return vm;
 }
 
@@ -384,16 +381,6 @@
 	vm->next_boot = current;
 }
 
-/*
- * Initializes the notifications structure.
- */
-void vm_notifications_init_bindings(struct notifications *notifications)
-{
-	for (uint32_t i = 0U; i < MAX_FFA_NOTIFICATIONS; i++) {
-		notifications->bindings_sender_id[i] = HF_INVALID_VM_ID;
-	}
-}
-
 /**
  * Gets the mode of the given range of ipa or va if they are mapped with the
  * same mode.
@@ -412,3 +399,37 @@
 	}
 	return mm_vm_get_mode(&vm_locked.vm->ptable, begin, end, mode);
 }
+
+/*
+ * Initializes the notifications structure.
+ */
+void vm_notifications_init_bindings(struct notifications *notifications)
+{
+	for (uint32_t i = 0U; i < MAX_FFA_NOTIFICATIONS; i++) {
+		notifications->bindings_sender_id[i] = HF_INVALID_VM_ID;
+	}
+}
+
+/**
+ * Checks if there are pending notifications.
+ */
+bool vm_are_notifications_pending(struct vm_locked vm_locked, bool from_vm,
+				  ffa_notifications_bitmap_t notifications)
+{
+	struct notifications *to_check;
+
+	CHECK(vm_locked.vm != NULL);
+
+	to_check = from_vm ? &vm_locked.vm->notifications.from_vm
+			   : &vm_locked.vm->notifications.from_sp;
+
+	/* Check if there are pending per-vCPU notifications. */
+	for (uint32_t i = 0U; i < MAX_CPUS; i++) {
+		if ((to_check->per_vcpu[i].pending & notifications) != 0U) {
+			return true;
+		}
+	}
+
+	/* Check if there are pending global notifications. */
+	return (to_check->global.pending & notifications) != 0U;
+}