feat(notifications): notifications set and get

Handle the FF-A calls FFA_NOTIFICATION_SET and FFA_NOTIFICATION_GET.
The former is used by a sender to signal notifications to a receiver;
the latter is used by the receiver to retrieve whichever notifications
are pending for it.
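
Illustrative register usage at the ABI level (a sketch for context, not
part of this change; variable names are placeholders, while the flag and
ID encodings follow the FF-A v1.1 Beta 0 spec and the constants already
used in api.c):

  /* Sender signals a per-vCPU notification bitmap to the receiver. */
  struct ffa_value set_args = {
      .func = FFA_NOTIFICATION_SET_32,
      .arg1 = ((uint32_t)sender_id << 16) | receiver_id,
      .arg2 = FFA_NOTIFICATION_FLAG_PER_VCPU | ((uint32_t)vcpu_id << 16),
      .arg3 = (uint32_t)bitmap,          /* bits [31:0] */
      .arg4 = (uint32_t)(bitmap >> 32),  /* bits [63:32] */
  };

  /*
   * On a successful FFA_NOTIFICATION_GET, the pending bitmaps are
   * returned in w2/w3 (from SPs), w4/w5 (from VMs) and w6/w7
   * (framework notifications, not yet supported).
   */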

Change-Id: I7e9db94201d0d78ceecd599cd350eeb37a8cb1f8
Signed-off-by: J-Alves <joao.alves@arm.com>
diff --git a/src/api.c b/src/api.c
index ddbde0f..e5b7bb6 100644
--- a/src/api.c
+++ b/src/api.c
@@ -2063,7 +2063,7 @@
 	} else if (vm_id_is_current_world(receiver_vm_id)) {
 		/*
 		 * It is expected the receiver_vm_id to be from an SP, otherwise
-		 * 'arch_other_world_is_direct_response_valid' should have
+		 * 'plat_ffa_is_direct_response_valid' should have
 		 * made function return error before getting to this point.
 		 */
 		*next = api_switch_to_vm(current, to_ret,
@@ -2661,3 +2661,165 @@
 	vm_unlock(&receiver_locked);
 	return ret;
 }
+
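+/**
+ * Handles FFA_NOTIFICATION_SET: sets the given notifications as pending for
+ * the receiver, provided the receiver has notifications enabled and has
+ * bound the notifications to the sender.
+ */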
+struct ffa_value api_ffa_notification_set(
+	ffa_vm_id_t sender_vm_id, ffa_vm_id_t receiver_vm_id, uint32_t flags,
+	ffa_notifications_bitmap_t notifications, struct vcpu *current)
+{
+	struct ffa_value ret;
+	struct vm_locked receiver_locked;
+
+	/*
+	 * Check whether the notification is per-vCPU or global, and extract
+	 * the target vCPU ID, according to table 17.19 of the FF-A v1.1
+	 * Beta 0 spec.
+	 */
+	bool is_per_vcpu = (flags & FFA_NOTIFICATION_FLAG_PER_VCPU) != 0U;
+	ffa_vcpu_index_t vcpu_id = (uint16_t)(flags >> 16);
+
+	/*
+	 * TODO: cater for the delay_schedule_receiver flag when dealing with
+	 * schedule receiver interrupt.
+	 */
+
+	if (!plat_ffa_is_notification_set_valid(current, sender_vm_id,
+						receiver_vm_id)) {
+		dlog_verbose("Invalid use of notifications set interface.\n");
+		return ffa_error(FFA_INVALID_PARAMETERS);
+	}
+
+	if (notifications == 0U) {
+		dlog_verbose("No notifications have been specified.\n");
+		return ffa_error(FFA_INVALID_PARAMETERS);
+	}
+
+	/*
+	 * This check assumes the receiver is the current VM, which has been
+	 * enforced by 'plat_ffa_is_notification_set_valid'.
+	 */
+	receiver_locked = plat_ffa_vm_find_locked(receiver_vm_id);
+
+	if (receiver_locked.vm == NULL) {
+		dlog_verbose("Receiver ID is not valid.\n");
+		return ffa_error(FFA_INVALID_PARAMETERS);
+	}
+
+	/*
+	 * TODO: Forward Hypervisor's call to SWd if setting SP's notifications
+	 * from VMs.
+	 */
+
+	if (!vm_are_notifications_enabled(receiver_locked)) {
+		dlog_verbose("Receiver's notifications not enabled.\n");
+		ret = ffa_error(FFA_DENIED);
+		goto out;
+	}
+
+	/*
+	 * If the notifications are not bound to the sender, they are not
+	 * enabled for the receiver either.
+	 */
+	if (!vm_notifications_validate_binding(
+		    receiver_locked, plat_ffa_is_vm_id(sender_vm_id),
+		    sender_vm_id, notifications, is_per_vcpu)) {
+		dlog_verbose("Notifications bindings not valid.\n");
+		ret = ffa_error(FFA_DENIED);
+		goto out;
+	}
+
+	if (is_per_vcpu && vcpu_id >= receiver_locked.vm->vcpu_count) {
+		dlog_verbose("Invalid VCPU ID!\n");
+		ret = ffa_error(FFA_INVALID_PARAMETERS);
+		goto out;
+	}
+
+	/* Set notifications pending. */
+	vm_notifications_set(receiver_locked, plat_ffa_is_vm_id(sender_vm_id),
+			     notifications, vcpu_id, is_per_vcpu);
+	dlog_verbose("Set the notifications: %x.\n", notifications);
+
+	ret = (struct ffa_value){.func = FFA_SUCCESS_32};
+
+out:
+	vm_unlock(&receiver_locked);
+
+	return ret;
+}
+
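+/**
+ * Packs the pending notification bitmaps into an FFA_SUCCESS return value:
+ * notifications from SPs in w2/w3, from VMs in w4/w5 and from the framework
+ * in w6/w7.
+ */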
+static struct ffa_value api_ffa_notification_get_success_return(
+	ffa_notifications_bitmap_t from_sp, ffa_notifications_bitmap_t from_vm,
+	ffa_notifications_bitmap_t from_framework)
+{
+	return (struct ffa_value){
+		.func = FFA_SUCCESS_32,
+		.arg1 = 0U,
+		.arg2 = (uint32_t)from_sp,
+		.arg3 = (uint32_t)(from_sp >> 32),
+		.arg4 = (uint32_t)from_vm,
+		.arg5 = (uint32_t)(from_vm >> 32),
+		.arg6 = (uint32_t)from_framework,
+		.arg7 = (uint32_t)(from_framework >> 32),
+	};
+}
+
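+/**
+ * Handles FFA_NOTIFICATION_GET: retrieves and clears the receiver's pending
+ * notifications from SPs and/or VMs, as selected by the flags.
+ */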
+struct ffa_value api_ffa_notification_get(ffa_vm_id_t receiver_vm_id,
+					  ffa_vcpu_index_t vcpu_id,
+					  uint32_t flags, struct vcpu *current)
+{
+	/* TODO: get framework notifications, when these are supported. */
+	ffa_notifications_bitmap_t sp_notifications = 0;
+	ffa_notifications_bitmap_t vm_notifications = 0;
+	struct vm_locked receiver_locked;
+	struct ffa_value ret;
+
+	/*
+	 * The following check captures invalid uses of the interface,
+	 * depending on whether Hafnium is the SPMC or the hypervisor.
+	 * The rest of the function assumes this condition is met.
+	 */
+	if (!plat_ffa_is_notification_get_valid(current, receiver_vm_id)) {
+		dlog_verbose("Invalid use of notifications get interface.\n");
+		return ffa_error(FFA_INVALID_PARAMETERS);
+	}
+
+	/*
+	 * This check assumes the receiver is the current VM, which has been
+	 * enforced by `plat_ffa_is_notification_get_valid`.
+	 */
+	receiver_locked = plat_ffa_vm_find_locked(receiver_vm_id);
+
+	/*
+	 * `plat_ffa_is_notification_get_valid` ensures the receiver can
+	 * always be found, so the following check should never fail.
+	 */
+	CHECK(receiver_locked.vm != NULL);
+
+	if (receiver_locked.vm->vcpu_count <= vcpu_id ||
+	    (receiver_locked.vm->vcpu_count != 1 &&
+	     cpu_index(current->cpu) != vcpu_id)) {
+		dlog_verbose("Invalid VCPU ID!\n");
+		ret = ffa_error(FFA_INVALID_PARAMETERS);
+		goto out;
+	}
+
+	if ((flags & FFA_NOTIFICATION_FLAG_BITMAP_SP) != 0U) {
+		/*
+		 * TODO: For hypervisor, forward call to SPMC to get VM's
+		 * notifications from SPs.
+		 */
+		sp_notifications = vm_notifications_get_pending_and_clear(
+			receiver_locked, false, vcpu_id);
+	}
+
+	if ((flags & FFA_NOTIFICATION_FLAG_BITMAP_VM) != 0U) {
+		vm_notifications = vm_notifications_get_pending_and_clear(
+			receiver_locked, true, vcpu_id);
+	}
+
+	ret = api_ffa_notification_get_success_return(sp_notifications,
+						      vm_notifications, 0);
+
+out:
+	vm_unlock(&receiver_locked);
+
+	return ret;
+}
diff --git a/src/arch/aarch64/hypervisor/handler.c b/src/arch/aarch64/hypervisor/handler.c
index 9bcce55..9104028 100644
--- a/src/arch/aarch64/hypervisor/handler.c
+++ b/src/arch/aarch64/hypervisor/handler.c
@@ -615,6 +615,17 @@
 			ffa_notifications_bitmap(args->arg3, args->arg4), false,
 			current);
 		return true;
+	case FFA_NOTIFICATION_SET_32:
+		*args = api_ffa_notification_set(
+			ffa_sender(*args), ffa_receiver(*args), args->arg2,
+			ffa_notifications_bitmap(args->arg3, args->arg4),
+			current);
+		return true;
+	case FFA_NOTIFICATION_GET_32:
+		*args = api_ffa_notification_get(
+			ffa_notifications_get_receiver(*args),
+			ffa_notifications_get_vcpu(*args), args->arg2, current);
+		return true;
 	}
 
 	return false;
diff --git a/src/arch/aarch64/plat/ffa/absent.c b/src/arch/aarch64/plat/ffa/absent.c
index 1bfa115..0902bca 100644
--- a/src/arch/aarch64/plat/ffa/absent.c
+++ b/src/arch/aarch64/plat/ffa/absent.c
@@ -111,6 +111,24 @@
 	return false;
 }
 
+bool plat_ffa_is_notification_set_valid(struct vcpu *current,
+					ffa_vm_id_t sender_id,
+					ffa_vm_id_t receiver_id)
+{
+	(void)current;
+	(void)sender_id;
+	(void)receiver_id;
+	return false;
+}
+
+bool plat_ffa_is_notification_get_valid(struct vcpu *current,
+					ffa_vm_id_t receiver_id)
+{
+	(void)current;
+	(void)receiver_id;
+	return false;
+}
+
 struct ffa_value plat_ffa_notifications_bitmap_create(
 	ffa_vm_id_t vm_id, ffa_vcpu_count_t vcpu_count)
 {
diff --git a/src/arch/aarch64/plat/ffa/hypervisor.c b/src/arch/aarch64/plat/ffa/hypervisor.c
index 14a057e..4f35c9f 100644
--- a/src/arch/aarch64/plat/ffa/hypervisor.c
+++ b/src/arch/aarch64/plat/ffa/hypervisor.c
@@ -240,6 +240,25 @@
 	return sender_id != receiver_id && current_vm_id == receiver_id;
 }
 
+bool plat_ffa_is_notification_set_valid(struct vcpu *current,
+					ffa_vm_id_t sender_id,
+					ffa_vm_id_t receiver_id)
+{
+	ffa_vm_id_t current_vm_id = current->vm->id;
+
+	/* If Hafnium is hypervisor, sender needs to be current vm. */
+	return sender_id == current_vm_id && sender_id != receiver_id;
+}
+
+bool plat_ffa_is_notification_get_valid(struct vcpu *current,
+					ffa_vm_id_t receiver_id)
+{
+	ffa_vm_id_t current_vm_id = current->vm->id;
+
+	/* If Hafnium is hypervisor, receiver needs to be current vm. */
+	return (current_vm_id == receiver_id);
+}
+
 struct ffa_value plat_ffa_notifications_bitmap_create(
 	ffa_vm_id_t vm_id, ffa_vcpu_count_t vcpu_count)
 {
diff --git a/src/arch/aarch64/plat/ffa/spmc.c b/src/arch/aarch64/plat/ffa/spmc.c
index 531af10..5645f39 100644
--- a/src/arch/aarch64/plat/ffa/spmc.c
+++ b/src/arch/aarch64/plat/ffa/spmc.c
@@ -197,6 +197,40 @@
 		 vm_id_is_current_world(sender_id)));
 }
 
+bool plat_ffa_is_notification_set_valid(struct vcpu *current,
+					ffa_vm_id_t sender_id,
+					ffa_vm_id_t receiver_id)
+{
+	ffa_vm_id_t current_vm_id = current->vm->id;
+
+	/*
+	 * SPMC:
+	 * - If the set call is from an SP, the sender's ID must be the same
+	 *   as the current VM ID.
+	 * - If the set call is from the NWd, the current VM ID must be the
+	 *   Hypervisor ID, the sender must be a NWd VM and the receiver must
+	 *   be an SP.
+	 */
+	return sender_id != receiver_id &&
+	       (sender_id == current_vm_id ||
+		(current_vm_id == HF_HYPERVISOR_VM_ID &&
+		 !vm_id_is_current_world(sender_id) &&
+		 vm_id_is_current_world(receiver_id)));
+}
+
+bool plat_ffa_is_notification_get_valid(struct vcpu *current,
+					ffa_vm_id_t receiver_id)
+{
+	ffa_vm_id_t current_vm_id = current->vm->id;
+
+	/*
+	 * SPMC:
+	 * - An SP can ask for its own notifications, or the hypervisor can
+	 *   retrieve notifications targeted to a NWd VM.
+	 */
+	return (current_vm_id == receiver_id) ||
+	       (current_vm_id == HF_HYPERVISOR_VM_ID &&
+		!vm_id_is_current_world(receiver_id));
+}
+
 ffa_memory_handle_t plat_ffa_memory_handle_make(uint64_t index)
 {
 	return (index & ~FFA_MEMORY_HANDLE_ALLOCATOR_MASK) |
diff --git a/src/arch/fake/hypervisor/ffa.c b/src/arch/fake/hypervisor/ffa.c
index c423748..00a0005 100644
--- a/src/arch/fake/hypervisor/ffa.c
+++ b/src/arch/fake/hypervisor/ffa.c
@@ -114,6 +114,24 @@
 	return false;
 }
 
+bool plat_ffa_is_notification_set_valid(struct vcpu *current,
+					ffa_vm_id_t sender_id,
+					ffa_vm_id_t receiver_id)
+{
+	(void)current;
+	(void)sender_id;
+	(void)receiver_id;
+	return false;
+}
+
+bool plat_ffa_is_notification_get_valid(struct vcpu *current,
+					ffa_vm_id_t receiver_id)
+{
+	(void)current;
+	(void)receiver_id;
+	return false;
+}
+
 struct ffa_value plat_ffa_notifications_bitmap_create(
 	ffa_vm_id_t vm_id, ffa_vcpu_count_t vcpu_count)
 {
diff --git a/src/vm.c b/src/vm.c
index f7fb390..147a8ec 100644
--- a/src/vm.c
+++ b/src/vm.c
@@ -538,3 +538,44 @@
 	return is_per_vcpu ? (~to_check->bindings_per_vcpu & notif) == 0U
 			   : (to_check->bindings_per_vcpu & notif) == 0U;
 }
+
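+/**
+ * Sets the given notifications as pending, either in the receiver's global
+ * bitmap or in the per-vCPU bitmap of the given vCPU.
+ */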
+void vm_notifications_set(struct vm_locked vm_locked, bool is_from_vm,
+			  ffa_notifications_bitmap_t notifications,
+			  ffa_vcpu_index_t vcpu_id, bool is_per_vcpu)
+{
+	CHECK(vm_locked.vm != NULL);
+	struct notifications *to_set =
+		vm_get_notifications(vm_locked, is_from_vm);
+	CHECK(vcpu_id < MAX_CPUS);
+
+	if (is_per_vcpu) {
+		to_set->per_vcpu[vcpu_id].pending |= notifications;
+	} else {
+		to_set->global.pending |= notifications;
+	}
+}
+
+/**
+ * Gets the pending global notifications, plus the per-vCPU notifications of
+ * the current vCPU only, and clears them.
+ */
+ffa_notifications_bitmap_t vm_notifications_get_pending_and_clear(
+	struct vm_locked vm_locked, bool is_from_vm,
+	ffa_vcpu_index_t cur_vcpu_id)
+{
+	ffa_notifications_bitmap_t to_ret = 0;
+
+	CHECK(vm_locked.vm != NULL);
+	struct notifications *to_get =
+		vm_get_notifications(vm_locked, is_from_vm);
+	CHECK(cur_vcpu_id < MAX_CPUS);
+
+	to_ret |= to_get->global.pending;
+	to_get->global.pending = 0U;
+	to_get->global.info_get_retrieved = 0U;
+
+	to_ret |= to_get->per_vcpu[cur_vcpu_id].pending;
+	to_get->per_vcpu[cur_vcpu_id].pending = 0U;
+	to_get->per_vcpu[cur_vcpu_id].info_get_retrieved = 0U;
+
+	return to_ret;
+}