feat(notifications): information get interface

Handle the FFA_NOTIFICATION_INFO_GET interface, to be used by a VM
(the receiver's scheduler), the Hypervisor or the OS kernel.
Return a list of endpoint and vCPU IDs with pending notifications.
Support successive calls to FFA_NOTIFICATION_INFO_GET, for the case
in which notification information remains to be retrieved after a
call.
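
Illustrative usage from the receiver scheduler's side (a sketch only;
ffa_notification_info_get() stands in for whatever wrapper issues the
call and is not part of this change):

    struct ffa_value ret;

    do {
        ret = ffa_notification_info_get();
        if (ret.func == FFA_ERROR_32) {
            /* FFA_NO_DATA: no more info to retrieve. */
            break;
        }
        /* Parse the ID lists encoded in ret (helpers in ffa.h). */
    } while (ffa_notification_info_get_more_pending(ret));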

Change-Id: I4e73f18ee3301da4829313ffae247b6d0d262622
Signed-off-by: J-Alves <joao.alves@arm.com>
diff --git a/inc/hf/api.h b/inc/hf/api.h
index 59755a4..46bd8f9 100644
--- a/inc/hf/api.h
+++ b/inc/hf/api.h
@@ -114,3 +114,5 @@
 struct ffa_value api_ffa_notification_get(ffa_vm_id_t receiver_vm_id,
 					  uint16_t vcpu_id, uint32_t flags,
 					  struct vcpu *current);
+
+struct ffa_value api_ffa_notification_info_get(struct vcpu *current);
diff --git a/inc/hf/arch/plat/ffa.h b/inc/hf/arch/plat/ffa.h
index bffc84d..2baf272 100644
--- a/inc/hf/arch/plat/ffa.h
+++ b/inc/hf/arch/plat/ffa.h
@@ -105,3 +105,10 @@
  */
 bool plat_ffa_run_forward(ffa_vm_id_t vm_id, ffa_vcpu_index_t vcpu_idx,
 			  struct ffa_value *ret);
+
+bool plat_ffa_notification_info_get_call(struct ffa_value *ret);
+
+bool plat_ffa_vm_notifications_info_get(uint16_t *ids, uint32_t *ids_count,
+					uint32_t *lists_sizes,
+					uint32_t *lists_count,
+					const uint32_t ids_count_max);
diff --git a/inc/hf/vm.h b/inc/hf/vm.h
index c9bbebc..928de40 100644
--- a/inc/hf/vm.h
+++ b/inc/hf/vm.h
@@ -123,6 +123,40 @@
 	struct notifications_state global;
 };
 
+/**
+ * The following enum defines a state machine that guides the insertion of
+ * IDs in the list returned by an FFA_NOTIFICATION_INFO_GET call.
+ * As per the FF-A v1.1 specification, FFA_NOTIFICATION_INFO_GET returns a
+ * list of 16-bit values, holding the IDs of the VMs and VCPUs with pending
+ * notifications.
+ * The overall list is composed of "sub-lists", each starting with a VM ID
+ * followed by up to 3 VCPU IDs. A VM can have multiple "sub-lists".
+ * The states are traversed per VM and guide the filling of the ID list.
+ *
+ * INIT is the initial state. The following state transitions are possible:
+ * * INIT => INSERTING: no list has been created for the VM yet. There are
+ * pending notifications, so the VM ID should be inserted first. If a
+ * per-VCPU notification is concerned, the VCPU ID should follow. Only VCPU
+ * IDs should be inserted from this point on, until the "sub-list" size
+ * limit is reached.
+ * * INIT => FULL: there is no space left in the ID list.
+ * * INSERTING => STARTING_NEW: a list has been created and only VCPU IDs
+ * are being added, but the "sub-list" size limit has been reached. If more
+ * per-VCPU notifications are pending for the VM, a new "sub-list" should
+ * be created, starting with the VM ID.
+ * * INSERTING => FULL: there is no space left in the ID list.
+ * * STARTING_NEW => INSERTING: a new "sub-list" has been started for the
+ * given VM; for the remaining pending per-VCPU notifications, only the
+ * VCPU ID should be inserted.
+ * * STARTING_NEW => FULL: there is no space left in the ID list.
+ */
+enum notifications_info_get_state {
+	INIT,
+	INSERTING,
+	STARTING_NEW,
+	FULL,
+};
+
 struct smc_whitelist {
 	uint32_t smcs[MAX_SMCS];
 	uint16_t smc_count;
@@ -253,3 +287,12 @@
 ffa_notifications_bitmap_t vm_notifications_get_pending_and_clear(
 	struct vm_locked vm_locked, bool is_from_vm,
 	ffa_vcpu_index_t cur_vcpu_id);
+void vm_notifications_info_get_pending(
+	struct vm_locked vm_locked, bool is_from_vm, uint16_t *ids,
+	uint32_t *ids_count, uint32_t *lists_sizes, uint32_t *lists_count,
+	const uint32_t ids_max_count,
+	enum notifications_info_get_state *info_get_state);
+bool vm_notifications_info_get(struct vm_locked vm_locked, uint16_t *ids,
+			       uint32_t *ids_count, uint32_t *lists_sizes,
+			       uint32_t *lists_count,
+			       const uint32_t ids_max_count);
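
Worked example for the state machine above (illustrative values): take a
VM with ID 1, one pending global notification, and pending per-VCPU
notifications on vCPUs 0-3, starting from an empty ID list:

    INIT         -> insert VM ID:          ids = {1}
    INSERTING    -> insert vCPU IDs 0,1,2: ids = {1, 0, 1, 2}
    STARTING_NEW -> "sub-list" size limit (3) reached; start a new
                    "sub-list" for vCPU 3: ids = {1, 0, 1, 2, 1, 3}

    Result: ids_count = 6, lists_count = 2, lists_sizes = {3, 1}.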
diff --git a/inc/vmapi/hf/ffa.h b/inc/vmapi/hf/ffa.h
index 6f2dd27..5198be6 100644
--- a/inc/vmapi/hf/ffa.h
+++ b/inc/vmapi/hf/ffa.h
@@ -87,6 +87,7 @@
 #define FFA_DENIED             INT32_C(-6)
 #define FFA_RETRY              INT32_C(-7)
 #define FFA_ABORTED            INT32_C(-8)
+#define FFA_NO_DATA            INT32_C(-9)
 
 /* clang-format on */
 
@@ -435,6 +436,47 @@
 }
 
 /**
+ * The maximum number of IDs in the return of FFA_NOTIFICATION_INFO_GET.
+ */
+#define FFA_NOTIFICATIONS_INFO_GET_MAX_IDS 20U
+
+/**
+ * Number of registers used in a successful return of the interface
+ * FFA_NOTIFICATION_INFO_GET.
+ */
+#define FFA_NOTIFICATIONS_INFO_GET_REGS_RET 5U
+
+#define FFA_NOTIFICATIONS_INFO_GET_FLAG_MORE_PENDING 0x1U
+
+/**
+ * Helper macros for return parameter encoding as described in section 17.7.1
+ * of the FF-A v1.1 Beta0 specification.
+ */
+#define FFA_NOTIFICATIONS_LISTS_COUNT_SHIFT 0x7U
+#define FFA_NOTIFICATIONS_LISTS_COUNT_MASK 0x1fU
+#define FFA_NOTIFICATIONS_LIST_SHIFT(l) (2 * ((l) - 1) + 12)
+#define FFA_NOTIFICATIONS_LIST_SIZE_MASK 0x3U
+
+static inline uint32_t ffa_notification_info_get_lists_count(
+	struct ffa_value args)
+{
+	return (uint32_t)(args.arg2 >> FFA_NOTIFICATIONS_LISTS_COUNT_SHIFT) &
+	       FFA_NOTIFICATIONS_LISTS_COUNT_MASK;
+}
+
+static inline uint32_t ffa_notification_info_get_list_size(
+	struct ffa_value args, unsigned int list_idx)
+{
+	return (uint32_t)(args.arg2 >> FFA_NOTIFICATIONS_LIST_SHIFT(list_idx)) &
+	       FFA_NOTIFICATIONS_LIST_SIZE_MASK;
+}
+
+static inline bool ffa_notification_info_get_more_pending(struct ffa_value args)
+{
+	return (args.arg2 & FFA_NOTIFICATIONS_INFO_GET_FLAG_MORE_PENDING) != 0U;
+}
+
+/**
  * A set of contiguous pages which is part of a memory region. This corresponds
  * to table 40 of the FF-A 1.0 EAC specification, "Constituent memory region
  * descriptor".
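
The helpers above can be exercised as follows (a sketch; the arg2 value
is hand-built to match the worked example of two lists with sizes 3 and 1):

    struct ffa_value args = {0};

    /* 2 lists; list 1 holds 3 vCPU IDs, list 2 holds 1. */
    args.arg2 = (2UL << FFA_NOTIFICATIONS_LISTS_COUNT_SHIFT) |
                (3UL << FFA_NOTIFICATIONS_LIST_SHIFT(1)) |
                (1UL << FFA_NOTIFICATIONS_LIST_SHIFT(2));

    ffa_notification_info_get_lists_count(args);   /* == 2 */
    ffa_notification_info_get_list_size(args, 1);  /* == 3 */
    ffa_notification_info_get_list_size(args, 2);  /* == 1 */
    ffa_notification_info_get_more_pending(args);  /* == false */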
diff --git a/src/api.c b/src/api.c
index 542b461..7b615ce 100644
--- a/src/api.c
+++ b/src/api.c
@@ -2823,3 +2823,96 @@
 
 	return ret;
 }
+
+/**
+ * Prepares the successful return for FFA_NOTIFICATION_INFO_GET, as described
+ * in section 17.7.1 of the FF-A v1.1 Beta0 specification.
+ */
+static struct ffa_value api_ffa_notification_info_get_success_return(
+	const uint16_t *ids, uint32_t ids_count, const uint32_t *lists_sizes,
+	uint32_t lists_count, bool list_is_full)
+{
+	struct ffa_value ret = (struct ffa_value){.func = FFA_SUCCESS_64};
+
+	/*
+	 * Copy the contents of 'ids' into the return structure, using the
+	 * five registers x3-x7 to hold the list of IDs.
+	 */
+	memcpy_s(&ret.arg3,
+		 sizeof(ret.arg3) * FFA_NOTIFICATIONS_INFO_GET_REGS_RET, ids,
+		 sizeof(ids[0]) * ids_count);
+
+	/*
+	 * According to the spec, x2 should contain:
+	 * - A bit flagging whether there are more notifications pending;
+	 * - The total number of ID lists;
+	 * - The number of VCPU IDs within each VM-specific list.
+	 */
+	ret.arg2 =
+		list_is_full ? FFA_NOTIFICATIONS_INFO_GET_FLAG_MORE_PENDING : 0;
+
+	ret.arg2 |= (lists_count & FFA_NOTIFICATIONS_LISTS_COUNT_MASK)
+		    << FFA_NOTIFICATIONS_LISTS_COUNT_SHIFT;
+
+	for (unsigned int i = 0; i < lists_count; i++) {
+		ret.arg2 |= (lists_sizes[i] & FFA_NOTIFICATIONS_LIST_SIZE_MASK)
+			    << FFA_NOTIFICATIONS_LIST_SHIFT(i + 1);
+	}
+
+	return ret;
+}
+
+struct ffa_value api_ffa_notification_info_get(struct vcpu *current)
+{
+	/*
+	 * The following set of variables should be populated with the return
+	 * info. On successful handling of this interface, they are used to
+	 * populate the 'ret' structure in accordance with table 17.29 of
+	 * the FF-A v1.1 Beta0 specification.
+	 */
+	uint16_t ids[FFA_NOTIFICATIONS_INFO_GET_MAX_IDS];
+	uint32_t lists_sizes[FFA_NOTIFICATIONS_INFO_GET_MAX_IDS] = {0};
+	uint32_t lists_count = 0;
+	uint32_t ids_count = 0;
+	bool list_is_full = false;
+
+	/*
+	 * This interface can only be called at the NS virtual/physical FF-A
+	 * instance, by the endpoint implementing the primary scheduler or by
+	 * the Hypervisor/OS kernel.
+	 * In the SPM, the following check passes if the call has been
+	 * forwarded from the hypervisor.
+	 */
+	if (current->vm->id != HF_PRIMARY_VM_ID) {
+		dlog_verbose(
+			"Only the receiver's scheduler can use this "
+			"interface\n");
+		return ffa_error(FFA_NOT_SUPPORTED);
+	}
+
+	/* Get notifications' info from this world */
+	for (ffa_vm_count_t index = 0; index < vm_get_count() && !list_is_full;
+	     ++index) {
+		struct vm_locked vm_locked = vm_lock(vm_find_index(index));
+
+		list_is_full = vm_notifications_info_get(
+			vm_locked, ids, &ids_count, lists_sizes, &lists_count,
+			FFA_NOTIFICATIONS_INFO_GET_MAX_IDS);
+
+		vm_unlock(&vm_locked);
+	}
+
+	if (!list_is_full) {
+		/* Get notification info from the other world. */
+		list_is_full = plat_ffa_vm_notifications_info_get(
+			ids, &ids_count, lists_sizes, &lists_count,
+			FFA_NOTIFICATIONS_INFO_GET_MAX_IDS);
+	}
+
+	if (ids_count == 0) {
+		return ffa_error(FFA_NO_DATA);
+	}
+
+	return api_ffa_notification_info_get_success_return(
+		ids, ids_count, lists_sizes, lists_count, list_is_full);
+}
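
For concreteness, feeding the worked example from inc/hf/vm.h above into
api_ffa_notification_info_get_success_return() (ids = {1, 0, 1, 2, 1, 3},
ids_count = 6, lists_sizes = {3, 1}, lists_count = 2, list_is_full =
false) yields, with little-endian packing of the 16-bit IDs:

    w2 = (2 << 7) | (3 << 12) | (1 << 14) = 0x7100
    x3 = 0x0002000100000001  /* ids[0..3] = 1, 0, 1, 2 */
    x4 = 0x0000000000030001  /* ids[4..5] = 1, 3 */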
diff --git a/src/arch/aarch64/hypervisor/handler.c b/src/arch/aarch64/hypervisor/handler.c
index 9104028..147c711 100644
--- a/src/arch/aarch64/hypervisor/handler.c
+++ b/src/arch/aarch64/hypervisor/handler.c
@@ -626,6 +626,9 @@
 			ffa_notifications_get_receiver(*args),
 			ffa_notifications_get_vcpu(*args), args->arg2, current);
 		return true;
+	case FFA_NOTIFICATION_INFO_GET_64:
+		*args = api_ffa_notification_info_get(current);
+		return true;
 	}
 
 	return false;
diff --git a/src/arch/aarch64/plat/ffa/absent.c b/src/arch/aarch64/plat/ffa/absent.c
index 0902bca..bb41c7b 100644
--- a/src/arch/aarch64/plat/ffa/absent.c
+++ b/src/arch/aarch64/plat/ffa/absent.c
@@ -166,3 +166,18 @@
 
 	return false;
 }
+
+bool plat_ffa_vm_notifications_info_get(uint16_t *ids,
+					uint32_t *ids_count,
+					uint32_t *lists_sizes,
+					uint32_t *lists_count,
+					const uint32_t ids_count_max)
+{
+	(void)ids;
+	(void)ids_count;
+	(void)lists_sizes;
+	(void)lists_count;
+	(void)ids_count_max;
+
+	return false;
+}
diff --git a/src/arch/aarch64/plat/ffa/hypervisor.c b/src/arch/aarch64/plat/ffa/hypervisor.c
index 4f35c9f..158cd37 100644
--- a/src/arch/aarch64/plat/ffa/hypervisor.c
+++ b/src/arch/aarch64/plat/ffa/hypervisor.c
@@ -290,3 +290,18 @@
 {
 	return vm_id_is_current_world(vm_id);
 }
+
+bool plat_ffa_vm_notifications_info_get(uint16_t *ids,
+					uint32_t *ids_count,
+					uint32_t *lists_sizes,
+					uint32_t *lists_count,
+					const uint32_t ids_count_max)
+{
+	(void)ids;
+	(void)ids_count;
+	(void)lists_sizes;
+	(void)lists_count;
+	(void)ids_count_max;
+
+	return false;
+}
diff --git a/src/arch/aarch64/plat/ffa/spmc.c b/src/arch/aarch64/plat/ffa/spmc.c
index 5645f39..dded83c 100644
--- a/src/arch/aarch64/plat/ffa/spmc.c
+++ b/src/arch/aarch64/plat/ffa/spmc.c
@@ -446,3 +446,47 @@
 {
 	return !vm_id_is_current_world(vm_id);
 }
+
+bool plat_ffa_vm_notifications_info_get(uint16_t *ids, uint32_t *ids_count,
+					uint32_t *lists_sizes,
+					uint32_t *lists_count,
+					const uint32_t ids_count_max)
+{
+	enum notifications_info_get_state info_get_state = INIT;
+	struct nwd_vms_locked nwd_vms_locked = nwd_vms_lock();
+	struct vm_locked other_world_locked = vm_find_locked(HF_OTHER_WORLD_ID);
+
+	CHECK(other_world_locked.vm != NULL);
+
+	vm_notifications_info_get_pending(other_world_locked, false, ids,
+					  ids_count, lists_sizes, lists_count,
+					  ids_count_max, &info_get_state);
+
+	vm_unlock(&other_world_locked);
+
+	if (info_get_state == FULL) {
+		goto out;
+	}
+
+	for (unsigned int i = 0; i < nwd_vms_size; i++) {
+		info_get_state = INIT;
+
+		if (nwd_vms[i].id != HF_INVALID_VM_ID) {
+			struct vm_locked vm_locked = vm_lock(&nwd_vms[i]);
+
+			vm_notifications_info_get_pending(
+				vm_locked, false, ids, ids_count, lists_sizes,
+				lists_count, ids_count_max, &info_get_state);
+
+			vm_unlock(&vm_locked);
+
+			if (info_get_state == FULL) {
+				goto out;
+			}
+		}
+	}
+out:
+	nwd_vms_unlock(&nwd_vms_locked);
+
+	return info_get_state == FULL;
+}
diff --git a/src/arch/fake/hypervisor/ffa.c b/src/arch/fake/hypervisor/ffa.c
index 00a0005..6ea1dda 100644
--- a/src/arch/fake/hypervisor/ffa.c
+++ b/src/arch/fake/hypervisor/ffa.c
@@ -159,3 +159,18 @@
 	(void)vm_id;
 	return false;
 }
+
+bool plat_ffa_vm_notifications_info_get(uint16_t *ids,
+					uint32_t *ids_count,
+					uint32_t *lists_sizes,
+					uint32_t *lists_count,
+					const uint32_t ids_count_max)
+{
+	(void)ids;
+	(void)ids_count;
+	(void)lists_sizes;
+	(void)lists_count;
+	(void)ids_count_max;
+
+	return false;
+}
diff --git a/src/vm.c b/src/vm.c
index 147a8ec..116d1b8 100644
--- a/src/vm.c
+++ b/src/vm.c
@@ -579,3 +579,140 @@
 
 	return to_ret;
 }
+
+/**
+ * Get pending notification information to return to the receiver scheduler.
+ */
+void vm_notifications_info_get_pending(
+	struct vm_locked vm_locked, bool is_from_vm, uint16_t *ids,
+	uint32_t *ids_count, uint32_t *lists_sizes, uint32_t *lists_count,
+	const uint32_t ids_max_count,
+	enum notifications_info_get_state *info_get_state)
+{
+	ffa_notifications_bitmap_t pending_not_retrieved;
+
+	CHECK(vm_locked.vm != NULL);
+	struct notifications *notifications =
+		vm_get_notifications(vm_locked, is_from_vm);
+
+	if (*info_get_state == FULL) {
+		return;
+	}
+
+	CHECK(*ids_count <= ids_max_count);
+	CHECK(*lists_count <= ids_max_count);
+
+	pending_not_retrieved = notifications->global.pending &
+				~notifications->global.info_get_retrieved;
+
+	if (pending_not_retrieved != 0U && *info_get_state == INIT) {
+		/*
+		 * A state of INIT means that no list has been created for
+		 * the given VM ID yet, which also means that its global
+		 * notifications are not represented in the list yet.
+		 */
+		if (*ids_count == ids_max_count) {
+			*info_get_state = FULL;
+			return;
+		}
+
+		*info_get_state = INSERTING;
+
+		(*lists_count)++;
+		ids[*ids_count] = vm_locked.vm->id;
+		++(*ids_count);
+	}
+
+	notifications->global.info_get_retrieved |= pending_not_retrieved;
+
+	for (ffa_vcpu_count_t i = 0; i < vm_locked.vm->vcpu_count; i++) {
+		/*
+		 * Include the VCPU ID for pending per-VCPU notifications.
+		 */
+		pending_not_retrieved =
+			notifications->per_vcpu[i].pending &
+			~notifications->per_vcpu[i].info_get_retrieved;
+
+		if (pending_not_retrieved == 0U) {
+			continue;
+		}
+
+		switch (*info_get_state) {
+		case INIT:
+		case STARTING_NEW:
+			/*
+			 * In this iteration two IDs need to be added: the VM
+			 * ID and the VCPU ID. If there is not enough space,
+			 * change state and terminate the function.
+			 */
+			if (ids_max_count - *ids_count < 2) {
+				*info_get_state = FULL;
+				return;
+			}
+
+			ids[*ids_count] = vm_locked.vm->id;
+			++(*ids_count);
+
+			/* Insert VCPU ID */
+			ids[*ids_count] = i;
+			++(*ids_count);
+
+			++lists_sizes[*lists_count];
+			++(*lists_count);
+
+			*info_get_state = INSERTING;
+			break;
+		case INSERTING:
+			if (*ids_count == ids_max_count) {
+				*info_get_state = FULL;
+				return;
+			}
+
+			/* Insert VCPU ID */
+			ids[*ids_count] = i;
+			(*ids_count)++;
+
+			/* Increment respective list size */
+			++lists_sizes[*lists_count - 1];
+
+			if (lists_sizes[*lists_count - 1] == 3) {
+				*info_get_state = STARTING_NEW;
+			}
+			break;
+		default:
+			panic("Notification info get state error!\n");
+		}
+
+		notifications->per_vcpu[i].info_get_retrieved |=
+			pending_not_retrieved;
+	}
+}
+
+/**
+ * Gets the info for all of a VM's pending notifications.
+ * Returns true if the list is full and more notifications remain pending.
+ */
+bool vm_notifications_info_get(struct vm_locked vm_locked, uint16_t *ids,
+			       uint32_t *ids_count, uint32_t *lists_sizes,
+			       uint32_t *lists_count,
+			       const uint32_t ids_max_count)
+{
+	enum notifications_info_get_state current_state = INIT;
+
+	/* Get info of pending notifications from SPs */
+	vm_notifications_info_get_pending(vm_locked, false, ids, ids_count,
+					  lists_sizes, lists_count,
+					  ids_max_count, &current_state);
+
+	/* Get info of pending notifications from VMs */
+	vm_notifications_info_get_pending(vm_locked, true, ids, ids_count,
+					  lists_sizes, lists_count,
+					  ids_max_count, &current_state);
+
+	/*
+	 * The state transitions to FULL when an ID insertion is attempted
+	 * but there is no more space in the list. This means there are still
+	 * pending notifications whose info has not been retrieved.
+	 */
+	return current_state == FULL;
+}
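
Finally, the multiple-call behaviour from the commit message can be
checked along these lines (a sketch, assuming more than
FFA_NOTIFICATIONS_INFO_GET_MAX_IDS IDs are pending, plus an
ffa_notification_info_get() wrapper and hftest-style EXPECT macros):

    struct ffa_value ret = ffa_notification_info_get();

    /* First call: ID list filled, more info still pending. */
    EXPECT_EQ(ret.func, FFA_SUCCESS_64);
    EXPECT_TRUE(ffa_notification_info_get_more_pending(ret));

    /* Second call: the remaining IDs are returned. */
    ret = ffa_notification_info_get();
    EXPECT_EQ(ret.func, FFA_SUCCESS_64);
    EXPECT_FALSE(ffa_notification_info_get_more_pending(ret));

    /* Third call: nothing left to report. */
    ret = ffa_notification_info_get();
    EXPECT_EQ(ret.func, FFA_ERROR_32);
    EXPECT_EQ((int32_t)ret.arg2, FFA_NO_DATA);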