feat(notifications): information get interface
Handle the FFA_NOTIFICATION_INFO_GET interface, to be used by a VM
(the receiver's scheduler), the Hypervisor or the OS kernel.
Returns a list of endpoint and vCPU IDs that have pending
notifications. Supports multiple calls to FFA_NOTIFICATION_INFO_GET,
in case there are still notifications whose information needs to be
retrieved after a call.
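
For reference, the receiver's scheduler is expected to keep calling
the interface while the more-pending flag is set in x2. Below is a
minimal decode sketch of the packed return; the helper name and the
literal shifts/masks are assumptions that mirror the
FFA_NOTIFICATIONS_* macros used in this patch:

    struct ffa_value ret = ffa_notification_info_get(); /* hypothetical */
    uint16_t ids[20]; /* x3-x7 pack up to 20 16-bit IDs. */
    memcpy(ids, &ret.arg3, sizeof(ids));

    bool more_pending = (ret.arg2 & 0x1U) != 0U;
    uint32_t lists_count = (ret.arg2 >> 7) & 0x1fU;

    for (uint32_t l = 0, id = 0; l < lists_count; l++) {
        uint16_t vm_id = ids[id++]; /* Each list starts with a VM ID. */
        uint32_t size = (ret.arg2 >> (12 + 2 * l)) & 0x3U;
        if (size == 0) {
            /* Global notifications pending for vm_id. */
        }
        for (uint32_t v = 0; v < size; v++) {
            uint16_t vcpu_id = ids[id++]; /* Per-vCPU notification. */
        }
    }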
Change-Id: I4e73f18ee3301da4829313ffae247b6d0d262622
Signed-off-by: J-Alves <joao.alves@arm.com>
diff --git a/src/api.c b/src/api.c
index 542b461..7b615ce 100644
--- a/src/api.c
+++ b/src/api.c
@@ -2823,3 +2823,96 @@
return ret;
}
+
+/**
+ * Prepares the successful return of FFA_NOTIFICATION_INFO_GET, as described
+ * in section 17.7.1 of the FF-A v1.1 Beta0 specification.
+ */
+static struct ffa_value api_ffa_notification_info_get_success_return(
+ const uint16_t *ids, uint32_t ids_count, const uint32_t *lists_sizes,
+ uint32_t lists_count, bool list_is_full)
+{
+ struct ffa_value ret = (struct ffa_value){.func = FFA_SUCCESS_64};
+
+	/*
+	 * Copy the IDs into the return structure, using the five registers
+	 * x3-x7; each 64-bit register packs up to four 16-bit IDs.
+	 */
+ memcpy_s(&ret.arg3,
+ sizeof(ret.arg3) * FFA_NOTIFICATIONS_INFO_GET_REGS_RET, ids,
+ sizeof(ids[0]) * ids_count);
+
+	/*
+	 * According to the spec, x2 should hold:
+	 * - A bit flagging whether there are more notifications pending;
+	 * - The total number of lists;
+	 * - The number of vCPU IDs within each VM-specific list.
+	 */
+ ret.arg2 =
+ list_is_full ? FFA_NOTIFICATIONS_INFO_GET_FLAG_MORE_PENDING : 0;
+
+ ret.arg2 |= (lists_count & FFA_NOTIFICATIONS_LISTS_COUNT_MASK)
+ << FFA_NOTIFICATIONS_LISTS_COUNT_SHIFT;
+
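+	/* Pack the size of each ID list into its 2-bit field in x2. */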
+ for (unsigned int i = 0; i < lists_count; i++) {
+ ret.arg2 |= (lists_sizes[i] & FFA_NOTIFICATIONS_LIST_SIZE_MASK)
+ << FFA_NOTIFICATIONS_LIST_SHIFT(i + 1);
+ }
+
+ return ret;
+}
+
+struct ffa_value api_ffa_notification_info_get(struct vcpu *current)
+{
+	/*
+	 * The following set of variables should be populated with the return
+	 * info. On successful handling of this interface, they are used to
+	 * populate the 'ret' structure in accordance with table 17.29 of the
+	 * FF-A v1.1 Beta0 specification.
+	 */
+ uint16_t ids[FFA_NOTIFICATIONS_INFO_GET_MAX_IDS];
+ uint32_t lists_sizes[FFA_NOTIFICATIONS_INFO_GET_MAX_IDS] = {0};
+ uint32_t lists_count = 0;
+ uint32_t ids_count = 0;
+ bool list_is_full = false;
+
+	/*
+	 * This interface can only be called at the NS virtual/physical FF-A
+	 * instances, by the endpoint that implements the primary scheduler or
+	 * by the Hypervisor/OS kernel.
+	 * In the SPM, the following check passes if the call has been
+	 * forwarded from the Hypervisor.
+	 */
+ if (current->vm->id != HF_PRIMARY_VM_ID) {
+ dlog_verbose(
+ "Only the receiver's scheduler can use this "
+ "interface\n");
+ return ffa_error(FFA_NOT_SUPPORTED);
+ }
+
+ /* Get notifications' info from this world */
+ for (ffa_vm_count_t index = 0; index < vm_get_count() && !list_is_full;
+ ++index) {
+ struct vm_locked vm_locked = vm_lock(vm_find_index(index));
+
+ list_is_full = vm_notifications_info_get(
+ vm_locked, ids, &ids_count, lists_sizes, &lists_count,
+ FFA_NOTIFICATIONS_INFO_GET_MAX_IDS);
+
+ vm_unlock(&vm_locked);
+ }
+
+ if (!list_is_full) {
+		/* Get notifications' info from the other world. */
+ list_is_full = plat_ffa_vm_notifications_info_get(
+ ids, &ids_count, lists_sizes, &lists_count,
+ FFA_NOTIFICATIONS_INFO_GET_MAX_IDS);
+ }
+
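+	/* NO_DATA: there are no pending notifications to report. */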
+ if (ids_count == 0) {
+ return ffa_error(FFA_NO_DATA);
+ }
+
+ return api_ffa_notification_info_get_success_return(
+ ids, ids_count, lists_sizes, lists_count, list_is_full);
+}
diff --git a/src/arch/aarch64/hypervisor/handler.c b/src/arch/aarch64/hypervisor/handler.c
index 9104028..147c711 100644
--- a/src/arch/aarch64/hypervisor/handler.c
+++ b/src/arch/aarch64/hypervisor/handler.c
@@ -626,6 +626,9 @@
ffa_notifications_get_receiver(*args),
ffa_notifications_get_vcpu(*args), args->arg2, current);
return true;
+ case FFA_NOTIFICATION_INFO_GET_64:
+ *args = api_ffa_notification_info_get(current);
+ return true;
}
return false;
diff --git a/src/arch/aarch64/plat/ffa/absent.c b/src/arch/aarch64/plat/ffa/absent.c
index 0902bca..bb41c7b 100644
--- a/src/arch/aarch64/plat/ffa/absent.c
+++ b/src/arch/aarch64/plat/ffa/absent.c
@@ -166,3 +166,18 @@
return false;
}
+
+bool plat_ffa_vm_notifications_info_get(uint16_t *ids,
+					uint32_t *ids_count,
+					uint32_t *lists_sizes,
+					uint32_t *lists_count,
+					const uint32_t ids_count_max)
+{
+ (void)ids;
+ (void)ids_count;
+ (void)lists_sizes;
+ (void)lists_count;
+ (void)ids_count_max;
+
+ return false;
+}
diff --git a/src/arch/aarch64/plat/ffa/hypervisor.c b/src/arch/aarch64/plat/ffa/hypervisor.c
index 4f35c9f..158cd37 100644
--- a/src/arch/aarch64/plat/ffa/hypervisor.c
+++ b/src/arch/aarch64/plat/ffa/hypervisor.c
@@ -290,3 +290,18 @@
{
return vm_id_is_current_world(vm_id);
}
+
+bool plat_ffa_vm_notifications_info_get(uint16_t *ids,
+					uint32_t *ids_count,
+					uint32_t *lists_sizes,
+					uint32_t *lists_count,
+					const uint32_t ids_count_max)
+{
+ (void)ids;
+ (void)ids_count;
+ (void)lists_sizes;
+ (void)lists_count;
+ (void)ids_count_max;
+
+ return false;
+}
diff --git a/src/arch/aarch64/plat/ffa/spmc.c b/src/arch/aarch64/plat/ffa/spmc.c
index 5645f39..dded83c 100644
--- a/src/arch/aarch64/plat/ffa/spmc.c
+++ b/src/arch/aarch64/plat/ffa/spmc.c
@@ -446,3 +446,47 @@
{
return !vm_id_is_current_world(vm_id);
}
+
+bool plat_ffa_vm_notifications_info_get(uint16_t *ids, uint32_t *ids_count,
+ uint32_t *lists_sizes,
+ uint32_t *lists_count,
+ const uint32_t ids_count_max)
+{
+ enum notifications_info_get_state info_get_state = INIT;
+ struct nwd_vms_locked nwd_vms_locked = nwd_vms_lock();
+ struct vm_locked other_world_locked = vm_find_locked(HF_OTHER_WORLD_ID);
+
+ CHECK(other_world_locked.vm != NULL);
+
+	vm_notifications_info_get_pending(other_world_locked, false, ids,
+					  ids_count, lists_sizes, lists_count,
+					  ids_count_max, &info_get_state);
+
+	/* Release the other world VM lock before any early exit. */
+	vm_unlock(&other_world_locked);
+
+	if (info_get_state == FULL) {
+		goto out;
+	}
+
+ for (unsigned int i = 0; i < nwd_vms_size; i++) {
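+		/* Each VM starts its own list(s): reset the state machine. */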
+ info_get_state = INIT;
+
+ if (nwd_vms[i].id != HF_INVALID_VM_ID) {
+ struct vm_locked vm_locked = vm_lock(&nwd_vms[i]);
+
+			vm_notifications_info_get_pending(
+				vm_locked, false, ids, ids_count, lists_sizes,
+				lists_count, ids_count_max, &info_get_state);
+
+			/* Unlock before the early exit to avoid a leak. */
+			vm_unlock(&vm_locked);
+
+			if (info_get_state == FULL) {
+				goto out;
+			}
+ }
+ }
+out:
+ nwd_vms_unlock(&nwd_vms_locked);
+
+ return info_get_state == FULL;
+}
diff --git a/src/arch/fake/hypervisor/ffa.c b/src/arch/fake/hypervisor/ffa.c
index 00a0005..6ea1dda 100644
--- a/src/arch/fake/hypervisor/ffa.c
+++ b/src/arch/fake/hypervisor/ffa.c
@@ -159,3 +159,18 @@
(void)vm_id;
return false;
}
+
+bool plat_ffa_vm_notifications_info_get(uint16_t *ids,
+					uint32_t *ids_count,
+					uint32_t *lists_sizes,
+					uint32_t *lists_count,
+					const uint32_t ids_count_max)
+{
+ (void)ids;
+ (void)ids_count;
+ (void)lists_sizes;
+ (void)lists_count;
+ (void)ids_count_max;
+
+ return false;
+}
diff --git a/src/vm.c b/src/vm.c
index 147a8ec..116d1b8 100644
--- a/src/vm.c
+++ b/src/vm.c
@@ -579,3 +579,140 @@
return to_ret;
}
+
+/**
+ * Gets the info of pending notifications to return to the receiver's
+ * scheduler.
+ */
+void vm_notifications_info_get_pending(
+ struct vm_locked vm_locked, bool is_from_vm, uint16_t *ids,
+ uint32_t *ids_count, uint32_t *lists_sizes, uint32_t *lists_count,
+ const uint32_t ids_max_count,
+ enum notifications_info_get_state *info_get_state)
+{
+ ffa_notifications_bitmap_t pending_not_retrieved;
+
+ CHECK(vm_locked.vm != NULL);
+ struct notifications *notifications =
+ vm_get_notifications(vm_locked, is_from_vm);
+
+ if (*info_get_state == FULL) {
+ return;
+ }
+
+ CHECK(*ids_count <= ids_max_count);
+ CHECK(*lists_count <= ids_max_count);
+
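+	/*
+	 * Global notifications pending whose info was not retrieved by a
+	 * previous call to FFA_NOTIFICATION_INFO_GET.
+	 */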
+ pending_not_retrieved = notifications->global.pending &
+ ~notifications->global.info_get_retrieved;
+
+ if (pending_not_retrieved != 0U && *info_get_state == INIT) {
+		/*
+		 * If the state is INIT, no list has been created for the
+		 * given VM ID yet, which also means that its global
+		 * notifications are not represented in the list.
+		 */
+ if (*ids_count == ids_max_count) {
+ *info_get_state = FULL;
+ return;
+ }
+
+ *info_get_state = INSERTING;
+
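+		/*
+		 * The VM ID on its own (list size 0) flags that the VM has
+		 * pending global notifications.
+		 */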
+ (*lists_count)++;
+ ids[*ids_count] = vm_locked.vm->id;
+ ++(*ids_count);
+ }
+
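+	/* Mark these global notifications as reported to the scheduler. */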
+ notifications->global.info_get_retrieved |= pending_not_retrieved;
+
+ for (ffa_vcpu_count_t i = 0; i < vm_locked.vm->vcpu_count; i++) {
+		/*
+		 * Include the vCPU IDs of pending per-vCPU notifications.
+		 */
+ pending_not_retrieved =
+ notifications->per_vcpu[i].pending &
+ ~notifications->per_vcpu[i].info_get_retrieved;
+
+ if (pending_not_retrieved == 0U) {
+ continue;
+ }
+
+ switch (*info_get_state) {
+ case INIT:
+ case STARTING_NEW:
+			/*
+			 * Two IDs need to be added here: the VM ID and the
+			 * vCPU ID. If there is no space for both, change
+			 * state and terminate the function.
+			 */
+ if (ids_max_count - *ids_count < 2) {
+ *info_get_state = FULL;
+ return;
+ }
+
+ ids[*ids_count] = vm_locked.vm->id;
+ ++(*ids_count);
+
+ /* Insert VCPU ID */
+ ids[*ids_count] = i;
+ ++(*ids_count);
+
+ ++lists_sizes[*lists_count];
+ ++(*lists_count);
+
+ *info_get_state = INSERTING;
+ break;
+ case INSERTING:
+ if (*ids_count == ids_max_count) {
+ *info_get_state = FULL;
+ return;
+ }
+
+ /* Insert VCPU ID */
+ ids[*ids_count] = i;
+ (*ids_count)++;
+
+ /* Increment respective list size */
+ ++lists_sizes[*lists_count - 1];
+
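+			/*
+			 * A list holds at most three vCPU IDs (its size field
+			 * in x2 is 2 bits wide); start a new list for this VM
+			 * before inserting the next one.
+			 */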
+ if (lists_sizes[*lists_count - 1] == 3) {
+ *info_get_state = STARTING_NEW;
+ }
+ break;
+ default:
+			panic("Notification info get state error.\n");
+ }
+
+ notifications->per_vcpu[i].info_get_retrieved |=
+ pending_not_retrieved;
+ }
+}
+
+/**
+ * Gets all info from the VM's pending notifications.
+ * Returns true if the list is full and there are still notifications pending.
+ */
+bool vm_notifications_info_get(struct vm_locked vm_locked, uint16_t *ids,
+ uint32_t *ids_count, uint32_t *lists_sizes,
+ uint32_t *lists_count,
+ const uint32_t ids_max_count)
+{
+ enum notifications_info_get_state current_state = INIT;
+
+ /* Get info of pending notifications from SPs */
+ vm_notifications_info_get_pending(vm_locked, false, ids, ids_count,
+ lists_sizes, lists_count,
+					  ids_max_count, &current_state);
+
+ /* Get info of pending notifications from VMs */
+ vm_notifications_info_get_pending(vm_locked, true, ids, ids_count,
+ lists_sizes, lists_count,
+					  ids_max_count, &current_state);
+
+	/*
+	 * The state transitions to FULL when trying to insert a new ID into
+	 * the list and there is no more space. This means there are still
+	 * notifications pending whose info was not retrieved.
+	 */
+ return current_state == FULL;
+}