refactor(notifications): improve notifications state handling
Once a notification is set, the receiver scheduler should get an
indication that the notification is pending.
For this the SPMC triggers the SRI, which should be handled by the FF-A
driver in the NWd, which shall call FFA_NOTIFICATION_INFO_GET and
notify the receiver scheduler that the receiver needs CPU cycles.
The SRI is being handled in a way such that spurious triggering of the
SRI is mitigated. A spurious SRI would be one such that after a call
to FFA_NOTIFICATION_INFO_GET there would be no information returned.
The mitigation is implemented through a state machine for the SRI.
If a call to FFA_NOTIFICATION_INFO_GET didn't return all the information
of pending notifications, the FF-A Beta0 spec mandates that this is
indicated to the receiver scheduler through the return of the
FFA_NOTIFICATION_INFO_GET call.
In a system with concurrent calls to FFA_NOTIFICATION_INFO_GET and
FFA_NOTIFICATION_SET, the current implementation could allow for
a case in which a notification is pending, and its state hasn't been
indicated to the receiver scheduler: neither through triggering the SRI,
nor through the return of FFA_NOTIFICATION_INFO_GET.
This patch introduces global counters to track the state of notifications:
1- Counter of the number of pending notifications - incremented at
notification set, and decremented at notification get.
2- Counter of the number of notifications that have been retrieved by the
receiver scheduler and are still pending - incremented at
notification info get, and decremented at notification get if the
notification has been retrieved by the receiver scheduler.
Change-Id: Icd2bdab7c825ba607c09f3cf54c5b413e7264c1e
Signed-off-by: J-Alves <joao.alves@arm.com>
diff --git a/src/api.c b/src/api.c
index 54bde1d..330dc72 100644
--- a/src/api.c
+++ b/src/api.c
@@ -2902,6 +2902,14 @@
ret = api_ffa_notification_get_success_return(sp_notifications,
vm_notifications, 0);
+ /*
+ * If there are no more pending notifications, change `sri_state` to
+ * handled.
+ */
+ if (vm_is_notifications_pending_count_zero()) {
+ plat_ffa_sri_state_set(HANDLED);
+ }
+
out:
vm_unlock(&receiver_locked);
@@ -2914,7 +2922,7 @@
*/
static struct ffa_value api_ffa_notification_info_get_success_return(
const uint16_t *ids, uint32_t ids_count, const uint32_t *lists_sizes,
- uint32_t lists_count, bool list_is_full)
+ uint32_t lists_count)
{
struct ffa_value ret = (struct ffa_value){.func = FFA_SUCCESS_64};
@@ -2932,8 +2940,9 @@
* - The total number of elements (i.e. total list size);
* - The number of VCPU IDs within each VM specific list.
*/
- ret.arg2 =
- list_is_full ? FFA_NOTIFICATIONS_INFO_GET_FLAG_MORE_PENDING : 0;
+ ret.arg2 = vm_notifications_pending_not_retrieved_by_scheduler()
+ ? FFA_NOTIFICATIONS_INFO_GET_FLAG_MORE_PENDING
+ : 0;
ret.arg2 |= (lists_count & FFA_NOTIFICATIONS_LISTS_COUNT_MASK)
<< FFA_NOTIFICATIONS_LISTS_COUNT_SHIFT;
@@ -2999,7 +3008,7 @@
if (!list_is_full) {
/* Grab notifications info from other world */
- list_is_full = plat_ffa_vm_notifications_info_get(
+ plat_ffa_vm_notifications_info_get(
ids, &ids_count, lists_sizes, &lists_count,
FFA_NOTIFICATIONS_INFO_GET_MAX_IDS);
}
@@ -3010,10 +3019,11 @@
result = ffa_error(FFA_NO_DATA);
} else {
result = api_ffa_notification_info_get_success_return(
- ids, ids_count, lists_sizes, lists_count, list_is_full);
- plat_ffa_sri_state_set(HANDLED);
+ ids, ids_count, lists_sizes, lists_count);
}
+ plat_ffa_sri_state_set(HANDLED);
+
return result;
}
diff --git a/src/vm.c b/src/vm.c
index 116d1b8..e4bb2e1 100644
--- a/src/vm.c
+++ b/src/vm.c
@@ -24,6 +24,21 @@
static ffa_vm_count_t vm_count;
static struct vm *first_boot_vm;
+/**
+ * Counters on the status of notifications in the system. They help to improve
+ * the information retrieved by the receiver scheduler.
+ */
+static struct {
+ /** Counts pending notifications. */
+ uint32_t pending_count;
+ /**
+ * Counts pending notifications whose information has been retrieved
+ * by the receiver scheduler.
+ */
+ uint32_t info_get_retrieved_count;
+ struct spinlock lock;
+} all_notifications_state;
+
static bool vm_init_mm(struct vm *vm, struct mpool *ppool)
{
if (vm->el0_partition) {
@@ -464,6 +479,101 @@
: &vm_locked.vm->notifications.from_sp;
}
+static void vm_notifications_global_state_count_update(
+ ffa_notifications_bitmap_t bitmap, uint32_t *counter, int inc)
+{
+ /*
+ * Helper to update the counters of the global notifications
+ * state. Counts are updated in steps of 1, either incrementing
+ * (inc == 1) or decrementing (inc == -1).
+ */
+ CHECK(inc == 1 || inc == -1);
+
+ sl_lock(&all_notifications_state.lock);
+
+ for (uint32_t i = 0; i < MAX_FFA_NOTIFICATIONS; i++) {
+ if (vm_is_notification_bit_set(bitmap, i)) {
+ CHECK((inc > 0 && *counter < UINT32_MAX) ||
+ (inc < 0 && *counter > 0));
+ *counter += inc;
+ }
+ }
+
+ sl_unlock(&all_notifications_state.lock);
+}
+
+/**
+ * Helper function to increment the pending notifications based on a bitmap
+ * passed as argument.
+ * Function to be used at setting notifications for a given VM.
+ */
+static void vm_notifications_pending_count_add(
+ ffa_notifications_bitmap_t to_add)
+{
+ vm_notifications_global_state_count_update(
+ to_add, &all_notifications_state.pending_count, 1);
+}
+
+/**
+ * Helper function to decrement the pending notifications count.
+ * Function to be used when getting the receiver's pending notifications.
+ */
+static void vm_notifications_pending_count_sub(
+ ffa_notifications_bitmap_t to_sub)
+{
+ vm_notifications_global_state_count_update(
+ to_sub, &all_notifications_state.pending_count, -1);
+}
+
+/**
+ * Helper function to count the notifications whose information has been
+ * retrieved by the receiver scheduler, and that are still pending.
+ */
+static void vm_notifications_info_get_retrieved_count_add(
+ ffa_notifications_bitmap_t to_add)
+{
+ vm_notifications_global_state_count_update(
+ to_add, &all_notifications_state.info_get_retrieved_count, 1);
+}
+
+/**
+ * Helper function to subtract the notifications that the receiver is getting
+ * and whose information has been retrieved by the receiver scheduler.
+ */
+static void vm_notifications_info_get_retrieved_count_sub(
+ ffa_notifications_bitmap_t to_sub)
+{
+ vm_notifications_global_state_count_update(
+ to_sub, &all_notifications_state.info_get_retrieved_count, -1);
+}
+
+/**
+ * Helper function to determine if there are notifications pending whose info
+ * hasn't been retrieved by the receiver scheduler.
+ */
+bool vm_notifications_pending_not_retrieved_by_scheduler(void)
+{
+ bool ret;
+
+ sl_lock(&all_notifications_state.lock);
+ ret = all_notifications_state.pending_count >
+ all_notifications_state.info_get_retrieved_count;
+ sl_unlock(&all_notifications_state.lock);
+
+ return ret;
+}
+
+bool vm_is_notifications_pending_count_zero(void)
+{
+ bool ret;
+
+ sl_lock(&all_notifications_state.lock);
+ ret = all_notifications_state.pending_count == 0;
+ sl_unlock(&all_notifications_state.lock);
+
+ return ret;
+}
+
/**
* Checks that all provided notifications are bound to the specified sender, and
* are per VCPU or global, as specified.
@@ -553,6 +663,9 @@
} else {
to_set->global.pending |= notifications;
}
+
+ /* Update count of notifications pending. */
+ vm_notifications_pending_count_add(notifications);
}
/**
@@ -563,6 +676,7 @@
ffa_vcpu_index_t cur_vcpu_id)
{
ffa_notifications_bitmap_t to_ret = 0;
+ ffa_notifications_bitmap_t pending_and_info_get_retrieved;
CHECK(vm_locked.vm != NULL);
struct notifications *to_get =
@@ -570,10 +684,42 @@
CHECK(cur_vcpu_id < MAX_CPUS);
to_ret |= to_get->global.pending;
+
+ /* Update count of currently pending notifications in the system. */
+ vm_notifications_pending_count_sub(to_get->global.pending);
+
+ /*
+ * If the notifications the receiver is getting have been retrieved
+ * by the receiver scheduler, decrement them from the respective count.
+ */
+ pending_and_info_get_retrieved =
+ to_get->global.pending & to_get->global.info_get_retrieved;
+
+ if (pending_and_info_get_retrieved != 0) {
+ vm_notifications_info_get_retrieved_count_sub(
+ pending_and_info_get_retrieved);
+ }
+
to_get->global.pending = 0U;
to_get->global.info_get_retrieved = 0U;
to_ret |= to_get->per_vcpu[cur_vcpu_id].pending;
+
+ /*
+ * Update counts of notifications, this time for per-vCPU notifications.
+ */
+ vm_notifications_pending_count_sub(
+ to_get->per_vcpu[cur_vcpu_id].pending);
+
+ pending_and_info_get_retrieved =
+ to_get->per_vcpu[cur_vcpu_id].pending &
+ to_get->per_vcpu[cur_vcpu_id].info_get_retrieved;
+
+ if (pending_and_info_get_retrieved != 0) {
+ vm_notifications_info_get_retrieved_count_sub(
+ pending_and_info_get_retrieved);
+ }
+
to_get->per_vcpu[cur_vcpu_id].pending = 0U;
to_get->per_vcpu[cur_vcpu_id].info_get_retrieved = 0U;
@@ -625,6 +771,8 @@
notifications->global.info_get_retrieved |= pending_not_retrieved;
+ vm_notifications_info_get_retrieved_count_add(pending_not_retrieved);
+
for (ffa_vcpu_count_t i = 0; i < vm_locked.vm->vcpu_count; i++) {
/*
* Include VCPU ID of per-VCPU notifications.
@@ -685,6 +833,9 @@
notifications->per_vcpu[i].info_get_retrieved |=
pending_not_retrieved;
+
+ vm_notifications_info_get_retrieved_count_add(
+ pending_not_retrieved);
}
}