refactor(plat/ffa): extract notifications
Extract the notifications interface from `plat/ffa.h` into a new
`plat/ffa/notifications.h` header, and move the hypervisor and SPMC
implementations into `hypervisor/notifications.c` and
`spmc/notifications.c` respectively.
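
For reference, a minimal sketch of the declarations the extracted
header is expected to carry, based on the functions moved by this
change (the header itself is not part of this excerpt, so its exact
contents are an assumption):

    /* Sketch of hf/arch/plat/ffa/notifications.h; assumes the usual
     * "hf/ffa.h", "hf/cpu.h", "hf/vcpu.h" and "hf/vm.h" includes. */
    struct ffa_value plat_ffa_is_notifications_bitmap_access_valid(
        struct vcpu *current, ffa_id_t vm_id);
    bool plat_ffa_is_notifications_bind_valid(struct vcpu *current,
        ffa_id_t sender_id, ffa_id_t receiver_id);
    bool plat_ffa_is_notification_set_valid(struct vcpu *current,
        ffa_id_t sender_id, ffa_id_t receiver_id);
    bool plat_ffa_is_notification_get_valid(struct vcpu *current,
        ffa_id_t receiver_id, uint32_t flags);
    struct ffa_value plat_ffa_notifications_bitmap_create(
        ffa_id_t vm_id, ffa_vcpu_count_t vcpu_count);
    struct ffa_value plat_ffa_notifications_bitmap_destroy(ffa_id_t vm_id);
    void plat_ffa_sri_trigger_if_delayed(struct cpu *cpu);
    void plat_ffa_sri_trigger_not_delayed(struct cpu *cpu);
    void plat_ffa_sri_set_delayed(struct cpu *cpu);
    void plat_ffa_sri_init(struct cpu *cpu);
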
Change-Id: Iebfb2c1bd5562a6f8dc78ce45ba4acd14075fd45
Signed-off-by: Karl Meakin <karl.meakin@arm.com>
diff --git a/src/api.c b/src/api.c
index 07d223f..143615f 100644
--- a/src/api.c
+++ b/src/api.c
@@ -16,6 +16,7 @@
#include "hf/arch/plat/ffa.h"
#include "hf/arch/plat/ffa/direct_messaging.h"
#include "hf/arch/plat/ffa/indirect_messaging.h"
+#include "hf/arch/plat/ffa/notifications.h"
#include "hf/arch/plat/ffa/vm.h"
#include "hf/arch/timer.h"
#include "hf/arch/vm.h"
diff --git a/src/arch/aarch64/hypervisor/handler.c b/src/arch/aarch64/hypervisor/handler.c
index 89274cd..601d3ed 100644
--- a/src/arch/aarch64/hypervisor/handler.c
+++ b/src/arch/aarch64/hypervisor/handler.c
@@ -16,6 +16,7 @@
#include "hf/arch/mmu.h"
#include "hf/arch/plat/ffa.h"
#include "hf/arch/plat/ffa/indirect_messaging.h"
+#include "hf/arch/plat/ffa/notifications.h"
#include "hf/arch/plat/ffa/vm.h"
#include "hf/arch/plat/smc.h"
#include "hf/arch/timer.h"
diff --git a/src/arch/aarch64/plat/ffa/BUILD.gn b/src/arch/aarch64/plat/ffa/BUILD.gn
index 679ff1e..2f560a2 100644
--- a/src/arch/aarch64/plat/ffa/BUILD.gn
+++ b/src/arch/aarch64/plat/ffa/BUILD.gn
@@ -26,6 +26,7 @@
"hypervisor.c",
"hypervisor/direct_messaging.c",
"hypervisor/indirect_messaging.c",
+ "hypervisor/notifications.c",
"hypervisor/vm.c",
]
}
@@ -43,6 +44,7 @@
"spmc.c",
"spmc/direct_messaging.c",
"spmc/indirect_messaging.c",
+ "spmc/notifications.c",
"spmc/vm.c",
]
}
diff --git a/src/arch/aarch64/plat/ffa/hypervisor.c b/src/arch/aarch64/plat/ffa/hypervisor.c
index f71c581..32da2dc 100644
--- a/src/arch/aarch64/plat/ffa/hypervisor.c
+++ b/src/arch/aarch64/plat/ffa/hypervisor.c
@@ -189,23 +189,6 @@
return true;
}
-/**
- * Check validity of the calls:
- * FFA_NOTIFICATION_BITMAP_CREATE/FFA_NOTIFICATION_BITMAP_DESTROY.
- */
-struct ffa_value plat_ffa_is_notifications_bitmap_access_valid(
- struct vcpu *current, ffa_id_t vm_id)
-{
- /*
- * Call should only be used by the Hypervisor, so any attempt of
- * invocation from NWd FF-A endpoints should fail.
- */
- (void)current;
- (void)vm_id;
-
- return ffa_error(FFA_NOT_SUPPORTED);
-}
-
bool plat_ffa_rx_release_forward(struct vm_locked vm_locked,
struct ffa_value *ret)
{
@@ -324,257 +307,6 @@
return result;
}
-bool plat_ffa_is_notifications_bind_valid(struct vcpu *current,
- ffa_id_t sender_id,
- ffa_id_t receiver_id)
-{
- ffa_id_t current_vm_id = current->vm->id;
- /** If Hafnium is hypervisor, receiver needs to be current vm. */
- return sender_id != receiver_id && current_vm_id == receiver_id;
-}
-
-bool plat_ffa_notifications_update_bindings_forward(
- ffa_id_t receiver_id, ffa_id_t sender_id, uint32_t flags,
- ffa_notifications_bitmap_t bitmap, bool is_bind, struct ffa_value *ret)
-{
- CHECK(ret != NULL);
-
- if (vm_id_is_current_world(receiver_id) &&
- !vm_id_is_current_world(sender_id)) {
- dlog_verbose(
- "Forward notifications bind/unbind to other world.\n");
- *ret = arch_other_world_call((struct ffa_value){
- .func = is_bind ? FFA_NOTIFICATION_BIND_32
- : FFA_NOTIFICATION_UNBIND_32,
- .arg1 = (sender_id << 16) | (receiver_id),
- .arg2 = is_bind ? flags : 0U,
- .arg3 = (uint32_t)(bitmap),
- .arg4 = (uint32_t)(bitmap >> 32),
- });
- return true;
- }
- return false;
-}
-
-bool plat_ffa_is_notification_set_valid(struct vcpu *current,
- ffa_id_t sender_id,
- ffa_id_t receiver_id)
-{
- ffa_id_t current_vm_id = current->vm->id;
-
- /* If Hafnium is hypervisor, sender needs to be current vm. */
- return sender_id == current_vm_id && sender_id != receiver_id;
-}
-
-bool plat_ffa_notification_set_forward(ffa_id_t sender_vm_id,
- ffa_id_t receiver_vm_id, uint32_t flags,
- ffa_notifications_bitmap_t bitmap,
- struct ffa_value *ret)
-{
- /* Forward only if receiver is an SP. */
- if (vm_id_is_current_world(receiver_vm_id)) {
- return false;
- }
-
- dlog_verbose("Forwarding notification set to SPMC.\n");
-
- *ret = arch_other_world_call((struct ffa_value){
- .func = FFA_NOTIFICATION_SET_32,
- .arg1 = (sender_vm_id << 16) | receiver_vm_id,
- .arg2 = flags & ~FFA_NOTIFICATIONS_FLAG_DELAY_SRI,
- .arg3 = (uint32_t)(bitmap),
- .arg4 = (uint32_t)(bitmap >> 32),
- });
-
- if (ret->func == FFA_ERROR_32) {
- dlog_verbose("Failed to set notifications from SPMC.\n");
- }
-
- return true;
-}
-
-bool plat_ffa_is_notification_get_valid(struct vcpu *current,
- ffa_id_t receiver_id, uint32_t flags)
-{
- ffa_id_t current_vm_id = current->vm->id;
-
- (void)flags;
-
- /* If Hafnium is hypervisor, receiver needs to be current vm. */
- return (current_vm_id == receiver_id);
-}
-
-struct ffa_value plat_ffa_notifications_bitmap_create(
- ffa_id_t vm_id, ffa_vcpu_count_t vcpu_count)
-{
- (void)vm_id;
- (void)vcpu_count;
-
- return ffa_error(FFA_NOT_SUPPORTED);
-}
-
-struct ffa_value plat_ffa_notifications_bitmap_destroy(ffa_id_t vm_id)
-{
- (void)vm_id;
-
- return ffa_error(FFA_NOT_SUPPORTED);
-}
-
-bool plat_ffa_notifications_bitmap_create_call(ffa_id_t vm_id,
- ffa_vcpu_count_t vcpu_count)
-{
- struct ffa_value ret;
-
- if (ffa_tee_enabled) {
- ret = arch_other_world_call((struct ffa_value){
- .func = FFA_NOTIFICATION_BITMAP_CREATE_32,
- .arg1 = vm_id,
- .arg2 = vcpu_count,
- });
-
- if (ret.func == FFA_ERROR_32) {
- dlog_error(
- "Failed to create notifications bitmap "
- "to VM: %#x; error: %#x.\n",
- vm_id, ffa_error_code(ret));
- return false;
- }
- }
-
- return true;
-}
-
-void plat_ffa_notification_info_get_forward(uint16_t *ids, uint32_t *ids_count,
- uint32_t *lists_sizes,
- uint32_t *lists_count,
- const uint32_t ids_count_max)
-{
- CHECK(ids != NULL);
- CHECK(ids_count != NULL);
- CHECK(lists_sizes != NULL);
- CHECK(lists_count != NULL);
- CHECK(ids_count_max == FFA_NOTIFICATIONS_INFO_GET_MAX_IDS);
-
- uint32_t local_lists_sizes[FFA_NOTIFICATIONS_INFO_GET_MAX_IDS];
- struct ffa_value ret;
-
- dlog_verbose("Forwarding notification info get to SPMC.\n");
-
- ret = arch_other_world_call((struct ffa_value){
- .func = FFA_NOTIFICATION_INFO_GET_64,
- });
-
- if (ret.func == FFA_ERROR_32) {
- dlog_verbose("No notifications returned by SPMC.\n");
- return;
- }
-
- *lists_count = ffa_notification_info_get_lists_count(ret);
-
- if (*lists_count > ids_count_max) {
- *lists_count = 0;
- return;
- }
-
- /*
- * The count of ids should be at least the number of lists, to
- * encompass for at least the ids of the FF-A endpoints. List
- * sizes will be between 0 and 3, and relates to the counting of
- * vCPU of the endpoint that have pending notifications.
- * If `lists_count` is already ids_count_max, each list size
- * must be 0.
- */
- *ids_count = *lists_count;
-
- for (uint32_t i = 0; i < *lists_count; i++) {
- local_lists_sizes[i] =
- ffa_notification_info_get_list_size(ret, i + 1);
-
- /*
- * ... sum the counting of each list size that are part
- * of the main list.
- */
- *ids_count += local_lists_sizes[i];
- }
-
- /*
- * Sanity check returned `lists_count` and determined
- * `ids_count`. If something wrong, reset arguments to 0 such
- * that hypervisor's handling of FFA_NOTIFICATION_INFO_GET can
- * proceed without SPMC's values.
- */
- if (*ids_count > ids_count_max) {
- *ids_count = 0;
- return;
- }
-
- /* Copy now lists sizes, as return sizes have been validated. */
- memcpy_s(lists_sizes, sizeof(lists_sizes[0]) * ids_count_max,
- local_lists_sizes, FFA_NOTIFICATIONS_INFO_GET_MAX_IDS);
-
- /* Unpack the notifications info from the return. */
- memcpy_s(ids, sizeof(ids[0]) * ids_count_max, &ret.arg3,
- sizeof(ret.arg3) * FFA_NOTIFICATIONS_INFO_GET_REGS_RET);
-}
-
-bool plat_ffa_notifications_get_from_sp(struct vm_locked receiver_locked,
- ffa_vcpu_index_t vcpu_id,
- ffa_notifications_bitmap_t *from_sp,
- struct ffa_value *ret)
-{
- ffa_id_t receiver_id = receiver_locked.vm->id;
-
- assert(from_sp != NULL && ret != NULL);
-
- *ret = arch_other_world_call((struct ffa_value){
- .func = FFA_NOTIFICATION_GET_32,
- .arg1 = (vcpu_id << 16) | receiver_id,
- .arg2 = FFA_NOTIFICATION_FLAG_BITMAP_SP,
- });
-
- if (ret->func == FFA_ERROR_32) {
- return false;
- }
-
- *from_sp = ffa_notification_get_from_sp(*ret);
-
- return true;
-}
-
-bool plat_ffa_notifications_get_framework_notifications(
- struct vm_locked receiver_locked, ffa_notifications_bitmap_t *from_fwk,
- uint32_t flags, ffa_vcpu_index_t vcpu_id, struct ffa_value *ret)
-{
- ffa_id_t receiver_id = receiver_locked.vm->id;
- ffa_notifications_bitmap_t spm_notifications = 0;
-
- (void)flags;
-
- assert(from_fwk != NULL);
- assert(ret != NULL);
-
- /* Get SPMC notifications. */
- if (ffa_tee_enabled) {
- *ret = arch_other_world_call((struct ffa_value){
- .func = FFA_NOTIFICATION_GET_32,
- .arg1 = (vcpu_id << 16) | receiver_id,
- .arg2 = FFA_NOTIFICATION_FLAG_BITMAP_SPM,
- });
-
- if (ffa_func_id(*ret) == FFA_ERROR_32) {
- return false;
- }
-
- spm_notifications = ffa_notification_get_from_framework(*ret);
- }
-
- /* Merge notifications from SPMC and Hypervisor. */
- *from_fwk = spm_notifications |
- vm_notifications_framework_get_pending(receiver_locked);
-
- return true;
-}
-
void plat_ffa_rxtx_map_forward(struct vm_locked vm_locked)
{
struct vm *vm = vm_locked.vm;
@@ -689,31 +421,6 @@
CHECK(false);
}
-/**
- * An Hypervisor should send the SRI to the Primary Endpoint. Not implemented
- * as Hypervisor is only interesting for us for the sake of having a test
- * intrastructure that encompasses the NWd, and we are not interested on
- * in testing the flow of notifications between VMs only.
- */
-void plat_ffa_sri_trigger_if_delayed(struct cpu *cpu)
-{
- (void)cpu;
-}
-
-void plat_ffa_sri_trigger_not_delayed(struct cpu *cpu)
-{
- (void)cpu;
-}
-
-/**
- * Track that in current CPU there was a notification set with delay SRI
- * flag.
- */
-void plat_ffa_sri_set_delayed(struct cpu *cpu)
-{
- (void)cpu;
-}
-
bool plat_ffa_inject_notification_pending_interrupt(
struct vcpu_locked target_locked, struct vcpu_locked current_locked,
struct vm_locked receiver_locked)
diff --git a/src/arch/aarch64/plat/ffa/hypervisor/notifications.c b/src/arch/aarch64/plat/ffa/hypervisor/notifications.c
new file mode 100644
index 0000000..9ba1f3f
--- /dev/null
+++ b/src/arch/aarch64/plat/ffa/hypervisor/notifications.c
@@ -0,0 +1,310 @@
+/*
+ * Copyright 2024 The Hafnium Authors.
+ *
+ * Use of this source code is governed by a BSD-style
+ * license that can be found in the LICENSE file or at
+ * https://opensource.org/licenses/BSD-3-Clause.
+ */
+
+#include "hf/arch/plat/ffa/notifications.h"
+
+#include "hf/arch/other_world.h"
+
+#include "hf/ffa_internal.h"
+#include "hf/std.h"
+#include "hf/vm.h"
+
+#include "hypervisor.h"
+
+/**
+ * Check validity of the calls:
+ * FFA_NOTIFICATION_BITMAP_CREATE/FFA_NOTIFICATION_BITMAP_DESTROY.
+ */
+struct ffa_value plat_ffa_is_notifications_bitmap_access_valid(
+ struct vcpu *current, ffa_id_t vm_id)
+{
+ /*
+ * This call should only be used by the Hypervisor, so any attempt at
+ * invocation from NWd FF-A endpoints should fail.
+ */
+ (void)current;
+ (void)vm_id;
+
+ return ffa_error(FFA_NOT_SUPPORTED);
+}
+
+bool plat_ffa_is_notifications_bind_valid(struct vcpu *current,
+ ffa_id_t sender_id,
+ ffa_id_t receiver_id)
+{
+ ffa_id_t current_vm_id = current->vm->id;
+ /* If Hafnium is the hypervisor, the receiver must be the current VM. */
+ return sender_id != receiver_id && current_vm_id == receiver_id;
+}
+
+bool plat_ffa_notifications_update_bindings_forward(
+ ffa_id_t receiver_id, ffa_id_t sender_id, uint32_t flags,
+ ffa_notifications_bitmap_t bitmap, bool is_bind, struct ffa_value *ret)
+{
+ CHECK(ret != NULL);
+
+ if (vm_id_is_current_world(receiver_id) &&
+ !vm_id_is_current_world(sender_id)) {
+ dlog_verbose(
+ "Forward notifications bind/unbind to other world.\n");
+ *ret = arch_other_world_call((struct ffa_value){
+ .func = is_bind ? FFA_NOTIFICATION_BIND_32
+ : FFA_NOTIFICATION_UNBIND_32,
+ .arg1 = (sender_id << 16) | (receiver_id),
+ .arg2 = is_bind ? flags : 0U,
+ .arg3 = (uint32_t)(bitmap),
+ .arg4 = (uint32_t)(bitmap >> 32),
+ });
+ return true;
+ }
+ return false;
+}
+
+bool plat_ffa_is_notification_set_valid(struct vcpu *current,
+ ffa_id_t sender_id,
+ ffa_id_t receiver_id)
+{
+ ffa_id_t current_vm_id = current->vm->id;
+
+ /* If Hafnium is the hypervisor, the sender must be the current VM. */
+ return sender_id == current_vm_id && sender_id != receiver_id;
+}
+
+bool plat_ffa_notification_set_forward(ffa_id_t sender_vm_id,
+ ffa_id_t receiver_vm_id, uint32_t flags,
+ ffa_notifications_bitmap_t bitmap,
+ struct ffa_value *ret)
+{
+ /* Forward only if receiver is an SP. */
+ if (vm_id_is_current_world(receiver_vm_id)) {
+ return false;
+ }
+
+ dlog_verbose("Forwarding notification set to SPMC.\n");
+
+ *ret = arch_other_world_call((struct ffa_value){
+ .func = FFA_NOTIFICATION_SET_32,
+ .arg1 = (sender_vm_id << 16) | receiver_vm_id,
+ .arg2 = flags & ~FFA_NOTIFICATIONS_FLAG_DELAY_SRI,
+ .arg3 = (uint32_t)(bitmap),
+ .arg4 = (uint32_t)(bitmap >> 32),
+ });
+
+ if (ret->func == FFA_ERROR_32) {
+ dlog_verbose("Failed to set notifications from SPMC.\n");
+ }
+
+ return true;
+}
+
+bool plat_ffa_is_notification_get_valid(struct vcpu *current,
+ ffa_id_t receiver_id, uint32_t flags)
+{
+ ffa_id_t current_vm_id = current->vm->id;
+
+ (void)flags;
+
+ /* If Hafnium is the hypervisor, the receiver must be the current VM. */
+ return (current_vm_id == receiver_id);
+}
+
+struct ffa_value plat_ffa_notifications_bitmap_create(
+ ffa_id_t vm_id, ffa_vcpu_count_t vcpu_count)
+{
+ (void)vm_id;
+ (void)vcpu_count;
+
+ return ffa_error(FFA_NOT_SUPPORTED);
+}
+
+struct ffa_value plat_ffa_notifications_bitmap_destroy(ffa_id_t vm_id)
+{
+ (void)vm_id;
+
+ return ffa_error(FFA_NOT_SUPPORTED);
+}
+
+bool plat_ffa_notifications_bitmap_create_call(ffa_id_t vm_id,
+ ffa_vcpu_count_t vcpu_count)
+{
+ struct ffa_value ret;
+
+ if (plat_ffa_is_tee_enabled()) {
+ ret = arch_other_world_call((struct ffa_value){
+ .func = FFA_NOTIFICATION_BITMAP_CREATE_32,
+ .arg1 = vm_id,
+ .arg2 = vcpu_count,
+ });
+
+ if (ret.func == FFA_ERROR_32) {
+ dlog_error(
+ "Failed to create notifications bitmap "
+ "to VM: %#x; error: %#x.\n",
+ vm_id, ffa_error_code(ret));
+ return false;
+ }
+ }
+
+ return true;
+}
+
+void plat_ffa_notification_info_get_forward(uint16_t *ids, uint32_t *ids_count,
+ uint32_t *lists_sizes,
+ uint32_t *lists_count,
+ const uint32_t ids_count_max)
+{
+ CHECK(ids != NULL);
+ CHECK(ids_count != NULL);
+ CHECK(lists_sizes != NULL);
+ CHECK(lists_count != NULL);
+ CHECK(ids_count_max == FFA_NOTIFICATIONS_INFO_GET_MAX_IDS);
+
+ uint32_t local_lists_sizes[FFA_NOTIFICATIONS_INFO_GET_MAX_IDS];
+ struct ffa_value ret;
+
+ dlog_verbose("Forwarding notification info get to SPMC.\n");
+
+ ret = arch_other_world_call((struct ffa_value){
+ .func = FFA_NOTIFICATION_INFO_GET_64,
+ });
+
+ if (ret.func == FFA_ERROR_32) {
+ dlog_verbose("No notifications returned by SPMC.\n");
+ return;
+ }
+
+ *lists_count = ffa_notification_info_get_lists_count(ret);
+
+ if (*lists_count > ids_count_max) {
+ *lists_count = 0;
+ return;
+ }
+
+ /*
+ * The count of ids should be at least the number of lists, so
+ * that it covers at least the ids of the FF-A endpoints. List
+ * sizes will be between 0 and 3, and relate to the count of
+ * vCPUs of the endpoint that have pending notifications.
+ * If `lists_count` is already ids_count_max, each list size
+ * must be 0.
+ */
+ *ids_count = *lists_count;
+
+ for (uint32_t i = 0; i < *lists_count; i++) {
+ local_lists_sizes[i] =
+ ffa_notification_info_get_list_size(ret, i + 1);
+
+ /*
+ * ... sum the sizes of each list that is part
+ * of the main list.
+ */
+ *ids_count += local_lists_sizes[i];
+ }
+
+ /*
+ * Sanity check the returned `lists_count` and the determined
+ * `ids_count`. If something is wrong, reset the arguments to 0
+ * so that the hypervisor's handling of FFA_NOTIFICATION_INFO_GET
+ * can proceed without the SPMC's values.
+ */
+ if (*ids_count > ids_count_max) {
+ *ids_count = 0;
+ return;
+ }
+
+ /* Copy the lists sizes now, as the returned sizes have been validated. */
+ memcpy_s(lists_sizes, sizeof(lists_sizes[0]) * ids_count_max,
+ local_lists_sizes, FFA_NOTIFICATIONS_INFO_GET_MAX_IDS);
+
+ /* Unpack the notifications info from the return. */
+ memcpy_s(ids, sizeof(ids[0]) * ids_count_max, &ret.arg3,
+ sizeof(ret.arg3) * FFA_NOTIFICATIONS_INFO_GET_REGS_RET);
+}
+
+bool plat_ffa_notifications_get_from_sp(struct vm_locked receiver_locked,
+ ffa_vcpu_index_t vcpu_id,
+ ffa_notifications_bitmap_t *from_sp,
+ struct ffa_value *ret)
+{
+ ffa_id_t receiver_id = receiver_locked.vm->id;
+
+ assert(from_sp != NULL && ret != NULL);
+
+ *ret = arch_other_world_call((struct ffa_value){
+ .func = FFA_NOTIFICATION_GET_32,
+ .arg1 = (vcpu_id << 16) | receiver_id,
+ .arg2 = FFA_NOTIFICATION_FLAG_BITMAP_SP,
+ });
+
+ if (ret->func == FFA_ERROR_32) {
+ return false;
+ }
+
+ *from_sp = ffa_notification_get_from_sp(*ret);
+
+ return true;
+}
+
+bool plat_ffa_notifications_get_framework_notifications(
+ struct vm_locked receiver_locked, ffa_notifications_bitmap_t *from_fwk,
+ uint32_t flags, ffa_vcpu_index_t vcpu_id, struct ffa_value *ret)
+{
+ ffa_id_t receiver_id = receiver_locked.vm->id;
+ ffa_notifications_bitmap_t spm_notifications = 0;
+
+ (void)flags;
+
+ assert(from_fwk != NULL);
+ assert(ret != NULL);
+
+ /* Get SPMC notifications. */
+ if (plat_ffa_is_tee_enabled()) {
+ *ret = arch_other_world_call((struct ffa_value){
+ .func = FFA_NOTIFICATION_GET_32,
+ .arg1 = (vcpu_id << 16) | receiver_id,
+ .arg2 = FFA_NOTIFICATION_FLAG_BITMAP_SPM,
+ });
+
+ if (ffa_func_id(*ret) == FFA_ERROR_32) {
+ return false;
+ }
+
+ spm_notifications = ffa_notification_get_from_framework(*ret);
+ }
+
+ /* Merge notifications from SPMC and Hypervisor. */
+ *from_fwk = spm_notifications |
+ vm_notifications_framework_get_pending(receiver_locked);
+
+ return true;
+}
+
+/**
+ * A hypervisor should send the SRI to the Primary Endpoint. Not implemented, as
+ * the hypervisor is only of interest to us for the sake of having a test
+ * infrastructure that encompasses the NWd, and we are not interested in testing
+ * the flow of notifications between VMs only.
+ */
+void plat_ffa_sri_trigger_if_delayed(struct cpu *cpu)
+{
+ (void)cpu;
+}
+
+void plat_ffa_sri_trigger_not_delayed(struct cpu *cpu)
+{
+ (void)cpu;
+}
+
+/**
+ * Track that a notification was set on the current CPU with the delay SRI
+ * flag.
+ */
+void plat_ffa_sri_set_delayed(struct cpu *cpu)
+{
+ (void)cpu;
+}
diff --git a/src/arch/aarch64/plat/ffa/spmc.c b/src/arch/aarch64/plat/ffa/spmc.c
index ba2554c..cfaf1e8 100644
--- a/src/arch/aarch64/plat/ffa/spmc.c
+++ b/src/arch/aarch64/plat/ffa/spmc.c
@@ -35,9 +35,6 @@
#include "smc.h"
#include "sysregs.h"
-/** Interrupt priority for the Schedule Receiver Interrupt. */
-#define SRI_PRIORITY 0x80U
-
void plat_ffa_log_init(void)
{
dlog_info("Initializing Hafnium (SPMC)\n");
@@ -401,164 +398,6 @@
return true;
}
-struct ffa_value plat_ffa_is_notifications_bitmap_access_valid(
- struct vcpu *current, ffa_id_t vm_id)
-{
- /**
- * Create/Destroy interfaces to be called by the hypervisor, into the
- * SPMC.
- */
- if (current->vm->id != HF_HYPERVISOR_VM_ID) {
- return ffa_error(FFA_NOT_SUPPORTED);
- }
-
- /* ID provided must be a valid VM ID. */
- if (!ffa_is_vm_id(vm_id)) {
- return ffa_error(FFA_INVALID_PARAMETERS);
- }
-
- return (struct ffa_value){
- .func = FFA_SUCCESS_32,
- };
-}
-
-/**
- * - A bind call cannot be from an SPMD logical partition or target an
- * SPMD logical partition.
- * - If bind call from SP, receiver's ID must be same as current VM ID.
- * - If bind call from NWd, current VM ID must be same as Hypervisor ID,
- * receiver's ID must be from NWd, and sender's ID from SWd.
- */
-bool plat_ffa_is_notifications_bind_valid(struct vcpu *current,
- ffa_id_t sender_id,
- ffa_id_t receiver_id)
-{
- ffa_id_t current_vm_id = current->vm->id;
-
- if (plat_ffa_is_spmd_lp_id(sender_id) ||
- plat_ffa_is_spmd_lp_id(receiver_id)) {
- dlog_verbose(
- "Notification bind: not permitted for logical SPs (%x "
- "%x).\n",
- sender_id, receiver_id);
- return false;
- }
-
- if (sender_id == receiver_id) {
- dlog_verbose(
- "Notification set: sender can't target itself. (%x == "
- "%x)\n",
- sender_id, receiver_id);
- return false;
- }
-
- /* Caller is an SP. */
- if (vm_id_is_current_world(current_vm_id)) {
- if (receiver_id != current_vm_id) {
- dlog_verbose(
- "Notification bind: caller (%x) must be the "
- "receiver(%x).\n",
- current_vm_id, receiver_id);
- return false;
- }
- } else {
- assert(current_vm_id == HF_HYPERVISOR_VM_ID);
-
- if (!vm_id_is_current_world(sender_id) ||
- vm_id_is_current_world(receiver_id)) {
- dlog_verbose(
- "Notification bind: VM must specify itself as "
- "receiver (%x), and SP as sender(%x).\n",
- receiver_id, sender_id);
- return false;
- }
- }
-
- return true;
-}
-
-bool plat_ffa_notifications_update_bindings_forward(
- ffa_id_t receiver_id, ffa_id_t sender_id, uint32_t flags,
- ffa_notifications_bitmap_t bitmap, bool is_bind, struct ffa_value *ret)
-{
- (void)ret;
- (void)receiver_id;
- (void)sender_id;
- (void)flags;
- (void)bitmap;
- (void)is_bind;
- (void)ret;
-
- return false;
-}
-/*
- * - A set call cannot be from an SPMD logical partition or target an
- * SPMD logical partition.
- * - If set call from SP, sender's ID must be the same as current.
- * - If set call from NWd, current VM ID must be same as Hypervisor ID,
- * and receiver must be an SP.
- */
-bool plat_ffa_is_notification_set_valid(struct vcpu *current,
- ffa_id_t sender_id,
- ffa_id_t receiver_id)
-{
- ffa_id_t current_vm_id = current->vm->id;
-
- if (plat_ffa_is_spmd_lp_id(sender_id) ||
- plat_ffa_is_spmd_lp_id(receiver_id)) {
- dlog_verbose(
- "Notification set: not permitted for logical SPs (%x "
- "%x).\n",
- sender_id, receiver_id);
- return false;
- }
-
- if (sender_id == receiver_id) {
- dlog_verbose(
- "Notification set: sender can't target itself. (%x == "
- "%x)\n",
- sender_id, receiver_id);
- return false;
- }
-
- if (vm_id_is_current_world(current_vm_id)) {
- if (sender_id != current_vm_id) {
- dlog_verbose(
- "Notification set: caller (%x) must be the "
- "sender(%x).\n",
- current_vm_id, sender_id);
- return false;
- }
- } else {
- assert(current_vm_id == HF_HYPERVISOR_VM_ID);
-
- if (vm_id_is_current_world(sender_id) ||
- !vm_id_is_current_world(receiver_id)) {
- dlog_verbose(
- "Notification set: sender (%x) must be a VM "
- "and receiver (%x) an SP.\n",
- sender_id, receiver_id);
- return false;
- }
- }
-
- return true;
-}
-
-bool plat_ffa_notification_set_forward(ffa_id_t sender_vm_id,
- ffa_id_t receiver_vm_id, uint32_t flags,
- ffa_notifications_bitmap_t bitmap,
- struct ffa_value *ret)
-{
- (void)sender_vm_id;
- (void)receiver_vm_id;
- (void)flags;
- (void)bitmap;
- (void)ret;
-
- return false;
-}
-
void plat_ffa_rxtx_map_forward(struct vm_locked vm_locked)
{
(void)vm_locked;
@@ -569,46 +408,6 @@
(void)vm_locked;
}
-bool plat_ffa_is_notification_get_valid(struct vcpu *current,
- ffa_id_t receiver_id, uint32_t flags)
-{
- ffa_id_t current_vm_id = current->vm->id;
- /*
- * SPMC:
- * - A get call cannot be targeted to an SPMD logical partition.
- * - An SP can ask for its notifications, or the hypervisor can get
- * notifications target to a VM.
- */
- bool caller_and_receiver_valid =
- (!plat_ffa_is_spmd_lp_id(receiver_id) &&
- (current_vm_id == receiver_id)) ||
- (current_vm_id == HF_HYPERVISOR_VM_ID &&
- !vm_id_is_current_world(receiver_id));
-
- /*
- * Flags field is not valid if NWd endpoint requests notifications from
- * VMs or Hypervisor. Those are managed by the hypervisor if present.
- */
- bool flags_valid =
- !(ffa_is_vm_id(receiver_id) &&
- ((flags & FFA_NOTIFICATION_FLAG_BITMAP_VM) != 0U ||
- (flags & FFA_NOTIFICATION_FLAG_BITMAP_HYP) != 0U));
-
- return caller_and_receiver_valid && flags_valid;
-}
-
-void plat_ffa_notification_info_get_forward( // NOLINTNEXTLINE
- uint16_t *ids, uint32_t *ids_count, // NOLINTNEXTLINE
- uint32_t *lists_sizes, uint32_t *lists_count,
- const uint32_t ids_count_max)
-{
- (void)ids;
- (void)ids_count;
- (void)lists_sizes;
- (void)lists_count;
- (void)ids_count_max;
-}
-
ffa_memory_handle_t plat_ffa_memory_handle_make(uint64_t index)
{
return (index & ~FFA_MEMORY_HANDLE_ALLOCATOR_MASK) |
@@ -657,140 +456,6 @@
return result & final_mask;
}
-struct ffa_value plat_ffa_notifications_bitmap_create(
- ffa_id_t vm_id, ffa_vcpu_count_t vcpu_count)
-{
- struct ffa_value ret = (struct ffa_value){.func = FFA_SUCCESS_32};
- struct vm_locked vm_locked;
-
- if (vm_id == HF_OTHER_WORLD_ID) {
- /*
- * If the provided VM ID regards to the Hypervisor, represented
- * by the other world VM with ID HF_OTHER_WORLD_ID, check if the
- * notifications have been enabled.
- */
-
- vm_locked = vm_find_locked(vm_id);
-
- CHECK(vm_locked.vm != NULL);
-
- /* Call has been used for the other world vm already */
- if (vm_locked.vm->notifications.enabled) {
- dlog_verbose("Notification bitmap already created.\n");
- ret = ffa_error(FFA_DENIED);
- goto out;
- }
-
- /* Enable notifications for `other_world_vm`. */
- vm_locked.vm->notifications.enabled = true;
- } else {
- /* Else should regard with NWd VM ID. */
- vm_locked = plat_ffa_nwd_vm_create(vm_id);
-
- /* If received NULL, there are no slots for VM creation. */
- if (vm_locked.vm == NULL) {
- dlog_verbose("No memory to create VM ID %#x.\n", vm_id);
- return ffa_error(FFA_NO_MEMORY);
- }
-
- /* Ensure bitmap has not already been created. */
- if (vm_locked.vm->notifications.enabled) {
- dlog_verbose("Notification bitmap already created.\n");
- ret = ffa_error(FFA_DENIED);
- goto out;
- }
-
- vm_locked.vm->notifications.enabled = true;
- vm_locked.vm->vcpu_count = vcpu_count;
- }
-
-out:
- vm_unlock(&vm_locked);
-
- return ret;
-}
-
-bool plat_ffa_notifications_bitmap_create_call(ffa_id_t vm_id,
- ffa_vcpu_count_t vcpu_count)
-{
- (void)vm_id;
- (void)vcpu_count;
-
- return true;
-}
-
-struct ffa_value plat_ffa_notifications_bitmap_destroy(ffa_id_t vm_id)
-{
- struct ffa_value ret = {.func = FFA_SUCCESS_32};
- struct vm_locked to_destroy_locked = plat_ffa_vm_find_locked(vm_id);
-
- if (to_destroy_locked.vm == NULL) {
- dlog_verbose("Bitmap not created for VM: %u\n", vm_id);
- return ffa_error(FFA_DENIED);
- }
-
- if (!to_destroy_locked.vm->notifications.enabled) {
- dlog_verbose("Notification disabled for VM: %u\n", vm_id);
- ret = ffa_error(FFA_DENIED);
- goto out;
- }
-
- /* Check if there is any notification pending. */
- if (vm_are_notifications_pending(to_destroy_locked, false, ~0x0U)) {
- dlog_verbose("VM has notifications pending.\n");
- ret = ffa_error(FFA_DENIED);
- goto out;
- }
-
- to_destroy_locked.vm->notifications.enabled = false;
- vm_notifications_init(to_destroy_locked.vm,
- to_destroy_locked.vm->vcpu_count, NULL);
- if (vm_id != HF_OTHER_WORLD_ID) {
- plat_ffa_vm_destroy(to_destroy_locked);
- }
-
-out:
- vm_unlock(&to_destroy_locked);
-
- return ret;
-}
-
-bool plat_ffa_notifications_get_from_sp(struct vm_locked receiver_locked,
- ffa_vcpu_index_t vcpu_id,
- ffa_notifications_bitmap_t *from_sp,
- struct ffa_value *ret)
-{
- (void)ret;
-
- *from_sp = vm_notifications_partition_get_pending(receiver_locked,
- false, vcpu_id);
-
- return true;
-}
-
-bool plat_ffa_notifications_get_framework_notifications(
- struct vm_locked receiver_locked, ffa_notifications_bitmap_t *from_fwk,
- uint32_t flags, ffa_vcpu_index_t vcpu_id, struct ffa_value *ret)
-{
- assert(from_fwk != NULL);
- assert(ret != NULL);
-
- (void)vcpu_id;
-
- if (!vm_id_is_current_world(receiver_locked.vm->id) &&
- (flags & FFA_NOTIFICATION_FLAG_BITMAP_HYP) != 0U) {
- dlog_error(
- "Notification get flag from hypervisor in call to SPMC "
- "MBZ.\n");
- *ret = ffa_error(FFA_INVALID_PARAMETERS);
- return false;
- }
-
- *from_fwk = vm_notifications_framework_get_pending(receiver_locked);
-
- return true;
-}
-
bool plat_ffa_is_mem_perm_get_valid(const struct vcpu *current)
{
/* FFA_MEM_PERM_SET/GET is only valid before SPs are initialized */
@@ -1601,69 +1266,6 @@
return ffa_ret;
}
-static void plat_ffa_send_schedule_receiver_interrupt(struct cpu *cpu)
-{
- dlog_verbose("Setting Schedule Receiver SGI %u on core: %zu\n",
- HF_SCHEDULE_RECEIVER_INTID, cpu_index(cpu));
-
- plat_interrupts_send_sgi(HF_SCHEDULE_RECEIVER_INTID, cpu, false);
-}
-
-static void plat_ffa_sri_set_delayed_internal(struct cpu *cpu, bool delayed)
-{
- assert(cpu != NULL);
- cpu->is_sri_delayed = delayed;
-}
-
-void plat_ffa_sri_set_delayed(struct cpu *cpu)
-{
- plat_ffa_sri_set_delayed_internal(cpu, true);
-}
-
-static bool plat_ffa_is_sri_delayed(struct cpu *cpu)
-{
- assert(cpu != NULL);
- return cpu->is_sri_delayed;
-}
-
-void plat_ffa_sri_trigger_if_delayed(struct cpu *cpu)
-{
- assert(cpu != NULL);
-
- if (plat_ffa_is_sri_delayed(cpu)) {
- plat_ffa_send_schedule_receiver_interrupt(cpu);
- plat_ffa_sri_set_delayed_internal(cpu, false);
- }
-}
-
-void plat_ffa_sri_trigger_not_delayed(struct cpu *cpu)
-{
- /*
- * If flag to delay SRI isn't set, trigger SRI such that the
- * receiver scheduler is aware there are pending notifications.
- */
- plat_ffa_send_schedule_receiver_interrupt(cpu);
- plat_ffa_sri_set_delayed_internal(cpu, false);
-}
-
-void plat_ffa_sri_init(struct cpu *cpu)
-{
- /* Configure as Non Secure SGI. */
- struct interrupt_descriptor sri_desc = {
- .interrupt_id = HF_SCHEDULE_RECEIVER_INTID,
- .type = INT_DESC_TYPE_SGI,
- .sec_state = INT_DESC_SEC_STATE_NS,
- .priority = SRI_PRIORITY,
- .valid = true,
- .enabled = true,
- };
-
- /* TODO: when supported, make the interrupt driver use cpu structure. */
- (void)cpu;
-
- plat_interrupts_configure_interrupt(sri_desc);
-}
-
bool plat_ffa_inject_notification_pending_interrupt(
struct vcpu_locked target_locked, struct vcpu_locked current_locked,
struct vm_locked receiver_locked)
diff --git a/src/arch/aarch64/plat/ffa/spmc/notifications.c b/src/arch/aarch64/plat/ffa/spmc/notifications.c
new file mode 100644
index 0000000..73d7e0c
--- /dev/null
+++ b/src/arch/aarch64/plat/ffa/spmc/notifications.c
@@ -0,0 +1,423 @@
+/*
+ * Copyright 2024 The Hafnium Authors.
+ *
+ * Use of this source code is governed by a BSD-style
+ * license that can be found in the LICENSE file or at
+ * https://opensource.org/licenses/BSD-3-Clause.
+ */
+
+#include "hf/arch/plat/ffa/notifications.h"
+
+#include <stdint.h>
+
+#include "hf/arch/plat/ffa/vm.h"
+
+#include "hf/check.h"
+#include "hf/cpu.h"
+#include "hf/ffa.h"
+#include "hf/ffa_internal.h"
+#include "hf/plat/interrupts.h"
+#include "hf/types.h"
+#include "hf/vm.h"
+
+#include "./vm.h"
+
+/** Interrupt priority for the Schedule Receiver Interrupt. */
+#define SRI_PRIORITY 0x80U
+
+bool plat_ffa_is_spmd_lp_id(ffa_id_t vm_id);
+
+struct ffa_value plat_ffa_is_notifications_bitmap_access_valid(
+ struct vcpu *current, ffa_id_t vm_id)
+{
+ /*
+ * The create/destroy interfaces are meant to be called by the
+ * hypervisor into the SPMC.
+ */
+ if (current->vm->id != HF_HYPERVISOR_VM_ID) {
+ return ffa_error(FFA_NOT_SUPPORTED);
+ }
+
+ /* ID provided must be a valid VM ID. */
+ if (!ffa_is_vm_id(vm_id)) {
+ return ffa_error(FFA_INVALID_PARAMETERS);
+ }
+
+ return (struct ffa_value){
+ .func = FFA_SUCCESS_32,
+ };
+}
+
+/**
+ * - A bind call cannot be from an SPMD logical partition or target an
+ * SPMD logical partition.
+ * - If bind call from SP, receiver's ID must be same as current VM ID.
+ * - If bind call from NWd, current VM ID must be same as Hypervisor ID,
+ * receiver's ID must be from NWd, and sender's ID from SWd.
+ */
+bool plat_ffa_is_notifications_bind_valid(struct vcpu *current,
+ ffa_id_t sender_id,
+ ffa_id_t receiver_id)
+{
+ ffa_id_t current_vm_id = current->vm->id;
+
+ if (plat_ffa_is_spmd_lp_id(sender_id) ||
+ plat_ffa_is_spmd_lp_id(receiver_id)) {
+ dlog_verbose(
+ "Notification bind: not permitted for logical SPs (%x "
+ "%x).\n",
+ sender_id, receiver_id);
+ return false;
+ }
+
+ if (sender_id == receiver_id) {
+ dlog_verbose(
+ "Notification set: sender can't target itself. (%x == "
+ "%x)\n",
+ sender_id, receiver_id);
+ return false;
+ }
+
+ /* Caller is an SP. */
+ if (vm_id_is_current_world(current_vm_id)) {
+ if (receiver_id != current_vm_id) {
+ dlog_verbose(
+ "Notification bind: caller (%x) must be the "
+ "receiver(%x).\n",
+ current_vm_id, receiver_id);
+ return false;
+ }
+ } else {
+ assert(current_vm_id == HF_HYPERVISOR_VM_ID);
+
+ if (!vm_id_is_current_world(sender_id) ||
+ vm_id_is_current_world(receiver_id)) {
+ dlog_verbose(
+ "Notification bind: VM must specify itself as "
+ "receiver (%x), and SP as sender(%x).\n",
+ receiver_id, sender_id);
+ return false;
+ }
+ }
+
+ return true;
+}
+
+bool plat_ffa_notifications_update_bindings_forward(
+ ffa_id_t receiver_id, ffa_id_t sender_id, uint32_t flags,
+ ffa_notifications_bitmap_t bitmap, bool is_bind, struct ffa_value *ret)
+{
+ (void)receiver_id;
+ (void)sender_id;
+ (void)flags;
+ (void)bitmap;
+ (void)is_bind;
+ (void)ret;
+
+ return false;
+}
+
+/*
+ * - A set call cannot be from an SPMD logical partition or target an
+ * SPMD logical partition.
+ * - If set call from SP, sender's ID must be the same as current.
+ * - If set call from NWd, current VM ID must be same as Hypervisor ID,
+ * and receiver must be an SP.
+ */
+bool plat_ffa_is_notification_set_valid(struct vcpu *current,
+ ffa_id_t sender_id,
+ ffa_id_t receiver_id)
+{
+ ffa_id_t current_vm_id = current->vm->id;
+
+ if (plat_ffa_is_spmd_lp_id(sender_id) ||
+ plat_ffa_is_spmd_lp_id(receiver_id)) {
+ dlog_verbose(
+ "Notification set: not permitted for logical SPs (%x "
+ "%x).\n",
+ sender_id, receiver_id);
+ return false;
+ }
+
+ if (sender_id == receiver_id) {
+ dlog_verbose(
+ "Notification set: sender can't target itself. (%x == "
+ "%x)\n",
+ sender_id, receiver_id);
+ return false;
+ }
+
+ if (vm_id_is_current_world(current_vm_id)) {
+ if (sender_id != current_vm_id) {
+ dlog_verbose(
+ "Notification set: caller (%x) must be the "
+ "sender(%x).\n",
+ current_vm_id, sender_id);
+ return false;
+ }
+ } else {
+ assert(current_vm_id == HF_HYPERVISOR_VM_ID);
+
+ if (vm_id_is_current_world(sender_id) ||
+ !vm_id_is_current_world(receiver_id)) {
+ dlog_verbose(
+ "Notification set: sender (%x) must be a VM "
+ "and receiver (%x) an SP.\n",
+ sender_id, receiver_id);
+ return false;
+ }
+ }
+
+ return true;
+}
+
+bool plat_ffa_notification_set_forward(ffa_id_t sender_vm_id,
+ ffa_id_t receiver_vm_id, uint32_t flags,
+ ffa_notifications_bitmap_t bitmap,
+ struct ffa_value *ret)
+{
+ (void)sender_vm_id;
+ (void)receiver_vm_id;
+ (void)flags;
+ (void)bitmap;
+ (void)ret;
+
+ return false;
+}
+
+bool plat_ffa_is_notification_get_valid(struct vcpu *current,
+ ffa_id_t receiver_id, uint32_t flags)
+{
+ ffa_id_t current_vm_id = current->vm->id;
+ /*
+ * SPMC:
+ * - A get call cannot be targeted to an SPMD logical partition.
+ * - An SP can ask for its notifications, or the hypervisor can get
+ * notifications targeted to a VM.
+ */
+ bool caller_and_receiver_valid =
+ (!plat_ffa_is_spmd_lp_id(receiver_id) &&
+ (current_vm_id == receiver_id)) ||
+ (current_vm_id == HF_HYPERVISOR_VM_ID &&
+ !vm_id_is_current_world(receiver_id));
+
+ /*
+ * Flags field is not valid if NWd endpoint requests notifications from
+ * VMs or Hypervisor. Those are managed by the hypervisor if present.
+ */
+ bool flags_valid =
+ !(ffa_is_vm_id(receiver_id) &&
+ ((flags & FFA_NOTIFICATION_FLAG_BITMAP_VM) != 0U ||
+ (flags & FFA_NOTIFICATION_FLAG_BITMAP_HYP) != 0U));
+
+ return caller_and_receiver_valid && flags_valid;
+}
+
+void plat_ffa_notification_info_get_forward( // NOLINTNEXTLINE
+ uint16_t *ids, uint32_t *ids_count, // NOLINTNEXTLINE
+ uint32_t *lists_sizes, uint32_t *lists_count,
+ const uint32_t ids_count_max)
+{
+ (void)ids;
+ (void)ids_count;
+ (void)lists_sizes;
+ (void)lists_count;
+ (void)ids_count_max;
+}
+
+struct ffa_value plat_ffa_notifications_bitmap_create(
+ ffa_id_t vm_id, ffa_vcpu_count_t vcpu_count)
+{
+ struct ffa_value ret = (struct ffa_value){.func = FFA_SUCCESS_32};
+ struct vm_locked vm_locked;
+
+ if (vm_id == HF_OTHER_WORLD_ID) {
+ /*
+ * If the provided VM ID refers to the Hypervisor, represented
+ * by the other world VM with ID HF_OTHER_WORLD_ID, check if the
+ * notifications have been enabled.
+ */
+
+ vm_locked = vm_find_locked(vm_id);
+
+ CHECK(vm_locked.vm != NULL);
+
+ /* The call has already been made for the other world VM. */
+ if (vm_locked.vm->notifications.enabled) {
+ dlog_verbose("Notification bitmap already created.\n");
+ ret = ffa_error(FFA_DENIED);
+ goto out;
+ }
+
+ /* Enable notifications for `other_world_vm`. */
+ vm_locked.vm->notifications.enabled = true;
+ } else {
+ /* Otherwise the ID should be a NWd VM ID. */
+ vm_locked = plat_ffa_nwd_vm_create(vm_id);
+
+ /* If received NULL, there are no slots for VM creation. */
+ if (vm_locked.vm == NULL) {
+ dlog_verbose("No memory to create VM ID %#x.\n", vm_id);
+ return ffa_error(FFA_NO_MEMORY);
+ }
+
+ /* Ensure bitmap has not already been created. */
+ if (vm_locked.vm->notifications.enabled) {
+ dlog_verbose("Notification bitmap already created.\n");
+ ret = ffa_error(FFA_DENIED);
+ goto out;
+ }
+
+ vm_locked.vm->notifications.enabled = true;
+ vm_locked.vm->vcpu_count = vcpu_count;
+ }
+
+out:
+ vm_unlock(&vm_locked);
+
+ return ret;
+}
+
+bool plat_ffa_notifications_bitmap_create_call(ffa_id_t vm_id,
+ ffa_vcpu_count_t vcpu_count)
+{
+ (void)vm_id;
+ (void)vcpu_count;
+
+ return true;
+}
+
+struct ffa_value plat_ffa_notifications_bitmap_destroy(ffa_id_t vm_id)
+{
+ struct ffa_value ret = {.func = FFA_SUCCESS_32};
+ struct vm_locked to_destroy_locked = plat_ffa_vm_find_locked(vm_id);
+
+ if (to_destroy_locked.vm == NULL) {
+ dlog_verbose("Bitmap not created for VM: %u\n", vm_id);
+ return ffa_error(FFA_DENIED);
+ }
+
+ if (!to_destroy_locked.vm->notifications.enabled) {
+ dlog_verbose("Notification disabled for VM: %u\n", vm_id);
+ ret = ffa_error(FFA_DENIED);
+ goto out;
+ }
+
+ /* Check if there is any notification pending. */
+ if (vm_are_notifications_pending(to_destroy_locked, false, ~0x0U)) {
+ dlog_verbose("VM has notifications pending.\n");
+ ret = ffa_error(FFA_DENIED);
+ goto out;
+ }
+
+ to_destroy_locked.vm->notifications.enabled = false;
+ vm_notifications_init(to_destroy_locked.vm,
+ to_destroy_locked.vm->vcpu_count, NULL);
+ if (vm_id != HF_OTHER_WORLD_ID) {
+ plat_ffa_vm_destroy(to_destroy_locked);
+ }
+
+out:
+ vm_unlock(&to_destroy_locked);
+
+ return ret;
+}
+
+bool plat_ffa_notifications_get_from_sp(struct vm_locked receiver_locked,
+ ffa_vcpu_index_t vcpu_id,
+ ffa_notifications_bitmap_t *from_sp,
+ struct ffa_value *ret)
+{
+ (void)ret;
+
+ *from_sp = vm_notifications_partition_get_pending(receiver_locked,
+ false, vcpu_id);
+
+ return true;
+}
+
+bool plat_ffa_notifications_get_framework_notifications(
+ struct vm_locked receiver_locked, ffa_notifications_bitmap_t *from_fwk,
+ uint32_t flags, ffa_vcpu_index_t vcpu_id, struct ffa_value *ret)
+{
+ assert(from_fwk != NULL);
+ assert(ret != NULL);
+
+ (void)vcpu_id;
+
+ if (!vm_id_is_current_world(receiver_locked.vm->id) &&
+ (flags & FFA_NOTIFICATION_FLAG_BITMAP_HYP) != 0U) {
+ dlog_error(
+ "Notification get flag from hypervisor in call to SPMC "
+ "MBZ.\n");
+ *ret = ffa_error(FFA_INVALID_PARAMETERS);
+ return false;
+ }
+
+ *from_fwk = vm_notifications_framework_get_pending(receiver_locked);
+
+ return true;
+}
+
+static void plat_ffa_send_schedule_receiver_interrupt(struct cpu *cpu)
+{
+ dlog_verbose("Setting Schedule Receiver SGI %u on core: %zu\n",
+ HF_SCHEDULE_RECEIVER_INTID, cpu_index(cpu));
+
+ plat_interrupts_send_sgi(HF_SCHEDULE_RECEIVER_INTID, cpu, false);
+}
+
+static void plat_ffa_sri_set_delayed_internal(struct cpu *cpu, bool delayed)
+{
+ assert(cpu != NULL);
+ cpu->is_sri_delayed = delayed;
+}
+
+void plat_ffa_sri_set_delayed(struct cpu *cpu)
+{
+ plat_ffa_sri_set_delayed_internal(cpu, true);
+}
+
+static bool plat_ffa_is_sri_delayed(struct cpu *cpu)
+{
+ assert(cpu != NULL);
+ return cpu->is_sri_delayed;
+}
+
+void plat_ffa_sri_trigger_if_delayed(struct cpu *cpu)
+{
+ assert(cpu != NULL);
+
+ if (plat_ffa_is_sri_delayed(cpu)) {
+ plat_ffa_send_schedule_receiver_interrupt(cpu);
+ plat_ffa_sri_set_delayed_internal(cpu, false);
+ }
+}
+
+void plat_ffa_sri_trigger_not_delayed(struct cpu *cpu)
+{
+ /*
+ * If flag to delay SRI isn't set, trigger SRI such that the
+ * receiver scheduler is aware there are pending notifications.
+ */
+ plat_ffa_send_schedule_receiver_interrupt(cpu);
+ plat_ffa_sri_set_delayed_internal(cpu, false);
+}
+
+void plat_ffa_sri_init(struct cpu *cpu)
+{
+ /* Configure as Non Secure SGI. */
+ struct interrupt_descriptor sri_desc = {
+ .interrupt_id = HF_SCHEDULE_RECEIVER_INTID,
+ .type = INT_DESC_TYPE_SGI,
+ .sec_state = INT_DESC_SEC_STATE_NS,
+ .priority = SRI_PRIORITY,
+ .valid = true,
+ .enabled = true,
+ };
+
+ /* TODO: when supported, make the interrupt driver use cpu structure. */
+ (void)cpu;
+
+ plat_interrupts_configure_interrupt(sri_desc);
+}
diff --git a/src/arch/aarch64/plat/psci/spmc.c b/src/arch/aarch64/plat/psci/spmc.c
index 7f9fb01..5932c4e 100644
--- a/src/arch/aarch64/plat/psci/spmc.c
+++ b/src/arch/aarch64/plat/psci/spmc.c
@@ -7,6 +7,7 @@
*/
#include "hf/arch/plat/ffa.h"
+#include "hf/arch/plat/ffa/notifications.h"
#include "hf/arch/plat/psci.h"
#include "hf/api.h"
diff --git a/src/arch/fake/hypervisor/ffa.c b/src/arch/fake/hypervisor/ffa.c
index 0582c13..a9cb605 100644
--- a/src/arch/fake/hypervisor/ffa.c
+++ b/src/arch/fake/hypervisor/ffa.c
@@ -9,6 +9,7 @@
#include "hf/ffa.h"
#include "hf/arch/plat/ffa.h"
+#include "hf/arch/plat/ffa/notifications.h"
#include "hf/ffa_internal.h"
#include "hf/vcpu.h"
diff --git a/src/hf_ipi.c b/src/hf_ipi.c
index 3aa58e6..86f18c1 100644
--- a/src/hf_ipi.c
+++ b/src/hf_ipi.c
@@ -8,7 +8,7 @@
#include "hf/hf_ipi.h"
-#include "hf/arch/plat/ffa.h"
+#include "hf/arch/plat/ffa/notifications.h"
#include "hf/cpu.h"
#include "hf/plat/interrupts.h"
diff --git a/src/load.c b/src/load.c
index fc9007f..fab340e 100644
--- a/src/load.c
+++ b/src/load.c
@@ -14,6 +14,7 @@
#include "hf/arch/other_world.h"
#include "hf/arch/plat/ffa.h"
#include "hf/arch/plat/ffa/direct_messaging.h"
+#include "hf/arch/plat/ffa/notifications.h"
#include "hf/arch/vm.h"
#include "hf/api.h"