Add support for FFA_PARTITION_INFO_GET
Add support for FFA_PARTITION_INFO_GET, which returns information
about the partitions instantiated in the system. The partition
information descriptors are written to the caller's RX buffer and
their count is returned in w2.

Signed-off-by: Fuad Tabba <tabba@google.com>
Change-Id: I93070fe841b4b19c596645246203dbba14eddb12
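
For context, a caller-side sketch of how a VM that has already mapped its
RX/TX buffers might use the new interface: the UUID of interest goes in
w1-w4 (all zeroes for the Null UUID, i.e. every partition), and on success
the descriptors are placed in the VM's RX buffer with their count returned
in w2. This is illustrative only and not part of the patch; the helper name
and the recv_buf parameter are made up, and the includes mirror the ones
used in api.c below.

/* Illustrative only; not part of this patch. */
#include "vmapi/hf/call.h"
#include "vmapi/hf/ffa.h"

static ffa_vm_count_t count_indirect_msg_partitions(const void *recv_buf)
{
	const struct ffa_partition_info *info = recv_buf;
	ffa_vm_count_t matches = 0;
	ffa_vm_count_t count;
	struct ffa_value ret;

	/* All-zero w1-w4 encode the Null UUID, i.e. "all partitions". */
	ret = ffa_call((struct ffa_value){.func = FFA_PARTITION_INFO_GET_32});
	if (ret.func != FFA_SUCCESS_32) {
		/* e.g. FFA_BUSY if the VM still owns its RX buffer. */
		return 0;
	}

	/* w2 holds the number of descriptors placed in the RX buffer. */
	count = (ffa_vm_count_t)ret.arg2;
	for (ffa_vm_count_t i = 0; i < count; ++i) {
		if (info[i].properties & FFA_PARTITION_INDIRECT_MSG) {
			++matches;
		}
	}

	/* The RX buffer should then be handed back via FFA_RX_RELEASE. */
	return matches;
}
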
diff --git a/src/api.c b/src/api.c
index 418ce50..3370bcc 100644
--- a/src/api.c
+++ b/src/api.c
@@ -26,6 +26,10 @@
#include "vmapi/hf/call.h"
#include "vmapi/hf/ffa.h"
+static_assert(sizeof(struct ffa_partition_info) == 8,
+ "Partition information descriptor size doesn't match the one in "
+ "the FF-A 1.0 EAC specification, Table 82.");
+
/*
* To eliminate the risk of deadlocks, we define a partial order for the
* acquisition of locks held concurrently by the same physical CPU. Our current
@@ -118,6 +122,35 @@
}
/**
+ * Checks whether the given `to` VM's mailbox is currently busy, and optionally
+ * registers the `from` VM to be notified when it becomes available.
+ */
+static bool msg_receiver_busy(struct vm_locked to, struct vm *from, bool notify)
+{
+ if (to.vm->mailbox.state != MAILBOX_STATE_EMPTY ||
+ to.vm->mailbox.recv == NULL) {
+ /*
+ * Fail if the receiver isn't currently ready to receive data,
+ * setting up for notification if requested.
+ */
+ if (notify) {
+ struct wait_entry *entry =
+ vm_get_wait_entry(from, to.vm->id);
+
+ /* Append waiter only if it's not there yet. */
+ if (list_empty(&entry->wait_links)) {
+ list_append(&to.vm->mailbox.waiter_list,
+ &entry->wait_links);
+ }
+ }
+
+ return true;
+ }
+
+ return false;
+}
+
+/**
* Returns to the primary VM and signals that the vCPU still has work to do.
*/
struct vcpu *api_preempt(struct vcpu *current)
@@ -223,6 +256,82 @@
return api_switch_to_primary(current, ret, VCPU_STATE_ABORTED);
}
+struct ffa_value api_ffa_partition_info_get(struct vcpu *current,
+ const struct ffa_uuid *uuid)
+{
+ struct vm *current_vm = current->vm;
+ struct vm_locked current_vm_locked;
+ ffa_vm_count_t vm_count = 0;
+ bool uuid_is_null = ffa_uuid_is_null(uuid);
+ struct ffa_value ret;
+ uint32_t size;
+ struct ffa_partition_info partitions[MAX_VMS];
+
+ /*
+ * Iterate through the VMs to find the ones with a matching UUID.
+ * A Null UUID retrieves information for all VMs.
+ */
+ for (uint16_t index = 0; index < vm_get_count(); ++index) {
+ const struct vm *vm = vm_find_index(index);
+
+ if (uuid_is_null || ffa_uuid_equal(uuid, &vm->uuid)) {
+ partitions[vm_count].vm_id = vm->id;
+ partitions[vm_count].vcpu_count = vm->vcpu_count;
+
+ /* Hafnium only supports indirect messaging. */
+ partitions[vm_count].properties =
+ FFA_PARTITION_INDIRECT_MSG;
+
+ ++vm_count;
+ }
+ }
+
+ /* Unrecognized UUID: does not match any of the VMs and is not Null. */
+ if (vm_count == 0) {
+ return ffa_error(FFA_INVALID_PARAMETERS);
+ }
+
+ size = vm_count * sizeof(partitions[0]);
+ if (size > FFA_MSG_PAYLOAD_MAX) {
+ dlog_error(
+ "Partition information does not fit in the VM's RX "
+ "buffer.\n");
+ return ffa_error(FFA_NO_MEMORY);
+ }
+
+ /*
+ * Partition information is returned in the VM's RX buffer, which is why
+ * the lock is needed.
+ */
+ current_vm_locked = vm_lock(current_vm);
+
+ if (msg_receiver_busy(current_vm_locked, NULL, false)) {
+ /*
+ * Can't retrieve partition information if the mailbox is not
+ * available.
+ */
+ dlog_verbose("RX buffer not ready.\n");
+ ret = ffa_error(FFA_BUSY);
+ goto out_unlock;
+ }
+
+ /* Populate the VM's RX buffer with the partition information. */
+ memcpy_s(current_vm->mailbox.recv, FFA_MSG_PAYLOAD_MAX, partitions,
+ size);
+ current_vm->mailbox.recv_size = size;
+ current_vm->mailbox.recv_sender = HF_HYPERVISOR_VM_ID;
+ current_vm->mailbox.recv_func = FFA_PARTITION_INFO_GET_32;
+ current_vm->mailbox.state = MAILBOX_STATE_READ;
+
+ /* Return the count of partition information descriptors in w2. */
+ ret = (struct ffa_value){.func = FFA_SUCCESS_32, .arg2 = vm_count};
+
+out_unlock:
+ vm_unlock(&current_vm_locked);
+
+ return ret;
+}
+
/**
* Returns the ID of the VM.
*/
@@ -870,35 +979,6 @@
}
/**
- * Checks whether the given `to` VM's mailbox is currently busy, and optionally
- * registers the `from` VM to be notified when it becomes available.
- */
-static bool msg_receiver_busy(struct vm_locked to, struct vm *from, bool notify)
-{
- if (to.vm->mailbox.state != MAILBOX_STATE_EMPTY ||
- to.vm->mailbox.recv == NULL) {
- /*
- * Fail if the receiver isn't currently ready to receive data,
- * setting up for notification if requested.
- */
- if (notify) {
- struct wait_entry *entry =
- vm_get_wait_entry(from, to.vm->id);
-
- /* Append waiter only if it's not there yet. */
- if (list_empty(&entry->wait_links)) {
- list_append(&to.vm->mailbox.waiter_list,
- &entry->wait_links);
- }
- }
-
- return true;
- }
-
- return false;
-}
-
-/**
* Notifies the `to` VM about the message currently in its mailbox, possibly
* with the help of the primary VM.
*/
@@ -1428,6 +1508,7 @@
case FFA_FEATURES_32:
case FFA_RX_RELEASE_32:
case FFA_RXTX_MAP_64:
+ case FFA_PARTITION_INFO_GET_32:
case FFA_ID_GET_32:
case FFA_MSG_POLL_32:
case FFA_MSG_WAIT_32:
diff --git a/src/arch/aarch64/hypervisor/handler.c b/src/arch/aarch64/hypervisor/handler.c
index 280fb71..ddc5116 100644
--- a/src/arch/aarch64/hypervisor/handler.c
+++ b/src/arch/aarch64/hypervisor/handler.c
@@ -317,6 +317,14 @@
case FFA_VERSION_32:
*args = api_ffa_version(args->arg1);
return true;
+ case FFA_PARTITION_INFO_GET_32: {
+ struct ffa_uuid uuid;
+
+ ffa_uuid_init(args->arg1, args->arg2, args->arg3, args->arg4,
+ &uuid);
+ *args = api_ffa_partition_info_get(current(), &uuid);
+ return true;
+ }
case FFA_ID_GET_32:
*args = api_ffa_id_get(current());
return true;
diff --git a/src/load.c b/src/load.c
index 83eed53..8962d65 100644
--- a/src/load.c
+++ b/src/load.c
@@ -104,6 +104,7 @@
static bool load_common(const struct manifest_vm *manifest_vm, struct vm *vm)
{
vm->smc_whitelist = manifest_vm->smc_whitelist;
+ vm->uuid = manifest_vm->sp.uuid;
/* Initialize architecture-specific features. */
arch_vm_features_set(vm);
diff --git a/src/manifest.c b/src/manifest.c
index 003a1b9..947b106 100644
--- a/src/manifest.c
+++ b/src/manifest.c
@@ -317,7 +317,7 @@
while (uint32list_has_next(&uuid) && i < 4) {
TRY(uint32list_get_next(&uuid, &uuid_word));
- vm->sp.uuid[i] = uuid_word;
+ vm->sp.uuid.uuid[i] = uuid_word;
i++;
}
- dlog_verbose(" SP UUID %#x-%x-%x_%x\n", vm->sp.uuid[0], vm->sp.uuid[1],
- vm->sp.uuid[2], vm->sp.uuid[3]);
+ dlog_verbose(" SP UUID %#x-%x-%x_%x\n", vm->sp.uuid.uuid[0],
+ vm->sp.uuid.uuid[1], vm->sp.uuid.uuid[2], vm->sp.uuid.uuid[3]);
diff --git a/src/manifest_test.cc b/src/manifest_test.cc
index b89002d..2b8be8b 100644
--- a/src/manifest_test.cc
+++ b/src/manifest_test.cc
@@ -819,7 +819,7 @@
ASSERT_EQ(m.vm[0].sp.ffa_version, 0x10000);
ASSERT_THAT(
- std::span(m.vm[0].sp.uuid, 4),
+ std::span(m.vm[0].sp.uuid.uuid, 4),
ElementsAre(0xb4b5671e, 0x4a904fe1, 0xb81ffb13, 0xdae1dacb));
ASSERT_EQ(m.vm[0].sp.execution_ctx_count, 1);
ASSERT_EQ(m.vm[0].sp.run_time_el, S_EL1);
diff --git a/src/vm.c b/src/vm.c
index af3ecc0..a021d77 100644
--- a/src/vm.c
+++ b/src/vm.c
@@ -90,6 +90,9 @@
return vm_count;
}
+/**
+ * Returns a pointer to the VM with the corresponding id.
+ */
struct vm *vm_find(ffa_vm_id_t id)
{
uint16_t index;
@@ -108,6 +111,14 @@
index = id - HF_VM_ID_OFFSET;
+ return vm_find_index(index);
+}
+
+/**
+ * Returns a pointer to the VM at the specified index.
+ */
+struct vm *vm_find_index(uint16_t index)
+{
/* Ensure the VM is initialized. */
if (index >= vm_count) {
return NULL;
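
For reference, a minimal sketch of the 8-byte partition information
descriptor that the static_assert in api.c checks against (FF-A 1.0 EAC,
Table 82). The field names follow their use in api_ffa_partition_info_get();
the exact integer typedefs in the real header are an assumption here.

#include <stdint.h>

/* Sketch only: types inferred from use, not copied from the header. */
struct ffa_partition_info {
	uint16_t vm_id;      /* Partition ID. */
	uint16_t vcpu_count; /* Number of execution contexts. */
	uint32_t properties; /* Flags such as FFA_PARTITION_INDIRECT_MSG. */
};
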