FF-A: Booting SPs according to 'boot-order'
Secure Hafnium boots partitions according to the "boot-order" field in
their partition manifests.
In this patch:
- Added manifest parsing of "boot-order" and populated the VM structure
with it;
- Added the field "next_boot" to the VM structure, in order to create a
boot list sorted by "boot-order";
- The root of the list points to the highest priority VM;
- Booting consists of traversing the list as the highest priority VMs
use the FFA_MSG_WAIT interface (see the sketch after this list);
- After traversing the whole boot list, execution is returned to the
SPMD;
- "manifest_test.cc" updated to include the "boot-order" field in the
partition manifest tests;
- "vm_test.cc" updated to include a unit test for the main logic of
this patch.
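
As an illustration only (not part of the diff below): once load.c has
called vm_update_boot() for each SP, the boot list ends up ordered by
descending "boot_order". The helper "example_boot_list" and the
boot-order values are hypothetical; the sketch assumes the list starts
empty and that the new declarations (vm_update_boot(),
vm_get_first_boot(), and the boot_order/next_boot fields) are exposed
through "hf/vm.h", as vm_test.cc below suggests.

    #include "hf/vm.h"

    /* Sketch: three SPs whose manifests carry different "boot-order". */
    void example_boot_list(struct vm *sp_a, struct vm *sp_b, struct vm *sp_c)
    {
        /* Values as they would be parsed from each SP's manifest. */
        sp_a->boot_order = 1;
        sp_b->boot_order = 3;
        sp_c->boot_order = 2;

        /* Insertion order does not matter; the list is kept sorted. */
        vm_update_boot(sp_a);
        vm_update_boot(sp_b);
        vm_update_boot(sp_c);

        /*
         * vm_update_boot() keeps the list in descending boot_order, so
         * the SP with the largest value is booted first:
         *
         *   vm_get_first_boot() == sp_b -> sp_c -> sp_a -> NULL
         *   (linked via the "next_boot" field)
         */
    }
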
Change-Id: I43adf90447eed3bc24c8eb2ccb8eb979b471f3c3
Signed-off-by: J-Alves <Joao.Alves@arm.com>
diff --git a/src/api.c b/src/api.c
index d131656..46d8a88 100644
--- a/src/api.c
+++ b/src/api.c
@@ -1196,12 +1196,14 @@
struct vcpu_locked current_locked;
struct vm *vm = current->vm;
struct ffa_value return_code;
+ bool is_from_secure_world =
+ (current->vm->id & HF_VM_ID_WORLD_MASK) != 0;
/*
* The primary VM will receive messages as a status code from running
* vCPUs and must not call this function.
*/
- if (vm->id == HF_PRIMARY_VM_ID) {
+ if (!is_from_secure_world && vm->id == HF_PRIMARY_VM_ID) {
return ffa_error(FFA_NOT_SUPPORTED);
}
@@ -1243,8 +1245,13 @@
goto out;
}
- /* Switch back to primary VM to block. */
- {
+ if (is_from_secure_world) {
+ /* Return to the other world if the caller is an SP. */
+ *next = api_switch_to_other_world(
+ current, (struct ffa_value){.func = FFA_MSG_WAIT_32},
+ VCPU_STATE_BLOCKED_MAILBOX);
+ } else {
+ /* Switch back to primary VM to block. */
struct ffa_value run_return = {
.func = FFA_MSG_WAIT_32,
.arg1 = ffa_vm_vcpu(vm->id, vcpu_index(current)),
diff --git a/src/arch/aarch64/hypervisor/cpu.c b/src/arch/aarch64/hypervisor/cpu.c
index 21bf6cf..0f6710e0 100644
--- a/src/arch/aarch64/hypervisor/cpu.c
+++ b/src/arch/aarch64/hypervisor/cpu.c
@@ -65,7 +65,12 @@
{
ffa_vm_id_t vm_id = vcpu->vm->id;
bool is_primary = vm_id == HF_PRIMARY_VM_ID;
+#if SECURE_WORLD == 0
cpu_id_t vcpu_id = is_primary ? vcpu->cpu->id : vcpu_index(vcpu);
+#else
+ cpu_id_t vcpu_id = vcpu_index(vcpu);
+#endif
+
paddr_t table = vcpu->vm->ptable.root;
struct arch_regs *r = &vcpu->regs;
uintreg_t pc = r->pc;
diff --git a/src/arch/aarch64/hypervisor/handler.c b/src/arch/aarch64/hypervisor/handler.c
index 4d3a462..c10f77e 100644
--- a/src/arch/aarch64/hypervisor/handler.c
+++ b/src/arch/aarch64/hypervisor/handler.c
@@ -19,6 +19,7 @@
#include "hf/cpu.h"
#include "hf/dlog.h"
#include "hf/ffa.h"
+#include "hf/ffa_internal.h"
#include "hf/panic.h"
#include "hf/vm.h"
@@ -259,6 +260,51 @@
write_msr(hcr_el2, hcr_el2);
}
+#if SECURE_WORLD == 1
+static bool sp_boot_next(struct vcpu *current, struct vcpu **next,
+ struct ffa_value *ffa_ret)
+{
+ struct vm_locked current_vm_locked;
+ struct vm *vm_next = NULL;
+ bool ret = false;
+
+ /*
+ * If the VM hasn't been initialized yet, initialize it and traverse
+ * the boot list through the "next_boot" field of the VM structure.
+ * Once all the SPs have been booted (when "next_boot" is NULL),
+ * return execution to the NWd.
+ */
+ current_vm_locked = vm_lock(current->vm);
+ if (current_vm_locked.vm->initialized == false) {
+ current_vm_locked.vm->initialized = true;
+ dlog_verbose("Initialized VM: %#x, boot_order: %u\n",
+ current_vm_locked.vm->id,
+ current_vm_locked.vm->boot_order);
+
+ if (current_vm_locked.vm->next_boot != NULL) {
+ current->state = VCPU_STATE_BLOCKED_MAILBOX;
+ vm_next = current_vm_locked.vm->next_boot;
+ CHECK(vm_next->initialized == false);
+ *next = vm_get_vcpu(vm_next, vcpu_index(current));
+ arch_regs_reset(*next);
+ (*next)->cpu = current->cpu;
+ (*next)->state = VCPU_STATE_RUNNING;
+ (*next)->regs_available = false;
+
+ *ffa_ret = (struct ffa_value){.func = FFA_INTERRUPT_32};
+ ret = true;
+ goto out;
+ }
+
+ dlog_verbose("Finished initializing all VMs.\n");
+ }
+
+out:
+ vm_unlock(&current_vm_locked);
+ return ret;
+}
+#endif
+
/**
* Checks whether to block an SMC being forwarded from a VM.
*/
@@ -371,6 +417,11 @@
ffa_msg_send_attributes(*args), current, next);
return true;
case FFA_MSG_WAIT_32:
+#if SECURE_WORLD == 1
+ if (sp_boot_next(current, next, args)) {
+ return true;
+ }
+#endif
*args = api_ffa_msg_recv(true, current, next);
return true;
case FFA_MSG_POLL_32:
diff --git a/src/load.c b/src/load.c
index f820204..bbc0f1b 100644
--- a/src/load.c
+++ b/src/load.c
@@ -148,7 +148,12 @@
return false;
}
}
+
+ vm_locked.vm->boot_order = manifest_vm->sp.boot_order;
+ /* Update the boot list according to boot_order. */
+ vm_update_boot(vm_locked.vm);
}
+
/* Initialize architecture-specific features. */
arch_vm_features_set(vm_locked.vm);
diff --git a/src/main.c b/src/main.c
index 8a495db..5f30aac 100644
--- a/src/main.c
+++ b/src/main.c
@@ -16,13 +16,19 @@
struct vcpu *cpu_main(struct cpu *c)
{
struct vcpu *vcpu;
+#if SECURE_WORLD == 1
+ struct vm *first_boot = vm_get_first_boot();
+ vcpu = vm_get_vcpu(first_boot, cpu_index(c));
+#else
vcpu = vm_get_vcpu(vm_find(HF_PRIMARY_VM_ID), cpu_index(c));
+#endif
+
vcpu->cpu = c;
arch_cpu_init();
- /* Reset the registers to give a clean start for the primary's vCPU. */
+ /* Reset the registers to give a clean start for the vCPU. */
arch_regs_reset(vcpu);
return vcpu;
diff --git a/src/manifest.c b/src/manifest.c
index 60b0fa3..ab61b12 100644
--- a/src/manifest.c
+++ b/src/manifest.c
@@ -195,6 +195,21 @@
return MANIFEST_SUCCESS;
}
+static enum manifest_return_code read_optional_uint16(
+ const struct fdt_node *node, const char *property,
+ uint16_t default_value, uint16_t *out)
+{
+ enum manifest_return_code ret;
+
+ ret = read_uint16(node, property, out);
+ if (ret == MANIFEST_ERROR_PROPERTY_NOT_FOUND) {
+ *out = default_value;
+ return MANIFEST_SUCCESS;
+ }
+
+ return ret;
+}
+
static enum manifest_return_code read_uint8(const struct fdt_node *node,
const char *property, uint8_t *out)
{
@@ -530,6 +545,10 @@
TRY(read_uint64(&root, "entrypoint-offset", &vm->sp.ep_offset));
dlog_verbose(" SP entry point offset %#x\n", vm->sp.ep_offset);
+ TRY(read_optional_uint16(&root, "boot-order", DEFAULT_BOOT_ORDER,
+ &vm->sp.boot_order));
+ dlog_verbose(" SP boot order %u\n", vm->sp.boot_order);
+
TRY(read_uint8(&root, "xlat-granule", (uint8_t *)&vm->sp.xlat_granule));
dlog_verbose(" SP translation granule %d\n", vm->sp.xlat_granule);
diff --git a/src/manifest_test.cc b/src/manifest_test.cc
index 98caf85..890ab26 100644
--- a/src/manifest_test.cc
+++ b/src/manifest_test.cc
@@ -235,6 +235,7 @@
Property("load-address", "<0x7000000>");
Property("entrypoint-offset", "<0x00001000>");
Property("xlat-granule", "<0>");
+ Property("boot-order", "<0>");
Property("messaging-method", "<1>");
return *this;
}
@@ -730,6 +731,10 @@
TEST(manifest, ffa_validate_sanity_check)
{
+ /*
+ * TODO: write test excluding all optional fields of the manifest, in
+ * accordance with specification.
+ */
struct manifest m;
/* Incompatible version */
@@ -744,6 +749,7 @@
.Property("load-address", "<0x7000000>")
.Property("entrypoint-offset", "<0x00001000>")
.Property("xlat-granule", "<0>")
+ .Property("boot-order", "<0>")
.Property("messaging-method", "<1>")
.Build();
/* clang-format on */
@@ -762,6 +768,7 @@
.Property("load-address", "<0x7000000>")
.Property("entrypoint-offset", "<0x00001000>")
.Property("xlat-granule", "<3>")
+ .Property("boot-order", "<0>")
.Property("messaging-method", "<1>")
.Build();
/* clang-format on */
@@ -780,6 +787,7 @@
.Property("load-address", "<0x7000000>")
.Property("entrypoint-offset", "<0x00001000>")
.Property("xlat-granule", "<0>")
+ .Property("boot-order", "<0>")
.Property("messaging-method", "<1>")
.Build();
/* clang-format on */
@@ -798,6 +806,7 @@
.Property("load-address", "<0x7000000>")
.Property("entrypoint-offset", "<0x00001000>")
.Property("xlat-granule", "<0>")
+ .Property("boot-order", "<0>")
.Property("messaging-method", "<1>")
.Build();
/* clang-format on */
@@ -816,6 +825,7 @@
.Property("load-address", "<0x7000000>")
.Property("entrypoint-offset", "<0x00001000>")
.Property("xlat-granule", "<0>")
+ .Property("boot-order", "<0>")
.Property("messaging-method", "<3>")
.Build();
/* clang-format on */
@@ -1054,6 +1064,7 @@
ASSERT_EQ(m.vm[0].sp.load_addr, 0x7000000);
ASSERT_EQ(m.vm[0].sp.ep_offset, 0x00001000);
ASSERT_EQ(m.vm[0].sp.xlat_granule, PAGE_4KB);
+ ASSERT_EQ(m.vm[0].sp.boot_order, 0);
ASSERT_EQ(m.vm[0].sp.messaging_method, INDIRECT_MESSAGING);
ASSERT_EQ(m.vm[0].sp.mem_regions[0].base_address, 0x7100000);
ASSERT_EQ(m.vm[0].sp.mem_regions[0].page_count, 4);
diff --git a/src/vm.c b/src/vm.c
index fbbdc9f..96e1212 100644
--- a/src/vm.c
+++ b/src/vm.c
@@ -21,6 +21,7 @@
static struct vm vms[MAX_VMS];
static struct vm other_world;
static ffa_vm_count_t vm_count;
+static struct vm *first_boot_vm;
struct vm *vm_init(ffa_vm_id_t id, ffa_vcpu_count_t vcpu_count,
struct mpool *ppool)
@@ -292,3 +293,41 @@
vm_unmap(vm_locked, layout_data_begin(), layout_data_end(),
ppool);
}
+
+/**
+ * Gets the first partition to boot, according to the boot protocol from the
+ * FF-A spec.
+ */
+struct vm *vm_get_first_boot(void)
+{
+ return first_boot_vm;
+}
+
+/**
+ * Inserts the VM into the boot list, which is sorted by the `boot_order`
+ * field of the VM structure and rooted in `first_boot_vm`.
+ */
+void vm_update_boot(struct vm *vm)
+{
+ struct vm *current = NULL;
+ struct vm *previous = NULL;
+
+ if (first_boot_vm == NULL) {
+ first_boot_vm = vm;
+ return;
+ }
+
+ current = first_boot_vm;
+
+ while (current != NULL && current->boot_order >= vm->boot_order) {
+ previous = current;
+ current = current->next_boot;
+ }
+
+ if (previous != NULL) {
+ previous->next_boot = vm;
+ } else {
+ first_boot_vm = vm;
+ }
+
+ vm->next_boot = current;
+}
diff --git a/src/vm_test.cc b/src/vm_test.cc
index 6fb2bce..4a0c119 100644
--- a/src/vm_test.cc
+++ b/src/vm_test.cc
@@ -13,6 +13,7 @@
#include "hf/vm.h"
}
+#include <list>
#include <memory>
#include <span>
#include <vector>
@@ -29,7 +30,7 @@
using struct_vm = struct vm;
-constexpr size_t TEST_HEAP_SIZE = PAGE_SIZE * 16;
+constexpr size_t TEST_HEAP_SIZE = PAGE_SIZE * 32;
const int TOP_LEVEL = arch_mm_stage2_max_level();
class vm : public ::testing::Test
@@ -49,6 +50,12 @@
protected:
struct mpool ppool;
+
+ public:
+ static bool BootOrderBiggerThan(struct_vm *vm1, struct_vm *vm2)
+ {
+ return vm1->boot_order > vm2->boot_order;
+ }
};
/**
@@ -70,4 +77,70 @@
vm_unlock(&vm_locked);
}
+/**
+ * Validates that the boot list is created properly, according to the VM's
+ * "boot_order" field.
+ */
+TEST_F(vm, vm_boot_order)
+{
+ struct_vm *vm_cur;
+ std::list<struct_vm *> expected_final_order;
+
+ EXPECT_FALSE(vm_get_first_boot());
+
+ /*
+ * Insertion when no call to "vm_update_boot" has been made yet.
+ * The "boot_list" is expected to be empty.
+ */
+ EXPECT_TRUE(vm_init_next(1, &ppool, &vm_cur));
+ vm_cur->boot_order = 1;
+ vm_update_boot(vm_cur);
+ expected_final_order.push_back(vm_cur);
+
+ EXPECT_EQ(vm_get_first_boot()->id, vm_cur->id);
+
+ /* Insertion at the head of the boot list */
+ EXPECT_TRUE(vm_init_next(1, &ppool, &vm_cur));
+ vm_cur->boot_order = 3;
+ vm_update_boot(vm_cur);
+ expected_final_order.push_back(vm_cur);
+
+ EXPECT_EQ(vm_get_first_boot()->id, vm_cur->id);
+
+ /* Insertion of two in the middle of the boot list */
+ for (int i = 0; i < 2; i++) {
+ EXPECT_TRUE(vm_init_next(1, &ppool, &vm_cur));
+ vm_cur->boot_order = 2;
+ vm_update_boot(vm_cur);
+ expected_final_order.push_back(vm_cur);
+ }
+
+ /*
+ * Insertion at the end of the list.
+ * This test shares data with "vm_unmap_hypervisor_not_mapped".
+ * As such, a VM is expected to have been initialized before this
+ * test, with ID 1 and boot_order 0.
+ */
+ vm_cur = vm_find(1);
+ EXPECT_FALSE(vm_cur == NULL);
+ vm_update_boot(vm_cur);
+ expected_final_order.push_back(vm_cur);
+
+ /*
+ * The number of VMs initialized should match the size of
+ * "expected_final_order" before the final verification.
+ */
+ EXPECT_EQ(expected_final_order.size(), vm_get_count())
+ << "Something went wrong with the test itself...\n";
+
+ /* Sort "expected_final_order" by "boot_order" field */
+ expected_final_order.sort(vm::BootOrderBiggerThan);
+
+ std::list<struct_vm *>::iterator it;
+ for (it = expected_final_order.begin(), vm_cur = vm_get_first_boot();
+ it != expected_final_order.end() && vm_cur != NULL;
+ it++, vm_cur = vm_cur->next_boot) {
+ EXPECT_EQ((*it)->id, vm_cur->id);
+ }
+}
} /* namespace */