Enable MMU and caching in VM API tests
VM API tests are failing on real hardware because VMs are not seeing
data written by the hypervisor. The reason for this is that Hafnium has
data caching enabled while the test VMs do not. Solve this discrepancy
by enabling data caching in the VMs too, which requires enabling stage-1
MMU translation.
The entire address space is identity-mapped with read-write-execute
permissions. Only GIC tests currently require custom device mappings.
Implementation shares ptable management code from src/mm.c and
src/arch/mm.c.
Bug: 138985026
Test: ./kokoro/ubuntu/build.sh
Change-Id: Ib9f599c448d70296a6ca869ddbb51abfcc55148d
diff --git a/test/hftest/BUILD.gn b/test/hftest/BUILD.gn
index e08c62a..9a5b452 100644
--- a/test/hftest/BUILD.gn
+++ b/test/hftest/BUILD.gn
@@ -42,6 +42,7 @@
]
deps = [
+ ":mm",
"//src:dlog",
"//src:memiter",
"//src:panic",
@@ -91,6 +92,7 @@
deps = [
":common",
+ ":mm",
"//src:dlog",
"//src:fdt",
"//src:memiter",
@@ -116,3 +118,20 @@
"//src:std",
]
}
+
+source_set("mm") {
+ testonly = true
+
+ public_configs = [ ":hftest_config" ]
+
+ sources = [
+ "hftest_mm.c",
+ ]
+
+ deps = [
+ "//src:layout",
+ "//src:mm",
+ "//src/arch/${plat_arch}:arch",
+ "//src/arch/${plat_arch}/hftest:mm",
+ ]
+}
diff --git a/test/hftest/hftest_mm.c b/test/hftest/hftest_mm.c
new file mode 100644
index 0000000..36b977b
--- /dev/null
+++ b/test/hftest/hftest_mm.c
@@ -0,0 +1,100 @@
+/*
+ * Copyright 2019 The Hafnium Authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "hf/arch/vm/mm.h"
+#include "hf/arch/vm/power_mgmt.h"
+
+#include "hftest.h"
+
+/* Number of pages reserved for page tables. Increase if necessary. */
+#define PTABLE_PAGES 3
+
+alignas(alignof(struct mm_page_table)) static char ptable_buf
+ [sizeof(struct mm_page_table) * PTABLE_PAGES];
+
+static struct mpool ppool;
+static struct mm_ptable ptable;
+
+static struct mm_stage1_locked get_stage1_locked(void)
+{
+ return (struct mm_stage1_locked){.ptable = &ptable};
+}
+
+bool hftest_mm_init(void)
+{
+ struct mm_stage1_locked stage1_locked;
+
+ mpool_init(&ppool, sizeof(struct mm_page_table));
+ if (!mpool_add_chunk(&ppool, ptable_buf, sizeof(ptable_buf))) {
+ HFTEST_FAIL(true, "Failed to add buffer to page-table pool.");
+ }
+
+ if (!mm_ptable_init(&ptable, MM_FLAG_STAGE1, &ppool)) {
+ HFTEST_FAIL(true, "Unable to allocate memory for page table.");
+ }
+
+ stage1_locked = get_stage1_locked();
+ mm_identity_map(stage1_locked, pa_init(0),
+ pa_init(mm_ptable_addr_space_end(MM_FLAG_STAGE1)),
+ MM_MODE_R | MM_MODE_W | MM_MODE_X, &ppool);
+
+ return arch_vm_mm_init(ptable.root);
+}
+
+void hftest_mm_identity_map(const void *base, size_t size, int mode)
+{
+ struct mm_stage1_locked stage1_locked = get_stage1_locked();
+ paddr_t start = pa_from_va(va_from_ptr(base));
+ paddr_t end = pa_add(start, size);
+
+ if (mm_identity_map(stage1_locked, start, end, mode, &ppool) != base) {
+ FAIL("Could not add new page table mapping. Try increasing "
+ "size of the page table buffer.");
+ }
+}
+
+struct cpu_start_state {
+ void (*entry)(uintptr_t arg);
+ uintreg_t arg;
+ struct spinlock lock;
+};
+
+static void cpu_entry(uintptr_t arg)
+{
+ struct cpu_start_state *s = (struct cpu_start_state *)arg;
+ struct cpu_start_state local = *s;
+
+ sl_unlock(&s->lock);
+ ASSERT_TRUE(arch_vm_mm_init(ptable.root));
+ local.entry(local.arg);
+}
+
+bool hftest_cpu_start(uintptr_t id, void *stack, size_t stack_size,
+ void (*entry)(uintptr_t arg), uintptr_t arg)
+{
+ struct cpu_start_state s =
+ (struct cpu_start_state){.entry = entry, .arg = arg};
+
+ sl_init(&s.lock);
+ sl_lock(&s.lock);
+ if (!cpu_start(id, stack, stack_size, &cpu_entry, (uintptr_t)&s)) {
+ return false;
+ }
+
+ /* Wait until cpu_entry unlocks the lock before freeing stack memory. */
+ sl_lock(&s.lock);
+ return true;
+}
diff --git a/test/hftest/hftest_service.c b/test/hftest/hftest_service.c
index 5b9fafb..72efa7d 100644
--- a/test/hftest/hftest_service.c
+++ b/test/hftest/hftest_service.c
@@ -18,6 +18,7 @@
#include <stdint.h>
#include "hf/memiter.h"
+#include "hf/mm.h"
#include "hf/spci.h"
#include "hf/std.h"
@@ -82,6 +83,17 @@
hftest_test_fn service;
struct hftest_context *ctx;
+ /*
+ * Initialize the stage-1 MMU and identity-map the entire address space.
+ */
+ if (!hftest_mm_init()) {
+ HFTEST_LOG_FAILURE();
+ HFTEST_LOG(HFTEST_LOG_INDENT "Memory initialization failed");
+ for (;;) {
+ /* Hang if memory init failed. */
+ }
+ }
+
struct spci_message *recv_msg = (struct spci_message *)recv;
/* Prepare the context. */
diff --git a/test/hftest/inc/hftest.h b/test/hftest/inc/hftest.h
index bcffe5c..62eb84c 100644
--- a/test/hftest/inc/hftest.h
+++ b/test/hftest/inc/hftest.h
@@ -90,6 +90,20 @@
*/
#define HFTEST_LOG_INDENT " "
+/** Initializes stage-1 MMU for tests running in a VM. */
+bool hftest_mm_init(void);
+
+/** Adds stage-1 identity mapping for pages covering bytes [base, base+size). */
+void hftest_mm_identity_map(const void *base, size_t size, int mode);
+
+/**
+ * Starts the CPU with the given ID. It will start at the provided entry point
+ * with the provided argument. It is a wrapper around the generic cpu_start()
+ * and takes care of MMU initialization.
+ */
+bool hftest_cpu_start(uintptr_t id, void *stack, size_t stack_size,
+ void (*entry)(uintptr_t arg), uintptr_t arg);
+
uintptr_t hftest_get_cpu_id(size_t index);
/* Above this point is the public API. Now include the implementation. */
diff --git a/test/hftest/standalone_main.c b/test/hftest/standalone_main.c
index c1bfa1a..63edf1e 100644
--- a/test/hftest/standalone_main.c
+++ b/test/hftest/standalone_main.c
@@ -19,6 +19,7 @@
#include "hf/fdt.h"
#include "hf/memiter.h"
+#include "hf/mm.h"
#include "hftest.h"
#include "hftest_common.h"
@@ -36,6 +37,14 @@
struct memiter bootargs_iter;
struct memiter command;
+ /*
+ * Initialize the stage-1 MMU and identity-map the entire address space.
+ */
+ if ((VM_TOOLCHAIN == 1) && !hftest_mm_init()) {
+ HFTEST_LOG("Memory initialization failed.");
+ return;
+ }
+
hftest_use_list(hftest_begin, hftest_end - hftest_begin);
if (!fdt_root_node(&n, fdt)) {
diff --git a/test/vmapi/gicv3/gicv3.c b/test/vmapi/gicv3/gicv3.c
index 571f978..47923e0 100644
--- a/test/vmapi/gicv3/gicv3.c
+++ b/test/vmapi/gicv3/gicv3.c
@@ -50,6 +50,11 @@
void system_setup()
{
+ const int mode = MM_MODE_R | MM_MODE_W | MM_MODE_D;
+ hftest_mm_identity_map((void *)GICD_BASE, PAGE_SIZE, mode);
+ hftest_mm_identity_map((void *)GICR_BASE, PAGE_SIZE, mode);
+ hftest_mm_identity_map((void *)SGI_BASE, PAGE_SIZE, mode);
+
exception_setup(irq);
interrupt_gic_setup();
}
diff --git a/test/vmapi/primary_only/faults.c b/test/vmapi/primary_only/faults.c
index 1adeb5b..bf56b15 100644
--- a/test/vmapi/primary_only/faults.c
+++ b/test/vmapi/primary_only/faults.c
@@ -61,9 +61,10 @@
/* Start secondary cpu while holding lock. */
sl_lock(&s.lock);
- EXPECT_EQ(cpu_start(hftest_get_cpu_id(1), other_stack,
- sizeof(other_stack), rx_reader, (uintptr_t)&s),
- true);
+ EXPECT_EQ(
+ hftest_cpu_start(hftest_get_cpu_id(1), other_stack,
+ sizeof(other_stack), rx_reader, (uintptr_t)&s),
+ true);
/* Wait for CPU to release the lock. */
sl_lock(&s.lock);
diff --git a/test/vmapi/primary_only/primary_only.c b/test/vmapi/primary_only/primary_only.c
index a7b29c8..4bede95 100644
--- a/test/vmapi/primary_only/primary_only.c
+++ b/test/vmapi/primary_only/primary_only.c
@@ -117,10 +117,10 @@
/* Start secondary while holding lock. */
sl_lock(&lock);
- EXPECT_EQ(
- cpu_start(hftest_get_cpu_id(1), other_stack,
- sizeof(other_stack), vm_cpu_entry, (uintptr_t)&lock),
- true);
+ EXPECT_EQ(hftest_cpu_start(hftest_get_cpu_id(1), other_stack,
+ sizeof(other_stack), vm_cpu_entry,
+ (uintptr_t)&lock),
+ true);
/* Wait for CPU to release the lock. */
sl_lock(&lock);
diff --git a/test/vmapi/primary_with_secondaries/run_race.c b/test/vmapi/primary_with_secondaries/run_race.c
index 49c95e3..63ba81d 100644
--- a/test/vmapi/primary_with_secondaries/run_race.c
+++ b/test/vmapi/primary_with_secondaries/run_race.c
@@ -88,8 +88,8 @@
SERVICE_SELECT(SERVICE_VM0, "check_state", mb.send);
/* Start second vCPU. */
- ASSERT_TRUE(cpu_start(hftest_get_cpu_id(1), stack, sizeof(stack),
- vm_cpu_entry, (uintptr_t)&mb));
+ ASSERT_TRUE(hftest_cpu_start(hftest_get_cpu_id(1), stack, sizeof(stack),
+ vm_cpu_entry, (uintptr_t)&mb));
/* Run on a loop until the secondary VM is done. */
EXPECT_TRUE(run_loop(&mb));
diff --git a/test/vmapi/primary_with_secondaries/services/abort.c b/test/vmapi/primary_with_secondaries/services/abort.c
index e37d7e9..3d11bd2 100644
--- a/test/vmapi/primary_with_secondaries/services/abort.c
+++ b/test/vmapi/primary_with_secondaries/services/abort.c
@@ -49,12 +49,18 @@
TEST_SERVICE(straddling_instruction_abort)
{
+ /*
+ * Get a function pointer which, when branched to, will attempt to
+ * execute a 4-byte instruction straddling two pages.
+ */
int (*f)(void) = (int (*)(void))(&pages[PAGE_SIZE - 2]);
- /* Give some memory to the primary VM so that it's unmapped. */
+ /* Give second page to the primary VM so that it's unmapped. */
ASSERT_EQ(hf_share_memory(HF_PRIMARY_VM_ID,
(hf_ipaddr_t)(&pages[PAGE_SIZE]), PAGE_SIZE,
HF_MEMORY_GIVE),
0);
+
+ /* Branch to instruction whose last 2 bytes are now in an unmapped page. */
f();
}
diff --git a/test/vmapi/primary_with_secondaries/services/smp.c b/test/vmapi/primary_with_secondaries/services/smp.c
index 863919d..44dd505 100644
--- a/test/vmapi/primary_with_secondaries/services/smp.c
+++ b/test/vmapi/primary_with_secondaries/services/smp.c
@@ -73,8 +73,8 @@
/* Start second vCPU. */
dlog("Secondary starting second vCPU.\n");
- ASSERT_TRUE(
- cpu_start(1, stack, sizeof(stack), vm_cpu_entry, ARG_VALUE));
+ ASSERT_TRUE(hftest_cpu_start(1, stack, sizeof(stack), vm_cpu_entry,
+ ARG_VALUE));
dlog("Secondary started second vCPU.\n");
/* Check that vCPU statuses are as expected. */