/*
 * Copyright 2019 The Hafnium Authors.
 *
 * Use of this source code is governed by a BSD-style
 * license that can be found in the LICENSE file or at
 * https://opensource.org/licenses/BSD-3-Clause.
 */

#include "hf/arch/vm/power_mgmt.h"

#include "hf/arch/mm.h"
#include "hf/arch/types.h"

#include "hf/mm.h"
#include "hf/spinlock.h"

#include "test/hftest.h"

struct cpu_start_state {
	cpu_entry_point *entry;
	uintreg_t arg;
	struct spinlock lock;
};

static noreturn void cpu_entry(uintptr_t arg)
{
	/*
	 * The function prototype must match the entry function, so we permit
	 * the int-to-pointer conversion.
	 */
	// NOLINTNEXTLINE(performance-no-int-to-ptr)
	struct cpu_start_state *s = (struct cpu_start_state *)arg;
	struct cpu_start_state s_copy;

	/*
	 * Initialize memory and enable caching. Must be the first thing we do.
	 */
	hftest_mm_vcpu_init();
	/* Make a copy of the cpu_start_state struct. */
	s_copy = *s;

	/*
	 * Inform hftest_cpu_start() that the state struct memory can now be
	 * freed.
	 */
	sl_unlock(&s->lock);

	/* Call the given entry function with the given argument. */
	s_copy.entry(s_copy.arg);

	/* If the entry function returns, turn off the CPU. */
	arch_cpu_stop();
}
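
/*
 * Illustrative sketch, not part of the original file: an example of the
 * kind of entry function a test could hand to hftest_cpu_start() below.
 * The name and body are hypothetical, and the signature assumes
 * cpu_entry_point takes a single uintptr_t argument, matching how
 * cpu_entry() invokes it above.
 */
static void example_secondary_entry(uintptr_t arg)
{
	HFTEST_LOG("Secondary CPU is up, arg = %lu", (unsigned long)arg);

	/*
	 * Returning hands control back to cpu_entry(), which then powers the
	 * CPU off via arch_cpu_stop().
	 */
}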

bool hftest_cpu_start(cpu_id_t id, const uint8_t *secondary_ec_stack,
		      cpu_entry_point *entry, uintptr_t arg)
{
	struct cpu_start_state s;
	struct arch_cpu_start_state s_arch;

	/*
	 * Config for arch_cpu_start() which will start a new CPU and
	 * immediately jump to cpu_entry(). This function must guarantee that
	 * the state struct is not freed until cpu_entry() is called.
	 */
	s_arch.initial_sp = (uintptr_t)secondary_ec_stack;
	s_arch.entry = cpu_entry;
	s_arch.arg = (uintptr_t)&s;

	/*
	 * Flush the `arch_cpu_start_state` struct because the new CPU will be
	 * started without caching enabled and will need the data early on.
	 * Only a write-back is strictly required, so a full flush certainly
	 * gets the job done.
	 */
	arch_mm_flush_dcache(&s_arch, sizeof(s_arch));

	if ((s_arch.initial_sp % STACK_ALIGN) != 0) {
		HFTEST_FAIL(true,
			    "Stack pointer of new vCPU not properly aligned.");
	}

	/*
	 * Config for cpu_entry(). Its job is to initialize memory and call the
	 * provided entry point with the provided argument.
	 */
	s.entry = entry;
	s.arg = arg;
	sl_init(&s.lock);

	/*
	 * Lock the cpu_start_state struct, which will be unlocked once
	 * cpu_entry() does not need its content anymore. This simultaneously
	 * protects the arch_cpu_start_state struct, which must not be freed
	 * before cpu_entry() is called.
	 */
	sl_lock(&s.lock);

	/* Try to start the given CPU. */
	if (!arch_cpu_start(id, &s_arch)) {
		HFTEST_LOG("Couldn't start cpu %lu", id);
		return false;
	}

	/*
	 * Wait until cpu_entry() unlocks the cpu_start_state lock before
	 * freeing stack memory.
	 */
	sl_lock(&s.lock);
	return true;
}
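
/*
 * Illustrative usage sketch, not part of the original file: how a test
 * might bring up a secondary CPU via hftest_cpu_start(). The stack size,
 * CPU id and argument are hypothetical, and the pointer passed is assumed
 * to be the top of a downward-growing stack, since it is used as
 * initial_sp directly.
 */
static uint8_t example_stack[4096] __attribute__((aligned(STACK_ALIGN)));

void example_start_secondary(void)
{
	/* AArch64 stacks grow downwards, so pass the end of the buffer. */
	const uint8_t *stack_top = example_stack + sizeof(example_stack);

	if (!hftest_cpu_start(1 /* hypothetical CPU id */, stack_top,
			      example_secondary_entry, 42)) {
		HFTEST_FAIL(true, "Unable to start secondary CPU.");
	}

	/*
	 * hftest_cpu_start() returning true means cpu_entry() has already
	 * copied the start state, so the locals here may safely go out of
	 * scope.
	 */
}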