/*
 * Copyright 2018 The Hafnium Authors.
 *
 * Use of this source code is governed by a BSD-style
 * license that can be found in the LICENSE file or at
 * https://opensource.org/licenses/BSD-3-Clause.
 */

#include "hf/cpu.h"

#include <stdalign.h>

#include "hf/arch/cache.h"

#include "hf/api.h"
#include "hf/check.h"
#include "hf/dlog.h"

#include "vmapi/hf/call.h"

#include "system/sys/cdefs.h"

#define STACK_SIZE PAGE_SIZE

/**
 * The stacks to be used by the CPUs.
 *
 * Align to page boundaries to ensure that cache lines are not shared between a
 * CPU's stack and data that can be accessed from other CPUs. If that were to
 * happen, there could be coherency problems when the stack is used before
 * caching is enabled.
 */
alignas(PAGE_SIZE) static char callstacks[MAX_CPUS][STACK_SIZE] __section(
	.stacks);

/* NOLINTNEXTLINE(misc-redundant-expression) */
static_assert((STACK_SIZE % PAGE_SIZE) == 0, "Keep each stack page aligned.");
static_assert((PAGE_SIZE % STACK_ALIGN) == 0,
	      "Page alignment is too weak for the stack.");

/**
 * Internal buffer used to store FF-A messages copied from a VM's TX buffer.
 * Its usage prevents TOCTOU issues while Hafnium performs actions on
 * information that would otherwise be re-writable by the VM.
 *
 * Each buffer is owned by a single CPU. The buffer can only be used for
 * ffa_msg_send. The information stored in the buffer is only valid while the
 * ffa_msg_send request is being performed.
 */
alignas(PAGE_SIZE) static uint8_t cpu_message_buffer[MAX_CPUS][PAGE_SIZE];

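/** Returns the FF-A message buffer owned by the given CPU. */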
uint8_t *cpu_get_buffer(struct cpu *c)
{
	size_t cpu_indx = cpu_index(c);

	CHECK(cpu_indx < MAX_CPUS);

	return cpu_message_buffer[cpu_indx];
}

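/** Returns the size of the FF-A message buffer owned by the given CPU. */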
uint32_t cpu_get_buffer_size(struct cpu *c)
{
	size_t cpu_indx = cpu_index(c);

	CHECK(cpu_indx < MAX_CPUS);

	return sizeof(cpu_message_buffer[cpu_indx]);
}

/* State of all supported CPUs. The stack of the first one is initialized. */
struct cpu cpus[MAX_CPUS] = {
	{
		.is_on = 1,
		.stack_bottom = &callstacks[0][STACK_SIZE],
	},
};

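/* Number of CPUs in the system, set by cpu_module_init from the config. */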
uint32_t cpu_count = 1;

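/**
 * Initializes the CPU structures with the IDs from the configuration,
 * assigning each CPU its stack and cleaning the cache so that secondary cores
 * can read the CPU state before they enable their MMU and data cache.
 */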
void cpu_module_init(const cpu_id_t *cpu_ids, size_t count)
{
	uint32_t i;
	uint32_t j;
	cpu_id_t boot_cpu_id = cpus[0].id;
	bool found_boot_cpu = false;

	cpu_count = count;

	/*
	 * Initialize CPUs with the IDs from the configuration passed in. CPUs
	 * after the boot CPU are initialized in reverse order. The boot CPU is
	 * initialized when its ID is found, or its slot is taken by the last
	 * remaining ID if it is not found.
	 */
	j = cpu_count;
	for (i = 0; i < cpu_count; ++i) {
		struct cpu *c;
		cpu_id_t id = cpu_ids[i];

		if (found_boot_cpu || id != boot_cpu_id) {
			--j;
			c = &cpus[j];
			c->stack_bottom = &callstacks[j][STACK_SIZE];
		} else {
			found_boot_cpu = true;
			c = &cpus[0];
			CHECK(c->stack_bottom == &callstacks[0][STACK_SIZE]);
		}

		sl_init(&c->lock);
		c->id = id;
	}

	if (!found_boot_cpu) {
		/* Boot CPU was initialized but with wrong ID. */
		dlog_warning("Boot CPU's ID not found in config.\n");
		cpus[0].id = boot_cpu_id;
	}

	/*
	 * Clean the data cache for the cpus array and cpu_count so that
	 * secondary cores hitting the entry point can read them consistently
	 * with the MMU off (and hence the data cache off).
	 */
	arch_cache_data_clean_range(va_from_ptr(cpus), sizeof(cpus));

	arch_cache_data_clean_range(va_from_ptr(&cpu_count), sizeof(cpu_count));
}

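/** Returns the index of the given CPU within the global cpus array. */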
size_t cpu_index(struct cpu *c)
{
	return c - cpus;
}

/**
 * Returns the CPU with the given index, or NULL if the index is out of range.
 */
struct cpu *cpu_find_index(size_t index)
{
	return (index < MAX_CPUS) ? &cpus[index] : NULL;
}

/**
 * Turns the CPU on and returns whether it was previously on.
 */
bool cpu_on(struct cpu *c, ipaddr_t entry, uintreg_t arg)
{
	bool prev;

	sl_lock(&c->lock);
	prev = c->is_on;
	c->is_on = true;
	sl_unlock(&c->lock);

	if (!prev) {
		/* This returns the first booted VM (e.g. the primary in the NWd). */
		struct vm *vm = vm_get_first_boot();
		struct vcpu *vcpu = vm_get_vcpu(vm, cpu_index(c));
		struct vcpu_locked vcpu_locked;

		vcpu_locked = vcpu_lock(vcpu);
		vcpu_on(vcpu_locked, entry, arg);
		vcpu_unlock(&vcpu_locked);
	}

	return prev;
}


/**
 * Prepares the CPU for turning itself off.
 */
void cpu_off(struct cpu *c)
{
	sl_lock(&c->lock);
	c->is_on = false;
	sl_unlock(&c->lock);
}

/**
 * Searches for a CPU based on its ID. Returns NULL if no CPU with that ID is
 * found.
 */
struct cpu *cpu_find(cpu_id_t id)
{
	size_t i;

	for (i = 0; i < cpu_count; i++) {
		if (cpus[i].id == id) {
			return &cpus[i];
		}
	}

	return NULL;
}