Andrew Scull | 1883487 | 2018-10-12 11:48:09 +0100 | [diff] [blame] | 1 | /* |
Andrew Walbran | 692b325 | 2019-03-07 15:51:31 +0000 | [diff] [blame] | 2 | * Copyright 2018 The Hafnium Authors. |
Andrew Scull | 1883487 | 2018-10-12 11:48:09 +0100 | [diff] [blame] | 3 | * |
Andrew Walbran | e959ec1 | 2020-06-17 15:01:09 +0100 | [diff] [blame] | 4 | * Use of this source code is governed by a BSD-style |
| 5 | * license that can be found in the LICENSE file or at |
| 6 | * https://opensource.org/licenses/BSD-3-Clause. |
Andrew Scull | 1883487 | 2018-10-12 11:48:09 +0100 | [diff] [blame] | 7 | */ |
| 8 | |
Andrew Scull | 18c78fc | 2018-08-20 12:57:41 +0100 | [diff] [blame] | 9 | #include "hf/cpu.h" |
Wedson Almeida Filho | 987c0ff | 2018-06-20 16:34:38 +0100 | [diff] [blame] | 10 | |
Andrew Scull | 04502e4 | 2018-09-03 14:54:52 +0100 | [diff] [blame] | 11 | #include <stdalign.h> |
| 12 | |
Max Shvetsov | 9c0ebe4 | 2020-08-27 12:37:57 +0100 | [diff] [blame] | 13 | #include "hf/arch/cache.h" |
| 14 | |
Andrew Scull | 18c78fc | 2018-08-20 12:57:41 +0100 | [diff] [blame] | 15 | #include "hf/api.h" |
Andrew Scull | 877ae4b | 2019-07-02 12:52:33 +0100 | [diff] [blame] | 16 | #include "hf/check.h" |
Andrew Scull | 18c78fc | 2018-08-20 12:57:41 +0100 | [diff] [blame] | 17 | #include "hf/dlog.h" |
Wedson Almeida Filho | 987c0ff | 2018-06-20 16:34:38 +0100 | [diff] [blame] | 18 | |
Andrew Scull | 1950326 | 2018-09-20 14:48:39 +0100 | [diff] [blame] | 19 | #include "vmapi/hf/call.h" |
| 20 | |
Maksims Svecovs | 134b8f9 | 2022-03-04 15:14:09 +0000 | [diff] [blame] | 21 | #include "system/sys/cdefs.h" |
| 22 | |
/**
 * The stacks to be used by the CPUs.
 *
 * Align to page boundaries to ensure that cache lines are not shared between a
 * CPU's stack and data that can be accessed from other CPUs. If this did
 * happen, there may be coherency problems when the stack is being used before
 * caching is enabled.
 *
 * Placed in a dedicated `.stacks` linker section so the linker script can
 * locate them explicitly.
 */
alignas(PAGE_SIZE) static char callstacks[MAX_CPUS][STACK_SIZE] __section(
	.stacks);

/*
 * Guard the layout assumptions above: each stack must be a whole number of
 * pages, and page alignment must be at least as strict as the required stack
 * alignment.
 */
/* NOLINTNEXTLINE(misc-redundant-expression) */
static_assert((STACK_SIZE % PAGE_SIZE) == 0, "Keep each stack page aligned.");
static_assert((PAGE_SIZE % STACK_ALIGN) == 0,
	      "Page alignment is too weak for the stack.");
Wedson Almeida Filho | 3fcbcff | 2018-07-10 23:53:39 +0100 | [diff] [blame] | 38 | |
/**
 * Internal buffer used to store FF-A messages from a VM Tx. Its usage prevents
 * TOCTOU issues while Hafnium performs actions on information that would
 * otherwise be re-writable by the VM.
 *
 * Each buffer is owned by a single CPU. The buffer can only be used for
 * ffa_msg_send. The information stored in the buffer is only valid during the
 * ffa_msg_send request is performed.
 *
 * One page-sized, page-aligned buffer per CPU; indexed by cpu_index().
 */
alignas(PAGE_SIZE) static uint8_t cpu_message_buffer[MAX_CPUS][PAGE_SIZE];
Jose Marinho | 20713fa | 2019-08-07 15:42:07 +0100 | [diff] [blame] | 49 | |
Mahesh Bireddy | 8ca5786 | 2020-01-07 13:43:21 +0530 | [diff] [blame] | 50 | uint8_t *cpu_get_buffer(struct cpu *c) |
Jose Marinho | 20713fa | 2019-08-07 15:42:07 +0100 | [diff] [blame] | 51 | { |
Mahesh Bireddy | 8ca5786 | 2020-01-07 13:43:21 +0530 | [diff] [blame] | 52 | size_t cpu_indx = cpu_index(c); |
Jose Marinho | 20713fa | 2019-08-07 15:42:07 +0100 | [diff] [blame] | 53 | |
Mahesh Bireddy | 8ca5786 | 2020-01-07 13:43:21 +0530 | [diff] [blame] | 54 | CHECK(cpu_indx < MAX_CPUS); |
| 55 | |
| 56 | return cpu_message_buffer[cpu_indx]; |
Jose Marinho | 20713fa | 2019-08-07 15:42:07 +0100 | [diff] [blame] | 57 | } |
| 58 | |
Mahesh Bireddy | 8ca5786 | 2020-01-07 13:43:21 +0530 | [diff] [blame] | 59 | uint32_t cpu_get_buffer_size(struct cpu *c) |
Jose Marinho | 20713fa | 2019-08-07 15:42:07 +0100 | [diff] [blame] | 60 | { |
Mahesh Bireddy | 8ca5786 | 2020-01-07 13:43:21 +0530 | [diff] [blame] | 61 | size_t cpu_indx = cpu_index(c); |
Jose Marinho | 20713fa | 2019-08-07 15:42:07 +0100 | [diff] [blame] | 62 | |
Mahesh Bireddy | 8ca5786 | 2020-01-07 13:43:21 +0530 | [diff] [blame] | 63 | CHECK(cpu_indx < MAX_CPUS); |
| 64 | |
| 65 | return sizeof(cpu_message_buffer[cpu_indx]); |
Jose Marinho | 20713fa | 2019-08-07 15:42:07 +0100 | [diff] [blame] | 66 | } |
| 67 | |
/*
 * State of all supported CPUs. The stack of the first one is initialized.
 *
 * Only entry 0 (the boot CPU) is statically initialized: it is marked on and
 * given a stack so it can run before cpu_module_init(). The remaining
 * entries are filled in by cpu_module_init().
 */
struct cpu cpus[MAX_CPUS] = {
	{
		.is_on = 1,
		/* Stacks grow downwards, so point at the end of the array. */
		.stack_bottom = &callstacks[0][STACK_SIZE],
	},
};

/* Number of CPUs in use; starts at 1 (boot CPU) until cpu_module_init(). */
uint32_t cpu_count = 1;
Wedson Almeida Filho | 3fcbcff | 2018-07-10 23:53:39 +0100 | [diff] [blame] | 77 | |
/**
 * Initializes the global CPU table from the list of CPU IDs provided by the
 * platform configuration.
 *
 * @param cpu_ids Array of `count` CPU IDs discovered from the configuration.
 * @param count   Number of entries in `cpu_ids`; becomes the new `cpu_count`.
 */
void cpu_module_init(const cpu_id_t *cpu_ids, size_t count)
{
	uint32_t i;
	uint32_t j;
	/* Remember the boot CPU's ID (set before this function runs). */
	cpu_id_t boot_cpu_id = cpus[0].id;
	bool found_boot_cpu = false;

	cpu_count = count;

	/*
	 * Initialize CPUs with the IDs from the configuration passed in. The
	 * CPUs after the boot CPU are initialized in reverse order. The boot
	 * CPU is initialized when it is found or in place of the last CPU if it
	 * is not found.
	 */
	j = cpu_count;
	for (i = 0; i < cpu_count; ++i) {
		struct cpu *c;
		cpu_id_t id = cpu_ids[i];

		if (found_boot_cpu || id != boot_cpu_id) {
			/* Fill secondary CPUs from the end of the table. */
			--j;
			c = &cpus[j];
			c->stack_bottom = &callstacks[j][STACK_SIZE];
		} else {
			/* Boot CPU keeps slot 0 and its pre-set stack. */
			found_boot_cpu = true;
			c = &cpus[0];
			CHECK(c->stack_bottom == &callstacks[0][STACK_SIZE]);
		}

		sl_init(&c->lock);
		c->id = id;
	}

	if (!found_boot_cpu) {
		/* Boot CPU was initialized but with wrong ID. */
		dlog_warning("Boot CPU's ID not found in config.\n");
		cpus[0].id = boot_cpu_id;
	}

	/*
	 * Clean the cache for the cpus array such that secondary cores
	 * hitting the entry point can read the cpus array consistently
	 * with MMU off (hence data cache off).
	 */
	arch_cache_data_clean_range(va_from_ptr(cpus), sizeof(cpus));

	arch_cache_data_clean_range(va_from_ptr(&cpu_count), sizeof(cpu_count));
}
| 127 | |
/**
 * Returns the index of the given CPU within the global `cpus` array.
 *
 * `c` must point at an element of `cpus`; the result of the pointer
 * subtraction is undefined otherwise.
 */
size_t cpu_index(struct cpu *c)
{
	return c - cpus;
}
| 132 | |
Olivier Deprez | 7d5e553 | 2020-09-22 15:06:58 +0200 | [diff] [blame] | 133 | /* |
| 134 | * Return cpu with the given index. |
| 135 | */ |
| 136 | struct cpu *cpu_find_index(size_t index) |
| 137 | { |
| 138 | return (index < MAX_CPUS) ? &cpus[index] : NULL; |
| 139 | } |
| 140 | |
Wedson Almeida Filho | 8700964 | 2018-07-02 10:20:07 +0100 | [diff] [blame] | 141 | /** |
| 142 | * Turns CPU on and returns the previous state. |
| 143 | */ |
Olivier Deprez | 70f8a4a | 2022-09-26 09:17:56 +0200 | [diff] [blame^] | 144 | bool cpu_on(struct cpu *c) |
Wedson Almeida Filho | 987c0ff | 2018-06-20 16:34:38 +0100 | [diff] [blame] | 145 | { |
Wedson Almeida Filho | 8700964 | 2018-07-02 10:20:07 +0100 | [diff] [blame] | 146 | bool prev; |
| 147 | |
Wedson Almeida Filho | 987c0ff | 2018-06-20 16:34:38 +0100 | [diff] [blame] | 148 | sl_lock(&c->lock); |
Wedson Almeida Filho | 8700964 | 2018-07-02 10:20:07 +0100 | [diff] [blame] | 149 | prev = c->is_on; |
| 150 | c->is_on = true; |
| 151 | sl_unlock(&c->lock); |
| 152 | |
Wedson Almeida Filho | 8700964 | 2018-07-02 10:20:07 +0100 | [diff] [blame] | 153 | return prev; |
Wedson Almeida Filho | 987c0ff | 2018-06-20 16:34:38 +0100 | [diff] [blame] | 154 | } |
| 155 | |
Wedson Almeida Filho | 03e767a | 2018-07-30 15:32:03 +0100 | [diff] [blame] | 156 | /** |
| 157 | * Prepares the CPU for turning itself off. |
Wedson Almeida Filho | 987c0ff | 2018-06-20 16:34:38 +0100 | [diff] [blame] | 158 | */ |
| 159 | void cpu_off(struct cpu *c) |
| 160 | { |
Wedson Almeida Filho | 987c0ff | 2018-06-20 16:34:38 +0100 | [diff] [blame] | 161 | sl_lock(&c->lock); |
Wedson Almeida Filho | 8700964 | 2018-07-02 10:20:07 +0100 | [diff] [blame] | 162 | c->is_on = false; |
Wedson Almeida Filho | 987c0ff | 2018-06-20 16:34:38 +0100 | [diff] [blame] | 163 | sl_unlock(&c->lock); |
Wedson Almeida Filho | 03e767a | 2018-07-30 15:32:03 +0100 | [diff] [blame] | 164 | } |
Wedson Almeida Filho | 987c0ff | 2018-06-20 16:34:38 +0100 | [diff] [blame] | 165 | |
Wedson Almeida Filho | 03e767a | 2018-07-30 15:32:03 +0100 | [diff] [blame] | 166 | /** |
Fuad Tabba | b0ef2a4 | 2019-12-19 11:19:25 +0000 | [diff] [blame] | 167 | * Searches for a CPU based on its ID. |
Wedson Almeida Filho | 03e767a | 2018-07-30 15:32:03 +0100 | [diff] [blame] | 168 | */ |
Andrew Walbran | 4d3fa28 | 2019-06-26 13:31:15 +0100 | [diff] [blame] | 169 | struct cpu *cpu_find(cpu_id_t id) |
Wedson Almeida Filho | 03e767a | 2018-07-30 15:32:03 +0100 | [diff] [blame] | 170 | { |
| 171 | size_t i; |
| 172 | |
Andrew Scull | bb3ab6c | 2018-11-26 20:38:49 +0000 | [diff] [blame] | 173 | for (i = 0; i < cpu_count; i++) { |
Wedson Almeida Filho | 03e767a | 2018-07-30 15:32:03 +0100 | [diff] [blame] | 174 | if (cpus[i].id == id) { |
Andrew Scull | f3d4559 | 2018-09-20 14:30:22 +0100 | [diff] [blame] | 175 | return &cpus[i]; |
Wedson Almeida Filho | 03e767a | 2018-07-30 15:32:03 +0100 | [diff] [blame] | 176 | } |
| 177 | } |
| 178 | |
| 179 | return NULL; |
Wedson Almeida Filho | 987c0ff | 2018-06-20 16:34:38 +0100 | [diff] [blame] | 180 | } |