Sandrine Bailleux | 3cd87d7 | 2018-10-09 11:12:55 +0200 | [diff] [blame] | 1 | /* |
Alexei Fedorov | 719714f | 2019-10-03 10:57:53 +0100 | [diff] [blame] | 2 | * Copyright (c) 2018-2019, Arm Limited. All rights reserved. |
Sandrine Bailleux | 3cd87d7 | 2018-10-09 11:12:55 +0200 | [diff] [blame] | 3 | * |
| 4 | * SPDX-License-Identifier: BSD-3-Clause |
| 5 | */ |
| 6 | |
| 7 | #include <arch_helpers.h> |
Sandrine Bailleux | 3cd87d7 | 2018-10-09 11:12:55 +0200 | [diff] [blame] | 8 | #include <assert.h> |
| 9 | #include <cdefs.h> /* For __dead2 */ |
Sandrine Bailleux | 3cd87d7 | 2018-10-09 11:12:55 +0200 | [diff] [blame] | 10 | #include <debug.h> |
Antonio Nino Diaz | 09a00ef | 2019-01-11 13:12:58 +0000 | [diff] [blame] | 11 | #include <drivers/arm/arm_gic.h> |
| 12 | #include <drivers/console.h> |
Sandrine Bailleux | 3cd87d7 | 2018-10-09 11:12:55 +0200 | [diff] [blame] | 13 | #include <irq.h> |
Alexei Fedorov | 719714f | 2019-10-03 10:57:53 +0100 | [diff] [blame] | 14 | #include <pauth.h> |
Sandrine Bailleux | 3cd87d7 | 2018-10-09 11:12:55 +0200 | [diff] [blame] | 15 | #include <platform.h> |
| 16 | #include <platform_def.h> |
| 17 | #include <power_management.h> |
| 18 | #include <psci.h> |
| 19 | #include <sgi.h> |
| 20 | #include <spinlock.h> |
| 21 | #include <stdint.h> |
| 22 | #include <tftf.h> |
| 23 | |
/*
 * Affinity info map of CPUs as seen by TFTF
 * - Set cpus_status_map[i].state to TFTF_AFFINITY_STATE_ON to mark CPU i
 *   as ON.
 * - Set cpus_status_map[i].state to TFTF_AFFINITY_STATE_ON_PENDING to mark
 *   CPU i as ON_PENDING.
 * - Set cpus_status_map[i].state to TFTF_AFFINITY_STATE_OFF to mark CPU i
 *   as OFF.
 */
static tftf_cpu_state_t cpus_status_map[PLATFORM_CORE_COUNT];
/* Guards against tftf_init_cpus_status_map() running more than once */
static int cpus_status_init_done;

/*
 * Reference count keeping track of the number of CPUs participating in
 * a test.
 */
static volatile unsigned int ref_cnt;

/* Lock to prevent concurrent accesses to the reference count */
static spinlock_t ref_cnt_lock;

/* Per-cpu test entrypoint */
volatile test_function_t test_entrypoint[PLATFORM_CORE_COUNT];

/* MPID of the primary CPU; set elsewhere during cold boot (INVALID_MPID until then) */
u_register_t tftf_primary_core = INVALID_MPID;
| 49 | |
| 50 | unsigned int tftf_inc_ref_cnt(void) |
| 51 | { |
| 52 | unsigned int cnt; |
| 53 | |
| 54 | spin_lock(&ref_cnt_lock); |
| 55 | assert(ref_cnt < PLATFORM_CORE_COUNT); |
| 56 | cnt = ++ref_cnt; |
| 57 | spin_unlock(&ref_cnt_lock); |
| 58 | |
| 59 | VERBOSE("Entering the test (%u CPUs in the test now)\n", cnt); |
| 60 | |
| 61 | return cnt; |
| 62 | } |
| 63 | |
| 64 | unsigned int tftf_dec_ref_cnt(void) |
| 65 | { |
| 66 | unsigned int cnt; |
| 67 | |
| 68 | spin_lock(&ref_cnt_lock); |
| 69 | assert(ref_cnt != 0); |
| 70 | cnt = --ref_cnt; |
| 71 | spin_unlock(&ref_cnt_lock); |
| 72 | |
| 73 | VERBOSE("Exiting the test (%u CPUs in the test now)\n", cnt); |
| 74 | |
| 75 | return cnt; |
| 76 | } |
| 77 | |
/*
 * Return the current number of CPUs participating in the test.
 *
 * The read is done without taking ref_cnt_lock: ref_cnt is volatile, so this
 * is a snapshot that may already be stale by the time the caller uses it.
 */
unsigned int tftf_get_ref_cnt(void)
{
	return ref_cnt;
}
| 82 | |
| 83 | void tftf_init_cpus_status_map(void) |
| 84 | { |
| 85 | unsigned int mpid = read_mpidr_el1(); |
| 86 | unsigned int core_pos = platform_get_core_pos(mpid); |
| 87 | |
| 88 | /* Check only primary does the initialisation */ |
| 89 | assert((mpid & MPID_MASK) == tftf_primary_core); |
| 90 | |
| 91 | /* Check init is done only once */ |
| 92 | assert(!cpus_status_init_done); |
| 93 | |
| 94 | cpus_status_init_done = 1; |
| 95 | |
| 96 | /* |
| 97 | * cpus_status_map already initialised to zero as part of BSS init, |
| 98 | * just set the primary to ON state |
| 99 | */ |
| 100 | cpus_status_map[core_pos].state = TFTF_AFFINITY_STATE_ON; |
| 101 | } |
| 102 | |
/*
 * Mark the calling CPU as ON in the framework's affinity map.
 *
 * Called from the warm-boot path (and from the CPU_OFF failure path). The
 * CPU must already have been moved to ON_PENDING by tftf_cpu_on() or
 * tftf_try_cpu_on(); this function completes the OFF -> ON_PENDING -> ON
 * transition.
 */
void tftf_set_cpu_online(void)
{
	unsigned int mpid = read_mpidr_el1();
	unsigned int core_pos = platform_get_core_pos(mpid);

	/*
	 * Wait here till the `tftf_try_cpu_on` has had a chance to update
	 * the cpu state. tftf_try_cpu_on() issues the PSCI CPU_ON first and
	 * updates the map afterwards, so this CPU can start running before
	 * its own entry has left the OFF state.
	 */
	while (cpus_status_map[core_pos].state == TFTF_AFFINITY_STATE_OFF)
		;

	spin_lock(&cpus_status_map[core_pos].lock);
	assert(cpus_status_map[core_pos].state == TFTF_AFFINITY_STATE_ON_PENDING);
	cpus_status_map[core_pos].state = TFTF_AFFINITY_STATE_ON;
	spin_unlock(&cpus_status_map[core_pos].lock);
}
| 120 | |
| 121 | void tftf_set_cpu_offline(void) |
| 122 | { |
| 123 | unsigned int mpid = read_mpidr_el1(); |
| 124 | unsigned int core_pos = platform_get_core_pos(mpid); |
| 125 | |
| 126 | spin_lock(&cpus_status_map[core_pos].lock); |
| 127 | |
| 128 | assert(tftf_is_cpu_online(mpid)); |
| 129 | cpus_status_map[core_pos].state = TFTF_AFFINITY_STATE_OFF; |
| 130 | spin_unlock(&cpus_status_map[core_pos].lock); |
| 131 | } |
| 132 | |
| 133 | unsigned int tftf_is_cpu_online(unsigned int mpid) |
| 134 | { |
| 135 | unsigned int core_pos = platform_get_core_pos(mpid); |
| 136 | return cpus_status_map[core_pos].state == TFTF_AFFINITY_STATE_ON; |
| 137 | } |
| 138 | |
| 139 | unsigned int tftf_is_core_pos_online(unsigned int core_pos) |
| 140 | { |
| 141 | return cpus_status_map[core_pos].state == TFTF_AFFINITY_STATE_ON; |
| 142 | } |
| 143 | |
/*
 * Power on 'target_cpu' via PSCI CPU_ON and arrange for it to run
 * 'entrypoint' once the framework has initialised it.
 *
 * target_cpu - MPID of the CPU to power on.
 * entrypoint - test function the CPU will jump to after framework init.
 * context_id - opaque value forwarded to the PSCI implementation.
 *
 * Returns PSCI_E_ALREADY_ON if the framework already considers the CPU ON,
 * PSCI_E_SUCCESS if it is (or was just put) on its way up, or the PSCI
 * error code on failure.
 */
int32_t tftf_cpu_on(u_register_t target_cpu,
		    uintptr_t entrypoint,
		    u_register_t context_id)
{
	int32_t ret;
	tftf_affinity_info_t cpu_state;
	unsigned int core_pos = platform_get_core_pos(target_cpu);

	/* Hold the per-CPU lock across the whole state inspection/update. */
	spin_lock(&cpus_status_map[core_pos].lock);
	cpu_state = cpus_status_map[core_pos].state;

	if (cpu_state == TFTF_AFFINITY_STATE_ON) {
		spin_unlock(&cpus_status_map[core_pos].lock);
		return PSCI_E_ALREADY_ON;
	}

	if (cpu_state == TFTF_AFFINITY_STATE_ON_PENDING) {
		/* A previous CPU_ON is still in flight; nothing more to do. */
		spin_unlock(&cpus_status_map[core_pos].lock);
		return PSCI_E_SUCCESS;
	}

	assert(cpu_state == TFTF_AFFINITY_STATE_OFF);

	/*
	 * Retry while PSCI still reports the CPU as on: the framework sees it
	 * as OFF, so it must be in the middle of powering down at EL3.
	 */
	do {
		ret = tftf_psci_cpu_on(target_cpu,
				       (uintptr_t) tftf_hotplug_entry,
				       context_id);

		/* Check if multiple CPU_ON calls are done for same CPU */
		assert(ret != PSCI_E_ON_PENDING);
	} while (ret == PSCI_E_ALREADY_ON);

	if (ret == PSCI_E_SUCCESS) {
		/*
		 * Populate the test entry point for this core.
		 * This is the address where the core will jump to once the framework
		 * has finished initialising it.
		 * This must happen before the state is published as
		 * ON_PENDING, as the target CPU only waits for the state to
		 * leave OFF before proceeding.
		 */
		test_entrypoint[core_pos] = (test_function_t) entrypoint;

		cpus_status_map[core_pos].state = TFTF_AFFINITY_STATE_ON_PENDING;
		spin_unlock(&cpus_status_map[core_pos].lock);
	} else {
		spin_unlock(&cpus_status_map[core_pos].lock);
		ERROR("Failed to boot CPU 0x%llx (%d)\n",
			(unsigned long long)target_cpu, ret);
	}

	return ret;
}
| 194 | |
| 195 | int32_t tftf_try_cpu_on(u_register_t target_cpu, |
| 196 | uintptr_t entrypoint, |
| 197 | u_register_t context_id) |
| 198 | { |
| 199 | int32_t ret; |
| 200 | unsigned int core_pos = platform_get_core_pos(target_cpu); |
| 201 | |
| 202 | ret = tftf_psci_cpu_on(target_cpu, |
| 203 | (uintptr_t) tftf_hotplug_entry, |
| 204 | context_id); |
| 205 | |
| 206 | if (ret == PSCI_E_SUCCESS) { |
| 207 | spin_lock(&cpus_status_map[core_pos].lock); |
| 208 | assert(cpus_status_map[core_pos].state == |
| 209 | TFTF_AFFINITY_STATE_OFF); |
| 210 | cpus_status_map[core_pos].state = |
| 211 | TFTF_AFFINITY_STATE_ON_PENDING; |
| 212 | |
| 213 | spin_unlock(&cpus_status_map[core_pos].lock); |
| 214 | |
| 215 | /* |
| 216 | * Populate the test entry point for this core. |
| 217 | * This is the address where the core will jump to once the |
| 218 | * framework has finished initialising it. |
| 219 | */ |
| 220 | test_entrypoint[core_pos] = (test_function_t) entrypoint; |
| 221 | } |
| 222 | |
| 223 | return ret; |
| 224 | } |
| 225 | |
/*
 * Prepare the core to power off. Any driver which needs to perform specific
 * tasks before powering off a CPU, e.g. migrating interrupts to another
 * core, can implement a function and call it from here.
 */
static void tftf_prepare_cpu_off(void)
{
	/*
	 * Do the bare minimal to turn off this CPU i.e. turn off interrupts
	 * and disable the GIC CPU interface. Interrupts are masked at the
	 * CPU first so no interrupt can be taken while the GIC CPU
	 * interface is being disabled.
	 */
	disable_irq();
	arm_gic_disable_interrupts_local();
}
| 240 | |
/*
 * Revert the changes made during tftf_prepare_cpu_off(), in the opposite
 * order: re-enable the GIC CPU interface first, then unmask interrupts at
 * the CPU. Used when the power-off attempt was denied.
 */
static void tftf_revert_cpu_off(void)
{
	arm_gic_enable_interrupts_local();
	enable_irq();
}
| 249 | |
/*
 * Power down the calling CPU via PSCI CPU_OFF.
 *
 * On success this function does not return (the CPU is powered off by EL3).
 * If the call is denied, the framework state changes are rolled back and the
 * PSCI error code (PSCI_E_DENIED) is returned.
 */
int32_t tftf_cpu_off(void)
{
	int32_t ret;

	tftf_prepare_cpu_off();
	tftf_set_cpu_offline();

	INFO("Powering off CPU:%lx\n", read_mpidr_el1());

	/* Flush console before the last CPU is powered off. */
	if (tftf_get_ref_cnt() == 0)
		console_flush();

	/* Power off the CPU */
	ret = tftf_psci_cpu_off();

	/* Only reached if the CPU_OFF call failed. */
	ERROR("Failed to power off (%d)\n", ret);

	/*
	 * PSCI CPU_OFF call does not return when successful.
	 * Otherwise, it should return the PSCI error code 'DENIED'.
	 */
	assert(ret == PSCI_E_DENIED);

	/*
	 * The CPU failed to power down since we returned from
	 * tftf_psci_cpu_off(). So we need to adjust the framework's view of
	 * the core by marking it back online.
	 */
	tftf_set_cpu_online();
	tftf_revert_cpu_off();

	return ret;
}
| 284 | |
/*
 * C entry point for a CPU that has just been powered up.
 *
 * Performs per-CPU initialisation (arch state, optional pointer
 * authentication, GIC, timer SGI), marks the CPU online and hands control to
 * the test session. Never returns.
 */
void __dead2 tftf_warm_boot_main(void)
{
	/* Initialise the CPU */
	tftf_arch_setup();

#if ENABLE_PAUTH
	/*
	 * Program the APIAKey_EL1 key and enable ARMv8.3-PAuth here. Since
	 * this function never returns, no RETAA instruction will be executed
	 * with a mismatched key, which would otherwise cause a translation
	 * fault.
	 */
	pauth_init_enable();
#endif /* ENABLE_PAUTH */

	arm_gic_setup_local();

	/* Enable the SGI used by the timer management framework */
	tftf_irq_enable(IRQ_WAKE_SGI, GIC_HIGHEST_NS_PRIORITY);
	tftf_initialise_timer_secondary_core();

	enable_irq();

	INFO("Booting CPU:%lx\n", read_mpidr_el1());

	/* Publish this CPU as ON so the rest of the framework can use it. */
	tftf_set_cpu_online();

	/* Enter the test session */
	run_tests();

	/* Should never reach this point */
	bug_unreachable();
}