/*
 * Copyright (c) 2018, Arm Limited. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <arch_helpers.h>
#include <arm_gic.h>
#include <assert.h>
#include <cdefs.h>	/* For __dead2 */
#include <console.h>
#include <debug.h>
#include <irq.h>
#include <platform.h>
#include <platform_def.h>
#include <power_management.h>
#include <psci.h>
#include <sgi.h>
#include <spinlock.h>
#include <stdint.h>
#include <tftf.h>

/*
 * Affinity info map of CPUs as seen by TFTF
 * - Set cpus_status_map[i].state to TFTF_AFFINITY_STATE_ON to mark CPU i
 *   as ON.
 * - Set cpus_status_map[i].state to TFTF_AFFINITY_STATE_ON_PENDING to mark
 *   CPU i as ON_PENDING.
 * - Set cpus_status_map[i].state to TFTF_AFFINITY_STATE_OFF to mark CPU i
 *   as OFF.
 */
static tftf_cpu_state_t cpus_status_map[PLATFORM_CORE_COUNT];
static int cpus_status_init_done;

/*
 * Reference count keeping track of the number of CPUs participating in
 * a test.
 */
static volatile unsigned int ref_cnt;

/* Lock to prevent concurrent accesses to the reference count */
static spinlock_t ref_cnt_lock;

/* Per-CPU test entry point */
volatile test_function_t test_entrypoint[PLATFORM_CORE_COUNT];

u_register_t tftf_primary_core = INVALID_MPID;

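/*
 * Increment the reference count of CPUs participating in the test and
 * return its new value.
 */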
unsigned int tftf_inc_ref_cnt(void)
{
	unsigned int cnt;

	spin_lock(&ref_cnt_lock);
	assert(ref_cnt < PLATFORM_CORE_COUNT);
	cnt = ++ref_cnt;
	spin_unlock(&ref_cnt_lock);

	VERBOSE("Entering the test (%u CPUs in the test now)\n", cnt);

	return cnt;
}

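/*
 * Decrement the reference count of CPUs participating in the test and
 * return its new value.
 */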
unsigned int tftf_dec_ref_cnt(void)
{
	unsigned int cnt;

	spin_lock(&ref_cnt_lock);
	assert(ref_cnt != 0);
	cnt = --ref_cnt;
	spin_unlock(&ref_cnt_lock);

	VERBOSE("Exiting the test (%u CPUs in the test now)\n", cnt);

	return cnt;
}

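/* Return the current number of CPUs participating in the test. */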
unsigned int tftf_get_ref_cnt(void)
{
	return ref_cnt;
}

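/*
 * Initialise the CPUs status map. Must be called only once, by the primary
 * CPU. It marks the primary CPU as ON; all other entries stay in the OFF
 * state inherited from BSS initialisation.
 */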
void tftf_init_cpus_status_map(void)
{
	unsigned int mpid = read_mpidr_el1();
	unsigned int core_pos = platform_get_core_pos(mpid);

	/* Check that only the primary CPU does the initialisation */
	assert((mpid & MPID_MASK) == tftf_primary_core);

	/* Check that the initialisation is done only once */
	assert(!cpus_status_init_done);

	cpus_status_init_done = 1;

	/*
	 * cpus_status_map[] is already zero-initialised as part of the BSS,
	 * so just mark the primary CPU as ON.
	 */
	cpus_status_map[core_pos].state = TFTF_AFFINITY_STATE_ON;
}

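/*
 * Mark the calling CPU as ON in the CPUs status map. The CPU must have been
 * moved to the ON_PENDING state beforehand by tftf_cpu_on() or
 * tftf_try_cpu_on().
 */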
void tftf_set_cpu_online(void)
{
	unsigned int mpid = read_mpidr_el1();
	unsigned int core_pos = platform_get_core_pos(mpid);

	/*
	 * Wait here until tftf_try_cpu_on() has had a chance to update the
	 * CPU state.
	 */
	while (cpus_status_map[core_pos].state == TFTF_AFFINITY_STATE_OFF)
		;

	spin_lock(&cpus_status_map[core_pos].lock);
	assert(cpus_status_map[core_pos].state == TFTF_AFFINITY_STATE_ON_PENDING);
	cpus_status_map[core_pos].state = TFTF_AFFINITY_STATE_ON;
	spin_unlock(&cpus_status_map[core_pos].lock);
}

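/* Mark the calling CPU as OFF in the CPUs status map. */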
void tftf_set_cpu_offline(void)
{
	unsigned int mpid = read_mpidr_el1();
	unsigned int core_pos = platform_get_core_pos(mpid);

	spin_lock(&cpus_status_map[core_pos].lock);

	assert(tftf_is_cpu_online(mpid));
	cpus_status_map[core_pos].state = TFTF_AFFINITY_STATE_OFF;
	spin_unlock(&cpus_status_map[core_pos].lock);
}

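/*
 * Return a non-zero value if the CPU identified by 'mpid' is marked as ON
 * in the CPUs status map, zero otherwise.
 */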
unsigned int tftf_is_cpu_online(unsigned int mpid)
{
	unsigned int core_pos = platform_get_core_pos(mpid);

	return cpus_status_map[core_pos].state == TFTF_AFFINITY_STATE_ON;
}

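/*
 * Return a non-zero value if the CPU at the given core position is marked
 * as ON in the CPUs status map, zero otherwise.
 */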
unsigned int tftf_is_core_pos_online(unsigned int core_pos)
{
	return cpus_status_map[core_pos].state == TFTF_AFFINITY_STATE_ON;
}

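/*
 * Power on 'target_cpu' through PSCI CPU_ON, using tftf_hotplug_entry as
 * the warm boot entry point, and record 'entrypoint' as the test function
 * the CPU will jump to once the framework has finished initialising it.
 * The CPU_ON call is retried while it returns ALREADY_ON.
 * Returns a PSCI return code.
 */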
int32_t tftf_cpu_on(u_register_t target_cpu,
		    uintptr_t entrypoint,
		    u_register_t context_id)
{
	int32_t ret;
	tftf_affinity_info_t cpu_state;
	unsigned int core_pos = platform_get_core_pos(target_cpu);

	spin_lock(&cpus_status_map[core_pos].lock);
	cpu_state = cpus_status_map[core_pos].state;

	if (cpu_state == TFTF_AFFINITY_STATE_ON) {
		spin_unlock(&cpus_status_map[core_pos].lock);
		return PSCI_E_ALREADY_ON;
	}

	if (cpu_state == TFTF_AFFINITY_STATE_ON_PENDING) {
		spin_unlock(&cpus_status_map[core_pos].lock);
		return PSCI_E_SUCCESS;
	}

	assert(cpu_state == TFTF_AFFINITY_STATE_OFF);

	do {
		ret = tftf_psci_cpu_on(target_cpu,
				       (uintptr_t) tftf_hotplug_entry,
				       context_id);

		/* Check that multiple CPU_ON calls are not issued for the same CPU */
		assert(ret != PSCI_E_ON_PENDING);
	} while (ret == PSCI_E_ALREADY_ON);

	if (ret == PSCI_E_SUCCESS) {
		/*
		 * Populate the test entry point for this core.
		 * This is the address where the core will jump to once the
		 * framework has finished initialising it.
		 */
		test_entrypoint[core_pos] = (test_function_t) entrypoint;

		cpus_status_map[core_pos].state = TFTF_AFFINITY_STATE_ON_PENDING;
		spin_unlock(&cpus_status_map[core_pos].lock);
	} else {
		spin_unlock(&cpus_status_map[core_pos].lock);
		ERROR("Failed to boot CPU 0x%llx (%d)\n",
		      (unsigned long long)target_cpu, ret);
	}

	return ret;
}

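/*
 * Single-shot variant of tftf_cpu_on(): issue one PSCI CPU_ON call for
 * 'target_cpu' without retrying, update the framework state only on
 * success and return the PSCI return code.
 */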
int32_t tftf_try_cpu_on(u_register_t target_cpu,
			uintptr_t entrypoint,
			u_register_t context_id)
{
	int32_t ret;
	unsigned int core_pos = platform_get_core_pos(target_cpu);

	ret = tftf_psci_cpu_on(target_cpu,
			       (uintptr_t) tftf_hotplug_entry,
			       context_id);

	if (ret == PSCI_E_SUCCESS) {
		spin_lock(&cpus_status_map[core_pos].lock);
		assert(cpus_status_map[core_pos].state ==
				TFTF_AFFINITY_STATE_OFF);
		cpus_status_map[core_pos].state =
				TFTF_AFFINITY_STATE_ON_PENDING;

		spin_unlock(&cpus_status_map[core_pos].lock);

		/*
		 * Populate the test entry point for this core.
		 * This is the address where the core will jump to once the
		 * framework has finished initialising it.
		 */
		test_entrypoint[core_pos] = (test_function_t) entrypoint;
	}

	return ret;
}

/*
 * Prepare the core to power off. Any driver which needs to perform specific
 * tasks before powering off a CPU, e.g. migrating interrupts to another
 * core, can implement a function and call it from here.
 */
static void tftf_prepare_cpu_off(void)
{
	/*
	 * Do the bare minimum to turn off this CPU, i.e. turn off interrupts
	 * and disable the GIC CPU interface.
	 */
	disable_irq();
	arm_gic_disable_interrupts_local();
}

/*
 * Revert the changes made during tftf_prepare_cpu_off().
 */
static void tftf_revert_cpu_off(void)
{
	arm_gic_enable_interrupts_local();
	enable_irq();
}

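/*
 * Power down the calling CPU through PSCI CPU_OFF. On success this function
 * does not return; on failure it restores the framework's view of the CPU
 * and returns the PSCI error code.
 */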
int32_t tftf_cpu_off(void)
{
	int32_t ret;

	tftf_prepare_cpu_off();
	tftf_set_cpu_offline();

	INFO("Powering off\n");

	/* Flush console before the last CPU is powered off. */
	if (tftf_get_ref_cnt() == 0)
		console_flush();

	/* Power off the CPU */
	ret = tftf_psci_cpu_off();

	ERROR("Failed to power off (%d)\n", ret);

	/*
	 * A successful PSCI CPU_OFF call does not return.
	 * If it does return, it should report the PSCI error code 'DENIED'.
	 */
	assert(ret == PSCI_E_DENIED);

	/*
	 * Since we returned from tftf_psci_cpu_off(), the CPU failed to power
	 * down. Adjust the framework's view of the core by marking it back
	 * online.
	 */
	tftf_set_cpu_online();
	tftf_revert_cpu_off();

	return ret;
}

/*
 * C entry point for a CPU that has just been powered up.
 */
void __dead2 tftf_warm_boot_main(void)
{
	/* Initialise the CPU */
	tftf_arch_setup();
	arm_gic_setup_local();

	/* Enable the SGI used by the timer management framework */
	tftf_irq_enable(IRQ_WAKE_SGI, GIC_HIGHEST_NS_PRIORITY);

	enable_irq();

	INFO("Booting\n");

	tftf_set_cpu_online();

	/* Enter the test session */
	run_tests();

	/* Should never reach this point */
	bug_unreachable();
}