blob: bc1bad03a80ec8fc7e6729e06053112558ae8021 [file] [log] [blame]
Achin Gupta4f6ad662013-10-25 09:08:21 +01001/*
Boyan Karatotev3b802102024-11-06 16:26:15 +00002 * Copyright (c) 2013-2025, Arm Limited and Contributors. All rights reserved.
Achin Gupta4f6ad662013-10-25 09:08:21 +01003 *
dp-arm82cb2c12017-05-03 09:38:09 +01004 * SPDX-License-Identifier: BSD-3-Clause
Achin Gupta4f6ad662013-10-25 09:08:21 +01005 */
6
Antonio Nino Diaz09d40e02018-12-14 00:18:21 +00007#include <assert.h>
8#include <string.h>
9
Dan Handley97043ac2014-04-09 13:14:54 +010010#include <arch.h>
Jayanth Dodderi Chidanand777f1f62023-07-18 14:48:09 +010011#include <arch_features.h>
Achin Gupta4f6ad662013-10-25 09:08:21 +010012#include <arch_helpers.h>
Antonio Nino Diaz09d40e02018-12-14 00:18:21 +000013#include <common/bl_common.h>
14#include <common/debug.h>
Dan Handley97043ac2014-04-09 13:14:54 +010015#include <context.h>
Sandeep Tripathy22744902020-08-17 20:22:13 +053016#include <drivers/delay_timer.h>
Boyan Karatotev232c1892025-03-11 16:41:33 +000017#include <lib/cpus/cpu_ops.h>
Antonio Nino Diaz09d40e02018-12-14 00:18:21 +000018#include <lib/el3_runtime/context_mgmt.h>
Jayanth Dodderi Chidanand777f1f62023-07-18 14:48:09 +010019#include <lib/extensions/spe.h>
Boyan Karatotev9b1e8002024-10-10 08:11:09 +010020#include <lib/pmf/pmf.h>
21#include <lib/runtime_instr.h>
Antonio Nino Diaz09d40e02018-12-14 00:18:21 +000022#include <lib/utils.h>
23#include <plat/common/platform.h>
24
Dan Handley35e98e52014-04-09 13:13:04 +010025#include "psci_private.h"
Achin Gupta4f6ad662013-10-25 09:08:21 +010026
Achin Gupta607084e2014-02-09 18:24:19 +000027/*
Jeenu Viswambharan7f366602014-02-20 17:11:00 +000028 * SPD power management operations, expected to be supplied by the registered
29 * SPD on successful SP initialization
Achin Gupta607084e2014-02-09 18:24:19 +000030 */
Dan Handleyfb037bf2014-04-10 15:37:22 +010031const spd_pm_ops_t *psci_spd_pm;
Achin Gupta607084e2014-02-09 18:24:19 +000032
Soby Mathew67487842015-07-13 14:10:57 +010033/*
34 * PSCI requested local power state map. This array is used to store the local
35 * power states requested by a CPU for power levels from level 1 to
36 * PLAT_MAX_PWR_LVL. It does not store the requested local power state for power
37 * level 0 (PSCI_CPU_PWR_LVL) as the requested and the target power state for a
38 * CPU are the same.
39 *
40 * During state coordination, the platform is passed an array containing the
41 * local states requested for a particular non cpu power domain by each cpu
42 * within the domain.
43 *
44 * TODO: Dense packing of the requested states will cause cache thrashing
45 * when multiple power domains write to it. If we allocate the requested
46 * states at each power level in a cache-line aligned per-domain memory,
47 * the cache thrashing can be avoided.
48 */
49static plat_local_state_t
50 psci_req_local_pwr_states[PLAT_MAX_PWR_LVL][PLATFORM_CORE_COUNT];
51
Pankaj Guptaab4df502019-10-15 15:44:45 +053052unsigned int psci_plat_core_count;
Soby Mathew67487842015-07-13 14:10:57 +010053
Achin Gupta4f6ad662013-10-25 09:08:21 +010054/*******************************************************************************
Soby Mathew67487842015-07-13 14:10:57 +010055 * Arrays that hold the platform's power domain tree information for state
56 * management of power domains.
57 * Each node in the array 'psci_non_cpu_pd_nodes' corresponds to a power domain
58 * which is an ancestor of a CPU power domain.
59 * Each node in the array 'psci_cpu_pd_nodes' corresponds to a cpu power domain
Achin Gupta4f6ad662013-10-25 09:08:21 +010060 ******************************************************************************/
Soby Mathew67487842015-07-13 14:10:57 +010061non_cpu_pd_node_t psci_non_cpu_pd_nodes[PSCI_NUM_NON_CPU_PWR_DOMAINS]
Soby Mathewab8707e2015-01-08 18:02:44 +000062#if USE_COHERENT_MEM
Chris Kayda043412023-02-14 11:30:04 +000063__section(".tzfw_coherent_mem")
Soby Mathewab8707e2015-01-08 18:02:44 +000064#endif
65;
Achin Gupta4f6ad662013-10-25 09:08:21 +010066
Jeenu Viswambharanb0408e82017-01-05 11:01:02 +000067/* Lock for PSCI state coordination */
68DEFINE_PSCI_LOCK(psci_locks[PSCI_NUM_NON_CPU_PWR_DOMAINS]);
Andrew Thoelkeee7b35c2015-09-10 11:39:36 +010069
Soby Mathew67487842015-07-13 14:10:57 +010070cpu_pd_node_t psci_cpu_pd_nodes[PLATFORM_CORE_COUNT];
71
Achin Gupta4f6ad662013-10-25 09:08:21 +010072/*******************************************************************************
Achin Gupta4f6ad662013-10-25 09:08:21 +010073 * Pointer to functions exported by the platform to complete power mgmt. ops
74 ******************************************************************************/
Soby Mathew67487842015-07-13 14:10:57 +010075const plat_psci_ops_t *psci_plat_pm_ops;
Achin Gupta4f6ad662013-10-25 09:08:21 +010076
Soby Mathew67487842015-07-13 14:10:57 +010077/******************************************************************************
78 * Check that the maximum power level supported by the platform makes sense
79 *****************************************************************************/
Antonio Nino Diaz6b7b0f32018-07-17 15:10:08 +010080CASSERT((PLAT_MAX_PWR_LVL <= PSCI_MAX_PWR_LVL) &&
81 (PLAT_MAX_PWR_LVL >= PSCI_CPU_PWR_LVL),
82 assert_platform_max_pwrlvl_check);
Soby Mathew8c32bc22015-02-12 14:45:02 +000083
Wing Lib88a4412022-09-14 13:18:15 -070084#if PSCI_OS_INIT_MODE
85/*******************************************************************************
86 * The power state coordination mode used in CPU_SUSPEND.
87 * Defaults to platform-coordinated mode.
88 ******************************************************************************/
89suspend_mode_t psci_suspend_mode = PLAT_COORD;
90#endif
91
Soby Mathew67487842015-07-13 14:10:57 +010092/*
93 * The plat_local_state used by the platform is one of these types: RUN,
94 * RETENTION and OFF. The platform can define further sub-states for each type
95 * apart from RUN. This categorization is done to verify the sanity of the
96 * psci_power_state passed by the platform and to print debug information. The
97 * categorization is done on the basis of the following conditions:
98 *
99 * 1. If (plat_local_state == 0) then the category is STATE_TYPE_RUN.
100 *
101 * 2. If (0 < plat_local_state <= PLAT_MAX_RET_STATE), then the category is
102 * STATE_TYPE_RETN.
103 *
104 * 3. If (plat_local_state > PLAT_MAX_RET_STATE), then the category is
105 * STATE_TYPE_OFF.
106 */
107typedef enum plat_local_state_type {
108 STATE_TYPE_RUN = 0,
109 STATE_TYPE_RETN,
110 STATE_TYPE_OFF
111} plat_local_state_type_t;
112
Antonio Nino Diaz97373c32018-07-18 11:57:21 +0100113/* Function used to categorize plat_local_state. */
114static plat_local_state_type_t find_local_state_type(plat_local_state_t state)
115{
116 if (state != 0U) {
117 if (state > PLAT_MAX_RET_STATE) {
118 return STATE_TYPE_OFF;
119 } else {
120 return STATE_TYPE_RETN;
121 }
122 } else {
123 return STATE_TYPE_RUN;
124 }
125}
Soby Mathew67487842015-07-13 14:10:57 +0100126
127/******************************************************************************
128 * Check that the maximum retention level supported by the platform is less
129 * than the maximum off level.
130 *****************************************************************************/
Antonio Nino Diaz6b7b0f32018-07-17 15:10:08 +0100131CASSERT(PLAT_MAX_RET_STATE < PLAT_MAX_OFF_STATE,
Soby Mathew67487842015-07-13 14:10:57 +0100132 assert_platform_max_off_and_retn_state_check);
133
/******************************************************************************
 * This function ensures that the power state parameter in a CPU_SUSPEND request
 * is valid. If so, it returns the requested states for each power level.
 *
 * Returns PSCI_E_INVALID_PARAMS if any "should be zero" bits are set in
 * power_state; otherwise returns the platform's verdict and fills state_info
 * with the requested local power state per level.
 *****************************************************************************/
int psci_validate_power_state(unsigned int power_state,
			      psci_power_state_t *state_info)
{
	/* Check SBZ bits in power state are zero */
	if (psci_check_power_state(power_state) != 0U) {
		return PSCI_E_INVALID_PARAMS;
	}
	/* Hook must exist: the capability was established during psci_setup() */
	assert(psci_plat_pm_ops->validate_power_state != NULL);

	/* Validate the power_state using platform pm_ops */
	return psci_plat_pm_ops->validate_power_state(power_state, state_info);
}
Achin Gupta0a46e2c2014-07-31 11:19:11 +0100150
/******************************************************************************
 * This function retrieves the `psci_power_state_t` for system suspend from
 * the platform.
 *
 * state_info is purely an output parameter; it is overwritten with the
 * platform-defined power states required to enter system suspend.
 *****************************************************************************/
void psci_query_sys_suspend_pwrstate(psci_power_state_t *state_info)
{
	/*
	 * Assert that the required pm_ops hook is implemented to ensure that
	 * the capability detected during psci_setup() is valid.
	 */
	assert(psci_plat_pm_ops->get_sys_suspend_power_state != NULL);

	/*
	 * Query the platform for the power_state required for system suspend
	 */
	psci_plat_pm_ops->get_sys_suspend_power_state(state_info);
}
168
Wing Li606b7432022-09-14 13:18:17 -0700169#if PSCI_OS_INIT_MODE
/*******************************************************************************
 * This function verifies that all the other cores at the 'end_pwrlvl' have been
 * idled and the current CPU is the last running CPU at the 'end_pwrlvl'.
 * Returns true if the current CPU is the last ON CPU or false otherwise.
 ******************************************************************************/
static bool psci_is_last_cpu_to_idle_at_pwrlvl(unsigned int my_idx, unsigned int end_pwrlvl)
{
	unsigned int lvl;
	unsigned int parent_idx = 0;
	unsigned int cpu_start_idx, ncpus, cpu_idx;
	plat_local_state_t local_state;

	/* At the CPU level there are no siblings to check. */
	if (end_pwrlvl == PSCI_CPU_PWR_LVL) {
		return true;
	}

	/* Walk up the power-domain tree to the ancestor node at end_pwrlvl. */
	parent_idx = psci_cpu_pd_nodes[my_idx].parent_node;
	for (lvl = PSCI_CPU_PWR_LVL + U(1); lvl < end_pwrlvl; lvl++) {
		parent_idx = psci_non_cpu_pd_nodes[parent_idx].parent_node;
	}

	/* All CPUs under that ancestor form a contiguous index range. */
	cpu_start_idx = psci_non_cpu_pd_nodes[parent_idx].cpu_start_idx;
	ncpus = psci_non_cpu_pd_nodes[parent_idx].ncpus;

	for (cpu_idx = cpu_start_idx; cpu_idx < cpu_start_idx + ncpus;
			cpu_idx++) {
		local_state = psci_get_cpu_local_state_by_idx(cpu_idx);
		if (cpu_idx == my_idx) {
			/* The calling CPU must itself still be running. */
			assert(is_local_state_run(local_state) != 0);
			continue;
		}

		/* Any other CPU still in RUN means we are not the last. */
		if (is_local_state_run(local_state) != 0) {
			return false;
		}
	}

	return true;
}
210#endif
211
Achin Gupta0a46e2c2014-07-31 11:19:11 +0100212/*******************************************************************************
Wing Lib88a4412022-09-14 13:18:15 -0700213 * This function verifies that all the other cores in the system have been
Soby Mathewc0aff0e2014-12-17 14:47:57 +0000214 * turned OFF and the current CPU is the last running CPU in the system.
Jayanth Dodderi Chidanandb41b0822022-08-22 23:46:10 +0100215 * Returns true, if the current CPU is the last ON CPU or false otherwise.
Soby Mathewc0aff0e2014-12-17 14:47:57 +0000216 ******************************************************************************/
Boyan Karatotev3b802102024-11-06 16:26:15 +0000217bool psci_is_last_on_cpu(unsigned int my_idx)
Soby Mathewc0aff0e2014-12-17 14:47:57 +0000218{
Manish V Badarkhea7be2a52025-05-29 14:55:39 +0100219 for (unsigned int cpu_idx = 0U; cpu_idx < psci_plat_core_count; cpu_idx++) {
Soby Mathew67487842015-07-13 14:10:57 +0100220 if (cpu_idx == my_idx) {
221 assert(psci_get_aff_info_state() == AFF_STATE_ON);
Soby Mathewc0aff0e2014-12-17 14:47:57 +0000222 continue;
223 }
224
Jayanth Dodderi Chidanandb41b0822022-08-22 23:46:10 +0100225 if (psci_get_aff_info_state_by_idx(cpu_idx) != AFF_STATE_OFF) {
226 VERBOSE("core=%u other than current core=%u %s\n",
227 cpu_idx, my_idx, "running in the system");
228 return false;
229 }
Soby Mathewc0aff0e2014-12-17 14:47:57 +0000230 }
231
Jayanth Dodderi Chidanandb41b0822022-08-22 23:46:10 +0100232 return true;
Soby Mathewc0aff0e2014-12-17 14:47:57 +0000233}
234
235/*******************************************************************************
Wing Lib88a4412022-09-14 13:18:15 -0700236 * This function verifies that all cores in the system have been turned ON.
237 * Returns true, if all CPUs are ON or false otherwise.
238 ******************************************************************************/
239static bool psci_are_all_cpus_on(void)
240{
241 unsigned int cpu_idx;
242
Manish V Badarkhea7be2a52025-05-29 14:55:39 +0100243 for (cpu_idx = 0U; cpu_idx < psci_plat_core_count; cpu_idx++) {
Wing Lib88a4412022-09-14 13:18:15 -0700244 if (psci_get_aff_info_state_by_idx(cpu_idx) == AFF_STATE_OFF) {
245 return false;
246 }
247 }
248
249 return true;
250}
251
252/*******************************************************************************
Manish V Badarkhea7be2a52025-05-29 14:55:39 +0100253 * Counts the number of CPUs in the system that are currently in the ON or
254 * ON_PENDING state.
255 *
256 * @note This function does not acquire any power domain locks. It must only be
257 * called in contexts where it is guaranteed that PSCI state transitions
258 * are not concurrently happening, or where locks are already held.
259 *
260 * @return The number of CPUs currently in AFF_STATE_ON or AFF_STATE_ON_PENDING.
261 ******************************************************************************/
262static unsigned int psci_num_cpus_running(void)
263{
264 unsigned int cpu_idx;
265 unsigned int no_of_cpus = 0U;
266 aff_info_state_t aff_state;
267
268 for (cpu_idx = 0U; cpu_idx < psci_plat_core_count; cpu_idx++) {
269 aff_state = psci_get_aff_info_state_by_idx(cpu_idx);
270 if (aff_state == AFF_STATE_ON ||
271 aff_state == AFF_STATE_ON_PENDING) {
272 no_of_cpus++;
273 }
274 }
275
276 return no_of_cpus;
277}
278
279/*******************************************************************************
Soby Mathew67487842015-07-13 14:10:57 +0100280 * Routine to return the maximum power level to traverse to after a cpu has
Achin Guptaa45e3972013-12-05 15:10:48 +0000281 * been physically powered up. It is expected to be called immediately after
Achin Gupta776b68a2014-07-25 14:52:47 +0100282 * reset from assembler code.
Achin Guptaa45e3972013-12-05 15:10:48 +0000283 ******************************************************************************/
Soby Mathew9d070b92015-07-29 17:05:03 +0100284static unsigned int get_power_on_target_pwrlvl(void)
Achin Guptaa45e3972013-12-05 15:10:48 +0000285{
Soby Mathew9d070b92015-07-29 17:05:03 +0100286 unsigned int pwrlvl;
Achin Guptaa45e3972013-12-05 15:10:48 +0000287
288 /*
Soby Mathew67487842015-07-13 14:10:57 +0100289 * Assume that this cpu was suspended and retrieve its target power
Boyan Karatotev0c836552024-09-30 11:31:55 +0100290 * level. If it wasn't, the cpu is off so this will be PLAT_MAX_PWR_LVL.
Achin Gupta776b68a2014-07-25 14:52:47 +0100291 */
Soby Mathew67487842015-07-13 14:10:57 +0100292 pwrlvl = psci_get_suspend_pwrlvl();
Deepika Bhavnani0c411c72019-08-17 01:10:02 +0300293 assert(pwrlvl < PSCI_INVALID_PWR_LVL);
Soby Mathew67487842015-07-13 14:10:57 +0100294 return pwrlvl;
Achin Guptaa45e3972013-12-05 15:10:48 +0000295}
296
/******************************************************************************
 * Helper function to update the requested local power state array. This array
 * does not store the requested state for the CPU power level. Hence an
 * assertion is added to prevent us from accessing the CPU power level.
 *****************************************************************************/
static void psci_set_req_local_pwr_state(unsigned int pwrlvl,
					 unsigned int cpu_idx,
					 plat_local_state_t req_pwr_state)
{
	assert(pwrlvl > PSCI_CPU_PWR_LVL);
	/*
	 * The run-time check mirrors the assertion so that out-of-range
	 * arguments are silently ignored in release builds instead of
	 * corrupting adjacent memory. Level 'pwrlvl' is stored at row
	 * 'pwrlvl - 1' because level 0 (the CPU level) is not stored.
	 */
	if ((pwrlvl > PSCI_CPU_PWR_LVL) && (pwrlvl <= PLAT_MAX_PWR_LVL) &&
			(cpu_idx < psci_plat_core_count)) {
		psci_req_local_pwr_states[pwrlvl - 1U][cpu_idx] = req_pwr_state;
	}
}
Achin Gupta4f6ad662013-10-25 09:08:21 +0100312
Soby Mathew67487842015-07-13 14:10:57 +0100313/******************************************************************************
314 * This function initializes the psci_req_local_pwr_states.
315 *****************************************************************************/
Daniel Boulby87c85132018-09-20 14:12:46 +0100316void __init psci_init_req_local_pwr_states(void)
Soby Mathew67487842015-07-13 14:10:57 +0100317{
318 /* Initialize the requested state of all non CPU power domains as OFF */
Antonio Nino Diaz6b7b0f32018-07-17 15:10:08 +0100319 unsigned int pwrlvl;
Pankaj Guptaab4df502019-10-15 15:44:45 +0530320 unsigned int core;
Antonio Nino Diaz6b7b0f32018-07-17 15:10:08 +0100321
322 for (pwrlvl = 0U; pwrlvl < PLAT_MAX_PWR_LVL; pwrlvl++) {
Pankaj Guptaab4df502019-10-15 15:44:45 +0530323 for (core = 0; core < psci_plat_core_count; core++) {
Antonio Nino Diaz6b7b0f32018-07-17 15:10:08 +0100324 psci_req_local_pwr_states[pwrlvl][core] =
325 PLAT_MAX_OFF_STATE;
326 }
327 }
Soby Mathew67487842015-07-13 14:10:57 +0100328}
329
330/******************************************************************************
331 * Helper function to return a reference to an array containing the local power
332 * states requested by each cpu for a power domain at 'pwrlvl'. The size of the
333 * array will be the number of cpu power domains of which this power domain is
334 * an ancestor. These requested states will be used to determine a suitable
335 * target state for this power domain during psci state coordination. An
336 * assertion is added to prevent us from accessing the CPU power level.
337 *****************************************************************************/
Soby Mathew9d070b92015-07-29 17:05:03 +0100338static plat_local_state_t *psci_get_req_local_pwr_states(unsigned int pwrlvl,
Deepika Bhavnanifc810212019-08-27 00:32:24 +0300339 unsigned int cpu_idx)
Soby Mathew67487842015-07-13 14:10:57 +0100340{
341 assert(pwrlvl > PSCI_CPU_PWR_LVL);
342
Deepika Bhavnani41af0512019-08-15 00:56:46 +0300343 if ((pwrlvl > PSCI_CPU_PWR_LVL) && (pwrlvl <= PLAT_MAX_PWR_LVL) &&
Pankaj Guptaab4df502019-10-15 15:44:45 +0530344 (cpu_idx < psci_plat_core_count)) {
Deepika Bhavnani41af0512019-08-15 00:56:46 +0300345 return &psci_req_local_pwr_states[pwrlvl - 1U][cpu_idx];
346 } else
347 return NULL;
Soby Mathew67487842015-07-13 14:10:57 +0100348}
349
Wing Li606b7432022-09-14 13:18:17 -0700350#if PSCI_OS_INIT_MODE
/******************************************************************************
 * Helper function to save a copy of the psci_req_local_pwr_states (prev) for a
 * CPU (cpu_idx), and update psci_req_local_pwr_states with the new requested
 * local power states (state_info).
 *
 * 'prev' must have room for one entry per level from level 1 up to the
 * platform's maximum suspend level; entry [lvl - 1] holds level 'lvl'.
 *****************************************************************************/
void psci_update_req_local_pwr_states(unsigned int end_pwrlvl,
				      unsigned int cpu_idx,
				      psci_power_state_t *state_info,
				      plat_local_state_t *prev)
{
	unsigned int lvl;
#ifdef PLAT_MAX_CPU_SUSPEND_PWR_LVL
	unsigned int max_pwrlvl = PLAT_MAX_CPU_SUSPEND_PWR_LVL;
#else
	unsigned int max_pwrlvl = PLAT_MAX_PWR_LVL;
#endif
	plat_local_state_t req_state;

	for (lvl = PSCI_CPU_PWR_LVL + 1U; lvl <= max_pwrlvl; lvl++) {
		/* Save the previous requested local power state */
		prev[lvl - 1U] = *psci_get_req_local_pwr_states(lvl, cpu_idx);

		/* Update the new requested local power state */
		if (lvl <= end_pwrlvl) {
			req_state = state_info->pwr_domain_state[lvl];
		} else {
			/*
			 * Above end_pwrlvl, re-use the state requested at
			 * end_pwrlvl for the remaining levels.
			 */
			req_state = state_info->pwr_domain_state[end_pwrlvl];
		}
		psci_set_req_local_pwr_state(lvl, cpu_idx, req_state);
	}
}
382
383/******************************************************************************
384 * Helper function to restore the previously saved requested local power states
385 * (prev) for a CPU (cpu_idx) to psci_req_local_pwr_states.
386 *****************************************************************************/
387void psci_restore_req_local_pwr_states(unsigned int cpu_idx,
388 plat_local_state_t *prev)
389{
390 unsigned int lvl;
391#ifdef PLAT_MAX_CPU_SUSPEND_PWR_LVL
392 unsigned int max_pwrlvl = PLAT_MAX_CPU_SUSPEND_PWR_LVL;
393#else
394 unsigned int max_pwrlvl = PLAT_MAX_PWR_LVL;
395#endif
396
397 for (lvl = PSCI_CPU_PWR_LVL + 1U; lvl <= max_pwrlvl; lvl++) {
398 /* Restore the previous requested local power state */
399 psci_set_req_local_pwr_state(lvl, cpu_idx, prev[lvl - 1U]);
400 }
401}
402#endif
403
Jeenu Viswambharana10d3632017-01-06 14:58:11 +0000404/*
405 * psci_non_cpu_pd_nodes can be placed either in normal memory or coherent
406 * memory.
407 *
408 * With !USE_COHERENT_MEM, psci_non_cpu_pd_nodes is placed in normal memory,
409 * it's accessed by both cached and non-cached participants. To serve the common
410 * minimum, perform a cache flush before read and after write so that non-cached
411 * participants operate on latest data in main memory.
412 *
413 * When USE_COHERENT_MEM is used, psci_non_cpu_pd_nodes is placed in coherent
414 * memory. With HW_ASSISTED_COHERENCY, all PSCI participants are cache-coherent.
415 * In both cases, no cache operations are required.
416 */
417
/*
 * Retrieve local state of non-CPU power domain node from a non-cached CPU,
 * after any required cache maintenance operation.
 *
 * When no coherency guarantee exists (see the block comment above), the line
 * is flushed first so the read observes the latest value in main memory.
 */
static plat_local_state_t get_non_cpu_pd_node_local_state(
		unsigned int parent_idx)
{
#if !(USE_COHERENT_MEM || HW_ASSISTED_COHERENCY || WARMBOOT_ENABLE_DCACHE_EARLY)
	flush_dcache_range(
			(uintptr_t) &psci_non_cpu_pd_nodes[parent_idx],
			sizeof(psci_non_cpu_pd_nodes[parent_idx]));
#endif
	return psci_non_cpu_pd_nodes[parent_idx].local_state;
}
432
/*
 * Update local state of non-CPU power domain node from a cached CPU; perform
 * any required cache maintenance operation afterwards.
 *
 * The flush after the write makes the new value visible to participants that
 * run with their data cache disabled (e.g. a core in early warm boot).
 */
static void set_non_cpu_pd_node_local_state(unsigned int parent_idx,
					    plat_local_state_t state)
{
	psci_non_cpu_pd_nodes[parent_idx].local_state = state;
#if !(USE_COHERENT_MEM || HW_ASSISTED_COHERENCY || WARMBOOT_ENABLE_DCACHE_EARLY)
	flush_dcache_range(
			(uintptr_t) &psci_non_cpu_pd_nodes[parent_idx],
			sizeof(psci_non_cpu_pd_nodes[parent_idx]));
#endif
}
447
/******************************************************************************
 * Helper function to return the current local power state of each power domain
 * from the current cpu power domain to its ancestor at the 'end_pwrlvl'. This
 * function will be called after a cpu is powered on to find the local state
 * each power domain has emerged from.
 *****************************************************************************/
void psci_get_target_local_pwr_states(unsigned int cpu_idx, unsigned int end_pwrlvl,
				      psci_power_state_t *target_state)
{
	unsigned int parent_idx, lvl;
	plat_local_state_t *pd_state = target_state->pwr_domain_state;

	pd_state[PSCI_CPU_PWR_LVL] = psci_get_cpu_local_state();
	parent_idx = psci_cpu_pd_nodes[cpu_idx].parent_node;

	/* Copy the local power state from node to state_info */
	for (lvl = PSCI_CPU_PWR_LVL + 1U; lvl <= end_pwrlvl; lvl++) {
		pd_state[lvl] = get_non_cpu_pd_node_local_state(parent_idx);
		parent_idx = psci_non_cpu_pd_nodes[parent_idx].parent_node;
	}

	/* Set the higher levels to RUN */
	for (; lvl <= PLAT_MAX_PWR_LVL; lvl++) {
		target_state->pwr_domain_state[lvl] = PSCI_LOCAL_STATE_RUN;
	}
}
474
/******************************************************************************
 * Helper function to set the target local power state that each power domain
 * from the current cpu power domain to its ancestor at the 'end_pwrlvl' will
 * enter. This function will be called after coordination of requested power
 * states has been done for each power level.
 *****************************************************************************/
void psci_set_target_local_pwr_states(unsigned int cpu_idx, unsigned int end_pwrlvl,
				      const psci_power_state_t *target_state)
{
	unsigned int parent_idx, lvl;
	const plat_local_state_t *pd_state = target_state->pwr_domain_state;

	psci_set_cpu_local_state(pd_state[PSCI_CPU_PWR_LVL]);

	/*
	 * Need to flush as local_state might be accessed with Data Cache
	 * disabled during power on
	 */
	psci_flush_cpu_data(psci_svc_cpu_data.local_state);

	parent_idx = psci_cpu_pd_nodes[cpu_idx].parent_node;

	/* Copy the local_state from state_info */
	for (lvl = 1U; lvl <= end_pwrlvl; lvl++) {
		set_non_cpu_pd_node_local_state(parent_idx, pd_state[lvl]);
		parent_idx = psci_non_cpu_pd_nodes[parent_idx].parent_node;
	}
}
503
504/*******************************************************************************
Soby Mathew67487842015-07-13 14:10:57 +0100505 * PSCI helper function to get the parent nodes corresponding to a cpu_index.
Achin Gupta0959db52013-12-02 17:33:04 +0000506 ******************************************************************************/
Deepika Bhavnanifc810212019-08-27 00:32:24 +0300507void psci_get_parent_pwr_domain_nodes(unsigned int cpu_idx,
Soby Mathew9d070b92015-07-29 17:05:03 +0100508 unsigned int end_lvl,
Antonio Nino Diaz6b7b0f32018-07-17 15:10:08 +0100509 unsigned int *node_index)
Achin Gupta0959db52013-12-02 17:33:04 +0000510{
Soby Mathew67487842015-07-13 14:10:57 +0100511 unsigned int parent_node = psci_cpu_pd_nodes[cpu_idx].parent_node;
Varun Wadekar6311f632017-06-07 09:57:42 -0700512 unsigned int i;
Antonio Nino Diaz6b7b0f32018-07-17 15:10:08 +0100513 unsigned int *node = node_index;
Soby Mathew67487842015-07-13 14:10:57 +0100514
Antonio Nino Diaz6b7b0f32018-07-17 15:10:08 +0100515 for (i = PSCI_CPU_PWR_LVL + 1U; i <= end_lvl; i++) {
516 *node = parent_node;
517 node++;
Soby Mathew67487842015-07-13 14:10:57 +0100518 parent_node = psci_non_cpu_pd_nodes[parent_node].parent_node;
519 }
520}
521
/******************************************************************************
 * This function is invoked post CPU power up and initialization. It sets the
 * affinity info state, target power state and requested power state for the
 * current CPU and all its ancestor power domains to RUN.
 *****************************************************************************/
void psci_set_pwr_domains_to_run(unsigned int cpu_idx, unsigned int end_pwrlvl)
{
	unsigned int parent_idx, lvl;
	parent_idx = psci_cpu_pd_nodes[cpu_idx].parent_node;

	/* Reset the local_state to RUN for the non cpu power domains. */
	for (lvl = PSCI_CPU_PWR_LVL + 1U; lvl <= end_pwrlvl; lvl++) {
		set_non_cpu_pd_node_local_state(parent_idx,
				PSCI_LOCAL_STATE_RUN);
		psci_set_req_local_pwr_state(lvl,
					     cpu_idx,
					     PSCI_LOCAL_STATE_RUN);
		parent_idx = psci_non_cpu_pd_nodes[parent_idx].parent_node;
	}

	/* Set the affinity info state to ON */
	psci_set_aff_info_state(AFF_STATE_ON);

	psci_set_cpu_local_state(PSCI_LOCAL_STATE_RUN);
	/* Flush so the update is visible to cores running with caches off */
	psci_flush_cpu_data(psci_svc_cpu_data);
}
548
549/******************************************************************************
Wing Li606b7432022-09-14 13:18:17 -0700550 * This function is used in platform-coordinated mode.
551 *
Soby Mathew67487842015-07-13 14:10:57 +0100552 * This function is passed the local power states requested for each power
553 * domain (state_info) between the current CPU domain and its ancestors until
554 * the target power level (end_pwrlvl). It updates the array of requested power
555 * states with this information.
556 *
557 * Then, for each level (apart from the CPU level) until the 'end_pwrlvl', it
558 * retrieves the states requested by all the cpus of which the power domain at
559 * that level is an ancestor. It passes this information to the platform to
560 * coordinate and return the target power state. If the target state for a level
561 * is RUN then subsequent levels are not considered. At the CPU level, state
562 * coordination is not required. Hence, the requested and the target states are
563 * the same.
564 *
565 * The 'state_info' is updated with the target state for each level between the
566 * CPU and the 'end_pwrlvl' and returned to the caller.
567 *
568 * This function will only be invoked with data cache enabled and while
569 * powering down a core.
570 *****************************************************************************/
Boyan Karatotev3b802102024-11-06 16:26:15 +0000571void psci_do_state_coordination(unsigned int cpu_idx, unsigned int end_pwrlvl,
Soby Mathew9d070b92015-07-29 17:05:03 +0100572 psci_power_state_t *state_info)
Soby Mathew67487842015-07-13 14:10:57 +0100573{
Boyan Karatotev3b802102024-11-06 16:26:15 +0000574 unsigned int lvl, parent_idx;
Deepika Bhavnanifc810212019-08-27 00:32:24 +0300575 unsigned int start_idx;
Antonio Nino Diaz6b7b0f32018-07-17 15:10:08 +0100576 unsigned int ncpus;
Nithin G7b970842024-04-19 18:06:36 +0530577 plat_local_state_t target_state;
Soby Mathew67487842015-07-13 14:10:57 +0100578
Soby Mathew6d189692016-02-02 14:23:10 +0000579 assert(end_pwrlvl <= PLAT_MAX_PWR_LVL);
Soby Mathew67487842015-07-13 14:10:57 +0100580 parent_idx = psci_cpu_pd_nodes[cpu_idx].parent_node;
581
582 /* For level 0, the requested state will be equivalent
583 to target state */
Antonio Nino Diaz6b7b0f32018-07-17 15:10:08 +0100584 for (lvl = PSCI_CPU_PWR_LVL + 1U; lvl <= end_pwrlvl; lvl++) {
Soby Mathew67487842015-07-13 14:10:57 +0100585
586 /* First update the requested power state */
587 psci_set_req_local_pwr_state(lvl, cpu_idx,
588 state_info->pwr_domain_state[lvl]);
589
590 /* Get the requested power states for this power level */
591 start_idx = psci_non_cpu_pd_nodes[parent_idx].cpu_start_idx;
Nithin G7b970842024-04-19 18:06:36 +0530592 plat_local_state_t const *req_states = psci_get_req_local_pwr_states(lvl,
593 start_idx);
Soby Mathew67487842015-07-13 14:10:57 +0100594
595 /*
596 * Let the platform coordinate amongst the requested states at
597 * this power level and return the target local power state.
598 */
599 ncpus = psci_non_cpu_pd_nodes[parent_idx].ncpus;
600 target_state = plat_get_target_pwr_state(lvl,
601 req_states,
602 ncpus);
603
604 state_info->pwr_domain_state[lvl] = target_state;
605
606 /* Break early if the negotiated target power state is RUN */
Maheedhar Bollapallic7b0a282024-04-25 11:47:27 +0530607 if (is_local_state_run(state_info->pwr_domain_state[lvl]) != 0) {
Soby Mathew67487842015-07-13 14:10:57 +0100608 break;
Maheedhar Bollapallic7b0a282024-04-25 11:47:27 +0530609 }
Soby Mathew67487842015-07-13 14:10:57 +0100610
611 parent_idx = psci_non_cpu_pd_nodes[parent_idx].parent_node;
612 }
613
614 /*
615 * This is for cases when we break out of the above loop early because
616 * the target power state is RUN at a power level < end_pwlvl.
617 * We update the requested power state from state_info and then
618 * set the target state as RUN.
619 */
Antonio Nino Diaz6b7b0f32018-07-17 15:10:08 +0100620 for (lvl = lvl + 1U; lvl <= end_pwrlvl; lvl++) {
Soby Mathew67487842015-07-13 14:10:57 +0100621 psci_set_req_local_pwr_state(lvl, cpu_idx,
622 state_info->pwr_domain_state[lvl]);
623 state_info->pwr_domain_state[lvl] = PSCI_LOCAL_STATE_RUN;
624
625 }
Soby Mathew67487842015-07-13 14:10:57 +0100626}
627
Wing Li606b7432022-09-14 13:18:17 -0700628#if PSCI_OS_INIT_MODE
629/******************************************************************************
630 * This function is used in OS-initiated mode.
631 *
632 * This function is passed the local power states requested for each power
633 * domain (state_info) between the current CPU domain and its ancestors until
634 * the target power level (end_pwrlvl), and ensures the requested power states
635 * are valid. It updates the array of requested power states with this
636 * information.
637 *
638 * Then, for each level (apart from the CPU level) until the 'end_pwrlvl', it
639 * retrieves the states requested by all the cpus of which the power domain at
640 * that level is an ancestor. It passes this information to the platform to
641 * coordinate and return the target power state. If the requested state does
642 * not match the target state, the request is denied.
643 *
644 * The 'state_info' is not modified.
645 *
646 * This function will only be invoked with data cache enabled and while
647 * powering down a core.
648 *****************************************************************************/
Boyan Karatotev3b802102024-11-06 16:26:15 +0000649int psci_validate_state_coordination(unsigned int cpu_idx, unsigned int end_pwrlvl,
Wing Li606b7432022-09-14 13:18:17 -0700650 psci_power_state_t *state_info)
651{
652 int rc = PSCI_E_SUCCESS;
Boyan Karatotev3b802102024-11-06 16:26:15 +0000653 unsigned int lvl, parent_idx;
Wing Li606b7432022-09-14 13:18:17 -0700654 unsigned int start_idx;
655 unsigned int ncpus;
656 plat_local_state_t target_state, *req_states;
657 plat_local_state_t prev[PLAT_MAX_PWR_LVL];
658
659 assert(end_pwrlvl <= PLAT_MAX_PWR_LVL);
660 parent_idx = psci_cpu_pd_nodes[cpu_idx].parent_node;
661
662 /*
663 * Save a copy of the previous requested local power states and update
664 * the new requested local power states.
665 */
666 psci_update_req_local_pwr_states(end_pwrlvl, cpu_idx, state_info, prev);
667
668 for (lvl = PSCI_CPU_PWR_LVL + 1U; lvl <= end_pwrlvl; lvl++) {
669 /* Get the requested power states for this power level */
670 start_idx = psci_non_cpu_pd_nodes[parent_idx].cpu_start_idx;
671 req_states = psci_get_req_local_pwr_states(lvl, start_idx);
672
673 /*
674 * Let the platform coordinate amongst the requested states at
675 * this power level and return the target local power state.
676 */
677 ncpus = psci_non_cpu_pd_nodes[parent_idx].ncpus;
678 target_state = plat_get_target_pwr_state(lvl,
679 req_states,
680 ncpus);
681
682 /*
683 * Verify that the requested power state matches the target
684 * local power state.
685 */
686 if (state_info->pwr_domain_state[lvl] != target_state) {
687 if (target_state == PSCI_LOCAL_STATE_RUN) {
688 rc = PSCI_E_DENIED;
689 } else {
690 rc = PSCI_E_INVALID_PARAMS;
691 }
692 goto exit;
693 }
Patrick Delaunay412d92f2023-10-17 20:05:52 +0200694
695 parent_idx = psci_non_cpu_pd_nodes[parent_idx].parent_node;
Wing Li606b7432022-09-14 13:18:17 -0700696 }
697
698 /*
699 * Verify that the current core is the last running core at the
700 * specified power level.
701 */
702 lvl = state_info->last_at_pwrlvl;
Boyan Karatotev3b802102024-11-06 16:26:15 +0000703 if (!psci_is_last_cpu_to_idle_at_pwrlvl(cpu_idx, lvl)) {
Wing Li606b7432022-09-14 13:18:17 -0700704 rc = PSCI_E_DENIED;
705 }
706
707exit:
708 if (rc != PSCI_E_SUCCESS) {
709 /* Restore the previous requested local power states. */
710 psci_restore_req_local_pwr_states(cpu_idx, prev);
711 return rc;
712 }
713
Wing Li606b7432022-09-14 13:18:17 -0700714 return rc;
715}
716#endif
717
Soby Mathew67487842015-07-13 14:10:57 +0100718/******************************************************************************
719 * This function validates a suspend request by making sure that if a standby
720 * state is requested then no power level is turned off and the highest power
721 * level is placed in a standby/retention state.
722 *
723 * It also ensures that the state level X will enter is not shallower than the
724 * state level X + 1 will enter.
725 *
726 * This validation will be enabled only for DEBUG builds as the platform is
727 * expected to perform these validations as well.
728 *****************************************************************************/
729int psci_validate_suspend_req(const psci_power_state_t *state_info,
730 unsigned int is_power_down_state)
731{
732 unsigned int max_off_lvl, target_lvl, max_retn_lvl;
733 plat_local_state_t state;
734 plat_local_state_type_t req_state_type, deepest_state_type;
735 int i;
736
737 /* Find the target suspend power level */
738 target_lvl = psci_find_target_suspend_lvl(state_info);
Soby Mathew9d070b92015-07-29 17:05:03 +0100739 if (target_lvl == PSCI_INVALID_PWR_LVL)
Achin Gupta0959db52013-12-02 17:33:04 +0000740 return PSCI_E_INVALID_PARAMS;
741
Soby Mathew67487842015-07-13 14:10:57 +0100742 /* All power domain levels are in a RUN state to begin with */
743 deepest_state_type = STATE_TYPE_RUN;
Achin Gupta0959db52013-12-02 17:33:04 +0000744
Antonio Nino Diaz6b7b0f32018-07-17 15:10:08 +0100745 for (i = (int) target_lvl; i >= (int) PSCI_CPU_PWR_LVL; i--) {
Soby Mathew67487842015-07-13 14:10:57 +0100746 state = state_info->pwr_domain_state[i];
747 req_state_type = find_local_state_type(state);
748
749 /*
750 * While traversing from the highest power level to the lowest,
751 * the state requested for lower levels has to be the same or
752 * deeper i.e. equal to or greater than the state at the higher
753 * levels. If this condition is true, then the requested state
754 * becomes the deepest state encountered so far.
755 */
756 if (req_state_type < deepest_state_type)
757 return PSCI_E_INVALID_PARAMS;
758 deepest_state_type = req_state_type;
759 }
760
761 /* Find the highest off power level */
762 max_off_lvl = psci_find_max_off_lvl(state_info);
763
764 /* The target_lvl is either equal to the max_off_lvl or max_retn_lvl */
Soby Mathew9d070b92015-07-29 17:05:03 +0100765 max_retn_lvl = PSCI_INVALID_PWR_LVL;
Soby Mathew67487842015-07-13 14:10:57 +0100766 if (target_lvl != max_off_lvl)
767 max_retn_lvl = target_lvl;
768
769 /*
770 * If this is not a request for a power down state then max off level
771 * has to be invalid and max retention level has to be a valid power
772 * level.
773 */
Antonio Nino Diaz6b7b0f32018-07-17 15:10:08 +0100774 if ((is_power_down_state == 0U) &&
775 ((max_off_lvl != PSCI_INVALID_PWR_LVL) ||
776 (max_retn_lvl == PSCI_INVALID_PWR_LVL)))
Achin Gupta0959db52013-12-02 17:33:04 +0000777 return PSCI_E_INVALID_PARAMS;
778
779 return PSCI_E_SUCCESS;
780}
781
Soby Mathew67487842015-07-13 14:10:57 +0100782/******************************************************************************
783 * This function finds the highest power level which will be powered down
784 * amongst all the power levels specified in the 'state_info' structure
785 *****************************************************************************/
786unsigned int psci_find_max_off_lvl(const psci_power_state_t *state_info)
Achin Gupta84c9f102014-07-28 00:09:01 +0100787{
Soby Mathew67487842015-07-13 14:10:57 +0100788 int i;
Achin Gupta84c9f102014-07-28 00:09:01 +0100789
Antonio Nino Diaz6b7b0f32018-07-17 15:10:08 +0100790 for (i = (int) PLAT_MAX_PWR_LVL; i >= (int) PSCI_CPU_PWR_LVL; i--) {
Maheedhar Bollapallic7b0a282024-04-25 11:47:27 +0530791 if (is_local_state_off(state_info->pwr_domain_state[i]) != 0) {
Antonio Nino Diaz6b7b0f32018-07-17 15:10:08 +0100792 return (unsigned int) i;
Maheedhar Bollapallic7b0a282024-04-25 11:47:27 +0530793 }
Achin Gupta84c9f102014-07-28 00:09:01 +0100794 }
Soby Mathew67487842015-07-13 14:10:57 +0100795
Soby Mathew9d070b92015-07-29 17:05:03 +0100796 return PSCI_INVALID_PWR_LVL;
Soby Mathew67487842015-07-13 14:10:57 +0100797}
798
799/******************************************************************************
800 * This functions finds the level of the highest power domain which will be
801 * placed in a low power state during a suspend operation.
802 *****************************************************************************/
803unsigned int psci_find_target_suspend_lvl(const psci_power_state_t *state_info)
804{
805 int i;
806
Antonio Nino Diaz6b7b0f32018-07-17 15:10:08 +0100807 for (i = (int) PLAT_MAX_PWR_LVL; i >= (int) PSCI_CPU_PWR_LVL; i--) {
808 if (is_local_state_run(state_info->pwr_domain_state[i]) == 0)
809 return (unsigned int) i;
Soby Mathew67487842015-07-13 14:10:57 +0100810 }
811
Soby Mathew9d070b92015-07-29 17:05:03 +0100812 return PSCI_INVALID_PWR_LVL;
Achin Gupta84c9f102014-07-28 00:09:01 +0100813}
814
815/*******************************************************************************
Andrew F. Davis74d27d02019-06-04 10:46:54 -0400816 * This function is passed the highest level in the topology tree that the
817 * operation should be applied to and a list of node indexes. It picks up locks
818 * from the node index list in order of increasing power domain level in the
819 * range specified.
Achin Gupta0959db52013-12-02 17:33:04 +0000820 ******************************************************************************/
Andrew F. Davis74d27d02019-06-04 10:46:54 -0400821void psci_acquire_pwr_domain_locks(unsigned int end_pwrlvl,
822 const unsigned int *parent_nodes)
Achin Gupta0959db52013-12-02 17:33:04 +0000823{
Andrew F. Davis74d27d02019-06-04 10:46:54 -0400824 unsigned int parent_idx;
Soby Mathew9d070b92015-07-29 17:05:03 +0100825 unsigned int level;
Achin Gupta0959db52013-12-02 17:33:04 +0000826
Soby Mathew67487842015-07-13 14:10:57 +0100827 /* No locking required for level 0. Hence start locking from level 1 */
Antonio Nino Diaz6b7b0f32018-07-17 15:10:08 +0100828 for (level = PSCI_CPU_PWR_LVL + 1U; level <= end_pwrlvl; level++) {
Andrew F. Davis74d27d02019-06-04 10:46:54 -0400829 parent_idx = parent_nodes[level - 1U];
Soby Mathew67487842015-07-13 14:10:57 +0100830 psci_lock_get(&psci_non_cpu_pd_nodes[parent_idx]);
Achin Gupta0959db52013-12-02 17:33:04 +0000831 }
832}
833
834/*******************************************************************************
Andrew F. Davis74d27d02019-06-04 10:46:54 -0400835 * This function is passed the highest level in the topology tree that the
836 * operation should be applied to and a list of node indexes. It releases the
837 * locks in order of decreasing power domain level in the range specified.
Achin Gupta0959db52013-12-02 17:33:04 +0000838 ******************************************************************************/
Andrew F. Davis74d27d02019-06-04 10:46:54 -0400839void psci_release_pwr_domain_locks(unsigned int end_pwrlvl,
840 const unsigned int *parent_nodes)
Achin Gupta0959db52013-12-02 17:33:04 +0000841{
Andrew F. Davis74d27d02019-06-04 10:46:54 -0400842 unsigned int parent_idx;
Antonio Nino Diaz6b7b0f32018-07-17 15:10:08 +0100843 unsigned int level;
Achin Gupta0959db52013-12-02 17:33:04 +0000844
Soby Mathew67487842015-07-13 14:10:57 +0100845 /* Unlock top down. No unlocking required for level 0. */
Zelalem2fe75a22020-02-12 10:37:03 -0600846 for (level = end_pwrlvl; level >= (PSCI_CPU_PWR_LVL + 1U); level--) {
Antonio Nino Diaz6b7b0f32018-07-17 15:10:08 +0100847 parent_idx = parent_nodes[level - 1U];
Soby Mathew67487842015-07-13 14:10:57 +0100848 psci_lock_release(&psci_non_cpu_pd_nodes[parent_idx]);
Achin Gupta0959db52013-12-02 17:33:04 +0000849 }
850}
851
852/*******************************************************************************
Andrew Thoelke167a9352014-06-04 21:10:52 +0100853 * This function determines the full entrypoint information for the requested
Soby Mathew78879b92015-01-06 15:36:38 +0000854 * PSCI entrypoint on power on/resume and returns it.
Achin Gupta4f6ad662013-10-25 09:08:21 +0100855 ******************************************************************************/
Julius Werner402b3cf2019-07-09 14:02:43 -0700856#ifdef __aarch64__
Soby Mathew617540d2015-07-15 12:13:26 +0100857static int psci_get_ns_ep_info(entry_point_info_t *ep,
Soby Mathew9d070b92015-07-29 17:05:03 +0100858 uintptr_t entrypoint,
859 u_register_t context_id)
Achin Gupta4f6ad662013-10-25 09:08:21 +0100860{
Soby Mathew4c0d0392016-06-16 14:52:04 +0100861 u_register_t ep_attr, sctlr;
Soby Mathew9d070b92015-07-29 17:05:03 +0100862 unsigned int daif, ee, mode;
Soby Mathew4c0d0392016-06-16 14:52:04 +0100863 u_register_t ns_scr_el3 = read_scr_el3();
864 u_register_t ns_sctlr_el1 = read_sctlr_el1();
Achin Gupta4f6ad662013-10-25 09:08:21 +0100865
Antonio Nino Diaz6b7b0f32018-07-17 15:10:08 +0100866 sctlr = ((ns_scr_el3 & SCR_HCE_BIT) != 0U) ?
867 read_sctlr_el2() : ns_sctlr_el1;
Andrew Thoelke167a9352014-06-04 21:10:52 +0100868 ee = 0;
Achin Gupta4f6ad662013-10-25 09:08:21 +0100869
Andrew Thoelke167a9352014-06-04 21:10:52 +0100870 ep_attr = NON_SECURE | EP_ST_DISABLE;
Antonio Nino Diaz6b7b0f32018-07-17 15:10:08 +0100871 if ((sctlr & SCTLR_EE_BIT) != 0U) {
Andrew Thoelke167a9352014-06-04 21:10:52 +0100872 ep_attr |= EP_EE_BIG;
873 ee = 1;
874 }
Soby Mathew78879b92015-01-06 15:36:38 +0000875 SET_PARAM_HEAD(ep, PARAM_EP, VERSION_1, ep_attr);
Achin Gupta4f6ad662013-10-25 09:08:21 +0100876
Soby Mathew78879b92015-01-06 15:36:38 +0000877 ep->pc = entrypoint;
Douglas Raillard32f0d3c2017-01-26 15:54:44 +0000878 zeromem(&ep->args, sizeof(ep->args));
Soby Mathew78879b92015-01-06 15:36:38 +0000879 ep->args.arg0 = context_id;
Achin Gupta4f6ad662013-10-25 09:08:21 +0100880
881 /*
882 * Figure out whether the cpu enters the non-secure address space
883 * in aarch32 or aarch64
884 */
Antonio Nino Diaz6b7b0f32018-07-17 15:10:08 +0100885 if ((ns_scr_el3 & SCR_RW_BIT) != 0U) {
Achin Gupta4f6ad662013-10-25 09:08:21 +0100886
887 /*
888 * Check whether a Thumb entry point has been provided for an
889 * aarch64 EL
890 */
Antonio Nino Diaz6b7b0f32018-07-17 15:10:08 +0100891 if ((entrypoint & 0x1UL) != 0UL)
Soby Mathew617540d2015-07-15 12:13:26 +0100892 return PSCI_E_INVALID_ADDRESS;
Achin Gupta4f6ad662013-10-25 09:08:21 +0100893
Antonio Nino Diaz6b7b0f32018-07-17 15:10:08 +0100894 mode = ((ns_scr_el3 & SCR_HCE_BIT) != 0U) ? MODE_EL2 : MODE_EL1;
Achin Gupta4f6ad662013-10-25 09:08:21 +0100895
Jimmy Brissond7b5f402020-08-04 16:18:52 -0500896 ep->spsr = SPSR_64((uint64_t)mode, MODE_SP_ELX,
897 DISABLE_ALL_EXCEPTIONS);
Achin Gupta4f6ad662013-10-25 09:08:21 +0100898 } else {
899
Antonio Nino Diaz6b7b0f32018-07-17 15:10:08 +0100900 mode = ((ns_scr_el3 & SCR_HCE_BIT) != 0U) ?
901 MODE32_hyp : MODE32_svc;
Achin Gupta4f6ad662013-10-25 09:08:21 +0100902
903 /*
904 * TODO: Choose async. exception bits if HYP mode is not
905 * implemented according to the values of SCR.{AW, FW} bits
906 */
Vikram Kanigiri23ff9ba2014-05-13 14:42:08 +0100907 daif = DAIF_ABT_BIT | DAIF_IRQ_BIT | DAIF_FIQ_BIT;
908
Jimmy Brissond7b5f402020-08-04 16:18:52 -0500909 ep->spsr = SPSR_MODE32((uint64_t)mode, entrypoint & 0x1, ee,
910 daif);
Achin Gupta4f6ad662013-10-25 09:08:21 +0100911 }
912
Andrew Thoelke167a9352014-06-04 21:10:52 +0100913 return PSCI_E_SUCCESS;
Achin Gupta4f6ad662013-10-25 09:08:21 +0100914}
Julius Werner402b3cf2019-07-09 14:02:43 -0700915#else /* !__aarch64__ */
/*
 * AArch32 variant: builds the non-secure entry_point_info for 'entrypoint'
 * and 'context_id'. SCTLR is banked between security states here, so the
 * code briefly sets SCR.NS (with an isb() either side) to read the
 * non-secure copy before restoring the original SCR value.
 */
static int psci_get_ns_ep_info(entry_point_info_t *ep,
			       uintptr_t entrypoint,
			       u_register_t context_id)
{
	u_register_t ep_attr;
	unsigned int aif, ee, mode;
	u_register_t scr = read_scr();
	u_register_t ns_sctlr, sctlr;

	/* Switch to non secure state to read the banked NS SCTLR */
	write_scr(scr | SCR_NS_BIT);
	isb();
	ns_sctlr = read_sctlr();

	/* When EL2/HYP is enabled (SCR.HCE), HSCTLR governs the entry context */
	sctlr = scr & SCR_HCE_BIT ? read_hsctlr() : ns_sctlr;

	/* Return to original state */
	write_scr(scr);
	isb();
	ee = 0;

	ep_attr = NON_SECURE | EP_ST_DISABLE;
	/* Propagate the entry context's data endianness into the SPSR */
	if (sctlr & SCTLR_EE_BIT) {
		ep_attr |= EP_EE_BIG;
		ee = 1;
	}
	SET_PARAM_HEAD(ep, PARAM_EP, VERSION_1, ep_attr);

	ep->pc = entrypoint;
	zeromem(&ep->args, sizeof(ep->args));
	/* context_id is handed to the entry point as its first argument */
	ep->args.arg0 = context_id;

	/* Enter in HYP mode when EL2 is enabled, otherwise in SVC */
	mode = scr & SCR_HCE_BIT ? MODE32_hyp : MODE32_svc;

	/*
	 * TODO: Choose async. exception bits if HYP mode is not
	 * implemented according to the values of SCR.{AW, FW} bits
	 */
	aif = SPSR_ABT_BIT | SPSR_IRQ_BIT | SPSR_FIQ_BIT;

	/* Bit 0 of the entrypoint selects Thumb state on AArch32 */
	ep->spsr = SPSR_MODE32(mode, entrypoint & 0x1, ee, aif);

	return PSCI_E_SUCCESS;
}
960
961#endif /* __aarch64__ */
Achin Gupta4f6ad662013-10-25 09:08:21 +0100962
963/*******************************************************************************
Soby Mathew617540d2015-07-15 12:13:26 +0100964 * This function validates the entrypoint with the platform layer if the
965 * appropriate pm_ops hook is exported by the platform and returns the
966 * 'entry_point_info'.
967 ******************************************************************************/
968int psci_validate_entry_point(entry_point_info_t *ep,
Soby Mathew9d070b92015-07-29 17:05:03 +0100969 uintptr_t entrypoint,
970 u_register_t context_id)
Soby Mathew617540d2015-07-15 12:13:26 +0100971{
972 int rc;
973
974 /* Validate the entrypoint using platform psci_ops */
Antonio Nino Diaz6b7b0f32018-07-17 15:10:08 +0100975 if (psci_plat_pm_ops->validate_ns_entrypoint != NULL) {
Soby Mathew617540d2015-07-15 12:13:26 +0100976 rc = psci_plat_pm_ops->validate_ns_entrypoint(entrypoint);
Maheedhar Bollapallic7b0a282024-04-25 11:47:27 +0530977 if (rc != PSCI_E_SUCCESS) {
Soby Mathew617540d2015-07-15 12:13:26 +0100978 return PSCI_E_INVALID_ADDRESS;
Maheedhar Bollapallic7b0a282024-04-25 11:47:27 +0530979 }
Soby Mathew617540d2015-07-15 12:13:26 +0100980 }
981
982 /*
983 * Verify and derive the re-entry information for
984 * the non-secure world from the non-secure state from
985 * where this call originated.
986 */
987 rc = psci_get_ns_ep_info(ep, entrypoint, context_id);
988 return rc;
989}
990
991/*******************************************************************************
Achin Gupta4f6ad662013-10-25 09:08:21 +0100992 * Generic handler which is called when a cpu is physically powered on. It
Soby Mathew67487842015-07-13 14:10:57 +0100993 * traverses the node information and finds the highest power level powered
994 * off and performs generic, architectural, platform setup and state management
995 * to power on that power level and power levels below it.
996 * e.g. For a cpu that's been powered on, it will call the platform specific
997 * code to enable the gic cpu interface and for a cluster it will enable
998 * coherency at the interconnect level in addition to gic cpu interface.
Achin Gupta4f6ad662013-10-25 09:08:21 +0100999 ******************************************************************************/
void psci_warmboot_entrypoint(void)
{
	unsigned int end_pwrlvl;
	unsigned int cpu_idx = plat_my_core_pos();
	unsigned int parent_nodes[PLAT_MAX_PWR_LVL] = {0};
	psci_power_state_t state_info = { {PSCI_LOCAL_STATE_RUN} };

#if FEATURE_DETECTION
	/* Detect if features enabled during compilation are supported by PE. */
	detect_arch_features(cpu_idx);
#endif /* FEATURE_DETECTION */

	/* Init registers that never change for the lifetime of TF-A */
	cm_manage_extensions_el3(cpu_idx);

	/*
	 * Verify that we have been explicitly turned ON or resumed from
	 * suspend. An OFF affinity state here means nobody requested this
	 * CPU to wake, which is fatal.
	 */
	if (psci_get_aff_info_state() == AFF_STATE_OFF) {
		ERROR("Unexpected affinity info state.\n");
		panic();
	}

	/*
	 * Get the maximum power domain level to traverse to after this cpu
	 * has been physically powered up.
	 */
	end_pwrlvl = get_power_on_target_pwrlvl();

	/* Get the parent nodes of this CPU up to end_pwrlvl */
	psci_get_parent_pwr_domain_nodes(cpu_idx, end_pwrlvl, parent_nodes);

	/*
	 * This function acquires the lock corresponding to each power level so
	 * that by the time all locks are taken, the system topology is snapshot
	 * and state management can be done safely.
	 */
	psci_acquire_pwr_domain_locks(end_pwrlvl, parent_nodes);

	/* Read the coordinated target states while holding the locks */
	psci_get_target_local_pwr_states(cpu_idx, end_pwrlvl, &state_info);

#if ENABLE_PSCI_STAT
	plat_psci_stat_accounting_stop(&state_info);
#endif

	/*
	 * This CPU could be resuming from suspend or it could have just been
	 * turned on. To distinguish between these 2 cases, we examine the
	 * affinity state of the CPU:
	 *  - If the affinity state is ON_PENDING then it has just been
	 *    turned on.
	 *  - Else it is resuming from suspend.
	 *
	 * Depending on the type of warm reset identified, choose the right set
	 * of power management handler and perform the generic, architecture
	 * and platform specific handling.
	 */
	if (psci_get_aff_info_state() == AFF_STATE_ON_PENDING) {
		psci_cpu_on_finish(cpu_idx, &state_info);
	} else {
		unsigned int max_off_lvl = psci_find_max_off_lvl(&state_info);

		/* A powerdown-suspend wakeup must have some level powered off */
		assert(max_off_lvl != PSCI_INVALID_PWR_LVL);
		psci_cpu_suspend_to_powerdown_finish(cpu_idx, max_off_lvl, &state_info, false);
	}

	/*
	 * Caches and (importantly) coherency are on so we can rely on seeing
	 * whatever the primary gave us without explicit cache maintenance
	 */
	entry_point_info_t *ep = get_cpu_data(warmboot_ep_info);
	cm_init_my_context(ep);

	/*
	 * Generic management: Now we just need to retrieve the
	 * information that we had stashed away during the cpu_on
	 * call to set this cpu on its way.
	 */
	cm_prepare_el3_exit_ns();

	/*
	 * Set the requested and target state of this CPU and all the higher
	 * power domains which are ancestors of this CPU to run.
	 */
	psci_set_pwr_domains_to_run(cpu_idx, end_pwrlvl);

#if ENABLE_PSCI_STAT
	psci_stats_update_pwr_up(cpu_idx, end_pwrlvl, &state_info);
#endif

	/*
	 * Release the lock corresponding to each power level in the
	 * reverse order to which they were acquired.
	 */
	psci_release_pwr_domain_locks(end_pwrlvl, parent_nodes);
}
Jeenu Viswambharan7f366602014-02-20 17:11:00 +00001097
1098/*******************************************************************************
1099 * This function initializes the set of hooks that PSCI invokes as part of power
1100 * management operation. The power management hooks are expected to be provided
1101 * by the SPD, after it finishes all its initialization
1102 ******************************************************************************/
Dan Handleyfb037bf2014-04-10 15:37:22 +01001103void psci_register_spd_pm_hook(const spd_pm_ops_t *pm)
Jeenu Viswambharan7f366602014-02-20 17:11:00 +00001104{
Antonio Nino Diaz6b7b0f32018-07-17 15:10:08 +01001105 assert(pm != NULL);
Jeenu Viswambharan7f366602014-02-20 17:11:00 +00001106 psci_spd_pm = pm;
Soby Mathew90e82582015-01-07 11:10:22 +00001107
Antonio Nino Diaz6b7b0f32018-07-17 15:10:08 +01001108 if (pm->svc_migrate != NULL)
Soby Mathew90e82582015-01-07 11:10:22 +00001109 psci_caps |= define_psci_cap(PSCI_MIG_AARCH64);
1110
Antonio Nino Diaz6b7b0f32018-07-17 15:10:08 +01001111 if (pm->svc_migrate_info != NULL)
Soby Mathew90e82582015-01-07 11:10:22 +00001112 psci_caps |= define_psci_cap(PSCI_MIG_INFO_UP_CPU_AARCH64)
1113 | define_psci_cap(PSCI_MIG_INFO_TYPE);
Jeenu Viswambharan7f366602014-02-20 17:11:00 +00001114}
Juan Castillod5f13092014-08-12 11:17:06 +01001115
1116/*******************************************************************************
Soby Mathew8991eed2014-10-23 10:35:34 +01001117 * This function invokes the migrate info hook in the spd_pm_ops. It performs
1118 * the necessary return value validation. If the Secure Payload is UP and
1119 * migrate capable, it returns the mpidr of the CPU on which the Secure payload
1120 * is resident through the mpidr parameter. Else the value of the parameter on
1121 * return is undefined.
1122 ******************************************************************************/
Soby Mathew9d070b92015-07-29 17:05:03 +01001123int psci_spd_migrate_info(u_register_t *mpidr)
Soby Mathew8991eed2014-10-23 10:35:34 +01001124{
1125 int rc;
1126
Antonio Nino Diaz6b7b0f32018-07-17 15:10:08 +01001127 if ((psci_spd_pm == NULL) || (psci_spd_pm->svc_migrate_info == NULL))
Soby Mathew8991eed2014-10-23 10:35:34 +01001128 return PSCI_E_NOT_SUPPORTED;
1129
1130 rc = psci_spd_pm->svc_migrate_info(mpidr);
1131
Antonio Nino Diaz6b7b0f32018-07-17 15:10:08 +01001132 assert((rc == PSCI_TOS_UP_MIG_CAP) || (rc == PSCI_TOS_NOT_UP_MIG_CAP) ||
1133 (rc == PSCI_TOS_NOT_PRESENT_MP) || (rc == PSCI_E_NOT_SUPPORTED));
Soby Mathew8991eed2014-10-23 10:35:34 +01001134
1135 return rc;
1136}
1137
1138
1139/*******************************************************************************
Soby Mathew67487842015-07-13 14:10:57 +01001140 * This function prints the state of all power domains present in the
Juan Castillod5f13092014-08-12 11:17:06 +01001141 * system
1142 ******************************************************************************/
void psci_print_power_domain_map(void)
{
#if LOG_LEVEL >= LOG_LEVEL_INFO
	unsigned int idx;
	plat_local_state_t state;
	plat_local_state_type_t state_type;

	/* This array maps to the PSCI_STATE_X definitions in psci.h */
	static const char * const psci_state_type_str[] = {
		"ON",
		"RETENTION",
		"OFF",
	};

	INFO("PSCI Power Domain Map:\n");
	/* Non-CPU power domain nodes occupy the first part of the array */
	for (idx = 0; idx < (PSCI_NUM_PWR_DOMAINS - psci_plat_core_count);
							idx++) {
		state_type = find_local_state_type(
				psci_non_cpu_pd_nodes[idx].local_state);
		INFO("  Domain Node : Level %u, parent_node %u,"
				" State %s (0x%x)\n",
				psci_non_cpu_pd_nodes[idx].level,
				psci_non_cpu_pd_nodes[idx].parent_node,
				psci_state_type_str[state_type],
				psci_non_cpu_pd_nodes[idx].local_state);
	}

	/* Then one node per CPU */
	for (idx = 0; idx < psci_plat_core_count; idx++) {
		state = psci_get_cpu_local_state_by_idx(idx);
		state_type = find_local_state_type(state);
		INFO("  CPU Node : MPID 0x%llx, parent_node %u,"
				" State %s (0x%x)\n",
				(unsigned long long)psci_cpu_pd_nodes[idx].mpidr,
				psci_cpu_pd_nodes[idx].parent_node,
				psci_state_type_str[state_type],
				psci_get_cpu_local_state_by_idx(idx));
	}
#endif
}
Soby Mathew67487842015-07-13 14:10:57 +01001182
Jeenu Viswambharanb10d4492017-02-16 14:55:15 +00001183/******************************************************************************
1184 * Return whether any secondaries were powered up with CPU_ON call. A CPU that
1185 * have ever been powered up would have set its MPDIR value to something other
1186 * than PSCI_INVALID_MPIDR. Note that MPDIR isn't reset back to
1187 * PSCI_INVALID_MPIDR when a CPU is powered down later, so the return value is
1188 * meaningful only when called on the primary CPU during early boot.
1189 *****************************************************************************/
1190int psci_secondaries_brought_up(void)
1191{
Antonio Nino Diaz6b7b0f32018-07-17 15:10:08 +01001192 unsigned int idx, n_valid = 0U;
Jeenu Viswambharanb10d4492017-02-16 14:55:15 +00001193
Antonio Nino Diaz6b7b0f32018-07-17 15:10:08 +01001194 for (idx = 0U; idx < ARRAY_SIZE(psci_cpu_pd_nodes); idx++) {
Jeenu Viswambharanb10d4492017-02-16 14:55:15 +00001195 if (psci_cpu_pd_nodes[idx].mpidr != PSCI_INVALID_MPIDR)
1196 n_valid++;
1197 }
1198
Antonio Nino Diaz6b7b0f32018-07-17 15:10:08 +01001199 assert(n_valid > 0U);
Jeenu Viswambharanb10d4492017-02-16 14:55:15 +00001200
Antonio Nino Diaz6b7b0f32018-07-17 15:10:08 +01001201 return (n_valid > 1U) ? 1 : 0;
Jeenu Viswambharanb10d4492017-02-16 14:55:15 +00001202}
1203
Boyan Karatotev461b62b2025-03-25 12:03:15 +00001204static u_register_t call_cpu_pwr_dwn(unsigned int power_level)
Boyan Karatotevaadb4b52025-03-12 10:36:46 +00001205{
1206 struct cpu_ops *ops = get_cpu_data(cpu_ops_ptr);
1207
1208 /* Call the last available power down handler */
1209 if (power_level > CPU_MAX_PWR_DWN_OPS - 1) {
1210 power_level = CPU_MAX_PWR_DWN_OPS - 1;
1211 }
1212
1213 assert(ops != NULL);
1214 assert(ops->pwr_dwn_ops[power_level] != NULL);
1215
1216 return ops->pwr_dwn_ops[power_level]();
1217}
1218
/* Run the cpu driver's power down hook for a regular (committed) power down. */
static void prepare_cpu_pwr_dwn(unsigned int power_level)
{
	/*
	 * The return value is deliberately discarded: on a plain power down
	 * all cpus are expected to behave the same.
	 */
	(void)call_cpu_pwr_dwn(power_level);
}
1224
1225static void prepare_cpu_pwr_up(unsigned int power_level)
1226{
1227 /*
1228 * Call the pwr_dwn cpu hook again, indicating that an abandon happened.
1229 * The cpu driver is expected to clean up. We ask it to return
1230 * PABANDON_ACK to indicate that it has handled this. This is a
1231 * heuristic: the value has been chosen such that an unported CPU is
1232 * extremely unlikely to return this value.
1233 */
1234 u_register_t ret = call_cpu_pwr_dwn(power_level);
1235
1236 /* unreachable on AArch32 so cast down to calm the compiler */
1237 if (ret != (u_register_t) PABANDON_ACK) {
1238 panic();
1239 }
Boyan Karatotevaadb4b52025-03-12 10:36:46 +00001240}
1241
/*******************************************************************************
 * Initiate power down sequence, by calling power down operations registered for
 * this CPU.
 *
 * Performs (in order): optional runtime-instrumentation timestamping, cache
 * maintenance when the platform lacks hardware-assisted coherency, and the
 * cpu-driver power down hook for 'power_level'. Does not execute the final
 * wfi; callers follow up with one of the psci_pwrdown_cpu_end_* functions.
 ******************************************************************************/
void psci_pwrdown_cpu_start(unsigned int power_level)
{
#if ENABLE_RUNTIME_INSTRUMENTATION

	/*
	 * Flush cache line so that even if CPU power down happens
	 * the timestamp update is reflected in memory.
	 */
	PMF_CAPTURE_TIMESTAMP(rt_instr_svc,
		RT_INSTR_ENTER_CFLUSH,
		PMF_CACHE_MAINT);
#endif

#if !HW_ASSISTED_COHERENCY
	/*
	 * Disable data caching and handle the stack's cache maintenance.
	 *
	 * If the core can't automatically exit coherency, the cpu driver needs
	 * to flush caches and exit coherency. We can't do this with data caches
	 * enabled. The cpu driver will decide which caches to flush based on
	 * the power level.
	 *
	 * If automatic coherency management is possible, we can keep data
	 * caches on until the very end and let hardware do cache maintenance.
	 */
	psci_do_pwrdown_cache_maintenance();
#endif

	/* Initiate the power down sequence by calling into the cpu driver. */
	prepare_cpu_pwr_dwn(power_level);

#if ENABLE_RUNTIME_INSTRUMENTATION
	/* Close the cache-maintenance instrumentation window opened above. */
	PMF_CAPTURE_TIMESTAMP(rt_instr_svc,
		RT_INSTR_EXIT_CFLUSH,
		PMF_NO_CACHE_MAINT);
#endif
}
Sandeep Tripathy22744902020-08-17 20:22:13 +05301283
/*******************************************************************************
 * Finish a terminal power down sequence, ending with a wfi. In case of wakeup
 * will retry the sleep and panic if it persists.
 *
 * Terminal means no return is expected: the function is marked __dead2 and
 * panics if the power controller keeps refusing the power down.
 ******************************************************************************/
void __dead2 psci_pwrdown_cpu_end_terminal(void)
{
#if ERRATA_SME_POWER_DOWN
	/*
	 * force SME off to not get power down rejected. Getting here is
	 * terminal so we don't care if we lose context because of another
	 * wakeup
	 */
	if (is_feat_sme_supported()) {
		write_svcr(0);
		isb();
	}
#endif /* ERRATA_SME_POWER_DOWN */

	/* ensure write buffer empty */
	dsbsy();

	/*
	 * Execute a wfi which, in most cases, will allow the power controller
	 * to physically power down this cpu. Under some circumstances that may
	 * be denied. Hopefully this is transient, retrying a few times should
	 * power down.
	 */
	/* NOTE(review): 32 retries looks like an arbitrary bound — confirm. */
	for (int i = 0; i < 32; i++)
		wfi();

	/* Wake up wasn't transient. System is probably in a bad state. */
	ERROR("Could not power off CPU.\n");
	panic();
}
1318
/*******************************************************************************
 * Finish a non-terminal power down sequence, ending with a wfi. In case of
 * wakeup will unwind any CPU specific actions and return.
 ******************************************************************************/

void psci_pwrdown_cpu_end_wakeup(unsigned int power_level)
{
	/* ensure write buffer empty */
	dsbsy();

	/*
	 * Turn the core off. Usually, will be terminal. In some circumstances
	 * the powerdown will be denied and we'll need to unwind.
	 */
	wfi();

	/*
	 * Waking up does not require hardware-assisted coherency, but that is
	 * the case for every core that can wake up. Can either happen because
	 * of errata or pabandon.
	 */
	/*
	 * NOTE(review): this guard also fires for AArch64 builds without
	 * HW_ASSISTED_COHERENCY, although the message mentions only AArch32.
	 */
#if !defined(__aarch64__) || !HW_ASSISTED_COHERENCY
	ERROR("AArch32 systems shouldn't wake up.\n");
	panic();
#endif
	/*
	 * Begin unwinding. Everything can be shared with CPU_ON and co later,
	 * except the CPU specific bit. Cores that have hardware-assisted
	 * coherency should be able to handle this.
	 */
	prepare_cpu_pwr_up(power_level);
}
1351
1352/*******************************************************************************
Sandeep Tripathy22744902020-08-17 20:22:13 +05301353 * This function invokes the callback 'stop_func()' with the 'mpidr' of each
1354 * online PE. Caller can pass suitable method to stop a remote core.
1355 *
1356 * 'wait_ms' is the timeout value in milliseconds for the other cores to
1357 * transition to power down state. Passing '0' makes it non-blocking.
1358 *
1359 * The function returns 'PSCI_E_DENIED' if some cores failed to stop within the
1360 * given timeout.
1361 ******************************************************************************/
Boyan Karatotev3b802102024-11-06 16:26:15 +00001362int psci_stop_other_cores(unsigned int this_cpu_idx, unsigned int wait_ms,
Sandeep Tripathy22744902020-08-17 20:22:13 +05301363 void (*stop_func)(u_register_t mpidr))
1364{
Sandeep Tripathy22744902020-08-17 20:22:13 +05301365 /* Invoke stop_func for each core */
Boyan Karatotev3b802102024-11-06 16:26:15 +00001366 for (unsigned int idx = 0U; idx < psci_plat_core_count; idx++) {
Sandeep Tripathy22744902020-08-17 20:22:13 +05301367 /* skip current CPU */
1368 if (idx == this_cpu_idx) {
1369 continue;
1370 }
1371
1372 /* Check if the CPU is ON */
1373 if (psci_get_aff_info_state_by_idx(idx) == AFF_STATE_ON) {
1374 (*stop_func)(psci_cpu_pd_nodes[idx].mpidr);
1375 }
1376 }
1377
1378 /* Need to wait for other cores to shutdown */
1379 if (wait_ms != 0U) {
Maheedhar Bollapallie64cdee2024-04-23 11:49:04 +05301380 for (uint32_t delay_ms = wait_ms; ((delay_ms != 0U) &&
1381 (!psci_is_last_on_cpu(this_cpu_idx))); delay_ms--) {
Sandeep Tripathy22744902020-08-17 20:22:13 +05301382 mdelay(1U);
1383 }
1384
Boyan Karatotev3b802102024-11-06 16:26:15 +00001385 if (!psci_is_last_on_cpu(this_cpu_idx)) {
Sandeep Tripathy22744902020-08-17 20:22:13 +05301386 WARN("Failed to stop all cores!\n");
1387 psci_print_power_domain_map();
1388 return PSCI_E_DENIED;
1389 }
1390 }
1391
1392 return PSCI_E_SUCCESS;
1393}
Lucian Paul-Trifuce14a122022-03-02 21:28:24 +00001394
1395/*******************************************************************************
1396 * This function verifies that all the other cores in the system have been
1397 * turned OFF and the current CPU is the last running CPU in the system.
1398 * Returns true if the current CPU is the last ON CPU or false otherwise.
1399 *
1400 * This API has following differences with psci_is_last_on_cpu
1401 * 1. PSCI states are locked
Lucian Paul-Trifuce14a122022-03-02 21:28:24 +00001402 ******************************************************************************/
Boyan Karatotev3b802102024-11-06 16:26:15 +00001403bool psci_is_last_on_cpu_safe(unsigned int this_core)
Lucian Paul-Trifuce14a122022-03-02 21:28:24 +00001404{
Lucian Paul-Trifuce14a122022-03-02 21:28:24 +00001405 unsigned int parent_nodes[PLAT_MAX_PWR_LVL] = {0};
Lucian Paul-Trifuce14a122022-03-02 21:28:24 +00001406
Jayanth Dodderi Chidanandb41b0822022-08-22 23:46:10 +01001407 psci_get_parent_pwr_domain_nodes(this_core, PLAT_MAX_PWR_LVL, parent_nodes);
Lucian Paul-Trifuce14a122022-03-02 21:28:24 +00001408
Jayanth Dodderi Chidanandb41b0822022-08-22 23:46:10 +01001409 psci_acquire_pwr_domain_locks(PLAT_MAX_PWR_LVL, parent_nodes);
Lucian Paul-Trifuce14a122022-03-02 21:28:24 +00001410
Boyan Karatotev3b802102024-11-06 16:26:15 +00001411 if (!psci_is_last_on_cpu(this_core)) {
Lucian Paul-Trifuce14a122022-03-02 21:28:24 +00001412 psci_release_pwr_domain_locks(PLAT_MAX_PWR_LVL, parent_nodes);
Jayanth Dodderi Chidanandb41b0822022-08-22 23:46:10 +01001413 return false;
Lucian Paul-Trifuce14a122022-03-02 21:28:24 +00001414 }
1415
Jayanth Dodderi Chidanandb41b0822022-08-22 23:46:10 +01001416 psci_release_pwr_domain_locks(PLAT_MAX_PWR_LVL, parent_nodes);
1417
Lucian Paul-Trifuce14a122022-03-02 21:28:24 +00001418 return true;
1419}
Wing Lib88a4412022-09-14 13:18:15 -07001420
1421/*******************************************************************************
1422 * This function verifies that all cores in the system have been turned ON.
1423 * Returns true, if all CPUs are ON or false otherwise.
1424 *
1425 * This API has following differences with psci_are_all_cpus_on
1426 * 1. PSCI states are locked
1427 ******************************************************************************/
Boyan Karatotev3b802102024-11-06 16:26:15 +00001428bool psci_are_all_cpus_on_safe(unsigned int this_core)
Wing Lib88a4412022-09-14 13:18:15 -07001429{
Wing Lib88a4412022-09-14 13:18:15 -07001430 unsigned int parent_nodes[PLAT_MAX_PWR_LVL] = {0};
1431
1432 psci_get_parent_pwr_domain_nodes(this_core, PLAT_MAX_PWR_LVL, parent_nodes);
1433
1434 psci_acquire_pwr_domain_locks(PLAT_MAX_PWR_LVL, parent_nodes);
1435
1436 if (!psci_are_all_cpus_on()) {
1437 psci_release_pwr_domain_locks(PLAT_MAX_PWR_LVL, parent_nodes);
1438 return false;
1439 }
1440
1441 psci_release_pwr_domain_locks(PLAT_MAX_PWR_LVL, parent_nodes);
1442
1443 return true;
1444}
Manish V Badarkhea7be2a52025-05-29 14:55:39 +01001445
1446/*******************************************************************************
1447 * Safely counts the number of CPUs in the system that are currently in the ON
1448 * or ON_PENDING state.
1449 *
1450 * This function acquires and releases the necessary power domain locks to
1451 * ensure consistency of the CPU state information.
1452 *
1453 * @param this_core The index of the current core making the query.
1454 *
1455 * @return The number of CPUs currently in AFF_STATE_ON or AFF_STATE_ON_PENDING.
1456 ******************************************************************************/
1457unsigned int psci_num_cpus_running_on_safe(unsigned int this_core)
1458{
1459 unsigned int parent_nodes[PLAT_MAX_PWR_LVL] = {0};
1460 unsigned int no_of_cpus;
1461
1462 psci_get_parent_pwr_domain_nodes(this_core, PLAT_MAX_PWR_LVL, parent_nodes);
1463
1464 psci_acquire_pwr_domain_locks(PLAT_MAX_PWR_LVL, parent_nodes);
1465
1466 no_of_cpus = psci_num_cpus_running();
1467
1468 psci_release_pwr_domain_locks(PLAT_MAX_PWR_LVL, parent_nodes);
1469
1470 return no_of_cpus;
1471}