/*
 * Copyright (c) 2013-2025, Arm Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <assert.h>
#include <stddef.h>

#include <arch.h>
#include <arch_helpers.h>
#include <common/bl_common.h>
#include <common/debug.h>
#include <drivers/arm/gic.h>
#include <lib/el3_runtime/context_mgmt.h>
#include <lib/el3_runtime/pubsub_events.h>
#include <plat/common/platform.h>

#include "psci_private.h"

/*
 * Helper functions for the CPU level spinlocks
 */
static inline void psci_spin_lock_cpu(unsigned int idx)
{
	spin_lock(&psci_cpu_pd_nodes[idx].cpu_lock);
}

static inline void psci_spin_unlock_cpu(unsigned int idx)
{
	spin_unlock(&psci_cpu_pd_nodes[idx].cpu_lock);
}

/*******************************************************************************
 * This function checks whether a cpu which has been requested to be turned on
 * is OFF to begin with.
 ******************************************************************************/
static int cpu_on_validate_state(aff_info_state_t aff_state)
{
	if (aff_state == AFF_STATE_ON)
		return PSCI_E_ALREADY_ON;

	if (aff_state == AFF_STATE_ON_PENDING)
		return PSCI_E_ON_PENDING;

	assert(aff_state == AFF_STATE_OFF);
	return PSCI_E_SUCCESS;
}

/*******************************************************************************
 * Generic handler which is called to physically power on a cpu identified by
 * its mpidr. It performs the generic, architectural and platform setup and
 * state management to power on the target cpu, e.g. it will ensure that
 * enough information is stashed for it to resume execution in the non-secure
 * security state.
 *
 * The state of all the relevant power domains is changed after calling the
 * platform handler, as it can return an error.
 ******************************************************************************/
int psci_cpu_on_start(u_register_t target_cpu,
		      const entry_point_info_t *ep)
{
	int rc;
	aff_info_state_t target_aff_state;
	unsigned int target_idx = (unsigned int)plat_core_pos_by_mpidr(target_cpu);
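	/*
	 * The caller is expected to have validated target_cpu, so the core
	 * position computed above is assumed to be a valid index.
	 */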

	/*
	 * This function must only be called on platforms where the
	 * CPU_ON platform hooks have been implemented.
	 */
	assert((psci_plat_pm_ops->pwr_domain_on != NULL) &&
	       (psci_plat_pm_ops->pwr_domain_on_finish != NULL));

	/* Protect against multiple CPUs trying to turn ON the same target CPU */
	psci_spin_lock_cpu(target_idx);

77 /*
78 * Generic management: Ensure that the cpu is off to be
79 * turned on.
David Cunado71341d22017-07-19 12:14:07 +010080 * Perform cache maintanence ahead of reading the target CPU state to
81 * ensure that the data is not stale.
82 * There is a theoretical edge case where the cache may contain stale
83 * data for the target CPU data - this can occur under the following
84 * conditions:
85 * - the target CPU is in another cluster from the current
86 * - the target CPU was the last CPU to shutdown on its cluster
87 * - the cluster was removed from coherency as part of the CPU shutdown
88 *
89 * In this case the cache maintenace that was performed as part of the
90 * target CPUs shutdown was not seen by the current CPU's cluster. And
91 * so the cache may contain stale data for the target CPU.
Soby Mathewb48349e2015-06-29 16:30:12 +010092 */
Deepika Bhavnani5b33ad12019-12-13 10:23:18 -060093 flush_cpu_data_by_index(target_idx,
Antonio Nino Diaz621d64f2018-07-16 23:19:25 +010094 psci_svc_cpu_data.aff_info_state);
Soby Mathew8ee24982015-04-07 12:16:56 +010095 rc = cpu_on_validate_state(psci_get_aff_info_state_by_idx(target_idx));
Soby Mathewb48349e2015-06-29 16:30:12 +010096 if (rc != PSCI_E_SUCCESS)
Maheedhar Bollapalli0839cfc2024-04-19 16:21:29 +053097 goto on_exit;
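	/* The per-CPU lock taken above is released at the on_exit label. */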

	/*
	 * Call the cpu on handler registered by the Secure Payload Dispatcher
	 * to let it do any bookkeeping. If the handler encounters an error,
	 * it is expected to assert within.
	 */
	if ((psci_spd_pm != NULL) && (psci_spd_pm->svc_on != NULL)) {
		psci_spd_pm->svc_on(target_cpu);
	}

	/*
	 * Set the Affinity info state of the target cpu to ON_PENDING.
	 * Flush aff_info_state as it will be accessed with caches
	 * turned OFF.
	 */
	psci_set_aff_info_state_by_idx(target_idx, AFF_STATE_ON_PENDING);
	flush_cpu_data_by_index(target_idx,
				psci_svc_cpu_data.aff_info_state);

	/*
	 * The cache line invalidation by the target CPU after setting the
	 * state to OFF (see psci_do_cpu_off()) could cause the update to
	 * aff_info_state to be invalidated. Retry the update if the target
	 * CPU aff_info_state is not ON_PENDING.
	 */
	target_aff_state = psci_get_aff_info_state_by_idx(target_idx);
	if (target_aff_state != AFF_STATE_ON_PENDING) {
		assert(target_aff_state == AFF_STATE_OFF);
		psci_set_aff_info_state_by_idx(target_idx, AFF_STATE_ON_PENDING);
		flush_cpu_data_by_index(target_idx,
					psci_svc_cpu_data.aff_info_state);

		assert(psci_get_aff_info_state_by_idx(target_idx) ==
		       AFF_STATE_ON_PENDING);
	}
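
	/*
	 * Note: a single retry is assumed to be sufficient here, since the
	 * target CPU performs that invalidation only once on its power-down
	 * path before it is powered off.
	 */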

	/*
	 * Perform generic, architecture and platform specific handling.
	 */
	/*
	 * Plat. management: Give the platform the current state
	 * of the target cpu to allow it to perform the necessary
	 * steps to power on.
	 */
	rc = psci_plat_pm_ops->pwr_domain_on(target_cpu);
	assert((rc == PSCI_E_SUCCESS) || (rc == PSCI_E_INTERN_FAIL));

	if (rc != PSCI_E_SUCCESS) {
		/* Restore the state on error. */
		psci_set_aff_info_state_by_idx(target_idx, AFF_STATE_OFF);
		flush_cpu_data_by_index(target_idx,
					psci_svc_cpu_data.aff_info_state);
	}

on_exit:
	psci_spin_unlock_cpu(target_idx);
	return rc;
}
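
/*
 * For reference only: the platform's pwr_domain_on() hook invoked above is
 * typically responsible for programming the warm boot entrypoint and asking
 * the power controller to release the target core from reset. A rough,
 * hypothetical sketch (the plat_set_warmboot_entrypoint() and
 * plat_power_on_core() helpers below are illustrative, not real TF-A APIs)
 * might look like:
 *
 *	static int plat_pwr_domain_on(u_register_t mpidr)
 *	{
 *		int pos = plat_core_pos_by_mpidr(mpidr);
 *
 *		if (pos < 0)
 *			return PSCI_E_INTERN_FAIL;
 *
 *		// Program the mailbox/entrypoint register for this core.
 *		plat_set_warmboot_entrypoint((unsigned int)pos);
 *
 *		// Ask the power controller to power the core on.
 *		return (plat_power_on_core((unsigned int)pos) == 0) ?
 *			PSCI_E_SUCCESS : PSCI_E_INTERN_FAIL;
 *	}
 */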

/*******************************************************************************
 * The following function finishes an earlier power on request. It is called
 * by the common finisher routine in psci_common.c. The `state_info` is the
 * psci_power_state from which this CPU has woken up.
 ******************************************************************************/
void psci_cpu_on_finish(unsigned int cpu_idx, const psci_power_state_t *state_info)
{
	/*
	 * Plat. management: Perform the platform specific actions
	 * for this cpu e.g. enabling the gic or zeroing the mailbox
	 * register. The actual state of this cpu has already been
	 * changed.
	 */
	psci_plat_pm_ops->pwr_domain_on_finish(state_info);

#if !(HW_ASSISTED_COHERENCY || WARMBOOT_ENABLE_DCACHE_EARLY)
	/*
	 * Arch. management: Enable data cache and manage stack memory.
	 * This is skipped when the data cache is already enabled early in the
	 * warm boot path (HW_ASSISTED_COHERENCY or WARMBOOT_ENABLE_DCACHE_EARLY).
	 */
	psci_do_pwrup_cache_maintenance();
#endif

	/*
	 * Plat. management: Perform any platform specific actions which
	 * can only be done with the cpu and the cluster guaranteed to
	 * be coherent.
	 */
	if (psci_plat_pm_ops->pwr_domain_on_finish_late != NULL) {
		psci_plat_pm_ops->pwr_domain_on_finish_late(state_info);
	}

#if USE_GIC_DRIVER
	/* GIC init after platform has had a say with MMU on */
	gic_pcpu_init(cpu_idx);
	gic_cpuif_enable(cpu_idx);
#endif /* USE_GIC_DRIVER */
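
	/*
	 * When USE_GIC_DRIVER is not set, the platform is assumed to have set
	 * up its interrupt controller in one of the platform hooks above.
	 */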

	/*
	 * All the platform specific actions for turning this cpu
	 * on have completed. Perform enough arch. initialization
	 * to run in the non-secure address space.
	 */
	psci_arch_setup();

	/*
	 * Lock the CPU spin lock to make sure that the context initialization
	 * is done. Since the lock is only used in this function to create
	 * a synchronization point with cpu_on_start(), it can be released
	 * immediately.
	 */
	psci_spin_lock_cpu(cpu_idx);
	psci_spin_unlock_cpu(cpu_idx);

	/* Ensure we have been explicitly woken up by another cpu */
	assert(psci_get_aff_info_state() == AFF_STATE_ON_PENDING);

	/*
	 * Call the cpu on finish handler registered by the Secure Payload
	 * Dispatcher to let it do any bookkeeping. If the handler encounters
	 * an error, it is expected to assert within.
	 */
	if ((psci_spd_pm != NULL) && (psci_spd_pm->svc_on_finish != NULL)) {
		psci_spd_pm->svc_on_finish(0);
	}
	PUBLISH_EVENT(psci_cpu_on_finish);

	/*
	 * Populate the mpidr field within the cpu node array.
	 * This needs to be done only once.
	 */
	psci_cpu_pd_nodes[cpu_idx].mpidr = read_mpidr() & MPIDR_AFFINITY_MASK;
}