/*
 * Copyright (c) 2016-2025, Arm Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <arch.h>
#include <asm_macros.S>
#include <assert_macros.S>
#include <cpu_macros.S>
#include <common/bl_common.h>
#include <lib/cpus/cpu_ops.h>
#include <lib/el3_runtime/cpu_data.h>

#if defined(IMAGE_BL1) || defined(IMAGE_BL32) || \
	(defined(IMAGE_BL2) && RESET_TO_BL2)
	/*
	 * The reset handler common to all platforms. After a matching
	 * cpu_ops structure entry is found, the corresponding reset_handler
	 * in the cpu_ops is invoked. The reset handler is invoked very early
	 * in the boot sequence and it is assumed that we can clobber r0 - r10
	 * without the need to follow AAPCS.
	 * Clobbers: r0 - r10
	 */
	.globl	reset_handler
func reset_handler
	mov	r8, lr

	/* The plat_reset_handler can clobber r0 - r7 */
	bl	plat_reset_handler

	/* Get the matching cpu_ops pointer (clobbers: r0 - r5) */
	bl	get_cpu_ops_ptr

#if ENABLE_ASSERTIONS
	cmp	r0, #0
	ASM_ASSERT(ne)
#endif

	/* Get the cpu_ops reset handler */
	ldr	r1, [r0, #CPU_RESET_FUNC]
	cmp	r1, #0
	mov	lr, r8
	bxne	r1
	bx	lr
endfunc reset_handler

#endif
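
/*
 * Note: plat_reset_handler above is a platform hook resolved at link time.
 * As an illustrative sketch only (not part of this file), a platform that
 * needs no extra reset-time setup could provide a handler that simply
 * returns, keeping lr intact:
 *
 *	func plat_reset_handler
 *		bx	lr
 *	endfunc plat_reset_handler
 */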

#ifdef IMAGE_BL32
	/*
	 * Initializes the cpu_ops_ptr if not already initialized
	 * in cpu_data. This must only be called after the data cache
	 * is enabled. AAPCS is followed.
	 */
	.globl	init_cpu_ops
func init_cpu_ops
	push	{r4 - r6, lr}
	bl	_cpu_data
	mov	r6, r0
	ldr	r1, [r0, #CPU_DATA_CPU_OPS_PTR]
	cmp	r1, #0
	bne	1f
	bl	get_cpu_ops_ptr
#if ENABLE_ASSERTIONS
	cmp	r0, #0
	ASM_ASSERT(ne)
#endif
	str	r0, [r6, #CPU_DATA_CPU_OPS_PTR]!
1:
	pop	{r4 - r6, pc}
endfunc init_cpu_ops

#endif /* IMAGE_BL32 */
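
/*
 * Usage note (sketch): BL32 C code is expected to call this once the data
 * cache and the per-CPU data area are ready, via the prototype exported from
 * cpu_data.h (assumed here to be "void init_cpu_ops(void);"):
 *
 *	#include <lib/el3_runtime/cpu_data.h>
 *
 *	init_cpu_ops();
 */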

	/*
	 * The below function returns the cpu_ops structure matching the
	 * MIDR of the core. It reads the MIDR and finds the matching
	 * entry in the cpu_ops list. Only the implementation and part number
	 * are used to match the entries.
	 * Return :
	 *	r0 - The matching cpu_ops pointer on success
	 *	r0 - 0 on failure.
	 * Clobbers: r0 - r5
	 */
	.globl	get_cpu_ops_ptr
func get_cpu_ops_ptr
	/* Get the cpu_ops start and end locations */
	ldr	r4, =(__CPU_OPS_START__ + CPU_MIDR)
	ldr	r5, =(__CPU_OPS_END__ + CPU_MIDR)

	/* Initialize the return parameter */
	mov	r0, #0

	/* Read the MIDR_EL1 */
	ldcopr	r2, MIDR
	ldr	r3, =CPU_IMPL_PN_MASK

	/* Retain only the implementation and part number using mask */
	and	r2, r2, r3
1:
	/* Check if we have reached end of list */
	cmp	r4, r5
	bhs	error_exit

	/* load the midr from the cpu_ops */
	ldr	r1, [r4], #CPU_OPS_SIZE
	and	r1, r1, r3

	/* Check if midr matches to midr of this core */
	cmp	r1, r2
	bne	1b

	/* Subtract the increment and offset to get the cpu-ops pointer */
	sub	r0, r4, #(CPU_OPS_SIZE + CPU_MIDR)
#if ENABLE_ASSERTIONS
	cmp	r0, #0
	ASM_ASSERT(ne)
#endif
error_exit:
	bx	lr
endfunc get_cpu_ops_ptr
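
/*
 * For reference (illustrative sketch; the exact macro signature is defined in
 * cpu_macros.S and may differ between versions): each CPU library file
 * contributes an entry to the __CPU_OPS_START__/__CPU_OPS_END__ region
 * scanned above via the declare_cpu_ops macro, along the lines of:
 *
 *	declare_cpu_ops cortex_a7, CORTEX_A7_MIDR, \
 *		cortex_a7_reset_func, \
 *		cortex_a7_core_pwr_dwn, \
 *		cortex_a7_cluster_pwr_dwn
 */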

/*
 * Extract CPU revision and variant, and combine them into a single numeric for
 * easier comparison.
 */
	.globl	cpu_get_rev_var
func cpu_get_rev_var
	ldcopr	r1, MIDR

	/*
	 * Extract the variant[23:20] and revision[3:0] from r1 and pack it in
	 * r0[0:7] as variant[7:4] and revision[3:0]:
	 *
	 * First extract r1[23:16] to r0[7:0] and zero fill the rest. Then
	 * extract r1[3:0] into r0[3:0] retaining other bits.
	 */
	ubfx	r0, r1, #(MIDR_VAR_SHIFT - MIDR_REV_BITS), #(MIDR_REV_BITS + MIDR_VAR_BITS)
	bfi	r0, r1, #MIDR_REV_SHIFT, #MIDR_REV_BITS
	bx	lr
endfunc cpu_get_rev_var
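
/*
 * Worked example (illustrative): a core at r2p1 reports variant[23:20] = 0x2
 * and revision[3:0] = 0x1 in its MIDR, so cpu_get_rev_var returns
 * (2 << 4) | 1 = 0x21 in r0.
 */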

/*
 * Compare the CPU's revision-variant (r0) with a given value (r1), for errata
 * application purposes. If the revision-variant is lower than or the same as
 * the given value, ERRATA_APPLIES is returned; otherwise ERRATA_NOT_APPLIES.
 */
	.globl	cpu_rev_var_ls
func cpu_rev_var_ls
	cmp	r0, r1
	movls	r0, #ERRATA_APPLIES
	movhi	r0, #ERRATA_NOT_APPLIES
	bx	lr
endfunc cpu_rev_var_ls
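
/*
 * Usage sketch (illustrative values): for an erratum that applies up to and
 * including r1p2, an errata check passes 0x12 in r1; with r0 taken from
 * cpu_get_rev_var, cpu_rev_var_ls returns ERRATA_APPLIES for any packed
 * revision-variant <= 0x12 and ERRATA_NOT_APPLIES otherwise.
 */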

/*
 * Compare the CPU's revision-variant (r0) with a given value (r1), for errata
 * application purposes. If the revision-variant is higher than or the same as
 * the given value, ERRATA_APPLIES is returned; otherwise ERRATA_NOT_APPLIES.
 */
	.globl	cpu_rev_var_hs
func cpu_rev_var_hs
	cmp	r0, r1
	movge	r0, #ERRATA_APPLIES
	movlt	r0, #ERRATA_NOT_APPLIES
	bx	lr
endfunc cpu_rev_var_hs
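
/*
 * Usage sketch (illustrative values): for an erratum that applies from r1p0
 * onwards, an errata check passes 0x10 in r1; cpu_rev_var_hs then returns
 * ERRATA_APPLIES for any packed revision-variant >= 0x10 and
 * ERRATA_NOT_APPLIES otherwise.
 */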