blob: 64158e7200ca3325b09a80d6b7cc860191986c91 [file] [log] [blame]
/*
 * Copyright (c) 2015-2025, Arm Limited and Contributors. All rights reserved.
 * Copyright (c) 2020-2022, NVIDIA Corporation. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <arch.h>
#include <asm_macros.S>
#include <assert_macros.S>
#include <context.h>
#include <denver.h>
#include <cpu_macros.S>
#include <plat_macros.S>
/* Emit the common reset-entry prologue for this CPU (macro from cpu_macros.S) */
cpu_reset_prologue denver
	/* -------------------------------------------------
	 * CVE-2017-5715 mitigation
	 *
	 * Flush the indirect branch predictor and RSB on
	 * entry to EL3 by issuing a newly added instruction
	 * for Denver CPUs.
	 *
	 * To achieve this without performing any branch
	 * instruction, a per-cpu vbar is installed which
	 * executes the workaround and then branches off to
	 * the corresponding vector entry in the main vector
	 * table.
	 * -------------------------------------------------
	 */
vector_base workaround_bpflush_runtime_exceptions

	/*
	 * Perform the branch-predictor/RSB flush before entering the real
	 * vector. x0/x1 are preserved by saving them to (and restoring them
	 * from) their slots in the EL3 context; no other register is touched.
	 */
	.macro apply_workaround
	stp	x0, x1, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X0]

	/* Disable cycle counter when event counting is prohibited */
	mrs	x1, pmcr_el0
	orr	x0, x1, #PMCR_EL0_DP_BIT
	msr	pmcr_el0, x0
	isb

	/* -------------------------------------------------
	 * A new write-only system register where a write of
	 * 1 to bit 0 will cause the indirect branch predictor
	 * and RSB to be flushed.
	 *
	 * A write of 0 to bit 0 will be ignored. A write of
	 * 1 to any other bit will cause an MCA.
	 * -------------------------------------------------
	 */
	mov	x0, #1
	msr	s3_0_c15_c0_6, x0
	isb

	ldp	x0, x1, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X0]
	.endm

	/* ---------------------------------------------------------------------
	 * Current EL with SP_EL0 : 0x0 - 0x200
	 *
	 * Exceptions taken from EL3 itself do not apply the workaround; they
	 * branch straight to the corresponding entry in the main vector table.
	 * ---------------------------------------------------------------------
	 */
vector_entry workaround_bpflush_sync_exception_sp_el0
	b	sync_exception_sp_el0
end_vector_entry workaround_bpflush_sync_exception_sp_el0

vector_entry workaround_bpflush_irq_sp_el0
	b	irq_sp_el0
end_vector_entry workaround_bpflush_irq_sp_el0

vector_entry workaround_bpflush_fiq_sp_el0
	b	fiq_sp_el0
end_vector_entry workaround_bpflush_fiq_sp_el0

vector_entry workaround_bpflush_serror_sp_el0
	b	serror_sp_el0
end_vector_entry workaround_bpflush_serror_sp_el0

	/* ---------------------------------------------------------------------
	 * Current EL with SP_ELx: 0x200 - 0x400
	 * ---------------------------------------------------------------------
	 */
vector_entry workaround_bpflush_sync_exception_sp_elx
	b	sync_exception_sp_elx
end_vector_entry workaround_bpflush_sync_exception_sp_elx

vector_entry workaround_bpflush_irq_sp_elx
	b	irq_sp_elx
end_vector_entry workaround_bpflush_irq_sp_elx

vector_entry workaround_bpflush_fiq_sp_elx
	b	fiq_sp_elx
end_vector_entry workaround_bpflush_fiq_sp_elx

vector_entry workaround_bpflush_serror_sp_elx
	b	serror_sp_elx
end_vector_entry workaround_bpflush_serror_sp_elx

	/* ---------------------------------------------------------------------
	 * Lower EL using AArch64 : 0x400 - 0x600
	 *
	 * Entries from lower ELs are the attack surface, so the flush is
	 * applied before dispatching to the main vector table.
	 * ---------------------------------------------------------------------
	 */
vector_entry workaround_bpflush_sync_exception_aarch64
	apply_workaround
	b	sync_exception_aarch64
end_vector_entry workaround_bpflush_sync_exception_aarch64

vector_entry workaround_bpflush_irq_aarch64
	apply_workaround
	b	irq_aarch64
end_vector_entry workaround_bpflush_irq_aarch64

vector_entry workaround_bpflush_fiq_aarch64
	apply_workaround
	b	fiq_aarch64
end_vector_entry workaround_bpflush_fiq_aarch64

vector_entry workaround_bpflush_serror_aarch64
	apply_workaround
	b	serror_aarch64
end_vector_entry workaround_bpflush_serror_aarch64

	/* ---------------------------------------------------------------------
	 * Lower EL using AArch32 : 0x600 - 0x800
	 * ---------------------------------------------------------------------
	 */
vector_entry workaround_bpflush_sync_exception_aarch32
	apply_workaround
	b	sync_exception_aarch32
end_vector_entry workaround_bpflush_sync_exception_aarch32

vector_entry workaround_bpflush_irq_aarch32
	apply_workaround
	b	irq_aarch32
end_vector_entry workaround_bpflush_irq_aarch32

vector_entry workaround_bpflush_fiq_aarch32
	apply_workaround
	b	fiq_aarch32
end_vector_entry workaround_bpflush_fiq_aarch32

vector_entry workaround_bpflush_serror_aarch32
	apply_workaround
	b	serror_aarch32
end_vector_entry workaround_bpflush_serror_aarch32
Varun Wadekarb0301462018-01-10 17:03:22 -0800146
	.global	denver_disable_dco

	/* ---------------------------------------------
	 * Disable debug interfaces
	 *
	 * Sets OSDLR_EL1 to 1 (the OS Double Lock) so
	 * that external debug interfaces become
	 * quiescent before power down.
	 * Clobbers: x0
	 * ---------------------------------------------
	 */
func denver_disable_ext_debug
	mov	x0, #1
	msr	osdlr_el1, x0
	isb
	dsb	sy
	ret
endfunc denver_disable_ext_debug
160
161 /* ----------------------------------------------------
162 * Enable dynamic code optimizer (DCO)
163 * ----------------------------------------------------
164 */
165func denver_enable_dco
Varun Wadekar5f902752020-08-05 23:10:40 -0700166 /* DCO is not supported on PN5 and later */
167 mrs x1, midr_el1
168 mov_imm x2, DENVER_MIDR_PN4
169 cmp x1, x2
170 b.hi 1f
171
Kalyani Chidambarame6c0da12018-10-08 17:01:01 -0700172 mov x18, x30
Varun Wadekar1593cae2018-02-27 18:30:31 -0800173 bl plat_my_core_pos
Varun Wadekar3a8c55f2015-07-14 17:11:20 +0530174 mov x1, #1
175 lsl x1, x1, x0
176 msr s3_0_c15_c0_2, x1
Kalyani Chidambarame6c0da12018-10-08 17:01:01 -0700177 mov x30, x18
Varun Wadekar5f902752020-08-05 23:10:40 -07001781: ret
Varun Wadekar3a8c55f2015-07-14 17:11:20 +0530179endfunc denver_enable_dco
180
181 /* ----------------------------------------------------
182 * Disable dynamic code optimizer (DCO)
183 * ----------------------------------------------------
184 */
185func denver_disable_dco
Varun Wadekar5f902752020-08-05 23:10:40 -0700186 /* DCO is not supported on PN5 and later */
187 mrs x1, midr_el1
188 mov_imm x2, DENVER_MIDR_PN4
189 cmp x1, x2
190 b.hi 2f
Varun Wadekar1593cae2018-02-27 18:30:31 -0800191
Varun Wadekar3a8c55f2015-07-14 17:11:20 +0530192 /* turn off background work */
Varun Wadekar5f902752020-08-05 23:10:40 -0700193 mov x18, x30
Varun Wadekar1593cae2018-02-27 18:30:31 -0800194 bl plat_my_core_pos
Varun Wadekar3a8c55f2015-07-14 17:11:20 +0530195 mov x1, #1
196 lsl x1, x1, x0
197 lsl x2, x1, #16
198 msr s3_0_c15_c0_2, x2
199 isb
200
201 /* wait till the background work turns off */
2021: mrs x2, s3_0_c15_c0_2
203 lsr x2, x2, #32
204 and w2, w2, 0xFFFF
205 and x2, x2, x1
206 cbnz x2, 1b
207
Kalyani Chidambarame6c0da12018-10-08 17:01:01 -0700208 mov x30, x18
Varun Wadekar5f902752020-08-05 23:10:40 -07002092: ret
Varun Wadekar3a8c55f2015-07-14 17:11:20 +0530210endfunc denver_disable_dco
211
/*
 * CVE-2017-5715 reset workaround: point VBAR_EL3 at the per-cpu
 * branch-predictor-flushing vector table (BL31 only, where the EL3
 * runtime vectors exist).
 */
workaround_reset_start denver, CVE(2017, 5715), WORKAROUND_CVE_2017_5715
#if IMAGE_BL31
	adr	x1, workaround_bpflush_runtime_exceptions
	msr	vbar_el3, x1
#endif
workaround_reset_end denver, CVE(2017, 5715)
218
/*
 * Returns ERRATA_APPLIES in x0 when the CPU implements the special
 * branch-predictor/RSB flush instruction, ERRATA_MISSING otherwise.
 * Clobbers: x1, x2
 */
check_erratum_custom_start denver, CVE(2017, 5715)
	mov	x0, #ERRATA_MISSING
#if WORKAROUND_CVE_2017_5715
	/*
	 * Check if the CPU supports the special instruction
	 * required to flush the indirect branch predictor and
	 * RSB. Support for this operation can be determined by
	 * comparing bits 19:16 of ID_AFR0_EL1 with 0b0001.
	 *
	 * NOTE(review): the mask below tests only bit 16, so it
	 * matches 0b0001 exactly only if bits 19:17 are zero on
	 * this implementation — presumably guaranteed; confirm.
	 */
	mrs	x1, id_afr0_el1
	mov	x2, #0x10000
	and	x1, x1, x2
	cbz	x1, 1f
	mov	x0, #ERRATA_APPLIES
1:
#endif
	ret
check_erratum_custom_end denver, CVE(2017, 5715)
Varun Wadekar83353962018-07-06 13:39:52 -0700237
/*
 * CVE-2018-3639 reset workaround: disable the speculative store buffer
 * and memory disambiguation via ACTLR_EL3. Clobbers x0-x3.
 */
workaround_reset_start denver, CVE(2018, 3639), WORKAROUND_CVE_2018_3639
	/*
	 * Denver CPUs with DENVER_MIDR_PN3 or earlier, use different
	 * bits in the ACTLR_EL3 register to disable speculative
	 * store buffer and memory disambiguation.
	 */
	mrs	x0, midr_el1
	mov_imm	x1, DENVER_MIDR_PN4
	cmp	x0, x1
	mrs	x0, actlr_el3
	mov	x1, #(DENVER_CPU_DIS_MD_EL3 | DENVER_CPU_DIS_SSB_EL3)
	mov	x2, #(DENVER_PN4_CPU_DIS_MD_EL3 | DENVER_PN4_CPU_DIS_SSB_EL3)
	csel	x3, x1, x2, ne		/* PN4 exactly -> PN4 bits, else legacy bits */
	orr	x0, x0, x3
	msr	actlr_el3, x0
	isb
	dsb	sy
workaround_reset_end denver, CVE(2018, 3639)
Varun Wadekar6cf8d652018-08-28 09:11:30 -0700256
/* CVE-2018-3639 applicability follows the build-time flag directly */
check_erratum_chosen denver, CVE(2018, 3639), WORKAROUND_CVE_2018_3639
258
/*
 * Denver reset handler: restore the power state field in ACTLR_EL1 to
 * the C1 state and turn the dynamic code optimizer back on.
 */
cpu_reset_func_start denver
	/* ----------------------------------------------------
	 * Reset ACTLR.PMSTATE to C1 state
	 * ----------------------------------------------------
	 */
	mrs	x0, actlr_el1
	bic	x0, x0, #DENVER_CPU_PMSTATE_MASK
	orr	x0, x0, #DENVER_CPU_PMSTATE_C1
	msr	actlr_el1, x0

	/* ----------------------------------------------------
	 * Enable dynamic code optimizer (DCO)
	 * ----------------------------------------------------
	 */
	bl	denver_enable_dco
cpu_reset_func_end denver
Varun Wadekar3a8c55f2015-07-14 17:11:20 +0530275
276 /* ----------------------------------------------------
277 * The CPU Ops core power down function for Denver.
278 * ----------------------------------------------------
279 */
280func denver_core_pwr_dwn
281
282 mov x19, x30
283
Varun Wadekar3a8c55f2015-07-14 17:11:20 +0530284 /* ---------------------------------------------
285 * Force the debug interfaces to be quiescent
286 * ---------------------------------------------
287 */
288 bl denver_disable_ext_debug
289
290 ret x19
291endfunc denver_core_pwr_dwn
292
293 /* -------------------------------------------------------
294 * The CPU Ops cluster power down function for Denver.
295 * -------------------------------------------------------
296 */
297func denver_cluster_pwr_dwn
298 ret
299endfunc denver_cluster_pwr_dwn
300
301 /* ---------------------------------------------
302 * This function provides Denver specific
303 * register information for crash reporting.
304 * It needs to return with x6 pointing to
305 * a list of register names in ascii and
306 * x8 - x15 having values of registers to be
307 * reported.
308 * ---------------------------------------------
309 */
310.section .rodata.denver_regs, "aS"
311denver_regs: /* The ascii list of register names to be reported */
312 .asciz "actlr_el1", ""
313
314func denver_cpu_reg_dump
315 adr x6, denver_regs
316 mrs x8, ACTLR_EL1
317 ret
318endfunc denver_cpu_reg_dump
319
/*
 * macro to declare cpu_ops for Denver SKUs
 *
 * Registers the shared reset / power-down handlers and the
 * CVE-2017-5715 check for a given MIDR value.
 */
.macro denver_cpu_ops_wa midr
	declare_cpu_ops_wa denver, \midr, \
		denver_reset_func, \
		check_erratum_denver_5715, \
		CPU_NO_EXTRA2_FUNC, \
		CPU_NO_EXTRA3_FUNC, \
		denver_core_pwr_dwn, \
		denver_cluster_pwr_dwn
.endm

/* One cpu_ops structure per supported Denver part number */
denver_cpu_ops_wa DENVER_MIDR_PN0
denver_cpu_ops_wa DENVER_MIDR_PN1
denver_cpu_ops_wa DENVER_MIDR_PN2
denver_cpu_ops_wa DENVER_MIDR_PN3
denver_cpu_ops_wa DENVER_MIDR_PN4
denver_cpu_ops_wa DENVER_MIDR_PN5
denver_cpu_ops_wa DENVER_MIDR_PN6
denver_cpu_ops_wa DENVER_MIDR_PN7
denver_cpu_ops_wa DENVER_MIDR_PN8
denver_cpu_ops_wa DENVER_MIDR_PN9