blob: a864e6661c73a5ffe3e41340d54c9f0507f3f636 [file] [log] [blame]
/*
 * Copyright (c) 2014-2023, Arm Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */
Antonio Nino Diazc3cf06f2018-11-08 10:20:19 +00006#ifndef CPU_MACROS_S
7#define CPU_MACROS_S
Achin Gupta4f6ad662013-10-25 09:08:21 +01008
Antonio Nino Diazff6f62e2019-02-12 11:25:02 +00009#include <assert_macros.S>
Boyan Karatotev007433d2023-01-25 16:55:18 +000010#include <lib/cpus/cpu_ops.h>
Boyan Karatotev6bb96fa2023-01-27 09:37:07 +000011#include <lib/cpus/errata.h>
Achin Gupta4f6ad662013-10-25 09:08:21 +010012
Soby Mathew9b476842014-08-14 11:33:56 +010013 /*
Jeenu Viswambharan5dd9dbb2016-11-18 12:58:28 +000014 * Write given expressions as quad words
15 *
16 * _count:
17 * Write at least _count quad words. If the given number of
18 * expressions is less than _count, repeat the last expression to
19 * fill _count quad words in total
20 * _rest:
21 * Optional list of expressions. _this is for parameter extraction
22 * only, and has no significance to the caller
23 *
24 * Invoked as:
25 * fill_constants 2, foo, bar, blah, ...
Soby Mathew9b476842014-08-14 11:33:56 +010026 */
Jeenu Viswambharan5dd9dbb2016-11-18 12:58:28 +000027 .macro fill_constants _count:req, _this, _rest:vararg
28 .ifgt \_count
29 /* Write the current expression */
30 .ifb \_this
31 .error "Nothing to fill"
32 .endif
33 .quad \_this
34
35 /* Invoke recursively for remaining expressions */
36 .ifnb \_rest
37 fill_constants \_count-1, \_rest
38 .else
39 fill_constants \_count-1, \_this
40 .endif
41 .endif
42 .endm
43
	/*
	 * Declare CPU operations
	 *
	 * NOTE: the data emitted below must stay in lockstep with the cpu_ops
	 * structure layout and field offsets declared in lib/cpus/cpu_ops.h;
	 * each .quad fills one field of that structure.
	 *
	 * _name:
	 *	Name of the CPU for which operations are being specified
	 * _midr:
	 *	Numeric value expected to read from CPU's MIDR
	 * _resetfunc:
	 *	Reset function for the CPU. If there's no CPU reset function,
	 *	specify CPU_NO_RESET_FUNC
	 * _extra1:
	 *	This is a placeholder for future per CPU operations. Currently,
	 *	some CPUs use this entry to set a test function to determine if
	 *	the workaround for CVE-2017-5715 needs to be applied or not.
	 * _extra2:
	 *	This is a placeholder for future per CPU operations. Currently
	 *	some CPUs use this entry to set a function to disable the
	 *	workaround for CVE-2018-3639.
	 * _extra3:
	 *	This is a placeholder for future per CPU operations. Currently,
	 *	some CPUs use this entry to set a test function to determine if
	 *	the workaround for CVE-2022-23960 needs to be applied or not.
	 * _extra4:
	 *	This is a placeholder for future per CPU operations. Currently,
	 *	some CPUs use this entry to set a test function to determine if
	 *	the workaround for CVE-2024-7881 needs to be applied or not.
	 * _e_handler:
	 *	This is a placeholder for future per CPU exception handlers.
	 * _power_down_ops:
	 *	Comma-separated list of functions to perform power-down
	 *	operations on the CPU. At least one, and up to
	 *	CPU_MAX_PWR_DWN_OPS number of functions may be specified.
	 *	Starting at power level 0, these functions shall handle power
	 *	down at subsequent power levels. If there aren't exactly
	 *	CPU_MAX_PWR_DWN_OPS functions, the last specified one will be
	 *	used to handle power down at subsequent levels
	 */
	.macro declare_cpu_ops_base _name:req, _midr:req, _resetfunc:req, \
		_extra1:req, _extra2:req, _extra3:req, _extra4:req, \
		_e_handler:req, _power_down_ops:vararg
	.section .cpu_ops, "a"
	.align 3
	.type cpu_ops_\_name, %object
	.quad \_midr
#if defined(IMAGE_AT_EL3)
	/* The reset function field only exists for images that run at EL3 */
	.quad \_resetfunc
#endif
	.quad \_extra1
	.quad \_extra2
	.quad \_extra3
	.quad \_extra4
	.quad \_e_handler
#ifdef IMAGE_BL31
	/* Insert list of functions, padded to CPU_MAX_PWR_DWN_OPS entries */
	fill_constants CPU_MAX_PWR_DWN_OPS, \_power_down_ops
#endif
	/*
	 * It is possible (although unlikely) that a cpu may have no errata in
	 * code. In that case the start label will not be defined. The list is
	 * intended to be used in a loop, so define it as zero-length for
	 * predictable behaviour. Since this macro is always called at the end
	 * of the cpu file (after all errata have been parsed) we can be sure
	 * that we are at the end of the list. Some cpus call declare_cpu_ops
	 * twice, so only do this once.
	 */
	.pushsection .rodata.errata_entries
	.ifndef \_name\()_errata_list_start
		\_name\()_errata_list_start:
	.endif
	.ifndef \_name\()_errata_list_end
		\_name\()_errata_list_end:
	.endif
	.popsection

	/* and now put them in cpu_ops */
	.quad \_name\()_errata_list_start
	.quad \_name\()_errata_list_end

#if REPORT_ERRATA
	/* Only emit the per-cpu report data once (see note above about cpus
	 * that call declare_cpu_ops twice) */
	.ifndef \_name\()_cpu_str
	/*
	 * Place errata reported flag, and the spinlock to arbitrate access to
	 * it in the data section.
	 */
	.pushsection .data
	define_asm_spinlock \_name\()_errata_lock
	\_name\()_errata_reported:
	.word 0
	.popsection

	/* Place CPU string in rodata */
	.pushsection .rodata
	\_name\()_cpu_str:
	.asciz "\_name"
	.popsection
	.endif

	/*
	 * Mandatory errata status printing function for CPUs of
	 * this class.
	 */
	.quad \_name\()_errata_report
	.quad \_name\()_cpu_str

#ifdef IMAGE_BL31
	/* Pointers to errata lock and reported flag */
	.quad \_name\()_errata_lock
	.quad \_name\()_errata_reported
#endif /* IMAGE_BL31 */
#endif /* REPORT_ERRATA */

#if defined(IMAGE_BL31) && CRASH_REPORTING
	/* Crash handler's per-CPU register dump routine */
	.quad \_name\()_cpu_reg_dump
#endif
	.endm
Dan Handleye2bf57f2015-04-01 17:34:24 +0100160
Dimitris Papastamosa205a562018-03-12 14:47:09 +0000161 .macro declare_cpu_ops _name:req, _midr:req, _resetfunc:req, \
162 _power_down_ops:vararg
Arvind Ram Prakashb5386662025-02-03 17:10:38 +0100163 declare_cpu_ops_base \_name, \_midr, \_resetfunc, 0, 0, 0, 0, 0, \
Dimitris Papastamosa205a562018-03-12 14:47:09 +0000164 \_power_down_ops
165 .endm
166
laurenw-arm80942622019-08-20 15:51:24 -0500167 .macro declare_cpu_ops_eh _name:req, _midr:req, _resetfunc:req, \
168 _e_handler:req, _power_down_ops:vararg
169 declare_cpu_ops_base \_name, \_midr, \_resetfunc, \
Arvind Ram Prakashb5386662025-02-03 17:10:38 +0100170 0, 0, 0, 0, \_e_handler, \_power_down_ops
laurenw-arm80942622019-08-20 15:51:24 -0500171 .endm
172
Dimitris Papastamosfe007b22018-05-16 11:36:14 +0100173 .macro declare_cpu_ops_wa _name:req, _midr:req, \
174 _resetfunc:req, _extra1:req, _extra2:req, \
Bipin Ravi9b2510b2022-02-23 23:45:50 -0600175 _extra3:req, _power_down_ops:vararg
Dimitris Papastamosa205a562018-03-12 14:47:09 +0000176 declare_cpu_ops_base \_name, \_midr, \_resetfunc, \
Arvind Ram Prakashb5386662025-02-03 17:10:38 +0100177 \_extra1, \_extra2, \_extra3, 0, 0, \_power_down_ops
178 .endm
179
180 .macro declare_cpu_ops_wa_4 _name:req, _midr:req, \
181 _resetfunc:req, _extra1:req, _extra2:req, \
182 _extra3:req, _extra4:req, _power_down_ops:vararg
183 declare_cpu_ops_base \_name, \_midr, \_resetfunc, \
184 \_extra1, \_extra2, \_extra3, \_extra4, 0, \_power_down_ops
Dimitris Papastamosa205a562018-03-12 14:47:09 +0000185 .endm
186
	/*
	 * This macro is used on some CPUs to detect if they are vulnerable
	 * to CVE-2017-5715.
	 *
	 * _reg:
	 *	Scratch register; receives the CSV2 field value (clobbered)
	 * _label:
	 *	Branch target taken when no mitigation is needed (CSV2 != 0)
	 *
	 * Also clobbers the condition flags.
	 */
	.macro cpu_check_csv2 _reg _label
	mrs	\_reg, id_aa64pfr0_el1
	ubfx	\_reg, \_reg, #ID_AA64PFR0_CSV2_SHIFT, #ID_AA64PFR0_CSV2_LENGTH
	/*
	 * If the field equals 1, branch targets trained in one context cannot
	 * affect speculative execution in a different context.
	 *
	 * If the field equals 2, it means that the system is also aware of
	 * SCXTNUM_ELx register contexts. We aren't using them in the TF, so we
	 * expect users of the registers to do the right thing.
	 *
	 * Only apply mitigations if the value of this field is 0.
	 */
#if ENABLE_ASSERTIONS
	cmp	\_reg, #3	/* Only values 0 to 2 are expected */
	ASM_ASSERT(lo)
#endif

	cmp	\_reg, #0
	bne	\_label
	.endm
Deepak Pandeyda3b0382018-10-11 13:44:43 +0530212
213 /*
214 * Helper macro that reads the part number of the current
215 * CPU and jumps to the given label if it matches the CPU
216 * MIDR provided.
217 *
218 * Clobbers x0.
219 */
220 .macro jump_if_cpu_midr _cpu_midr, _label
221 mrs x0, midr_el1
222 ubfx x0, x0, MIDR_PN_SHIFT, #12
223 cmp w0, #((\_cpu_midr >> MIDR_PN_SHIFT) & MIDR_PN_MASK)
224 b.eq \_label
225 .endm
Antonio Nino Diazc3cf06f2018-11-08 10:20:19 +0000226
Boyan Karatotev3f4c1e12023-01-27 09:35:10 +0000227
/*
 * Workaround wrappers for errata that apply at reset or runtime. Reset errata
 * will be applied automatically
 *
 * NOTE: the fields emitted below must stay in lockstep with the erratum entry
 * layout and ERRATUM_* offsets declared in lib/cpus/errata.h; ERRATUM_WA_FUNC,
 * ERRATUM_CHOSEN and ERRATUM_ENTRY_SIZE are read back by cpu_reset_func_start.
 *
 * _cpu:
 *	Name of cpu as given to declare_cpu_ops
 *
 * _cve:
 *	Whether erratum is a CVE. CVE year if yes, 0 otherwise
 *
 * _id:
 *	Erratum or CVE number. Please combine with previous field with ERRATUM
 *	or CVE macros
 *
 * _chosen:
 *	Compile time flag on whether the erratum is included
 *
 * _apply_at_reset:
 *	Whether the erratum should be automatically applied at reset
 */
.macro add_erratum_entry _cpu:req, _cve:req, _id:req, _chosen:req, _apply_at_reset:req
	.pushsection .rodata.errata_entries
	.align	3
	/* First entry for this cpu opens the (possibly shared) list */
	.ifndef \_cpu\()_errata_list_start
	\_cpu\()_errata_list_start:
	.endif

	/* check if unused and compile out if no references */
	.if \_apply_at_reset && \_chosen
		.quad	erratum_\_cpu\()_\_id\()_wa
	.else
		.quad	0
	.endif
	/* TODO(errata ABI): this prevents all checker functions from
	 * being optimised away. Can be done away with unless the ABI
	 * needs them */
	.quad	check_erratum_\_cpu\()_\_id
	/* Will fit CVEs with up to 10 character in the ID field */
	.word	\_id
	.hword	\_cve
	.byte	\_chosen
	/* TODO(errata ABI): mitigated field for known but unmitigated
	 * errata */
	.byte	0x1
	.popsection
.endm
274
/*
 * Internal helper: register the erratum entry, then open the workaround
 * function erratum_<cpu>_<id>_wa. Execution falls through into the
 * caller-supplied workaround body, which is closed by _workaround_end.
 * Arguments are as documented for add_erratum_entry.
 *
 * On entry to the body: x7 = cpu_rev_var, x8 = saved link register.
 * The body is skipped entirely when the check function returns 0.
 */
.macro _workaround_start _cpu:req, _cve:req, _id:req, _chosen:req, _apply_at_reset:req
	add_erratum_entry \_cpu, \_cve, \_id, \_chosen, \_apply_at_reset

	func erratum_\_cpu\()_\_id\()_wa
	/* Save the return address; restored by _workaround_end's ret x8 */
	mov	x8, x30

	/* save rev_var for workarounds that might need it but don't
	 * restore to x0 because few will care */
	mov	x7, x0
	bl	check_erratum_\_cpu\()_\_id
	cbz	x0, erratum_\_cpu\()_\_id\()_skip
.endm
287
/*
 * Internal helper: close a workaround opened by _workaround_start.
 * Defines the skip label targeted when the check function returns 0,
 * then returns via the link register saved in x8.
 */
.macro _workaround_end _cpu:req, _id:req
	erratum_\_cpu\()_\_id\()_skip:
	ret	x8
	endfunc erratum_\_cpu\()_\_id\()_wa
.endm
293
/*******************************************************************************
 * Errata workaround wrappers
 ******************************************************************************/
/*
 * Workaround wrappers for errata that apply at reset or runtime. Reset errata
 * will be applied automatically (the reset path invokes them from the loop in
 * cpu_reset_func_start).
 *
 * _cpu:
 *	Name of cpu as given to declare_cpu_ops
 *
 * _cve:
 *	Whether erratum is a CVE. CVE year if yes, 0 otherwise
 *
 * _id:
 *	Erratum or CVE number. Please combine with previous field with ERRATUM
 *	or CVE macros
 *
 * _chosen:
 *	Compile time flag on whether the erratum is included
 *
 * in body:
 *	clobber x0 to x7 (please only use those)
 *	argument x7 - cpu_rev_var
 *
 * _wa clobbers: x0-x8 (PCS compliant)
 */
.macro workaround_reset_start _cpu:req, _cve:req, _id:req, _chosen:req
	_workaround_start \_cpu, \_cve, \_id, \_chosen, 1
.endm
323
/*
 * See `workaround_reset_start` for usage info. Additional arguments:
 *
 * _midr:
 *	Check if CPU's MIDR matches the CPU it's meant for. Must be specified
 *	for errata applied in generic code. When given, a mismatch branches
 *	straight to the erratum's skip label (and clobbers x0 via
 *	jump_if_cpu_midr).
 */
.macro workaround_runtime_start _cpu:req, _cve:req, _id:req, _chosen:req, _midr
	/*
	 * Let errata specify if they need MIDR checking. Sadly, storing the
	 * MIDR in an .equ to retrieve automatically blows up as it stores some
	 * brackets in the symbol
	 */
	.ifnb \_midr
		jump_if_cpu_midr \_midr, 1f
		b	erratum_\_cpu\()_\_id\()_skip

	1:
	.endif
	_workaround_start \_cpu, \_cve, \_id, \_chosen, 0
.endm
345
/*
 * Usage and arguments identical to `workaround_reset_start`. The _cve argument
 * is kept here so the same #define can be used as that macro.
 * No trailing isb is emitted here: the reset path issues one in
 * cpu_reset_func_end.
 */
.macro workaround_reset_end _cpu:req, _cve:req, _id:req
	_workaround_end \_cpu, \_id
.endm
353
/*
 * See `workaround_reset_start` for usage info. The _cve argument is kept here
 * so the same #define can be used as that macro. Additional arguments:
 *
 * _no_isb:
 *	Optionally do not include the trailing isb. Please disable with the
 *	NO_ISB macro
 */
.macro workaround_runtime_end _cpu:req, _cve:req, _id:req, _no_isb
	/*
	 * Runtime errata do not have a reset function to call the isb for them
	 * and missing the isb could be very problematic. It is also likely as
	 * they tend to be scattered in generic code.
	 */
	.ifb \_no_isb
		isb
	.endif
	_workaround_end \_cpu, \_id
.endm
373
/*******************************************************************************
 * Errata workaround helpers
 ******************************************************************************/
/*
 * Set a bit in a system register. Can set multiple bits but is limited by the
 * way the ORR instruction encodes them.
 *
 * _reg:
 *	Register to write to
 *
 * _bit:
 *	Bit to set. Please use a descriptive #define
 *
 * _assert:
 *	Optionally whether to read back and assert that the bit has been
 *	written. Please disable with NO_ASSERT macro
 *	NOTE(review): the parameter is accepted but ignored by the body
 *	below -- no read-back/assert is performed. Confirm whether the
 *	assert path is still planned or the parameter should be retired.
 *
 * clobbers: x1
 */
.macro sysreg_bit_set _reg:req, _bit:req, _assert=1
	mrs	x1, \_reg
	orr	x1, x1, #\_bit
	msr	\_reg, x1
.endm
398
/*
 * Clear a bit in a system register. Can clear multiple bits but is limited by
 * the way the BIC instruction encodes them.
 *
 * see sysreg_bit_set for usage
 *
 * clobbers: x1
 */
.macro sysreg_bit_clear _reg:req, _bit:req
	mrs	x1, \_reg
	bic	x1, x1, #\_bit
	msr	\_reg, x1
.endm
410
/*
 * Install a new EL3 vector table.
 *
 * _table:
 *	Symbol of the vector table base (VBAR_EL3 requires 2KB alignment).
 *
 * clobbers: x1
 */
.macro override_vector_table _table:req
	/*
	 * adrp/add reaches +-4GB, unlike plain adr's +-1MB, so the table
	 * need not be linked close to the caller. Matches the addressing
	 * pattern used by cpu_reset_func_start.
	 */
	adrp	x1, \_table
	add	x1, x1, :lo12:\_table
	msr	vbar_el3, x1
.endm
415
/*
 * BFI : Inserts bitfield into a system register.
 *
 * BFI{cond} Rd, Rn, #lsb, #width
 *
 * _reg:
 *	System register to modify
 * _src:
 *	Immediate value to insert into the field
 * _lsb, _width:
 *	Position and size of the destination bitfield
 *
 * clobbers: x0, x1
 */
.macro sysreg_bitfield_insert _reg:req, _src:req, _lsb:req, _width:req
	/* Source value for BFI */
	mov	x1, #\_src
	mrs	x0, \_reg
	bfi	x0, x1, #\_lsb, #\_width
	msr	\_reg, x0
.endm
428
/*
 * Apply erratum
 *
 * _cpu:
 *	Name of cpu as given to declare_cpu_ops
 *
 * _cve:
 *	Whether erratum is a CVE. CVE year if yes, 0 otherwise
 *
 * _id:
 *	Erratum or CVE number. Please combine with previous field with ERRATUM
 *	or CVE macros
 *
 * _chosen:
 *	Compile time flag on whether the erratum is included
 *
 * _get_rev:
 *	Optional parameter that determines whether to insert a call to the CPU revision fetching
 *	procedure. Stores the result of this in the temporary register x10 to allow for chaining
 *
 * clobbers: x0-x10 (PCS compliant)
 */
.macro apply_erratum _cpu:req, _cve:req, _id:req, _chosen:req, _get_rev=GET_CPU_REV
	.if (\_chosen && \_get_rev)
	/* First erratum in a chain: fetch and cache the revision in x10 */
	mov	x9, x30
	bl	cpu_get_rev_var
	mov	x10, x0
	.elseif (\_chosen)
	/* Chained call: reuse the revision a previous apply_erratum
	 * left in x10 */
	mov	x9, x30
	mov	x0, x10
	.endif

	.if \_chosen
	/* x0 = cpu_rev_var; the _wa function checks applicability itself */
	bl	erratum_\_cpu\()_\_id\()_wa
	mov	x30, x9
	.endif
.endm
466
/*
 * Helpers to select which revisions errata apply to. Don't leave a link
 * register as the cpu_rev_var_*** will call the ret and we can save on one.
 *
 * _cpu:
 *	Name of cpu as given to declare_cpu_ops
 *
 * _cve:
 *	Whether erratum is a CVE. CVE year if yes, 0 otherwise
 *
 * _id:
 *	Erratum or CVE number. Please combine with previous field with ERRATUM
 *	or CVE macros
 *
 * _rev_num:
 *	Revision to apply to
 *
 * in body:
 *	clobber: x0 to x4
 *	argument: x0 - cpu_rev_var
 */
/* Erratum applies to revisions lower-or-same as _rev_num; tail-calls
 * cpu_rev_var_ls, which performs the ret on our behalf */
.macro check_erratum_ls _cpu:req, _cve:req, _id:req, _rev_num:req
	func check_erratum_\_cpu\()_\_id
	mov	x1, #\_rev_num
	b	cpu_rev_var_ls
	endfunc check_erratum_\_cpu\()_\_id
.endm
494
/* Erratum applies to revisions higher-or-same as _rev_num; see
 * check_erratum_ls for the argument and clobber contract */
.macro check_erratum_hs _cpu:req, _cve:req, _id:req, _rev_num:req
	func check_erratum_\_cpu\()_\_id
	mov	x1, #\_rev_num
	b	cpu_rev_var_hs
	endfunc check_erratum_\_cpu\()_\_id
.endm
501
/* Erratum applies to revisions in [_rev_num_lo, _rev_num_hi]; see
 * check_erratum_ls for the argument and clobber contract */
.macro check_erratum_range _cpu:req, _cve:req, _id:req, _rev_num_lo:req, _rev_num_hi:req
	func check_erratum_\_cpu\()_\_id
	mov	x1, #\_rev_num_lo
	mov	x2, #\_rev_num_hi
	b	cpu_rev_var_range
	endfunc check_erratum_\_cpu\()_\_id
.endm
509
/* Erratum check whose result is decided entirely at build time: returns
 * ERRATA_APPLIES when the erratum was compiled in, ERRATA_MISSING
 * otherwise. Ignores cpu_rev_var. */
.macro check_erratum_chosen _cpu:req, _cve:req, _id:req, _chosen:req
	func check_erratum_\_cpu\()_\_id
	.if \_chosen
		mov	x0, #ERRATA_APPLIES
	.else
		mov	x0, #ERRATA_MISSING
	.endif
	ret
	endfunc check_erratum_\_cpu\()_\_id
.endm
520
/* provide a shorthand for the name format for annoying errata */
/* Opens the checker function; the body is supplied by the caller and
 * closed with check_erratum_custom_end */
.macro check_erratum_custom_start _cpu:req, _cve:req, _id:req
	func check_erratum_\_cpu\()_\_id
.endm
525
/* Closes a checker function opened by check_erratum_custom_start */
.macro check_erratum_custom_end _cpu:req, _cve:req, _id:req
	endfunc check_erratum_\_cpu\()_\_id
.endm
529
530
/*******************************************************************************
 * CPU reset function wrapper
 ******************************************************************************/

/*
 * Wrapper to automatically apply all reset-time errata. Will end with an isb.
 *
 * _cpu:
 *	Name of cpu as given to declare_cpu_ops
 *
 * in body:
 *	clobber x8 to x14
 *	argument x14 - cpu_rev_var
 *
 * x15 holds the saved link register until cpu_reset_func_end returns
 * through it.
 *
 * NOTE(review): errata_begin/errata_end are plain (non-prefixed) labels,
 * so this macro can only be expanded once per translation unit -- confirm
 * no cpu file ever declares two reset functions.
 */
.macro cpu_reset_func_start _cpu:req
	func \_cpu\()_reset_func
		mov	x15, x30
		bl	cpu_get_rev_var
		mov	x14, x0

		/* short circuit the location to avoid searching the list */
		adrp	x12, \_cpu\()_errata_list_start
		add	x12, x12, :lo12:\_cpu\()_errata_list_start
		adrp	x13, \_cpu\()_errata_list_end
		add	x13, x13, :lo12:\_cpu\()_errata_list_end

	errata_begin:
		/* if head catches up with end of list, exit */
		cmp	x12, x13
		b.eq	errata_end

		/* x10 = workaround entry point (0 for runtime-only errata) */
		ldr	x10, [x12, #ERRATUM_WA_FUNC]
		/* TODO(errata ABI): check mitigated and checker function fields
		 * for 0 */
		ldrb	w11, [x12, #ERRATUM_CHOSEN]

		/* skip if not chosen */
		cbz	x11, 1f
		/* skip if runtime erratum */
		cbz	x10, 1f

		/* put cpu revision in x0 and call workaround */
		mov	x0, x14
		blr	x10
	1:
		add	x12, x12, #ERRATUM_ENTRY_SIZE
		b	errata_begin
	errata_end:
.endm
580
/* Close a reset function opened by cpu_reset_func_start: issue the
 * trailing isb and return via the link register saved in x15 */
.macro cpu_reset_func_end _cpu:req
	isb
	ret	x15
	endfunc \_cpu\()_reset_func
.endm
Boyan Karatotev4f748cc2023-01-27 09:38:15 +0000586
/*
 * Maintain compatibility with the old scheme of each cpu has its own reporting.
 * TODO remove entirely once all cpus have been converted. This includes the
 * cpu_ops entry, as print_errata_status can call this directly for all cpus
 *
 * Emits \_cpu\()_errata_report, a thin wrapper around
 * generic_errata_report. Only built when REPORT_ERRATA is set.
 */
.macro errata_report_shim _cpu:req
	#if REPORT_ERRATA
	func \_cpu\()_errata_report
		/* normal stack frame for pretty debugging */
		stp	x29, x30, [sp, #-16]!
		mov	x29, sp

		bl	generic_errata_report

		ldp	x29, x30, [sp], #16
		ret
	endfunc \_cpu\()_errata_report
	#endif
.endm
Antonio Nino Diazc3cf06f2018-11-08 10:20:19 +0000606#endif /* CPU_MACROS_S */