/*
 * Copyright (c) 2014-2024, Arm Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */
#ifndef CPU_MACROS_S
#define CPU_MACROS_S

#include <assert_macros.S>
#include <lib/cpus/cpu_ops.h>
#include <lib/cpus/errata.h>

	/*
	 * Write given expressions as quad words
	 *
	 * _count:
	 *	Write at least _count quad words. If the given number of
	 *	expressions is less than _count, repeat the last expression to
	 *	fill _count quad words in total
	 * _rest:
	 *	Optional list of expressions. _this is for parameter extraction
	 *	only, and has no significance to the caller
	 *
	 * Invoked as:
	 *	fill_constants 2, foo, bar, blah, ...
	 */
	.macro fill_constants _count:req, _this, _rest:vararg
	  .ifgt \_count
	    /* Write the current expression */
	    .ifb \_this
	      .error "Nothing to fill"
	    .endif
	    .quad \_this

	    /* Invoke recursively for remaining expressions */
	    .ifnb \_rest
	      fill_constants \_count-1, \_rest
	    .else
	      fill_constants \_count-1, \_this
	    .endif
	  .endif
	.endm

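	/*
	 * Illustrative expansion sketch (foo and bar are hypothetical
	 * symbols): "fill_constants 2, foo, bar" emits
	 *	.quad foo
	 *	.quad bar
	 * while "fill_constants 2, foo" repeats the last expression and emits
	 *	.quad foo
	 *	.quad foo
	 */
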
	/*
	 * Declare CPU operations
	 *
	 * _name:
	 *	Name of the CPU for which operations are being specified
	 * _midr:
	 *	Numeric value expected to read from CPU's MIDR
	 * _resetfunc:
	 *	Reset function for the CPU. If there's no CPU reset function,
	 *	specify CPU_NO_RESET_FUNC
	 * _extra1:
	 *	This is a placeholder for future per CPU operations. Currently,
	 *	some CPUs use this entry to set a test function to determine if
	 *	the workaround for CVE-2017-5715 needs to be applied or not.
	 * _extra2:
	 *	This is a placeholder for future per CPU operations. Currently,
	 *	some CPUs use this entry to set a function to disable the
	 *	workaround for CVE-2018-3639.
	 * _extra3:
	 *	This is a placeholder for future per CPU operations. Currently,
	 *	some CPUs use this entry to set a test function to determine if
	 *	the workaround for CVE-2022-23960 needs to be applied or not.
	 * _e_handler:
	 *	This is a placeholder for future per CPU exception handlers.
	 * _power_down_ops:
	 *	Comma-separated list of functions to perform power-down
	 *	operations on the CPU. At least one, and up to
	 *	CPU_MAX_PWR_DWN_OPS, functions may be specified.
	 *	Starting at power level 0, these functions shall handle power
	 *	down at subsequent power levels. If there aren't exactly
	 *	CPU_MAX_PWR_DWN_OPS functions, the last specified one will be
	 *	used to handle power down at subsequent levels
	 */
	.macro declare_cpu_ops_base _name:req, _midr:req, _resetfunc:req, \
		_extra1:req, _extra2:req, _extra3:req, _e_handler:req, _power_down_ops:vararg
	.section .cpu_ops, "a"
	.align 3
	.type cpu_ops_\_name, %object
	.quad \_midr
#if defined(IMAGE_AT_EL3)
	.quad \_resetfunc
#endif
	.quad \_extra1
	.quad \_extra2
	.quad \_extra3
	.quad \_e_handler
#ifdef IMAGE_BL31
	/* Insert list of functions */
	fill_constants CPU_MAX_PWR_DWN_OPS, \_power_down_ops
#endif
	/*
	 * It is possible (although unlikely) that a cpu may have no errata in
	 * code. In that case the start label will not be defined. The list is
	 * intended to be used in a loop, so define it as zero-length for
	 * predictable behaviour. Since this macro is always called at the end
	 * of the cpu file (after all errata have been parsed) we can be sure
	 * that we are at the end of the list. Some cpus call declare_cpu_ops
	 * twice, so only do this once.
	 */
	.pushsection .rodata.errata_entries
	.ifndef \_name\()_errata_list_start
	\_name\()_errata_list_start:
	.endif
	.ifndef \_name\()_errata_list_end
	\_name\()_errata_list_end:
	.endif
	.popsection

	/* and now put them in cpu_ops */
	.quad \_name\()_errata_list_start
	.quad \_name\()_errata_list_end

#if REPORT_ERRATA
	.ifndef \_name\()_cpu_str
	  /*
	   * Place errata reported flag, and the spinlock to arbitrate access to
	   * it in the data section.
	   */
	  .pushsection .data
	  define_asm_spinlock \_name\()_errata_lock
	  \_name\()_errata_reported:
	  .word 0
	  .popsection

	  /* Place CPU string in rodata */
	  .pushsection .rodata
	  \_name\()_cpu_str:
	  .asciz "\_name"
	  .popsection
	.endif

	/*
	 * Mandatory errata status printing function for CPUs of
	 * this class.
	 */
	.quad \_name\()_errata_report
	.quad \_name\()_cpu_str

#ifdef IMAGE_BL31
	/* Pointers to errata lock and reported flag */
	.quad \_name\()_errata_lock
	.quad \_name\()_errata_reported
#endif /* IMAGE_BL31 */
#endif /* REPORT_ERRATA */

#if defined(IMAGE_BL31) && CRASH_REPORTING
	.quad \_name\()_cpu_reg_dump
#endif
	.endm

	.macro declare_cpu_ops _name:req, _midr:req, _resetfunc:req, \
		_power_down_ops:vararg
		declare_cpu_ops_base \_name, \_midr, \_resetfunc, 0, 0, 0, 0, \
			\_power_down_ops
	.endm

	.macro declare_cpu_ops_eh _name:req, _midr:req, _resetfunc:req, \
		_e_handler:req, _power_down_ops:vararg
		declare_cpu_ops_base \_name, \_midr, \_resetfunc, \
			0, 0, 0, \_e_handler, \_power_down_ops
	.endm

	.macro declare_cpu_ops_wa _name:req, _midr:req, \
		_resetfunc:req, _extra1:req, _extra2:req, \
		_extra3:req, _power_down_ops:vararg
		declare_cpu_ops_base \_name, \_midr, \_resetfunc, \
			\_extra1, \_extra2, \_extra3, 0, \_power_down_ops
	.endm

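	/*
	 * Illustrative usage sketch (not assembled; cpu_x and the CPU_X_*
	 * names are placeholders): a CPU library file typically ends with
	 *
	 *	declare_cpu_ops cpu_x, CPU_X_MIDR, cpu_x_reset_func, \
	 *		cpu_x_core_pwr_dwn
	 *
	 * where cpu_x_reset_func is the reset function (or CPU_NO_RESET_FUNC)
	 * and cpu_x_core_pwr_dwn handles power-down at power level 0.
	 */
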
	/*
	 * This macro is used on some CPUs to detect if they are vulnerable
	 * to CVE-2017-5715.
	 */
	.macro	cpu_check_csv2 _reg _label
	mrs	\_reg, id_aa64pfr0_el1
	ubfx	\_reg, \_reg, #ID_AA64PFR0_CSV2_SHIFT, #ID_AA64PFR0_CSV2_LENGTH
	/*
	 * If the field equals 1, branch targets trained in one context cannot
	 * affect speculative execution in a different context.
	 *
	 * If the field equals 2, it means that the system is also aware of
	 * SCXTNUM_ELx register contexts. We aren't using them in the TF, so we
	 * expect users of the registers to do the right thing.
	 *
	 * Only apply mitigations if the value of this field is 0.
	 */
#if ENABLE_ASSERTIONS
	cmp	\_reg, #3 /* Only values 0 to 2 are expected */
	ASM_ASSERT(lo)
#endif

	cmp	\_reg, #0
	bne	\_label
	.endm

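	/*
	 * Illustrative usage sketch: a reset function that only needs the
	 * CVE-2017-5715 mitigation when CSV2 reads as 0 could do
	 *
	 *	cpu_check_csv2 x0, 1f
	 *	<enable the mitigation here>
	 * 1:
	 *
	 * The scratch register and the skip label are the caller's choice.
	 */
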
	/*
	 * Helper macro that reads the part number of the current
	 * CPU and jumps to the given label if it matches the CPU
	 * MIDR provided.
	 *
	 * Clobbers x0.
	 */
	.macro	jump_if_cpu_midr _cpu_midr, _label
	mrs	x0, midr_el1
	ubfx	x0, x0, MIDR_PN_SHIFT, #12
	cmp	w0, #((\_cpu_midr >> MIDR_PN_SHIFT) & MIDR_PN_MASK)
	b.eq	\_label
	.endm

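	/*
	 * Illustrative usage sketch (CPU_X_MIDR is a placeholder for a real
	 * CPU's MIDR constant): generic code that must only act on one core
	 * type could do
	 *
	 *	jump_if_cpu_midr CPU_X_MIDR, 1f
	 *	b	2f
	 * 1:	<cpu_x-specific handling>
	 * 2:
	 *
	 * Remember that x0 is clobbered by the comparison.
	 */
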
/*
 * Workaround wrappers for errata that apply at reset or runtime. Reset errata
 * will be applied automatically
 *
 * _cpu:
 *	Name of cpu as given to declare_cpu_ops
 *
 * _cve:
 *	Whether erratum is a CVE. CVE year if yes, 0 otherwise
 *
 * _id:
 *	Erratum or CVE number. Please combine with previous field with ERRATUM
 *	or CVE macros
 *
 * _chosen:
 *	Compile time flag on whether the erratum is included
 *
 * _apply_at_reset:
 *	Whether the erratum should be automatically applied at reset
 */
.macro add_erratum_entry _cpu:req, _cve:req, _id:req, _chosen:req, _apply_at_reset:req
	.pushsection .rodata.errata_entries
		.align	3
		.ifndef \_cpu\()_errata_list_start
		\_cpu\()_errata_list_start:
		.endif

		/* check if unused and compile out if no references */
		.if \_apply_at_reset && \_chosen
			.quad	erratum_\_cpu\()_\_id\()_wa
		.else
			.quad	0
		.endif
		/* TODO(errata ABI): this prevents all checker functions from
		 * being optimised away. Can be done away with unless the ABI
		 * needs them */
		.quad	check_erratum_\_cpu\()_\_id
		/* Will fit CVEs with up to 10 characters in the ID field */
		.word	\_id
		.hword	\_cve
		.byte	\_chosen
		/* TODO(errata ABI): mitigated field for known but unmitigated
		 * errata */
		.byte	0x1
	.popsection
.endm

.macro _workaround_start _cpu:req, _cve:req, _id:req, _chosen:req, _apply_at_reset:req
	add_erratum_entry \_cpu, \_cve, \_id, \_chosen, \_apply_at_reset

	func erratum_\_cpu\()_\_id\()_wa
		mov	x8, x30

		/* save rev_var for workarounds that might need it but don't
		 * restore to x0 because few will care */
		mov	x7, x0
		bl	check_erratum_\_cpu\()_\_id
		cbz	x0, erratum_\_cpu\()_\_id\()_skip
.endm

.macro _workaround_end _cpu:req, _id:req
	erratum_\_cpu\()_\_id\()_skip:
		ret	x8
	endfunc erratum_\_cpu\()_\_id\()_wa
.endm

/*******************************************************************************
 * Errata workaround wrappers
 ******************************************************************************/
/*
 * Workaround wrappers for errata that apply at reset or runtime. Reset errata
 * will be applied automatically
 *
 * _cpu:
 *	Name of cpu as given to declare_cpu_ops
 *
 * _cve:
 *	Whether erratum is a CVE. CVE year if yes, 0 otherwise
 *
 * _id:
 *	Erratum or CVE number. Please combine with previous field with ERRATUM
 *	or CVE macros
 *
 * _chosen:
 *	Compile time flag on whether the erratum is included
 *
 * in body:
 *	clobber x0 to x7 (please only use those)
 *	argument x7 - cpu_rev_var
 *
 * _wa clobbers: x0-x8 (PCS compliant)
 */
.macro workaround_reset_start _cpu:req, _cve:req, _id:req, _chosen:req
	_workaround_start \_cpu, \_cve, \_id, \_chosen, 1
.endm

/*
 * See `workaround_reset_start` for usage info. Additional arguments:
 *
 * _midr:
 *	Check if CPU's MIDR matches the CPU it's meant for. Must be specified
 *	for errata applied in generic code
 */
.macro workaround_runtime_start _cpu:req, _cve:req, _id:req, _chosen:req, _midr
	/*
	 * Let errata specify if they need MIDR checking. Sadly, storing the
	 * MIDR in an .equ to retrieve automatically blows up as it stores some
	 * brackets in the symbol
	 */
	.ifnb \_midr
		jump_if_cpu_midr \_midr, 1f
		b	erratum_\_cpu\()_\_id\()_skip

	1:
	.endif
	_workaround_start \_cpu, \_cve, \_id, \_chosen, 0
.endm

/*
 * Usage and arguments identical to `workaround_reset_start`. The _cve argument
 * is kept here so that the same #define can be used as for that macro
 */
.macro workaround_reset_end _cpu:req, _cve:req, _id:req
	_workaround_end \_cpu, \_id
.endm

/*
 * See `workaround_reset_start` for usage info. The _cve argument is kept here
 * so that the same #define can be used as for that macro. Additional arguments:
 *
 * _no_isb:
 *	Optionally do not include the trailing isb. Please disable with the
 *	NO_ISB macro
 */
.macro workaround_runtime_end _cpu:req, _cve:req, _id:req, _no_isb
	/*
	 * Runtime errata do not have a reset function to issue the isb for
	 * them, and a missing isb could be very problematic. It is also easy
	 * to miss one, as runtime workarounds tend to be scattered in generic
	 * code.
	 */
	.ifb \_no_isb
		isb
	.endif
	_workaround_end \_cpu, \_id
.endm

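/*
 * Illustrative sketch (not assembled; cpu_x, erratum 1234567, the CPUACTLR
 * bit and the ERRATA_X_1234567 build flag are placeholders): a reset-time
 * erratum workaround for revisions <= r1p0 is typically written as
 *
 *	workaround_reset_start cpu_x, ERRATUM(1234567), ERRATA_X_1234567
 *		sysreg_bit_set CPU_X_CPUACTLR_EL1, BIT(12)
 *	workaround_reset_end cpu_x, ERRATUM(1234567)
 *
 *	check_erratum_ls cpu_x, ERRATUM(1234567), CPU_REV(1, 0)
 *
 * ERRATUM()/CVE() supply the _cve and _id fields together, and the check
 * function is declared with the helpers further down in this file.
 */
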
/*******************************************************************************
 * Errata workaround helpers
 ******************************************************************************/
/*
 * Set a bit in a system register. Can set multiple bits but is limited by the
 * way the ORR instruction encodes them.
 *
 * _reg:
 *	Register to write to
 *
 * _bit:
 *	Bit to set. Please use a descriptive #define
 *
 * _assert:
 *	Optionally whether to read back and assert that the bit has been
 *	written. Please disable with NO_ASSERT macro
 *
 * clobbers: x1
 */
.macro sysreg_bit_set _reg:req, _bit:req, _assert=1
	mrs	x1, \_reg
	orr	x1, x1, #\_bit
	msr	\_reg, x1
.endm

/*
 * Clear a bit in a system register. Can clear multiple bits but is limited by
 * the way the BIC instruction encodes them.
 *
 * see sysreg_bit_set for usage
 */
.macro sysreg_bit_clear _reg:req, _bit:req
	mrs	x1, \_reg
	bic	x1, x1, #\_bit
	msr	\_reg, x1
.endm

.macro override_vector_table _table:req
	adr	x1, \_table
	msr	vbar_el3, x1
.endm

/*
 * BFI : Inserts bitfield into a system register.
 *
 * BFI{cond} Rd, Rn, #lsb, #width
 */
.macro sysreg_bitfield_insert _reg:req, _src:req, _lsb:req, _width:req
	/* Source value for BFI */
	mov	x1, #\_src
	mrs	x0, \_reg
	bfi	x0, x1, #\_lsb, #\_width
	msr	\_reg, x0
.endm

.macro sysreg_bitfield_insert_from_gpr _reg:req, _gpr:req, _lsb:req, _width:req
	/* Source value in register for BFI */
	mov	x1, \_gpr
	mrs	x0, \_reg
	bfi	x0, x1, #\_lsb, #\_width
	msr	\_reg, x0
.endm

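/*
 * Illustrative usage sketch (the CPU_X_* register names and bit positions are
 * placeholders): inside a workaround body these helpers are invoked with the
 * CPU's own #defines, e.g.
 *
 *	sysreg_bit_set CPU_X_CPUECTLR_EL1, BIT(46)
 *	sysreg_bitfield_insert CPU_X_CPUACTLR_EL1, 0x3, 10, 2
 *
 * Note that x0 and x1 are clobbered, which fits within the x0-x7 budget that
 * workaround bodies are allowed to use.
 */
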
/*
 * Apply erratum
 *
 * _cpu:
 *	Name of cpu as given to declare_cpu_ops
 *
 * _cve:
 *	Whether erratum is a CVE. CVE year if yes, 0 otherwise
 *
 * _id:
 *	Erratum or CVE number. Please combine with previous field with ERRATUM
 *	or CVE macros
 *
 * _chosen:
 *	Compile time flag on whether the erratum is included
 *
 * _get_rev:
 *	Optional parameter that determines whether to insert a call to the CPU
 *	revision fetching procedure. Stores the result of this in the
 *	temporary register x10.
 *
 * clobbers: x0-x10 (PCS compliant)
 */
.macro apply_erratum _cpu:req, _cve:req, _id:req, _chosen:req, _get_rev=GET_CPU_REV
	.if (\_chosen & \_get_rev)
		mov	x9, x30
		bl	cpu_get_rev_var
		mov	x10, x0
	.elseif (\_chosen)
		mov	x9, x30
		mov	x0, x10
	.endif

	.if \_chosen
		bl	erratum_\_cpu\()_\_id\()_wa
		mov	x30, x9
	.endif
.endm

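/*
 * Illustrative usage sketch (cpu_x and ERRATA_X_1234567 are placeholders): a
 * power-down or runtime path can apply one already-declared workaround with
 *
 *	apply_erratum cpu_x, ERRATUM(1234567), ERRATA_X_1234567
 *
 * The CPU revision is fetched into x10 when _get_rev is left at its default;
 * passing 0 for _get_rev on later uses reuses the value already in x10.
 */
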
/*
 * Helpers to select which revisions errata apply to. No link register is
 * saved: the generated check functions tail-call cpu_rev_var_***, which
 * performs the ret, so we can save on one.
 *
 * _cpu:
 *	Name of cpu as given to declare_cpu_ops
 *
 * _cve:
 *	Whether erratum is a CVE. CVE year if yes, 0 otherwise
 *
 * _id:
 *	Erratum or CVE number. Please combine with previous field with ERRATUM
 *	or CVE macros
 *
 * _rev_num:
 *	Revision to apply to
 *
 * in body:
 *	clobber: x0 to x4
 *	argument: x0 - cpu_rev_var
 */
.macro check_erratum_ls _cpu:req, _cve:req, _id:req, _rev_num:req
	func check_erratum_\_cpu\()_\_id
		mov	x1, #\_rev_num
		b	cpu_rev_var_ls
	endfunc check_erratum_\_cpu\()_\_id
.endm

.macro check_erratum_hs _cpu:req, _cve:req, _id:req, _rev_num:req
	func check_erratum_\_cpu\()_\_id
		mov	x1, #\_rev_num
		b	cpu_rev_var_hs
	endfunc check_erratum_\_cpu\()_\_id
.endm

.macro check_erratum_range _cpu:req, _cve:req, _id:req, _rev_num_lo:req, _rev_num_hi:req
	func check_erratum_\_cpu\()_\_id
		mov	x1, #\_rev_num_lo
		mov	x2, #\_rev_num_hi
		b	cpu_rev_var_range
	endfunc check_erratum_\_cpu\()_\_id
.endm

.macro check_erratum_chosen _cpu:req, _cve:req, _id:req, _chosen:req
	func check_erratum_\_cpu\()_\_id
		.if \_chosen
			mov	x0, #ERRATA_APPLIES
		.else
			mov	x0, #ERRATA_MISSING
		.endif
		ret
	endfunc check_erratum_\_cpu\()_\_id
.endm

/* provide a shorthand for the name format for annoying errata */
.macro check_erratum_custom_start _cpu:req, _cve:req, _id:req
	func check_erratum_\_cpu\()_\_id
.endm

.macro check_erratum_custom_end _cpu:req, _cve:req, _id:req
	endfunc check_erratum_\_cpu\()_\_id
.endm

/*******************************************************************************
 * CPU reset function wrapper
 ******************************************************************************/

/*
 * Wrapper to automatically apply all reset-time errata. Will end with an isb.
 *
 * _cpu:
 *	Name of cpu as given to declare_cpu_ops
 *
 * in body:
 *	clobber x8 to x14
 *	argument x14 - cpu_rev_var
 */
.macro cpu_reset_func_start _cpu:req
	func \_cpu\()_reset_func
		mov	x15, x30
		bl	cpu_get_rev_var
		mov	x14, x0

		/* short circuit the location to avoid searching the list */
		adrp	x12, \_cpu\()_errata_list_start
		add	x12, x12, :lo12:\_cpu\()_errata_list_start
		adrp	x13, \_cpu\()_errata_list_end
		add	x13, x13, :lo12:\_cpu\()_errata_list_end

	errata_begin:
		/* if head catches up with end of list, exit */
		cmp	x12, x13
		b.eq	errata_end

		ldr	x10, [x12, #ERRATUM_WA_FUNC]
		/* TODO(errata ABI): check mitigated and checker function fields
		 * for 0 */
		ldrb	w11, [x12, #ERRATUM_CHOSEN]

		/* skip if not chosen */
		cbz	x11, 1f
		/* skip if runtime erratum */
		cbz	x10, 1f

		/* put cpu revision in x0 and call workaround */
		mov	x0, x14
		blr	x10
	1:
		add	x12, x12, #ERRATUM_ENTRY_SIZE
		b	errata_begin
	errata_end:
.endm

.macro cpu_reset_func_end _cpu:req
	isb
	ret	x15
	endfunc \_cpu\()_reset_func
.endm

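/*
 * Illustrative file-level sketch (not assembled; every cpu_x and CPU_X_*
 * name is a placeholder): a minimal CPU library file built on the wrappers
 * above looks roughly like
 *
 *	cpu_reset_func_start cpu_x
 *		<reset-time setup common to all revisions>
 *	cpu_reset_func_end cpu_x
 *
 *	func cpu_x_core_pwr_dwn
 *		sysreg_bit_set CPU_X_CPUPWRCTLR_EL1, CPU_X_CORE_PWRDN_EN_BIT
 *		isb
 *		ret
 *	endfunc cpu_x_core_pwr_dwn
 *
 *	errata_report_shim cpu_x
 *	declare_cpu_ops cpu_x, CPU_X_MIDR, cpu_x_reset_func, cpu_x_core_pwr_dwn
 *
 * Reset-time errata declared with workaround_reset_start are picked up
 * automatically by the loop in cpu_reset_func_start.
 */
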
/*
 * Maintain compatibility with the old scheme where each cpu has its own
 * reporting function. TODO: remove entirely once all cpus have been
 * converted. This includes the cpu_ops entry, as print_errata_status can
 * call this directly for all cpus
 */
.macro errata_report_shim _cpu:req
	#if REPORT_ERRATA
	func \_cpu\()_errata_report
		/* normal stack frame for pretty debugging */
		stp	x29, x30, [sp, #-16]!
		mov	x29, sp

		bl	generic_errata_report

		ldp	x29, x30, [sp], #16
		ret
	endfunc \_cpu\()_errata_report
	#endif
.endm
#endif /* CPU_MACROS_S */