Soby Mathew | b4c6df4 | 2022-11-09 11:13:29 +0000 | [diff] [blame^] | 1 | /* |
| 2 | * SPDX-License-Identifier: BSD-3-Clause |
| 3 | * |
| 4 | * SPDX-FileCopyrightText: Copyright TF-RMM Contributors. |
| 5 | */ |
| 6 | |
| 7 | #include <arch.h> |
| 8 | #include <arch_helpers.h> |
| 9 | #include <debug.h> |
| 10 | #include <esr.h> |
| 11 | #include <memory_alloc.h> |
| 12 | #include <rec.h> |
| 13 | #include <smc-rmi.h> |
| 14 | |
/*
 * Expand to a switch case which, for the ESR_EL2 ISS system register
 * encoding ESR_EL2_SYSREG_<reg>, returns the live value of <reg> via
 * the corresponding read_<reg>() accessor.
 */
#define SYSREG_READ_CASE(reg) \
	case ESR_EL2_SYSREG_##reg: return read_##reg()
| 17 | |
| 18 | static unsigned long read_idreg(unsigned int idreg) |
| 19 | { |
| 20 | switch (idreg) { |
| 21 | SYSREG_READ_CASE(ID_AA64PFR0_EL1); |
| 22 | SYSREG_READ_CASE(ID_AA64PFR1_EL1); |
| 23 | /* |
| 24 | * TODO: not supported without SVE: |
| 25 | * SYSREG_READ_CASE(ID_AA64ZFR0_EL1); |
| 26 | */ |
| 27 | SYSREG_READ_CASE(ID_AA64DFR0_EL1); |
| 28 | SYSREG_READ_CASE(ID_AA64DFR1_EL1); |
| 29 | SYSREG_READ_CASE(ID_AA64AFR0_EL1); |
| 30 | SYSREG_READ_CASE(ID_AA64AFR1_EL1); |
| 31 | SYSREG_READ_CASE(ID_AA64ISAR0_EL1); |
| 32 | SYSREG_READ_CASE(ID_AA64ISAR1_EL1); |
| 33 | SYSREG_READ_CASE(ID_AA64MMFR0_EL1); |
| 34 | SYSREG_READ_CASE(ID_AA64MMFR1_EL1); |
| 35 | SYSREG_READ_CASE(ID_AA64MMFR2_EL1); |
| 36 | |
| 37 | default: |
| 38 | /* All other encodings are in the RES0 space */ |
| 39 | return 0UL; |
| 40 | } |
| 41 | } |
| 42 | |
| 43 | /* |
| 44 | * Handle ID_AA64XXX<n>_EL1 instructions |
| 45 | */ |
| 46 | static bool handle_id_sysreg_trap(struct rec *rec, |
| 47 | struct rmi_rec_exit *rec_exit, |
| 48 | unsigned long esr) |
| 49 | { |
| 50 | unsigned int rt; |
| 51 | unsigned long idreg, mask; |
| 52 | |
| 53 | /* |
| 54 | * We only set HCR_EL2.TID3 to trap ID registers at the moment and |
| 55 | * that only traps reads of registers. Seeing a write here indicates a |
| 56 | * consistency problem with the RMM and we should panic immediately. |
| 57 | */ |
| 58 | assert(!ESR_EL2_SYSREG_IS_WRITE(esr)); |
| 59 | |
| 60 | /* |
| 61 | * Read Rt value from the issued instruction, |
| 62 | * the general-purpose register used for the transfer. |
| 63 | */ |
| 64 | rt = ESR_EL2_SYSREG_ISS_RT(esr); |
| 65 | |
| 66 | /* Handle writes to XZR register */ |
| 67 | if (rt == 31U) { |
| 68 | return true; |
| 69 | } |
| 70 | |
| 71 | idreg = esr & ESR_EL2_SYSREG_MASK; |
| 72 | |
| 73 | if (idreg == ESR_EL2_SYSREG_ID_AA64ISAR1_EL1) { |
| 74 | /* Clear Address and Generic Authentication bits */ |
| 75 | mask = (0xfUL << ESR_EL2_SYSREG_ID_AA64ISAR1_APA_SHIFT) | |
| 76 | (0xfUL << ESR_EL2_SYSREG_ID_AA64ISAR1_API_SHIFT) | |
| 77 | (0xfUL << ESR_EL2_SYSREG_ID_AA64ISAR1_GPA_SHIFT) | |
| 78 | (0xfUL << ESR_EL2_SYSREG_ID_AA64ISAR1_GPI_SHIFT); |
| 79 | /* |
| 80 | * Workaround for TF-A trapping AMU registers access |
| 81 | * to EL3 in Realm state |
| 82 | */ |
| 83 | } else if (idreg == ESR_EL2_SYSREG_ID_AA64PFR0_EL1) { |
| 84 | /* Clear support for Activity Monitors Extension */ |
| 85 | mask = MASK(ID_AA64PFR0_EL1_AMU); |
| 86 | |
| 87 | /* |
| 88 | * Clear support for SVE. This is a temporary fix until RMM |
| 89 | * completely supports SVE. |
| 90 | */ |
| 91 | mask |= MASK(ID_AA64PFR0_EL1_SVE); |
| 92 | } else { |
| 93 | mask = 0UL; |
| 94 | } |
| 95 | |
| 96 | ARRAY_WRITE(rec->regs, rt, read_idreg(idreg) & ~mask); |
| 97 | |
| 98 | return true; |
| 99 | } |
| 100 | |
| 101 | static bool handle_icc_el1_sysreg_trap(struct rec *rec, |
| 102 | struct rmi_rec_exit *rec_exit, |
| 103 | unsigned long esr) |
| 104 | { |
| 105 | __unused unsigned long sysreg = esr & ESR_EL2_SYSREG_MASK; |
| 106 | |
| 107 | /* |
| 108 | * We should only have configured ICH_HCR_EL2 to trap on DIR and we |
| 109 | * always trap on the SGIRs following the architecture, so make sure |
| 110 | * we're not accidentally trapping on some other register here. |
| 111 | */ |
| 112 | assert((sysreg == ESR_EL2_SYSREG_ICC_DIR) || |
| 113 | (sysreg == ESR_EL2_SYSREG_ICC_SGI1R_EL1) || |
| 114 | (sysreg == ESR_EL2_SYSREG_ICC_SGI0R_EL1)); |
| 115 | |
| 116 | /* |
| 117 | * The registers above should only trap to EL2 for writes, read |
| 118 | * instructions are not defined and should cause an Undefined exception |
| 119 | * at EL1. |
| 120 | */ |
| 121 | assert(ESR_EL2_SYSREG_IS_WRITE(esr)); |
| 122 | |
| 123 | rec_exit->exit_reason = RMI_EXIT_SYNC; |
| 124 | rec_exit->esr = esr; |
| 125 | return false; |
| 126 | } |
| 127 | |
/*
 * Handler for a trapped system register access. Returns true when the
 * access was fully handled inside the RMM, false when rec_exit has been
 * populated and the trap must be forwarded to the Host.
 */
typedef bool (*sysreg_handler_fn)(struct rec *rec, struct rmi_rec_exit *rec_exit,
				  unsigned long esr);

/* Dispatch rule: invoke fn when (esr & esr_mask) == esr_value */
struct sysreg_handler {
	unsigned long esr_mask;		/* ESR_EL2 bits to compare */
	unsigned long esr_value;	/* Expected value under esr_mask */
	sysreg_handler_fn fn;		/* Handler invoked on a match */
};

#define SYSREG_HANDLER(_mask, _value, _handler_fn) \
	{ .esr_mask = (_mask), .esr_value = (_value), .fn = _handler_fn }

/*
 * Dispatch table for trapped system register accesses.
 *
 * Entries are scanned in order and the first match wins, so keep the
 * broader group matches (ID registers, ICC_*_EL1 group) before the
 * exact-encoding ICC_PMR_EL1 entry, which compares the full register
 * encoding via ESR_EL2_SYSREG_MASK.
 */
static const struct sysreg_handler sysreg_handlers[] = {
	SYSREG_HANDLER(ESR_EL2_SYSREG_ID_MASK, ESR_EL2_SYSREG_ID, handle_id_sysreg_trap),
	SYSREG_HANDLER(ESR_EL2_SYSREG_ICC_EL1_MASK, ESR_EL2_SYSREG_ICC_EL1, handle_icc_el1_sysreg_trap),
	SYSREG_HANDLER(ESR_EL2_SYSREG_MASK, ESR_EL2_SYSREG_ICC_PMR_EL1, handle_icc_el1_sysreg_trap)
};
| 145 | |
| 146 | static unsigned long get_sysreg_write_value(struct rec *rec, unsigned long esr) |
| 147 | { |
| 148 | unsigned int rt = esr_sysreg_rt(esr); |
| 149 | unsigned long val; |
| 150 | |
| 151 | /* Handle reads from XZR register */ |
| 152 | if (rt == 31U) { |
| 153 | return 0UL; |
| 154 | } |
| 155 | |
| 156 | ARRAY_READ(rec->regs, rt, val); |
| 157 | return val; |
| 158 | } |
| 159 | |
| 160 | static void emulate_sysreg_access_ns(struct rec *rec, struct rmi_rec_exit *rec_exit, |
| 161 | unsigned long esr) |
| 162 | { |
| 163 | if (ESR_EL2_SYSREG_IS_WRITE(esr)) { |
| 164 | rec_exit->gprs[0] = get_sysreg_write_value(rec, esr); |
| 165 | } |
| 166 | } |
| 167 | |
| 168 | /* |
| 169 | * Handle trapped MSR, MRS or System instruction execution |
| 170 | * in AArch64 state |
| 171 | */ |
| 172 | bool handle_sysreg_access_trap(struct rec *rec, struct rmi_rec_exit *rec_exit, |
| 173 | unsigned long esr) |
| 174 | { |
| 175 | /* |
| 176 | * Read Rt value from the issued instruction, |
| 177 | * the general-purpose register used for the transfer. |
| 178 | */ |
| 179 | unsigned int rt = ESR_EL2_SYSREG_ISS_RT(esr); |
| 180 | unsigned int i; |
| 181 | unsigned int __unused op0, op1, crn, crm, op2; |
| 182 | unsigned long __unused sysreg; |
| 183 | |
| 184 | /* Check for 32-bit instruction trapped */ |
| 185 | assert(ESR_IL(esr) != 0UL); |
| 186 | |
| 187 | for (i = 0U; i < ARRAY_LEN(sysreg_handlers); i++) { |
| 188 | const struct sysreg_handler *handler = &sysreg_handlers[i]; |
| 189 | bool handled; |
| 190 | |
| 191 | if ((esr & handler->esr_mask) == handler->esr_value) { |
| 192 | handled = handler->fn(rec, rec_exit, esr); |
| 193 | if (!handled) { |
| 194 | emulate_sysreg_access_ns(rec, rec_exit, esr); |
| 195 | } |
| 196 | return handled; |
| 197 | } |
| 198 | } |
| 199 | |
| 200 | /* |
| 201 | * For now, treat all unhandled accesses as RAZ/WI. |
| 202 | * Handle writes to XZR register. |
| 203 | */ |
| 204 | if (!ESR_EL2_SYSREG_IS_WRITE(esr) && (rt != 31U)) { |
| 205 | ARRAY_WRITE(rec->regs, rt, 0UL); |
| 206 | } |
| 207 | |
| 208 | sysreg = esr & ESR_EL2_SYSREG_MASK; |
| 209 | |
| 210 | /* Extract sytem register encoding */ |
| 211 | op0 = EXTRACT(ESR_EL2_SYSREG_TRAP_OP0, sysreg); |
| 212 | op1 = EXTRACT(ESR_EL2_SYSREG_TRAP_OP1, sysreg); |
| 213 | crn = EXTRACT(ESR_EL2_SYSREG_TRAP_CRN, sysreg); |
| 214 | crm = EXTRACT(ESR_EL2_SYSREG_TRAP_CRM, sysreg); |
| 215 | op2 = EXTRACT(ESR_EL2_SYSREG_TRAP_OP2, sysreg); |
| 216 | |
| 217 | INFO("Unhandled %s S%u_%u_C%u_C%u_%u\n", |
| 218 | ESR_EL2_SYSREG_IS_WRITE(esr) ? "write" : "read", |
| 219 | op0, op1, crn, crm, op2); |
| 220 | |
| 221 | return true; |
| 222 | } |