Andrew Scull | 11a4a0c | 2018-12-29 11:38:31 +0000 | [diff] [blame] | 1 | /* |
Andrew Walbran | 692b325 | 2019-03-07 15:51:31 +0000 | [diff] [blame] | 2 | * Copyright 2018 The Hafnium Authors. |
Andrew Scull | 11a4a0c | 2018-12-29 11:38:31 +0000 | [diff] [blame] | 3 | * |
Andrew Walbran | e959ec1 | 2020-06-17 15:01:09 +0100 | [diff] [blame] | 4 | * Use of this source code is governed by a BSD-style |
| 5 | * license that can be found in the LICENSE file or at |
| 6 | * https://opensource.org/licenses/BSD-3-Clause. |
Andrew Scull | 11a4a0c | 2018-12-29 11:38:31 +0000 | [diff] [blame] | 7 | */ |
| 8 | |
Andrew Scull | 9a6384b | 2019-01-02 12:08:40 +0000 | [diff] [blame] | 9 | #include "hf/arch/cpu.h" |
| 10 | |
Andrew Scull | 11a4a0c | 2018-12-29 11:38:31 +0000 | [diff] [blame] | 11 | #include <stdbool.h> |
| 12 | #include <stddef.h> |
| 13 | #include <stdint.h> |
| 14 | |
Andrew Scull | 550d99b | 2020-02-10 13:55:00 +0000 | [diff] [blame] | 15 | #include "hf/arch/plat/psci.h" |
| 16 | |
Andrew Scull | 11a4a0c | 2018-12-29 11:38:31 +0000 | [diff] [blame] | 17 | #include "hf/addr.h" |
Daniel Boulby | 8435071 | 2021-11-26 11:13:20 +0000 | [diff] [blame] | 18 | #include "hf/check.h" |
Andrew Walbran | b5ab43c | 2020-04-30 11:32:54 +0100 | [diff] [blame] | 19 | #include "hf/ffa.h" |
Madhukar Pappireddy | 72454a1 | 2021-08-03 12:21:46 -0500 | [diff] [blame] | 20 | #include "hf/plat/interrupts.h" |
Andrew Scull | 8d9e121 | 2019-04-05 13:52:55 +0100 | [diff] [blame] | 21 | #include "hf/std.h" |
Fuad Tabba | 5c73843 | 2019-12-02 11:02:42 +0000 | [diff] [blame] | 22 | #include "hf/vm.h" |
Andrew Scull | 11a4a0c | 2018-12-29 11:38:31 +0000 | [diff] [blame] | 23 | |
Fuad Tabba | 77a4b01 | 2019-11-15 12:13:08 +0000 | [diff] [blame] | 24 | #include "feature_id.h" |
Fuad Tabba | c8eede3 | 2019-10-31 11:17:50 +0000 | [diff] [blame] | 25 | #include "msr.h" |
Andrew Walbran | 42d89e7 | 2019-11-27 12:40:10 +0000 | [diff] [blame] | 26 | #include "perfmon.h" |
| 27 | #include "sysregs.h" |
Fuad Tabba | c8eede3 | 2019-10-31 11:17:50 +0000 | [diff] [blame] | 28 | |
Olivier Deprez | e7d7f32 | 2020-12-14 16:01:03 +0100 | [diff] [blame] | 29 | #if BRANCH_PROTECTION |
| 30 | |
| 31 | __uint128_t pauth_apia_key; |
| 32 | |
| 33 | #endif |
| 34 | |
Maksims Svecovs | 6a0ccc9 | 2022-03-04 15:16:55 +0000 | [diff] [blame] | 35 | #if ENABLE_MTE |
| 36 | |
| 37 | /* MTE hypervisor seed. */ |
| 38 | uintptr_t mte_seed; |
| 39 | |
| 40 | #endif |
| 41 | |
Fuad Tabba | c8eede3 | 2019-10-31 11:17:50 +0000 | [diff] [blame] | 42 | /** |
| 43 | * The LO field indicates whether LORegions are supported. |
| 44 | */ |
| 45 | #define ID_AA64MMFR1_EL1_LO (UINT64_C(1) << 16) |
Fuad Tabba | c76466d | 2019-09-06 10:42:12 +0100 | [diff] [blame] | 46 | |
Fuad Tabba | c8eede3 | 2019-10-31 11:17:50 +0000 | [diff] [blame] | 47 | static void lor_disable(void) |
| 48 | { |
Jose Marinho | cc071f1 | 2019-11-08 14:42:16 +0000 | [diff] [blame] | 49 | #if SECURE_WORLD == 0 |
Fuad Tabba | c8eede3 | 2019-10-31 11:17:50 +0000 | [diff] [blame] | 50 | /* |
| 51 | * Accesses to LORC_EL1 are undefined if LORegions are not supported. |
| 52 | */ |
| 53 | if (read_msr(ID_AA64MMFR1_EL1) & ID_AA64MMFR1_EL1_LO) { |
| 54 | write_msr(MSR_LORC_EL1, 0); |
| 55 | } |
Jose Marinho | cc071f1 | 2019-11-08 14:42:16 +0000 | [diff] [blame] | 56 | #endif |
Fuad Tabba | c8eede3 | 2019-10-31 11:17:50 +0000 | [diff] [blame] | 57 | } |
| 58 | |
/**
 * Seeds the GIC system register state in a vCPU register file. The primary
 * VM is allowed to access ICC_SRE_EL1 directly; secondary VMs have their
 * GICv3 system register accesses trapped to the hypervisor.
 */
static void gic_regs_reset(struct arch_regs *r, bool is_primary)
{
#if GIC_VERSION == 3 || GIC_VERSION == 4
	/* SRE: enable ICH_*/ICC_* at EL2; DIB, DFB: disable IRQ/FIQ bypass. */
	uint32_t sre = (1U << 0) | (0x3 << 1);
	uint32_t hcr;

	if (is_primary) {
		/* Enable EL1 access to ICC_SRE_EL1. */
		sre |= 1U << 3;
		hcr = 0;
	} else {
		/* TDIR, TSEI, TALL1, TALL0, TC: trap EL1 GICv3 sysregs. */
		hcr = 0x1fU << 10;
	}

	r->gic.ich_hcr_el2 = hcr;
	r->gic.icc_sre_el2 = sre;
#endif
}
| 78 | |
/**
 * Resets the architectural register state of the given vCPU ready for it to
 * start (or restart) executing: the register file is cleared, preserving only
 * the entry point (pc) and boot argument (r[0]) that were set beforehand,
 * then the EL2 control, translation and debug/perfmon state is programmed
 * according to whether the owning partition is the primary VM, a secondary
 * VM, or an S-EL0 partition.
 */
void arch_regs_reset(struct vcpu *vcpu)
{
	ffa_vm_id_t vm_id = vcpu->vm->id;
	bool is_primary = vm_id == HF_PRIMARY_VM_ID;
	/*
	 * The primary VM's vCPUs are pinned to physical CPUs, so they report
	 * the physical CPU id; secondary vCPUs report their index in the VM.
	 */
	cpu_id_t vcpu_id = is_primary ? vcpu->cpu->id : vcpu_index(vcpu);

	paddr_t table = vcpu->vm->ptable.root;
	struct arch_regs *r = &vcpu->regs;
	/* Save entry point and boot argument before wiping the registers. */
	uintreg_t pc = r->pc;
	uintreg_t arg = r->r[0];
	uintreg_t cnthctl;

	memset_s(r, sizeof(*r), 0, sizeof(*r));

	r->pc = pc;
	r->r[0] = arg;

	cnthctl = 0;

	/* Only the primary VM gets untrapped physical counter/timer access. */
	if (is_primary) {
		/*
		 * cnthctl_el2 is redefined when VHE is enabled.
		 * EL1PCTEN, don't trap phys cnt access.
		 * EL1PCEN, don't trap phys timer access.
		 */
		if (has_vhe_support()) {
			cnthctl |= (1U << 10) | (1U << 11);
		} else {
			cnthctl |= (1U << 0) | (1U << 1);
		}
	}

	r->hyp_state.hcr_el2 =
		get_hcr_el2_value(vm_id, vcpu->vm->el0_partition);
	r->hyp_state.sctlr_el2 = get_sctlr_el2_value(vcpu->vm->el0_partition);
	r->lazy.cnthctl_el2 = cnthctl;
	if (vcpu->vm->el0_partition) {
		/* S-EL0 partitions rely on VHE being present. */
		CHECK(has_vhe_support());
		/*
		 * AArch64 hafnium only uses 8 bit ASIDs at the moment.
		 * TCR_EL2.AS is set to 0, and per the Arm ARM, the upper 8 bits
		 * are ignored and treated as 0. There is no need to mask the
		 * VMID (used as asid) to only 8 bits.
		 */
		r->hyp_state.ttbr0_el2 =
			pa_addr(table) | ((uint64_t)vm_id << 48);
		r->spsr = PSR_PE_MODE_EL0T;
	} else {
		/* EL1 partitions share the hypervisor's stage-1 tables. */
		r->hyp_state.ttbr0_el2 = read_msr(ttbr0_el2);
		r->lazy.vtcr_el2 = arch_mm_get_vtcr_el2();
#if SECURE_WORLD == 0
		/*
		 * For a VM managed by the Hypervisor a single set
		 * of NS S2 PT exists.
		 * vttbr_el2 points to the single S2 root PT.
		 */
		r->lazy.vttbr_el2 = pa_addr(table) | ((uint64_t)vm_id << 48);
#else
		/*
		 * For a SP managed by the SPMC both sets of NS and secure
		 * S2 PTs exist.
		 * vttbr_el2 points to the NS S2 root PT.
		 * vsttbr_el2 points to secure S2 root PT.
		 */
		r->lazy.vttbr_el2 = pa_addr(vcpu->vm->arch.ptable_ns.root) |
				    ((uint64_t)vm_id << 48);
		r->lazy.vstcr_el2 = arch_mm_get_vstcr_el2();
		r->lazy.vsttbr_el2 = pa_addr(table);
#endif

		r->lazy.vmpidr_el2 = vcpu_id;
		/* Mask (disable) interrupts and run in EL1h mode. */
		r->spsr = PSR_D | PSR_A | PSR_I | PSR_F | PSR_PE_MODE_EL1H;

		r->lazy.mdcr_el2 = get_mdcr_el2_value();

		/*
		 * NOTE: It is important that MDSCR_EL1.MDE (bit 15) is set to 0
		 * for secondary VMs as long as Hafnium does not support debug
		 * register access for secondary VMs. If adding Hafnium support
		 * for secondary VM debug register accesses, then on context
		 * switches Hafnium needs to save/restore EL1 debug register
		 * state that either might change, or that needs to be
		 * protected.
		 */
		r->lazy.mdscr_el1 = 0x0U & ~(0x1U << 15);

		/* Disable cycle counting on initialization. */
		r->lazy.pmccfiltr_el0 =
			perfmon_get_pmccfiltr_el0_init_value(vm_id);

		/* Set feature-specific register values. */
		feature_set_traps(vcpu->vm, r);
	}

	gic_regs_reset(r, is_primary);
}
| 176 | |
| 177 | void arch_regs_set_pc_arg(struct arch_regs *r, ipaddr_t pc, uintreg_t arg) |
| 178 | { |
| 179 | r->pc = ipa_addr(pc); |
| 180 | r->r[0] = arg; |
| 181 | } |
| 182 | |
J-Alves | b7800a1 | 2022-01-25 17:55:53 +0000 | [diff] [blame] | 183 | bool arch_regs_reg_num_valid(const unsigned int gp_reg_num) |
| 184 | { |
| 185 | return gp_reg_num < NUM_GP_REGS; |
| 186 | } |
| 187 | |
| 188 | void arch_regs_set_gp_reg(struct arch_regs *r, const uintreg_t value, |
| 189 | const unsigned int gp_reg_num) |
| 190 | { |
| 191 | assert(arch_regs_reg_num_valid(gp_reg_num)); |
| 192 | r->r[gp_reg_num] = value; |
| 193 | } |
| 194 | |
Andrew Walbran | b5ab43c | 2020-04-30 11:32:54 +0100 | [diff] [blame] | 195 | void arch_regs_set_retval(struct arch_regs *r, struct ffa_value v) |
Andrew Scull | 11a4a0c | 2018-12-29 11:38:31 +0000 | [diff] [blame] | 196 | { |
Andrew Walbran | d4d2fa1 | 2019-10-01 16:47:25 +0100 | [diff] [blame] | 197 | r->r[0] = v.func; |
| 198 | r->r[1] = v.arg1; |
| 199 | r->r[2] = v.arg2; |
| 200 | r->r[3] = v.arg3; |
| 201 | r->r[4] = v.arg4; |
| 202 | r->r[5] = v.arg5; |
| 203 | r->r[6] = v.arg6; |
| 204 | r->r[7] = v.arg7; |
Raghu Krishnamurthy | 567068e | 2022-12-26 07:46:38 -0800 | [diff] [blame] | 205 | |
| 206 | if (v.extended_val.valid) { |
| 207 | r->r[8] = v.extended_val.arg8; |
| 208 | r->r[9] = v.extended_val.arg9; |
| 209 | r->r[10] = v.extended_val.arg10; |
| 210 | r->r[11] = v.extended_val.arg11; |
| 211 | r->r[12] = v.extended_val.arg12; |
| 212 | r->r[13] = v.extended_val.arg13; |
| 213 | r->r[14] = v.extended_val.arg14; |
| 214 | r->r[15] = v.extended_val.arg15; |
| 215 | r->r[16] = v.extended_val.arg16; |
| 216 | r->r[17] = v.extended_val.arg17; |
| 217 | } |
Andrew Scull | 11a4a0c | 2018-12-29 11:38:31 +0000 | [diff] [blame] | 218 | } |
Fuad Tabba | c8eede3 | 2019-10-31 11:17:50 +0000 | [diff] [blame] | 219 | |
Andrew Walbran | d8d3f5d | 2020-10-07 18:23:01 +0100 | [diff] [blame] | 220 | struct ffa_value arch_regs_get_args(struct arch_regs *regs) |
| 221 | { |
| 222 | return (struct ffa_value){ |
| 223 | .func = regs->r[0], |
| 224 | .arg1 = regs->r[1], |
| 225 | .arg2 = regs->r[2], |
| 226 | .arg3 = regs->r[3], |
| 227 | .arg4 = regs->r[4], |
| 228 | .arg5 = regs->r[5], |
| 229 | .arg6 = regs->r[6], |
| 230 | .arg7 = regs->r[7], |
Raghu Krishnamurthy | 567068e | 2022-12-26 07:46:38 -0800 | [diff] [blame] | 231 | .extended_val.valid = false, |
Andrew Walbran | d8d3f5d | 2020-10-07 18:23:01 +0100 | [diff] [blame] | 232 | }; |
| 233 | } |
| 234 | |
/* Returns the SVE implemented VL in bytes (constrained by ZCR_EL3.LEN) */
static uint64_t arch_cpu_sve_len_get(void)
{
	uint64_t vl;

	/*
	 * RDVL multiplies the current vector length in bytes by its
	 * immediate operand (#1 here), so this reads the effective VL at
	 * the current exception level.
	 */
	__asm__ volatile(
		".arch_extension sve;"
		"rdvl %0, #1;"
		".arch_extension nosve;"
		: "=r"(vl));

	return vl;
}
| 248 | |
/**
 * Discovers the maximum SVE vector length permitted by EL3 and programs
 * ZCR_EL2.LEN so that EL2 and lower ELs are constrained to that value.
 */
static void arch_cpu_sve_configure_sve_vector_length(void)
{
	uint64_t vl_bits;
	uint32_t zcr_len;

	/*
	 * Set ZCR_EL2.LEN to the maximum vector length permitted by the
	 * architecture which applies to EL2 and lower ELs (limited by the
	 * HW implementation).
	 * This is done so that the VL read by arch_cpu_sve_len_get isn't
	 * constrained by EL2 and thus indirectly retrieves the value
	 * constrained by EL3 which applies to EL3 and lower ELs (limited by
	 * the HW implementation).
	 */
	write_msr(MSR_ZCR_EL2, ZCR_LEN_MAX);
	isb();

	/* Convert the byte count to bits, then to the LEN encoding:
	 * VL = (LEN + 1) * 128 bits, hence LEN = VL_bits / 128 - 1. */
	vl_bits = arch_cpu_sve_len_get() << 3;
	zcr_len = (vl_bits >> 7) - 1;

	/*
	 * Set ZCR_EL2.LEN to the discovered value which constrains the VL at
	 * EL2 and lower ELs to the value set by EL3.
	 */
	write_msr(MSR_ZCR_EL2, zcr_len & ZCR_LEN_MASK);
	isb();
}
| 276 | |
/**
 * Performs per-CPU architectural initialisation: disables LORegions,
 * programs the EL2 trap controls, zeroes the virtual counter offset,
 * configures the SVE vector length when the feature is present, and
 * initialises the interrupt controller hardware for this CPU.
 */
void arch_cpu_init(struct cpu *c)
{
	/*
	 * Linux expects LORegions to be disabled, hence if the current system
	 * supports them, Hafnium ensures that they are disabled.
	 */
	lor_disable();

	write_msr(CPTR_EL2, get_cptr_el2_value());

	/* Initialize counter-timer virtual offset register to 0. */
	write_msr(CNTVOFF_EL2, 0);
	isb();

	if (is_arch_feat_sve_supported()) {
		arch_cpu_sve_configure_sve_vector_length();
	}

	plat_interrupts_controller_hw_init(c);
}
Olivier Deprez | 148b560 | 2022-03-16 17:13:06 +0100 | [diff] [blame] | 297 | |
/**
 * Returns the vCPU to run on this CPU when it resumes, delegating the
 * decision to the platform's PSCI implementation.
 */
struct vcpu *arch_vcpu_resume(struct cpu *c)
{
	return plat_psci_cpu_resume(c);
}