// SPDX-License-Identifier: GPL-2.0-only
/*
 * Fault injection for both 32 and 64bit guests.
 *
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * Based on arch/arm/kvm/emulate.c
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 */

#include <linux/kvm_host.h>
#include <asm/kvm_emulate.h>
#include <asm/esr.h>

#define CURRENT_EL_SP_EL0_VECTOR	0x0
#define CURRENT_EL_SP_ELx_VECTOR	0x200
#define LOWER_EL_AArch64_VECTOR		0x400
#define LOWER_EL_AArch32_VECTOR		0x600

enum exception_type {
	except_type_sync	= 0,
	except_type_irq		= 0x80,
	except_type_fiq		= 0x100,
	except_type_serror	= 0x180,
};

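/*
 * Illustrative sketch: a synchronous exception taken from an AArch64 EL0
 * guest is vectored to VBAR_EL1 + LOWER_EL_AArch64_VECTOR + except_type_sync,
 * i.e. VBAR_EL1 + 0x400 + 0x0, matching the EL1 vector table layout.
 */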
static u64 get_except_vector(struct kvm_vcpu *vcpu, enum exception_type type)
{
	u64 exc_offset;

	switch (*vcpu_cpsr(vcpu) & (PSR_MODE_MASK | PSR_MODE32_BIT)) {
	case PSR_MODE_EL1t:
		exc_offset = CURRENT_EL_SP_EL0_VECTOR;
		break;
	case PSR_MODE_EL1h:
		exc_offset = CURRENT_EL_SP_ELx_VECTOR;
		break;
	case PSR_MODE_EL0t:
		exc_offset = LOWER_EL_AArch64_VECTOR;
		break;
	default:
		exc_offset = LOWER_EL_AArch32_VECTOR;
	}

	return vcpu_read_sys_reg(vcpu, VBAR_EL1) + exc_offset + type;
}

/*
 * When an exception is taken, most PSTATE fields are left unchanged in the
 * handler. However, some are explicitly overridden (e.g. M[4:0]). Luckily all
 * of the inherited bits have the same position in the AArch64/AArch32 SPSR_ELx
 * layouts, so we don't need to shuffle these for exceptions from AArch32 EL0.
 *
 * For the SPSR_ELx layout for AArch64, see ARM DDI 0487E.a page C5-429.
 * For the SPSR_ELx layout for AArch32, see ARM DDI 0487E.a page C5-426.
 *
 * Here we manipulate the fields in order of the AArch64 SPSR_ELx layout, from
 * MSB to LSB.
 */
static unsigned long get_except64_pstate(struct kvm_vcpu *vcpu)
{
	unsigned long sctlr = vcpu_read_sys_reg(vcpu, SCTLR_EL1);
	unsigned long old, new;

	old = *vcpu_cpsr(vcpu);
	new = 0;

	new |= (old & PSR_N_BIT);
	new |= (old & PSR_Z_BIT);
	new |= (old & PSR_C_BIT);
	new |= (old & PSR_V_BIT);

	// TODO: TCO (if/when ARMv8.5-MemTag is exposed to guests)

	new |= (old & PSR_DIT_BIT);

	// PSTATE.UAO is set to zero upon any exception to AArch64
	// See ARM DDI 0487E.a, page D5-2579.

	// PSTATE.PAN is unchanged unless SCTLR_ELx.SPAN == 0b0
	// SCTLR_ELx.SPAN is RES1 when ARMv8.1-PAN is not implemented
	// See ARM DDI 0487E.a, page D5-2578.
	new |= (old & PSR_PAN_BIT);
	if (!(sctlr & SCTLR_EL1_SPAN))
		new |= PSR_PAN_BIT;

	// PSTATE.SS is set to zero upon any exception to AArch64
	// See ARM DDI 0487E.a, page D2-2452.

	// PSTATE.IL is set to zero upon any exception to AArch64
	// See ARM DDI 0487E.a, page D1-2306.

	// PSTATE.SSBS is set to SCTLR_ELx.DSSBS upon any exception to AArch64
	// See ARM DDI 0487E.a, page D13-3258
	if (sctlr & SCTLR_ELx_DSSBS)
		new |= PSR_SSBS_BIT;

	// PSTATE.BTYPE is set to zero upon any exception to AArch64
	// See ARM DDI 0487E.a, pages D1-2293 to D1-2294.

	new |= PSR_D_BIT;
	new |= PSR_A_BIT;
	new |= PSR_I_BIT;
	new |= PSR_F_BIT;

	new |= PSR_MODE_EL1h;

	return new;
}
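
/*
 * Worked example (a sketch): with NZCV, DIT and PAN all clear in the old
 * PSTATE, SCTLR_EL1.SPAN = 1 and SCTLR_EL1.DSSBS = 0, the value built above
 * is PSR_D_BIT | PSR_A_BIT | PSR_I_BIT | PSR_F_BIT | PSR_MODE_EL1h = 0x3c5,
 * i.e. all DAIF exceptions masked, EL1 with SP_EL1 selected.
 */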

static void inject_abt64(struct kvm_vcpu *vcpu, bool is_iabt, unsigned long addr)
{
	unsigned long cpsr = *vcpu_cpsr(vcpu);
	bool is_aarch32 = vcpu_mode_is_32bit(vcpu);
	u32 esr = 0;

	vcpu_write_elr_el1(vcpu, *vcpu_pc(vcpu));
	*vcpu_pc(vcpu) = get_except_vector(vcpu, except_type_sync);

	*vcpu_cpsr(vcpu) = get_except64_pstate(vcpu);
	vcpu_write_spsr(vcpu, cpsr);

	vcpu_write_sys_reg(vcpu, addr, FAR_EL1);

	/*
	 * Build an {i,d}abort, depending on the level and the
	 * instruction set. Report an external synchronous abort.
	 */
	if (kvm_vcpu_trap_il_is32bit(vcpu))
		esr |= ESR_ELx_IL;

	/*
	 * Here, the guest runs in AArch64 mode when in EL1. If we get
	 * an AArch32 fault, it means we managed to trap an EL0 fault.
	 */
	if (is_aarch32 || (cpsr & PSR_MODE_MASK) == PSR_MODE_EL0t)
		esr |= (ESR_ELx_EC_IABT_LOW << ESR_ELx_EC_SHIFT);
	else
		esr |= (ESR_ELx_EC_IABT_CUR << ESR_ELx_EC_SHIFT);

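	/*
	 * A sketch of why the OR below is enough: the IABT EC values
	 * (0x20/0x21) are bitwise subsets of the corresponding DABT EC
	 * values (0x24/0x25), so ORing in ESR_ELx_EC_DABT_LOW converts
	 * whichever IABT EC was set above into the matching DABT EC.
	 */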
	if (!is_iabt)
		esr |= ESR_ELx_EC_DABT_LOW << ESR_ELx_EC_SHIFT;

	vcpu_write_sys_reg(vcpu, esr | ESR_ELx_FSC_EXTABT, ESR_EL1);
}

static void inject_undef64(struct kvm_vcpu *vcpu)
{
	unsigned long cpsr = *vcpu_cpsr(vcpu);
	u32 esr = (ESR_ELx_EC_UNKNOWN << ESR_ELx_EC_SHIFT);

	vcpu_write_elr_el1(vcpu, *vcpu_pc(vcpu));
	*vcpu_pc(vcpu) = get_except_vector(vcpu, except_type_sync);

	*vcpu_cpsr(vcpu) = get_except64_pstate(vcpu);
	vcpu_write_spsr(vcpu, cpsr);

	/*
	 * Build an unknown exception, depending on the instruction
	 * set.
	 */
	if (kvm_vcpu_trap_il_is32bit(vcpu))
		esr |= ESR_ELx_IL;

	vcpu_write_sys_reg(vcpu, esr, ESR_EL1);
}

/**
 * kvm_inject_dabt - inject a data abort into the guest
 * @vcpu: The VCPU to receive the data abort
 * @addr: The address to report in the DFAR (FAR_EL1 for an AArch64 guest)
 *
 * It is assumed that this code is called from the VCPU thread and that the
 * VCPU therefore is not currently executing guest code.
 */
void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr)
{
	if (vcpu_el1_is_32bit(vcpu))
		kvm_inject_dabt32(vcpu, addr);
	else
		inject_abt64(vcpu, false, addr);
}

/**
 * kvm_inject_pabt - inject a prefetch abort into the guest
 * @vcpu: The VCPU to receive the prefetch abort
 * @addr: The address to report in the IFAR (FAR_EL1 for an AArch64 guest)
 *
 * It is assumed that this code is called from the VCPU thread and that the
 * VCPU therefore is not currently executing guest code.
 */
void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr)
{
	if (vcpu_el1_is_32bit(vcpu))
		kvm_inject_pabt32(vcpu, addr);
	else
		inject_abt64(vcpu, true, addr);
}

/**
 * kvm_inject_undefined - inject an undefined instruction into the guest
 * @vcpu: The VCPU to receive the undefined instruction exception
 *
 * It is assumed that this code is called from the VCPU thread and that the
 * VCPU therefore is not currently executing guest code.
 */
void kvm_inject_undefined(struct kvm_vcpu *vcpu)
{
	if (vcpu_el1_is_32bit(vcpu))
		kvm_inject_undef32(vcpu);
	else
		inject_undef64(vcpu);
}

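/*
 * Pend a virtual SError for the guest: VSESR_EL2 supplies the syndrome the
 * guest will read from its ESR when the SError is taken, and setting
 * HCR_EL2.VSE makes the CPU deliver the SError once the guest unmasks it.
 */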
void kvm_set_sei_esr(struct kvm_vcpu *vcpu, u64 esr)
{
	vcpu_set_vsesr(vcpu, esr & ESR_ELx_ISS_MASK);
	*vcpu_hcr(vcpu) |= HCR_VSE;
}

/**
 * kvm_inject_vabt - inject an async abort / SError into the guest
 * @vcpu: The VCPU to receive the exception
 *
 * It is assumed that this code is called from the VCPU thread and that the
 * VCPU therefore is not currently executing guest code.
 *
 * Systems with the RAS Extensions specify an imp-def ESR (ISV/IDS = 1) with
 * the remaining ISS all-zeros so that this error is not interpreted as an
 * uncategorized RAS error. Without the RAS Extensions we can't specify an ESR
 * value, so the CPU generates an imp-def value.
 */
void kvm_inject_vabt(struct kvm_vcpu *vcpu)
{
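	/*
	 * ESR_ELx_ISV is bit 24, which in the SError syndrome is the IDS
	 * bit: it flags the (all-zero) remainder of the ISS as imp-def.
	 */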
	kvm_set_sei_esr(vcpu, ESR_ELx_ISV);
}