/*
 * Copyright (C) 2016 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/linkage.h>
#include <asm/asm-offsets.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_asm.h>

	.arch_extension	virt

	.text
	.pushsection	.hyp.text, "ax"

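/*
 * Offset of the usr-mode GP register file (a struct pt_regs) within
 * struct kvm_cpu_context; CPU_CTXT_GP_REGS and GP_REGS_USR come from
 * the generated <asm/asm-offsets.h> included above.
 */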
#define USR_REGS_OFFSET		(CPU_CTXT_GP_REGS + GP_REGS_USR)

/* int __guest_enter(struct kvm_vcpu *vcpu, struct kvm_cpu_context *host) */
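/*
 * As the prototype above suggests: stash the host's registers in the
 * context pointed to by r1, load the guest's usr-mode registers from
 * the vcpu in r0, and eret into the guest. The "call" only returns
 * once __guest_exit runs, with an exception code in r0.
 */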
ENTRY(__guest_enter)
	@ Save host registers
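	@ r0-r3 are AAPCS scratch registers and need not be preserved;
	@ saving r4-r12 and lr covers everything the host expects back.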
	add	r1, r1, #(USR_REGS_OFFSET + S_R4)
	stm	r1!, {r4-r12}
	str	lr, [r1, #4]	@ Skip SP_usr (already saved)

	@ Restore guest registers
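	@ lr is loaded separately because SP_usr sits between r12 and lr
	@ in the pt_regs layout, and because the ldm below overwrites the
	@ base register r0.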
	add	r0, r0, #(VCPU_GUEST_CTXT + USR_REGS_OFFSET + S_R0)
	ldr	lr, [r0, #S_LR]
	ldm	r0, {r0-r12}

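	@ Clear the local exclusive monitor so a host ldrex cannot pair
	@ with an strex later executed by the guest.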
	clrex
	eret
ENDPROC(__guest_enter)

ENTRY(__guest_exit)
	/*
	 * return convention:
	 * guest r0, r1, r2 saved on the stack
	 * r0: vcpu pointer
	 * r1: exception code
	 */

	add	r2, r0, #(VCPU_GUEST_CTXT + USR_REGS_OFFSET + S_R3)
	stm	r2!, {r3-r12}
	str	lr, [r2, #4]	@ Skip SP_usr (saved with the banked state)
	add	r2, r0, #(VCPU_GUEST_CTXT + USR_REGS_OFFSET + S_R0)
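	@ The guest's r0-r2 were stacked on exception entry (see the
	@ return convention above); move them into the context too.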
	pop	{r3, r4, r5}		@ r0, r1, r2
	stm	r2, {r3-r5}

	@ Restore the host registers saved in __guest_enter
	ldr	r0, [r0, #VCPU_HOST_CTXT]
	add	r0, r0, #(USR_REGS_OFFSET + S_R4)
	ldm	r0!, {r4-r12}
	ldr	lr, [r0, #4]

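	@ Stage the return value, then snapshot SPSR_hyp, ELR_hyp and the
	@ HSR: an abort taken in the window below would clobber them.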
	mov	r0, r1
	mrs	r1, SPSR
	mrs	r2, ELR_hyp
	mrc	p15, 4, r3, c5, c2, 0	@ HSR

	/*
	 * Force loads and stores to complete before unmasking aborts
	 * and forcing the delivery of the exception. This gives us a
	 * single instruction window, which the handler will try to
	 * match: the Hyp abort vector checks whether ELR_hyp points
	 * into the abort_guest_exit_start/end range and, if so,
	 * reports the abort by setting bit 31 of the exception code.
	 */
	dsb	sy
	cpsie	a

	.global	abort_guest_exit_start
abort_guest_exit_start:

	isb

	.global	abort_guest_exit_end
abort_guest_exit_end:

	/*
	 * If we took an abort, r0[31] will be set, and cmp will set
	 * the N bit in the CPSR. The exception entry will also have
	 * clobbered SPSR_hyp, ELR_hyp and the HSR, so put back the
	 * values saved above before returning to the host.
	 */
	cmp	r0, #0
	msrmi	SPSR_cxsf, r1
	msrmi	ELR_hyp, r2
	mcrmi	p15, 4, r3, c5, c2, 0	@ HSR

	bx	lr
ENDPROC(__guest_exit)

/*
 * If VFPv3 support is not available, then we will not switch the VFP
 * registers; however cp10 and cp11 accesses will still trap and fall
 * back to the regular coprocessor emulation code, which currently
 * will inject an undefined exception to the guest.
 */
#ifdef CONFIG_VFPv3
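/*
 * Lazy VFP/NEON switch: the guest runs with cp10/cp11 accesses trapped
 * via the HCPTR, so its first VFP/NEON instruction lands here. Enable
 * access, swap the hardware state from the host's to the guest's, and
 * return so that the guest retries the trapped instruction.
 */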
ENTRY(__vfp_guest_restore)
	push	{r3, r4, lr}

	@ NEON/VFP used. Turn on VFP access.
	mrc	p15, 4, r1, c1, c1, 2	@ HCPTR
	bic	r1, r1, #(HCPTR_TCP(10) | HCPTR_TCP(11))
	mcr	p15, 4, r1, c1, c1, 2	@ HCPTR
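	@ Make sure the HCPTR write has taken effect before any VFP access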
	isb

	@ Switch VFP/NEON hardware state to the guest's
	mov	r4, r0
	ldr	r0, [r0, #VCPU_HOST_CTXT]
	add	r0, r0, #CPU_CTXT_VFP
	bl	__vfp_save_state
	add	r0, r4, #(VCPU_GUEST_CTXT + CPU_CTXT_VFP)
	bl	__vfp_restore_state

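	@ Unstack our scratch registers and the guest's r0-r2 pushed by
	@ the vector code, then return to re-execute the trapped access.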
	pop	{r3, r4, lr}
	pop	{r0, r1, r2}
	clrex
	eret
ENDPROC(__vfp_guest_restore)
#endif

	.popsection