/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 */

#include <linux/linkage.h>

#include <asm/assembler.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_mmu.h>
#include <asm/pgtable-hwdef.h>
#include <asm/sysreg.h>
#include <asm/virt.h>

	.text
	.pushsection	.hyp.idmap.text, "ax"

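	/* VBAR_EL2 requires the vector table to be 2KB (2^11) aligned. */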
	.align	11

ENTRY(__kvm_hyp_init)
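	/*
	 * Only the synchronous 64-bit EL1 vector is valid: it is reached by
	 * the HVC the kernel issues to initialise EL2. Every other vector
	 * parks the CPU in an infinite loop.
	 */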
	ventry	__invalid		// Synchronous EL2t
	ventry	__invalid		// IRQ EL2t
	ventry	__invalid		// FIQ EL2t
	ventry	__invalid		// Error EL2t

	ventry	__invalid		// Synchronous EL2h
	ventry	__invalid		// IRQ EL2h
	ventry	__invalid		// FIQ EL2h
	ventry	__invalid		// Error EL2h

	ventry	__do_hyp_init		// Synchronous 64-bit EL1
	ventry	__invalid		// IRQ 64-bit EL1
	ventry	__invalid		// FIQ 64-bit EL1
	ventry	__invalid		// Error 64-bit EL1

	ventry	__invalid		// Synchronous 32-bit EL1
	ventry	__invalid		// IRQ 32-bit EL1
	ventry	__invalid		// FIQ 32-bit EL1
	ventry	__invalid		// Error 32-bit EL1

__invalid:
	b	.

/*
 * x0: HYP pgd
 * x1: HYP stack
 * x2: HYP vectors
 * x3: per-CPU offset
 */
__do_hyp_init:
	/*
	 * Check for a stub HVC call: stub hypercall numbers are small
	 * immediates, whereas a HYP pgd is a page-aligned physical address
	 * and therefore always >= HVC_STUB_HCALL_NR.
	 */
	cmp	x0, #HVC_STUB_HCALL_NR
	b.lo	__kvm_handle_stub_hvc

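	/* Turn the HYP pgd physical address into a TTBR value and install it. */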
	phys_to_ttbr	x4, x0
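	/* Mark the pgd Common-Not-Private when the CPU supports CNP. */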
alternative_if ARM64_HAS_CNP
	orr	x4, x4, #TTBR_CNP_BIT
alternative_else_nop_endif
	msr	ttbr0_el2, x4

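	/*
	 * Derive TCR_EL2 from TCR_EL1: keep only the fields that are valid
	 * at EL2, then force the RES1 bits.
	 */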
	mrs	x4, tcr_el1
	ldr	x5, =TCR_EL2_MASK
	and	x4, x4, x5
	mov	x5, #TCR_EL2_RES1
	orr	x4, x4, x5

	/*
	 * The ID map may be configured to use an extended virtual address
	 * range. This is only the case if system RAM is out of range for the
	 * currently configured page size and VA_BITS, in which case we will
	 * also need the extended virtual range for the HYP ID map, or we won't
	 * be able to enable the EL2 MMU.
	 *
	 * However, at EL2, there is only one TTBR register, and we can't switch
	 * between translation tables *and* update TCR_EL2.T0SZ at the same
	 * time. Bottom line: we need to use the extended range with *both* our
	 * translation tables.
	 *
	 * So use the same T0SZ value we use for the ID map.
	 */
	ldr_l	x5, idmap_t0sz
	bfi	x4, x5, TCR_T0SZ_OFFSET, TCR_TxSZ_WIDTH

	/*
	 * Set the PS bits in TCR_EL2 from the supported PA range advertised
	 * in ID_AA64MMFR0_EL1.PARange.
	 */
	tcr_compute_pa_size x4, #TCR_EL2_PS_SHIFT, x5, x6

	msr	tcr_el2, x4

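	/* Reuse the kernel's memory attribute encodings at EL2. */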
	mrs	x4, mair_el1
	msr	mair_el2, x4
	isb

	/* Invalidate any stale EL2 TLB entries left by the bootloader */
	tlbi	alle2
	dsb	sy			// ensure completion before enabling the MMU

	/*
	 * Preserve all the RES1 bits while setting the default flags,
	 * as well as the EE bit on BE. Drop the A flag since the compiler
	 * is allowed to generate unaligned accesses.
	 */
	ldr	x4, =(SCTLR_EL2_RES1 | (SCTLR_ELx_FLAGS & ~SCTLR_ELx_A))
CPU_BE(	orr	x4, x4, #SCTLR_ELx_EE)
	msr	sctlr_el2, x4
	isb
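	/* The EL2 MMU is now on; execution continues safely from the idmap'd section. */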

	/* Set the stack (converting its kernel VA to a HYP VA) and new vectors */
	kern_hyp_va	x1
	mov	sp, x1
	msr	vbar_el2, x2

	/* Set tpidr_el2 to the per-CPU offset, for use by HYP per-CPU data */
	msr	tpidr_el2, x3

	/* Hello, World! */
	eret
ENDPROC(__kvm_hyp_init)
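/*
 * x0: HVC_SOFT_RESTART or HVC_RESET_VECTORS
 * x1: restart entry point (HVC_SOFT_RESTART only)
 * x2-x4: arguments handed to the restarted code in x0-x2 (HVC_SOFT_RESTART
 *        only)
 */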
ENTRY(__kvm_handle_stub_hvc)
	cmp	x0, #HVC_SOFT_RESTART
	b.ne	1f

	/* This is where we're about to jump, staying at EL2 */
	msr	elr_el2, x1
	mov	x0, #(PSR_F_BIT | PSR_I_BIT | PSR_A_BIT | PSR_D_BIT | PSR_MODE_EL2h)
	msr	spsr_el2, x0

	/* Shuffle the arguments, and don't come back */
	mov	x0, x2
	mov	x1, x3
	mov	x2, x4
	b	reset

1:	cmp	x0, #HVC_RESET_VECTORS
	b.ne	1f

	/*
	 * Set the HVC_RESET_VECTORS return code before entering the common
	 * path so that we do not clobber x0-x2 in case we are coming via
	 * HVC_SOFT_RESTART.
	 */
	mov	x0, xzr
reset:
	/* Reset kvm back to the hyp stub. */
	mrs	x5, sctlr_el2
	ldr	x6, =SCTLR_ELx_FLAGS
	bic	x5, x5, x6		// Clear SCTLR_ELx.M, etc.
	pre_disable_mmu_workaround	// CPU errata workaround before MMU off
	msr	sctlr_el2, x5
	isb

	/* Install stub vectors */
	adr_l	x5, __hyp_stub_vectors
	msr	vbar_el2, x5
	eret

1:	/* Bad stub call */
	ldr	x0, =HVC_STUB_ERR
	eret

ENDPROC(__kvm_handle_stub_hvc)
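	/* Emit the literal pool used by the ldr =<expr> pseudo-instructions above. */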

	.ltorg

	.popsection