/*
 * Copyright 2018 The Hafnium Authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     https://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "hf/vm.h"

#include "hf/api.h"
#include "hf/check.h"
#include "hf/cpu.h"
#include "hf/layout.h"
#include "hf/plat/iommu.h"
#include "hf/spci.h"
#include "hf/std.h"

#include "vmapi/hf/call.h"

static struct vm vms[MAX_VMS];
static spci_vm_count_t vm_count;

bool vm_init(spci_vcpu_count_t vcpu_count, struct mpool *ppool,
	     struct vm **new_vm)
{
	uint32_t i;
	struct vm *vm;

	if (vm_count >= MAX_VMS) {
		return false;
	}

	vm = &vms[vm_count];

	memset_s(vm, sizeof(*vm), 0, sizeof(*vm));

	list_init(&vm->mailbox.waiter_list);
	list_init(&vm->mailbox.ready_list);
	sl_init(&vm->lock);

	/* Generate IDs based on an offset, as low IDs (e.g. 0) are reserved. */
	vm->id = vm_count + HF_VM_ID_OFFSET;
	vm->vcpu_count = vcpu_count;
	vm->mailbox.state = MAILBOX_STATE_EMPTY;
	atomic_init(&vm->aborting, false);

	if (!mm_vm_init(&vm->ptable, ppool)) {
		return false;
	}

	/* Initialise waiter entries. */
	for (i = 0; i < MAX_VMS; i++) {
		vm->wait_entries[i].waiting_vm = vm;
		list_init(&vm->wait_entries[i].wait_links);
		list_init(&vm->wait_entries[i].ready_links);
	}

	/* Do basic initialization of vCPUs. */
	for (i = 0; i < vcpu_count; i++) {
		vcpu_init(vm_get_vcpu(vm, i), vm);
	}

	++vm_count;
	*new_vm = vm;

	return true;
}
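
/*
 * Usage sketch (illustrative, not from the original sources): creating a VM
 * during boot, assuming a caller-owned memory pool `boot_ppool`.
 *
 *	struct vm *vm;
 *
 *	if (!vm_init(4, &boot_ppool, &vm)) {
 *		...handle allocation failure...
 *	}
 *
 * On success, the new VM's ID is its creation index plus HF_VM_ID_OFFSET,
 * and its vCPUs have had basic initialisation but are not yet runnable.
 */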

spci_vm_count_t vm_get_count(void)
{
	return vm_count;
}

struct vm *vm_find(spci_vm_id_t id)
{
	uint16_t index;

	/* Check that this is not a reserved ID. */
	if (id < HF_VM_ID_OFFSET) {
		return NULL;
	}

	index = id - HF_VM_ID_OFFSET;

	/* Ensure the VM is initialized. */
	if (index >= vm_count) {
		return NULL;
	}

	return &vms[index];
}

/**
 * Locks the given VM and returns a `vm_locked` handle referring to it.
 */
struct vm_locked vm_lock(struct vm *vm)
{
	struct vm_locked locked = {
		.vm = vm,
	};

	sl_lock(&vm->lock);

	return locked;
}

/**
 * Locks two VMs, acquiring the locks in a consistent order based on the
 * locks' addresses so that concurrent callers cannot deadlock.
 */
struct two_vm_locked vm_lock_both(struct vm *vm1, struct vm *vm2)
{
	struct two_vm_locked dual_lock;

	sl_lock_both(&vm1->lock, &vm2->lock);
	dual_lock.vm1.vm = vm1;
	dual_lock.vm2.vm = vm2;

	return dual_lock;
}
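
/*
 * Example (illustrative sketch): locking a sender and receiver pair. Because
 * the locks are taken in address order, two concurrent callers passing the
 * same VMs in opposite order cannot deadlock.
 *
 *	struct two_vm_locked both = vm_lock_both(sender, receiver);
 *
 *	...operate on both.vm1 and both.vm2...
 *
 *	vm_unlock(&both.vm1);
 *	vm_unlock(&both.vm2);
 */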

/**
 * Unlocks a VM previously locked with vm_lock, and updates `locked` to reflect
 * the fact that the VM is no longer locked.
 */
void vm_unlock(struct vm_locked *locked)
{
	sl_unlock(&locked->vm->lock);
	locked->vm = NULL;
}
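
/*
 * Usage sketch (illustrative): vm_lock/vm_unlock bracket accesses to state
 * guarded by the VM lock, e.g. the mailbox.
 *
 *	struct vm_locked locked = vm_lock(vm);
 *
 *	locked.vm->mailbox.state = MAILBOX_STATE_EMPTY;
 *	vm_unlock(&locked);
 *
 * After vm_unlock returns, locked.vm is NULL, so a stale handle cannot be
 * used by accident.
 */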

/**
 * Get the vCPU with the given index from the given VM.
 * This assumes the index is valid, i.e. less than vm->vcpu_count.
 */
struct vcpu *vm_get_vcpu(struct vm *vm, spci_vcpu_index_t vcpu_index)
{
	CHECK(vcpu_index < vm->vcpu_count);
	return &vm->vcpus[vcpu_index];
}

/**
 * Gets `vm`'s wait entry for waiting on `for_vm`.
 */
struct wait_entry *vm_get_wait_entry(struct vm *vm, spci_vm_id_t for_vm)
{
	uint16_t index;

	CHECK(for_vm >= HF_VM_ID_OFFSET);
	index = for_vm - HF_VM_ID_OFFSET;
	CHECK(index < MAX_VMS);

	return &vm->wait_entries[index];
}

/**
 * Gets the ID of the VM which the given VM's wait entry is for.
 */
spci_vm_id_t vm_id_for_wait_entry(struct vm *vm, struct wait_entry *entry)
{
	uint16_t index = entry - vm->wait_entries;

	return index + HF_VM_ID_OFFSET;
}
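
/*
 * The two helpers above are inverses: a wait entry's index within
 * vm->wait_entries encodes the ID of the VM being waited on. Illustrative
 * round trip:
 *
 *	struct wait_entry *entry = vm_get_wait_entry(vm, for_vm);
 *
 *	CHECK(vm_id_for_wait_entry(vm, entry) == for_vm);
 */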

/**
 * Map a range of addresses to the VM in both the MMU and the IOMMU.
 *
 * mm_vm_defrag should always be called after a series of page table updates,
 * whether they succeed or fail. This is because on failure extra page table
 * entries may have been allocated and then not used, while on success it may
 * be possible to compact the page table by merging several entries into a
 * block.
 *
 * Returns true on success, or false if the update failed and no changes were
 * made.
 */
bool vm_identity_map(struct vm_locked vm_locked, paddr_t begin, paddr_t end,
		     uint32_t mode, struct mpool *ppool, ipaddr_t *ipa)
{
	if (!vm_identity_prepare(vm_locked, begin, end, mode, ppool)) {
		return false;
	}

	vm_identity_commit(vm_locked, begin, end, mode, ppool, ipa);

	return true;
}
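
/*
 * Example (illustrative sketch): mapping a device region and then compacting
 * the page table, per the defrag note above. The address range, mode bits and
 * `local_ppool` are hypothetical values chosen for the example.
 *
 *	bool ok = vm_identity_map(vm_locked, pa_init(0x9000000),
 *				  pa_init(0x9001000),
 *				  MM_MODE_R | MM_MODE_W | MM_MODE_D,
 *				  &local_ppool, NULL);
 *
 *	mm_vm_defrag(&vm_locked.vm->ptable, &local_ppool);
 */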

/**
 * Prepares the given VM for the given address mapping such that it will be
 * able to commit the change without failure.
 *
 * In particular, after multiple successful calls to this function, the
 * corresponding calls to commit the changes are guaranteed to succeed.
 *
 * Returns true on success, or false if the update failed and no changes were
 * made.
 */
bool vm_identity_prepare(struct vm_locked vm_locked, paddr_t begin, paddr_t end,
			 uint32_t mode, struct mpool *ppool)
{
	return mm_vm_identity_prepare(&vm_locked.vm->ptable, begin, end, mode,
				      ppool);
}

/**
 * Commits the given address mapping to the VM, assuming the operation cannot
 * fail. `vm_identity_prepare` must be used correctly before this to ensure
 * this condition.
 */
void vm_identity_commit(struct vm_locked vm_locked, paddr_t begin, paddr_t end,
			uint32_t mode, struct mpool *ppool, ipaddr_t *ipa)
{
	mm_vm_identity_commit(&vm_locked.vm->ptable, begin, end, mode, ppool,
			      ipa);
	plat_iommu_identity_map(vm_locked, begin, end, mode);
}

/**
 * Unmap a range of addresses from the VM.
 *
 * Returns true on success, or false if the update failed and no changes were
 * made.
 */
bool vm_unmap(struct vm_locked vm_locked, paddr_t begin, paddr_t end,
	      struct mpool *ppool)
{
	uint32_t mode = MM_MODE_UNMAPPED_MASK;

	return vm_identity_map(vm_locked, begin, end, mode, ppool, NULL);
}

/**
 * Unmaps the hypervisor pages from the given page table.
 */
bool vm_unmap_hypervisor(struct vm_locked vm_locked, struct mpool *ppool)
{
	/* TODO: If we add pages dynamically, they must be included here too. */
	return vm_unmap(vm_locked, layout_text_begin(), layout_text_end(),
			ppool) &&
	       vm_unmap(vm_locked, layout_rodata_begin(), layout_rodata_end(),
			ppool) &&
	       vm_unmap(vm_locked, layout_data_begin(), layout_data_end(),
			ppool);
}
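
/*
 * Usage sketch (illustrative): during VM setup, a loader would hide the
 * hypervisor image from the VM's stage-2 tables. `setup_ppool` is a
 * hypothetical pool owned by the caller.
 *
 *	struct vm_locked locked = vm_lock(vm);
 *
 *	CHECK(vm_unmap_hypervisor(locked, &setup_ppool));
 *	vm_unlock(&locked);
 */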