/*
 * Copyright 2018 The Hafnium Authors.
 *
 * Use of this source code is governed by a BSD-style
 * license that can be found in the LICENSE file or at
 * https://opensource.org/licenses/BSD-3-Clause.
 */

#include "hf/vm.h"

#include "hf/api.h"
#include "hf/check.h"
#include "hf/cpu.h"
#include "hf/dlog.h"
#include "hf/ffa.h"
#include "hf/layout.h"
#include "hf/plat/iommu.h"
#include "hf/std.h"

#include "vmapi/hf/call.h"

static struct vm vms[MAX_VMS];
static struct vm other_world;
static ffa_vm_count_t vm_count;
static struct vm *first_boot_vm;

static bool vm_init_mm(struct vm *vm, struct mpool *ppool)
{
	if (vm->el0_partition) {
		return mm_ptable_init(&vm->ptable, vm->id, MM_FLAG_STAGE1,
				      ppool);
	}
	return mm_vm_init(&vm->ptable, vm->id, ppool);
}

struct vm *vm_init(ffa_vm_id_t id, ffa_vcpu_count_t vcpu_count,
		   struct mpool *ppool, bool el0_partition)
{
	uint32_t i;
	struct vm *vm;

	if (id == HF_OTHER_WORLD_ID) {
		CHECK(el0_partition == false);
		vm = &other_world;
	} else {
		uint16_t vm_index = id - HF_VM_ID_OFFSET;

		CHECK(id >= HF_VM_ID_OFFSET);
		CHECK(vm_index < ARRAY_SIZE(vms));
		vm = &vms[vm_index];
	}

	memset_s(vm, sizeof(*vm), 0, sizeof(*vm));

	list_init(&vm->mailbox.waiter_list);
	list_init(&vm->mailbox.ready_list);
	sl_init(&vm->lock);

	vm->id = id;
	vm->vcpu_count = vcpu_count;
	vm->mailbox.state = MAILBOX_STATE_EMPTY;
	atomic_init(&vm->aborting, false);
	vm->el0_partition = el0_partition;

	if (!vm_init_mm(vm, ppool)) {
		return NULL;
	}

	/* Initialise waiter entries. */
	for (i = 0; i < MAX_VMS; i++) {
		vm->wait_entries[i].waiting_vm = vm;
		list_init(&vm->wait_entries[i].wait_links);
		list_init(&vm->wait_entries[i].ready_links);
	}

	/* Do basic initialization of vCPUs. */
	for (i = 0; i < vcpu_count; i++) {
		vcpu_init(vm_get_vcpu(vm, i), vm);
	}

	/* Basic initialization of the notifications structure. */
	vm_notifications_init_bindings(&vm->notifications.from_sp);
	vm_notifications_init_bindings(&vm->notifications.from_vm);

	/* TODO: Enable in accordance with the VM's manifest. */
	vm->notifications.enabled = true;

	return vm;
}

bool vm_init_next(ffa_vcpu_count_t vcpu_count, struct mpool *ppool,
		  struct vm **new_vm, bool el0_partition)
{
	if (vm_count >= MAX_VMS) {
		return false;
	}

	/* Generate IDs based on an offset: low IDs (e.g. 0) are reserved. */
	*new_vm = vm_init(vm_count + HF_VM_ID_OFFSET, vcpu_count, ppool,
			  el0_partition);
	if (*new_vm == NULL) {
		return false;
	}
	++vm_count;

	return true;
}
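
/*
 * Usage sketch (illustrative, not part of the original source): a loader
 * creating a secondary VM. `manifest_vcpu_count` and `boot_ppool` are
 * hypothetical names for this example.
 *
 *	struct vm *vm;
 *
 *	if (!vm_init_next(manifest_vcpu_count, boot_ppool, &vm, false)) {
 *		dlog_error("Unable to initialise VM.\n");
 *		return false;
 *	}
 *
 * The first call yields ID HF_VM_ID_OFFSET, the next HF_VM_ID_OFFSET + 1,
 * and so on, mirroring the order of creation.
 */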

ffa_vm_count_t vm_get_count(void)
{
	return vm_count;
}

/**
 * Returns a pointer to the VM with the corresponding id.
 */
struct vm *vm_find(ffa_vm_id_t id)
{
	uint16_t index;

	if (id == HF_OTHER_WORLD_ID) {
		if (other_world.id == HF_OTHER_WORLD_ID) {
			return &other_world;
		}
		return NULL;
	}

	/* Check that this is not a reserved ID. */
	if (id < HF_VM_ID_OFFSET) {
		return NULL;
	}

	index = id - HF_VM_ID_OFFSET;

	return vm_find_index(index);
}

/**
 * Returns a locked instance of the VM with the corresponding id.
 */
struct vm_locked vm_find_locked(ffa_vm_id_t id)
{
	struct vm *vm = vm_find(id);

	if (vm != NULL) {
		return vm_lock(vm);
	}

	return (struct vm_locked){.vm = NULL};
}
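
/*
 * Usage sketch (illustrative): the check for failure is on the embedded
 * pointer, since the handle itself is returned by value.
 *
 *	struct vm_locked target_locked = vm_find_locked(target_id);
 *
 *	if (target_locked.vm == NULL) {
 *		return false;
 *	}
 *	...
 *	vm_unlock(&target_locked);
 */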

/**
 * Returns a pointer to the VM at the specified index.
 */
struct vm *vm_find_index(uint16_t index)
{
	/* Ensure the VM is initialized. */
	if (index >= vm_count) {
		return NULL;
	}

	return &vms[index];
}

/**
 * Locks the given VM and returns a handle recording that it is locked.
 */
struct vm_locked vm_lock(struct vm *vm)
{
	struct vm_locked locked = {
		.vm = vm,
	};

	sl_lock(&vm->lock);

	return locked;
}
/**
 * Locks two VMs, acquiring the locks in the order of the locks' addresses
 * so that concurrent callers cannot deadlock.
 */
struct two_vm_locked vm_lock_both(struct vm *vm1, struct vm *vm2)
{
	struct two_vm_locked dual_lock;

	sl_lock_both(&vm1->lock, &vm2->lock);
	dual_lock.vm1.vm = vm1;
	dual_lock.vm2.vm = vm2;

	return dual_lock;
}
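
/*
 * Usage sketch (illustrative): because acquisition order is determined by
 * the locks' addresses rather than by argument order, two callers locking
 * the same pair of VMs in opposite orders cannot deadlock.
 *
 *	struct two_vm_locked both = vm_lock_both(sender, receiver);
 *	...
 *	vm_unlock(&both.vm1);
 *	vm_unlock(&both.vm2);
 */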

/**
 * Unlocks a VM previously locked with vm_lock, and updates `locked` to reflect
 * the fact that the VM is no longer locked.
 */
void vm_unlock(struct vm_locked *locked)
{
	sl_unlock(&locked->vm->lock);
	locked->vm = NULL;
}

/**
 * Gets the vCPU with the given index from the given VM.
 * This assumes the index is valid, i.e. less than vm->vcpu_count.
 */
struct vcpu *vm_get_vcpu(struct vm *vm, ffa_vcpu_index_t vcpu_index)
{
	CHECK(vcpu_index < vm->vcpu_count);
	return &vm->vcpus[vcpu_index];
}

/**
 * Gets `vm`'s wait entry for waiting on `for_vm`.
 */
struct wait_entry *vm_get_wait_entry(struct vm *vm, ffa_vm_id_t for_vm)
{
	uint16_t index;

	CHECK(for_vm >= HF_VM_ID_OFFSET);
	index = for_vm - HF_VM_ID_OFFSET;
	CHECK(index < MAX_VMS);

	return &vm->wait_entries[index];
}

/**
 * Gets the ID of the VM which the given VM's wait entry is for.
 */
ffa_vm_id_t vm_id_for_wait_entry(struct vm *vm, struct wait_entry *entry)
{
	uint16_t index = entry - vm->wait_entries;

	return index + HF_VM_ID_OFFSET;
}
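
/*
 * Note: the two accessors above are inverses. For any valid `for_vm`:
 *
 *	vm_id_for_wait_entry(vm, vm_get_wait_entry(vm, for_vm)) == for_vm
 *
 * since a wait entry's index in `vm->wait_entries` is the target VM ID
 * minus HF_VM_ID_OFFSET.
 */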

/**
 * Returns whether the given VM ID represents an entity in the current world:
 * i.e. the hypervisor or a normal world VM when running in the normal world, or
 * the SPM or an SP when running in the secure world.
 */
bool vm_id_is_current_world(ffa_vm_id_t vm_id)
{
	return (vm_id & HF_VM_ID_WORLD_MASK) !=
	       (HF_OTHER_WORLD_ID & HF_VM_ID_WORLD_MASK);
}
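
/*
 * For example, assuming the usual FF-A ID split where HF_VM_ID_WORLD_MASK
 * selects the world bit: when compiled for the secure world, an SP ID such
 * as 0x8001 (world bit set) is in the current world, while a normal-world
 * VM ID such as 0x0001 is not. The concrete values are illustrative.
 */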

/**
 * Maps a range of addresses to the VM in both the MMU and the IOMMU.
 *
 * mm_vm_defrag should always be called after a series of page table updates,
 * whether they succeed or fail. This is because on failure extra page table
 * entries may have been allocated and then not used, while on success it may
 * be possible to compact the page table by merging several entries into a
 * block.
 *
 * Returns true on success, or false if the update failed and no changes were
 * made.
 */
bool vm_identity_map(struct vm_locked vm_locked, paddr_t begin, paddr_t end,
		     uint32_t mode, struct mpool *ppool, ipaddr_t *ipa)
{
	if (!vm_identity_prepare(vm_locked, begin, end, mode, ppool)) {
		return false;
	}

	vm_identity_commit(vm_locked, begin, end, mode, ppool, ipa);

	return true;
}
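
/*
 * Sketch of a caller (illustrative; `dev_begin`, `dev_end` and the mode
 * bits are hypothetical for this example): mapping a device region into a
 * VM. Because the fallible allocation happens in the prepare step, the
 * commit step cannot fail halfway through.
 *
 *	if (!vm_identity_map(vm_locked, dev_begin, dev_end,
 *			     MM_MODE_R | MM_MODE_W | MM_MODE_D, ppool,
 *			     NULL)) {
 *		return false;
 *	}
 */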

/**
 * Prepares the given VM for the given address mapping such that it will be
 * able to commit the change without failure.
 *
 * In particular, multiple calls to this function will result in the
 * corresponding calls to commit the changes succeeding.
 *
 * Returns true on success, or false if the update failed and no changes were
 * made.
 */
bool vm_identity_prepare(struct vm_locked vm_locked, paddr_t begin, paddr_t end,
			 uint32_t mode, struct mpool *ppool)
{
	if (vm_locked.vm->el0_partition) {
		return mm_identity_prepare(&vm_locked.vm->ptable, begin, end,
					   mode, ppool);
	}
	return mm_vm_identity_prepare(&vm_locked.vm->ptable, begin, end, mode,
				      ppool);
}

/**
 * Commits the given address mapping to the VM assuming the operation cannot
 * fail. `vm_identity_prepare` must be used correctly before this to ensure
 * this condition.
 */
void vm_identity_commit(struct vm_locked vm_locked, paddr_t begin, paddr_t end,
			uint32_t mode, struct mpool *ppool, ipaddr_t *ipa)
{
	if (vm_locked.vm->el0_partition) {
		mm_identity_commit(&vm_locked.vm->ptable, begin, end, mode,
				   ppool);
		if (ipa != NULL) {
			/*
			 * EL0 partitions are modeled as lightweight VMs to
			 * promote code reuse. The statement below returns the
			 * mapped PA as an IPA; however, for an EL0 partition
			 * this is really a VA.
			 */
			*ipa = ipa_from_pa(begin);
		}
	} else {
		mm_vm_identity_commit(&vm_locked.vm->ptable, begin, end, mode,
				      ppool, ipa);
	}
	plat_iommu_identity_map(vm_locked, begin, end, mode);
}

/**
 * Unmaps a range of addresses from the VM.
 *
 * Returns true on success, or false if the update failed and no changes were
 * made.
 */
bool vm_unmap(struct vm_locked vm_locked, paddr_t begin, paddr_t end,
	      struct mpool *ppool)
{
	uint32_t mode = MM_MODE_UNMAPPED_MASK;

	return vm_identity_map(vm_locked, begin, end, mode, ppool, NULL);
}

/**
 * Unmaps the hypervisor pages from the given page table.
 */
bool vm_unmap_hypervisor(struct vm_locked vm_locked, struct mpool *ppool)
{
	/* TODO: If we add pages dynamically, they must be included here too. */
	return vm_unmap(vm_locked, layout_text_begin(), layout_text_end(),
			ppool) &&
	       vm_unmap(vm_locked, layout_rodata_begin(), layout_rodata_end(),
			ppool) &&
	       vm_unmap(vm_locked, layout_data_begin(), layout_data_end(),
			ppool);
}

/**
 * Gets the first partition to boot, according to the boot protocol in the
 * FF-A spec.
 */
struct vm *vm_get_first_boot(void)
{
	return first_boot_vm;
}

/**
 * Inserts the VM into the boot list, which is sorted by the `boot_order`
 * field of the VM structure and rooted at `first_boot_vm`.
 */
void vm_update_boot(struct vm *vm)
{
	struct vm *current = NULL;
	struct vm *previous = NULL;

	if (first_boot_vm == NULL) {
		first_boot_vm = vm;
		return;
	}

	current = first_boot_vm;

	while (current != NULL && current->boot_order >= vm->boot_order) {
		previous = current;
		current = current->next_boot;
	}

	if (previous != NULL) {
		previous->next_boot = vm;
	} else {
		first_boot_vm = vm;
	}

	vm->next_boot = current;
}
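
/*
 * Illustration: inserting VMs with `boot_order` values 3, 1 and 2 (in any
 * insertion order) yields the list 3 -> 2 -> 1 rooted at `first_boot_vm`,
 * i.e. the highest `boot_order` boots first. Among equal values, earlier
 * insertions stay ahead, because the walk skips entries whose `boot_order`
 * is greater than or equal to the new VM's.
 */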

/**
 * Initializes the bindings in the given notifications structure.
 */
void vm_notifications_init_bindings(struct notifications *notifications)
{
	for (uint32_t i = 0U; i < MAX_FFA_NOTIFICATIONS; i++) {
		notifications->bindings_sender_id[i] = HF_INVALID_VM_ID;
	}
}

/**
 * Gets the mode of the given range of IPAs (or VAs, for an EL0 partition) if
 * they are mapped with the same mode.
 *
 * Returns true if the range is mapped with the same mode and false otherwise.
 * The wrapper calls the appropriate mm function depending on whether the
 * partition is a VM or an EL0 partition.
 */
bool vm_mem_get_mode(struct vm_locked vm_locked, ipaddr_t begin, ipaddr_t end,
		     uint32_t *mode)
{
	if (vm_locked.vm->el0_partition) {
		return mm_get_mode(&vm_locked.vm->ptable,
				   va_from_pa(pa_from_ipa(begin)),
				   va_from_pa(pa_from_ipa(end)), mode);
	}
	return mm_vm_get_mode(&vm_locked.vm->ptable, begin, end, mode);
}
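
/*
 * Usage sketch (illustrative): verifying that a range is mapped read-write
 * with a single consistent mode before acting on it.
 *
 *	uint32_t mode;
 *
 *	if (!vm_mem_get_mode(vm_locked, begin, end, &mode) ||
 *	    (mode & (MM_MODE_R | MM_MODE_W)) != (MM_MODE_R | MM_MODE_W)) {
 *		return false;
 *	}
 */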