/*
 * Copyright 2018 The Hafnium Authors.
 *
 * Use of this source code is governed by a BSD-style
 * license that can be found in the LICENSE file or at
 * https://opensource.org/licenses/BSD-3-Clause.
 */

#include "hf/vm.h"

#include "hf/api.h"
#include "hf/check.h"
#include "hf/cpu.h"
#include "hf/dlog.h"
#include "hf/ffa.h"
#include "hf/layout.h"
#include "hf/plat/iommu.h"
#include "hf/std.h"

#include "vmapi/hf/call.h"

static struct vm vms[MAX_VMS];
static struct vm other_world;
static ffa_vm_count_t vm_count;
static struct vm *first_boot_vm;

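/**
 * Initialises the partition's page table: EL0 partitions use a stage-1 page
 * table, while VMs use a stage-2 page table.
 */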
static bool vm_init_mm(struct vm *vm, struct mpool *ppool)
{
	if (vm->el0_partition) {
		return mm_ptable_init(&vm->ptable, vm->id, MM_FLAG_STAGE1,
				      ppool);
	}
	return mm_vm_init(&vm->ptable, vm->id, ppool);
}

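/**
 * Initialises the VM (or other world) structure for the given ID: clears it,
 * then sets up its mailbox, lock, page table, wait entries, vCPUs and
 * notification bindings. Returns NULL if the page table could not be
 * initialised.
 */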
struct vm *vm_init(ffa_vm_id_t id, ffa_vcpu_count_t vcpu_count,
		   struct mpool *ppool, bool el0_partition)
{
	uint32_t i;
	struct vm *vm;

	if (id == HF_OTHER_WORLD_ID) {
		CHECK(!el0_partition);
		vm = &other_world;
	} else {
		uint16_t vm_index = id - HF_VM_ID_OFFSET;

		CHECK(id >= HF_VM_ID_OFFSET);
		CHECK(vm_index < ARRAY_SIZE(vms));
		vm = &vms[vm_index];
	}

	memset_s(vm, sizeof(*vm), 0, sizeof(*vm));

	list_init(&vm->mailbox.waiter_list);
	list_init(&vm->mailbox.ready_list);
	sl_init(&vm->lock);

	vm->id = id;
	vm->vcpu_count = vcpu_count;
	vm->mailbox.state = MAILBOX_STATE_EMPTY;
	atomic_init(&vm->aborting, false);
	vm->el0_partition = el0_partition;

	if (!vm_init_mm(vm, ppool)) {
		return NULL;
	}

	/* Initialise waiter entries. */
	for (i = 0; i < MAX_VMS; i++) {
		vm->wait_entries[i].waiting_vm = vm;
		list_init(&vm->wait_entries[i].wait_links);
		list_init(&vm->wait_entries[i].ready_links);
	}

	/* Do basic initialization of vCPUs. */
	for (i = 0; i < vcpu_count; i++) {
		vcpu_init(vm_get_vcpu(vm, i), vm);
	}

	/* Basic initialization of the notifications structure. */
	vm_notifications_init_bindings(&vm->notifications.from_sp);
	vm_notifications_init_bindings(&vm->notifications.from_vm);

	return vm;
}

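/**
 * Initialises the next available VM slot, assigning it the next free ID.
 * Returns false if all slots are in use or the VM could not be initialised.
 */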
bool vm_init_next(ffa_vcpu_count_t vcpu_count, struct mpool *ppool,
		  struct vm **new_vm, bool el0_partition)
{
	if (vm_count >= MAX_VMS) {
		return false;
	}

	/* Generate IDs based on an offset, as low IDs (e.g. 0) are reserved. */
	*new_vm = vm_init(vm_count + HF_VM_ID_OFFSET, vcpu_count, ppool,
			  el0_partition);
	if (*new_vm == NULL) {
		return false;
	}
	++vm_count;

	return true;
}
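
/*
 * Illustrative usage (a sketch only, not code from the upstream loader): a
 * caller setting up partitions would typically create each VM and then insert
 * it into the boot list:
 *
 *	struct vm *vm;
 *
 *	if (!vm_init_next(vcpu_count, ppool, &vm, false)) {
 *		return false;
 *	}
 *	vm_update_boot(vm);
 */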

ffa_vm_count_t vm_get_count(void)
{
	return vm_count;
}

/**
 * Returns a pointer to the VM with the corresponding id.
 */
struct vm *vm_find(ffa_vm_id_t id)
{
	uint16_t index;

	if (id == HF_OTHER_WORLD_ID) {
		if (other_world.id == HF_OTHER_WORLD_ID) {
			return &other_world;
		}
		return NULL;
	}

	/* Check that this is not a reserved ID. */
	if (id < HF_VM_ID_OFFSET) {
		return NULL;
	}

	index = id - HF_VM_ID_OFFSET;

	return vm_find_index(index);
}

/**
 * Returns a locked instance of the VM with the corresponding id.
 */
struct vm_locked vm_find_locked(ffa_vm_id_t id)
{
	struct vm *vm = vm_find(id);

	if (vm != NULL) {
		return vm_lock(vm);
	}

	return (struct vm_locked){.vm = NULL};
}

/**
 * Returns a pointer to the VM at the specified index.
 */
struct vm *vm_find_index(uint16_t index)
{
	/* Ensure the VM is initialized. */
	if (index >= vm_count) {
		return NULL;
	}

	return &vms[index];
}

/**
 * Locks the given VM and updates `locked` to hold the newly locked VM.
 */
struct vm_locked vm_lock(struct vm *vm)
{
	struct vm_locked locked = {
		.vm = vm,
	};

	sl_lock(&vm->lock);

	return locked;
}

/**
 * Locks two VMs ensuring that the locking order is according to the locks'
 * addresses.
 */
struct two_vm_locked vm_lock_both(struct vm *vm1, struct vm *vm2)
{
	struct two_vm_locked dual_lock;

	sl_lock_both(&vm1->lock, &vm2->lock);
	dual_lock.vm1.vm = vm1;
	dual_lock.vm2.vm = vm2;

	return dual_lock;
}

/**
 * Unlocks a VM previously locked with vm_lock, and updates `locked` to reflect
 * the fact that the VM is no longer locked.
 */
void vm_unlock(struct vm_locked *locked)
{
	sl_unlock(&locked->vm->lock);
	locked->vm = NULL;
}

/**
 * Get the vCPU with the given index from the given VM.
 * This assumes the index is valid, i.e. less than vm->vcpu_count.
 */
struct vcpu *vm_get_vcpu(struct vm *vm, ffa_vcpu_index_t vcpu_index)
{
	CHECK(vcpu_index < vm->vcpu_count);
	return &vm->vcpus[vcpu_index];
}

/**
Andrew Walbran | aad8f98 | 2019-12-04 10:56:39 +0000 | [diff] [blame] | 212 | * Gets `vm`'s wait entry for waiting on the `for_vm`. |
 */
struct wait_entry *vm_get_wait_entry(struct vm *vm, ffa_vm_id_t for_vm)
{
	uint16_t index;

	CHECK(for_vm >= HF_VM_ID_OFFSET);
	index = for_vm - HF_VM_ID_OFFSET;
	CHECK(index < MAX_VMS);

	return &vm->wait_entries[index];
}

/**
 * Gets the ID of the VM which the given VM's wait entry is for.
 */
ffa_vm_id_t vm_id_for_wait_entry(struct vm *vm, struct wait_entry *entry)
{
	uint16_t index = entry - vm->wait_entries;

	return index + HF_VM_ID_OFFSET;
}

/**
 * Return whether the given VM ID represents an entity in the current world:
 * i.e. the hypervisor or a normal world VM when running in the normal world,
 * or the SPM or an SP when running in the secure world.
 */
bool vm_id_is_current_world(ffa_vm_id_t vm_id)
{
	return (vm_id & HF_VM_ID_WORLD_MASK) !=
	       (HF_OTHER_WORLD_ID & HF_VM_ID_WORLD_MASK);
}

/**
 * Map a range of addresses to the VM in both the MMU and the IOMMU.
 *
 * mm_vm_defrag should always be called after a series of page table updates,
 * whether they succeed or fail. This is because on failure extra page table
 * entries may have been allocated and then not used, while on success it may
 * be possible to compact the page table by merging several entries into a
 * block.
 *
 * Returns true on success, or false if the update failed and no changes were
 * made.
 */
bool vm_identity_map(struct vm_locked vm_locked, paddr_t begin, paddr_t end,
		     uint32_t mode, struct mpool *ppool, ipaddr_t *ipa)
{
	if (!vm_identity_prepare(vm_locked, begin, end, mode, ppool)) {
		return false;
	}

	vm_identity_commit(vm_locked, begin, end, mode, ppool, ipa);

	return true;
}
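
/*
 * Illustrative usage (a sketch only, with an assumed memory mode): after a
 * series of mappings the caller would normally defragment the page table,
 * while still holding the VM lock:
 *
 *	if (vm_identity_map(vm_locked, begin, end, MM_MODE_R | MM_MODE_W,
 *			    ppool, NULL)) {
 *		vm_ptable_defrag(vm_locked, ppool);
 *	}
 */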

/**
 * Prepares the given VM for the given address mapping such that it will be
 * able to commit the change without failure.
 *
 * In particular, multiple calls to this function will result in the
 * corresponding calls to commit the changes succeeding.
 *
 * Returns true on success, or false if the update failed and no changes were
 * made.
 */
bool vm_identity_prepare(struct vm_locked vm_locked, paddr_t begin, paddr_t end,
			 uint32_t mode, struct mpool *ppool)
{
	if (vm_locked.vm->el0_partition) {
		return mm_identity_prepare(&vm_locked.vm->ptable, begin, end,
					   mode, ppool);
	}
	return mm_vm_identity_prepare(&vm_locked.vm->ptable, begin, end, mode,
				      ppool);
}

/**
 * Commits the given address mapping to the VM, assuming the operation cannot
 * fail. `vm_identity_prepare` must be used correctly before this to ensure
 * this condition.
 */
void vm_identity_commit(struct vm_locked vm_locked, paddr_t begin, paddr_t end,
			uint32_t mode, struct mpool *ppool, ipaddr_t *ipa)
{
	if (vm_locked.vm->el0_partition) {
		mm_identity_commit(&vm_locked.vm->ptable, begin, end, mode,
				   ppool);
		if (ipa != NULL) {
			/*
			 * EL0 partitions are modeled as lightweight VMs to
			 * promote code reuse. The statement below returns the
			 * mapped PA as an IPA; however, for an EL0 partition
			 * this is really a VA.
			 */
			*ipa = ipa_from_pa(begin);
		}
	} else {
		mm_vm_identity_commit(&vm_locked.vm->ptable, begin, end, mode,
				      ppool, ipa);
	}
	plat_iommu_identity_map(vm_locked, begin, end, mode);
}

/**
 * Unmap a range of addresses from the VM.
 *
 * Returns true on success, or false if the update failed and no changes were
 * made.
 */
bool vm_unmap(struct vm_locked vm_locked, paddr_t begin, paddr_t end,
	      struct mpool *ppool)
{
	uint32_t mode = MM_MODE_UNMAPPED_MASK;

	return vm_identity_map(vm_locked, begin, end, mode, ppool, NULL);
}

/**
 * Defrag page tables for an EL0 partition or for a VM.
 */
void vm_ptable_defrag(struct vm_locked vm_locked, struct mpool *ppool)
{
	if (vm_locked.vm->el0_partition) {
		mm_stage1_defrag(&vm_locked.vm->ptable, ppool);
	} else {
		mm_vm_defrag(&vm_locked.vm->ptable, ppool);
	}
}

/**
 * Unmaps the hypervisor pages from the given page table.
 */
bool vm_unmap_hypervisor(struct vm_locked vm_locked, struct mpool *ppool)
{
	/* TODO: If we add pages dynamically, they must be included here too. */
	return vm_unmap(vm_locked, layout_text_begin(), layout_text_end(),
			ppool) &&
	       vm_unmap(vm_locked, layout_rodata_begin(), layout_rodata_end(),
			ppool) &&
	       vm_unmap(vm_locked, layout_data_begin(), layout_data_end(),
			ppool);
}

/**
 * Gets the first partition to boot, according to the FF-A boot protocol.
 */
struct vm *vm_get_first_boot(void)
{
	return first_boot_vm;
}

/**
 * Inserts the VM into the boot list, which is sorted by the `boot_order` field
 * of the vm structure and rooted at `first_boot_vm`.
 */
void vm_update_boot(struct vm *vm)
{
	struct vm *current = NULL;
	struct vm *previous = NULL;

	if (first_boot_vm == NULL) {
		first_boot_vm = vm;
		return;
	}

	current = first_boot_vm;

	while (current != NULL && current->boot_order >= vm->boot_order) {
		previous = current;
		current = current->next_boot;
	}

	if (previous != NULL) {
		previous->next_boot = vm;
	} else {
		first_boot_vm = vm;
	}

	vm->next_boot = current;
}

/**
 * Gets the mode of the given range of IPAs (or VAs, for an EL0 partition) if
 * they are all mapped with the same mode.
 *
 * Returns true if the range is mapped with the same mode and false otherwise.
 * The wrapper calls the appropriate mm function depending on whether the
 * partition is a VM or an EL0 partition.
 */
bool vm_mem_get_mode(struct vm_locked vm_locked, ipaddr_t begin, ipaddr_t end,
		     uint32_t *mode)
{
	if (vm_locked.vm->el0_partition) {
		return mm_get_mode(&vm_locked.vm->ptable,
				   va_from_pa(pa_from_ipa(begin)),
				   va_from_pa(pa_from_ipa(end)), mode);
	}
	return mm_vm_get_mode(&vm_locked.vm->ptable, begin, end, mode);
}

/**
 * Initializes the sender bindings in the given notifications structure.
 */
void vm_notifications_init_bindings(struct notifications *notifications)
{
	for (uint32_t i = 0U; i < MAX_FFA_NOTIFICATIONS; i++) {
		notifications->bindings_sender_id[i] = HF_INVALID_VM_ID;
	}
}

/**
 * Checks if there are pending notifications.
 */
bool vm_are_notifications_pending(struct vm_locked vm_locked, bool from_vm,
				  ffa_notifications_bitmap_t notifications)
{
	struct notifications *to_check;

	CHECK(vm_locked.vm != NULL);

	to_check = from_vm ? &vm_locked.vm->notifications.from_vm
			   : &vm_locked.vm->notifications.from_sp;

	/* Check if there are pending per vcpu notifications */
	for (uint32_t i = 0U; i < MAX_CPUS; i++) {
		if ((to_check->per_vcpu[i].pending & notifications) != 0U) {
			return true;
		}
	}

	/* Check if there are global pending notifications */
	return (to_check->global.pending & notifications) != 0U;
}

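/**
 * Returns whether notifications are enabled for the given VM.
 */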
bool vm_are_notifications_enabled(struct vm_locked vm_locked)
{
	return vm_locked.vm->notifications.enabled;
}

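/**
 * Returns whether notification `i` is set in the given bitmap.
 */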
static bool vm_is_notification_bit_set(ffa_notifications_bitmap_t notifications,
				       uint32_t i)
{
	return (notifications & FFA_NOTIFICATION_MASK(i)) != 0U;
}

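/**
 * Returns the VM's notifications structure for notifications coming from VMs
 * or from SPs, as requested.
 */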
static struct notifications *vm_get_notifications(struct vm_locked vm_locked,
						  bool is_from_vm)
{
	return is_from_vm ? &vm_locked.vm->notifications.from_vm
			  : &vm_locked.vm->notifications.from_sp;
}

/**
 * Checks that all provided notifications are bound to the specified sender,
 * and are per VCPU or global, as specified.
 */
bool vm_notifications_validate_binding(struct vm_locked vm_locked,
				       bool is_from_vm, ffa_vm_id_t sender_id,
				       ffa_notifications_bitmap_t notifications,
				       bool is_per_vcpu)
{
	return vm_notifications_validate_bound_sender(
		       vm_locked, is_from_vm, sender_id, notifications) &&
	       vm_notifications_validate_per_vcpu(vm_locked, is_from_vm,
						  is_per_vcpu, notifications);
}

/**
 * Updates the binding information in the notifications structure for the
 * specified notifications.
 */
void vm_notifications_update_bindings(struct vm_locked vm_locked,
				      bool is_from_vm, ffa_vm_id_t sender_id,
				      ffa_notifications_bitmap_t notifications,
				      bool is_per_vcpu)
{
	CHECK(vm_locked.vm != NULL);
	struct notifications *to_update =
		vm_get_notifications(vm_locked, is_from_vm);

	for (uint32_t i = 0; i < MAX_FFA_NOTIFICATIONS; i++) {
		if (vm_is_notification_bit_set(notifications, i)) {
			to_update->bindings_sender_id[i] = sender_id;
		}
	}

	/*
	 * Mark the bindings as per-vCPU if requested, otherwise clear the bits
	 * so the notifications are treated as global.
	 */
	if (is_per_vcpu) {
		to_update->bindings_per_vcpu |= notifications;
	} else {
		to_update->bindings_per_vcpu &= ~notifications;
	}
}

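/**
 * Checks that the specified notifications are all bound to the given sender.
 */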
bool vm_notifications_validate_bound_sender(
	struct vm_locked vm_locked, bool is_from_vm, ffa_vm_id_t sender_id,
	ffa_notifications_bitmap_t notifications)
{
	CHECK(vm_locked.vm != NULL);
	struct notifications *to_check =
		vm_get_notifications(vm_locked, is_from_vm);

	for (uint32_t i = 0; i < MAX_FFA_NOTIFICATIONS; i++) {
		if (vm_is_notification_bit_set(notifications, i) &&
		    to_check->bindings_sender_id[i] != sender_id) {
			return false;
		}
	}

	return true;
}

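/**
 * Checks that the specified notifications are all bound as per-vCPU, or all
 * bound as global, as requested.
 */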
bool vm_notifications_validate_per_vcpu(struct vm_locked vm_locked,
					bool is_from_vm, bool is_per_vcpu,
					ffa_notifications_bitmap_t notif)
{
	CHECK(vm_locked.vm != NULL);
	struct notifications *to_check =
		vm_get_notifications(vm_locked, is_from_vm);

	return is_per_vcpu ? (~to_check->bindings_per_vcpu & notif) == 0U
			   : (to_check->bindings_per_vcpu & notif) == 0U;
}

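/**
 * Sets the given notifications as pending, either for the given vCPU or
 * globally for the VM.
 */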
void vm_notifications_set(struct vm_locked vm_locked, bool is_from_vm,
			  ffa_notifications_bitmap_t notifications,
			  ffa_vcpu_index_t vcpu_id, bool is_per_vcpu)
{
	CHECK(vm_locked.vm != NULL);
	struct notifications *to_set =
		vm_get_notifications(vm_locked, is_from_vm);
	CHECK(vcpu_id < MAX_CPUS);

	if (is_per_vcpu) {
		to_set->per_vcpu[vcpu_id].pending |= notifications;
	} else {
		to_set->global.pending |= notifications;
	}
}

/**
 * Gets and clears the pending global notifications, plus the per-vCPU
 * notifications of the current vCPU only.
 */
ffa_notifications_bitmap_t vm_notifications_get_pending_and_clear(
	struct vm_locked vm_locked, bool is_from_vm,
	ffa_vcpu_index_t cur_vcpu_id)
{
	ffa_notifications_bitmap_t to_ret = 0;

	CHECK(vm_locked.vm != NULL);
	struct notifications *to_get =
		vm_get_notifications(vm_locked, is_from_vm);
	CHECK(cur_vcpu_id < MAX_CPUS);

	to_ret |= to_get->global.pending;
	to_get->global.pending = 0U;
	to_get->global.info_get_retrieved = 0U;

	to_ret |= to_get->per_vcpu[cur_vcpu_id].pending;
	to_get->per_vcpu[cur_vcpu_id].pending = 0U;
	to_get->per_vcpu[cur_vcpu_id].info_get_retrieved = 0U;

	return to_ret;
}

/**
 * Gets info on pending notifications to return to the receiver scheduler.
 */
void vm_notifications_info_get_pending(
	struct vm_locked vm_locked, bool is_from_vm, uint16_t *ids,
	uint32_t *ids_count, uint32_t *lists_sizes, uint32_t *lists_count,
	const uint32_t ids_max_count,
	enum notifications_info_get_state *info_get_state)
{
	ffa_notifications_bitmap_t pending_not_retrieved;

	CHECK(vm_locked.vm != NULL);
	struct notifications *notifications =
		vm_get_notifications(vm_locked, is_from_vm);

	if (*info_get_state == FULL) {
		return;
	}

	CHECK(*ids_count <= ids_max_count);
	CHECK(*lists_count <= ids_max_count);

	pending_not_retrieved = notifications->global.pending &
				~notifications->global.info_get_retrieved;

	if (pending_not_retrieved != 0U && *info_get_state == INIT) {
		/*
		 * If the state is INIT, no list has been created for the
		 * given VM ID yet, which also means that global notifications
		 * are not yet represented in the list.
		 */
		if (*ids_count == ids_max_count) {
			*info_get_state = FULL;
			return;
		}

		*info_get_state = INSERTING;

		(*lists_count)++;
		ids[*ids_count] = vm_locked.vm->id;
		++(*ids_count);
	}

	notifications->global.info_get_retrieved |= pending_not_retrieved;

	for (ffa_vcpu_count_t i = 0; i < vm_locked.vm->vcpu_count; i++) {
		/*
		 * Include VCPU ID of per-VCPU notifications.
		 */
		pending_not_retrieved =
			notifications->per_vcpu[i].pending &
			~notifications->per_vcpu[i].info_get_retrieved;

		if (pending_not_retrieved == 0U) {
			continue;
		}

		switch (*info_get_state) {
		case INIT:
		case STARTING_NEW:
			/*
			 * In this iteration two IDs need to be added: the VM
			 * ID and the vCPU ID. If there is not enough space,
			 * change state and return.
			 */
			if (ids_max_count - *ids_count < 2) {
				*info_get_state = FULL;
				return;
			}

			ids[*ids_count] = vm_locked.vm->id;
			++(*ids_count);

			/* Insert VCPU ID */
			ids[*ids_count] = i;
			++(*ids_count);

			++lists_sizes[*lists_count];
			++(*lists_count);

			*info_get_state = INSERTING;
			break;
		case INSERTING:
			if (*ids_count == ids_max_count) {
				*info_get_state = FULL;
				return;
			}

			/* Insert VCPU ID */
			ids[*ids_count] = i;
			(*ids_count)++;

			/* Increment respective list size */
			++lists_sizes[*lists_count - 1];

			if (lists_sizes[*lists_count - 1] == 3) {
				*info_get_state = STARTING_NEW;
			}
			break;
		default:
			panic("Notification info get action error!!\n");
		}

		notifications->per_vcpu[i].info_get_retrieved |=
			pending_not_retrieved;
	}
}

/**
 * Gets all info from the VM's pending notifications.
 * Returns true if the list is full and there are still notifications pending.
 */
bool vm_notifications_info_get(struct vm_locked vm_locked, uint16_t *ids,
			       uint32_t *ids_count, uint32_t *lists_sizes,
			       uint32_t *lists_count,
			       const uint32_t ids_max_count)
{
	enum notifications_info_get_state current_state = INIT;

	/* Get info of pending notifications from SPs */
	vm_notifications_info_get_pending(vm_locked, false, ids, ids_count,
					  lists_sizes, lists_count,
					  ids_max_count, &current_state);

	/* Get info of pending notifications from VMs */
	vm_notifications_info_get_pending(vm_locked, true, ids, ids_count,
					  lists_sizes, lists_count,
					  ids_max_count, &current_state);

	/*
	 * The state transitions to FULL when trying to insert a new ID in the
	 * list and there is no more space. This means there are notifications
	 * pending whose info was not retrieved.
	 */
	return current_state == FULL;
}