/*
 * Copyright 2018 The Hafnium Authors.
 *
 * Use of this source code is governed by a BSD-style
 * license that can be found in the LICENSE file or at
 * https://opensource.org/licenses/BSD-3-Clause.
 */

#include "hf/vm.h"

#include "hf/api.h"
#include "hf/assert.h"
#include "hf/check.h"
#include "hf/cpu.h"
#include "hf/dlog.h"
#include "hf/ffa.h"
#include "hf/layout.h"
#include "hf/plat/iommu.h"
#include "hf/std.h"

#include "vmapi/hf/call.h"

static struct vm vms[MAX_VMS];
static struct vm other_world;
static ffa_vm_count_t vm_count;
static struct vm *first_boot_vm;

/**
 * Counters tracking the status of notifications in the system. They help
 * improve the information returned to the receiver scheduler.
 */
static struct {
	/** Counts pending notifications. */
	uint32_t pending_count;
	/**
	 * Counts pending notifications that have been retrieved by the
	 * receiver scheduler.
	 */
	uint32_t info_get_retrieved_count;
	struct spinlock lock;
} all_notifications_state;

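/**
 * Initialises the VM's page table: EL0 partitions get a stage-1 table, while
 * VMs get a stage-2 table. Returns true on success.
 */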
static bool vm_init_mm(struct vm *vm, struct mpool *ppool)
{
	if (vm->el0_partition) {
		return mm_ptable_init(&vm->ptable, vm->id, MM_FLAG_STAGE1,
				      ppool);
	}
	return mm_vm_init(&vm->ptable, vm->id, ppool);
}

struct vm *vm_init(ffa_vm_id_t id, ffa_vcpu_count_t vcpu_count,
		   struct mpool *ppool, bool el0_partition)
{
	uint32_t i;
	struct vm *vm;
	size_t vcpu_ppool_entries = (align_up(sizeof(struct vcpu) * vcpu_count,
					      MM_PPOOL_ENTRY_SIZE) /
				     MM_PPOOL_ENTRY_SIZE);

	if (id == HF_OTHER_WORLD_ID) {
		CHECK(el0_partition == false);
		vm = &other_world;
	} else {
		uint16_t vm_index = id - HF_VM_ID_OFFSET;

		CHECK(id >= HF_VM_ID_OFFSET);
		CHECK(vm_index < ARRAY_SIZE(vms));
		vm = &vms[vm_index];
	}

	memset_s(vm, sizeof(*vm), 0, sizeof(*vm));

	list_init(&vm->mailbox.waiter_list);
	list_init(&vm->mailbox.ready_list);
	sl_init(&vm->lock);

	vm->id = id;
	vm->vcpu_count = vcpu_count;

	vm->vcpus = (struct vcpu *)mpool_alloc_contiguous(
		ppool, vcpu_ppool_entries, 1);
	CHECK(vm->vcpus != NULL);

	vm->mailbox.state = MAILBOX_STATE_EMPTY;
	atomic_init(&vm->aborting, false);
	vm->el0_partition = el0_partition;

	if (!vm_init_mm(vm, ppool)) {
		return NULL;
	}

	/* Initialise waiter entries. */
	for (i = 0; i < MAX_VMS; i++) {
		vm->wait_entries[i].waiting_vm = vm;
		list_init(&vm->wait_entries[i].wait_links);
		list_init(&vm->wait_entries[i].ready_links);
	}

	/* Do basic initialization of vCPUs. */
	for (i = 0; i < vcpu_count; i++) {
		vcpu_init(vm_get_vcpu(vm, i), vm);
	}

	vm_notifications_init(vm, vcpu_count, ppool);
	return vm;
}

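/**
 * Initialises the next available VM, assigning it the next sequential ID
 * after the reserved offset. Returns false if all MAX_VMS slots are in use
 * or initialisation fails.
 */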
bool vm_init_next(ffa_vcpu_count_t vcpu_count, struct mpool *ppool,
		  struct vm **new_vm, bool el0_partition)
{
	if (vm_count >= MAX_VMS) {
		return false;
	}

	/* Generate IDs based on an offset, as low IDs, e.g. 0, are reserved. */
	*new_vm = vm_init(vm_count + HF_VM_ID_OFFSET, vcpu_count, ppool,
			  el0_partition);
	if (*new_vm == NULL) {
		return false;
	}
	++vm_count;

	return true;
}

ffa_vm_count_t vm_get_count(void)
{
	return vm_count;
}

/**
 * Returns a pointer to the VM with the corresponding id.
 */
struct vm *vm_find(ffa_vm_id_t id)
{
	uint16_t index;

	if (id == HF_OTHER_WORLD_ID) {
		if (other_world.id == HF_OTHER_WORLD_ID) {
			return &other_world;
		}
		return NULL;
	}

	/* Check that this is not a reserved ID. */
	if (id < HF_VM_ID_OFFSET) {
		return NULL;
	}

	index = id - HF_VM_ID_OFFSET;

	return vm_find_index(index);
}

/**
 * Returns a locked instance of the VM with the corresponding id.
 */
struct vm_locked vm_find_locked(ffa_vm_id_t id)
{
	struct vm *vm = vm_find(id);

	if (vm != NULL) {
		return vm_lock(vm);
	}

	return (struct vm_locked){.vm = NULL};
}

/**
 * Returns a pointer to the VM at the specified index.
 */
struct vm *vm_find_index(uint16_t index)
{
	/* Ensure the VM is initialized. */
	if (index >= vm_count) {
		return NULL;
	}

	return &vms[index];
}

/**
 * Locks the given VM and returns a handle that holds the newly locked VM.
 */
struct vm_locked vm_lock(struct vm *vm)
{
	struct vm_locked locked = {
		.vm = vm,
	};

	sl_lock(&vm->lock);

	return locked;
}

/**
 * Locks two VMs, ensuring that they are acquired in order of the locks'
 * addresses so as to avoid deadlock.
 */
struct two_vm_locked vm_lock_both(struct vm *vm1, struct vm *vm2)
{
	struct two_vm_locked dual_lock;

	sl_lock_both(&vm1->lock, &vm2->lock);
	dual_lock.vm1.vm = vm1;
	dual_lock.vm2.vm = vm2;

	return dual_lock;
}

/**
 * Unlocks a VM previously locked with vm_lock, and updates `locked` to reflect
 * the fact that the VM is no longer locked.
 */
void vm_unlock(struct vm_locked *locked)
{
	sl_unlock(&locked->vm->lock);
	locked->vm = NULL;
}

/**
 * Gets the vCPU with the given index from the given VM.
 * This assumes the index is valid, i.e. less than vm->vcpu_count.
 */
struct vcpu *vm_get_vcpu(struct vm *vm, ffa_vcpu_index_t vcpu_index)
{
	CHECK(vcpu_index < vm->vcpu_count);
	return &vm->vcpus[vcpu_index];
}

/**
 * Gets `vm`'s wait entry for waiting on `for_vm`.
 */
struct wait_entry *vm_get_wait_entry(struct vm *vm, ffa_vm_id_t for_vm)
{
	uint16_t index;

	CHECK(for_vm >= HF_VM_ID_OFFSET);
	index = for_vm - HF_VM_ID_OFFSET;
	CHECK(index < MAX_VMS);

	return &vm->wait_entries[index];
}

/**
 * Gets the ID of the VM which the given VM's wait entry is for.
 */
ffa_vm_id_t vm_id_for_wait_entry(struct vm *vm, struct wait_entry *entry)
{
	uint16_t index = entry - vm->wait_entries;

	return index + HF_VM_ID_OFFSET;
}

/**
 * Returns whether the given VM ID represents an entity in the current world:
 * i.e. the hypervisor or a normal world VM when running in the normal world,
 * or the SPM or an SP when running in the secure world.
 */
bool vm_id_is_current_world(ffa_vm_id_t vm_id)
{
	return (vm_id & HF_VM_ID_WORLD_MASK) !=
	       (HF_OTHER_WORLD_ID & HF_VM_ID_WORLD_MASK);
}

/**
 * Maps a range of addresses to the VM in both the MMU and the IOMMU.
 *
 * mm_vm_defrag should always be called after a series of page table updates,
 * whether they succeed or fail. This is because on failure extra page table
 * entries may have been allocated and then not used, while on success it may
 * be possible to compact the page table by merging several entries into a
 * block.
 *
 * Returns true on success, or false if the update failed and no changes were
 * made.
 */
bool vm_identity_map(struct vm_locked vm_locked, paddr_t begin, paddr_t end,
		     uint32_t mode, struct mpool *ppool, ipaddr_t *ipa)
{
	if (!vm_identity_prepare(vm_locked, begin, end, mode, ppool)) {
		return false;
	}

	vm_identity_commit(vm_locked, begin, end, mode, ppool, ipa);

	return true;
}

/**
 * Prepares the given VM for the given address mapping such that it will be
 * able to commit the change without failure.
 *
 * In particular, multiple calls to this function will result in the
 * corresponding calls to commit the changes succeeding.
 *
 * Returns true on success, or false if the update failed and no changes were
 * made.
 */
bool vm_identity_prepare(struct vm_locked vm_locked, paddr_t begin, paddr_t end,
			 uint32_t mode, struct mpool *ppool)
{
	if (vm_locked.vm->el0_partition) {
		return mm_identity_prepare(&vm_locked.vm->ptable, begin, end,
					   mode, ppool);
	}
	return mm_vm_identity_prepare(&vm_locked.vm->ptable, begin, end, mode,
				      ppool);
}

/**
 * Commits the given address mapping to the VM, assuming the operation cannot
 * fail. `vm_identity_prepare` must be used correctly before this to ensure
 * this condition.
 */
void vm_identity_commit(struct vm_locked vm_locked, paddr_t begin, paddr_t end,
			uint32_t mode, struct mpool *ppool, ipaddr_t *ipa)
{
	if (vm_locked.vm->el0_partition) {
		mm_identity_commit(&vm_locked.vm->ptable, begin, end, mode,
				   ppool);
		if (ipa != NULL) {
			/*
			 * EL0 partitions are modeled as lightweight VMs to
			 * promote code reuse. The below statement returns the
			 * mapped PA as an IPA; however, for an EL0 partition,
			 * this is really a VA.
			 */
			*ipa = ipa_from_pa(begin);
		}
	} else {
		mm_vm_identity_commit(&vm_locked.vm->ptable, begin, end, mode,
				      ppool, ipa);
	}
	plat_iommu_identity_map(vm_locked, begin, end, mode);
}

/**
 * Unmaps a range of addresses from the VM.
 *
 * Returns true on success, or false if the update failed and no changes were
 * made.
 */
bool vm_unmap(struct vm_locked vm_locked, paddr_t begin, paddr_t end,
	      struct mpool *ppool)
{
	uint32_t mode = MM_MODE_UNMAPPED_MASK;

	return vm_identity_map(vm_locked, begin, end, mode, ppool, NULL);
}

/**
 * Defragments the page tables for an EL0 partition or for a VM.
 */
void vm_ptable_defrag(struct vm_locked vm_locked, struct mpool *ppool)
{
	if (vm_locked.vm->el0_partition) {
		mm_stage1_defrag(&vm_locked.vm->ptable, ppool);
	} else {
		mm_vm_defrag(&vm_locked.vm->ptable, ppool);
	}
}

/**
 * Unmaps the hypervisor pages from the given page table.
 */
bool vm_unmap_hypervisor(struct vm_locked vm_locked, struct mpool *ppool)
{
	/* TODO: If we add pages dynamically, they must be included here too. */
	return vm_unmap(vm_locked, layout_text_begin(), layout_text_end(),
			ppool) &&
	       vm_unmap(vm_locked, layout_rodata_begin(), layout_rodata_end(),
			ppool) &&
	       vm_unmap(vm_locked, layout_data_begin(), layout_data_end(),
			ppool) &&
	       vm_unmap(vm_locked, layout_stacks_begin(), layout_stacks_end(),
			ppool);
}

/**
 * Gets the first partition to boot, according to the boot protocol from the
 * FF-A spec.
 */
struct vm *vm_get_first_boot(void)
{
	return first_boot_vm;
}

/**
 * Inserts the VM into the boot list, which is sorted by the `boot_order`
 * field in the vm structure and rooted in `first_boot_vm`.
 */
void vm_update_boot(struct vm *vm)
{
	struct vm *current = NULL;
	struct vm *previous = NULL;

	if (first_boot_vm == NULL) {
		first_boot_vm = vm;
		return;
	}

	current = first_boot_vm;

	while (current != NULL && current->boot_order <= vm->boot_order) {
		previous = current;
		current = current->next_boot;
	}

	if (previous != NULL) {
		previous->next_boot = vm;
	} else {
		first_boot_vm = vm;
	}

	vm->next_boot = current;
}
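
/*
 * Example: if the list holds VMs with boot orders [1, 3] and a VM with
 * boot_order 2 is added, the walk above stops at the VM with order 3,
 * giving [1, 2, 3]. A VM with an equal boot_order is placed after existing
 * entries, as the walk uses `<=`.
 */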

/**
 * Gets the mode of the given range of IPAs or VAs if they are mapped with the
 * same mode.
 *
 * Returns true if the range is mapped with the same mode and false otherwise.
 * The wrapper calls the appropriate mm function depending on whether the
 * partition is a VM or an EL0 partition.
 */
bool vm_mem_get_mode(struct vm_locked vm_locked, ipaddr_t begin, ipaddr_t end,
		     uint32_t *mode)
{
	if (vm_locked.vm->el0_partition) {
		return mm_get_mode(&vm_locked.vm->ptable,
				   va_from_pa(pa_from_ipa(begin)),
				   va_from_pa(pa_from_ipa(end)), mode);
	}
	return mm_vm_get_mode(&vm_locked.vm->ptable, begin, end, mode);
}

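/**
 * Returns the notifications structure tracking notifications sent either from
 * VMs or from SPs, as selected by `is_from_vm`.
 */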
static struct notifications *vm_get_notifications(struct vm_locked vm_locked,
						  bool is_from_vm)
{
	return is_from_vm ? &vm_locked.vm->notifications.from_vm
			  : &vm_locked.vm->notifications.from_sp;
}

/*
 * Dynamically allocates the per-vCPU notifications structures for a given VM.
 */
static void vm_notifications_init_per_vcpu_notifications(
	struct vm *vm, ffa_vcpu_count_t vcpu_count, struct mpool *ppool)
{
	size_t notif_ppool_entries =
		(align_up(sizeof(struct notifications_state) * vcpu_count,
			  MM_PPOOL_ENTRY_SIZE) /
		 MM_PPOOL_ENTRY_SIZE);

	/*
	 * Allow the function to be called on already initialized VMs, i.e.
	 * those that only require their notification structures to be
	 * cleared.
	 */
	if (vm->notifications.from_sp.per_vcpu == NULL) {
		assert(vm->notifications.from_vm.per_vcpu == NULL);
		assert(vcpu_count != 0);
		CHECK(ppool != NULL);
		vm->notifications.from_sp.per_vcpu =
			(struct notifications_state *)mpool_alloc_contiguous(
				ppool, notif_ppool_entries, 1);
		CHECK(vm->notifications.from_sp.per_vcpu != NULL);

		vm->notifications.from_vm.per_vcpu =
			(struct notifications_state *)mpool_alloc_contiguous(
				ppool, notif_ppool_entries, 1);
		CHECK(vm->notifications.from_vm.per_vcpu != NULL);
	} else {
		assert(vm->notifications.from_vm.per_vcpu != NULL);
	}

	memset_s(vm->notifications.from_sp.per_vcpu,
		 sizeof(*(vm->notifications.from_sp.per_vcpu)) * vcpu_count, 0,
		 sizeof(*(vm->notifications.from_sp.per_vcpu)) * vcpu_count);
	memset_s(vm->notifications.from_vm.per_vcpu,
		 sizeof(*(vm->notifications.from_vm.per_vcpu)) * vcpu_count, 0,
		 sizeof(*(vm->notifications.from_vm.per_vcpu)) * vcpu_count);
}

/*
 * Initializes the sender bindings in the notifications structure.
 */
static void vm_notifications_init_bindings(struct notifications *notifications)
{
	for (uint32_t i = 0U; i < MAX_FFA_NOTIFICATIONS; i++) {
		notifications->bindings_sender_id[i] = HF_INVALID_VM_ID;
	}
}

/*
 * Initializes the notification related structures for a VM.
 */
void vm_notifications_init(struct vm *vm, ffa_vcpu_count_t vcpu_count,
			   struct mpool *ppool)
{
	vm_notifications_init_per_vcpu_notifications(vm, vcpu_count, ppool);

	/* Basic initialization of the notifications structure. */
	vm_notifications_init_bindings(&vm->notifications.from_sp);
	vm_notifications_init_bindings(&vm->notifications.from_vm);
}

/**
 * Checks if there are pending notifications.
 */
bool vm_are_notifications_pending(struct vm_locked vm_locked, bool from_vm,
				  ffa_notifications_bitmap_t notifications)
{
	struct notifications *to_check;

	CHECK(vm_locked.vm != NULL);

	to_check = vm_get_notifications(vm_locked, from_vm);

	/* Check if there are pending per-vCPU notifications. */
	for (uint32_t i = 0U; i < vm_locked.vm->vcpu_count; i++) {
		if ((to_check->per_vcpu[i].pending & notifications) != 0U) {
			return true;
		}
	}

	/* Check if there are pending global notifications. */
	return (to_check->global.pending & notifications) != 0U;
}

/**
 * Checks if there are pending global notifications, either from SPs or from
 * VMs.
 */
bool vm_are_global_notifications_pending(struct vm_locked vm_locked)
{
	return vm_get_notifications(vm_locked, true)->global.pending != 0ULL ||
	       vm_get_notifications(vm_locked, false)->global.pending !=
		       0ULL ||
	       vm_locked.vm->notifications.framework.pending != 0ULL;
}

/**
 * Checks if there are pending per-vCPU notifications, in a specific vCPU,
 * either from SPs or from VMs.
 */
bool vm_are_per_vcpu_notifications_pending(struct vm_locked vm_locked,
					   ffa_vcpu_index_t vcpu_id)
{
	CHECK(vcpu_id < vm_locked.vm->vcpu_count);

	return vm_get_notifications(vm_locked, true)
			       ->per_vcpu[vcpu_id]
			       .pending != 0ULL ||
	       vm_get_notifications(vm_locked, false)
			       ->per_vcpu[vcpu_id]
			       .pending != 0ULL;
}

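/** Returns true if receipt of notifications is enabled for the VM. */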
bool vm_are_notifications_enabled(struct vm *vm)
{
	return vm->notifications.enabled;
}

bool vm_locked_are_notifications_enabled(struct vm_locked vm_locked)
{
	return vm_are_notifications_enabled(vm_locked.vm);
}

static bool vm_is_notification_bit_set(ffa_notifications_bitmap_t notifications,
				       uint32_t i)
{
	return (notifications & FFA_NOTIFICATION_MASK(i)) != 0U;
}

static void vm_notifications_global_state_count_update(
	ffa_notifications_bitmap_t bitmap, uint32_t *counter, int inc)
{
	/*
	 * Helper to update the counters of the global notifications state.
	 * The counter is changed by an increment or decrement of 1 for each
	 * bit set in the bitmap.
	 */
	assert(inc == 1 || inc == -1);

	sl_lock(&all_notifications_state.lock);

	for (uint32_t i = 0; i < MAX_FFA_NOTIFICATIONS; i++) {
		if (vm_is_notification_bit_set(bitmap, i)) {
			CHECK((inc > 0 && *counter < UINT32_MAX) ||
			      (inc < 0 && *counter > 0));
			*counter += inc;
		}
	}

	sl_unlock(&all_notifications_state.lock);
}

/**
 * Helper function to increment the pending notifications count based on a
 * bitmap passed as argument.
 * To be used when setting notifications for a given VM.
 */
static void vm_notifications_pending_count_add(
	ffa_notifications_bitmap_t to_add)
{
	vm_notifications_global_state_count_update(
		to_add, &all_notifications_state.pending_count, 1);
}

/**
 * Helper function to decrement the pending notifications count.
 * To be used when getting the receiver's pending notifications.
 */
static void vm_notifications_pending_count_sub(
	ffa_notifications_bitmap_t to_sub)
{
	vm_notifications_global_state_count_update(
		to_sub, &all_notifications_state.pending_count, -1);
}

/**
 * Helper function to count the notifications whose information has been
 * retrieved by the scheduler of the system and which are still pending.
 */
static void vm_notifications_info_get_retrieved_count_add(
	ffa_notifications_bitmap_t to_add)
{
	vm_notifications_global_state_count_update(
		to_add, &all_notifications_state.info_get_retrieved_count, 1);
}

/**
 * Helper function to subtract the notifications that the receiver is getting
 * and whose information has been retrieved by the receiver scheduler.
 */
static void vm_notifications_info_get_retrieved_count_sub(
	ffa_notifications_bitmap_t to_sub)
{
	vm_notifications_global_state_count_update(
		to_sub, &all_notifications_state.info_get_retrieved_count, -1);
}

/**
 * Helper function to determine if there are notifications pending whose info
 * hasn't been retrieved by the receiver scheduler.
 */
bool vm_notifications_pending_not_retrieved_by_scheduler(void)
{
	bool ret;

	sl_lock(&all_notifications_state.lock);
	ret = all_notifications_state.pending_count >
	      all_notifications_state.info_get_retrieved_count;
	sl_unlock(&all_notifications_state.lock);

	return ret;
}

bool vm_is_notifications_pending_count_zero(void)
{
	bool ret;

	sl_lock(&all_notifications_state.lock);
	ret = all_notifications_state.pending_count == 0;
	sl_unlock(&all_notifications_state.lock);

	return ret;
}

/**
 * Checks that all provided notifications are bound to the specified sender,
 * and are per-vCPU or global, as specified.
 */
bool vm_notifications_validate_binding(struct vm_locked vm_locked,
				       bool is_from_vm, ffa_vm_id_t sender_id,
				       ffa_notifications_bitmap_t notifications,
				       bool is_per_vcpu)
{
	return vm_notifications_validate_bound_sender(
		       vm_locked, is_from_vm, sender_id, notifications) &&
	       vm_notifications_validate_per_vcpu(vm_locked, is_from_vm,
						  is_per_vcpu, notifications);
}

/**
 * Updates the binding information in the notification structure for the
 * specified notifications.
 */
void vm_notifications_update_bindings(struct vm_locked vm_locked,
				      bool is_from_vm, ffa_vm_id_t sender_id,
				      ffa_notifications_bitmap_t notifications,
				      bool is_per_vcpu)
{
	CHECK(vm_locked.vm != NULL);
	struct notifications *to_update =
		vm_get_notifications(vm_locked, is_from_vm);

	for (uint32_t i = 0; i < MAX_FFA_NOTIFICATIONS; i++) {
		if (vm_is_notification_bit_set(notifications, i)) {
			to_update->bindings_sender_id[i] = sender_id;
		}
	}

	/*
	 * Set the bits if the notifications are per-vCPU, else clear them as
	 * they are global.
	 */
	if (is_per_vcpu) {
		to_update->bindings_per_vcpu |= notifications;
	} else {
		to_update->bindings_per_vcpu &= ~notifications;
	}
}

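/**
 * Checks that each notification set in the bitmap is currently bound to the
 * given sender.
 */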
bool vm_notifications_validate_bound_sender(
	struct vm_locked vm_locked, bool is_from_vm, ffa_vm_id_t sender_id,
	ffa_notifications_bitmap_t notifications)
{
	CHECK(vm_locked.vm != NULL);
	struct notifications *to_check =
		vm_get_notifications(vm_locked, is_from_vm);

	for (uint32_t i = 0; i < MAX_FFA_NOTIFICATIONS; i++) {
		if (vm_is_notification_bit_set(notifications, i) &&
		    to_check->bindings_sender_id[i] != sender_id) {
			return false;
		}
	}

	return true;
}

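/**
 * Checks that the notifications' bindings match the requested configuration:
 * all bound as per-vCPU when `is_per_vcpu` is set, all global otherwise.
 */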
bool vm_notifications_validate_per_vcpu(struct vm_locked vm_locked,
					bool is_from_vm, bool is_per_vcpu,
					ffa_notifications_bitmap_t notif)
{
	CHECK(vm_locked.vm != NULL);
	struct notifications *to_check =
		vm_get_notifications(vm_locked, is_from_vm);

	return is_per_vcpu ? (~to_check->bindings_per_vcpu & notif) == 0U
			   : (to_check->bindings_per_vcpu & notif) == 0U;
}

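/**
 * Marks the given notifications as pending in the given state and updates the
 * system-wide pending count accordingly.
 */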
static void vm_notifications_state_set(struct notifications_state *state,
				       ffa_notifications_bitmap_t notifications)
{
	state->pending |= notifications;
	vm_notifications_pending_count_add(notifications);
}

void vm_notifications_partition_set_pending(
	struct vm_locked vm_locked, bool is_from_vm,
	ffa_notifications_bitmap_t notifications, ffa_vcpu_index_t vcpu_id,
	bool is_per_vcpu)
{
	struct notifications *to_set;
	struct notifications_state *state;

	CHECK(vm_locked.vm != NULL);
	CHECK(vcpu_id < vm_locked.vm->vcpu_count);

	to_set = vm_get_notifications(vm_locked, is_from_vm);

	state = is_per_vcpu ? &to_set->per_vcpu[vcpu_id] : &to_set->global;

	vm_notifications_state_set(state, notifications);
}

/**
 * Sets pending framework notifications.
 */
void vm_notifications_framework_set_pending(
	struct vm_locked vm_locked, ffa_notifications_bitmap_t notifications)
{
	CHECK(vm_locked.vm != NULL);
	assert(is_ffa_spm_buffer_full_notification(notifications) ||
	       is_ffa_hyp_buffer_full_notification(notifications));
	vm_notifications_state_set(&vm_locked.vm->notifications.framework,
				   notifications);
}

static ffa_notifications_bitmap_t vm_notifications_state_get_pending(
	struct notifications_state *state)
{
	ffa_notifications_bitmap_t to_ret;
	ffa_notifications_bitmap_t pending_and_info_get_retrieved;

	assert(state != NULL);

	to_ret = state->pending;

	/* Update count of currently pending notifications in the system. */
	vm_notifications_pending_count_sub(state->pending);

	/*
	 * If the notifications the receiver is getting have already been
	 * retrieved by the receiver scheduler, decrement them from the
	 * respective count.
	 */
	pending_and_info_get_retrieved =
		state->pending & state->info_get_retrieved;

	if (pending_and_info_get_retrieved != 0) {
		vm_notifications_info_get_retrieved_count_sub(
			pending_and_info_get_retrieved);
	}

	state->pending = 0U;
	state->info_get_retrieved = 0U;

	return to_ret;
}

/**
 * Gets the global and per-vCPU notifications for the given vCPU ID.
 */
ffa_notifications_bitmap_t vm_notifications_partition_get_pending(
	struct vm_locked vm_locked, bool is_from_vm, ffa_vcpu_index_t vcpu_id)
{
	ffa_notifications_bitmap_t to_ret;
	struct notifications *to_get;

	assert(vm_locked.vm != NULL);
	to_get = vm_get_notifications(vm_locked, is_from_vm);
	assert(vcpu_id < vm_locked.vm->vcpu_count);

	to_ret = vm_notifications_state_get_pending(&to_get->global);
	to_ret |=
		vm_notifications_state_get_pending(&to_get->per_vcpu[vcpu_id]);

	return to_ret;
}

/**
 * Gets the pending framework notifications.
 */
ffa_notifications_bitmap_t vm_notifications_framework_get_pending(
	struct vm_locked vm_locked)
{
	struct vm *vm = vm_locked.vm;
	ffa_notifications_bitmap_t framework;
	bool rx_buffer_full;

	assert(vm != NULL);

	framework = vm_notifications_state_get_pending(
		&vm->notifications.framework);

	/*
	 * By retrieving an RX buffer full notification, the buffer state
	 * transitions from RECEIVED to READ; the VM is now the RX buffer
	 * owner, can read it, and is allowed to release it.
	 */
	rx_buffer_full = is_ffa_spm_buffer_full_notification(framework) ||
			 is_ffa_hyp_buffer_full_notification(framework);
	if (rx_buffer_full && vm->mailbox.state == MAILBOX_STATE_RECEIVED) {
		vm->mailbox.state = MAILBOX_STATE_READ;
	}

	return framework;
}

static void vm_notifications_state_info_get(
	struct notifications_state *state, ffa_vm_id_t vm_id, bool is_per_vcpu,
	ffa_vcpu_index_t vcpu_id, uint16_t *ids, uint32_t *ids_count,
	uint32_t *lists_sizes, uint32_t *lists_count,
	const uint32_t ids_max_count,
	enum notifications_info_get_state *info_get_state)
{
	ffa_notifications_bitmap_t pending_not_retrieved;

	CHECK(*ids_count <= ids_max_count);
	CHECK(*lists_count <= ids_max_count);

	if (*info_get_state == FULL) {
		return;
	}

	pending_not_retrieved = state->pending & ~state->info_get_retrieved;

	/* No notifications pending that haven't been retrieved. */
	if (pending_not_retrieved == 0U) {
		return;
	}

	if (*ids_count == ids_max_count) {
		*info_get_state = FULL;
		return;
	}

	switch (*info_get_state) {
	case INIT:
	case STARTING_NEW:
		/*
		 * In this iteration two IDs are to be added: the VM ID and
		 * the vCPU ID. If there is no space for both, change state
		 * and return.
		 */
		if (is_per_vcpu && ids_max_count - *ids_count < 2) {
			*info_get_state = FULL;
			return;
		}

		*info_get_state = INSERTING;
		ids[*ids_count] = vm_id;
		++(*ids_count);

		if (is_per_vcpu) {
			/* Insert vCPU ID. */
			ids[*ids_count] = vcpu_id;
			++(*ids_count);
			++lists_sizes[*lists_count];
		}

		++(*lists_count);
		break;
	case INSERTING:
		/* For per-vCPU notifications only. */
		if (!is_per_vcpu) {
			break;
		}

		/* Insert vCPU ID. */
		ids[*ids_count] = vcpu_id;
		(*ids_count)++;
		/* Increment the size of the current list. */
		++lists_sizes[*lists_count - 1];

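		/*
		 * A list holds the VM ID followed by at most three vCPU IDs;
		 * the limit of 3 matches the 2-bit list-size field of the
		 * FFA_NOTIFICATION_INFO_GET response encoding in the FF-A
		 * spec.
		 */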
		if (lists_sizes[*lists_count - 1] == 3) {
			*info_get_state = STARTING_NEW;
		}
		break;
	default:
		panic("Notification info get action error!!\n");
	}

	state->info_get_retrieved |= pending_not_retrieved;

	vm_notifications_info_get_retrieved_count_add(pending_not_retrieved);
}

/**
 * Gets pending notifications' information to return to the receiver
 * scheduler.
 */
void vm_notifications_info_get_pending(
	struct vm_locked vm_locked, bool is_from_vm, uint16_t *ids,
	uint32_t *ids_count, uint32_t *lists_sizes, uint32_t *lists_count,
	const uint32_t ids_max_count,
	enum notifications_info_get_state *info_get_state)
{
	struct notifications *notifications;

	CHECK(vm_locked.vm != NULL);

	notifications = vm_get_notifications(vm_locked, is_from_vm);

	/*
	 * Perform info get for global notifications, before doing it for
	 * per-vCPU.
	 */
	vm_notifications_state_info_get(&notifications->global,
					vm_locked.vm->id, false, 0, ids,
					ids_count, lists_sizes, lists_count,
					ids_max_count, info_get_state);

	for (ffa_vcpu_count_t i = 0; i < vm_locked.vm->vcpu_count; i++) {
		vm_notifications_state_info_get(
			&notifications->per_vcpu[i], vm_locked.vm->id, true, i,
			ids, ids_count, lists_sizes, lists_count, ids_max_count,
			info_get_state);
	}
}

/**
 * Gets all info from the VM's pending notifications.
 * Returns true if the list is full and there are more notifications pending.
 */
bool vm_notifications_info_get(struct vm_locked vm_locked, uint16_t *ids,
			       uint32_t *ids_count, uint32_t *lists_sizes,
			       uint32_t *lists_count,
			       const uint32_t ids_max_count)
{
	enum notifications_info_get_state current_state = INIT;

	/* Get info of pending notifications from the framework. */
	vm_notifications_state_info_get(&vm_locked.vm->notifications.framework,
					vm_locked.vm->id, false, 0, ids,
					ids_count, lists_sizes, lists_count,
					ids_max_count, &current_state);

	/* Get info of pending notifications from SPs. */
	vm_notifications_info_get_pending(vm_locked, false, ids, ids_count,
					  lists_sizes, lists_count,
					  ids_max_count, &current_state);

	/* Get info of pending notifications from VMs. */
	vm_notifications_info_get_pending(vm_locked, true, ids, ids_count,
					  lists_sizes, lists_count,
					  ids_max_count, &current_state);

	/*
	 * The state transitions to FULL when trying to insert a new ID into
	 * the list when there is no more space. This means there are
	 * notifications pending whose info has not been retrieved.
	 */
	return current_state == FULL;
}
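
/*
 * Illustrative sketch of how the output arrays fill up, assuming the caller
 * passes the same arrays across successive calls for two VMs: VM 1 with a
 * pending global notification, and VM 2 with pending per-vCPU notifications
 * on vCPUs 0 and 1:
 *   ids         = {1, 2, 0, 1}
 *   lists_count = 2
 *   lists_sizes = {0, 2}  (count of vCPU IDs following each VM ID)
 */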

/**
 * Checks whether the VM supports the given messaging method.
 */
bool vm_supports_messaging_method(struct vm *vm, uint8_t msg_method)
{
	return (vm->messaging_method & msg_method) != 0;
}

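/**
 * Records whether the notifications pending interrupt (NPI) has been injected
 * for the VM.
 */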
void vm_notifications_set_npi_injected(struct vm_locked vm_locked,
				       bool npi_injected)
{
	vm_locked.vm->notifications.npi_injected = npi_injected;
}

bool vm_notifications_is_npi_injected(struct vm_locked vm_locked)
{
	return vm_locked.vm->notifications.npi_injected;
}

/**
 * Sets the designated GP register through which the VM expects to receive the
 * boot info's address.
 */
void vm_set_boot_info_gp_reg(struct vm *vm, struct vcpu *vcpu)
{
	if (!vm->initialized && vm->boot_info.blob_addr.ipa != 0U) {
		arch_regs_set_gp_reg(&vcpu->regs,
				     ipa_addr(vm->boot_info.blob_addr),
				     vm->boot_info.gp_register_num);
	}
}