/*
 * Copyright 2018 The Hafnium Authors.
 *
 * Use of this source code is governed by a BSD-style
 * license that can be found in the LICENSE file or at
 * https://opensource.org/licenses/BSD-3-Clause.
 */

#include "hf/vm.h"

#include "hf/arch/spinlock.h"
#include "hf/arch/vm.h"

#include "hf/api.h"
#include "hf/assert.h"
#include "hf/check.h"
#include "hf/cpu.h"
#include "hf/dlog.h"
#include "hf/ffa.h"
#include "hf/layout.h"
#include "hf/plat/iommu.h"
#include "hf/std.h"

#include "vmapi/hf/call.h"

static struct vm vms[MAX_VMS];
static struct vm other_world;
static ffa_vm_count_t vm_count;

/**
 * The `boot_list` is a special entry in the circular linked list maintained by
 * the partition manager and serves as both the start and end of the list.
 */
static struct list_entry boot_list = LIST_INIT(boot_list);

/**
 * Counters tracking the status of notifications in the system. They help
 * refine the information returned to the receiver scheduler.
 */
static struct {
	/** Counts pending notifications. */
	uint32_t pending_count;
	/**
	 * Counts pending notifications that have already been retrieved by
	 * the receiver scheduler.
	 */
	uint32_t info_get_retrieved_count;
	struct spinlock lock;
} all_notifications_state;

static bool vm_init_mm(struct vm *vm, struct mpool *ppool)
{
	return arch_vm_init_mm(vm, ppool) && arch_vm_iommu_init_mm(vm, ppool);
}

struct vm *vm_init(ffa_id_t id, ffa_vcpu_count_t vcpu_count,
		   struct mpool *ppool, bool el0_partition,
		   uint8_t dma_device_count)
{
	uint32_t i;
	struct vm *vm;
	size_t vcpu_ppool_entries = (align_up(sizeof(struct vcpu) * vcpu_count,
					      MM_PPOOL_ENTRY_SIZE) /
				     MM_PPOOL_ENTRY_SIZE);

	if (id == HF_OTHER_WORLD_ID) {
		CHECK(el0_partition == false);
		vm = &other_world;
	} else {
		uint16_t vm_index = id - HF_VM_ID_OFFSET;

		CHECK(id >= HF_VM_ID_OFFSET);
		CHECK(vm_index < ARRAY_SIZE(vms));
		vm = &vms[vm_index];
	}

	memset_s(vm, sizeof(*vm), 0, sizeof(*vm));

	sl_init(&vm->lock);

	vm->id = id;
	vm->vcpu_count = vcpu_count;

	vm->vcpus = (struct vcpu *)mpool_alloc_contiguous(
		ppool, vcpu_ppool_entries, 1);
	CHECK(vm->vcpus != NULL);

	vm->mailbox.state = MAILBOX_STATE_EMPTY;
	atomic_init(&vm->aborting, false);
	vm->el0_partition = el0_partition;
	vm->dma_device_count = dma_device_count;

	if (!vm_init_mm(vm, ppool)) {
		return NULL;
	}

	/*
	 * Perform basic initialization of the vCPUs: all vCPUs of the
	 * partition shall be in the CREATED state.
	 */
	for (i = 0; i < vcpu_count; i++) {
		vcpu_init(vm_get_vcpu(vm, i), vm);
	}

	vm_notifications_init(vm, vcpu_count, ppool);
	list_init(&vm->boot_list_node);
	return vm;
}

bool vm_init_next(ffa_vcpu_count_t vcpu_count, struct mpool *ppool,
		  struct vm **new_vm, bool el0_partition,
		  uint8_t dma_device_count)
{
	if (vm_count >= MAX_VMS) {
		return false;
	}

	/* Generate IDs based on an offset, as low IDs (e.g. 0) are reserved. */
	*new_vm = vm_init(vm_count + HF_VM_ID_OFFSET, vcpu_count, ppool,
			  el0_partition, dma_device_count);
	if (*new_vm == NULL) {
		return false;
	}
	++vm_count;

	return true;
}

ffa_vm_count_t vm_get_count(void)
{
	return vm_count;
}

/**
 * Returns a pointer to the VM with the corresponding id.
 */
struct vm *vm_find(ffa_id_t id)
{
	uint16_t index;

	if (id == HF_OTHER_WORLD_ID) {
		if (other_world.id == HF_OTHER_WORLD_ID) {
			return &other_world;
		}
		return NULL;
	}

	/* Check that this is not a reserved ID. */
	if (id < HF_VM_ID_OFFSET) {
		return NULL;
	}

	index = id - HF_VM_ID_OFFSET;

	return vm_find_index(index);
}

/**
 * Returns a locked instance of the VM with the corresponding id.
 */
struct vm_locked vm_find_locked(ffa_id_t id)
{
	struct vm *vm = vm_find(id);

	if (vm != NULL) {
		return vm_lock(vm);
	}

	return (struct vm_locked){.vm = NULL};
}

/**
 * Returns a pointer to the VM at the specified index.
 */
struct vm *vm_find_index(uint16_t index)
{
	/* Ensure the VM is initialized. */
	if (index >= vm_count) {
		return NULL;
	}

	return &vms[index];
}

/**
 * Locks the given VM and returns a `vm_locked` wrapper holding it.
 */
struct vm_locked vm_lock(struct vm *vm)
{
	struct vm_locked locked = {
		.vm = vm,
	};

	sl_lock(&vm->lock);

	return locked;
}

/**
 * Locks two VMs, ensuring that they are locked in the order of the locks'
 * addresses.
 */
struct two_vm_locked vm_lock_both(struct vm *vm1, struct vm *vm2)
{
	struct two_vm_locked dual_lock;

	sl_lock_both(&vm1->lock, &vm2->lock);
	dual_lock.vm1.vm = vm1;
	dual_lock.vm2.vm = vm2;

	return dual_lock;
}

/**
 * Locks two VMs in the order of the locks' addresses, given that `vm1` is
 * already locked.
 */
struct two_vm_locked vm_lock_both_in_order(struct vm_locked vm1, struct vm *vm2)
{
	struct spinlock *sl1 = &vm1.vm->lock;
	struct spinlock *sl2 = &vm2->lock;

	/*
	 * Use `sl_lock`/`sl_unlock` directly rather than
	 * `vm_lock`/`vm_unlock` because `vm_unlock` sets the vm field
	 * to NULL.
	 */
	if (sl1 < sl2) {
		sl_lock(sl2);
	} else {
		sl_unlock(sl1);
		sl_lock(sl2);
		sl_lock(sl1);
	}

	return (struct two_vm_locked){
		.vm1 = vm1,
		.vm2 = (struct vm_locked){.vm = vm2},
	};
}

/**
 * Unlocks a VM previously locked with vm_lock, and updates `locked` to reflect
 * the fact that the VM is no longer locked.
 */
void vm_unlock(struct vm_locked *locked)
{
	sl_unlock(&locked->vm->lock);
	locked->vm = NULL;
}

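/*
 * Illustrative usage sketch, not part of the original file: the expected
 * vm_lock/vm_unlock discipline around VM state. The helper name
 * `example_mailbox_is_empty` is hypothetical.
 *
 *	static bool example_mailbox_is_empty(struct vm *vm)
 *	{
 *		struct vm_locked vm_locked = vm_lock(vm);
 *		bool empty =
 *			vm_locked.vm->mailbox.state == MAILBOX_STATE_EMPTY;
 *
 *		vm_unlock(&vm_locked);
 *		return empty;
 *	}
 */
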
/**
 * Get the vCPU with the given index from the given VM.
 * This assumes the index is valid, i.e. less than vm->vcpu_count.
 */
struct vcpu *vm_get_vcpu(struct vm *vm, ffa_vcpu_index_t vcpu_index)
{
	CHECK(vcpu_index < vm->vcpu_count);
	return &vm->vcpus[vcpu_index];
}

/**
 * Checks whether the given `to` VM's mailbox is currently busy.
 */
bool vm_is_mailbox_busy(struct vm_locked to)
{
	return to.vm->mailbox.state != MAILBOX_STATE_EMPTY ||
	       to.vm->mailbox.recv == NULL;
}

/**
 * Checks if the mailbox is currently owned by the other world.
 */
bool vm_is_mailbox_other_world_owned(struct vm_locked to)
{
	return to.vm->mailbox.state == MAILBOX_STATE_OTHER_WORLD_OWNED;
}

/**
 * Return whether the given VM ID represents an entity in the current world:
 * i.e. the hypervisor or a normal world VM when running in the normal world, or
 * the SPM or an SP when running in the secure world.
 */
bool vm_id_is_current_world(ffa_id_t vm_id)
{
	return (vm_id & HF_VM_ID_WORLD_MASK) !=
	       (HF_OTHER_WORLD_ID & HF_VM_ID_WORLD_MASK);
}

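/*
 * Worked example (the ID values are illustrative assumptions): with a 16-bit
 * ID space where HF_VM_ID_WORLD_MASK selects the top bit, running in the
 * secure world with a normal-world HF_OTHER_WORLD_ID whose top bit is clear:
 *
 *	vm_id_is_current_world(0x8001);	// true: an SP ID, top bit set.
 *	vm_id_is_current_world(0x0005);	// false: a normal-world VM ID.
 */
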
/**
 * Map a range of addresses to the VM in both the MMU and the IOMMU.
 *
 * mm_vm_defrag should always be called after a series of page table updates,
 * whether they succeed or fail. This is because on failure extra page table
 * entries may have been allocated and then not used, while on success it may be
 * possible to compact the page table by merging several entries into a block.
 *
 * Returns true on success, or false if the update failed and no changes were
 * made.
 */
bool vm_identity_map(struct vm_locked vm_locked, paddr_t begin, paddr_t end,
		     mm_mode_t mode, struct mpool *ppool, ipaddr_t *ipa)
{
	if (!vm_identity_prepare(vm_locked, begin, end, mode, ppool)) {
		return false;
	}

	vm_identity_commit(vm_locked, begin, end, mode, ppool, ipa);

	return true;
}

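/*
 * Illustrative usage sketch, not part of the original file: mapping a range
 * of device memory into a locked VM via the prepare/commit pair that
 * vm_identity_map wraps. The mode flags chosen here are assumptions for the
 * example.
 *
 *	ipaddr_t ipa;
 *
 *	if (!vm_identity_map(vm_locked, begin, end,
 *			     MM_MODE_R | MM_MODE_W | MM_MODE_D, ppool, &ipa)) {
 *		return false;
 *	}
 *
 * With an identity mapping, on success `ipa` holds the IPA corresponding to
 * `begin`.
 */
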
/**
 * Prepares the given VM for the given address mapping such that it will be able
 * to commit the change without failure.
 *
 * In particular, multiple calls to this function will result in the
 * corresponding calls to commit the changes succeeding.
 *
 * Returns true on success, or false if the update failed and no changes were
 * made.
 */
bool vm_identity_prepare(struct vm_locked vm_locked, paddr_t begin, paddr_t end,
			 mm_mode_t mode, struct mpool *ppool)
{
	return arch_vm_identity_prepare(vm_locked, begin, end, mode, ppool);
}

/**
 * Commits the given address mapping to the VM assuming the operation cannot
 * fail. `vm_identity_prepare` must be used correctly before this to ensure
 * this condition.
 */
void vm_identity_commit(struct vm_locked vm_locked, paddr_t begin, paddr_t end,
			mm_mode_t mode, struct mpool *ppool, ipaddr_t *ipa)
{
	arch_vm_identity_commit(vm_locked, begin, end, mode, ppool, ipa);
}

/**
 * Unmap a range of addresses from the VM.
 *
 * Returns true on success, or false if the update failed and no changes were
 * made.
 */
bool vm_unmap(struct vm_locked vm_locked, paddr_t begin, paddr_t end,
	      struct mpool *ppool)
{
	return arch_vm_unmap(vm_locked, begin, end, ppool);
}

/**
 * Defrag page tables for an EL0 partition or for a VM.
 */
void vm_ptable_defrag(struct vm_locked vm_locked, struct mpool *ppool)
{
	arch_vm_ptable_defrag(vm_locked, ppool);
}

/**
 * Unmaps the hypervisor pages from the given page table.
 */
bool vm_unmap_hypervisor(struct vm_locked vm_locked, struct mpool *ppool)
{
	/* TODO: If we add pages dynamically, they must be included here too. */
	return vm_unmap(vm_locked, layout_text_begin(), layout_text_end(),
			ppool) &&
	       vm_unmap(vm_locked, layout_rodata_begin(), layout_rodata_end(),
			ppool) &&
	       vm_unmap(vm_locked, layout_data_begin(), layout_data_end(),
			ppool) &&
	       vm_unmap(vm_locked, layout_stacks_begin(), layout_stacks_end(),
			ppool);
}

/**
 * Gets the mode of the given range of IPAs or VAs if they are mapped with the
 * same mode.
 *
 * Returns true if the range is mapped with the same mode and false otherwise.
 * The wrapper calls the appropriate mm function depending on whether the
 * partition is a VM or an EL0 partition.
 */
bool vm_mem_get_mode(struct vm_locked vm_locked, ipaddr_t begin, ipaddr_t end,
		     mm_mode_t *mode)
{
	return arch_vm_mem_get_mode(vm_locked, begin, end, mode);
}

bool vm_iommu_mm_identity_map(struct vm_locked vm_locked, paddr_t begin,
			      paddr_t end, mm_mode_t mode, struct mpool *ppool,
			      ipaddr_t *ipa, uint8_t dma_device_id)
{
	return arch_vm_iommu_mm_identity_map(vm_locked, begin, end, mode, ppool,
					     ipa, dma_device_id);
}

bool vm_mailbox_state_busy(struct vm_locked vm_locked)
{
	return vm_locked.vm->mailbox.state != MAILBOX_STATE_EMPTY ||
	       vm_locked.vm->mailbox.recv == NULL;
}

static struct notifications *vm_get_notifications(struct vm_locked vm_locked,
						  bool is_from_vm)
{
	return is_from_vm ? &vm_locked.vm->notifications.from_vm
			  : &vm_locked.vm->notifications.from_sp;
}

/*
 * Dynamically allocates the per-vCPU notifications structures for a given VM.
 */
static void vm_notifications_init_per_vcpu_notifications(
	struct vm *vm, ffa_vcpu_count_t vcpu_count, struct mpool *ppool)
{
	size_t notif_ppool_entries =
		(align_up(sizeof(struct notifications_state) * vcpu_count,
			  MM_PPOOL_ENTRY_SIZE) /
		 MM_PPOOL_ENTRY_SIZE);

	/*
	 * Allow the function to be called on already-initialized VMs whose
	 * notification structures only need to be cleared.
	 */
	if (vm->notifications.from_sp.per_vcpu == NULL) {
		assert(vm->notifications.from_vm.per_vcpu == NULL);
		assert(vcpu_count != 0);
		CHECK(ppool != NULL);
		vm->notifications.from_sp.per_vcpu =
			(struct notifications_state *)mpool_alloc_contiguous(
				ppool, notif_ppool_entries, 1);
		CHECK(vm->notifications.from_sp.per_vcpu != NULL);

		vm->notifications.from_vm.per_vcpu =
			(struct notifications_state *)mpool_alloc_contiguous(
				ppool, notif_ppool_entries, 1);
		CHECK(vm->notifications.from_vm.per_vcpu != NULL);
	} else {
		assert(vm->notifications.from_vm.per_vcpu != NULL);
	}

	memset_s(vm->notifications.from_sp.per_vcpu,
		 sizeof(*(vm->notifications.from_sp.per_vcpu)) * vcpu_count, 0,
		 sizeof(*(vm->notifications.from_sp.per_vcpu)) * vcpu_count);
	memset_s(vm->notifications.from_vm.per_vcpu,
		 sizeof(*(vm->notifications.from_vm.per_vcpu)) * vcpu_count, 0,
		 sizeof(*(vm->notifications.from_vm.per_vcpu)) * vcpu_count);
}

/*
 * Initializes the bindings in a notifications structure.
 */
static void vm_notifications_init_bindings(struct notifications *notifications)
{
	for (uint32_t i = 0U; i < MAX_FFA_NOTIFICATIONS; i++) {
		notifications->bindings_sender_id[i] = HF_INVALID_VM_ID;
	}
}

/*
 * Initialize notification related structures for a VM.
 */
void vm_notifications_init(struct vm *vm, ffa_vcpu_count_t vcpu_count,
			   struct mpool *ppool)
{
	vm_notifications_init_per_vcpu_notifications(vm, vcpu_count, ppool);

	/* Basic initialization of the notifications structure. */
	vm_notifications_init_bindings(&vm->notifications.from_sp);
	vm_notifications_init_bindings(&vm->notifications.from_vm);
}

/**
 * Checks if there are pending notifications.
 */
bool vm_are_notifications_pending(struct vm_locked vm_locked, bool from_vm,
				  ffa_notifications_bitmap_t notifications)
{
	struct notifications *to_check;

	CHECK(vm_locked.vm != NULL);

	to_check = vm_get_notifications(vm_locked, from_vm);

	/* Check if there are pending per-vCPU notifications. */
	for (uint32_t i = 0U; i < vm_locked.vm->vcpu_count; i++) {
		if ((to_check->per_vcpu[i].pending & notifications) != 0U) {
			return true;
		}
	}

	/* Check if there are pending global notifications. */
	return (to_check->global.pending & notifications) != 0U;
}

/**
 * Checks if there are pending global notifications, either from SPs or from
 * VMs.
 */
bool vm_are_global_notifications_pending(struct vm_locked vm_locked)
{
	return vm_get_notifications(vm_locked, true)->global.pending != 0ULL ||
	       vm_get_notifications(vm_locked, false)->global.pending != 0ULL ||
	       vm_are_fwk_notifications_pending(vm_locked);
}

/**
 * Currently only the RX buffer full notification is supported as a framework
 * notification. Returns true if one is pending, either from the hypervisor or
 * the SPMC.
 */
bool vm_are_fwk_notifications_pending(struct vm_locked vm_locked)
{
	return vm_locked.vm->notifications.framework.pending != 0ULL;
}

/**
 * Checks if there are per-vCPU notifications pending on a specific vCPU,
 * either from SPs or from VMs.
 */
bool vm_are_per_vcpu_notifications_pending(struct vm_locked vm_locked,
					   ffa_vcpu_index_t vcpu_id)
{
	CHECK(vcpu_id < vm_locked.vm->vcpu_count);

	return vm_get_notifications(vm_locked, true)
			       ->per_vcpu[vcpu_id]
			       .pending != 0ULL ||
	       vm_get_notifications(vm_locked, false)
			       ->per_vcpu[vcpu_id]
			       .pending != 0ULL;
}

bool vm_are_notifications_enabled(struct vm *vm)
{
	return vm->notifications.enabled;
}

bool vm_locked_are_notifications_enabled(struct vm_locked vm_locked)
{
	return vm_are_notifications_enabled(vm_locked.vm);
}

static bool vm_is_notification_bit_set(ffa_notifications_bitmap_t notifications,
				       uint32_t i)
{
	return (notifications & FFA_NOTIFICATION_MASK(i)) != 0U;
}

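/*
 * Worked example (assuming FFA_NOTIFICATION_MASK(i) expands to the single bit
 * UINT64_C(1) << i): for notifications = 0x5, i.e. bits 0 and 2 set,
 * vm_is_notification_bit_set(0x5, 0) and vm_is_notification_bit_set(0x5, 2)
 * return true, while vm_is_notification_bit_set(0x5, 1) returns false.
 */
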
static void vm_notifications_global_state_count_update(
	ffa_notifications_bitmap_t bitmap, uint32_t *counter, int inc)
{
	/*
	 * Helper to update the counters of the global notifications state.
	 * The count is updated in steps of 1, i.e. `inc` must be either 1
	 * (increment) or -1 (decrement).
	 */
	assert(inc == 1 || inc == -1);

	sl_lock(&all_notifications_state.lock);

	for (uint32_t i = 0; i < MAX_FFA_NOTIFICATIONS; i++) {
		if (vm_is_notification_bit_set(bitmap, i)) {
			CHECK((inc > 0 && *counter < UINT32_MAX) ||
			      (inc < 0 && *counter > 0));
			*counter += inc;
		}
	}

	sl_unlock(&all_notifications_state.lock);
}

/**
 * Helper function to increment the pending notifications count based on the
 * bitmap passed as argument.
 * To be used when setting notifications for a given VM.
 */
static void vm_notifications_pending_count_add(
	ffa_notifications_bitmap_t to_add)
{
	vm_notifications_global_state_count_update(
		to_add, &all_notifications_state.pending_count, 1);
}

/**
 * Helper function to decrement the pending notifications count.
 * To be used when getting the receiver's pending notifications.
 */
static void vm_notifications_pending_count_sub(
	ffa_notifications_bitmap_t to_sub)
{
	vm_notifications_global_state_count_update(
		to_sub, &all_notifications_state.pending_count, -1);
}

/**
 * Helper function to count the notifications whose information has been
 * retrieved by the receiver scheduler and that are still pending.
 */
static void vm_notifications_info_get_retrieved_count_add(
	ffa_notifications_bitmap_t to_add)
{
	vm_notifications_global_state_count_update(
		to_add, &all_notifications_state.info_get_retrieved_count, 1);
}

/**
 * Helper function to subtract the notifications that the receiver is getting
 * and whose information has been retrieved by the receiver scheduler.
 */
static void vm_notifications_info_get_retrieved_count_sub(
	ffa_notifications_bitmap_t to_sub)
{
	vm_notifications_global_state_count_update(
		to_sub, &all_notifications_state.info_get_retrieved_count, -1);
}

/**
 * Helper function to determine if there are notifications pending whose info
 * hasn't been retrieved by the receiver scheduler.
 */
bool vm_notifications_pending_not_retrieved_by_scheduler(void)
{
	bool ret;

	sl_lock(&all_notifications_state.lock);
	ret = all_notifications_state.pending_count >
	      all_notifications_state.info_get_retrieved_count;
	sl_unlock(&all_notifications_state.lock);

	return ret;
}

bool vm_is_notifications_pending_count_zero(void)
{
	bool ret;

	sl_lock(&all_notifications_state.lock);
	ret = all_notifications_state.pending_count == 0;
	sl_unlock(&all_notifications_state.lock);

	return ret;
}

/**
 * Checks that all provided notifications are bound to the specified sender,
 * and are per-vCPU or global, as specified.
 */
bool vm_notifications_validate_binding(struct vm_locked vm_locked,
				       bool is_from_vm, ffa_id_t sender_id,
				       ffa_notifications_bitmap_t notifications,
				       bool is_per_vcpu)
{
	return vm_notifications_validate_bound_sender(
		       vm_locked, is_from_vm, sender_id, notifications) &&
	       vm_notifications_validate_per_vcpu(vm_locked, is_from_vm,
						  is_per_vcpu, notifications);
}

/**
 * Updates the binding information in the notifications structure for the
 * specified notifications.
 */
void vm_notifications_update_bindings(struct vm_locked vm_locked,
				      bool is_from_vm, ffa_id_t sender_id,
				      ffa_notifications_bitmap_t notifications,
				      bool is_per_vcpu)
{
	CHECK(vm_locked.vm != NULL);
	struct notifications *to_update =
		vm_get_notifications(vm_locked, is_from_vm);

	for (uint32_t i = 0; i < MAX_FFA_NOTIFICATIONS; i++) {
		if (vm_is_notification_bit_set(notifications, i)) {
			to_update->bindings_sender_id[i] = sender_id;
		}
	}

	/*
	 * Set the bits in the per-vCPU bindings bitmap if the notifications
	 * are per-vCPU, else clear them as the notifications are global.
	 */
	if (is_per_vcpu) {
		to_update->bindings_per_vcpu |= notifications;
	} else {
		to_update->bindings_per_vcpu &= ~notifications;
	}
}

bool vm_notifications_validate_bound_sender(
	struct vm_locked vm_locked, bool is_from_vm, ffa_id_t sender_id,
	ffa_notifications_bitmap_t notifications)
{
	CHECK(vm_locked.vm != NULL);
	struct notifications *to_check =
		vm_get_notifications(vm_locked, is_from_vm);

	for (uint32_t i = 0; i < MAX_FFA_NOTIFICATIONS; i++) {
		if (vm_is_notification_bit_set(notifications, i) &&
		    to_check->bindings_sender_id[i] != sender_id) {
			return false;
		}
	}

	return true;
}

bool vm_notifications_validate_per_vcpu(struct vm_locked vm_locked,
					bool is_from_vm, bool is_per_vcpu,
					ffa_notifications_bitmap_t notif)
{
	CHECK(vm_locked.vm != NULL);
	struct notifications *to_check =
		vm_get_notifications(vm_locked, is_from_vm);

	return is_per_vcpu ? (~to_check->bindings_per_vcpu & notif) == 0U
			   : (to_check->bindings_per_vcpu & notif) == 0U;
}

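/*
 * Worked example for the check above (the bitmaps are illustrative): with
 * bindings_per_vcpu = 0b0110, notif = 0b0110 passes for is_per_vcpu == true
 * (every requested bit is bound per-vCPU) and notif = 0b1001 passes for
 * is_per_vcpu == false (no requested bit is bound per-vCPU). notif = 0b0011
 * fails in both cases, since bit 1 is bound per-vCPU and bit 0 is global.
 */
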
static void vm_notifications_state_set(struct notifications_state *state,
				       ffa_notifications_bitmap_t notifications)
{
	/*
	 * Exclude notifications which are already pending, to avoid
	 * leaving the pending counter in an inconsistent state.
	 */
	ffa_notifications_bitmap_t to_set =
		(state->pending & notifications) ^ notifications;

	/* Change the state of the pending notifications. */
	state->pending |= to_set;
	vm_notifications_pending_count_add(to_set);
}

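/*
 * Worked example for the masking above (the bitmaps are illustrative): with
 * state->pending = 0b1100 and notifications = 0b0110,
 * (pending & notifications) ^ notifications = 0b0100 ^ 0b0110 = 0b0010, so
 * only the genuinely new bit 1 is set and counted; the already-pending bit 2
 * is left out of the pending count update.
 */
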
void vm_notifications_partition_set_pending(
	struct vm_locked vm_locked, bool is_from_vm,
	ffa_notifications_bitmap_t notifications, ffa_vcpu_index_t vcpu_id,
	bool is_per_vcpu)
{
	struct notifications *to_set;
	struct notifications_state *state;

	CHECK(vm_locked.vm != NULL);
	CHECK(vcpu_id < vm_locked.vm->vcpu_count);

	to_set = vm_get_notifications(vm_locked, is_from_vm);

	state = is_per_vcpu ? &to_set->per_vcpu[vcpu_id] : &to_set->global;

	vm_notifications_state_set(state, notifications);
}

/**
 * Set pending framework notifications.
 */
void vm_notifications_framework_set_pending(
	struct vm_locked vm_locked, ffa_notifications_bitmap_t notifications)
{
	CHECK(vm_locked.vm != NULL);
	assert(is_ffa_spm_buffer_full_notification(notifications) ||
	       is_ffa_hyp_buffer_full_notification(notifications));
	vm_notifications_state_set(&vm_locked.vm->notifications.framework,
				   notifications);
}

static ffa_notifications_bitmap_t vm_notifications_state_get_pending(
	struct notifications_state *state)
{
	ffa_notifications_bitmap_t to_ret;
	ffa_notifications_bitmap_t pending_and_info_get_retrieved;

	assert(state != NULL);

	to_ret = state->pending;

	/* Update count of currently pending notifications in the system. */
	vm_notifications_pending_count_sub(state->pending);

	/*
	 * If any of the notifications the receiver is getting have already
	 * been retrieved by the receiver scheduler, decrement them from the
	 * respective count.
	 */
	pending_and_info_get_retrieved =
		state->pending & state->info_get_retrieved;

	if (pending_and_info_get_retrieved != 0) {
		vm_notifications_info_get_retrieved_count_sub(
			pending_and_info_get_retrieved);
	}

	state->pending = 0U;
	state->info_get_retrieved = 0U;

	return to_ret;
}

J-Alves | 5136dda | 2022-03-25 12:26:38 +0000 | [diff] [blame] | 798 | /** |
| 799 | * Get global and per-vCPU notifications for the given vCPU ID. |
| 800 | */ |
| 801 | ffa_notifications_bitmap_t vm_notifications_partition_get_pending( |
| 802 | struct vm_locked vm_locked, bool is_from_vm, ffa_vcpu_index_t vcpu_id) |
| 803 | { |
| 804 | ffa_notifications_bitmap_t to_ret; |
| 805 | struct notifications *to_get; |
J-Alves | fe23ebe | 2021-10-13 16:07:07 +0100 | [diff] [blame] | 806 | |
J-Alves | 5136dda | 2022-03-25 12:26:38 +0000 | [diff] [blame] | 807 | assert(vm_locked.vm != NULL); |
| 808 | to_get = vm_get_notifications(vm_locked, is_from_vm); |
Raghu Krishnamurthy | 30aabd6 | 2022-09-17 21:41:00 -0700 | [diff] [blame] | 809 | assert(vcpu_id < vm_locked.vm->vcpu_count); |
J-Alves | fe23ebe | 2021-10-13 16:07:07 +0100 | [diff] [blame] | 810 | |
J-Alves | 5136dda | 2022-03-25 12:26:38 +0000 | [diff] [blame] | 811 | to_ret = vm_notifications_state_get_pending(&to_get->global); |
| 812 | to_ret |= |
| 813 | vm_notifications_state_get_pending(&to_get->per_vcpu[vcpu_id]); |
J-Alves | aa79c01 | 2021-07-09 14:29:45 +0100 | [diff] [blame] | 814 | |
| 815 | return to_ret; |
| 816 | } |
J-Alves | c8e8a22 | 2021-06-08 17:33:52 +0100 | [diff] [blame] | 817 | |
/**
 * Get pending framework notifications.
 */
ffa_notifications_bitmap_t vm_notifications_framework_get_pending(
	struct vm_locked vm_locked)
{
	struct vm *vm = vm_locked.vm;
	ffa_notifications_bitmap_t framework;

	assert(vm != NULL);

	framework = vm_notifications_state_get_pending(
		&vm->notifications.framework);

	return framework;
}

static bool vm_insert_notification_info_list(
	ffa_id_t vm_id, bool is_per_vcpu, ffa_vcpu_index_t vcpu_id,
	uint16_t *ids, uint32_t *ids_count, uint32_t *lists_sizes,
	uint32_t *lists_count, const uint32_t ids_max_count,
	enum notifications_info_get_state *info_get_state)
{
	CHECK(*ids_count <= ids_max_count);
	CHECK(*lists_count <= ids_max_count);

	if (*info_get_state == FULL || *ids_count == ids_max_count) {
		*info_get_state = FULL;
		return false;
	}

	switch (*info_get_state) {
	case INIT:
	case STARTING_NEW:
		/*
		 * At this iteration two IDs are to be added: the VM ID
		 * and the vCPU ID. If there is no space, change state and
		 * terminate the function.
		 */
		if (is_per_vcpu && ids_max_count - *ids_count < 2) {
			*info_get_state = FULL;
			return false;
		}

		*info_get_state = INSERTING;
		ids[*ids_count] = vm_id;
		++(*ids_count);

		if (is_per_vcpu) {
			/* Insert vCPU ID. */
			ids[*ids_count] = vcpu_id;
			++(*ids_count);
			++lists_sizes[*lists_count];
		}

		++(*lists_count);
		break;
	case INSERTING:
		/* For per-vCPU notifications only. */
		if (!is_per_vcpu) {
			break;
		}

		/* Insert vCPU ID. */
		ids[*ids_count] = vcpu_id;
		(*ids_count)++;
		/* Increment the respective list size. */
		++lists_sizes[*lists_count - 1];

		if (lists_sizes[*lists_count - 1] == 3) {
			*info_get_state = STARTING_NEW;
		}
		break;
	default:
		panic("Notification info get action error!!\n");
	}

	return true;
}

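/*
 * Worked example (the IDs are illustrative): each list starts with a VM ID
 * and, for per-vCPU notifications, is followed by up to three vCPU IDs.
 * Inserting a global notification for VM 1, then per-vCPU notifications for
 * vCPUs 0 and 2 of VM 2, yields:
 *
 *	ids         = {1, 2, 0, 2}	// VM 1; VM 2, vCPU 0, vCPU 2.
 *	lists_count = 2
 *	lists_sizes = {0, 2}		// 0 vCPU IDs for VM 1, 2 for VM 2.
 */
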
/**
 * Checks if the notification is pending and hasn't been retrieved.
 * If so, attempts to add it to the notification info list.
 * Returns true if it was successfully added to the list.
 */
static bool vm_notifications_state_info_get(
	struct notifications_state *state, ffa_id_t vm_id, bool is_per_vcpu,
	ffa_vcpu_index_t vcpu_id, uint16_t *ids, uint32_t *ids_count,
	uint32_t *lists_sizes, uint32_t *lists_count,
	const uint32_t ids_max_count,
	enum notifications_info_get_state *info_get_state)
{
	ffa_notifications_bitmap_t pending_not_retrieved;

	pending_not_retrieved = state->pending & ~state->info_get_retrieved;

	/* No notifications pending that haven't been retrieved. */
	if (pending_not_retrieved == 0U) {
		return false;
	}

	if (!vm_insert_notification_info_list(
		    vm_id, is_per_vcpu, vcpu_id, ids, ids_count, lists_sizes,
		    lists_count, ids_max_count, info_get_state)) {
		return false;
	}

	state->info_get_retrieved |= pending_not_retrieved;

	vm_notifications_info_get_retrieved_count_add(pending_not_retrieved);

	return true;
}

/**
 * Inserts the partition ID and vCPU ID into the notification information
 * returned to the receiver scheduler, if the vCPU has pending interrupts that
 * need explicit CPU cycles to be allocated to the partition.
 *
 * This is the case if:
 * - The partition has configured an SRI policy in its partition manifest and
 *   the vCPU is in the waiting state.
 * - The vCPU has pending IPIs and is in the waiting state.
 */
static void vm_interrupts_info_get(
	struct vcpu *vcpu, ffa_id_t vm_id, ffa_vcpu_index_t vcpu_id,
	uint16_t *ids, uint32_t *ids_count, uint32_t *lists_sizes,
	uint32_t *lists_count, const uint32_t ids_max_count,
	enum notifications_info_get_state *info_get_state, bool per_vcpu_added)
{
	struct vcpu_locked vcpu_locked = vcpu_lock(vcpu);
	struct vm *vm = vcpu->vm;
	bool sri_interrupts_policy_configured =
		vm->sri_policy.intr_while_waiting ||
		vm->sri_policy.intr_pending_entry_wait;

	/*
	 * If the information about interrupts in the current vCPU has been
	 * retrieved or there are no pending interrupts, skip inserting an
	 * element in the list.
	 */
	if (vcpu->interrupts_info_get_retrieved ||
	    vcpu_virt_interrupt_count_get(vcpu_locked) == 0U) {
		goto out;
	}

	/*
	 * Report any pending interrupt if the partition is in the waiting
	 * state and either:
	 * - The target partition is configured with an SRI policy.
	 * - There is a pending IPI.
	 */
	if (vcpu->state == VCPU_STATE_WAITING &&
	    (sri_interrupts_policy_configured ||
	     vcpu_is_virt_interrupt_pending(&vcpu->interrupts, HF_IPI_INTID))) {
		if (per_vcpu_added ||
		    vm_insert_notification_info_list(
			    vm_id, true, vcpu_id, ids, ids_count, lists_sizes,
			    lists_count, ids_max_count, info_get_state)) {
			vcpu->interrupts_info_get_retrieved = true;
		}
	}
out:
	vcpu_unlock(&vcpu_locked);
}

/**
 * Get pending notifications' information to return to the receiver scheduler.
 */
void vm_notifications_info_get_pending(
	struct vm_locked vm_locked, bool is_from_vm, uint16_t *ids,
	uint32_t *ids_count, uint32_t *lists_sizes, uint32_t *lists_count,
	const uint32_t ids_max_count,
	enum notifications_info_get_state *info_get_state)
{
	struct notifications *notifications;

	CHECK(vm_locked.vm != NULL);

	notifications = vm_get_notifications(vm_locked, is_from_vm);

	/*
	 * Perform info get for global notifications, before doing it for
	 * per-vCPU.
	 */
	vm_notifications_state_info_get(&notifications->global,
					vm_locked.vm->id, false, 0, ids,
					ids_count, lists_sizes, lists_count,
					ids_max_count, info_get_state);

	for (ffa_vcpu_count_t i = 0; i < vm_locked.vm->vcpu_count; i++) {
		struct vcpu *vcpu = vm_get_vcpu(vm_locked.vm, i);
		bool per_vcpu_added;

		per_vcpu_added = vm_notifications_state_info_get(
			&notifications->per_vcpu[i], vm_locked.vm->id, true, i,
			ids, ids_count, lists_sizes, lists_count, ids_max_count,
			info_get_state);

		/*
		 * IPIs can only be pending for partitions at the
		 * current virtual FF-A instance.
		 */
		if (vm_id_is_current_world(vm_locked.vm->id)) {
			vm_interrupts_info_get(vcpu, vm_locked.vm->id, i, ids,
					       ids_count, lists_sizes,
					       lists_count, ids_max_count,
					       info_get_state, per_vcpu_added);
		}
	}
}

| 1032 | /** |
| 1033 | * Gets all info from VM's pending notifications. |
| 1034 | * Returns true if the list is full, and there is more pending. |
| 1035 | */ |
| 1036 | bool vm_notifications_info_get(struct vm_locked vm_locked, uint16_t *ids, |
| 1037 | uint32_t *ids_count, uint32_t *lists_sizes, |
| 1038 | uint32_t *lists_count, |
| 1039 | const uint32_t ids_max_count) |
| 1040 | { |
| 1041 | enum notifications_info_get_state current_state = INIT; |
| 1042 | |
J-Alves | f31940e | 2022-03-25 17:24:00 +0000 | [diff] [blame] | 1043 | /* Get info of pending notifications from the framework. */ |
| 1044 | vm_notifications_state_info_get(&vm_locked.vm->notifications.framework, |
| 1045 | vm_locked.vm->id, false, 0, ids, |
| 1046 | ids_count, lists_sizes, lists_count, |
| 1047 | ids_max_count, ¤t_state); |
| 1048 | |
| 1049 | /* Get info of pending notifications from SPs. */ |
J-Alves | c8e8a22 | 2021-06-08 17:33:52 +0100 | [diff] [blame] | 1050 | vm_notifications_info_get_pending(vm_locked, false, ids, ids_count, |
| 1051 | lists_sizes, lists_count, |
| 1052 | ids_max_count, ¤t_state); |
| 1053 | |
J-Alves | f31940e | 2022-03-25 17:24:00 +0000 | [diff] [blame] | 1054 | /* Get info of pending notifications from VMs. */ |
J-Alves | c8e8a22 | 2021-06-08 17:33:52 +0100 | [diff] [blame] | 1055 | vm_notifications_info_get_pending(vm_locked, true, ids, ids_count, |
| 1056 | lists_sizes, lists_count, |
| 1057 | ids_max_count, &current_state);
| 1058 | |
| 1059 | /*
| 1060 | * The state transitions to FULL when an attempt to insert a new ID
J-Alves | 0cbd7a3 | 2025-02-10 17:29:15 +0000 | [diff] [blame] | 1061 | * into the list fails because there is no more space. This means
| 1062 | * notifications are still pending whose info has not been retrieved.
J-Alves | c8e8a22 | 2021-06-08 17:33:52 +0100 | [diff] [blame] | 1063 | */
| 1064 | return current_state == FULL; |
| 1065 | } |
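| | 
| | /*
| | * A minimal usage sketch for the function above: collect the pending
| | * notification info of a single VM into the arrays backing the
| | * FFA_NOTIFICATION_INFO_GET response. It assumes the `vm_lock`/`vm_unlock`
| | * helpers and the FFA_NOTIFICATIONS_INFO_GET_MAX_IDS bound from the wider
| | * code base; treat those names as placeholders if they differ locally.
| | */
| | static bool example_collect_notifications_info(struct vm *vm)
| | {
| | uint16_t ids[FFA_NOTIFICATIONS_INFO_GET_MAX_IDS] = {0};
| | uint32_t ids_count = 0;
| | uint32_t lists_sizes[FFA_NOTIFICATIONS_INFO_GET_MAX_IDS] = {0};
| | uint32_t lists_count = 0;
| | struct vm_locked vm_locked = vm_lock(vm);
| | bool list_is_full;
| | 
| | /* True means the ID list filled up while notifications remain pending. */
| | list_is_full = vm_notifications_info_get(vm_locked, ids, &ids_count,
| | lists_sizes, &lists_count,
| | FFA_NOTIFICATIONS_INFO_GET_MAX_IDS);
| | vm_unlock(&vm_locked);
| | 
| | return list_is_full;
| | }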
J-Alves | 439ac97 | 2021-11-18 17:32:03 +0000 | [diff] [blame] | 1066 | |
| 1067 | /** |
| 1068 | * Checks whether the VM supports the given messaging method(s).
| 1069 | */ |
Karl Meakin | d0123af | 2025-03-17 16:46:38 +0000 | [diff] [blame] | 1070 | bool vm_supports_messaging_method(struct vm *vm, uint16_t messaging_method) |
J-Alves | 439ac97 | 2021-11-18 17:32:03 +0000 | [diff] [blame] | 1071 | { |
Karl Meakin | d0123af | 2025-03-17 16:46:38 +0000 | [diff] [blame] | 1072 | return (vm->messaging_method & messaging_method) != 0; |
J-Alves | 439ac97 | 2021-11-18 17:32:03 +0000 | [diff] [blame] | 1073 | } |
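| | 
| | /*
| | * A short usage sketch: gate indirect messaging on the receiver's declared
| | * support. `FFA_PARTITION_INDIRECT_MSG` is assumed to be one of the
| | * messaging-method bits derived from the partition's manifest; substitute
| | * the local flag name if it differs.
| | */
| | static bool example_can_receive_indirect_msg(struct vm *receiver)
| | {
| | return vm_supports_messaging_method(receiver, FFA_PARTITION_INDIRECT_MSG);
| | }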
J-Alves | 6e2abc6 | 2021-12-02 14:58:56 +0000 | [diff] [blame] | 1074 | |
J-Alves | 7e67d10 | 2022-04-13 13:22:39 +0100 | [diff] [blame] | 1075 | /** |
| 1076 | * Sets the designated GP register through which the VM expects to receive
| 1077 | * the address of the boot information blob.
| 1078 | */ |
| 1079 | void vm_set_boot_info_gp_reg(struct vm *vm, struct vcpu *vcpu) |
| 1080 | { |
Olivier Deprez | b280833 | 2023-02-02 15:25:40 +0100 | [diff] [blame] | 1081 | if (vm->boot_info.blob_addr.ipa != 0U) { |
J-Alves | 7e67d10 | 2022-04-13 13:22:39 +0100 | [diff] [blame] | 1082 | arch_regs_set_gp_reg(&vcpu->regs, |
| 1083 | ipa_addr(vm->boot_info.blob_addr), |
| 1084 | vm->boot_info.gp_register_num); |
| 1085 | } |
| 1086 | } |
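| | 
| | /*
| | * A usage sketch: prime the boot vCPU of the first partition to boot so
| | * that, if a boot-info blob was provided, its address arrives in the
| | * designated GP register on first entry. Assuming vCPU 0 is the boot vCPU.
| | */
| | static void example_prime_boot_vcpu(void)
| | {
| | struct vm *first = vm_get_boot_vm();
| | struct vcpu *vcpu = vm_get_vcpu(first, 0);
| | 
| | vm_set_boot_info_gp_reg(first, vcpu);
| | }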
Madhukar Pappireddy | 18c6eb7 | 2023-08-21 12:16:18 -0500 | [diff] [blame] | 1087 | |
| 1088 | /** |
| 1089 | * Obtains the interrupt descriptor entry of the specified VM corresponding
| 1090 | * to the given interrupt ID.
| 1091 | */ |
Madhukar Pappireddy | 3221a44 | 2023-07-24 16:10:55 -0500 | [diff] [blame] | 1092 | static struct interrupt_descriptor *vm_find_interrupt_descriptor( |
Madhukar Pappireddy | 18c6eb7 | 2023-08-21 12:16:18 -0500 | [diff] [blame] | 1093 | struct vm_locked vm_locked, uint32_t id) |
| 1094 | { |
J-Alves | a89a0a0 | 2025-03-17 11:18:20 +0000 | [diff] [blame] | 1095 | for (uint32_t i = 0; i < VM_MANIFEST_MAX_INTERRUPTS; i++) { |
Madhukar Pappireddy | 18c6eb7 | 2023-08-21 12:16:18 -0500 | [diff] [blame] | 1096 | /* Interrupt descriptors are populated contiguously. */ |
| 1097 | if (!vm_locked.vm->interrupt_desc[i].valid) { |
| 1098 | break; |
| 1099 | } |
| 1100 | |
| 1101 | if (vm_locked.vm->interrupt_desc[i].interrupt_id == id) { |
| 1102 | /* Interrupt descriptor found. */ |
| 1103 | return &vm_locked.vm->interrupt_desc[i]; |
| 1104 | } |
| 1105 | } |
| 1106 | |
| 1107 | return NULL; |
| 1108 | } |
| 1109 | |
| 1110 | /** |
| 1111 | * Updates the target MPIDR of the interrupt with the specified ID
| 1112 | * belonging to the specified VM.
| 1113 | */ |
| 1114 | struct interrupt_descriptor *vm_interrupt_set_target_mpidr( |
| 1115 | struct vm_locked vm_locked, uint32_t id, uint32_t target_mpidr) |
| 1116 | { |
| 1117 | struct interrupt_descriptor *int_desc; |
| 1118 | |
| 1119 | int_desc = vm_find_interrupt_descriptor(vm_locked, id); |
| 1120 | |
| 1121 | if (int_desc != NULL) { |
Daniel Boulby | 1848594 | 2024-10-14 16:23:03 +0100 | [diff] [blame] | 1122 | int_desc->mpidr_valid = true; |
| 1123 | int_desc->mpidr = target_mpidr; |
Madhukar Pappireddy | 18c6eb7 | 2023-08-21 12:16:18 -0500 | [diff] [blame] | 1124 | } |
| 1125 | |
| 1126 | return int_desc; |
| 1127 | } |
| 1128 | |
| 1129 | /** |
| 1130 | * Updates the security state of the interrupt with the specified ID
| 1131 | * belonging to the specified VM.
| 1132 | */ |
| 1133 | struct interrupt_descriptor *vm_interrupt_set_sec_state( |
| 1134 | struct vm_locked vm_locked, uint32_t id, uint32_t sec_state) |
| 1135 | { |
| 1136 | struct interrupt_descriptor *int_desc; |
| 1137 | |
| 1138 | int_desc = vm_find_interrupt_descriptor(vm_locked, id); |
| 1139 | |
| 1140 | if (int_desc != NULL) { |
Daniel Boulby | 1848594 | 2024-10-14 16:23:03 +0100 | [diff] [blame] | 1141 | int_desc->sec_state = sec_state; |
Madhukar Pappireddy | 18c6eb7 | 2023-08-21 12:16:18 -0500 | [diff] [blame] | 1142 | } |
| 1143 | |
| 1144 | return int_desc; |
| 1145 | } |
Madhukar Pappireddy | 938faaf | 2023-07-31 17:56:55 -0500 | [diff] [blame] | 1146 | |
| 1147 | /** |
| 1148 | * Enables or disables the interrupt with the given ID in the specified VM.
| 1149 | */ |
| 1150 | struct interrupt_descriptor *vm_interrupt_set_enable(struct vm_locked vm_locked, |
| 1151 | uint32_t id, bool enable) |
| 1152 | { |
| 1153 | struct interrupt_descriptor *int_desc; |
| 1154 | |
| 1155 | int_desc = vm_find_interrupt_descriptor(vm_locked, id); |
| 1156 | |
| 1157 | if (int_desc != NULL) { |
Daniel Boulby | 1848594 | 2024-10-14 16:23:03 +0100 | [diff] [blame] | 1158 | int_desc->enabled = enable; |
Madhukar Pappireddy | 938faaf | 2023-07-31 17:56:55 -0500 | [diff] [blame] | 1159 | } |
| 1160 | |
| 1161 | return int_desc; |
| 1162 | } |
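| | 
| | /*
| | * A sketch tying the three setters together: retarget an interrupt, set
| | * its security state and enable it. `INT_DESC_SEC_STATE_S` is a
| | * hypothetical name for the secure encoding; the real value comes from
| | * the interrupt descriptor definitions.
| | */
| | static bool example_configure_interrupt(struct vm_locked vm_locked,
| | uint32_t intid, uint32_t mpidr)
| | {
| | if (vm_interrupt_set_target_mpidr(vm_locked, intid, mpidr) == NULL) {
| | /* The VM's manifest declares no descriptor with this ID. */
| | return false;
| | }
| | 
| | vm_interrupt_set_sec_state(vm_locked, intid, INT_DESC_SEC_STATE_S);
| | vm_interrupt_set_enable(vm_locked, intid, true);
| | 
| | return true;
| | }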
Madhukar Pappireddy | a49ba16 | 2024-11-25 09:40:45 -0600 | [diff] [blame] | 1163 | |
| 1164 | /**
| 1165 | * The `boot_list` sentinel marks both the start and the end of the list.
| 1166 | * Start: the node it points to belongs to the first VM to boot.
| 1167 | * End: the last node's `next` points back to the sentinel.
| 1168 | */
| 1169 | static bool vm_is_boot_list_end(struct vm *vm) |
| 1170 | { |
| 1171 | return vm->boot_list_node.next == &boot_list; |
| 1172 | } |
| 1173 | |
| 1174 | /** |
| 1175 | * Gets the first partition to boot, according to the Boot Protocol from the FF-A spec.
| 1176 | */ |
| 1177 | struct vm *vm_get_boot_vm(void) |
| 1178 | { |
| 1179 | assert(!list_empty(&boot_list)); |
| 1180 | |
| 1181 | return CONTAINER_OF(boot_list.next, struct vm, boot_list_node); |
| 1182 | } |
| 1183 | |
| 1184 | /** |
Madhukar Pappireddy | a81f541 | 2024-11-25 09:46:48 -0600 | [diff] [blame] | 1185 | * Gets the first MP partition to boot on a secondary CPU, as per the boot |
| 1186 | * order from the FF-A spec.
| 1187 | * If every SP in the system is a UP partition, this function returns NULL.
| 1188 | */ |
| 1189 | struct vm *vm_get_boot_vm_secondary_core(void) |
| 1190 | { |
| 1191 | struct vm *vm = vm_get_boot_vm(); |
| 1192 | |
| 1193 | if (vm_is_up(vm)) { |
| 1194 | return vm_get_next_boot_secondary_core(vm); |
| 1195 | } |
| 1196 | |
| 1197 | return vm; |
| 1198 | } |
| 1199 | |
| 1200 | /** |
Madhukar Pappireddy | a49ba16 | 2024-11-25 09:40:45 -0600 | [diff] [blame] | 1201 | * Returns the next element in the boot order list, if there is one. |
| 1202 | */ |
| 1203 | struct vm *vm_get_next_boot(struct vm *vm) |
| 1204 | { |
| 1205 | return vm_is_boot_list_end(vm) |
| 1206 | ? NULL |
| 1207 | : CONTAINER_OF(vm->boot_list_node.next, struct vm, |
| 1208 | boot_list_node); |
| 1209 | } |
| 1210 | |
| 1211 | /** |
Madhukar Pappireddy | a81f541 | 2024-11-25 09:46:48 -0600 | [diff] [blame] | 1212 | * Returns the next element representing an MP endpoint in the boot order list, |
| 1213 | * if there is one. |
| 1214 | */ |
| 1215 | struct vm *vm_get_next_boot_secondary_core(struct vm *vm) |
| 1216 | { |
| 1217 | struct vm *vm_next; |
| 1218 | |
| 1219 | assert(vm != NULL); |
| 1220 | |
| 1221 | vm_next = vm_get_next_boot(vm); |
| 1222 | |
| 1223 | /* Keep searching until an MP endpoint is found. */ |
| 1224 | while (vm_next != NULL && vm_is_up(vm_next)) { |
| 1225 | vm_next = vm_get_next_boot(vm_next); |
| 1226 | } |
| 1227 | |
| 1228 | return vm_next; |
| 1229 | } |
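| | 
| | /*
| | * A traversal sketch: walk every partition in boot order, then the MP-only
| | * view used when booting secondary cores. Assumes the boot list has been
| | * populated (vm_get_boot_vm() asserts it is non-empty).
| | */
| | static void example_walk_boot_order(void)
| | {
| | for (struct vm *vm = vm_get_boot_vm(); vm != NULL;
| | vm = vm_get_next_boot(vm)) {
| | dlog_info("VM %#x boots in this position\n", vm->id);
| | }
| | 
| | for (struct vm *vm = vm_get_boot_vm_secondary_core(); vm != NULL;
| | vm = vm_get_next_boot_secondary_core(vm)) {
| | dlog_info("MP VM %#x boots on secondary cores\n", vm->id);
| | }
| | }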
| 1230 | |
| 1231 | /** |
Madhukar Pappireddy | a49ba16 | 2024-11-25 09:40:45 -0600 | [diff] [blame] | 1232 | * Insert in boot list, sorted by `boot_order` parameter in the vm structure |
| 1233 | * and rooted in `first_boot_vm`. |
| 1234 | */ |
| 1235 | void vm_update_boot(struct vm *vm) |
| 1236 | { |
| 1237 | struct vm *current_vm = NULL; |
| 1238 | |
| 1239 | if (list_empty(&boot_list)) { |
| 1240 | list_prepend(&boot_list, &vm->boot_list_node); |
| 1241 | return; |
| 1242 | } |
| 1243 | |
| 1244 | /*
| 1245 | * By this point the first insertion into the list must already
| 1246 | * have been done.
| 1247 | */
| 1248 | current_vm = vm_get_boot_vm(); |
| 1249 | assert(current_vm != NULL); |
| 1250 | |
| 1251 | /* |
| 1252 | * Iterate until the insertion position is found according to boot
| 1253 | * order, or until we reach the end of the list.
| 1254 | */ |
| 1255 | while (!vm_is_boot_list_end(current_vm) && |
| 1256 | current_vm->boot_order <= vm->boot_order) { |
| 1257 | current_vm = vm_get_next_boot(current_vm); |
| 1258 | } |
| 1259 | |
| 1260 | if (current_vm->boot_order > vm->boot_order) {
| 1261 | list_prepend(&current_vm->boot_list_node, &vm->boot_list_node);
| 1262 | } else {
| | list_append(&current_vm->boot_list_node, &vm->boot_list_node);
| | }
| 1263 | } |
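| | 
| | /*
| | * A worked example of the sorted insert above, assuming lower `boot_order`
| | * values boot earlier: inserting partitions with boot_order 3, 1 and 2
| | * yields the traversal order 1, 2, 3. Equal values keep insertion order,
| | * because the walk advances past entries with boot_order <= the new one.
| | */
| | static void example_boot_order_insertion(struct vm *a, struct vm *b,
| | struct vm *c)
| | {
| | a->boot_order = 3;
| | b->boot_order = 1;
| | c->boot_order = 2;
| | 
| | vm_update_boot(a);
| | vm_update_boot(b);
| | vm_update_boot(c);
| | 
| | assert(vm_get_boot_vm() == b);
| | assert(vm_get_next_boot(b) == c);
| | assert(vm_get_next_boot(c) == a);
| | assert(vm_get_next_boot(a) == NULL);
| | }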