/*
 * Copyright 2018 The Hafnium Authors.
 *
 * Use of this source code is governed by a BSD-style
 * license that can be found in the LICENSE file or at
 * https://opensource.org/licenses/BSD-3-Clause.
 */

#include "hf/vm.h"

#include "hf/arch/spinlock.h"
#include "hf/arch/vm.h"

#include "hf/api.h"
#include "hf/assert.h"
#include "hf/check.h"
#include "hf/cpu.h"
#include "hf/dlog.h"
#include "hf/ffa.h"
#include "hf/layout.h"
#include "hf/plat/iommu.h"
#include "hf/std.h"

#include "vmapi/hf/call.h"

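/**
 * Statically allocated VM structures, the VM representing the world on the
 * other side of the FF-A boundary, and the number of VMs initialized so far.
 */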
static struct vm vms[MAX_VMS];
static struct vm other_world;
static ffa_vm_count_t vm_count;

/**
 * The `boot_list` is a special entry in the circular linked list maintained by
 * the partition manager and serves as both the start and end of the list.
 */
static struct list_entry boot_list = LIST_INIT(boot_list);

/**
 * Counters on the status of notifications in the system. They help to improve
 * the information retrieved by the receiver scheduler.
 */
static struct {
	/** Counts notifications pending. */
	uint32_t pending_count;
	/**
	 * Counts pending notifications whose information has been retrieved by
	 * the receiver scheduler.
	 */
	uint32_t info_get_retrieved_count;
	struct spinlock lock;
} all_notifications_state;

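/**
 * Initializes the VM's page tables and, where applicable, its IOMMU page
 * tables through the architecture-specific hooks.
 */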
static bool vm_init_mm(struct vm *vm, struct mpool *ppool)
{
	return arch_vm_init_mm(vm, ppool) && arch_vm_iommu_init_mm(vm, ppool);
}

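/**
 * Initializes the VM structure for the given ID: allocates its vCPUs and
 * notification structures from the given memory pool and sets up its page
 * tables. Returns NULL if the page table initialization fails.
 */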
struct vm *vm_init(ffa_id_t id, ffa_vcpu_count_t vcpu_count,
		   struct mpool *ppool, bool el0_partition,
		   uint8_t dma_device_count)
{
	uint32_t i;
	struct vm *vm;
	size_t vcpu_ppool_entries = (align_up(sizeof(struct vcpu) * vcpu_count,
					      MM_PPOOL_ENTRY_SIZE) /
				     MM_PPOOL_ENTRY_SIZE);

	if (id == HF_OTHER_WORLD_ID) {
		CHECK(el0_partition == false);
		vm = &other_world;
	} else {
		uint16_t vm_index = id - HF_VM_ID_OFFSET;

		CHECK(id >= HF_VM_ID_OFFSET);
		CHECK(vm_index < ARRAY_SIZE(vms));
		vm = &vms[vm_index];
	}

	memset_s(vm, sizeof(*vm), 0, sizeof(*vm));

	sl_init(&vm->lock);

	vm->id = id;
	vm->vcpu_count = vcpu_count;

	vm->vcpus = (struct vcpu *)mpool_alloc_contiguous(
		ppool, vcpu_ppool_entries, 1);
	CHECK(vm->vcpus != NULL);

	vm->mailbox.state = MAILBOX_STATE_EMPTY;
	atomic_init(&vm->aborting, false);
	vm->el0_partition = el0_partition;
	vm->dma_device_count = dma_device_count;

	if (!vm_init_mm(vm, ppool)) {
		return NULL;
	}

	/*
	 * Do basic initialization of vCPUs, i.e. all vCPUs of the partition
	 * shall be in the CREATED state.
	 */
	for (i = 0; i < vcpu_count; i++) {
		vcpu_init(vm_get_vcpu(vm, i), vm);
	}

	vm_notifications_init(vm, vcpu_count, ppool);
	list_init(&vm->boot_list_node);
	return vm;
}

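/**
 * Initializes the next available VM, assigning it the next sequential ID.
 * Returns false if the maximum number of VMs has already been reached or
 * initialization fails.
 */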
bool vm_init_next(ffa_vcpu_count_t vcpu_count, struct mpool *ppool,
		  struct vm **new_vm, bool el0_partition,
		  uint8_t dma_device_count)
{
	if (vm_count >= MAX_VMS) {
		return false;
	}

	/* Generate IDs based on an offset, as low IDs, e.g. 0, are reserved. */
	*new_vm = vm_init(vm_count + HF_VM_ID_OFFSET, vcpu_count, ppool,
			  el0_partition, dma_device_count);
	if (*new_vm == NULL) {
		return false;
	}
	++vm_count;

	return true;
}

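/**
 * Returns the number of VMs initialized so far.
 */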
ffa_vm_count_t vm_get_count(void)
{
	return vm_count;
}

/**
 * Returns a pointer to the VM with the corresponding id.
 */
struct vm *vm_find(ffa_id_t id)
{
	uint16_t index;

	if (id == HF_OTHER_WORLD_ID) {
		if (other_world.id == HF_OTHER_WORLD_ID) {
			return &other_world;
		}
		return NULL;
	}

	/* Check that this is not a reserved ID. */
	if (id < HF_VM_ID_OFFSET) {
		return NULL;
	}

	index = id - HF_VM_ID_OFFSET;

	return vm_find_index(index);
}

/**
 * Returns a locked instance of the VM with the corresponding id.
 */
struct vm_locked vm_find_locked(ffa_id_t id)
{
	struct vm *vm = vm_find(id);

	if (vm != NULL) {
		return vm_lock(vm);
	}

	return (struct vm_locked){.vm = NULL};
}

/**
 * Returns a pointer to the VM at the specified index.
 */
struct vm *vm_find_index(uint16_t index)
{
	/* Ensure the VM is initialized. */
	if (index >= vm_count) {
		return NULL;
	}

	return &vms[index];
}

/**
 * Locks the given VM and returns a `vm_locked` structure wrapping it.
 */
struct vm_locked vm_lock(struct vm *vm)
{
	struct vm_locked locked = {
		.vm = vm,
	};

	sl_lock(&vm->lock);

	return locked;
}

/**
 * Locks two VMs ensuring that the locking order is according to the locks'
 * addresses.
 */
struct two_vm_locked vm_lock_both(struct vm *vm1, struct vm *vm2)
{
	struct two_vm_locked dual_lock;

	sl_lock_both(&vm1->lock, &vm2->lock);
	dual_lock.vm1.vm = vm1;
	dual_lock.vm2.vm = vm2;

	return dual_lock;
}

/**
 * Locks two VMs ensuring that the locking order is according to the locks'
 * addresses, given `vm1` is already locked.
 */
struct two_vm_locked vm_lock_both_in_order(struct vm_locked vm1, struct vm *vm2)
{
	struct spinlock *sl1 = &vm1.vm->lock;
	struct spinlock *sl2 = &vm2->lock;

	/*
	 * Use `sl_lock`/`sl_unlock` directly rather than
	 * `vm_lock`/`vm_unlock` because `vm_unlock` sets the vm field
	 * to NULL.
	 */
	if (sl1 < sl2) {
		sl_lock(sl2);
	} else {
		sl_unlock(sl1);
		sl_lock(sl2);
		sl_lock(sl1);
	}

	return (struct two_vm_locked){
		.vm1 = vm1,
		.vm2 = (struct vm_locked){.vm = vm2},
	};
}

/**
 * Unlocks a VM previously locked with vm_lock, and updates `locked` to reflect
 * the fact that the VM is no longer locked.
 */
void vm_unlock(struct vm_locked *locked)
{
	sl_unlock(&locked->vm->lock);
	locked->vm = NULL;
}

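/*
 * Illustrative usage of the locking helpers above (a sketch, not part of the
 * original source):
 *
 *	struct vm_locked vm_locked = vm_lock(vm);
 *	... read or modify vm_locked.vm while holding the lock ...
 *	vm_unlock(&vm_locked);
 */
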
/**
 * Get the vCPU with the given index from the given VM.
 * This assumes the index is valid, i.e. less than vm->vcpu_count.
 */
struct vcpu *vm_get_vcpu(struct vm *vm, ffa_vcpu_index_t vcpu_index)
{
	CHECK(vcpu_index < vm->vcpu_count);
	return &vm->vcpus[vcpu_index];
}

/**
 * Checks whether the given `to` VM's mailbox is currently busy.
 */
bool vm_is_mailbox_busy(struct vm_locked to)
{
	return to.vm->mailbox.state != MAILBOX_STATE_EMPTY ||
	       to.vm->mailbox.recv == NULL;
}

/**
 * Checks if mailbox is currently owned by the other world.
 */
bool vm_is_mailbox_other_world_owned(struct vm_locked to)
{
	return to.vm->mailbox.state == MAILBOX_STATE_OTHER_WORLD_OWNED;
}

/**
 * Return whether the given VM ID represents an entity in the current world:
 * i.e. the hypervisor or a normal world VM when running in the normal world, or
 * the SPM or an SP when running in the secure world.
 */
bool vm_id_is_current_world(ffa_id_t vm_id)
{
	return (vm_id & HF_VM_ID_WORLD_MASK) !=
	       (HF_OTHER_WORLD_ID & HF_VM_ID_WORLD_MASK);
}

/**
 * Map a range of addresses to the VM in both the MMU and the IOMMU.
 *
 * mm_vm_defrag should always be called after a series of page table updates,
 * whether they succeed or fail. This is because on failure extra page table
 * entries may have been allocated and then not used, while on success it may be
 * possible to compact the page table by merging several entries into a block.
 *
 * Returns true on success, or false if the update failed and no changes were
 * made.
 */
bool vm_identity_map(struct vm_locked vm_locked, paddr_t begin, paddr_t end,
		     mm_mode_t mode, struct mpool *ppool, ipaddr_t *ipa)
{
	if (!vm_identity_prepare(vm_locked, begin, end, mode, ppool)) {
		return false;
	}

	vm_identity_commit(vm_locked, begin, end, mode, ppool, ipa);

	return true;
}

/**
 * Prepares the given VM for the given address mapping such that it will be able
 * to commit the change without failure.
 *
 * In particular, after multiple calls to this function, the corresponding calls
 * to commit the changes will succeed.
 *
 * Returns true on success, or false if the update failed and no changes were
 * made.
 */
bool vm_identity_prepare(struct vm_locked vm_locked, paddr_t begin, paddr_t end,
			 mm_mode_t mode, struct mpool *ppool)
{
	return arch_vm_identity_prepare(vm_locked, begin, end, mode, ppool);
}

/**
 * Commits the given address mapping to the VM assuming the operation cannot
 * fail. `vm_identity_prepare` must be used correctly before this to ensure
 * this condition.
 */
void vm_identity_commit(struct vm_locked vm_locked, paddr_t begin, paddr_t end,
			mm_mode_t mode, struct mpool *ppool, ipaddr_t *ipa)
{
	arch_vm_identity_commit(vm_locked, begin, end, mode, ppool, ipa);
}

/**
 * Unmap a range of addresses from the VM.
 *
 * Returns true on success, or false if the update failed and no changes were
 * made.
 */
bool vm_unmap(struct vm_locked vm_locked, paddr_t begin, paddr_t end,
	      struct mpool *ppool)
{
	return arch_vm_unmap(vm_locked, begin, end, ppool);
}

/**
 * Defrag page tables for an EL0 partition or for a VM.
 */
void vm_ptable_defrag(struct vm_locked vm_locked, struct mpool *ppool)
{
	arch_vm_ptable_defrag(vm_locked, ppool);
}

/**
 * Unmaps the hypervisor pages from the given page table.
 */
bool vm_unmap_hypervisor(struct vm_locked vm_locked, struct mpool *ppool)
{
	/* TODO: If we add pages dynamically, they must be included here too. */
	return vm_unmap(vm_locked, layout_text_begin(), layout_text_end(),
			ppool) &&
	       vm_unmap(vm_locked, layout_rodata_begin(), layout_rodata_end(),
			ppool) &&
	       vm_unmap(vm_locked, layout_data_begin(), layout_data_end(),
			ppool) &&
	       vm_unmap(vm_locked, layout_stacks_begin(), layout_stacks_end(),
			ppool);
}

/**
 * Gets the mode of the given range of IPAs or VAs if they are mapped with the
 * same mode.
 *
 * Returns true if the range is mapped with the same mode and false otherwise.
 * The wrapper calls the appropriate mm function depending on whether the
 * partition is a VM or an EL0 partition.
 */
bool vm_mem_get_mode(struct vm_locked vm_locked, ipaddr_t begin, ipaddr_t end,
		     mm_mode_t *mode)
{
	return arch_vm_mem_get_mode(vm_locked, begin, end, mode);
}

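/**
 * Identity-maps a range of addresses in the VM's IOMMU page tables for the
 * given DMA device, via the architecture-specific hook.
 */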
bool vm_iommu_mm_identity_map(struct vm_locked vm_locked, paddr_t begin,
			      paddr_t end, mm_mode_t mode, struct mpool *ppool,
			      ipaddr_t *ipa, uint8_t dma_device_id)
{
	return arch_vm_iommu_mm_identity_map(vm_locked, begin, end, mode, ppool,
					     ipa, dma_device_id);
}

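/**
 * Checks whether the given VM's mailbox is busy: its receive buffer is either
 * not set up or not currently empty.
 */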
bool vm_mailbox_state_busy(struct vm_locked vm_locked)
{
	return vm_locked.vm->mailbox.state != MAILBOX_STATE_EMPTY ||
	       vm_locked.vm->mailbox.recv == NULL;
}

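/**
 * Returns the VM's notifications structure for notifications coming either
 * from VMs or from SPs, as selected by `is_from_vm`.
 */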
static struct notifications *vm_get_notifications(struct vm_locked vm_locked,
						  bool is_from_vm)
{
	return is_from_vm ? &vm_locked.vm->notifications.from_vm
			  : &vm_locked.vm->notifications.from_sp;
}

/*
 * Dynamically allocates the per-vCPU notifications structures for a given VM.
 */
static void vm_notifications_init_per_vcpu_notifications(
	struct vm *vm, ffa_vcpu_count_t vcpu_count, struct mpool *ppool)
{
	size_t notif_ppool_entries =
		(align_up(sizeof(struct notifications_state) * vcpu_count,
			  MM_PPOOL_ENTRY_SIZE) /
		 MM_PPOOL_ENTRY_SIZE);

	/*
	 * Allow the function to be called on already initialized VMs, in which
	 * case the notification structures only need to be cleared.
	 */
	if (vm->notifications.from_sp.per_vcpu == NULL) {
		assert(vm->notifications.from_vm.per_vcpu == NULL);
		assert(vcpu_count != 0);
		CHECK(ppool != NULL);
		vm->notifications.from_sp.per_vcpu =
			(struct notifications_state *)mpool_alloc_contiguous(
				ppool, notif_ppool_entries, 1);
		CHECK(vm->notifications.from_sp.per_vcpu != NULL);

		vm->notifications.from_vm.per_vcpu =
			(struct notifications_state *)mpool_alloc_contiguous(
				ppool, notif_ppool_entries, 1);
		CHECK(vm->notifications.from_vm.per_vcpu != NULL);
	} else {
		assert(vm->notifications.from_vm.per_vcpu != NULL);
	}

	memset_s(vm->notifications.from_sp.per_vcpu,
		 sizeof(*(vm->notifications.from_sp.per_vcpu)) * vcpu_count, 0,
		 sizeof(*(vm->notifications.from_sp.per_vcpu)) * vcpu_count);
	memset_s(vm->notifications.from_vm.per_vcpu,
		 sizeof(*(vm->notifications.from_vm.per_vcpu)) * vcpu_count, 0,
		 sizeof(*(vm->notifications.from_vm.per_vcpu)) * vcpu_count);
}

/*
 * Initializes the sender bindings in the notifications structure.
 */
static void vm_notifications_init_bindings(struct notifications *notifications)
{
	for (uint32_t i = 0U; i < MAX_FFA_NOTIFICATIONS; i++) {
		notifications->bindings_sender_id[i] = HF_INVALID_VM_ID;
	}
}

/*
 * Initializes notification-related structures for a VM.
 */
void vm_notifications_init(struct vm *vm, ffa_vcpu_count_t vcpu_count,
			   struct mpool *ppool)
{
	vm_notifications_init_per_vcpu_notifications(vm, vcpu_count, ppool);

	/* Basic initialization of the notifications structure. */
	vm_notifications_init_bindings(&vm->notifications.from_sp);
	vm_notifications_init_bindings(&vm->notifications.from_vm);
}

/**
 * Checks if there are pending notifications.
 */
bool vm_are_notifications_pending(struct vm_locked vm_locked, bool from_vm,
				  ffa_notifications_bitmap_t notifications)
{
	struct notifications *to_check;

	CHECK(vm_locked.vm != NULL);

	to_check = vm_get_notifications(vm_locked, from_vm);

	/* Check if there are pending per vcpu notifications */
	for (uint32_t i = 0U; i < vm_locked.vm->vcpu_count; i++) {
		if ((to_check->per_vcpu[i].pending & notifications) != 0U) {
			return true;
		}
	}

	/* Check if there are global pending notifications */
	return (to_check->global.pending & notifications) != 0U;
}

/**
 * Checks if there are pending global notifications, either from SPs or from
 * VMs.
 */
bool vm_are_global_notifications_pending(struct vm_locked vm_locked)
{
	return vm_get_notifications(vm_locked, true)->global.pending != 0ULL ||
	       vm_get_notifications(vm_locked, false)->global.pending != 0ULL ||
	       vm_are_fwk_notifications_pending(vm_locked);
}

/**
 * Currently only the RX buffer full notification is supported as a framework
 * notification. Returns true if there is one pending, either from the
 * hypervisor or the SPMC.
 */
bool vm_are_fwk_notifications_pending(struct vm_locked vm_locked)
{
	return vm_locked.vm->notifications.framework.pending != 0ULL;
}

/**
 * Checks if there are per-vCPU notifications pending for a specific vCPU,
 * either from SPs or from VMs.
 */
bool vm_are_per_vcpu_notifications_pending(struct vm_locked vm_locked,
					   ffa_vcpu_index_t vcpu_id)
{
	CHECK(vcpu_id < vm_locked.vm->vcpu_count);

	return vm_get_notifications(vm_locked, true)
			->per_vcpu[vcpu_id]
			.pending != 0ULL ||
	       vm_get_notifications(vm_locked, false)
			->per_vcpu[vcpu_id]
			.pending != 0ULL;
}

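/**
 * Returns true if notifications are enabled for the given VM.
 */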
bool vm_are_notifications_enabled(struct vm *vm)
{
	return vm->notifications.enabled;
}

bool vm_locked_are_notifications_enabled(struct vm_locked vm_locked)
{
	return vm_are_notifications_enabled(vm_locked.vm);
}

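/**
 * Returns true if bit `i` is set in the given notifications bitmap.
 */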
static bool vm_is_notification_bit_set(ffa_notifications_bitmap_t notifications,
				       uint32_t i)
{
	return (notifications & FFA_NOTIFICATION_MASK(i)) != 0U;
}

static void vm_notifications_global_state_count_update(
	ffa_notifications_bitmap_t bitmap, uint32_t *counter, int inc)
{
	/*
	 * Helper to update the counters of the global notifications state.
	 * The count is updated by increments or decrements of 1, i.e. `inc`
	 * must be 1 or -1, respectively.
	 */
	assert(inc == 1 || inc == -1);

	sl_lock(&all_notifications_state.lock);

	for (uint32_t i = 0; i < MAX_FFA_NOTIFICATIONS; i++) {
		if (vm_is_notification_bit_set(bitmap, i)) {
			CHECK((inc > 0 && *counter < UINT32_MAX) ||
			      (inc < 0 && *counter > 0));
			*counter += inc;
		}
	}

	sl_unlock(&all_notifications_state.lock);
}

/**
 * Helper function to increment the pending notifications count, based on a
 * bitmap passed as argument.
 * Function to be used when setting notifications for a given VM.
 */
static void vm_notifications_pending_count_add(
	ffa_notifications_bitmap_t to_add)
{
	vm_notifications_global_state_count_update(
		to_add, &all_notifications_state.pending_count, 1);
}

/**
 * Helper function to decrement the pending notifications count.
 * Function to be used when getting the receiver's pending notifications.
 */
static void vm_notifications_pending_count_sub(
	ffa_notifications_bitmap_t to_sub)
{
	vm_notifications_global_state_count_update(
		to_sub, &all_notifications_state.pending_count, -1);
}

/**
 * Helper function to count the notifications whose information has been
 * retrieved by the scheduler of the system, and are still pending.
 */
static void vm_notifications_info_get_retrieved_count_add(
	ffa_notifications_bitmap_t to_add)
{
	vm_notifications_global_state_count_update(
		to_add, &all_notifications_state.info_get_retrieved_count, 1);
}

/**
 * Helper function to subtract the notifications that the receiver is getting
 * and whose information has been retrieved by the receiver scheduler.
 */
static void vm_notifications_info_get_retrieved_count_sub(
	ffa_notifications_bitmap_t to_sub)
{
	vm_notifications_global_state_count_update(
		to_sub, &all_notifications_state.info_get_retrieved_count, -1);
}

/**
 * Helper function to determine if there are notifications pending whose info
 * hasn't been retrieved by the receiver scheduler.
 */
bool vm_notifications_pending_not_retrieved_by_scheduler(void)
{
	bool ret;

	sl_lock(&all_notifications_state.lock);
	ret = all_notifications_state.pending_count >
	      all_notifications_state.info_get_retrieved_count;
	sl_unlock(&all_notifications_state.lock);

	return ret;
}

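/**
 * Returns true if there are no notifications pending in the whole system.
 */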
bool vm_is_notifications_pending_count_zero(void)
{
	bool ret;

	sl_lock(&all_notifications_state.lock);
	ret = all_notifications_state.pending_count == 0;
	sl_unlock(&all_notifications_state.lock);

	return ret;
}

/**
 * Checks that all provided notifications are bound to the specified sender, and
 * are per VCPU or global, as specified.
 */
bool vm_notifications_validate_binding(struct vm_locked vm_locked,
				       bool is_from_vm, ffa_id_t sender_id,
				       ffa_notifications_bitmap_t notifications,
				       bool is_per_vcpu)
{
	return vm_notifications_validate_bound_sender(
		       vm_locked, is_from_vm, sender_id, notifications) &&
	       vm_notifications_validate_per_vcpu(vm_locked, is_from_vm,
						  is_per_vcpu, notifications);
}

/**
 * Updates the binding information in the notifications structure for the
 * specified notifications.
 */
void vm_notifications_update_bindings(struct vm_locked vm_locked,
				      bool is_from_vm, ffa_id_t sender_id,
				      ffa_notifications_bitmap_t notifications,
				      bool is_per_vcpu)
{
	CHECK(vm_locked.vm != NULL);
	struct notifications *to_update =
		vm_get_notifications(vm_locked, is_from_vm);

	for (uint32_t i = 0; i < MAX_FFA_NOTIFICATIONS; i++) {
		if (vm_is_notification_bit_set(notifications, i)) {
			to_update->bindings_sender_id[i] = sender_id;
		}
	}

	/*
	 * Set notifications if they are per VCPU, else clear them as they are
	 * global.
	 */
	if (is_per_vcpu) {
		to_update->bindings_per_vcpu |= notifications;
	} else {
		to_update->bindings_per_vcpu &= ~notifications;
	}
}

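/**
 * Checks that all the specified notifications are bound to the given sender.
 */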
bool vm_notifications_validate_bound_sender(
	struct vm_locked vm_locked, bool is_from_vm, ffa_id_t sender_id,
	ffa_notifications_bitmap_t notifications)
{
	CHECK(vm_locked.vm != NULL);
	struct notifications *to_check =
		vm_get_notifications(vm_locked, is_from_vm);

	for (uint32_t i = 0; i < MAX_FFA_NOTIFICATIONS; i++) {
		if (vm_is_notification_bit_set(notifications, i) &&
		    to_check->bindings_sender_id[i] != sender_id) {
			return false;
		}
	}

	return true;
}

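/**
 * Checks that the specified notifications are bound as per-vCPU or as global,
 * matching the `is_per_vcpu` argument.
 */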
bool vm_notifications_validate_per_vcpu(struct vm_locked vm_locked,
					bool is_from_vm, bool is_per_vcpu,
					ffa_notifications_bitmap_t notif)
{
	CHECK(vm_locked.vm != NULL);
	struct notifications *to_check =
		vm_get_notifications(vm_locked, is_from_vm);

	return is_per_vcpu ? (~to_check->bindings_per_vcpu & notif) == 0U
			   : (to_check->bindings_per_vcpu & notif) == 0U;
}

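/**
 * Marks the given notifications as pending in the given notifications state,
 * updating the global pending count accordingly.
 */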
static void vm_notifications_state_set(struct notifications_state *state,
				       ffa_notifications_bitmap_t notifications)
{
	/*
	 * Exclude notifications which are already pending, to avoid
	 * leaving the pending counter in an inconsistent state.
	 */
	ffa_notifications_bitmap_t to_set =
		(state->pending & notifications) ^ notifications;

	/* Change the state of the pending notifications. */
	state->pending |= to_set;
	vm_notifications_pending_count_add(to_set);
}

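/**
 * Sets the given notifications as pending for the partition, either globally
 * or for the specified vCPU, depending on `is_per_vcpu`.
 */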
void vm_notifications_partition_set_pending(
	struct vm_locked vm_locked, bool is_from_vm,
	ffa_notifications_bitmap_t notifications, ffa_vcpu_index_t vcpu_id,
	bool is_per_vcpu)
{
	struct notifications *to_set;
	struct notifications_state *state;

	CHECK(vm_locked.vm != NULL);
	CHECK(vcpu_id < vm_locked.vm->vcpu_count);

	to_set = vm_get_notifications(vm_locked, is_from_vm);

	state = is_per_vcpu ? &to_set->per_vcpu[vcpu_id] : &to_set->global;

	vm_notifications_state_set(state, notifications);
}

/**
 * Set pending framework notifications.
 */
void vm_notifications_framework_set_pending(
	struct vm_locked vm_locked, ffa_notifications_bitmap_t notifications)
{
	CHECK(vm_locked.vm != NULL);
	assert(is_ffa_spm_buffer_full_notification(notifications) ||
	       is_ffa_hyp_buffer_full_notification(notifications));
	vm_notifications_state_set(&vm_locked.vm->notifications.framework,
				   notifications);
}

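/**
 * Returns and clears the pending notifications of the given state, updating
 * the global counters accordingly.
 */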
static ffa_notifications_bitmap_t vm_notifications_state_get_pending(
	struct notifications_state *state)
{
	ffa_notifications_bitmap_t to_ret;
	ffa_notifications_bitmap_t pending_and_info_get_retrieved;

	assert(state != NULL);

	to_ret = state->pending;

	/* Update count of currently pending notifications in the system. */
	vm_notifications_pending_count_sub(state->pending);

	/*
	 * If the notifications the receiver is getting have already had their
	 * information retrieved by the receiver scheduler, decrement them from
	 * the respective count.
	 */
	pending_and_info_get_retrieved =
		state->pending & state->info_get_retrieved;

	if (pending_and_info_get_retrieved != 0) {
		vm_notifications_info_get_retrieved_count_sub(
			pending_and_info_get_retrieved);
	}

	state->pending = 0U;
	state->info_get_retrieved = 0U;

	return to_ret;
}

J-Alves5136dda2022-03-25 12:26:38 +0000798/**
799 * Get global and per-vCPU notifications for the given vCPU ID.
800 */
801ffa_notifications_bitmap_t vm_notifications_partition_get_pending(
802 struct vm_locked vm_locked, bool is_from_vm, ffa_vcpu_index_t vcpu_id)
803{
804 ffa_notifications_bitmap_t to_ret;
805 struct notifications *to_get;
J-Alvesfe23ebe2021-10-13 16:07:07 +0100806
J-Alves5136dda2022-03-25 12:26:38 +0000807 assert(vm_locked.vm != NULL);
808 to_get = vm_get_notifications(vm_locked, is_from_vm);
Raghu Krishnamurthy30aabd62022-09-17 21:41:00 -0700809 assert(vcpu_id < vm_locked.vm->vcpu_count);
J-Alvesfe23ebe2021-10-13 16:07:07 +0100810
J-Alves5136dda2022-03-25 12:26:38 +0000811 to_ret = vm_notifications_state_get_pending(&to_get->global);
812 to_ret |=
813 vm_notifications_state_get_pending(&to_get->per_vcpu[vcpu_id]);
J-Alvesaa79c012021-07-09 14:29:45 +0100814
815 return to_ret;
816}
J-Alvesc8e8a222021-06-08 17:33:52 +0100817
818/**
J-Alves663682a2022-03-25 13:56:51 +0000819 * Get pending framework notifications.
820 */
821ffa_notifications_bitmap_t vm_notifications_framework_get_pending(
822 struct vm_locked vm_locked)
823{
Federico Recanati6c1e05c2022-04-20 11:37:26 +0200824 struct vm *vm = vm_locked.vm;
825 ffa_notifications_bitmap_t framework;
Federico Recanati6c1e05c2022-04-20 11:37:26 +0200826
827 assert(vm != NULL);
828
829 framework = vm_notifications_state_get_pending(
830 &vm->notifications.framework);
831
Federico Recanati6c1e05c2022-04-20 11:37:26 +0200832 return framework;
J-Alves663682a2022-03-25 13:56:51 +0000833}
834
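/**
 * Inserts the given VM ID (and the vCPU ID, for per-vCPU notifications) into
 * the notification information list, updating the list sizes and count.
 * Returns false if the list has no more space.
 */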
static bool vm_insert_notification_info_list(
	ffa_id_t vm_id, bool is_per_vcpu, ffa_vcpu_index_t vcpu_id,
	uint16_t *ids, uint32_t *ids_count, uint32_t *lists_sizes,
	uint32_t *lists_count, const uint32_t ids_max_count,
	enum notifications_info_get_state *info_get_state)
{
	CHECK(*ids_count <= ids_max_count);
	CHECK(*lists_count <= ids_max_count);

	if (*info_get_state == FULL || *ids_count == ids_max_count) {
		*info_get_state = FULL;
		return false;
	}

	switch (*info_get_state) {
	case INIT:
	case STARTING_NEW:
		/*
		 * At this iteration two ids are to be added: the VM ID
		 * and vCPU ID. If there is no space, change state and
		 * terminate function.
		 */
		if (is_per_vcpu && ids_max_count - *ids_count < 2) {
			*info_get_state = FULL;
			return false;
		}

		*info_get_state = INSERTING;
		ids[*ids_count] = vm_id;
		++(*ids_count);

		if (is_per_vcpu) {
			/* Insert vCPU ID. */
			ids[*ids_count] = vcpu_id;
			++(*ids_count);
			++lists_sizes[*lists_count];
		}

		++(*lists_count);
		break;
	case INSERTING:
		/* For per-vCPU notifications only. */
		if (!is_per_vcpu) {
			break;
		}

		/* Insert vCPU ID */
		ids[*ids_count] = vcpu_id;
		(*ids_count)++;
		/* Increment respective list size */
		++lists_sizes[*lists_count - 1];

		if (lists_sizes[*lists_count - 1] == 3) {
			*info_get_state = STARTING_NEW;
		}
		break;
	default:
		panic("Notification info get action error!!\n");
	}

	return true;
}

/**
 * Check if the notification is pending and hasn't been retrieved.
 * If so, attempt to add it to the notification info list.
 * Returns true if successfully added to the list.
 */
static bool vm_notifications_state_info_get(
	struct notifications_state *state, ffa_id_t vm_id, bool is_per_vcpu,
	ffa_vcpu_index_t vcpu_id, uint16_t *ids, uint32_t *ids_count,
	uint32_t *lists_sizes, uint32_t *lists_count,
	const uint32_t ids_max_count,
	enum notifications_info_get_state *info_get_state)
{
	ffa_notifications_bitmap_t pending_not_retrieved;

	pending_not_retrieved = state->pending & ~state->info_get_retrieved;

	/* No notifications pending that haven't been retrieved. */
	if (pending_not_retrieved == 0U) {
		return false;
	}

	if (!vm_insert_notification_info_list(
		    vm_id, is_per_vcpu, vcpu_id, ids, ids_count, lists_sizes,
		    lists_count, ids_max_count, info_get_state)) {
		return false;
	}

	state->info_get_retrieved |= pending_not_retrieved;

	vm_notifications_info_get_retrieved_count_add(pending_not_retrieved);

	return true;
}

/**
 * Inserts the partition information and vCPU ID in the notification
 * information returned to the scheduler, if the vCPU has pending interrupts
 * that need explicit CPU cycles to be allocated to the partition.
 *
 * This can be the case if:
 * - The partition has configured an SRI policy in its partition manifest, and
 *   it is in the waiting state.
 * - It has pending IPIs, and it is in the waiting state.
 */
static void vm_interrupts_info_get(
	struct vcpu *vcpu, ffa_id_t vm_id, ffa_vcpu_index_t vcpu_id,
	uint16_t *ids, uint32_t *ids_count, uint32_t *lists_sizes,
	uint32_t *lists_count, const uint32_t ids_max_count,
	enum notifications_info_get_state *info_get_state, bool per_vcpu_added)
{
	struct vcpu_locked vcpu_locked = vcpu_lock(vcpu);
	struct vm *vm = vcpu->vm;
	bool sri_interrupts_policy_configured =
		vm->sri_policy.intr_while_waiting ||
		vm->sri_policy.intr_pending_entry_wait;

	/*
	 * If the information about interrupts in the current vCPU has been
	 * retrieved or there are no pending interrupts, skip inserting an
	 * element in the list.
	 */
	if (vcpu->interrupts_info_get_retrieved ||
	    vcpu_virt_interrupt_count_get(vcpu_locked) == 0U) {
		goto out;
	}

	/*
	 * Report any interrupt that is pending if the partition is in the
	 * waiting state, and either:
	 * - The target partition is configured with an SRI policy.
	 * - There is a pending IPI and the SP is in the waiting state.
	 */
	if (vcpu->state == VCPU_STATE_WAITING &&
	    (sri_interrupts_policy_configured ||
	     vcpu_is_virt_interrupt_pending(&vcpu->interrupts, HF_IPI_INTID))) {
		if (per_vcpu_added ||
		    vm_insert_notification_info_list(
			    vm_id, true, vcpu_id, ids, ids_count, lists_sizes,
			    lists_count, ids_max_count, info_get_state)) {
			vcpu->interrupts_info_get_retrieved = true;
		}
	}
out:
	vcpu_unlock(&vcpu_locked);
}

J-Alves663682a2022-03-25 13:56:51 +0000987/**
J-Alvesc8e8a222021-06-08 17:33:52 +0100988 * Get pending notification's information to return to the receiver scheduler.
989 */
990void vm_notifications_info_get_pending(
991 struct vm_locked vm_locked, bool is_from_vm, uint16_t *ids,
992 uint32_t *ids_count, uint32_t *lists_sizes, uint32_t *lists_count,
993 const uint32_t ids_max_count,
994 enum notifications_info_get_state *info_get_state)
995{
J-Alves17c9b6d2022-03-25 14:39:05 +0000996 struct notifications *notifications;
J-Alvesc8e8a222021-06-08 17:33:52 +0100997
998 CHECK(vm_locked.vm != NULL);
J-Alvesc8e8a222021-06-08 17:33:52 +0100999
J-Alves17c9b6d2022-03-25 14:39:05 +00001000 notifications = vm_get_notifications(vm_locked, is_from_vm);
J-Alvesc8e8a222021-06-08 17:33:52 +01001001
J-Alves17c9b6d2022-03-25 14:39:05 +00001002 /*
1003 * Perform info get for global notifications, before doing it for
1004 * per-vCPU.
1005 */
1006 vm_notifications_state_info_get(&notifications->global,
1007 vm_locked.vm->id, false, 0, ids,
1008 ids_count, lists_sizes, lists_count,
1009 ids_max_count, info_get_state);
J-Alvesfe23ebe2021-10-13 16:07:07 +01001010
J-Alvesc8e8a222021-06-08 17:33:52 +01001011 for (ffa_vcpu_count_t i = 0; i < vm_locked.vm->vcpu_count; i++) {
Daniel Boulby1f2babf2024-08-29 16:39:47 +01001012 struct vcpu *vcpu = vm_get_vcpu(vm_locked.vm, i);
1013 bool per_vcpu_added;
1014
1015 per_vcpu_added = vm_notifications_state_info_get(
J-Alves17c9b6d2022-03-25 14:39:05 +00001016 &notifications->per_vcpu[i], vm_locked.vm->id, true, i,
1017 ids, ids_count, lists_sizes, lists_count, ids_max_count,
1018 info_get_state);
Daniel Boulby1f2babf2024-08-29 16:39:47 +01001019 /*
1020 * IPIs can only be pending for partitions at the
1021 * current virtual FF-A instance.
1022 */
1023 if (vm_id_is_current_world(vm_locked.vm->id)) {
J-Alves0cbd7a32025-02-10 17:29:15 +00001024 vm_interrupts_info_get(vcpu, vm_locked.vm->id, i, ids,
1025 ids_count, lists_sizes,
1026 lists_count, ids_max_count,
1027 info_get_state, per_vcpu_added);
Daniel Boulby1f2babf2024-08-29 16:39:47 +01001028 }
J-Alvesc8e8a222021-06-08 17:33:52 +01001029 }
1030}
1031
/**
 * Gets all info from the VM's pending notifications.
 * Returns true if the list is full and there are more notifications pending.
 */
bool vm_notifications_info_get(struct vm_locked vm_locked, uint16_t *ids,
			       uint32_t *ids_count, uint32_t *lists_sizes,
			       uint32_t *lists_count,
			       const uint32_t ids_max_count)
{
	enum notifications_info_get_state current_state = INIT;

	/* Get info of pending notifications from the framework. */
	vm_notifications_state_info_get(&vm_locked.vm->notifications.framework,
					vm_locked.vm->id, false, 0, ids,
					ids_count, lists_sizes, lists_count,
					ids_max_count, &current_state);

	/* Get info of pending notifications from SPs. */
	vm_notifications_info_get_pending(vm_locked, false, ids, ids_count,
					  lists_sizes, lists_count,
					  ids_max_count, &current_state);

	/* Get info of pending notifications from VMs. */
	vm_notifications_info_get_pending(vm_locked, true, ids, ids_count,
					  lists_sizes, lists_count,
					  ids_max_count, &current_state);

	/*
	 * The state transitions to FULL when trying to insert a new ID in the
	 * list and there is no more space. This means there are notifications
	 * pending whose info has not been retrieved.
	 */
	return current_state == FULL;
}

1067/**
1068 * Checks VM's messaging method support.
1069 */
Karl Meakind0123af2025-03-17 16:46:38 +00001070bool vm_supports_messaging_method(struct vm *vm, uint16_t messaging_method)
J-Alves439ac972021-11-18 17:32:03 +00001071{
Karl Meakind0123af2025-03-17 16:46:38 +00001072 return (vm->messaging_method & messaging_method) != 0;
J-Alves439ac972021-11-18 17:32:03 +00001073}
J-Alves6e2abc62021-12-02 14:58:56 +00001074
/**
 * Sets the designated GP register through which the VM expects to receive the
 * boot info's address.
 */
void vm_set_boot_info_gp_reg(struct vm *vm, struct vcpu *vcpu)
{
	if (vm->boot_info.blob_addr.ipa != 0U) {
		arch_regs_set_gp_reg(&vcpu->regs,
				     ipa_addr(vm->boot_info.blob_addr),
				     vm->boot_info.gp_register_num);
	}
}

1088/**
1089 * Obtain the interrupt descriptor entry of the specified vm corresponding
1090 * to the specific interrupt id.
1091 */
Madhukar Pappireddy3221a442023-07-24 16:10:55 -05001092static struct interrupt_descriptor *vm_find_interrupt_descriptor(
Madhukar Pappireddy18c6eb72023-08-21 12:16:18 -05001093 struct vm_locked vm_locked, uint32_t id)
1094{
J-Alvesa89a0a02025-03-17 11:18:20 +00001095 for (uint32_t i = 0; i < VM_MANIFEST_MAX_INTERRUPTS; i++) {
Madhukar Pappireddy18c6eb72023-08-21 12:16:18 -05001096 /* Interrupt descriptors are populated contiguously. */
1097 if (!vm_locked.vm->interrupt_desc[i].valid) {
1098 break;
1099 }
1100
1101 if (vm_locked.vm->interrupt_desc[i].interrupt_id == id) {
1102 /* Interrupt descriptor found. */
1103 return &vm_locked.vm->interrupt_desc[i];
1104 }
1105 }
1106
1107 return NULL;
1108}
1109
/**
 * Updates the target MPIDR corresponding to the specified interrupt ID
 * belonging to the specified VM.
 */
struct interrupt_descriptor *vm_interrupt_set_target_mpidr(
	struct vm_locked vm_locked, uint32_t id, uint32_t target_mpidr)
{
	struct interrupt_descriptor *int_desc;

	int_desc = vm_find_interrupt_descriptor(vm_locked, id);

	if (int_desc != NULL) {
		int_desc->mpidr_valid = true;
		int_desc->mpidr = target_mpidr;
	}

	return int_desc;
}

/**
 * Updates the security state of the specified interrupt ID belonging to the
 * specified VM.
 */
struct interrupt_descriptor *vm_interrupt_set_sec_state(
	struct vm_locked vm_locked, uint32_t id, uint32_t sec_state)
{
	struct interrupt_descriptor *int_desc;

	int_desc = vm_find_interrupt_descriptor(vm_locked, id);

	if (int_desc != NULL) {
		int_desc->sec_state = sec_state;
	}

	return int_desc;
}

/**
 * Enables or disables the specified interrupt ID belonging to the specified
 * VM.
 */
struct interrupt_descriptor *vm_interrupt_set_enable(struct vm_locked vm_locked,
						     uint32_t id, bool enable)
{
	struct interrupt_descriptor *int_desc;

	int_desc = vm_find_interrupt_descriptor(vm_locked, id);

	if (int_desc != NULL) {
		int_desc->enabled = enable;
	}

	return int_desc;
}

/**
 * The 'boot_list' is used as the start and end of the list.
 * Start: the node it points to is the first VM to boot.
 * End: the last node's next points to the entry.
 */
static bool vm_is_boot_list_end(struct vm *vm)
{
	return vm->boot_list_node.next == &boot_list;
}

/**
 * Gets the first partition to boot, according to the boot protocol from the
 * FF-A spec.
 */
struct vm *vm_get_boot_vm(void)
{
	assert(!list_empty(&boot_list));

	return CONTAINER_OF(boot_list.next, struct vm, boot_list_node);
}

/**
 * Gets the first MP partition to boot on a secondary CPU, as per the boot
 * order from the FF-A spec.
 * If every SP in the system is a UP partition, this function returns NULL.
 */
struct vm *vm_get_boot_vm_secondary_core(void)
{
	struct vm *vm = vm_get_boot_vm();

	if (vm_is_up(vm)) {
		return vm_get_next_boot_secondary_core(vm);
	}

	return vm;
}

/**
 * Returns the next element in the boot order list, if there is one.
 */
struct vm *vm_get_next_boot(struct vm *vm)
{
	return vm_is_boot_list_end(vm)
		       ? NULL
		       : CONTAINER_OF(vm->boot_list_node.next, struct vm,
				      boot_list_node);
}

/**
 * Returns the next element representing an MP endpoint in the boot order list,
 * if there is one.
 */
struct vm *vm_get_next_boot_secondary_core(struct vm *vm)
{
	struct vm *vm_next;

	assert(vm != NULL);

	vm_next = vm_get_next_boot(vm);

	/* Keep searching until an MP endpoint is found. */
	while (vm_next != NULL && vm_is_up(vm_next)) {
		vm_next = vm_get_next_boot(vm_next);
	}

	return vm_next;
}

/**
 * Inserts the VM in the boot list, sorted by the `boot_order` parameter in the
 * vm structure and rooted in `boot_list`.
 */
void vm_update_boot(struct vm *vm)
{
	struct vm *current_vm = NULL;

	if (list_empty(&boot_list)) {
		list_prepend(&boot_list, &vm->boot_list_node);
		return;
	}

	/*
	 * When getting to this point the first insertion should have
	 * been done.
	 */
	current_vm = vm_get_boot_vm();
	assert(current_vm != NULL);

	/*
	 * Iterate until the position is found according to boot order, or
	 * until we reach the end of the list.
	 */
	while (!vm_is_boot_list_end(current_vm) &&
	       current_vm->boot_order <= vm->boot_order) {
		current_vm = vm_get_next_boot(current_vm);
	}

	current_vm->boot_order > vm->boot_order
		? list_prepend(&current_vm->boot_list_node, &vm->boot_list_node)
		: list_append(&current_vm->boot_list_node, &vm->boot_list_node);
}