/*
 * Copyright 2018 The Hafnium Authors.
 *
 * Use of this source code is governed by a BSD-style
 * license that can be found in the LICENSE file or at
 * https://opensource.org/licenses/BSD-3-Clause.
 */

#include "hf/vm.h"

#include "hf/arch/spinlock.h"
#include "hf/arch/vm.h"

#include "hf/api.h"
#include "hf/assert.h"
#include "hf/check.h"
#include "hf/cpu.h"
#include "hf/dlog.h"
#include "hf/ffa.h"
#include "hf/layout.h"
#include "hf/plat/iommu.h"
#include "hf/std.h"

#include "vmapi/hf/call.h"

static struct vm vms[MAX_VMS];
static struct vm other_world;
static ffa_vm_count_t vm_count;

/**
 * Counters on the status of notifications in the system. They help to improve
 * the information retrieved by the receiver scheduler.
 */
static struct {
	/** Counts pending notifications. */
	uint32_t pending_count;
	/**
	 * Counts pending notifications that have been retrieved by the
	 * receiver scheduler.
	 */
	uint32_t info_get_retrieved_count;
	struct spinlock lock;
} all_notifications_state;

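/**
 * Initialises the partition's memory management structures, including the
 * IOMMU page tables, via the arch layer.
 */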
static bool vm_init_mm(struct vm *vm, struct mpool *ppool)
{
	return arch_vm_init_mm(vm, ppool) && arch_vm_iommu_init_mm(vm, ppool);
}

struct vm *vm_init(ffa_id_t id, ffa_vcpu_count_t vcpu_count,
		   struct mpool *ppool, bool el0_partition,
		   uint8_t dma_device_count)
{
	uint32_t i;
	struct vm *vm;
	size_t vcpu_ppool_entries = (align_up(sizeof(struct vcpu) * vcpu_count,
					      MM_PPOOL_ENTRY_SIZE) /
				     MM_PPOOL_ENTRY_SIZE);

	if (id == HF_OTHER_WORLD_ID) {
		CHECK(el0_partition == false);
		vm = &other_world;
	} else {
		uint16_t vm_index = id - HF_VM_ID_OFFSET;

		CHECK(id >= HF_VM_ID_OFFSET);
		CHECK(vm_index < ARRAY_SIZE(vms));
		vm = &vms[vm_index];
	}

	memset_s(vm, sizeof(*vm), 0, sizeof(*vm));

	sl_init(&vm->lock);

	vm->id = id;
	vm->vcpu_count = vcpu_count;

	vm->vcpus = (struct vcpu *)mpool_alloc_contiguous(
		ppool, vcpu_ppool_entries, 1);
	CHECK(vm->vcpus != NULL);

	vm->mailbox.state = MAILBOX_STATE_EMPTY;
	atomic_init(&vm->aborting, false);
	vm->el0_partition = el0_partition;
	vm->dma_device_count = dma_device_count;

	if (!vm_init_mm(vm, ppool)) {
		return NULL;
	}

	/* Do basic initialization of vCPUs. */
	for (i = 0; i < vcpu_count; i++) {
		vcpu_init(vm_get_vcpu(vm, i), vm);
	}

	vm_notifications_init(vm, vcpu_count, ppool);
	return vm;
}

bool vm_init_next(ffa_vcpu_count_t vcpu_count, struct mpool *ppool,
		  struct vm **new_vm, bool el0_partition,
		  uint8_t dma_device_count)
{
	if (vm_count >= MAX_VMS) {
		return false;
	}

	/* Generate IDs based on an offset, as low IDs (e.g. 0) are reserved. */
	*new_vm = vm_init(vm_count + HF_VM_ID_OFFSET, vcpu_count, ppool,
			  el0_partition, dma_device_count);
	if (*new_vm == NULL) {
		return false;
	}
	++vm_count;

	return true;
}

ffa_vm_count_t vm_get_count(void)
{
	return vm_count;
}

/**
 * Returns a pointer to the VM with the corresponding id.
 */
struct vm *vm_find(ffa_id_t id)
{
	uint16_t index;

	if (id == HF_OTHER_WORLD_ID) {
		if (other_world.id == HF_OTHER_WORLD_ID) {
			return &other_world;
		}
		return NULL;
	}

	/* Check that this is not a reserved ID. */
	if (id < HF_VM_ID_OFFSET) {
		return NULL;
	}

	index = id - HF_VM_ID_OFFSET;

	return vm_find_index(index);
}

/**
 * Returns a locked instance of the VM with the corresponding id.
 */
struct vm_locked vm_find_locked(ffa_id_t id)
{
	struct vm *vm = vm_find(id);

	if (vm != NULL) {
		return vm_lock(vm);
	}

	return (struct vm_locked){.vm = NULL};
}

/**
 * Returns a pointer to the VM at the specified index.
 */
struct vm *vm_find_index(uint16_t index)
{
	/* Ensure the VM is initialized. */
	if (index >= vm_count) {
		return NULL;
	}

	return &vms[index];
}

/**
 * Locks the given VM and returns a `vm_locked` instance holding it.
 */
struct vm_locked vm_lock(struct vm *vm)
{
	struct vm_locked locked = {
		.vm = vm,
	};

	sl_lock(&vm->lock);

	return locked;
}

/**
 * Locks two VMs ensuring that the locking order is according to the locks'
 * addresses.
 */
struct two_vm_locked vm_lock_both(struct vm *vm1, struct vm *vm2)
{
	struct two_vm_locked dual_lock;

	sl_lock_both(&vm1->lock, &vm2->lock);
	dual_lock.vm1.vm = vm1;
	dual_lock.vm2.vm = vm2;

	return dual_lock;
}

/**
 * Locks two VMs ensuring that the locking order is according to the locks'
 * addresses, given `vm1` is already locked.
 */
struct two_vm_locked vm_lock_both_in_order(struct vm_locked vm1, struct vm *vm2)
{
	struct spinlock *sl1 = &vm1.vm->lock;
	struct spinlock *sl2 = &vm2->lock;

	/*
	 * Use `sl_lock`/`sl_unlock` directly rather than
	 * `vm_lock`/`vm_unlock` because `vm_unlock` sets the vm field
	 * to NULL.
	 */
	if (sl1 < sl2) {
		sl_lock(sl2);
	} else {
		sl_unlock(sl1);
		sl_lock(sl2);
		sl_lock(sl1);
	}

	return (struct two_vm_locked){
		.vm1 = vm1,
		.vm2 = (struct vm_locked){.vm = vm2},
	};
}

/**
 * Unlocks a VM previously locked with vm_lock, and updates `locked` to reflect
 * the fact that the VM is no longer locked.
 */
void vm_unlock(struct vm_locked *locked)
{
	sl_unlock(&locked->vm->lock);
	locked->vm = NULL;
}

/**
 * Get the vCPU with the given index from the given VM.
 * This assumes the index is valid, i.e. less than vm->vcpu_count.
 */
struct vcpu *vm_get_vcpu(struct vm *vm, ffa_vcpu_index_t vcpu_index)
{
	CHECK(vcpu_index < vm->vcpu_count);
	return &vm->vcpus[vcpu_index];
}

/**
 * Checks whether the given `to` VM's mailbox is currently busy.
 */
bool vm_is_mailbox_busy(struct vm_locked to)
{
	return to.vm->mailbox.state != MAILBOX_STATE_EMPTY ||
	       to.vm->mailbox.recv == NULL;
}

/**
 * Checks if the mailbox is currently owned by the other world.
 */
bool vm_is_mailbox_other_world_owned(struct vm_locked to)
{
	return to.vm->mailbox.state == MAILBOX_STATE_OTHER_WORLD_OWNED;
}

/**
 * Return whether the given VM ID represents an entity in the current world:
 * i.e. the hypervisor or a normal world VM when running in the normal world,
 * or the SPM or an SP when running in the secure world.
 */
bool vm_id_is_current_world(ffa_id_t vm_id)
{
	return (vm_id & HF_VM_ID_WORLD_MASK) !=
	       (HF_OTHER_WORLD_ID & HF_VM_ID_WORLD_MASK);
}

/**
 * Map a range of addresses to the VM in both the MMU and the IOMMU.
 *
 * mm_vm_defrag should always be called after a series of page table updates,
 * whether they succeed or fail. This is because on failure extra page table
 * entries may have been allocated and then not used, while on success it may
 * be possible to compact the page table by merging several entries into a
 * block.
 *
 * Returns true on success, or false if the update failed and no changes were
 * made.
 */
bool vm_identity_map(struct vm_locked vm_locked, paddr_t begin, paddr_t end,
		     uint32_t mode, struct mpool *ppool, ipaddr_t *ipa)
{
	if (!vm_identity_prepare(vm_locked, begin, end, mode, ppool)) {
		return false;
	}

	vm_identity_commit(vm_locked, begin, end, mode, ppool, ipa);

	return true;
}

/**
 * Prepares the given VM for the given address mapping such that it will be
 * able to commit the change without failure.
 *
 * In particular, multiple calls to this function will result in the
 * corresponding calls to commit the changes succeeding.
 *
 * Returns true on success, or false if the update failed and no changes were
 * made.
 */
bool vm_identity_prepare(struct vm_locked vm_locked, paddr_t begin, paddr_t end,
			 uint32_t mode, struct mpool *ppool)
{
	return arch_vm_identity_prepare(vm_locked, begin, end, mode, ppool);
}

/**
 * Commits the given address mapping to the VM assuming the operation cannot
 * fail. `vm_identity_prepare` must be used correctly before this to ensure
 * this condition.
 */
void vm_identity_commit(struct vm_locked vm_locked, paddr_t begin, paddr_t end,
			uint32_t mode, struct mpool *ppool, ipaddr_t *ipa)
{
	arch_vm_identity_commit(vm_locked, begin, end, mode, ppool, ipa);
}

/**
 * Unmap a range of addresses from the VM.
 *
 * Returns true on success, or false if the update failed and no changes were
 * made.
 */
bool vm_unmap(struct vm_locked vm_locked, paddr_t begin, paddr_t end,
	      struct mpool *ppool)
{
	return arch_vm_unmap(vm_locked, begin, end, ppool);
}

/**
 * Defrag page tables for an EL0 partition or for a VM.
 */
void vm_ptable_defrag(struct vm_locked vm_locked, struct mpool *ppool)
{
	arch_vm_ptable_defrag(vm_locked, ppool);
}

/**
 * Unmaps the hypervisor pages from the given page table.
 */
bool vm_unmap_hypervisor(struct vm_locked vm_locked, struct mpool *ppool)
{
	/* TODO: If we add pages dynamically, they must be included here too. */
	return vm_unmap(vm_locked, layout_text_begin(), layout_text_end(),
			ppool) &&
	       vm_unmap(vm_locked, layout_rodata_begin(), layout_rodata_end(),
			ppool) &&
	       vm_unmap(vm_locked, layout_data_begin(), layout_data_end(),
			ppool) &&
	       vm_unmap(vm_locked, layout_stacks_begin(), layout_stacks_end(),
			ppool);
}

/**
 * Gets the mode of the given range of IPAs or VAs if they are mapped with the
 * same mode.
 *
 * Returns true if the range is mapped with the same mode and false otherwise.
 * The wrapper calls the appropriate mm function depending on whether the
 * partition is a VM or an EL0 partition.
 */
bool vm_mem_get_mode(struct vm_locked vm_locked, ipaddr_t begin, ipaddr_t end,
		     uint32_t *mode)
{
	return arch_vm_mem_get_mode(vm_locked, begin, end, mode);
}

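/**
 * Identity maps a range of addresses for the given DMA device in the VM's
 * IOMMU page tables, via the arch layer.
 */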
bool vm_iommu_mm_identity_map(struct vm_locked vm_locked, paddr_t begin,
			      paddr_t end, uint32_t mode, struct mpool *ppool,
			      ipaddr_t *ipa, uint8_t dma_device_id)
{
	return arch_vm_iommu_mm_identity_map(vm_locked, begin, end, mode, ppool,
					     ipa, dma_device_id);
}

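/**
 * Checks whether the locked VM's mailbox is busy: either its state is not
 * empty or no receive buffer has been set up.
 */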
bool vm_mailbox_state_busy(struct vm_locked vm_locked)
{
	return vm_locked.vm->mailbox.state != MAILBOX_STATE_EMPTY ||
	       vm_locked.vm->mailbox.recv == NULL;
}

static struct notifications *vm_get_notifications(struct vm_locked vm_locked,
						  bool is_from_vm)
{
	return is_from_vm ? &vm_locked.vm->notifications.from_vm
			  : &vm_locked.vm->notifications.from_sp;
}

/*
 * Dynamically allocates the per-vCPU notifications structures for a given VM.
 */
static void vm_notifications_init_per_vcpu_notifications(
	struct vm *vm, ffa_vcpu_count_t vcpu_count, struct mpool *ppool)
{
	size_t notif_ppool_entries =
		(align_up(sizeof(struct notifications_state) * vcpu_count,
			  MM_PPOOL_ENTRY_SIZE) /
		 MM_PPOOL_ENTRY_SIZE);

	/*
	 * Allow the function to be called on already initialized VMs, in
	 * which case the notification structures only need to be cleared.
	 */
	if (vm->notifications.from_sp.per_vcpu == NULL) {
		assert(vm->notifications.from_vm.per_vcpu == NULL);
		assert(vcpu_count != 0);
		CHECK(ppool != NULL);
		vm->notifications.from_sp.per_vcpu =
			(struct notifications_state *)mpool_alloc_contiguous(
				ppool, notif_ppool_entries, 1);
		CHECK(vm->notifications.from_sp.per_vcpu != NULL);

		vm->notifications.from_vm.per_vcpu =
			(struct notifications_state *)mpool_alloc_contiguous(
				ppool, notif_ppool_entries, 1);
		CHECK(vm->notifications.from_vm.per_vcpu != NULL);
	} else {
		assert(vm->notifications.from_vm.per_vcpu != NULL);
	}

	memset_s(vm->notifications.from_sp.per_vcpu,
		 sizeof(*(vm->notifications.from_sp.per_vcpu)) * vcpu_count, 0,
		 sizeof(*(vm->notifications.from_sp.per_vcpu)) * vcpu_count);
	memset_s(vm->notifications.from_vm.per_vcpu,
		 sizeof(*(vm->notifications.from_vm.per_vcpu)) * vcpu_count, 0,
		 sizeof(*(vm->notifications.from_vm.per_vcpu)) * vcpu_count);
}

/*
 * Initializes the sender bindings in the given notifications structure.
 */
static void vm_notifications_init_bindings(struct notifications *notifications)
{
	for (uint32_t i = 0U; i < MAX_FFA_NOTIFICATIONS; i++) {
		notifications->bindings_sender_id[i] = HF_INVALID_VM_ID;
	}
}

/*
 * Initialize notification related structures for a VM.
 */
void vm_notifications_init(struct vm *vm, ffa_vcpu_count_t vcpu_count,
			   struct mpool *ppool)
{
	vm_notifications_init_per_vcpu_notifications(vm, vcpu_count, ppool);

	/* Basic initialization of the notifications structure. */
	vm_notifications_init_bindings(&vm->notifications.from_sp);
	vm_notifications_init_bindings(&vm->notifications.from_vm);
}

/**
 * Checks if there are pending notifications.
 */
bool vm_are_notifications_pending(struct vm_locked vm_locked, bool from_vm,
				  ffa_notifications_bitmap_t notifications)
{
	struct notifications *to_check;

	CHECK(vm_locked.vm != NULL);

	to_check = vm_get_notifications(vm_locked, from_vm);

	/* Check if there are pending per-vCPU notifications. */
	for (uint32_t i = 0U; i < vm_locked.vm->vcpu_count; i++) {
		if ((to_check->per_vcpu[i].pending & notifications) != 0U) {
			return true;
		}
	}

	/* Check if there are pending global notifications. */
	return (to_check->global.pending & notifications) != 0U;
}

/**
 * Checks if there are pending global notifications, either from SPs or from
 * VMs.
 */
bool vm_are_global_notifications_pending(struct vm_locked vm_locked)
{
	return vm_get_notifications(vm_locked, true)->global.pending != 0ULL ||
	       vm_get_notifications(vm_locked, false)->global.pending != 0ULL ||
	       vm_are_fwk_notifications_pending(vm_locked);
}

/**
 * Currently only the RX buffer full notification is supported as a framework
 * notification. Returns true if one is pending, either from the hypervisor or
 * the SPMC.
 */
bool vm_are_fwk_notifications_pending(struct vm_locked vm_locked)
{
	return vm_locked.vm->notifications.framework.pending != 0ULL;
}

/**
 * Checks if there are per-vCPU notifications pending on a specific vCPU,
 * either from SPs or from VMs.
 */
bool vm_are_per_vcpu_notifications_pending(struct vm_locked vm_locked,
					   ffa_vcpu_index_t vcpu_id)
{
	CHECK(vcpu_id < vm_locked.vm->vcpu_count);

	return vm_get_notifications(vm_locked, true)
			       ->per_vcpu[vcpu_id]
			       .pending != 0ULL ||
	       vm_get_notifications(vm_locked, false)
			       ->per_vcpu[vcpu_id]
			       .pending != 0ULL;
}

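/**
 * Checks whether notifications are enabled for the given VM.
 */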
bool vm_are_notifications_enabled(struct vm *vm)
{
	return vm->notifications.enabled == true;
}

bool vm_locked_are_notifications_enabled(struct vm_locked vm_locked)
{
	return vm_are_notifications_enabled(vm_locked.vm);
}

static bool vm_is_notification_bit_set(ffa_notifications_bitmap_t notifications,
				       uint32_t i)
{
	return (notifications & FFA_NOTIFICATION_MASK(i)) != 0U;
}

static void vm_notifications_global_state_count_update(
	ffa_notifications_bitmap_t bitmap, uint32_t *counter, int inc)
{
	/*
	 * Helper to update counters in the global notifications state.
	 * Counts are updated by increments or decrements of 1 or -1,
	 * respectively.
	 */
	assert(inc == 1 || inc == -1);

	sl_lock(&all_notifications_state.lock);

	for (uint32_t i = 0; i < MAX_FFA_NOTIFICATIONS; i++) {
		if (vm_is_notification_bit_set(bitmap, i)) {
			CHECK((inc > 0 && *counter < UINT32_MAX) ||
			      (inc < 0 && *counter > 0));
			*counter += inc;
		}
	}

	sl_unlock(&all_notifications_state.lock);
}

/**
 * Helper function to increment the pending notifications count based on the
 * bitmap passed as argument.
 * To be used when setting notifications for a given VM.
 */
static void vm_notifications_pending_count_add(
	ffa_notifications_bitmap_t to_add)
{
	vm_notifications_global_state_count_update(
		to_add, &all_notifications_state.pending_count, 1);
}

/**
 * Helper function to decrement the pending notifications count.
 * Function to be used when getting the receiver's pending notifications.
 */
static void vm_notifications_pending_count_sub(
	ffa_notifications_bitmap_t to_sub)
{
	vm_notifications_global_state_count_update(
		to_sub, &all_notifications_state.pending_count, -1);
}

/**
 * Helper function to count the notifications that are still pending and whose
 * information has been retrieved by the receiver scheduler.
 */
static void vm_notifications_info_get_retrieved_count_add(
	ffa_notifications_bitmap_t to_add)
{
	vm_notifications_global_state_count_update(
		to_add, &all_notifications_state.info_get_retrieved_count, 1);
}

/**
 * Helper function to subtract the notifications that the receiver is getting
 * and whose information has been retrieved by the receiver scheduler.
 */
static void vm_notifications_info_get_retrieved_count_sub(
	ffa_notifications_bitmap_t to_sub)
{
	vm_notifications_global_state_count_update(
		to_sub, &all_notifications_state.info_get_retrieved_count, -1);
}

/**
 * Helper function to determine if there are notifications pending whose info
 * hasn't been retrieved by the receiver scheduler.
 */
bool vm_notifications_pending_not_retrieved_by_scheduler(void)
{
	bool ret;

	sl_lock(&all_notifications_state.lock);
	ret = all_notifications_state.pending_count >
	      all_notifications_state.info_get_retrieved_count;
	sl_unlock(&all_notifications_state.lock);

	return ret;
}

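/**
 * Returns true if there are no notifications pending in the whole system.
 */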
bool vm_is_notifications_pending_count_zero(void)
{
	bool ret;

	sl_lock(&all_notifications_state.lock);
	ret = all_notifications_state.pending_count == 0;
	sl_unlock(&all_notifications_state.lock);

	return ret;
}

/**
 * Checks that all provided notifications are bound to the specified sender,
 * and are per-vCPU or global, as specified.
 */
bool vm_notifications_validate_binding(struct vm_locked vm_locked,
				       bool is_from_vm, ffa_id_t sender_id,
				       ffa_notifications_bitmap_t notifications,
				       bool is_per_vcpu)
{
	return vm_notifications_validate_bound_sender(
		       vm_locked, is_from_vm, sender_id, notifications) &&
	       vm_notifications_validate_per_vcpu(vm_locked, is_from_vm,
						  is_per_vcpu, notifications);
}

/**
 * Updates binding information in the notifications structure for the
 * specified notifications.
 */
void vm_notifications_update_bindings(struct vm_locked vm_locked,
				      bool is_from_vm, ffa_id_t sender_id,
				      ffa_notifications_bitmap_t notifications,
				      bool is_per_vcpu)
{
	CHECK(vm_locked.vm != NULL);
	struct notifications *to_update =
		vm_get_notifications(vm_locked, is_from_vm);

	for (uint32_t i = 0; i < MAX_FFA_NOTIFICATIONS; i++) {
		if (vm_is_notification_bit_set(notifications, i)) {
			to_update->bindings_sender_id[i] = sender_id;
		}
	}

	/*
	 * Set notifications if they are per-vCPU, else clear them as they are
	 * global.
	 */
	if (is_per_vcpu) {
		to_update->bindings_per_vcpu |= notifications;
	} else {
		to_update->bindings_per_vcpu &= ~notifications;
	}
}

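/**
 * Checks that all given notifications are bound to the specified sender.
 */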
bool vm_notifications_validate_bound_sender(
	struct vm_locked vm_locked, bool is_from_vm, ffa_id_t sender_id,
	ffa_notifications_bitmap_t notifications)
{
	CHECK(vm_locked.vm != NULL);
	struct notifications *to_check =
		vm_get_notifications(vm_locked, is_from_vm);

	for (uint32_t i = 0; i < MAX_FFA_NOTIFICATIONS; i++) {
		if (vm_is_notification_bit_set(notifications, i) &&
		    to_check->bindings_sender_id[i] != sender_id) {
			return false;
		}
	}

	return true;
}

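/**
 * Checks that the given notifications' bindings match the requested scope:
 * all per-vCPU if `is_per_vcpu`, all global otherwise.
 */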
bool vm_notifications_validate_per_vcpu(struct vm_locked vm_locked,
					bool is_from_vm, bool is_per_vcpu,
					ffa_notifications_bitmap_t notif)
{
	CHECK(vm_locked.vm != NULL);
	struct notifications *to_check =
		vm_get_notifications(vm_locked, is_from_vm);

	return is_per_vcpu ? (~to_check->bindings_per_vcpu & notif) == 0U
			   : (to_check->bindings_per_vcpu & notif) == 0U;
}

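/**
 * Marks the given notifications as pending in the given state and updates the
 * system-wide pending count.
 */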
static void vm_notifications_state_set(struct notifications_state *state,
				       ffa_notifications_bitmap_t notifications)
{
	state->pending |= notifications;
	vm_notifications_pending_count_add(notifications);
}

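/**
 * Sets pending partition notifications, either per-vCPU or global.
 */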
void vm_notifications_partition_set_pending(
	struct vm_locked vm_locked, bool is_from_vm,
	ffa_notifications_bitmap_t notifications, ffa_vcpu_index_t vcpu_id,
	bool is_per_vcpu)
{
	struct notifications *to_set;
	struct notifications_state *state;

	CHECK(vm_locked.vm != NULL);
	CHECK(vcpu_id < vm_locked.vm->vcpu_count);

	to_set = vm_get_notifications(vm_locked, is_from_vm);

	state = is_per_vcpu ? &to_set->per_vcpu[vcpu_id] : &to_set->global;

	vm_notifications_state_set(state, notifications);
}

/**
 * Set pending framework notifications.
 */
void vm_notifications_framework_set_pending(
	struct vm_locked vm_locked, ffa_notifications_bitmap_t notifications)
{
	CHECK(vm_locked.vm != NULL);
	assert(is_ffa_spm_buffer_full_notification(notifications) ||
	       is_ffa_hyp_buffer_full_notification(notifications));
	vm_notifications_state_set(&vm_locked.vm->notifications.framework,
				   notifications);
}

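/**
 * Gets and clears the pending notifications in the given state, updating the
 * system-wide counters accordingly.
 */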
static ffa_notifications_bitmap_t vm_notifications_state_get_pending(
	struct notifications_state *state)
{
	ffa_notifications_bitmap_t to_ret;
	ffa_notifications_bitmap_t pending_and_info_get_retrieved;

	assert(state != NULL);

	to_ret = state->pending;

	/* Update count of currently pending notifications in the system. */
	vm_notifications_pending_count_sub(state->pending);

	/*
	 * If the notifications the receiver is getting have already been
	 * retrieved by the receiver scheduler, decrement those from the
	 * respective count.
	 */
	pending_and_info_get_retrieved =
		state->pending & state->info_get_retrieved;

	if (pending_and_info_get_retrieved != 0) {
		vm_notifications_info_get_retrieved_count_sub(
			pending_and_info_get_retrieved);
	}

	state->pending = 0U;
	state->info_get_retrieved = 0U;

	return to_ret;
}

/**
 * Get global and per-vCPU notifications for the given vCPU ID.
 */
ffa_notifications_bitmap_t vm_notifications_partition_get_pending(
	struct vm_locked vm_locked, bool is_from_vm, ffa_vcpu_index_t vcpu_id)
{
	ffa_notifications_bitmap_t to_ret;
	struct notifications *to_get;

	assert(vm_locked.vm != NULL);
	to_get = vm_get_notifications(vm_locked, is_from_vm);
	assert(vcpu_id < vm_locked.vm->vcpu_count);

	to_ret = vm_notifications_state_get_pending(&to_get->global);
	to_ret |=
		vm_notifications_state_get_pending(&to_get->per_vcpu[vcpu_id]);

	return to_ret;
}

/**
 * Get pending framework notifications.
 */
ffa_notifications_bitmap_t vm_notifications_framework_get_pending(
	struct vm_locked vm_locked)
{
	struct vm *vm = vm_locked.vm;
	ffa_notifications_bitmap_t framework;

	assert(vm != NULL);

	framework = vm_notifications_state_get_pending(
		&vm->notifications.framework);

	return framework;
}

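/**
 * Inserts the VM ID, and the vCPU ID for per-vCPU notifications, into the
 * list of IDs returned to the receiver scheduler, updating the ID and list
 * counts and the insertion state machine.
 * Returns false if there is no space left in the list.
 */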
static bool vm_insert_notification_info_list(
	ffa_id_t vm_id, bool is_per_vcpu, ffa_vcpu_index_t vcpu_id,
	uint16_t *ids, uint32_t *ids_count, uint32_t *lists_sizes,
	uint32_t *lists_count, const uint32_t ids_max_count,
	enum notifications_info_get_state *info_get_state)
{
	CHECK(*ids_count <= ids_max_count);
	CHECK(*lists_count <= ids_max_count);

	if (*info_get_state == FULL || *ids_count == ids_max_count) {
		*info_get_state = FULL;
		return false;
	}

	switch (*info_get_state) {
	case INIT:
	case STARTING_NEW:
		/*
		 * At this iteration two ids are to be added: the VM ID
		 * and vCPU ID. If there is no space, change state and
		 * terminate function.
		 */
		if (is_per_vcpu && ids_max_count - *ids_count < 2) {
			*info_get_state = FULL;
			return false;
		}

		*info_get_state = INSERTING;
		ids[*ids_count] = vm_id;
		++(*ids_count);

		if (is_per_vcpu) {
			/* Insert vCPU ID. */
			ids[*ids_count] = vcpu_id;
			++(*ids_count);
			++lists_sizes[*lists_count];
		}

		++(*lists_count);
		break;
	case INSERTING:
		/* For per-vCPU notifications only. */
		if (!is_per_vcpu) {
			break;
		}

		/* Insert vCPU ID. */
		ids[*ids_count] = vcpu_id;
		(*ids_count)++;
		/* Increment respective list size. */
		++lists_sizes[*lists_count - 1];

		if (lists_sizes[*lists_count - 1] == 3) {
			*info_get_state = STARTING_NEW;
		}
		break;
	default:
		panic("Notification info get action error!!\n");
	}

	return true;
}

/**
 * Check if the notification is pending and hasn't been retrieved.
 * If so, attempt to add it to the notification info list.
 * Returns true if successfully added to the list.
 */
static bool vm_notifications_state_info_get(
	struct notifications_state *state, ffa_id_t vm_id, bool is_per_vcpu,
	ffa_vcpu_index_t vcpu_id, uint16_t *ids, uint32_t *ids_count,
	uint32_t *lists_sizes, uint32_t *lists_count,
	const uint32_t ids_max_count,
	enum notifications_info_get_state *info_get_state)
{
	ffa_notifications_bitmap_t pending_not_retrieved;

	pending_not_retrieved = state->pending & ~state->info_get_retrieved;

	/* No notifications pending that haven't been retrieved. */
	if (pending_not_retrieved == 0U) {
		return false;
	}

	if (!vm_insert_notification_info_list(
		    vm_id, is_per_vcpu, vcpu_id, ids, ids_count, lists_sizes,
		    lists_count, ids_max_count, info_get_state)) {
		return false;
	}

	state->info_get_retrieved |= pending_not_retrieved;

	vm_notifications_info_get_retrieved_count_add(pending_not_retrieved);

	return true;
}

/**
 * Check if the vCPU has a pending IPI that hasn't been retrieved.
 * If so, try to add it to the notification info list.
 * Returns true if successfully added to the list.
 */
static bool vm_ipi_state_info_get(
	struct vcpu *vcpu, ffa_id_t vm_id, ffa_vcpu_index_t vcpu_id,
	uint16_t *ids, uint32_t *ids_count, uint32_t *lists_sizes,
	uint32_t *lists_count, const uint32_t ids_max_count,
	enum notifications_info_get_state *info_get_state, bool per_vcpu_added)
{
	bool ret = true;
	bool pending_not_retrieved;
	struct vcpu_locked vcpu_locked = vcpu_lock(vcpu);
	struct interrupts *interrupts = &vcpu_locked.vcpu->interrupts;

	pending_not_retrieved =
		vcpu_is_virt_interrupt_pending(interrupts, HF_IPI_INTID) &&
		!vcpu_ipi_is_info_get_retrieved(vcpu_locked);

	/* No IPI pending that hasn't been retrieved. */
	if (!pending_not_retrieved) {
		ret = false;
		goto out;
	}

	/*
	 * If the per-vCPU notification was added to the list we do not need
	 * to add it again for the IPI.
	 */
	if (!per_vcpu_added &&
	    !vm_insert_notification_info_list(
		    vm_id, true, vcpu_id, ids, ids_count, lists_sizes,
		    lists_count, ids_max_count, info_get_state)) {
		ret = false;
		goto out;
	}

	vcpu_ipi_set_info_get_retrieved(vcpu_locked);

out:
	vcpu_unlock(&vcpu_locked);

	return ret;
}

/**
 * Get pending notifications' information to return to the receiver scheduler.
 */
void vm_notifications_info_get_pending(
	struct vm_locked vm_locked, bool is_from_vm, uint16_t *ids,
	uint32_t *ids_count, uint32_t *lists_sizes, uint32_t *lists_count,
	const uint32_t ids_max_count,
	enum notifications_info_get_state *info_get_state)
{
	struct notifications *notifications;

	CHECK(vm_locked.vm != NULL);

	notifications = vm_get_notifications(vm_locked, is_from_vm);

	/*
	 * Perform info get for global notifications, before doing it for
	 * per-vCPU notifications.
	 */
	vm_notifications_state_info_get(&notifications->global,
					vm_locked.vm->id, false, 0, ids,
					ids_count, lists_sizes, lists_count,
					ids_max_count, info_get_state);

	for (ffa_vcpu_count_t i = 0; i < vm_locked.vm->vcpu_count; i++) {
		struct vcpu *vcpu = vm_get_vcpu(vm_locked.vm, i);
		bool per_vcpu_added;

		per_vcpu_added = vm_notifications_state_info_get(
			&notifications->per_vcpu[i], vm_locked.vm->id, true, i,
			ids, ids_count, lists_sizes, lists_count, ids_max_count,
			info_get_state);

		/*
		 * IPIs can only be pending for partitions at the
		 * current virtual FF-A instance.
		 */
		if (vm_id_is_current_world(vm_locked.vm->id)) {
			vm_ipi_state_info_get(vcpu, vm_locked.vm->id, i, ids,
					      ids_count, lists_sizes,
					      lists_count, ids_max_count,
					      info_get_state, per_vcpu_added);
		}
	}
}

/**
 * Gets all info from the VM's pending notifications.
 * Returns true if the list is full and there are more notifications pending.
 */
bool vm_notifications_info_get(struct vm_locked vm_locked, uint16_t *ids,
			       uint32_t *ids_count, uint32_t *lists_sizes,
			       uint32_t *lists_count,
			       const uint32_t ids_max_count)
{
	enum notifications_info_get_state current_state = INIT;

	/* Get info of pending notifications from the framework. */
	vm_notifications_state_info_get(&vm_locked.vm->notifications.framework,
					vm_locked.vm->id, false, 0, ids,
					ids_count, lists_sizes, lists_count,
					ids_max_count, &current_state);

	/* Get info of pending notifications from SPs. */
	vm_notifications_info_get_pending(vm_locked, false, ids, ids_count,
					  lists_sizes, lists_count,
					  ids_max_count, &current_state);

	/* Get info of pending notifications from VMs. */
	vm_notifications_info_get_pending(vm_locked, true, ids, ids_count,
					  lists_sizes, lists_count,
					  ids_max_count, &current_state);

	/*
	 * The state transitions to FULL when trying to insert a new ID in the
	 * list and there is no more space. This means there are notifications
	 * pending whose info has not been retrieved.
	 */
	return current_state == FULL;
}

/**
 * Checks VM's messaging method support.
 */
bool vm_supports_messaging_method(struct vm *vm, uint16_t msg_method)
{
	return (vm->messaging_method & msg_method) != 0;
}

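/**
 * Records whether a notification pending interrupt (NPI) has been injected
 * for the given VM.
 */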
void vm_notifications_set_npi_injected(struct vm_locked vm_locked,
				       bool npi_injected)
{
	vm_locked.vm->notifications.npi_injected = npi_injected;
}

bool vm_notifications_is_npi_injected(struct vm_locked vm_locked)
{
	return vm_locked.vm->notifications.npi_injected;
}

/**
 * Sets the designated GP register through which the VM expects to receive the
 * boot info's address.
 */
void vm_set_boot_info_gp_reg(struct vm *vm, struct vcpu *vcpu)
{
	if (vm->boot_info.blob_addr.ipa != 0U) {
		arch_regs_set_gp_reg(&vcpu->regs,
				     ipa_addr(vm->boot_info.blob_addr),
				     vm->boot_info.gp_register_num);
	}
}

/**
 * Obtain the interrupt descriptor entry of the specified VM corresponding
 * to the specified interrupt ID.
 */
static struct interrupt_descriptor *vm_find_interrupt_descriptor(
	struct vm_locked vm_locked, uint32_t id)
{
	for (uint32_t i = 0; i < HF_NUM_INTIDS; i++) {
		/* Interrupt descriptors are populated contiguously. */
		if (!vm_locked.vm->interrupt_desc[i].valid) {
			break;
		}

		if (vm_locked.vm->interrupt_desc[i].interrupt_id == id) {
			/* Interrupt descriptor found. */
			return &vm_locked.vm->interrupt_desc[i];
		}
	}

	return NULL;
}

/**
 * Update the target MPIDR corresponding to the specified interrupt ID
 * belonging to the specified VM.
 */
struct interrupt_descriptor *vm_interrupt_set_target_mpidr(
	struct vm_locked vm_locked, uint32_t id, uint32_t target_mpidr)
{
	struct interrupt_descriptor *int_desc;

	int_desc = vm_find_interrupt_descriptor(vm_locked, id);

	if (int_desc != NULL) {
		int_desc->mpidr_valid = true;
		int_desc->mpidr = target_mpidr;
	}

	return int_desc;
}

/**
 * Update the security state of the specified interrupt ID belonging to the
 * specified VM.
 */
struct interrupt_descriptor *vm_interrupt_set_sec_state(
	struct vm_locked vm_locked, uint32_t id, uint32_t sec_state)
{
	struct interrupt_descriptor *int_desc;

	int_desc = vm_find_interrupt_descriptor(vm_locked, id);

	if (int_desc != NULL) {
		int_desc->sec_state = sec_state;
	}

	return int_desc;
}

/**
 * Enable or disable the specified interrupt ID belonging to the specified VM.
 */
struct interrupt_descriptor *vm_interrupt_set_enable(struct vm_locked vm_locked,
						     uint32_t id, bool enable)
{
	struct interrupt_descriptor *int_desc;

	int_desc = vm_find_interrupt_descriptor(vm_locked, id);

	if (int_desc != NULL) {
		int_desc->enabled = enable;
	}

	return int_desc;
}