/*
 * Copyright 2018 The Hafnium Authors.
 *
 * Use of this source code is governed by a BSD-style
 * license that can be found in the LICENSE file or at
 * https://opensource.org/licenses/BSD-3-Clause.
 */

#include "hf/vm.h"

#include "hf/arch/spinlock.h"
#include "hf/arch/vm.h"

#include "hf/api.h"
#include "hf/assert.h"
#include "hf/check.h"
#include "hf/cpu.h"
#include "hf/dlog.h"
#include "hf/ffa.h"
#include "hf/layout.h"
#include "hf/plat/iommu.h"
#include "hf/std.h"

#include "vmapi/hf/call.h"

static struct vm vms[MAX_VMS];
static struct vm other_world;
static ffa_vm_count_t vm_count;

/**
 * Counters on the state of notifications in the system. They help improve
 * the information retrieved by the receiver scheduler.
 */
static struct {
	/** Counts pending notifications. */
	uint32_t pending_count;
	/**
	 * Counts pending notifications whose information has already been
	 * retrieved by the receiver scheduler.
	 */
	uint32_t info_get_retrieved_count;
	struct spinlock lock;
} all_notifications_state;

static bool vm_init_mm(struct vm *vm, struct mpool *ppool)
{
	return arch_vm_init_mm(vm, ppool) && arch_vm_iommu_init_mm(vm, ppool);
}

struct vm *vm_init(ffa_id_t id, ffa_vcpu_count_t vcpu_count,
		   struct mpool *ppool, bool el0_partition,
		   uint8_t dma_device_count)
{
	uint32_t i;
	struct vm *vm;
	size_t vcpu_ppool_entries = (align_up(sizeof(struct vcpu) * vcpu_count,
					      MM_PPOOL_ENTRY_SIZE) /
				     MM_PPOOL_ENTRY_SIZE);

	if (id == HF_OTHER_WORLD_ID) {
		CHECK(el0_partition == false);
		vm = &other_world;
	} else {
		uint16_t vm_index = id - HF_VM_ID_OFFSET;

		CHECK(id >= HF_VM_ID_OFFSET);
		CHECK(vm_index < ARRAY_SIZE(vms));
		vm = &vms[vm_index];
	}

	memset_s(vm, sizeof(*vm), 0, sizeof(*vm));

	list_init(&vm->mailbox.waiter_list);
	list_init(&vm->mailbox.ready_list);
	sl_init(&vm->lock);

	vm->id = id;
	vm->vcpu_count = vcpu_count;

	vm->vcpus = (struct vcpu *)mpool_alloc_contiguous(
		ppool, vcpu_ppool_entries, 1);
	CHECK(vm->vcpus != NULL);

	vm->mailbox.state = MAILBOX_STATE_EMPTY;
	atomic_init(&vm->aborting, false);
	vm->el0_partition = el0_partition;
	vm->dma_device_count = dma_device_count;

	if (!vm_init_mm(vm, ppool)) {
		return NULL;
	}

	/* Initialise waiter entries. */
	for (i = 0; i < MAX_VMS; i++) {
		vm->wait_entries[i].waiting_vm = vm;
		list_init(&vm->wait_entries[i].wait_links);
		list_init(&vm->wait_entries[i].ready_links);
	}

	/* Do basic initialization of vCPUs. */
	for (i = 0; i < vcpu_count; i++) {
		vcpu_init(vm_get_vcpu(vm, i), vm);
	}

	vm_notifications_init(vm, vcpu_count, ppool);
	return vm;
}

bool vm_init_next(ffa_vcpu_count_t vcpu_count, struct mpool *ppool,
		  struct vm **new_vm, bool el0_partition,
		  uint8_t dma_device_count)
{
	if (vm_count >= MAX_VMS) {
		return false;
	}

	/* Generate IDs based on an offset, as low IDs, e.g. 0, are reserved. */
	*new_vm = vm_init(vm_count + HF_VM_ID_OFFSET, vcpu_count, ppool,
			  el0_partition, dma_device_count);
	if (*new_vm == NULL) {
		return false;
	}
	++vm_count;

	return true;
}
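
/*
 * Illustrative sketch, not part of the build: how a loader could use
 * vm_init_next() when creating partitions. The partition count and per-VM
 * parameters below are assumptions; the real values come from the manifest.
 *
 *	struct vm *new_vm;
 *
 *	for (ffa_vm_count_t i = 0; i < assumed_partition_count; i++) {
 *		CHECK(vm_init_next(assumed_vcpu_count, ppool, &new_vm,
 *				   false, 0));
 *		// The new VM gets ID vm_get_count() - 1 + HF_VM_ID_OFFSET.
 *	}
 */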

ffa_vm_count_t vm_get_count(void)
{
	return vm_count;
}

/**
 * Returns a pointer to the VM with the corresponding id.
 */
struct vm *vm_find(ffa_id_t id)
{
	uint16_t index;

	if (id == HF_OTHER_WORLD_ID) {
		if (other_world.id == HF_OTHER_WORLD_ID) {
			return &other_world;
		}
		return NULL;
	}

	/* Check that this is not a reserved ID. */
	if (id < HF_VM_ID_OFFSET) {
		return NULL;
	}

	index = id - HF_VM_ID_OFFSET;

	return vm_find_index(index);
}

/**
 * Returns a locked instance of the VM with the corresponding id.
 */
struct vm_locked vm_find_locked(ffa_id_t id)
{
	struct vm *vm = vm_find(id);

	if (vm != NULL) {
		return vm_lock(vm);
	}

	return (struct vm_locked){.vm = NULL};
}

/**
 * Returns a pointer to the VM at the specified index.
 */
struct vm *vm_find_index(uint16_t index)
{
	/* Ensure the VM is initialized. */
	if (index >= vm_count) {
		return NULL;
	}

	return &vms[index];
}

/**
 * Locks the given VM and returns a `vm_locked` instance holding the newly
 * locked VM.
 */
struct vm_locked vm_lock(struct vm *vm)
{
	struct vm_locked locked = {
		.vm = vm,
	};

	sl_lock(&vm->lock);

	return locked;
}

/**
 * Locks two VMs ensuring that the locking order is according to the locks'
 * addresses.
 */
struct two_vm_locked vm_lock_both(struct vm *vm1, struct vm *vm2)
{
	struct two_vm_locked dual_lock;

	sl_lock_both(&vm1->lock, &vm2->lock);
	dual_lock.vm1.vm = vm1;
	dual_lock.vm2.vm = vm2;

	return dual_lock;
}
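
/*
 * Illustrative sketch, not part of the build: typical use of vm_lock_both()
 * when two VMs must be held at once. The function decides the acquisition
 * order from the lock addresses, so callers need not order the arguments,
 * but both locks must be released afterwards.
 *
 *	struct two_vm_locked both = vm_lock_both(sender, receiver);
 *
 *	// ... operate on both.vm1 and both.vm2 ...
 *
 *	vm_unlock(&both.vm1);
 *	vm_unlock(&both.vm2);
 */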

/**
 * Locks two VMs ensuring that the locking order is according to the locks'
 * addresses, given `vm1` is already locked.
 */
struct two_vm_locked vm_lock_both_in_order(struct vm_locked vm1, struct vm *vm2)
{
	struct spinlock *sl1 = &vm1.vm->lock;
	struct spinlock *sl2 = &vm2->lock;

	/*
	 * Use `sl_lock`/`sl_unlock` directly rather than
	 * `vm_lock`/`vm_unlock` because `vm_unlock` sets the vm field
	 * to NULL.
	 */
	if (sl1 < sl2) {
		sl_lock(sl2);
	} else {
		sl_unlock(sl1);
		sl_lock(sl2);
		sl_lock(sl1);
	}

	return (struct two_vm_locked){
		.vm1 = vm1,
		.vm2 = (struct vm_locked){.vm = vm2},
	};
}

/**
 * Unlocks a VM previously locked with vm_lock, and updates `locked` to reflect
 * the fact that the VM is no longer locked.
 */
void vm_unlock(struct vm_locked *locked)
{
	sl_unlock(&locked->vm->lock);
	locked->vm = NULL;
}

/**
 * Get the vCPU with the given index from the given VM.
 * This assumes the index is valid, i.e. less than vm->vcpu_count.
 */
struct vcpu *vm_get_vcpu(struct vm *vm, ffa_vcpu_index_t vcpu_index)
{
	CHECK(vcpu_index < vm->vcpu_count);
	return &vm->vcpus[vcpu_index];
}

/**
 * Gets `vm`'s wait entry for waiting on `for_vm`.
 */
struct wait_entry *vm_get_wait_entry(struct vm *vm, ffa_id_t for_vm)
{
	uint16_t index;

	CHECK(for_vm >= HF_VM_ID_OFFSET);
	index = for_vm - HF_VM_ID_OFFSET;
	CHECK(index < MAX_VMS);

	return &vm->wait_entries[index];
}

/**
 * Checks whether the given `to` VM's mailbox is currently busy.
 */
bool vm_is_mailbox_busy(struct vm_locked to)
{
	return to.vm->mailbox.state != MAILBOX_STATE_EMPTY ||
	       to.vm->mailbox.recv == NULL;
}

/**
 * Checks if the mailbox is currently owned by the other world.
 */
bool vm_is_mailbox_other_world_owned(struct vm_locked to)
{
	return to.vm->mailbox.state == MAILBOX_STATE_OTHER_WORLD_OWNED;
}

/**
 * Gets the ID of the VM which the given VM's wait entry is for.
 */
ffa_id_t vm_id_for_wait_entry(struct vm *vm, struct wait_entry *entry)
{
	uint16_t index = entry - vm->wait_entries;

	return index + HF_VM_ID_OFFSET;
}
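
/*
 * Illustrative note: vm_get_wait_entry() and vm_id_for_wait_entry() are
 * inverses of each other, e.g.:
 *
 *	struct wait_entry *entry = vm_get_wait_entry(vm, HF_VM_ID_OFFSET + 2);
 *	// entry == &vm->wait_entries[2]
 *	// vm_id_for_wait_entry(vm, entry) == HF_VM_ID_OFFSET + 2
 */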

/**
 * Return whether the given VM ID represents an entity in the current world:
 * i.e. the hypervisor or a normal world VM when running in the normal world, or
 * the SPM or an SP when running in the secure world.
 */
bool vm_id_is_current_world(ffa_id_t vm_id)
{
	return (vm_id & HF_VM_ID_WORLD_MASK) !=
	       (HF_OTHER_WORLD_ID & HF_VM_ID_WORLD_MASK);
}

/**
 * Map a range of addresses to the VM in both the MMU and the IOMMU.
 *
 * mm_vm_defrag should always be called after a series of page table updates,
 * whether they succeed or fail. This is because on failure extra page table
 * entries may have been allocated and then not used, while on success it may be
 * possible to compact the page table by merging several entries into a block.
 *
 * Returns true on success, or false if the update failed and no changes were
 * made.
 */
bool vm_identity_map(struct vm_locked vm_locked, paddr_t begin, paddr_t end,
		     uint32_t mode, struct mpool *ppool, ipaddr_t *ipa)
{
	if (!vm_identity_prepare(vm_locked, begin, end, mode, ppool)) {
		return false;
	}

	vm_identity_commit(vm_locked, begin, end, mode, ppool, ipa);

	return true;
}
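
/*
 * Illustrative sketch, not part of the build: the prepare/commit split lets a
 * caller validate several ranges before committing any of them, so a failure
 * leaves no partial mapping. The ranges below are assumptions for
 * illustration; passing NULL for the resulting ipa is assumed to be accepted.
 *
 *	if (!vm_identity_prepare(vm_locked, begin0, end0, mode, ppool) ||
 *	    !vm_identity_prepare(vm_locked, begin1, end1, mode, ppool)) {
 *		return false;
 *	}
 *	vm_identity_commit(vm_locked, begin0, end0, mode, ppool, NULL);
 *	vm_identity_commit(vm_locked, begin1, end1, mode, ppool, NULL);
 */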

/**
 * Prepares the given VM for the given address mapping such that it will be
 * able to commit the change without failure.
 *
 * In particular, multiple calls to this function will result in the
 * corresponding calls to commit the changes succeeding.
 *
 * Returns true on success, or false if the update failed and no changes were
 * made.
 */
bool vm_identity_prepare(struct vm_locked vm_locked, paddr_t begin, paddr_t end,
			 uint32_t mode, struct mpool *ppool)
{
	return arch_vm_identity_prepare(vm_locked, begin, end, mode, ppool);
}

/**
 * Commits the given address mapping to the VM assuming the operation cannot
 * fail. `vm_identity_prepare` must be used correctly before this to ensure
 * this condition.
 */
void vm_identity_commit(struct vm_locked vm_locked, paddr_t begin, paddr_t end,
			uint32_t mode, struct mpool *ppool, ipaddr_t *ipa)
{
	arch_vm_identity_commit(vm_locked, begin, end, mode, ppool, ipa);
}

/**
 * Unmap a range of addresses from the VM.
 *
 * Returns true on success, or false if the update failed and no changes were
 * made.
 */
bool vm_unmap(struct vm_locked vm_locked, paddr_t begin, paddr_t end,
	      struct mpool *ppool)
{
	return arch_vm_unmap(vm_locked, begin, end, ppool);
}

/**
 * Defrag page tables for an EL0 partition or for a VM.
 */
void vm_ptable_defrag(struct vm_locked vm_locked, struct mpool *ppool)
{
	arch_vm_ptable_defrag(vm_locked, ppool);
}

/**
 * Unmaps the hypervisor pages from the given page table.
 */
bool vm_unmap_hypervisor(struct vm_locked vm_locked, struct mpool *ppool)
{
	/* TODO: If we add pages dynamically, they must be included here too. */
	return vm_unmap(vm_locked, layout_text_begin(), layout_text_end(),
			ppool) &&
	       vm_unmap(vm_locked, layout_rodata_begin(), layout_rodata_end(),
			ppool) &&
	       vm_unmap(vm_locked, layout_data_begin(), layout_data_end(),
			ppool) &&
	       vm_unmap(vm_locked, layout_stacks_begin(), layout_stacks_end(),
			ppool);
}

/**
 * Gets the mode of the given range of IPAs or VAs if they are mapped with the
 * same mode.
 *
 * Returns true if the range is mapped with the same mode and false otherwise.
 * The wrapper calls the appropriate mm function depending on whether the
 * partition is a VM or an EL0 partition.
 */
bool vm_mem_get_mode(struct vm_locked vm_locked, ipaddr_t begin, ipaddr_t end,
		     uint32_t *mode)
{
	return arch_vm_mem_get_mode(vm_locked, begin, end, mode);
}

bool vm_iommu_mm_identity_map(struct vm_locked vm_locked, paddr_t begin,
			      paddr_t end, uint32_t mode, struct mpool *ppool,
			      ipaddr_t *ipa, uint8_t dma_device_id)
{
	return arch_vm_iommu_mm_identity_map(vm_locked, begin, end, mode, ppool,
					     ipa, dma_device_id);
}

bool vm_mailbox_state_busy(struct vm_locked vm_locked)
{
	return vm_locked.vm->mailbox.state != MAILBOX_STATE_EMPTY ||
	       vm_locked.vm->mailbox.recv == NULL;
}

static struct notifications *vm_get_notifications(struct vm_locked vm_locked,
						  bool is_from_vm)
{
	return is_from_vm ? &vm_locked.vm->notifications.from_vm
			  : &vm_locked.vm->notifications.from_sp;
}

/*
 * Dynamically allocates the per-vCPU notifications structures for a given VM.
 */
static void vm_notifications_init_per_vcpu_notifications(
	struct vm *vm, ffa_vcpu_count_t vcpu_count, struct mpool *ppool)
{
	size_t notif_ppool_entries =
		(align_up(sizeof(struct notifications_state) * vcpu_count,
			  MM_PPOOL_ENTRY_SIZE) /
		 MM_PPOOL_ENTRY_SIZE);

	/*
	 * Allow the function to be called on already initialized VMs, in which
	 * case only the notification structures need to be cleared.
	 */
	if (vm->notifications.from_sp.per_vcpu == NULL) {
		assert(vm->notifications.from_vm.per_vcpu == NULL);
		assert(vcpu_count != 0);
		CHECK(ppool != NULL);
		vm->notifications.from_sp.per_vcpu =
			(struct notifications_state *)mpool_alloc_contiguous(
				ppool, notif_ppool_entries, 1);
		CHECK(vm->notifications.from_sp.per_vcpu != NULL);

		vm->notifications.from_vm.per_vcpu =
			(struct notifications_state *)mpool_alloc_contiguous(
				ppool, notif_ppool_entries, 1);
		CHECK(vm->notifications.from_vm.per_vcpu != NULL);
	} else {
		assert(vm->notifications.from_vm.per_vcpu != NULL);
	}

	memset_s(vm->notifications.from_sp.per_vcpu,
		 sizeof(*(vm->notifications.from_sp.per_vcpu)) * vcpu_count, 0,
		 sizeof(*(vm->notifications.from_sp.per_vcpu)) * vcpu_count);
	memset_s(vm->notifications.from_vm.per_vcpu,
		 sizeof(*(vm->notifications.from_vm.per_vcpu)) * vcpu_count, 0,
		 sizeof(*(vm->notifications.from_vm.per_vcpu)) * vcpu_count);
}

/*
 * Initializes the notification bindings structure.
 */
static void vm_notifications_init_bindings(struct notifications *notifications)
{
	for (uint32_t i = 0U; i < MAX_FFA_NOTIFICATIONS; i++) {
		notifications->bindings_sender_id[i] = HF_INVALID_VM_ID;
	}
}

/*
 * Initializes the notification-related structures for a VM.
 */
void vm_notifications_init(struct vm *vm, ffa_vcpu_count_t vcpu_count,
			   struct mpool *ppool)
{
	vm_notifications_init_per_vcpu_notifications(vm, vcpu_count, ppool);

	/* Basic initialization of the notifications structure. */
	vm_notifications_init_bindings(&vm->notifications.from_sp);
	vm_notifications_init_bindings(&vm->notifications.from_vm);
}
497
J-Alvesa0f317d2021-06-09 13:31:59 +0100498/**
499 * Checks if there are pending notifications.
500 */
501bool vm_are_notifications_pending(struct vm_locked vm_locked, bool from_vm,
502 ffa_notifications_bitmap_t notifications)
503{
504 struct notifications *to_check;
505
506 CHECK(vm_locked.vm != NULL);
507
J-Alves7461ef22021-10-18 17:21:33 +0100508 to_check = vm_get_notifications(vm_locked, from_vm);
J-Alvesa0f317d2021-06-09 13:31:59 +0100509
510 /* Check if there are pending per vcpu notifications */
Raghu Krishnamurthy30aabd62022-09-17 21:41:00 -0700511 for (uint32_t i = 0U; i < vm_locked.vm->vcpu_count; i++) {
J-Alvesa0f317d2021-06-09 13:31:59 +0100512 if ((to_check->per_vcpu[i].pending & notifications) != 0U) {
513 return true;
514 }
515 }
516
517 /* Check if there are global pending notifications */
518 return (to_check->global.pending & notifications) != 0U;
519}

/**
 * Checks if there are pending global notifications, either from SPs or from
 * VMs.
 */
bool vm_are_global_notifications_pending(struct vm_locked vm_locked)
{
	return vm_get_notifications(vm_locked, true)->global.pending != 0ULL ||
	       vm_get_notifications(vm_locked, false)->global.pending != 0ULL ||
	       vm_are_fwk_notifications_pending(vm_locked);
}

/**
 * Currently only the RX buffer full notification is supported as a framework
 * notification. Returns true if there is one pending, either from the
 * Hypervisor or the SPMC.
 */
bool vm_are_fwk_notifications_pending(struct vm_locked vm_locked)
{
	return vm_locked.vm->notifications.framework.pending != 0ULL;
}

/**
 * Checks if there are pending per-vCPU notifications in a specific vCPU,
 * either from SPs or from VMs.
 */
bool vm_are_per_vcpu_notifications_pending(struct vm_locked vm_locked,
					   ffa_vcpu_index_t vcpu_id)
{
	CHECK(vcpu_id < vm_locked.vm->vcpu_count);

	return vm_get_notifications(vm_locked, true)
			       ->per_vcpu[vcpu_id]
			       .pending != 0ULL ||
	       vm_get_notifications(vm_locked, false)
			       ->per_vcpu[vcpu_id]
			       .pending != 0ULL;
}

bool vm_are_notifications_enabled(struct vm *vm)
{
	return vm->notifications.enabled;
}

bool vm_locked_are_notifications_enabled(struct vm_locked vm_locked)
{
	return vm_are_notifications_enabled(vm_locked.vm);
}

static bool vm_is_notification_bit_set(ffa_notifications_bitmap_t notifications,
				       uint32_t i)
{
	return (notifications & FFA_NOTIFICATION_MASK(i)) != 0U;
}

static void vm_notifications_global_state_count_update(
	ffa_notifications_bitmap_t bitmap, uint32_t *counter, int inc)
{
	/*
	 * Helper to update the counters of the global notifications state.
	 * Counters are updated in steps of 1 or -1, for increments and
	 * decrements respectively.
	 */
	assert(inc == 1 || inc == -1);

	sl_lock(&all_notifications_state.lock);

	for (uint32_t i = 0; i < MAX_FFA_NOTIFICATIONS; i++) {
		if (vm_is_notification_bit_set(bitmap, i)) {
			CHECK((inc > 0 && *counter < UINT32_MAX) ||
			      (inc < 0 && *counter > 0));
			*counter += inc;
		}
	}

	sl_unlock(&all_notifications_state.lock);
}

/**
 * Helper function to increment the count of pending notifications based on a
 * bitmap passed as argument.
 * Function to be used when setting notifications for a given VM.
 */
static void vm_notifications_pending_count_add(
	ffa_notifications_bitmap_t to_add)
{
	vm_notifications_global_state_count_update(
		to_add, &all_notifications_state.pending_count, 1);
}

/**
 * Helper function to decrement the pending notifications count.
 * Function to be used when getting the receiver's pending notifications.
 */
static void vm_notifications_pending_count_sub(
	ffa_notifications_bitmap_t to_sub)
{
	vm_notifications_global_state_count_update(
		to_sub, &all_notifications_state.pending_count, -1);
}

/**
 * Helper function to count the notifications whose information has been
 * retrieved by the scheduler of the system, and are still pending.
 */
static void vm_notifications_info_get_retrieved_count_add(
	ffa_notifications_bitmap_t to_add)
{
	vm_notifications_global_state_count_update(
		to_add, &all_notifications_state.info_get_retrieved_count, 1);
}

/**
 * Helper function to subtract the notifications that the receiver is getting
 * and whose information has been retrieved by the receiver scheduler.
 */
static void vm_notifications_info_get_retrieved_count_sub(
	ffa_notifications_bitmap_t to_sub)
{
	vm_notifications_global_state_count_update(
		to_sub, &all_notifications_state.info_get_retrieved_count, -1);
}

/**
 * Helper function to determine if there are notifications pending whose info
 * hasn't been retrieved by the receiver scheduler.
 */
bool vm_notifications_pending_not_retrieved_by_scheduler(void)
{
	bool ret;

	sl_lock(&all_notifications_state.lock);
	ret = all_notifications_state.pending_count >
	      all_notifications_state.info_get_retrieved_count;
	sl_unlock(&all_notifications_state.lock);

	return ret;
}

bool vm_is_notifications_pending_count_zero(void)
{
	bool ret;

	sl_lock(&all_notifications_state.lock);
	ret = all_notifications_state.pending_count == 0;
	sl_unlock(&all_notifications_state.lock);

	return ret;
}
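
/*
 * Illustrative note: lifecycle of the two global counters for a single
 * notification bit.
 *
 *	notification set      -> pending_count++
 *	info get reports it   -> info_get_retrieved_count++
 *	receiver retrieves it -> pending_count--, info_get_retrieved_count--
 *
 * Consequently, vm_notifications_pending_not_retrieved_by_scheduler() only
 * returns true while some pending notification has not yet been reported
 * through notification info get.
 */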

/**
 * Checks that all provided notifications are bound to the specified sender,
 * and are per-vCPU or global, as specified.
 */
bool vm_notifications_validate_binding(struct vm_locked vm_locked,
				       bool is_from_vm, ffa_id_t sender_id,
				       ffa_notifications_bitmap_t notifications,
				       bool is_per_vcpu)
{
	return vm_notifications_validate_bound_sender(
		       vm_locked, is_from_vm, sender_id, notifications) &&
	       vm_notifications_validate_per_vcpu(vm_locked, is_from_vm,
						  is_per_vcpu, notifications);
}

/**
 * Updates binding information in the notification structure for the specified
 * notifications.
 */
void vm_notifications_update_bindings(struct vm_locked vm_locked,
				      bool is_from_vm, ffa_id_t sender_id,
				      ffa_notifications_bitmap_t notifications,
				      bool is_per_vcpu)
{
	CHECK(vm_locked.vm != NULL);
	struct notifications *to_update =
		vm_get_notifications(vm_locked, is_from_vm);

	for (uint32_t i = 0; i < MAX_FFA_NOTIFICATIONS; i++) {
		if (vm_is_notification_bit_set(notifications, i)) {
			to_update->bindings_sender_id[i] = sender_id;
		}
	}

	/*
	 * Set the bits in `bindings_per_vcpu` if the notifications are
	 * per-vCPU, otherwise clear them as the notifications are global.
	 */
	if (is_per_vcpu) {
		to_update->bindings_per_vcpu |= notifications;
	} else {
		to_update->bindings_per_vcpu &= ~notifications;
	}
}
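
/*
 * Illustrative sketch, not part of the build: the bind-then-set flow the
 * helpers above and below support. The IDs and bitmap are assumptions for
 * illustration.
 *
 *	ffa_notifications_bitmap_t bitmap = FFA_NOTIFICATION_MASK(0);
 *
 *	// On bind: record that `sender_id` owns these notification bits.
 *	vm_notifications_update_bindings(receiver_locked, is_from_vm,
 *					 sender_id, bitmap, false);
 *
 *	// On set: only proceed if the binding matches the sender and scope.
 *	if (vm_notifications_validate_binding(receiver_locked, is_from_vm,
 *					      sender_id, bitmap, false)) {
 *		vm_notifications_partition_set_pending(
 *			receiver_locked, is_from_vm, bitmap, 0, false);
 *	}
 */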

bool vm_notifications_validate_bound_sender(
	struct vm_locked vm_locked, bool is_from_vm, ffa_id_t sender_id,
	ffa_notifications_bitmap_t notifications)
{
	CHECK(vm_locked.vm != NULL);
	struct notifications *to_check =
		vm_get_notifications(vm_locked, is_from_vm);

	for (uint32_t i = 0; i < MAX_FFA_NOTIFICATIONS; i++) {
		if (vm_is_notification_bit_set(notifications, i) &&
		    to_check->bindings_sender_id[i] != sender_id) {
			return false;
		}
	}

	return true;
}

bool vm_notifications_validate_per_vcpu(struct vm_locked vm_locked,
					bool is_from_vm, bool is_per_vcpu,
					ffa_notifications_bitmap_t notif)
{
	CHECK(vm_locked.vm != NULL);
	struct notifications *to_check =
		vm_get_notifications(vm_locked, is_from_vm);

	return is_per_vcpu ? (~to_check->bindings_per_vcpu & notif) == 0U
			   : (to_check->bindings_per_vcpu & notif) == 0U;
}

static void vm_notifications_state_set(struct notifications_state *state,
				       ffa_notifications_bitmap_t notifications)
{
	state->pending |= notifications;
	vm_notifications_pending_count_add(notifications);
}

void vm_notifications_partition_set_pending(
	struct vm_locked vm_locked, bool is_from_vm,
	ffa_notifications_bitmap_t notifications, ffa_vcpu_index_t vcpu_id,
	bool is_per_vcpu)
{
	struct notifications *to_set;
	struct notifications_state *state;

	CHECK(vm_locked.vm != NULL);
	CHECK(vcpu_id < vm_locked.vm->vcpu_count);

	to_set = vm_get_notifications(vm_locked, is_from_vm);

	state = is_per_vcpu ? &to_set->per_vcpu[vcpu_id] : &to_set->global;

	vm_notifications_state_set(state, notifications);
}

/**
 * Set pending framework notifications.
 */
void vm_notifications_framework_set_pending(
	struct vm_locked vm_locked, ffa_notifications_bitmap_t notifications)
{
	CHECK(vm_locked.vm != NULL);
	assert(is_ffa_spm_buffer_full_notification(notifications) ||
	       is_ffa_hyp_buffer_full_notification(notifications));
	vm_notifications_state_set(&vm_locked.vm->notifications.framework,
				   notifications);
}

static ffa_notifications_bitmap_t vm_notifications_state_get_pending(
	struct notifications_state *state)
{
	ffa_notifications_bitmap_t to_ret;
	ffa_notifications_bitmap_t pending_and_info_get_retrieved;

	assert(state != NULL);

	to_ret = state->pending;

	/* Update count of currently pending notifications in the system. */
	vm_notifications_pending_count_sub(state->pending);

	/*
	 * If the notifications the receiver is getting have already been
	 * retrieved by the receiver scheduler, decrement those from the
	 * respective count.
	 */
	pending_and_info_get_retrieved =
		state->pending & state->info_get_retrieved;

	if (pending_and_info_get_retrieved != 0) {
		vm_notifications_info_get_retrieved_count_sub(
			pending_and_info_get_retrieved);
	}

	state->pending = 0U;
	state->info_get_retrieved = 0U;

	return to_ret;
}

/**
 * Get global and per-vCPU notifications for the given vCPU ID.
 */
ffa_notifications_bitmap_t vm_notifications_partition_get_pending(
	struct vm_locked vm_locked, bool is_from_vm, ffa_vcpu_index_t vcpu_id)
{
	ffa_notifications_bitmap_t to_ret;
	struct notifications *to_get;

	assert(vm_locked.vm != NULL);
	to_get = vm_get_notifications(vm_locked, is_from_vm);
	assert(vcpu_id < vm_locked.vm->vcpu_count);

	to_ret = vm_notifications_state_get_pending(&to_get->global);
	to_ret |=
		vm_notifications_state_get_pending(&to_get->per_vcpu[vcpu_id]);

	return to_ret;
}
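
/*
 * Illustrative note: the "get" above is destructive. Once
 * vm_notifications_partition_get_pending() returns, the retrieved pending
 * and info_get_retrieved bits are cleared and the global counters are
 * decremented accordingly, so a second call for the same vCPU returns 0
 * until new notifications are set.
 */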

/**
 * Get pending framework notifications.
 */
ffa_notifications_bitmap_t vm_notifications_framework_get_pending(
	struct vm_locked vm_locked)
{
	struct vm *vm = vm_locked.vm;
	ffa_notifications_bitmap_t framework;

	assert(vm != NULL);

	framework = vm_notifications_state_get_pending(
		&vm->notifications.framework);

	return framework;
}

static void vm_notifications_state_info_get(
	struct notifications_state *state, ffa_id_t vm_id, bool is_per_vcpu,
	ffa_vcpu_index_t vcpu_id, uint16_t *ids, uint32_t *ids_count,
	uint32_t *lists_sizes, uint32_t *lists_count,
	const uint32_t ids_max_count,
	enum notifications_info_get_state *info_get_state)
{
	ffa_notifications_bitmap_t pending_not_retrieved;

	CHECK(*ids_count <= ids_max_count);
	CHECK(*lists_count <= ids_max_count);

	if (*info_get_state == FULL) {
		return;
	}

	pending_not_retrieved = state->pending & ~state->info_get_retrieved;

	/* No notifications pending that haven't been retrieved. */
	if (pending_not_retrieved == 0U) {
		return;
	}

	if (*ids_count == ids_max_count) {
		*info_get_state = FULL;
		return;
	}

	switch (*info_get_state) {
	case INIT:
	case STARTING_NEW:
		/*
		 * At this iteration two IDs are to be added: the VM ID
		 * and the vCPU ID. If there is no space, change state and
		 * terminate the function.
		 */
		if (is_per_vcpu && ids_max_count - *ids_count < 2) {
			*info_get_state = FULL;
			return;
		}

		*info_get_state = INSERTING;
		ids[*ids_count] = vm_id;
		++(*ids_count);

		if (is_per_vcpu) {
			/* Insert vCPU ID. */
			ids[*ids_count] = vcpu_id;
			++(*ids_count);
			++lists_sizes[*lists_count];
		}

		++(*lists_count);
		break;
	case INSERTING:
		/* For per-vCPU notifications only. */
		if (!is_per_vcpu) {
			break;
		}

		/* Insert vCPU ID. */
		ids[*ids_count] = vcpu_id;
		(*ids_count)++;
		/* Increment the respective list size. */
		++lists_sizes[*lists_count - 1];

		if (lists_sizes[*lists_count - 1] == 3) {
			*info_get_state = STARTING_NEW;
		}
		break;
	default:
		panic("Notification info get action error!!\n");
	}

	state->info_get_retrieved |= pending_not_retrieved;

	vm_notifications_info_get_retrieved_count_add(pending_not_retrieved);
}
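
/*
 * Illustrative example with assumed values: for a VM with ID 0x8001 that has
 * a pending global notification and pending per-vCPU notifications on vCPUs
 * 0, 1, 2 and 3, successive calls to vm_notifications_state_info_get() fill
 * the output arrays as:
 *
 *	ids         = {0x8001, 0, 1, 2, 0x8001, 3}
 *	ids_count   = 6
 *	lists_count = 2
 *	lists_sizes = {3, 1}
 *
 * The first list holds the VM ID followed by up to three vCPU IDs; the
 * fourth vCPU ID forces a new list that repeats the VM ID.
 */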

/**
 * Get pending notifications' information to return to the receiver scheduler.
 */
void vm_notifications_info_get_pending(
	struct vm_locked vm_locked, bool is_from_vm, uint16_t *ids,
	uint32_t *ids_count, uint32_t *lists_sizes, uint32_t *lists_count,
	const uint32_t ids_max_count,
	enum notifications_info_get_state *info_get_state)
{
	struct notifications *notifications;

	CHECK(vm_locked.vm != NULL);

	notifications = vm_get_notifications(vm_locked, is_from_vm);

	/*
	 * Perform info get for global notifications, before doing it for
	 * per-vCPU.
	 */
	vm_notifications_state_info_get(&notifications->global,
					vm_locked.vm->id, false, 0, ids,
					ids_count, lists_sizes, lists_count,
					ids_max_count, info_get_state);

	for (ffa_vcpu_count_t i = 0; i < vm_locked.vm->vcpu_count; i++) {
		vm_notifications_state_info_get(
			&notifications->per_vcpu[i], vm_locked.vm->id, true, i,
			ids, ids_count, lists_sizes, lists_count, ids_max_count,
			info_get_state);
	}
}

/**
 * Gets all info from the VM's pending notifications.
 * Returns true if the list is full and there are still notifications pending.
 */
bool vm_notifications_info_get(struct vm_locked vm_locked, uint16_t *ids,
			       uint32_t *ids_count, uint32_t *lists_sizes,
			       uint32_t *lists_count,
			       const uint32_t ids_max_count)
{
	enum notifications_info_get_state current_state = INIT;

	/* Get info of pending notifications from the framework. */
	vm_notifications_state_info_get(&vm_locked.vm->notifications.framework,
					vm_locked.vm->id, false, 0, ids,
					ids_count, lists_sizes, lists_count,
					ids_max_count, &current_state);

	/* Get info of pending notifications from SPs. */
	vm_notifications_info_get_pending(vm_locked, false, ids, ids_count,
					  lists_sizes, lists_count,
					  ids_max_count, &current_state);

	/* Get info of pending notifications from VMs. */
	vm_notifications_info_get_pending(vm_locked, true, ids, ids_count,
					  lists_sizes, lists_count,
					  ids_max_count, &current_state);

	/*
	 * The state transitions to FULL when trying to insert a new ID in the
	 * list and there is no more space. This means there are notifications
	 * pending whose info was not retrieved.
	 */
	return current_state == FULL;
}

/**
 * Checks whether the VM supports the given messaging method.
 */
bool vm_supports_messaging_method(struct vm *vm, uint16_t msg_method)
{
	return (vm->messaging_method & msg_method) != 0;
}

void vm_notifications_set_npi_injected(struct vm_locked vm_locked,
				       bool npi_injected)
{
	vm_locked.vm->notifications.npi_injected = npi_injected;
}

bool vm_notifications_is_npi_injected(struct vm_locked vm_locked)
{
	return vm_locked.vm->notifications.npi_injected;
}

/**
 * Sets the designated GP register in which the VM expects to receive the
 * address of the boot info blob.
 */
void vm_set_boot_info_gp_reg(struct vm *vm, struct vcpu *vcpu)
{
	if (vm->boot_info.blob_addr.ipa != 0U) {
		arch_regs_set_gp_reg(&vcpu->regs,
				     ipa_addr(vm->boot_info.blob_addr),
				     vm->boot_info.gp_register_num);
	}
}

/**
 * Obtains the interrupt descriptor entry of the specified VM corresponding
 * to the specified interrupt ID.
 */
static struct interrupt_descriptor *vm_find_interrupt_descriptor(
	struct vm_locked vm_locked, uint32_t id)
{
	for (uint32_t i = 0; i < HF_NUM_INTIDS; i++) {
		/* Interrupt descriptors are populated contiguously. */
		if (!vm_locked.vm->interrupt_desc[i].valid) {
			break;
		}

		if (vm_locked.vm->interrupt_desc[i].interrupt_id == id) {
			/* Interrupt descriptor found. */
			return &vm_locked.vm->interrupt_desc[i];
		}
	}

	return NULL;
}

/**
 * Updates the target MPIDR corresponding to the specified interrupt ID
 * belonging to the specified VM.
 */
struct interrupt_descriptor *vm_interrupt_set_target_mpidr(
	struct vm_locked vm_locked, uint32_t id, uint32_t target_mpidr)
{
	struct interrupt_descriptor *int_desc;

	int_desc = vm_find_interrupt_descriptor(vm_locked, id);

	if (int_desc != NULL) {
		interrupt_desc_set_mpidr(int_desc, target_mpidr);
	}

	return int_desc;
}

/**
 * Updates the security state of the specified interrupt ID belonging to the
 * specified VM.
 */
struct interrupt_descriptor *vm_interrupt_set_sec_state(
	struct vm_locked vm_locked, uint32_t id, uint32_t sec_state)
{
	struct interrupt_descriptor *int_desc;

	int_desc = vm_find_interrupt_descriptor(vm_locked, id);

	if (int_desc != NULL) {
		interrupt_desc_set_sec_state(int_desc, sec_state);
	}

	return int_desc;
}

/**
 * Enables or disables the specified interrupt ID belonging to the specified
 * VM.
 */
struct interrupt_descriptor *vm_interrupt_set_enable(struct vm_locked vm_locked,
						     uint32_t id, bool enable)
{
	struct interrupt_descriptor *int_desc;

	int_desc = vm_find_interrupt_descriptor(vm_locked, id);

	if (int_desc != NULL) {
		interrupt_desc_set_enabled(int_desc, enable);
	}

	return int_desc;
}