/*
 * Copyright 2018 The Hafnium Authors.
 *
 * Use of this source code is governed by a BSD-style
 * license that can be found in the LICENSE file or at
 * https://opensource.org/licenses/BSD-3-Clause.
 */

#include "hf/vm.h"

#include "hf/arch/vm.h"

#include "hf/api.h"
#include "hf/assert.h"
#include "hf/check.h"
#include "hf/cpu.h"
#include "hf/dlog.h"
#include "hf/ffa.h"
#include "hf/layout.h"
#include "hf/plat/iommu.h"
#include "hf/std.h"

#include "vmapi/hf/call.h"

static struct vm vms[MAX_VMS];
static struct vm other_world;
static ffa_vm_count_t vm_count;

/**
 * Counters on the status of notifications in the system. They help to improve
 * the information retrieved by the receiver scheduler.
 */
static struct {
	/** Counts pending notifications. */
	uint32_t pending_count;
	/**
	 * Counts pending notifications that have been retrieved by the
	 * receiver scheduler.
	 */
	uint32_t info_get_retrieved_count;
	struct spinlock lock;
} all_notifications_state;

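/**
 * Initialises the VM's MMU and IOMMU page tables via the
 * architecture-specific layer.
 */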
static bool vm_init_mm(struct vm *vm, struct mpool *ppool)
{
	return arch_vm_init_mm(vm, ppool) && arch_vm_iommu_init_mm(vm, ppool);
}

struct vm *vm_init(ffa_id_t id, ffa_vcpu_count_t vcpu_count,
		   struct mpool *ppool, bool el0_partition,
		   uint8_t dma_device_count)
{
	uint32_t i;
	struct vm *vm;
	size_t vcpu_ppool_entries = (align_up(sizeof(struct vcpu) * vcpu_count,
					      MM_PPOOL_ENTRY_SIZE) /
				     MM_PPOOL_ENTRY_SIZE);

	if (id == HF_OTHER_WORLD_ID) {
		CHECK(el0_partition == false);
		vm = &other_world;
	} else {
		uint16_t vm_index = id - HF_VM_ID_OFFSET;

		CHECK(id >= HF_VM_ID_OFFSET);
		CHECK(vm_index < ARRAY_SIZE(vms));
		vm = &vms[vm_index];
	}

	memset_s(vm, sizeof(*vm), 0, sizeof(*vm));

	list_init(&vm->mailbox.waiter_list);
	list_init(&vm->mailbox.ready_list);
	sl_init(&vm->lock);

	vm->id = id;
	vm->vcpu_count = vcpu_count;

	vm->vcpus = (struct vcpu *)mpool_alloc_contiguous(
		ppool, vcpu_ppool_entries, 1);
	CHECK(vm->vcpus != NULL);

	vm->mailbox.state = MAILBOX_STATE_EMPTY;
	atomic_init(&vm->aborting, false);
	vm->el0_partition = el0_partition;
	vm->dma_device_count = dma_device_count;

	if (!vm_init_mm(vm, ppool)) {
		return NULL;
	}

	/* Initialise waiter entries. */
	for (i = 0; i < MAX_VMS; i++) {
		vm->wait_entries[i].waiting_vm = vm;
		list_init(&vm->wait_entries[i].wait_links);
		list_init(&vm->wait_entries[i].ready_links);
	}

	/* Do basic initialization of vCPUs. */
	for (i = 0; i < vcpu_count; i++) {
		vcpu_init(vm_get_vcpu(vm, i), vm);
	}

	vm_notifications_init(vm, vcpu_count, ppool);
	return vm;
}

bool vm_init_next(ffa_vcpu_count_t vcpu_count, struct mpool *ppool,
		  struct vm **new_vm, bool el0_partition,
		  uint8_t dma_device_count)
{
	if (vm_count >= MAX_VMS) {
		return false;
	}

	/* Generate IDs based on an offset, as low IDs (e.g. 0) are reserved. */
	*new_vm = vm_init(vm_count + HF_VM_ID_OFFSET, vcpu_count, ppool,
			  el0_partition, dma_device_count);
	if (*new_vm == NULL) {
		return false;
	}
	++vm_count;

	return true;
}

ffa_vm_count_t vm_get_count(void)
{
	return vm_count;
}

/**
 * Returns a pointer to the VM with the corresponding id.
 */
struct vm *vm_find(ffa_id_t id)
{
	uint16_t index;

	if (id == HF_OTHER_WORLD_ID) {
		if (other_world.id == HF_OTHER_WORLD_ID) {
			return &other_world;
		}
		return NULL;
	}

	/* Check that this is not a reserved ID. */
	if (id < HF_VM_ID_OFFSET) {
		return NULL;
	}

	index = id - HF_VM_ID_OFFSET;

	return vm_find_index(index);
}

/**
 * Returns a locked instance of the VM with the corresponding id.
 */
struct vm_locked vm_find_locked(ffa_id_t id)
{
	struct vm *vm = vm_find(id);

	if (vm != NULL) {
		return vm_lock(vm);
	}

	return (struct vm_locked){.vm = NULL};
}

/**
 * Returns a pointer to the VM at the specified index.
 */
struct vm *vm_find_index(uint16_t index)
{
	/* Ensure the VM is initialized. */
	if (index >= vm_count) {
		return NULL;
	}

	return &vms[index];
}

/**
 * Locks the given VM and returns a `vm_locked` instance holding it.
 */
struct vm_locked vm_lock(struct vm *vm)
{
	struct vm_locked locked = {
		.vm = vm,
	};

	sl_lock(&vm->lock);

	return locked;
}

/**
 * Locks two VMs ensuring that the locking order is according to the locks'
 * addresses.
 */
struct two_vm_locked vm_lock_both(struct vm *vm1, struct vm *vm2)
{
	struct two_vm_locked dual_lock;

	sl_lock_both(&vm1->lock, &vm2->lock);
	dual_lock.vm1.vm = vm1;
	dual_lock.vm2.vm = vm2;

	return dual_lock;
}

/**
 * Unlocks a VM previously locked with vm_lock, and updates `locked` to reflect
 * the fact that the VM is no longer locked.
 */
void vm_unlock(struct vm_locked *locked)
{
	sl_unlock(&locked->vm->lock);
	locked->vm = NULL;
}

/**
 * Get the vCPU with the given index from the given VM.
 * This assumes the index is valid, i.e. less than vm->vcpu_count.
 */
struct vcpu *vm_get_vcpu(struct vm *vm, ffa_vcpu_index_t vcpu_index)
{
	CHECK(vcpu_index < vm->vcpu_count);
	return &vm->vcpus[vcpu_index];
}

/**
 * Gets `vm`'s wait entry for waiting on `for_vm`.
 */
struct wait_entry *vm_get_wait_entry(struct vm *vm, ffa_id_t for_vm)
{
	uint16_t index;

	CHECK(for_vm >= HF_VM_ID_OFFSET);
	index = for_vm - HF_VM_ID_OFFSET;
	CHECK(index < MAX_VMS);

	return &vm->wait_entries[index];
}
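/*
 * Note: wait entries are indexed by `for_vm - HF_VM_ID_OFFSET`;
 * vm_id_for_wait_entry() further below performs the inverse mapping,
 * recovering the VM ID from an entry's position in the array.
 */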

/**
 * Checks whether the given `to` VM's mailbox is currently busy.
 */
bool vm_is_mailbox_busy(struct vm_locked to)
{
	return to.vm->mailbox.state != MAILBOX_STATE_EMPTY ||
	       to.vm->mailbox.recv == NULL;
}

/**
 * Checks if the mailbox is currently owned by the other world.
 */
bool vm_is_mailbox_other_world_owned(struct vm_locked to)
{
	return to.vm->mailbox.state == MAILBOX_STATE_OTHER_WORLD_OWNED;
}

/**
 * Gets the ID of the VM which the given VM's wait entry is for.
 */
ffa_id_t vm_id_for_wait_entry(struct vm *vm, struct wait_entry *entry)
{
	uint16_t index = entry - vm->wait_entries;

	return index + HF_VM_ID_OFFSET;
}

/**
 * Return whether the given VM ID represents an entity in the current world:
 * i.e. the hypervisor or a normal world VM when running in the normal world, or
 * the SPM or an SP when running in the secure world.
 */
bool vm_id_is_current_world(ffa_id_t vm_id)
{
	return (vm_id & HF_VM_ID_WORLD_MASK) !=
	       (HF_OTHER_WORLD_ID & HF_VM_ID_WORLD_MASK);
}
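/*
 * For example, when running in the secure world the other world is the normal
 * world, so an SP ID (whose world bits differ from HF_OTHER_WORLD_ID's) is
 * considered current world, while a normal world VM ID is not.
 */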

/**
 * Map a range of addresses to the VM in both the MMU and the IOMMU.
 *
 * mm_vm_defrag should always be called after a series of page table updates,
 * whether they succeed or fail. This is because on failure extra page table
 * entries may have been allocated and then not used, while on success it may
 * be possible to compact the page table by merging several entries into a
 * block.
 *
 * Returns true on success, or false if the update failed and no changes were
 * made.
 */
bool vm_identity_map(struct vm_locked vm_locked, paddr_t begin, paddr_t end,
		     uint32_t mode, struct mpool *ppool, ipaddr_t *ipa)
{
	if (!vm_identity_prepare(vm_locked, begin, end, mode, ppool)) {
		return false;
	}

	vm_identity_commit(vm_locked, begin, end, mode, ppool, ipa);

	return true;
}

/**
 * Prepares the given VM for the given address mapping such that it will be
 * able to commit the change without failure.
 *
 * In particular, multiple calls to this function will guarantee that the
 * corresponding calls to commit the changes will succeed.
 *
 * Returns true on success, or false if the update failed and no changes were
 * made.
 */
bool vm_identity_prepare(struct vm_locked vm_locked, paddr_t begin, paddr_t end,
			 uint32_t mode, struct mpool *ppool)
{
	return arch_vm_identity_prepare(vm_locked, begin, end, mode, ppool);
}

/**
 * Commits the given address mapping to the VM assuming the operation cannot
 * fail. `vm_identity_prepare` must be used correctly before this to ensure
 * this condition.
 */
void vm_identity_commit(struct vm_locked vm_locked, paddr_t begin, paddr_t end,
			uint32_t mode, struct mpool *ppool, ipaddr_t *ipa)
{
	arch_vm_identity_commit(vm_locked, begin, end, mode, ppool, ipa);
}
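/*
 * Illustrative note: the prepare/commit split lets a caller make a multi-range
 * update atomic. A sketch, assuming two ranges and a caller-provided pool:
 *
 *	if (vm_identity_prepare(vm_locked, a_begin, a_end, mode, ppool) &&
 *	    vm_identity_prepare(vm_locked, b_begin, b_end, mode, ppool)) {
 *		vm_identity_commit(vm_locked, a_begin, a_end, mode, ppool, NULL);
 *		vm_identity_commit(vm_locked, b_begin, b_end, mode, ppool, NULL);
 *	}
 *
 * Whether a NULL `ipa` out-parameter is accepted is an assumption; pass a
 * valid pointer if the resulting IPA is needed.
 */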

/**
 * Unmap a range of addresses from the VM.
 *
 * Returns true on success, or false if the update failed and no changes were
 * made.
 */
bool vm_unmap(struct vm_locked vm_locked, paddr_t begin, paddr_t end,
	      struct mpool *ppool)
{
	return arch_vm_unmap(vm_locked, begin, end, ppool);
}

/**
 * Defrags page tables for an EL0 partition or for a VM.
 */
void vm_ptable_defrag(struct vm_locked vm_locked, struct mpool *ppool)
{
	arch_vm_ptable_defrag(vm_locked, ppool);
}

/**
 * Unmaps the hypervisor pages from the given page table.
 */
bool vm_unmap_hypervisor(struct vm_locked vm_locked, struct mpool *ppool)
{
	/* TODO: If we add pages dynamically, they must be included here too. */
	return vm_unmap(vm_locked, layout_text_begin(), layout_text_end(),
			ppool) &&
	       vm_unmap(vm_locked, layout_rodata_begin(), layout_rodata_end(),
			ppool) &&
	       vm_unmap(vm_locked, layout_data_begin(), layout_data_end(),
			ppool) &&
	       vm_unmap(vm_locked, layout_stacks_begin(), layout_stacks_end(),
			ppool);
}

/**
 * Gets the mode of the given range of IPAs or VAs if they are mapped with the
 * same mode.
 *
 * Returns true if the range is mapped with the same mode and false otherwise.
 * The wrapper calls the appropriate mm function depending on whether the
 * partition is a VM or an EL0 partition.
 */
bool vm_mem_get_mode(struct vm_locked vm_locked, ipaddr_t begin, ipaddr_t end,
		     uint32_t *mode)
{
	return arch_vm_mem_get_mode(vm_locked, begin, end, mode);
}

bool vm_iommu_mm_identity_map(struct vm_locked vm_locked, paddr_t begin,
			      paddr_t end, uint32_t mode, struct mpool *ppool,
			      ipaddr_t *ipa, uint8_t dma_device_id)
{
	return arch_vm_iommu_mm_identity_map(vm_locked, begin, end, mode, ppool,
					     ipa, dma_device_id);
}

bool vm_mailbox_state_busy(struct vm_locked vm_locked)
{
	return vm_locked.vm->mailbox.state != MAILBOX_STATE_EMPTY ||
	       vm_locked.vm->mailbox.recv == NULL;
}

static struct notifications *vm_get_notifications(struct vm_locked vm_locked,
						  bool is_from_vm)
{
	return is_from_vm ? &vm_locked.vm->notifications.from_vm
			  : &vm_locked.vm->notifications.from_sp;
}

/*
 * Dynamically allocates the per-vCPU notifications structures for a given VM.
 */
static void vm_notifications_init_per_vcpu_notifications(
	struct vm *vm, ffa_vcpu_count_t vcpu_count, struct mpool *ppool)
{
	size_t notif_ppool_entries =
		(align_up(sizeof(struct notifications_state) * vcpu_count,
			  MM_PPOOL_ENTRY_SIZE) /
		 MM_PPOOL_ENTRY_SIZE);

	/*
	 * Allow the function to be called on already initialized VMs whose
	 * notification structures only need to be cleared.
	 */
	if (vm->notifications.from_sp.per_vcpu == NULL) {
		assert(vm->notifications.from_vm.per_vcpu == NULL);
		assert(vcpu_count != 0);
		CHECK(ppool != NULL);
		vm->notifications.from_sp.per_vcpu =
			(struct notifications_state *)mpool_alloc_contiguous(
				ppool, notif_ppool_entries, 1);
		CHECK(vm->notifications.from_sp.per_vcpu != NULL);

		vm->notifications.from_vm.per_vcpu =
			(struct notifications_state *)mpool_alloc_contiguous(
				ppool, notif_ppool_entries, 1);
		CHECK(vm->notifications.from_vm.per_vcpu != NULL);
	} else {
		assert(vm->notifications.from_vm.per_vcpu != NULL);
	}

	memset_s(vm->notifications.from_sp.per_vcpu,
		 sizeof(*(vm->notifications.from_sp.per_vcpu)) * vcpu_count, 0,
		 sizeof(*(vm->notifications.from_sp.per_vcpu)) * vcpu_count);
	memset_s(vm->notifications.from_vm.per_vcpu,
		 sizeof(*(vm->notifications.from_vm.per_vcpu)) * vcpu_count, 0,
		 sizeof(*(vm->notifications.from_vm.per_vcpu)) * vcpu_count);
}

/*
 * Initializes the bindings in the notifications structure.
 */
static void vm_notifications_init_bindings(struct notifications *notifications)
{
	for (uint32_t i = 0U; i < MAX_FFA_NOTIFICATIONS; i++) {
		notifications->bindings_sender_id[i] = HF_INVALID_VM_ID;
	}
}

/*
 * Initializes notification related structures for a VM.
 */
void vm_notifications_init(struct vm *vm, ffa_vcpu_count_t vcpu_count,
			   struct mpool *ppool)
{
	vm_notifications_init_per_vcpu_notifications(vm, vcpu_count, ppool);

	/* Basic initialization of the notifications structure. */
	vm_notifications_init_bindings(&vm->notifications.from_sp);
	vm_notifications_init_bindings(&vm->notifications.from_vm);
}

/**
 * Checks if there are pending notifications.
 */
bool vm_are_notifications_pending(struct vm_locked vm_locked, bool from_vm,
				  ffa_notifications_bitmap_t notifications)
{
	struct notifications *to_check;

	CHECK(vm_locked.vm != NULL);

	to_check = vm_get_notifications(vm_locked, from_vm);

	/* Check if there are pending per-vCPU notifications. */
	for (uint32_t i = 0U; i < vm_locked.vm->vcpu_count; i++) {
		if ((to_check->per_vcpu[i].pending & notifications) != 0U) {
			return true;
		}
	}

	/* Check if there are pending global notifications. */
	return (to_check->global.pending & notifications) != 0U;
}

/**
 * Checks if there are pending global notifications, either from SPs or from
 * VMs.
 */
bool vm_are_global_notifications_pending(struct vm_locked vm_locked)
{
	return vm_get_notifications(vm_locked, true)->global.pending != 0ULL ||
	       vm_get_notifications(vm_locked, false)->global.pending != 0ULL ||
	       vm_are_fwk_notifications_pending(vm_locked);
}

/**
 * Currently only the RX buffer full notification is supported as a framework
 * notification. Returns true if there is one pending, either from the
 * Hypervisor or the SPMC.
 */
bool vm_are_fwk_notifications_pending(struct vm_locked vm_locked)
{
	return vm_locked.vm->notifications.framework.pending != 0ULL;
}

/**
 * Checks if there are per-vCPU notifications pending for a specific vCPU,
 * either from SPs or from VMs.
 */
bool vm_are_per_vcpu_notifications_pending(struct vm_locked vm_locked,
					   ffa_vcpu_index_t vcpu_id)
{
	CHECK(vcpu_id < vm_locked.vm->vcpu_count);

	return vm_get_notifications(vm_locked, true)
			->per_vcpu[vcpu_id]
			.pending != 0ULL ||
	       vm_get_notifications(vm_locked, false)
			->per_vcpu[vcpu_id]
			.pending != 0ULL;
}

bool vm_are_notifications_enabled(struct vm *vm)
{
	return vm->notifications.enabled == true;
}

bool vm_locked_are_notifications_enabled(struct vm_locked vm_locked)
{
	return vm_are_notifications_enabled(vm_locked.vm);
}

static bool vm_is_notification_bit_set(ffa_notifications_bitmap_t notifications,
				       uint32_t i)
{
	return (notifications & FFA_NOTIFICATION_MASK(i)) != 0U;
}

static void vm_notifications_global_state_count_update(
	ffa_notifications_bitmap_t bitmap, uint32_t *counter, int inc)
{
	/*
	 * Helper to update the counters of the global notifications state.
	 * The count is updated in increments or decrements of 1, with `inc`
	 * being 1 or -1 respectively.
	 */
	assert(inc == 1 || inc == -1);

	sl_lock(&all_notifications_state.lock);

	for (uint32_t i = 0; i < MAX_FFA_NOTIFICATIONS; i++) {
		if (vm_is_notification_bit_set(bitmap, i)) {
			CHECK((inc > 0 && *counter < UINT32_MAX) ||
			      (inc < 0 && *counter > 0));
			*counter += inc;
		}
	}

	sl_unlock(&all_notifications_state.lock);
}

/**
 * Helper function to increment the pending notifications count based on a
 * bitmap passed as argument.
 * To be used when setting notifications for a given VM.
 */
static void vm_notifications_pending_count_add(
	ffa_notifications_bitmap_t to_add)
{
	vm_notifications_global_state_count_update(
		to_add, &all_notifications_state.pending_count, 1);
}

/**
 * Helper function to decrement the pending notifications count.
 * To be used when getting the receiver's pending notifications.
 */
static void vm_notifications_pending_count_sub(
	ffa_notifications_bitmap_t to_sub)
{
	vm_notifications_global_state_count_update(
		to_sub, &all_notifications_state.pending_count, -1);
}

/**
 * Helper function to count the notifications whose information has been
 * retrieved by the scheduler of the system, and that are still pending.
 */
static void vm_notifications_info_get_retrieved_count_add(
	ffa_notifications_bitmap_t to_add)
{
	vm_notifications_global_state_count_update(
		to_add, &all_notifications_state.info_get_retrieved_count, 1);
}

/**
 * Helper function to decrement the count of notifications that the receiver
 * is getting and whose information has already been retrieved by the receiver
 * scheduler.
 */
static void vm_notifications_info_get_retrieved_count_sub(
	ffa_notifications_bitmap_t to_sub)
{
	vm_notifications_global_state_count_update(
		to_sub, &all_notifications_state.info_get_retrieved_count, -1);
}

/**
 * Helper function to determine if there are notifications pending whose info
 * hasn't been retrieved by the receiver scheduler.
 */
bool vm_notifications_pending_not_retrieved_by_scheduler(void)
{
	bool ret;

	sl_lock(&all_notifications_state.lock);
	ret = all_notifications_state.pending_count >
	      all_notifications_state.info_get_retrieved_count;
	sl_unlock(&all_notifications_state.lock);

	return ret;
}

bool vm_is_notifications_pending_count_zero(void)
{
	bool ret;

	sl_lock(&all_notifications_state.lock);
	ret = all_notifications_state.pending_count == 0;
	sl_unlock(&all_notifications_state.lock);

	return ret;
}

/**
 * Checks that all provided notifications are bound to the specified sender,
 * and are per-vCPU or global, as specified.
 */
bool vm_notifications_validate_binding(struct vm_locked vm_locked,
				       bool is_from_vm, ffa_id_t sender_id,
				       ffa_notifications_bitmap_t notifications,
				       bool is_per_vcpu)
{
	return vm_notifications_validate_bound_sender(
		       vm_locked, is_from_vm, sender_id, notifications) &&
	       vm_notifications_validate_per_vcpu(vm_locked, is_from_vm,
						  is_per_vcpu, notifications);
}
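/*
 * Note on the binding model: each notification bit `i` is bound to the sender
 * recorded in `bindings_sender_id[i]`, and the corresponding bit in
 * `bindings_per_vcpu` records whether it was bound as per-vCPU. A sender may
 * therefore only signal notifications previously bound to it (presumably via
 * the FFA_NOTIFICATION_BIND interface), with the per-vCPU flag matching the
 * binding.
 */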

/**
 * Updates the binding information in the notifications structure for the
 * specified notifications.
 */
void vm_notifications_update_bindings(struct vm_locked vm_locked,
				      bool is_from_vm, ffa_id_t sender_id,
				      ffa_notifications_bitmap_t notifications,
				      bool is_per_vcpu)
{
	CHECK(vm_locked.vm != NULL);
	struct notifications *to_update =
		vm_get_notifications(vm_locked, is_from_vm);

	for (uint32_t i = 0; i < MAX_FFA_NOTIFICATIONS; i++) {
		if (vm_is_notification_bit_set(notifications, i)) {
			to_update->bindings_sender_id[i] = sender_id;
		}
	}

	/*
	 * Set the notifications' bits if they are per-vCPU, else clear them
	 * as they are global.
	 */
	if (is_per_vcpu) {
		to_update->bindings_per_vcpu |= notifications;
	} else {
		to_update->bindings_per_vcpu &= ~notifications;
	}
}

bool vm_notifications_validate_bound_sender(
	struct vm_locked vm_locked, bool is_from_vm, ffa_id_t sender_id,
	ffa_notifications_bitmap_t notifications)
{
	CHECK(vm_locked.vm != NULL);
	struct notifications *to_check =
		vm_get_notifications(vm_locked, is_from_vm);

	for (uint32_t i = 0; i < MAX_FFA_NOTIFICATIONS; i++) {
		if (vm_is_notification_bit_set(notifications, i) &&
		    to_check->bindings_sender_id[i] != sender_id) {
			return false;
		}
	}

	return true;
}

bool vm_notifications_validate_per_vcpu(struct vm_locked vm_locked,
					bool is_from_vm, bool is_per_vcpu,
					ffa_notifications_bitmap_t notif)
{
	CHECK(vm_locked.vm != NULL);
	struct notifications *to_check =
		vm_get_notifications(vm_locked, is_from_vm);

	return is_per_vcpu ? (~to_check->bindings_per_vcpu & notif) == 0U
			   : (to_check->bindings_per_vcpu & notif) == 0U;
}

static void vm_notifications_state_set(struct notifications_state *state,
				       ffa_notifications_bitmap_t notifications)
{
	state->pending |= notifications;
	vm_notifications_pending_count_add(notifications);
}

void vm_notifications_partition_set_pending(
	struct vm_locked vm_locked, bool is_from_vm,
	ffa_notifications_bitmap_t notifications, ffa_vcpu_index_t vcpu_id,
	bool is_per_vcpu)
{
	struct notifications *to_set;
	struct notifications_state *state;

	CHECK(vm_locked.vm != NULL);
	CHECK(vcpu_id < vm_locked.vm->vcpu_count);

	to_set = vm_get_notifications(vm_locked, is_from_vm);

	state = is_per_vcpu ? &to_set->per_vcpu[vcpu_id] : &to_set->global;

	vm_notifications_state_set(state, notifications);
}

/**
 * Set pending framework notifications.
 */
void vm_notifications_framework_set_pending(
	struct vm_locked vm_locked, ffa_notifications_bitmap_t notifications)
{
	CHECK(vm_locked.vm != NULL);
	assert(is_ffa_spm_buffer_full_notification(notifications) ||
	       is_ffa_hyp_buffer_full_notification(notifications));
	vm_notifications_state_set(&vm_locked.vm->notifications.framework,
				   notifications);
}

static ffa_notifications_bitmap_t vm_notifications_state_get_pending(
	struct notifications_state *state)
{
	ffa_notifications_bitmap_t to_ret;
	ffa_notifications_bitmap_t pending_and_info_get_retrieved;

	assert(state != NULL);

	to_ret = state->pending;

	/* Update the count of currently pending notifications in the system. */
	vm_notifications_pending_count_sub(state->pending);

	/*
	 * If the notifications the receiver is getting have already had their
	 * information retrieved by the receiver scheduler, decrement them from
	 * the respective count.
	 */
	pending_and_info_get_retrieved =
		state->pending & state->info_get_retrieved;

	if (pending_and_info_get_retrieved != 0) {
		vm_notifications_info_get_retrieved_count_sub(
			pending_and_info_get_retrieved);
	}

	state->pending = 0U;
	state->info_get_retrieved = 0U;

	return to_ret;
}

/**
 * Get global and per-vCPU notifications for the given vCPU ID.
 */
ffa_notifications_bitmap_t vm_notifications_partition_get_pending(
	struct vm_locked vm_locked, bool is_from_vm, ffa_vcpu_index_t vcpu_id)
{
	ffa_notifications_bitmap_t to_ret;
	struct notifications *to_get;

	assert(vm_locked.vm != NULL);
	to_get = vm_get_notifications(vm_locked, is_from_vm);
	assert(vcpu_id < vm_locked.vm->vcpu_count);

	to_ret = vm_notifications_state_get_pending(&to_get->global);
	to_ret |=
		vm_notifications_state_get_pending(&to_get->per_vcpu[vcpu_id]);

	return to_ret;
}

/**
 * Get pending framework notifications.
 */
ffa_notifications_bitmap_t vm_notifications_framework_get_pending(
	struct vm_locked vm_locked)
{
	struct vm *vm = vm_locked.vm;
	ffa_notifications_bitmap_t framework;

	assert(vm != NULL);

	framework = vm_notifications_state_get_pending(
		&vm->notifications.framework);

	return framework;
}

static void vm_notifications_state_info_get(
	struct notifications_state *state, ffa_id_t vm_id, bool is_per_vcpu,
	ffa_vcpu_index_t vcpu_id, uint16_t *ids, uint32_t *ids_count,
	uint32_t *lists_sizes, uint32_t *lists_count,
	const uint32_t ids_max_count,
	enum notifications_info_get_state *info_get_state)
{
	ffa_notifications_bitmap_t pending_not_retrieved;

	CHECK(*ids_count <= ids_max_count);
	CHECK(*lists_count <= ids_max_count);

	if (*info_get_state == FULL) {
		return;
	}

	pending_not_retrieved = state->pending & ~state->info_get_retrieved;

	/* No notifications pending that haven't been retrieved. */
	if (pending_not_retrieved == 0U) {
		return;
	}

	if (*ids_count == ids_max_count) {
		*info_get_state = FULL;
		return;
	}

	switch (*info_get_state) {
	case INIT:
	case STARTING_NEW:
		/*
		 * At this iteration two IDs are to be added: the VM ID and the
		 * vCPU ID. If there is no space for both, change state and
		 * terminate the function.
		 */
		if (is_per_vcpu && ids_max_count - *ids_count < 2) {
			*info_get_state = FULL;
			return;
		}

		*info_get_state = INSERTING;
		ids[*ids_count] = vm_id;
		++(*ids_count);

		if (is_per_vcpu) {
			/* Insert vCPU ID. */
			ids[*ids_count] = vcpu_id;
			++(*ids_count);
			++lists_sizes[*lists_count];
		}

		++(*lists_count);
		break;
	case INSERTING:
		/* For per-vCPU notifications only. */
		if (!is_per_vcpu) {
			break;
		}

		/* Insert vCPU ID. */
		ids[*ids_count] = vcpu_id;
		(*ids_count)++;
		/* Increment the respective list size. */
		++lists_sizes[*lists_count - 1];

		if (lists_sizes[*lists_count - 1] == 3) {
			*info_get_state = STARTING_NEW;
		}
		break;
	default:
		panic("Notification info get action error!!\n");
	}

	state->info_get_retrieved |= pending_not_retrieved;

	vm_notifications_info_get_retrieved_count_add(pending_not_retrieved);
}
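/*
 * Illustrative example of the resulting packing (derived from the state
 * machine above): for VM 1 with per-vCPU notifications pending on vCPUs 0-3,
 * successive calls produce:
 *
 *	ids         = {1, 0, 1, 2, 1, 3}
 *	lists_count = 2
 *	lists_sizes = {3, 1}
 *
 * i.e. each list starts with the VM ID followed by up to three vCPU IDs; a
 * list for global notifications holds the VM ID only, with a list size of 0.
 */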

/**
 * Gets pending notifications' information to return to the receiver scheduler.
 */
void vm_notifications_info_get_pending(
	struct vm_locked vm_locked, bool is_from_vm, uint16_t *ids,
	uint32_t *ids_count, uint32_t *lists_sizes, uint32_t *lists_count,
	const uint32_t ids_max_count,
	enum notifications_info_get_state *info_get_state)
{
	struct notifications *notifications;

	CHECK(vm_locked.vm != NULL);

	notifications = vm_get_notifications(vm_locked, is_from_vm);

	/*
	 * Perform info get for global notifications, before doing it for
	 * per-vCPU.
	 */
	vm_notifications_state_info_get(&notifications->global,
					vm_locked.vm->id, false, 0, ids,
					ids_count, lists_sizes, lists_count,
					ids_max_count, info_get_state);

	for (ffa_vcpu_count_t i = 0; i < vm_locked.vm->vcpu_count; i++) {
		vm_notifications_state_info_get(
			&notifications->per_vcpu[i], vm_locked.vm->id, true, i,
			ids, ids_count, lists_sizes, lists_count, ids_max_count,
			info_get_state);
	}
}

/**
 * Gets all info from the VM's pending notifications.
 * Returns true if the list is full and there are still notifications pending.
 */
bool vm_notifications_info_get(struct vm_locked vm_locked, uint16_t *ids,
			       uint32_t *ids_count, uint32_t *lists_sizes,
			       uint32_t *lists_count,
			       const uint32_t ids_max_count)
{
	enum notifications_info_get_state current_state = INIT;

	/* Get info of pending notifications from the framework. */
	vm_notifications_state_info_get(&vm_locked.vm->notifications.framework,
					vm_locked.vm->id, false, 0, ids,
					ids_count, lists_sizes, lists_count,
					ids_max_count, &current_state);

	/* Get info of pending notifications from SPs. */
	vm_notifications_info_get_pending(vm_locked, false, ids, ids_count,
					  lists_sizes, lists_count,
					  ids_max_count, &current_state);

	/* Get info of pending notifications from VMs. */
	vm_notifications_info_get_pending(vm_locked, true, ids, ids_count,
					  lists_sizes, lists_count,
					  ids_max_count, &current_state);

	/*
	 * The state transitions to FULL when trying to insert a new ID into
	 * the list and there is no more space. This means there are
	 * notifications pending whose info has not been retrieved.
	 */
	return current_state == FULL;
}
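/*
 * The ids/lists_sizes/lists_count output of the function above is what gets
 * reported back to the receiver scheduler; a true return value indicates that
 * the list filled up and the caller should signal that more notification info
 * remains pending (presumably via the FFA_NOTIFICATION_INFO_GET interface's
 * "more pending" flag).
 */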

/**
 * Checks whether the VM supports the given messaging method(s).
 */
bool vm_supports_messaging_method(struct vm *vm, uint16_t msg_method)
{
	return (vm->messaging_method & msg_method) != 0;
}

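/*
 * The two functions below track whether the notification pending interrupt
 * (which is presumably what "npi" abbreviates, per the FF-A notifications
 * model) has already been injected for this VM.
 */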
void vm_notifications_set_npi_injected(struct vm_locked vm_locked,
				       bool npi_injected)
{
	vm_locked.vm->notifications.npi_injected = npi_injected;
}

bool vm_notifications_is_npi_injected(struct vm_locked vm_locked)
{
	return vm_locked.vm->notifications.npi_injected;
}

/**
 * Sets the designated GP register through which the VM expects to receive the
 * boot info's address.
 */
void vm_set_boot_info_gp_reg(struct vm *vm, struct vcpu *vcpu)
{
	if (vm->boot_info.blob_addr.ipa != 0U) {
		arch_regs_set_gp_reg(&vcpu->regs,
				     ipa_addr(vm->boot_info.blob_addr),
				     vm->boot_info.gp_register_num);
	}
}

/**
 * Obtains the interrupt descriptor entry of the specified VM corresponding to
 * the specified interrupt ID.
 */
static struct interrupt_descriptor *vm_find_interrupt_descriptor(
	struct vm_locked vm_locked, uint32_t id)
{
	for (uint32_t i = 0; i < HF_NUM_INTIDS; i++) {
		/* Interrupt descriptors are populated contiguously. */
		if (!vm_locked.vm->interrupt_desc[i].valid) {
			break;
		}

		if (vm_locked.vm->interrupt_desc[i].interrupt_id == id) {
			/* Interrupt descriptor found. */
			return &vm_locked.vm->interrupt_desc[i];
		}
	}

	return NULL;
}

/**
 * Updates the target MPIDR corresponding to the specified interrupt ID
 * belonging to the specified VM.
 */
struct interrupt_descriptor *vm_interrupt_set_target_mpidr(
	struct vm_locked vm_locked, uint32_t id, uint32_t target_mpidr)
{
	struct interrupt_descriptor *int_desc;

	int_desc = vm_find_interrupt_descriptor(vm_locked, id);

	if (int_desc != NULL) {
		interrupt_desc_set_mpidr(int_desc, target_mpidr);
	}

	return int_desc;
}

/**
 * Updates the security state of the specified interrupt ID belonging to the
 * specified VM.
 */
struct interrupt_descriptor *vm_interrupt_set_sec_state(
	struct vm_locked vm_locked, uint32_t id, uint32_t sec_state)
{
	struct interrupt_descriptor *int_desc;

	int_desc = vm_find_interrupt_descriptor(vm_locked, id);

	if (int_desc != NULL) {
		interrupt_desc_set_sec_state(int_desc, sec_state);
	}

	return int_desc;
}

/**
 * Enables or disables the specified interrupt ID belonging to the specified
 * VM.
 */
struct interrupt_descriptor *vm_interrupt_set_enable(struct vm_locked vm_locked,
						     uint32_t id, bool enable)
{
	struct interrupt_descriptor *int_desc;

	int_desc = vm_find_interrupt_descriptor(vm_locked, id);

	if (int_desc != NULL) {
		interrupt_desc_set_enabled(int_desc, enable);
	}

	return int_desc;
}