/*
 * Copyright 2018 The Hafnium Authors.
 *
 * Use of this source code is governed by a BSD-style
 * license that can be found in the LICENSE file or at
 * https://opensource.org/licenses/BSD-3-Clause.
 */

#include "hf/vm.h"

#include "hf/arch/vm.h"

#include "hf/api.h"
#include "hf/assert.h"
#include "hf/check.h"
#include "hf/cpu.h"
#include "hf/dlog.h"
#include "hf/ffa.h"
#include "hf/layout.h"
#include "hf/std.h"

#include "vmapi/hf/call.h"

static struct vm vms[MAX_VMS];
static struct vm other_world;
static ffa_vm_count_t vm_count;

/**
 * Counters tracking the status of notifications in the system. They help to
 * improve the information retrieved by the receiver scheduler.
 */
static struct {
	/** Counts pending notifications. */
	uint32_t pending_count;
	/**
	 * Counts pending notifications that have been retrieved by the
	 * receiver scheduler.
	 */
	uint32_t info_get_retrieved_count;
	struct spinlock lock;
} all_notifications_state;

static bool vm_init_mm(struct vm *vm, struct mpool *ppool)
{
	return arch_vm_init_mm(vm, ppool);
}

struct vm *vm_init(ffa_id_t id, ffa_vcpu_count_t vcpu_count,
		   struct mpool *ppool, bool el0_partition)
{
	uint32_t i;
	struct vm *vm;
	size_t vcpu_ppool_entries = (align_up(sizeof(struct vcpu) * vcpu_count,
					      MM_PPOOL_ENTRY_SIZE) /
				     MM_PPOOL_ENTRY_SIZE);

	if (id == HF_OTHER_WORLD_ID) {
		CHECK(el0_partition == false);
		vm = &other_world;
	} else {
		uint16_t vm_index = id - HF_VM_ID_OFFSET;

		CHECK(id >= HF_VM_ID_OFFSET);
		CHECK(vm_index < ARRAY_SIZE(vms));
		vm = &vms[vm_index];
	}

	memset_s(vm, sizeof(*vm), 0, sizeof(*vm));

	list_init(&vm->mailbox.waiter_list);
	list_init(&vm->mailbox.ready_list);
	sl_init(&vm->lock);

	vm->id = id;
	vm->vcpu_count = vcpu_count;

	vm->vcpus = (struct vcpu *)mpool_alloc_contiguous(
		ppool, vcpu_ppool_entries, 1);
	CHECK(vm->vcpus != NULL);

	vm->mailbox.state = MAILBOX_STATE_EMPTY;
	atomic_init(&vm->aborting, false);
	vm->el0_partition = el0_partition;

	if (!vm_init_mm(vm, ppool)) {
		return NULL;
	}

	/* Initialise waiter entries. */
	for (i = 0; i < MAX_VMS; i++) {
		vm->wait_entries[i].waiting_vm = vm;
		list_init(&vm->wait_entries[i].wait_links);
		list_init(&vm->wait_entries[i].ready_links);
	}

	/* Do basic initialization of vCPUs. */
	for (i = 0; i < vcpu_count; i++) {
		vcpu_init(vm_get_vcpu(vm, i), vm);
	}

	vm_notifications_init(vm, vcpu_count, ppool);
	return vm;
}

bool vm_init_next(ffa_vcpu_count_t vcpu_count, struct mpool *ppool,
		  struct vm **new_vm, bool el0_partition)
{
	if (vm_count >= MAX_VMS) {
		return false;
	}

	/* Generate IDs based on an offset, as low IDs (e.g. 0) are reserved. */
	*new_vm = vm_init(vm_count + HF_VM_ID_OFFSET, vcpu_count, ppool,
			  el0_partition);
	if (*new_vm == NULL) {
		return false;
	}
	++vm_count;

	return true;
}

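/*
 * Usage sketch (illustrative): how a loader could create VMs one by one from
 * its manifest. `vcpu_count_for_next_vm` and `ppool` are hypothetical and
 * assumed to come from the caller's manifest parsing and memory pool set-up.
 *
 *	struct vm *vm = NULL;
 *
 *	if (!vm_init_next(vcpu_count_for_next_vm, ppool, &vm, false)) {
 *		dlog_error("Unable to allocate a new VM.\n");
 *	} else {
 *		dlog_info("VM %#x created, total: %u.\n", vm->id,
 *			  vm_get_count());
 *	}
 */
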
ffa_vm_count_t vm_get_count(void)
{
	return vm_count;
}

/**
 * Returns a pointer to the VM with the corresponding ID.
 */
struct vm *vm_find(ffa_id_t id)
{
	uint16_t index;

	if (id == HF_OTHER_WORLD_ID) {
		if (other_world.id == HF_OTHER_WORLD_ID) {
			return &other_world;
		}
		return NULL;
	}

	/* Check that this is not a reserved ID. */
	if (id < HF_VM_ID_OFFSET) {
		return NULL;
	}

	index = id - HF_VM_ID_OFFSET;

	return vm_find_index(index);
}

/**
 * Returns a locked instance of the VM with the corresponding ID.
 */
struct vm_locked vm_find_locked(ffa_id_t id)
{
	struct vm *vm = vm_find(id);

	if (vm != NULL) {
		return vm_lock(vm);
	}

	return (struct vm_locked){.vm = NULL};
}

/**
 * Returns a pointer to the VM at the specified index.
 */
struct vm *vm_find_index(uint16_t index)
{
	/* Ensure the VM is initialized. */
	if (index >= vm_count) {
		return NULL;
	}

	return &vms[index];
}

/**
 * Locks the given VM and returns a locked instance holding it.
 */
struct vm_locked vm_lock(struct vm *vm)
{
	struct vm_locked locked = {
		.vm = vm,
	};

	sl_lock(&vm->lock);

	return locked;
}

/**
 * Locks two VMs, ensuring that the locking order is according to the locks'
 * addresses.
 */
struct two_vm_locked vm_lock_both(struct vm *vm1, struct vm *vm2)
{
	struct two_vm_locked dual_lock;

	sl_lock_both(&vm1->lock, &vm2->lock);
	dual_lock.vm1.vm = vm1;
	dual_lock.vm2.vm = vm2;

	return dual_lock;
}

/**
 * Unlocks a VM previously locked with vm_lock, and updates `locked` to reflect
 * the fact that the VM is no longer locked.
 */
void vm_unlock(struct vm_locked *locked)
{
	sl_unlock(&locked->vm->lock);
	locked->vm = NULL;
}

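/*
 * Usage sketch (illustrative): the lock/use/unlock pattern for one VM, and
 * vm_lock_both for two. `vm1` and `vm2` are assumed to be valid VMs; the
 * address-ordered sl_lock_both prevents deadlock between concurrent callers
 * that lock the same pair in opposite orders.
 *
 *	struct vm_locked locked = vm_lock(vm1);
 *
 *	... operate on locked.vm ...
 *	vm_unlock(&locked);
 *
 *	struct two_vm_locked both = vm_lock_both(vm1, vm2);
 *
 *	... operate on both.vm1.vm and both.vm2.vm ...
 *	vm_unlock(&both.vm2);
 *	vm_unlock(&both.vm1);
 */
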
/**
 * Get the vCPU with the given index from the given VM.
 * This assumes the index is valid, i.e. less than vm->vcpu_count.
 */
struct vcpu *vm_get_vcpu(struct vm *vm, ffa_vcpu_index_t vcpu_index)
{
	CHECK(vcpu_index < vm->vcpu_count);
	return &vm->vcpus[vcpu_index];
}

/**
 * Gets `vm`'s wait entry for waiting on `for_vm`.
 */
struct wait_entry *vm_get_wait_entry(struct vm *vm, ffa_id_t for_vm)
{
	uint16_t index;

	CHECK(for_vm >= HF_VM_ID_OFFSET);
	index = for_vm - HF_VM_ID_OFFSET;
	CHECK(index < MAX_VMS);

	return &vm->wait_entries[index];
}

/**
 * Checks whether the given `to` VM's mailbox is currently busy.
 */
bool vm_is_mailbox_busy(struct vm_locked to)
{
	return to.vm->mailbox.state != MAILBOX_STATE_EMPTY ||
	       to.vm->mailbox.recv == NULL;
}

/**
 * Checks if the mailbox is currently owned by the other world.
 */
bool vm_is_mailbox_other_world_owned(struct vm_locked to)
{
	return to.vm->mailbox.state == MAILBOX_STATE_OTHER_WORLD_OWNED;
}

/**
 * Gets the ID of the VM which the given VM's wait entry is for.
 */
ffa_id_t vm_id_for_wait_entry(struct vm *vm, struct wait_entry *entry)
{
	uint16_t index = entry - vm->wait_entries;

	return index + HF_VM_ID_OFFSET;
}

/**
 * Returns whether the given VM ID represents an entity in the current world:
 * i.e. the hypervisor or a normal world VM when running in the normal world, or
 * the SPM or an SP when running in the secure world.
 */
bool vm_id_is_current_world(ffa_id_t vm_id)
{
	return (vm_id & HF_VM_ID_WORLD_MASK) !=
	       (HF_OTHER_WORLD_ID & HF_VM_ID_WORLD_MASK);
}

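/*
 * Worked example (illustrative, assuming the FF-A convention that bit 15 of
 * the 16-bit ID is the world bit): in the SPMC, where HF_OTHER_WORLD_ID names
 * the hypervisor, an SP ID such as 0x8001 differs from the other world in its
 * world bit and so is current-world, while a VM ID such as 0x0001 matches it
 * and is not.
 *
 *	if (!vm_id_is_current_world(receiver_id)) {
 *		... forward the request to the other world ...
 *	}
 */
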
/**
 * Map a range of addresses to the VM in both the MMU and the IOMMU.
 *
 * mm_vm_defrag should always be called after a series of page table updates,
 * whether they succeed or fail. This is because on failure extra page table
 * entries may have been allocated and then not used, while on success it may be
 * possible to compact the page table by merging several entries into a block.
 *
 * Returns true on success, or false if the update failed and no changes were
 * made.
 */
bool vm_identity_map(struct vm_locked vm_locked, paddr_t begin, paddr_t end,
		     uint32_t mode, struct mpool *ppool, ipaddr_t *ipa)
{
	if (!vm_identity_prepare(vm_locked, begin, end, mode, ppool)) {
		return false;
	}

	vm_identity_commit(vm_locked, begin, end, mode, ppool, ipa);

	return true;
}

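/*
 * Usage sketch (illustrative): the prepare/commit split lets a caller stage
 * several updates and only apply them once all allocations are known to
 * succeed. `dev_begin`/`dev_end` are hypothetical device-region bounds; the
 * MM_MODE_* flags are the memory mode bits from "hf/mm.h".
 *
 *	if (!vm_identity_prepare(vm_locked, dev_begin, dev_end,
 *				 MM_MODE_R | MM_MODE_W | MM_MODE_D, ppool)) {
 *		return false;
 *	}
 *	vm_identity_commit(vm_locked, dev_begin, dev_end,
 *			   MM_MODE_R | MM_MODE_W | MM_MODE_D, ppool, NULL);
 */
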
/**
 * Prepares the given VM for the given address mapping such that it will be able
 * to commit the change without failure.
 *
 * In particular, multiple calls to this function will result in the
 * corresponding calls to commit the changes succeeding.
 *
 * Returns true on success, or false if the update failed and no changes were
 * made.
 */
bool vm_identity_prepare(struct vm_locked vm_locked, paddr_t begin, paddr_t end,
			 uint32_t mode, struct mpool *ppool)
{
	return arch_vm_identity_prepare(vm_locked, begin, end, mode, ppool);
}

/**
 * Commits the given address mapping to the VM, assuming the operation cannot
 * fail. `vm_identity_prepare` must be used correctly before this to ensure
 * this condition.
 */
void vm_identity_commit(struct vm_locked vm_locked, paddr_t begin, paddr_t end,
			uint32_t mode, struct mpool *ppool, ipaddr_t *ipa)
{
	arch_vm_identity_commit(vm_locked, begin, end, mode, ppool, ipa);
}

/**
 * Unmap a range of addresses from the VM.
 *
 * Returns true on success, or false if the update failed and no changes were
 * made.
 */
bool vm_unmap(struct vm_locked vm_locked, paddr_t begin, paddr_t end,
	      struct mpool *ppool)
{
	return arch_vm_unmap(vm_locked, begin, end, ppool);
}

/**
 * Defrags page tables for an EL0 partition or for a VM.
 */
void vm_ptable_defrag(struct vm_locked vm_locked, struct mpool *ppool)
{
	arch_vm_ptable_defrag(vm_locked, ppool);
}

/**
 * Unmaps the hypervisor pages from the given page table.
 */
bool vm_unmap_hypervisor(struct vm_locked vm_locked, struct mpool *ppool)
{
	/* TODO: If we add pages dynamically, they must be included here too. */
	return vm_unmap(vm_locked, layout_text_begin(), layout_text_end(),
			ppool) &&
	       vm_unmap(vm_locked, layout_rodata_begin(), layout_rodata_end(),
			ppool) &&
	       vm_unmap(vm_locked, layout_data_begin(), layout_data_end(),
			ppool) &&
	       vm_unmap(vm_locked, layout_stacks_begin(), layout_stacks_end(),
			ppool);
}

/**
 * Gets the mode of the given range of IPAs or VAs if they are mapped with the
 * same mode.
 *
 * Returns true if the range is mapped with the same mode and false otherwise.
 * The wrapper calls the appropriate mm function depending on whether the
 * partition is a VM or an EL0 partition.
 */
bool vm_mem_get_mode(struct vm_locked vm_locked, ipaddr_t begin, ipaddr_t end,
		     uint32_t *mode)
{
	return arch_vm_mem_get_mode(vm_locked, begin, end, mode);
}

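/*
 * Usage sketch (illustrative): checking that a range is uniformly mapped with
 * read/write access before acting on it, e.g. on a memory sharing path. The
 * exact MM_MODE_* flags tested depend on the caller; see "hf/mm.h".
 *
 *	uint32_t mode;
 *
 *	if (!vm_mem_get_mode(vm_locked, begin, end, &mode) ||
 *	    (mode & (MM_MODE_R | MM_MODE_W)) != (MM_MODE_R | MM_MODE_W)) {
 *		return ffa_error(FFA_DENIED);
 *	}
 */
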
bool vm_mailbox_state_busy(struct vm_locked vm_locked)
{
	return vm_locked.vm->mailbox.state != MAILBOX_STATE_EMPTY ||
	       vm_locked.vm->mailbox.recv == NULL;
}

static struct notifications *vm_get_notifications(struct vm_locked vm_locked,
						  bool is_from_vm)
{
	return is_from_vm ? &vm_locked.vm->notifications.from_vm
			  : &vm_locked.vm->notifications.from_sp;
}

/*
 * Dynamically allocates the per-vCPU notifications structures for a given VM.
 */
static void vm_notifications_init_per_vcpu_notifications(
	struct vm *vm, ffa_vcpu_count_t vcpu_count, struct mpool *ppool)
{
	size_t notif_ppool_entries =
		(align_up(sizeof(struct notifications_state) * vcpu_count,
			  MM_PPOOL_ENTRY_SIZE) /
		 MM_PPOOL_ENTRY_SIZE);

	/*
	 * Allow the function to be called on already initialized VMs, whose
	 * notification structures only need to be cleared.
	 */
	if (vm->notifications.from_sp.per_vcpu == NULL) {
		assert(vm->notifications.from_vm.per_vcpu == NULL);
		assert(vcpu_count != 0);
		CHECK(ppool != NULL);
		vm->notifications.from_sp.per_vcpu =
			(struct notifications_state *)mpool_alloc_contiguous(
				ppool, notif_ppool_entries, 1);
		CHECK(vm->notifications.from_sp.per_vcpu != NULL);

		vm->notifications.from_vm.per_vcpu =
			(struct notifications_state *)mpool_alloc_contiguous(
				ppool, notif_ppool_entries, 1);
		CHECK(vm->notifications.from_vm.per_vcpu != NULL);
	} else {
		assert(vm->notifications.from_vm.per_vcpu != NULL);
	}

	memset_s(vm->notifications.from_sp.per_vcpu,
		 sizeof(*(vm->notifications.from_sp.per_vcpu)) * vcpu_count, 0,
		 sizeof(*(vm->notifications.from_sp.per_vcpu)) * vcpu_count);
	memset_s(vm->notifications.from_vm.per_vcpu,
		 sizeof(*(vm->notifications.from_vm.per_vcpu)) * vcpu_count, 0,
		 sizeof(*(vm->notifications.from_vm.per_vcpu)) * vcpu_count);
}

/*
 * Initializes the bindings in the notifications structure.
 */
static void vm_notifications_init_bindings(struct notifications *notifications)
{
	for (uint32_t i = 0U; i < MAX_FFA_NOTIFICATIONS; i++) {
		notifications->bindings_sender_id[i] = HF_INVALID_VM_ID;
	}
}

/*
 * Initializes the notification-related structures for a VM.
 */
void vm_notifications_init(struct vm *vm, ffa_vcpu_count_t vcpu_count,
			   struct mpool *ppool)
{
	vm_notifications_init_per_vcpu_notifications(vm, vcpu_count, ppool);

	/* Basic initialization of the notifications structure. */
	vm_notifications_init_bindings(&vm->notifications.from_sp);
	vm_notifications_init_bindings(&vm->notifications.from_vm);
}

/**
 * Checks if there are pending notifications.
 */
bool vm_are_notifications_pending(struct vm_locked vm_locked, bool from_vm,
				  ffa_notifications_bitmap_t notifications)
{
	struct notifications *to_check;

	CHECK(vm_locked.vm != NULL);

	to_check = vm_get_notifications(vm_locked, from_vm);

	/* Check if there are pending per-vCPU notifications. */
	for (uint32_t i = 0U; i < vm_locked.vm->vcpu_count; i++) {
		if ((to_check->per_vcpu[i].pending & notifications) != 0U) {
			return true;
		}
	}

	/* Check if there are pending global notifications. */
	return (to_check->global.pending & notifications) != 0U;
}

/**
 * Checks if there are pending global notifications, either from SPs or from
 * VMs.
 */
bool vm_are_global_notifications_pending(struct vm_locked vm_locked)
{
	return vm_get_notifications(vm_locked, true)->global.pending != 0ULL ||
	       vm_get_notifications(vm_locked, false)->global.pending != 0ULL ||
	       vm_are_fwk_notifications_pending(vm_locked);
}

/**
 * Currently only the RX full notification is supported as a framework
 * notification. Returns true if there is one pending, either from the
 * Hypervisor or the SPMC.
 */
bool vm_are_fwk_notifications_pending(struct vm_locked vm_locked)
{
	return vm_locked.vm->notifications.framework.pending != 0ULL;
}

/**
 * Checks if there are per-vCPU notifications pending on a specific vCPU,
 * either from SPs or from VMs.
 */
bool vm_are_per_vcpu_notifications_pending(struct vm_locked vm_locked,
					   ffa_vcpu_index_t vcpu_id)
{
	CHECK(vcpu_id < vm_locked.vm->vcpu_count);

	return vm_get_notifications(vm_locked, true)
			       ->per_vcpu[vcpu_id]
			       .pending != 0ULL ||
	       vm_get_notifications(vm_locked, false)
			       ->per_vcpu[vcpu_id]
			       .pending != 0ULL;
}

bool vm_are_notifications_enabled(struct vm *vm)
{
	return vm->notifications.enabled == true;
}

bool vm_locked_are_notifications_enabled(struct vm_locked vm_locked)
{
	return vm_are_notifications_enabled(vm_locked.vm);
}

static bool vm_is_notification_bit_set(ffa_notifications_bitmap_t notifications,
				       uint32_t i)
{
	return (notifications & FFA_NOTIFICATION_MASK(i)) != 0U;
}

static void vm_notifications_global_state_count_update(
	ffa_notifications_bitmap_t bitmap, uint32_t *counter, int inc)
{
	/*
	 * Helper to update the counters of the global notifications state.
	 * Counts are updated by increments or decrements of 1 or -1,
	 * respectively.
	 */
	assert(inc == 1 || inc == -1);

	sl_lock(&all_notifications_state.lock);

	for (uint32_t i = 0; i < MAX_FFA_NOTIFICATIONS; i++) {
		if (vm_is_notification_bit_set(bitmap, i)) {
			CHECK((inc > 0 && *counter < UINT32_MAX) ||
			      (inc < 0 && *counter > 0));
			*counter += inc;
		}
	}

	sl_unlock(&all_notifications_state.lock);
}

/**
 * Helper function to increment the pending notifications count based on a
 * bitmap passed as argument.
 * To be used when setting notifications for a given VM.
 */
static void vm_notifications_pending_count_add(
	ffa_notifications_bitmap_t to_add)
{
	vm_notifications_global_state_count_update(
		to_add, &all_notifications_state.pending_count, 1);
}

/**
 * Helper function to decrement the pending notifications count.
 * To be used when getting the receiver's pending notifications.
 */
static void vm_notifications_pending_count_sub(
	ffa_notifications_bitmap_t to_sub)
{
	vm_notifications_global_state_count_update(
		to_sub, &all_notifications_state.pending_count, -1);
}

/**
 * Helper function to count the notifications whose information has been
 * retrieved by the scheduler of the system, and are still pending.
 */
static void vm_notifications_info_get_retrieved_count_add(
	ffa_notifications_bitmap_t to_add)
{
	vm_notifications_global_state_count_update(
		to_add, &all_notifications_state.info_get_retrieved_count, 1);
}

/**
 * Helper function to subtract the notifications that the receiver is getting
 * and whose information has been retrieved by the receiver scheduler.
 */
static void vm_notifications_info_get_retrieved_count_sub(
	ffa_notifications_bitmap_t to_sub)
{
	vm_notifications_global_state_count_update(
		to_sub, &all_notifications_state.info_get_retrieved_count, -1);
}

/**
 * Helper function to determine if there are notifications pending whose info
 * hasn't been retrieved by the receiver scheduler.
 */
bool vm_notifications_pending_not_retrieved_by_scheduler(void)
{
	bool ret;

	sl_lock(&all_notifications_state.lock);
	ret = all_notifications_state.pending_count >
	      all_notifications_state.info_get_retrieved_count;
	sl_unlock(&all_notifications_state.lock);

	return ret;
}

bool vm_is_notifications_pending_count_zero(void)
{
	bool ret;

	sl_lock(&all_notifications_state.lock);
	ret = all_notifications_state.pending_count == 0;
	sl_unlock(&all_notifications_state.lock);

	return ret;
}

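/*
 * Worked example (illustrative) of how the two counters evolve: setting
 * notification bits 0 and 1 for some VM raises pending_count by 2; a later
 * info get that reports them raises info_get_retrieved_count by 2; when the
 * receiver finally retrieves the bitmap, both counters drop back by 2, so
 * vm_notifications_pending_not_retrieved_by_scheduler() returns false again.
 */
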
/**
 * Checks that all provided notifications are bound to the specified sender,
 * and are per-vCPU or global, as specified.
 */
bool vm_notifications_validate_binding(struct vm_locked vm_locked,
				       bool is_from_vm, ffa_id_t sender_id,
				       ffa_notifications_bitmap_t notifications,
				       bool is_per_vcpu)
{
	return vm_notifications_validate_bound_sender(
		       vm_locked, is_from_vm, sender_id, notifications) &&
	       vm_notifications_validate_per_vcpu(vm_locked, is_from_vm,
						  is_per_vcpu, notifications);
}

/**
 * Updates the binding information in the notifications structure for the
 * specified notifications.
 */
void vm_notifications_update_bindings(struct vm_locked vm_locked,
				      bool is_from_vm, ffa_id_t sender_id,
				      ffa_notifications_bitmap_t notifications,
				      bool is_per_vcpu)
{
	CHECK(vm_locked.vm != NULL);
	struct notifications *to_update =
		vm_get_notifications(vm_locked, is_from_vm);

	for (uint32_t i = 0; i < MAX_FFA_NOTIFICATIONS; i++) {
		if (vm_is_notification_bit_set(notifications, i)) {
			to_update->bindings_sender_id[i] = sender_id;
		}
	}

	/*
	 * Set the bits for the notifications if they are per-vCPU, else clear
	 * them as they are global.
	 */
	if (is_per_vcpu) {
		to_update->bindings_per_vcpu |= notifications;
	} else {
		to_update->bindings_per_vcpu &= ~notifications;
	}
}

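/*
 * Usage sketch (illustrative): the bind/validate pair as it might be used on
 * the FFA_NOTIFICATION_BIND and FFA_NOTIFICATION_SET paths. `sender_id` and
 * `bitmap` are assumed to come from the ABI arguments.
 *
 *	vm_notifications_update_bindings(receiver_locked, is_from_vm,
 *					 sender_id, bitmap, true);
 *
 *	... later, before setting the notifications ...
 *	if (!vm_notifications_validate_binding(receiver_locked, is_from_vm,
 *					       sender_id, bitmap, true)) {
 *		return ffa_error(FFA_INVALID_PARAMETERS);
 *	}
 */
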
bool vm_notifications_validate_bound_sender(
	struct vm_locked vm_locked, bool is_from_vm, ffa_id_t sender_id,
	ffa_notifications_bitmap_t notifications)
{
	CHECK(vm_locked.vm != NULL);
	struct notifications *to_check =
		vm_get_notifications(vm_locked, is_from_vm);

	for (uint32_t i = 0; i < MAX_FFA_NOTIFICATIONS; i++) {
		if (vm_is_notification_bit_set(notifications, i) &&
		    to_check->bindings_sender_id[i] != sender_id) {
			return false;
		}
	}

	return true;
}

bool vm_notifications_validate_per_vcpu(struct vm_locked vm_locked,
					bool is_from_vm, bool is_per_vcpu,
					ffa_notifications_bitmap_t notif)
{
	CHECK(vm_locked.vm != NULL);
	struct notifications *to_check =
		vm_get_notifications(vm_locked, is_from_vm);

	return is_per_vcpu ? (~to_check->bindings_per_vcpu & notif) == 0U
			   : (to_check->bindings_per_vcpu & notif) == 0U;
}

static void vm_notifications_state_set(struct notifications_state *state,
				       ffa_notifications_bitmap_t notifications)
{
	state->pending |= notifications;
	vm_notifications_pending_count_add(notifications);
}

void vm_notifications_partition_set_pending(
	struct vm_locked vm_locked, bool is_from_vm,
	ffa_notifications_bitmap_t notifications, ffa_vcpu_index_t vcpu_id,
	bool is_per_vcpu)
{
	struct notifications *to_set;
	struct notifications_state *state;

	CHECK(vm_locked.vm != NULL);
	CHECK(vcpu_id < vm_locked.vm->vcpu_count);

	to_set = vm_get_notifications(vm_locked, is_from_vm);

	state = is_per_vcpu ? &to_set->per_vcpu[vcpu_id] : &to_set->global;

	vm_notifications_state_set(state, notifications);
}

/**
 * Sets pending framework notifications.
 */
void vm_notifications_framework_set_pending(
	struct vm_locked vm_locked, ffa_notifications_bitmap_t notifications)
{
	CHECK(vm_locked.vm != NULL);
	assert(is_ffa_spm_buffer_full_notification(notifications) ||
	       is_ffa_hyp_buffer_full_notification(notifications));
	vm_notifications_state_set(&vm_locked.vm->notifications.framework,
				   notifications);
}

static ffa_notifications_bitmap_t vm_notifications_state_get_pending(
	struct notifications_state *state)
{
	ffa_notifications_bitmap_t to_ret;
	ffa_notifications_bitmap_t pending_and_info_get_retrieved;

	assert(state != NULL);

	to_ret = state->pending;

	/* Update the count of currently pending notifications in the system. */
	vm_notifications_pending_count_sub(state->pending);

	/*
	 * If the notifications the receiver is getting have been retrieved by
	 * the receiver scheduler, decrement them from the respective count.
	 */
	pending_and_info_get_retrieved =
		state->pending & state->info_get_retrieved;

	if (pending_and_info_get_retrieved != 0) {
		vm_notifications_info_get_retrieved_count_sub(
			pending_and_info_get_retrieved);
	}

	state->pending = 0U;
	state->info_get_retrieved = 0U;

	return to_ret;
}

/**
 * Gets the global and per-vCPU notifications for the given vCPU ID.
 */
ffa_notifications_bitmap_t vm_notifications_partition_get_pending(
	struct vm_locked vm_locked, bool is_from_vm, ffa_vcpu_index_t vcpu_id)
{
	ffa_notifications_bitmap_t to_ret;
	struct notifications *to_get;

	assert(vm_locked.vm != NULL);
	to_get = vm_get_notifications(vm_locked, is_from_vm);
	assert(vcpu_id < vm_locked.vm->vcpu_count);

	to_ret = vm_notifications_state_get_pending(&to_get->global);
	to_ret |=
		vm_notifications_state_get_pending(&to_get->per_vcpu[vcpu_id]);

	return to_ret;
}

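/*
 * Usage sketch (illustrative): a per-vCPU notification is set on the sender's
 * path and later drained when the receiver retrieves its bitmap for that
 * vCPU, e.g. on FFA_NOTIFICATION_GET.
 *
 *	vm_notifications_partition_set_pending(receiver_locked, is_from_vm,
 *					       bitmap, vcpu_id, true);
 *
 *	... on retrieval ...
 *	ffa_notifications_bitmap_t got =
 *		vm_notifications_partition_get_pending(receiver_locked,
 *						       is_from_vm, vcpu_id);
 */
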
/**
 * Gets pending framework notifications.
 */
ffa_notifications_bitmap_t vm_notifications_framework_get_pending(
	struct vm_locked vm_locked)
{
	struct vm *vm = vm_locked.vm;
	ffa_notifications_bitmap_t framework;

	assert(vm != NULL);

	framework = vm_notifications_state_get_pending(
		&vm->notifications.framework);

	return framework;
}

static void vm_notifications_state_info_get(
	struct notifications_state *state, ffa_id_t vm_id, bool is_per_vcpu,
	ffa_vcpu_index_t vcpu_id, uint16_t *ids, uint32_t *ids_count,
	uint32_t *lists_sizes, uint32_t *lists_count,
	const uint32_t ids_max_count,
	enum notifications_info_get_state *info_get_state)
{
	ffa_notifications_bitmap_t pending_not_retrieved;

	CHECK(*ids_count <= ids_max_count);
	CHECK(*lists_count <= ids_max_count);

	if (*info_get_state == FULL) {
		return;
	}

	pending_not_retrieved = state->pending & ~state->info_get_retrieved;

	/* No notifications pending that haven't been retrieved. */
	if (pending_not_retrieved == 0U) {
		return;
	}

	if (*ids_count == ids_max_count) {
		*info_get_state = FULL;
		return;
	}

	switch (*info_get_state) {
	case INIT:
	case STARTING_NEW:
		/*
		 * At this iteration two IDs are to be added: the VM ID and the
		 * vCPU ID. If there is no space, change state and terminate
		 * the function.
		 */
		if (is_per_vcpu && ids_max_count - *ids_count < 2) {
			*info_get_state = FULL;
			return;
		}

		*info_get_state = INSERTING;
		ids[*ids_count] = vm_id;
		++(*ids_count);

		if (is_per_vcpu) {
			/* Insert the vCPU ID. */
			ids[*ids_count] = vcpu_id;
			++(*ids_count);
			++lists_sizes[*lists_count];
		}

		++(*lists_count);
		break;
	case INSERTING:
		/* For per-vCPU notifications only. */
		if (!is_per_vcpu) {
			break;
		}

		/* Insert the vCPU ID. */
		ids[*ids_count] = vcpu_id;
		(*ids_count)++;
		/* Increment the respective list size. */
		++lists_sizes[*lists_count - 1];

		if (lists_sizes[*lists_count - 1] == 3) {
			*info_get_state = STARTING_NEW;
		}
		break;
	default:
		panic("Notification info get action error!!\n");
	}

	state->info_get_retrieved |= pending_not_retrieved;

	vm_notifications_info_get_retrieved_count_add(pending_not_retrieved);
}

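/*
 * Worked example (illustrative) of the resulting packing, which follows the
 * FFA_NOTIFICATION_INFO_GET list format: each list starts with a VM ID and
 * carries up to three vCPU IDs for per-vCPU notifications. For a VM with ID 1
 * that has one global notification pending plus per-vCPU notifications
 * pending on vCPUs 0, 1, 2 and 3, successive calls would produce:
 *
 *	ids	    = { 1, 0, 1, 2, 1, 3 }
 *	lists_count = 2
 *	lists_sizes = { 3, 1 }
 *
 * The global notification opens the first list with the VM ID, the first
 * three vCPU IDs fill it, and the fourth vCPU starts a new list.
 */
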
/**
 * Gets pending notifications' information to return to the receiver scheduler.
 */
void vm_notifications_info_get_pending(
	struct vm_locked vm_locked, bool is_from_vm, uint16_t *ids,
	uint32_t *ids_count, uint32_t *lists_sizes, uint32_t *lists_count,
	const uint32_t ids_max_count,
	enum notifications_info_get_state *info_get_state)
{
	struct notifications *notifications;

	CHECK(vm_locked.vm != NULL);

	notifications = vm_get_notifications(vm_locked, is_from_vm);

	/*
	 * Perform info get for global notifications, before doing it for
	 * per-vCPU.
	 */
	vm_notifications_state_info_get(&notifications->global,
					vm_locked.vm->id, false, 0, ids,
					ids_count, lists_sizes, lists_count,
					ids_max_count, info_get_state);

	for (ffa_vcpu_count_t i = 0; i < vm_locked.vm->vcpu_count; i++) {
		vm_notifications_state_info_get(
			&notifications->per_vcpu[i], vm_locked.vm->id, true, i,
			ids, ids_count, lists_sizes, lists_count, ids_max_count,
			info_get_state);
	}
}

/**
 * Gets all info from the VM's pending notifications.
 * Returns true if the list is full, and there is more pending.
 */
bool vm_notifications_info_get(struct vm_locked vm_locked, uint16_t *ids,
			       uint32_t *ids_count, uint32_t *lists_sizes,
			       uint32_t *lists_count,
			       const uint32_t ids_max_count)
{
	enum notifications_info_get_state current_state = INIT;

	/* Get info of pending notifications from the framework. */
	vm_notifications_state_info_get(&vm_locked.vm->notifications.framework,
					vm_locked.vm->id, false, 0, ids,
					ids_count, lists_sizes, lists_count,
					ids_max_count, &current_state);

	/* Get info of pending notifications from SPs. */
	vm_notifications_info_get_pending(vm_locked, false, ids, ids_count,
					  lists_sizes, lists_count,
					  ids_max_count, &current_state);

	/* Get info of pending notifications from VMs. */
	vm_notifications_info_get_pending(vm_locked, true, ids, ids_count,
					  lists_sizes, lists_count,
					  ids_max_count, &current_state);

	/*
	 * The state transitions to FULL when trying to insert a new ID in the
	 * list and there is no more space. This means there are notifications
	 * pending whose info has not been retrieved.
	 */
	return current_state == FULL;
}

/**
 * Checks the VM's messaging method support.
 */
bool vm_supports_messaging_method(struct vm *vm, uint8_t msg_method)
{
	return (vm->messaging_method & msg_method) != 0;
}

void vm_notifications_set_npi_injected(struct vm_locked vm_locked,
				       bool npi_injected)
{
	vm_locked.vm->notifications.npi_injected = npi_injected;
}

bool vm_notifications_is_npi_injected(struct vm_locked vm_locked)
{
	return vm_locked.vm->notifications.npi_injected;
}

/**
 * Sets the designated GP register through which the VM expects to receive the
 * boot info's address.
 */
void vm_set_boot_info_gp_reg(struct vm *vm, struct vcpu *vcpu)
{
	if (vm->boot_info.blob_addr.ipa != 0U) {
		arch_regs_set_gp_reg(&vcpu->regs,
				     ipa_addr(vm->boot_info.blob_addr),
				     vm->boot_info.gp_register_num);
	}
}

/**
 * Obtains the interrupt descriptor entry of the specified VM corresponding
 * to the specified interrupt ID.
 */
static struct interrupt_descriptor *vm_find_interrupt_descriptor(
	struct vm_locked vm_locked, uint32_t id)
{
	for (uint32_t i = 0; i < HF_NUM_INTIDS; i++) {
		/* Interrupt descriptors are populated contiguously. */
		if (!vm_locked.vm->interrupt_desc[i].valid) {
			break;
		}

		if (vm_locked.vm->interrupt_desc[i].interrupt_id == id) {
			/* Interrupt descriptor found. */
			return &vm_locked.vm->interrupt_desc[i];
		}
	}

	return NULL;
}

/**
 * Updates the target MPIDR corresponding to the specified interrupt ID
 * belonging to the specified VM.
 */
struct interrupt_descriptor *vm_interrupt_set_target_mpidr(
	struct vm_locked vm_locked, uint32_t id, uint32_t target_mpidr)
{
	struct interrupt_descriptor *int_desc;

	int_desc = vm_find_interrupt_descriptor(vm_locked, id);

	if (int_desc != NULL) {
		interrupt_desc_set_mpidr(int_desc, target_mpidr);
	}

	return int_desc;
}

/**
 * Updates the security state of the specified interrupt ID belonging to the
 * specified VM.
 */
struct interrupt_descriptor *vm_interrupt_set_sec_state(
	struct vm_locked vm_locked, uint32_t id, uint32_t sec_state)
{
	struct interrupt_descriptor *int_desc;

	int_desc = vm_find_interrupt_descriptor(vm_locked, id);

	if (int_desc != NULL) {
		interrupt_desc_set_sec_state(int_desc, sec_state);
	}

	return int_desc;
}
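
/*
 * Usage sketch (illustrative): rerouting a physical interrupt to another core,
 * with the VM locked by the caller. `intid` and `mpidr` are hypothetical
 * values identifying the interrupt and the target core.
 *
 *	struct vm_locked vm_locked = vm_lock(vm);
 *	struct interrupt_descriptor *int_desc =
 *		vm_interrupt_set_target_mpidr(vm_locked, intid, mpidr);
 *
 *	if (int_desc == NULL) {
 *		dlog_error("Interrupt %u not found.\n", intid);
 *	}
 *	vm_unlock(&vm_locked);
 */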