/*
 * Copyright 2018 The Hafnium Authors.
 *
 * Use of this source code is governed by a BSD-style
 * license that can be found in the LICENSE file or at
 * https://opensource.org/licenses/BSD-3-Clause.
 */

#include "hf/vm.h"

#include "hf/arch/vm.h"

#include "hf/api.h"
#include "hf/assert.h"
#include "hf/check.h"
#include "hf/cpu.h"
#include "hf/dlog.h"
#include "hf/ffa.h"
#include "hf/layout.h"
#include "hf/std.h"

#include "vmapi/hf/call.h"

static struct vm vms[MAX_VMS];
static struct vm other_world;
static ffa_vm_count_t vm_count;

/**
 * Counters tracking the status of notifications in the system. They help
 * refine the information returned to the receiver scheduler.
 */
static struct {
	/** Counts pending notifications. */
	uint32_t pending_count;
	/**
	 * Counts pending notifications whose information has been retrieved
	 * by the receiver scheduler.
	 */
	uint32_t info_get_retrieved_count;
	struct spinlock lock;
} all_notifications_state;

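/**
 * Initialises the VM's page tables and its IOMMU context via the
 * arch-specific helpers.
 */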
static bool vm_init_mm(struct vm *vm, struct mpool *ppool)
{
	return arch_vm_init_mm(vm, ppool) && arch_vm_iommu_init_mm(vm, ppool);
}

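/**
 * Initialises the VM structure with the given ID: allocates its vCPUs from
 * `ppool` and sets up its mailbox, page tables, wait entries and
 * notifications state. Returns NULL if the memory management initialisation
 * fails.
 */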
struct vm *vm_init(ffa_id_t id, ffa_vcpu_count_t vcpu_count,
		   struct mpool *ppool, bool el0_partition,
		   uint8_t dma_device_count)
{
	uint32_t i;
	struct vm *vm;
	size_t vcpu_ppool_entries = (align_up(sizeof(struct vcpu) * vcpu_count,
					      MM_PPOOL_ENTRY_SIZE) /
				     MM_PPOOL_ENTRY_SIZE);

	if (id == HF_OTHER_WORLD_ID) {
		CHECK(el0_partition == false);
		vm = &other_world;
	} else {
		uint16_t vm_index = id - HF_VM_ID_OFFSET;

		CHECK(id >= HF_VM_ID_OFFSET);
		CHECK(vm_index < ARRAY_SIZE(vms));
		vm = &vms[vm_index];
	}

	memset_s(vm, sizeof(*vm), 0, sizeof(*vm));

	list_init(&vm->mailbox.waiter_list);
	list_init(&vm->mailbox.ready_list);
	sl_init(&vm->lock);

	vm->id = id;
	vm->vcpu_count = vcpu_count;

	vm->vcpus = (struct vcpu *)mpool_alloc_contiguous(
		ppool, vcpu_ppool_entries, 1);
	CHECK(vm->vcpus != NULL);

	vm->mailbox.state = MAILBOX_STATE_EMPTY;
	atomic_init(&vm->aborting, false);
	vm->el0_partition = el0_partition;
	vm->dma_device_count = dma_device_count;

	if (!vm_init_mm(vm, ppool)) {
		return NULL;
	}

	/* Initialise waiter entries. */
	for (i = 0; i < MAX_VMS; i++) {
		vm->wait_entries[i].waiting_vm = vm;
		list_init(&vm->wait_entries[i].wait_links);
		list_init(&vm->wait_entries[i].ready_links);
	}

	/* Do basic initialization of vCPUs. */
	for (i = 0; i < vcpu_count; i++) {
		vcpu_init(vm_get_vcpu(vm, i), vm);
	}

	vm_notifications_init(vm, vcpu_count, ppool);
	return vm;
}

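/**
 * Initialises the next available VM, assigning it the next sequential ID
 * after the reserved offset. Returns false if the maximum number of VMs has
 * been reached or initialisation fails.
 */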
bool vm_init_next(ffa_vcpu_count_t vcpu_count, struct mpool *ppool,
		  struct vm **new_vm, bool el0_partition,
		  uint8_t dma_device_count)
{
	if (vm_count >= MAX_VMS) {
		return false;
	}

	/* Generate IDs based on an offset, as low IDs (e.g. 0) are reserved. */
	*new_vm = vm_init(vm_count + HF_VM_ID_OFFSET, vcpu_count, ppool,
			  el0_partition, dma_device_count);
	if (*new_vm == NULL) {
		return false;
	}
	++vm_count;

	return true;
}

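/**
 * Returns the number of VMs initialised so far.
 */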
ffa_vm_count_t vm_get_count(void)
{
	return vm_count;
}

/**
 * Returns a pointer to the VM with the corresponding id.
 */
struct vm *vm_find(ffa_id_t id)
{
	uint16_t index;

	if (id == HF_OTHER_WORLD_ID) {
		if (other_world.id == HF_OTHER_WORLD_ID) {
			return &other_world;
		}
		return NULL;
	}

	/* Check that this is not a reserved ID. */
	if (id < HF_VM_ID_OFFSET) {
		return NULL;
	}

	index = id - HF_VM_ID_OFFSET;

	return vm_find_index(index);
}

/**
 * Returns a locked instance of the VM with the corresponding id.
 */
struct vm_locked vm_find_locked(ffa_id_t id)
{
	struct vm *vm = vm_find(id);

	if (vm != NULL) {
		return vm_lock(vm);
	}

	return (struct vm_locked){.vm = NULL};
}

/**
 * Returns a pointer to the VM at the specified index.
 */
struct vm *vm_find_index(uint16_t index)
{
	/* Ensure the VM is initialized. */
	if (index >= vm_count) {
		return NULL;
	}

	return &vms[index];
}

/**
 * Locks the given VM and returns a `vm_locked` instance holding the newly
 * locked VM.
 */
struct vm_locked vm_lock(struct vm *vm)
{
	struct vm_locked locked = {
		.vm = vm,
	};

	sl_lock(&vm->lock);

	return locked;
}

/**
 * Locks two VMs ensuring that the locking order is according to the locks'
 * addresses.
 */
struct two_vm_locked vm_lock_both(struct vm *vm1, struct vm *vm2)
{
	struct two_vm_locked dual_lock;

	sl_lock_both(&vm1->lock, &vm2->lock);
	dual_lock.vm1.vm = vm1;
	dual_lock.vm2.vm = vm2;

	return dual_lock;
}

/**
 * Unlocks a VM previously locked with vm_lock, and updates `locked` to reflect
 * the fact that the VM is no longer locked.
 */
void vm_unlock(struct vm_locked *locked)
{
	sl_unlock(&locked->vm->lock);
	locked->vm = NULL;
}

/**
 * Gets the vCPU with the given index from the given VM.
 * This assumes the index is valid, i.e. less than vm->vcpu_count.
 */
struct vcpu *vm_get_vcpu(struct vm *vm, ffa_vcpu_index_t vcpu_index)
{
	CHECK(vcpu_index < vm->vcpu_count);
	return &vm->vcpus[vcpu_index];
}

/**
 * Gets `vm`'s wait entry for waiting on `for_vm`.
 */
struct wait_entry *vm_get_wait_entry(struct vm *vm, ffa_id_t for_vm)
{
	uint16_t index;

	CHECK(for_vm >= HF_VM_ID_OFFSET);
	index = for_vm - HF_VM_ID_OFFSET;
	CHECK(index < MAX_VMS);

	return &vm->wait_entries[index];
}

/**
 * Checks whether the given `to` VM's mailbox is currently busy.
 */
bool vm_is_mailbox_busy(struct vm_locked to)
{
	return to.vm->mailbox.state != MAILBOX_STATE_EMPTY ||
	       to.vm->mailbox.recv == NULL;
}

/**
 * Checks if the mailbox is currently owned by the other world.
 */
bool vm_is_mailbox_other_world_owned(struct vm_locked to)
{
	return to.vm->mailbox.state == MAILBOX_STATE_OTHER_WORLD_OWNED;
}

/**
 * Gets the ID of the VM which the given VM's wait entry is for.
 */
ffa_id_t vm_id_for_wait_entry(struct vm *vm, struct wait_entry *entry)
{
	uint16_t index = entry - vm->wait_entries;

	return index + HF_VM_ID_OFFSET;
}

/**
 * Returns whether the given VM ID represents an entity in the current world:
 * i.e. the hypervisor or a normal world VM when running in the normal world,
 * or the SPM or an SP when running in the secure world.
 */
bool vm_id_is_current_world(ffa_id_t vm_id)
{
	return (vm_id & HF_VM_ID_WORLD_MASK) !=
	       (HF_OTHER_WORLD_ID & HF_VM_ID_WORLD_MASK);
}

/**
 * Maps a range of addresses to the VM in both the MMU and the IOMMU.
 *
 * mm_vm_defrag should always be called after a series of page table updates,
 * whether they succeed or fail. This is because on failure extra page table
 * entries may have been allocated and then not used, while on success it may
 * be possible to compact the page table by merging several entries into a
 * block.
 *
 * Returns true on success, or false if the update failed and no changes were
 * made.
 */
bool vm_identity_map(struct vm_locked vm_locked, paddr_t begin, paddr_t end,
		     uint32_t mode, struct mpool *ppool, ipaddr_t *ipa)
{
	if (!vm_identity_prepare(vm_locked, begin, end, mode, ppool)) {
		return false;
	}

	vm_identity_commit(vm_locked, begin, end, mode, ppool, ipa);

	return true;
}

/**
 * Prepares the given VM for the given address mapping such that it will be
 * able to commit the change without failure.
 *
 * In particular, after multiple calls to this function, the corresponding
 * calls to commit the changes will succeed.
 *
 * Returns true on success, or false if the update failed and no changes were
 * made.
 */
bool vm_identity_prepare(struct vm_locked vm_locked, paddr_t begin, paddr_t end,
			 uint32_t mode, struct mpool *ppool)
{
	return arch_vm_identity_prepare(vm_locked, begin, end, mode, ppool);
}

/**
 * Commits the given address mapping to the VM assuming the operation cannot
 * fail. `vm_identity_prepare` must be used correctly before this to ensure
 * this condition.
 */
void vm_identity_commit(struct vm_locked vm_locked, paddr_t begin, paddr_t end,
			uint32_t mode, struct mpool *ppool, ipaddr_t *ipa)
{
	arch_vm_identity_commit(vm_locked, begin, end, mode, ppool, ipa);
}

/**
 * Unmaps a range of addresses from the VM.
 *
 * Returns true on success, or false if the update failed and no changes were
 * made.
 */
bool vm_unmap(struct vm_locked vm_locked, paddr_t begin, paddr_t end,
	      struct mpool *ppool)
{
	return arch_vm_unmap(vm_locked, begin, end, ppool);
}

/**
 * Defragments the page tables of an EL0 partition or of a VM.
 */
void vm_ptable_defrag(struct vm_locked vm_locked, struct mpool *ppool)
{
	arch_vm_ptable_defrag(vm_locked, ppool);
}

/**
 * Unmaps the hypervisor pages from the given page table.
 */
bool vm_unmap_hypervisor(struct vm_locked vm_locked, struct mpool *ppool)
{
	/* TODO: If we add pages dynamically, they must be included here too. */
	return vm_unmap(vm_locked, layout_text_begin(), layout_text_end(),
			ppool) &&
	       vm_unmap(vm_locked, layout_rodata_begin(), layout_rodata_end(),
			ppool) &&
	       vm_unmap(vm_locked, layout_data_begin(), layout_data_end(),
			ppool) &&
	       vm_unmap(vm_locked, layout_stacks_begin(), layout_stacks_end(),
			ppool);
}

/**
 * Gets the mode of the given range of IPAs or VAs if they are mapped with the
 * same mode.
 *
 * Returns true if the range is mapped with the same mode and false otherwise.
 * The wrapper calls the appropriate mm function depending on whether the
 * partition is a VM or an EL0 partition.
 */
bool vm_mem_get_mode(struct vm_locked vm_locked, ipaddr_t begin, ipaddr_t end,
		     uint32_t *mode)
{
	return arch_vm_mem_get_mode(vm_locked, begin, end, mode);
}

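/**
 * Checks whether the given VM's mailbox is busy: its RX buffer is either not
 * set up or not currently empty.
 */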
bool vm_mailbox_state_busy(struct vm_locked vm_locked)
{
	return vm_locked.vm->mailbox.state != MAILBOX_STATE_EMPTY ||
	       vm_locked.vm->mailbox.recv == NULL;
}

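/**
 * Returns the VM's notifications structure for notifications coming from VMs
 * or from SPs, depending on `is_from_vm`.
 */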
static struct notifications *vm_get_notifications(struct vm_locked vm_locked,
						  bool is_from_vm)
{
	return is_from_vm ? &vm_locked.vm->notifications.from_vm
			  : &vm_locked.vm->notifications.from_sp;
}

/*
 * Dynamically allocates the per-vCPU notifications structures for a given VM.
 */
static void vm_notifications_init_per_vcpu_notifications(
	struct vm *vm, ffa_vcpu_count_t vcpu_count, struct mpool *ppool)
{
	size_t notif_ppool_entries =
		(align_up(sizeof(struct notifications_state) * vcpu_count,
			  MM_PPOOL_ENTRY_SIZE) /
		 MM_PPOOL_ENTRY_SIZE);

	/*
	 * Allow the function to be called on already initialized VMs whose
	 * notification structures only need to be cleared.
	 */
	if (vm->notifications.from_sp.per_vcpu == NULL) {
		assert(vm->notifications.from_vm.per_vcpu == NULL);
		assert(vcpu_count != 0);
		CHECK(ppool != NULL);
		vm->notifications.from_sp.per_vcpu =
			(struct notifications_state *)mpool_alloc_contiguous(
				ppool, notif_ppool_entries, 1);
		CHECK(vm->notifications.from_sp.per_vcpu != NULL);

		vm->notifications.from_vm.per_vcpu =
			(struct notifications_state *)mpool_alloc_contiguous(
				ppool, notif_ppool_entries, 1);
		CHECK(vm->notifications.from_vm.per_vcpu != NULL);
	} else {
		assert(vm->notifications.from_vm.per_vcpu != NULL);
	}

	memset_s(vm->notifications.from_sp.per_vcpu,
		 sizeof(*(vm->notifications.from_sp.per_vcpu)) * vcpu_count, 0,
		 sizeof(*(vm->notifications.from_sp.per_vcpu)) * vcpu_count);
	memset_s(vm->notifications.from_vm.per_vcpu,
		 sizeof(*(vm->notifications.from_vm.per_vcpu)) * vcpu_count, 0,
		 sizeof(*(vm->notifications.from_vm.per_vcpu)) * vcpu_count);
}

/*
 * Initializes the bindings in the given notifications structure.
 */
static void vm_notifications_init_bindings(struct notifications *notifications)
{
	for (uint32_t i = 0U; i < MAX_FFA_NOTIFICATIONS; i++) {
		notifications->bindings_sender_id[i] = HF_INVALID_VM_ID;
	}
}

/*
 * Initializes notification-related structures for a VM.
 */
void vm_notifications_init(struct vm *vm, ffa_vcpu_count_t vcpu_count,
			   struct mpool *ppool)
{
	vm_notifications_init_per_vcpu_notifications(vm, vcpu_count, ppool);

	/* Basic initialization of the notifications structure. */
	vm_notifications_init_bindings(&vm->notifications.from_sp);
	vm_notifications_init_bindings(&vm->notifications.from_vm);
}

/**
 * Checks if there are pending notifications.
 */
bool vm_are_notifications_pending(struct vm_locked vm_locked, bool from_vm,
				  ffa_notifications_bitmap_t notifications)
{
	struct notifications *to_check;

	CHECK(vm_locked.vm != NULL);

	to_check = vm_get_notifications(vm_locked, from_vm);

	/* Check if there are pending per-vCPU notifications. */
	for (uint32_t i = 0U; i < vm_locked.vm->vcpu_count; i++) {
		if ((to_check->per_vcpu[i].pending & notifications) != 0U) {
			return true;
		}
	}

	/* Check if there are pending global notifications. */
	return (to_check->global.pending & notifications) != 0U;
}

/**
 * Checks if there are pending global notifications, either from SPs or from
 * VMs.
 */
bool vm_are_global_notifications_pending(struct vm_locked vm_locked)
{
	return vm_get_notifications(vm_locked, true)->global.pending != 0ULL ||
	       vm_get_notifications(vm_locked, false)->global.pending != 0ULL ||
	       vm_are_fwk_notifications_pending(vm_locked);
}

/**
 * Currently only the RX buffer full notification is supported as a framework
 * notification. Returns true if one is pending, either from the hypervisor or
 * from the SPMC.
 */
bool vm_are_fwk_notifications_pending(struct vm_locked vm_locked)
{
	return vm_locked.vm->notifications.framework.pending != 0ULL;
}

/**
 * Checks if there are per-vCPU notifications pending on a specific vCPU,
 * either from SPs or from VMs.
 */
bool vm_are_per_vcpu_notifications_pending(struct vm_locked vm_locked,
					   ffa_vcpu_index_t vcpu_id)
{
	CHECK(vcpu_id < vm_locked.vm->vcpu_count);

	return vm_get_notifications(vm_locked, true)
		       ->per_vcpu[vcpu_id]
		       .pending != 0ULL ||
	       vm_get_notifications(vm_locked, false)
		       ->per_vcpu[vcpu_id]
		       .pending != 0ULL;
}

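/**
 * Checks whether the VM has notifications support enabled.
 */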
bool vm_are_notifications_enabled(struct vm *vm)
{
	return vm->notifications.enabled == true;
}

bool vm_locked_are_notifications_enabled(struct vm_locked vm_locked)
{
	return vm_are_notifications_enabled(vm_locked.vm);
}

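/**
 * Checks whether notification `i` is set in the given notifications bitmap.
 */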
static bool vm_is_notification_bit_set(ffa_notifications_bitmap_t notifications,
				       uint32_t i)
{
	return (notifications & FFA_NOTIFICATION_MASK(i)) != 0U;
}

static void vm_notifications_global_state_count_update(
	ffa_notifications_bitmap_t bitmap, uint32_t *counter, int inc)
{
	/*
	 * Helper to update counters from the global notifications state.
	 * Counts are updated in increments or decrements of 1 or -1,
	 * respectively.
	 */
	assert(inc == 1 || inc == -1);

	sl_lock(&all_notifications_state.lock);

	for (uint32_t i = 0; i < MAX_FFA_NOTIFICATIONS; i++) {
		if (vm_is_notification_bit_set(bitmap, i)) {
			CHECK((inc > 0 && *counter < UINT32_MAX) ||
			      (inc < 0 && *counter > 0));
			*counter += inc;
		}
	}

	sl_unlock(&all_notifications_state.lock);
}

/**
 * Helper function to increment the pending notifications count based on a
 * bitmap passed as argument.
 * To be used when setting notifications for a given VM.
 */
static void vm_notifications_pending_count_add(
	ffa_notifications_bitmap_t to_add)
{
	vm_notifications_global_state_count_update(
		to_add, &all_notifications_state.pending_count, 1);
}

/**
 * Helper function to decrement the pending notifications count.
 * To be used when getting the receiver's pending notifications.
 */
static void vm_notifications_pending_count_sub(
	ffa_notifications_bitmap_t to_sub)
{
	vm_notifications_global_state_count_update(
		to_sub, &all_notifications_state.pending_count, -1);
}

/**
 * Helper function to count the notifications whose information has been
 * retrieved by the scheduler of the system, and that are still pending.
 */
static void vm_notifications_info_get_retrieved_count_add(
	ffa_notifications_bitmap_t to_add)
{
	vm_notifications_global_state_count_update(
		to_add, &all_notifications_state.info_get_retrieved_count, 1);
}

/**
 * Helper function to subtract the notifications that the receiver is getting
 * and whose information has been retrieved by the receiver scheduler.
 */
static void vm_notifications_info_get_retrieved_count_sub(
	ffa_notifications_bitmap_t to_sub)
{
	vm_notifications_global_state_count_update(
		to_sub, &all_notifications_state.info_get_retrieved_count, -1);
}

/**
 * Helper function to determine if there are notifications pending whose info
 * hasn't been retrieved by the receiver scheduler.
 */
bool vm_notifications_pending_not_retrieved_by_scheduler(void)
{
	bool ret;

	sl_lock(&all_notifications_state.lock);
	ret = all_notifications_state.pending_count >
	      all_notifications_state.info_get_retrieved_count;
	sl_unlock(&all_notifications_state.lock);

	return ret;
}

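/**
 * Checks whether the global count of pending notifications is zero.
 */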
bool vm_is_notifications_pending_count_zero(void)
{
	bool ret;

	sl_lock(&all_notifications_state.lock);
	ret = all_notifications_state.pending_count == 0;
	sl_unlock(&all_notifications_state.lock);

	return ret;
}

/**
 * Checks that all provided notifications are bound to the specified sender,
 * and are per vCPU or global, as specified.
 */
bool vm_notifications_validate_binding(struct vm_locked vm_locked,
				       bool is_from_vm, ffa_id_t sender_id,
				       ffa_notifications_bitmap_t notifications,
				       bool is_per_vcpu)
{
	return vm_notifications_validate_bound_sender(
		       vm_locked, is_from_vm, sender_id, notifications) &&
	       vm_notifications_validate_per_vcpu(vm_locked, is_from_vm,
						  is_per_vcpu, notifications);
}

/**
 * Updates binding information in the notifications structure for the
 * specified notifications.
 */
void vm_notifications_update_bindings(struct vm_locked vm_locked,
				      bool is_from_vm, ffa_id_t sender_id,
				      ffa_notifications_bitmap_t notifications,
				      bool is_per_vcpu)
{
	CHECK(vm_locked.vm != NULL);
	struct notifications *to_update =
		vm_get_notifications(vm_locked, is_from_vm);

	for (uint32_t i = 0; i < MAX_FFA_NOTIFICATIONS; i++) {
		if (vm_is_notification_bit_set(notifications, i)) {
			to_update->bindings_sender_id[i] = sender_id;
		}
	}

	/*
	 * Set the binding bits if the notifications are per vCPU, else clear
	 * them as they are global.
	 */
	if (is_per_vcpu) {
		to_update->bindings_per_vcpu |= notifications;
	} else {
		to_update->bindings_per_vcpu &= ~notifications;
	}
}

bool vm_notifications_validate_bound_sender(
	struct vm_locked vm_locked, bool is_from_vm, ffa_id_t sender_id,
	ffa_notifications_bitmap_t notifications)
{
	CHECK(vm_locked.vm != NULL);
	struct notifications *to_check =
		vm_get_notifications(vm_locked, is_from_vm);

	for (uint32_t i = 0; i < MAX_FFA_NOTIFICATIONS; i++) {
		if (vm_is_notification_bit_set(notifications, i) &&
		    to_check->bindings_sender_id[i] != sender_id) {
			return false;
		}
	}

	return true;
}

bool vm_notifications_validate_per_vcpu(struct vm_locked vm_locked,
					bool is_from_vm, bool is_per_vcpu,
					ffa_notifications_bitmap_t notif)
{
	CHECK(vm_locked.vm != NULL);
	struct notifications *to_check =
		vm_get_notifications(vm_locked, is_from_vm);

	return is_per_vcpu ? (~to_check->bindings_per_vcpu & notif) == 0U
			   : (to_check->bindings_per_vcpu & notif) == 0U;
}

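/**
 * Marks the given notifications as pending in `state` and updates the global
 * pending notifications count.
 */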
static void vm_notifications_state_set(struct notifications_state *state,
				       ffa_notifications_bitmap_t notifications)
{
	state->pending |= notifications;
	vm_notifications_pending_count_add(notifications);
}

void vm_notifications_partition_set_pending(
	struct vm_locked vm_locked, bool is_from_vm,
	ffa_notifications_bitmap_t notifications, ffa_vcpu_index_t vcpu_id,
	bool is_per_vcpu)
{
	struct notifications *to_set;
	struct notifications_state *state;

	CHECK(vm_locked.vm != NULL);
	CHECK(vcpu_id < vm_locked.vm->vcpu_count);

	to_set = vm_get_notifications(vm_locked, is_from_vm);

	state = is_per_vcpu ? &to_set->per_vcpu[vcpu_id] : &to_set->global;

	vm_notifications_state_set(state, notifications);
}

/**
 * Set pending framework notifications.
 */
void vm_notifications_framework_set_pending(
	struct vm_locked vm_locked, ffa_notifications_bitmap_t notifications)
{
	CHECK(vm_locked.vm != NULL);
	assert(is_ffa_spm_buffer_full_notification(notifications) ||
	       is_ffa_hyp_buffer_full_notification(notifications));
	vm_notifications_state_set(&vm_locked.vm->notifications.framework,
				   notifications);
}

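/**
 * Returns and clears the notifications pending in `state`, updating the
 * global pending and info-get-retrieved counts accordingly.
 */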
static ffa_notifications_bitmap_t vm_notifications_state_get_pending(
	struct notifications_state *state)
{
	ffa_notifications_bitmap_t to_ret;
	ffa_notifications_bitmap_t pending_and_info_get_retrieved;

	assert(state != NULL);

	to_ret = state->pending;

	/* Update count of currently pending notifications in the system. */
	vm_notifications_pending_count_sub(state->pending);

	/*
	 * If the notifications the receiver is getting have already been
	 * retrieved by the receiver scheduler, decrement them from the
	 * respective count.
	 */
	pending_and_info_get_retrieved =
		state->pending & state->info_get_retrieved;

	if (pending_and_info_get_retrieved != 0) {
		vm_notifications_info_get_retrieved_count_sub(
			pending_and_info_get_retrieved);
	}

	state->pending = 0U;
	state->info_get_retrieved = 0U;

	return to_ret;
}

/**
 * Gets global and per-vCPU notifications pending for the given vCPU ID.
 */
ffa_notifications_bitmap_t vm_notifications_partition_get_pending(
	struct vm_locked vm_locked, bool is_from_vm, ffa_vcpu_index_t vcpu_id)
{
	ffa_notifications_bitmap_t to_ret;
	struct notifications *to_get;

	assert(vm_locked.vm != NULL);
	to_get = vm_get_notifications(vm_locked, is_from_vm);
	assert(vcpu_id < vm_locked.vm->vcpu_count);

	to_ret = vm_notifications_state_get_pending(&to_get->global);
	to_ret |=
		vm_notifications_state_get_pending(&to_get->per_vcpu[vcpu_id]);

	return to_ret;
}

/**
 * Gets pending framework notifications.
 */
ffa_notifications_bitmap_t vm_notifications_framework_get_pending(
	struct vm_locked vm_locked)
{
	struct vm *vm = vm_locked.vm;
	ffa_notifications_bitmap_t framework;

	assert(vm != NULL);

	framework = vm_notifications_state_get_pending(
		&vm->notifications.framework);

	return framework;
}

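/**
 * Adds the VM ID (and vCPU ID for per-vCPU notifications) of the notifications
 * pending in `state` that have not yet been retrieved by the receiver
 * scheduler to the `ids` list, updating the list sizes and count, and marks
 * those notifications as retrieved.
 */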
static void vm_notifications_state_info_get(
	struct notifications_state *state, ffa_id_t vm_id, bool is_per_vcpu,
	ffa_vcpu_index_t vcpu_id, uint16_t *ids, uint32_t *ids_count,
	uint32_t *lists_sizes, uint32_t *lists_count,
	const uint32_t ids_max_count,
	enum notifications_info_get_state *info_get_state)
{
	ffa_notifications_bitmap_t pending_not_retrieved;

	CHECK(*ids_count <= ids_max_count);
	CHECK(*lists_count <= ids_max_count);

	if (*info_get_state == FULL) {
		return;
	}

	pending_not_retrieved = state->pending & ~state->info_get_retrieved;

	/* No notifications pending that haven't been retrieved. */
	if (pending_not_retrieved == 0U) {
		return;
	}

	if (*ids_count == ids_max_count) {
		*info_get_state = FULL;
		return;
	}

	switch (*info_get_state) {
	case INIT:
	case STARTING_NEW:
		/*
		 * In this iteration two IDs are to be added: the VM ID
		 * and the vCPU ID. If there is no space, change state
		 * and terminate the function.
		 */
		if (is_per_vcpu && ids_max_count - *ids_count < 2) {
			*info_get_state = FULL;
			return;
		}

		*info_get_state = INSERTING;
		ids[*ids_count] = vm_id;
		++(*ids_count);

		if (is_per_vcpu) {
			/* Insert vCPU ID. */
			ids[*ids_count] = vcpu_id;
			++(*ids_count);
			++lists_sizes[*lists_count];
		}

		++(*lists_count);
		break;
	case INSERTING:
		/* For per-vCPU notifications only. */
		if (!is_per_vcpu) {
			break;
		}

		/* Insert vCPU ID. */
		ids[*ids_count] = vcpu_id;
		(*ids_count)++;
		/* Increment the respective list size. */
		++lists_sizes[*lists_count - 1];

		if (lists_sizes[*lists_count - 1] == 3) {
			*info_get_state = STARTING_NEW;
		}
		break;
	default:
		panic("Notification info get action error!!\n");
	}

	state->info_get_retrieved |= pending_not_retrieved;

	vm_notifications_info_get_retrieved_count_add(pending_not_retrieved);
}

/**
 * Gets pending notifications' information to return to the receiver
 * scheduler.
 */
void vm_notifications_info_get_pending(
	struct vm_locked vm_locked, bool is_from_vm, uint16_t *ids,
	uint32_t *ids_count, uint32_t *lists_sizes, uint32_t *lists_count,
	const uint32_t ids_max_count,
	enum notifications_info_get_state *info_get_state)
{
	struct notifications *notifications;

	CHECK(vm_locked.vm != NULL);

	notifications = vm_get_notifications(vm_locked, is_from_vm);

	/*
	 * Perform the info get for global notifications before doing it for
	 * per-vCPU notifications.
	 */
	vm_notifications_state_info_get(&notifications->global,
					vm_locked.vm->id, false, 0, ids,
					ids_count, lists_sizes, lists_count,
					ids_max_count, info_get_state);

	for (ffa_vcpu_count_t i = 0; i < vm_locked.vm->vcpu_count; i++) {
		vm_notifications_state_info_get(
			&notifications->per_vcpu[i], vm_locked.vm->id, true, i,
			ids, ids_count, lists_sizes, lists_count, ids_max_count,
			info_get_state);
	}
}

/**
 * Gets all info from the VM's pending notifications.
 * Returns true if the list is full and there are more notifications pending.
 */
bool vm_notifications_info_get(struct vm_locked vm_locked, uint16_t *ids,
			       uint32_t *ids_count, uint32_t *lists_sizes,
			       uint32_t *lists_count,
			       const uint32_t ids_max_count)
{
	enum notifications_info_get_state current_state = INIT;

	/* Get info of pending notifications from the framework. */
	vm_notifications_state_info_get(&vm_locked.vm->notifications.framework,
					vm_locked.vm->id, false, 0, ids,
					ids_count, lists_sizes, lists_count,
					ids_max_count, &current_state);

	/* Get info of pending notifications from SPs. */
	vm_notifications_info_get_pending(vm_locked, false, ids, ids_count,
					  lists_sizes, lists_count,
					  ids_max_count, &current_state);

	/* Get info of pending notifications from VMs. */
	vm_notifications_info_get_pending(vm_locked, true, ids, ids_count,
					  lists_sizes, lists_count,
					  ids_max_count, &current_state);

	/*
	 * The state transitions to FULL when trying to insert a new ID in the
	 * list and there is no more space. This means there are notifications
	 * pending whose info has not been retrieved.
	 */
	return current_state == FULL;
}

/**
 * Checks the VM's support for the given messaging method.
 */
bool vm_supports_messaging_method(struct vm *vm, uint16_t msg_method)
{
	return (vm->messaging_method & msg_method) != 0;
}

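/**
 * Records whether the notifications pending interrupt (NPI) has been injected
 * for the given VM.
 */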
void vm_notifications_set_npi_injected(struct vm_locked vm_locked,
				       bool npi_injected)
{
	vm_locked.vm->notifications.npi_injected = npi_injected;
}

bool vm_notifications_is_npi_injected(struct vm_locked vm_locked)
{
	return vm_locked.vm->notifications.npi_injected;
}

/**
 * Sets the designated GP register through which the VM expects to receive the
 * boot info's address.
 */
void vm_set_boot_info_gp_reg(struct vm *vm, struct vcpu *vcpu)
{
	if (vm->boot_info.blob_addr.ipa != 0U) {
		arch_regs_set_gp_reg(&vcpu->regs,
				     ipa_addr(vm->boot_info.blob_addr),
				     vm->boot_info.gp_register_num);
	}
}

/**
 * Obtains the interrupt descriptor entry of the specified VM corresponding
 * to the specified interrupt ID.
 */
static struct interrupt_descriptor *vm_find_interrupt_descriptor(
	struct vm_locked vm_locked, uint32_t id)
{
	for (uint32_t i = 0; i < HF_NUM_INTIDS; i++) {
		/* Interrupt descriptors are populated contiguously. */
		if (!vm_locked.vm->interrupt_desc[i].valid) {
			break;
		}

		if (vm_locked.vm->interrupt_desc[i].interrupt_id == id) {
			/* Interrupt descriptor found. */
			return &vm_locked.vm->interrupt_desc[i];
		}
	}

	return NULL;
}

/**
 * Updates the target MPIDR of the specified interrupt ID belonging to the
 * specified VM.
 */
struct interrupt_descriptor *vm_interrupt_set_target_mpidr(
	struct vm_locked vm_locked, uint32_t id, uint32_t target_mpidr)
{
	struct interrupt_descriptor *int_desc;

	int_desc = vm_find_interrupt_descriptor(vm_locked, id);

	if (int_desc != NULL) {
		interrupt_desc_set_mpidr(int_desc, target_mpidr);
	}

	return int_desc;
}

/**
 * Updates the security state of the specified interrupt ID belonging to the
 * specified VM.
 */
struct interrupt_descriptor *vm_interrupt_set_sec_state(
	struct vm_locked vm_locked, uint32_t id, uint32_t sec_state)
{
	struct interrupt_descriptor *int_desc;

	int_desc = vm_find_interrupt_descriptor(vm_locked, id);

	if (int_desc != NULL) {
		interrupt_desc_set_sec_state(int_desc, sec_state);
	}

	return int_desc;
}

/**
 * Enables or disables the specified interrupt ID belonging to the specified
 * VM.
 */
struct interrupt_descriptor *vm_interrupt_set_enable(struct vm_locked vm_locked,
						     uint32_t id, bool enable)
{
	struct interrupt_descriptor *int_desc;

	int_desc = vm_find_interrupt_descriptor(vm_locked, id);

	if (int_desc != NULL) {
		interrupt_desc_set_enabled(int_desc, enable);
	}

	return int_desc;
}