/*
 * Copyright 2018 The Hafnium Authors.
 *
 * Use of this source code is governed by a BSD-style
 * license that can be found in the LICENSE file or at
 * https://opensource.org/licenses/BSD-3-Clause.
 */

#include "hf/vm.h"

#include "hf/api.h"
#include "hf/assert.h"
#include "hf/check.h"
#include "hf/cpu.h"
#include "hf/dlog.h"
#include "hf/ffa.h"
#include "hf/layout.h"
#include "hf/plat/iommu.h"
#include "hf/std.h"

#include "vmapi/hf/call.h"

static struct vm vms[MAX_VMS];
static struct vm other_world;
static ffa_vm_count_t vm_count;
static struct vm *first_boot_vm;

/**
 * Counters tracking the status of notifications in the system. They help
 * improve the information retrieved by the receiver scheduler.
 */
static struct {
	/** Counts pending notifications. */
	uint32_t pending_count;
	/**
	 * Counts pending notifications that have been retrieved by the
	 * receiver scheduler.
	 */
	uint32_t info_get_retrieved_count;
	struct spinlock lock;
} all_notifications_state;

static bool vm_init_mm(struct vm *vm, struct mpool *ppool)
{
	if (vm->el0_partition) {
		return mm_ptable_init(&vm->ptable, vm->id, MM_FLAG_STAGE1,
				      ppool);
	}
	return mm_vm_init(&vm->ptable, vm->id, ppool);
}

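/**
 * Initialises the given VM (or the other world) structure: clears it,
 * allocates its vCPUs from the page pool, and sets up its page table and
 * notification structures. Returns NULL if the page table could not be
 * initialised.
 */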
struct vm *vm_init(ffa_vm_id_t id, ffa_vcpu_count_t vcpu_count,
		   struct mpool *ppool, bool el0_partition)
{
	uint32_t i;
	struct vm *vm;
	size_t vcpu_ppool_entries = (align_up(sizeof(struct vcpu) * vcpu_count,
					      MM_PPOOL_ENTRY_SIZE) /
				     MM_PPOOL_ENTRY_SIZE);

	if (id == HF_OTHER_WORLD_ID) {
		CHECK(el0_partition == false);
		vm = &other_world;
	} else {
		uint16_t vm_index = id - HF_VM_ID_OFFSET;

		CHECK(id >= HF_VM_ID_OFFSET);
		CHECK(vm_index < ARRAY_SIZE(vms));
		vm = &vms[vm_index];
	}

	memset_s(vm, sizeof(*vm), 0, sizeof(*vm));

	list_init(&vm->mailbox.waiter_list);
	list_init(&vm->mailbox.ready_list);
	sl_init(&vm->lock);

	vm->id = id;
	vm->vcpu_count = vcpu_count;

	vm->vcpus = (struct vcpu *)mpool_alloc_contiguous(
		ppool, vcpu_ppool_entries, 1);
	CHECK(vm->vcpus != NULL);

	vm->mailbox.state = MAILBOX_STATE_EMPTY;
	atomic_init(&vm->aborting, false);
	vm->el0_partition = el0_partition;

	if (!vm_init_mm(vm, ppool)) {
		return NULL;
	}

	/* Initialise waiter entries. */
	for (i = 0; i < MAX_VMS; i++) {
		vm->wait_entries[i].waiting_vm = vm;
		list_init(&vm->wait_entries[i].wait_links);
		list_init(&vm->wait_entries[i].ready_links);
	}

	/* Do basic initialization of vCPUs. */
	for (i = 0; i < vcpu_count; i++) {
		vcpu_init(vm_get_vcpu(vm, i), vm);
	}

	vm_notifications_init(vm, vcpu_count, ppool);
	return vm;
}

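/**
 * Initialises the next available VM slot, assigning it the next free VM ID.
 * Returns false if the maximum number of VMs has already been reached or the
 * initialisation fails.
 */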
bool vm_init_next(ffa_vcpu_count_t vcpu_count, struct mpool *ppool,
		  struct vm **new_vm, bool el0_partition)
{
	if (vm_count >= MAX_VMS) {
		return false;
	}

	/* Generate IDs based on an offset, as low IDs (e.g. 0) are reserved. */
	*new_vm = vm_init(vm_count + HF_VM_ID_OFFSET, vcpu_count, ppool,
			  el0_partition);
	if (*new_vm == NULL) {
		return false;
	}
	++vm_count;

	return true;
}

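/**
 * Returns the number of VMs initialised so far.
 */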
ffa_vm_count_t vm_get_count(void)
{
	return vm_count;
}

/**
 * Returns a pointer to the VM with the corresponding id.
 */
struct vm *vm_find(ffa_vm_id_t id)
{
	uint16_t index;

	if (id == HF_OTHER_WORLD_ID) {
		if (other_world.id == HF_OTHER_WORLD_ID) {
			return &other_world;
		}
		return NULL;
	}

	/* Check that this is not a reserved ID. */
	if (id < HF_VM_ID_OFFSET) {
		return NULL;
	}

	index = id - HF_VM_ID_OFFSET;

	return vm_find_index(index);
}

/**
 * Returns a locked instance of the VM with the corresponding id.
 */
struct vm_locked vm_find_locked(ffa_vm_id_t id)
{
	struct vm *vm = vm_find(id);

	if (vm != NULL) {
		return vm_lock(vm);
	}

	return (struct vm_locked){.vm = NULL};
}

/**
 * Returns a pointer to the VM at the specified index.
 */
struct vm *vm_find_index(uint16_t index)
{
	/* Ensure the VM is initialized. */
	if (index >= vm_count) {
		return NULL;
	}

	return &vms[index];
}

/**
 * Locks the given VM and returns the locked instance holding it.
 */
struct vm_locked vm_lock(struct vm *vm)
{
	struct vm_locked locked = {
		.vm = vm,
	};

	sl_lock(&vm->lock);

	return locked;
}

/**
 * Locks two VMs, ensuring that the locking order is determined by the locks'
 * addresses.
 */
struct two_vm_locked vm_lock_both(struct vm *vm1, struct vm *vm2)
{
	struct two_vm_locked dual_lock;

	sl_lock_both(&vm1->lock, &vm2->lock);
	dual_lock.vm1.vm = vm1;
	dual_lock.vm2.vm = vm2;

	return dual_lock;
}

/**
 * Unlocks a VM previously locked with vm_lock, and updates `locked` to reflect
 * the fact that the VM is no longer locked.
 */
void vm_unlock(struct vm_locked *locked)
{
	sl_unlock(&locked->vm->lock);
	locked->vm = NULL;
}

/**
 * Gets the vCPU with the given index from the given VM.
 * This assumes the index is valid, i.e. less than vm->vcpu_count.
 */
struct vcpu *vm_get_vcpu(struct vm *vm, ffa_vcpu_index_t vcpu_index)
{
	CHECK(vcpu_index < vm->vcpu_count);
	return &vm->vcpus[vcpu_index];
}

/**
 * Gets `vm`'s wait entry for waiting on `for_vm`.
 */
struct wait_entry *vm_get_wait_entry(struct vm *vm, ffa_vm_id_t for_vm)
{
	uint16_t index;

	CHECK(for_vm >= HF_VM_ID_OFFSET);
	index = for_vm - HF_VM_ID_OFFSET;
	CHECK(index < MAX_VMS);

	return &vm->wait_entries[index];
}

/**
 * Checks whether the given `to` VM's mailbox is currently busy.
 */
bool vm_is_mailbox_busy(struct vm_locked to)
{
	return to.vm->mailbox.state != MAILBOX_STATE_EMPTY ||
	       to.vm->mailbox.recv == NULL;
}

/**
 * Gets the ID of the VM which the given VM's wait entry is for.
 */
ffa_vm_id_t vm_id_for_wait_entry(struct vm *vm, struct wait_entry *entry)
{
	uint16_t index = entry - vm->wait_entries;

	return index + HF_VM_ID_OFFSET;
}

/**
 * Returns whether the given VM ID represents an entity in the current world:
 * i.e. the hypervisor or a normal world VM when running in the normal world,
 * or the SPM or an SP when running in the secure world.
 */
bool vm_id_is_current_world(ffa_vm_id_t vm_id)
{
	return (vm_id & HF_VM_ID_WORLD_MASK) !=
	       (HF_OTHER_WORLD_ID & HF_VM_ID_WORLD_MASK);
}

/**
 * Maps a range of addresses to the VM in both the MMU and the IOMMU.
 *
 * mm_vm_defrag should always be called after a series of page table updates,
 * whether they succeed or fail. This is because on failure extra page table
 * entries may have been allocated and then not used, while on success it may
 * be possible to compact the page table by merging several entries into a
 * block.
 *
 * Returns true on success, or false if the update failed and no changes were
 * made.
 */
bool vm_identity_map(struct vm_locked vm_locked, paddr_t begin, paddr_t end,
		     uint32_t mode, struct mpool *ppool, ipaddr_t *ipa)
{
	if (!vm_identity_prepare(vm_locked, begin, end, mode, ppool)) {
		return false;
	}

	vm_identity_commit(vm_locked, begin, end, mode, ppool, ipa);

	return true;
}

/**
 * Prepares the given VM for the given address mapping such that it will be
 * able to commit the change without failure.
 *
 * In particular, after multiple calls to this function, the corresponding
 * calls to commit the changes will succeed.
 *
 * Returns true on success, or false if the update failed and no changes were
 * made.
 */
bool vm_identity_prepare(struct vm_locked vm_locked, paddr_t begin, paddr_t end,
			 uint32_t mode, struct mpool *ppool)
{
	if (vm_locked.vm->el0_partition) {
		return mm_identity_prepare(&vm_locked.vm->ptable, begin, end,
					   mode, ppool);
	}
	return mm_vm_identity_prepare(&vm_locked.vm->ptable, begin, end, mode,
				      ppool);
}

/**
 * Commits the given address mapping to the VM, assuming the operation cannot
 * fail. `vm_identity_prepare` must be used correctly before this to ensure
 * this condition.
 */
void vm_identity_commit(struct vm_locked vm_locked, paddr_t begin, paddr_t end,
			uint32_t mode, struct mpool *ppool, ipaddr_t *ipa)
{
	if (vm_locked.vm->el0_partition) {
		mm_identity_commit(&vm_locked.vm->ptable, begin, end, mode,
				   ppool);
		if (ipa != NULL) {
			/*
			 * EL0 partitions are modeled as lightweight VMs, to
			 * promote code reuse. The below statement returns the
			 * mapped PA as an IPA; however, for an EL0 partition,
			 * this is really a VA.
			 */
			*ipa = ipa_from_pa(begin);
		}
	} else {
		mm_vm_identity_commit(&vm_locked.vm->ptable, begin, end, mode,
				      ppool, ipa);
	}
	plat_iommu_identity_map(vm_locked, begin, end, mode);
}

/**
 * Unmaps a range of addresses from the VM.
 *
 * Returns true on success, or false if the update failed and no changes were
 * made.
 */
bool vm_unmap(struct vm_locked vm_locked, paddr_t begin, paddr_t end,
	      struct mpool *ppool)
{
	uint32_t mode = MM_MODE_UNMAPPED_MASK;

	return vm_identity_map(vm_locked, begin, end, mode, ppool, NULL);
}

/**
 * Defragments the page tables of an EL0 partition or of a VM.
 */
void vm_ptable_defrag(struct vm_locked vm_locked, struct mpool *ppool)
{
	if (vm_locked.vm->el0_partition) {
		mm_stage1_defrag(&vm_locked.vm->ptable, ppool);
	} else {
		mm_vm_defrag(&vm_locked.vm->ptable, ppool);
	}
}

/**
 * Unmaps the hypervisor pages from the given page table.
 */
bool vm_unmap_hypervisor(struct vm_locked vm_locked, struct mpool *ppool)
{
	/* TODO: If we add pages dynamically, they must be included here too. */
	return vm_unmap(vm_locked, layout_text_begin(), layout_text_end(),
			ppool) &&
	       vm_unmap(vm_locked, layout_rodata_begin(), layout_rodata_end(),
			ppool) &&
	       vm_unmap(vm_locked, layout_data_begin(), layout_data_end(),
			ppool) &&
	       vm_unmap(vm_locked, layout_stacks_begin(), layout_stacks_end(),
			ppool);
}

/**
 * Gets the first partition to boot, according to the boot protocol from the
 * FF-A spec.
 */
struct vm *vm_get_first_boot(void)
{
	return first_boot_vm;
}

/**
 * Inserts the VM into the boot list, which is sorted by the `boot_order`
 * field of the vm structure and rooted at `first_boot_vm`.
 */
void vm_update_boot(struct vm *vm)
{
	struct vm *current = NULL;
	struct vm *previous = NULL;

	if (first_boot_vm == NULL) {
		first_boot_vm = vm;
		return;
	}

	current = first_boot_vm;

	while (current != NULL && current->boot_order <= vm->boot_order) {
		previous = current;
		current = current->next_boot;
	}

	if (previous != NULL) {
		previous->next_boot = vm;
	} else {
		first_boot_vm = vm;
	}

	vm->next_boot = current;
}

/**
 * Gets the mode of the given range of IPAs (or VAs) if they are all mapped
 * with the same mode.
 *
 * Returns true if the range is mapped with the same mode and false otherwise.
 * The wrapper calls the appropriate mm function depending on whether the
 * partition is a VM or an EL0 partition.
 */
bool vm_mem_get_mode(struct vm_locked vm_locked, ipaddr_t begin, ipaddr_t end,
		     uint32_t *mode)
{
	if (vm_locked.vm->el0_partition) {
		return mm_get_mode(&vm_locked.vm->ptable,
				   va_from_pa(pa_from_ipa(begin)),
				   va_from_pa(pa_from_ipa(end)), mode);
	}
	return mm_vm_get_mode(&vm_locked.vm->ptable, begin, end, mode);
}

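/**
 * Checks whether the given locked VM's mailbox is busy: either it is not
 * empty or no RX buffer has been set up.
 */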
bool vm_mailbox_state_busy(struct vm_locked vm_locked)
{
	return vm_locked.vm->mailbox.state != MAILBOX_STATE_EMPTY ||
	       vm_locked.vm->mailbox.recv == NULL;
}

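/**
 * Returns the VM's notifications structure for notifications coming either
 * from VMs or from SPs, as selected by `is_from_vm`.
 */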
static struct notifications *vm_get_notifications(struct vm_locked vm_locked,
						  bool is_from_vm)
{
	return is_from_vm ? &vm_locked.vm->notifications.from_vm
			  : &vm_locked.vm->notifications.from_sp;
}

/*
 * Dynamically allocates the per-vCPU notifications structures for a given VM.
 */
static void vm_notifications_init_per_vcpu_notifications(
	struct vm *vm, ffa_vcpu_count_t vcpu_count, struct mpool *ppool)
{
	size_t notif_ppool_entries =
		(align_up(sizeof(struct notifications_state) * vcpu_count,
			  MM_PPOOL_ENTRY_SIZE) /
		 MM_PPOOL_ENTRY_SIZE);

	/*
	 * Allow the function to be called on already initialized VMs whose
	 * notification structures only need to be cleared.
	 */
	if (vm->notifications.from_sp.per_vcpu == NULL) {
		assert(vm->notifications.from_vm.per_vcpu == NULL);
		assert(vcpu_count != 0);
		CHECK(ppool != NULL);
		vm->notifications.from_sp.per_vcpu =
			(struct notifications_state *)mpool_alloc_contiguous(
				ppool, notif_ppool_entries, 1);
		CHECK(vm->notifications.from_sp.per_vcpu != NULL);

		vm->notifications.from_vm.per_vcpu =
			(struct notifications_state *)mpool_alloc_contiguous(
				ppool, notif_ppool_entries, 1);
		CHECK(vm->notifications.from_vm.per_vcpu != NULL);
	} else {
		assert(vm->notifications.from_vm.per_vcpu != NULL);
	}

	memset_s(vm->notifications.from_sp.per_vcpu,
		 sizeof(*(vm->notifications.from_sp.per_vcpu)) * vcpu_count, 0,
		 sizeof(*(vm->notifications.from_sp.per_vcpu)) * vcpu_count);
	memset_s(vm->notifications.from_vm.per_vcpu,
		 sizeof(*(vm->notifications.from_vm.per_vcpu)) * vcpu_count, 0,
		 sizeof(*(vm->notifications.from_vm.per_vcpu)) * vcpu_count);
}

/*
 * Initializes the bindings of the given notifications structure.
 */
static void vm_notifications_init_bindings(struct notifications *notifications)
{
	for (uint32_t i = 0U; i < MAX_FFA_NOTIFICATIONS; i++) {
		notifications->bindings_sender_id[i] = HF_INVALID_VM_ID;
	}
}

/*
 * Initializes the notification-related structures for a VM.
 */
void vm_notifications_init(struct vm *vm, ffa_vcpu_count_t vcpu_count,
			   struct mpool *ppool)
{
	vm_notifications_init_per_vcpu_notifications(vm, vcpu_count, ppool);

	/* Basic initialization of the notifications structure. */
	vm_notifications_init_bindings(&vm->notifications.from_sp);
	vm_notifications_init_bindings(&vm->notifications.from_vm);
}

/**
 * Checks if there are pending notifications.
 */
bool vm_are_notifications_pending(struct vm_locked vm_locked, bool from_vm,
				  ffa_notifications_bitmap_t notifications)
{
	struct notifications *to_check;

	CHECK(vm_locked.vm != NULL);

	to_check = vm_get_notifications(vm_locked, from_vm);

	/* Check if there are pending per-vCPU notifications. */
	for (uint32_t i = 0U; i < vm_locked.vm->vcpu_count; i++) {
		if ((to_check->per_vcpu[i].pending & notifications) != 0U) {
			return true;
		}
	}

	/* Check if there are pending global notifications. */
	return (to_check->global.pending & notifications) != 0U;
}

/**
 * Checks if there are pending global notifications, either from SPs or from
 * VMs.
 */
bool vm_are_global_notifications_pending(struct vm_locked vm_locked)
{
	return vm_get_notifications(vm_locked, true)->global.pending != 0ULL ||
	       vm_get_notifications(vm_locked, false)->global.pending != 0ULL ||
	       vm_locked.vm->notifications.framework.pending != 0ULL;
}

/**
 * Checks if there are pending per-vCPU notifications for a specific vCPU,
 * either from SPs or from VMs.
 */
bool vm_are_per_vcpu_notifications_pending(struct vm_locked vm_locked,
					   ffa_vcpu_index_t vcpu_id)
{
	CHECK(vcpu_id < vm_locked.vm->vcpu_count);

	return vm_get_notifications(vm_locked, true)
			->per_vcpu[vcpu_id]
			.pending != 0ULL ||
	       vm_get_notifications(vm_locked, false)
			->per_vcpu[vcpu_id]
			.pending != 0ULL;
}

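/**
 * Checks whether notifications are enabled for the given VM.
 */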
bool vm_are_notifications_enabled(struct vm *vm)
{
	return vm->notifications.enabled == true;
}

bool vm_locked_are_notifications_enabled(struct vm_locked vm_locked)
{
	return vm_are_notifications_enabled(vm_locked.vm);
}

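/**
 * Checks whether notification bit `i` is set in the given bitmap.
 */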
static bool vm_is_notification_bit_set(ffa_notifications_bitmap_t notifications,
				       uint32_t i)
{
	return (notifications & FFA_NOTIFICATION_MASK(i)) != 0U;
}

static void vm_notifications_global_state_count_update(
	ffa_notifications_bitmap_t bitmap, uint32_t *counter, int inc)
{
	/*
	 * Helper to update the counters of the global notifications state.
	 * Counts are updated in increments or decrements of 1 or -1,
	 * respectively.
	 */
	assert(inc == 1 || inc == -1);

	sl_lock(&all_notifications_state.lock);

	for (uint32_t i = 0; i < MAX_FFA_NOTIFICATIONS; i++) {
		if (vm_is_notification_bit_set(bitmap, i)) {
			CHECK((inc > 0 && *counter < UINT32_MAX) ||
			      (inc < 0 && *counter > 0));
			*counter += inc;
		}
	}

	sl_unlock(&all_notifications_state.lock);
}

/**
 * Helper function to increment the pending notifications count based on a
 * bitmap passed as argument.
 * To be used when setting notifications for a given VM.
 */
static void vm_notifications_pending_count_add(
	ffa_notifications_bitmap_t to_add)
{
	vm_notifications_global_state_count_update(
		to_add, &all_notifications_state.pending_count, 1);
}

/**
 * Helper function to decrement the pending notifications count.
 * Function to be used when getting the receiver's pending notifications.
 */
static void vm_notifications_pending_count_sub(
	ffa_notifications_bitmap_t to_sub)
{
	vm_notifications_global_state_count_update(
		to_sub, &all_notifications_state.pending_count, -1);
}

/**
 * Helper function to count the notifications whose information has been
 * retrieved by the receiver scheduler and which are still pending.
 */
static void vm_notifications_info_get_retrieved_count_add(
	ffa_notifications_bitmap_t to_add)
{
	vm_notifications_global_state_count_update(
		to_add, &all_notifications_state.info_get_retrieved_count, 1);
}

/**
 * Helper function to subtract the notifications that the receiver is getting
 * and whose information has been retrieved by the receiver scheduler.
 */
static void vm_notifications_info_get_retrieved_count_sub(
	ffa_notifications_bitmap_t to_sub)
{
	vm_notifications_global_state_count_update(
		to_sub, &all_notifications_state.info_get_retrieved_count, -1);
}

/**
 * Helper function to determine if there are notifications pending whose info
 * hasn't been retrieved by the receiver scheduler.
 */
bool vm_notifications_pending_not_retrieved_by_scheduler(void)
{
	bool ret;

	sl_lock(&all_notifications_state.lock);
	ret = all_notifications_state.pending_count >
	      all_notifications_state.info_get_retrieved_count;
	sl_unlock(&all_notifications_state.lock);

	return ret;
}

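/**
 * Checks whether there are no pending notifications in the whole system.
 */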
bool vm_is_notifications_pending_count_zero(void)
{
	bool ret;

	sl_lock(&all_notifications_state.lock);
	ret = all_notifications_state.pending_count == 0;
	sl_unlock(&all_notifications_state.lock);

	return ret;
}

/**
 * Checks that all provided notifications are bound to the specified sender,
 * and are per VCPU or global, as specified.
 */
bool vm_notifications_validate_binding(struct vm_locked vm_locked,
				       bool is_from_vm, ffa_vm_id_t sender_id,
				       ffa_notifications_bitmap_t notifications,
				       bool is_per_vcpu)
{
	return vm_notifications_validate_bound_sender(
		       vm_locked, is_from_vm, sender_id, notifications) &&
	       vm_notifications_validate_per_vcpu(vm_locked, is_from_vm,
						  is_per_vcpu, notifications);
}

/**
 * Updates the binding information in the notifications structure for the
 * specified notifications.
 */
void vm_notifications_update_bindings(struct vm_locked vm_locked,
				      bool is_from_vm, ffa_vm_id_t sender_id,
				      ffa_notifications_bitmap_t notifications,
				      bool is_per_vcpu)
{
	CHECK(vm_locked.vm != NULL);
	struct notifications *to_update =
		vm_get_notifications(vm_locked, is_from_vm);

	for (uint32_t i = 0; i < MAX_FFA_NOTIFICATIONS; i++) {
		if (vm_is_notification_bit_set(notifications, i)) {
			to_update->bindings_sender_id[i] = sender_id;
		}
	}

	/*
	 * Set notifications if they are per VCPU, else clear them as they are
	 * global.
	 */
	if (is_per_vcpu) {
		to_update->bindings_per_vcpu |= notifications;
	} else {
		to_update->bindings_per_vcpu &= ~notifications;
	}
}

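/**
 * Checks that all the specified notifications are bound to the given sender.
 */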
bool vm_notifications_validate_bound_sender(
	struct vm_locked vm_locked, bool is_from_vm, ffa_vm_id_t sender_id,
	ffa_notifications_bitmap_t notifications)
{
	CHECK(vm_locked.vm != NULL);
	struct notifications *to_check =
		vm_get_notifications(vm_locked, is_from_vm);

	for (uint32_t i = 0; i < MAX_FFA_NOTIFICATIONS; i++) {
		if (vm_is_notification_bit_set(notifications, i) &&
		    to_check->bindings_sender_id[i] != sender_id) {
			return false;
		}
	}

	return true;
}

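/**
 * Checks that the bindings of the specified notifications match the given
 * per-vCPU or global configuration.
 */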
bool vm_notifications_validate_per_vcpu(struct vm_locked vm_locked,
					bool is_from_vm, bool is_per_vcpu,
					ffa_notifications_bitmap_t notif)
{
	CHECK(vm_locked.vm != NULL);
	struct notifications *to_check =
		vm_get_notifications(vm_locked, is_from_vm);

	return is_per_vcpu ? (~to_check->bindings_per_vcpu & notif) == 0U
			   : (to_check->bindings_per_vcpu & notif) == 0U;
}

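/**
 * Marks the given notifications as pending in the given state and updates the
 * system-wide pending count.
 */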
static void vm_notifications_state_set(struct notifications_state *state,
				       ffa_notifications_bitmap_t notifications)
{
	state->pending |= notifications;
	vm_notifications_pending_count_add(notifications);
}

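/**
 * Sets partition notifications as pending, either globally or for the given
 * vCPU, depending on `is_per_vcpu`.
 */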
void vm_notifications_partition_set_pending(
	struct vm_locked vm_locked, bool is_from_vm,
	ffa_notifications_bitmap_t notifications, ffa_vcpu_index_t vcpu_id,
	bool is_per_vcpu)
{
	struct notifications *to_set;
	struct notifications_state *state;

	CHECK(vm_locked.vm != NULL);
	CHECK(vcpu_id < vm_locked.vm->vcpu_count);

	to_set = vm_get_notifications(vm_locked, is_from_vm);

	state = is_per_vcpu ? &to_set->per_vcpu[vcpu_id] : &to_set->global;

	vm_notifications_state_set(state, notifications);
}

/**
 * Set pending framework notifications.
 */
void vm_notifications_framework_set_pending(
	struct vm_locked vm_locked, ffa_notifications_bitmap_t notifications)
{
	CHECK(vm_locked.vm != NULL);
	assert(is_ffa_spm_buffer_full_notification(notifications) ||
	       is_ffa_hyp_buffer_full_notification(notifications));
	vm_notifications_state_set(&vm_locked.vm->notifications.framework,
				   notifications);
}

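/**
 * Returns and clears the pending notifications of the given state, updating
 * the system-wide notification counters accordingly.
 */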
static ffa_notifications_bitmap_t vm_notifications_state_get_pending(
	struct notifications_state *state)
{
	ffa_notifications_bitmap_t to_ret;
	ffa_notifications_bitmap_t pending_and_info_get_retrieved;

	assert(state != NULL);

	to_ret = state->pending;

	/* Update count of currently pending notifications in the system. */
	vm_notifications_pending_count_sub(state->pending);

	/*
	 * If the notifications the receiver is getting have already been
	 * retrieved by the receiver scheduler, decrement those from the
	 * respective count.
	 */
	pending_and_info_get_retrieved =
		state->pending & state->info_get_retrieved;

	if (pending_and_info_get_retrieved != 0) {
		vm_notifications_info_get_retrieved_count_sub(
			pending_and_info_get_retrieved);
	}

	state->pending = 0U;
	state->info_get_retrieved = 0U;

	return to_ret;
}

/**
 * Get global and per-vCPU notifications for the given vCPU ID.
 */
ffa_notifications_bitmap_t vm_notifications_partition_get_pending(
	struct vm_locked vm_locked, bool is_from_vm, ffa_vcpu_index_t vcpu_id)
{
	ffa_notifications_bitmap_t to_ret;
	struct notifications *to_get;

	assert(vm_locked.vm != NULL);
	to_get = vm_get_notifications(vm_locked, is_from_vm);
	assert(vcpu_id < vm_locked.vm->vcpu_count);

	to_ret = vm_notifications_state_get_pending(&to_get->global);
	to_ret |=
		vm_notifications_state_get_pending(&to_get->per_vcpu[vcpu_id]);

	return to_ret;
}

/**
 * Get pending framework notifications.
 */
ffa_notifications_bitmap_t vm_notifications_framework_get_pending(
	struct vm_locked vm_locked)
{
	struct vm *vm = vm_locked.vm;
	ffa_notifications_bitmap_t framework;
	bool rx_buffer_full;

	assert(vm != NULL);

	framework = vm_notifications_state_get_pending(
		&vm->notifications.framework);

	/*
	 * By retrieving an RX buffer full notification the buffer state
	 * transitions from RECEIVED to READ; the VM is now the RX buffer
	 * owner, can read it and is allowed to release it.
	 */
	rx_buffer_full = is_ffa_spm_buffer_full_notification(framework) ||
			 is_ffa_hyp_buffer_full_notification(framework);
	if (rx_buffer_full && vm->mailbox.state == MAILBOX_STATE_RECEIVED) {
		vm->mailbox.state = MAILBOX_STATE_READ;
	}

	return framework;
}

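/**
 * Adds the VM ID (and the vCPU ID for per-vCPU notifications) of the pending,
 * not yet retrieved notifications of the given state to the ID list returned
 * to the receiver scheduler, updating the list counters and the info-get
 * state machine.
 */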
static void vm_notifications_state_info_get(
	struct notifications_state *state, ffa_vm_id_t vm_id, bool is_per_vcpu,
	ffa_vcpu_index_t vcpu_id, uint16_t *ids, uint32_t *ids_count,
	uint32_t *lists_sizes, uint32_t *lists_count,
	const uint32_t ids_max_count,
	enum notifications_info_get_state *info_get_state)
{
	ffa_notifications_bitmap_t pending_not_retrieved;

	CHECK(*ids_count <= ids_max_count);
	CHECK(*lists_count <= ids_max_count);

	if (*info_get_state == FULL) {
		return;
	}

	pending_not_retrieved = state->pending & ~state->info_get_retrieved;

	/* No notifications pending that haven't been retrieved. */
	if (pending_not_retrieved == 0U) {
		return;
	}

	if (*ids_count == ids_max_count) {
		*info_get_state = FULL;
		return;
	}

	switch (*info_get_state) {
	case INIT:
	case STARTING_NEW:
		/*
		 * At this iteration two ids are to be added: the VM ID
		 * and vCPU ID. If there is no space, change state and
		 * terminate function.
		 */
		if (is_per_vcpu && ids_max_count - *ids_count < 2) {
			*info_get_state = FULL;
			return;
		}

		*info_get_state = INSERTING;
		ids[*ids_count] = vm_id;
		++(*ids_count);

		if (is_per_vcpu) {
			/* Insert vCPU ID. */
			ids[*ids_count] = vcpu_id;
			++(*ids_count);
			++lists_sizes[*lists_count];
		}

		++(*lists_count);
		break;
	case INSERTING:
		/* For per-vCPU notifications only. */
		if (!is_per_vcpu) {
			break;
		}

		/* Insert vCPU ID */
		ids[*ids_count] = vcpu_id;
		(*ids_count)++;
		/* Increment respective list size */
		++lists_sizes[*lists_count - 1];

		if (lists_sizes[*lists_count - 1] == 3) {
			*info_get_state = STARTING_NEW;
		}
		break;
	default:
		panic("Notification info get action error!!\n");
	}

	state->info_get_retrieved |= pending_not_retrieved;

	vm_notifications_info_get_retrieved_count_add(pending_not_retrieved);
}

J-Alves663682a2022-03-25 13:56:51 +0000957/**
J-Alvesc8e8a222021-06-08 17:33:52 +0100958 * Get pending notification's information to return to the receiver scheduler.
959 */
960void vm_notifications_info_get_pending(
961 struct vm_locked vm_locked, bool is_from_vm, uint16_t *ids,
962 uint32_t *ids_count, uint32_t *lists_sizes, uint32_t *lists_count,
963 const uint32_t ids_max_count,
964 enum notifications_info_get_state *info_get_state)
965{
J-Alves17c9b6d2022-03-25 14:39:05 +0000966 struct notifications *notifications;
J-Alvesc8e8a222021-06-08 17:33:52 +0100967
968 CHECK(vm_locked.vm != NULL);
J-Alvesc8e8a222021-06-08 17:33:52 +0100969
J-Alves17c9b6d2022-03-25 14:39:05 +0000970 notifications = vm_get_notifications(vm_locked, is_from_vm);
J-Alvesc8e8a222021-06-08 17:33:52 +0100971
J-Alves17c9b6d2022-03-25 14:39:05 +0000972 /*
973 * Perform info get for global notifications, before doing it for
974 * per-vCPU.
975 */
976 vm_notifications_state_info_get(&notifications->global,
977 vm_locked.vm->id, false, 0, ids,
978 ids_count, lists_sizes, lists_count,
979 ids_max_count, info_get_state);
J-Alvesfe23ebe2021-10-13 16:07:07 +0100980
J-Alvesc8e8a222021-06-08 17:33:52 +0100981 for (ffa_vcpu_count_t i = 0; i < vm_locked.vm->vcpu_count; i++) {
J-Alves17c9b6d2022-03-25 14:39:05 +0000982 vm_notifications_state_info_get(
983 &notifications->per_vcpu[i], vm_locked.vm->id, true, i,
984 ids, ids_count, lists_sizes, lists_count, ids_max_count,
985 info_get_state);
J-Alvesc8e8a222021-06-08 17:33:52 +0100986 }
987}
988
/**
 * Gets all info from the VM's pending notifications.
 * Returns true if the list is full and there are still notifications pending.
 */
bool vm_notifications_info_get(struct vm_locked vm_locked, uint16_t *ids,
			       uint32_t *ids_count, uint32_t *lists_sizes,
			       uint32_t *lists_count,
			       const uint32_t ids_max_count)
{
	enum notifications_info_get_state current_state = INIT;

	/* Get info of pending notifications from the framework. */
	vm_notifications_state_info_get(&vm_locked.vm->notifications.framework,
					vm_locked.vm->id, false, 0, ids,
					ids_count, lists_sizes, lists_count,
					ids_max_count, &current_state);

	/* Get info of pending notifications from SPs. */
	vm_notifications_info_get_pending(vm_locked, false, ids, ids_count,
					  lists_sizes, lists_count,
					  ids_max_count, &current_state);

	/* Get info of pending notifications from VMs. */
	vm_notifications_info_get_pending(vm_locked, true, ids, ids_count,
					  lists_sizes, lists_count,
					  ids_max_count, &current_state);

	/*
	 * The state transitions to FULL when trying to insert a new ID in the
	 * list and there is no more space. This means there are notifications
	 * pending whose info has not been retrieved.
	 */
	return current_state == FULL;
}

/**
 * Checks whether the VM supports the given messaging method.
 */
bool vm_supports_messaging_method(struct vm *vm, uint8_t msg_method)
{
	return (vm->messaging_method & msg_method) != 0;
}

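/**
 * Records whether the notification pending interrupt (NPI) has been injected
 * for the given VM.
 */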
void vm_notifications_set_npi_injected(struct vm_locked vm_locked,
				       bool npi_injected)
{
	vm_locked.vm->notifications.npi_injected = npi_injected;
}

bool vm_notifications_is_npi_injected(struct vm_locked vm_locked)
{
	return vm_locked.vm->notifications.npi_injected;
}

/**
 * Sets the designated GP register through which the VM expects to receive the
 * boot info's address.
 */
void vm_set_boot_info_gp_reg(struct vm *vm, struct vcpu *vcpu)
{
	if (!vm->initialized && vm->boot_info.blob_addr.ipa != 0U) {
		arch_regs_set_gp_reg(&vcpu->regs,
				     ipa_addr(vm->boot_info.blob_addr),
				     vm->boot_info.gp_register_num);
	}
}