/*
 * Copyright 2018 The Hafnium Authors.
 *
 * Use of this source code is governed by a BSD-style
 * license that can be found in the LICENSE file or at
 * https://opensource.org/licenses/BSD-3-Clause.
 */

#include "hf/vm.h"

#include "hf/arch/spinlock.h"
#include "hf/arch/vm.h"

#include "hf/api.h"
#include "hf/assert.h"
#include "hf/check.h"
#include "hf/cpu.h"
#include "hf/dlog.h"
#include "hf/ffa.h"
#include "hf/layout.h"
#include "hf/plat/iommu.h"
#include "hf/std.h"

#include "vmapi/hf/call.h"

static struct vm vms[MAX_VMS];
static struct vm other_world;
static ffa_vm_count_t vm_count;

/**
 * The `boot_list` is a special entry in the circular linked list maintained by
 * the partition manager and serves as both the start and end of the list.
 */
static struct list_entry boot_list = LIST_INIT(boot_list);

/**
 * Counters on the status of notifications in the system. They help to improve
 * the information retrieved by the receiver scheduler.
 */
static struct {
	/** Counts notifications pending. */
	uint32_t pending_count;
	/**
	 * Counts pending notifications whose information has been retrieved
	 * by the receiver scheduler.
	 */
	uint32_t info_get_retrieved_count;
	struct spinlock lock;
} all_notifications_state;

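/**
 * Initializes the VM's page tables, including the IOMMU page tables used for
 * its DMA devices.
 */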
static bool vm_init_mm(struct vm *vm, struct mpool *ppool)
{
	return arch_vm_init_mm(vm, ppool) && arch_vm_iommu_init_mm(vm, ppool);
}

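/**
 * Initializes a VM (or the other world VM) with the given ID, allocating its
 * vCPUs from the given memory pool. Returns NULL if the page table
 * initialization fails.
 */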
struct vm *vm_init(ffa_id_t id, ffa_vcpu_count_t vcpu_count,
		   struct mpool *ppool, bool el0_partition,
		   uint8_t dma_device_count)
{
	uint32_t i;
	struct vm *vm;
	size_t vcpu_ppool_entries = (align_up(sizeof(struct vcpu) * vcpu_count,
					      MM_PPOOL_ENTRY_SIZE) /
				     MM_PPOOL_ENTRY_SIZE);

	if (id == HF_OTHER_WORLD_ID) {
		CHECK(el0_partition == false);
		vm = &other_world;
	} else {
		uint16_t vm_index = id - HF_VM_ID_OFFSET;

		CHECK(id >= HF_VM_ID_OFFSET);
		CHECK(vm_index < ARRAY_SIZE(vms));
		vm = &vms[vm_index];
	}

	memset_s(vm, sizeof(*vm), 0, sizeof(*vm));

	sl_init(&vm->lock);

	vm->id = id;
	vm->vcpu_count = vcpu_count;

	vm->vcpus = (struct vcpu *)mpool_alloc_contiguous(
		ppool, vcpu_ppool_entries, 1);
	CHECK(vm->vcpus != NULL);

	vm->mailbox.state = MAILBOX_STATE_EMPTY;
	atomic_init(&vm->aborting, false);
	vm->el0_partition = el0_partition;
	vm->dma_device_count = dma_device_count;

	if (!vm_init_mm(vm, ppool)) {
		return NULL;
	}

	/* Do basic initialization of vCPUs. */
	for (i = 0; i < vcpu_count; i++) {
		vcpu_init(vm_get_vcpu(vm, i), vm);
	}

	vm_notifications_init(vm, vcpu_count, ppool);
	list_init(&vm->boot_list_node);
	return vm;
}

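/**
 * Initializes the next available VM, assigning it the next free ID. Returns
 * false if the maximum number of VMs has been reached or initialization fails.
 */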
bool vm_init_next(ffa_vcpu_count_t vcpu_count, struct mpool *ppool,
		  struct vm **new_vm, bool el0_partition,
		  uint8_t dma_device_count)
{
	if (vm_count >= MAX_VMS) {
		return false;
	}

	/* Generate IDs based on an offset, as low IDs (e.g. 0) are reserved. */
	*new_vm = vm_init(vm_count + HF_VM_ID_OFFSET, vcpu_count, ppool,
			  el0_partition, dma_device_count);
	if (*new_vm == NULL) {
		return false;
	}
	++vm_count;

	return true;
}

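/**
 * Returns the number of VMs initialized so far.
 */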
ffa_vm_count_t vm_get_count(void)
{
	return vm_count;
}

/**
 * Returns a pointer to the VM with the corresponding id.
 */
struct vm *vm_find(ffa_id_t id)
{
	uint16_t index;

	if (id == HF_OTHER_WORLD_ID) {
		if (other_world.id == HF_OTHER_WORLD_ID) {
			return &other_world;
		}
		return NULL;
	}

	/* Check that this is not a reserved ID. */
	if (id < HF_VM_ID_OFFSET) {
		return NULL;
	}

	index = id - HF_VM_ID_OFFSET;

	return vm_find_index(index);
}

/**
 * Returns a locked instance of the VM with the corresponding id.
 */
struct vm_locked vm_find_locked(ffa_id_t id)
{
	struct vm *vm = vm_find(id);

	if (vm != NULL) {
		return vm_lock(vm);
	}

	return (struct vm_locked){.vm = NULL};
}

/**
 * Returns a pointer to the VM at the specified index.
 */
struct vm *vm_find_index(uint16_t index)
{
	/* Ensure the VM is initialized. */
	if (index >= vm_count) {
		return NULL;
	}

	return &vms[index];
}

/**
 * Locks the given VM and returns a locked instance holding it.
 */
struct vm_locked vm_lock(struct vm *vm)
{
	struct vm_locked locked = {
		.vm = vm,
	};

	sl_lock(&vm->lock);

	return locked;
}

/**
 * Locks two VMs ensuring that the locking order is according to the locks'
 * addresses.
 */
struct two_vm_locked vm_lock_both(struct vm *vm1, struct vm *vm2)
{
	struct two_vm_locked dual_lock;

	sl_lock_both(&vm1->lock, &vm2->lock);
	dual_lock.vm1.vm = vm1;
	dual_lock.vm2.vm = vm2;

	return dual_lock;
}

/**
 * Locks two VMs ensuring that the locking order is according to the locks'
 * addresses, given `vm1` is already locked.
 */
struct two_vm_locked vm_lock_both_in_order(struct vm_locked vm1, struct vm *vm2)
{
	struct spinlock *sl1 = &vm1.vm->lock;
	struct spinlock *sl2 = &vm2->lock;

	/*
	 * Use `sl_lock`/`sl_unlock` directly rather than
	 * `vm_lock`/`vm_unlock` because `vm_unlock` sets the vm field
	 * to NULL.
	 */
	if (sl1 < sl2) {
		sl_lock(sl2);
	} else {
		sl_unlock(sl1);
		sl_lock(sl2);
		sl_lock(sl1);
	}

	return (struct two_vm_locked){
		.vm1 = vm1,
		.vm2 = (struct vm_locked){.vm = vm2},
	};
}

/**
 * Unlocks a VM previously locked with vm_lock, and updates `locked` to reflect
 * the fact that the VM is no longer locked.
 */
void vm_unlock(struct vm_locked *locked)
{
	sl_unlock(&locked->vm->lock);
	locked->vm = NULL;
}
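
/*
 * Illustrative usage sketch of the locking helpers above (`sender` and
 * `receiver` are placeholder VM pointers, not identifiers used in this file):
 * lock both endpoints of a transaction, then release each lock individually
 * once done.
 *
 *	struct two_vm_locked both = vm_lock_both(sender, receiver);
 *	...operate on both.vm1 and both.vm2...
 *	vm_unlock(&both.vm1);
 *	vm_unlock(&both.vm2);
 */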

/**
 * Get the vCPU with the given index from the given VM.
 * This assumes the index is valid, i.e. less than vm->vcpu_count.
 */
struct vcpu *vm_get_vcpu(struct vm *vm, ffa_vcpu_index_t vcpu_index)
{
	CHECK(vcpu_index < vm->vcpu_count);
	return &vm->vcpus[vcpu_index];
}

/**
 * Checks whether the given `to` VM's mailbox is currently busy.
 */
bool vm_is_mailbox_busy(struct vm_locked to)
{
	return to.vm->mailbox.state != MAILBOX_STATE_EMPTY ||
	       to.vm->mailbox.recv == NULL;
}

/**
 * Checks if mailbox is currently owned by the other world.
 */
bool vm_is_mailbox_other_world_owned(struct vm_locked to)
{
	return to.vm->mailbox.state == MAILBOX_STATE_OTHER_WORLD_OWNED;
}

/**
 * Return whether the given VM ID represents an entity in the current world:
 * i.e. the hypervisor or a normal world VM when running in the normal world, or
 * the SPM or an SP when running in the secure world.
 */
bool vm_id_is_current_world(ffa_id_t vm_id)
{
	return (vm_id & HF_VM_ID_WORLD_MASK) !=
	       (HF_OTHER_WORLD_ID & HF_VM_ID_WORLD_MASK);
}

/**
 * Map a range of addresses to the VM in both the MMU and the IOMMU.
 *
 * mm_vm_defrag should always be called after a series of page table updates,
 * whether they succeed or fail. This is because on failure extra page table
 * entries may have been allocated and then not used, while on success it may be
 * possible to compact the page table by merging several entries into a block.
 *
 * Returns true on success, or false if the update failed and no changes were
 * made.
 *
 */
bool vm_identity_map(struct vm_locked vm_locked, paddr_t begin, paddr_t end,
		     uint32_t mode, struct mpool *ppool, ipaddr_t *ipa)
{
	if (!vm_identity_prepare(vm_locked, begin, end, mode, ppool)) {
		return false;
	}

	vm_identity_commit(vm_locked, begin, end, mode, ppool, ipa);

	return true;
}

/**
 * Prepares the given VM for the given address mapping such that it will be able
 * to commit the change without failure.
 *
 * In particular, multiple calls to this function will result in the
 * corresponding calls to commit the changes succeeding.
 *
 * Returns true on success, or false if the update failed and no changes were
 * made.
 */
bool vm_identity_prepare(struct vm_locked vm_locked, paddr_t begin, paddr_t end,
			 uint32_t mode, struct mpool *ppool)
{
	return arch_vm_identity_prepare(vm_locked, begin, end, mode, ppool);
}

/**
 * Commits the given address mapping to the VM assuming the operation cannot
 * fail. `vm_identity_prepare` must be used correctly before this to ensure
 * this condition.
331 */
332void vm_identity_commit(struct vm_locked vm_locked, paddr_t begin, paddr_t end,
333 uint32_t mode, struct mpool *ppool, ipaddr_t *ipa)
334{
Olivier Deprezd9d409f2023-03-17 11:47:57 +0100335 arch_vm_identity_commit(vm_locked, begin, end, mode, ppool, ipa);
Andrew Scull3c257452019-11-26 13:32:50 +0000336}
337
338/**
339 * Unmap a range of addresses from the VM.
340 *
341 * Returns true on success, or false if the update failed and no changes were
342 * made.
343 */
344bool vm_unmap(struct vm_locked vm_locked, paddr_t begin, paddr_t end,
345 struct mpool *ppool)
346{
Olivier Deprezd9d409f2023-03-17 11:47:57 +0100347 return arch_vm_unmap(vm_locked, begin, end, ppool);
Andrew Scull3c257452019-11-26 13:32:50 +0000348}
349
350/**
Raghu Krishnamurthy7ad3d142021-03-28 00:47:35 -0700351 * Defrag page tables for an EL0 partition or for a VM.
352 */
353void vm_ptable_defrag(struct vm_locked vm_locked, struct mpool *ppool)
354{
Olivier Deprezd9d409f2023-03-17 11:47:57 +0100355 arch_vm_ptable_defrag(vm_locked, ppool);
Raghu Krishnamurthy7ad3d142021-03-28 00:47:35 -0700356}
357
358/**
Andrew Scull3c257452019-11-26 13:32:50 +0000359 * Unmaps the hypervisor pages from the given page table.
360 */
361bool vm_unmap_hypervisor(struct vm_locked vm_locked, struct mpool *ppool)
362{
363 /* TODO: If we add pages dynamically, they must be included here too. */
364 return vm_unmap(vm_locked, layout_text_begin(), layout_text_end(),
365 ppool) &&
366 vm_unmap(vm_locked, layout_rodata_begin(), layout_rodata_end(),
367 ppool) &&
368 vm_unmap(vm_locked, layout_data_begin(), layout_data_end(),
Maksims Svecovs134b8f92022-03-04 15:14:09 +0000369 ppool) &&
370 vm_unmap(vm_locked, layout_stacks_begin(), layout_stacks_end(),
Andrew Scull3c257452019-11-26 13:32:50 +0000371 ppool);
372}
J-Alvesb37fd082020-10-22 12:29:21 +0100373
374/**
 * Gets the mode of the given range of IPAs or VAs if they are mapped with the
 * same mode.
 *
 * Returns true if the range is mapped with the same mode and false otherwise.
 * The wrapper calls the appropriate mm function depending on whether the
 * partition is a VM or an EL0 partition.
 */
bool vm_mem_get_mode(struct vm_locked vm_locked, ipaddr_t begin, ipaddr_t end,
		     uint32_t *mode)
{
	return arch_vm_mem_get_mode(vm_locked, begin, end, mode);
}

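/**
 * Maps a range of addresses in the page tables used by the given DMA device
 * on behalf of the VM.
 */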
bool vm_iommu_mm_identity_map(struct vm_locked vm_locked, paddr_t begin,
			      paddr_t end, uint32_t mode, struct mpool *ppool,
			      ipaddr_t *ipa, uint8_t dma_device_id)
{
	return arch_vm_iommu_mm_identity_map(vm_locked, begin, end, mode, ppool,
					     ipa, dma_device_id);
}

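/**
 * Checks whether the VM's mailbox is busy: a message is pending in it or no
 * receive buffer has been set up.
 */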
bool vm_mailbox_state_busy(struct vm_locked vm_locked)
{
	return vm_locked.vm->mailbox.state != MAILBOX_STATE_EMPTY ||
	       vm_locked.vm->mailbox.recv == NULL;
}

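/**
 * Returns the VM's notifications structure for notifications from VMs or from
 * SPs, as selected by `is_from_vm`.
 */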
static struct notifications *vm_get_notifications(struct vm_locked vm_locked,
						  bool is_from_vm)
{
	return is_from_vm ? &vm_locked.vm->notifications.from_vm
			  : &vm_locked.vm->notifications.from_sp;
}

/*
 * Dynamically allocates the per-vCPU notifications structures for a given VM.
 */
static void vm_notifications_init_per_vcpu_notifications(
	struct vm *vm, ffa_vcpu_count_t vcpu_count, struct mpool *ppool)
{
	size_t notif_ppool_entries =
		(align_up(sizeof(struct notifications_state) * vcpu_count,
			  MM_PPOOL_ENTRY_SIZE) /
		 MM_PPOOL_ENTRY_SIZE);

	/*
	 * Allow this function to be called on already initialized VMs whose
	 * notification structures need to be cleared.
	 */
	if (vm->notifications.from_sp.per_vcpu == NULL) {
		assert(vm->notifications.from_vm.per_vcpu == NULL);
		assert(vcpu_count != 0);
		CHECK(ppool != NULL);
		vm->notifications.from_sp.per_vcpu =
			(struct notifications_state *)mpool_alloc_contiguous(
				ppool, notif_ppool_entries, 1);
		CHECK(vm->notifications.from_sp.per_vcpu != NULL);

		vm->notifications.from_vm.per_vcpu =
			(struct notifications_state *)mpool_alloc_contiguous(
				ppool, notif_ppool_entries, 1);
		CHECK(vm->notifications.from_vm.per_vcpu != NULL);
	} else {
		assert(vm->notifications.from_vm.per_vcpu != NULL);
	}

	memset_s(vm->notifications.from_sp.per_vcpu,
		 sizeof(*(vm->notifications.from_sp.per_vcpu)) * vcpu_count, 0,
		 sizeof(*(vm->notifications.from_sp.per_vcpu)) * vcpu_count);
	memset_s(vm->notifications.from_vm.per_vcpu,
		 sizeof(*(vm->notifications.from_vm.per_vcpu)) * vcpu_count, 0,
		 sizeof(*(vm->notifications.from_vm.per_vcpu)) * vcpu_count);
}

/*
J-Alvesa0f317d2021-06-09 13:31:59 +0100450 * Initializes the notifications structure.
 */
static void vm_notifications_init_bindings(struct notifications *notifications)
{
	for (uint32_t i = 0U; i < MAX_FFA_NOTIFICATIONS; i++) {
		notifications->bindings_sender_id[i] = HF_INVALID_VM_ID;
	}
}

/*
 * Initialize notification related structures for a VM.
 */
void vm_notifications_init(struct vm *vm, ffa_vcpu_count_t vcpu_count,
			   struct mpool *ppool)
{
	vm_notifications_init_per_vcpu_notifications(vm, vcpu_count, ppool);

	/* Basic initialization of the notifications structure. */
	vm_notifications_init_bindings(&vm->notifications.from_sp);
	vm_notifications_init_bindings(&vm->notifications.from_vm);
}

/**
 * Checks if there are pending notifications.
 */
bool vm_are_notifications_pending(struct vm_locked vm_locked, bool from_vm,
				  ffa_notifications_bitmap_t notifications)
{
	struct notifications *to_check;

	CHECK(vm_locked.vm != NULL);

	to_check = vm_get_notifications(vm_locked, from_vm);

	/* Check if there are pending per vcpu notifications */
	for (uint32_t i = 0U; i < vm_locked.vm->vcpu_count; i++) {
		if ((to_check->per_vcpu[i].pending & notifications) != 0U) {
			return true;
		}
	}

	/* Check if there are global pending notifications */
	return (to_check->global.pending & notifications) != 0U;
}

/**
 * Checks if there are pending global notifications, either from SPs or from
 * VMs.
 */
bool vm_are_global_notifications_pending(struct vm_locked vm_locked)
{
	return vm_get_notifications(vm_locked, true)->global.pending != 0ULL ||
	       vm_get_notifications(vm_locked, false)->global.pending != 0ULL ||
	       vm_are_fwk_notifications_pending(vm_locked);
}

/**
 * Currently only the RX full notification is supported as a framework
 * notification. Returns true if there is one pending, either from the
 * Hypervisor or the SPMC.
 */
bool vm_are_fwk_notifications_pending(struct vm_locked vm_locked)
{
	return vm_locked.vm->notifications.framework.pending != 0ULL;
}

/**
 * Checks if there are per-vCPU notifications pending for a specific vCPU,
 * either from SPs or from VMs.
 */
bool vm_are_per_vcpu_notifications_pending(struct vm_locked vm_locked,
					   ffa_vcpu_index_t vcpu_id)
{
	CHECK(vcpu_id < vm_locked.vm->vcpu_count);

	return vm_get_notifications(vm_locked, true)
		       ->per_vcpu[vcpu_id]
		       .pending != 0ULL ||
	       vm_get_notifications(vm_locked, false)
		       ->per_vcpu[vcpu_id]
		       .pending != 0ULL;
}

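/**
 * Checks whether notifications are enabled for the given VM.
 */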
bool vm_are_notifications_enabled(struct vm *vm)
{
	return vm->notifications.enabled == true;
}

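/**
 * Same as `vm_are_notifications_enabled`, but for an already locked VM.
 */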
bool vm_locked_are_notifications_enabled(struct vm_locked vm_locked)
{
	return vm_are_notifications_enabled(vm_locked.vm);
}

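/**
 * Checks whether notification bit `i` is set in the given bitmap.
 */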
static bool vm_is_notification_bit_set(ffa_notifications_bitmap_t notifications,
				       uint32_t i)
{
	return (notifications & FFA_NOTIFICATION_MASK(i)) != 0U;
}

static void vm_notifications_global_state_count_update(
	ffa_notifications_bitmap_t bitmap, uint32_t *counter, int inc)
{
	/*
	 * Helper to update the counters in the global notifications state.
	 * Counts are updated in steps of 1 (increment) or -1 (decrement) for
	 * each notification set in the bitmap.
	 */
	assert(inc == 1 || inc == -1);

	sl_lock(&all_notifications_state.lock);

	for (uint32_t i = 0; i < MAX_FFA_NOTIFICATIONS; i++) {
		if (vm_is_notification_bit_set(bitmap, i)) {
			CHECK((inc > 0 && *counter < UINT32_MAX) ||
			      (inc < 0 && *counter > 0));
			*counter += inc;
		}
	}

	sl_unlock(&all_notifications_state.lock);
}

/**
 * Helper function to increment the pending notifications count based on a
 * bitmap passed as argument.
 * To be used when setting notifications for a given VM.
 */
static void vm_notifications_pending_count_add(
	ffa_notifications_bitmap_t to_add)
{
	vm_notifications_global_state_count_update(
		to_add, &all_notifications_state.pending_count, 1);
}

/**
 * Helper function to decrement the pending notifications count.
 * Function to be used when getting the receiver's pending notifications.
 */
static void vm_notifications_pending_count_sub(
	ffa_notifications_bitmap_t to_sub)
{
	vm_notifications_global_state_count_update(
		to_sub, &all_notifications_state.pending_count, -1);
}

/**
 * Helper function to count the notifications whose information has been
 * retrieved by the scheduler of the system, and are still pending.
 */
static void vm_notifications_info_get_retrieved_count_add(
	ffa_notifications_bitmap_t to_add)
{
	vm_notifications_global_state_count_update(
		to_add, &all_notifications_state.info_get_retrieved_count, 1);
}

/**
 * Helper function to subtract the notifications that the receiver is getting
 * and whose information has been retrieved by the receiver scheduler.
 */
static void vm_notifications_info_get_retrieved_count_sub(
	ffa_notifications_bitmap_t to_sub)
{
	vm_notifications_global_state_count_update(
		to_sub, &all_notifications_state.info_get_retrieved_count, -1);
}

/**
 * Helper function to determine if there are notifications pending whose info
 * hasn't been retrieved by the receiver scheduler.
 */
bool vm_notifications_pending_not_retrieved_by_scheduler(void)
{
	bool ret;

	sl_lock(&all_notifications_state.lock);
	ret = all_notifications_state.pending_count >
	      all_notifications_state.info_get_retrieved_count;
	sl_unlock(&all_notifications_state.lock);

	return ret;
}

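/**
 * Checks whether there are no notifications pending in the whole system.
 */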
bool vm_is_notifications_pending_count_zero(void)
{
	bool ret;

	sl_lock(&all_notifications_state.lock);
	ret = all_notifications_state.pending_count == 0;
	sl_unlock(&all_notifications_state.lock);

	return ret;
}

/**
 * Checks that all provided notifications are bound to the specified sender, and
 * are per VCPU or global, as specified.
 */
bool vm_notifications_validate_binding(struct vm_locked vm_locked,
				       bool is_from_vm, ffa_id_t sender_id,
				       ffa_notifications_bitmap_t notifications,
				       bool is_per_vcpu)
{
	return vm_notifications_validate_bound_sender(
		       vm_locked, is_from_vm, sender_id, notifications) &&
	       vm_notifications_validate_per_vcpu(vm_locked, is_from_vm,
						  is_per_vcpu, notifications);
}

/**
 * Updates the binding information in the notifications structure for the
 * specified notifications.
 */
void vm_notifications_update_bindings(struct vm_locked vm_locked,
				      bool is_from_vm, ffa_id_t sender_id,
				      ffa_notifications_bitmap_t notifications,
				      bool is_per_vcpu)
{
	CHECK(vm_locked.vm != NULL);
	struct notifications *to_update =
		vm_get_notifications(vm_locked, is_from_vm);

	for (uint32_t i = 0; i < MAX_FFA_NOTIFICATIONS; i++) {
		if (vm_is_notification_bit_set(notifications, i)) {
			to_update->bindings_sender_id[i] = sender_id;
		}
	}

	/*
	 * Set the notifications' bits in the per-vCPU bindings bitmap if they
	 * are per-vCPU, else clear them as they are global.
	 */
	if (is_per_vcpu) {
		to_update->bindings_per_vcpu |= notifications;
	} else {
		to_update->bindings_per_vcpu &= ~notifications;
	}
}

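/**
 * Checks that all the given notifications are bound to the specified sender.
 */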
bool vm_notifications_validate_bound_sender(
	struct vm_locked vm_locked, bool is_from_vm, ffa_id_t sender_id,
	ffa_notifications_bitmap_t notifications)
{
	CHECK(vm_locked.vm != NULL);
	struct notifications *to_check =
		vm_get_notifications(vm_locked, is_from_vm);

	for (uint32_t i = 0; i < MAX_FFA_NOTIFICATIONS; i++) {
		if (vm_is_notification_bit_set(notifications, i) &&
		    to_check->bindings_sender_id[i] != sender_id) {
			return false;
		}
	}

	return true;
}

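/**
 * Checks that the given notifications are all bound as per-vCPU or all as
 * global, as specified by `is_per_vcpu`.
 */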
bool vm_notifications_validate_per_vcpu(struct vm_locked vm_locked,
					bool is_from_vm, bool is_per_vcpu,
					ffa_notifications_bitmap_t notif)
{
	CHECK(vm_locked.vm != NULL);
	struct notifications *to_check =
		vm_get_notifications(vm_locked, is_from_vm);

	return is_per_vcpu ? (~to_check->bindings_per_vcpu & notif) == 0U
			   : (to_check->bindings_per_vcpu & notif) == 0U;
}

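/**
 * Marks the given notifications as pending in the given state and updates the
 * global pending notifications count accordingly.
 */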
static void vm_notifications_state_set(struct notifications_state *state,
				       ffa_notifications_bitmap_t notifications)
{
	/*
	 * Exclude notifications which are already pending, to avoid
	 * leaving the pending counter in an inconsistent state.
	 */
	ffa_notifications_bitmap_t to_set =
		(state->pending & notifications) ^ notifications;

	/* Change the state of the pending notifications. */
	state->pending |= to_set;
	vm_notifications_pending_count_add(to_set);
}

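/**
 * Sets the given notifications as pending for the partition, either globally
 * or for the specified vCPU.
 */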
void vm_notifications_partition_set_pending(
	struct vm_locked vm_locked, bool is_from_vm,
	ffa_notifications_bitmap_t notifications, ffa_vcpu_index_t vcpu_id,
	bool is_per_vcpu)
{
	struct notifications *to_set;
	struct notifications_state *state;

	CHECK(vm_locked.vm != NULL);
	CHECK(vcpu_id < vm_locked.vm->vcpu_count);

	to_set = vm_get_notifications(vm_locked, is_from_vm);

	state = is_per_vcpu ? &to_set->per_vcpu[vcpu_id] : &to_set->global;

	vm_notifications_state_set(state, notifications);
}

/**
 * Set pending framework notifications.
 */
void vm_notifications_framework_set_pending(
	struct vm_locked vm_locked, ffa_notifications_bitmap_t notifications)
{
	CHECK(vm_locked.vm != NULL);
	assert(is_ffa_spm_buffer_full_notification(notifications) ||
	       is_ffa_hyp_buffer_full_notification(notifications));
	vm_notifications_state_set(&vm_locked.vm->notifications.framework,
				   notifications);
}

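/**
 * Returns the pending notifications of the given state and clears them,
 * updating the global counters accordingly.
 */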
static ffa_notifications_bitmap_t vm_notifications_state_get_pending(
	struct notifications_state *state)
{
	ffa_notifications_bitmap_t to_ret;
	ffa_notifications_bitmap_t pending_and_info_get_retrieved;

	assert(state != NULL);

	to_ret = state->pending;

	/* Update count of currently pending notifications in the system. */
	vm_notifications_pending_count_sub(state->pending);

	/*
	 * If the notifications the receiver is getting have already been
	 * retrieved by the receiver scheduler, decrement those from the
	 * respective count.
	 */
	pending_and_info_get_retrieved =
		state->pending & state->info_get_retrieved;

	if (pending_and_info_get_retrieved != 0) {
		vm_notifications_info_get_retrieved_count_sub(
			pending_and_info_get_retrieved);
	}

	state->pending = 0U;
	state->info_get_retrieved = 0U;

	return to_ret;
}

/**
 * Get global and per-vCPU notifications for the given vCPU ID.
 */
ffa_notifications_bitmap_t vm_notifications_partition_get_pending(
	struct vm_locked vm_locked, bool is_from_vm, ffa_vcpu_index_t vcpu_id)
{
	ffa_notifications_bitmap_t to_ret;
	struct notifications *to_get;

	assert(vm_locked.vm != NULL);
	to_get = vm_get_notifications(vm_locked, is_from_vm);
	assert(vcpu_id < vm_locked.vm->vcpu_count);

	to_ret = vm_notifications_state_get_pending(&to_get->global);
	to_ret |=
		vm_notifications_state_get_pending(&to_get->per_vcpu[vcpu_id]);

	return to_ret;
}

/**
 * Get pending framework notifications.
 */
ffa_notifications_bitmap_t vm_notifications_framework_get_pending(
	struct vm_locked vm_locked)
{
	struct vm *vm = vm_locked.vm;
	ffa_notifications_bitmap_t framework;

	assert(vm != NULL);

	framework = vm_notifications_state_get_pending(
		&vm->notifications.framework);

	return framework;
}

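/**
 * Inserts the given VM ID, and the vCPU ID for per-vCPU notifications, into the
 * list of IDs to be returned to the receiver scheduler. Returns false if there
 * is no space left in the list.
 */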
static bool vm_insert_notification_info_list(
	ffa_id_t vm_id, bool is_per_vcpu, ffa_vcpu_index_t vcpu_id,
	uint16_t *ids, uint32_t *ids_count, uint32_t *lists_sizes,
	uint32_t *lists_count, const uint32_t ids_max_count,
	enum notifications_info_get_state *info_get_state)
{
	CHECK(*ids_count <= ids_max_count);
	CHECK(*lists_count <= ids_max_count);

	if (*info_get_state == FULL || *ids_count == ids_max_count) {
		*info_get_state = FULL;
		return false;
	}

	switch (*info_get_state) {
	case INIT:
	case STARTING_NEW:
		/*
		 * In this iteration two IDs are to be added: the VM ID and
		 * the vCPU ID. If there is no space, change state and
		 * terminate.
		 */
		if (is_per_vcpu && ids_max_count - *ids_count < 2) {
			*info_get_state = FULL;
			return false;
		}

		*info_get_state = INSERTING;
		ids[*ids_count] = vm_id;
		++(*ids_count);

		if (is_per_vcpu) {
			/* Insert vCPU ID. */
			ids[*ids_count] = vcpu_id;
			++(*ids_count);
			++lists_sizes[*lists_count];
		}

		++(*lists_count);
		break;
	case INSERTING:
		/* For per-vCPU notifications only. */
		if (!is_per_vcpu) {
			break;
		}

		/* Insert vCPU ID */
		ids[*ids_count] = vcpu_id;
		(*ids_count)++;
		/* Increment respective list size */
		++lists_sizes[*lists_count - 1];

		if (lists_sizes[*lists_count - 1] == 3) {
			*info_get_state = STARTING_NEW;
		}
		break;
	default:
		panic("Notification info get action error!!\n");
	}

	return true;
}

/**
 * Check if the notification is pending and hasn't been retrieved.
 * If so, attempt to add it to the notification info list.
 * Returns true if successfully added to the list.
 */
static bool vm_notifications_state_info_get(
	struct notifications_state *state, ffa_id_t vm_id, bool is_per_vcpu,
	ffa_vcpu_index_t vcpu_id, uint16_t *ids, uint32_t *ids_count,
	uint32_t *lists_sizes, uint32_t *lists_count,
	const uint32_t ids_max_count,
	enum notifications_info_get_state *info_get_state)
{
	ffa_notifications_bitmap_t pending_not_retrieved;

	pending_not_retrieved = state->pending & ~state->info_get_retrieved;

	/* No notifications pending that haven't been retrieved. */
	if (pending_not_retrieved == 0U) {
		return false;
	}

	if (!vm_insert_notification_info_list(
		    vm_id, is_per_vcpu, vcpu_id, ids, ids_count, lists_sizes,
		    lists_count, ids_max_count, info_get_state)) {
		return false;
	}

	state->info_get_retrieved |= pending_not_retrieved;

	vm_notifications_info_get_retrieved_count_add(pending_not_retrieved);

	return true;
}

/**
 * Check if the vCPU has a pending IPI that hasn't been retrieved.
 * If so, try to add it to the notification info list.
 * Returns true if successfully added to the list.
 */
static bool vm_ipi_state_info_get(
	struct vcpu *vcpu, ffa_id_t vm_id, ffa_vcpu_index_t vcpu_id,
	uint16_t *ids, uint32_t *ids_count, uint32_t *lists_sizes,
	uint32_t *lists_count, const uint32_t ids_max_count,
	enum notifications_info_get_state *info_get_state, bool per_vcpu_added)
{
	bool ret = true;
	bool pending_not_retrieved;
	struct vcpu_locked vcpu_locked = vcpu_lock(vcpu);
	struct interrupts *interrupts = &vcpu_locked.vcpu->interrupts;

	pending_not_retrieved =
		vcpu_is_virt_interrupt_pending(interrupts, HF_IPI_INTID) &&
		!vcpu_ipi_is_info_get_retrieved(vcpu_locked);

	/*
	 * No notifications pending that haven't been retrieved or the vCPU is
	 * not in the waiting state. Only report waiting vCPUs as this is the
	 * only state that needs explicit cycles donated from the NWd.
	 */
	if (!pending_not_retrieved || vcpu->state != VCPU_STATE_WAITING) {
		ret = false;
		goto out;
	}

	/*
	 * If the per vCPU notification was added to the list we do not need
	 * to add it again for the IPI.
	 */
	if (!per_vcpu_added &&
	    !vm_insert_notification_info_list(
		    vm_id, true, vcpu_id, ids, ids_count, lists_sizes,
		    lists_count, ids_max_count, info_get_state)) {
		ret = false;
		goto out;
	}

	vcpu_ipi_set_info_get_retrieved(vcpu_locked);

out:
	vcpu_unlock(&vcpu_locked);

	return ret;
}

J-Alves663682a2022-03-25 13:56:51 +0000979/**
J-Alvesc8e8a222021-06-08 17:33:52 +0100980 * Get pending notification's information to return to the receiver scheduler.
981 */
void vm_notifications_info_get_pending(
	struct vm_locked vm_locked, bool is_from_vm, uint16_t *ids,
	uint32_t *ids_count, uint32_t *lists_sizes, uint32_t *lists_count,
	const uint32_t ids_max_count,
	enum notifications_info_get_state *info_get_state)
{
	struct notifications *notifications;

	CHECK(vm_locked.vm != NULL);

	notifications = vm_get_notifications(vm_locked, is_from_vm);

	/*
	 * Perform info get for global notifications, before doing it for
	 * per-vCPU.
	 */
	vm_notifications_state_info_get(&notifications->global,
					vm_locked.vm->id, false, 0, ids,
					ids_count, lists_sizes, lists_count,
					ids_max_count, info_get_state);

	for (ffa_vcpu_count_t i = 0; i < vm_locked.vm->vcpu_count; i++) {
		struct vcpu *vcpu = vm_get_vcpu(vm_locked.vm, i);
		bool per_vcpu_added;

		per_vcpu_added = vm_notifications_state_info_get(
			&notifications->per_vcpu[i], vm_locked.vm->id, true, i,
			ids, ids_count, lists_sizes, lists_count, ids_max_count,
			info_get_state);
		/*
		 * IPIs can only be pending for partitions at the
		 * current virtual FF-A instance.
		 */
		if (vm_id_is_current_world(vm_locked.vm->id)) {
			vm_ipi_state_info_get(vcpu, vm_locked.vm->id, i, ids,
					      ids_count, lists_sizes,
					      lists_count, ids_max_count,
					      info_get_state, per_vcpu_added);
		}
	}
}

/**
 * Gets all info from the VM's pending notifications.
 * Returns true if the list is full and there are more notifications pending.
 */
bool vm_notifications_info_get(struct vm_locked vm_locked, uint16_t *ids,
			       uint32_t *ids_count, uint32_t *lists_sizes,
			       uint32_t *lists_count,
			       const uint32_t ids_max_count)
{
	enum notifications_info_get_state current_state = INIT;

	/* Get info of pending notifications from the framework. */
	vm_notifications_state_info_get(&vm_locked.vm->notifications.framework,
					vm_locked.vm->id, false, 0, ids,
					ids_count, lists_sizes, lists_count,
					ids_max_count, &current_state);

	/* Get info of pending notifications from SPs. */
	vm_notifications_info_get_pending(vm_locked, false, ids, ids_count,
					  lists_sizes, lists_count,
					  ids_max_count, &current_state);

	/* Get info of pending notifications from VMs. */
	vm_notifications_info_get_pending(vm_locked, true, ids, ids_count,
					  lists_sizes, lists_count,
					  ids_max_count, &current_state);

	/*
	 * The state transitions to FULL when trying to insert a new ID in the
	 * list and there is no more space. This means there are notifications
	 * pending whose info has not been retrieved.
	 */
	return current_state == FULL;
}

/**
 * Checks whether the VM supports the given messaging method.
 */
bool vm_supports_messaging_method(struct vm *vm, uint16_t msg_method)
{
	return (vm->messaging_method & msg_method) != 0;
}

/**
 * Sets the designated GP register in which the VM expects to receive the boot
 * info's address.
 */
void vm_set_boot_info_gp_reg(struct vm *vm, struct vcpu *vcpu)
{
	if (vm->boot_info.blob_addr.ipa != 0U) {
		arch_regs_set_gp_reg(&vcpu->regs,
				     ipa_addr(vm->boot_info.blob_addr),
				     vm->boot_info.gp_register_num);
	}
}

/**
 * Obtain the interrupt descriptor entry of the specified VM corresponding
 * to the specified interrupt ID.
 */
static struct interrupt_descriptor *vm_find_interrupt_descriptor(
	struct vm_locked vm_locked, uint32_t id)
{
	for (uint32_t i = 0; i < HF_NUM_INTIDS; i++) {
		/* Interrupt descriptors are populated contiguously. */
		if (!vm_locked.vm->interrupt_desc[i].valid) {
			break;
		}

		if (vm_locked.vm->interrupt_desc[i].interrupt_id == id) {
			/* Interrupt descriptor found. */
			return &vm_locked.vm->interrupt_desc[i];
		}
	}

	return NULL;
}

/**
 * Update the target MPIDR corresponding to the specified interrupt id
 * belonging to the specified vm.
 */
struct interrupt_descriptor *vm_interrupt_set_target_mpidr(
	struct vm_locked vm_locked, uint32_t id, uint32_t target_mpidr)
{
	struct interrupt_descriptor *int_desc;

	int_desc = vm_find_interrupt_descriptor(vm_locked, id);

	if (int_desc != NULL) {
		int_desc->mpidr_valid = true;
		int_desc->mpidr = target_mpidr;
	}

	return int_desc;
}

/**
 * Update the security state of the specified interrupt id belonging to the
 * specified vm.
 */
struct interrupt_descriptor *vm_interrupt_set_sec_state(
	struct vm_locked vm_locked, uint32_t id, uint32_t sec_state)
{
	struct interrupt_descriptor *int_desc;

	int_desc = vm_find_interrupt_descriptor(vm_locked, id);

	if (int_desc != NULL) {
		int_desc->sec_state = sec_state;
	}

	return int_desc;
}

/**
 * Enable or disable the specified interrupt ID belonging to the specified VM.
 */
struct interrupt_descriptor *vm_interrupt_set_enable(struct vm_locked vm_locked,
						      uint32_t id, bool enable)
{
	struct interrupt_descriptor *int_desc;

	int_desc = vm_find_interrupt_descriptor(vm_locked, id);

	if (int_desc != NULL) {
		int_desc->enabled = enable;
	}

	return int_desc;
}

/**
 * The 'boot_list' entry is used as both the start and the end of the list.
 * Start: the node it points to is the first VM to boot.
 * End: the last node's next pointer points back to this entry.
 */
static bool vm_is_boot_list_end(struct vm *vm)
{
	return vm->boot_list_node.next == &boot_list;
}

/**
 * Gets the first partition to boot, according to the Boot Protocol from the
 * FF-A spec.
 */
struct vm *vm_get_boot_vm(void)
{
	assert(!list_empty(&boot_list));

	return CONTAINER_OF(boot_list.next, struct vm, boot_list_node);
}

1176/**
Madhukar Pappireddya81f5412024-11-25 09:46:48 -06001177 * Gets the first MP partition to boot on a secondary CPU, as per the boot
1178 * order from FF-A spec.
1179 * If every SP in the system is an UP partition, this function returns NULL.
1180 */
struct vm *vm_get_boot_vm_secondary_core(void)
{
	struct vm *vm = vm_get_boot_vm();

	if (vm_is_up(vm)) {
		return vm_get_next_boot_secondary_core(vm);
	}

	return vm;
}

/**
 * Returns the next element in the boot order list, if there is one.
 */
struct vm *vm_get_next_boot(struct vm *vm)
{
	return vm_is_boot_list_end(vm)
		       ? NULL
		       : CONTAINER_OF(vm->boot_list_node.next, struct vm,
				      boot_list_node);
}

/**
 * Returns the next element representing an MP endpoint in the boot order list,
 * if there is one.
 */
struct vm *vm_get_next_boot_secondary_core(struct vm *vm)
{
	struct vm *vm_next;

	assert(vm != NULL);

	vm_next = vm_get_next_boot(vm);

	/* Keep searching until an MP endpoint is found. */
	while (vm_next != NULL && vm_is_up(vm_next)) {
		vm_next = vm_get_next_boot(vm_next);
	}

	return vm_next;
}

/**
 * Inserts the VM into the boot list, sorted by the `boot_order` field of the
 * vm structure and rooted in `boot_list`.
 */
void vm_update_boot(struct vm *vm)
{
	struct vm *current_vm = NULL;

	if (list_empty(&boot_list)) {
		list_prepend(&boot_list, &vm->boot_list_node);
		return;
	}

	/*
	 * When getting to this point the first insertion should have
	 * been done.
	 */
	current_vm = vm_get_boot_vm();
	assert(current_vm != NULL);

	/*
	 * Iterate until the position is found according to boot order, or
	 * until we reach the end of the list.
	 */
	while (!vm_is_boot_list_end(current_vm) &&
	       current_vm->boot_order <= vm->boot_order) {
		current_vm = vm_get_next_boot(current_vm);
	}

	current_vm->boot_order > vm->boot_order
		? list_prepend(&current_vm->boot_list_node, &vm->boot_list_node)
		: list_append(&current_vm->boot_list_node, &vm->boot_list_node);
}