/*
 * Copyright 2018 The Hafnium Authors.
 *
 * Use of this source code is governed by a BSD-style
 * license that can be found in the LICENSE file or at
 * https://opensource.org/licenses/BSD-3-Clause.
 */

#include "hf/vm.h"

#include "hf/api.h"
#include "hf/check.h"
#include "hf/cpu.h"
#include "hf/dlog.h"
#include "hf/ffa.h"
#include "hf/layout.h"
#include "hf/plat/iommu.h"
#include "hf/std.h"

#include "vmapi/hf/call.h"

static struct vm vms[MAX_VMS];
static struct vm other_world;
static ffa_vm_count_t vm_count;
static struct vm *first_boot_vm;

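/**
 * Initialises the VM's page table: stage-1 for an EL0 partition, stage-2
 * otherwise.
 */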
static bool vm_init_mm(struct vm *vm, struct mpool *ppool)
{
	if (vm->el0_partition) {
		return mm_ptable_init(&vm->ptable, vm->id, MM_FLAG_STAGE1,
				      ppool);
	}
	return mm_vm_init(&vm->ptable, vm->id, ppool);
}

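/**
 * Initialises a VM (or the other world) with the given ID and vCPU count.
 * Returns a pointer to the VM, or NULL if its page table could not be
 * initialised.
 */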
struct vm *vm_init(ffa_vm_id_t id, ffa_vcpu_count_t vcpu_count,
		   struct mpool *ppool, bool el0_partition)
{
	uint32_t i;
	struct vm *vm;

	if (id == HF_OTHER_WORLD_ID) {
		CHECK(el0_partition == false);
		vm = &other_world;
	} else {
		uint16_t vm_index = id - HF_VM_ID_OFFSET;

		CHECK(id >= HF_VM_ID_OFFSET);
		CHECK(vm_index < ARRAY_SIZE(vms));
		vm = &vms[vm_index];
	}

	memset_s(vm, sizeof(*vm), 0, sizeof(*vm));

	list_init(&vm->mailbox.waiter_list);
	list_init(&vm->mailbox.ready_list);
	sl_init(&vm->lock);

	vm->id = id;
	vm->vcpu_count = vcpu_count;
	vm->mailbox.state = MAILBOX_STATE_EMPTY;
	atomic_init(&vm->aborting, false);
	vm->el0_partition = el0_partition;

	if (!vm_init_mm(vm, ppool)) {
		return NULL;
	}

	/* Initialise waiter entries. */
	for (i = 0; i < MAX_VMS; i++) {
		vm->wait_entries[i].waiting_vm = vm;
		list_init(&vm->wait_entries[i].wait_links);
		list_init(&vm->wait_entries[i].ready_links);
	}

	/* Do basic initialization of vCPUs. */
	for (i = 0; i < vcpu_count; i++) {
		vcpu_init(vm_get_vcpu(vm, i), vm);
	}

	/* Basic initialization of the notifications structure. */
	vm_notifications_init_bindings(&vm->notifications.from_sp);
	vm_notifications_init_bindings(&vm->notifications.from_vm);

	return vm;
}

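/**
 * Initialises the next available VM, assigning it the next free ID after the
 * reserved offset. Returns false if the maximum number of VMs has already been
 * reached or the VM could not be initialised.
 */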
bool vm_init_next(ffa_vcpu_count_t vcpu_count, struct mpool *ppool,
		  struct vm **new_vm, bool el0_partition)
{
	if (vm_count >= MAX_VMS) {
		return false;
	}

	/* Generate IDs based on an offset, as low IDs (e.g. 0) are reserved. */
	*new_vm = vm_init(vm_count + HF_VM_ID_OFFSET, vcpu_count, ppool,
			  el0_partition);
	if (*new_vm == NULL) {
		return false;
	}
	++vm_count;

	return true;
}

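/**
 * Returns the number of VMs that have been initialised so far.
 */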
ffa_vm_count_t vm_get_count(void)
{
	return vm_count;
}

/**
 * Returns a pointer to the VM with the corresponding id.
 */
struct vm *vm_find(ffa_vm_id_t id)
{
	uint16_t index;

	if (id == HF_OTHER_WORLD_ID) {
		if (other_world.id == HF_OTHER_WORLD_ID) {
			return &other_world;
		}
		return NULL;
	}

	/* Check that this is not a reserved ID. */
	if (id < HF_VM_ID_OFFSET) {
		return NULL;
	}

	index = id - HF_VM_ID_OFFSET;

	return vm_find_index(index);
}

/**
 * Returns a locked instance of the VM with the corresponding id.
 */
struct vm_locked vm_find_locked(ffa_vm_id_t id)
{
	struct vm *vm = vm_find(id);

	if (vm != NULL) {
		return vm_lock(vm);
	}

	return (struct vm_locked){.vm = NULL};
}

/**
 * Returns a pointer to the VM at the specified index.
 */
struct vm *vm_find_index(uint16_t index)
{
	/* Ensure the VM is initialized. */
	if (index >= vm_count) {
		return NULL;
	}

	return &vms[index];
}

/**
 * Locks the given VM and returns a vm_locked structure holding the newly
 * locked VM.
 */
struct vm_locked vm_lock(struct vm *vm)
{
	struct vm_locked locked = {
		.vm = vm,
	};

	sl_lock(&vm->lock);

	return locked;
}

/**
 * Locks two VMs ensuring that the locking order is according to the locks'
 * addresses.
 */
struct two_vm_locked vm_lock_both(struct vm *vm1, struct vm *vm2)
{
	struct two_vm_locked dual_lock;

	sl_lock_both(&vm1->lock, &vm2->lock);
	dual_lock.vm1.vm = vm1;
	dual_lock.vm2.vm = vm2;

	return dual_lock;
}

/**
 * Unlocks a VM previously locked with vm_lock, and updates `locked` to reflect
 * the fact that the VM is no longer locked.
 */
void vm_unlock(struct vm_locked *locked)
{
	sl_unlock(&locked->vm->lock);
	locked->vm = NULL;
}

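/*
 * Illustrative sketch of the expected lock/unlock pairing (not an actual call
 * site; `vm` and the critical-section body are placeholders):
 *
 *	struct vm_locked vm_locked = vm_lock(vm);
 *	... access vm_locked.vm state that requires the lock ...
 *	vm_unlock(&vm_locked);
 */
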
/**
 * Get the vCPU with the given index from the given VM.
 * This assumes the index is valid, i.e. less than vm->vcpu_count.
 */
struct vcpu *vm_get_vcpu(struct vm *vm, ffa_vcpu_index_t vcpu_index)
{
	CHECK(vcpu_index < vm->vcpu_count);
	return &vm->vcpus[vcpu_index];
}

/**
 * Gets `vm`'s wait entry for waiting on `for_vm`.
 */
struct wait_entry *vm_get_wait_entry(struct vm *vm, ffa_vm_id_t for_vm)
{
	uint16_t index;

	CHECK(for_vm >= HF_VM_ID_OFFSET);
	index = for_vm - HF_VM_ID_OFFSET;
	CHECK(index < MAX_VMS);

	return &vm->wait_entries[index];
}

/**
 * Gets the ID of the VM which the given VM's wait entry is for.
 */
ffa_vm_id_t vm_id_for_wait_entry(struct vm *vm, struct wait_entry *entry)
{
	uint16_t index = entry - vm->wait_entries;

	return index + HF_VM_ID_OFFSET;
}

/**
 * Return whether the given VM ID represents an entity in the current world:
 * i.e. the hypervisor or a normal world VM when running in the normal world, or
 * the SPM or an SP when running in the secure world.
 */
bool vm_id_is_current_world(ffa_vm_id_t vm_id)
{
	return (vm_id & HF_VM_ID_WORLD_MASK) !=
	       (HF_OTHER_WORLD_ID & HF_VM_ID_WORLD_MASK);
}

/**
 * Map a range of addresses to the VM in both the MMU and the IOMMU.
 *
 * mm_vm_defrag should always be called after a series of page table updates,
 * whether they succeed or fail. This is because on failure extra page table
 * entries may have been allocated and then not used, while on success it may be
 * possible to compact the page table by merging several entries into a block.
 *
 * Returns true on success, or false if the update failed and no changes were
 * made.
 */
bool vm_identity_map(struct vm_locked vm_locked, paddr_t begin, paddr_t end,
		     uint32_t mode, struct mpool *ppool, ipaddr_t *ipa)
{
	if (!vm_identity_prepare(vm_locked, begin, end, mode, ppool)) {
		return false;
	}

	vm_identity_commit(vm_locked, begin, end, mode, ppool, ipa);

	return true;
}

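/*
 * Illustrative sketch of a map-then-defrag sequence through this wrapper (not
 * an actual call site; `vm`, `begin`, `end` and `ppool` are assumed to be in
 * scope, and the mode flags are only an example):
 *
 *	struct vm_locked vm_locked = vm_lock(vm);
 *
 *	if (!vm_identity_map(vm_locked, begin, end, MM_MODE_R | MM_MODE_W,
 *			     ppool, NULL)) {
 *		dlog_error("Unable to map the requested range.\n");
 *	}
 *	vm_ptable_defrag(vm_locked, ppool);
 *	vm_unlock(&vm_locked);
 */
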
/**
 * Prepares the given VM for the given address mapping such that it will be able
 * to commit the change without failure.
 *
 * In particular, multiple calls to this function will result in the
 * corresponding calls to commit the changes succeeding.
 *
 * Returns true on success, or false if the update failed and no changes were
 * made.
 */
bool vm_identity_prepare(struct vm_locked vm_locked, paddr_t begin, paddr_t end,
			 uint32_t mode, struct mpool *ppool)
{
	if (vm_locked.vm->el0_partition) {
		return mm_identity_prepare(&vm_locked.vm->ptable, begin, end,
					   mode, ppool);
	}
	return mm_vm_identity_prepare(&vm_locked.vm->ptable, begin, end, mode,
				      ppool);
}

/**
 * Commits the given address mapping to the VM assuming the operation cannot
 * fail. `vm_identity_prepare` must be used correctly before this to ensure
 * this condition.
 */
void vm_identity_commit(struct vm_locked vm_locked, paddr_t begin, paddr_t end,
			uint32_t mode, struct mpool *ppool, ipaddr_t *ipa)
{
	if (vm_locked.vm->el0_partition) {
		mm_identity_commit(&vm_locked.vm->ptable, begin, end, mode,
				   ppool);
		if (ipa != NULL) {
			/*
			 * EL0 partitions are modeled as lightweight VMs to
			 * promote code reuse. The statement below returns the
			 * mapped PA as an IPA; however, for an EL0 partition,
			 * this is really a VA.
			 */
			*ipa = ipa_from_pa(begin);
		}
	} else {
		mm_vm_identity_commit(&vm_locked.vm->ptable, begin, end, mode,
				      ppool, ipa);
	}
	plat_iommu_identity_map(vm_locked, begin, end, mode);
}

/**
 * Unmap a range of addresses from the VM.
 *
 * Returns true on success, or false if the update failed and no changes were
 * made.
 */
bool vm_unmap(struct vm_locked vm_locked, paddr_t begin, paddr_t end,
	      struct mpool *ppool)
{
	uint32_t mode = MM_MODE_UNMAPPED_MASK;

	return vm_identity_map(vm_locked, begin, end, mode, ppool, NULL);
}

/**
 * Defragments the page tables of an EL0 partition or of a VM.
 */
void vm_ptable_defrag(struct vm_locked vm_locked, struct mpool *ppool)
{
	if (vm_locked.vm->el0_partition) {
		mm_stage1_defrag(&vm_locked.vm->ptable, ppool);
	} else {
		mm_vm_defrag(&vm_locked.vm->ptable, ppool);
	}
}

/**
 * Unmaps the hypervisor pages from the given page table.
 */
bool vm_unmap_hypervisor(struct vm_locked vm_locked, struct mpool *ppool)
{
	/* TODO: If we add pages dynamically, they must be included here too. */
	return vm_unmap(vm_locked, layout_text_begin(), layout_text_end(),
			ppool) &&
	       vm_unmap(vm_locked, layout_rodata_begin(), layout_rodata_end(),
			ppool) &&
	       vm_unmap(vm_locked, layout_data_begin(), layout_data_end(),
			ppool);
}

/**
 * Gets the first partition to boot, according to the boot protocol in the FF-A
 * specification.
 */
struct vm *vm_get_first_boot(void)
{
	return first_boot_vm;
}

/**
 * Inserts the VM into the boot list, which is sorted by the `boot_order` field
 * of the vm structure and rooted at `first_boot_vm`.
 */
void vm_update_boot(struct vm *vm)
{
	struct vm *current = NULL;
	struct vm *previous = NULL;

	if (first_boot_vm == NULL) {
		first_boot_vm = vm;
		return;
	}

	current = first_boot_vm;

	while (current != NULL && current->boot_order >= vm->boot_order) {
		previous = current;
		current = current->next_boot;
	}

	if (previous != NULL) {
		previous->next_boot = vm;
	} else {
		first_boot_vm = vm;
	}

	vm->next_boot = current;
}

/**
 * Gets the mode of the given range of IPAs (or VAs) if they are mapped with the
 * same mode.
 *
 * Returns true if the range is mapped with the same mode and false otherwise.
 * This wrapper calls the appropriate mm function depending on whether the
 * partition is a VM or an EL0 partition.
 */
bool vm_mem_get_mode(struct vm_locked vm_locked, ipaddr_t begin, ipaddr_t end,
		     uint32_t *mode)
{
	if (vm_locked.vm->el0_partition) {
		return mm_get_mode(&vm_locked.vm->ptable,
				   va_from_pa(pa_from_ipa(begin)),
				   va_from_pa(pa_from_ipa(end)), mode);
	}
	return mm_vm_get_mode(&vm_locked.vm->ptable, begin, end, mode);
}

/**
 * Initializes the notification bindings, marking every notification as unbound
 * by setting its sender to HF_INVALID_VM_ID.
 */
void vm_notifications_init_bindings(struct notifications *notifications)
{
	for (uint32_t i = 0U; i < MAX_FFA_NOTIFICATIONS; i++) {
		notifications->bindings_sender_id[i] = HF_INVALID_VM_ID;
	}
}

/**
 * Checks if any of the given notifications are pending, either globally or for
 * any vCPU.
 */
bool vm_are_notifications_pending(struct vm_locked vm_locked, bool from_vm,
				  ffa_notifications_bitmap_t notifications)
{
	struct notifications *to_check;

	CHECK(vm_locked.vm != NULL);

	to_check = from_vm ? &vm_locked.vm->notifications.from_vm
			   : &vm_locked.vm->notifications.from_sp;

	/* Check if there are pending per-vCPU notifications. */
	for (uint32_t i = 0U; i < MAX_CPUS; i++) {
		if ((to_check->per_vcpu[i].pending & notifications) != 0U) {
			return true;
		}
	}

	/* Check if there are pending global notifications. */
	return (to_check->global.pending & notifications) != 0U;
}

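/**
 * Returns whether notifications are enabled for the given VM.
 */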
bool vm_are_notifications_enabled(struct vm_locked vm_locked)
{
	return vm_locked.vm->notifications.enabled;
}

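/**
 * Returns whether notification `i` is set in the given bitmap.
 */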
static bool vm_is_notification_bit_set(ffa_notifications_bitmap_t notifications,
				       uint32_t i)
{
	return (notifications & FFA_NOTIFICATION_MASK(i)) != 0U;
}

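/**
 * Returns the VM's notifications structure for the given source: notifications
 * from VMs or from SPs.
 */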
static struct notifications *vm_get_notifications(struct vm_locked vm_locked,
						  bool is_from_vm)
{
	return is_from_vm ? &vm_locked.vm->notifications.from_vm
			  : &vm_locked.vm->notifications.from_sp;
}

/**
 * Checks that all provided notifications are bound to the specified sender, and
 * are per-vCPU or global, as specified.
 */
bool vm_notifications_validate_binding(struct vm_locked vm_locked,
				       bool is_from_vm, ffa_vm_id_t sender_id,
				       ffa_notifications_bitmap_t notifications,
				       bool is_per_vcpu)
{
	return vm_notifications_validate_bound_sender(
		       vm_locked, is_from_vm, sender_id, notifications) &&
	       vm_notifications_validate_per_vcpu(vm_locked, is_from_vm,
						  is_per_vcpu, notifications);
}

/**
 * Updates the binding information in the notifications structure for the
 * specified notifications.
 */
void vm_notifications_update_bindings(struct vm_locked vm_locked,
				      bool is_from_vm, ffa_vm_id_t sender_id,
				      ffa_notifications_bitmap_t notifications,
				      bool is_per_vcpu)
{
	CHECK(vm_locked.vm != NULL);
	struct notifications *to_update =
		vm_get_notifications(vm_locked, is_from_vm);

	for (uint32_t i = 0; i < MAX_FFA_NOTIFICATIONS; i++) {
		if (vm_is_notification_bit_set(notifications, i)) {
			to_update->bindings_sender_id[i] = sender_id;
		}
	}

	/*
	 * Set the per-vCPU binding bits if the notifications are per-vCPU,
	 * else clear them as the notifications are global.
	 */
	if (is_per_vcpu) {
		to_update->bindings_per_vcpu |= notifications;
	} else {
		to_update->bindings_per_vcpu &= ~notifications;
	}
}

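/**
 * Checks that all the specified notifications are bound to the given sender.
 */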
bool vm_notifications_validate_bound_sender(
	struct vm_locked vm_locked, bool is_from_vm, ffa_vm_id_t sender_id,
	ffa_notifications_bitmap_t notifications)
{
	CHECK(vm_locked.vm != NULL);
	struct notifications *to_check =
		vm_get_notifications(vm_locked, is_from_vm);

	for (uint32_t i = 0; i < MAX_FFA_NOTIFICATIONS; i++) {
		if (vm_is_notification_bit_set(notifications, i) &&
		    to_check->bindings_sender_id[i] != sender_id) {
			return false;
		}
	}

	return true;
}

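/**
 * Checks that the specified notifications are all bound as per-vCPU or all
 * bound as global, as indicated by `is_per_vcpu`.
 */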
bool vm_notifications_validate_per_vcpu(struct vm_locked vm_locked,
					bool is_from_vm, bool is_per_vcpu,
					ffa_notifications_bitmap_t notif)
{
	CHECK(vm_locked.vm != NULL);
	struct notifications *to_check =
		vm_get_notifications(vm_locked, is_from_vm);

	return is_per_vcpu ? (~to_check->bindings_per_vcpu & notif) == 0U
			   : (to_check->bindings_per_vcpu & notif) == 0U;
}

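/**
 * Sets the given notifications as pending, either for the given vCPU or
 * globally.
 */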
void vm_notifications_set(struct vm_locked vm_locked, bool is_from_vm,
			  ffa_notifications_bitmap_t notifications,
			  ffa_vcpu_index_t vcpu_id, bool is_per_vcpu)
{
	CHECK(vm_locked.vm != NULL);
	struct notifications *to_set =
		vm_get_notifications(vm_locked, is_from_vm);
	CHECK(vcpu_id < MAX_CPUS);

	if (is_per_vcpu) {
		to_set->per_vcpu[vcpu_id].pending |= notifications;
	} else {
		to_set->global.pending |= notifications;
	}
}

/**
 * Gets the pending global notifications and the per-vCPU notifications of the
 * current vCPU only, and clears them.
 */
ffa_notifications_bitmap_t vm_notifications_get_pending_and_clear(
	struct vm_locked vm_locked, bool is_from_vm,
	ffa_vcpu_index_t cur_vcpu_id)
{
	ffa_notifications_bitmap_t to_ret = 0;

	CHECK(vm_locked.vm != NULL);
	struct notifications *to_get =
		vm_get_notifications(vm_locked, is_from_vm);
	CHECK(cur_vcpu_id < MAX_CPUS);

	to_ret |= to_get->global.pending;
	to_get->global.pending = 0U;
	to_get->global.info_get_retrieved = 0U;

	to_ret |= to_get->per_vcpu[cur_vcpu_id].pending;
	to_get->per_vcpu[cur_vcpu_id].pending = 0U;
	to_get->per_vcpu[cur_vcpu_id].info_get_retrieved = 0U;

	return to_ret;
}

/**
 * Gets the information about pending notifications to return to the receiver
 * scheduler.
 */
void vm_notifications_info_get_pending(
	struct vm_locked vm_locked, bool is_from_vm, uint16_t *ids,
	uint32_t *ids_count, uint32_t *lists_sizes, uint32_t *lists_count,
	const uint32_t ids_max_count,
	enum notifications_info_get_state *info_get_state)
{
	ffa_notifications_bitmap_t pending_not_retrieved;

	CHECK(vm_locked.vm != NULL);
	struct notifications *notifications =
		vm_get_notifications(vm_locked, is_from_vm);

	if (*info_get_state == FULL) {
		return;
	}

	CHECK(*ids_count <= ids_max_count);
	CHECK(*lists_count <= ids_max_count);

	pending_not_retrieved = notifications->global.pending &
				~notifications->global.info_get_retrieved;

	if (pending_not_retrieved != 0U && *info_get_state == INIT) {
		/*
		 * If the state is INIT, no list has been created for the given
		 * VM ID yet, which also means that global notifications are
		 * not yet represented in the list.
		 */
		if (*ids_count == ids_max_count) {
			*info_get_state = FULL;
			return;
		}

		*info_get_state = INSERTING;

		(*lists_count)++;
		ids[*ids_count] = vm_locked.vm->id;
		++(*ids_count);
	}

	notifications->global.info_get_retrieved |= pending_not_retrieved;

	for (ffa_vcpu_count_t i = 0; i < vm_locked.vm->vcpu_count; i++) {
		/*
		 * Include the vCPU ID of per-vCPU notifications.
		 */
		pending_not_retrieved =
			notifications->per_vcpu[i].pending &
			~notifications->per_vcpu[i].info_get_retrieved;

		if (pending_not_retrieved == 0U) {
			continue;
		}

		switch (*info_get_state) {
		case INIT:
		case STARTING_NEW:
			/*
			 * At this iteration two IDs need to be added: the VM ID
			 * and the vCPU ID. If there is no space for both,
			 * change state and return.
			 */
			if (ids_max_count - *ids_count < 2) {
				*info_get_state = FULL;
				return;
			}

			ids[*ids_count] = vm_locked.vm->id;
			++(*ids_count);

			/* Insert the vCPU ID. */
			ids[*ids_count] = i;
			++(*ids_count);

			++lists_sizes[*lists_count];
			++(*lists_count);

			*info_get_state = INSERTING;
			break;
		case INSERTING:
			if (*ids_count == ids_max_count) {
				*info_get_state = FULL;
				return;
			}

			/* Insert the vCPU ID. */
			ids[*ids_count] = i;
			(*ids_count)++;

			/* Increment the size of the current list. */
			++lists_sizes[*lists_count - 1];

			if (lists_sizes[*lists_count - 1] == 3) {
				*info_get_state = STARTING_NEW;
			}
			break;
		default:
			panic("Notification info get action error!!\n");
		}

		notifications->per_vcpu[i].info_get_retrieved |=
			pending_not_retrieved;
	}
}

/**
 * Gets the info of all the VM's pending notifications.
 * Returns true if the list is full and there are still notifications pending.
 */
bool vm_notifications_info_get(struct vm_locked vm_locked, uint16_t *ids,
			       uint32_t *ids_count, uint32_t *lists_sizes,
			       uint32_t *lists_count,
			       const uint32_t ids_max_count)
{
	enum notifications_info_get_state current_state = INIT;

	/* Get the info of pending notifications from SPs. */
	vm_notifications_info_get_pending(vm_locked, false, ids, ids_count,
					  lists_sizes, lists_count,
					  ids_max_count, &current_state);

	/* Get the info of pending notifications from VMs. */
	vm_notifications_info_get_pending(vm_locked, true, ids, ids_count,
					  lists_sizes, lists_count,
					  ids_max_count, &current_state);

	/*
	 * The state transitions to FULL when trying to insert a new ID into the
	 * list and there is no more space. This means there are notifications
	 * pending whose info was not retrieved.
	 */
	return current_state == FULL;
}