/*
 * Copyright 2018 The Hafnium Authors.
 *
 * Use of this source code is governed by a BSD-style
 * license that can be found in the LICENSE file or at
 * https://opensource.org/licenses/BSD-3-Clause.
 */

#include "hf/vm.h"

#include "hf/api.h"
#include "hf/assert.h"
#include "hf/check.h"
#include "hf/cpu.h"
#include "hf/dlog.h"
#include "hf/ffa.h"
#include "hf/layout.h"
#include "hf/plat/iommu.h"
#include "hf/std.h"

#include "vmapi/hf/call.h"

static struct vm vms[MAX_VMS];
static struct vm other_world;
static ffa_vm_count_t vm_count;
static struct vm *first_boot_vm;

/**
 * Counters on the status of notifications in the system. They help to improve
 * the information retrieved by the receiver scheduler.
 */
static struct {
	/** Counts pending notifications. */
	uint32_t pending_count;
	/**
	 * Counts pending notifications whose information has been retrieved
	 * by the receiver scheduler.
	 */
	uint32_t info_get_retrieved_count;
	struct spinlock lock;
} all_notifications_state;

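/**
 * Initialises the partition's page table: a stage-1 table for an EL0
 * partition, or a stage-2 table for a VM.
 */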
static bool vm_init_mm(struct vm *vm, struct mpool *ppool)
{
	if (vm->el0_partition) {
		return mm_ptable_init(&vm->ptable, vm->id, MM_FLAG_STAGE1,
				      ppool);
	}
	return mm_vm_init(&vm->ptable, vm->id, ppool);
}

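/**
 * Initialises the VM with the given ID and vCPU count. Returns a pointer to
 * the VM on success, or NULL if its page table could not be initialised.
 */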
struct vm *vm_init(ffa_vm_id_t id, ffa_vcpu_count_t vcpu_count,
		   struct mpool *ppool, bool el0_partition)
{
	uint32_t i;
	struct vm *vm;

	if (id == HF_OTHER_WORLD_ID) {
		CHECK(el0_partition == false);
		vm = &other_world;
	} else {
		uint16_t vm_index = id - HF_VM_ID_OFFSET;

		CHECK(id >= HF_VM_ID_OFFSET);
		CHECK(vm_index < ARRAY_SIZE(vms));
		vm = &vms[vm_index];
	}

	memset_s(vm, sizeof(*vm), 0, sizeof(*vm));

	list_init(&vm->mailbox.waiter_list);
	list_init(&vm->mailbox.ready_list);
	sl_init(&vm->lock);

	vm->id = id;
	vm->vcpu_count = vcpu_count;
	vm->mailbox.state = MAILBOX_STATE_EMPTY;
	atomic_init(&vm->aborting, false);
	vm->el0_partition = el0_partition;

	if (!vm_init_mm(vm, ppool)) {
		return NULL;
	}

	/* Initialise waiter entries. */
	for (i = 0; i < MAX_VMS; i++) {
		vm->wait_entries[i].waiting_vm = vm;
		list_init(&vm->wait_entries[i].wait_links);
		list_init(&vm->wait_entries[i].ready_links);
	}

	/* Do basic initialization of vCPUs. */
	for (i = 0; i < vcpu_count; i++) {
		vcpu_init(vm_get_vcpu(vm, i), vm);
	}

	/* Basic initialization of the notifications structure. */
	vm_notifications_init_bindings(&vm->notifications.from_sp);
	vm_notifications_init_bindings(&vm->notifications.from_vm);

	return vm;
}

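/**
 * Initialises the next available slot in the static VM array, assigning it
 * the next sequential ID. Returns false if all slots are already taken or
 * initialisation fails.
 */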
bool vm_init_next(ffa_vcpu_count_t vcpu_count, struct mpool *ppool,
		  struct vm **new_vm, bool el0_partition)
{
	if (vm_count >= MAX_VMS) {
		return false;
	}

	/* Generate IDs based on an offset, as low IDs (e.g. 0) are reserved. */
	*new_vm = vm_init(vm_count + HF_VM_ID_OFFSET, vcpu_count, ppool,
			  el0_partition);
	if (*new_vm == NULL) {
		return false;
	}
	++vm_count;

	return true;
}

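/**
 * Returns the number of VMs initialised so far, not counting the other world
 * VM.
 */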
ffa_vm_count_t vm_get_count(void)
{
	return vm_count;
}

/**
 * Returns a pointer to the VM with the corresponding id.
 */
struct vm *vm_find(ffa_vm_id_t id)
{
	uint16_t index;

	if (id == HF_OTHER_WORLD_ID) {
		if (other_world.id == HF_OTHER_WORLD_ID) {
			return &other_world;
		}
		return NULL;
	}

	/* Check that this is not a reserved ID. */
	if (id < HF_VM_ID_OFFSET) {
		return NULL;
	}

	index = id - HF_VM_ID_OFFSET;

	return vm_find_index(index);
}

/**
 * Returns a locked instance of the VM with the corresponding id.
 */
struct vm_locked vm_find_locked(ffa_vm_id_t id)
{
	struct vm *vm = vm_find(id);

	if (vm != NULL) {
		return vm_lock(vm);
	}

	return (struct vm_locked){.vm = NULL};
}

/**
 * Returns a pointer to the VM at the specified index.
 */
struct vm *vm_find_index(uint16_t index)
{
	/* Ensure the VM is initialized. */
	if (index >= vm_count) {
		return NULL;
	}

	return &vms[index];
}

/**
 * Locks the given VM and returns a `vm_locked` wrapper holding the newly
 * locked VM.
 */
struct vm_locked vm_lock(struct vm *vm)
{
	struct vm_locked locked = {
		.vm = vm,
	};

	sl_lock(&vm->lock);

	return locked;
}

/**
 * Locks two VMs, ensuring that the locking order is according to the locks'
 * addresses.
 */
struct two_vm_locked vm_lock_both(struct vm *vm1, struct vm *vm2)
{
	struct two_vm_locked dual_lock;

	sl_lock_both(&vm1->lock, &vm2->lock);
	dual_lock.vm1.vm = vm1;
	dual_lock.vm2.vm = vm2;

	return dual_lock;
}

/**
 * Unlocks a VM previously locked with vm_lock, and updates `locked` to reflect
 * the fact that the VM is no longer locked.
 */
void vm_unlock(struct vm_locked *locked)
{
	sl_unlock(&locked->vm->lock);
	locked->vm = NULL;
}

/**
 * Gets the vCPU with the given index from the given VM.
 * This assumes the index is valid, i.e. less than vm->vcpu_count.
 */
struct vcpu *vm_get_vcpu(struct vm *vm, ffa_vcpu_index_t vcpu_index)
{
	CHECK(vcpu_index < vm->vcpu_count);
	return &vm->vcpus[vcpu_index];
}

/**
 * Gets `vm`'s wait entry for waiting on `for_vm`.
 */
struct wait_entry *vm_get_wait_entry(struct vm *vm, ffa_vm_id_t for_vm)
{
	uint16_t index;

	CHECK(for_vm >= HF_VM_ID_OFFSET);
	index = for_vm - HF_VM_ID_OFFSET;
	CHECK(index < MAX_VMS);

	return &vm->wait_entries[index];
}

/**
 * Gets the ID of the VM which the given VM's wait entry is for.
 */
ffa_vm_id_t vm_id_for_wait_entry(struct vm *vm, struct wait_entry *entry)
{
	uint16_t index = entry - vm->wait_entries;

	return index + HF_VM_ID_OFFSET;
}

/**
 * Returns whether the given VM ID represents an entity in the current world:
 * i.e. the hypervisor or a normal world VM when running in the normal world,
 * or the SPM or an SP when running in the secure world.
 */
bool vm_id_is_current_world(ffa_vm_id_t vm_id)
{
	return (vm_id & HF_VM_ID_WORLD_MASK) !=
	       (HF_OTHER_WORLD_ID & HF_VM_ID_WORLD_MASK);
}

/**
 * Map a range of addresses to the VM in both the MMU and the IOMMU.
 *
 * mm_vm_defrag should always be called after a series of page table updates,
 * whether they succeed or fail. This is because on failure extra page table
 * entries may have been allocated and then not used, while on success it may
 * be possible to compact the page table by merging several entries into a
 * block.
 *
 * Returns true on success, or false if the update failed and no changes were
 * made.
 */
bool vm_identity_map(struct vm_locked vm_locked, paddr_t begin, paddr_t end,
		     uint32_t mode, struct mpool *ppool, ipaddr_t *ipa)
{
	if (!vm_identity_prepare(vm_locked, begin, end, mode, ppool)) {
		return false;
	}

	vm_identity_commit(vm_locked, begin, end, mode, ppool, ipa);

	return true;
}

/**
 * Prepares the given VM for the given address mapping such that it will be
 * able to commit the change without failure.
 *
 * In particular, multiple calls to this function will result in the
 * corresponding calls to commit the changes succeeding.
 *
 * Returns true on success, or false if the update failed and no changes were
 * made.
 */
bool vm_identity_prepare(struct vm_locked vm_locked, paddr_t begin, paddr_t end,
			 uint32_t mode, struct mpool *ppool)
{
	if (vm_locked.vm->el0_partition) {
		return mm_identity_prepare(&vm_locked.vm->ptable, begin, end,
					   mode, ppool);
	}
	return mm_vm_identity_prepare(&vm_locked.vm->ptable, begin, end, mode,
				      ppool);
}

/**
 * Commits the given address mapping to the VM, assuming the operation cannot
 * fail. `vm_identity_prepare` must be used correctly before this to ensure
 * this condition.
 */
void vm_identity_commit(struct vm_locked vm_locked, paddr_t begin, paddr_t end,
			uint32_t mode, struct mpool *ppool, ipaddr_t *ipa)
{
	if (vm_locked.vm->el0_partition) {
		mm_identity_commit(&vm_locked.vm->ptable, begin, end, mode,
				   ppool);
		if (ipa != NULL) {
			/*
			 * EL0 partitions are modeled as lightweight VMs, to
			 * promote code reuse. The statement below returns the
			 * mapped PA as an IPA; however, for an EL0 partition,
			 * this is really a VA.
			 */
			*ipa = ipa_from_pa(begin);
		}
	} else {
		mm_vm_identity_commit(&vm_locked.vm->ptable, begin, end, mode,
				      ppool, ipa);
	}
	plat_iommu_identity_map(vm_locked, begin, end, mode);
}

/**
 * Unmap a range of addresses from the VM.
 *
 * Returns true on success, or false if the update failed and no changes were
 * made.
 */
bool vm_unmap(struct vm_locked vm_locked, paddr_t begin, paddr_t end,
	      struct mpool *ppool)
{
	uint32_t mode = MM_MODE_UNMAPPED_MASK;

	return vm_identity_map(vm_locked, begin, end, mode, ppool, NULL);
}

/**
 * Defrag page tables for an EL0 partition or for a VM.
 */
void vm_ptable_defrag(struct vm_locked vm_locked, struct mpool *ppool)
{
	if (vm_locked.vm->el0_partition) {
		mm_stage1_defrag(&vm_locked.vm->ptable, ppool);
	} else {
		mm_vm_defrag(&vm_locked.vm->ptable, ppool);
	}
}

/**
 * Unmaps the hypervisor pages from the given page table.
 */
bool vm_unmap_hypervisor(struct vm_locked vm_locked, struct mpool *ppool)
{
	/* TODO: If we add pages dynamically, they must be included here too. */
	return vm_unmap(vm_locked, layout_text_begin(), layout_text_end(),
			ppool) &&
	       vm_unmap(vm_locked, layout_rodata_begin(), layout_rodata_end(),
			ppool) &&
	       vm_unmap(vm_locked, layout_data_begin(), layout_data_end(),
			ppool) &&
	       vm_unmap(vm_locked, layout_stacks_begin(), layout_stacks_end(),
			ppool);
}

/**
 * Gets the first partition to boot, according to the boot protocol from the
 * FF-A spec.
 */
struct vm *vm_get_first_boot(void)
{
	return first_boot_vm;
}

/**
 * Inserts the VM into the boot list, which is sorted by the `boot_order`
 * field of the vm structure and rooted at `first_boot_vm`.
 */
void vm_update_boot(struct vm *vm)
{
	struct vm *current = NULL;
	struct vm *previous = NULL;

	if (first_boot_vm == NULL) {
		first_boot_vm = vm;
		return;
	}

	current = first_boot_vm;

	while (current != NULL && current->boot_order <= vm->boot_order) {
		previous = current;
		current = current->next_boot;
	}

	if (previous != NULL) {
		previous->next_boot = vm;
	} else {
		first_boot_vm = vm;
	}

	vm->next_boot = current;
}

/**
 * Gets the mode of the given range of IPAs or VAs if they are mapped with the
 * same mode.
 *
 * Returns true if the range is mapped with the same mode and false otherwise.
 * The wrapper calls the appropriate mm function depending on whether the
 * partition is a VM or an EL0 partition.
 */
bool vm_mem_get_mode(struct vm_locked vm_locked, ipaddr_t begin, ipaddr_t end,
		     uint32_t *mode)
{
	if (vm_locked.vm->el0_partition) {
		return mm_get_mode(&vm_locked.vm->ptable,
				   va_from_pa(pa_from_ipa(begin)),
				   va_from_pa(pa_from_ipa(end)), mode);
	}
	return mm_vm_get_mode(&vm_locked.vm->ptable, begin, end, mode);
}

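/**
 * Returns the VM's notifications structure for the given sender world:
 * notifications coming from VMs or from SPs.
 */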
static struct notifications *vm_get_notifications(struct vm_locked vm_locked,
						  bool is_from_vm)
{
	return is_from_vm ? &vm_locked.vm->notifications.from_vm
			  : &vm_locked.vm->notifications.from_sp;
}

/*
 * Initializes the notifications structure.
 */
void vm_notifications_init_bindings(struct notifications *notifications)
{
	for (uint32_t i = 0U; i < MAX_FFA_NOTIFICATIONS; i++) {
		notifications->bindings_sender_id[i] = HF_INVALID_VM_ID;
	}
}

/**
 * Checks if there are pending notifications.
 */
bool vm_are_notifications_pending(struct vm_locked vm_locked, bool from_vm,
				  ffa_notifications_bitmap_t notifications)
{
	struct notifications *to_check;

	CHECK(vm_locked.vm != NULL);

	to_check = vm_get_notifications(vm_locked, from_vm);

	/* Check if there are pending per-vCPU notifications. */
	for (uint32_t i = 0U; i < MAX_CPUS; i++) {
		if ((to_check->per_vcpu[i].pending & notifications) != 0U) {
			return true;
		}
	}

	/* Check if there are pending global notifications. */
	return (to_check->global.pending & notifications) != 0U;
}

/**
 * Checks if there are pending global notifications, either from SPs or from
 * VMs.
 */
bool vm_are_global_notifications_pending(struct vm_locked vm_locked)
{
	return vm_get_notifications(vm_locked, true)->global.pending != 0ULL ||
	       vm_get_notifications(vm_locked, false)->global.pending !=
		       0ULL ||
	       vm_locked.vm->notifications.framework.pending != 0ULL;
}

/**
 * Checks if there are per-vCPU notifications pending on a specific vCPU,
 * either from SPs or from VMs.
 */
bool vm_are_per_vcpu_notifications_pending(struct vm_locked vm_locked,
					   ffa_vcpu_index_t vcpu_id)
{
	CHECK(vcpu_id < MAX_CPUS);

	return vm_get_notifications(vm_locked, true)
			       ->per_vcpu[vcpu_id]
			       .pending != 0ULL ||
	       vm_get_notifications(vm_locked, false)
			       ->per_vcpu[vcpu_id]
			       .pending != 0ULL;
}

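/**
 * Checks if notifications are enabled for the given VM.
 */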
bool vm_are_notifications_enabled(struct vm *vm)
{
	return vm->notifications.enabled;
}

bool vm_locked_are_notifications_enabled(struct vm_locked vm_locked)
{
	return vm_are_notifications_enabled(vm_locked.vm);
}

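/**
 * Checks whether bit `i` is set in the `notifications` bitmap.
 */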
static bool vm_is_notification_bit_set(ffa_notifications_bitmap_t notifications,
				       uint32_t i)
{
	return (notifications & FFA_NOTIFICATION_MASK(i)) != 0U;
}

/**
 * Helper to update the counters of the global notifications state. The
 * counter is updated in increments or decrements of 1 or -1, respectively.
 */
static void vm_notifications_global_state_count_update(
	ffa_notifications_bitmap_t bitmap, uint32_t *counter, int inc)
{
	assert(inc == 1 || inc == -1);

	sl_lock(&all_notifications_state.lock);

	for (uint32_t i = 0; i < MAX_FFA_NOTIFICATIONS; i++) {
		if (vm_is_notification_bit_set(bitmap, i)) {
			CHECK((inc > 0 && *counter < UINT32_MAX) ||
			      (inc < 0 && *counter > 0));
			*counter += inc;
		}
	}

	sl_unlock(&all_notifications_state.lock);
}

/**
 * Helper function to increment the pending notifications count based on a
 * bitmap passed as argument.
 * To be used when setting notifications for a given VM.
 */
static void vm_notifications_pending_count_add(
	ffa_notifications_bitmap_t to_add)
{
	vm_notifications_global_state_count_update(
		to_add, &all_notifications_state.pending_count, 1);
}

/**
 * Helper function to decrement the pending notifications count.
 * To be used when getting the receiver's pending notifications.
 */
static void vm_notifications_pending_count_sub(
	ffa_notifications_bitmap_t to_sub)
{
	vm_notifications_global_state_count_update(
		to_sub, &all_notifications_state.pending_count, -1);
}

/**
 * Helper function to count the notifications whose information has been
 * retrieved by the scheduler of the system, and are still pending.
 */
static void vm_notifications_info_get_retrieved_count_add(
	ffa_notifications_bitmap_t to_add)
{
	vm_notifications_global_state_count_update(
		to_add, &all_notifications_state.info_get_retrieved_count, 1);
}

/**
 * Helper function to subtract the notifications that the receiver is getting
 * and whose information has been retrieved by the receiver scheduler.
 */
static void vm_notifications_info_get_retrieved_count_sub(
	ffa_notifications_bitmap_t to_sub)
{
	vm_notifications_global_state_count_update(
		to_sub, &all_notifications_state.info_get_retrieved_count, -1);
}

/**
 * Helper function to determine if there are notifications pending whose info
 * hasn't been retrieved by the receiver scheduler.
 */
bool vm_notifications_pending_not_retrieved_by_scheduler(void)
{
	bool ret;

	sl_lock(&all_notifications_state.lock);
	ret = all_notifications_state.pending_count >
	      all_notifications_state.info_get_retrieved_count;
	sl_unlock(&all_notifications_state.lock);

	return ret;
}

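/**
 * Checks if the global count of pending notifications is zero.
 */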
bool vm_is_notifications_pending_count_zero(void)
{
	bool ret;

	sl_lock(&all_notifications_state.lock);
	ret = all_notifications_state.pending_count == 0;
	sl_unlock(&all_notifications_state.lock);

	return ret;
}

/**
 * Checks that all provided notifications are bound to the specified sender,
 * and are per-vCPU or global, as specified.
 */
bool vm_notifications_validate_binding(struct vm_locked vm_locked,
				       bool is_from_vm, ffa_vm_id_t sender_id,
				       ffa_notifications_bitmap_t notifications,
				       bool is_per_vcpu)
{
	return vm_notifications_validate_bound_sender(
		       vm_locked, is_from_vm, sender_id, notifications) &&
	       vm_notifications_validate_per_vcpu(vm_locked, is_from_vm,
						  is_per_vcpu, notifications);
}

/**
 * Updates the bindings information in the notifications structure for the
 * specified notifications.
 */
void vm_notifications_update_bindings(struct vm_locked vm_locked,
				      bool is_from_vm, ffa_vm_id_t sender_id,
				      ffa_notifications_bitmap_t notifications,
				      bool is_per_vcpu)
{
	CHECK(vm_locked.vm != NULL);
	struct notifications *to_update =
		vm_get_notifications(vm_locked, is_from_vm);

	for (uint32_t i = 0; i < MAX_FFA_NOTIFICATIONS; i++) {
		if (vm_is_notification_bit_set(notifications, i)) {
			to_update->bindings_sender_id[i] = sender_id;
		}
	}

	/*
	 * Set the bits for the notifications if they are per-vCPU, else clear
	 * them, as the notifications are global.
	 */
	if (is_per_vcpu) {
		to_update->bindings_per_vcpu |= notifications;
	} else {
		to_update->bindings_per_vcpu &= ~notifications;
	}
}

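/**
 * Checks that each notification in the bitmap is bound to the given sender.
 */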
bool vm_notifications_validate_bound_sender(
	struct vm_locked vm_locked, bool is_from_vm, ffa_vm_id_t sender_id,
	ffa_notifications_bitmap_t notifications)
{
	CHECK(vm_locked.vm != NULL);
	struct notifications *to_check =
		vm_get_notifications(vm_locked, is_from_vm);

	for (uint32_t i = 0; i < MAX_FFA_NOTIFICATIONS; i++) {
		if (vm_is_notification_bit_set(notifications, i) &&
		    to_check->bindings_sender_id[i] != sender_id) {
			return false;
		}
	}

	return true;
}

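/**
 * Checks that the notifications in the bitmap are all bound as per-vCPU when
 * `is_per_vcpu` is set, or all bound as global otherwise.
 */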
bool vm_notifications_validate_per_vcpu(struct vm_locked vm_locked,
					bool is_from_vm, bool is_per_vcpu,
					ffa_notifications_bitmap_t notif)
{
	CHECK(vm_locked.vm != NULL);
	struct notifications *to_check =
		vm_get_notifications(vm_locked, is_from_vm);

	return is_per_vcpu ? (~to_check->bindings_per_vcpu & notif) == 0U
			   : (to_check->bindings_per_vcpu & notif) == 0U;
}

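/**
 * Marks the given notifications as pending in `state` and updates the global
 * pending notifications count.
 */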
static void vm_notifications_state_set(struct notifications_state *state,
				       ffa_notifications_bitmap_t notifications)
{
	state->pending |= notifications;
	vm_notifications_pending_count_add(notifications);
}

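/**
 * Sets notifications pending for a partition, in either the global state or
 * the state of the vCPU given by `vcpu_id`.
 */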
void vm_notifications_partition_set_pending(
	struct vm_locked vm_locked, bool is_from_vm,
	ffa_notifications_bitmap_t notifications, ffa_vcpu_index_t vcpu_id,
	bool is_per_vcpu)
{
	struct notifications *to_set;
	struct notifications_state *state;

	CHECK(vm_locked.vm != NULL);
	CHECK(vcpu_id < MAX_CPUS);

	to_set = vm_get_notifications(vm_locked, is_from_vm);

	state = is_per_vcpu ? &to_set->per_vcpu[vcpu_id] : &to_set->global;

	vm_notifications_state_set(state, notifications);
}

/**
 * Set pending framework notifications.
 */
void vm_notifications_framework_set_pending(
	struct vm_locked vm_locked, ffa_notifications_bitmap_t notifications)
{
	CHECK(vm_locked.vm != NULL);
	assert(is_ffa_spm_buffer_full_notification(notifications) ||
	       is_ffa_hyp_buffer_full_notification(notifications));
	vm_notifications_state_set(&vm_locked.vm->notifications.framework,
				   notifications);
}

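/**
 * Returns and clears the pending notifications in `state`, updating the
 * global counts of pending and info-get-retrieved notifications.
 */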
static ffa_notifications_bitmap_t vm_notifications_state_get_pending(
	struct notifications_state *state)
{
	ffa_notifications_bitmap_t to_ret;
	ffa_notifications_bitmap_t pending_and_info_get_retrieved;

	assert(state != NULL);

	to_ret = state->pending;

	/* Update count of currently pending notifications in the system. */
	vm_notifications_pending_count_sub(state->pending);

	/*
	 * If the notifications the receiver is getting have been retrieved by
	 * the receiver scheduler, decrement them from the respective count.
	 */
	pending_and_info_get_retrieved =
		state->pending & state->info_get_retrieved;

	if (pending_and_info_get_retrieved != 0) {
		vm_notifications_info_get_retrieved_count_sub(
			pending_and_info_get_retrieved);
	}

	state->pending = 0U;
	state->info_get_retrieved = 0U;

	return to_ret;
}

/**
 * Get global and per-vCPU notifications for the given vCPU ID.
 */
ffa_notifications_bitmap_t vm_notifications_partition_get_pending(
	struct vm_locked vm_locked, bool is_from_vm, ffa_vcpu_index_t vcpu_id)
{
	ffa_notifications_bitmap_t to_ret;
	struct notifications *to_get;

	assert(vm_locked.vm != NULL);
	to_get = vm_get_notifications(vm_locked, is_from_vm);
	assert(vcpu_id < MAX_CPUS);

	to_ret = vm_notifications_state_get_pending(&to_get->global);
	to_ret |=
		vm_notifications_state_get_pending(&to_get->per_vcpu[vcpu_id]);

	return to_ret;
}

/**
 * Get pending framework notifications.
 */
ffa_notifications_bitmap_t vm_notifications_framework_get_pending(
	struct vm_locked vm_locked)
{
	struct vm *vm = vm_locked.vm;
	ffa_notifications_bitmap_t framework;
	bool rx_buffer_full;

	assert(vm != NULL);

	framework = vm_notifications_state_get_pending(
		&vm->notifications.framework);

	/*
	 * By retrieving an RX buffer full notification the buffer state
	 * transitions from RECEIVED to READ; the VM is now the RX buffer
	 * owner, can read it and is allowed to release it.
	 */
	rx_buffer_full = is_ffa_spm_buffer_full_notification(framework) ||
			 is_ffa_hyp_buffer_full_notification(framework);
	if (rx_buffer_full && vm->mailbox.state == MAILBOX_STATE_RECEIVED) {
		vm->mailbox.state = MAILBOX_STATE_READ;
	}

	return framework;
}

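/**
 * Adds the information of the pending notifications in `state` to the lists
 * of IDs reported to the receiver scheduler, as used by
 * FFA_NOTIFICATION_INFO_GET: appends the VM ID (and the vCPU ID for per-vCPU
 * notifications) to `ids`, updating the lists' sizes and count accordingly.
 */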
static void vm_notifications_state_info_get(
	struct notifications_state *state, ffa_vm_id_t vm_id, bool is_per_vcpu,
	ffa_vcpu_index_t vcpu_id, uint16_t *ids, uint32_t *ids_count,
	uint32_t *lists_sizes, uint32_t *lists_count,
	const uint32_t ids_max_count,
	enum notifications_info_get_state *info_get_state)
{
	ffa_notifications_bitmap_t pending_not_retrieved;

	CHECK(*ids_count <= ids_max_count);
	CHECK(*lists_count <= ids_max_count);

	if (*info_get_state == FULL) {
		return;
	}

	pending_not_retrieved = state->pending & ~state->info_get_retrieved;

	/* No notifications pending that haven't been retrieved. */
	if (pending_not_retrieved == 0U) {
		return;
	}

	if (*ids_count == ids_max_count) {
		*info_get_state = FULL;
		return;
	}

	switch (*info_get_state) {
	case INIT:
	case STARTING_NEW:
		/*
		 * At this iteration two IDs are to be added: the VM ID and
		 * the vCPU ID. If there is no space, change state and
		 * terminate the function.
		 */
		if (is_per_vcpu && ids_max_count - *ids_count < 2) {
			*info_get_state = FULL;
			return;
		}

		*info_get_state = INSERTING;
		ids[*ids_count] = vm_id;
		++(*ids_count);

		if (is_per_vcpu) {
			/* Insert vCPU ID. */
			ids[*ids_count] = vcpu_id;
			++(*ids_count);
			++lists_sizes[*lists_count];
		}

		++(*lists_count);
		break;
	case INSERTING:
		/* For per-vCPU notifications only. */
		if (!is_per_vcpu) {
			break;
		}

		/* Insert vCPU ID. */
		ids[*ids_count] = vcpu_id;
		(*ids_count)++;
		/* Increment the respective list size. */
		++lists_sizes[*lists_count - 1];

		if (lists_sizes[*lists_count - 1] == 3) {
			*info_get_state = STARTING_NEW;
		}
		break;
	default:
		panic("Notification info get action error!!\n");
	}

	state->info_get_retrieved |= pending_not_retrieved;

	vm_notifications_info_get_retrieved_count_add(pending_not_retrieved);
}

/**
 * Gets pending notifications' information to return to the receiver
 * scheduler.
 */
void vm_notifications_info_get_pending(
	struct vm_locked vm_locked, bool is_from_vm, uint16_t *ids,
	uint32_t *ids_count, uint32_t *lists_sizes, uint32_t *lists_count,
	const uint32_t ids_max_count,
	enum notifications_info_get_state *info_get_state)
{
	struct notifications *notifications;

	CHECK(vm_locked.vm != NULL);

	notifications = vm_get_notifications(vm_locked, is_from_vm);

	/*
	 * Perform the info get for global notifications, before doing it for
	 * per-vCPU notifications.
	 */
	vm_notifications_state_info_get(&notifications->global,
					vm_locked.vm->id, false, 0, ids,
					ids_count, lists_sizes, lists_count,
					ids_max_count, info_get_state);

	for (ffa_vcpu_count_t i = 0; i < vm_locked.vm->vcpu_count; i++) {
		vm_notifications_state_info_get(
			&notifications->per_vcpu[i], vm_locked.vm->id, true, i,
			ids, ids_count, lists_sizes, lists_count, ids_max_count,
			info_get_state);
	}
}

/**
 * Gets all info from the VM's pending notifications.
 * Returns true if the list is full, and there are more notifications pending.
 */
bool vm_notifications_info_get(struct vm_locked vm_locked, uint16_t *ids,
			       uint32_t *ids_count, uint32_t *lists_sizes,
			       uint32_t *lists_count,
			       const uint32_t ids_max_count)
{
	enum notifications_info_get_state current_state = INIT;

	/* Get info of pending notifications from the framework. */
	vm_notifications_state_info_get(&vm_locked.vm->notifications.framework,
					vm_locked.vm->id, false, 0, ids,
					ids_count, lists_sizes, lists_count,
					ids_max_count, &current_state);

	/* Get info of pending notifications from SPs. */
	vm_notifications_info_get_pending(vm_locked, false, ids, ids_count,
					  lists_sizes, lists_count,
					  ids_max_count, &current_state);

	/* Get info of pending notifications from VMs. */
	vm_notifications_info_get_pending(vm_locked, true, ids, ids_count,
					  lists_sizes, lists_count,
					  ids_max_count, &current_state);

	/*
	 * The state transitions to FULL when trying to insert a new ID in the
	 * list and there is no more space. This means there are notifications
	 * pending whose info has not been retrieved.
	 */
	return current_state == FULL;
}

/**
 * Checks the VM's support for the given messaging method.
 */
bool vm_supports_messaging_method(struct vm *vm, uint8_t msg_method)
{
	return (vm->messaging_method & msg_method) != 0;
}

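/**
 * Tracks whether the notifications pending interrupt (NPI) has been injected
 * for the VM.
 */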
void vm_notifications_set_npi_injected(struct vm_locked vm_locked,
				       bool npi_injected)
{
	vm_locked.vm->notifications.npi_injected = npi_injected;
}

bool vm_notifications_is_npi_injected(struct vm_locked vm_locked)
{
	return vm_locked.vm->notifications.npi_injected;
}

/**
 * Sets the designated GP register through which the VM expects to receive the
 * boot info's address.
 */
void vm_set_boot_info_gp_reg(struct vm *vm, struct vcpu *vcpu)
{
	if (!vm->initialized && vm->boot_info.blob_addr.ipa != 0U) {
		arch_regs_set_gp_reg(&vcpu->regs,
				     ipa_addr(vm->boot_info.blob_addr),
				     vm->boot_info.gp_register_num);
	}
}