/*
 * Copyright 2018 The Hafnium Authors.
 *
 * Use of this source code is governed by a BSD-style
 * license that can be found in the LICENSE file or at
 * https://opensource.org/licenses/BSD-3-Clause.
 */

#include "hf/vm.h"

#include "hf/api.h"
#include "hf/assert.h"
#include "hf/check.h"
#include "hf/cpu.h"
#include "hf/dlog.h"
#include "hf/ffa.h"
#include "hf/layout.h"
#include "hf/plat/iommu.h"
#include "hf/std.h"

#include "vmapi/hf/call.h"

static struct vm vms[MAX_VMS];
static struct vm other_world;
static ffa_vm_count_t vm_count;
static struct vm *first_boot_vm;

/**
 * Counters tracking the status of notifications in the system. They help to
 * improve the information retrieved by the receiver scheduler.
 */
static struct {
	/** Counts pending notifications. */
	uint32_t pending_count;
	/**
	 * Counts pending notifications whose information has been retrieved
	 * by the receiver scheduler.
	 */
	uint32_t info_get_retrieved_count;
	struct spinlock lock;
} all_notifications_state;

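/**
 * Initialises the partition's page table: a stage-1 table for an EL0
 * partition, or a stage-2 table for a VM.
 */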
static bool vm_init_mm(struct vm *vm, struct mpool *ppool)
{
	if (vm->el0_partition) {
		return mm_ptable_init(&vm->ptable, vm->id, MM_FLAG_STAGE1,
				      ppool);
	}
	return mm_vm_init(&vm->ptable, vm->id, ppool);
}

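/**
 * Initialises a VM structure for the given ID and vCPU count: clears the
 * structure, allocates its vCPUs from `ppool`, and sets up its page table,
 * mailbox, wait entries and notification bindings. Returns NULL if the page
 * table cannot be initialised.
 */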
struct vm *vm_init(ffa_vm_id_t id, ffa_vcpu_count_t vcpu_count,
		   struct mpool *ppool, bool el0_partition)
{
	uint32_t i;
	struct vm *vm;
	size_t vcpu_ppool_entries = (align_up(sizeof(struct vcpu) * vcpu_count,
					      MM_PPOOL_ENTRY_SIZE) /
				     MM_PPOOL_ENTRY_SIZE);

	if (id == HF_OTHER_WORLD_ID) {
		CHECK(el0_partition == false);
		vm = &other_world;
	} else {
		uint16_t vm_index = id - HF_VM_ID_OFFSET;

		CHECK(id >= HF_VM_ID_OFFSET);
		CHECK(vm_index < ARRAY_SIZE(vms));
		vm = &vms[vm_index];
	}

	memset_s(vm, sizeof(*vm), 0, sizeof(*vm));

	list_init(&vm->mailbox.waiter_list);
	list_init(&vm->mailbox.ready_list);
	sl_init(&vm->lock);

	vm->id = id;
	vm->vcpu_count = vcpu_count;

	vm->vcpus = (struct vcpu *)mpool_alloc_contiguous(
		ppool, vcpu_ppool_entries, 1);
	CHECK(vm->vcpus != NULL);
	vm->mailbox.state = MAILBOX_STATE_EMPTY;
	atomic_init(&vm->aborting, false);
	vm->el0_partition = el0_partition;

	if (!vm_init_mm(vm, ppool)) {
		return NULL;
	}

	/* Initialise waiter entries. */
	for (i = 0; i < MAX_VMS; i++) {
		vm->wait_entries[i].waiting_vm = vm;
		list_init(&vm->wait_entries[i].wait_links);
		list_init(&vm->wait_entries[i].ready_links);
	}

	/* Do basic initialization of vCPUs. */
	for (i = 0; i < vcpu_count; i++) {
		vcpu_init(vm_get_vcpu(vm, i), vm);
	}

	/* Basic initialization of the notifications structure. */
	vm_notifications_init_bindings(&vm->notifications.from_sp);
	vm_notifications_init_bindings(&vm->notifications.from_vm);

	return vm;
}

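/**
 * Initialises the next available VM slot, assigning it the next sequential ID
 * after the reserved range. Returns false if the maximum number of VMs has
 * already been reached or if initialisation fails.
 */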
bool vm_init_next(ffa_vcpu_count_t vcpu_count, struct mpool *ppool,
		  struct vm **new_vm, bool el0_partition)
{
	if (vm_count >= MAX_VMS) {
		return false;
	}

	/* Generate IDs based on an offset, as low IDs (e.g. 0) are reserved. */
	*new_vm = vm_init(vm_count + HF_VM_ID_OFFSET, vcpu_count, ppool,
			  el0_partition);
	if (*new_vm == NULL) {
		return false;
	}
	++vm_count;

	return true;
}

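/**
 * Returns the number of VMs initialised so far.
 */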
ffa_vm_count_t vm_get_count(void)
{
	return vm_count;
}

/**
 * Returns a pointer to the VM with the corresponding id.
 */
struct vm *vm_find(ffa_vm_id_t id)
{
	uint16_t index;

	if (id == HF_OTHER_WORLD_ID) {
		if (other_world.id == HF_OTHER_WORLD_ID) {
			return &other_world;
		}
		return NULL;
	}

	/* Check that this is not a reserved ID. */
	if (id < HF_VM_ID_OFFSET) {
		return NULL;
	}

	index = id - HF_VM_ID_OFFSET;

	return vm_find_index(index);
}

/**
 * Returns a locked instance of the VM with the corresponding id.
 */
struct vm_locked vm_find_locked(ffa_vm_id_t id)
{
	struct vm *vm = vm_find(id);

	if (vm != NULL) {
		return vm_lock(vm);
	}

	return (struct vm_locked){.vm = NULL};
}

/**
 * Returns a pointer to the VM at the specified index.
 */
struct vm *vm_find_index(uint16_t index)
{
	/* Ensure the VM is initialized. */
	if (index >= vm_count) {
		return NULL;
	}

	return &vms[index];
}

/**
 * Locks the given VM and returns the locked instance.
 */
struct vm_locked vm_lock(struct vm *vm)
{
	struct vm_locked locked = {
		.vm = vm,
	};

	sl_lock(&vm->lock);

	return locked;
}

/**
 * Locks two VMs ensuring that the locking order is according to the locks'
 * addresses.
 */
struct two_vm_locked vm_lock_both(struct vm *vm1, struct vm *vm2)
{
	struct two_vm_locked dual_lock;

	sl_lock_both(&vm1->lock, &vm2->lock);
	dual_lock.vm1.vm = vm1;
	dual_lock.vm2.vm = vm2;

	return dual_lock;
}

/**
 * Unlocks a VM previously locked with vm_lock, and updates `locked` to reflect
 * the fact that the VM is no longer locked.
 */
void vm_unlock(struct vm_locked *locked)
{
	sl_unlock(&locked->vm->lock);
	locked->vm = NULL;
}

/**
 * Gets the vCPU with the given index from the given VM.
 * This assumes the index is valid, i.e. less than vm->vcpu_count.
 */
struct vcpu *vm_get_vcpu(struct vm *vm, ffa_vcpu_index_t vcpu_index)
{
	CHECK(vcpu_index < vm->vcpu_count);
	return &vm->vcpus[vcpu_index];
}

/**
 * Gets `vm`'s wait entry for waiting on `for_vm`.
 */
struct wait_entry *vm_get_wait_entry(struct vm *vm, ffa_vm_id_t for_vm)
{
	uint16_t index;

	CHECK(for_vm >= HF_VM_ID_OFFSET);
	index = for_vm - HF_VM_ID_OFFSET;
	CHECK(index < MAX_VMS);

	return &vm->wait_entries[index];
}

/**
 * Gets the ID of the VM which the given VM's wait entry is for.
 */
ffa_vm_id_t vm_id_for_wait_entry(struct vm *vm, struct wait_entry *entry)
{
	uint16_t index = entry - vm->wait_entries;

	return index + HF_VM_ID_OFFSET;
}

/**
 * Returns whether the given VM ID represents an entity in the current world:
 * i.e. the hypervisor or a normal world VM when running in the normal world,
 * or the SPM or an SP when running in the secure world.
 */
bool vm_id_is_current_world(ffa_vm_id_t vm_id)
{
	return (vm_id & HF_VM_ID_WORLD_MASK) !=
	       (HF_OTHER_WORLD_ID & HF_VM_ID_WORLD_MASK);
}

/**
 * Map a range of addresses to the VM in both the MMU and the IOMMU.
 *
 * mm_vm_defrag should always be called after a series of page table updates,
 * whether they succeed or fail. This is because on failure extra page table
 * entries may have been allocated and then not used, while on success it may
 * be possible to compact the page table by merging several entries into a
 * block.
 *
 * Returns true on success, or false if the update failed and no changes were
 * made.
 */
bool vm_identity_map(struct vm_locked vm_locked, paddr_t begin, paddr_t end,
		     uint32_t mode, struct mpool *ppool, ipaddr_t *ipa)
{
	if (!vm_identity_prepare(vm_locked, begin, end, mode, ppool)) {
		return false;
	}

	vm_identity_commit(vm_locked, begin, end, mode, ppool, ipa);

	return true;
}

/**
 * Prepares the given VM for the given address mapping such that it will be
 * able to commit the change without failure.
 *
 * In particular, multiple calls to this function will result in the
 * corresponding calls to commit the changes succeeding.
 *
 * Returns true on success, or false if the update failed and no changes were
 * made.
 */
bool vm_identity_prepare(struct vm_locked vm_locked, paddr_t begin, paddr_t end,
			 uint32_t mode, struct mpool *ppool)
{
	if (vm_locked.vm->el0_partition) {
		return mm_identity_prepare(&vm_locked.vm->ptable, begin, end,
					   mode, ppool);
	}
	return mm_vm_identity_prepare(&vm_locked.vm->ptable, begin, end, mode,
				      ppool);
}

/**
 * Commits the given address mapping to the VM assuming the operation cannot
 * fail. `vm_identity_prepare` must be used correctly before this to ensure
 * this condition.
 */
void vm_identity_commit(struct vm_locked vm_locked, paddr_t begin, paddr_t end,
			uint32_t mode, struct mpool *ppool, ipaddr_t *ipa)
{
	if (vm_locked.vm->el0_partition) {
		mm_identity_commit(&vm_locked.vm->ptable, begin, end, mode,
				   ppool);
		if (ipa != NULL) {
			/*
			 * EL0 partitions are modeled as lightweight VMs, to
			 * promote code reuse. The below statement returns the
			 * mapped PA as an IPA; however, for an EL0 partition,
			 * this is really a VA.
			 */
			*ipa = ipa_from_pa(begin);
		}
	} else {
		mm_vm_identity_commit(&vm_locked.vm->ptable, begin, end, mode,
				      ppool, ipa);
	}
	plat_iommu_identity_map(vm_locked, begin, end, mode);
}

/**
 * Unmap a range of addresses from the VM.
 *
 * Returns true on success, or false if the update failed and no changes were
 * made.
 */
bool vm_unmap(struct vm_locked vm_locked, paddr_t begin, paddr_t end,
	      struct mpool *ppool)
{
	uint32_t mode = MM_MODE_UNMAPPED_MASK;

	return vm_identity_map(vm_locked, begin, end, mode, ppool, NULL);
}

/**
 * Defrag page tables for an EL0 partition or for a VM.
 */
void vm_ptable_defrag(struct vm_locked vm_locked, struct mpool *ppool)
{
	if (vm_locked.vm->el0_partition) {
		mm_stage1_defrag(&vm_locked.vm->ptable, ppool);
	} else {
		mm_vm_defrag(&vm_locked.vm->ptable, ppool);
	}
}

/**
 * Unmaps the hypervisor pages from the given page table.
 */
bool vm_unmap_hypervisor(struct vm_locked vm_locked, struct mpool *ppool)
{
	/* TODO: If we add pages dynamically, they must be included here too. */
	return vm_unmap(vm_locked, layout_text_begin(), layout_text_end(),
			ppool) &&
	       vm_unmap(vm_locked, layout_rodata_begin(), layout_rodata_end(),
			ppool) &&
	       vm_unmap(vm_locked, layout_data_begin(), layout_data_end(),
			ppool) &&
	       vm_unmap(vm_locked, layout_stacks_begin(), layout_stacks_end(),
			ppool);
}

/**
 * Gets the first partition to boot, according to the boot protocol from the
 * FF-A spec.
 */
struct vm *vm_get_first_boot(void)
{
	return first_boot_vm;
}

/**
 * Inserts the VM into the boot list, which is sorted by the `boot_order`
 * field of the vm structure and rooted at `first_boot_vm`.
 */
void vm_update_boot(struct vm *vm)
{
	struct vm *current = NULL;
	struct vm *previous = NULL;

	if (first_boot_vm == NULL) {
		first_boot_vm = vm;
		return;
	}

	current = first_boot_vm;

	while (current != NULL && current->boot_order <= vm->boot_order) {
		previous = current;
		current = current->next_boot;
	}

	if (previous != NULL) {
		previous->next_boot = vm;
	} else {
		first_boot_vm = vm;
	}

	vm->next_boot = current;
}

/**
 * Gets the mode of the given range of IPAs or VAs if they are mapped with the
 * same mode.
 *
 * Returns true if the range is mapped with the same mode and false otherwise.
 * The wrapper calls the appropriate mm function depending on whether the
 * partition is a VM or an EL0 partition.
 */
bool vm_mem_get_mode(struct vm_locked vm_locked, ipaddr_t begin, ipaddr_t end,
		     uint32_t *mode)
{
	if (vm_locked.vm->el0_partition) {
		return mm_get_mode(&vm_locked.vm->ptable,
				   va_from_pa(pa_from_ipa(begin)),
				   va_from_pa(pa_from_ipa(end)), mode);
	}
	return mm_vm_get_mode(&vm_locked.vm->ptable, begin, end, mode);
}

static struct notifications *vm_get_notifications(struct vm_locked vm_locked,
						  bool is_from_vm)
{
	return is_from_vm ? &vm_locked.vm->notifications.from_vm
			  : &vm_locked.vm->notifications.from_sp;
}

/*
 * Initializes the notifications structure.
 */
void vm_notifications_init_bindings(struct notifications *notifications)
{
	for (uint32_t i = 0U; i < MAX_FFA_NOTIFICATIONS; i++) {
		notifications->bindings_sender_id[i] = HF_INVALID_VM_ID;
	}
}

/**
 * Checks if there are pending notifications.
 */
bool vm_are_notifications_pending(struct vm_locked vm_locked, bool from_vm,
				  ffa_notifications_bitmap_t notifications)
{
	struct notifications *to_check;

	CHECK(vm_locked.vm != NULL);

	to_check = vm_get_notifications(vm_locked, from_vm);

	/* Check if there are pending per-vCPU notifications. */
	for (uint32_t i = 0U; i < vm_locked.vm->vcpu_count; i++) {
		if ((to_check->per_vcpu[i].pending & notifications) != 0U) {
			return true;
		}
	}

	/* Check if there are pending global notifications. */
	return (to_check->global.pending & notifications) != 0U;
}

/**
 * Checks if there are pending global notifications, either from SPs or from
 * VMs.
 */
bool vm_are_global_notifications_pending(struct vm_locked vm_locked)
{
	return vm_get_notifications(vm_locked, true)->global.pending != 0ULL ||
	       vm_get_notifications(vm_locked, false)->global.pending != 0ULL ||
	       vm_locked.vm->notifications.framework.pending != 0ULL;
}

/**
 * Checks if there are pending per-vCPU notifications on a specific vCPU,
 * either from SPs or from VMs.
 */
bool vm_are_per_vcpu_notifications_pending(struct vm_locked vm_locked,
					   ffa_vcpu_index_t vcpu_id)
{
	CHECK(vcpu_id < vm_locked.vm->vcpu_count);

	return vm_get_notifications(vm_locked, true)
			->per_vcpu[vcpu_id]
			.pending != 0ULL ||
	       vm_get_notifications(vm_locked, false)
			->per_vcpu[vcpu_id]
			.pending != 0ULL;
}

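/**
 * Checks whether notifications are enabled for the given VM.
 */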
bool vm_are_notifications_enabled(struct vm *vm)
{
	return vm->notifications.enabled;
}

bool vm_locked_are_notifications_enabled(struct vm_locked vm_locked)
{
	return vm_are_notifications_enabled(vm_locked.vm);
}

static bool vm_is_notification_bit_set(ffa_notifications_bitmap_t notifications,
				       uint32_t i)
{
	return (notifications & FFA_NOTIFICATION_MASK(i)) != 0U;
}

static void vm_notifications_global_state_count_update(
	ffa_notifications_bitmap_t bitmap, uint32_t *counter, int inc)
{
	/*
	 * Helper to update counters from the global notifications state.
	 * The counter is updated in steps of 1, i.e. `inc` must be either
	 * 1 or -1.
	 */
	assert(inc == 1 || inc == -1);

	sl_lock(&all_notifications_state.lock);

	for (uint32_t i = 0; i < MAX_FFA_NOTIFICATIONS; i++) {
		if (vm_is_notification_bit_set(bitmap, i)) {
			CHECK((inc > 0 && *counter < UINT32_MAX) ||
			      (inc < 0 && *counter > 0));
			*counter += inc;
		}
	}

	sl_unlock(&all_notifications_state.lock);
}

/**
 * Helper function to increment the pending notifications count based on a
 * bitmap passed as argument.
 * To be used when setting notifications for a given VM.
 */
static void vm_notifications_pending_count_add(
	ffa_notifications_bitmap_t to_add)
{
	vm_notifications_global_state_count_update(
		to_add, &all_notifications_state.pending_count, 1);
}

/**
 * Helper function to decrement the pending notifications count.
 * To be used when getting the receiver's pending notifications.
 */
static void vm_notifications_pending_count_sub(
	ffa_notifications_bitmap_t to_sub)
{
	vm_notifications_global_state_count_update(
		to_sub, &all_notifications_state.pending_count, -1);
}

/**
 * Helper function to count the notifications whose information has been
 * retrieved by the receiver scheduler and that are still pending.
 */
static void vm_notifications_info_get_retrieved_count_add(
	ffa_notifications_bitmap_t to_add)
{
	vm_notifications_global_state_count_update(
		to_add, &all_notifications_state.info_get_retrieved_count, 1);
}

/**
 * Helper function to subtract the notifications that the receiver is getting
 * and whose information had been retrieved by the receiver scheduler.
 */
static void vm_notifications_info_get_retrieved_count_sub(
	ffa_notifications_bitmap_t to_sub)
{
	vm_notifications_global_state_count_update(
		to_sub, &all_notifications_state.info_get_retrieved_count, -1);
}

/**
 * Helper function to determine if there are notifications pending whose info
 * hasn't been retrieved by the receiver scheduler.
 */
bool vm_notifications_pending_not_retrieved_by_scheduler(void)
{
	bool ret;

	sl_lock(&all_notifications_state.lock);
	ret = all_notifications_state.pending_count >
	      all_notifications_state.info_get_retrieved_count;
	sl_unlock(&all_notifications_state.lock);

	return ret;
}

bool vm_is_notifications_pending_count_zero(void)
{
	bool ret;

	sl_lock(&all_notifications_state.lock);
	ret = all_notifications_state.pending_count == 0;
	sl_unlock(&all_notifications_state.lock);

	return ret;
}

/**
 * Checks that all provided notifications are bound to the specified sender,
 * and are per-vCPU or global, as specified.
 */
bool vm_notifications_validate_binding(struct vm_locked vm_locked,
				       bool is_from_vm, ffa_vm_id_t sender_id,
				       ffa_notifications_bitmap_t notifications,
				       bool is_per_vcpu)
{
	return vm_notifications_validate_bound_sender(
		       vm_locked, is_from_vm, sender_id, notifications) &&
	       vm_notifications_validate_per_vcpu(vm_locked, is_from_vm,
						  is_per_vcpu, notifications);
}

/**
 * Updates the binding information in the notifications structure for the
 * specified notifications.
 */
void vm_notifications_update_bindings(struct vm_locked vm_locked,
				      bool is_from_vm, ffa_vm_id_t sender_id,
				      ffa_notifications_bitmap_t notifications,
				      bool is_per_vcpu)
{
	CHECK(vm_locked.vm != NULL);
	struct notifications *to_update =
		vm_get_notifications(vm_locked, is_from_vm);

	for (uint32_t i = 0; i < MAX_FFA_NOTIFICATIONS; i++) {
		if (vm_is_notification_bit_set(notifications, i)) {
			to_update->bindings_sender_id[i] = sender_id;
		}
	}

	/*
	 * Set the bits in the per-vCPU bindings bitmap if the notifications
	 * are per-vCPU, else clear them as the notifications are global.
	 */
	if (is_per_vcpu) {
		to_update->bindings_per_vcpu |= notifications;
	} else {
		to_update->bindings_per_vcpu &= ~notifications;
	}
}

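/**
 * Checks that all the specified notifications are bound to the given sender
 * ID.
 */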
bool vm_notifications_validate_bound_sender(
	struct vm_locked vm_locked, bool is_from_vm, ffa_vm_id_t sender_id,
	ffa_notifications_bitmap_t notifications)
{
	CHECK(vm_locked.vm != NULL);
	struct notifications *to_check =
		vm_get_notifications(vm_locked, is_from_vm);

	for (uint32_t i = 0; i < MAX_FFA_NOTIFICATIONS; i++) {
		if (vm_is_notification_bit_set(notifications, i) &&
		    to_check->bindings_sender_id[i] != sender_id) {
			return false;
		}
	}

	return true;
}

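/**
 * Checks that the per-vCPU/global configuration of the specified
 * notifications' bindings matches `is_per_vcpu`.
 */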
bool vm_notifications_validate_per_vcpu(struct vm_locked vm_locked,
					bool is_from_vm, bool is_per_vcpu,
					ffa_notifications_bitmap_t notif)
{
	CHECK(vm_locked.vm != NULL);
	struct notifications *to_check =
		vm_get_notifications(vm_locked, is_from_vm);

	return is_per_vcpu ? (~to_check->bindings_per_vcpu & notif) == 0U
			   : (to_check->bindings_per_vcpu & notif) == 0U;
}

static void vm_notifications_state_set(struct notifications_state *state,
				       ffa_notifications_bitmap_t notifications)
{
	state->pending |= notifications;
	vm_notifications_pending_count_add(notifications);
}

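/**
 * Sets pending partition notifications, either in the per-vCPU state of
 * `vcpu_id` or in the global state, depending on `is_per_vcpu`.
 */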
void vm_notifications_partition_set_pending(
	struct vm_locked vm_locked, bool is_from_vm,
	ffa_notifications_bitmap_t notifications, ffa_vcpu_index_t vcpu_id,
	bool is_per_vcpu)
{
	struct notifications *to_set;
	struct notifications_state *state;

	CHECK(vm_locked.vm != NULL);
	CHECK(vcpu_id < vm_locked.vm->vcpu_count);

	to_set = vm_get_notifications(vm_locked, is_from_vm);

	state = is_per_vcpu ? &to_set->per_vcpu[vcpu_id] : &to_set->global;

	vm_notifications_state_set(state, notifications);
}

/**
 * Set pending framework notifications.
 */
void vm_notifications_framework_set_pending(
	struct vm_locked vm_locked, ffa_notifications_bitmap_t notifications)
{
	CHECK(vm_locked.vm != NULL);
	assert(is_ffa_spm_buffer_full_notification(notifications) ||
	       is_ffa_hyp_buffer_full_notification(notifications));
	vm_notifications_state_set(&vm_locked.vm->notifications.framework,
				   notifications);
}

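/**
 * Returns and clears the pending notifications in the given state, updating
 * the system-wide pending and info-get-retrieved counts accordingly.
 */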
static ffa_notifications_bitmap_t vm_notifications_state_get_pending(
	struct notifications_state *state)
{
	ffa_notifications_bitmap_t to_ret;
	ffa_notifications_bitmap_t pending_and_info_get_retrieved;

	assert(state != NULL);

	to_ret = state->pending;

	/* Update count of currently pending notifications in the system. */
	vm_notifications_pending_count_sub(state->pending);

	/*
	 * If the notifications the receiver is getting have already been
	 * retrieved by the receiver scheduler, decrement them from the
	 * respective count.
	 */
	pending_and_info_get_retrieved =
		state->pending & state->info_get_retrieved;

	if (pending_and_info_get_retrieved != 0) {
		vm_notifications_info_get_retrieved_count_sub(
			pending_and_info_get_retrieved);
	}

	state->pending = 0U;
	state->info_get_retrieved = 0U;

	return to_ret;
}

/**
 * Get global and per-vCPU notifications for the given vCPU ID.
 */
ffa_notifications_bitmap_t vm_notifications_partition_get_pending(
	struct vm_locked vm_locked, bool is_from_vm, ffa_vcpu_index_t vcpu_id)
{
	ffa_notifications_bitmap_t to_ret;
	struct notifications *to_get;

	assert(vm_locked.vm != NULL);
	to_get = vm_get_notifications(vm_locked, is_from_vm);
	assert(vcpu_id < vm_locked.vm->vcpu_count);

	to_ret = vm_notifications_state_get_pending(&to_get->global);
	to_ret |=
		vm_notifications_state_get_pending(&to_get->per_vcpu[vcpu_id]);

	return to_ret;
}

/**
 * Get pending framework notifications.
 */
ffa_notifications_bitmap_t vm_notifications_framework_get_pending(
	struct vm_locked vm_locked)
{
	struct vm *vm = vm_locked.vm;
	ffa_notifications_bitmap_t framework;
	bool rx_buffer_full;

	assert(vm != NULL);

	framework = vm_notifications_state_get_pending(
		&vm->notifications.framework);

	/*
	 * By retrieving an RX buffer full notification the buffer state
	 * transitions from RECEIVED to READ; the VM is now the RX buffer
	 * owner, can read it and is allowed to release it.
	 */
	rx_buffer_full = is_ffa_spm_buffer_full_notification(framework) ||
			 is_ffa_hyp_buffer_full_notification(framework);
	if (rx_buffer_full && vm->mailbox.state == MAILBOX_STATE_RECEIVED) {
		vm->mailbox.state = MAILBOX_STATE_READ;
	}

	return framework;
}

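/**
 * Adds the given state's pending-but-not-yet-reported notifications to the
 * arrays returned to the receiver scheduler. Each list starts with the VM ID;
 * for per-vCPU notifications up to three vCPU IDs are appended to a list
 * before a new one is started. Notifications reported here are marked as
 * retrieved and counted in the system-wide state.
 */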
static void vm_notifications_state_info_get(
	struct notifications_state *state, ffa_vm_id_t vm_id, bool is_per_vcpu,
	ffa_vcpu_index_t vcpu_id, uint16_t *ids, uint32_t *ids_count,
	uint32_t *lists_sizes, uint32_t *lists_count,
	const uint32_t ids_max_count,
	enum notifications_info_get_state *info_get_state)
{
	ffa_notifications_bitmap_t pending_not_retrieved;

	CHECK(*ids_count <= ids_max_count);
	CHECK(*lists_count <= ids_max_count);

	if (*info_get_state == FULL) {
		return;
	}

	pending_not_retrieved = state->pending & ~state->info_get_retrieved;

	/* No notifications pending that haven't been retrieved. */
	if (pending_not_retrieved == 0U) {
		return;
	}

	if (*ids_count == ids_max_count) {
		*info_get_state = FULL;
		return;
	}

	switch (*info_get_state) {
	case INIT:
	case STARTING_NEW:
		/*
		 * At this iteration two IDs are to be added: the VM ID and the
		 * vCPU ID. If there is no space for both, change state and
		 * return.
		 */
		if (is_per_vcpu && ids_max_count - *ids_count < 2) {
			*info_get_state = FULL;
			return;
		}

		*info_get_state = INSERTING;
		ids[*ids_count] = vm_id;
		++(*ids_count);

		if (is_per_vcpu) {
			/* Insert vCPU ID. */
			ids[*ids_count] = vcpu_id;
			++(*ids_count);
			++lists_sizes[*lists_count];
		}

		++(*lists_count);
		break;
	case INSERTING:
		/* For per-vCPU notifications only. */
		if (!is_per_vcpu) {
			break;
		}

		/* Insert vCPU ID. */
		ids[*ids_count] = vcpu_id;
		(*ids_count)++;
		/* Increment the size of the current list. */
		++lists_sizes[*lists_count - 1];

		if (lists_sizes[*lists_count - 1] == 3) {
			*info_get_state = STARTING_NEW;
		}
		break;
	default:
		panic("Notification info get action error!!\n");
	}

	state->info_get_retrieved |= pending_not_retrieved;

	vm_notifications_info_get_retrieved_count_add(pending_not_retrieved);
}

/**
 * Gets pending notifications' information to return to the receiver
 * scheduler.
 */
void vm_notifications_info_get_pending(
	struct vm_locked vm_locked, bool is_from_vm, uint16_t *ids,
	uint32_t *ids_count, uint32_t *lists_sizes, uint32_t *lists_count,
	const uint32_t ids_max_count,
	enum notifications_info_get_state *info_get_state)
{
	struct notifications *notifications;

	CHECK(vm_locked.vm != NULL);

	notifications = vm_get_notifications(vm_locked, is_from_vm);

	/*
	 * Perform the info get for global notifications before doing it for
	 * per-vCPU notifications.
	 */
	vm_notifications_state_info_get(&notifications->global,
					vm_locked.vm->id, false, 0, ids,
					ids_count, lists_sizes, lists_count,
					ids_max_count, info_get_state);

	for (ffa_vcpu_count_t i = 0; i < vm_locked.vm->vcpu_count; i++) {
		vm_notifications_state_info_get(
			&notifications->per_vcpu[i], vm_locked.vm->id, true, i,
			ids, ids_count, lists_sizes, lists_count, ids_max_count,
			info_get_state);
	}
}

/**
 * Gets all info from the VM's pending notifications.
 * Returns true if the list is full and there are more notifications pending.
 */
bool vm_notifications_info_get(struct vm_locked vm_locked, uint16_t *ids,
			       uint32_t *ids_count, uint32_t *lists_sizes,
			       uint32_t *lists_count,
			       const uint32_t ids_max_count)
{
	enum notifications_info_get_state current_state = INIT;

	/* Get info of pending notifications from the framework. */
	vm_notifications_state_info_get(&vm_locked.vm->notifications.framework,
					vm_locked.vm->id, false, 0, ids,
					ids_count, lists_sizes, lists_count,
					ids_max_count, &current_state);

	/* Get info of pending notifications from SPs. */
	vm_notifications_info_get_pending(vm_locked, false, ids, ids_count,
					  lists_sizes, lists_count,
					  ids_max_count, &current_state);

	/* Get info of pending notifications from VMs. */
	vm_notifications_info_get_pending(vm_locked, true, ids, ids_count,
					  lists_sizes, lists_count,
					  ids_max_count, &current_state);

	/*
	 * The state transitions to FULL when trying to insert a new ID in the
	 * list and there is no more space. This means there are notifications
	 * pending whose info was not retrieved.
	 */
	return current_state == FULL;
}

/**
 * Checks whether the VM supports the given messaging method(s).
 */
bool vm_supports_messaging_method(struct vm *vm, uint8_t msg_method)
{
	return (vm->messaging_method & msg_method) != 0;
}

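/**
 * Records whether the notifications pending interrupt (NPI) has been injected
 * for the given VM.
 */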
void vm_notifications_set_npi_injected(struct vm_locked vm_locked,
				       bool npi_injected)
{
	vm_locked.vm->notifications.npi_injected = npi_injected;
}

bool vm_notifications_is_npi_injected(struct vm_locked vm_locked)
{
	return vm_locked.vm->notifications.npi_injected;
}

/**
 * Sets the designated GP register through which the VM expects to receive the
 * boot info's address.
 */
void vm_set_boot_info_gp_reg(struct vm *vm, struct vcpu *vcpu)
{
	if (!vm->initialized && vm->boot_info.blob_addr.ipa != 0U) {
		arch_regs_set_gp_reg(&vcpu->regs,
				     ipa_addr(vm->boot_info.blob_addr),
				     vm->boot_info.gp_register_num);
	}
}