/*
 * Copyright 2018 The Hafnium Authors.
 *
 * Use of this source code is governed by a BSD-style
 * license that can be found in the LICENSE file or at
 * https://opensource.org/licenses/BSD-3-Clause.
 */

#include "hf/vm.h"

#include "hf/api.h"
#include "hf/check.h"
#include "hf/cpu.h"
#include "hf/dlog.h"
#include "hf/ffa.h"
#include "hf/layout.h"
#include "hf/plat/iommu.h"
#include "hf/std.h"

#include "vmapi/hf/call.h"

static struct vm vms[MAX_VMS];
static struct vm other_world;
static ffa_vm_count_t vm_count;
static struct vm *first_boot_vm;

/**
 * Counters on the status of notifications in the system. They help to improve
 * the information retrieved by the receiver scheduler.
 */
static struct {
        /** Counts notifications pending. */
        uint32_t pending_count;
        /**
         * Counts pending notifications whose information has been retrieved
         * by the receiver scheduler.
         */
        uint32_t info_get_retrieved_count;
        struct spinlock lock;
} all_notifications_state;

static bool vm_init_mm(struct vm *vm, struct mpool *ppool)
{
        if (vm->el0_partition) {
                return mm_ptable_init(&vm->ptable, vm->id, MM_FLAG_STAGE1,
                                      ppool);
        }
        return mm_vm_init(&vm->ptable, vm->id, ppool);
}

struct vm *vm_init(ffa_vm_id_t id, ffa_vcpu_count_t vcpu_count,
                   struct mpool *ppool, bool el0_partition)
{
        uint32_t i;
        struct vm *vm;

        if (id == HF_OTHER_WORLD_ID) {
                CHECK(el0_partition == false);
                vm = &other_world;
        } else {
                uint16_t vm_index = id - HF_VM_ID_OFFSET;

                CHECK(id >= HF_VM_ID_OFFSET);
                CHECK(vm_index < ARRAY_SIZE(vms));
                vm = &vms[vm_index];
        }

        memset_s(vm, sizeof(*vm), 0, sizeof(*vm));

        list_init(&vm->mailbox.waiter_list);
        list_init(&vm->mailbox.ready_list);
        sl_init(&vm->lock);

        vm->id = id;
        vm->vcpu_count = vcpu_count;
        vm->mailbox.state = MAILBOX_STATE_EMPTY;
        atomic_init(&vm->aborting, false);
        vm->el0_partition = el0_partition;

        if (!vm_init_mm(vm, ppool)) {
                return NULL;
        }

        /* Initialise waiter entries. */
        for (i = 0; i < MAX_VMS; i++) {
                vm->wait_entries[i].waiting_vm = vm;
                list_init(&vm->wait_entries[i].wait_links);
                list_init(&vm->wait_entries[i].ready_links);
        }

        /* Do basic initialization of vCPUs. */
        for (i = 0; i < vcpu_count; i++) {
                vcpu_init(vm_get_vcpu(vm, i), vm);
        }

        /* Basic initialization of the notifications structure. */
        vm_notifications_init_bindings(&vm->notifications.from_sp);
        vm_notifications_init_bindings(&vm->notifications.from_vm);

        return vm;
}

bool vm_init_next(ffa_vcpu_count_t vcpu_count, struct mpool *ppool,
                  struct vm **new_vm, bool el0_partition)
{
        if (vm_count >= MAX_VMS) {
                return false;
        }

        /* Generate IDs based on an offset, as low IDs (e.g. 0) are reserved. */
        *new_vm = vm_init(vm_count + HF_VM_ID_OFFSET, vcpu_count, ppool,
                          el0_partition);
        if (*new_vm == NULL) {
                return false;
        }
        ++vm_count;

        return true;
}
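
/*
 * Usage sketch: a hypothetical boot-time caller creating one partition per
 * manifest entry. The loop, `manifest_vm_count`, `vcpus_of()` and `ppool` are
 * illustrative assumptions and not part of this file; only vm_init_next() and
 * the MAX_VMS limit are real.
 *
 *        struct vm *new_vm;
 *
 *        for (size_t i = 0; i < manifest_vm_count; i++) {
 *                if (!vm_init_next(vcpus_of(i), &ppool, &new_vm, false)) {
 *                        panic("Too many VMs or page table init failed.");
 *                }
 *                // IDs are assigned in creation order: HF_VM_ID_OFFSET + i.
 *        }
 */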

ffa_vm_count_t vm_get_count(void)
{
        return vm_count;
}

/**
 * Returns a pointer to the VM with the corresponding id.
 */
struct vm *vm_find(ffa_vm_id_t id)
{
        uint16_t index;

        if (id == HF_OTHER_WORLD_ID) {
                if (other_world.id == HF_OTHER_WORLD_ID) {
                        return &other_world;
                }
                return NULL;
        }

        /* Check that this is not a reserved ID. */
        if (id < HF_VM_ID_OFFSET) {
                return NULL;
        }

        index = id - HF_VM_ID_OFFSET;

        return vm_find_index(index);
}

/**
 * Returns a locked instance of the VM with the corresponding id.
 */
struct vm_locked vm_find_locked(ffa_vm_id_t id)
{
        struct vm *vm = vm_find(id);

        if (vm != NULL) {
                return vm_lock(vm);
        }

        return (struct vm_locked){.vm = NULL};
}
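
/*
 * Usage sketch of the find/lock/use/unlock pattern built on vm_find_locked()
 * and vm_unlock(). `target_id` and the work done under the lock are
 * illustrative assumptions.
 *
 *        struct vm_locked target = vm_find_locked(target_id);
 *
 *        if (target.vm == NULL) {
 *                return ffa_error(FFA_INVALID_PARAMETERS);
 *        }
 *        // ... operate on target.vm while holding its lock ...
 *        vm_unlock(&target);        // also clears target.vm
 */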

/**
 * Returns a pointer to the VM at the specified index.
 */
struct vm *vm_find_index(uint16_t index)
{
        /* Ensure the VM is initialized. */
        if (index >= vm_count) {
                return NULL;
        }

        return &vms[index];
}

/**
 * Locks the given VM and returns a `vm_locked` instance holding the newly
 * locked VM.
 */
struct vm_locked vm_lock(struct vm *vm)
{
        struct vm_locked locked = {
                .vm = vm,
        };

        sl_lock(&vm->lock);

        return locked;
}

/**
 * Locks two VMs, ensuring that the locking order is according to the locks'
 * addresses.
 */
struct two_vm_locked vm_lock_both(struct vm *vm1, struct vm *vm2)
{
        struct two_vm_locked dual_lock;

        sl_lock_both(&vm1->lock, &vm2->lock);
        dual_lock.vm1.vm = vm1;
        dual_lock.vm2.vm = vm2;

        return dual_lock;
}
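
/*
 * Usage sketch: locking a sender and a receiver together, e.g. on a memory
 * sharing path. Because the locks are acquired in address order, two CPUs
 * locking the same pair with swapped arguments cannot deadlock. The VM
 * pointers below are illustrative assumptions.
 *
 *        struct two_vm_locked both = vm_lock_both(sender_vm, receiver_vm);
 *
 *        // ... update both VMs' state atomically ...
 *
 *        vm_unlock(&both.vm1);
 *        vm_unlock(&both.vm2);
 */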

/**
 * Unlocks a VM previously locked with vm_lock, and updates `locked` to reflect
 * the fact that the VM is no longer locked.
 */
void vm_unlock(struct vm_locked *locked)
{
        sl_unlock(&locked->vm->lock);
        locked->vm = NULL;
}

/**
 * Gets the vCPU with the given index from the given VM.
 * This assumes the index is valid, i.e. less than vm->vcpu_count.
 */
struct vcpu *vm_get_vcpu(struct vm *vm, ffa_vcpu_index_t vcpu_index)
{
        CHECK(vcpu_index < vm->vcpu_count);
        return &vm->vcpus[vcpu_index];
}

/**
 * Gets `vm`'s wait entry for waiting on `for_vm`.
 */
struct wait_entry *vm_get_wait_entry(struct vm *vm, ffa_vm_id_t for_vm)
{
        uint16_t index;

        CHECK(for_vm >= HF_VM_ID_OFFSET);
        index = for_vm - HF_VM_ID_OFFSET;
        CHECK(index < MAX_VMS);

        return &vm->wait_entries[index];
}

/**
 * Gets the ID of the VM which the given VM's wait entry is for.
 */
ffa_vm_id_t vm_id_for_wait_entry(struct vm *vm, struct wait_entry *entry)
{
        uint16_t index = entry - vm->wait_entries;

        return index + HF_VM_ID_OFFSET;
}

/**
 * Returns whether the given VM ID represents an entity in the current world:
 * i.e. the hypervisor or a normal world VM when running in the normal world,
 * or the SPM or an SP when running in the secure world.
 */
bool vm_id_is_current_world(ffa_vm_id_t vm_id)
{
        return (vm_id & HF_VM_ID_WORLD_MASK) !=
               (HF_OTHER_WORLD_ID & HF_VM_ID_WORLD_MASK);
}

/**
 * Maps a range of addresses to the VM in both the MMU and the IOMMU.
 *
 * mm_vm_defrag should always be called after a series of page table updates,
 * whether they succeed or fail. This is because on failure extra page table
 * entries may have been allocated and then not used, while on success it may
 * be possible to compact the page table by merging several entries into a
 * block.
 *
 * Returns true on success, or false if the update failed and no changes were
 * made.
 */
bool vm_identity_map(struct vm_locked vm_locked, paddr_t begin, paddr_t end,
                     uint32_t mode, struct mpool *ppool, ipaddr_t *ipa)
{
        if (!vm_identity_prepare(vm_locked, begin, end, mode, ppool)) {
                return false;
        }

        vm_identity_commit(vm_locked, begin, end, mode, ppool, ipa);

        return true;
}
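
/*
 * Usage sketch: the prepare/commit split lets a caller make a multi-range
 * update all-or-nothing. Every range is prepared first (which may allocate
 * page table entries and can fail), and only then committed (which cannot
 * fail). The two address ranges and the mode below are illustrative
 * assumptions.
 *
 *        if (!vm_identity_prepare(vm_locked, begin0, end0, mode, ppool) ||
 *            !vm_identity_prepare(vm_locked, begin1, end1, mode, ppool)) {
 *                return false;        // nothing has been committed yet
 *        }
 *        vm_identity_commit(vm_locked, begin0, end0, mode, ppool, NULL);
 *        vm_identity_commit(vm_locked, begin1, end1, mode, ppool, NULL);
 */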

/**
 * Prepares the given VM for the given address mapping such that it will be
 * able to commit the change without failure.
 *
 * In particular, multiple calls to this function will result in the
 * corresponding calls to commit the changes succeeding.
 *
 * Returns true on success, or false if the update failed and no changes were
 * made.
 */
bool vm_identity_prepare(struct vm_locked vm_locked, paddr_t begin, paddr_t end,
                         uint32_t mode, struct mpool *ppool)
{
        if (vm_locked.vm->el0_partition) {
                return mm_identity_prepare(&vm_locked.vm->ptable, begin, end,
                                           mode, ppool);
        }
        return mm_vm_identity_prepare(&vm_locked.vm->ptable, begin, end, mode,
                                      ppool);
}

/**
 * Commits the given address mapping to the VM assuming the operation cannot
 * fail. `vm_identity_prepare` must be used correctly before this to ensure
 * this condition.
 */
void vm_identity_commit(struct vm_locked vm_locked, paddr_t begin, paddr_t end,
                        uint32_t mode, struct mpool *ppool, ipaddr_t *ipa)
{
        if (vm_locked.vm->el0_partition) {
                mm_identity_commit(&vm_locked.vm->ptable, begin, end, mode,
                                   ppool);
                if (ipa != NULL) {
                        /*
                         * EL0 partitions are modeled as lightweight VMs, to
                         * promote code reuse. The statement below returns the
                         * mapped PA as an IPA; however, for an EL0 partition,
                         * this is really a VA.
                         */
                        *ipa = ipa_from_pa(begin);
                }
        } else {
                mm_vm_identity_commit(&vm_locked.vm->ptable, begin, end, mode,
                                      ppool, ipa);
        }
        plat_iommu_identity_map(vm_locked, begin, end, mode);
}

/**
 * Unmaps a range of addresses from the VM.
 *
 * Returns true on success, or false if the update failed and no changes were
 * made.
 */
bool vm_unmap(struct vm_locked vm_locked, paddr_t begin, paddr_t end,
              struct mpool *ppool)
{
        uint32_t mode = MM_MODE_UNMAPPED_MASK;

        return vm_identity_map(vm_locked, begin, end, mode, ppool, NULL);
}

/**
 * Defrags the page tables of an EL0 partition or of a VM.
 */
void vm_ptable_defrag(struct vm_locked vm_locked, struct mpool *ppool)
{
        if (vm_locked.vm->el0_partition) {
                mm_stage1_defrag(&vm_locked.vm->ptable, ppool);
        } else {
                mm_vm_defrag(&vm_locked.vm->ptable, ppool);
        }
}

/**
 * Unmaps the hypervisor pages from the given page table.
 */
bool vm_unmap_hypervisor(struct vm_locked vm_locked, struct mpool *ppool)
{
        /* TODO: If we add pages dynamically, they must be included here too. */
        return vm_unmap(vm_locked, layout_text_begin(), layout_text_end(),
                        ppool) &&
               vm_unmap(vm_locked, layout_rodata_begin(), layout_rodata_end(),
                        ppool) &&
               vm_unmap(vm_locked, layout_data_begin(), layout_data_end(),
                        ppool);
}

/**
 * Gets the first partition to boot, according to the boot protocol from the
 * FF-A spec.
 */
struct vm *vm_get_first_boot(void)
{
        return first_boot_vm;
}

/**
 * Inserts the VM into the boot list, which is sorted by the `boot_order` field
 * of the vm structure and rooted at `first_boot_vm`.
 */
void vm_update_boot(struct vm *vm)
{
        struct vm *current = NULL;
        struct vm *previous = NULL;

        if (first_boot_vm == NULL) {
                first_boot_vm = vm;
                return;
        }

        current = first_boot_vm;

        while (current != NULL && current->boot_order >= vm->boot_order) {
                previous = current;
                current = current->next_boot;
        }

        if (previous != NULL) {
                previous->next_boot = vm;
        } else {
                first_boot_vm = vm;
        }

        vm->next_boot = current;
}
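
/*
 * Worked example (values are illustrative assumptions): inserting three
 * partitions with `boot_order` 1, 3 and 2, in any order, yields a list sorted
 * by descending boot_order:
 *
 *        first_boot_vm -> [boot_order 3] -> [boot_order 2] -> [boot_order 1]
 *
 * so vm_get_first_boot() returns the partition with the highest boot_order.
 */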

/**
 * Gets the mode of the given range of IPAs or VAs if they are mapped with the
 * same mode.
 *
 * Returns true if the range is mapped with the same mode and false otherwise.
 * The wrapper calls the appropriate mm function depending on whether the
 * partition is a VM or an EL0 partition.
 */
bool vm_mem_get_mode(struct vm_locked vm_locked, ipaddr_t begin, ipaddr_t end,
                     uint32_t *mode)
{
        if (vm_locked.vm->el0_partition) {
                return mm_get_mode(&vm_locked.vm->ptable,
                                   va_from_pa(pa_from_ipa(begin)),
                                   va_from_pa(pa_from_ipa(end)), mode);
        }
        return mm_vm_get_mode(&vm_locked.vm->ptable, begin, end, mode);
}

static struct notifications *vm_get_notifications(struct vm_locked vm_locked,
                                                  bool is_from_vm)
{
        return is_from_vm ? &vm_locked.vm->notifications.from_vm
                          : &vm_locked.vm->notifications.from_sp;
}

/*
 * Initializes the notifications structure.
 */
void vm_notifications_init_bindings(struct notifications *notifications)
{
        for (uint32_t i = 0U; i < MAX_FFA_NOTIFICATIONS; i++) {
                notifications->bindings_sender_id[i] = HF_INVALID_VM_ID;
        }
}

/**
 * Checks if there are pending notifications.
 */
bool vm_are_notifications_pending(struct vm_locked vm_locked, bool from_vm,
                                  ffa_notifications_bitmap_t notifications)
{
        struct notifications *to_check;

        CHECK(vm_locked.vm != NULL);

        to_check = vm_get_notifications(vm_locked, from_vm);

        /* Check if there are pending per-vCPU notifications. */
        for (uint32_t i = 0U; i < MAX_CPUS; i++) {
                if ((to_check->per_vcpu[i].pending & notifications) != 0U) {
                        return true;
                }
        }

        /* Check if there are pending global notifications. */
        return (to_check->global.pending & notifications) != 0U;
}

/**
 * Checks if there are pending global notifications, either from SPs or from
 * VMs.
 */
bool vm_are_global_notifications_pending(struct vm_locked vm_locked)
{
        return vm_get_notifications(vm_locked, true)->global.pending != 0ULL ||
               vm_get_notifications(vm_locked, false)->global.pending != 0ULL;
}

/**
 * Checks if there are pending per-vCPU notifications in a specific vCPU,
 * either from SPs or from VMs.
 */
bool vm_are_per_vcpu_notifications_pending(struct vm_locked vm_locked,
                                           ffa_vcpu_index_t vcpu_id)
{
        CHECK(vcpu_id < MAX_CPUS);

        return vm_get_notifications(vm_locked, true)
                               ->per_vcpu[vcpu_id]
                               .pending != 0ULL ||
               vm_get_notifications(vm_locked, false)
                               ->per_vcpu[vcpu_id]
                               .pending != 0ULL;
}

bool vm_are_notifications_enabled(struct vm *vm)
{
        return vm->notifications.enabled == true;
}

bool vm_locked_are_notifications_enabled(struct vm_locked vm_locked)
{
        return vm_are_notifications_enabled(vm_locked.vm);
}

static bool vm_is_notification_bit_set(ffa_notifications_bitmap_t notifications,
                                       uint32_t i)
{
        return (notifications & FFA_NOTIFICATION_MASK(i)) != 0U;
}

static void vm_notifications_global_state_count_update(
        ffa_notifications_bitmap_t bitmap, uint32_t *counter, int inc)
{
        /*
         * Helper to update the counters of the global notifications state.
         * For each bit set in `bitmap`, the counter is changed by `inc`,
         * which must be either 1 or -1.
         */
        CHECK(inc == 1 || inc == -1);

        sl_lock(&all_notifications_state.lock);

        for (uint32_t i = 0; i < MAX_FFA_NOTIFICATIONS; i++) {
                if (vm_is_notification_bit_set(bitmap, i)) {
                        CHECK((inc > 0 && *counter < UINT32_MAX) ||
                              (inc < 0 && *counter > 0));
                        *counter += inc;
                }
        }

        sl_unlock(&all_notifications_state.lock);
}

/**
 * Helper function to increment the pending notifications count based on the
 * bitmap passed as argument.
 * To be used when setting notifications for a given VM.
 */
static void vm_notifications_pending_count_add(
        ffa_notifications_bitmap_t to_add)
{
        vm_notifications_global_state_count_update(
                to_add, &all_notifications_state.pending_count, 1);
}

/**
 * Helper function to decrement the pending notifications count.
 * To be used when getting the receiver's pending notifications.
 */
static void vm_notifications_pending_count_sub(
        ffa_notifications_bitmap_t to_sub)
{
        vm_notifications_global_state_count_update(
                to_sub, &all_notifications_state.pending_count, -1);
}

/**
 * Helper function to increment the count of notifications whose information
 * has been retrieved by the receiver scheduler and that are still pending.
 */
static void vm_notifications_info_get_retrieved_count_add(
        ffa_notifications_bitmap_t to_add)
{
        vm_notifications_global_state_count_update(
                to_add, &all_notifications_state.info_get_retrieved_count, 1);
}

/**
 * Helper function to decrement the count for notifications that the receiver
 * is getting and whose information has already been retrieved by the receiver
 * scheduler.
 */
static void vm_notifications_info_get_retrieved_count_sub(
        ffa_notifications_bitmap_t to_sub)
{
        vm_notifications_global_state_count_update(
                to_sub, &all_notifications_state.info_get_retrieved_count, -1);
}

/**
 * Helper function to determine if there are notifications pending whose info
 * hasn't been retrieved by the receiver scheduler.
 */
bool vm_notifications_pending_not_retrieved_by_scheduler(void)
{
        bool ret;

        sl_lock(&all_notifications_state.lock);
        ret = all_notifications_state.pending_count >
              all_notifications_state.info_get_retrieved_count;
        sl_unlock(&all_notifications_state.lock);

        return ret;
}

bool vm_is_notifications_pending_count_zero(void)
{
        bool ret;

        sl_lock(&all_notifications_state.lock);
        ret = all_notifications_state.pending_count == 0;
        sl_unlock(&all_notifications_state.lock);

        return ret;
}
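
/*
 * Usage sketch: how the two counters are meant to be read together. A receiver
 * scheduler can use vm_notifications_pending_not_retrieved_by_scheduler() to
 * decide whether another FFA_NOTIFICATION_INFO_GET round is needed, and
 * vm_is_notifications_pending_count_zero() to know when every pending
 * notification has been consumed. The polling loop below is an illustrative
 * assumption about the caller, not code in this file.
 *
 *        while (vm_notifications_pending_not_retrieved_by_scheduler()) {
 *                // Build and return another FFA_NOTIFICATION_INFO_GET
 *                // response.
 *        }
 */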

/**
 * Checks that all provided notifications are bound to the specified sender,
 * and are per-vCPU or global, as specified.
 */
bool vm_notifications_validate_binding(struct vm_locked vm_locked,
                                       bool is_from_vm, ffa_vm_id_t sender_id,
                                       ffa_notifications_bitmap_t notifications,
                                       bool is_per_vcpu)
{
        return vm_notifications_validate_bound_sender(
                       vm_locked, is_from_vm, sender_id, notifications) &&
               vm_notifications_validate_per_vcpu(vm_locked, is_from_vm,
                                                  is_per_vcpu, notifications);
}

/**
 * Updates the binding information in the notifications structure for the
 * specified notifications.
 */
void vm_notifications_update_bindings(struct vm_locked vm_locked,
                                      bool is_from_vm, ffa_vm_id_t sender_id,
                                      ffa_notifications_bitmap_t notifications,
                                      bool is_per_vcpu)
{
        CHECK(vm_locked.vm != NULL);
        struct notifications *to_update =
                vm_get_notifications(vm_locked, is_from_vm);

        for (uint32_t i = 0; i < MAX_FFA_NOTIFICATIONS; i++) {
                if (vm_is_notification_bit_set(notifications, i)) {
                        to_update->bindings_sender_id[i] = sender_id;
                }
        }

        /*
         * Set the bits in bindings_per_vcpu if the notifications are per-vCPU,
         * else clear them as the notifications are global.
         */
        if (is_per_vcpu) {
                to_update->bindings_per_vcpu |= notifications;
        } else {
                to_update->bindings_per_vcpu &= ~notifications;
        }
}

bool vm_notifications_validate_bound_sender(
        struct vm_locked vm_locked, bool is_from_vm, ffa_vm_id_t sender_id,
        ffa_notifications_bitmap_t notifications)
{
        CHECK(vm_locked.vm != NULL);
        struct notifications *to_check =
                vm_get_notifications(vm_locked, is_from_vm);

        for (uint32_t i = 0; i < MAX_FFA_NOTIFICATIONS; i++) {
                if (vm_is_notification_bit_set(notifications, i) &&
                    to_check->bindings_sender_id[i] != sender_id) {
                        return false;
                }
        }

        return true;
}

bool vm_notifications_validate_per_vcpu(struct vm_locked vm_locked,
                                        bool is_from_vm, bool is_per_vcpu,
                                        ffa_notifications_bitmap_t notif)
{
        CHECK(vm_locked.vm != NULL);
        struct notifications *to_check =
                vm_get_notifications(vm_locked, is_from_vm);

        return is_per_vcpu ? (~to_check->bindings_per_vcpu & notif) == 0U
                           : (to_check->bindings_per_vcpu & notif) == 0U;
}
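
/*
 * Usage sketch: binding then validating. Once the receiver binds notification
 * bit 0 to `sender_id` as a global notification, only a request from that
 * sender with a matching per-vCPU flag passes validation. `receiver_locked`,
 * `sender_id` and `other_sender` are illustrative assumptions.
 *
 *        vm_notifications_update_bindings(receiver_locked, true, sender_id,
 *                                         FFA_NOTIFICATION_MASK(0), false);
 *
 *        vm_notifications_validate_binding(receiver_locked, true, sender_id,
 *                                          FFA_NOTIFICATION_MASK(0), false);
 *        // -> true
 *        vm_notifications_validate_binding(receiver_locked, true, other_sender,
 *                                          FFA_NOTIFICATION_MASK(0), false);
 *        // -> false: bit 0 is bound to sender_id
 */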

void vm_notifications_set(struct vm_locked vm_locked, bool is_from_vm,
                          ffa_notifications_bitmap_t notifications,
                          ffa_vcpu_index_t vcpu_id, bool is_per_vcpu)
{
        CHECK(vm_locked.vm != NULL);
        struct notifications *to_set =
                vm_get_notifications(vm_locked, is_from_vm);
        CHECK(vcpu_id < MAX_CPUS);

        if (is_per_vcpu) {
                to_set->per_vcpu[vcpu_id].pending |= notifications;
        } else {
                to_set->global.pending |= notifications;
        }

        /* Update count of notifications pending. */
        vm_notifications_pending_count_add(notifications);
}

/**
 * Gets the global notifications, and the per-vCPU notifications of the current
 * vCPU only.
 */
ffa_notifications_bitmap_t vm_notifications_get_pending_and_clear(
        struct vm_locked vm_locked, bool is_from_vm,
        ffa_vcpu_index_t cur_vcpu_id)
{
        ffa_notifications_bitmap_t to_ret = 0;
        ffa_notifications_bitmap_t pending_and_info_get_retrieved;

        CHECK(vm_locked.vm != NULL);
        struct notifications *to_get =
                vm_get_notifications(vm_locked, is_from_vm);
        CHECK(cur_vcpu_id < MAX_CPUS);

        to_ret |= to_get->global.pending;

        /* Update count of currently pending notifications in the system. */
        vm_notifications_pending_count_sub(to_get->global.pending);

        /*
         * If the notifications the receiver is getting have had their
         * information retrieved by the receiver scheduler, decrement them
         * from the respective count.
         */
        pending_and_info_get_retrieved =
                to_get->global.pending & to_get->global.info_get_retrieved;

        if (pending_and_info_get_retrieved != 0) {
                vm_notifications_info_get_retrieved_count_sub(
                        pending_and_info_get_retrieved);
        }

        to_get->global.pending = 0U;
        to_get->global.info_get_retrieved = 0U;

        to_ret |= to_get->per_vcpu[cur_vcpu_id].pending;

        /*
         * Update counts of notifications, this time for per-vCPU
         * notifications.
         */
        vm_notifications_pending_count_sub(
                to_get->per_vcpu[cur_vcpu_id].pending);

        pending_and_info_get_retrieved =
                to_get->per_vcpu[cur_vcpu_id].pending &
                to_get->per_vcpu[cur_vcpu_id].info_get_retrieved;

        if (pending_and_info_get_retrieved != 0) {
                vm_notifications_info_get_retrieved_count_sub(
                        pending_and_info_get_retrieved);
        }

        to_get->per_vcpu[cur_vcpu_id].pending = 0U;
        to_get->per_vcpu[cur_vcpu_id].info_get_retrieved = 0U;

        return to_ret;
}
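
/*
 * Usage sketch: the set/get pair and its effect on the global counters.
 * Setting a per-vCPU notification increments pending_count; retrieving it on
 * the owning vCPU clears the bit and decrements the count again. The caller
 * is assumed to hold the VM lock; `receiver_locked` and the vCPU index are
 * illustrative assumptions.
 *
 *        vm_notifications_set(receiver_locked, true, FFA_NOTIFICATION_MASK(3),
 *                             0, true);        // pending_count += 1
 *
 *        ffa_notifications_bitmap_t got =
 *                vm_notifications_get_pending_and_clear(receiver_locked, true,
 *                                                       0);
 *        // pending_count -= 1; got == FFA_NOTIFICATION_MASK(3)
 */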

/**
 * Gets pending notifications' information to return to the receiver scheduler.
 */
void vm_notifications_info_get_pending(
        struct vm_locked vm_locked, bool is_from_vm, uint16_t *ids,
        uint32_t *ids_count, uint32_t *lists_sizes, uint32_t *lists_count,
        const uint32_t ids_max_count,
        enum notifications_info_get_state *info_get_state)
{
        ffa_notifications_bitmap_t pending_not_retrieved;

        CHECK(vm_locked.vm != NULL);
        struct notifications *notifications =
                vm_get_notifications(vm_locked, is_from_vm);

        if (*info_get_state == FULL) {
                return;
        }

        CHECK(*ids_count <= ids_max_count);
        CHECK(*lists_count <= ids_max_count);

        pending_not_retrieved = notifications->global.pending &
                                ~notifications->global.info_get_retrieved;

        if (pending_not_retrieved != 0U && *info_get_state == INIT) {
                /*
                 * If the state is INIT, it means that no list has been created
                 * for the given VM ID yet, which also means that global
                 * notifications are not yet represented in the list.
                 */
                if (*ids_count == ids_max_count) {
                        *info_get_state = FULL;
                        return;
                }

                *info_get_state = INSERTING;

                (*lists_count)++;
                ids[*ids_count] = vm_locked.vm->id;
                ++(*ids_count);
        }

        notifications->global.info_get_retrieved |= pending_not_retrieved;

        vm_notifications_info_get_retrieved_count_add(pending_not_retrieved);

        for (ffa_vcpu_count_t i = 0; i < vm_locked.vm->vcpu_count; i++) {
                /*
                 * Include the vCPU ID of per-vCPU notifications.
                 */
                pending_not_retrieved =
                        notifications->per_vcpu[i].pending &
                        ~notifications->per_vcpu[i].info_get_retrieved;

                if (pending_not_retrieved == 0U) {
                        continue;
                }

                switch (*info_get_state) {
                case INIT:
                case STARTING_NEW:
                        /*
                         * At this iteration two IDs need to be added: the VM
                         * ID and the vCPU ID. If there is no space, change
                         * state and terminate the function.
                         */
                        if (ids_max_count - *ids_count < 2) {
                                *info_get_state = FULL;
                                return;
                        }

                        ids[*ids_count] = vm_locked.vm->id;
                        ++(*ids_count);

                        /* Insert vCPU ID. */
                        ids[*ids_count] = i;
                        ++(*ids_count);

                        ++lists_sizes[*lists_count];
                        ++(*lists_count);

                        *info_get_state = INSERTING;
                        break;
                case INSERTING:
                        if (*ids_count == ids_max_count) {
                                *info_get_state = FULL;
                                return;
                        }

                        /* Insert vCPU ID. */
                        ids[*ids_count] = i;
                        (*ids_count)++;

                        /* Increment the respective list size. */
                        ++lists_sizes[*lists_count - 1];

                        if (lists_sizes[*lists_count - 1] == 3) {
                                *info_get_state = STARTING_NEW;
                        }
                        break;
                default:
                        panic("Notification info get action error!!\n");
                }

                notifications->per_vcpu[i].info_get_retrieved |=
                        pending_not_retrieved;

                vm_notifications_info_get_retrieved_count_add(
                        pending_not_retrieved);
        }
}

/**
 * Gets all the info from the VM's pending notifications.
 * Returns true if the list is full and there are still notifications pending.
 */
bool vm_notifications_info_get(struct vm_locked vm_locked, uint16_t *ids,
                               uint32_t *ids_count, uint32_t *lists_sizes,
                               uint32_t *lists_count,
                               const uint32_t ids_max_count)
{
        enum notifications_info_get_state current_state = INIT;

        /* Get info of pending notifications from SPs. */
        vm_notifications_info_get_pending(vm_locked, false, ids, ids_count,
                                          lists_sizes, lists_count,
                                          ids_max_count, &current_state);

        /* Get info of pending notifications from VMs. */
        vm_notifications_info_get_pending(vm_locked, true, ids, ids_count,
                                          lists_sizes, lists_count,
                                          ids_max_count, &current_state);

        /*
         * The state transitions to FULL when trying to insert a new ID in the
         * list and there is no more space. This means that there are
         * notifications pending whose info was not retrieved.
         */
        return current_state == FULL;
}
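
/*
 * Worked example of the output format, derived from the state machine above:
 * `ids` is filled with a sequence of lists, each starting with the VM ID and
 * followed by up to three vCPU IDs that have pending per-vCPU notifications;
 * `lists_sizes[n]` holds the number of vCPU IDs in list n (0 for a list that
 * only reports global notifications). For a VM with ID 1 that has a global
 * notification pending plus per-vCPU notifications on vCPUs 0 and 2, a single
 * call could produce:
 *
 *        ids         = { 1, 0, 2 }
 *        ids_count   = 3
 *        lists_count = 1
 *        lists_sizes = { 2 }
 *
 * The concrete VM and vCPU numbers are illustrative assumptions.
 */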