/*
 * Copyright 2018 The Hafnium Authors.
 *
 * Use of this source code is governed by a BSD-style
 * license that can be found in the LICENSE file or at
 * https://opensource.org/licenses/BSD-3-Clause.
 */

#include "hf/vm.h"

#include "hf/api.h"
#include "hf/assert.h"
#include "hf/check.h"
#include "hf/cpu.h"
#include "hf/dlog.h"
#include "hf/ffa.h"
#include "hf/layout.h"
#include "hf/plat/iommu.h"
#include "hf/std.h"

#include "vmapi/hf/call.h"

static struct vm vms[MAX_VMS];
static struct vm other_world;
static ffa_vm_count_t vm_count;

/**
 * Counters on the status of notifications in the system. They help to improve
 * the information retrieved by the receiver scheduler.
 */
static struct {
	/** Count of pending notifications. */
	uint32_t pending_count;
	/**
	 * Count of pending notifications whose information has been retrieved
	 * by the receiver scheduler.
	 */
	uint32_t info_get_retrieved_count;
	struct spinlock lock;
} all_notifications_state;

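/*
 * Initialises the partition's page tables: a stage-1 table for an EL0
 * partition, a stage-2 table for a VM.
 */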
static bool vm_init_mm(struct vm *vm, struct mpool *ppool)
{
	if (vm->el0_partition) {
		return mm_ptable_init(&vm->ptable, vm->id, MM_FLAG_STAGE1,
				      ppool);
	}
	return mm_vm_init(&vm->ptable, vm->id, ppool);
}

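/*
 * Allocates and initialises the VM structure for the given ID: clears it,
 * sets up the mailbox lists and lock, allocates the vCPU array from `ppool`,
 * and initialises the page tables, wait entries, vCPUs and notifications.
 * Returns NULL if the page table initialisation fails.
 */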
struct vm *vm_init(ffa_vm_id_t id, ffa_vcpu_count_t vcpu_count,
		   struct mpool *ppool, bool el0_partition)
{
	uint32_t i;
	struct vm *vm;
	size_t vcpu_ppool_entries = (align_up(sizeof(struct vcpu) * vcpu_count,
					      MM_PPOOL_ENTRY_SIZE) /
				     MM_PPOOL_ENTRY_SIZE);

	if (id == HF_OTHER_WORLD_ID) {
		CHECK(el0_partition == false);
		vm = &other_world;
	} else {
		uint16_t vm_index = id - HF_VM_ID_OFFSET;

		CHECK(id >= HF_VM_ID_OFFSET);
		CHECK(vm_index < ARRAY_SIZE(vms));
		vm = &vms[vm_index];
	}

	memset_s(vm, sizeof(*vm), 0, sizeof(*vm));

	list_init(&vm->mailbox.waiter_list);
	list_init(&vm->mailbox.ready_list);
	sl_init(&vm->lock);

	vm->id = id;
	vm->vcpu_count = vcpu_count;

	vm->vcpus = (struct vcpu *)mpool_alloc_contiguous(
		ppool, vcpu_ppool_entries, 1);
	CHECK(vm->vcpus != NULL);

	vm->mailbox.state = MAILBOX_STATE_EMPTY;
	atomic_init(&vm->aborting, false);
	vm->el0_partition = el0_partition;

	if (!vm_init_mm(vm, ppool)) {
		return NULL;
	}

	/* Initialise waiter entries. */
	for (i = 0; i < MAX_VMS; i++) {
		vm->wait_entries[i].waiting_vm = vm;
		list_init(&vm->wait_entries[i].wait_links);
		list_init(&vm->wait_entries[i].ready_links);
	}

	/* Do basic initialization of vCPUs. */
	for (i = 0; i < vcpu_count; i++) {
		vcpu_init(vm_get_vcpu(vm, i), vm);
	}

	vm_notifications_init(vm, vcpu_count, ppool);
	return vm;
}

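/*
 * Initialises the next available VM slot, assigning it the next sequential ID
 * after the reserved range. Returns false if all slots are in use or the
 * initialisation fails.
 */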
bool vm_init_next(ffa_vcpu_count_t vcpu_count, struct mpool *ppool,
		  struct vm **new_vm, bool el0_partition)
{
	if (vm_count >= MAX_VMS) {
		return false;
	}

	/* Generate IDs based on an offset, as low IDs (e.g. 0) are reserved. */
	*new_vm = vm_init(vm_count + HF_VM_ID_OFFSET, vcpu_count, ppool,
			  el0_partition);
	if (*new_vm == NULL) {
		return false;
	}
	++vm_count;

	return true;
}

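/*
 * Returns the number of VMs initialised so far.
 */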
ffa_vm_count_t vm_get_count(void)
{
	return vm_count;
}

/**
 * Returns a pointer to the VM with the corresponding id.
 */
struct vm *vm_find(ffa_vm_id_t id)
{
	uint16_t index;

	if (id == HF_OTHER_WORLD_ID) {
		if (other_world.id == HF_OTHER_WORLD_ID) {
			return &other_world;
		}
		return NULL;
	}

	/* Check that this is not a reserved ID. */
	if (id < HF_VM_ID_OFFSET) {
		return NULL;
	}

	index = id - HF_VM_ID_OFFSET;

	return vm_find_index(index);
}

/**
 * Returns a locked instance of the VM with the corresponding id.
 */
struct vm_locked vm_find_locked(ffa_vm_id_t id)
{
	struct vm *vm = vm_find(id);

	if (vm != NULL) {
		return vm_lock(vm);
	}

	return (struct vm_locked){.vm = NULL};
}

/**
 * Returns a pointer to the VM at the specified index.
 */
struct vm *vm_find_index(uint16_t index)
{
	/* Ensure the VM is initialized. */
	if (index >= vm_count) {
		return NULL;
	}

	return &vms[index];
}

/**
 * Locks the given VM and returns a `vm_locked` wrapper holding it.
 */
struct vm_locked vm_lock(struct vm *vm)
{
	struct vm_locked locked = {
		.vm = vm,
	};

	sl_lock(&vm->lock);

	return locked;
}

/**
 * Locks two VMs, ensuring that the locking order is according to the locks'
 * addresses.
 */
struct two_vm_locked vm_lock_both(struct vm *vm1, struct vm *vm2)
{
	struct two_vm_locked dual_lock;

	sl_lock_both(&vm1->lock, &vm2->lock);
	dual_lock.vm1.vm = vm1;
	dual_lock.vm2.vm = vm2;

	return dual_lock;
}

/**
 * Unlocks a VM previously locked with vm_lock, and updates `locked` to reflect
 * the fact that the VM is no longer locked.
 */
void vm_unlock(struct vm_locked *locked)
{
	sl_unlock(&locked->vm->lock);
	locked->vm = NULL;
}
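
/*
 * A minimal usage sketch of the locking API above (assumes the caller already
 * holds a valid `struct vm *vm`):
 *
 *	struct vm_locked vm_locked = vm_lock(vm);
 *	// ... access vm_locked.vm while holding the lock ...
 *	vm_unlock(&vm_locked);
 */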

/**
 * Get the vCPU with the given index from the given VM.
 * This assumes the index is valid, i.e. less than vm->vcpu_count.
 */
struct vcpu *vm_get_vcpu(struct vm *vm, ffa_vcpu_index_t vcpu_index)
{
	CHECK(vcpu_index < vm->vcpu_count);
	return &vm->vcpus[vcpu_index];
}

/**
 * Gets `vm`'s wait entry for waiting on `for_vm`.
 */
struct wait_entry *vm_get_wait_entry(struct vm *vm, ffa_vm_id_t for_vm)
{
	uint16_t index;

	CHECK(for_vm >= HF_VM_ID_OFFSET);
	index = for_vm - HF_VM_ID_OFFSET;
	CHECK(index < MAX_VMS);

	return &vm->wait_entries[index];
}

/**
 * Checks whether the given `to` VM's mailbox is currently busy.
 */
bool vm_is_mailbox_busy(struct vm_locked to)
{
	return to.vm->mailbox.state != MAILBOX_STATE_EMPTY ||
	       to.vm->mailbox.recv == NULL;
}

/**
 * Gets the ID of the VM which the given VM's wait entry is for.
 */
ffa_vm_id_t vm_id_for_wait_entry(struct vm *vm, struct wait_entry *entry)
{
	uint16_t index = entry - vm->wait_entries;

	return index + HF_VM_ID_OFFSET;
}

/**
 * Returns whether the given VM ID represents an entity in the current world:
 * i.e. the hypervisor or a normal world VM when running in the normal world,
 * or the SPM or an SP when running in the secure world.
 */
bool vm_id_is_current_world(ffa_vm_id_t vm_id)
{
	return (vm_id & HF_VM_ID_WORLD_MASK) !=
	       (HF_OTHER_WORLD_ID & HF_VM_ID_WORLD_MASK);
}

/**
 * Maps a range of addresses to the VM in both the MMU and the IOMMU.
 *
 * mm_vm_defrag should always be called after a series of page table updates,
 * whether they succeed or fail. This is because on failure extra page table
 * entries may have been allocated and then not used, while on success it may
 * be possible to compact the page table by merging several entries into a
 * block.
 *
 * Returns true on success, or false if the update failed and no changes were
 * made.
 */
bool vm_identity_map(struct vm_locked vm_locked, paddr_t begin, paddr_t end,
		     uint32_t mode, struct mpool *ppool, ipaddr_t *ipa)
{
	if (!vm_identity_prepare(vm_locked, begin, end, mode, ppool)) {
		return false;
	}

	vm_identity_commit(vm_locked, begin, end, mode, ppool, ipa);

	return true;
}


/**
 * Prepares the given VM for the given address mapping such that it will be
 * able to commit the change without failure.
 *
 * In particular, multiple calls to this function ensure that the
 * corresponding calls to commit the changes will succeed.
 *
 * Returns true on success, or false if the update failed and no changes were
 * made.
 */
bool vm_identity_prepare(struct vm_locked vm_locked, paddr_t begin, paddr_t end,
			 uint32_t mode, struct mpool *ppool)
{
	if (vm_locked.vm->el0_partition) {
		return mm_identity_prepare(&vm_locked.vm->ptable, begin, end,
					   mode, ppool);
	}
	return mm_vm_identity_prepare(&vm_locked.vm->ptable, begin, end, mode,
				      ppool);
}

/**
 * Commits the given address mapping to the VM, assuming the operation cannot
 * fail. `vm_identity_prepare` must be used correctly before this to ensure
 * this condition.
 */
void vm_identity_commit(struct vm_locked vm_locked, paddr_t begin, paddr_t end,
			uint32_t mode, struct mpool *ppool, ipaddr_t *ipa)
{
	if (vm_locked.vm->el0_partition) {
		mm_identity_commit(&vm_locked.vm->ptable, begin, end, mode,
				   ppool);
		if (ipa != NULL) {
			/*
			 * EL0 partitions are modeled as lightweight VMs to
			 * promote code reuse. The statement below returns the
			 * mapped PA as an IPA; however, for an EL0 partition
			 * this is really a VA.
			 */
			*ipa = ipa_from_pa(begin);
		}
	} else {
		mm_vm_identity_commit(&vm_locked.vm->ptable, begin, end, mode,
				      ppool, ipa);
	}
	plat_iommu_identity_map(vm_locked, begin, end, mode);
}

/**
 * Unmaps a range of addresses from the VM.
 *
 * Returns true on success, or false if the update failed and no changes were
 * made.
 */
bool vm_unmap(struct vm_locked vm_locked, paddr_t begin, paddr_t end,
	      struct mpool *ppool)
{
	uint32_t mode = MM_MODE_UNMAPPED_MASK;

	return vm_identity_map(vm_locked, begin, end, mode, ppool, NULL);
}

/**
 * Defrags the page tables for an EL0 partition or for a VM.
 */
void vm_ptable_defrag(struct vm_locked vm_locked, struct mpool *ppool)
{
	if (vm_locked.vm->el0_partition) {
		mm_stage1_defrag(&vm_locked.vm->ptable, ppool);
	} else {
		mm_vm_defrag(&vm_locked.vm->ptable, ppool);
	}
}

/**
 * Unmaps the hypervisor pages from the given page table.
 */
bool vm_unmap_hypervisor(struct vm_locked vm_locked, struct mpool *ppool)
{
	/* TODO: If we add pages dynamically, they must be included here too. */
	return vm_unmap(vm_locked, layout_text_begin(), layout_text_end(),
			ppool) &&
	       vm_unmap(vm_locked, layout_rodata_begin(), layout_rodata_end(),
			ppool) &&
	       vm_unmap(vm_locked, layout_data_begin(), layout_data_end(),
			ppool) &&
	       vm_unmap(vm_locked, layout_stacks_begin(), layout_stacks_end(),
			ppool);
}

/**
 * Gets the mode of the given range of IPAs or VAs if they are all mapped with
 * the same mode.
 *
 * Returns true if the range is mapped with the same mode and false otherwise.
 * The wrapper calls the appropriate mm function depending on whether the
 * partition is a VM or an EL0 partition.
 */
bool vm_mem_get_mode(struct vm_locked vm_locked, ipaddr_t begin, ipaddr_t end,
		     uint32_t *mode)
{
	if (vm_locked.vm->el0_partition) {
		return mm_get_mode(&vm_locked.vm->ptable,
				   va_from_pa(pa_from_ipa(begin)),
				   va_from_pa(pa_from_ipa(end)), mode);
	}
	return mm_vm_get_mode(&vm_locked.vm->ptable, begin, end, mode);
}

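/*
 * Checks whether the locked VM's mailbox is busy: its state is not EMPTY, or
 * no receive buffer has been set up.
 */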
bool vm_mailbox_state_busy(struct vm_locked vm_locked)
{
	return vm_locked.vm->mailbox.state != MAILBOX_STATE_EMPTY ||
	       vm_locked.vm->mailbox.recv == NULL;
}

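/*
 * Returns the VM's notifications structure tracking notifications coming from
 * VMs or from SPs, as selected by `is_from_vm`.
 */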
static struct notifications *vm_get_notifications(struct vm_locked vm_locked,
						  bool is_from_vm)
{
	return is_from_vm ? &vm_locked.vm->notifications.from_vm
			  : &vm_locked.vm->notifications.from_sp;
}

/*
 * Dynamically allocates the per-vCPU notifications structures for a given VM.
 */
static void vm_notifications_init_per_vcpu_notifications(
	struct vm *vm, ffa_vcpu_count_t vcpu_count, struct mpool *ppool)
{
	size_t notif_ppool_entries =
		(align_up(sizeof(struct notifications_state) * vcpu_count,
			  MM_PPOOL_ENTRY_SIZE) /
		 MM_PPOOL_ENTRY_SIZE);

	/*
	 * Allow this function to be called on already initialized VMs whose
	 * notification structures only need to be cleared.
	 */
	if (vm->notifications.from_sp.per_vcpu == NULL) {
		assert(vm->notifications.from_vm.per_vcpu == NULL);
		assert(vcpu_count != 0);
		CHECK(ppool != NULL);
		vm->notifications.from_sp.per_vcpu =
			(struct notifications_state *)mpool_alloc_contiguous(
				ppool, notif_ppool_entries, 1);
		CHECK(vm->notifications.from_sp.per_vcpu != NULL);

		vm->notifications.from_vm.per_vcpu =
			(struct notifications_state *)mpool_alloc_contiguous(
				ppool, notif_ppool_entries, 1);
		CHECK(vm->notifications.from_vm.per_vcpu != NULL);
	} else {
		assert(vm->notifications.from_vm.per_vcpu != NULL);
	}

	memset_s(vm->notifications.from_sp.per_vcpu,
		 sizeof(*(vm->notifications.from_sp.per_vcpu)) * vcpu_count, 0,
		 sizeof(*(vm->notifications.from_sp.per_vcpu)) * vcpu_count);
	memset_s(vm->notifications.from_vm.per_vcpu,
		 sizeof(*(vm->notifications.from_vm.per_vcpu)) * vcpu_count, 0,
		 sizeof(*(vm->notifications.from_vm.per_vcpu)) * vcpu_count);
}

/*
 * Initializes the notification bindings: every notification starts unbound,
 * with its sender set to HF_INVALID_VM_ID.
 */
static void vm_notifications_init_bindings(struct notifications *notifications)
{
	for (uint32_t i = 0U; i < MAX_FFA_NOTIFICATIONS; i++) {
		notifications->bindings_sender_id[i] = HF_INVALID_VM_ID;
	}
}

/*
 * Initializes the notification-related structures for a VM.
 */
void vm_notifications_init(struct vm *vm, ffa_vcpu_count_t vcpu_count,
			   struct mpool *ppool)
{
	vm_notifications_init_per_vcpu_notifications(vm, vcpu_count, ppool);

	/* Basic initialization of the notifications structure. */
	vm_notifications_init_bindings(&vm->notifications.from_sp);
	vm_notifications_init_bindings(&vm->notifications.from_vm);
}

/**
 * Checks if there are pending notifications.
 */
bool vm_are_notifications_pending(struct vm_locked vm_locked, bool from_vm,
				  ffa_notifications_bitmap_t notifications)
{
	struct notifications *to_check;

	CHECK(vm_locked.vm != NULL);

	to_check = vm_get_notifications(vm_locked, from_vm);

	/* Check if there are pending per-vCPU notifications. */
	for (uint32_t i = 0U; i < vm_locked.vm->vcpu_count; i++) {
		if ((to_check->per_vcpu[i].pending & notifications) != 0U) {
			return true;
		}
	}

	/* Check if there are pending global notifications. */
	return (to_check->global.pending & notifications) != 0U;
}

/**
 * Checks if there are pending global notifications, either from SPs or from
 * VMs.
 */
bool vm_are_global_notifications_pending(struct vm_locked vm_locked)
{
	return vm_get_notifications(vm_locked, true)->global.pending != 0ULL ||
	       vm_get_notifications(vm_locked, false)->global.pending != 0ULL ||
	       vm_locked.vm->notifications.framework.pending != 0ULL;
}

/**
 * Checks if there are pending per-vCPU notifications on a specific vCPU,
 * either from SPs or from VMs.
 */
bool vm_are_per_vcpu_notifications_pending(struct vm_locked vm_locked,
					   ffa_vcpu_index_t vcpu_id)
{
	CHECK(vcpu_id < vm_locked.vm->vcpu_count);

	return vm_get_notifications(vm_locked, true)
			       ->per_vcpu[vcpu_id]
			       .pending != 0ULL ||
	       vm_get_notifications(vm_locked, false)
			       ->per_vcpu[vcpu_id]
			       .pending != 0ULL;
}

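/*
 * Returns whether notifications are enabled for the given VM.
 */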
bool vm_are_notifications_enabled(struct vm *vm)
{
	return vm->notifications.enabled == true;
}

bool vm_locked_are_notifications_enabled(struct vm_locked vm_locked)
{
	return vm_are_notifications_enabled(vm_locked.vm);
}

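/*
 * Returns whether bit `i` is set in the given notifications bitmap.
 */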
static bool vm_is_notification_bit_set(ffa_notifications_bitmap_t notifications,
				       uint32_t i)
{
	return (notifications & FFA_NOTIFICATION_MASK(i)) != 0U;
}

static void vm_notifications_global_state_count_update(
	ffa_notifications_bitmap_t bitmap, uint32_t *counter, int inc)
{
	/*
	 * Helper to update a counter of the global notifications state, once
	 * per bit set in `bitmap`. The count is updated in steps of 1 or -1.
	 */
	assert(inc == 1 || inc == -1);

	sl_lock(&all_notifications_state.lock);

	for (uint32_t i = 0; i < MAX_FFA_NOTIFICATIONS; i++) {
		if (vm_is_notification_bit_set(bitmap, i)) {
			CHECK((inc > 0 && *counter < UINT32_MAX) ||
			      (inc < 0 && *counter > 0));
			*counter += inc;
		}
	}

	sl_unlock(&all_notifications_state.lock);
}

/**
 * Helper function to increment the pending notifications count based on a
 * bitmap passed as argument.
 * To be used when setting notifications for a given VM.
 */
static void vm_notifications_pending_count_add(
	ffa_notifications_bitmap_t to_add)
{
	vm_notifications_global_state_count_update(
		to_add, &all_notifications_state.pending_count, 1);
}

/**
 * Helper function to decrement the pending notifications count.
 * To be used when getting the receiver's pending notifications.
 */
static void vm_notifications_pending_count_sub(
	ffa_notifications_bitmap_t to_sub)
{
	vm_notifications_global_state_count_update(
		to_sub, &all_notifications_state.pending_count, -1);
}

/**
 * Helper function to count the notifications whose information has been
 * retrieved by the scheduler of the system and that are still pending.
 */
static void vm_notifications_info_get_retrieved_count_add(
	ffa_notifications_bitmap_t to_add)
{
	vm_notifications_global_state_count_update(
		to_add, &all_notifications_state.info_get_retrieved_count, 1);
}

/**
 * Helper function to subtract the notifications that the receiver is getting
 * and whose information has been retrieved by the receiver scheduler.
 */
static void vm_notifications_info_get_retrieved_count_sub(
	ffa_notifications_bitmap_t to_sub)
{
	vm_notifications_global_state_count_update(
		to_sub, &all_notifications_state.info_get_retrieved_count, -1);
}

/**
 * Helper function to determine if there are notifications pending whose info
 * hasn't been retrieved by the receiver scheduler.
 */
bool vm_notifications_pending_not_retrieved_by_scheduler(void)
{
	bool ret;

	sl_lock(&all_notifications_state.lock);
	ret = all_notifications_state.pending_count >
	      all_notifications_state.info_get_retrieved_count;
	sl_unlock(&all_notifications_state.lock);

	return ret;
}

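/*
 * Returns true if no notifications are currently pending anywhere in the
 * system.
 */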
bool vm_is_notifications_pending_count_zero(void)
{
	bool ret;

	sl_lock(&all_notifications_state.lock);
	ret = all_notifications_state.pending_count == 0;
	sl_unlock(&all_notifications_state.lock);

	return ret;
}

/**
 * Checks that all provided notifications are bound to the specified sender,
 * and are per-vCPU or global, as specified.
 */
bool vm_notifications_validate_binding(struct vm_locked vm_locked,
				       bool is_from_vm, ffa_vm_id_t sender_id,
				       ffa_notifications_bitmap_t notifications,
				       bool is_per_vcpu)
{
	return vm_notifications_validate_bound_sender(
		       vm_locked, is_from_vm, sender_id, notifications) &&
	       vm_notifications_validate_per_vcpu(vm_locked, is_from_vm,
						  is_per_vcpu, notifications);
}

/**
 * Updates the binding information in the notifications structure for the
 * specified notifications.
 */
void vm_notifications_update_bindings(struct vm_locked vm_locked,
				      bool is_from_vm, ffa_vm_id_t sender_id,
				      ffa_notifications_bitmap_t notifications,
				      bool is_per_vcpu)
{
	CHECK(vm_locked.vm != NULL);
	struct notifications *to_update =
		vm_get_notifications(vm_locked, is_from_vm);

	for (uint32_t i = 0; i < MAX_FFA_NOTIFICATIONS; i++) {
		if (vm_is_notification_bit_set(notifications, i)) {
			to_update->bindings_sender_id[i] = sender_id;
		}
	}

	/*
	 * Set the per-vCPU binding bits if the notifications are per-vCPU,
	 * else clear them as the notifications are global.
	 */
	if (is_per_vcpu) {
		to_update->bindings_per_vcpu |= notifications;
	} else {
		to_update->bindings_per_vcpu &= ~notifications;
	}
}

bool vm_notifications_validate_bound_sender(
	struct vm_locked vm_locked, bool is_from_vm, ffa_vm_id_t sender_id,
	ffa_notifications_bitmap_t notifications)
{
	CHECK(vm_locked.vm != NULL);
	struct notifications *to_check =
		vm_get_notifications(vm_locked, is_from_vm);

	for (uint32_t i = 0; i < MAX_FFA_NOTIFICATIONS; i++) {
		if (vm_is_notification_bit_set(notifications, i) &&
		    to_check->bindings_sender_id[i] != sender_id) {
			return false;
		}
	}

	return true;
}

bool vm_notifications_validate_per_vcpu(struct vm_locked vm_locked,
					bool is_from_vm, bool is_per_vcpu,
					ffa_notifications_bitmap_t notif)
{
	CHECK(vm_locked.vm != NULL);
	struct notifications *to_check =
		vm_get_notifications(vm_locked, is_from_vm);

	return is_per_vcpu ? (~to_check->bindings_per_vcpu & notif) == 0U
			   : (to_check->bindings_per_vcpu & notif) == 0U;
}

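/*
 * Marks the given notifications as pending in `state` and updates the global
 * pending notifications count accordingly.
 */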
static void vm_notifications_state_set(struct notifications_state *state,
				       ffa_notifications_bitmap_t notifications)
{
	state->pending |= notifications;
	vm_notifications_pending_count_add(notifications);
}

void vm_notifications_partition_set_pending(
	struct vm_locked vm_locked, bool is_from_vm,
	ffa_notifications_bitmap_t notifications, ffa_vcpu_index_t vcpu_id,
	bool is_per_vcpu)
{
	struct notifications *to_set;
	struct notifications_state *state;

	CHECK(vm_locked.vm != NULL);
	CHECK(vcpu_id < vm_locked.vm->vcpu_count);

	to_set = vm_get_notifications(vm_locked, is_from_vm);

	state = is_per_vcpu ? &to_set->per_vcpu[vcpu_id] : &to_set->global;

	vm_notifications_state_set(state, notifications);
}

/**
 * Sets pending framework notifications.
 */
void vm_notifications_framework_set_pending(
	struct vm_locked vm_locked, ffa_notifications_bitmap_t notifications)
{
	CHECK(vm_locked.vm != NULL);
	assert(is_ffa_spm_buffer_full_notification(notifications) ||
	       is_ffa_hyp_buffer_full_notification(notifications));
	vm_notifications_state_set(&vm_locked.vm->notifications.framework,
				   notifications);
}

static ffa_notifications_bitmap_t vm_notifications_state_get_pending(
	struct notifications_state *state)
{
	ffa_notifications_bitmap_t to_ret;
	ffa_notifications_bitmap_t pending_and_info_get_retrieved;

	assert(state != NULL);

	to_ret = state->pending;

	/* Update count of currently pending notifications in the system. */
	vm_notifications_pending_count_sub(state->pending);

	/*
	 * If the notifications the receiver is getting have already had their
	 * information retrieved by the receiver scheduler, decrement them from
	 * the respective count.
	 */
	pending_and_info_get_retrieved =
		state->pending & state->info_get_retrieved;

	if (pending_and_info_get_retrieved != 0) {
		vm_notifications_info_get_retrieved_count_sub(
			pending_and_info_get_retrieved);
	}

	state->pending = 0U;
	state->info_get_retrieved = 0U;

	return to_ret;
}

/**
 * Gets global and per-vCPU notifications for the given vCPU ID.
 */
ffa_notifications_bitmap_t vm_notifications_partition_get_pending(
	struct vm_locked vm_locked, bool is_from_vm, ffa_vcpu_index_t vcpu_id)
{
	ffa_notifications_bitmap_t to_ret;
	struct notifications *to_get;

	assert(vm_locked.vm != NULL);
	to_get = vm_get_notifications(vm_locked, is_from_vm);
	assert(vcpu_id < vm_locked.vm->vcpu_count);

	to_ret = vm_notifications_state_get_pending(&to_get->global);
	to_ret |=
		vm_notifications_state_get_pending(&to_get->per_vcpu[vcpu_id]);

	return to_ret;
}

/**
 * Gets pending framework notifications.
 */
ffa_notifications_bitmap_t vm_notifications_framework_get_pending(
	struct vm_locked vm_locked)
{
	struct vm *vm = vm_locked.vm;
	ffa_notifications_bitmap_t framework;
	bool rx_buffer_full;

	assert(vm != NULL);

	framework = vm_notifications_state_get_pending(
		&vm->notifications.framework);

	/*
	 * By retrieving an RX buffer full notification the buffer state
	 * transitions from RECEIVED to READ; the VM is now the RX buffer
	 * owner, can read it and is allowed to release it.
	 */
	rx_buffer_full = is_ffa_spm_buffer_full_notification(framework) ||
			 is_ffa_hyp_buffer_full_notification(framework);
	if (rx_buffer_full && vm->mailbox.state == MAILBOX_STATE_RECEIVED) {
		vm->mailbox.state = MAILBOX_STATE_READ;
	}

	return framework;
}

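/*
 * Adds information about the notifications in `state` that are pending and
 * haven't yet been retrieved by the receiver scheduler to the `ids` list.
 * Each list starts with the VM ID and, for per-vCPU notifications, is followed
 * by up to three vCPU IDs before a new list is started. Updates the list
 * sizes, the list count and the info-get state machine
 * (INIT/STARTING_NEW/INSERTING/FULL) accordingly, marking FULL once `ids` has
 * no more space.
 */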
static void vm_notifications_state_info_get(
	struct notifications_state *state, ffa_vm_id_t vm_id, bool is_per_vcpu,
	ffa_vcpu_index_t vcpu_id, uint16_t *ids, uint32_t *ids_count,
	uint32_t *lists_sizes, uint32_t *lists_count,
	const uint32_t ids_max_count,
	enum notifications_info_get_state *info_get_state)
{
	ffa_notifications_bitmap_t pending_not_retrieved;

	CHECK(*ids_count <= ids_max_count);
	CHECK(*lists_count <= ids_max_count);

	if (*info_get_state == FULL) {
		return;
	}

	pending_not_retrieved = state->pending & ~state->info_get_retrieved;

	/* No notifications pending that haven't been retrieved. */
	if (pending_not_retrieved == 0U) {
		return;
	}

	if (*ids_count == ids_max_count) {
		*info_get_state = FULL;
		return;
	}

	switch (*info_get_state) {
	case INIT:
	case STARTING_NEW:
		/*
		 * In this iteration two IDs are to be added: the VM ID and
		 * the vCPU ID. If there is no space for both, change state
		 * and terminate the function.
		 */
		if (is_per_vcpu && ids_max_count - *ids_count < 2) {
			*info_get_state = FULL;
			return;
		}

		*info_get_state = INSERTING;
		ids[*ids_count] = vm_id;
		++(*ids_count);

		if (is_per_vcpu) {
			/* Insert vCPU ID. */
			ids[*ids_count] = vcpu_id;
			++(*ids_count);
			++lists_sizes[*lists_count];
		}

		++(*lists_count);
		break;
	case INSERTING:
		/* For per-vCPU notifications only. */
		if (!is_per_vcpu) {
			break;
		}

		/* Insert vCPU ID. */
		ids[*ids_count] = vcpu_id;
		(*ids_count)++;
		/* Increment the respective list size. */
		++lists_sizes[*lists_count - 1];

		if (lists_sizes[*lists_count - 1] == 3) {
			*info_get_state = STARTING_NEW;
		}
		break;
	default:
		panic("Notification info get action error!!\n");
	}

	state->info_get_retrieved |= pending_not_retrieved;

	vm_notifications_info_get_retrieved_count_add(pending_not_retrieved);
}

/**
 * Gets pending notifications' information to return to the receiver scheduler.
 */
void vm_notifications_info_get_pending(
	struct vm_locked vm_locked, bool is_from_vm, uint16_t *ids,
	uint32_t *ids_count, uint32_t *lists_sizes, uint32_t *lists_count,
	const uint32_t ids_max_count,
	enum notifications_info_get_state *info_get_state)
{
	struct notifications *notifications;

	CHECK(vm_locked.vm != NULL);

	notifications = vm_get_notifications(vm_locked, is_from_vm);

	/*
	 * Perform the info get for global notifications before doing it for
	 * the per-vCPU ones.
	 */
	vm_notifications_state_info_get(&notifications->global,
					vm_locked.vm->id, false, 0, ids,
					ids_count, lists_sizes, lists_count,
					ids_max_count, info_get_state);

	for (ffa_vcpu_count_t i = 0; i < vm_locked.vm->vcpu_count; i++) {
		vm_notifications_state_info_get(
			&notifications->per_vcpu[i], vm_locked.vm->id, true, i,
			ids, ids_count, lists_sizes, lists_count, ids_max_count,
			info_get_state);
	}
}

/**
 * Gets all info from the VM's pending notifications.
 * Returns true if the list is full and there are more pending notifications.
 */
bool vm_notifications_info_get(struct vm_locked vm_locked, uint16_t *ids,
			       uint32_t *ids_count, uint32_t *lists_sizes,
			       uint32_t *lists_count,
			       const uint32_t ids_max_count)
{
	enum notifications_info_get_state current_state = INIT;

	/* Get info of pending notifications from the framework. */
	vm_notifications_state_info_get(&vm_locked.vm->notifications.framework,
					vm_locked.vm->id, false, 0, ids,
					ids_count, lists_sizes, lists_count,
					ids_max_count, &current_state);

	/* Get info of pending notifications from SPs. */
	vm_notifications_info_get_pending(vm_locked, false, ids, ids_count,
					  lists_sizes, lists_count,
					  ids_max_count, &current_state);

	/* Get info of pending notifications from VMs. */
	vm_notifications_info_get_pending(vm_locked, true, ids, ids_count,
					  lists_sizes, lists_count,
					  ids_max_count, &current_state);

	/*
	 * The state transitions to FULL when trying to insert a new ID in the
	 * list and there is no more space. This means there are notifications
	 * pending whose info has not been retrieved.
	 */
	return current_state == FULL;
}

/**
 * Checks whether the VM supports the given messaging method.
 */
bool vm_supports_messaging_method(struct vm *vm, uint8_t msg_method)
{
	return (vm->messaging_method & msg_method) != 0;
}

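/*
 * Records whether the notification pending interrupt (NPI) has been injected
 * for the given partition.
 */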
void vm_notifications_set_npi_injected(struct vm_locked vm_locked,
				       bool npi_injected)
{
	vm_locked.vm->notifications.npi_injected = npi_injected;
}

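/*
 * Returns whether the notification pending interrupt (NPI) has been injected
 * for the given partition.
 */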
bool vm_notifications_is_npi_injected(struct vm_locked vm_locked)
{
	return vm_locked.vm->notifications.npi_injected;
}

/**
 * Sets the designated GP register through which the VM expects to receive the
 * boot info's address.
 */
void vm_set_boot_info_gp_reg(struct vm *vm, struct vcpu *vcpu)
{
	if (vm->boot_info.blob_addr.ipa != 0U) {
		arch_regs_set_gp_reg(&vcpu->regs,
				     ipa_addr(vm->boot_info.blob_addr),
				     vm->boot_info.gp_register_num);
	}
}