/*
 * Copyright 2018 The Hafnium Authors.
 *
 * Use of this source code is governed by a BSD-style
 * license that can be found in the LICENSE file or at
 * https://opensource.org/licenses/BSD-3-Clause.
 */

#include "hf/vm.h"

#include "hf/api.h"
#include "hf/check.h"
#include "hf/cpu.h"
#include "hf/dlog.h"
#include "hf/ffa.h"
#include "hf/layout.h"
#include "hf/plat/iommu.h"
#include "hf/std.h"

#include "vmapi/hf/call.h"

static struct vm vms[MAX_VMS];
static struct vm other_world;
static ffa_vm_count_t vm_count;
static struct vm *first_boot_vm;

/**
 * Initializes the VM's page table: a stage-1 table for EL0 partitions, a
 * stage-2 table otherwise.
 */
static bool vm_init_mm(struct vm *vm, struct mpool *ppool)
{
	if (vm->el0_partition) {
		return mm_ptable_init(&vm->ptable, vm->id, MM_FLAG_STAGE1,
				      ppool);
	}
	return mm_vm_init(&vm->ptable, vm->id, ppool);
}

struct vm *vm_init(ffa_vm_id_t id, ffa_vcpu_count_t vcpu_count,
		   struct mpool *ppool, bool el0_partition)
{
	uint32_t i;
	struct vm *vm;

	if (id == HF_OTHER_WORLD_ID) {
		CHECK(el0_partition == false);
		vm = &other_world;
	} else {
		uint16_t vm_index = id - HF_VM_ID_OFFSET;

		CHECK(id >= HF_VM_ID_OFFSET);
		CHECK(vm_index < ARRAY_SIZE(vms));
		vm = &vms[vm_index];
	}

	memset_s(vm, sizeof(*vm), 0, sizeof(*vm));

	list_init(&vm->mailbox.waiter_list);
	list_init(&vm->mailbox.ready_list);
	sl_init(&vm->lock);

	vm->id = id;
	vm->vcpu_count = vcpu_count;
	vm->mailbox.state = MAILBOX_STATE_EMPTY;
	atomic_init(&vm->aborting, false);
	vm->el0_partition = el0_partition;

	if (!vm_init_mm(vm, ppool)) {
		return NULL;
	}

	/* Initialise waiter entries. */
	for (i = 0; i < MAX_VMS; i++) {
		vm->wait_entries[i].waiting_vm = vm;
		list_init(&vm->wait_entries[i].wait_links);
		list_init(&vm->wait_entries[i].ready_links);
	}

	/* Do basic initialization of vCPUs. */
	for (i = 0; i < vcpu_count; i++) {
		vcpu_init(vm_get_vcpu(vm, i), vm);
	}

	/* Basic initialization of the notifications structure. */
	vm_notifications_init_bindings(&vm->notifications.from_sp);
	vm_notifications_init_bindings(&vm->notifications.from_vm);

	/* TODO: Enable in accordance with the VM's manifest. */
	vm->notifications.enabled = true;

	return vm;
}

bool vm_init_next(ffa_vcpu_count_t vcpu_count, struct mpool *ppool,
		  struct vm **new_vm, bool el0_partition)
{
	if (vm_count >= MAX_VMS) {
		return false;
	}

	/* Generate IDs based on an offset, as low IDs (e.g. 0) are reserved. */
	*new_vm = vm_init(vm_count + HF_VM_ID_OFFSET, vcpu_count, ppool,
			  el0_partition);
	if (*new_vm == NULL) {
		return false;
	}
	++vm_count;

	return true;
}

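/*
 * Usage sketch: how boot-time code might create a secondary VM with
 * vm_init_next(). The helper below is illustrative only; its name, the choice
 * of a non-EL0 partition and the log message are not taken from any existing
 * caller of this API.
 */
static struct vm *vm_example_create_secondary(ffa_vcpu_count_t vcpu_count,
					       struct mpool *ppool)
{
	struct vm *vm;

	/* IDs are assigned sequentially, starting from HF_VM_ID_OFFSET. */
	if (!vm_init_next(vcpu_count, ppool, &vm, false)) {
		dlog("vm_example: no free VM slots.\n");
		return NULL;
	}

	return vm;
}
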
ffa_vm_count_t vm_get_count(void)
{
	return vm_count;
}

/**
 * Returns a pointer to the VM with the corresponding id.
 */
struct vm *vm_find(ffa_vm_id_t id)
{
	uint16_t index;

	if (id == HF_OTHER_WORLD_ID) {
		if (other_world.id == HF_OTHER_WORLD_ID) {
			return &other_world;
		}
		return NULL;
	}

	/* Check that this is not a reserved ID. */
	if (id < HF_VM_ID_OFFSET) {
		return NULL;
	}

	index = id - HF_VM_ID_OFFSET;

	return vm_find_index(index);
}

/**
 * Returns a locked instance of the VM with the corresponding id.
 */
struct vm_locked vm_find_locked(ffa_vm_id_t id)
{
	struct vm *vm = vm_find(id);

	if (vm != NULL) {
		return vm_lock(vm);
	}

	return (struct vm_locked){.vm = NULL};
}

/**
 * Returns a pointer to the VM at the specified index.
 */
struct vm *vm_find_index(uint16_t index)
{
	/* Ensure the VM is initialized. */
	if (index >= vm_count) {
		return NULL;
	}

	return &vms[index];
}

/**
 * Locks the given VM and returns a `vm_locked` handle holding it.
 */
struct vm_locked vm_lock(struct vm *vm)
{
	struct vm_locked locked = {
		.vm = vm,
	};

	sl_lock(&vm->lock);

	return locked;
}

/**
 * Locks two VMs, ensuring that the locking order follows the locks'
 * addresses.
 */
struct two_vm_locked vm_lock_both(struct vm *vm1, struct vm *vm2)
{
	struct two_vm_locked dual_lock;

	sl_lock_both(&vm1->lock, &vm2->lock);
	dual_lock.vm1.vm = vm1;
	dual_lock.vm2.vm = vm2;

	return dual_lock;
}

/**
 * Unlocks a VM previously locked with vm_lock, and updates `locked` to reflect
 * the fact that the VM is no longer locked.
 */
void vm_unlock(struct vm_locked *locked)
{
	sl_unlock(&locked->vm->lock);
	locked->vm = NULL;
}

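/*
 * Usage sketch: the typical find/lock/use/unlock pattern for this API. The
 * helper below and the field it reads are illustrative only, not an existing
 * part of this file.
 */
static ffa_vcpu_count_t vm_example_vcpu_count_of(ffa_vm_id_t id)
{
	struct vm_locked vm_locked = vm_find_locked(id);
	ffa_vcpu_count_t count;

	if (vm_locked.vm == NULL) {
		return 0;
	}

	/* Read state under the VM's lock, then release it. */
	count = vm_locked.vm->vcpu_count;
	vm_unlock(&vm_locked);

	return count;
}
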
/**
 * Get the vCPU with the given index from the given VM.
 * This assumes the index is valid, i.e. less than vm->vcpu_count.
 */
struct vcpu *vm_get_vcpu(struct vm *vm, ffa_vcpu_index_t vcpu_index)
{
	CHECK(vcpu_index < vm->vcpu_count);
	return &vm->vcpus[vcpu_index];
}

/**
 * Gets `vm`'s wait entry for waiting on `for_vm`.
 */
struct wait_entry *vm_get_wait_entry(struct vm *vm, ffa_vm_id_t for_vm)
{
	uint16_t index;

	CHECK(for_vm >= HF_VM_ID_OFFSET);
	index = for_vm - HF_VM_ID_OFFSET;
	CHECK(index < MAX_VMS);

	return &vm->wait_entries[index];
}

/**
 * Gets the ID of the VM which the given VM's wait entry is for.
 */
ffa_vm_id_t vm_id_for_wait_entry(struct vm *vm, struct wait_entry *entry)
{
	uint16_t index = entry - vm->wait_entries;

	return index + HF_VM_ID_OFFSET;
}

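/*
 * Illustrative check of how vm_get_wait_entry() and vm_id_for_wait_entry()
 * invert each other: the entry obtained for a given VM ID maps back to that
 * same ID. The helper is a sketch, not an existing caller.
 */
static void vm_example_wait_entry_round_trip(struct vm *vm, ffa_vm_id_t for_vm)
{
	struct wait_entry *entry = vm_get_wait_entry(vm, for_vm);

	CHECK(vm_id_for_wait_entry(vm, entry) == for_vm);
}
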
/**
 * Return whether the given VM ID represents an entity in the current world:
 * i.e. the hypervisor or a normal world VM when running in the normal world,
 * or the SPM or an SP when running in the secure world.
 */
bool vm_id_is_current_world(ffa_vm_id_t vm_id)
{
	return (vm_id & HF_VM_ID_WORLD_MASK) !=
	       (HF_OTHER_WORLD_ID & HF_VM_ID_WORLD_MASK);
}

/**
 * Map a range of addresses to the VM in both the MMU and the IOMMU.
 *
 * mm_vm_defrag should always be called after a series of page table updates,
 * whether they succeed or fail. This is because on failure extra page table
 * entries may have been allocated and then not used, while on success it may
 * be possible to compact the page table by merging several entries into a
 * block.
 *
 * Returns true on success, or false if the update failed and no changes were
 * made.
 */
bool vm_identity_map(struct vm_locked vm_locked, paddr_t begin, paddr_t end,
		     uint32_t mode, struct mpool *ppool, ipaddr_t *ipa)
{
	if (!vm_identity_prepare(vm_locked, begin, end, mode, ppool)) {
		return false;
	}

	vm_identity_commit(vm_locked, begin, end, mode, ppool, ipa);

	return true;
}

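/*
 * Usage sketch for vm_identity_map(): identity-map a physical range into a
 * locked VM as read/write memory and sanity-check the returned IPA. The
 * helper, the chosen mode bits and the caller's duty to defragment afterwards
 * (per the note above vm_identity_map) are illustrative assumptions.
 */
static bool vm_example_map_rw(struct vm_locked vm_locked, paddr_t begin,
			      paddr_t end, struct mpool *ppool)
{
	ipaddr_t ipa;

	if (!vm_identity_map(vm_locked, begin, end, MM_MODE_R | MM_MODE_W,
			     ppool, &ipa)) {
		return false;
	}

	/* For an identity mapping the resulting IPA equals the PA. */
	CHECK(ipa_addr(ipa) == pa_addr(begin));

	return true;
}
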
/**
 * Prepares the given VM for the given address mapping such that it will be
 * able to commit the change without failure.
 *
 * In particular, after multiple calls to this function, the corresponding
 * calls to commit the changes will succeed.
 *
 * Returns true on success, or false if the update failed and no changes were
 * made.
 */
bool vm_identity_prepare(struct vm_locked vm_locked, paddr_t begin, paddr_t end,
			 uint32_t mode, struct mpool *ppool)
{
	if (vm_locked.vm->el0_partition) {
		return mm_identity_prepare(&vm_locked.vm->ptable, begin, end,
					   mode, ppool);
	}
	return mm_vm_identity_prepare(&vm_locked.vm->ptable, begin, end, mode,
				      ppool);
}

/**
 * Commits the given address mapping to the VM, assuming the operation cannot
 * fail. `vm_identity_prepare` must be used correctly before this to ensure
 * this condition.
 */
void vm_identity_commit(struct vm_locked vm_locked, paddr_t begin, paddr_t end,
			uint32_t mode, struct mpool *ppool, ipaddr_t *ipa)
{
	if (vm_locked.vm->el0_partition) {
		mm_identity_commit(&vm_locked.vm->ptable, begin, end, mode,
				   ppool);
		if (ipa != NULL) {
			/*
			 * EL0 partitions are modeled as lightweight VMs, to
			 * promote code reuse. The below statement returns the
			 * mapped PA as an IPA; however, for an EL0 partition,
			 * this is really a VA.
			 */
			*ipa = ipa_from_pa(begin);
		}
	} else {
		mm_vm_identity_commit(&vm_locked.vm->ptable, begin, end, mode,
				      ppool, ipa);
	}
	plat_iommu_identity_map(vm_locked, begin, end, mode);
}

/**
 * Unmap a range of addresses from the VM.
 *
 * Returns true on success, or false if the update failed and no changes were
 * made.
 */
bool vm_unmap(struct vm_locked vm_locked, paddr_t begin, paddr_t end,
	      struct mpool *ppool)
{
	uint32_t mode = MM_MODE_UNMAPPED_MASK;

	return vm_identity_map(vm_locked, begin, end, mode, ppool, NULL);
}

/**
 * Unmaps the hypervisor pages from the given page table.
 */
bool vm_unmap_hypervisor(struct vm_locked vm_locked, struct mpool *ppool)
{
	/* TODO: If we add pages dynamically, they must be included here too. */
	return vm_unmap(vm_locked, layout_text_begin(), layout_text_end(),
			ppool) &&
	       vm_unmap(vm_locked, layout_rodata_begin(), layout_rodata_end(),
			ppool) &&
	       vm_unmap(vm_locked, layout_data_begin(), layout_data_end(),
			ppool);
}

/**
 * Gets the first partition to boot, according to the boot protocol from the
 * FF-A spec.
 */
struct vm *vm_get_first_boot(void)
{
	return first_boot_vm;
}

/**
 * Inserts the VM into the boot list, which is sorted by the `boot_order` field
 * of the vm structure and rooted at `first_boot_vm`.
 */
void vm_update_boot(struct vm *vm)
{
	struct vm *current = NULL;
	struct vm *previous = NULL;

	if (first_boot_vm == NULL) {
		first_boot_vm = vm;
		return;
	}

	current = first_boot_vm;

	while (current != NULL && current->boot_order >= vm->boot_order) {
		previous = current;
		current = current->next_boot;
	}

	if (previous != NULL) {
		previous->next_boot = vm;
	} else {
		first_boot_vm = vm;
	}

	vm->next_boot = current;
}

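/*
 * Sketch of how the boot list built by vm_update_boot() is consumed: walking
 * from vm_get_first_boot() along next_boot visits partitions in non-increasing
 * boot_order. The counting helper below is illustrative only.
 */
static uint32_t vm_example_count_boot_list(void)
{
	uint32_t n = 0;
	struct vm *vm;

	for (vm = vm_get_first_boot(); vm != NULL; vm = vm->next_boot) {
		n++;
	}

	return n;
}
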
/**
 * Initializes the bindings of the given notifications structure.
 */
void vm_notifications_init_bindings(struct notifications *notifications)
{
	for (uint32_t i = 0U; i < MAX_FFA_NOTIFICATIONS; i++) {
		notifications->bindings_sender_id[i] = HF_INVALID_VM_ID;
	}
}