/*
 * Copyright 2018 The Hafnium Authors.
 *
 * Use of this source code is governed by a BSD-style
 * license that can be found in the LICENSE file or at
 * https://opensource.org/licenses/BSD-3-Clause.
 */

#include "hf/vm.h"

#include "hf/api.h"
#include "hf/check.h"
#include "hf/cpu.h"
#include "hf/ffa.h"
#include "hf/layout.h"
#include "hf/plat/iommu.h"
#include "hf/std.h"

#include "vmapi/hf/call.h"

static struct vm vms[MAX_VMS];
static struct vm other_world;
static ffa_vm_count_t vm_count;
static struct vm *first_boot_vm;

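/**
 * Initialises the VM slot for `id` with `vcpu_count` vCPUs, using `ppool` for
 * its page tables. Returns NULL if the page table could not be initialised.
 */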
struct vm *vm_init(ffa_vm_id_t id, ffa_vcpu_count_t vcpu_count,
                   struct mpool *ppool, bool el0_partition)
{
        uint32_t i;
        struct vm *vm;

        if (id == HF_OTHER_WORLD_ID) {
                CHECK(!el0_partition);
                vm = &other_world;
        } else {
                uint16_t vm_index = id - HF_VM_ID_OFFSET;

                CHECK(id >= HF_VM_ID_OFFSET);
                CHECK(vm_index < ARRAY_SIZE(vms));
                vm = &vms[vm_index];
        }

        memset_s(vm, sizeof(*vm), 0, sizeof(*vm));

        list_init(&vm->mailbox.waiter_list);
        list_init(&vm->mailbox.ready_list);
        sl_init(&vm->lock);

        vm->id = id;
        vm->vcpu_count = vcpu_count;
        vm->mailbox.state = MAILBOX_STATE_EMPTY;
        atomic_init(&vm->aborting, false);
        vm->el0_partition = el0_partition;

        if (!mm_vm_init(&vm->ptable, ppool)) {
                return NULL;
        }

        /* Initialise waiter entries. */
        for (i = 0; i < MAX_VMS; i++) {
                vm->wait_entries[i].waiting_vm = vm;
                list_init(&vm->wait_entries[i].wait_links);
                list_init(&vm->wait_entries[i].ready_links);
        }

        /* Do basic initialization of vCPUs. */
        for (i = 0; i < vcpu_count; i++) {
                vcpu_init(vm_get_vcpu(vm, i), vm);
        }

        return vm;
}

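/**
 * Allocates and initialises the next available VM, assigning it the next free
 * ID. Returns false if the maximum number of VMs has been reached or
 * initialisation fails.
 */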
bool vm_init_next(ffa_vcpu_count_t vcpu_count, struct mpool *ppool,
                  struct vm **new_vm, bool el0_partition)
{
        if (vm_count >= MAX_VMS) {
                return false;
        }

        /* Generate IDs based on an offset, as low IDs (e.g. 0) are reserved. */
        *new_vm = vm_init(vm_count + HF_VM_ID_OFFSET, vcpu_count, ppool,
                          el0_partition);
        if (*new_vm == NULL) {
                return false;
        }
        ++vm_count;

        return true;
}

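/**
 * Returns the number of VMs that have been initialised so far.
 */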
ffa_vm_count_t vm_get_count(void)
{
        return vm_count;
}

/**
 * Returns a pointer to the VM with the corresponding id.
 */
struct vm *vm_find(ffa_vm_id_t id)
{
        uint16_t index;

        if (id == HF_OTHER_WORLD_ID) {
                if (other_world.id == HF_OTHER_WORLD_ID) {
                        return &other_world;
                }
                return NULL;
        }

        /* Check that this is not a reserved ID. */
        if (id < HF_VM_ID_OFFSET) {
                return NULL;
        }

        index = id - HF_VM_ID_OFFSET;

        return vm_find_index(index);
}

/**
 * Returns a pointer to the VM at the specified index.
 */
struct vm *vm_find_index(uint16_t index)
{
        /* Ensure the VM is initialized. */
        if (index >= vm_count) {
                return NULL;
        }

        return &vms[index];
}

/**
 * Locks the given VM and returns a `vm_locked` reference to the now-locked VM.
 */
struct vm_locked vm_lock(struct vm *vm)
{
        struct vm_locked locked = {
                .vm = vm,
        };

        sl_lock(&vm->lock);

        return locked;
}

/**
 * Locks two VMs, ensuring that the locking order is determined by the locks'
 * addresses.
 */
struct two_vm_locked vm_lock_both(struct vm *vm1, struct vm *vm2)
{
        struct two_vm_locked dual_lock;

        sl_lock_both(&vm1->lock, &vm2->lock);
        dual_lock.vm1.vm = vm1;
        dual_lock.vm2.vm = vm2;

        return dual_lock;
}

/**
 * Unlocks a VM previously locked with vm_lock, and updates `locked` to reflect
 * the fact that the VM is no longer locked.
 */
void vm_unlock(struct vm_locked *locked)
{
        sl_unlock(&locked->vm->lock);
        locked->vm = NULL;
}
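
/*
 * Illustrative usage sketch of the locking API above: callers pair vm_lock()
 * with vm_unlock() around accesses to state guarded by the VM lock, e.g.
 *
 *      struct vm_locked locked = vm_lock(vm);
 *      ... operate on locked.vm ...
 *      vm_unlock(&locked);
 */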

/**
 * Get the vCPU with the given index from the given VM.
 * This assumes the index is valid, i.e. less than vm->vcpu_count.
 */
struct vcpu *vm_get_vcpu(struct vm *vm, ffa_vcpu_index_t vcpu_index)
{
        CHECK(vcpu_index < vm->vcpu_count);
        return &vm->vcpus[vcpu_index];
}

/**
 * Gets `vm`'s wait entry for waiting on `for_vm`.
 */
struct wait_entry *vm_get_wait_entry(struct vm *vm, ffa_vm_id_t for_vm)
{
        uint16_t index;

        CHECK(for_vm >= HF_VM_ID_OFFSET);
        index = for_vm - HF_VM_ID_OFFSET;
        CHECK(index < MAX_VMS);

        return &vm->wait_entries[index];
}

/**
 * Gets the ID of the VM which the given VM's wait entry is for.
 */
ffa_vm_id_t vm_id_for_wait_entry(struct vm *vm, struct wait_entry *entry)
{
        uint16_t index = entry - vm->wait_entries;

        return index + HF_VM_ID_OFFSET;
}

/**
 * Return whether the given VM ID represents an entity in the current world:
 * i.e. the hypervisor or a normal world VM when running in the normal world, or
 * the SPM or an SP when running in the secure world.
 */
bool vm_id_is_current_world(ffa_vm_id_t vm_id)
{
        return (vm_id & HF_VM_ID_WORLD_MASK) !=
               (HF_OTHER_WORLD_ID & HF_VM_ID_WORLD_MASK);
}

/**
 * Map a range of addresses to the VM in both the MMU and the IOMMU.
 *
 * mm_vm_defrag should always be called after a series of page table updates,
 * whether they succeed or fail. This is because on failure extra page table
 * entries may have been allocated and then not used, while on success it may be
 * possible to compact the page table by merging several entries into a block.
 *
 * Returns true on success, or false if the update failed and no changes were
 * made.
 */
bool vm_identity_map(struct vm_locked vm_locked, paddr_t begin, paddr_t end,
                     uint32_t mode, struct mpool *ppool, ipaddr_t *ipa)
{
        if (!vm_identity_prepare(vm_locked, begin, end, mode, ppool)) {
                return false;
        }

        vm_identity_commit(vm_locked, begin, end, mode, ppool, ipa);

        return true;
}
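
/*
 * Illustrative example of the prepare/commit mapping API above (the mode flags
 * are assumed to come from "hf/mm.h"): mapping a region read/write for an
 * already locked VM might look like:
 *
 *      if (!vm_identity_map(vm_locked, begin, end, MM_MODE_R | MM_MODE_W,
 *                           ppool, NULL)) {
 *              return false;
 *      }
 */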

/**
 * Prepares the given VM for the given address mapping such that it will be able
 * to commit the change without failure.
 *
 * In particular, after multiple calls to this function, the corresponding
 * calls to commit the changes will succeed.
 *
 * Returns true on success, or false if the update failed and no changes were
 * made.
 */
bool vm_identity_prepare(struct vm_locked vm_locked, paddr_t begin, paddr_t end,
                         uint32_t mode, struct mpool *ppool)
{
        return mm_vm_identity_prepare(&vm_locked.vm->ptable, begin, end, mode,
                                      ppool);
}

/**
 * Commits the given address mapping to the VM assuming the operation cannot
 * fail. `vm_identity_prepare` must be used correctly before this to ensure
 * this condition.
 */
void vm_identity_commit(struct vm_locked vm_locked, paddr_t begin, paddr_t end,
                        uint32_t mode, struct mpool *ppool, ipaddr_t *ipa)
{
        mm_vm_identity_commit(&vm_locked.vm->ptable, begin, end, mode, ppool,
                              ipa);
        plat_iommu_identity_map(vm_locked, begin, end, mode);
}

/**
 * Unmap a range of addresses from the VM.
 *
 * Returns true on success, or false if the update failed and no changes were
 * made.
 */
bool vm_unmap(struct vm_locked vm_locked, paddr_t begin, paddr_t end,
              struct mpool *ppool)
{
        uint32_t mode = MM_MODE_UNMAPPED_MASK;

        return vm_identity_map(vm_locked, begin, end, mode, ppool, NULL);
}

/**
 * Unmaps the hypervisor pages from the given page table.
 */
bool vm_unmap_hypervisor(struct vm_locked vm_locked, struct mpool *ppool)
{
        /* TODO: If we add pages dynamically, they must be included here too. */
        return vm_unmap(vm_locked, layout_text_begin(), layout_text_end(),
                        ppool) &&
               vm_unmap(vm_locked, layout_rodata_begin(), layout_rodata_end(),
                        ppool) &&
               vm_unmap(vm_locked, layout_data_begin(), layout_data_end(),
                        ppool);
}

/**
 * Gets the first partition to boot, according to the boot protocol in the
 * FF-A spec.
 */
struct vm *vm_get_first_boot(void)
{
        return first_boot_vm;
}

/**
 * Inserts the VM into the boot list, which is sorted by the `boot_order` field
 * of the vm structure and rooted at `first_boot_vm`.
 */
void vm_update_boot(struct vm *vm)
{
        struct vm *current = NULL;
        struct vm *previous = NULL;

        if (first_boot_vm == NULL) {
                first_boot_vm = vm;
                return;
        }

        current = first_boot_vm;

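        /*
         * The list is kept with non-increasing `boot_order`; walk until the
         * first entry with a lower `boot_order` than the new VM is found.
         */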
        while (current != NULL && current->boot_order >= vm->boot_order) {
                previous = current;
                current = current->next_boot;
        }

        if (previous != NULL) {
                previous->next_boot = vm;
        } else {
                first_boot_vm = vm;
        }

        vm->next_boot = current;
}

/**
 * Returns true if the VM supports a managed exit.
 */
bool vm_managed_exit_supported(struct vm *vm)
{
        return (vm->messaging_method & FFA_PARTITION_MANAGED_EXIT) != 0U;
}