/*
 * Copyright 2018 The Hafnium Authors.
 *
 * Use of this source code is governed by a BSD-style
 * license that can be found in the LICENSE file or at
 * https://opensource.org/licenses/BSD-3-Clause.
 */

#include "hf/vm.h"

#include "hf/api.h"
#include "hf/check.h"
#include "hf/cpu.h"
#include "hf/ffa.h"
#include "hf/layout.h"
#include "hf/plat/iommu.h"
#include "hf/std.h"

#include "vmapi/hf/call.h"

static struct vm vms[MAX_VMS];
static struct vm other_world;
static ffa_vm_count_t vm_count;
static struct vm *first_boot_vm;

struct vm *vm_init(ffa_vm_id_t id, ffa_vcpu_count_t vcpu_count,
		   struct mpool *ppool)
{
	uint32_t i;
	struct vm *vm;

	if (id == HF_OTHER_WORLD_ID) {
		vm = &other_world;
	} else {
		uint16_t vm_index = id - HF_VM_ID_OFFSET;

		CHECK(id >= HF_VM_ID_OFFSET);
		CHECK(vm_index < ARRAY_SIZE(vms));
		vm = &vms[vm_index];
	}

	memset_s(vm, sizeof(*vm), 0, sizeof(*vm));

	list_init(&vm->mailbox.waiter_list);
	list_init(&vm->mailbox.ready_list);
	sl_init(&vm->lock);

	vm->id = id;
	vm->vcpu_count = vcpu_count;
	vm->mailbox.state = MAILBOX_STATE_EMPTY;
	atomic_init(&vm->aborting, false);

	if (!mm_vm_init(&vm->ptable, ppool)) {
		return NULL;
	}

	/* Initialise waiter entries. */
	for (i = 0; i < MAX_VMS; i++) {
		vm->wait_entries[i].waiting_vm = vm;
		list_init(&vm->wait_entries[i].wait_links);
		list_init(&vm->wait_entries[i].ready_links);
	}

	/* Do basic initialization of vCPUs. */
	for (i = 0; i < vcpu_count; i++) {
		vcpu_init(vm_get_vcpu(vm, i), vm);
	}

	return vm;
}

bool vm_init_next(ffa_vcpu_count_t vcpu_count, struct mpool *ppool,
		  struct vm **new_vm)
{
	if (vm_count >= MAX_VMS) {
		return false;
	}

	/* Generate IDs based on an offset, as low IDs (e.g. 0) are reserved. */
	*new_vm = vm_init(vm_count + HF_VM_ID_OFFSET, vcpu_count, ppool);
	if (*new_vm == NULL) {
		return false;
	}
	++vm_count;

	return true;
}

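/*
 * Illustrative sketch (not part of the original file): how a boot-time loader
 * might use vm_init_next() to create each secondary VM in turn. The names
 * `manifest_vm_count`, `vcpu_counts` and `ppool` are hypothetical stand-ins
 * for whatever configuration source and memory pool the caller uses.
 *
 *	struct vm *new_vm;
 *	ffa_vm_count_t i;
 *
 *	for (i = 0; i < manifest_vm_count; i++) {
 *		if (!vm_init_next(vcpu_counts[i], &ppool, &new_vm)) {
 *			panic("Exceeded MAX_VMS while creating VMs.");
 *		}
 *	}
 */
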
ffa_vm_count_t vm_get_count(void)
{
	return vm_count;
}

/**
 * Returns a pointer to the VM with the corresponding id.
 */
struct vm *vm_find(ffa_vm_id_t id)
{
	uint16_t index;

	if (id == HF_OTHER_WORLD_ID) {
		if (other_world.id == HF_OTHER_WORLD_ID) {
			return &other_world;
		}
		return NULL;
	}

	/* Check that this is not a reserved ID. */
	if (id < HF_VM_ID_OFFSET) {
		return NULL;
	}

	index = id - HF_VM_ID_OFFSET;

	return vm_find_index(index);
}

/**
 * Returns a pointer to the VM at the specified index.
 */
struct vm *vm_find_index(uint16_t index)
{
	/* Ensure the VM is initialized. */
	if (index >= vm_count) {
		return NULL;
	}

	return &vms[index];
}

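/*
 * Example of the ID/index relationship (illustrative only): secondary VM IDs
 * start at HF_VM_ID_OFFSET, so once at least one VM has been created via
 * vm_init_next() the two lookups below return the same VM.
 *
 *	struct vm *by_id = vm_find(HF_VM_ID_OFFSET);
 *	struct vm *by_index = vm_find_index(0);
 *
 *	CHECK(by_id == by_index);
 */
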
/**
 * Locks the given VM and returns a handle to the newly locked VM.
 */
struct vm_locked vm_lock(struct vm *vm)
{
	struct vm_locked locked = {
		.vm = vm,
	};

	sl_lock(&vm->lock);

	return locked;
}

/**
 * Locks two VMs, acquiring the locks in the order of their addresses so that
 * the locking order is consistent regardless of the order of the arguments.
 */
struct two_vm_locked vm_lock_both(struct vm *vm1, struct vm *vm2)
{
	struct two_vm_locked dual_lock;

	sl_lock_both(&vm1->lock, &vm2->lock);
	dual_lock.vm1.vm = vm1;
	dual_lock.vm2.vm = vm2;

	return dual_lock;
}

/**
 * Unlocks a VM previously locked with vm_lock, and updates `locked` to reflect
 * the fact that the VM is no longer locked.
 */
void vm_unlock(struct vm_locked *locked)
{
	sl_unlock(&locked->vm->lock);
	locked->vm = NULL;
}

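/*
 * Illustrative usage sketch for the locking API above. `vm`, `sender` and
 * `receiver` are hypothetical, already-initialised pointers. Since
 * vm_lock_both() acquires the two locks in address order, the caller's
 * argument order has no bearing on deadlock avoidance.
 *
 *	struct vm_locked locked = vm_lock(vm);
 *	... operate on locked.vm while holding the lock ...
 *	vm_unlock(&locked);
 *
 *	struct two_vm_locked both = vm_lock_both(sender, receiver);
 *	... operate on both.vm1.vm and both.vm2.vm ...
 *	vm_unlock(&both.vm1);
 *	vm_unlock(&both.vm2);
 */
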
/**
 * Gets the vCPU with the given index from the given VM.
 * This assumes the index is valid, i.e. less than vm->vcpu_count.
 */
struct vcpu *vm_get_vcpu(struct vm *vm, ffa_vcpu_index_t vcpu_index)
{
	CHECK(vcpu_index < vm->vcpu_count);
	return &vm->vcpus[vcpu_index];
}

/**
 * Gets `vm`'s wait entry for waiting on `for_vm`.
 */
struct wait_entry *vm_get_wait_entry(struct vm *vm, ffa_vm_id_t for_vm)
{
	uint16_t index;

	CHECK(for_vm >= HF_VM_ID_OFFSET);
	index = for_vm - HF_VM_ID_OFFSET;
	CHECK(index < MAX_VMS);

	return &vm->wait_entries[index];
}

/**
 * Gets the ID of the VM which the given VM's wait entry is for.
 */
ffa_vm_id_t vm_id_for_wait_entry(struct vm *vm, struct wait_entry *entry)
{
	uint16_t index = entry - vm->wait_entries;

	return index + HF_VM_ID_OFFSET;
}

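/*
 * The two helpers above are inverses of each other: both translate between a
 * VM ID and a wait entry index via HF_VM_ID_OFFSET, so for any valid `id`:
 *
 *	vm_id_for_wait_entry(vm, vm_get_wait_entry(vm, id)) == id
 */
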
/**
 * Returns whether the given VM ID represents an entity in the current world:
 * i.e. the hypervisor or a normal world VM when running in the normal world,
 * or the SPM or an SP when running in the secure world.
 */
bool vm_id_is_current_world(ffa_vm_id_t vm_id)
{
	return (vm_id & HF_VM_ID_WORLD_MASK) !=
	       (HF_OTHER_WORLD_ID & HF_VM_ID_WORLD_MASK);
}

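/*
 * Worked example of the check above, assuming (as in the FF-A endpoint ID
 * scheme) that the world is encoded in the top bit of the 16-bit ID, i.e.
 * HF_VM_ID_WORLD_MASK == 1 << 15. When running in the normal world,
 * HF_OTHER_WORLD_ID has that bit set, so:
 *
 *	vm_id_is_current_world(0x0001)	-> true	 (a normal world VM)
 *	vm_id_is_current_world(0x8001)	-> false (an SP)
 *
 * In the secure world the comparison flips accordingly.
 */
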
/**
 * Maps a range of addresses to the VM in both the MMU and the IOMMU.
 *
 * mm_vm_defrag should always be called after a series of page table updates,
 * whether they succeed or fail. This is because on failure extra page table
 * entries may have been allocated and then not used, while on success it may
 * be possible to compact the page table by merging several entries into a
 * block.
 *
 * Returns true on success, or false if the update failed and no changes were
 * made.
 */
bool vm_identity_map(struct vm_locked vm_locked, paddr_t begin, paddr_t end,
		     uint32_t mode, struct mpool *ppool, ipaddr_t *ipa)
{
	if (!vm_identity_prepare(vm_locked, begin, end, mode, ppool)) {
		return false;
	}

	vm_identity_commit(vm_locked, begin, end, mode, ppool, ipa);

	return true;
}

/**
 * Prepares the given VM for the given address mapping such that it will be
 * able to commit the change without failure.
 *
 * In particular, multiple calls to this function will result in the
 * corresponding calls to commit the changes succeeding.
 *
 * Returns true on success, or false if the update failed and no changes were
 * made.
 */
bool vm_identity_prepare(struct vm_locked vm_locked, paddr_t begin, paddr_t end,
			 uint32_t mode, struct mpool *ppool)
{
	return mm_vm_identity_prepare(&vm_locked.vm->ptable, begin, end, mode,
				      ppool);
}

/**
 * Commits the given address mapping to the VM, assuming the operation cannot
 * fail. `vm_identity_prepare` must be used correctly before this to ensure
 * this condition.
 */
void vm_identity_commit(struct vm_locked vm_locked, paddr_t begin, paddr_t end,
			uint32_t mode, struct mpool *ppool, ipaddr_t *ipa)
{
	mm_vm_identity_commit(&vm_locked.vm->ptable, begin, end, mode, ppool,
			      ipa);
	plat_iommu_identity_map(vm_locked, begin, end, mode);
}

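/*
 * Illustrative sketch of why the prepare/commit split exists: a caller can
 * make a multi-range update atomic with respect to failure by preparing
 * every range first (the only step that can fail) and only then committing.
 * `locked`, `mode`, `ppool` and the `r1_`/`r2_` range bounds are hypothetical.
 *
 *	if (!vm_identity_prepare(locked, r1_begin, r1_end, mode, ppool) ||
 *	    !vm_identity_prepare(locked, r2_begin, r2_end, mode, ppool)) {
 *		return false;
 *	}
 *	vm_identity_commit(locked, r1_begin, r1_end, mode, ppool, NULL);
 *	vm_identity_commit(locked, r2_begin, r2_end, mode, ppool, NULL);
 */
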
/**
 * Unmaps a range of addresses from the VM.
 *
 * Returns true on success, or false if the update failed and no changes were
 * made.
 */
bool vm_unmap(struct vm_locked vm_locked, paddr_t begin, paddr_t end,
	      struct mpool *ppool)
{
	uint32_t mode = MM_MODE_UNMAPPED_MASK;

	return vm_identity_map(vm_locked, begin, end, mode, ppool, NULL);
}

/**
 * Unmaps the hypervisor pages from the given page table.
 */
bool vm_unmap_hypervisor(struct vm_locked vm_locked, struct mpool *ppool)
{
	/* TODO: If we add pages dynamically, they must be included here too. */
	return vm_unmap(vm_locked, layout_text_begin(), layout_text_end(),
			ppool) &&
	       vm_unmap(vm_locked, layout_rodata_begin(), layout_rodata_end(),
			ppool) &&
	       vm_unmap(vm_locked, layout_data_begin(), layout_data_end(),
			ppool);
}

/**
 * Gets the first partition to boot, according to the boot protocol in the
 * FF-A spec.
 */
struct vm *vm_get_first_boot(void)
{
	return first_boot_vm;
}

/**
 * Inserts the VM into the boot list, which is sorted by the `boot_order`
 * field in the vm structure and rooted at `first_boot_vm`.
 */
void vm_update_boot(struct vm *vm)
{
	struct vm *current = NULL;
	struct vm *previous = NULL;

	if (first_boot_vm == NULL) {
		first_boot_vm = vm;
		return;
	}

	current = first_boot_vm;

	while (current != NULL && current->boot_order >= vm->boot_order) {
		previous = current;
		current = current->next_boot;
	}

	if (previous != NULL) {
		previous->next_boot = vm;
	} else {
		first_boot_vm = vm;
	}

	vm->next_boot = current;
}

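/*
 * Example (illustrative only): walking the boot list built by
 * vm_update_boot(). The insertion loop above keeps the list sorted by
 * descending boot_order, so the head is the first partition to boot.
 *
 *	struct vm *vm;
 *
 *	for (vm = vm_get_first_boot(); vm != NULL; vm = vm->next_boot) {
 *		... boot or inspect vm ...
 *	}
 */
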
/**
 * Returns true if the VM supports managed exit.
 */
bool vm_managed_exit_supported(struct vm *vm)
{
	return (vm->messaging_method & FFA_PARTITION_MANAGED_EXIT) != 0U;
}