/*
 * Copyright 2018 The Hafnium Authors.
 *
 * Use of this source code is governed by a BSD-style
 * license that can be found in the LICENSE file or at
 * https://opensource.org/licenses/BSD-3-Clause.
 */

#include "hf/vm.h"

#include "hf/api.h"
#include "hf/check.h"
#include "hf/cpu.h"
#include "hf/ffa.h"
#include "hf/layout.h"
#include "hf/plat/iommu.h"
#include "hf/std.h"

#include "vmapi/hf/call.h"

static struct vm vms[MAX_VMS];
static struct vm tee_vm;
static ffa_vm_count_t vm_count;

struct vm *vm_init(ffa_vm_id_t id, ffa_vcpu_count_t vcpu_count,
		   struct mpool *ppool)
{
	uint32_t i;
	struct vm *vm;

	if (id == HF_TEE_VM_ID) {
		vm = &tee_vm;
	} else {
		uint16_t vm_index = id - HF_VM_ID_OFFSET;

		CHECK(id >= HF_VM_ID_OFFSET);
		CHECK(vm_index < ARRAY_SIZE(vms));
		vm = &vms[vm_index];
	}

	memset_s(vm, sizeof(*vm), 0, sizeof(*vm));

	list_init(&vm->mailbox.waiter_list);
	list_init(&vm->mailbox.ready_list);
	sl_init(&vm->lock);

	vm->id = id;
	vm->vcpu_count = vcpu_count;
	vm->mailbox.state = MAILBOX_STATE_EMPTY;
	atomic_init(&vm->aborting, false);

	if (!mm_vm_init(&vm->ptable, ppool)) {
		return NULL;
	}

	/* Initialise waiter entries. */
	for (i = 0; i < MAX_VMS; i++) {
		vm->wait_entries[i].waiting_vm = vm;
		list_init(&vm->wait_entries[i].wait_links);
		list_init(&vm->wait_entries[i].ready_links);
	}

	/* Do basic initialization of vCPUs. */
	for (i = 0; i < vcpu_count; i++) {
		vcpu_init(vm_get_vcpu(vm, i), vm);
	}

	return vm;
}

bool vm_init_next(ffa_vcpu_count_t vcpu_count, struct mpool *ppool,
		  struct vm **new_vm)
{
	if (vm_count >= MAX_VMS) {
		return false;
	}

	/* Generate IDs based on an offset, as low IDs (e.g. 0) are reserved. */
	*new_vm = vm_init(vm_count + HF_VM_ID_OFFSET, vcpu_count, ppool);
	if (*new_vm == NULL) {
		return false;
	}
	++vm_count;

	return true;
}
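
/*
 * Illustrative sketch (not part of the original file): one way a boot-time
 * loader could create a secondary VM with vm_init_next(). The helper name
 * example_create_vm is hypothetical.
 *
 *	static struct vm *example_create_vm(ffa_vcpu_count_t vcpu_count,
 *					    struct mpool *ppool)
 *	{
 *		struct vm *vm = NULL;
 *
 *		if (!vm_init_next(vcpu_count, ppool, &vm)) {
 *			return NULL;
 *		}
 *
 *		return vm;
 *	}
 *
 * On success the new VM's ID is vm_get_count() - 1 + HF_VM_ID_OFFSET.
 */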

ffa_vm_count_t vm_get_count(void)
{
	return vm_count;
}

struct vm *vm_find(ffa_vm_id_t id)
{
	uint16_t index;

	/* Check that this is not a reserved ID. */
	if (id < HF_VM_ID_OFFSET) {
		return NULL;
	}

	if (id == HF_TEE_VM_ID) {
		if (tee_vm.id == HF_TEE_VM_ID) {
			return &tee_vm;
		}
		return NULL;
	}

	index = id - HF_VM_ID_OFFSET;

	/* Ensure the VM is initialized. */
	if (index >= vm_count) {
		return NULL;
	}

	return &vms[index];
}

/**
 * Locks the given VM and returns a `vm_locked` handle to the newly locked VM.
 */
struct vm_locked vm_lock(struct vm *vm)
{
	struct vm_locked locked = {
		.vm = vm,
	};

	sl_lock(&vm->lock);

	return locked;
}

/**
 * Locks two VMs, acquiring their locks in the order given by the locks'
 * addresses so that the locking order is consistent across callers (and so
 * avoids deadlock).
 */
struct two_vm_locked vm_lock_both(struct vm *vm1, struct vm *vm2)
{
	struct two_vm_locked dual_lock;

	sl_lock_both(&vm1->lock, &vm2->lock);
	dual_lock.vm1.vm = vm1;
	dual_lock.vm2.vm = vm2;

	return dual_lock;
}

/**
 * Unlocks a VM previously locked with vm_lock, and updates `locked` to reflect
 * the fact that the VM is no longer locked.
 */
void vm_unlock(struct vm_locked *locked)
{
	sl_unlock(&locked->vm->lock);
	locked->vm = NULL;
}
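
/*
 * Illustrative sketch (not part of the original file): the expected pairing of
 * vm_lock/vm_unlock around an update that requires the VM lock to be held.
 * The helper name example_clear_mailbox is hypothetical.
 *
 *	static void example_clear_mailbox(struct vm *vm)
 *	{
 *		struct vm_locked locked = vm_lock(vm);
 *
 *		locked.vm->mailbox.state = MAILBOX_STATE_EMPTY;
 *		vm_unlock(&locked);
 *	}
 *
 * For operations that touch two VMs at once (e.g. moving a message between
 * mailboxes), vm_lock_both() should be used instead so that both locks are
 * taken in address order.
 */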

/**
 * Get the vCPU with the given index from the given VM.
 * This assumes the index is valid, i.e. less than vm->vcpu_count.
 */
struct vcpu *vm_get_vcpu(struct vm *vm, ffa_vcpu_index_t vcpu_index)
{
	CHECK(vcpu_index < vm->vcpu_count);
	return &vm->vcpus[vcpu_index];
}
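
/*
 * Illustrative sketch (not part of the original file): looking up a vCPU from
 * an FF-A VM ID. The variable names target_vm_id and target_index are
 * hypothetical; the index must be validated against vcpu_count before calling
 * vm_get_vcpu, otherwise the CHECK above will trip.
 *
 *	struct vm *vm = vm_find(target_vm_id);
 *
 *	if (vm != NULL && target_index < vm->vcpu_count) {
 *		struct vcpu *vcpu = vm_get_vcpu(vm, target_index);
 *		...
 *	}
 */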

/**
 * Gets `vm`'s wait entry for waiting on `for_vm`.
 */
struct wait_entry *vm_get_wait_entry(struct vm *vm, ffa_vm_id_t for_vm)
{
	uint16_t index;

	CHECK(for_vm >= HF_VM_ID_OFFSET);
	index = for_vm - HF_VM_ID_OFFSET;
	CHECK(index < MAX_VMS);

	return &vm->wait_entries[index];
}

/**
 * Gets the ID of the VM which the given VM's wait entry is for.
 */
ffa_vm_id_t vm_id_for_wait_entry(struct vm *vm, struct wait_entry *entry)
{
	uint16_t index = entry - vm->wait_entries;

	return index + HF_VM_ID_OFFSET;
}
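
/*
 * Illustrative sketch (not part of the original file): vm_get_wait_entry and
 * vm_id_for_wait_entry are inverses, since wait entries are indexed by
 * for_vm - HF_VM_ID_OFFSET:
 *
 *	struct wait_entry *entry = vm_get_wait_entry(vm, for_vm);
 *
 *	CHECK(vm_id_for_wait_entry(vm, entry) == for_vm);
 */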

/**
 * Map a range of addresses to the VM in both the MMU and the IOMMU.
 *
 * mm_vm_defrag should always be called after a series of page table updates,
 * whether they succeed or fail. This is because on failure extra page table
 * entries may have been allocated and then not used, while on success it may
 * be possible to compact the page table by merging several entries into a
 * block.
 *
 * Returns true on success, or false if the update failed and no changes were
 * made.
 */
bool vm_identity_map(struct vm_locked vm_locked, paddr_t begin, paddr_t end,
		     uint32_t mode, struct mpool *ppool, ipaddr_t *ipa)
{
	if (!vm_identity_prepare(vm_locked, begin, end, mode, ppool)) {
		return false;
	}

	vm_identity_commit(vm_locked, begin, end, mode, ppool, ipa);

	return true;
}
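
/*
 * Illustrative sketch (not part of the original file): the prepare/commit
 * split allows two ranges to be mapped atomically (both or neither), which a
 * single vm_identity_map call cannot do. begin1/end1/begin2/end2 are
 * hypothetical.
 *
 *	if (vm_identity_prepare(vm_locked, begin1, end1, mode, ppool) &&
 *	    vm_identity_prepare(vm_locked, begin2, end2, mode, ppool)) {
 *		vm_identity_commit(vm_locked, begin1, end1, mode, ppool, NULL);
 *		vm_identity_commit(vm_locked, begin2, end2, mode, ppool, NULL);
 *	}
 *
 * As noted above, mm_vm_defrag should be called on the page table afterwards,
 * whether or not the prepare calls succeeded.
 */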

/**
 * Prepares the given VM for the given address mapping such that it will be
 * able to commit the change without failure.
 *
 * In particular, after multiple calls to this function, the corresponding
 * calls to commit the changes are guaranteed to succeed.
 *
 * Returns true on success, or false if the update failed and no changes were
 * made.
 */
bool vm_identity_prepare(struct vm_locked vm_locked, paddr_t begin, paddr_t end,
			 uint32_t mode, struct mpool *ppool)
{
	return mm_vm_identity_prepare(&vm_locked.vm->ptable, begin, end, mode,
				      ppool);
}

/**
 * Commits the given address mapping to the VM, assuming the operation cannot
 * fail. `vm_identity_prepare` must be used correctly before this to ensure
 * this condition.
 */
void vm_identity_commit(struct vm_locked vm_locked, paddr_t begin, paddr_t end,
			uint32_t mode, struct mpool *ppool, ipaddr_t *ipa)
{
	mm_vm_identity_commit(&vm_locked.vm->ptable, begin, end, mode, ppool,
			      ipa);
	plat_iommu_identity_map(vm_locked, begin, end, mode);
}

/**
 * Unmap a range of addresses from the VM.
 *
 * Returns true on success, or false if the update failed and no changes were
 * made.
 */
bool vm_unmap(struct vm_locked vm_locked, paddr_t begin, paddr_t end,
	      struct mpool *ppool)
{
	uint32_t mode = MM_MODE_UNMAPPED_MASK;

	return vm_identity_map(vm_locked, begin, end, mode, ppool, NULL);
}

/**
 * Unmaps the hypervisor pages from the given page table.
 */
bool vm_unmap_hypervisor(struct vm_locked vm_locked, struct mpool *ppool)
{
	/* TODO: If we add pages dynamically, they must be included here too. */
	return vm_unmap(vm_locked, layout_text_begin(), layout_text_end(),
			ppool) &&
	       vm_unmap(vm_locked, layout_rodata_begin(), layout_rodata_end(),
			ppool) &&
	       vm_unmap(vm_locked, layout_data_begin(), layout_data_end(),
			ppool);
}
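
/*
 * Illustrative sketch (not part of the original file): during VM setup the
 * hypervisor's own text, rodata and data are typically removed from the VM's
 * page table so the guest cannot access them. The surrounding setup code and
 * the variables vm and ppool as used here are hypothetical.
 *
 *	struct vm_locked vm_locked = vm_lock(vm);
 *
 *	if (!vm_unmap_hypervisor(vm_locked, ppool)) {
 *		vm_unlock(&vm_locked);
 *		return false;
 *	}
 *	vm_unlock(&vm_locked);
 */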