/*
 * Copyright 2018 The Hafnium Authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     https://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "hf/vm.h"

#include "hf/api.h"
#include "hf/check.h"
#include "hf/cpu.h"
#include "hf/layout.h"
#include "hf/plat/iommu.h"
#include "hf/spci.h"
#include "hf/std.h"

#include "vmapi/hf/call.h"

static struct vm vms[MAX_VMS];
static struct vm tee_vm;
static spci_vm_count_t vm_count;

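/**
 * Initialises the VM with the given ID and vCPU count, allocating page table
 * memory from `ppool`.
 *
 * Returns a pointer to the VM on success, or NULL if its page table could not
 * be initialised.
 */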
struct vm *vm_init(spci_vm_id_t id, spci_vcpu_count_t vcpu_count,
		   struct mpool *ppool)
{
	uint32_t i;
	struct vm *vm;

	if (id == HF_TEE_VM_ID) {
		vm = &tee_vm;
	} else {
		uint16_t vm_index = id - HF_VM_ID_OFFSET;

		CHECK(id >= HF_VM_ID_OFFSET);
		CHECK(vm_index < ARRAY_SIZE(vms));
		vm = &vms[vm_index];
	}

	memset_s(vm, sizeof(*vm), 0, sizeof(*vm));

	list_init(&vm->mailbox.waiter_list);
	list_init(&vm->mailbox.ready_list);
	sl_init(&vm->lock);

	vm->id = id;
	vm->vcpu_count = vcpu_count;
	vm->mailbox.state = MAILBOX_STATE_EMPTY;
	atomic_init(&vm->aborting, false);

	if (!mm_vm_init(&vm->ptable, ppool)) {
		return NULL;
	}

	/* Initialise waiter entries. */
	for (i = 0; i < MAX_VMS; i++) {
		vm->wait_entries[i].waiting_vm = vm;
		list_init(&vm->wait_entries[i].wait_links);
		list_init(&vm->wait_entries[i].ready_links);
	}

	/* Do basic initialization of vCPUs. */
	for (i = 0; i < vcpu_count; i++) {
		vcpu_init(vm_get_vcpu(vm, i), vm);
	}

	return vm;
}

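/**
 * Initialises the next available VM slot, assigning it the next free ID above
 * the reserved range. On success, stores a pointer to the new VM in `*new_vm`
 * and returns true; returns false if all MAX_VMS slots are already in use or
 * the VM could not be initialised.
 */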
bool vm_init_next(spci_vcpu_count_t vcpu_count, struct mpool *ppool,
		  struct vm **new_vm)
{
	if (vm_count >= MAX_VMS) {
		return false;
	}

	/* Generate IDs based on an offset, as low IDs (e.g., 0) are reserved. */
	*new_vm = vm_init(vm_count + HF_VM_ID_OFFSET, vcpu_count, ppool);
	if (*new_vm == NULL) {
		return false;
	}
	++vm_count;

	return true;
}

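/**
 * Returns the number of VMs that have been initialised via vm_init_next.
 */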
spci_vm_count_t vm_get_count(void)
{
	return vm_count;
}

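/**
 * Looks up a VM by its ID. Returns a pointer to the VM, or NULL if the ID is
 * reserved or no VM with that ID has been initialised.
 */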
struct vm *vm_find(spci_vm_id_t id)
{
	uint16_t index;

	/* Check that this is not a reserved ID. */
	if (id < HF_VM_ID_OFFSET) {
		return NULL;
	}

	if (id == HF_TEE_VM_ID) {
		if (tee_vm.id == HF_TEE_VM_ID) {
			return &tee_vm;
		}
		return NULL;
	}

	index = id - HF_VM_ID_OFFSET;

	/* Ensure the VM is initialized. */
	if (index >= vm_count) {
		return NULL;
	}

	return &vms[index];
}

/**
 * Locks the given VM and returns a `vm_locked` wrapper holding it.
 */
struct vm_locked vm_lock(struct vm *vm)
{
	struct vm_locked locked = {
		.vm = vm,
	};

	sl_lock(&vm->lock);

	return locked;
}

Jose Marinho75509b42019-04-09 09:34:59 +0100142 * Locks two VMs ensuring that the locking order is according to the locks'
143 * addresses.
144 */
145struct two_vm_locked vm_lock_both(struct vm *vm1, struct vm *vm2)
146{
147 struct two_vm_locked dual_lock;
148
149 sl_lock_both(&vm1->lock, &vm2->lock);
150 dual_lock.vm1.vm = vm1;
151 dual_lock.vm2.vm = vm2;
152
153 return dual_lock;
154}
155
/**
 * Unlocks a VM previously locked with vm_lock, and updates `locked` to reflect
 * the fact that the VM is no longer locked.
 */
void vm_unlock(struct vm_locked *locked)
{
	sl_unlock(&locked->vm->lock);
	locked->vm = NULL;
}
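
/*
 * Typical usage of the locking helpers above (a sketch, not a prescribed
 * pattern; `vm` is assumed to be a valid pointer, e.g. from vm_find):
 *
 *	struct vm_locked locked = vm_lock(vm);
 *	... operate on locked.vm while holding the lock ...
 *	vm_unlock(&locked);
 */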

/**
 * Get the vCPU with the given index from the given VM.
 * This assumes the index is valid, i.e. less than vm->vcpu_count.
 */
struct vcpu *vm_get_vcpu(struct vm *vm, spci_vcpu_index_t vcpu_index)
{
	CHECK(vcpu_index < vm->vcpu_count);
	return &vm->vcpus[vcpu_index];
}

/**
 * Gets `vm`'s wait entry for waiting on `for_vm`.
 */
struct wait_entry *vm_get_wait_entry(struct vm *vm, spci_vm_id_t for_vm)
{
	uint16_t index;

	CHECK(for_vm >= HF_VM_ID_OFFSET);
	index = for_vm - HF_VM_ID_OFFSET;
	CHECK(index < MAX_VMS);

	return &vm->wait_entries[index];
}

/**
 * Gets the ID of the VM which the given VM's wait entry is for.
 */
spci_vm_id_t vm_id_for_wait_entry(struct vm *vm, struct wait_entry *entry)
{
	uint16_t index = entry - vm->wait_entries;

	return index + HF_VM_ID_OFFSET;
}

/**
 * Map a range of addresses to the VM in both the MMU and the IOMMU.
 *
 * mm_vm_defrag should always be called after a series of page table updates,
 * whether they succeed or fail. This is because on failure extra page table
 * entries may have been allocated and then not used, while on success it may
 * be possible to compact the page table by merging several entries into a
 * block.
 *
 * Returns true on success, or false if the update failed and no changes were
 * made.
 */
bool vm_identity_map(struct vm_locked vm_locked, paddr_t begin, paddr_t end,
		     uint32_t mode, struct mpool *ppool, ipaddr_t *ipa)
{
	if (!vm_identity_prepare(vm_locked, begin, end, mode, ppool)) {
		return false;
	}

	vm_identity_commit(vm_locked, begin, end, mode, ppool, ipa);

	return true;
}

/**
 * Prepares the given VM for the given address mapping such that it will be
 * able to commit the change without failure.
 *
 * In particular, multiple calls to this function will result in the
 * corresponding calls to commit the changes succeeding.
 *
 * Returns true on success, or false if the update failed and no changes were
 * made.
 */
bool vm_identity_prepare(struct vm_locked vm_locked, paddr_t begin, paddr_t end,
			 uint32_t mode, struct mpool *ppool)
{
	return mm_vm_identity_prepare(&vm_locked.vm->ptable, begin, end, mode,
				      ppool);
}

/**
 * Commits the given address mapping to the VM, assuming the operation cannot
 * fail. `vm_identity_prepare` must be used correctly before this to ensure
 * this condition.
 */
void vm_identity_commit(struct vm_locked vm_locked, paddr_t begin, paddr_t end,
			uint32_t mode, struct mpool *ppool, ipaddr_t *ipa)
{
	mm_vm_identity_commit(&vm_locked.vm->ptable, begin, end, mode, ppool,
			      ipa);
	plat_iommu_identity_map(vm_locked, begin, end, mode);
}

/**
 * Unmap a range of addresses from the VM.
 *
 * Returns true on success, or false if the update failed and no changes were
 * made.
 */
bool vm_unmap(struct vm_locked vm_locked, paddr_t begin, paddr_t end,
	      struct mpool *ppool)
{
	uint32_t mode = MM_MODE_UNMAPPED_MASK;

	return vm_identity_map(vm_locked, begin, end, mode, ppool, NULL);
}

/**
 * Unmaps the hypervisor pages from the given page table.
 */
bool vm_unmap_hypervisor(struct vm_locked vm_locked, struct mpool *ppool)
{
	/* TODO: If we add pages dynamically, they must be included here too. */
	return vm_unmap(vm_locked, layout_text_begin(), layout_text_end(),
			ppool) &&
	       vm_unmap(vm_locked, layout_rodata_begin(), layout_rodata_end(),
			ppool) &&
	       vm_unmap(vm_locked, layout_data_begin(), layout_data_end(),
			ppool);
}