/*
 * Copyright 2018 The Hafnium Authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     https://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "hf/load.h"

#include <stdbool.h>

#include "hf/arch/vm.h"

#include "hf/api.h"
#include "hf/boot_params.h"
#include "hf/check.h"
#include "hf/dlog.h"
#include "hf/layout.h"
#include "hf/memiter.h"
#include "hf/mm.h"
#include "hf/plat/console.h"
#include "hf/plat/iommu.h"
#include "hf/static_assert.h"
#include "hf/std.h"
#include "hf/vm.h"

#include "vmapi/hf/call.h"

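/*
 * Statically allocated, page-aligned buffers used as the send/receive
 * mailboxes of the dummy TEE VM set up in load_vms().
 */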
alignas(PAGE_SIZE) static uint8_t tee_send_buffer[HF_MAILBOX_SIZE];
alignas(PAGE_SIZE) static uint8_t tee_recv_buffer[HF_MAILBOX_SIZE];

/**
 * Copies data to an unmapped location by mapping it for write, copying the
 * data, then unmapping it.
 *
 * The data is written so that it is available to all cores with the cache
 * disabled. When switching to the partitions, caching is initially disabled,
 * so the data must be available without the cache.
 */
static bool copy_to_unmapped(struct mm_stage1_locked stage1_locked, paddr_t to,
			     struct memiter *from_it, struct mpool *ppool)
{
	const void *from = memiter_base(from_it);
	size_t size = memiter_size(from_it);
	paddr_t to_end = pa_add(to, size);
	void *ptr;

	ptr = mm_identity_map(stage1_locked, to, to_end, MM_MODE_W, ppool);
	if (!ptr) {
		return false;
	}

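	/*
	 * Copy the data, then clean the data cache so the copy is visible to
	 * cores running with caching disabled.
	 */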
	memcpy_s(ptr, size, from, size);
	arch_mm_flush_dcache(ptr, size);

	CHECK(mm_unmap(stage1_locked, to, to_end, ppool));

	return true;
}

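/**
 * Loads a VM's kernel from the cpio archive into the given range of memory.
 * An empty kernel filename in the manifest signals that the kernel has been
 * preloaded, in which case nothing is copied.
 */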
static bool load_kernel(struct mm_stage1_locked stage1_locked, paddr_t begin,
			paddr_t end, const struct manifest_vm *manifest_vm,
			const struct memiter *cpio, struct mpool *ppool)
{
	struct memiter kernel;

	if (string_is_empty(&manifest_vm->kernel_filename)) {
		/* This signals the kernel has been preloaded. */
		return true;
	}

	if (!cpio_get_file(cpio, &manifest_vm->kernel_filename, &kernel)) {
		dlog_error("Could not find kernel file \"%s\".\n",
			   string_data(&manifest_vm->kernel_filename));
		return false;
	}

	if (pa_difference(begin, end) < memiter_size(&kernel)) {
		dlog_error("Kernel is larger than available memory.\n");
		return false;
	}

	if (!copy_to_unmapped(stage1_locked, begin, &kernel, ppool)) {
		dlog_error("Unable to copy kernel.\n");
		return false;
	}

	return true;
}

/**
 * Performs VM loading activities that are common between the primary and
 * secondaries.
 */
static bool load_common(const struct manifest_vm *manifest_vm, struct vm *vm)
{
	vm->smc_whitelist = manifest_vm->smc_whitelist;

	/* Initialize architecture-specific features. */
	arch_vm_features_set(vm);

	return true;
}

/**
 * Loads the primary VM.
 */
static bool load_primary(struct mm_stage1_locked stage1_locked,
			 const struct manifest_vm *manifest_vm,
			 const struct memiter *cpio,
			 const struct boot_params *params, struct mpool *ppool)
{
	paddr_t primary_begin = layout_primary_begin();
	struct vm *vm;
	struct vm_locked vm_locked;
	struct vcpu_locked vcpu_locked;
	size_t i;
	bool ret;

	/*
	 * TODO: This bound is currently meaningless but will be addressed when
	 * the manifest specifies the load address.
	 */
	paddr_t primary_end = pa_add(primary_begin, 0x8000000);

	if (!load_kernel(stage1_locked, primary_begin, primary_end, manifest_vm,
			 cpio, ppool)) {
		dlog_error("Unable to load primary kernel.\n");
		return false;
	}

	if (!vm_init_next(MAX_CPUS, ppool, &vm)) {
		dlog_error("Unable to initialise primary vm\n");
		return false;
	}

	if (vm->id != HF_PRIMARY_VM_ID) {
		dlog_error("Primary vm was not given correct id\n");
		return false;
	}

	vm_locked = vm_lock(vm);

	if (!load_common(manifest_vm, vm)) {
		ret = false;
		goto out;
	}

	/*
	 * Map 1TB of address space as device memory to, most likely, make all
	 * devices available to the primary VM.
	 *
	 * TODO: We should do a whitelist rather than a blacklist.
	 */
	if (!vm_identity_map(vm_locked, pa_init(0),
			     pa_init(UINT64_C(1024) * 1024 * 1024 * 1024),
			     MM_MODE_R | MM_MODE_W | MM_MODE_D, ppool, NULL)) {
		dlog_error(
			"Unable to initialise address space for primary vm\n");
		ret = false;
		goto out;
	}

	/* Map normal memory as such to permit caching, execution, etc. */
	for (i = 0; i < params->mem_ranges_count; ++i) {
		if (!vm_identity_map(vm_locked, params->mem_ranges[i].begin,
				     params->mem_ranges[i].end,
				     MM_MODE_R | MM_MODE_W | MM_MODE_X, ppool,
				     NULL)) {
			dlog_error(
				"Unable to initialise memory for primary vm\n");
			ret = false;
			goto out;
		}
	}

	if (!vm_unmap_hypervisor(vm_locked, ppool)) {
		dlog_error("Unable to unmap hypervisor from primary vm\n");
		ret = false;
		goto out;
	}

	if (!plat_iommu_unmap_iommus(vm_locked, ppool)) {
		dlog_error("Unable to unmap IOMMUs from primary VM\n");
		ret = false;
		goto out;
	}
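
	/*
	 * Turn on vCPU 0 of the primary VM, entering at the start of the
	 * kernel image with the kernel argument from the boot params.
	 */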
	vcpu_locked = vcpu_lock(vm_get_vcpu(vm, 0));
	vcpu_on(vcpu_locked, ipa_from_pa(primary_begin), params->kernel_arg);
	vcpu_unlock(&vcpu_locked);
	ret = true;

out:
	vm_unlock(&vm_locked);

	return ret;
}

/*
 * Loads a secondary VM.
 */
static bool load_secondary(struct mm_stage1_locked stage1_locked,
			   paddr_t mem_begin, paddr_t mem_end,
			   const struct manifest_vm *manifest_vm,
			   const struct memiter *cpio, struct mpool *ppool)
{
	struct vm *vm;
	struct vm_locked vm_locked;
	struct vcpu *vcpu;
	ipaddr_t secondary_entry;
	bool ret;

	if (!load_kernel(stage1_locked, mem_begin, mem_end, manifest_vm, cpio,
			 ppool)) {
		dlog_error("Unable to load kernel.\n");
		return false;
	}

	if (!vm_init_next(manifest_vm->secondary.vcpu_count, ppool, &vm)) {
		dlog_error("Unable to initialise VM.\n");
		return false;
	}

	if (!load_common(manifest_vm, vm)) {
		return false;
	}

	vm_locked = vm_lock(vm);

	/* Grant the VM access to the memory. */
	if (!vm_identity_map(vm_locked, mem_begin, mem_end,
			     MM_MODE_R | MM_MODE_W | MM_MODE_X, ppool,
			     &secondary_entry)) {
		dlog_error("Unable to initialise memory.\n");
		ret = false;
		goto out;
	}

	dlog_info("Loaded with %u vCPUs, entry at %#x.\n",
		  manifest_vm->secondary.vcpu_count, pa_addr(mem_begin));

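	/*
	 * Set up vCPU 0 so that, when first run, it starts at the secondary's
	 * entry point with the size of its memory as the argument.
	 */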
	vcpu = vm_get_vcpu(vm, 0);
	vcpu_secondary_reset_and_start(vcpu, secondary_entry,
				       pa_difference(mem_begin, mem_end));
	ret = true;

out:
	vm_unlock(&vm_locked);

	return ret;
}

/**
 * Try to find a memory range of the given size within the given ranges, and
 * remove it from them. Return true on success, or false if no large enough
 * contiguous range is found.
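 *
 * For example, carving 0x10000 bytes out of [0x80000000, 0x90000000) returns
 * [0x8fff0000, 0x90000000) and shrinks the range to [0x80000000, 0x8fff0000).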
 */
static bool carve_out_mem_range(struct mem_range *mem_ranges,
				size_t mem_ranges_count, uint64_t size_to_find,
				paddr_t *found_begin, paddr_t *found_end)
{
	size_t i;

	/*
	 * TODO(b/116191358): Consider being cleverer about how we pack VMs
	 * together, with a non-greedy algorithm.
	 */
	for (i = 0; i < mem_ranges_count; ++i) {
		if (size_to_find <=
		    pa_difference(mem_ranges[i].begin, mem_ranges[i].end)) {
			/*
			 * This range is big enough; take some of it from the
			 * end and reduce its size accordingly.
			 */
			*found_end = mem_ranges[i].end;
			*found_begin = pa_init(pa_addr(mem_ranges[i].end) -
					       size_to_find);
			mem_ranges[i].end = *found_begin;
			return true;
		}
	}
	return false;
}

/**
 * Given arrays of memory ranges before and after memory was removed for
 * secondary VMs, add the difference to the reserved ranges of the given update.
 * Return true on success, or false if there would be more than MAX_MEM_RANGES
 * reserved ranges after adding the new ones.
 * `before` and `after` must be arrays of exactly `mem_ranges_count` elements.
 */
static bool update_reserved_ranges(struct boot_params_update *update,
				   const struct mem_range *before,
				   const struct mem_range *after,
				   size_t mem_ranges_count)
{
	size_t i;

	for (i = 0; i < mem_ranges_count; ++i) {
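		/* Memory was removed from the start of this range. */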
		if (pa_addr(after[i].begin) > pa_addr(before[i].begin)) {
			if (update->reserved_ranges_count >= MAX_MEM_RANGES) {
				dlog_error(
					"Too many reserved ranges after "
					"loading secondary VMs.\n");
				return false;
			}
			update->reserved_ranges[update->reserved_ranges_count]
				.begin = before[i].begin;
			update->reserved_ranges[update->reserved_ranges_count]
				.end = after[i].begin;
			update->reserved_ranges_count++;
		}
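		/* Memory was removed from the end of this range. */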
		if (pa_addr(after[i].end) < pa_addr(before[i].end)) {
			if (update->reserved_ranges_count >= MAX_MEM_RANGES) {
				dlog_error(
					"Too many reserved ranges after "
					"loading secondary VMs.\n");
				return false;
			}
			update->reserved_ranges[update->reserved_ranges_count]
				.begin = after[i].end;
			update->reserved_ranges[update->reserved_ranges_count]
				.end = before[i].end;
			update->reserved_ranges_count++;
		}
	}

	return true;
}

/*
 * Loads all VMs from the manifest.
 */
bool load_vms(struct mm_stage1_locked stage1_locked,
	      const struct manifest *manifest, const struct memiter *cpio,
	      const struct boot_params *params,
	      struct boot_params_update *update, struct mpool *ppool)
{
	struct vm *primary;
	struct vm *tee;
	struct mem_range mem_ranges_available[MAX_MEM_RANGES];
	struct vm_locked primary_vm_locked;
	size_t i;
	bool success = true;

	if (!load_primary(stage1_locked, &manifest->vm[HF_PRIMARY_VM_INDEX],
			  cpio, params, ppool)) {
		dlog_error("Unable to load primary VM.\n");
		return false;
	}

	/*
	 * Initialise the dummy VM which represents TrustZone, and set up its
	 * RX/TX buffers.
	 */
	tee = vm_init(HF_TEE_VM_ID, 0, ppool);
	CHECK(tee != NULL);
	tee->mailbox.send = &tee_send_buffer;
	tee->mailbox.recv = &tee_recv_buffer;

	static_assert(
		sizeof(mem_ranges_available) == sizeof(params->mem_ranges),
		"mem_range arrays must be the same size for memcpy.");
	static_assert(sizeof(mem_ranges_available) < 500,
		      "This will use too much stack, either make "
		      "MAX_MEM_RANGES smaller or change this.");
	memcpy_s(mem_ranges_available, sizeof(mem_ranges_available),
		 params->mem_ranges, sizeof(params->mem_ranges));

	/* Round the last addresses down to the page size. */
	for (i = 0; i < params->mem_ranges_count; ++i) {
		mem_ranges_available[i].end = pa_init(align_down(
			pa_addr(mem_ranges_available[i].end), PAGE_SIZE));
	}

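	/*
	 * Keep the primary VM locked throughout the loop below so that its
	 * address space can be updated as each secondary's memory is carved
	 * out of it.
	 */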
	primary = vm_find(HF_PRIMARY_VM_ID);
	primary_vm_locked = vm_lock(primary);

	for (i = 0; i < manifest->vm_count; ++i) {
		const struct manifest_vm *manifest_vm = &manifest->vm[i];
		spci_vm_id_t vm_id = HF_VM_ID_OFFSET + i;
		uint64_t mem_size;
		paddr_t secondary_mem_begin;
		paddr_t secondary_mem_end;

		if (vm_id == HF_PRIMARY_VM_ID) {
			continue;
		}

		dlog_info("Loading VM%d: %s.\n", (int)vm_id,
			  manifest_vm->debug_name);

		mem_size = align_up(manifest_vm->secondary.mem_size, PAGE_SIZE);
		if (!carve_out_mem_range(mem_ranges_available,
					 params->mem_ranges_count, mem_size,
					 &secondary_mem_begin,
					 &secondary_mem_end)) {
			dlog_error("Not enough memory (%u bytes).\n", mem_size);
			continue;
		}

		if (!load_secondary(stage1_locked, secondary_mem_begin,
				    secondary_mem_end, manifest_vm, cpio,
				    ppool)) {
			dlog_error("Unable to load VM.\n");
			continue;
		}

		/* Deny the primary VM access to this memory. */
		if (!vm_unmap(primary_vm_locked, secondary_mem_begin,
			      secondary_mem_end, ppool)) {
			dlog_error(
				"Unable to unmap secondary VM from primary "
				"VM.\n");
			success = false;
			break;
		}
	}

	vm_unlock(&primary_vm_locked);

	if (!success) {
		return false;
	}

	/*
	 * Add newly reserved areas to update params by looking at the
	 * difference between the available ranges from the original params and
	 * the updated mem_ranges_available. We assume that the number and
	 * order of available ranges is the same, i.e. the loop above doesn't
	 * remove any ranges, it only makes them smaller.
	 */
	return update_reserved_ranges(update, params->mem_ranges,
				      mem_ranges_available,
				      params->mem_ranges_count);
}