/*
 * Copyright 2018 The Hafnium Authors.
 *
 * Use of this source code is governed by a BSD-style
 * license that can be found in the LICENSE file or at
 * https://opensource.org/licenses/BSD-3-Clause.
 */

#include "hf/load.h"

#include <stdbool.h>

#include "hf/arch/vm.h"

#include "hf/api.h"
#include "hf/boot_params.h"
#include "hf/check.h"
#include "hf/dlog.h"
#include "hf/layout.h"
#include "hf/memiter.h"
#include "hf/mm.h"
#include "hf/plat/console.h"
#include "hf/plat/iommu.h"
#include "hf/static_assert.h"
#include "hf/std.h"
#include "hf/vm.h"

#include "vmapi/hf/call.h"

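/*
 * Statically allocated, page-aligned buffers that load_vms() installs as the
 * send and receive mailbox buffers of the dummy VM representing the TEE.
 */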
alignas(PAGE_SIZE) static uint8_t tee_send_buffer[HF_MAILBOX_SIZE];
alignas(PAGE_SIZE) static uint8_t tee_recv_buffer[HF_MAILBOX_SIZE];

/**
 * Copies data to an unmapped location by mapping it for write, copying the
 * data, then unmapping it.
 *
 * The data is written so that it is available to all cores with the cache
 * disabled. When switching to the partitions, the caching is initially
 * disabled so the data must be available without the cache.
 */
static bool copy_to_unmapped(struct mm_stage1_locked stage1_locked, paddr_t to,
                             struct memiter *from_it, struct mpool *ppool)
{
        const void *from = memiter_base(from_it);
        size_t size = memiter_size(from_it);
        paddr_t to_end = pa_add(to, size);
        void *ptr;

        ptr = mm_identity_map(stage1_locked, to, to_end, MM_MODE_W, ppool);
        if (!ptr) {
                return false;
        }

        memcpy_s(ptr, size, from, size);
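        /* Make the copy visible to cores that run with the cache disabled. */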
        arch_mm_flush_dcache(ptr, size);

        CHECK(mm_unmap(stage1_locked, to, to_end, ppool));

        return true;
}

static bool load_kernel(struct mm_stage1_locked stage1_locked, paddr_t begin,
                        paddr_t end, const struct manifest_vm *manifest_vm,
                        const struct memiter *cpio, struct mpool *ppool)
{
        struct memiter kernel;

        if (!cpio_get_file(cpio, &manifest_vm->kernel_filename, &kernel)) {
                dlog_error("Could not find kernel file \"%s\".\n",
                           string_data(&manifest_vm->kernel_filename));
                return false;
        }

        if (pa_difference(begin, end) < memiter_size(&kernel)) {
                dlog_error("Kernel is larger than available memory.\n");
                return false;
        }

        if (!copy_to_unmapped(stage1_locked, begin, &kernel, ppool)) {
                dlog_error("Unable to copy kernel.\n");
                return false;
        }

        return true;
}

/**
 * Performs VM loading activities that are common between the primary and
 * secondaries.
 */
static bool load_common(const struct manifest_vm *manifest_vm, struct vm *vm)
{
        vm->smc_whitelist = manifest_vm->smc_whitelist;

        /* Initialize architecture-specific features. */
        arch_vm_features_set(vm);

        return true;
}

/**
 * Loads the primary VM.
 */
static bool load_primary(struct mm_stage1_locked stage1_locked,
                         const struct manifest_vm *manifest_vm,
                         const struct memiter *cpio,
                         const struct boot_params *params, struct mpool *ppool)
{
        paddr_t primary_begin;
        ipaddr_t primary_entry;
        struct vm *vm;
        struct vm_locked vm_locked;
        struct vcpu_locked vcpu_locked;
        size_t i;
        bool ret;

        if (manifest_vm->is_ffa_partition) {
                primary_begin = pa_init(manifest_vm->sp.load_addr);
                primary_entry = ipa_add(ipa_from_pa(primary_begin),
                                        manifest_vm->sp.ep_offset);
        } else {
                primary_begin =
                        (manifest_vm->primary.boot_address ==
                         MANIFEST_INVALID_ADDRESS)
                                ? layout_primary_begin()
                                : pa_init(manifest_vm->primary.boot_address);
                primary_entry = ipa_from_pa(primary_begin);
        }

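        /*
         * The image copied for the primary is only bounded by RSIZE_MAX; the
         * memory actually mapped for the primary is taken from
         * params->mem_ranges below.
         */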
        paddr_t primary_end = pa_add(primary_begin, RSIZE_MAX);

        /*
         * Load the kernel if a filename is specified in the VM manifest.
         * For an FF-A partition, kernel_filename is undefined indicating
         * the partition package has already been loaded prior to Hafnium
         * booting.
         */
        if (!string_is_empty(&manifest_vm->kernel_filename)) {
                if (!load_kernel(stage1_locked, primary_begin, primary_end,
                                 manifest_vm, cpio, ppool)) {
                        dlog_error("Unable to load primary kernel.\n");
                        return false;
                }
        }

        if (!vm_init_next(MAX_CPUS, ppool, &vm)) {
                dlog_error("Unable to initialise primary VM.\n");
                return false;
        }

        if (vm->id != HF_PRIMARY_VM_ID) {
                dlog_error("Primary VM was not given correct ID.\n");
                return false;
        }

        vm_locked = vm_lock(vm);

        if (!load_common(manifest_vm, vm)) {
                ret = false;
                goto out;
        }

        if (params->device_mem_ranges_count == 0) {
                /*
                 * Map 1TB of address space as device memory to, most likely,
                 * make all devices available to the primary VM.
                 *
                 * TODO: remove this once all targets provide valid ranges.
                 */
                dlog_warning(
                        "Device memory not provided, defaulting to 1 TB.\n");

                if (!vm_identity_map(
                            vm_locked, pa_init(0),
                            pa_init(UINT64_C(1024) * 1024 * 1024 * 1024),
                            MM_MODE_R | MM_MODE_W | MM_MODE_D, ppool, NULL)) {
                        dlog_error(
                                "Unable to initialise address space for "
                                "primary VM.\n");
                        ret = false;
                        goto out;
                }
        }

        /* Map normal memory as such to permit caching, execution, etc. */
        for (i = 0; i < params->mem_ranges_count; ++i) {
                if (!vm_identity_map(vm_locked, params->mem_ranges[i].begin,
                                     params->mem_ranges[i].end,
                                     MM_MODE_R | MM_MODE_W | MM_MODE_X, ppool,
                                     NULL)) {
                        dlog_error(
                                "Unable to initialise memory for primary "
                                "VM.\n");
                        ret = false;
                        goto out;
                }
        }

        /* Map device memory as such to prevent execution, speculation etc. */
        for (i = 0; i < params->device_mem_ranges_count; ++i) {
                if (!vm_identity_map(
                            vm_locked, params->device_mem_ranges[i].begin,
                            params->device_mem_ranges[i].end,
                            MM_MODE_R | MM_MODE_W | MM_MODE_D, ppool, NULL)) {
                        dlog_error(
                                "Unable to initialise device memory for "
                                "primary VM.\n");
                        ret = false;
                        goto out;
                }
        }

        if (!vm_unmap_hypervisor(vm_locked, ppool)) {
                dlog_error("Unable to unmap hypervisor from primary VM.\n");
                ret = false;
                goto out;
        }

        if (!plat_iommu_unmap_iommus(vm_locked, ppool)) {
                dlog_error("Unable to unmap IOMMUs from primary VM.\n");
                ret = false;
                goto out;
        }

        dlog_info("Loaded primary VM with %u vCPUs, entry at %#x.\n",
                  vm->vcpu_count, pa_addr(primary_begin));

        vcpu_locked = vcpu_lock(vm_get_vcpu(vm, 0));
        vcpu_on(vcpu_locked, primary_entry, params->kernel_arg);
        vcpu_unlock(&vcpu_locked);
        ret = true;

out:
        vm_unlock(&vm_locked);

        return ret;
}

/*
 * Loads a secondary VM.
 */
static bool load_secondary(struct mm_stage1_locked stage1_locked,
                           paddr_t mem_begin, paddr_t mem_end,
                           const struct manifest_vm *manifest_vm,
                           const struct memiter *cpio, struct mpool *ppool)
{
        struct vm *vm;
        struct vm_locked vm_locked;
        struct vcpu *vcpu;
        ipaddr_t secondary_entry;
        bool ret;

        /*
         * Load the kernel if a filename is specified in the VM manifest.
         * For an FF-A partition, kernel_filename is undefined indicating
         * the partition package has already been loaded prior to Hafnium
         * booting.
         */
        if (!string_is_empty(&manifest_vm->kernel_filename)) {
                if (!load_kernel(stage1_locked, mem_begin, mem_end,
                                 manifest_vm, cpio, ppool)) {
                        dlog_error("Unable to load kernel.\n");
                        return false;
                }
        }

        if (!vm_init_next(manifest_vm->secondary.vcpu_count, ppool, &vm)) {
                dlog_error("Unable to initialise VM.\n");
                return false;
        }

        if (!load_common(manifest_vm, vm)) {
                return false;
        }

        vm_locked = vm_lock(vm);

        /* Grant the VM access to the memory. */
        if (!vm_identity_map(vm_locked, mem_begin, mem_end,
                             MM_MODE_R | MM_MODE_W | MM_MODE_X, ppool,
                             &secondary_entry)) {
                dlog_error("Unable to initialise memory.\n");
                ret = false;
                goto out;
        }

        dlog_info("Loaded with %u vCPUs, entry at %#x.\n",
                  manifest_vm->secondary.vcpu_count, pa_addr(mem_begin));

        if (manifest_vm->is_ffa_partition) {
                secondary_entry =
                        ipa_add(secondary_entry, manifest_vm->sp.ep_offset);
        }

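        /* Set up vCPU 0 of the secondary to start at its entry point. */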
        vcpu = vm_get_vcpu(vm, 0);
        vcpu_secondary_reset_and_start(vcpu, secondary_entry,
                                       pa_difference(mem_begin, mem_end));
        ret = true;

out:
        vm_unlock(&vm_locked);

        return ret;
}

/**
 * Try to find a memory range of the given size within the given ranges, and
 * remove it from them. Return true on success, or false if no large enough
 * contiguous range is found.
 */
static bool carve_out_mem_range(struct mem_range *mem_ranges,
                                size_t mem_ranges_count, uint64_t size_to_find,
                                paddr_t *found_begin, paddr_t *found_end)
{
        size_t i;

        /*
         * TODO(b/116191358): Consider being cleverer about how we pack VMs
         * together, with a non-greedy algorithm.
         */
        for (i = 0; i < mem_ranges_count; ++i) {
                if (size_to_find <=
                    pa_difference(mem_ranges[i].begin, mem_ranges[i].end)) {
                        /*
                         * This range is big enough, take some of it from the
                         * end and reduce its size accordingly.
                         */
                        *found_end = mem_ranges[i].end;
                        *found_begin = pa_init(pa_addr(mem_ranges[i].end) -
                                               size_to_find);
                        mem_ranges[i].end = *found_begin;
                        return true;
                }
        }
        return false;
}

/**
 * Given arrays of memory ranges before and after memory was removed for
 * secondary VMs, add the difference to the reserved ranges of the given
 * update. Return true on success, or false if there would be more than
 * MAX_MEM_RANGES reserved ranges after adding the new ones.
 * `before` and `after` must be arrays of exactly `mem_ranges_count` elements.
 */
static bool update_reserved_ranges(struct boot_params_update *update,
                                   const struct mem_range *before,
                                   const struct mem_range *after,
                                   size_t mem_ranges_count)
{
        size_t i;

        for (i = 0; i < mem_ranges_count; ++i) {
                if (pa_addr(after[i].begin) > pa_addr(before[i].begin)) {
                        if (update->reserved_ranges_count >= MAX_MEM_RANGES) {
                                dlog_error(
                                        "Too many reserved ranges after "
                                        "loading secondary VMs.\n");
                                return false;
                        }
                        update->reserved_ranges[update->reserved_ranges_count]
                                .begin = before[i].begin;
                        update->reserved_ranges[update->reserved_ranges_count]
                                .end = after[i].begin;
                        update->reserved_ranges_count++;
                }
                if (pa_addr(after[i].end) < pa_addr(before[i].end)) {
                        if (update->reserved_ranges_count >= MAX_MEM_RANGES) {
                                dlog_error(
                                        "Too many reserved ranges after "
                                        "loading secondary VMs.\n");
                                return false;
                        }
                        update->reserved_ranges[update->reserved_ranges_count]
                                .begin = after[i].end;
                        update->reserved_ranges[update->reserved_ranges_count]
                                .end = before[i].end;
                        update->reserved_ranges_count++;
                }
        }

        return true;
}

/*
 * Loads all VMs from the manifest.
 */
bool load_vms(struct mm_stage1_locked stage1_locked,
              const struct manifest *manifest, const struct memiter *cpio,
              const struct boot_params *params,
              struct boot_params_update *update, struct mpool *ppool)
{
        struct vm *primary;
        struct vm *tee;
        struct mem_range mem_ranges_available[MAX_MEM_RANGES];
        struct vm_locked primary_vm_locked;
        size_t i;
        bool success = true;

        if (!load_primary(stage1_locked, &manifest->vm[HF_PRIMARY_VM_INDEX],
                          cpio, params, ppool)) {
                dlog_error("Unable to load primary VM.\n");
                return false;
        }

        /*
         * Initialise the dummy VM which represents TrustZone, and set up its
         * RX/TX buffers.
         */
        tee = vm_init(HF_TEE_VM_ID, 0, ppool);
        CHECK(tee != NULL);
        tee->mailbox.send = &tee_send_buffer;
        tee->mailbox.recv = &tee_recv_buffer;

        static_assert(
                sizeof(mem_ranges_available) == sizeof(params->mem_ranges),
                "mem_range arrays must be the same size for memcpy.");
        static_assert(sizeof(mem_ranges_available) < 500,
                      "This will use too much stack, either make "
                      "MAX_MEM_RANGES smaller or change this.");
        memcpy_s(mem_ranges_available, sizeof(mem_ranges_available),
                 params->mem_ranges, sizeof(params->mem_ranges));

        /* Round the last addresses down to the page size. */
        for (i = 0; i < params->mem_ranges_count; ++i) {
                mem_ranges_available[i].end = pa_init(align_down(
                        pa_addr(mem_ranges_available[i].end), PAGE_SIZE));
        }

        primary = vm_find(HF_PRIMARY_VM_ID);
        primary_vm_locked = vm_lock(primary);

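        /*
         * Load each secondary VM: carve its memory out of the available
         * ranges (or use the load address from the manifest for an FF-A
         * partition), load it, then deny the primary VM access to that
         * memory.
         */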
        for (i = 0; i < manifest->vm_count; ++i) {
                const struct manifest_vm *manifest_vm = &manifest->vm[i];
                ffa_vm_id_t vm_id = HF_VM_ID_OFFSET + i;
                uint64_t mem_size;
                paddr_t secondary_mem_begin;
                paddr_t secondary_mem_end;

                if (vm_id == HF_PRIMARY_VM_ID) {
                        continue;
                }

                dlog_info("Loading VM%d: %s.\n", (int)vm_id,
                          manifest_vm->debug_name);

                mem_size = align_up(manifest_vm->secondary.mem_size, PAGE_SIZE);

                if (manifest_vm->is_ffa_partition) {
                        secondary_mem_begin =
                                pa_init(manifest_vm->sp.load_addr);
                        secondary_mem_end =
                                pa_init(manifest_vm->sp.load_addr + mem_size);
                } else if (!carve_out_mem_range(mem_ranges_available,
                                                params->mem_ranges_count,
                                                mem_size, &secondary_mem_begin,
                                                &secondary_mem_end)) {
                        dlog_error("Not enough memory (%u bytes).\n",
                                   mem_size);
                        continue;
                }

                if (!load_secondary(stage1_locked, secondary_mem_begin,
                                    secondary_mem_end, manifest_vm, cpio,
                                    ppool)) {
                        dlog_error("Unable to load VM.\n");
                        continue;
                }

                /* Deny the primary VM access to this memory. */
                if (!vm_unmap(primary_vm_locked, secondary_mem_begin,
                              secondary_mem_end, ppool)) {
                        dlog_error(
                                "Unable to unmap secondary VM from primary "
                                "VM.\n");
                        success = false;
                        break;
                }
        }

        vm_unlock(&primary_vm_locked);

        if (!success) {
                return false;
        }

        /*
         * Add newly reserved areas to update params by looking at the
         * difference between the available ranges from the original params
         * and the updated mem_ranges_available. We assume that the number and
         * order of available ranges is the same, i.e. we don't remove any
         * ranges above, only make them smaller.
         */
        return update_reserved_ranges(update, params->mem_ranges,
                                      mem_ranges_available,
                                      params->mem_ranges_count);
}