/*
 * Copyright 2018 The Hafnium Authors.
 *
 * Use of this source code is governed by a BSD-style
 * license that can be found in the LICENSE file or at
 * https://opensource.org/licenses/BSD-3-Clause.
 */

#include "hf/load.h"

#include <stdbool.h>

#include "hf/arch/other_world.h"
#include "hf/arch/vm.h"

#include "hf/api.h"
#include "hf/boot_params.h"
#include "hf/check.h"
#include "hf/dlog.h"
#include "hf/fdt_patch.h"
#include "hf/layout.h"
#include "hf/memiter.h"
#include "hf/mm.h"
#include "hf/plat/console.h"
#include "hf/plat/iommu.h"
#include "hf/static_assert.h"
#include "hf/std.h"
#include "hf/vm.h"

#include "vmapi/hf/call.h"
#include "vmapi/hf/ffa.h"
/**
 * Copies data to an unmapped location by mapping it for write, copying the
 * data, then unmapping it.
 *
 * The data is written so that it is available to all cores with the cache
 * disabled. When switching to the partitions, the caching is initially
 * disabled so the data must be available without the cache.
 */
static bool copy_to_unmapped(struct mm_stage1_locked stage1_locked, paddr_t to,
			     struct memiter *from_it, struct mpool *ppool)
{
	const void *from = memiter_base(from_it);
	size_t size = memiter_size(from_it);
	paddr_t to_end = pa_add(to, size);
	void *ptr;

	ptr = mm_identity_map(stage1_locked, to, to_end, MM_MODE_W, ppool);
	if (!ptr) {
		return false;
	}

	memcpy_s(ptr, size, from, size);
	arch_mm_flush_dcache(ptr, size);

	CHECK(mm_unmap(stage1_locked, to, to_end, ppool));

	return true;
}

/**
 * Loads the secondary VM's kernel.
 * Stores the kernel size in kernel_size (if kernel_size is not NULL).
 * Returns false if it cannot load the kernel.
 */
static bool load_kernel(struct mm_stage1_locked stage1_locked, paddr_t begin,
			paddr_t end, const struct manifest_vm *manifest_vm,
			const struct memiter *cpio, struct mpool *ppool,
			size_t *kernel_size)
{
	struct memiter kernel;
	size_t size;

	if (!cpio_get_file(cpio, &manifest_vm->kernel_filename, &kernel)) {
		dlog_error("Could not find kernel file \"%s\".\n",
			   string_data(&manifest_vm->kernel_filename));
		return false;
	}

	size = memiter_size(&kernel);
	if (pa_difference(begin, end) < size) {
		dlog_error("Kernel is larger than available memory.\n");
		return false;
	}

	if (!copy_to_unmapped(stage1_locked, begin, &kernel, ppool)) {
		dlog_error("Unable to copy kernel.\n");
		return false;
	}

	if (kernel_size) {
		*kernel_size = size;
	}

	return true;
}

/**
 * Links the RX/TX buffers provided in the partition manifest to the VM's
 * mailbox.
 */
static bool link_rxtx_to_mailbox(struct mm_stage1_locked stage1_locked,
				 struct vm_locked vm_locked, struct rx_tx rxtx,
				 struct mpool *ppool)
{
	struct ffa_value ret;
	ipaddr_t send;
	ipaddr_t recv;
	uint32_t page_count;

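	/*
	 * The manifest provides the buffers' base addresses as IPAs. Only the
	 * TX buffer's page count is passed on, so both buffers are assumed to
	 * span the same number of pages.
	 */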
	send = ipa_init(rxtx.tx_buffer->base_address);
	recv = ipa_init(rxtx.rx_buffer->base_address);
	page_count = rxtx.tx_buffer->page_count;

	ret = api_vm_configure_pages(stage1_locked, vm_locked, send, recv,
				     page_count, ppool);
	if (ret.func != FFA_SUCCESS_32) {
		return false;
	}

	dlog_verbose(" mailbox: send = %#x, recv = %#x\n",
		     vm_locked.vm->mailbox.send, vm_locked.vm->mailbox.recv);

	return true;
}

/**
 * Performs VM loading activities that are common between the primary and
 * secondary VMs.
 */
static bool load_common(struct mm_stage1_locked stage1_locked,
			struct vm_locked vm_locked,
			const struct manifest_vm *manifest_vm,
			struct mpool *ppool)
{
	vm_locked.vm->smc_whitelist = manifest_vm->smc_whitelist;
	vm_locked.vm->uuid = manifest_vm->sp.uuid;

	if (manifest_vm->is_ffa_partition) {
		/* Link the RX/TX buffers to the mailbox. */
		if (manifest_vm->sp.rxtx.available) {
			if (!link_rxtx_to_mailbox(stage1_locked, vm_locked,
						  manifest_vm->sp.rxtx,
						  ppool)) {
				dlog_error(
					"Unable to link RX/TX buffers with "
					"mailbox.\n");
				return false;
			}
		}

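		/*
		 * Managed exit is supported when the manifest requests it,
		 * whether the partition uses direct messaging only or both
		 * messaging methods.
		 */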
		if (manifest_vm->sp.messaging_method ==
			    DIRECT_MESSAGING_MANAGED_EXIT ||
		    manifest_vm->sp.messaging_method ==
			    BOTH_MESSAGING_MANAGED_EXIT) {
			vm_locked.vm->supports_managed_exit = true;
		}

		vm_locked.vm->boot_order = manifest_vm->sp.boot_order;
		/* Update the boot list according to boot_order. */
		vm_update_boot(vm_locked.vm);
	}

	/* Initialize architecture-specific features. */
	arch_vm_features_set(vm_locked.vm);

	return true;
}

/**
 * Loads the primary VM.
 */
static bool load_primary(struct mm_stage1_locked stage1_locked,
			 const struct manifest_vm *manifest_vm,
			 const struct memiter *cpio,
			 const struct boot_params *params, struct mpool *ppool)
{
	paddr_t primary_begin;
	ipaddr_t primary_entry;
	struct vm *vm;
	struct vm_locked vm_locked;
	struct vcpu_locked vcpu_locked;
	size_t i;
	bool ret;

	if (manifest_vm->is_ffa_partition) {
		primary_begin = pa_init(manifest_vm->sp.load_addr);
		primary_entry = ipa_add(ipa_from_pa(primary_begin),
					manifest_vm->sp.ep_offset);
	} else {
		primary_begin =
			(manifest_vm->primary.boot_address ==
			 MANIFEST_INVALID_ADDRESS)
				? layout_primary_begin()
				: pa_init(manifest_vm->primary.boot_address);
		primary_entry = ipa_from_pa(primary_begin);
	}

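	/*
	 * The primary's image size is not known here, so bound it by
	 * RSIZE_MAX; load_kernel() then only rejects sizes that memcpy_s()
	 * could not copy anyway.
	 */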
	paddr_t primary_end = pa_add(primary_begin, RSIZE_MAX);

	/*
	 * Load the kernel if a filename is specified in the VM manifest.
	 * For an FF-A partition, kernel_filename is undefined, indicating
	 * that the partition package was already loaded before Hafnium
	 * booted.
	 */
	if (!string_is_empty(&manifest_vm->kernel_filename)) {
		if (!load_kernel(stage1_locked, primary_begin, primary_end,
				 manifest_vm, cpio, ppool, NULL)) {
			dlog_error("Unable to load primary kernel.\n");
			return false;
		}
	}

	if (!vm_init_next(MAX_CPUS, ppool, &vm)) {
		dlog_error("Unable to initialise primary VM.\n");
		return false;
	}

	if (vm->id != HF_PRIMARY_VM_ID) {
		dlog_error("Primary VM was not given correct ID.\n");
		return false;
	}

	vm_locked = vm_lock(vm);

	if (params->device_mem_ranges_count == 0) {
		/*
		 * Map 1TB of address space as device memory to, most likely,
		 * make all devices available to the primary VM.
		 *
		 * TODO: remove this once all targets provide valid ranges.
		 */
		dlog_warning(
			"Device memory not provided, defaulting to 1 TB.\n");

		if (!vm_identity_map(
			    vm_locked, pa_init(0),
			    pa_init(UINT64_C(1024) * 1024 * 1024 * 1024),
			    MM_MODE_R | MM_MODE_W | MM_MODE_D, ppool, NULL)) {
			dlog_error(
				"Unable to initialise address space for "
				"primary VM.\n");
			ret = false;
			goto out;
		}
	}

	/* Map normal memory as such to permit caching, execution, etc. */
	for (i = 0; i < params->mem_ranges_count; ++i) {
		if (!vm_identity_map(vm_locked, params->mem_ranges[i].begin,
				     params->mem_ranges[i].end,
				     MM_MODE_R | MM_MODE_W | MM_MODE_X, ppool,
				     NULL)) {
			dlog_error(
				"Unable to initialise memory for primary "
				"VM.\n");
			ret = false;
			goto out;
		}
	}

	/*
	 * Map device memory as such to prevent execution, speculation, etc.
	 */
	for (i = 0; i < params->device_mem_ranges_count; ++i) {
		if (!vm_identity_map(
			    vm_locked, params->device_mem_ranges[i].begin,
			    params->device_mem_ranges[i].end,
			    MM_MODE_R | MM_MODE_W | MM_MODE_D, ppool, NULL)) {
			dlog_error(
				"Unable to initialise device memory for "
				"primary VM.\n");
			ret = false;
			goto out;
		}
	}

	if (!load_common(stage1_locked, vm_locked, manifest_vm, ppool)) {
		ret = false;
		goto out;
	}

	if (!vm_unmap_hypervisor(vm_locked, ppool)) {
		dlog_error("Unable to unmap hypervisor from primary VM.\n");
		ret = false;
		goto out;
	}

	if (!plat_iommu_unmap_iommus(vm_locked, ppool)) {
		dlog_error("Unable to unmap IOMMUs from primary VM.\n");
		ret = false;
		goto out;
	}

	dlog_info("Loaded primary VM with %u vCPUs, entry at %#x.\n",
		  vm->vcpu_count, pa_addr(primary_begin));

	/* Mark the primary as the first booted VM. */
	vm_update_boot(vm);

	vcpu_locked = vcpu_lock(vm_get_vcpu(vm, 0));
	vcpu_on(vcpu_locked, primary_entry, params->kernel_arg);
	vcpu_unlock(&vcpu_locked);
	ret = true;

out:
	vm_unlock(&vm_locked);

	return ret;
}

/**
 * Loads the secondary VM's FDT.
 * Stores the total allocated size for the FDT in fdt_allocated_size (if
 * fdt_allocated_size is not NULL). The allocated size includes additional
 * space for potential patching.
 */
static bool load_secondary_fdt(struct mm_stage1_locked stage1_locked,
			       paddr_t end, size_t fdt_max_size,
			       const struct manifest_vm *manifest_vm,
			       const struct memiter *cpio, struct mpool *ppool,
			       paddr_t *fdt_addr, size_t *fdt_allocated_size)
{
	struct memiter fdt;
	size_t allocated_size;

	CHECK(!string_is_empty(&manifest_vm->secondary.fdt_filename));

	if (!cpio_get_file(cpio, &manifest_vm->secondary.fdt_filename, &fdt)) {
		dlog_error("Cannot open the secondary VM's FDT.\n");
		return false;
	}

	/*
	 * Ensure the FDT has one additional page at the end for patching,
	 * and align it to the page boundary.
	 */
	allocated_size = align_up(memiter_size(&fdt), PAGE_SIZE) + PAGE_SIZE;

	if (allocated_size > fdt_max_size) {
		dlog_error(
			"FDT allocated space (%u) is more than the specified "
			"maximum to use (%u).\n",
			allocated_size, fdt_max_size);
		return false;
	}

	/* Load the FDT to the end of the VM's allocated memory space. */
	*fdt_addr = pa_init(pa_addr(pa_sub(end, allocated_size)));

	dlog_info("Loading secondary FDT of allocated size %u at 0x%x.\n",
		  allocated_size, pa_addr(*fdt_addr));

	if (!copy_to_unmapped(stage1_locked, *fdt_addr, &fdt, ppool)) {
		dlog_error("Unable to copy FDT.\n");
		return false;
	}

	if (fdt_allocated_size) {
		*fdt_allocated_size = allocated_size;
	}

	return true;
}

/**
 * Loads a secondary VM.
 */
static bool load_secondary(struct mm_stage1_locked stage1_locked,
			   struct vm_locked primary_vm_locked,
			   paddr_t mem_begin, paddr_t mem_end,
			   const struct manifest_vm *manifest_vm,
			   const struct memiter *cpio, struct mpool *ppool)
{
	struct vm *vm;
	struct vm_locked vm_locked;
	struct vcpu_locked vcpu_locked;
	struct vcpu *vcpu;
	ipaddr_t secondary_entry;
	bool ret;
	paddr_t fdt_addr;
	bool has_fdt;
	size_t kernel_size = 0;
	const size_t mem_size = pa_difference(mem_begin, mem_end);

	/*
	 * Load the kernel if a filename is specified in the VM manifest.
	 * For an FF-A partition, kernel_filename is undefined, indicating
	 * that the partition package was already loaded before Hafnium
	 * booted.
	 */
	if (!string_is_empty(&manifest_vm->kernel_filename)) {
		if (!load_kernel(stage1_locked, mem_begin, mem_end, manifest_vm,
				 cpio, ppool, &kernel_size)) {
			dlog_error("Unable to load kernel.\n");
			return false;
		}
	}

	has_fdt = !string_is_empty(&manifest_vm->secondary.fdt_filename);
	if (has_fdt) {
		/*
		 * Ensure that the FDT does not overwrite the kernel or
		 * overlap its page, so that the FDT starts at a page
		 * boundary.
		 */
		const size_t fdt_max_size =
			mem_size - align_up(kernel_size, PAGE_SIZE);

		size_t fdt_allocated_size;

		if (!load_secondary_fdt(stage1_locked, mem_end, fdt_max_size,
					manifest_vm, cpio, ppool, &fdt_addr,
					&fdt_allocated_size)) {
			dlog_error("Unable to load FDT.\n");
			return false;
		}

		if (!fdt_patch_mem(stage1_locked, fdt_addr, fdt_allocated_size,
				   mem_begin, mem_end, ppool)) {
			dlog_error("Unable to patch FDT.\n");
			return false;
		}
	}

	if (!vm_init_next(manifest_vm->secondary.vcpu_count, ppool, &vm)) {
		dlog_error("Unable to initialise VM.\n");
		return false;
	}

	vm_locked = vm_lock(vm);

	/* Grant the VM access to the memory. */
	if (!vm_identity_map(vm_locked, mem_begin, mem_end,
			     MM_MODE_R | MM_MODE_W | MM_MODE_X, ppool,
			     &secondary_entry)) {
		dlog_error("Unable to initialise memory.\n");
		ret = false;
		goto out;
	}

	if (manifest_vm->is_ffa_partition) {
		int j = 0;
		paddr_t region_begin;
		paddr_t region_end;
		paddr_t alloc_base = mem_end;
		size_t size;
		size_t total_alloc = 0;

		/* Map memory regions. */
		while (j < manifest_vm->sp.mem_region_count) {
			size = manifest_vm->sp.mem_regions[j].page_count *
			       PAGE_SIZE;
			/*
			 * For memory regions without a base address, memory
			 * should be allocated inside the partition's page
			 * table. Start allocating memory regions in the
			 * partition's page table, starting from the end.
			 * TODO: Add a mechanism to let the partition know of
			 * these memory regions.
			 */
			if (manifest_vm->sp.mem_regions[j].base_address ==
			    MANIFEST_INVALID_ADDRESS) {
				total_alloc += size;
				/*
				 * Don't go beyond half the VM's memory space.
				 */
				if (total_alloc >
				    (manifest_vm->secondary.mem_size / 2)) {
					dlog_error(
						"Not enough space for memory-"
						"region allocation.\n");
					ret = false;
					goto out;
				}

				region_end = alloc_base;
				region_begin = pa_subtract(alloc_base, size);
				alloc_base = region_begin;

				if (!vm_identity_map(
					    vm_locked, region_begin,
					    region_end,
					    manifest_vm->sp.mem_regions[j]
						    .attributes,
					    ppool, NULL)) {
					dlog_error(
						"Unable to map secondary VM "
						"memory-region.\n");
					ret = false;
					goto out;
				}

				dlog_info(
					" Memory region %#x - %#x allocated\n",
					region_begin, region_end);
			} else {
				/*
				 * Identity map the memory region in both
				 * cases, VA (S-EL0) or IPA (S-EL1).
				 */
				region_begin =
					pa_init(manifest_vm->sp.mem_regions[j]
							.base_address);
				region_end = pa_add(region_begin, size);

				if (!vm_identity_map(
					    vm_locked, region_begin,
					    region_end,
					    manifest_vm->sp.mem_regions[j]
						    .attributes,
					    ppool, NULL)) {
					dlog_error(
						"Unable to map secondary VM "
						"memory-region.\n");
					ret = false;
					goto out;
				}
			}

			/* Deny the primary VM access to this memory. */
			if (!vm_unmap(primary_vm_locked, region_begin,
				      region_end, ppool)) {
				dlog_error(
					"Unable to unmap secondary VM memory-"
					"region from primary VM.\n");
				ret = false;
				goto out;
			}

			j++;
		}

		/* Map device regions. */
		j = 0;
		while (j < manifest_vm->sp.dev_region_count) {
			region_begin = pa_init(
				manifest_vm->sp.dev_regions[j].base_address);
			size = manifest_vm->sp.dev_regions[j].page_count *
			       PAGE_SIZE;
			region_end = pa_add(region_begin, size);

			if (!vm_identity_map(
				    vm_locked, region_begin, region_end,
				    manifest_vm->sp.dev_regions[j].attributes,
				    ppool, NULL)) {
				dlog_error(
					"Unable to map secondary VM "
					"device-region.\n");
				ret = false;
				goto out;
			}
			/* Deny the primary VM access to this region. */
			if (!vm_unmap(primary_vm_locked, region_begin,
				      region_end, ppool)) {
				dlog_error(
					"Unable to unmap secondary VM device-"
					"region from primary VM.\n");
				ret = false;
				goto out;
			}
			j++;
		}

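		/*
		 * The entry point lies at the manifest-specified offset from
		 * the load address rather than at the start of the image.
		 */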
		secondary_entry =
			ipa_add(secondary_entry, manifest_vm->sp.ep_offset);
	}

	if (!load_common(stage1_locked, vm_locked, manifest_vm, ppool)) {
		ret = false;
		goto out;
	}

	dlog_info("Loaded with %u vCPUs, entry at %#x.\n",
		  manifest_vm->secondary.vcpu_count, pa_addr(mem_begin));

	vcpu = vm_get_vcpu(vm, 0);

	vcpu_locked = vcpu_lock(vcpu);
	if (has_fdt) {
		vcpu_secondary_reset_and_start(vcpu_locked, secondary_entry,
					       pa_addr(fdt_addr));
	} else {
		/*
		 * Without an FDT, secondary VMs expect the memory size to be
		 * passed in register x0, which is what
		 * vcpu_secondary_reset_and_start does in this case.
		 */
		vcpu_secondary_reset_and_start(vcpu_locked, secondary_entry,
					       mem_size);
	}

	vcpu_unlock(&vcpu_locked);

	ret = true;

out:
	vm_unlock(&vm_locked);

	return ret;
}

/**
 * Tries to find a memory range of the given size within the given ranges, and
 * removes it from them. Returns true on success, or false if no large enough
 * contiguous range is found.
 */
static bool carve_out_mem_range(struct mem_range *mem_ranges,
				size_t mem_ranges_count, uint64_t size_to_find,
				paddr_t *found_begin, paddr_t *found_end)
{
	size_t i;

	/*
	 * TODO(b/116191358): Consider being cleverer about how we pack VMs
	 * together, with a non-greedy algorithm.
	 */
	for (i = 0; i < mem_ranges_count; ++i) {
		if (size_to_find <=
		    pa_difference(mem_ranges[i].begin, mem_ranges[i].end)) {
			/*
			 * This range is big enough, take some of it from the
			 * end and reduce its size accordingly.
			 */
			*found_end = mem_ranges[i].end;
			*found_begin = pa_init(pa_addr(mem_ranges[i].end) -
					       size_to_find);
			mem_ranges[i].end = *found_begin;
			return true;
		}
	}
	return false;
}

/**
 * Given arrays of memory ranges before and after memory was removed for
 * secondary VMs, adds the difference to the reserved ranges of the given
 * update. Returns true on success, or false if there would be more than
 * MAX_MEM_RANGES reserved ranges after adding the new ones.
 * `before` and `after` must be arrays of exactly `mem_ranges_count` elements.
 */
static bool update_reserved_ranges(struct boot_params_update *update,
				   const struct mem_range *before,
				   const struct mem_range *after,
				   size_t mem_ranges_count)
{
	size_t i;

	for (i = 0; i < mem_ranges_count; ++i) {
		if (pa_addr(after[i].begin) > pa_addr(before[i].begin)) {
			if (update->reserved_ranges_count >= MAX_MEM_RANGES) {
				dlog_error(
					"Too many reserved ranges after "
					"loading secondary VMs.\n");
				return false;
			}
			update->reserved_ranges[update->reserved_ranges_count]
				.begin = before[i].begin;
			update->reserved_ranges[update->reserved_ranges_count]
				.end = after[i].begin;
			update->reserved_ranges_count++;
		}
		if (pa_addr(after[i].end) < pa_addr(before[i].end)) {
			if (update->reserved_ranges_count >= MAX_MEM_RANGES) {
				dlog_error(
					"Too many reserved ranges after "
					"loading secondary VMs.\n");
				return false;
			}
			update->reserved_ranges[update->reserved_ranges_count]
				.begin = after[i].end;
			update->reserved_ranges[update->reserved_ranges_count]
				.end = before[i].end;
			update->reserved_ranges_count++;
		}
	}

	return true;
}

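/**
 * Initialises the VM that represents the other world and binds each of its
 * vCPUs to the corresponding physical CPU.
 */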
static bool init_other_world_vm(struct mpool *ppool)
{
	struct vm *other_world_vm;
	size_t i;

	/*
	 * Initialise the dummy VM which represents the opposite world:
	 * - TrustZone (or the SPMC) when running the Hypervisor
	 * - the Hypervisor when running TZ/SPMC
	 */
	other_world_vm = vm_init(HF_OTHER_WORLD_ID, MAX_CPUS, ppool);
	CHECK(other_world_vm != NULL);

	for (i = 0; i < MAX_CPUS; i++) {
		struct vcpu *vcpu = vm_get_vcpu(other_world_vm, i);
		struct cpu *cpu = cpu_find_index(i);

		vcpu->cpu = cpu;
	}

	return arch_other_world_vm_init(other_world_vm, ppool);
}

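/*
 * For reference, the VMs loaded below are described by manifest nodes along
 * these lines (an illustrative sketch only, not the authoritative schema;
 * the values are made up):
 *
 *	hypervisor {
 *		compatible = "hafnium,hafnium";
 *		vm1 {
 *			debug_name = "primary";
 *			kernel_filename = "vmlinuz";
 *		};
 *		vm2 {
 *			debug_name = "secondary";
 *			kernel_filename = "kernel0";
 *			vcpu_count = <1>;
 *			mem_size = <0x100000>;
 *		};
 *	};
 */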
/**
 * Loads all VMs from the manifest.
 */
bool load_vms(struct mm_stage1_locked stage1_locked,
	      const struct manifest *manifest, const struct memiter *cpio,
	      const struct boot_params *params,
	      struct boot_params_update *update, struct mpool *ppool)
{
	struct vm *primary;
	struct mem_range mem_ranges_available[MAX_MEM_RANGES];
	struct vm_locked primary_vm_locked;
	size_t i;
	bool success = true;

	/*
	 * Only try to load the primary VM if it is supposed to be in this
	 * world.
	 */
	if (vm_id_is_current_world(HF_PRIMARY_VM_ID)) {
		if (!load_primary(stage1_locked,
				  &manifest->vm[HF_PRIMARY_VM_INDEX], cpio,
				  params, ppool)) {
			dlog_error("Unable to load primary VM.\n");
			return false;
		}
	}

	if (!init_other_world_vm(ppool)) {
		return false;
	}

	static_assert(
		sizeof(mem_ranges_available) == sizeof(params->mem_ranges),
		"mem_range arrays must be the same size for memcpy.");
	static_assert(sizeof(mem_ranges_available) < 500,
		      "This will use too much stack, either make "
		      "MAX_MEM_RANGES smaller or change this.");
	memcpy_s(mem_ranges_available, sizeof(mem_ranges_available),
		 params->mem_ranges, sizeof(params->mem_ranges));

	/* Round the last addresses down to the page size. */
	for (i = 0; i < params->mem_ranges_count; ++i) {
		mem_ranges_available[i].end = pa_init(align_down(
			pa_addr(mem_ranges_available[i].end), PAGE_SIZE));
	}

	primary = vm_find(HF_PRIMARY_VM_ID);
	primary_vm_locked = vm_lock(primary);

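	/*
	 * Walk the manifest and load each secondary VM; VM IDs are assigned
	 * sequentially from HF_VM_ID_OFFSET, following the manifest order.
	 */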
	for (i = 0; i < manifest->vm_count; ++i) {
		const struct manifest_vm *manifest_vm = &manifest->vm[i];
		ffa_vm_id_t vm_id = HF_VM_ID_OFFSET + i;
		uint64_t mem_size;
		paddr_t secondary_mem_begin;
		paddr_t secondary_mem_end;

		if (vm_id == HF_PRIMARY_VM_ID) {
			continue;
		}

		dlog_info("Loading VM id %#x: %s.\n", vm_id,
			  manifest_vm->debug_name);

		mem_size = align_up(manifest_vm->secondary.mem_size, PAGE_SIZE);

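		/*
		 * FF-A partitions run from the fixed load address given in
		 * the manifest; other VMs have their memory carved out of
		 * the available ranges.
		 */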
		if (manifest_vm->is_ffa_partition) {
			secondary_mem_begin =
				pa_init(manifest_vm->sp.load_addr);
			secondary_mem_end =
				pa_init(manifest_vm->sp.load_addr + mem_size);
		} else if (!carve_out_mem_range(mem_ranges_available,
						params->mem_ranges_count,
						mem_size, &secondary_mem_begin,
						&secondary_mem_end)) {
			dlog_error("Not enough memory (%u bytes).\n", mem_size);
			continue;
		}

		if (!load_secondary(stage1_locked, primary_vm_locked,
				    secondary_mem_begin, secondary_mem_end,
				    manifest_vm, cpio, ppool)) {
			dlog_error("Unable to load VM.\n");
			continue;
		}

		/* Deny the primary VM access to this memory. */
		if (!vm_unmap(primary_vm_locked, secondary_mem_begin,
			      secondary_mem_end, ppool)) {
			dlog_error(
				"Unable to unmap secondary VM from primary "
				"VM.\n");
			success = false;
			break;
		}
	}

	vm_unlock(&primary_vm_locked);

	if (!success) {
		return false;
	}

	/*
	 * Add newly reserved areas to the update params by looking at the
	 * difference between the available ranges from the original params
	 * and the updated mem_ranges_available. We assume that the number and
	 * order of available ranges is the same, i.e. we don't remove any
	 * ranges above, only make them smaller.
	 */
	return update_reserved_ranges(update, params->mem_ranges,
				      mem_ranges_available,
				      params->mem_ranges_count);
}