/*
 * Copyright 2018 The Hafnium Authors.
 *
 * Use of this source code is governed by a BSD-style
 * license that can be found in the LICENSE file or at
 * https://opensource.org/licenses/BSD-3-Clause.
 */

#include "hf/load.h"

#include <stdbool.h>

#include "hf/arch/vm.h"

#include "hf/api.h"
#include "hf/boot_params.h"
#include "hf/check.h"
#include "hf/dlog.h"
#include "hf/fdt_patch.h"
#include "hf/layout.h"
#include "hf/memiter.h"
#include "hf/mm.h"
#include "hf/plat/console.h"
#include "hf/plat/iommu.h"
#include "hf/static_assert.h"
#include "hf/std.h"
#include "hf/vm.h"

#include "vmapi/hf/call.h"
#include "vmapi/hf/ffa.h"

/**
 * Copies data to an unmapped location by mapping it for write, copying the
 * data, then unmapping it.
 *
 * The data is written so that it is available to all cores with caches
 * disabled. When switching to a partition, caching is initially disabled, so
 * the data must be available without the cache.
 */
static bool copy_to_unmapped(struct mm_stage1_locked stage1_locked, paddr_t to,
			     struct memiter *from_it, struct mpool *ppool)
{
	const void *from = memiter_base(from_it);
	size_t size = memiter_size(from_it);
	paddr_t to_end = pa_add(to, size);
	void *ptr;

	ptr = mm_identity_map(stage1_locked, to, to_end, MM_MODE_W, ppool);
	if (!ptr) {
		return false;
	}

	memcpy_s(ptr, size, from, size);
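	/* Flush the copied range so it is visible with the data cache off. */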
	arch_mm_flush_dcache(ptr, size);

	CHECK(mm_unmap(stage1_locked, to, to_end, ppool));

	return true;
}

/**
 * Loads a VM's kernel.
 * Stores the kernel size in kernel_size (if kernel_size is not NULL).
 * Returns false if it cannot load the kernel.
 */
static bool load_kernel(struct mm_stage1_locked stage1_locked, paddr_t begin,
			paddr_t end, const struct manifest_vm *manifest_vm,
			const struct memiter *cpio, struct mpool *ppool,
			size_t *kernel_size)
{
	struct memiter kernel;
	size_t size;

	if (!cpio_get_file(cpio, &manifest_vm->kernel_filename, &kernel)) {
		dlog_error("Could not find kernel file \"%s\".\n",
			   string_data(&manifest_vm->kernel_filename));
		return false;
	}

	size = memiter_size(&kernel);
	if (pa_difference(begin, end) < size) {
		dlog_error("Kernel is larger than available memory.\n");
		return false;
	}

	if (!copy_to_unmapped(stage1_locked, begin, &kernel, ppool)) {
		dlog_error("Unable to copy kernel.\n");
		return false;
	}

	if (kernel_size) {
		*kernel_size = size;
	}

	return true;
}

/**
 * Links the RX/TX buffers provided in the partition manifest to the mailbox.
 */
static bool link_rxtx_to_mailbox(struct mm_stage1_locked stage1_locked,
				 struct vm_locked vm_locked, struct rx_tx rxtx,
				 struct mpool *ppool)
{
	struct ffa_value ret;
	ipaddr_t send;
	ipaddr_t recv;
	uint32_t page_count;

	send = ipa_init(rxtx.tx_buffer->base_address);
	recv = ipa_init(rxtx.rx_buffer->base_address);
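	/*
	 * A single page count covers both buffers: the manifest's TX value is
	 * used, so the RX buffer is assumed to be the same size.
	 */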
	page_count = rxtx.tx_buffer->page_count;

	ret = api_vm_configure_pages(stage1_locked, vm_locked, send, recv,
				     page_count, ppool);
	if (ret.func != FFA_SUCCESS_32) {
		return false;
	}

	dlog_verbose(" mailbox: send = %#x, recv = %#x\n",
		     vm_locked.vm->mailbox.send, vm_locked.vm->mailbox.recv);

	return true;
}

/**
 * Performs VM loading activities that are common to the primary and secondary
 * VMs.
 */
static bool load_common(struct mm_stage1_locked stage1_locked,
			struct vm_locked vm_locked,
			const struct manifest_vm *manifest_vm,
			struct mpool *ppool)
{
	vm_locked.vm->smc_whitelist = manifest_vm->smc_whitelist;
	vm_locked.vm->uuid = manifest_vm->sp.uuid;

	if (manifest_vm->is_ffa_partition) {
		/* Link the RX/TX buffers to the mailbox. */
		if (manifest_vm->sp.rxtx.available) {
			if (!link_rxtx_to_mailbox(stage1_locked, vm_locked,
						  manifest_vm->sp.rxtx,
						  ppool)) {
				dlog_error(
					"Unable to link RX/TX buffers with "
					"mailbox.\n");
				return false;
			}
		}
	}

	/* Initialize architecture-specific features. */
	arch_vm_features_set(vm_locked.vm);

	return true;
}

/**
 * Loads the primary VM.
 */
static bool load_primary(struct mm_stage1_locked stage1_locked,
			 const struct manifest_vm *manifest_vm,
			 const struct memiter *cpio,
			 const struct boot_params *params, struct mpool *ppool)
{
	paddr_t primary_begin;
	ipaddr_t primary_entry;
	struct vm *vm;
	struct vm_locked vm_locked;
	struct vcpu_locked vcpu_locked;
	size_t i;
	bool ret;

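	/*
	 * An FF-A partition's load address and entry point offset both come
	 * from its manifest.
	 */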
	if (manifest_vm->is_ffa_partition) {
		primary_begin = pa_init(manifest_vm->sp.load_addr);
		primary_entry = ipa_add(ipa_from_pa(primary_begin),
					manifest_vm->sp.ep_offset);
	} else {
		primary_begin =
			(manifest_vm->primary.boot_address ==
			 MANIFEST_INVALID_ADDRESS)
				? layout_primary_begin()
				: pa_init(manifest_vm->primary.boot_address);
		primary_entry = ipa_from_pa(primary_begin);
	}

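	/*
	 * The primary VM's memory is not sized by the manifest; RSIZE_MAX
	 * (the bound the checked memory routines accept) serves as an
	 * effectively unlimited end for the kernel fit check.
	 */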
	paddr_t primary_end = pa_add(primary_begin, RSIZE_MAX);

	/*
	 * Load the kernel if a filename is specified in the VM manifest.
	 * For an FF-A partition, kernel_filename is left empty, indicating
	 * that the partition package was already loaded before Hafnium
	 * booted.
	 */
	if (!string_is_empty(&manifest_vm->kernel_filename)) {
		if (!load_kernel(stage1_locked, primary_begin, primary_end,
				 manifest_vm, cpio, ppool, NULL)) {
			dlog_error("Unable to load primary kernel.\n");
			return false;
		}
	}

	if (!vm_init_next(MAX_CPUS, ppool, &vm)) {
		dlog_error("Unable to initialise primary VM.\n");
		return false;
	}

	if (vm->id != HF_PRIMARY_VM_ID) {
		dlog_error("Primary VM was not given the correct ID.\n");
		return false;
	}

	vm_locked = vm_lock(vm);

	if (params->device_mem_ranges_count == 0) {
		/*
		 * Map 1 TB of address space as device memory so that, most
		 * likely, all devices are available to the primary VM.
		 *
		 * TODO: remove this once all targets provide valid ranges.
		 */
		dlog_warning(
			"Device memory not provided, defaulting to 1 TB.\n");

		if (!vm_identity_map(
			    vm_locked, pa_init(0),
			    pa_init(UINT64_C(1024) * 1024 * 1024 * 1024),
			    MM_MODE_R | MM_MODE_W | MM_MODE_D, ppool, NULL)) {
			dlog_error(
				"Unable to initialise address space for "
				"primary VM.\n");
			ret = false;
			goto out;
		}
	}

	/* Map normal memory as such to permit caching, execution, etc. */
	for (i = 0; i < params->mem_ranges_count; ++i) {
		if (!vm_identity_map(vm_locked, params->mem_ranges[i].begin,
				     params->mem_ranges[i].end,
				     MM_MODE_R | MM_MODE_W | MM_MODE_X, ppool,
				     NULL)) {
			dlog_error(
				"Unable to initialise memory for primary "
				"VM.\n");
			ret = false;
			goto out;
		}
	}

	/* Map device memory as such to prevent execution, speculation, etc. */
	for (i = 0; i < params->device_mem_ranges_count; ++i) {
		if (!vm_identity_map(
			    vm_locked, params->device_mem_ranges[i].begin,
			    params->device_mem_ranges[i].end,
			    MM_MODE_R | MM_MODE_W | MM_MODE_D, ppool, NULL)) {
			dlog_error(
				"Unable to initialise device memory for "
				"primary VM.\n");
			ret = false;
			goto out;
		}
	}

	if (!load_common(stage1_locked, vm_locked, manifest_vm, ppool)) {
		ret = false;
		goto out;
	}

	if (!vm_unmap_hypervisor(vm_locked, ppool)) {
		dlog_error("Unable to unmap hypervisor from primary VM.\n");
		ret = false;
		goto out;
	}

	if (!plat_iommu_unmap_iommus(vm_locked, ppool)) {
		dlog_error("Unable to unmap IOMMUs from primary VM.\n");
		ret = false;
		goto out;
	}

	dlog_info("Loaded primary VM with %u vCPUs, entry at %#x.\n",
		  vm->vcpu_count, pa_addr(primary_begin));

	vcpu_locked = vcpu_lock(vm_get_vcpu(vm, 0));
	vcpu_on(vcpu_locked, primary_entry, params->kernel_arg);
	vcpu_unlock(&vcpu_locked);
	ret = true;

out:
	vm_unlock(&vm_locked);

	return ret;
}

/**
 * Loads the secondary VM's FDT.
 * Stores the total allocated size for the FDT in fdt_allocated_size (if
 * fdt_allocated_size is not NULL). The allocated size includes additional
 * space for potential patching.
 */
static bool load_secondary_fdt(struct mm_stage1_locked stage1_locked,
			       paddr_t end, size_t fdt_max_size,
			       const struct manifest_vm *manifest_vm,
			       const struct memiter *cpio, struct mpool *ppool,
			       paddr_t *fdt_addr, size_t *fdt_allocated_size)
{
	struct memiter fdt;
	size_t allocated_size;

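	/* The caller is expected to have checked that an FDT filename is set. */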
	CHECK(!string_is_empty(&manifest_vm->secondary.fdt_filename));

	if (!cpio_get_file(cpio, &manifest_vm->secondary.fdt_filename, &fdt)) {
		dlog_error("Cannot open the secondary VM's FDT.\n");
		return false;
	}

	/*
	 * Ensure the FDT has one additional page at the end for patching, and
	 * align it to the page boundary.
	 */
	allocated_size = align_up(memiter_size(&fdt), PAGE_SIZE) + PAGE_SIZE;

	if (allocated_size > fdt_max_size) {
		dlog_error(
			"FDT allocated space (%u) is more than the specified "
			"maximum to use (%u).\n",
			allocated_size, fdt_max_size);
		return false;
	}

	/* Load the FDT to the end of the VM's allocated memory space. */
	*fdt_addr = pa_init(pa_addr(pa_sub(end, allocated_size)));

	dlog_info("Loading secondary FDT of allocated size %u at 0x%x.\n",
		  allocated_size, pa_addr(*fdt_addr));

	if (!copy_to_unmapped(stage1_locked, *fdt_addr, &fdt, ppool)) {
		dlog_error("Unable to copy FDT.\n");
		return false;
	}

	if (fdt_allocated_size) {
		*fdt_allocated_size = allocated_size;
	}

	return true;
}

/**
 * Loads a secondary VM.
 */
static bool load_secondary(struct mm_stage1_locked stage1_locked,
			   struct vm_locked primary_vm_locked,
			   paddr_t mem_begin, paddr_t mem_end,
			   const struct manifest_vm *manifest_vm,
			   const struct memiter *cpio, struct mpool *ppool)
{
	struct vm *vm;
	struct vm_locked vm_locked;
	struct vcpu *vcpu;
	ipaddr_t secondary_entry;
	bool ret;
	paddr_t fdt_addr;
	bool has_fdt;
	size_t kernel_size = 0;
	const size_t mem_size = pa_difference(mem_begin, mem_end);

	/*
	 * Load the kernel if a filename is specified in the VM manifest.
	 * For an FF-A partition, kernel_filename is left empty, indicating
	 * that the partition package was already loaded before Hafnium
	 * booted.
	 */
	if (!string_is_empty(&manifest_vm->kernel_filename)) {
		if (!load_kernel(stage1_locked, mem_begin, mem_end, manifest_vm,
				 cpio, ppool, &kernel_size)) {
			dlog_error("Unable to load kernel.\n");
			return false;
		}
	}

	has_fdt = !string_is_empty(&manifest_vm->secondary.fdt_filename);
	if (has_fdt) {
		/*
		 * Ensure that the FDT does not overwrite the kernel or overlap
		 * its page, so that the FDT starts at a page boundary.
		 */
		const size_t fdt_max_size =
			mem_size - align_up(kernel_size, PAGE_SIZE);

		size_t fdt_allocated_size;

		if (!load_secondary_fdt(stage1_locked, mem_end, fdt_max_size,
					manifest_vm, cpio, ppool, &fdt_addr,
					&fdt_allocated_size)) {
			dlog_error("Unable to load FDT.\n");
			return false;
		}

		if (!fdt_patch_mem(stage1_locked, fdt_addr, fdt_allocated_size,
				   mem_begin, mem_end, ppool)) {
			dlog_error("Unable to patch FDT.\n");
			return false;
		}
	}

	if (!vm_init_next(manifest_vm->secondary.vcpu_count, ppool, &vm)) {
		dlog_error("Unable to initialise VM.\n");
		return false;
	}

	vm_locked = vm_lock(vm);

	/* Grant the VM access to the memory. */
	if (!vm_identity_map(vm_locked, mem_begin, mem_end,
			     MM_MODE_R | MM_MODE_W | MM_MODE_X, ppool,
			     &secondary_entry)) {
		dlog_error("Unable to initialise memory.\n");
		ret = false;
		goto out;
	}

	if (manifest_vm->is_ffa_partition) {
		int j = 0;
		paddr_t region_begin;
		paddr_t region_end;
		paddr_t alloc_base = mem_end;
		size_t size;
		size_t total_alloc = 0;

		/* Map memory regions. */
		while (j < manifest_vm->sp.mem_region_count) {
			size = manifest_vm->sp.mem_regions[j].page_count *
			       PAGE_SIZE;
			/*
			 * Memory regions without a base address are allocated
			 * inside the partition's page table, starting from the
			 * end of its memory space.
			 * TODO: Add a mechanism to let the partition know of
			 * these memory regions.
			 */
			if (manifest_vm->sp.mem_regions[j].base_address ==
			    MANIFEST_INVALID_ADDRESS) {
				total_alloc += size;
				/* Don't go beyond half the VM's memory space. */
				if (total_alloc >
				    (manifest_vm->secondary.mem_size / 2)) {
					dlog_error(
						"Not enough space for memory-"
						"region allocation.\n");
					ret = false;
					goto out;
				}

				region_end = alloc_base;
				region_begin = pa_subtract(alloc_base, size);
				alloc_base = region_begin;

				if (!vm_identity_map(
					    vm_locked, region_begin, region_end,
					    manifest_vm->sp.mem_regions[j]
						    .attributes,
					    ppool, NULL)) {
					dlog_error(
						"Unable to map secondary VM "
						"memory-region.\n");
					ret = false;
					goto out;
				}

				dlog_info(
					"  Memory region %#x - %#x allocated.\n",
					pa_addr(region_begin),
					pa_addr(region_end));
			} else {
				/*
				 * Identity map the memory region in both
				 * cases, VA (S-EL0) or IPA (S-EL1).
				 */
				region_begin =
					pa_init(manifest_vm->sp.mem_regions[j]
							.base_address);
				region_end = pa_add(region_begin, size);

				if (!vm_identity_map(
					    vm_locked, region_begin, region_end,
					    manifest_vm->sp.mem_regions[j]
						    .attributes,
					    ppool, NULL)) {
					dlog_error(
						"Unable to map secondary VM "
						"memory-region.\n");
					ret = false;
					goto out;
				}
			}

			/* Deny the primary VM access to this memory. */
			if (!vm_unmap(primary_vm_locked, region_begin,
				      region_end, ppool)) {
				dlog_error(
					"Unable to unmap secondary VM memory-"
					"region from primary VM.\n");
				ret = false;
				goto out;
			}

			j++;
		}

		/* Map device regions. */
		j = 0;
		while (j < manifest_vm->sp.dev_region_count) {
			region_begin = pa_init(
				manifest_vm->sp.dev_regions[j].base_address);
			size = manifest_vm->sp.dev_regions[j].page_count *
			       PAGE_SIZE;
			region_end = pa_add(region_begin, size);

			if (!vm_identity_map(
				    vm_locked, region_begin, region_end,
				    manifest_vm->sp.dev_regions[j].attributes,
				    ppool, NULL)) {
				dlog_error(
					"Unable to map secondary VM "
					"device-region.\n");
				ret = false;
				goto out;
			}

			/* Deny the primary VM access to this region. */
			if (!vm_unmap(primary_vm_locked, region_begin,
				      region_end, ppool)) {
				dlog_error(
					"Unable to unmap secondary VM device-"
					"region from primary VM.\n");
				ret = false;
				goto out;
			}
			j++;
		}

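		/* The entry point sits at a manifest-specified offset. */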
		secondary_entry =
			ipa_add(secondary_entry, manifest_vm->sp.ep_offset);
	}

	if (!load_common(stage1_locked, vm_locked, manifest_vm, ppool)) {
		ret = false;
		goto out;
	}

	dlog_info("Loaded with %u vCPUs, entry at %#x.\n",
		  manifest_vm->secondary.vcpu_count, pa_addr(mem_begin));

	vcpu = vm_get_vcpu(vm, 0);

	if (has_fdt) {
		vcpu_secondary_reset_and_start(vcpu, secondary_entry,
					       pa_addr(fdt_addr));
	} else {
		/*
		 * Without an FDT, secondary VMs expect the memory size to be
		 * passed in register x0, which is what
		 * vcpu_secondary_reset_and_start does in this case.
		 */
		vcpu_secondary_reset_and_start(vcpu, secondary_entry, mem_size);
	}

	ret = true;

out:
	vm_unlock(&vm_locked);

	return ret;
}

/**
 * Try to find a memory range of the given size within the given ranges, and
 * remove it from them. Return true on success, or false if no large enough
 * contiguous range is found.
 */
static bool carve_out_mem_range(struct mem_range *mem_ranges,
				size_t mem_ranges_count, uint64_t size_to_find,
				paddr_t *found_begin, paddr_t *found_end)
{
	size_t i;

	/*
	 * TODO(b/116191358): Consider being cleverer about how we pack VMs
	 * together, with a non-greedy algorithm.
	 */
	for (i = 0; i < mem_ranges_count; ++i) {
		if (size_to_find <=
		    pa_difference(mem_ranges[i].begin, mem_ranges[i].end)) {
			/*
			 * This range is big enough, take some of it from the
			 * end and reduce its size accordingly.
			 */
			*found_end = mem_ranges[i].end;
			*found_begin = pa_init(pa_addr(mem_ranges[i].end) -
					       size_to_find);
			mem_ranges[i].end = *found_begin;
			return true;
		}
	}
	return false;
}

/**
 * Given arrays of memory ranges before and after memory was removed for
 * secondary VMs, add the difference to the reserved ranges of the given
 * update. Return true on success, or false if there would be more than
 * MAX_MEM_RANGES reserved ranges after adding the new ones.
 * `before` and `after` must be arrays of exactly `mem_ranges_count` elements.
 */
static bool update_reserved_ranges(struct boot_params_update *update,
				   const struct mem_range *before,
				   const struct mem_range *after,
				   size_t mem_ranges_count)
{
	size_t i;

	for (i = 0; i < mem_ranges_count; ++i) {
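		/* Memory was removed from the start of the range: reserve the gap. */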
		if (pa_addr(after[i].begin) > pa_addr(before[i].begin)) {
			if (update->reserved_ranges_count >= MAX_MEM_RANGES) {
				dlog_error(
					"Too many reserved ranges after "
					"loading secondary VMs.\n");
				return false;
			}
			update->reserved_ranges[update->reserved_ranges_count]
				.begin = before[i].begin;
			update->reserved_ranges[update->reserved_ranges_count]
				.end = after[i].begin;
			update->reserved_ranges_count++;
		}
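		/* Memory was removed from the end of the range: reserve the gap. */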
		if (pa_addr(after[i].end) < pa_addr(before[i].end)) {
			if (update->reserved_ranges_count >= MAX_MEM_RANGES) {
				dlog_error(
					"Too many reserved ranges after "
					"loading secondary VMs.\n");
				return false;
			}
			update->reserved_ranges[update->reserved_ranges_count]
				.begin = after[i].end;
			update->reserved_ranges[update->reserved_ranges_count]
				.end = before[i].end;
			update->reserved_ranges_count++;
		}
	}

	return true;
}

/**
 * Loads all VMs from the manifest.
 */
Andrew Scull72b43c02019-09-18 13:53:45 +0100656bool load_vms(struct mm_stage1_locked stage1_locked,
657 const struct manifest *manifest, const struct memiter *cpio,
658 const struct boot_params *params,
659 struct boot_params_update *update, struct mpool *ppool)
Wedson Almeida Filhofdf4afc2018-07-19 15:45:21 +0100660{
Andrew Scull19503262018-09-20 14:48:39 +0100661 struct vm *primary;
Olivier Deprez96a2a262020-06-11 17:21:38 +0200662 struct vm *other_world_vm;
Andrew Walbran34ce72e2018-09-13 16:47:44 +0100663 struct mem_range mem_ranges_available[MAX_MEM_RANGES];
Andrew Scull3c257452019-11-26 13:32:50 +0000664 struct vm_locked primary_vm_locked;
Andrew Walbran34ce72e2018-09-13 16:47:44 +0100665 size_t i;
Andrew Scull3c257452019-11-26 13:32:50 +0000666 bool success = true;
Andrew Walbran34ce72e2018-09-13 16:47:44 +0100667
Andrew Scullae9962e2019-10-03 16:51:16 +0100668 if (!load_primary(stage1_locked, &manifest->vm[HF_PRIMARY_VM_INDEX],
669 cpio, params, ppool)) {
Andrew Walbran17eebf92020-02-05 16:35:49 +0000670 dlog_error("Unable to load primary VM.\n");
Andrew Scull72b43c02019-09-18 13:53:45 +0100671 return false;
672 }
673
	/*
	 * Initialise the dummy VM which represents the opposite world:
	 * - TrustZone (or the SPMC) when running the Hypervisor;
	 * - the Hypervisor when running TZ/SPMC.
	 */
	other_world_vm = vm_init(HF_OTHER_WORLD_ID, MAX_CPUS, ppool);
	CHECK(other_world_vm != NULL);

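	/* Pin each other-world vCPU to the physical CPU of the same index. */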
	for (i = 0; i < MAX_CPUS; i++) {
		struct vcpu *vcpu = vm_get_vcpu(other_world_vm, i);
		struct cpu *cpu = cpu_find_index(i);

		vcpu->cpu = cpu;
	}

	static_assert(
		sizeof(mem_ranges_available) == sizeof(params->mem_ranges),
		"mem_range arrays must be the same size for memcpy.");
	static_assert(sizeof(mem_ranges_available) < 500,
		      "This will use too much stack, either make "
		      "MAX_MEM_RANGES smaller or change this.");
	memcpy_s(mem_ranges_available, sizeof(mem_ranges_available),
		 params->mem_ranges, sizeof(params->mem_ranges));

	/* Round the last addresses down to the page size. */
	for (i = 0; i < params->mem_ranges_count; ++i) {
		mem_ranges_available[i].end = pa_init(align_down(
			pa_addr(mem_ranges_available[i].end), PAGE_SIZE));
	}

	primary = vm_find(HF_PRIMARY_VM_ID);
	primary_vm_locked = vm_lock(primary);

	for (i = 0; i < manifest->vm_count; ++i) {
		const struct manifest_vm *manifest_vm = &manifest->vm[i];
		ffa_vm_id_t vm_id = HF_VM_ID_OFFSET + i;
		uint64_t mem_size;
		paddr_t secondary_mem_begin;
		paddr_t secondary_mem_end;

		if (vm_id == HF_PRIMARY_VM_ID) {
			continue;
		}

		dlog_info("Loading VM id %#x: %s.\n", vm_id,
			  manifest_vm->debug_name);

		mem_size = align_up(manifest_vm->secondary.mem_size, PAGE_SIZE);

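		/*
		 * FF-A partitions live at a fixed address taken from the
		 * manifest; other VMs are carved out of available memory.
		 */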
		if (manifest_vm->is_ffa_partition) {
			secondary_mem_begin =
				pa_init(manifest_vm->sp.load_addr);
			secondary_mem_end =
				pa_init(manifest_vm->sp.load_addr + mem_size);
		} else if (!carve_out_mem_range(mem_ranges_available,
						params->mem_ranges_count,
						mem_size, &secondary_mem_begin,
						&secondary_mem_end)) {
			dlog_error("Not enough memory (%u bytes).\n", mem_size);
			continue;
		}

		if (!load_secondary(stage1_locked, primary_vm_locked,
				    secondary_mem_begin, secondary_mem_end,
				    manifest_vm, cpio, ppool)) {
			dlog_error("Unable to load VM.\n");
			continue;
		}

		/* Deny the primary VM access to this memory. */
		if (!vm_unmap(primary_vm_locked, secondary_mem_begin,
			      secondary_mem_end, ppool)) {
			dlog_error(
				"Unable to unmap secondary VM from primary "
				"VM.\n");
			success = false;
			break;
		}
	}

	vm_unlock(&primary_vm_locked);

	if (!success) {
		return false;
	}

	/*
	 * Add newly reserved areas to update params by looking at the
	 * difference between the available ranges from the original params and
	 * the updated mem_ranges_available. We assume that the number and
	 * order of available ranges is the same, i.e. the ranges above are
	 * only made smaller, never removed.
	 */
	return update_reserved_ranges(update, params->mem_ranges,
				      mem_ranges_available,
				      params->mem_ranges_count);
}