/*
 * Copyright 2018 The Hafnium Authors.
 *
 * Use of this source code is governed by a BSD-style
 * license that can be found in the LICENSE file or at
 * https://opensource.org/licenses/BSD-3-Clause.
 */

#include "hf/load.h"

#include <stdbool.h>

#include "hf/arch/other_world.h"
#include "hf/arch/vm.h"

#include "hf/api.h"
#include "hf/boot_params.h"
#include "hf/check.h"
#include "hf/dlog.h"
#include "hf/fdt_patch.h"
#include "hf/layout.h"
#include "hf/memiter.h"
#include "hf/mm.h"
#include "hf/plat/console.h"
#include "hf/plat/iommu.h"
#include "hf/static_assert.h"
#include "hf/std.h"
#include "hf/vm.h"

#include "vmapi/hf/call.h"
#include "vmapi/hf/ffa.h"

/**
 * Copies data to an unmapped location by mapping it for write, copying the
 * data, then unmapping it.
 *
 * The data is written so that it is available to all cores with the cache
 * disabled. When switching to the partitions, caching is initially disabled,
 * so the data must be available without the cache.
 */
static bool copy_to_unmapped(struct mm_stage1_locked stage1_locked, paddr_t to,
			     struct memiter *from_it, struct mpool *ppool)
{
	const void *from = memiter_base(from_it);
	size_t size = memiter_size(from_it);
	paddr_t to_end = pa_add(to, size);
	void *ptr;

	ptr = mm_identity_map(stage1_locked, to, to_end, MM_MODE_W, ppool);
	if (!ptr) {
		return false;
	}

	memcpy_s(ptr, size, from, size);
	arch_mm_flush_dcache(ptr, size);

	CHECK(mm_unmap(stage1_locked, to, to_end, ppool));

	return true;
}

/**
 * Loads a VM's kernel from the cpio archive.
 * Stores the kernel size in kernel_size (if kernel_size is not NULL).
 * Returns false if it cannot load the kernel.
 */
static bool load_kernel(struct mm_stage1_locked stage1_locked, paddr_t begin,
			paddr_t end, const struct manifest_vm *manifest_vm,
			const struct memiter *cpio, struct mpool *ppool,
			size_t *kernel_size)
{
	struct memiter kernel;
	size_t size;

	if (!cpio_get_file(cpio, &manifest_vm->kernel_filename, &kernel)) {
		dlog_error("Could not find kernel file \"%s\".\n",
			   string_data(&manifest_vm->kernel_filename));
		return false;
	}

	size = memiter_size(&kernel);
	if (pa_difference(begin, end) < size) {
		dlog_error("Kernel is larger than available memory.\n");
		return false;
	}

	if (!copy_to_unmapped(stage1_locked, begin, &kernel, ppool)) {
		dlog_error("Unable to copy kernel.\n");
		return false;
	}

	if (kernel_size) {
		*kernel_size = size;
	}

	return true;
}

/*
 * Links the RX/TX buffers provided in the partition manifest to the VM's
 * mailbox.
 */
static bool link_rxtx_to_mailbox(struct mm_stage1_locked stage1_locked,
				 struct vm_locked vm_locked, struct rx_tx rxtx,
				 struct mpool *ppool)
{
	struct ffa_value ret;
	ipaddr_t send;
	ipaddr_t recv;
	uint32_t page_count;

	send = ipa_init(rxtx.tx_buffer->base_address);
	recv = ipa_init(rxtx.rx_buffer->base_address);
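	/*
	 * Note: a single page count, taken from the TX buffer description, is
	 * used for both buffers; this assumes the RX and TX buffers are the
	 * same size, as with FFA_RXTX_MAP.
	 */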
	page_count = rxtx.tx_buffer->page_count;

	ret = api_vm_configure_pages(stage1_locked, vm_locked, send, recv,
				     page_count, ppool);
	if (ret.func != FFA_SUCCESS_32) {
		return false;
	}

	dlog_verbose(" mailbox: send = %#x, recv = %#x\n",
		     vm_locked.vm->mailbox.send, vm_locked.vm->mailbox.recv);

	return true;
}

/**
 * Performs VM loading activities that are common to the primary and
 * secondary VMs.
 */
static bool load_common(struct mm_stage1_locked stage1_locked,
			struct vm_locked vm_locked,
			const struct manifest_vm *manifest_vm,
			struct mpool *ppool)
{
	vm_locked.vm->smc_whitelist = manifest_vm->smc_whitelist;
	vm_locked.vm->uuid = manifest_vm->sp.uuid;

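	/*
	 * FF-A partitions get additional setup driven by the partition
	 * manifest: mailbox buffers, messaging method, managed exit and boot
	 * order.
	 */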
	if (manifest_vm->is_ffa_partition) {
		/* Link RX/TX buffers to the mailbox. */
		if (manifest_vm->sp.rxtx.available) {
			if (!link_rxtx_to_mailbox(stage1_locked, vm_locked,
						  manifest_vm->sp.rxtx,
						  ppool)) {
				dlog_error(
					"Unable to link RX/TX buffers with "
					"mailbox.\n");
				return false;
			}
		}

		vm_locked.vm->messaging_method =
			manifest_vm->sp.messaging_method;

		vm_locked.vm->managed_exit = manifest_vm->sp.managed_exit;

		vm_locked.vm->boot_order = manifest_vm->sp.boot_order;
		/* Update the boot list according to boot_order. */
		vm_update_boot(vm_locked.vm);
	}

	/* Initialize architecture-specific features. */
	arch_vm_features_set(vm_locked.vm);

	if (!plat_iommu_attach_peripheral(stage1_locked, vm_locked, manifest_vm,
					  ppool)) {
		dlog_error("Unable to attach upstream peripheral device.\n");
		return false;
	}

	return true;
}

/**
 * Loads the primary VM.
 */
static bool load_primary(struct mm_stage1_locked stage1_locked,
			 const struct manifest_vm *manifest_vm,
			 const struct memiter *cpio,
			 const struct boot_params *params, struct mpool *ppool)
{
	paddr_t primary_begin;
	ipaddr_t primary_entry;
	struct vm *vm;
	struct vm_locked vm_locked;
	struct vcpu_locked vcpu_locked;
	size_t i;
	bool ret;

	if (manifest_vm->is_ffa_partition) {
		primary_begin = pa_init(manifest_vm->sp.load_addr);
		primary_entry = ipa_add(ipa_from_pa(primary_begin),
					manifest_vm->sp.ep_offset);
	} else {
		primary_begin =
			(manifest_vm->primary.boot_address ==
			 MANIFEST_INVALID_ADDRESS)
				? layout_primary_begin()
				: pa_init(manifest_vm->primary.boot_address);
		primary_entry = ipa_from_pa(primary_begin);
	}

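	/*
	 * The primary image's size is not known from the manifest, so use
	 * RSIZE_MAX as an effectively unbounded end address for the size
	 * check in load_kernel() below.
	 */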
	paddr_t primary_end = pa_add(primary_begin, RSIZE_MAX);

	/* The primary VM must run at EL1. */
	CHECK(manifest_vm->sp.run_time_el == EL1);

	/*
	 * Load the kernel if a filename is specified in the VM manifest.
	 * For an FF-A partition, kernel_filename is undefined, indicating
	 * that the partition package has already been loaded prior to Hafnium
	 * booting.
	 */
	if (!string_is_empty(&manifest_vm->kernel_filename)) {
		if (!load_kernel(stage1_locked, primary_begin, primary_end,
				 manifest_vm, cpio, ppool, NULL)) {
			dlog_error("Unable to load primary kernel.\n");
			return false;
		}
	}

	if (!vm_init_next(MAX_CPUS, ppool, &vm, false)) {
		dlog_error("Unable to initialise primary VM.\n");
		return false;
	}

	if (vm->id != HF_PRIMARY_VM_ID) {
		dlog_error("Primary VM was not given correct ID.\n");
		return false;
	}

	vm_locked = vm_lock(vm);

	if (params->device_mem_ranges_count == 0) {
		/*
		 * Map 1TB of address space as device memory to, most likely,
		 * make all devices available to the primary VM.
		 *
		 * TODO: remove this once all targets provide valid ranges.
		 */
		dlog_warning(
			"Device memory not provided, defaulting to 1 TB.\n");

		if (!vm_identity_map(
			    vm_locked, pa_init(0),
			    pa_init(UINT64_C(1024) * 1024 * 1024 * 1024),
			    MM_MODE_R | MM_MODE_W | MM_MODE_D, ppool, NULL)) {
			dlog_error(
				"Unable to initialise address space for "
				"primary VM.\n");
			ret = false;
			goto out;
		}
	}

	/* Map normal memory as such to permit caching, execution, etc. */
	for (i = 0; i < params->mem_ranges_count; ++i) {
		if (!vm_identity_map(vm_locked, params->mem_ranges[i].begin,
				     params->mem_ranges[i].end,
				     MM_MODE_R | MM_MODE_W | MM_MODE_X, ppool,
				     NULL)) {
			dlog_error(
				"Unable to initialise memory for primary "
				"VM.\n");
			ret = false;
			goto out;
		}
	}

	/* Map device memory as such to prevent execution, speculation, etc. */
	for (i = 0; i < params->device_mem_ranges_count; ++i) {
		if (!vm_identity_map(
			    vm_locked, params->device_mem_ranges[i].begin,
			    params->device_mem_ranges[i].end,
			    MM_MODE_R | MM_MODE_W | MM_MODE_D, ppool, NULL)) {
			dlog_error(
				"Unable to initialise device memory for "
				"primary VM.\n");
			ret = false;
			goto out;
		}
	}

	if (!load_common(stage1_locked, vm_locked, manifest_vm, ppool)) {
		ret = false;
		goto out;
	}

	if (!vm_unmap_hypervisor(vm_locked, ppool)) {
		dlog_error("Unable to unmap hypervisor from primary VM.\n");
		ret = false;
		goto out;
	}

	if (!plat_iommu_unmap_iommus(vm_locked, ppool)) {
		dlog_error("Unable to unmap IOMMUs from primary VM.\n");
		ret = false;
		goto out;
	}

	dlog_info("Loaded primary VM with %u vCPUs, entry at %#x.\n",
		  vm->vcpu_count, pa_addr(primary_begin));

	/* Mark the primary as the first VM to boot. */
	vm_update_boot(vm);

	vcpu_locked = vcpu_lock(vm_get_vcpu(vm, 0));
	vcpu_on(vcpu_locked, primary_entry, params->kernel_arg);
	vcpu_unlock(&vcpu_locked);
	ret = true;

out:
	vm_unlock(&vm_locked);

	return ret;
}

/**
 * Loads the secondary VM's FDT.
 * Stores the total allocated size for the FDT in fdt_allocated_size (if
 * fdt_allocated_size is not NULL). The allocated size includes additional
 * space for potential patching.
 */
static bool load_secondary_fdt(struct mm_stage1_locked stage1_locked,
			       paddr_t end, size_t fdt_max_size,
			       const struct manifest_vm *manifest_vm,
			       const struct memiter *cpio, struct mpool *ppool,
			       paddr_t *fdt_addr, size_t *fdt_allocated_size)
{
	struct memiter fdt;
	size_t allocated_size;

	CHECK(!string_is_empty(&manifest_vm->secondary.fdt_filename));

	if (!cpio_get_file(cpio, &manifest_vm->secondary.fdt_filename, &fdt)) {
		dlog_error("Cannot open the secondary VM's FDT.\n");
		return false;
	}

	/*
	 * Ensure the FDT has one additional page at the end for patching, and
	 * align the allocation to the page boundary.
	 */
	allocated_size = align_up(memiter_size(&fdt), PAGE_SIZE) + PAGE_SIZE;

	if (allocated_size > fdt_max_size) {
		dlog_error(
			"FDT allocated space (%u) is more than the specified "
			"maximum to use (%u).\n",
			allocated_size, fdt_max_size);
		return false;
	}

	/* Load the FDT to the end of the VM's allocated memory space. */
	*fdt_addr = pa_init(pa_addr(pa_sub(end, allocated_size)));

	dlog_info("Loading secondary FDT of allocated size %u at 0x%x.\n",
		  allocated_size, pa_addr(*fdt_addr));

	if (!copy_to_unmapped(stage1_locked, *fdt_addr, &fdt, ppool)) {
		dlog_error("Unable to copy FDT.\n");
		return false;
	}

	if (fdt_allocated_size) {
		*fdt_allocated_size = allocated_size;
	}

	return true;
}

/*
 * Loads a secondary VM.
 */
static bool load_secondary(struct mm_stage1_locked stage1_locked,
			   struct vm_locked primary_vm_locked,
			   paddr_t mem_begin, paddr_t mem_end,
			   const struct manifest_vm *manifest_vm,
			   const struct memiter *cpio, struct mpool *ppool)
{
	struct vm *vm;
	struct vm_locked vm_locked;
	struct vcpu_locked vcpu_locked;
	struct vcpu *vcpu;
	ipaddr_t secondary_entry;
	bool ret;
	paddr_t fdt_addr;
	bool has_fdt;
	size_t kernel_size = 0;
	const size_t mem_size = pa_difference(mem_begin, mem_end);
	uint32_t map_mode;

	/*
	 * Load the kernel if a filename is specified in the VM manifest.
	 * For an FF-A partition, kernel_filename is undefined, indicating
	 * that the partition package has already been loaded prior to Hafnium
	 * booting.
	 */
	if (!string_is_empty(&manifest_vm->kernel_filename)) {
		if (!load_kernel(stage1_locked, mem_begin, mem_end, manifest_vm,
				 cpio, ppool, &kernel_size)) {
			dlog_error("Unable to load kernel.\n");
			return false;
		}
	}

	has_fdt = !string_is_empty(&manifest_vm->secondary.fdt_filename);
	if (has_fdt) {
		/*
		 * Ensure that the FDT does not overwrite the kernel or overlap
		 * its last page, so that the FDT starts at a page boundary.
		 */
		const size_t fdt_max_size =
			mem_size - align_up(kernel_size, PAGE_SIZE);

		size_t fdt_allocated_size;

		if (!load_secondary_fdt(stage1_locked, mem_end, fdt_max_size,
					manifest_vm, cpio, ppool, &fdt_addr,
					&fdt_allocated_size)) {
			dlog_error("Unable to load FDT.\n");
			return false;
		}

		if (!fdt_patch_mem(stage1_locked, fdt_addr, fdt_allocated_size,
				   mem_begin, mem_end, ppool)) {
			dlog_error("Unable to patch FDT.\n");
			return false;
		}
	}

	/*
	 * An S-EL0 partition must contain only 1 vCPU (UP migratable) per the
	 * FF-A 1.0 spec.
	 */
	CHECK(manifest_vm->sp.run_time_el != S_EL0 ||
	      manifest_vm->secondary.vcpu_count == 1);

	if (!vm_init_next(manifest_vm->secondary.vcpu_count, ppool, &vm,
			  (manifest_vm->sp.run_time_el == S_EL0))) {
		dlog_error("Unable to initialise VM.\n");
		return false;
	}

	vm_locked = vm_lock(vm);

	/*
	 * Grant the VM access to the memory. TODO: For S-EL0 partitions,
	 * mapping all of its memory as RWX is bad from a security standpoint.
	 * Should just skip this and expect this to be present in the memory
	 * regions?
	 */
	map_mode = MM_MODE_R | MM_MODE_W | MM_MODE_X;
	if (vm->el0_partition) {
		map_mode |= MM_MODE_USER | MM_MODE_NG;
	}
	if (!vm_identity_map(vm_locked, mem_begin, mem_end, map_mode, ppool,
			     &secondary_entry)) {
		dlog_error("Unable to initialise memory.\n");
		ret = false;
		goto out;
	}

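	/*
	 * For an FF-A partition, map the memory and device regions declared
	 * in the partition manifest and revoke the primary VM's access to
	 * them.
	 */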
	if (manifest_vm->is_ffa_partition) {
		int j = 0;
		paddr_t region_begin;
		paddr_t region_end;
		paddr_t alloc_base = mem_end;
		size_t size;
		size_t total_alloc = 0;

		/* Map memory regions. */
		while (j < manifest_vm->sp.mem_region_count) {
			size = manifest_vm->sp.mem_regions[j].page_count *
			       PAGE_SIZE;
			/*
			 * Memory regions without a base address are allocated
			 * inside the partition's page table, working downwards
			 * from the end of its memory.
			 * TODO: add a mechanism to let the partition know
			 * about these memory regions.
			 */
			if (manifest_vm->sp.mem_regions[j].base_address ==
			    MANIFEST_INVALID_ADDRESS) {
				total_alloc += size;
				/* Don't go beyond half the VM's memory space. */
				if (total_alloc >
				    (manifest_vm->secondary.mem_size / 2)) {
					dlog_error(
						"Not enough space for memory-"
						"region allocation.\n");
					ret = false;
					goto out;
				}

				region_end = alloc_base;
				region_begin = pa_subtract(alloc_base, size);
				alloc_base = region_begin;

				map_mode = manifest_vm->sp.mem_regions[j]
						   .attributes;
				if (vm->el0_partition) {
					map_mode |= MM_MODE_USER | MM_MODE_NG;
				}

				if (!vm_identity_map(vm_locked, region_begin,
						     region_end, map_mode,
						     ppool, NULL)) {
					dlog_error(
						"Unable to map secondary VM "
						"memory-region.\n");
					ret = false;
					goto out;
				}

				dlog_info(
					" Memory region %#x - %#x allocated\n",
					region_begin, region_end);
			} else {
				/*
				 * Identity map the memory region in both
				 * cases, VA (S-EL0) or IPA (S-EL1).
				 */
				region_begin =
					pa_init(manifest_vm->sp.mem_regions[j]
							.base_address);
				region_end = pa_add(region_begin, size);

				map_mode = manifest_vm->sp.mem_regions[j]
						   .attributes;
				if (vm->el0_partition) {
					map_mode |= MM_MODE_USER | MM_MODE_NG;
				}

				if (!vm_identity_map(vm_locked, region_begin,
						     region_end, map_mode,
						     ppool, NULL)) {
					dlog_error(
						"Unable to map secondary VM "
						"memory-region.\n");
					ret = false;
					goto out;
				}
			}

			/* Deny the primary VM access to this memory. */
			if (!vm_unmap(primary_vm_locked, region_begin,
				      region_end, ppool)) {
				dlog_error(
					"Unable to unmap secondary VM memory-"
					"region from primary VM.\n");
				ret = false;
				goto out;
			}

			j++;
		}

		/* Map device regions. */
		j = 0;
		while (j < manifest_vm->sp.dev_region_count) {
			region_begin = pa_init(
				manifest_vm->sp.dev_regions[j].base_address);
			size = manifest_vm->sp.dev_regions[j].page_count *
			       PAGE_SIZE;
			region_end = pa_add(region_begin, size);

			map_mode = manifest_vm->sp.dev_regions[j].attributes;
			if (vm->el0_partition) {
				map_mode |= MM_MODE_USER | MM_MODE_NG;
			}

			if (!vm_identity_map(vm_locked, region_begin,
					     region_end, map_mode, ppool,
					     NULL)) {
				dlog_error(
					"Unable to map secondary VM "
					"device-region.\n");
				ret = false;
				goto out;
			}
			/* Deny the primary VM access to this region. */
			if (!vm_unmap(primary_vm_locked, region_begin,
				      region_end, ppool)) {
				dlog_error(
					"Unable to unmap secondary VM device-"
					"region from primary VM.\n");
				ret = false;
				goto out;
			}
			j++;
		}

		secondary_entry =
			ipa_add(secondary_entry, manifest_vm->sp.ep_offset);
	}

	/*
	 * Map the hypervisor into the VM's page table. The hypervisor pages
	 * will not be accessible from EL0 since they are not marked for user
	 * access.
	 * TODO: map only the exception vectors and the data they require,
	 * rather than the entire hypervisor, to help mitigate speculative
	 * side-channel attacks.
	 */
	if (vm->el0_partition) {
		CHECK(vm_identity_map(vm_locked, layout_text_begin(),
				      layout_text_end(), MM_MODE_X, ppool,
				      NULL));

		CHECK(vm_identity_map(vm_locked, layout_rodata_begin(),
				      layout_rodata_end(), MM_MODE_R, ppool,
				      NULL));

		CHECK(vm_identity_map(vm_locked, layout_data_begin(),
				      layout_data_end(), MM_MODE_R | MM_MODE_W,
				      ppool, NULL));
		plat_console_mm_init(mm_lock_ptable_unsafe(&vm->ptable), ppool);
	}

	if (!load_common(stage1_locked, vm_locked, manifest_vm, ppool)) {
		ret = false;
		goto out;
	}

	dlog_info("Loaded with %u vCPUs, entry at %#x.\n",
		  manifest_vm->secondary.vcpu_count, pa_addr(mem_begin));

	vcpu = vm_get_vcpu(vm, 0);

	vcpu_locked = vcpu_lock(vcpu);
	if (has_fdt) {
		vcpu_secondary_reset_and_start(vcpu_locked, secondary_entry,
					       pa_addr(fdt_addr));
	} else {
		/*
		 * Without an FDT, secondary VMs expect the memory size to be
		 * passed in register x0, which is what
		 * vcpu_secondary_reset_and_start does in this case.
		 */
		vcpu_secondary_reset_and_start(vcpu_locked, secondary_entry,
					       mem_size);
	}

	vcpu_unlock(&vcpu_locked);

	ret = true;

out:
	vm_unlock(&vm_locked);

	return ret;
}

/**
 * Try to find a memory range of the given size within the given ranges, and
 * remove it from them. Return true on success, or false if no large enough
 * contiguous range is found.
 */
static bool carve_out_mem_range(struct mem_range *mem_ranges,
				size_t mem_ranges_count, uint64_t size_to_find,
				paddr_t *found_begin, paddr_t *found_end)
{
	size_t i;

	/*
	 * TODO(b/116191358): Consider being cleverer about how we pack VMs
	 * together, with a non-greedy algorithm.
	 */
	for (i = 0; i < mem_ranges_count; ++i) {
		if (size_to_find <=
		    pa_difference(mem_ranges[i].begin, mem_ranges[i].end)) {
			/*
			 * This range is big enough, take some of it from the
			 * end and reduce its size accordingly.
			 */
			*found_end = mem_ranges[i].end;
			*found_begin = pa_init(pa_addr(mem_ranges[i].end) -
					       size_to_find);
			mem_ranges[i].end = *found_begin;
			return true;
		}
	}
	return false;
}

/**
 * Given arrays of memory ranges before and after memory was removed for
 * secondary VMs, add the difference to the reserved ranges of the given
 * update. Return true on success, or false if there would be more than
 * MAX_MEM_RANGES reserved ranges after adding the new ones.
 * `before` and `after` must be arrays of exactly `mem_ranges_count` elements.
 */
static bool update_reserved_ranges(struct boot_params_update *update,
				   const struct mem_range *before,
				   const struct mem_range *after,
				   size_t mem_ranges_count)
{
	size_t i;

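	/*
	 * A range may have shrunk at its start, at its end, or both; each
	 * removed portion is recorded as a newly reserved range.
	 */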
	for (i = 0; i < mem_ranges_count; ++i) {
		if (pa_addr(after[i].begin) > pa_addr(before[i].begin)) {
			if (update->reserved_ranges_count >= MAX_MEM_RANGES) {
				dlog_error(
					"Too many reserved ranges after "
					"loading secondary VMs.\n");
				return false;
			}
			update->reserved_ranges[update->reserved_ranges_count]
				.begin = before[i].begin;
			update->reserved_ranges[update->reserved_ranges_count]
				.end = after[i].begin;
			update->reserved_ranges_count++;
		}
		if (pa_addr(after[i].end) < pa_addr(before[i].end)) {
			if (update->reserved_ranges_count >= MAX_MEM_RANGES) {
				dlog_error(
					"Too many reserved ranges after "
					"loading secondary VMs.\n");
				return false;
			}
			update->reserved_ranges[update->reserved_ranges_count]
				.begin = after[i].end;
			update->reserved_ranges[update->reserved_ranges_count]
				.end = before[i].end;
			update->reserved_ranges_count++;
		}
	}

	return true;
}

static bool init_other_world_vm(struct mpool *ppool)
{
	struct vm *other_world_vm;
	size_t i;

	/*
	 * Initialise the dummy VM which represents the opposite world:
	 * - TrustZone (or the SPMC) when running the Hypervisor,
	 * - the Hypervisor when running TZ/SPMC.
	 */
	other_world_vm = vm_init(HF_OTHER_WORLD_ID, MAX_CPUS, ppool, false);
	CHECK(other_world_vm != NULL);

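	/*
	 * Pair each other-world vCPU with the physical CPU of the same index,
	 * so each core has its own vCPU context for the other world.
	 */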
	for (i = 0; i < MAX_CPUS; i++) {
		struct vcpu *vcpu = vm_get_vcpu(other_world_vm, i);
		struct cpu *cpu = cpu_find_index(i);

		vcpu->cpu = cpu;
	}

	return arch_other_world_vm_init(other_world_vm, ppool);
}

/*
 * Loads all VMs from the manifest.
 */
bool load_vms(struct mm_stage1_locked stage1_locked,
	      const struct manifest *manifest, const struct memiter *cpio,
	      const struct boot_params *params,
	      struct boot_params_update *update, struct mpool *ppool)
{
	struct vm *primary;
	struct mem_range mem_ranges_available[MAX_MEM_RANGES];
	struct vm_locked primary_vm_locked;
	size_t i;
	bool success = true;

	/*
	 * Only try to load the primary VM if it is supposed to be in this
	 * world.
	 */
	if (vm_id_is_current_world(HF_PRIMARY_VM_ID)) {
		if (!load_primary(stage1_locked,
				  &manifest->vm[HF_PRIMARY_VM_INDEX], cpio,
				  params, ppool)) {
			dlog_error("Unable to load primary VM.\n");
			return false;
		}
	}

	if (!init_other_world_vm(ppool)) {
		return false;
	}

	static_assert(
		sizeof(mem_ranges_available) == sizeof(params->mem_ranges),
		"mem_range arrays must be the same size for memcpy.");
	static_assert(sizeof(mem_ranges_available) < 500,
		      "This will use too much stack, either make "
		      "MAX_MEM_RANGES smaller or change this.");
	memcpy_s(mem_ranges_available, sizeof(mem_ranges_available),
		 params->mem_ranges, sizeof(params->mem_ranges));

	/* Round the last addresses down to the page size. */
	for (i = 0; i < params->mem_ranges_count; ++i) {
		mem_ranges_available[i].end = pa_init(align_down(
			pa_addr(mem_ranges_available[i].end), PAGE_SIZE));
	}

	primary = vm_find(HF_PRIMARY_VM_ID);
	primary_vm_locked = vm_lock(primary);

	for (i = 0; i < manifest->vm_count; ++i) {
		const struct manifest_vm *manifest_vm = &manifest->vm[i];
		ffa_vm_id_t vm_id = HF_VM_ID_OFFSET + i;
		uint64_t mem_size;
		paddr_t secondary_mem_begin;
		paddr_t secondary_mem_end;

		if (vm_id == HF_PRIMARY_VM_ID) {
			continue;
		}

		dlog_info("Loading VM id %#x: %s.\n", vm_id,
			  manifest_vm->debug_name);

		mem_size = align_up(manifest_vm->secondary.mem_size, PAGE_SIZE);

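		/*
		 * FF-A partitions are loaded at the address given in their
		 * manifest; other secondary VMs get memory carved out of the
		 * ranges left over after loading the primary.
		 */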
		if (manifest_vm->is_ffa_partition) {
			secondary_mem_begin =
				pa_init(manifest_vm->sp.load_addr);
			secondary_mem_end =
				pa_init(manifest_vm->sp.load_addr + mem_size);
		} else if (!carve_out_mem_range(mem_ranges_available,
						params->mem_ranges_count,
						mem_size, &secondary_mem_begin,
						&secondary_mem_end)) {
			dlog_error("Not enough memory (%u bytes).\n", mem_size);
			continue;
		}

		if (!load_secondary(stage1_locked, primary_vm_locked,
				    secondary_mem_begin, secondary_mem_end,
				    manifest_vm, cpio, ppool)) {
			dlog_error("Unable to load VM.\n");
			continue;
		}

		/* Deny the primary VM access to this memory. */
		if (!vm_unmap(primary_vm_locked, secondary_mem_begin,
			      secondary_mem_end, ppool)) {
			dlog_error(
				"Unable to unmap secondary VM from primary "
				"VM.\n");
			success = false;
			break;
		}
	}

	vm_unlock(&primary_vm_locked);

	if (!success) {
		return false;
	}

	/*
	 * Add newly reserved areas to update params by looking at the
	 * difference between the available ranges from the original params and
	 * the updated mem_ranges_available. We assume that the number and
	 * order of the available ranges is the same, i.e. we don't remove any
	 * ranges above, we only make them smaller.
	 */
	return update_reserved_ranges(update, params->mem_ranges,
				      mem_ranges_available,
				      params->mem_ranges_count);
}