/*
 * Copyright 2018 The Hafnium Authors.
 *
 * Use of this source code is governed by a BSD-style
 * license that can be found in the LICENSE file or at
 * https://opensource.org/licenses/BSD-3-Clause.
 */

#include "hf/load.h"

#include <stdbool.h>

#include "hf/arch/other_world.h"
#include "hf/arch/plat/ffa.h"
#include "hf/arch/vm.h"

#include "hf/api.h"
#include "hf/boot_params.h"
#include "hf/check.h"
#include "hf/dlog.h"
#include "hf/fdt_patch.h"
#include "hf/layout.h"
#include "hf/memiter.h"
#include "hf/mm.h"
#include "hf/plat/console.h"
#include "hf/plat/iommu.h"
#include "hf/static_assert.h"
#include "hf/std.h"
#include "hf/vm.h"

#include "vmapi/hf/call.h"
#include "vmapi/hf/ffa.h"

/**
 * Copies data to an unmapped location by mapping it for write, copying the
 * data, then unmapping it.
 *
 * The data is written so that it is available to all cores with the cache
 * disabled. When switching to the partitions, caching is initially disabled,
 * so the data must be available without the cache.
 */
static bool copy_to_unmapped(struct mm_stage1_locked stage1_locked, paddr_t to,
			     struct memiter *from_it, struct mpool *ppool)
{
	const void *from = memiter_base(from_it);
	size_t size = memiter_size(from_it);
	paddr_t to_end = pa_add(to, size);
	void *ptr;

	ptr = mm_identity_map(stage1_locked, to, to_end, MM_MODE_W, ppool);
	if (!ptr) {
		return false;
	}

	memcpy_s(ptr, size, from, size);
	arch_mm_flush_dcache(ptr, size);

	CHECK(mm_unmap(stage1_locked, to, to_end, ppool));

	return true;
}

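/*
 * Illustrative sketch (not part of the build): staging a blob at a physical
 * address with copy_to_unmapped(). The address 0x88000000 and the blob
 * variables are made-up examples.
 *
 *	struct memiter it;
 *
 *	memiter_init(&it, blob_base, blob_size);
 *	if (!copy_to_unmapped(stage1_locked, pa_init(0x88000000), &it, ppool)) {
 *		dlog_error("Unable to stage blob.\n");
 *	}
 */
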
/**
 * Loads the secondary VM's kernel.
 * Stores the kernel size in kernel_size (if kernel_size is not NULL).
 * Returns false if it cannot load the kernel.
 */
static bool load_kernel(struct mm_stage1_locked stage1_locked, paddr_t begin,
			paddr_t end, const struct manifest_vm *manifest_vm,
			const struct memiter *cpio, struct mpool *ppool,
			size_t *kernel_size)
{
	struct memiter kernel;
	size_t size;

	if (!cpio_get_file(cpio, &manifest_vm->kernel_filename, &kernel)) {
		dlog_error("Could not find kernel file \"%s\".\n",
			   string_data(&manifest_vm->kernel_filename));
		return false;
	}

	size = memiter_size(&kernel);
	if (pa_difference(begin, end) < size) {
		dlog_error("Kernel is larger than available memory.\n");
		return false;
	}

	if (!copy_to_unmapped(stage1_locked, begin, &kernel, ppool)) {
		dlog_error("Unable to copy kernel.\n");
		return false;
	}

	if (kernel_size) {
		*kernel_size = size;
	}

	return true;
}

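/*
 * For example (illustrative): a manifest entry whose kernel_filename is set
 * to "vmlinuz" makes load_kernel() look "vmlinuz" up in the supplied cpio
 * archive and copy it to the range [begin, end).
 */
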
/**
 * Links the RX/TX buffers provided in the partition manifest to the mailbox.
 */
static bool link_rxtx_to_mailbox(struct mm_stage1_locked stage1_locked,
				 struct vm_locked vm_locked, struct rx_tx rxtx,
				 struct mpool *ppool)
{
	struct ffa_value ret;
	ipaddr_t send;
	ipaddr_t recv;
	uint32_t page_count;

	send = ipa_init(rxtx.tx_buffer->base_address);
	recv = ipa_init(rxtx.rx_buffer->base_address);
	page_count = rxtx.tx_buffer->page_count;

	ret = api_vm_configure_pages(stage1_locked, vm_locked, send, recv,
				     page_count, ppool);
	if (ret.func != FFA_SUCCESS_32) {
		return false;
	}

	dlog_verbose(" mailbox: send = %#x, recv = %#x\n",
		     vm_locked.vm->mailbox.send, vm_locked.vm->mailbox.recv);

	return true;
}

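/*
 * Illustrative note: this achieves at load time what a partition could
 * otherwise request at runtime via the FFA_RXTX_MAP ABI, e.g. (a sketch,
 * assuming a vmapi helper of this shape):
 *
 *	struct ffa_value ret = ffa_rxtx_map(send, recv);
 *
 *	if (ret.func != FFA_SUCCESS_32) {
 *		dlog_error("Failed to map RX/TX buffers.\n");
 *	}
 */
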
/**
 * Performs VM loading activities that are common between the primary and
 * secondary VMs.
 */
static bool load_common(struct mm_stage1_locked stage1_locked,
			struct vm_locked vm_locked,
			const struct manifest_vm *manifest_vm,
			struct mpool *ppool)
{
	vm_locked.vm->smc_whitelist = manifest_vm->smc_whitelist;
	vm_locked.vm->uuid = manifest_vm->partition.uuid;

	if (manifest_vm->is_ffa_partition) {
		struct ffa_value bitmap_create_res;

		/* Link the RX/TX buffers to the mailbox. */
		if (manifest_vm->partition.rxtx.available) {
			if (!link_rxtx_to_mailbox(stage1_locked, vm_locked,
						  manifest_vm->partition.rxtx,
						  ppool)) {
				dlog_error(
					"Unable to link RX/TX buffer with "
					"mailbox.\n");
				return false;
			}
		}

		vm_locked.vm->messaging_method =
			manifest_vm->partition.messaging_method;

		vm_locked.vm->managed_exit =
			manifest_vm->partition.managed_exit;

		vm_locked.vm->boot_order = manifest_vm->partition.boot_order;

		/* Update the boot list according to boot_order. */
		vm_update_boot(vm_locked.vm);

		/* TODO: Enable in accordance with the VM's manifest. */
		vm_locked.vm->notifications.enabled = true;

		/* TODO: check if notifications are enabled for the given VM. */
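		/*
		 * Sketch of the intent (per the FF-A v1.1 notifications
		 * model): when Hafnium runs as the hypervisor, the call below
		 * forwards an FFA_NOTIFICATION_BITMAP_CREATE request on the
		 * VM's behalf; the exact behaviour is defined by the plat_ffa
		 * backend.
		 */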
		if (plat_ffa_notifications_bitmap_create_call(
			    vm_locked.vm->id, vm_locked.vm->vcpu_count,
			    &bitmap_create_res)) {
			if (bitmap_create_res.func == FFA_ERROR_32) {
				dlog_verbose(
					"Failed to create notifications bitmap "
					"for VM %#x; error: %#x.\n",
					vm_locked.vm->id,
					ffa_error_code(bitmap_create_res));
				return false;
			}
		}
	}

	/* Initialize architecture-specific features. */
	arch_vm_features_set(vm_locked.vm);

	if (!plat_iommu_attach_peripheral(stage1_locked, vm_locked, manifest_vm,
					  ppool)) {
		dlog_error("Unable to attach upstream peripheral device.\n");
		return false;
	}

	return true;
}

/**
 * Loads the primary VM.
 */
static bool load_primary(struct mm_stage1_locked stage1_locked,
			 const struct manifest_vm *manifest_vm,
			 const struct memiter *cpio,
			 const struct boot_params *params, struct mpool *ppool)
{
	paddr_t primary_begin;
	ipaddr_t primary_entry;
	struct vm *vm;
	struct vm_locked vm_locked;
	struct vcpu_locked vcpu_locked;
	size_t i;
	bool ret;

	if (manifest_vm->is_ffa_partition) {
		primary_begin = pa_init(manifest_vm->partition.load_addr);
		primary_entry = ipa_add(ipa_from_pa(primary_begin),
					manifest_vm->partition.ep_offset);
	} else {
		primary_begin =
			(manifest_vm->primary.boot_address ==
			 MANIFEST_INVALID_ADDRESS)
				? layout_primary_begin()
				: pa_init(manifest_vm->primary.boot_address);
		primary_entry = ipa_from_pa(primary_begin);
	}

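	/*
	 * The primary's image size is not known at this point; RSIZE_MAX (the
	 * C11 Annex K bound on sizes accepted by functions such as memcpy_s)
	 * is used as a generous upper limit for load_kernel()'s size check
	 * below.
	 */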
	paddr_t primary_end = pa_add(primary_begin, RSIZE_MAX);

	/* The primary VM must be a normal EL1 VM, not an S-EL0 partition. */
	CHECK(manifest_vm->partition.run_time_el == EL1);

	/*
	 * Load the kernel if a filename is specified in the VM manifest.
	 * For an FF-A partition, kernel_filename is undefined, indicating
	 * the partition package has already been loaded prior to Hafnium
	 * booting.
	 */
	if (!string_is_empty(&manifest_vm->kernel_filename)) {
		if (!load_kernel(stage1_locked, primary_begin, primary_end,
				 manifest_vm, cpio, ppool, NULL)) {
			dlog_error("Unable to load primary kernel.\n");
			return false;
		}
	}

	if (!vm_init_next(MAX_CPUS, ppool, &vm, false)) {
		dlog_error("Unable to initialise primary VM.\n");
		return false;
	}

	if (vm->id != HF_PRIMARY_VM_ID) {
		dlog_error("Primary VM was not given the correct ID.\n");
		return false;
	}

	vm_locked = vm_lock(vm);

	if (params->device_mem_ranges_count == 0) {
		/*
		 * Map 1 TB of address space as device memory to, most likely,
		 * make all devices available to the primary VM.
		 *
		 * TODO: remove this once all targets provide valid ranges.
		 */
		dlog_warning(
			"Device memory not provided, defaulting to 1 TB.\n");

		if (!vm_identity_map(
			    vm_locked, pa_init(0),
			    pa_init(UINT64_C(1024) * 1024 * 1024 * 1024),
			    MM_MODE_R | MM_MODE_W | MM_MODE_D, ppool, NULL)) {
			dlog_error(
				"Unable to initialise address space for "
				"primary VM.\n");
			ret = false;
			goto out;
		}
	}

	/* Map normal memory as such to permit caching, execution, etc. */
	for (i = 0; i < params->mem_ranges_count; ++i) {
		if (!vm_identity_map(vm_locked, params->mem_ranges[i].begin,
				     params->mem_ranges[i].end,
				     MM_MODE_R | MM_MODE_W | MM_MODE_X, ppool,
				     NULL)) {
			dlog_error(
				"Unable to initialise memory for primary "
				"VM.\n");
			ret = false;
			goto out;
		}
	}

	/* Map device memory as such to prevent execution, speculation, etc. */
	for (i = 0; i < params->device_mem_ranges_count; ++i) {
		if (!vm_identity_map(
			    vm_locked, params->device_mem_ranges[i].begin,
			    params->device_mem_ranges[i].end,
			    MM_MODE_R | MM_MODE_W | MM_MODE_D, ppool, NULL)) {
			dlog_error(
				"Unable to initialise device memory for "
				"primary VM.\n");
			ret = false;
			goto out;
		}
	}

	if (!load_common(stage1_locked, vm_locked, manifest_vm, ppool)) {
		ret = false;
		goto out;
	}

	if (!vm_unmap_hypervisor(vm_locked, ppool)) {
		dlog_error("Unable to unmap hypervisor from primary VM.\n");
		ret = false;
		goto out;
	}

	if (!plat_iommu_unmap_iommus(vm_locked, ppool)) {
		dlog_error("Unable to unmap IOMMUs from primary VM.\n");
		ret = false;
		goto out;
	}

	dlog_info("Loaded primary VM with %u vCPUs, entry at %#x.\n",
		  vm->vcpu_count, pa_addr(primary_begin));

	/* Mark the primary as the first VM to boot. */
	vm_update_boot(vm);

	vcpu_locked = vcpu_lock(vm_get_vcpu(vm, 0));
	vcpu_on(vcpu_locked, primary_entry, params->kernel_arg);
	vcpu_unlock(&vcpu_locked);
	ret = true;

out:
	vm_unlock(&vm_locked);

	return ret;
}

/**
 * Loads the secondary VM's FDT.
 * Stores the total allocated size for the FDT in fdt_allocated_size (if
 * fdt_allocated_size is not NULL). The allocated size includes additional
 * space for potential patching.
 */
static bool load_secondary_fdt(struct mm_stage1_locked stage1_locked,
			       paddr_t end, size_t fdt_max_size,
			       const struct manifest_vm *manifest_vm,
			       const struct memiter *cpio, struct mpool *ppool,
			       paddr_t *fdt_addr, size_t *fdt_allocated_size)
{
	struct memiter fdt;
	size_t allocated_size;

	CHECK(!string_is_empty(&manifest_vm->secondary.fdt_filename));

	if (!cpio_get_file(cpio, &manifest_vm->secondary.fdt_filename, &fdt)) {
		dlog_error("Cannot open the secondary VM's FDT.\n");
		return false;
	}

	/*
	 * Ensure the FDT has one additional page at the end for patching, and
	 * align it to the page boundary.
	 */
	allocated_size = align_up(memiter_size(&fdt), PAGE_SIZE) + PAGE_SIZE;

	if (allocated_size > fdt_max_size) {
		dlog_error(
			"FDT allocated space (%u) exceeds the specified "
			"maximum to use (%u).\n",
			allocated_size, fdt_max_size);
		return false;
	}

	/* Load the FDT to the end of the VM's allocated memory space. */
	*fdt_addr = pa_init(pa_addr(pa_sub(end, allocated_size)));

	dlog_info("Loading secondary FDT of allocated size %u at 0x%x.\n",
		  allocated_size, pa_addr(*fdt_addr));

	if (!copy_to_unmapped(stage1_locked, *fdt_addr, &fdt, ppool)) {
		dlog_error("Unable to copy FDT.\n");
		return false;
	}

	if (fdt_allocated_size) {
		*fdt_allocated_size = allocated_size;
	}

	return true;
}

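/*
 * Worked example (illustrative): with 4 KiB pages and a 5000-byte FDT in the
 * cpio archive, align_up(5000, 4096) = 8192, plus one extra patching page
 * gives allocated_size = 12288, so the FDT is placed 12 KiB below `end`.
 */
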
/**
 * Loads a secondary VM.
 */
static bool load_secondary(struct mm_stage1_locked stage1_locked,
			   struct vm_locked primary_vm_locked,
			   paddr_t mem_begin, paddr_t mem_end,
			   const struct manifest_vm *manifest_vm,
			   const struct memiter *cpio, struct mpool *ppool)
{
	struct vm *vm;
	struct vm_locked vm_locked;
	struct vcpu_locked vcpu_locked;
	struct vcpu *vcpu;
	ipaddr_t secondary_entry;
	bool ret;
	paddr_t fdt_addr;
	bool has_fdt;
	size_t kernel_size = 0;
	const size_t mem_size = pa_difference(mem_begin, mem_end);
	uint32_t map_mode;

	/*
	 * Load the kernel if a filename is specified in the VM manifest.
	 * For an FF-A partition, kernel_filename is undefined, indicating
	 * the partition package has already been loaded prior to Hafnium
	 * booting.
	 */
	if (!string_is_empty(&manifest_vm->kernel_filename)) {
		if (!load_kernel(stage1_locked, mem_begin, mem_end, manifest_vm,
				 cpio, ppool, &kernel_size)) {
			dlog_error("Unable to load kernel.\n");
			return false;
		}
	}

	has_fdt = !string_is_empty(&manifest_vm->secondary.fdt_filename);
	if (has_fdt) {
		/*
		 * Ensure that the FDT neither overwrites the kernel nor
		 * overlaps its last page, so that the FDT starts at a page
		 * boundary.
		 */
		const size_t fdt_max_size =
			mem_size - align_up(kernel_size, PAGE_SIZE);

		size_t fdt_allocated_size;

		if (!load_secondary_fdt(stage1_locked, mem_end, fdt_max_size,
					manifest_vm, cpio, ppool, &fdt_addr,
					&fdt_allocated_size)) {
			dlog_error("Unable to load FDT.\n");
			return false;
		}

		if (!fdt_patch_mem(stage1_locked, fdt_addr, fdt_allocated_size,
				   mem_begin, mem_end, ppool)) {
			dlog_error("Unable to patch FDT.\n");
			return false;
		}
	}

	/*
	 * An S-EL0 partition must contain only 1 vCPU (UP migratable) per the
	 * FF-A 1.0 spec.
	 */
	CHECK(manifest_vm->partition.run_time_el != S_EL0 ||
	      manifest_vm->secondary.vcpu_count == 1);

	if (!vm_init_next(manifest_vm->secondary.vcpu_count, ppool, &vm,
			  (manifest_vm->partition.run_time_el == S_EL0))) {
		dlog_error("Unable to initialise VM.\n");
		return false;
	}

	vm_locked = vm_lock(vm);

	/*
	 * Grant the VM access to the memory. For VMs, all memory is marked
	 * RWX in the stage-2 tables and the VM controls permissions using
	 * stage-1 translations. For S-EL0 partitions, Hafnium maps the
	 * partition's entire memory region as RX. The partition is then
	 * expected to perform its own relocations and call the
	 * FFA_MEM_PERM_* APIs to change permissions on its image layout.
	 */
	if (vm->el0_partition) {
		map_mode = MM_MODE_R | MM_MODE_X | MM_MODE_USER | MM_MODE_NG;
	} else {
		map_mode = MM_MODE_R | MM_MODE_W | MM_MODE_X;
	}

	if (!vm_identity_map(vm_locked, mem_begin, mem_end, map_mode, ppool,
			     &secondary_entry)) {
		dlog_error("Unable to initialise memory.\n");
		ret = false;
		goto out;
	}

	if (manifest_vm->is_ffa_partition) {
		int j = 0;
		paddr_t region_begin;
		paddr_t region_end;
		paddr_t alloc_base = mem_end;
		size_t size;
		size_t total_alloc = 0;

		/* Map memory-regions. */
		while (j < manifest_vm->partition.mem_region_count) {
			size = manifest_vm->partition.mem_regions[j]
				       .page_count *
			       PAGE_SIZE;
			/*
			 * Memory regions without a base address are allocated
			 * inside the partition's page table, starting from the
			 * end and growing downwards.
			 * TODO: Add a mechanism to let the partition know of
			 * these memory regions.
			 */
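			/*
			 * Worked example (illustrative): with mem_size of
			 * 1 MiB and alloc_base starting at mem_end, a 16 KiB
			 * region lands at [mem_end - 16K, mem_end) and
			 * alloc_base drops to mem_end - 16K; allocation fails
			 * once total_alloc exceeds 512 KiB (half of mem_size).
			 */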
			if (manifest_vm->partition.mem_regions[j]
				    .base_address == MANIFEST_INVALID_ADDRESS) {
				total_alloc += size;
				/* Don't go beyond half the VM's memory space. */
				if (total_alloc >
				    (manifest_vm->secondary.mem_size / 2)) {
					dlog_error(
						"Not enough space for memory-"
						"region allocation.\n");
					ret = false;
					goto out;
				}

				region_end = alloc_base;
				region_begin = pa_subtract(alloc_base, size);
				alloc_base = region_begin;

				map_mode = manifest_vm->partition.mem_regions[j]
						   .attributes;
				if (vm->el0_partition) {
					map_mode |= MM_MODE_USER | MM_MODE_NG;
				}

				if (!vm_identity_map(vm_locked, region_begin,
						     region_end, map_mode,
						     ppool, NULL)) {
					dlog_error(
						"Unable to map secondary VM "
						"memory-region.\n");
					ret = false;
					goto out;
				}

				dlog_verbose(
					" Memory region %#x - %#x allocated\n",
					pa_addr(region_begin),
					pa_addr(region_end));
			} else {
				/*
				 * Identity map the memory region in either
				 * case: VA (S-EL0) or IPA (S-EL1).
				 */
				region_begin = pa_init(
					manifest_vm->partition.mem_regions[j]
						.base_address);
				region_end = pa_add(region_begin, size);

				map_mode = manifest_vm->partition.mem_regions[j]
						   .attributes;
				if (vm->el0_partition) {
					map_mode |= MM_MODE_USER | MM_MODE_NG;
				}

				if (!vm_identity_map(vm_locked, region_begin,
						     region_end, map_mode,
						     ppool, NULL)) {
					dlog_error(
						"Unable to map secondary VM "
						"memory-region.\n");
					ret = false;
					goto out;
				}
			}

			/* Deny the primary VM access to this memory. */
			if (!vm_unmap(primary_vm_locked, region_begin,
				      region_end, ppool)) {
				dlog_error(
					"Unable to unmap secondary VM memory-"
					"region from primary VM.\n");
				ret = false;
				goto out;
			}

			j++;
		}

		/* Map device-regions. */
		j = 0;
		while (j < manifest_vm->partition.dev_region_count) {
			region_begin =
				pa_init(manifest_vm->partition.dev_regions[j]
						.base_address);
			size = manifest_vm->partition.dev_regions[j]
				       .page_count *
			       PAGE_SIZE;
			region_end = pa_add(region_begin, size);

			map_mode = manifest_vm->partition.dev_regions[j]
					   .attributes;
			if (vm->el0_partition) {
				map_mode |= MM_MODE_USER | MM_MODE_NG;
			}

			if (!vm_identity_map(vm_locked, region_begin,
					     region_end, map_mode, ppool,
					     NULL)) {
				dlog_error(
					"Unable to map secondary VM "
					"device-region.\n");
				ret = false;
				goto out;
			}
			/* Deny primary VM access to this region. */
			if (!vm_unmap(primary_vm_locked, region_begin,
				      region_end, ppool)) {
				dlog_error(
					"Unable to unmap secondary VM device-"
					"region from primary VM.\n");
				ret = false;
				goto out;
			}
			j++;
		}

		secondary_entry = ipa_add(secondary_entry,
					  manifest_vm->partition.ep_offset);
	}

	/*
	 * Map the hypervisor into the VM's page table. The hypervisor pages
	 * will not be accessible from EL0 since they will not be marked for
	 * user access.
	 * TODO: Map only the exception vectors and the data they require,
	 * rather than the entire hypervisor. This helps mitigate speculative
	 * side-channel attacks.
	 */
	if (vm->el0_partition) {
		CHECK(vm_identity_map(vm_locked, layout_text_begin(),
				      layout_text_end(), MM_MODE_X, ppool,
				      NULL));

		CHECK(vm_identity_map(vm_locked, layout_rodata_begin(),
				      layout_rodata_end(), MM_MODE_R, ppool,
				      NULL));

		CHECK(vm_identity_map(vm_locked, layout_data_begin(),
				      layout_data_end(), MM_MODE_R | MM_MODE_W,
				      ppool, NULL));
		plat_console_mm_init(mm_lock_ptable_unsafe(&vm->ptable), ppool);
	}

	if (!load_common(stage1_locked, vm_locked, manifest_vm, ppool)) {
		ret = false;
		goto out;
	}

	dlog_info("Loaded with %u vCPUs, entry at %#x.\n",
		  manifest_vm->secondary.vcpu_count, pa_addr(mem_begin));

	vcpu = vm_get_vcpu(vm, 0);

	vcpu_locked = vcpu_lock(vcpu);
	if (has_fdt) {
		vcpu_secondary_reset_and_start(vcpu_locked, secondary_entry,
					       pa_addr(fdt_addr));
	} else {
		/*
		 * Without an FDT, secondary VMs expect the memory size to be
		 * passed in register x0, which is what
		 * vcpu_secondary_reset_and_start does in this case.
		 */
		vcpu_secondary_reset_and_start(vcpu_locked, secondary_entry,
					       mem_size);
	}

	vcpu_unlock(&vcpu_locked);

	ret = true;

out:
	vm_unlock(&vm_locked);

	return ret;
}

/**
 * Try to find a memory range of the given size within the given ranges, and
 * remove it from them. Return true on success, or false if no large enough
 * contiguous range is found.
 */
static bool carve_out_mem_range(struct mem_range *mem_ranges,
				size_t mem_ranges_count, uint64_t size_to_find,
				paddr_t *found_begin, paddr_t *found_end)
{
	size_t i;

	/*
	 * TODO(b/116191358): Consider being cleverer about how we pack VMs
	 * together, with a non-greedy algorithm.
	 */
	for (i = 0; i < mem_ranges_count; ++i) {
		if (size_to_find <=
		    pa_difference(mem_ranges[i].begin, mem_ranges[i].end)) {
			/*
			 * This range is big enough, take some of it from the
			 * end and reduce its size accordingly.
			 */
			*found_end = mem_ranges[i].end;
			*found_begin = pa_init(pa_addr(mem_ranges[i].end) -
					       size_to_find);
			mem_ranges[i].end = *found_begin;
			return true;
		}
	}
	return false;
}

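/*
 * Worked example (illustrative): carving 0x10000 bytes out of the range
 * [0x80000000, 0x90000000) yields found_begin = 0x8fff0000 and
 * found_end = 0x90000000, and the range shrinks to [0x80000000, 0x8fff0000).
 */
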
/**
 * Given arrays of memory ranges before and after memory was removed for
 * secondary VMs, add the difference to the reserved ranges of the given
 * update. Return true on success, or false if there would be more than
 * MAX_MEM_RANGES reserved ranges after adding the new ones.
 * `before` and `after` must be arrays of exactly `mem_ranges_count` elements.
 */
static bool update_reserved_ranges(struct boot_params_update *update,
				   const struct mem_range *before,
				   const struct mem_range *after,
				   size_t mem_ranges_count)
{
	size_t i;

	for (i = 0; i < mem_ranges_count; ++i) {
		if (pa_addr(after[i].begin) > pa_addr(before[i].begin)) {
			if (update->reserved_ranges_count >= MAX_MEM_RANGES) {
				dlog_error(
					"Too many reserved ranges after "
					"loading secondary VMs.\n");
				return false;
			}
			update->reserved_ranges[update->reserved_ranges_count]
				.begin = before[i].begin;
			update->reserved_ranges[update->reserved_ranges_count]
				.end = after[i].begin;
			update->reserved_ranges_count++;
		}
		if (pa_addr(after[i].end) < pa_addr(before[i].end)) {
			if (update->reserved_ranges_count >= MAX_MEM_RANGES) {
				dlog_error(
					"Too many reserved ranges after "
					"loading secondary VMs.\n");
				return false;
			}
			update->reserved_ranges[update->reserved_ranges_count]
				.begin = after[i].end;
			update->reserved_ranges[update->reserved_ranges_count]
				.end = before[i].end;
			update->reserved_ranges_count++;
		}
	}

	return true;
}

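/*
 * Illustrative example: if before[0] = [0x80000000, 0x90000000) and a
 * secondary VM was carved from the top so that after[0] =
 * [0x80000000, 0x8fff0000), the update gains the reserved range
 * [0x8fff0000, 0x90000000).
 */
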
static bool init_other_world_vm(struct mpool *ppool)
{
	struct vm *other_world_vm;
	size_t i;

	/*
	 * Initialise the dummy VM which represents the opposite world:
	 * - TrustZone (or the SPMC) when running the Hypervisor
	 * - the Hypervisor when running TZ/SPMC
	 */
	other_world_vm = vm_init(HF_OTHER_WORLD_ID, MAX_CPUS, ppool, false);
	CHECK(other_world_vm != NULL);

	for (i = 0; i < MAX_CPUS; i++) {
		struct vcpu *vcpu = vm_get_vcpu(other_world_vm, i);
		struct cpu *cpu = cpu_find_index(i);

		vcpu->cpu = cpu;
	}

	return arch_other_world_vm_init(other_world_vm, ppool);
}

/**
 * Loads all VMs from the manifest.
 */
bool load_vms(struct mm_stage1_locked stage1_locked,
	      const struct manifest *manifest, const struct memiter *cpio,
	      const struct boot_params *params,
	      struct boot_params_update *update, struct mpool *ppool)
{
	struct vm *primary;
	struct mem_range mem_ranges_available[MAX_MEM_RANGES];
	struct vm_locked primary_vm_locked;
	size_t i;
	bool success = true;

	/*
	 * Only try to load the primary VM if it is supposed to be in this
	 * world.
	 */
	if (vm_id_is_current_world(HF_PRIMARY_VM_ID)) {
		if (!load_primary(stage1_locked,
				  &manifest->vm[HF_PRIMARY_VM_INDEX], cpio,
				  params, ppool)) {
			dlog_error("Unable to load primary VM.\n");
			return false;
		}
	}

	if (!init_other_world_vm(ppool)) {
		return false;
	}

	static_assert(
		sizeof(mem_ranges_available) == sizeof(params->mem_ranges),
		"mem_range arrays must be the same size for memcpy.");
	static_assert(sizeof(mem_ranges_available) < 500,
		      "This will use too much stack, either make "
		      "MAX_MEM_RANGES smaller or change this.");
	memcpy_s(mem_ranges_available, sizeof(mem_ranges_available),
		 params->mem_ranges, sizeof(params->mem_ranges));

	/* Round the last addresses down to the page size. */
	for (i = 0; i < params->mem_ranges_count; ++i) {
		mem_ranges_available[i].end = pa_init(align_down(
			pa_addr(mem_ranges_available[i].end), PAGE_SIZE));
	}

	primary = vm_find(HF_PRIMARY_VM_ID);
	primary_vm_locked = vm_lock(primary);

	for (i = 0; i < manifest->vm_count; ++i) {
		const struct manifest_vm *manifest_vm = &manifest->vm[i];
		ffa_vm_id_t vm_id = HF_VM_ID_OFFSET + i;
		uint64_t mem_size;
		paddr_t secondary_mem_begin;
		paddr_t secondary_mem_end;

		if (vm_id == HF_PRIMARY_VM_ID) {
			continue;
		}

		dlog_info("Loading VM id %#x: %s.\n", vm_id,
			  manifest_vm->debug_name);

		mem_size = align_up(manifest_vm->secondary.mem_size, PAGE_SIZE);

		if (manifest_vm->is_ffa_partition) {
			secondary_mem_begin =
				pa_init(manifest_vm->partition.load_addr);
			secondary_mem_end = pa_init(
				manifest_vm->partition.load_addr + mem_size);
		} else if (!carve_out_mem_range(mem_ranges_available,
						params->mem_ranges_count,
						mem_size, &secondary_mem_begin,
						&secondary_mem_end)) {
			dlog_error("Not enough memory (%u bytes).\n", mem_size);
			continue;
		}

		if (!load_secondary(stage1_locked, primary_vm_locked,
				    secondary_mem_begin, secondary_mem_end,
				    manifest_vm, cpio, ppool)) {
			dlog_error("Unable to load VM.\n");
			continue;
		}

		/* Deny the primary VM access to this memory. */
		if (!vm_unmap(primary_vm_locked, secondary_mem_begin,
			      secondary_mem_end, ppool)) {
			dlog_error(
				"Unable to unmap secondary VM from primary "
				"VM.\n");
			success = false;
			break;
		}
	}

	vm_unlock(&primary_vm_locked);

	if (!success) {
		return false;
	}

	/*
	 * Add newly reserved areas to update params by looking at the
	 * difference between the available ranges from the original params and
	 * the updated mem_ranges_available. We assume that the number and
	 * order of available ranges is the same, i.e. the loop above does not
	 * remove any ranges, it only makes them smaller.
	 */
	return update_reserved_ranges(update, params->mem_ranges,
				      mem_ranges_available,
				      params->mem_ranges_count);
}