/*
 * Copyright 2018 The Hafnium Authors.
 *
 * Use of this source code is governed by a BSD-style
 * license that can be found in the LICENSE file or at
 * https://opensource.org/licenses/BSD-3-Clause.
 */

#include "hf/load.h"

#include <stdbool.h>

#include "hf/arch/init.h"
#include "hf/arch/other_world.h"
#include "hf/arch/plat/ffa.h"
#include "hf/arch/vm.h"

#include "hf/api.h"
#include "hf/boot_params.h"
#include "hf/check.h"
#include "hf/dlog.h"
#include "hf/fdt_patch.h"
#include "hf/layout.h"
#include "hf/memiter.h"
#include "hf/mm.h"
#include "hf/plat/console.h"
#include "hf/plat/interrupts.h"
#include "hf/plat/iommu.h"
#include "hf/static_assert.h"
#include "hf/std.h"
#include "hf/vm.h"

#include "vmapi/hf/call.h"
#include "vmapi/hf/ffa.h"

/**
 * Copies data to an unmapped location by mapping it for write, copying the
 * data, then unmapping it.
 *
 * The data is written so that it is available to all cores with the cache
 * disabled. When switching to the partitions, caching is initially disabled,
 * so the data must be available without the cache.
 */
static bool copy_to_unmapped(struct mm_stage1_locked stage1_locked, paddr_t to,
			     struct memiter *from_it, struct mpool *ppool)
{
	const void *from = memiter_base(from_it);
	size_t size = memiter_size(from_it);
	paddr_t to_end = pa_add(to, size);
	void *ptr;

	ptr = mm_identity_map(stage1_locked, to, to_end, MM_MODE_W, ppool);
	if (!ptr) {
		return false;
	}

	memcpy_s(ptr, size, from, size);
	arch_mm_flush_dcache(ptr, size);

	CHECK(mm_unmap(stage1_locked, to, to_end, ppool));

	return true;
}

/**
 * Loads the secondary VM's kernel.
 * Stores the kernel size in kernel_size (if kernel_size is not NULL).
 * Returns false if it cannot load the kernel.
 */
static bool load_kernel(struct mm_stage1_locked stage1_locked, paddr_t begin,
			paddr_t end, const struct manifest_vm *manifest_vm,
			const struct memiter *cpio, struct mpool *ppool,
			size_t *kernel_size)
{
	struct memiter kernel;
	size_t size;

	if (!cpio_get_file(cpio, &manifest_vm->kernel_filename, &kernel)) {
		dlog_error("Could not find kernel file \"%s\".\n",
			   string_data(&manifest_vm->kernel_filename));
		return false;
	}

	size = memiter_size(&kernel);
	if (pa_difference(begin, end) < size) {
		dlog_error("Kernel is larger than available memory.\n");
		return false;
	}

	if (!copy_to_unmapped(stage1_locked, begin, &kernel, ppool)) {
		dlog_error("Unable to copy kernel.\n");
		return false;
	}

	if (kernel_size) {
		*kernel_size = size;
	}

	return true;
}

/*
 * Links the RX/TX buffers provided in the partition manifest to the VM's
 * mailbox.
 */
static bool link_rxtx_to_mailbox(struct mm_stage1_locked stage1_locked,
				 struct vm_locked vm_locked, struct rx_tx rxtx,
				 struct mpool *ppool)
{
	struct ffa_value ret;
	ipaddr_t send;
	ipaddr_t recv;
	uint32_t page_count;

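	/* The VM sends from its TX buffer and receives into its RX buffer. */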
	send = ipa_init(rxtx.tx_buffer->base_address);
	recv = ipa_init(rxtx.rx_buffer->base_address);
	page_count = rxtx.tx_buffer->page_count;

	ret = api_vm_configure_pages(stage1_locked, vm_locked, send, recv,
				     page_count, ppool);
	if (ret.func != FFA_SUCCESS_32) {
		return false;
	}

	dlog_verbose("  mailbox: send = %#x, recv = %#x\n",
		     vm_locked.vm->mailbox.send, vm_locked.vm->mailbox.recv);

	return true;
}

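/**
 * Builds an interrupt descriptor from an interrupt entry in the partition
 * manifest, unpacking the packed attributes field (type, config, security
 * state and priority) and marking the descriptor valid.
 */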
static void infer_interrupt(struct interrupt interrupt,
			    struct interrupt_descriptor *int_desc)
{
	uint32_t attr = interrupt.attributes;

	interrupt_desc_set_id(int_desc, interrupt.id);
	interrupt_desc_set_priority(int_desc,
				    (attr >> INT_DESC_PRIORITY_SHIFT) & 0xff);

	/* Refer to the comments in interrupt_descriptor struct definition. */
	interrupt_desc_set_type_config_sec_state(
		int_desc,
		(((attr >> INT_DESC_TYPE_SHIFT) & 0x3) << 2) |
			(((attr >> INT_DESC_CONFIG_SHIFT) & 0x1) << 1) |
			((attr >> INT_DESC_SEC_STATE_SHIFT) & 0x1));

	interrupt_desc_set_valid(int_desc, true);
}

/**
 * Performs VM loading activities that are common between the primary and
 * secondary VMs.
 */
static bool load_common(struct mm_stage1_locked stage1_locked,
			struct vm_locked vm_locked,
			const struct manifest_vm *manifest_vm,
			struct mpool *ppool)
{
	struct device_region dev_region;
	struct interrupt interrupt;
	uint32_t k = 0;

	vm_locked.vm->smc_whitelist = manifest_vm->smc_whitelist;
	vm_locked.vm->uuid = manifest_vm->partition.uuid;

	/* Populate the interrupt descriptors for the current VM. */
	for (uint8_t i = 0; i < SP_MAX_DEVICE_REGIONS; i++) {
		dev_region = manifest_vm->partition.dev_regions[i];

		CHECK(dev_region.interrupt_count <=
		      SP_MAX_INTERRUPTS_PER_DEVICE);

		for (uint8_t j = 0; j < dev_region.interrupt_count; j++) {
			struct interrupt_descriptor int_desc;

			interrupt = dev_region.interrupts[j];
			infer_interrupt(interrupt, &int_desc);
			vm_locked.vm->interrupt_desc[k] = int_desc;

			/*
			 * Configure the physical interrupts allocated to this
			 * VM in its partition manifest.
			 */
			plat_interrupts_configure_interrupt(int_desc);
			k++;
			CHECK(k <= VM_MANIFEST_MAX_INTERRUPTS);
		}
	}
	dlog_verbose("VM has %d physical interrupts defined in manifest.\n", k);

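	/* Apply the FF-A specific settings from the partition manifest. */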
	if (manifest_vm->is_ffa_partition) {
		vm_locked.vm->ffa_version = manifest_vm->partition.ffa_version;
		/* Link the RX/TX buffers to the mailbox. */
		if (manifest_vm->partition.rxtx.available) {
			if (!link_rxtx_to_mailbox(stage1_locked, vm_locked,
						  manifest_vm->partition.rxtx,
						  ppool)) {
				dlog_error(
					"Unable to link RX/TX buffers with "
					"the mailbox.\n");
				return false;
			}
		}

		vm_locked.vm->messaging_method =
			manifest_vm->partition.messaging_method;

		vm_locked.vm->managed_exit =
			manifest_vm->partition.managed_exit;

		vm_locked.vm->notifications.enabled =
			manifest_vm->partition.notification_support;

		vm_locked.vm->boot_order = manifest_vm->partition.boot_order;

		/* Update the boot list according to boot_order. */
		vm_update_boot(vm_locked.vm);

		if (vm_locked_are_notifications_enabled(vm_locked) &&
		    !plat_ffa_notifications_bitmap_create_call(
			    vm_locked.vm->id, vm_locked.vm->vcpu_count)) {
			return false;
		}
	}

	/* Initialize architecture-specific features. */
	arch_vm_features_set(vm_locked.vm);

	if (!plat_iommu_attach_peripheral(stage1_locked, vm_locked, manifest_vm,
					  ppool)) {
		dlog_error("Unable to attach upstream peripheral device.\n");
		return false;
	}

	return true;
}

/**
 * Loads the primary VM.
 */
static bool load_primary(struct mm_stage1_locked stage1_locked,
			 const struct manifest_vm *manifest_vm,
			 const struct memiter *cpio,
			 const struct boot_params *params, struct mpool *ppool)
{
	paddr_t primary_begin;
	ipaddr_t primary_entry;
	struct vm *vm;
	struct vm_locked vm_locked;
	struct vcpu_locked vcpu_locked;
	size_t i;
	bool ret;

	if (manifest_vm->is_ffa_partition && !manifest_vm->is_hyp_loaded) {
		primary_begin = pa_init(manifest_vm->partition.load_addr);
		primary_entry = ipa_add(ipa_from_pa(primary_begin),
					manifest_vm->partition.ep_offset);
	} else {
		primary_begin =
			(manifest_vm->primary.boot_address ==
			 MANIFEST_INVALID_ADDRESS)
				? layout_primary_begin()
				: pa_init(manifest_vm->primary.boot_address);
		primary_entry = ipa_from_pa(primary_begin);
	}

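	/*
	 * The size of the primary VM's image is not known here, so allow it
	 * to occupy memory up to RSIZE_MAX bytes from its base address.
	 */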
	paddr_t primary_end = pa_add(primary_begin, RSIZE_MAX);

	/* The primary VM must be a VM. */
	CHECK(manifest_vm->partition.run_time_el == EL1);

	/*
	 * Load the kernel if a filename is specified in the VM manifest.
	 * For an FF-A partition, kernel_filename is undefined, indicating
	 * the partition package has already been loaded prior to Hafnium
	 * booting.
	 */
	if (!string_is_empty(&manifest_vm->kernel_filename)) {
		if (!load_kernel(stage1_locked, primary_begin, primary_end,
				 manifest_vm, cpio, ppool, NULL)) {
			dlog_error("Unable to load primary kernel.\n");
			return false;
		}
	}

	if (!vm_init_next(MAX_CPUS, ppool, &vm, false)) {
		dlog_error("Unable to initialise primary VM.\n");
		return false;
	}

	if (vm->id != HF_PRIMARY_VM_ID) {
		dlog_error("Primary VM was not given the correct ID.\n");
		return false;
	}

	vm_locked = vm_lock(vm);

	if (params->device_mem_ranges_count == 0) {
		/*
		 * Map 1TB of address space as device memory so that, most
		 * likely, all devices are available to the primary VM.
		 *
		 * TODO: remove this once all targets provide valid ranges.
		 */
		dlog_warning(
			"Device memory not provided, defaulting to 1 TB.\n");

		if (!vm_identity_map(
			    vm_locked, pa_init(0),
			    pa_init(UINT64_C(1024) * 1024 * 1024 * 1024),
			    MM_MODE_R | MM_MODE_W | MM_MODE_D, ppool, NULL)) {
			dlog_error(
				"Unable to initialise address space for "
				"primary VM.\n");
			ret = false;
			goto out;
		}
	}

	/* Map normal memory as such to permit caching, execution, etc. */
	for (i = 0; i < params->mem_ranges_count; ++i) {
		if (!vm_identity_map(vm_locked, params->mem_ranges[i].begin,
				     params->mem_ranges[i].end,
				     MM_MODE_R | MM_MODE_W | MM_MODE_X, ppool,
				     NULL)) {
			dlog_error(
				"Unable to initialise memory for primary "
				"VM.\n");
			ret = false;
			goto out;
		}
	}

	/* Map device memory as such to prevent execution, speculation, etc. */
	for (i = 0; i < params->device_mem_ranges_count; ++i) {
		if (!vm_identity_map(
			    vm_locked, params->device_mem_ranges[i].begin,
			    params->device_mem_ranges[i].end,
			    MM_MODE_R | MM_MODE_W | MM_MODE_D, ppool, NULL)) {
			dlog_error(
				"Unable to initialise device memory for "
				"primary VM.\n");
			ret = false;
			goto out;
		}
	}

	if (!load_common(stage1_locked, vm_locked, manifest_vm, ppool)) {
		ret = false;
		goto out;
	}

	if (!vm_unmap_hypervisor(vm_locked, ppool)) {
		dlog_error("Unable to unmap hypervisor from primary VM.\n");
		ret = false;
		goto out;
	}

	if (!plat_iommu_unmap_iommus(vm_locked, ppool)) {
		dlog_error("Unable to unmap IOMMUs from primary VM.\n");
		ret = false;
		goto out;
	}

	dlog_info("Loaded primary VM with %u vCPUs, entry at %#x.\n",
		  vm->vcpu_count, pa_addr(primary_begin));

	/* Mark the primary as the first VM to be booted. */
	vm_update_boot(vm);

	vcpu_locked = vcpu_lock(vm_get_vcpu(vm, 0));
	vcpu_on(vcpu_locked, primary_entry, params->kernel_arg);
	vcpu_unlock(&vcpu_locked);
	ret = true;

out:
	vm_unlock(&vm_locked);

	return ret;
}

/**
 * Loads the secondary VM's FDT.
 * Stores the total allocated size for the FDT in fdt_allocated_size (if
 * fdt_allocated_size is not NULL). The allocated size includes additional
 * space for potential patching.
 */
static bool load_secondary_fdt(struct mm_stage1_locked stage1_locked,
			       paddr_t end, size_t fdt_max_size,
			       const struct manifest_vm *manifest_vm,
			       const struct memiter *cpio, struct mpool *ppool,
			       paddr_t *fdt_addr, size_t *fdt_allocated_size)
{
	struct memiter fdt;
	size_t allocated_size;

	CHECK(!string_is_empty(&manifest_vm->secondary.fdt_filename));

	if (!cpio_get_file(cpio, &manifest_vm->secondary.fdt_filename, &fdt)) {
		dlog_error("Cannot open the secondary VM's FDT.\n");
		return false;
	}

	/*
	 * Ensure the FDT has one additional page at the end for patching,
	 * and align it to the page boundary.
	 */
	allocated_size = align_up(memiter_size(&fdt), PAGE_SIZE) + PAGE_SIZE;

	if (allocated_size > fdt_max_size) {
		dlog_error(
			"FDT allocated space (%u) is more than the specified "
			"maximum to use (%u).\n",
			allocated_size, fdt_max_size);
		return false;
	}

	/* Load the FDT to the end of the VM's allocated memory space. */
	*fdt_addr = pa_init(pa_addr(pa_sub(end, allocated_size)));

	dlog_info("Loading secondary FDT of allocated size %u at 0x%x.\n",
		  allocated_size, pa_addr(*fdt_addr));

	if (!copy_to_unmapped(stage1_locked, *fdt_addr, &fdt, ppool)) {
		dlog_error("Unable to copy FDT.\n");
		return false;
	}

	if (fdt_allocated_size) {
		*fdt_allocated_size = allocated_size;
	}

	return true;
}

/*
 * Loads a secondary VM.
 */
static bool load_secondary(struct mm_stage1_locked stage1_locked,
			   struct vm_locked primary_vm_locked,
			   paddr_t mem_begin, paddr_t mem_end,
			   const struct manifest_vm *manifest_vm,
			   const struct memiter *cpio, struct mpool *ppool)
{
	struct vm *vm;
	struct vm_locked vm_locked;
	struct vcpu_locked vcpu_locked;
	struct vcpu *vcpu;
	ipaddr_t secondary_entry;
	bool ret;
	paddr_t fdt_addr;
	bool has_fdt;
	size_t kernel_size = 0;
	const size_t mem_size = pa_difference(mem_begin, mem_end);
	uint32_t map_mode;

	/*
	 * Load the kernel if a filename is specified in the VM manifest.
	 * For an FF-A partition, kernel_filename is undefined, indicating
	 * the partition package has already been loaded prior to Hafnium
	 * booting.
	 */
	if (!string_is_empty(&manifest_vm->kernel_filename)) {
		if (!load_kernel(stage1_locked, mem_begin, mem_end,
				 manifest_vm, cpio, ppool, &kernel_size)) {
			dlog_error("Unable to load kernel.\n");
			return false;
		}
	}

	has_fdt = !string_is_empty(&manifest_vm->secondary.fdt_filename);
	if (has_fdt) {
		/*
		 * Ensure that the FDT does not overwrite the kernel or
		 * overlap the page the kernel ends in, so that the FDT can
		 * start at a page boundary.
		 */
		const size_t fdt_max_size =
			mem_size - align_up(kernel_size, PAGE_SIZE);

		size_t fdt_allocated_size;

		if (!load_secondary_fdt(stage1_locked, mem_end, fdt_max_size,
					manifest_vm, cpio, ppool, &fdt_addr,
					&fdt_allocated_size)) {
			dlog_error("Unable to load FDT.\n");
			return false;
		}

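		/*
		 * For an FF-A partition, the loaded FDT is expected to carry
		 * the partition manifest; parse it now that it is in place.
		 */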
		if (manifest_vm->is_ffa_partition) {
			plat_ffa_parse_partition_manifest(
				stage1_locked, fdt_addr, fdt_allocated_size,
				manifest_vm, ppool);
		}

		if (!fdt_patch_mem(stage1_locked, fdt_addr, fdt_allocated_size,
				   mem_begin, mem_end, ppool)) {
			dlog_error("Unable to patch FDT.\n");
			return false;
		}
	}
	/*
	 * An S-EL0 partition must contain only 1 vCPU (UP migratable) per the
	 * FF-A 1.0 spec.
	 */
	CHECK(manifest_vm->partition.run_time_el != S_EL0 ||
	      manifest_vm->secondary.vcpu_count == 1);

	if (!vm_init_next(manifest_vm->secondary.vcpu_count, ppool, &vm,
			  (manifest_vm->partition.run_time_el == S_EL0))) {
		dlog_error("Unable to initialise VM.\n");
		return false;
	}

	vm_locked = vm_lock(vm);

	/*
	 * Grant the VM access to the memory. For VMs, we mark all memory in
	 * the stage-2 tables as RWX and the VM can control permissions using
	 * stage-1 translations. For S-EL0 partitions, Hafnium maps the entire
	 * region of memory for the partition as RX. The partition is then
	 * expected to perform its own relocations and call the FFA_MEM_PERM_*
	 * APIs to change permissions on its image layout.
	 */
	if (vm->el0_partition) {
		map_mode = MM_MODE_R | MM_MODE_X | MM_MODE_USER | MM_MODE_NG;
	} else {
		map_mode = MM_MODE_R | MM_MODE_W | MM_MODE_X;
	}

	if (!vm_identity_map(vm_locked, mem_begin, mem_end, map_mode, ppool,
			     &secondary_entry)) {
		dlog_error("Unable to initialise memory.\n");
		ret = false;
		goto out;
	}

	if (manifest_vm->is_ffa_partition) {
		int j = 0;
		paddr_t region_begin;
		paddr_t region_end;
		paddr_t alloc_base = mem_end;
		size_t size;
		size_t total_alloc = 0;

		/* Map memory regions. */
		while (j < manifest_vm->partition.mem_region_count) {
			size = manifest_vm->partition.mem_regions[j]
				       .page_count *
			       PAGE_SIZE;
			/*
			 * Memory regions without a base address must be
			 * allocated inside the partition's page table. Such
			 * allocations start from the end of the partition's
			 * memory space and grow downwards.
			 * TODO: Add a mechanism to let the partition know of
			 * these memory regions.
			 */
			if (manifest_vm->partition.mem_regions[j]
				    .base_address == MANIFEST_INVALID_ADDRESS) {
				total_alloc += size;
				/*
				 * Don't go beyond half the VM's memory space.
				 */
				if (total_alloc >
				    (manifest_vm->secondary.mem_size / 2)) {
					dlog_error(
						"Not enough space for memory-"
						"region allocation.\n");
					ret = false;
					goto out;
				}

				region_end = alloc_base;
				region_begin = pa_subtract(alloc_base, size);
				alloc_base = region_begin;

				map_mode =
					manifest_vm->partition.mem_regions[j]
						.attributes;
				if (vm->el0_partition) {
					map_mode |= MM_MODE_USER | MM_MODE_NG;
				}

				if (!vm_identity_map(vm_locked, region_begin,
						     region_end, map_mode,
						     ppool, NULL)) {
					dlog_error(
						"Unable to map secondary VM "
						"memory-region.\n");
					ret = false;
					goto out;
				}

				dlog_verbose(
					"  Memory region %#x - %#x "
					"allocated\n",
					region_begin, region_end);
			} else {
				/*
				 * Identity map the memory region in both
				 * cases, VA (S-EL0) or IPA (S-EL1).
				 */
				region_begin = pa_init(
					manifest_vm->partition.mem_regions[j]
						.base_address);
				region_end = pa_add(region_begin, size);

				map_mode =
					manifest_vm->partition.mem_regions[j]
						.attributes;
				if (vm->el0_partition) {
					map_mode |= MM_MODE_USER | MM_MODE_NG;
				}

				if (!vm_identity_map(vm_locked, region_begin,
						     region_end, map_mode,
						     ppool, NULL)) {
					dlog_error(
						"Unable to map secondary VM "
						"memory-region.\n");
					ret = false;
					goto out;
				}
			}

			/* Deny the primary VM access to this memory. */
			if (!vm_unmap(primary_vm_locked, region_begin,
				      region_end, ppool)) {
				dlog_error(
					"Unable to unmap secondary VM memory-"
					"region from primary VM.\n");
				ret = false;
				goto out;
			}

			j++;
		}

		/* Map device regions. */
		j = 0;
		while (j < manifest_vm->partition.dev_region_count) {
			region_begin =
				pa_init(manifest_vm->partition.dev_regions[j]
						.base_address);
			size = manifest_vm->partition.dev_regions[j]
				       .page_count *
			       PAGE_SIZE;
			region_end = pa_add(region_begin, size);

			map_mode = manifest_vm->partition.dev_regions[j]
					   .attributes;
			if (vm->el0_partition) {
				map_mode |= MM_MODE_USER | MM_MODE_NG;
			}

			if (!vm_identity_map(vm_locked, region_begin,
					     region_end, map_mode, ppool,
					     NULL)) {
				dlog_error(
					"Unable to map secondary VM "
					"device-region.\n");
				ret = false;
				goto out;
			}
			/* Deny the primary VM access to this region. */
			if (!vm_unmap(primary_vm_locked, region_begin,
				      region_end, ppool)) {
				dlog_error(
					"Unable to unmap secondary VM device-"
					"region from primary VM.\n");
				ret = false;
				goto out;
			}
			j++;
		}

		secondary_entry = ipa_add(secondary_entry,
					  manifest_vm->partition.ep_offset);
	}

	/*
	 * Map the hypervisor into the VM's page table. The hypervisor pages
	 * will not be accessible from EL0 since they are not marked for user
	 * access.
	 * TODO: Map only the exception vectors and the data they require,
	 * rather than the entire hypervisor, to reduce exposure to
	 * speculative side-channel attacks.
	 */
	if (vm->el0_partition) {
		CHECK(vm_identity_map(vm_locked, layout_text_begin(),
				      layout_text_end(), MM_MODE_X, ppool,
				      NULL));

		CHECK(vm_identity_map(vm_locked, layout_rodata_begin(),
				      layout_rodata_end(), MM_MODE_R, ppool,
				      NULL));

		CHECK(vm_identity_map(vm_locked, layout_data_begin(),
				      layout_data_end(), MM_MODE_R | MM_MODE_W,
				      ppool, NULL));

		CHECK(arch_stack_mm_init(mm_lock_ptable_unsafe(&vm->ptable),
					 ppool));

		plat_console_mm_init(mm_lock_ptable_unsafe(&vm->ptable),
				     ppool);
	}

	if (!load_common(stage1_locked, vm_locked, manifest_vm, ppool)) {
		ret = false;
		goto out;
	}

	dlog_info("Loaded with %u vCPUs, entry at %#x.\n",
		  manifest_vm->secondary.vcpu_count, pa_addr(mem_begin));

	vcpu = vm_get_vcpu(vm, 0);

	vcpu_locked = vcpu_lock(vcpu);
	if (has_fdt) {
		vcpu_secondary_reset_and_start(vcpu_locked, secondary_entry,
					       pa_addr(fdt_addr));
	} else {
		/*
		 * Without an FDT, secondary VMs expect the memory size to be
		 * passed in register x0, which is what
		 * vcpu_secondary_reset_and_start does in this case.
		 */
		vcpu_secondary_reset_and_start(vcpu_locked, secondary_entry,
					       mem_size);
	}

	vcpu_unlock(&vcpu_locked);

	ret = true;

out:
	vm_unlock(&vm_locked);

	return ret;
}

/**
 * Tries to find a memory range of the given size within the given ranges, and
 * removes it from them. Returns true on success, or false if no large enough
 * contiguous range is found.
 */
static bool carve_out_mem_range(struct mem_range *mem_ranges,
				size_t mem_ranges_count, uint64_t size_to_find,
				paddr_t *found_begin, paddr_t *found_end)
{
	size_t i;

	/*
	 * TODO(b/116191358): Consider being cleverer about how we pack VMs
	 * together, with a non-greedy algorithm.
	 */
	for (i = 0; i < mem_ranges_count; ++i) {
		if (size_to_find <=
		    pa_difference(mem_ranges[i].begin, mem_ranges[i].end)) {
			/*
			 * This range is big enough, take some of it from the
			 * end and reduce its size accordingly.
			 */
			*found_end = mem_ranges[i].end;
			*found_begin = pa_init(pa_addr(mem_ranges[i].end) -
					       size_to_find);
			mem_ranges[i].end = *found_begin;
			return true;
		}
	}
	return false;
}

/**
 * Given arrays of memory ranges before and after memory was removed for
 * secondary VMs, adds the difference to the reserved ranges of the given
 * update. Returns true on success, or false if there would be more than
 * MAX_MEM_RANGES reserved ranges after adding the new ones.
 * `before` and `after` must be arrays of exactly `mem_ranges_count` elements.
 */
static bool update_reserved_ranges(struct boot_params_update *update,
				   const struct mem_range *before,
				   const struct mem_range *after,
				   size_t mem_ranges_count)
{
	size_t i;

	for (i = 0; i < mem_ranges_count; ++i) {
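		/* Memory was removed from the start of this range. */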
		if (pa_addr(after[i].begin) > pa_addr(before[i].begin)) {
			if (update->reserved_ranges_count >= MAX_MEM_RANGES) {
				dlog_error(
					"Too many reserved ranges after "
					"loading secondary VMs.\n");
				return false;
			}
			update->reserved_ranges[update->reserved_ranges_count]
				.begin = before[i].begin;
			update->reserved_ranges[update->reserved_ranges_count]
				.end = after[i].begin;
			update->reserved_ranges_count++;
		}
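		/* Memory was removed from the end of this range. */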
		if (pa_addr(after[i].end) < pa_addr(before[i].end)) {
			if (update->reserved_ranges_count >= MAX_MEM_RANGES) {
				dlog_error(
					"Too many reserved ranges after "
					"loading secondary VMs.\n");
				return false;
			}
			update->reserved_ranges[update->reserved_ranges_count]
				.begin = after[i].end;
			update->reserved_ranges[update->reserved_ranges_count]
				.end = before[i].end;
			update->reserved_ranges_count++;
		}
	}

	return true;
}

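/**
 * Initialises the VM representing the other world and assigns its vCPUs to
 * the physical CPUs.
 */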
static bool init_other_world_vm(struct mpool *ppool)
{
	struct vm *other_world_vm;
	size_t i;

	/*
	 * Initialise the dummy VM which represents the opposite world:
	 * - TrustZone (or the SPMC) when running the Hypervisor;
	 * - the Hypervisor when running TZ/SPMC.
	 */
	other_world_vm = vm_init(HF_OTHER_WORLD_ID, MAX_CPUS, ppool, false);
	CHECK(other_world_vm != NULL);

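	/* Assign each of the other world's vCPUs to its physical CPU. */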
	for (i = 0; i < MAX_CPUS; i++) {
		struct vcpu *vcpu = vm_get_vcpu(other_world_vm, i);
		struct cpu *cpu = cpu_find_index(i);

		vcpu->cpu = cpu;
	}

	return arch_other_world_vm_init(other_world_vm, ppool);
}

/*
 * Loads all VMs from the manifest.
 */
bool load_vms(struct mm_stage1_locked stage1_locked,
	      const struct manifest *manifest, const struct memiter *cpio,
	      const struct boot_params *params,
	      struct boot_params_update *update, struct mpool *ppool)
{
	struct vm *primary;
	struct mem_range mem_ranges_available[MAX_MEM_RANGES];
	struct vm_locked primary_vm_locked;
	size_t i;
	bool success = true;

	/*
	 * Only try to load the primary VM if it is supposed to be in this
	 * world.
	 */
	if (vm_id_is_current_world(HF_PRIMARY_VM_ID)) {
		if (!load_primary(stage1_locked,
				  &manifest->vm[HF_PRIMARY_VM_INDEX], cpio,
				  params, ppool)) {
			dlog_error("Unable to load primary VM.\n");
			return false;
		}
	}

	if (!init_other_world_vm(ppool)) {
		return false;
	}

	static_assert(
		sizeof(mem_ranges_available) == sizeof(params->mem_ranges),
		"mem_range arrays must be the same size for memcpy.");
	static_assert(sizeof(mem_ranges_available) < 500,
		      "This will use too much stack, either make "
		      "MAX_MEM_RANGES smaller or change this.");
	memcpy_s(mem_ranges_available, sizeof(mem_ranges_available),
		 params->mem_ranges, sizeof(params->mem_ranges));

	/* Round the last addresses down to the page size. */
	for (i = 0; i < params->mem_ranges_count; ++i) {
		mem_ranges_available[i].end = pa_init(align_down(
			pa_addr(mem_ranges_available[i].end), PAGE_SIZE));
	}

	primary = vm_find(HF_PRIMARY_VM_ID);
	primary_vm_locked = vm_lock(primary);

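	/* Load each secondary VM described in the manifest. */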
	for (i = 0; i < manifest->vm_count; ++i) {
		const struct manifest_vm *manifest_vm = &manifest->vm[i];
		ffa_vm_id_t vm_id = HF_VM_ID_OFFSET + i;
		uint64_t mem_size;
		paddr_t secondary_mem_begin;
		paddr_t secondary_mem_end;

		if (vm_id == HF_PRIMARY_VM_ID) {
			continue;
		}

		dlog_info("Loading VM id %#x: %s.\n", vm_id,
			  manifest_vm->debug_name);

		mem_size = align_up(manifest_vm->secondary.mem_size,
				    PAGE_SIZE);

		if (manifest_vm->is_ffa_partition &&
		    !manifest->vm[i].is_hyp_loaded) {
			secondary_mem_begin =
				pa_init(manifest_vm->partition.load_addr);
			secondary_mem_end = pa_init(
				manifest_vm->partition.load_addr + mem_size);
		} else if (!carve_out_mem_range(mem_ranges_available,
						params->mem_ranges_count,
						mem_size, &secondary_mem_begin,
						&secondary_mem_end)) {
			dlog_error("Not enough memory (%u bytes).\n",
				   mem_size);
			continue;
		}

		if (!load_secondary(stage1_locked, primary_vm_locked,
				    secondary_mem_begin, secondary_mem_end,
				    manifest_vm, cpio, ppool)) {
			dlog_error("Unable to load VM.\n");
			continue;
		}

		/* Deny the primary VM access to this memory. */
		if (!vm_unmap(primary_vm_locked, secondary_mem_begin,
			      secondary_mem_end, ppool)) {
			dlog_error(
				"Unable to unmap secondary VM from primary "
				"VM.\n");
			success = false;
			break;
		}
	}

	vm_unlock(&primary_vm_locked);

	if (!success) {
		return false;
	}

	/*
	 * Add the newly reserved areas to the update params by looking at the
	 * difference between the available ranges from the original params
	 * and the updated mem_ranges_available. We assume that the number and
	 * order of available ranges is the same, i.e. we don't remove any
	 * ranges above, we only make them smaller.
	 */
	return update_reserved_ranges(update, params->mem_ranges,
				      mem_ranges_available,
				      params->mem_ranges_count);
}