/*
 * Copyright 2018 The Hafnium Authors.
 *
 * Use of this source code is governed by a BSD-style
 * license that can be found in the LICENSE file or at
 * https://opensource.org/licenses/BSD-3-Clause.
 */

#include "hf/load.h"

#include <stdbool.h>

#include "hf/arch/other_world.h"
#include "hf/arch/plat/ffa.h"
#include "hf/arch/vm.h"

#include "hf/api.h"
#include "hf/boot_params.h"
#include "hf/check.h"
#include "hf/dlog.h"
#include "hf/fdt_patch.h"
#include "hf/layout.h"
#include "hf/memiter.h"
#include "hf/mm.h"
#include "hf/plat/console.h"
#include "hf/plat/interrupts.h"
#include "hf/plat/iommu.h"
#include "hf/static_assert.h"
#include "hf/std.h"
#include "hf/vm.h"

#include "vmapi/hf/call.h"
#include "vmapi/hf/ffa.h"

/**
 * Copies data to an unmapped location by mapping it for write, copying the
 * data, then unmapping it.
 *
 * The data is written so that it is available to all cores with the cache
 * disabled. When switching to the partitions, the caching is initially
 * disabled so the data must be available without the cache.
 */
static bool copy_to_unmapped(struct mm_stage1_locked stage1_locked, paddr_t to,
                             struct memiter *from_it, struct mpool *ppool)
{
        const void *from = memiter_base(from_it);
        size_t size = memiter_size(from_it);
        paddr_t to_end = pa_add(to, size);
        void *ptr;

        ptr = mm_identity_map(stage1_locked, to, to_end, MM_MODE_W, ppool);
        if (!ptr) {
                return false;
        }

        memcpy_s(ptr, size, from, size);
        arch_mm_flush_dcache(ptr, size);

        CHECK(mm_unmap(stage1_locked, to, to_end, ppool));

        return true;
}

/**
 * Loads the secondary VM's kernel.
 * Stores the kernel size in kernel_size (if kernel_size is not NULL).
 * Returns false if it cannot load the kernel.
 */
static bool load_kernel(struct mm_stage1_locked stage1_locked, paddr_t begin,
                        paddr_t end, const struct manifest_vm *manifest_vm,
                        const struct memiter *cpio, struct mpool *ppool,
                        size_t *kernel_size)
{
        struct memiter kernel;
        size_t size;

        if (!cpio_get_file(cpio, &manifest_vm->kernel_filename, &kernel)) {
                dlog_error("Could not find kernel file \"%s\".\n",
                           string_data(&manifest_vm->kernel_filename));
                return false;
        }

        size = memiter_size(&kernel);
        if (pa_difference(begin, end) < size) {
                dlog_error("Kernel is larger than available memory.\n");
                return false;
        }

        if (!copy_to_unmapped(stage1_locked, begin, &kernel, ppool)) {
                dlog_error("Unable to copy kernel.\n");
                return false;
        }

        if (kernel_size) {
                *kernel_size = size;
        }

        return true;
}

/**
 * Links the RX/TX buffers provided in the partition manifest to the VM's
 * mailbox.
 */
static bool link_rxtx_to_mailbox(struct mm_stage1_locked stage1_locked,
                                 struct vm_locked vm_locked, struct rx_tx rxtx,
                                 struct mpool *ppool)
{
        struct ffa_value ret;
        ipaddr_t send;
        ipaddr_t recv;
        uint32_t page_count;

        send = ipa_init(rxtx.tx_buffer->base_address);
        recv = ipa_init(rxtx.rx_buffer->base_address);
        page_count = rxtx.tx_buffer->page_count;

        ret = api_vm_configure_pages(stage1_locked, vm_locked, send, recv,
                                     page_count, ppool);
        if (ret.func != FFA_SUCCESS_32) {
                return false;
        }

        dlog_verbose(" mailbox: send = %#x, recv = %#x\n",
                     vm_locked.vm->mailbox.send, vm_locked.vm->mailbox.recv);

        return true;
}

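/**
 * Builds an interrupt descriptor from an interrupt declared in a partition
 * manifest: copies the interrupt ID, unpacks the priority, type, configuration
 * and security state from the packed attributes field, and marks the
 * descriptor as valid.
 */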
static void infer_interrupt(struct interrupt interrupt,
                            struct interrupt_descriptor *int_desc)
{
        uint32_t attr = interrupt.attributes;

        interrupt_desc_set_id(int_desc, interrupt.id);
        interrupt_desc_set_priority(int_desc,
                                    (attr >> INT_DESC_PRIORITY_SHIFT) & 0xff);

        /* Refer to the comments in interrupt_descriptor struct definition. */
        interrupt_desc_set_type_config_sec_state(
                int_desc,
                (((attr >> INT_DESC_TYPE_SHIFT) & 0x3) << 2) |
                        (((attr >> INT_DESC_CONFIG_SHIFT) & 0x1) << 1) |
                        ((attr >> INT_DESC_SEC_STATE_SHIFT) & 0x1));

        interrupt_desc_set_valid(int_desc, true);
}

/**
 * Performs VM loading activities that are common between the primary and
 * secondaries.
 */
static bool load_common(struct mm_stage1_locked stage1_locked,
                        struct vm_locked vm_locked,
                        const struct manifest_vm *manifest_vm,
                        struct mpool *ppool)
{
        struct device_region dev_region;
        struct interrupt interrupt;
        uint32_t k = 0;

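        /* Copy the static properties from the manifest entry into the VM. */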
        vm_locked.vm->smc_whitelist = manifest_vm->smc_whitelist;
        vm_locked.vm->uuid = manifest_vm->partition.uuid;

        /* Populate the interrupt descriptors for the current VM. */
        for (uint8_t i = 0; i < SP_MAX_DEVICE_REGIONS; i++) {
                dev_region = manifest_vm->partition.dev_regions[i];

                CHECK(dev_region.interrupt_count <=
                      SP_MAX_INTERRUPTS_PER_DEVICE);

                for (uint8_t j = 0; j < dev_region.interrupt_count; j++) {
                        struct interrupt_descriptor int_desc;

                        interrupt = dev_region.interrupts[j];
                        infer_interrupt(interrupt, &int_desc);
                        vm_locked.vm->interrupt_desc[k] = int_desc;

                        /*
                         * Configure the physical interrupts allocated to this
                         * VM in its partition manifest.
                         */
                        plat_interrupts_configure_interrupt(int_desc);
                        k++;
                        CHECK(k <= VM_MANIFEST_MAX_INTERRUPTS);
                }
        }
        dlog_verbose("VM has %d physical interrupts defined in manifest.\n", k);

        if (manifest_vm->is_ffa_partition) {
                struct ffa_value bitmap_create_res;

                /* Link the RX/TX buffers to the mailbox. */
                if (manifest_vm->partition.rxtx.available) {
                        if (!link_rxtx_to_mailbox(stage1_locked, vm_locked,
                                                  manifest_vm->partition.rxtx,
                                                  ppool)) {
                                dlog_error(
                                        "Unable to link RX/TX buffer with "
                                        "mailbox.\n");
                                return false;
                        }
                }

                vm_locked.vm->messaging_method =
                        manifest_vm->partition.messaging_method;

                vm_locked.vm->managed_exit =
                        manifest_vm->partition.managed_exit;

                vm_locked.vm->boot_order = manifest_vm->partition.boot_order;

                /* Update the boot list according to boot_order. */
                vm_update_boot(vm_locked.vm);

                /* TODO: Enable in accordance with the VM's manifest. */
                vm_locked.vm->notifications.enabled = true;

                /* TODO: Check whether notifications are enabled for the VM. */
                if (plat_ffa_notifications_bitmap_create_call(
                            vm_locked.vm->id, vm_locked.vm->vcpu_count,
                            &bitmap_create_res)) {
                        if (bitmap_create_res.func == FFA_ERROR_32) {
                                dlog_verbose(
                                        "Failed to create notifications bitmap "
                                        "for VM: %#x; error: %#x.\n",
                                        vm_locked.vm->id,
                                        ffa_error_code(bitmap_create_res));
                                return false;
                        }
                }
        }

        /* Initialize architecture-specific features. */
        arch_vm_features_set(vm_locked.vm);

        if (!plat_iommu_attach_peripheral(stage1_locked, vm_locked, manifest_vm,
                                          ppool)) {
                dlog_error("Unable to attach upstream peripheral device.\n");
                return false;
        }

        return true;
}

/**
 * Loads the primary VM.
 */
static bool load_primary(struct mm_stage1_locked stage1_locked,
                         const struct manifest_vm *manifest_vm,
                         const struct memiter *cpio,
                         const struct boot_params *params, struct mpool *ppool)
{
        paddr_t primary_begin;
        ipaddr_t primary_entry;
        struct vm *vm;
        struct vm_locked vm_locked;
        struct vcpu_locked vcpu_locked;
        size_t i;
        bool ret;

        if (manifest_vm->is_ffa_partition) {
                primary_begin = pa_init(manifest_vm->partition.load_addr);
                primary_entry = ipa_add(ipa_from_pa(primary_begin),
                                        manifest_vm->partition.ep_offset);
        } else {
                primary_begin =
                        (manifest_vm->primary.boot_address ==
                         MANIFEST_INVALID_ADDRESS)
                                ? layout_primary_begin()
                                : pa_init(manifest_vm->primary.boot_address);
                primary_entry = ipa_from_pa(primary_begin);
        }

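        /*
         * Bound the loadable image by RSIZE_MAX, the largest size that
         * memcpy_s (used by copy_to_unmapped) will copy.
         */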
        paddr_t primary_end = pa_add(primary_begin, RSIZE_MAX);

        /* The primary VM must be a VM (i.e. run at EL1). */
        CHECK(manifest_vm->partition.run_time_el == EL1);

        /*
         * Load the kernel if a filename is specified in the VM manifest.
         * For an FF-A partition, kernel_filename is undefined, indicating
         * the partition package has already been loaded prior to Hafnium
         * booting.
         */
        if (!string_is_empty(&manifest_vm->kernel_filename)) {
                if (!load_kernel(stage1_locked, primary_begin, primary_end,
                                 manifest_vm, cpio, ppool, NULL)) {
                        dlog_error("Unable to load primary kernel.\n");
                        return false;
                }
        }

        if (!vm_init_next(MAX_CPUS, ppool, &vm, false)) {
                dlog_error("Unable to initialise primary VM.\n");
                return false;
        }

        if (vm->id != HF_PRIMARY_VM_ID) {
                dlog_error("Primary VM was not given the correct ID.\n");
                return false;
        }

        vm_locked = vm_lock(vm);

        if (params->device_mem_ranges_count == 0) {
                /*
                 * Map 1TB of address space as device memory to, most likely,
                 * make all devices available to the primary VM.
                 *
                 * TODO: remove this once all targets provide valid ranges.
                 */
                dlog_warning(
                        "Device memory not provided, defaulting to 1 TB.\n");

                if (!vm_identity_map(
                            vm_locked, pa_init(0),
                            pa_init(UINT64_C(1024) * 1024 * 1024 * 1024),
                            MM_MODE_R | MM_MODE_W | MM_MODE_D, ppool, NULL)) {
                        dlog_error(
                                "Unable to initialise address space for "
                                "primary VM.\n");
                        ret = false;
                        goto out;
                }
        }

        /* Map normal memory as such to permit caching, execution, etc. */
        for (i = 0; i < params->mem_ranges_count; ++i) {
                if (!vm_identity_map(vm_locked, params->mem_ranges[i].begin,
                                     params->mem_ranges[i].end,
                                     MM_MODE_R | MM_MODE_W | MM_MODE_X, ppool,
                                     NULL)) {
                        dlog_error(
                                "Unable to initialise memory for primary "
                                "VM.\n");
                        ret = false;
                        goto out;
                }
        }

        /* Map device memory as such to prevent execution, speculation etc. */
        for (i = 0; i < params->device_mem_ranges_count; ++i) {
                if (!vm_identity_map(
                            vm_locked, params->device_mem_ranges[i].begin,
                            params->device_mem_ranges[i].end,
                            MM_MODE_R | MM_MODE_W | MM_MODE_D, ppool, NULL)) {
                        dlog_error(
                                "Unable to initialise device memory for "
                                "primary VM.\n");
                        ret = false;
                        goto out;
                }
        }

        if (!load_common(stage1_locked, vm_locked, manifest_vm, ppool)) {
                ret = false;
                goto out;
        }

        if (!vm_unmap_hypervisor(vm_locked, ppool)) {
                dlog_error("Unable to unmap hypervisor from primary VM.\n");
                ret = false;
                goto out;
        }

        if (!plat_iommu_unmap_iommus(vm_locked, ppool)) {
                dlog_error("Unable to unmap IOMMUs from primary VM.\n");
                ret = false;
                goto out;
        }

        dlog_info("Loaded primary VM with %u vCPUs, entry at %#x.\n",
                  vm->vcpu_count, pa_addr(primary_begin));

        /* Mark the primary as the first VM to boot. */
        vm_update_boot(vm);

        vcpu_locked = vcpu_lock(vm_get_vcpu(vm, 0));
        vcpu_on(vcpu_locked, primary_entry, params->kernel_arg);
        vcpu_unlock(&vcpu_locked);
        ret = true;

out:
        vm_unlock(&vm_locked);

        return ret;
}

/**
 * Loads the secondary VM's FDT.
 * Stores the total allocated size for the FDT in fdt_allocated_size (if
 * fdt_allocated_size is not NULL). The allocated size includes additional
 * space for potential patching.
 */
static bool load_secondary_fdt(struct mm_stage1_locked stage1_locked,
                               paddr_t end, size_t fdt_max_size,
                               const struct manifest_vm *manifest_vm,
                               const struct memiter *cpio, struct mpool *ppool,
                               paddr_t *fdt_addr, size_t *fdt_allocated_size)
{
        struct memiter fdt;
        size_t allocated_size;

        CHECK(!string_is_empty(&manifest_vm->secondary.fdt_filename));

        if (!cpio_get_file(cpio, &manifest_vm->secondary.fdt_filename, &fdt)) {
                dlog_error("Cannot open the secondary VM's FDT.\n");
                return false;
        }

        /*
         * Ensure the FDT has one additional page at the end for patching, and
         * align it to the page boundary.
         */
        allocated_size = align_up(memiter_size(&fdt), PAGE_SIZE) + PAGE_SIZE;

        if (allocated_size > fdt_max_size) {
                dlog_error(
                        "FDT allocated space (%u) is more than the specified "
                        "maximum to use (%u).\n",
                        allocated_size, fdt_max_size);
                return false;
        }

        /* Load the FDT to the end of the VM's allocated memory space. */
        *fdt_addr = pa_init(pa_addr(pa_sub(end, allocated_size)));

        dlog_info("Loading secondary FDT of allocated size %u at 0x%x.\n",
                  allocated_size, pa_addr(*fdt_addr));

        if (!copy_to_unmapped(stage1_locked, *fdt_addr, &fdt, ppool)) {
                dlog_error("Unable to copy FDT.\n");
                return false;
        }

        if (fdt_allocated_size) {
                *fdt_allocated_size = allocated_size;
        }

        return true;
}

/**
 * Loads a secondary VM.
 */
static bool load_secondary(struct mm_stage1_locked stage1_locked,
                           struct vm_locked primary_vm_locked,
                           paddr_t mem_begin, paddr_t mem_end,
                           const struct manifest_vm *manifest_vm,
                           const struct memiter *cpio, struct mpool *ppool)
{
        struct vm *vm;
        struct vm_locked vm_locked;
        struct vcpu_locked vcpu_locked;
        struct vcpu *vcpu;
        ipaddr_t secondary_entry;
        bool ret;
        paddr_t fdt_addr;
        bool has_fdt;
        size_t kernel_size = 0;
        const size_t mem_size = pa_difference(mem_begin, mem_end);
        uint32_t map_mode;

        /*
         * Load the kernel if a filename is specified in the VM manifest.
         * For an FF-A partition, kernel_filename is undefined, indicating
         * the partition package has already been loaded prior to Hafnium
         * booting.
         */
        if (!string_is_empty(&manifest_vm->kernel_filename)) {
                if (!load_kernel(stage1_locked, mem_begin, mem_end, manifest_vm,
                                 cpio, ppool, &kernel_size)) {
                        dlog_error("Unable to load kernel.\n");
                        return false;
                }
        }

        has_fdt = !string_is_empty(&manifest_vm->secondary.fdt_filename);
        if (has_fdt) {
                /*
                 * Ensure that the FDT does not overwrite the kernel or share
                 * one of its pages, and that it starts at a page boundary.
                 */
                const size_t fdt_max_size =
                        mem_size - align_up(kernel_size, PAGE_SIZE);

                size_t fdt_allocated_size;

                if (!load_secondary_fdt(stage1_locked, mem_end, fdt_max_size,
                                        manifest_vm, cpio, ppool, &fdt_addr,
                                        &fdt_allocated_size)) {
                        dlog_error("Unable to load FDT.\n");
                        return false;
                }

                if (!fdt_patch_mem(stage1_locked, fdt_addr, fdt_allocated_size,
                                   mem_begin, mem_end, ppool)) {
                        dlog_error("Unable to patch FDT.\n");
                        return false;
                }
        }
        /*
         * An S-EL0 partition must contain only 1 vCPU (UP migratable) per the
         * FF-A 1.0 spec.
         */
        CHECK(manifest_vm->partition.run_time_el != S_EL0 ||
              manifest_vm->secondary.vcpu_count == 1);

        if (!vm_init_next(manifest_vm->secondary.vcpu_count, ppool, &vm,
                          (manifest_vm->partition.run_time_el == S_EL0))) {
                dlog_error("Unable to initialise VM.\n");
                return false;
        }

        vm_locked = vm_lock(vm);

        /*
         * Grant the VM access to the memory. For VMs, we mark all memory in
         * the stage-2 tables as RWX and the VM can control permissions using
         * stage-1 translations. For S-EL0 partitions, Hafnium maps the entire
         * region of memory for the partition as RX. The partition is then
         * expected to perform its own relocations and call the FFA_MEM_PERM_*
         * APIs to change permissions on its image layout.
         */
        if (vm->el0_partition) {
                map_mode = MM_MODE_R | MM_MODE_X | MM_MODE_USER | MM_MODE_NG;
        } else {
                map_mode = MM_MODE_R | MM_MODE_W | MM_MODE_X;
        }

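        /*
         * vm_identity_map returns, via its last argument, the IPA at which
         * the region was mapped; this is used below as the base of the
         * partition's entry point.
         */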
        if (!vm_identity_map(vm_locked, mem_begin, mem_end, map_mode, ppool,
                             &secondary_entry)) {
                dlog_error("Unable to initialise memory.\n");
                ret = false;
                goto out;
        }

        if (manifest_vm->is_ffa_partition) {
                int j = 0;
                paddr_t region_begin;
                paddr_t region_end;
                paddr_t alloc_base = mem_end;
                size_t size;
                size_t total_alloc = 0;

                /* Map memory regions. */
                while (j < manifest_vm->partition.mem_region_count) {
                        size = manifest_vm->partition.mem_regions[j]
                                       .page_count *
                               PAGE_SIZE;
                        /*
                         * Memory regions without a base address are allocated
                         * inside the partition's page table, starting from the
                         * end of its memory space.
                         * TODO: Add a mechanism to let the partition know of
                         * these memory regions.
                         */
                        if (manifest_vm->partition.mem_regions[j]
                                    .base_address == MANIFEST_INVALID_ADDRESS) {
                                total_alloc += size;
                                /* Don't go beyond half the VM's memory space. */
                                if (total_alloc >
                                    (manifest_vm->secondary.mem_size / 2)) {
                                        dlog_error(
                                                "Not enough space for memory-"
                                                "region allocation.\n");
                                        ret = false;
                                        goto out;
                                }

                                region_end = alloc_base;
                                region_begin = pa_subtract(alloc_base, size);
                                alloc_base = region_begin;

                                map_mode = manifest_vm->partition.mem_regions[j]
                                                   .attributes;
                                if (vm->el0_partition) {
                                        map_mode |= MM_MODE_USER | MM_MODE_NG;
                                }

                                if (!vm_identity_map(vm_locked, region_begin,
                                                     region_end, map_mode,
                                                     ppool, NULL)) {
                                        dlog_error(
                                                "Unable to map secondary VM "
                                                "memory-region.\n");
                                        ret = false;
                                        goto out;
                                }

                                dlog_verbose(
                                        " Memory region %#x - %#x allocated\n",
                                        region_begin, region_end);
                        } else {
                                /*
                                 * Identity map the memory region in both
                                 * cases: VA (S-EL0) or IPA (S-EL1).
                                 */
                                region_begin = pa_init(
                                        manifest_vm->partition.mem_regions[j]
                                                .base_address);
                                region_end = pa_add(region_begin, size);

                                map_mode = manifest_vm->partition.mem_regions[j]
                                                   .attributes;
                                if (vm->el0_partition) {
                                        map_mode |= MM_MODE_USER | MM_MODE_NG;
                                }

                                if (!vm_identity_map(vm_locked, region_begin,
                                                     region_end, map_mode,
                                                     ppool, NULL)) {
                                        dlog_error(
                                                "Unable to map secondary VM "
                                                "memory-region.\n");
                                        ret = false;
                                        goto out;
                                }
                        }

                        /* Deny the primary VM access to this memory. */
                        if (!vm_unmap(primary_vm_locked, region_begin,
                                      region_end, ppool)) {
                                dlog_error(
                                        "Unable to unmap secondary VM memory-"
                                        "region from primary VM.\n");
                                ret = false;
                                goto out;
                        }

                        j++;
                }

                /* Map device regions. */
                j = 0;
                while (j < manifest_vm->partition.dev_region_count) {
                        region_begin =
                                pa_init(manifest_vm->partition.dev_regions[j]
                                                .base_address);
                        size = manifest_vm->partition.dev_regions[j]
                                       .page_count *
                               PAGE_SIZE;
                        region_end = pa_add(region_begin, size);

                        map_mode = manifest_vm->partition.dev_regions[j]
                                           .attributes;
                        if (vm->el0_partition) {
                                map_mode |= MM_MODE_USER | MM_MODE_NG;
                        }

                        if (!vm_identity_map(vm_locked, region_begin,
                                             region_end, map_mode, ppool,
                                             NULL)) {
                                dlog_error(
                                        "Unable to map secondary VM "
                                        "device-region.\n");
                                ret = false;
                                goto out;
                        }
                        /* Deny the primary VM access to this region. */
                        if (!vm_unmap(primary_vm_locked, region_begin,
                                      region_end, ppool)) {
                                dlog_error(
                                        "Unable to unmap secondary VM device-"
                                        "region from primary VM.\n");
                                ret = false;
                                goto out;
                        }
                        j++;
                }

                secondary_entry = ipa_add(secondary_entry,
                                          manifest_vm->partition.ep_offset);
        }

        /*
         * Map the hypervisor into the VM's page table. The hypervisor pages
         * will not be accessible from EL0 since they are not marked for user
         * access.
         * TODO: Map only the exception vectors and the data they require,
         * rather than the entire hypervisor; this would reduce exposure to
         * speculative side-channel attacks.
         */
        if (vm->el0_partition) {
                CHECK(vm_identity_map(vm_locked, layout_text_begin(),
                                      layout_text_end(), MM_MODE_X, ppool,
                                      NULL));

                CHECK(vm_identity_map(vm_locked, layout_rodata_begin(),
                                      layout_rodata_end(), MM_MODE_R, ppool,
                                      NULL));

                CHECK(vm_identity_map(vm_locked, layout_data_begin(),
                                      layout_data_end(), MM_MODE_R | MM_MODE_W,
                                      ppool, NULL));
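
                /* Initialise console mappings in the partition's page table. */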
                plat_console_mm_init(mm_lock_ptable_unsafe(&vm->ptable), ppool);
        }

        if (!load_common(stage1_locked, vm_locked, manifest_vm, ppool)) {
                ret = false;
                goto out;
        }

        dlog_info("Loaded with %u vCPUs, entry at %#x.\n",
                  manifest_vm->secondary.vcpu_count, pa_addr(mem_begin));

        vcpu = vm_get_vcpu(vm, 0);

        vcpu_locked = vcpu_lock(vcpu);
        if (has_fdt) {
                vcpu_secondary_reset_and_start(vcpu_locked, secondary_entry,
                                               pa_addr(fdt_addr));
        } else {
                /*
                 * Without an FDT, secondary VMs expect the memory size to be
                 * passed in register x0, which is what
                 * vcpu_secondary_reset_and_start does in this case.
                 */
                vcpu_secondary_reset_and_start(vcpu_locked, secondary_entry,
                                               mem_size);
        }

        vcpu_unlock(&vcpu_locked);

        ret = true;

out:
        vm_unlock(&vm_locked);

        return ret;
}

/**
 * Try to find a memory range of the given size within the given ranges, and
 * remove it from them. Return true on success, or false if no large enough
 * contiguous range is found.
 */
static bool carve_out_mem_range(struct mem_range *mem_ranges,
                                size_t mem_ranges_count, uint64_t size_to_find,
                                paddr_t *found_begin, paddr_t *found_end)
{
        size_t i;

        /*
         * TODO(b/116191358): Consider being cleverer about how we pack VMs
         * together, with a non-greedy algorithm.
         */
        for (i = 0; i < mem_ranges_count; ++i) {
                if (size_to_find <=
                    pa_difference(mem_ranges[i].begin, mem_ranges[i].end)) {
                        /*
                         * This range is big enough, take some of it from the
                         * end and reduce its size accordingly.
                         */
                        *found_end = mem_ranges[i].end;
                        *found_begin = pa_init(pa_addr(mem_ranges[i].end) -
                                               size_to_find);
                        mem_ranges[i].end = *found_begin;
                        return true;
                }
        }
        return false;
}

/**
 * Given arrays of memory ranges before and after memory was removed for
 * secondary VMs, add the difference to the reserved ranges of the given update.
 * Return true on success, or false if there would be more than MAX_MEM_RANGES
 * reserved ranges after adding the new ones.
 * `before` and `after` must be arrays of exactly `mem_ranges_count` elements.
 */
static bool update_reserved_ranges(struct boot_params_update *update,
                                   const struct mem_range *before,
                                   const struct mem_range *after,
                                   size_t mem_ranges_count)
{
        size_t i;

        for (i = 0; i < mem_ranges_count; ++i) {
                if (pa_addr(after[i].begin) > pa_addr(before[i].begin)) {
                        if (update->reserved_ranges_count >= MAX_MEM_RANGES) {
                                dlog_error(
                                        "Too many reserved ranges after "
                                        "loading secondary VMs.\n");
                                return false;
                        }
                        update->reserved_ranges[update->reserved_ranges_count]
                                .begin = before[i].begin;
                        update->reserved_ranges[update->reserved_ranges_count]
                                .end = after[i].begin;
                        update->reserved_ranges_count++;
                }
                if (pa_addr(after[i].end) < pa_addr(before[i].end)) {
                        if (update->reserved_ranges_count >= MAX_MEM_RANGES) {
                                dlog_error(
                                        "Too many reserved ranges after "
                                        "loading secondary VMs.\n");
                                return false;
                        }
                        update->reserved_ranges[update->reserved_ranges_count]
                                .begin = after[i].end;
                        update->reserved_ranges[update->reserved_ranges_count]
                                .end = before[i].end;
                        update->reserved_ranges_count++;
                }
        }

        return true;
}

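/**
 * Initialises the dummy VM representing the world opposite to the one Hafnium
 * runs in, and binds one of its vCPUs to each physical CPU.
 */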
static bool init_other_world_vm(struct mpool *ppool)
{
        struct vm *other_world_vm;
        size_t i;

        /*
         * Initialise the dummy VM which represents the opposite world:
         * - TrustZone (or the SPMC) when running the Hypervisor;
         * - the Hypervisor when running TZ/SPMC.
         */
        other_world_vm = vm_init(HF_OTHER_WORLD_ID, MAX_CPUS, ppool, false);
        CHECK(other_world_vm != NULL);

        for (i = 0; i < MAX_CPUS; i++) {
                struct vcpu *vcpu = vm_get_vcpu(other_world_vm, i);
                struct cpu *cpu = cpu_find_index(i);

                vcpu->cpu = cpu;
        }

        return arch_other_world_vm_init(other_world_vm, ppool);
}

/**
 * Loads all VMs from the manifest.
 */
bool load_vms(struct mm_stage1_locked stage1_locked,
              const struct manifest *manifest, const struct memiter *cpio,
              const struct boot_params *params,
              struct boot_params_update *update, struct mpool *ppool)
{
        struct vm *primary;
        struct mem_range mem_ranges_available[MAX_MEM_RANGES];
        struct vm_locked primary_vm_locked;
        size_t i;
        bool success = true;

        /*
         * Only try to load the primary VM if it is supposed to be in this
         * world.
         */
        if (vm_id_is_current_world(HF_PRIMARY_VM_ID)) {
                if (!load_primary(stage1_locked,
                                  &manifest->vm[HF_PRIMARY_VM_INDEX], cpio,
                                  params, ppool)) {
                        dlog_error("Unable to load primary VM.\n");
                        return false;
                }
        }

        if (!init_other_world_vm(ppool)) {
                return false;
        }

        static_assert(
                sizeof(mem_ranges_available) == sizeof(params->mem_ranges),
                "mem_range arrays must be the same size for memcpy.");
        static_assert(sizeof(mem_ranges_available) < 500,
                      "This will use too much stack, either make "
                      "MAX_MEM_RANGES smaller or change this.");
        memcpy_s(mem_ranges_available, sizeof(mem_ranges_available),
                 params->mem_ranges, sizeof(params->mem_ranges));

        /* Round the last addresses down to the page size. */
        for (i = 0; i < params->mem_ranges_count; ++i) {
                mem_ranges_available[i].end = pa_init(align_down(
                        pa_addr(mem_ranges_available[i].end), PAGE_SIZE));
        }

        primary = vm_find(HF_PRIMARY_VM_ID);
        primary_vm_locked = vm_lock(primary);

        for (i = 0; i < manifest->vm_count; ++i) {
                const struct manifest_vm *manifest_vm = &manifest->vm[i];
                ffa_vm_id_t vm_id = HF_VM_ID_OFFSET + i;
                uint64_t mem_size;
                paddr_t secondary_mem_begin;
                paddr_t secondary_mem_end;

                if (vm_id == HF_PRIMARY_VM_ID) {
                        continue;
                }

                dlog_info("Loading VM id %#x: %s.\n", vm_id,
                          manifest_vm->debug_name);

                mem_size = align_up(manifest_vm->secondary.mem_size, PAGE_SIZE);

                if (manifest_vm->is_ffa_partition) {
                        secondary_mem_begin =
                                pa_init(manifest_vm->partition.load_addr);
                        secondary_mem_end = pa_init(
                                manifest_vm->partition.load_addr + mem_size);
                } else if (!carve_out_mem_range(mem_ranges_available,
                                                params->mem_ranges_count,
                                                mem_size, &secondary_mem_begin,
                                                &secondary_mem_end)) {
                        dlog_error("Not enough memory (%u bytes).\n", mem_size);
                        continue;
                }

                if (!load_secondary(stage1_locked, primary_vm_locked,
                                    secondary_mem_begin, secondary_mem_end,
                                    manifest_vm, cpio, ppool)) {
                        dlog_error("Unable to load VM.\n");
                        continue;
                }

                /* Deny the primary VM access to this memory. */
                if (!vm_unmap(primary_vm_locked, secondary_mem_begin,
                              secondary_mem_end, ppool)) {
                        dlog_error(
                                "Unable to unmap secondary VM from primary "
                                "VM.\n");
                        success = false;
                        break;
                }
        }

        vm_unlock(&primary_vm_locked);

        if (!success) {
                return false;
        }

        /*
         * Add newly reserved areas to update params by looking at the
         * difference between the available ranges from the original params and
         * the updated mem_ranges_available. We assume that the number and
         * order of available ranges is the same, i.e. the code above does not
         * remove any ranges, it only makes them smaller.
         */
        return update_reserved_ranges(update, params->mem_ranges,
                                      mem_ranges_available,
                                      params->mem_ranges_count);
}