/*
 * Copyright 2018 The Hafnium Authors.
 *
 * Use of this source code is governed by a BSD-style
 * license that can be found in the LICENSE file or at
 * https://opensource.org/licenses/BSD-3-Clause.
 */

#include "hf/load.h"

#include <stdbool.h>

#include "hf/arch/init.h"
#include "hf/arch/other_world.h"
#include "hf/arch/plat/ffa.h"
#include "hf/arch/vm.h"

#include "hf/api.h"
#include "hf/boot_params.h"
#include "hf/check.h"
#include "hf/dlog.h"
#include "hf/fdt_patch.h"
#include "hf/layout.h"
#include "hf/manifest.h"
#include "hf/memiter.h"
#include "hf/mm.h"
#include "hf/plat/console.h"
#include "hf/plat/interrupts.h"
#include "hf/plat/iommu.h"
#include "hf/static_assert.h"
#include "hf/std.h"
#include "hf/vm.h"

#include "vmapi/hf/call.h"
#include "vmapi/hf/ffa.h"

/**
 * Copies data to an unmapped location by mapping it for write, copying the
 * data, then unmapping it.
 *
 * The data is written so that it is available to all cores with the cache
 * disabled. When switching to the partitions, caching is initially disabled,
 * so the data must be available without the cache.
 */
static bool copy_to_unmapped(struct mm_stage1_locked stage1_locked, paddr_t to,
			     struct memiter *from_it, struct mpool *ppool)
{
	const void *from = memiter_base(from_it);
	size_t size = memiter_size(from_it);
	paddr_t to_end = pa_add(to, size);
	void *ptr;

	ptr = mm_identity_map(stage1_locked, to, to_end, MM_MODE_W, ppool);
	if (!ptr) {
		return false;
	}

	memcpy_s(ptr, size, from, size);
	arch_mm_flush_dcache(ptr, size);

	CHECK(mm_unmap(stage1_locked, to, to_end, ppool));

	return true;
}

/**
 * Loads the secondary VM's kernel.
 * Stores the kernel size in kernel_size (if kernel_size is not NULL).
 * Returns false if it cannot load the kernel.
 */
static bool load_kernel(struct mm_stage1_locked stage1_locked, paddr_t begin,
			paddr_t end, const struct manifest_vm *manifest_vm,
			const struct memiter *cpio, struct mpool *ppool,
			size_t *kernel_size)
{
	struct memiter kernel;
	size_t size;

	if (!cpio_get_file(cpio, &manifest_vm->kernel_filename, &kernel)) {
		dlog_error("Could not find kernel file \"%s\".\n",
			   string_data(&manifest_vm->kernel_filename));
		return false;
	}

	size = memiter_size(&kernel);
	if (pa_difference(begin, end) < size) {
		dlog_error("Kernel is larger than available memory.\n");
		return false;
	}

	if (!copy_to_unmapped(stage1_locked, begin, &kernel, ppool)) {
		dlog_error("Unable to copy kernel.\n");
		return false;
	}

	if (kernel_size) {
		*kernel_size = size;
	}

	return true;
}

/**
 * Links the RX/TX buffers provided in the partition manifest to the mailbox.
 */
static bool link_rxtx_to_mailbox(struct mm_stage1_locked stage1_locked,
				 struct vm_locked vm_locked, struct rx_tx rxtx,
				 struct mpool *ppool)
{
	struct ffa_value ret;
	ipaddr_t send;
	ipaddr_t recv;
	uint32_t page_count;

	send = ipa_init(rxtx.tx_buffer->base_address);
	recv = ipa_init(rxtx.rx_buffer->base_address);
	page_count = rxtx.tx_buffer->page_count;

	ret = api_vm_configure_pages(stage1_locked, vm_locked, send, recv,
				     page_count, ppool);
	if (ret.func != FFA_SUCCESS_32) {
		return false;
	}

	dlog_verbose(" mailbox: send = %#x, recv = %#x\n",
		     vm_locked.vm->mailbox.send, vm_locked.vm->mailbox.recv);

	return true;
}

static void infer_interrupt(struct interrupt_info interrupt,
			    struct interrupt_descriptor *int_desc)
{
	uint32_t attr = interrupt.attributes;

	interrupt_desc_set_id(int_desc, interrupt.id);
	interrupt_desc_set_priority(int_desc,
				    (attr >> INT_DESC_PRIORITY_SHIFT) & 0xff);

	/* Refer to the comments in interrupt_descriptor struct definition. */
	interrupt_desc_set_type_config_sec_state(
		int_desc,
		(((attr >> INT_DESC_TYPE_SHIFT) & 0x3) << 2) |
			(((attr >> INT_DESC_CONFIG_SHIFT) & 0x1) << 1) |
			((attr >> INT_DESC_SEC_STATE_SHIFT) & 0x1));

	if (interrupt.mpidr_valid) {
		interrupt_desc_set_mpidr(int_desc, interrupt.mpidr);
	} else {
		interrupt_desc_set_mpidr_invalid(int_desc);
	}

	interrupt_desc_set_valid(int_desc, true);
	interrupt_desc_set_enabled(int_desc, true);
}

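/*
 * Worked example of the attribute packing above (field values are
 * illustrative, not taken from any particular manifest): for an attributes
 * word where type = 0x2, config = 0x1 and sec_state = 0x1, the combined
 * field is (0x2 << 2) | (0x1 << 1) | 0x1 = 0xb, i.e. the type occupies
 * bits [3:2], the config bit [1] and the security state bit [0] of the
 * descriptor's type/config/sec-state field.
 */
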
/**
 * Performs VM loading activities that are common between the primary and
 * secondaries.
 */
static bool load_common(struct mm_stage1_locked stage1_locked,
			struct vm_locked vm_locked,
			const struct manifest_vm *manifest_vm,
			struct mpool *ppool)
{
	struct device_region dev_region;
	struct interrupt_info interrupt;
	uint32_t k = 0;

	vm_locked.vm->smc_whitelist = manifest_vm->smc_whitelist;
	vm_locked.vm->power_management =
		manifest_vm->partition.power_management;

	/* Populate the array of UUIDs. */
	for (uint16_t i = 0; i < PARTITION_MAX_UUIDS; i++) {
		struct ffa_uuid current_uuid = manifest_vm->partition.uuids[i];

		if (ffa_uuid_is_null(&current_uuid)) {
			break;
		}

		vm_locked.vm->uuids[i] = current_uuid;
	}

	/* Populate the interrupt descriptors for the current VM. */
	for (uint16_t i = 0; i < PARTITION_MAX_DEVICE_REGIONS; i++) {
		dev_region = manifest_vm->partition.dev_regions[i];

		CHECK(dev_region.interrupt_count <=
		      PARTITION_MAX_INTERRUPTS_PER_DEVICE);

		for (uint8_t j = 0; j < dev_region.interrupt_count; j++) {
			struct interrupt_descriptor int_desc = {0};

			interrupt = dev_region.interrupts[j];
			infer_interrupt(interrupt, &int_desc);
			vm_locked.vm->interrupt_desc[k] = int_desc;
			assert(int_desc.enabled);

			/*
			 * Configure the physical interrupts allocated for this
			 * VM in its partition manifest.
			 */
			plat_interrupts_configure_interrupt(int_desc);
			k++;
			CHECK(k <= VM_MANIFEST_MAX_INTERRUPTS);
		}
	}
	dlog_verbose("VM has %u physical interrupts defined in manifest.\n", k);

	if (manifest_vm->is_ffa_partition) {
		vm_locked.vm->ffa_version = manifest_vm->partition.ffa_version;
		/* Link RX/TX buffers to the mailbox. */
		if (manifest_vm->partition.rxtx.available) {
			if (!link_rxtx_to_mailbox(stage1_locked, vm_locked,
						  manifest_vm->partition.rxtx,
						  ppool)) {
				dlog_error(
					"Unable to link RX/TX buffer with "
					"mailbox.\n");
				return false;
			}
		}

		vm_locked.vm->messaging_method =
			manifest_vm->partition.messaging_method;

		vm_locked.vm->ns_interrupts_action =
			manifest_vm->partition.ns_interrupts_action;

		vm_locked.vm->other_s_interrupts_action =
			manifest_vm->partition.other_s_interrupts_action;

		vm_locked.vm->me_signal_virq =
			manifest_vm->partition.me_signal_virq;

		vm_locked.vm->notifications.enabled =
			manifest_vm->partition.notification_support;

		vm_locked.vm->boot_order = manifest_vm->partition.boot_order;

		vm_locked.vm->boot_info.gp_register_num =
			manifest_vm->partition.gp_register_num;

		if (manifest_vm->partition.boot_info) {
			/*
			 * If the partition expects the boot information blob
			 * per the FF-A v1.1 boot protocol, then its address
			 * shall match the partition's load address.
			 */
			vm_locked.vm->boot_info.blob_addr =
				ipa_init(manifest_vm->partition.load_addr);
		}

		/* Update the boot list according to boot_order. */
		vcpu_update_boot(vm_get_vcpu(vm_locked.vm, 0));

		if (vm_locked_are_notifications_enabled(vm_locked) &&
		    !plat_ffa_notifications_bitmap_create_call(
			    vm_locked.vm->id, vm_locked.vm->vcpu_count)) {
			return false;
		}
	}

	/* Initialize architecture-specific features. */
	arch_vm_features_set(vm_locked.vm);

	if (!plat_iommu_attach_peripheral(stage1_locked, vm_locked, manifest_vm,
					  ppool)) {
		dlog_error("Unable to attach upstream peripheral device.\n");
		return false;
	}

	return true;
}

/**
 * Loads the primary VM.
 */
static bool load_primary(struct mm_stage1_locked stage1_locked,
			 const struct manifest_vm *manifest_vm,
			 const struct memiter *cpio,
			 const struct boot_params *params, struct mpool *ppool)
{
	paddr_t primary_begin;
	ipaddr_t primary_entry;
	struct vm *vm;
	struct vm_locked vm_locked;
	struct vcpu_locked vcpu_locked;
	size_t i;
	bool ret;

	if (manifest_vm->is_ffa_partition && !manifest_vm->is_hyp_loaded) {
		primary_begin = pa_init(manifest_vm->partition.load_addr);
		primary_entry = ipa_add(ipa_from_pa(primary_begin),
					manifest_vm->partition.ep_offset);
	} else {
		primary_begin =
			(manifest_vm->primary.boot_address ==
			 MANIFEST_INVALID_ADDRESS)
				? layout_primary_begin()
				: pa_init(manifest_vm->primary.boot_address);
		primary_entry = ipa_from_pa(primary_begin);
	}

	paddr_t primary_end = pa_add(primary_begin, RSIZE_MAX);

	/* The primary VM must run in EL1, i.e. it is a VM, not an SP. */
	CHECK(manifest_vm->partition.run_time_el == EL1);

	/*
	 * Load the kernel if a filename is specified in the VM manifest.
	 * For an FF-A partition, kernel_filename is undefined, indicating
	 * the partition package has already been loaded prior to Hafnium
	 * booting.
	 */
	if (!string_is_empty(&manifest_vm->kernel_filename)) {
		if (!load_kernel(stage1_locked, primary_begin, primary_end,
				 manifest_vm, cpio, ppool, NULL)) {
			dlog_error("Unable to load primary kernel.\n");
			return false;
		}
	}

	if (!vm_init_next(MAX_CPUS, ppool, &vm, false)) {
		dlog_error("Unable to initialise primary VM.\n");
		return false;
	}

	if (vm->id != HF_PRIMARY_VM_ID) {
		dlog_error("Primary VM was not given correct ID.\n");
		return false;
	}

	vm_locked = vm_lock(vm);

	if (params->device_mem_ranges_count == 0) {
		/*
		 * Map 1TB of address space as device memory to, most likely,
		 * make all devices available to the primary VM.
		 *
		 * TODO: remove this once all targets provide valid ranges.
		 */
		dlog_warning(
			"Device memory not provided, defaulting to 1 TB.\n");

		if (!vm_identity_map(
			    vm_locked, pa_init(0),
			    pa_init(UINT64_C(1024) * 1024 * 1024 * 1024),
			    MM_MODE_R | MM_MODE_W | MM_MODE_D, ppool, NULL)) {
			dlog_error(
				"Unable to initialise address space for "
				"primary VM.\n");
			ret = false;
			goto out;
		}
	}

	/* Map normal memory as such to permit caching, execution, etc. */
	for (i = 0; i < params->mem_ranges_count; ++i) {
		if (!vm_identity_map(vm_locked, params->mem_ranges[i].begin,
				     params->mem_ranges[i].end,
				     MM_MODE_R | MM_MODE_W | MM_MODE_X, ppool,
				     NULL)) {
			dlog_error(
				"Unable to initialise memory for primary "
				"VM.\n");
			ret = false;
			goto out;
		}
	}

	/* Map device memory as such to prevent execution, speculation etc. */
	for (i = 0; i < params->device_mem_ranges_count; ++i) {
		if (!vm_identity_map(
			    vm_locked, params->device_mem_ranges[i].begin,
			    params->device_mem_ranges[i].end,
			    MM_MODE_R | MM_MODE_W | MM_MODE_D, ppool, NULL)) {
			dlog_error(
				"Unable to initialise device memory for "
				"primary VM.\n");
			ret = false;
			goto out;
		}
	}

	if (!load_common(stage1_locked, vm_locked, manifest_vm, ppool)) {
		ret = false;
		goto out;
	}

	if (!vm_unmap_hypervisor(vm_locked, ppool)) {
		dlog_error("Unable to unmap hypervisor from primary VM.\n");
		ret = false;
		goto out;
	}

	if (!plat_iommu_unmap_iommus(vm_locked, ppool)) {
		dlog_error("Unable to unmap IOMMUs from primary VM.\n");
		ret = false;
		goto out;
	}

	dlog_info("Loaded primary VM with %u vCPUs, entry at %#x.\n",
		  vm->vcpu_count, pa_addr(primary_begin));

	/* Mark the first vCPU of the VM as the first booted vCPU. */
	vcpu_update_boot(vm_get_vcpu(vm, 0));

	vcpu_locked = vcpu_lock(vm_get_vcpu(vm, 0));
	vcpu_on(vcpu_locked, primary_entry, params->kernel_arg);
	vcpu_unlock(&vcpu_locked);
	ret = true;

out:
	vm_unlock(&vm_locked);

	return ret;
}

/**
 * Loads the secondary VM's FDT.
 * Stores the total allocated size for the FDT in fdt_allocated_size (if
 * fdt_allocated_size is not NULL). The allocated size includes additional
 * space for potential patching.
 */
static bool load_secondary_fdt(struct mm_stage1_locked stage1_locked,
			       paddr_t end, size_t fdt_max_size,
			       const struct manifest_vm *manifest_vm,
			       const struct memiter *cpio, struct mpool *ppool,
			       paddr_t *fdt_addr, size_t *fdt_allocated_size)
{
	struct memiter fdt;
	size_t allocated_size;

	CHECK(!string_is_empty(&manifest_vm->secondary.fdt_filename));

	if (!cpio_get_file(cpio, &manifest_vm->secondary.fdt_filename, &fdt)) {
		dlog_error("Cannot open the secondary VM's FDT.\n");
		return false;
	}

	/*
	 * Ensure the FDT has one additional page at the end for patching,
	 * and align it to the page boundary.
	 */
	allocated_size = align_up(memiter_size(&fdt), PAGE_SIZE) + PAGE_SIZE;

	if (allocated_size > fdt_max_size) {
		dlog_error(
			"FDT allocated space (%u) is more than the specified "
			"maximum to use (%u).\n",
			allocated_size, fdt_max_size);
		return false;
	}

	/* Load the FDT to the end of the VM's allocated memory space. */
	*fdt_addr = pa_init(pa_addr(pa_sub(end, allocated_size)));

	dlog_info("Loading secondary FDT of allocated size %u at 0x%x.\n",
		  allocated_size, pa_addr(*fdt_addr));

	if (!copy_to_unmapped(stage1_locked, *fdt_addr, &fdt, ppool)) {
		dlog_error("Unable to copy FDT.\n");
		return false;
	}

	if (fdt_allocated_size) {
		*fdt_allocated_size = allocated_size;
	}

	return true;
}

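/*
 * Illustrative sizing example for the allocation above, assuming a 4 KiB
 * PAGE_SIZE: a 5000-byte FDT is aligned up to 8192 bytes and gains one
 * extra page for patching, giving allocated_size = 12288 bytes, so the
 * blob is copied to the last 12288 bytes of the VM's memory space.
 */
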
/**
 * Converts the manifest memory region attributes to the mode consumed by the
 * mm layer.
 */
static uint32_t memory_region_attributes_to_mode(uint32_t attributes)
{
	uint32_t mode = 0U;

	if ((attributes & MANIFEST_REGION_ATTR_READ) != 0U) {
		mode |= MM_MODE_R;
	}

	if ((attributes & MANIFEST_REGION_ATTR_WRITE) != 0U) {
		mode |= MM_MODE_W;
	}

	if ((attributes & MANIFEST_REGION_ATTR_EXEC) != 0U) {
		mode |= MM_MODE_X;
	}

	assert((mode == (MM_MODE_R | MM_MODE_W)) || (mode == MM_MODE_R) ||
	       (mode == (MM_MODE_R | MM_MODE_X)));

	if ((attributes & MANIFEST_REGION_ATTR_SECURITY) != 0U) {
		mode |= arch_mm_extra_attributes_from_vm(HF_HYPERVISOR_VM_ID);
	}

	return mode;
}

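/*
 * For example, a manifest region marked read+write yields
 * MM_MODE_R | MM_MODE_W and read+execute yields MM_MODE_R | MM_MODE_X;
 * the assert above rejects any other combination, such as write-only or
 * write+execute. The security attribute then ORs in whatever extra,
 * architecture-specific bits arch_mm_extra_attributes_from_vm() reports
 * for the hypervisor's world.
 */
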
/**
 * Converts the manifest device region attributes to the mode consumed by the
 * mm layer.
 */
static uint32_t device_region_attributes_to_mode(uint32_t attributes)
{
	uint32_t mode = 0U;

	if ((attributes & MANIFEST_REGION_ATTR_READ) != 0U) {
		mode |= MM_MODE_R;
	}

	if ((attributes & MANIFEST_REGION_ATTR_WRITE) != 0U) {
		mode |= MM_MODE_W;
	}

	assert((mode == (MM_MODE_R | MM_MODE_W)) || (mode == MM_MODE_R));

	if ((attributes & MANIFEST_REGION_ATTR_SECURITY) != 0U) {
		mode |= arch_mm_extra_attributes_from_vm(HF_HYPERVISOR_VM_ID);
	}

	return mode | MM_MODE_D;
}

static bool ffa_map_memory_regions(const struct manifest_vm *manifest_vm,
				   const struct vm_locked vm_locked,
				   const struct vm_locked primary_vm_locked,
				   bool is_el0_partition, struct mpool *ppool)
{
#if LOG_LEVEL >= LOG_LEVEL_WARNING
	const char *error_string = " region security state ignored for ";
#endif
	int j = 0;
	paddr_t region_begin;
	paddr_t region_end;
	size_t size;
	uint32_t map_mode;
	uint32_t attributes;

	/* Map memory-regions. */
	while (j < manifest_vm->partition.mem_region_count) {
		size = manifest_vm->partition.mem_regions[j].page_count *
		       PAGE_SIZE;
		/*
		 * Identity map the memory region in both cases,
		 * VA (S-EL0) or IPA (S-EL1).
		 */
		region_begin = pa_init(
			manifest_vm->partition.mem_regions[j].base_address);
		region_end = pa_add(region_begin, size);

		attributes = manifest_vm->partition.mem_regions[j].attributes;
		if ((attributes & MANIFEST_REGION_ATTR_SECURITY) != 0) {
			if (ffa_is_vm_id(vm_locked.vm->id)) {
				dlog_warning("Memory%sVMs\n", error_string);
				attributes &= ~MANIFEST_REGION_ATTR_SECURITY;
			}
		}

		map_mode = memory_region_attributes_to_mode(attributes);

		if (is_el0_partition) {
			map_mode |= MM_MODE_USER | MM_MODE_NG;
		}

		if (!vm_identity_map(vm_locked, region_begin, region_end,
				     map_mode, ppool, NULL)) {
			dlog_error(
				"Unable to map secondary VM "
				"memory-region.\n");
			return false;
		}

		/* Deny the primary VM access to this memory. */
		if (!vm_unmap(primary_vm_locked, region_begin, region_end,
			      ppool)) {
			dlog_error(
				"Unable to unmap secondary VM memory-"
				"region from primary VM.\n");
			return false;
		}

		dlog_verbose("Memory region %#x - %#x allocated.\n",
			     region_begin, region_end);

		j++;
	}

	/* Map device-regions. */
	j = 0;
	while (j < manifest_vm->partition.dev_region_count) {
		region_begin = pa_init(
			manifest_vm->partition.dev_regions[j].base_address);
		size = manifest_vm->partition.dev_regions[j].page_count *
		       PAGE_SIZE;
		region_end = pa_add(region_begin, size);

		attributes = manifest_vm->partition.dev_regions[j].attributes;
		if ((attributes & MANIFEST_REGION_ATTR_SECURITY) != 0) {
			if (ffa_is_vm_id(vm_locked.vm->id)) {
				dlog_warning("Device%sVMs\n", error_string);
				attributes &= ~MANIFEST_REGION_ATTR_SECURITY;
			}
		}

		map_mode = device_region_attributes_to_mode(attributes);
		if (is_el0_partition) {
			map_mode |= MM_MODE_USER | MM_MODE_NG;
		}

		if (!vm_identity_map(vm_locked, region_begin, region_end,
				     map_mode, ppool, NULL)) {
			dlog_error(
				"Unable to map secondary VM "
				"device-region.\n");
			return false;
		}
		/* Deny the primary VM access to this region. */
		if (!vm_unmap(primary_vm_locked, region_begin, region_end,
			      ppool)) {
			dlog_error(
				"Unable to unmap secondary VM device-"
				"region from primary VM.\n");
			return false;
		}
		j++;
	}
	return true;
}

/*
 * Loads a secondary VM.
 */
static bool load_secondary(struct mm_stage1_locked stage1_locked,
			   struct vm_locked primary_vm_locked,
			   paddr_t mem_begin, paddr_t mem_end,
			   const struct manifest_vm *manifest_vm,
			   const struct boot_params *boot_params,
			   const struct memiter *cpio, struct mpool *ppool)
{
	struct vm *vm;
	struct vm_locked vm_locked;
	struct vcpu_locked vcpu_locked;
	struct vcpu *vcpu;
	ipaddr_t secondary_entry;
	bool ret;
	paddr_t fdt_addr;
	bool has_fdt;
	size_t kernel_size = 0;
	const size_t mem_size = pa_difference(mem_begin, mem_end);
	uint32_t map_mode;
	bool is_el0_partition = manifest_vm->partition.run_time_el == S_EL0 ||
				manifest_vm->partition.run_time_el == EL0;
	size_t n;

	/*
	 * Load the kernel if a filename is specified in the VM manifest.
	 * For an FF-A partition, kernel_filename is undefined, indicating
	 * the partition package has already been loaded prior to Hafnium
	 * booting.
	 */
	if (!string_is_empty(&manifest_vm->kernel_filename)) {
		if (!load_kernel(stage1_locked, mem_begin, mem_end, manifest_vm,
				 cpio, ppool, &kernel_size)) {
			dlog_error("Unable to load kernel.\n");
			return false;
		}
	}

	has_fdt = !string_is_empty(&manifest_vm->secondary.fdt_filename);
	if (has_fdt) {
		/*
		 * Ensure that the FDT does not overwrite the kernel or overlap
		 * its page, for the FDT to start at a page boundary.
		 */
		const size_t fdt_max_size =
			mem_size - align_up(kernel_size, PAGE_SIZE);

		size_t fdt_allocated_size;

		if (!load_secondary_fdt(stage1_locked, mem_end, fdt_max_size,
					manifest_vm, cpio, ppool, &fdt_addr,
					&fdt_allocated_size)) {
			dlog_error("Unable to load FDT.\n");
			return false;
		}

		if (manifest_vm->is_ffa_partition) {
			plat_ffa_parse_partition_manifest(
				stage1_locked, fdt_addr, fdt_allocated_size,
				manifest_vm, boot_params, ppool);
		}

		if (!fdt_patch_mem(stage1_locked, fdt_addr, fdt_allocated_size,
				   mem_begin, mem_end, ppool)) {
			dlog_error("Unable to patch FDT.\n");
			return false;
		}
	}
	/*
	 * An S-EL0 partition must contain only 1 vCPU (UP migratable) per the
	 * FF-A 1.0 spec.
	 */
	CHECK(!is_el0_partition || manifest_vm->secondary.vcpu_count == 1);

	if (!vm_init_next(manifest_vm->secondary.vcpu_count, ppool, &vm,
			  is_el0_partition)) {
		dlog_error("Unable to initialise VM.\n");
		return false;
	}

	vm_locked = vm_lock(vm);

	/*
	 * Grant the VM access to the memory. For VMs, we mark all memory in
	 * stage-2 tables as RWX and the VM can control permissions using
	 * stage-1 translations. For S-EL0 partitions, Hafnium maps the entire
	 * region of memory for the partition as RX. The partition is then
	 * expected to perform its own relocations and call the FFA_MEM_PERM_*
	 * APIs to change permissions on its image layout.
	 */
	if (is_el0_partition) {
		map_mode = MM_MODE_R | MM_MODE_X | MM_MODE_USER | MM_MODE_NG;
	} else {
		map_mode = MM_MODE_R | MM_MODE_W | MM_MODE_X;
	}

	if (!vm_identity_map(vm_locked, mem_begin, mem_end, map_mode, ppool,
			     &secondary_entry)) {
		dlog_error("Unable to initialise memory.\n");
		ret = false;
		goto out;
	}

	if (manifest_vm->is_ffa_partition) {
		if (!ffa_map_memory_regions(manifest_vm, vm_locked,
					    primary_vm_locked, is_el0_partition,
					    ppool)) {
			ret = false;
			goto out;
		}

		secondary_entry = ipa_add(secondary_entry,
					  manifest_vm->partition.ep_offset);
	}

	/*
	 * Map the hypervisor into the VM's page table. The hypervisor pages
	 * will not be accessible from EL0 since they will not be marked for
	 * user access.
	 * TODO: Map only the exception vectors and data that exception vectors
	 * require and not the entire hypervisor. This helps with speculative
	 * side-channel attacks.
	 */
	if (is_el0_partition) {
		CHECK(vm_identity_map(vm_locked, layout_text_begin(),
				      layout_text_end(), MM_MODE_X, ppool,
				      NULL));

		CHECK(vm_identity_map(vm_locked, layout_rodata_begin(),
				      layout_rodata_end(), MM_MODE_R, ppool,
				      NULL));

		CHECK(vm_identity_map(vm_locked, layout_data_begin(),
				      layout_data_end(), MM_MODE_R | MM_MODE_W,
				      ppool, NULL));

		CHECK(arch_stack_mm_init(mm_lock_ptable_unsafe(&vm->ptable),
					 ppool));

		plat_console_mm_init(mm_lock_ptable_unsafe(&vm->ptable), ppool);
	}

	if (!load_common(stage1_locked, vm_locked, manifest_vm, ppool)) {
		ret = false;
		goto out;
	}

	dlog_info("Loaded with %u vCPUs, entry at %#x.\n",
		  manifest_vm->secondary.vcpu_count, pa_addr(mem_begin));

	vcpu = vm_get_vcpu(vm, 0);

	vcpu_locked = vcpu_lock(vcpu);

	if (has_fdt) {
		vcpu_secondary_reset_and_start(vcpu_locked, secondary_entry,
					       pa_addr(fdt_addr));
	} else {
		/*
		 * Without an FDT, secondary VMs expect the memory size to be
		 * passed in register x0, which is what
		 * vcpu_secondary_reset_and_start does in this case.
		 */
		vcpu_secondary_reset_and_start(vcpu_locked, secondary_entry,
					       mem_size);
	}

	vcpu_unlock(&vcpu_locked);

	/*
	 * For all vCPUs:
	 * in a VM: enable the notification pending virtual interrupt if
	 * requested in the manifest.
	 * in an SP: enable the NPI and managed exit virtual interrupts if
	 * requested in the manifest. For an S-EL0 partition, enable
	 * the virtual interrupt IDs matching the secure physical
	 * interrupt IDs declared in device regions.
	 */
	for (n = 0; n < manifest_vm->secondary.vcpu_count; n++) {
		vcpu = vm_get_vcpu(vm, n);
		vcpu_locked = vcpu_lock(vcpu);
		plat_ffa_enable_virtual_interrupts(vcpu_locked, vm_locked);
		vcpu_unlock(&vcpu_locked);
	}

	ret = true;

out:
	vm_unlock(&vm_locked);

	return ret;
}

/**
 * Try to find a memory range of the given size within the given ranges, and
 * remove it from them. Return true on success, or false if no large enough
 * contiguous range is found.
 */
static bool carve_out_mem_range(struct mem_range *mem_ranges,
				size_t mem_ranges_count, uint64_t size_to_find,
				paddr_t *found_begin, paddr_t *found_end)
{
	size_t i;

	/*
	 * TODO(b/116191358): Consider being cleverer about how we pack VMs
	 * together, with a non-greedy algorithm.
	 */
	for (i = 0; i < mem_ranges_count; ++i) {
		if (size_to_find <=
		    pa_difference(mem_ranges[i].begin, mem_ranges[i].end)) {
			/*
			 * This range is big enough, take some of it from the
			 * end and reduce its size accordingly.
			 */
			*found_end = mem_ranges[i].end;
			*found_begin = pa_init(pa_addr(mem_ranges[i].end) -
					       size_to_find);
			mem_ranges[i].end = *found_begin;
			return true;
		}
	}
	return false;
}

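/*
 * Illustrative example (addresses are made up): with a single available
 * range [0x80000000, 0x90000000) and size_to_find = 0x100000, the
 * carve-out returns [0x8ff00000, 0x90000000) and shrinks the available
 * range to [0x80000000, 0x8ff00000), so successive secondary VMs are
 * packed downwards from the top of the range.
 */
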
/**
 * Given arrays of memory ranges before and after memory was removed for
 * secondary VMs, add the difference to the reserved ranges of the given
 * update. Return true on success, or false if there would be more than
 * MAX_MEM_RANGES reserved ranges after adding the new ones.
 * `before` and `after` must be arrays of exactly `mem_ranges_count` elements.
 */
static bool update_reserved_ranges(struct boot_params_update *update,
				   const struct mem_range *before,
				   const struct mem_range *after,
				   size_t mem_ranges_count)
{
	size_t i;

	for (i = 0; i < mem_ranges_count; ++i) {
		if (pa_addr(after[i].begin) > pa_addr(before[i].begin)) {
			if (update->reserved_ranges_count >= MAX_MEM_RANGES) {
				dlog_error(
					"Too many reserved ranges after "
					"loading secondary VMs.\n");
				return false;
			}
			update->reserved_ranges[update->reserved_ranges_count]
				.begin = before[i].begin;
			update->reserved_ranges[update->reserved_ranges_count]
				.end = after[i].begin;
			update->reserved_ranges_count++;
		}
		if (pa_addr(after[i].end) < pa_addr(before[i].end)) {
			if (update->reserved_ranges_count >= MAX_MEM_RANGES) {
				dlog_error(
					"Too many reserved ranges after "
					"loading secondary VMs.\n");
				return false;
			}
			update->reserved_ranges[update->reserved_ranges_count]
				.begin = after[i].end;
			update->reserved_ranges[update->reserved_ranges_count]
				.end = before[i].end;
			update->reserved_ranges_count++;
		}
	}

	return true;
}

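/*
 * Continuing the carve-out example above: with before = [0x80000000,
 * 0x90000000) and after = [0x80000000, 0x8ff00000), only the end of the
 * range moved, so a single reserved range [0x8ff00000, 0x90000000) is
 * added to the update, telling the primary VM not to treat the carved-out
 * memory as free.
 */
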
static bool init_other_world_vm(const struct boot_params *params,
				struct mpool *ppool)
{
	struct vm *other_world_vm;
	size_t i;

	/*
	 * Initialise the dummy VM which represents the opposite world:
	 * - TrustZone (or the SPMC) when running the Hypervisor;
	 * - the Hypervisor when running TZ/SPMC.
	 */
	other_world_vm = vm_init(HF_OTHER_WORLD_ID, MAX_CPUS, ppool, false);
	CHECK(other_world_vm != NULL);

	for (i = 0; i < MAX_CPUS; i++) {
		struct vcpu *vcpu = vm_get_vcpu(other_world_vm, i);
		struct cpu *cpu = cpu_find_index(i);

		vcpu->cpu = cpu;
	}

	return arch_other_world_vm_init(other_world_vm, params, ppool);
}

/*
 * Loads all VMs from the manifest.
 */
bool load_vms(struct mm_stage1_locked stage1_locked,
	      const struct manifest *manifest, const struct memiter *cpio,
	      const struct boot_params *params,
	      struct boot_params_update *update, struct mpool *ppool)
{
	struct vm *primary;
	struct mem_range mem_ranges_available[MAX_MEM_RANGES];
	struct vm_locked primary_vm_locked;
	size_t i;
	bool success = true;

	/*
	 * Only try to load the primary VM if it is supposed to be in this
	 * world.
	 */
	if (vm_id_is_current_world(HF_PRIMARY_VM_ID)) {
		if (!load_primary(stage1_locked,
				  &manifest->vm[HF_PRIMARY_VM_INDEX], cpio,
				  params, ppool)) {
			dlog_error("Unable to load primary VM.\n");
			return false;
		}
	}

	if (!init_other_world_vm(params, ppool)) {
		return false;
	}

	static_assert(
		sizeof(mem_ranges_available) == sizeof(params->mem_ranges),
		"mem_range arrays must be the same size for memcpy.");
	static_assert(sizeof(mem_ranges_available) < 500,
		      "This will use too much stack, either make "
		      "MAX_MEM_RANGES smaller or change this.");
	memcpy_s(mem_ranges_available, sizeof(mem_ranges_available),
		 params->mem_ranges, sizeof(params->mem_ranges));

	/* Round the last addresses down to the page size. */
	for (i = 0; i < params->mem_ranges_count; ++i) {
		mem_ranges_available[i].end = pa_init(align_down(
			pa_addr(mem_ranges_available[i].end), PAGE_SIZE));
	}

	primary = vm_find(HF_PRIMARY_VM_ID);
	primary_vm_locked = vm_lock(primary);

	for (i = 0; i < manifest->vm_count; ++i) {
		const struct manifest_vm *manifest_vm = &manifest->vm[i];
		ffa_id_t vm_id = HF_VM_ID_OFFSET + i;
		uint64_t mem_size;
		paddr_t secondary_mem_begin;
		paddr_t secondary_mem_end;

		if (vm_id == HF_PRIMARY_VM_ID) {
			continue;
		}

		dlog_info("Loading VM id %#x: %s.\n", vm_id,
			  manifest_vm->debug_name);

		mem_size = align_up(manifest_vm->secondary.mem_size, PAGE_SIZE);

		if (manifest_vm->is_ffa_partition &&
		    !manifest->vm[i].is_hyp_loaded) {
			secondary_mem_begin =
				pa_init(manifest_vm->partition.load_addr);
			secondary_mem_end = pa_init(
				manifest_vm->partition.load_addr + mem_size);
		} else if (!carve_out_mem_range(mem_ranges_available,
						params->mem_ranges_count,
						mem_size, &secondary_mem_begin,
						&secondary_mem_end)) {
			dlog_error("Not enough memory (%u bytes).\n", mem_size);
			continue;
		}

		if (!load_secondary(stage1_locked, primary_vm_locked,
				    secondary_mem_begin, secondary_mem_end,
				    manifest_vm, params, cpio, ppool)) {
			dlog_error("Unable to load VM.\n");
			continue;
		}

		/* Deny the primary VM access to this memory. */
		if (!vm_unmap(primary_vm_locked, secondary_mem_begin,
			      secondary_mem_end, ppool)) {
			dlog_error(
				"Unable to unmap secondary VM from primary "
				"VM.\n");
			success = false;
			break;
		}
	}

	vm_unlock(&primary_vm_locked);

	if (!success) {
		return false;
	}

	/*
	 * Add newly reserved areas to update params by looking at the
	 * difference between the available ranges from the original params and
	 * the updated mem_ranges_available. We assume that the number and order
	 * of available ranges is the same, i.e. we don't remove any ranges
	 * above, only make them smaller.
	 */
	return update_reserved_ranges(update, params->mem_ranges,
				      mem_ranges_available,
				      params->mem_ranges_count);
}