/*
 * Copyright 2018 The Hafnium Authors.
 *
 * Use of this source code is governed by a BSD-style
 * license that can be found in the LICENSE file or at
 * https://opensource.org/licenses/BSD-3-Clause.
 */

#include "hf/load.h"

#include <stdbool.h>

#include "hf/arch/init.h"
#include "hf/arch/other_world.h"
#include "hf/arch/plat/ffa.h"
#include "hf/arch/plat/ffa/direct_messaging.h"
#include "hf/arch/plat/ffa/interrupts.h"
#include "hf/arch/plat/ffa/notifications.h"
#include "hf/arch/plat/ffa/setup_and_discovery.h"
#include "hf/arch/vm.h"

#include "hf/api.h"
#include "hf/boot_params.h"
#include "hf/check.h"
#include "hf/dlog.h"
#include "hf/fdt_patch.h"
#include "hf/layout.h"
#include "hf/manifest.h"
#include "hf/memiter.h"
#include "hf/mm.h"
#include "hf/plat/console.h"
#include "hf/plat/interrupts.h"
#include "hf/plat/iommu.h"
#include "hf/static_assert.h"
#include "hf/std.h"
#include "hf/vm.h"

#include "vmapi/hf/call.h"
#include "vmapi/hf/ffa.h"

/**
 * Copies data to an unmapped location by mapping it for write, copying the
 * data, then unmapping it.
 *
 * The data is written so that it is available to all cores with the cache
 * disabled. When switching to the partitions, the caching is initially
 * disabled so the data must be available without the cache.
 */
static bool copy_to_unmapped(struct mm_stage1_locked stage1_locked, paddr_t to,
			     struct memiter *from_it, struct mpool *ppool)
{
	const void *from = memiter_base(from_it);
	size_t size = memiter_size(from_it);
	paddr_t to_end = pa_add(to, size);
	void *ptr;

	ptr = mm_identity_map(stage1_locked, to, to_end, MM_MODE_W, ppool);
	if (!ptr) {
		return false;
	}

	memcpy_s(ptr, size, from, size);
	arch_mm_flush_dcache(ptr, size);

	CHECK(mm_unmap(stage1_locked, to, to_end, ppool));

	return true;
}

/**
 * Loads a VM's kernel from the CPIO archive. This is used for both the
 * primary and secondary VMs.
 * Stores the kernel size in kernel_size (if kernel_size is not NULL).
 * Returns false if it cannot load the kernel.
 */
static bool load_kernel(struct mm_stage1_locked stage1_locked, paddr_t begin,
			paddr_t end, const struct manifest_vm *manifest_vm,
			const struct memiter *cpio, struct mpool *ppool,
			size_t *kernel_size)
{
	struct memiter kernel;
	size_t size;

	if (!cpio_get_file(cpio, &manifest_vm->kernel_filename, &kernel)) {
		dlog_error("Could not find kernel file \"%s\".\n",
			   string_data(&manifest_vm->kernel_filename));
		return false;
	}

	size = memiter_size(&kernel);
	if (pa_difference(begin, end) < size) {
		dlog_error("Kernel is larger than available memory.\n");
		return false;
	}

	if (!copy_to_unmapped(stage1_locked, begin, &kernel, ppool)) {
		dlog_error("Unable to copy kernel.\n");
		return false;
	}

	if (kernel_size) {
		*kernel_size = size;
	}

	return true;
}

/*
 * Links the RX/TX buffers provided in the partition manifest to the mailbox.
 */
static bool link_rxtx_to_mailbox(struct mm_stage1_locked stage1_locked,
				 struct vm_locked vm_locked, struct rx_tx rxtx,
				 struct mpool *ppool)
{
	struct ffa_value ret;
	ipaddr_t send;
	ipaddr_t recv;
	uint32_t page_count;

	send = ipa_init(rxtx.tx_buffer->base_address);
	recv = ipa_init(rxtx.rx_buffer->base_address);
	page_count = rxtx.tx_buffer->page_count;

	ret = api_vm_configure_pages(stage1_locked, vm_locked, send, recv,
				     page_count, ppool);
	if (ret.func != FFA_SUCCESS_32) {
		return false;
	}

	dlog_verbose(" mailbox: send = %p, recv = %p\n",
		     vm_locked.vm->mailbox.send, vm_locked.vm->mailbox.recv);

	return true;
}

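/**
 * Builds an interrupt descriptor from the interrupt information declared in a
 * partition manifest device region, unpacking the packed attributes field
 * (priority, type, config and security state bit-fields).
 */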
static void infer_interrupt(struct interrupt_info interrupt,
			    struct interrupt_descriptor *int_desc)
{
	uint32_t attr = interrupt.attributes;

	int_desc->interrupt_id = interrupt.id;
	int_desc->priority = (attr >> INT_INFO_ATTR_PRIORITY_SHIFT) & 0xff;

	int_desc->type = (attr >> INT_INFO_ATTR_TYPE_SHIFT) & 0x3;
	int_desc->config = (attr >> INT_INFO_ATTR_CONFIG_SHIFT) & 0x1;
	int_desc->sec_state = (attr >> INT_INFO_ATTR_SEC_STATE_SHIFT) & 0x1;

	if (interrupt.mpidr_valid) {
		int_desc->mpidr_valid = true;
		int_desc->mpidr = interrupt.mpidr;
	} else {
		int_desc->mpidr_valid = false;
		int_desc->mpidr = 0;
	}

	int_desc->valid = true;
	int_desc->enabled = true;
}

/**
 * Performs VM loading activities that are common between the primary and
 * secondaries.
 */
static bool load_common(struct mm_stage1_locked stage1_locked,
			struct vm_locked vm_locked,
			const struct manifest_vm *manifest_vm,
			struct mpool *ppool)
{
	struct device_region dev_region;
	struct interrupt_info interrupt;
	uint32_t k = 0;

	vm_locked.vm->smc_whitelist = manifest_vm->smc_whitelist;
	vm_locked.vm->power_management =
		manifest_vm->partition.power_management;

	/* Populate array of UUIDs. */
	for (uint16_t i = 0; i < PARTITION_MAX_UUIDS; i++) {
		struct ffa_uuid current_uuid = manifest_vm->partition.uuids[i];

		if (ffa_uuid_is_null(&current_uuid)) {
			break;
		}

		vm_locked.vm->uuids[i] = current_uuid;
	}

	/*
	 * Populate the interrupt descriptors for the current VM.
	 * They can be enabled at runtime using HF_INTERRUPT_ENABLE.
	 */
	for (uint16_t i = 0; i < PARTITION_MAX_DEVICE_REGIONS; i++) {
		dev_region = manifest_vm->partition.dev_regions[i];

		CHECK(dev_region.interrupt_count <=
		      PARTITION_MAX_INTERRUPTS_PER_DEVICE);

		for (uint8_t j = 0; j < dev_region.interrupt_count; j++) {
			struct interrupt_descriptor int_desc = {0};

			interrupt = dev_region.interrupts[j];
			infer_interrupt(interrupt, &int_desc);
			vm_locked.vm->interrupt_desc[k] = int_desc;
			assert(int_desc.enabled);

			k++;
			CHECK(k <= VM_MANIFEST_MAX_INTERRUPTS);
		}
	}

	dlog_verbose("VM has %u physical interrupts defined in manifest.\n", k);

	if (manifest_vm->is_ffa_partition) {
		vm_locked.vm->ffa_version = manifest_vm->partition.ffa_version;
		/* Link RX/TX buffers to the mailbox. */
		if (manifest_vm->partition.rxtx.available) {
			if (!link_rxtx_to_mailbox(stage1_locked, vm_locked,
						  manifest_vm->partition.rxtx,
						  ppool)) {
				dlog_error(
					"Unable to link RX/TX buffer with "
					"mailbox.\n");
				return false;
			}
		}

		vm_locked.vm->messaging_method =
			manifest_vm->partition.messaging_method;

		vm_locked.vm->ns_interrupts_action =
			manifest_vm->partition.ns_interrupts_action;

		vm_locked.vm->other_s_interrupts_action =
			manifest_vm->partition.other_s_interrupts_action;

		vm_locked.vm->me_signal_virq =
			manifest_vm->partition.me_signal_virq;

		vm_locked.vm->notifications.enabled =
			manifest_vm->partition.notification_support;

		vm_locked.vm->vm_availability_messages.vm_created =
			manifest_vm->partition.vm_availability_messages
				.vm_created;
		vm_locked.vm->vm_availability_messages.vm_destroyed =
			manifest_vm->partition.vm_availability_messages
				.vm_destroyed;

		vm_locked.vm->boot_order = manifest_vm->partition.boot_order;

		vm_locked.vm->boot_info.gp_register_num =
			manifest_vm->partition.gp_register_num;

		if (manifest_vm->partition.boot_info) {
			/*
			 * If the partition expects the boot information blob
			 * per the FF-A v1.1 boot protocol, then its address
			 * shall match the partition's load address.
			 */
259 vm_locked.vm->boot_info.blob_addr =
260 ipa_init(manifest_vm->partition.load_addr);
261 }
262
		/* Update the boot list according to boot_order. */
		vcpu_update_boot(vm_get_vcpu(vm_locked.vm, 0));

		if (vm_locked_are_notifications_enabled(vm_locked) &&
		    !plat_ffa_notifications_bitmap_create_call(
			    vm_locked.vm->id, vm_locked.vm->vcpu_count)) {
			return false;
		}
	}

	/* Initialize architecture-specific features. */
	arch_vm_features_set(vm_locked.vm);

	if (!plat_iommu_attach_peripheral(stage1_locked, vm_locked, manifest_vm,
					  ppool)) {
		dlog_error("Unable to attach upstream peripheral device.\n");
		return false;
	}

	return true;
}

/**
 * Loads the primary VM.
 */
static bool load_primary(struct mm_stage1_locked stage1_locked,
			 const struct manifest_vm *manifest_vm,
			 const struct memiter *cpio,
			 const struct boot_params *params, struct mpool *ppool)
{
	paddr_t primary_begin;
	ipaddr_t primary_entry;
	struct vm *vm;
	struct vm_locked vm_locked;
	struct vcpu_locked vcpu_locked;
	size_t i;
	bool ret;

	if (manifest_vm->is_ffa_partition && !manifest_vm->is_hyp_loaded) {
		primary_begin = pa_init(manifest_vm->partition.load_addr);
		primary_entry = ipa_add(ipa_from_pa(primary_begin),
					manifest_vm->partition.ep_offset);
	} else {
		primary_begin =
			(manifest_vm->primary.boot_address ==
			 MANIFEST_INVALID_ADDRESS)
				? layout_primary_begin()
				: pa_init(manifest_vm->primary.boot_address);
		primary_entry = ipa_from_pa(primary_begin);
	}

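	/*
	 * Note: the primary VM's image size is not known at this point, so
	 * primary_end is only an upper bound (RSIZE_MAX above the load
	 * address) for the kernel copy below; the actual memory map is
	 * established from the boot params further down.
	 */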
	paddr_t primary_end = pa_add(primary_begin, RSIZE_MAX);

	/* The primary VM must be a proper VM, i.e. it must run at EL1. */
	CHECK(manifest_vm->partition.run_time_el == EL1);

	/*
	 * Load the kernel if a filename is specified in the VM manifest.
	 * For an FF-A partition, kernel_filename is undefined indicating
	 * the partition package has already been loaded prior to Hafnium
	 * booting.
	 */
	if (!string_is_empty(&manifest_vm->kernel_filename)) {
		if (!load_kernel(stage1_locked, primary_begin, primary_end,
				 manifest_vm, cpio, ppool, NULL)) {
			dlog_error("Unable to load primary kernel.\n");
			return false;
		}
	}

	if (!vm_init_next(MAX_CPUS, ppool, &vm, false,
			  manifest_vm->partition.dma_device_count)) {
		dlog_error("Unable to initialise primary VM.\n");
		return false;
	}

	if (!vm_is_primary(vm)) {
		dlog_error("Primary VM was not given correct ID.\n");
		return false;
	}

	vm_locked = vm_lock(vm);

	if (params->device_mem_ranges_count == 0) {
		/*
		 * Map 1TB of address space as device memory to, most likely,
		 * make all devices available to the primary VM.
		 *
		 * TODO: remove this once all targets provide valid ranges.
		 */
		dlog_warning(
			"Device memory not provided, defaulting to 1 TB.\n");

		if (!vm_identity_map(
			    vm_locked, pa_init(0),
			    pa_init(UINT64_C(1024) * 1024 * 1024 * 1024),
			    MM_MODE_R | MM_MODE_W | MM_MODE_D, ppool, NULL)) {
			dlog_error(
				"Unable to initialise address space for "
				"primary VM.\n");
			ret = false;
			goto out;
		}
	}

	/* Map normal memory as such to permit caching, execution, etc. */
	for (i = 0; i < params->mem_ranges_count; ++i) {
		if (!vm_identity_map(vm_locked, params->mem_ranges[i].begin,
				     params->mem_ranges[i].end,
				     MM_MODE_R | MM_MODE_W | MM_MODE_X, ppool,
				     NULL)) {
			dlog_error(
				"Unable to initialise memory for primary "
				"VM.\n");
			ret = false;
			goto out;
		}
	}

	/* Map device memory as such to prevent execution, speculation, etc. */
	for (i = 0; i < params->device_mem_ranges_count; ++i) {
		if (!vm_identity_map(
			    vm_locked, params->device_mem_ranges[i].begin,
			    params->device_mem_ranges[i].end,
			    MM_MODE_R | MM_MODE_W | MM_MODE_D, ppool, NULL)) {
			dlog_error(
				"Unable to initialise device memory for "
				"primary VM.\n");
			ret = false;
			goto out;
		}
	}

	if (!load_common(stage1_locked, vm_locked, manifest_vm, ppool)) {
		ret = false;
		goto out;
	}

	if (!vm_unmap_hypervisor(vm_locked, ppool)) {
		dlog_error("Unable to unmap hypervisor from primary VM.\n");
		ret = false;
		goto out;
	}

	if (!plat_iommu_unmap_iommus(vm_locked, ppool)) {
		dlog_error("Unable to unmap IOMMUs from primary VM.\n");
		ret = false;
		goto out;
	}

	dlog_info("Loaded primary VM with %u vCPUs, entry at %#lx.\n",
		  vm->vcpu_count, pa_addr(primary_begin));

	/* Mark the VM's first vCPU as the first booted vCPU. */
	vcpu_update_boot(vm_get_vcpu(vm, 0));

	vcpu_locked = vcpu_lock(vm_get_vcpu(vm, 0));
	vcpu_on(vcpu_locked, primary_entry, params->kernel_arg);
	vcpu_unlock(&vcpu_locked);
	ret = true;

out:
	vm_unlock(&vm_locked);

	return ret;
}

/**
 * Loads the secondary VM's FDT.
 * Stores the total allocated size for the FDT in fdt_allocated_size (if
 * fdt_allocated_size is not NULL). The allocated size includes additional
 * space for potential patching.
 */
static bool load_secondary_fdt(struct mm_stage1_locked stage1_locked,
			       paddr_t end, size_t fdt_max_size,
			       const struct manifest_vm *manifest_vm,
			       const struct memiter *cpio, struct mpool *ppool,
			       paddr_t *fdt_addr, size_t *fdt_allocated_size)
{
	struct memiter fdt;
	size_t allocated_size;

	CHECK(!string_is_empty(&manifest_vm->secondary.fdt_filename));

	if (!cpio_get_file(cpio, &manifest_vm->secondary.fdt_filename, &fdt)) {
		dlog_error("Cannot open the secondary VM's FDT.\n");
		return false;
	}

	/*
	 * Ensure the FDT has one additional page at the end for patching,
	 * and align it to the page boundary.
	 */
	allocated_size = align_up(memiter_size(&fdt), PAGE_SIZE) + PAGE_SIZE;

	if (allocated_size > fdt_max_size) {
		dlog_error(
			"FDT allocated space (%zu) is more than the specified "
			"maximum to use (%zu).\n",
			allocated_size, fdt_max_size);
		return false;
	}

	/* Load the FDT to the end of the VM's allocated memory space. */
	*fdt_addr = pa_init(pa_addr(pa_sub(end, allocated_size)));

	dlog_info("Loading secondary FDT of allocated size %zu at 0x%lx.\n",
		  allocated_size, pa_addr(*fdt_addr));

	if (!copy_to_unmapped(stage1_locked, *fdt_addr, &fdt, ppool)) {
		dlog_error("Unable to copy FDT.\n");
		return false;
	}

	if (fdt_allocated_size) {
		*fdt_allocated_size = allocated_size;
	}

	return true;
}

/**
 * Converts the manifest memory region attributes to the mode consumed by the
 * mm layer.
 */
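/*
 * For example (illustrative): a region with only MANIFEST_REGION_ATTR_READ
 * and MANIFEST_REGION_ATTR_WRITE set yields MM_MODE_R | MM_MODE_W, plus the
 * architecture's extra security attributes when MANIFEST_REGION_ATTR_SECURITY
 * is also set.
 */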
static uint32_t memory_region_attributes_to_mode(uint32_t attributes)
{
	uint32_t mode = 0U;

	if ((attributes & MANIFEST_REGION_ATTR_READ) != 0U) {
		mode |= MM_MODE_R;
	}

	if ((attributes & MANIFEST_REGION_ATTR_WRITE) != 0U) {
		mode |= MM_MODE_W;
	}

	if ((attributes & MANIFEST_REGION_ATTR_EXEC) != 0U) {
		mode |= MM_MODE_X;
	}

	assert((mode == (MM_MODE_R | MM_MODE_W)) || (mode == MM_MODE_R) ||
	       (mode == (MM_MODE_R | MM_MODE_X)));

	if ((attributes & MANIFEST_REGION_ATTR_SECURITY) != 0U) {
		mode |= arch_mm_extra_attributes_from_vm(HF_HYPERVISOR_VM_ID);
	}

	return mode;
}

/**
 * Converts the manifest device region attributes to the mode consumed by the
 * mm layer.
 */
static uint32_t device_region_attributes_to_mode(uint32_t attributes)
{
	uint32_t mode = 0U;

	if ((attributes & MANIFEST_REGION_ATTR_READ) != 0U) {
		mode |= MM_MODE_R;
	}

	if ((attributes & MANIFEST_REGION_ATTR_WRITE) != 0U) {
		mode |= MM_MODE_W;
	}

	assert((mode == (MM_MODE_R | MM_MODE_W)) || (mode == MM_MODE_R));

	if ((attributes & MANIFEST_REGION_ATTR_SECURITY) != 0U) {
		mode |= arch_mm_extra_attributes_from_vm(HF_HYPERVISOR_VM_ID);
	}

	return mode | MM_MODE_D;
}

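/**
 * Maps the memory and device regions declared in an FF-A partition's manifest
 * into the partition's page tables (as VA for S-EL0 partitions, IPA for
 * S-EL1), and removes those regions from the primary VM's address space.
 */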
static bool ffa_map_memory_regions(const struct manifest_vm *manifest_vm,
				   const struct vm_locked vm_locked,
				   const struct vm_locked primary_vm_locked,
				   bool is_el0_partition, struct mpool *ppool)
{
#if LOG_LEVEL >= LOG_LEVEL_WARNING
	const char *error_string = " region security state ignored for ";
#endif
	int j = 0;
	paddr_t region_begin;
	paddr_t region_end;
	size_t size;
	uint32_t map_mode;
	uint32_t attributes;

	/* Map memory-regions. */
	while (j < manifest_vm->partition.mem_region_count) {
		struct memory_region mem_region;

		mem_region = manifest_vm->partition.mem_regions[j];
		size = mem_region.page_count * PAGE_SIZE;
		/*
		 * Identity map the memory region in both cases,
		 * VA (S-EL0) or IPA (S-EL1).
		 */
		region_begin = pa_init(mem_region.base_address);
		region_end = pa_add(region_begin, size);

		attributes = mem_region.attributes;
		if ((attributes & MANIFEST_REGION_ATTR_SECURITY) != 0) {
			if (ffa_is_vm_id(vm_locked.vm->id)) {
				dlog_warning("Memory%sVMs\n", error_string);
				attributes &= ~MANIFEST_REGION_ATTR_SECURITY;
			}
		}

		map_mode = memory_region_attributes_to_mode(attributes);

		if (is_el0_partition) {
			map_mode |= MM_MODE_USER | MM_MODE_NG;
		}

		if (!vm_identity_map(vm_locked, region_begin, region_end,
				     map_mode, ppool, NULL)) {
			dlog_error(
				"Unable to map secondary VM "
				"memory-region.\n");
			return false;
		}

		/*
		 * Enforce static DMA isolation through stage 2 address
		 * translation.
		 * Only the DMA device that is specified as part of this memory
		 * region node in the partition manifest will be granted access
		 * to the memory region.
		 */
		if (mem_region.dma_prop.stream_count > 0 &&
		    !vm_iommu_mm_identity_map(
			    vm_locked, region_begin, region_end, map_mode,
			    ppool, NULL, mem_region.dma_prop.dma_device_id)) {
			dlog_error(
				"Unable to map memory-region in the page "
				"tables of DMA device.\n");
			return false;
		}

		/* Deny the primary VM access to this memory. */
		if (!vm_unmap(primary_vm_locked, region_begin, region_end,
			      ppool)) {
			dlog_error(
				"Unable to unmap secondary VM memory-"
				"region from primary VM.\n");
			return false;
		}

		dlog_verbose("Memory region %#lx - %#lx allocated.\n",
			     pa_addr(region_begin), pa_addr(region_end));

		j++;
	}

	/* Map device-regions. */
	j = 0;
	while (j < manifest_vm->partition.dev_region_count) {
		region_begin = pa_init(
			manifest_vm->partition.dev_regions[j].base_address);
		size = manifest_vm->partition.dev_regions[j].page_count *
		       PAGE_SIZE;
		region_end = pa_add(region_begin, size);

		attributes = manifest_vm->partition.dev_regions[j].attributes;
		if ((attributes & MANIFEST_REGION_ATTR_SECURITY) != 0) {
			if (ffa_is_vm_id(vm_locked.vm->id)) {
				dlog_warning("Device%sVMs\n", error_string);
				attributes &= ~MANIFEST_REGION_ATTR_SECURITY;
			}
		}

		map_mode = device_region_attributes_to_mode(attributes);
		if (is_el0_partition) {
			map_mode |= MM_MODE_USER | MM_MODE_NG;
		}

		if (!vm_identity_map(vm_locked, region_begin, region_end,
				     map_mode, ppool, NULL)) {
			dlog_error(
				"Unable to map secondary VM "
				"device-region.\n");
			return false;
		}
		/* Deny primary VM access to this region. */
		if (!vm_unmap(primary_vm_locked, region_begin, region_end,
			      ppool)) {
			dlog_error(
				"Unable to unmap secondary VM device-"
				"region from primary VM.\n");
			return false;
		}
		j++;
	}
	return true;
}

/*
 * Loads a secondary VM.
 */
static bool load_secondary(struct mm_stage1_locked stage1_locked,
			   struct vm_locked primary_vm_locked,
			   paddr_t mem_begin, paddr_t mem_end,
			   const struct manifest_vm *manifest_vm,
			   const struct boot_params *boot_params,
			   const struct memiter *cpio, struct mpool *ppool)
{
	struct vm *vm;
	struct vm_locked vm_locked;
	struct vcpu_locked vcpu_locked;
	struct vcpu *vcpu;
	ipaddr_t secondary_entry;
	bool ret;
	paddr_t fdt_addr;
	bool has_fdt;
	size_t kernel_size = 0;
	const size_t mem_size = pa_difference(mem_begin, mem_end);
	uint32_t map_mode;
	bool is_el0_partition = manifest_vm->partition.run_time_el == S_EL0 ||
				manifest_vm->partition.run_time_el == EL0;
	size_t n;

	/*
	 * Load the kernel if a filename is specified in the VM manifest.
	 * For an FF-A partition, kernel_filename is undefined indicating
	 * the partition package has already been loaded prior to Hafnium
	 * booting.
	 */
	if (!string_is_empty(&manifest_vm->kernel_filename)) {
		if (!load_kernel(stage1_locked, mem_begin, mem_end, manifest_vm,
				 cpio, ppool, &kernel_size)) {
			dlog_error("Unable to load kernel.\n");
			return false;
		}
	}

	has_fdt = !string_is_empty(&manifest_vm->secondary.fdt_filename);
	if (has_fdt) {
		/*
		 * Ensure that the FDT does not overwrite the kernel or
		 * overlap its last page, so that the FDT starts at a page
		 * boundary.
		 */
		const size_t fdt_max_size =
			mem_size - align_up(kernel_size, PAGE_SIZE);

		size_t fdt_allocated_size;

		if (!load_secondary_fdt(stage1_locked, mem_end, fdt_max_size,
					manifest_vm, cpio, ppool, &fdt_addr,
					&fdt_allocated_size)) {
			dlog_error("Unable to load FDT.\n");
			return false;
		}

		if (manifest_vm->is_ffa_partition) {
			plat_ffa_parse_partition_manifest(
				stage1_locked, fdt_addr, fdt_allocated_size,
				manifest_vm, boot_params, ppool);
		}

		if (!fdt_patch_mem(stage1_locked, fdt_addr, fdt_allocated_size,
				   mem_begin, mem_end, ppool)) {
			dlog_error("Unable to patch FDT.\n");
			return false;
		}
	}
	/*
	 * An S-EL0 partition must contain only 1 vCPU (UP migratable) per the
	 * FF-A 1.0 spec.
	 */
	CHECK(!is_el0_partition || manifest_vm->secondary.vcpu_count == 1);

	if (!vm_init_next(manifest_vm->secondary.vcpu_count, ppool, &vm,
			  is_el0_partition,
			  manifest_vm->partition.dma_device_count)) {
		dlog_error("Unable to initialise VM.\n");
		return false;
	}

	vm_locked = vm_lock(vm);

	/*
	 * Grant the VM access to the memory. For VMs, we mark all memory in
	 * stage-2 tables as RWX and the VM can control permissions using
	 * stage-1 translations. For S-EL0 partitions, Hafnium maps the entire
	 * region of memory for the partition as RX. The partition is then
	 * expected to perform its own relocations and call the
	 * FFA_MEM_PERM_* APIs to change permissions on its image layout.
	 */
	if (is_el0_partition) {
		map_mode = MM_MODE_R | MM_MODE_X | MM_MODE_USER | MM_MODE_NG;
	} else {
		map_mode = MM_MODE_R | MM_MODE_W | MM_MODE_X;
	}

	if (!vm_identity_map(vm_locked, mem_begin, mem_end, map_mode, ppool,
			     &secondary_entry)) {
		dlog_error("Unable to initialise memory.\n");
		ret = false;
		goto out;
	}

	if (manifest_vm->is_ffa_partition) {
		if (!ffa_map_memory_regions(manifest_vm, vm_locked,
					    primary_vm_locked, is_el0_partition,
					    ppool)) {
			ret = false;
			goto out;
		}

		secondary_entry = ipa_add(secondary_entry,
					  manifest_vm->partition.ep_offset);
	}

	/*
	 * Map the hypervisor into the VM's page table. The hypervisor pages
	 * will not be accessible from EL0 since they will not be marked for
	 * user access.
	 * TODO: Map only the exception vectors and the data the exception
	 * vectors require, not the entire hypervisor. This helps mitigate
	 * speculative side-channel attacks.
	 */
	if (is_el0_partition) {
		CHECK(vm_identity_map(vm_locked, layout_text_begin(),
				      layout_text_end(), MM_MODE_X, ppool,
				      NULL));

		CHECK(vm_identity_map(vm_locked, layout_rodata_begin(),
				      layout_rodata_end(), MM_MODE_R, ppool,
				      NULL));

		CHECK(vm_identity_map(vm_locked, layout_data_begin(),
				      layout_data_end(), MM_MODE_R | MM_MODE_W,
				      ppool, NULL));

		CHECK(arch_stack_mm_init(mm_lock_ptable_unsafe(&vm->ptable),
					 ppool));

		plat_console_mm_init(mm_lock_ptable_unsafe(&vm->ptable), ppool);
	}

	if (!load_common(stage1_locked, vm_locked, manifest_vm, ppool)) {
		ret = false;
		goto out;
	}

	dlog_info("Loaded with %u vCPUs, entry at %#lx.\n",
		  manifest_vm->secondary.vcpu_count, pa_addr(mem_begin));

	vcpu = vm_get_vcpu(vm, 0);

	vcpu_locked = vcpu_lock(vcpu);

	if (has_fdt) {
		vcpu_secondary_reset_and_start(vcpu_locked, secondary_entry,
					       pa_addr(fdt_addr));
	} else {
		/*
		 * Without an FDT, secondary VMs expect the memory size to be
		 * passed in register x0, which is what
		 * vcpu_secondary_reset_and_start does in this case.
		 */
		vcpu_secondary_reset_and_start(vcpu_locked, secondary_entry,
					       mem_size);
	}

	vcpu_unlock(&vcpu_locked);

	/*
	 * For all vCPUs:
	 * in a VM, enable the notification pending virtual interrupt if
	 * requested in the manifest;
	 * in an SP, enable the NPI and managed exit virtual interrupts if
	 * requested in the manifest. For an S-EL0 partition, enable the
	 * virtual interrupt IDs matching the secure physical interrupt IDs
	 * declared in device regions.
	 */
	for (n = 0; n < manifest_vm->secondary.vcpu_count; n++) {
		vcpu = vm_get_vcpu(vm, n);
		vcpu_locked = vcpu_lock(vcpu);
		plat_ffa_enable_virtual_interrupts(vcpu_locked, vm_locked);
		vcpu_unlock(&vcpu_locked);
	}

	ret = true;

out:
	vm_unlock(&vm_locked);

	return ret;
}

/**
 * Try to find a memory range of the given size within the given ranges, and
 * remove it from them. Return true on success, or false if no large enough
 * contiguous range is found.
 */
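/*
 * For example (illustrative numbers): carving 0x10000 bytes out of the range
 * [0x80000000, 0x90000000) returns [0x8fff0000, 0x90000000) and shrinks the
 * range to [0x80000000, 0x8fff0000).
 */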
static bool carve_out_mem_range(struct mem_range *mem_ranges,
				size_t mem_ranges_count, uint64_t size_to_find,
				paddr_t *found_begin, paddr_t *found_end)
{
	size_t i;

	/*
	 * TODO(b/116191358): Consider being cleverer about how we pack VMs
	 * together, with a non-greedy algorithm.
	 */
	for (i = 0; i < mem_ranges_count; ++i) {
		if (size_to_find <=
		    pa_difference(mem_ranges[i].begin, mem_ranges[i].end)) {
			/*
			 * This range is big enough, take some of it from the
			 * end and reduce its size accordingly.
			 */
			*found_end = mem_ranges[i].end;
			*found_begin = pa_init(pa_addr(mem_ranges[i].end) -
					       size_to_find);
			mem_ranges[i].end = *found_begin;
			return true;
		}
	}
	return false;
}

/**
 * Given arrays of memory ranges before and after memory was removed for
 * secondary VMs, add the difference to the reserved ranges of the given
 * update. Return true on success, or false if there would be more than
 * MAX_MEM_RANGES reserved ranges after adding the new ones.
 * `before` and `after` must be arrays of exactly `mem_ranges_count` elements.
 */
static bool update_reserved_ranges(struct boot_params_update *update,
				   const struct mem_range *before,
				   const struct mem_range *after,
				   size_t mem_ranges_count)
{
	size_t i;

	for (i = 0; i < mem_ranges_count; ++i) {
		if (pa_addr(after[i].begin) > pa_addr(before[i].begin)) {
			if (update->reserved_ranges_count >= MAX_MEM_RANGES) {
				dlog_error(
					"Too many reserved ranges after "
					"loading secondary VMs.\n");
				return false;
			}
			update->reserved_ranges[update->reserved_ranges_count]
				.begin = before[i].begin;
			update->reserved_ranges[update->reserved_ranges_count]
				.end = after[i].begin;
			update->reserved_ranges_count++;
		}
		if (pa_addr(after[i].end) < pa_addr(before[i].end)) {
			if (update->reserved_ranges_count >= MAX_MEM_RANGES) {
				dlog_error(
					"Too many reserved ranges after "
					"loading secondary VMs.\n");
				return false;
			}
			update->reserved_ranges[update->reserved_ranges_count]
				.begin = after[i].end;
			update->reserved_ranges[update->reserved_ranges_count]
				.end = before[i].end;
			update->reserved_ranges_count++;
		}
	}

	return true;
}

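/**
 * Initialises the other world VM: the dummy VM representing the world on the
 * other side of the secure/non-secure boundary, with one vCPU pinned to each
 * physical CPU.
 */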
static bool init_other_world_vm(const struct boot_params *params,
				struct mpool *ppool)
{
	struct vm *other_world_vm;
	size_t i;

	/*
	 * Initialise the dummy VM which represents the opposite world:
	 * - TrustZone (or the SPMC) when running the Hypervisor;
	 * - the Hypervisor when running TZ/SPMC.
	 */
	other_world_vm = vm_init(HF_OTHER_WORLD_ID, MAX_CPUS, ppool, false, 0);
	CHECK(other_world_vm != NULL);

	for (i = 0; i < MAX_CPUS; i++) {
		struct vcpu *vcpu = vm_get_vcpu(other_world_vm, i);
		struct cpu *cpu = cpu_find_index(i);

		vcpu->cpu = cpu;
	}

	return arch_other_world_vm_init(other_world_vm, params, ppool);
}

/*
 * Loads all VMs from the manifest.
 */
bool load_vms(struct mm_stage1_locked stage1_locked,
	      const struct manifest *manifest, const struct memiter *cpio,
	      const struct boot_params *params,
	      struct boot_params_update *update, struct mpool *ppool)
{
	struct vm *primary;
	struct mem_range mem_ranges_available[MAX_MEM_RANGES];
	struct vm_locked primary_vm_locked;
	size_t i;
	bool success = true;

	/*
	 * Only try to load the primary VM if it is supposed to be in this
	 * world.
	 */
	if (vm_id_is_current_world(HF_PRIMARY_VM_ID)) {
		if (!load_primary(stage1_locked,
				  &manifest->vm[HF_PRIMARY_VM_INDEX], cpio,
				  params, ppool)) {
			dlog_error("Unable to load primary VM.\n");
			return false;
		}
	}

	if (!init_other_world_vm(params, ppool)) {
		return false;
	}

	static_assert(
		sizeof(mem_ranges_available) == sizeof(params->mem_ranges),
		"mem_range arrays must be the same size for memcpy.");
	static_assert(sizeof(mem_ranges_available) < 500,
		      "This will use too much stack, either make "
		      "MAX_MEM_RANGES smaller or change this.");
	memcpy_s(mem_ranges_available, sizeof(mem_ranges_available),
		 params->mem_ranges, sizeof(params->mem_ranges));

	/* Round the last addresses down to the page size. */
	for (i = 0; i < params->mem_ranges_count; ++i) {
		mem_ranges_available[i].end = pa_init(align_down(
			pa_addr(mem_ranges_available[i].end), PAGE_SIZE));
	}

	primary = vm_find(HF_PRIMARY_VM_ID);
	primary_vm_locked = vm_lock(primary);

	for (i = 0; i < manifest->vm_count; ++i) {
		const struct manifest_vm *manifest_vm = &manifest->vm[i];
		ffa_id_t vm_id = HF_VM_ID_OFFSET + i;
		uint64_t mem_size;
		paddr_t secondary_mem_begin;
		paddr_t secondary_mem_end;

		if (vm_id == HF_PRIMARY_VM_ID) {
			continue;
		}

		dlog_info("Loading VM id %#x: %s.\n", vm_id,
			  manifest_vm->debug_name.data);

		mem_size = align_up(manifest_vm->secondary.mem_size, PAGE_SIZE);

		if (manifest_vm->is_ffa_partition &&
		    !manifest->vm[i].is_hyp_loaded) {
			secondary_mem_begin =
				pa_init(manifest_vm->partition.load_addr);
			secondary_mem_end = pa_init(
				manifest_vm->partition.load_addr + mem_size);
		} else if (!carve_out_mem_range(mem_ranges_available,
						params->mem_ranges_count,
						mem_size, &secondary_mem_begin,
						&secondary_mem_end)) {
			dlog_error("Not enough memory (%lu bytes).\n",
				   mem_size);
			continue;
		}

		if (!load_secondary(stage1_locked, primary_vm_locked,
				    secondary_mem_begin, secondary_mem_end,
				    manifest_vm, params, cpio, ppool)) {
			dlog_error("Unable to load VM.\n");
			continue;
		}

		/* Deny the primary VM access to this memory. */
		if (!vm_unmap(primary_vm_locked, secondary_mem_begin,
			      secondary_mem_end, ppool)) {
			dlog_error(
				"Unable to unmap secondary VM from primary "
				"VM.\n");
			success = false;
			break;
		}
	}

	vm_unlock(&primary_vm_locked);

	if (!success) {
		return false;
	}

	/*
	 * Add newly reserved areas to update params by looking at the
	 * difference between the available ranges from the original params
	 * and the updated mem_ranges_available. We assume that the number and
	 * order of available ranges is the same, i.e. we don't remove any
	 * ranges above, only make them smaller.
	 */
1067 */
Andrew Walbran34ce72e2018-09-13 16:47:44 +01001068 return update_reserved_ranges(update, params->mem_ranges,
1069 mem_ranges_available,
1070 params->mem_ranges_count);
Wedson Almeida Filhofdf4afc2018-07-19 15:45:21 +01001071}