/*
 * Copyright 2018 The Hafnium Authors.
 *
 * Use of this source code is governed by a BSD-style
 * license that can be found in the LICENSE file or at
 * https://opensource.org/licenses/BSD-3-Clause.
 */

#include "hf/load.h"

#include <stdbool.h>

#include "hf/arch/init.h"
#include "hf/arch/other_world.h"
#include "hf/arch/plat/ffa.h"
#include "hf/arch/vm.h"

#include "hf/api.h"
#include "hf/boot_params.h"
#include "hf/check.h"
#include "hf/dlog.h"
#include "hf/fdt_patch.h"
#include "hf/layout.h"
#include "hf/memiter.h"
#include "hf/mm.h"
#include "hf/plat/console.h"
#include "hf/plat/interrupts.h"
#include "hf/plat/iommu.h"
#include "hf/static_assert.h"
#include "hf/std.h"
#include "hf/vm.h"

#include "vmapi/hf/call.h"
#include "vmapi/hf/ffa.h"

/**
 * Copies data to an unmapped location by mapping it for write, copying the
 * data, then unmapping it.
 *
 * The data is written so that it is available to all cores with caches
 * disabled: when switching to a partition, caching is initially disabled, so
 * the data must be available without the cache.
 */
static bool copy_to_unmapped(struct mm_stage1_locked stage1_locked, paddr_t to,
			     struct memiter *from_it, struct mpool *ppool)
{
	const void *from = memiter_base(from_it);
	size_t size = memiter_size(from_it);
	paddr_t to_end = pa_add(to, size);
	void *ptr;

	ptr = mm_identity_map(stage1_locked, to, to_end, MM_MODE_W, ppool);
	if (!ptr) {
		return false;
	}

	memcpy_s(ptr, size, from, size);
	arch_mm_flush_dcache(ptr, size);

	CHECK(mm_unmap(stage1_locked, to, to_end, ppool));

	return true;
}

/**
 * Loads a VM's kernel (used for both the primary and secondary VMs).
 * Stores the kernel size in kernel_size (if kernel_size is not NULL).
 * Returns false if it cannot load the kernel.
 */
static bool load_kernel(struct mm_stage1_locked stage1_locked, paddr_t begin,
			paddr_t end, const struct manifest_vm *manifest_vm,
			const struct memiter *cpio, struct mpool *ppool,
			size_t *kernel_size)
{
	struct memiter kernel;
	size_t size;

	if (!cpio_get_file(cpio, &manifest_vm->kernel_filename, &kernel)) {
		dlog_error("Could not find kernel file \"%s\".\n",
			   string_data(&manifest_vm->kernel_filename));
		return false;
	}

	size = memiter_size(&kernel);
	if (pa_difference(begin, end) < size) {
		dlog_error("Kernel is larger than available memory.\n");
		return false;
	}

	if (!copy_to_unmapped(stage1_locked, begin, &kernel, ppool)) {
		dlog_error("Unable to copy kernel.\n");
		return false;
	}

	if (kernel_size) {
		*kernel_size = size;
	}

	return true;
}

/*
 * Links the RX/TX buffers provided in the partition manifest to the VM's
 * mailbox.
 */
static bool link_rxtx_to_mailbox(struct mm_stage1_locked stage1_locked,
				 struct vm_locked vm_locked, struct rx_tx rxtx,
				 struct mpool *ppool)
{
	struct ffa_value ret;
	ipaddr_t send;
	ipaddr_t recv;
	uint32_t page_count;

	send = ipa_init(rxtx.tx_buffer->base_address);
	recv = ipa_init(rxtx.rx_buffer->base_address);
	page_count = rxtx.tx_buffer->page_count;

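	/*
	 * Delegate to the common FFA_RXTX_MAP path to validate and map the
	 * buffers; on success the VM's mailbox send/recv pointers refer to
	 * them (see the dlog below).
	 */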
	ret = api_vm_configure_pages(stage1_locked, vm_locked, send, recv,
				     page_count, ppool);
	if (ret.func != FFA_SUCCESS_32) {
		return false;
	}

	dlog_verbose(" mailbox: send = %#x, recv = %#x\n",
		     vm_locked.vm->mailbox.send, vm_locked.vm->mailbox.recv);

	return true;
}

static void infer_interrupt(struct interrupt_info interrupt,
			    struct interrupt_descriptor *int_desc)
{
	uint32_t attr = interrupt.attributes;

	interrupt_desc_set_id(int_desc, interrupt.id);
	interrupt_desc_set_priority(int_desc,
				    (attr >> INT_DESC_PRIORITY_SHIFT) & 0xff);

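	/*
	 * The call below packs type, configuration and security state into a
	 * single field; from the shifts used here the layout is bits[3:2] =
	 * type, bit[1] = config and bit[0] = security state.
	 */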
	/* Refer to the comments in interrupt_descriptor struct definition. */
	interrupt_desc_set_type_config_sec_state(
		int_desc,
		(((attr >> INT_DESC_TYPE_SHIFT) & 0x3) << 2) |
			(((attr >> INT_DESC_CONFIG_SHIFT) & 0x1) << 1) |
			((attr >> INT_DESC_SEC_STATE_SHIFT) & 0x1));

	if (interrupt.mpidr_valid) {
		interrupt_desc_set_mpidr(int_desc, interrupt.mpidr);
	} else {
		interrupt_desc_set_mpidr_invalid(int_desc);
	}

	interrupt_desc_set_valid(int_desc, true);
}

/**
 * Performs VM loading activities that are common between the primary and
 * secondary VMs.
 */
static bool load_common(struct mm_stage1_locked stage1_locked,
			struct vm_locked vm_locked,
			const struct manifest_vm *manifest_vm,
			struct mpool *ppool)
{
	struct device_region dev_region;
	struct interrupt_info interrupt;
	uint32_t k = 0;

	vm_locked.vm->smc_whitelist = manifest_vm->smc_whitelist;
	vm_locked.vm->uuid = manifest_vm->partition.uuid;
	vm_locked.vm->power_management =
		manifest_vm->partition.power_management;

	/* Populate the interrupt descriptors for the current VM. */
	for (uint16_t i = 0; i < PARTITION_MAX_DEVICE_REGIONS; i++) {
		dev_region = manifest_vm->partition.dev_regions[i];

		CHECK(dev_region.interrupt_count <=
		      PARTITION_MAX_INTERRUPTS_PER_DEVICE);

		for (uint8_t j = 0; j < dev_region.interrupt_count; j++) {
			struct interrupt_descriptor int_desc = {0};

			interrupt = dev_region.interrupts[j];
			infer_interrupt(interrupt, &int_desc);
			vm_locked.vm->interrupt_desc[k] = int_desc;

			/*
			 * Configure the physical interrupts allocated for this
			 * VM in its partition manifest.
			 */
			plat_interrupts_configure_interrupt(int_desc);
			k++;
			CHECK(k <= VM_MANIFEST_MAX_INTERRUPTS);
		}
	}
	dlog_verbose("VM has %u physical interrupts defined in manifest.\n", k);

	if (manifest_vm->is_ffa_partition) {
		vm_locked.vm->ffa_version = manifest_vm->partition.ffa_version;
		/* Link the RX/TX buffers to the mailbox. */
		if (manifest_vm->partition.rxtx.available) {
			if (!link_rxtx_to_mailbox(stage1_locked, vm_locked,
						  manifest_vm->partition.rxtx,
						  ppool)) {
				dlog_error(
					"Unable to link RX/TX buffers with "
					"the mailbox.\n");
				return false;
			}
		}

		vm_locked.vm->messaging_method =
			manifest_vm->partition.messaging_method;

		vm_locked.vm->ns_interrupts_action =
			manifest_vm->partition.ns_interrupts_action;

		vm_locked.vm->other_s_interrupts_action =
			manifest_vm->partition.other_s_interrupts_action;

		vm_locked.vm->me_signal_virq =
			manifest_vm->partition.me_signal_virq;

		vm_locked.vm->notifications.enabled =
			manifest_vm->partition.notification_support;

		vm_locked.vm->boot_order = manifest_vm->partition.boot_order;

		vm_locked.vm->boot_info.gp_register_num =
			manifest_vm->partition.gp_register_num;

		if (manifest_vm->partition.boot_info) {
			/*
			 * If the partition expects the boot information blob
			 * per the FF-A v1.1 boot protocol, then its address
			 * shall match the partition's load address.
			 */
			vm_locked.vm->boot_info.blob_addr =
				ipa_init(manifest_vm->partition.load_addr);
		}

		/* Update the boot list according to boot_order. */
		vcpu_update_boot(vm_get_vcpu(vm_locked.vm, 0));

		if (vm_locked_are_notifications_enabled(vm_locked) &&
		    !plat_ffa_notifications_bitmap_create_call(
			    vm_locked.vm->id, vm_locked.vm->vcpu_count)) {
			return false;
		}
	}

	/* Initialize architecture-specific features. */
	arch_vm_features_set(vm_locked.vm);

	if (!plat_iommu_attach_peripheral(stage1_locked, vm_locked, manifest_vm,
					  ppool)) {
		dlog_error("Unable to attach upstream peripheral device\n");
		return false;
	}

	return true;
}

/**
 * Loads the primary VM.
 */
static bool load_primary(struct mm_stage1_locked stage1_locked,
			 const struct manifest_vm *manifest_vm,
			 const struct memiter *cpio,
			 const struct boot_params *params, struct mpool *ppool)
{
	paddr_t primary_begin;
	ipaddr_t primary_entry;
	struct vm *vm;
	struct vm_locked vm_locked;
	struct vcpu_locked vcpu_locked;
	size_t i;
	bool ret;

	if (manifest_vm->is_ffa_partition && !manifest_vm->is_hyp_loaded) {
		primary_begin = pa_init(manifest_vm->partition.load_addr);
		primary_entry = ipa_add(ipa_from_pa(primary_begin),
					manifest_vm->partition.ep_offset);
	} else {
		primary_begin =
			(manifest_vm->primary.boot_address ==
			 MANIFEST_INVALID_ADDRESS)
				? layout_primary_begin()
				: pa_init(manifest_vm->primary.boot_address);
		primary_entry = ipa_from_pa(primary_begin);
	}

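	/*
	 * The primary image size isn't known at this point, so bound its
	 * memory range by RSIZE_MAX, the largest object size the memcpy_s
	 * based copy path accepts (an inference, not stated in the code).
	 */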
	paddr_t primary_end = pa_add(primary_begin, RSIZE_MAX);

	/* The primary VM must be a VM (running at EL1), not an SP. */
	CHECK(manifest_vm->partition.run_time_el == EL1);

	/*
	 * Load the kernel if a filename is specified in the VM manifest.
	 * For an FF-A partition, kernel_filename is undefined, indicating
	 * the partition package has already been loaded prior to Hafnium
	 * booting.
	 */
	if (!string_is_empty(&manifest_vm->kernel_filename)) {
		if (!load_kernel(stage1_locked, primary_begin, primary_end,
				 manifest_vm, cpio, ppool, NULL)) {
			dlog_error("Unable to load primary kernel.\n");
			return false;
		}
	}

	if (!vm_init_next(MAX_CPUS, ppool, &vm, false)) {
		dlog_error("Unable to initialise primary VM.\n");
		return false;
	}

	if (vm->id != HF_PRIMARY_VM_ID) {
		dlog_error("Primary VM was not given correct ID.\n");
		return false;
	}

	vm_locked = vm_lock(vm);

	if (params->device_mem_ranges_count == 0) {
		/*
		 * Map 1TB of address space as device memory so that, most
		 * likely, all devices are available to the primary VM.
		 *
		 * TODO: remove this once all targets provide valid ranges.
		 */
		dlog_warning(
			"Device memory not provided, defaulting to 1 TB.\n");

		if (!vm_identity_map(
			    vm_locked, pa_init(0),
			    pa_init(UINT64_C(1024) * 1024 * 1024 * 1024),
			    MM_MODE_R | MM_MODE_W | MM_MODE_D, ppool, NULL)) {
			dlog_error(
				"Unable to initialise address space for "
				"primary VM.\n");
			ret = false;
			goto out;
		}
	}

	/* Map normal memory as such to permit caching, execution, etc. */
	for (i = 0; i < params->mem_ranges_count; ++i) {
		if (!vm_identity_map(vm_locked, params->mem_ranges[i].begin,
				     params->mem_ranges[i].end,
				     MM_MODE_R | MM_MODE_W | MM_MODE_X, ppool,
				     NULL)) {
			dlog_error(
				"Unable to initialise memory for primary "
				"VM.\n");
			ret = false;
			goto out;
		}
	}

	/* Map device memory as such to prevent execution, speculation etc. */
	for (i = 0; i < params->device_mem_ranges_count; ++i) {
		if (!vm_identity_map(
			    vm_locked, params->device_mem_ranges[i].begin,
			    params->device_mem_ranges[i].end,
			    MM_MODE_R | MM_MODE_W | MM_MODE_D, ppool, NULL)) {
			dlog("Unable to initialise device memory for primary "
			     "VM.\n");
			ret = false;
			goto out;
		}
	}

	if (!load_common(stage1_locked, vm_locked, manifest_vm, ppool)) {
		ret = false;
		goto out;
	}

	if (!vm_unmap_hypervisor(vm_locked, ppool)) {
		dlog_error("Unable to unmap hypervisor from primary VM.\n");
		ret = false;
		goto out;
	}

	if (!plat_iommu_unmap_iommus(vm_locked, ppool)) {
		dlog_error("Unable to unmap IOMMUs from primary VM.\n");
		ret = false;
		goto out;
	}

	dlog_info("Loaded primary VM with %u vCPUs, entry at %#x.\n",
		  vm->vcpu_count, pa_addr(primary_begin));

	/* Mark the VM's first vCPU as the first vCPU to be booted. */
	vcpu_update_boot(vm_get_vcpu(vm, 0));

	vcpu_locked = vcpu_lock(vm_get_vcpu(vm, 0));
	vcpu_on(vcpu_locked, primary_entry, params->kernel_arg);
	vcpu_unlock(&vcpu_locked);
	ret = true;

out:
	vm_unlock(&vm_locked);

	return ret;
}

/**
 * Loads the secondary VM's FDT.
 * Stores the total allocated size for the FDT in fdt_allocated_size (if
 * fdt_allocated_size is not NULL). The allocated size includes additional
 * space for potential patching.
 */
static bool load_secondary_fdt(struct mm_stage1_locked stage1_locked,
			       paddr_t end, size_t fdt_max_size,
			       const struct manifest_vm *manifest_vm,
			       const struct memiter *cpio, struct mpool *ppool,
			       paddr_t *fdt_addr, size_t *fdt_allocated_size)
{
	struct memiter fdt;
	size_t allocated_size;

	CHECK(!string_is_empty(&manifest_vm->secondary.fdt_filename));

	if (!cpio_get_file(cpio, &manifest_vm->secondary.fdt_filename, &fdt)) {
		dlog_error("Cannot open the secondary VM's FDT.\n");
		return false;
	}

	/*
	 * Ensure the FDT has one additional page at the end for patching,
	 * and align it to the page boundary.
	 */
	allocated_size = align_up(memiter_size(&fdt), PAGE_SIZE) + PAGE_SIZE;

	if (allocated_size > fdt_max_size) {
		dlog_error(
			"FDT allocated space (%u) is more than the specified "
			"maximum to use (%u).\n",
			allocated_size, fdt_max_size);
		return false;
	}

	/* Load the FDT to the end of the VM's allocated memory space. */
	*fdt_addr = pa_init(pa_addr(pa_sub(end, allocated_size)));

	dlog_info("Loading secondary FDT of allocated size %u at 0x%x.\n",
		  allocated_size, pa_addr(*fdt_addr));

	if (!copy_to_unmapped(stage1_locked, *fdt_addr, &fdt, ppool)) {
		dlog_error("Unable to copy FDT.\n");
		return false;
	}

	if (fdt_allocated_size) {
		*fdt_allocated_size = allocated_size;
	}

	return true;
}

/**
 * Converts the manifest memory region attributes to the mode consumed by the
 * mm layer.
 */
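/*
 * For example, READ|WRITE attributes yield MM_MODE_R | MM_MODE_W and
 * READ|EXEC yield MM_MODE_R | MM_MODE_X; the assert below restricts the
 * result to exactly the RW, RO and RX combinations.
 */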
static uint32_t memory_region_attributes_to_mode(uint32_t attributes)
{
	uint32_t mode = 0U;

	if ((attributes & MANIFEST_REGION_ATTR_READ) != 0U) {
		mode |= MM_MODE_R;
	}

	if ((attributes & MANIFEST_REGION_ATTR_WRITE) != 0U) {
		mode |= MM_MODE_W;
	}

	if ((attributes & MANIFEST_REGION_ATTR_EXEC) != 0U) {
		mode |= MM_MODE_X;
	}

	assert((mode == (MM_MODE_R | MM_MODE_W)) || (mode == MM_MODE_R) ||
	       (mode == (MM_MODE_R | MM_MODE_X)));

	if ((attributes & MANIFEST_REGION_ATTR_SECURITY) != 0U) {
		mode |= arch_mm_extra_attributes_from_vm(HF_HYPERVISOR_VM_ID);
	}

	return mode;
}

/**
 * Converts the manifest device region attributes to the mode consumed by the
 * mm layer.
 */
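/*
 * Same mapping as for memory regions, minus the execute bit; MM_MODE_D is
 * always added so the region is mapped as device memory.
 */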
static uint32_t device_region_attributes_to_mode(uint32_t attributes)
{
	uint32_t mode = 0U;

	if ((attributes & MANIFEST_REGION_ATTR_READ) != 0U) {
		mode |= MM_MODE_R;
	}

	if ((attributes & MANIFEST_REGION_ATTR_WRITE) != 0U) {
		mode |= MM_MODE_W;
	}

	assert((mode == (MM_MODE_R | MM_MODE_W)) || (mode == MM_MODE_R));

	if ((attributes & MANIFEST_REGION_ATTR_SECURITY) != 0U) {
		mode |= arch_mm_extra_attributes_from_vm(HF_HYPERVISOR_VM_ID);
	}

	return mode | MM_MODE_D;
}

static bool ffa_map_memory_regions(const struct manifest_vm *manifest_vm,
				   const struct vm_locked vm_locked,
				   const struct vm_locked primary_vm_locked,
				   paddr_t mem_end, bool is_el0_partition,
				   struct mpool *ppool)
{
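	/*
	 * error_string is only referenced from dlog_warning() calls, which
	 * compile to nothing below LOG_LEVEL_WARNING, so the guard avoids an
	 * unused-variable warning.
	 */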
#if LOG_LEVEL >= LOG_LEVEL_WARNING
	const char *error_string = " region security state ignored for ";
#endif
	int j = 0;
	paddr_t region_begin;
	paddr_t region_end;
	paddr_t alloc_base = mem_end;
	size_t size;
	size_t total_alloc = 0;
	uint32_t map_mode;
	uint32_t attributes;

	/* Map memory regions. */
	while (j < manifest_vm->partition.mem_region_count) {
		size = manifest_vm->partition.mem_regions[j].page_count *
		       PAGE_SIZE;
		/*
		 * Memory regions without a base address are allocated inside
		 * the partition's page table, working backwards from the end
		 * of its memory.
		 * TODO: Add mechanism to let the partition know of these
		 * memory regions.
		 */
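		/*
		 * e.g. (hypothetical numbers): with mem_end = 0x88000000, a
		 * first 16-page region is placed at [0x87ff0000, 0x88000000)
		 * and the next one immediately below it.
		 */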
		if (manifest_vm->partition.mem_regions[j].base_address ==
		    MANIFEST_INVALID_ADDRESS) {
			total_alloc += size;
			/* Don't go beyond half the VM's memory space. */
			if (total_alloc >
			    (manifest_vm->secondary.mem_size / 2)) {
				dlog_error(
					"Not enough space for memory-"
					"region allocation");
				return false;
			}

			region_end = alloc_base;
			region_begin = pa_subtract(alloc_base, size);
			alloc_base = region_begin;
		} else {
			/*
			 * Identity map the memory region in either case:
			 * VA (S-EL0) or IPA (S-EL1).
			 */
			region_begin =
				pa_init(manifest_vm->partition.mem_regions[j]
						.base_address);
			region_end = pa_add(region_begin, size);
		}

		attributes = manifest_vm->partition.mem_regions[j].attributes;
		if ((attributes & MANIFEST_REGION_ATTR_SECURITY) != 0) {
			if (plat_ffa_is_vm_id(vm_locked.vm->id)) {
				dlog_warning("Memory%sVMs\n", error_string);
				attributes &= ~MANIFEST_REGION_ATTR_SECURITY;

			} else if (!is_el0_partition) {
				dlog_warning(
					"Memory%sS-EL1 "
					"partitions.\n",
					error_string);
				attributes &= ~MANIFEST_REGION_ATTR_SECURITY;
			}
		}

		map_mode = memory_region_attributes_to_mode(attributes);

		if (is_el0_partition) {
			map_mode |= MM_MODE_USER | MM_MODE_NG;
		}

		if (!vm_identity_map(vm_locked, region_begin, region_end,
				     map_mode, ppool, NULL)) {
			dlog_error(
				"Unable to map secondary VM "
				"memory-region.\n");
			return false;
		}

		/* Deny the primary VM access to this memory. */
		if (!vm_unmap(primary_vm_locked, region_begin, region_end,
			      ppool)) {
			dlog_error(
				"Unable to unmap secondary VM memory-"
				"region from primary VM.\n");
			return false;
		}

		dlog_verbose("Memory region %#x - %#x allocated.\n",
			     pa_addr(region_begin), pa_addr(region_end));

		j++;
	}

	/* Map device-regions */
	j = 0;
	while (j < manifest_vm->partition.dev_region_count) {
		region_begin = pa_init(
			manifest_vm->partition.dev_regions[j].base_address);
		size = manifest_vm->partition.dev_regions[j].page_count *
		       PAGE_SIZE;
		region_end = pa_add(region_begin, size);

		attributes = manifest_vm->partition.dev_regions[j].attributes;
		if ((attributes & MANIFEST_REGION_ATTR_SECURITY) != 0) {
			if (plat_ffa_is_vm_id(vm_locked.vm->id)) {
				dlog_warning("Device%sVMs\n", error_string);
				attributes &= ~MANIFEST_REGION_ATTR_SECURITY;
			} else if (!is_el0_partition) {
				dlog_warning(
					"Device%sS-EL1 "
					"partitions.\n",
					error_string);
				attributes &= ~MANIFEST_REGION_ATTR_SECURITY;
			}
		}

		map_mode = device_region_attributes_to_mode(attributes);
		if (is_el0_partition) {
			map_mode |= MM_MODE_USER | MM_MODE_NG;
		}

		if (!vm_identity_map(vm_locked, region_begin, region_end,
				     map_mode, ppool, NULL)) {
			dlog_error(
				"Unable to map secondary VM "
				"device-region.\n");
			return false;
		}
		/* Deny primary VM access to this region */
		if (!vm_unmap(primary_vm_locked, region_begin, region_end,
			      ppool)) {
			dlog_error(
				"Unable to unmap secondary VM device-"
				"region from primary VM.\n");
			return false;
		}
		j++;
	}
	return true;
}

/*
 * Loads a secondary VM.
 */
static bool load_secondary(struct mm_stage1_locked stage1_locked,
			   struct vm_locked primary_vm_locked,
			   paddr_t mem_begin, paddr_t mem_end,
			   const struct manifest_vm *manifest_vm,
			   const struct memiter *cpio, struct mpool *ppool)
{
	struct vm *vm;
	struct vm_locked vm_locked;
	struct vcpu_locked vcpu_locked;
	struct vcpu *vcpu;
	ipaddr_t secondary_entry;
	bool ret;
	paddr_t fdt_addr;
	bool has_fdt;
	size_t kernel_size = 0;
	const size_t mem_size = pa_difference(mem_begin, mem_end);
	uint32_t map_mode;
	bool is_el0_partition = manifest_vm->partition.run_time_el == S_EL0;
	size_t n;

	/*
	 * Load the kernel if a filename is specified in the VM manifest.
	 * For an FF-A partition, kernel_filename is undefined, indicating
	 * the partition package has already been loaded prior to Hafnium
	 * booting.
	 */
	if (!string_is_empty(&manifest_vm->kernel_filename)) {
		if (!load_kernel(stage1_locked, mem_begin, mem_end, manifest_vm,
				 cpio, ppool, &kernel_size)) {
			dlog_error("Unable to load kernel.\n");
			return false;
		}
	}

	has_fdt = !string_is_empty(&manifest_vm->secondary.fdt_filename);
	if (has_fdt) {
		/*
		 * Ensure that the FDT does not overwrite the kernel or overlap
		 * its page, for the FDT to start at a page boundary.
		 */
		const size_t fdt_max_size =
			mem_size - align_up(kernel_size, PAGE_SIZE);

		size_t fdt_allocated_size;

		if (!load_secondary_fdt(stage1_locked, mem_end, fdt_max_size,
					manifest_vm, cpio, ppool, &fdt_addr,
					&fdt_allocated_size)) {
			dlog_error("Unable to load FDT.\n");
			return false;
		}

		if (manifest_vm->is_ffa_partition) {
			plat_ffa_parse_partition_manifest(
				stage1_locked, fdt_addr, fdt_allocated_size,
				manifest_vm, ppool);
		}

		if (!fdt_patch_mem(stage1_locked, fdt_addr, fdt_allocated_size,
				   mem_begin, mem_end, ppool)) {
			dlog_error("Unable to patch FDT.\n");
			return false;
		}
	}
	/*
	 * An S-EL0 partition must contain only 1 vCPU (UP migratable) per the
	 * FF-A 1.0 spec.
	 */
	CHECK(!is_el0_partition || manifest_vm->secondary.vcpu_count == 1);

	if (!vm_init_next(manifest_vm->secondary.vcpu_count, ppool, &vm,
			  is_el0_partition)) {
		dlog_error("Unable to initialise VM.\n");
		return false;
	}

	vm_locked = vm_lock(vm);

	/*
	 * Grant the VM access to the memory. For VMs, we mark all memory in
	 * the stage-2 tables as RWX and the VM can control permissions using
	 * stage-1 translations. For S-EL0 partitions, Hafnium maps the entire
	 * region of memory for the partition as RX. The partition is then
	 * expected to perform its own relocations and call the FFA_MEM_PERM_*
	 * APIs to change permissions on its image layout.
	 */
	if (is_el0_partition) {
		map_mode = MM_MODE_R | MM_MODE_X | MM_MODE_USER | MM_MODE_NG;
	} else {
		map_mode = MM_MODE_R | MM_MODE_W | MM_MODE_X;
	}

	if (!vm_identity_map(vm_locked, mem_begin, mem_end, map_mode, ppool,
			     &secondary_entry)) {
		dlog_error("Unable to initialise memory.\n");
		ret = false;
		goto out;
	}

	if (manifest_vm->is_ffa_partition) {
		if (!ffa_map_memory_regions(manifest_vm, vm_locked,
					    primary_vm_locked, mem_end,
					    is_el0_partition, ppool)) {
			ret = false;
			goto out;
		}

		secondary_entry = ipa_add(secondary_entry,
					  manifest_vm->partition.ep_offset);
	}

	/*
	 * Map the hypervisor into the VM's page table. The hypervisor pages
	 * will not be accessible from EL0 since they will not be marked for
	 * user access.
	 * TODO: Map only the exception vectors and data that exception vectors
	 * require and not the entire hypervisor. This helps with speculative
	 * side-channel attacks.
	 */
	if (is_el0_partition) {
		CHECK(vm_identity_map(vm_locked, layout_text_begin(),
				      layout_text_end(), MM_MODE_X, ppool,
				      NULL));

		CHECK(vm_identity_map(vm_locked, layout_rodata_begin(),
				      layout_rodata_end(), MM_MODE_R, ppool,
				      NULL));

		CHECK(vm_identity_map(vm_locked, layout_data_begin(),
				      layout_data_end(), MM_MODE_R | MM_MODE_W,
				      ppool, NULL));

		CHECK(arch_stack_mm_init(mm_lock_ptable_unsafe(&vm->ptable),
					 ppool));

		plat_console_mm_init(mm_lock_ptable_unsafe(&vm->ptable), ppool);
	}

	if (!load_common(stage1_locked, vm_locked, manifest_vm, ppool)) {
		ret = false;
		goto out;
	}

	dlog_info("Loaded with %u vCPUs, entry at %#x.\n",
		  manifest_vm->secondary.vcpu_count, pa_addr(mem_begin));

	vcpu = vm_get_vcpu(vm, 0);

	vcpu_locked = vcpu_lock(vcpu);

	if (has_fdt) {
		vcpu_secondary_reset_and_start(vcpu_locked, secondary_entry,
					       pa_addr(fdt_addr));
	} else {
		/*
		 * Without an FDT, secondary VMs expect the memory size to be
		 * passed in register x0, which is what
		 * vcpu_secondary_reset_and_start does in this case.
		 */
		vcpu_secondary_reset_and_start(vcpu_locked, secondary_entry,
					       mem_size);
	}

	vcpu_unlock(&vcpu_locked);

	/*
	 * For all vCPUs:
	 * - in a VM, enable the notification pending virtual interrupt if
	 *   requested in the manifest;
	 * - in an SP, enable the NPI and managed exit virtual interrupts if
	 *   requested in the manifest. For an S-EL0 partition, enable the
	 *   virtual interrupt IDs matching the secure physical interrupt IDs
	 *   declared in device regions.
	 */
	for (n = 0; n < manifest_vm->secondary.vcpu_count; n++) {
		vcpu = vm_get_vcpu(vm, n);
		vcpu_locked = vcpu_lock(vcpu);
		plat_ffa_enable_virtual_interrupts(vcpu_locked, vm_locked);
		vcpu_unlock(&vcpu_locked);
	}

	ret = true;

out:
	vm_unlock(&vm_locked);

	return ret;
}

/**
 * Try to find a memory range of the given size within the given ranges, and
 * remove it from them. Return true on success, or false if no large enough
 * contiguous range is found.
 */
static bool carve_out_mem_range(struct mem_range *mem_ranges,
				size_t mem_ranges_count, uint64_t size_to_find,
				paddr_t *found_begin, paddr_t *found_end)
{
	size_t i;

	/*
	 * TODO(b/116191358): Consider being cleverer about how we pack VMs
	 * together, with a non-greedy algorithm.
	 */
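	/*
	 * Example (hypothetical numbers): with mem_ranges = {[0x80000000,
	 * 0x88000000)} and size_to_find = 0x200000, the loop below returns
	 * [0x87e00000, 0x88000000) and shrinks the range to end at 0x87e00000.
	 */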
	for (i = 0; i < mem_ranges_count; ++i) {
		if (size_to_find <=
		    pa_difference(mem_ranges[i].begin, mem_ranges[i].end)) {
			/*
			 * This range is big enough, take some of it from the
			 * end and reduce its size accordingly.
			 */
			*found_end = mem_ranges[i].end;
			*found_begin = pa_init(pa_addr(mem_ranges[i].end) -
					       size_to_find);
			mem_ranges[i].end = *found_begin;
			return true;
		}
	}
	return false;
}

/**
 * Given arrays of memory ranges before and after memory was removed for
 * secondary VMs, add the difference to the reserved ranges of the given
 * update. Return true on success, or false if there would be more than
 * MAX_MEM_RANGES reserved ranges after adding the new ones.
 * `before` and `after` must be arrays of exactly `mem_ranges_count` elements.
 */
static bool update_reserved_ranges(struct boot_params_update *update,
				   const struct mem_range *before,
				   const struct mem_range *after,
				   size_t mem_ranges_count)
{
	size_t i;

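	/*
	 * Example (hypothetical numbers): if before[i] = [0x80000000,
	 * 0x88000000) and after[i] = [0x80000000, 0x87e00000), the carved-out
	 * tail [0x87e00000, 0x88000000) is added as a reserved range.
	 */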
	for (i = 0; i < mem_ranges_count; ++i) {
		if (pa_addr(after[i].begin) > pa_addr(before[i].begin)) {
			if (update->reserved_ranges_count >= MAX_MEM_RANGES) {
				dlog_error(
					"Too many reserved ranges after "
					"loading secondary VMs.\n");
				return false;
			}
			update->reserved_ranges[update->reserved_ranges_count]
				.begin = before[i].begin;
			update->reserved_ranges[update->reserved_ranges_count]
				.end = after[i].begin;
			update->reserved_ranges_count++;
		}
		if (pa_addr(after[i].end) < pa_addr(before[i].end)) {
			if (update->reserved_ranges_count >= MAX_MEM_RANGES) {
				dlog_error(
					"Too many reserved ranges after "
					"loading secondary VMs.\n");
				return false;
			}
			update->reserved_ranges[update->reserved_ranges_count]
				.begin = after[i].end;
			update->reserved_ranges[update->reserved_ranges_count]
				.end = before[i].end;
			update->reserved_ranges_count++;
		}
	}

	return true;
}

static bool init_other_world_vm(struct mpool *ppool)
{
	struct vm *other_world_vm;
	size_t i;

	/*
	 * Initialise the dummy VM which represents the opposite world:
	 * - TrustZone (or the SPMC) when running the Hypervisor;
	 * - the Hypervisor when running TZ/SPMC.
	 */
	other_world_vm = vm_init(HF_OTHER_WORLD_ID, MAX_CPUS, ppool, false);
	CHECK(other_world_vm != NULL);

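	/* Pin each of the other world's vCPUs to its physical CPU. */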
	for (i = 0; i < MAX_CPUS; i++) {
		struct vcpu *vcpu = vm_get_vcpu(other_world_vm, i);
		struct cpu *cpu = cpu_find_index(i);

		vcpu->cpu = cpu;
	}

	return arch_other_world_vm_init(other_world_vm, ppool);
}

/*
 * Loads all VMs from the manifest.
 */
bool load_vms(struct mm_stage1_locked stage1_locked,
	      const struct manifest *manifest, const struct memiter *cpio,
	      const struct boot_params *params,
	      struct boot_params_update *update, struct mpool *ppool)
{
	struct vm *primary;
	struct mem_range mem_ranges_available[MAX_MEM_RANGES];
	struct vm_locked primary_vm_locked;
	size_t i;
	bool success = true;

	/*
	 * Only try to load the primary VM if it is supposed to be in this
	 * world.
	 */
	if (vm_id_is_current_world(HF_PRIMARY_VM_ID)) {
		if (!load_primary(stage1_locked,
				  &manifest->vm[HF_PRIMARY_VM_INDEX], cpio,
				  params, ppool)) {
			dlog_error("Unable to load primary VM.\n");
			return false;
		}
	}

	if (!init_other_world_vm(ppool)) {
		return false;
	}

	static_assert(
		sizeof(mem_ranges_available) == sizeof(params->mem_ranges),
		"mem_range arrays must be the same size for memcpy.");
	static_assert(sizeof(mem_ranges_available) < 500,
		      "This will use too much stack, either make "
		      "MAX_MEM_RANGES smaller or change this.");
	memcpy_s(mem_ranges_available, sizeof(mem_ranges_available),
		 params->mem_ranges, sizeof(params->mem_ranges));

	/* Round the last addresses down to the page size. */
	for (i = 0; i < params->mem_ranges_count; ++i) {
		mem_ranges_available[i].end = pa_init(align_down(
			pa_addr(mem_ranges_available[i].end), PAGE_SIZE));
	}

	primary = vm_find(HF_PRIMARY_VM_ID);
	primary_vm_locked = vm_lock(primary);

	for (i = 0; i < manifest->vm_count; ++i) {
		const struct manifest_vm *manifest_vm = &manifest->vm[i];
		ffa_vm_id_t vm_id = HF_VM_ID_OFFSET + i;
		uint64_t mem_size;
		paddr_t secondary_mem_begin;
		paddr_t secondary_mem_end;

		if (vm_id == HF_PRIMARY_VM_ID) {
			continue;
		}

		dlog_info("Loading VM id %#x: %s.\n", vm_id,
			  manifest_vm->debug_name);

		mem_size = align_up(manifest_vm->secondary.mem_size, PAGE_SIZE);

		if (manifest_vm->is_ffa_partition &&
		    !manifest->vm[i].is_hyp_loaded) {
			secondary_mem_begin =
				pa_init(manifest_vm->partition.load_addr);
			secondary_mem_end = pa_init(
				manifest_vm->partition.load_addr + mem_size);
		} else if (!carve_out_mem_range(mem_ranges_available,
						params->mem_ranges_count,
						mem_size, &secondary_mem_begin,
						&secondary_mem_end)) {
			dlog_error("Not enough memory (%u bytes).\n", mem_size);
			continue;
		}

		if (!load_secondary(stage1_locked, primary_vm_locked,
				    secondary_mem_begin, secondary_mem_end,
				    manifest_vm, cpio, ppool)) {
			dlog_error("Unable to load VM.\n");
			continue;
		}

		/* Deny the primary VM access to this memory. */
		if (!vm_unmap(primary_vm_locked, secondary_mem_begin,
			      secondary_mem_end, ppool)) {
			dlog_error(
				"Unable to unmap secondary VM from primary "
				"VM.\n");
			success = false;
			break;
		}
	}

	vm_unlock(&primary_vm_locked);

	if (!success) {
		return false;
	}

	/*
	 * Add newly reserved areas to update params by looking at the
	 * difference between the available ranges from the original params and
	 * the updated mem_ranges_available. We assume that the number and
	 * order of available ranges is the same, i.e. the code above doesn't
	 * remove any ranges, it only makes them smaller.
	 */
	return update_reserved_ranges(update, params->mem_ranges,
				      mem_ranges_available,
				      params->mem_ranges_count);
}