/*
 * Copyright 2018 The Hafnium Authors.
 *
 * Use of this source code is governed by a BSD-style
 * license that can be found in the LICENSE file or at
 * https://opensource.org/licenses/BSD-3-Clause.
 */

#include "hf/load.h"

#include <stdbool.h>

#include "hf/arch/vm.h"

#include "hf/api.h"
#include "hf/boot_params.h"
#include "hf/check.h"
#include "hf/dlog.h"
#include "hf/layout.h"
#include "hf/memiter.h"
#include "hf/mm.h"
#include "hf/plat/console.h"
#include "hf/plat/iommu.h"
#include "hf/static_assert.h"
#include "hf/std.h"
#include "hf/vm.h"

#include "vmapi/hf/call.h"

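/*
 * Statically allocated send/receive mailbox buffers for the dummy TEE
 * (TrustZone) VM; they are attached to that VM in load_vms().
 */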
alignas(PAGE_SIZE) static uint8_t tee_send_buffer[HF_MAILBOX_SIZE];
alignas(PAGE_SIZE) static uint8_t tee_recv_buffer[HF_MAILBOX_SIZE];

/**
 * Copies data to an unmapped location by mapping it for write, copying the
 * data, then unmapping it.
 *
 * The data is written so that it is available to all cores with the cache
 * disabled. When switching to the partitions, caching is initially disabled,
 * so the data must be available without using the cache.
 */
static bool copy_to_unmapped(struct mm_stage1_locked stage1_locked, paddr_t to,
			     struct memiter *from_it, struct mpool *ppool)
{
	const void *from = memiter_base(from_it);
	size_t size = memiter_size(from_it);
	paddr_t to_end = pa_add(to, size);
	void *ptr;

	ptr = mm_identity_map(stage1_locked, to, to_end, MM_MODE_W, ppool);
	if (!ptr) {
		return false;
	}

	memcpy_s(ptr, size, from, size);
	arch_mm_flush_dcache(ptr, size);

	CHECK(mm_unmap(stage1_locked, to, to_end, ppool));

	return true;
}

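/**
 * Looks up the VM's kernel in the cpio archive and copies it into the
 * [begin, end) physical memory range, failing if the file is missing, does
 * not fit, or cannot be copied.
 */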
static bool load_kernel(struct mm_stage1_locked stage1_locked, paddr_t begin,
			paddr_t end, const struct manifest_vm *manifest_vm,
			const struct memiter *cpio, struct mpool *ppool)
{
	struct memiter kernel;

	if (!cpio_get_file(cpio, &manifest_vm->kernel_filename, &kernel)) {
		dlog_error("Could not find kernel file \"%s\".\n",
			   string_data(&manifest_vm->kernel_filename));
		return false;
	}

	if (pa_difference(begin, end) < memiter_size(&kernel)) {
		dlog_error("Kernel is larger than available memory.\n");
		return false;
	}

	if (!copy_to_unmapped(stage1_locked, begin, &kernel, ppool)) {
		dlog_error("Unable to copy kernel.\n");
		return false;
	}

	return true;
}

/**
 * Performs VM loading activities that are common between the primary and
 * secondary VMs.
 */
static bool load_common(const struct manifest_vm *manifest_vm, struct vm *vm)
{
	vm->smc_whitelist = manifest_vm->smc_whitelist;

	/* Initialize architecture-specific features. */
	arch_vm_features_set(vm);

	return true;
}

/**
 * Loads the primary VM.
 */
static bool load_primary(struct mm_stage1_locked stage1_locked,
			 const struct manifest_vm *manifest_vm,
			 const struct memiter *cpio,
			 const struct boot_params *params, struct mpool *ppool)
{
	paddr_t primary_begin;
	ipaddr_t primary_entry;
	struct vm *vm;
	struct vm_locked vm_locked;
	struct vcpu_locked vcpu_locked;
	size_t i;
	bool ret;

	if (manifest_vm->is_ffa_partition) {
		primary_begin = pa_init(manifest_vm->sp.load_addr);
		primary_entry = ipa_add(ipa_from_pa(primary_begin),
					manifest_vm->sp.ep_offset);
	} else {
		primary_begin =
			(manifest_vm->primary.boot_address ==
			 MANIFEST_INVALID_ADDRESS)
				? layout_primary_begin()
				: pa_init(manifest_vm->primary.boot_address);
		primary_entry = ipa_from_pa(primary_begin);
	}

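	/*
	 * Upper bound on the primary's memory; only used for the kernel size
	 * check in load_kernel() below.
	 */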
	paddr_t primary_end = pa_add(primary_begin, RSIZE_MAX);

	/*
	 * Load the kernel if a filename is specified in the VM manifest.
	 * For an FF-A partition, kernel_filename is undefined, indicating
	 * that the partition package was already loaded prior to Hafnium
	 * booting.
	 */
	if (!string_is_empty(&manifest_vm->kernel_filename)) {
		if (!load_kernel(stage1_locked, primary_begin, primary_end,
				 manifest_vm, cpio, ppool)) {
			dlog_error("Unable to load primary kernel.\n");
			return false;
		}
	}

	if (!vm_init_next(MAX_CPUS, ppool, &vm)) {
		dlog_error("Unable to initialise primary VM.\n");
		return false;
	}

	if (vm->id != HF_PRIMARY_VM_ID) {
		dlog_error("Primary VM was not given the correct ID.\n");
		return false;
	}

	vm_locked = vm_lock(vm);

	if (!load_common(manifest_vm, vm)) {
		ret = false;
		goto out;
	}

	if (params->device_mem_ranges_count == 0) {
		/*
		 * Map 1TB of address space as device memory so that, most
		 * likely, all devices are available to the primary VM.
		 *
		 * TODO: remove this once all targets provide valid ranges.
		 */
		dlog_warning(
			"Device memory not provided, defaulting to 1 TB.\n");

		if (!vm_identity_map(
			    vm_locked, pa_init(0),
			    pa_init(UINT64_C(1024) * 1024 * 1024 * 1024),
			    MM_MODE_R | MM_MODE_W | MM_MODE_D, ppool, NULL)) {
			dlog_error(
				"Unable to initialise address space for "
				"primary VM.\n");
			ret = false;
			goto out;
		}
	}

	/* Map normal memory as such to permit caching, execution, etc. */
	for (i = 0; i < params->mem_ranges_count; ++i) {
		if (!vm_identity_map(vm_locked, params->mem_ranges[i].begin,
				     params->mem_ranges[i].end,
				     MM_MODE_R | MM_MODE_W | MM_MODE_X, ppool,
				     NULL)) {
			dlog_error(
				"Unable to initialise memory for primary "
				"VM.\n");
			ret = false;
			goto out;
		}
	}

	/* Map device memory as such to prevent execution, speculation, etc. */
	for (i = 0; i < params->device_mem_ranges_count; ++i) {
		if (!vm_identity_map(
			    vm_locked, params->device_mem_ranges[i].begin,
			    params->device_mem_ranges[i].end,
			    MM_MODE_R | MM_MODE_W | MM_MODE_D, ppool, NULL)) {
			dlog_error(
				"Unable to initialise device memory for "
				"primary VM.\n");
			ret = false;
			goto out;
		}
	}

	if (!vm_unmap_hypervisor(vm_locked, ppool)) {
		dlog_error("Unable to unmap hypervisor from primary VM.\n");
		ret = false;
		goto out;
	}

	if (!plat_iommu_unmap_iommus(vm_locked, ppool)) {
		dlog_error("Unable to unmap IOMMUs from primary VM.\n");
		ret = false;
		goto out;
	}

	dlog_info("Loaded primary VM with %u vCPUs, entry at %#x.\n",
		  vm->vcpu_count, pa_addr(primary_begin));

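	/*
	 * Turn on vCPU 0 of the primary VM at the chosen entry point, passing
	 * the kernel argument from the boot parameters.
	 */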
	vcpu_locked = vcpu_lock(vm_get_vcpu(vm, 0));
	vcpu_on(vcpu_locked, primary_entry, params->kernel_arg);
	vcpu_unlock(&vcpu_locked);
	ret = true;

out:
	vm_unlock(&vm_locked);

	return ret;
}

/**
 * Loads a secondary VM.
 */
static bool load_secondary(struct mm_stage1_locked stage1_locked,
			   paddr_t mem_begin, paddr_t mem_end,
			   const struct manifest_vm *manifest_vm,
			   const struct memiter *cpio, struct mpool *ppool)
{
	struct vm *vm;
	struct vm_locked vm_locked;
	struct vcpu *vcpu;
	ipaddr_t secondary_entry;
	bool ret;

	/*
	 * Load the kernel if a filename is specified in the VM manifest.
	 * For an FF-A partition, kernel_filename is undefined, indicating
	 * that the partition package was already loaded prior to Hafnium
	 * booting.
	 */
	if (!string_is_empty(&manifest_vm->kernel_filename)) {
		if (!load_kernel(stage1_locked, mem_begin, mem_end, manifest_vm,
				 cpio, ppool)) {
			dlog_error("Unable to load kernel.\n");
			return false;
		}
	}

	if (!vm_init_next(manifest_vm->secondary.vcpu_count, ppool, &vm)) {
		dlog_error("Unable to initialise VM.\n");
		return false;
	}

	if (!load_common(manifest_vm, vm)) {
		return false;
	}

	vm_locked = vm_lock(vm);

	/* Grant the VM access to the memory. */
	if (!vm_identity_map(vm_locked, mem_begin, mem_end,
			     MM_MODE_R | MM_MODE_W | MM_MODE_X, ppool,
			     &secondary_entry)) {
		dlog_error("Unable to initialise memory.\n");
		ret = false;
		goto out;
	}

	dlog_info("Loaded with %u vCPUs, entry at %#x.\n",
		  manifest_vm->secondary.vcpu_count, pa_addr(mem_begin));

	if (manifest_vm->is_ffa_partition) {
		secondary_entry =
			ipa_add(secondary_entry, manifest_vm->sp.ep_offset);
	}

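	/*
	 * Start vCPU 0 of the secondary at its entry point, passing it the
	 * size of its memory region.
	 */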
	vcpu = vm_get_vcpu(vm, 0);
	vcpu_secondary_reset_and_start(vcpu, secondary_entry,
				       pa_difference(mem_begin, mem_end));
	ret = true;

out:
	vm_unlock(&vm_locked);

	return ret;
}

/**
 * Try to find a memory range of the given size within the given ranges, and
 * remove it from them. Return true on success, or false if no large enough
 * contiguous range is found.
 */
static bool carve_out_mem_range(struct mem_range *mem_ranges,
				size_t mem_ranges_count, uint64_t size_to_find,
				paddr_t *found_begin, paddr_t *found_end)
{
	size_t i;

	/*
	 * TODO(b/116191358): Consider being cleverer about how we pack VMs
	 * together, with a non-greedy algorithm.
	 */
	for (i = 0; i < mem_ranges_count; ++i) {
		if (size_to_find <=
		    pa_difference(mem_ranges[i].begin, mem_ranges[i].end)) {
			/*
			 * This range is big enough; take some of it from the
			 * end and reduce its size accordingly.
			 */
			*found_end = mem_ranges[i].end;
			*found_begin = pa_init(pa_addr(mem_ranges[i].end) -
					       size_to_find);
			mem_ranges[i].end = *found_begin;
			return true;
		}
	}
	return false;
}

/**
 * Given arrays of memory ranges before and after memory was removed for
 * secondary VMs, add the difference to the reserved ranges of the given update.
 * Return true on success, or false if there would be more than MAX_MEM_RANGES
 * reserved ranges after adding the new ones.
 * `before` and `after` must be arrays of exactly `mem_ranges_count` elements.
 */
static bool update_reserved_ranges(struct boot_params_update *update,
				   const struct mem_range *before,
				   const struct mem_range *after,
				   size_t mem_ranges_count)
{
	size_t i;

	for (i = 0; i < mem_ranges_count; ++i) {
		if (pa_addr(after[i].begin) > pa_addr(before[i].begin)) {
			if (update->reserved_ranges_count >= MAX_MEM_RANGES) {
				dlog_error(
					"Too many reserved ranges after "
					"loading secondary VMs.\n");
				return false;
			}
			update->reserved_ranges[update->reserved_ranges_count]
				.begin = before[i].begin;
			update->reserved_ranges[update->reserved_ranges_count]
				.end = after[i].begin;
			update->reserved_ranges_count++;
		}
		if (pa_addr(after[i].end) < pa_addr(before[i].end)) {
			if (update->reserved_ranges_count >= MAX_MEM_RANGES) {
				dlog_error(
					"Too many reserved ranges after "
					"loading secondary VMs.\n");
				return false;
			}
			update->reserved_ranges[update->reserved_ranges_count]
				.begin = after[i].end;
			update->reserved_ranges[update->reserved_ranges_count]
				.end = before[i].end;
			update->reserved_ranges_count++;
		}
	}

	return true;
}

/**
 * Loads all VMs from the manifest.
 */
bool load_vms(struct mm_stage1_locked stage1_locked,
	      const struct manifest *manifest, const struct memiter *cpio,
	      const struct boot_params *params,
	      struct boot_params_update *update, struct mpool *ppool)
{
	struct vm *primary;
	struct vm *tee;
	struct mem_range mem_ranges_available[MAX_MEM_RANGES];
	struct vm_locked primary_vm_locked;
	size_t i;
	bool success = true;

	if (!load_primary(stage1_locked, &manifest->vm[HF_PRIMARY_VM_INDEX],
			  cpio, params, ppool)) {
		dlog_error("Unable to load primary VM.\n");
		return false;
	}

	/*
	 * Initialise the dummy VM which represents TrustZone, and set up its
	 * RX/TX buffers.
	 */
	tee = vm_init(HF_TEE_VM_ID, 0, ppool);
	CHECK(tee != NULL);
	tee->mailbox.send = &tee_send_buffer;
	tee->mailbox.recv = &tee_recv_buffer;

	static_assert(
		sizeof(mem_ranges_available) == sizeof(params->mem_ranges),
		"mem_range arrays must be the same size for memcpy.");
	static_assert(sizeof(mem_ranges_available) < 500,
		      "This will use too much stack, either make "
		      "MAX_MEM_RANGES smaller or change this.");
	memcpy_s(mem_ranges_available, sizeof(mem_ranges_available),
		 params->mem_ranges, sizeof(params->mem_ranges));

	/* Round the last addresses down to the page size. */
	for (i = 0; i < params->mem_ranges_count; ++i) {
		mem_ranges_available[i].end = pa_init(align_down(
			pa_addr(mem_ranges_available[i].end), PAGE_SIZE));
	}

	primary = vm_find(HF_PRIMARY_VM_ID);
	primary_vm_locked = vm_lock(primary);

	for (i = 0; i < manifest->vm_count; ++i) {
		const struct manifest_vm *manifest_vm = &manifest->vm[i];
		ffa_vm_id_t vm_id = HF_VM_ID_OFFSET + i;
		uint64_t mem_size;
		paddr_t secondary_mem_begin;
		paddr_t secondary_mem_end;

		if (vm_id == HF_PRIMARY_VM_ID) {
			continue;
		}

		dlog_info("Loading VM%d: %s.\n", (int)vm_id,
			  manifest_vm->debug_name);

		mem_size = align_up(manifest_vm->secondary.mem_size, PAGE_SIZE);

		if (manifest_vm->is_ffa_partition) {
			secondary_mem_begin =
				pa_init(manifest_vm->sp.load_addr);
			secondary_mem_end =
				pa_init(manifest_vm->sp.load_addr + mem_size);
		} else if (!carve_out_mem_range(mem_ranges_available,
						params->mem_ranges_count,
						mem_size, &secondary_mem_begin,
						&secondary_mem_end)) {
			dlog_error("Not enough memory (%u bytes).\n", mem_size);
			continue;
		}

		if (!load_secondary(stage1_locked, secondary_mem_begin,
				    secondary_mem_end, manifest_vm, cpio,
				    ppool)) {
			dlog_error("Unable to load VM.\n");
			continue;
		}

		/* Deny the primary VM access to this memory. */
		if (!vm_unmap(primary_vm_locked, secondary_mem_begin,
			      secondary_mem_end, ppool)) {
			dlog_error(
				"Unable to unmap secondary VM from primary "
				"VM.\n");
			success = false;
			break;
		}
	}

	vm_unlock(&primary_vm_locked);

	if (!success) {
		return false;
	}

	/*
	 * Add newly reserved areas to update params by looking at the
	 * difference between the available ranges from the original params and
	 * the updated mem_ranges_available. We assume that the number and
	 * order of available ranges is the same, i.e. the loop above does not
	 * remove any ranges, it only makes them smaller.
	 */
	return update_reserved_ranges(update, params->mem_ranges,
				      mem_ranges_available,
				      params->mem_ranges_count);
}