/*
 * Copyright 2019 The Hafnium Authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     https://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "hf/spci_memory.h"

#include "hf/api.h"
#include "hf/check.h"
#include "hf/dlog.h"
#include "hf/mpool.h"
#include "hf/spci_internal.h"
#include "hf/std.h"
#include "hf/vm.h"

/** The maximum number of recipients a memory region may be sent to. */
#define MAX_MEM_SHARE_RECIPIENTS 1

/**
 * The maximum number of memory sharing handles which may be active at once. A
 * DONATE handle is active from when it is sent to when it is retrieved; a
 * SHARE or LEND handle is active from when it is sent to when it is reclaimed.
 */
#define MAX_MEM_SHARES 100

static_assert(sizeof(struct spci_memory_region_constituent) % 16 == 0,
	      "struct spci_memory_region_constituent must be a multiple of 16 "
	      "bytes long.");
static_assert(sizeof(struct spci_composite_memory_region) % 16 == 0,
	      "struct spci_composite_memory_region must be a multiple of 16 "
	      "bytes long.");
static_assert(sizeof(struct spci_memory_region_attributes) == 4,
	      "struct spci_memory_region_attributes must be 4 bytes long.");
static_assert(sizeof(struct spci_memory_access) % 16 == 0,
	      "struct spci_memory_access must be a multiple of 16 bytes long.");
static_assert(sizeof(struct spci_memory_region) % 16 == 0,
	      "struct spci_memory_region must be a multiple of 16 bytes long.");
static_assert(sizeof(struct spci_mem_relinquish) % 16 == 0,
	      "struct spci_mem_relinquish must be a multiple of 16 "
	      "bytes long.");

struct spci_memory_share_state {
	/**
	 * The memory region being shared, or NULL if this share state is
	 * unallocated.
	 */
	struct spci_memory_region *memory_region;

	/**
	 * The SPCI function used for sharing the memory. Must be one of
	 * SPCI_MEM_DONATE_32, SPCI_MEM_LEND_32 or SPCI_MEM_SHARE_32 if the
	 * share state is allocated, or 0.
	 */
	uint32_t share_func;

	/**
	 * Whether each recipient has retrieved the memory region yet. The
	 * order of this array matches the order of the memory access
	 * descriptors in the memory region descriptor. Any entries beyond the
	 * receiver_count will always be false.
	 */
	bool retrieved[MAX_MEM_SHARE_RECIPIENTS];
};

/**
 * Encapsulates the set of share states while the `share_states_lock` is held.
 */
struct share_states_locked {
	struct spci_memory_share_state *share_states;
};

/**
 * All access to members of a `struct spci_memory_share_state` must be guarded
 * by this lock.
 */
static struct spinlock share_states_lock_instance = SPINLOCK_INIT;
static struct spci_memory_share_state share_states[MAX_MEM_SHARES];

/**
 * Initialises the next available `struct spci_memory_share_state` and sets
 * `handle` to its handle. Returns true on success or false if none are
 * available.
 */
static bool allocate_share_state(uint32_t share_func,
				 struct spci_memory_region *memory_region,
				 spci_memory_handle_t *handle)
{
	uint64_t i;

	CHECK(memory_region != NULL);

	sl_lock(&share_states_lock_instance);
	for (i = 0; i < MAX_MEM_SHARES; ++i) {
		if (share_states[i].share_func == 0) {
			uint32_t j;
			struct spci_memory_share_state *allocated_state =
				&share_states[i];
			allocated_state->share_func = share_func;
			allocated_state->memory_region = memory_region;
			for (j = 0; j < MAX_MEM_SHARE_RECIPIENTS; ++j) {
				allocated_state->retrieved[j] = false;
			}
			*handle = i | SPCI_MEMORY_HANDLE_ALLOCATOR_HYPERVISOR;
			sl_unlock(&share_states_lock_instance);
			return true;
		}
	}

	sl_unlock(&share_states_lock_instance);
	return false;
}
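
/*
 * Example lifecycle of a share state handle (illustrative sketch only;
 * `region` and `page_pool` are placeholder names, not symbols from this
 * file):
 *
 *	spci_memory_handle_t handle;
 *
 *	if (allocate_share_state(SPCI_MEM_LEND_32, region, &handle)) {
 *		... handle stays active until retrieved/reclaimed ...
 *		share_state_free_handle(handle, page_pool);
 *	}
 */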

/** Locks the share states lock. */
struct share_states_locked share_states_lock(void)
{
	sl_lock(&share_states_lock_instance);

	return (struct share_states_locked){.share_states = share_states};
}

/** Unlocks the share states lock. */
static void share_states_unlock(struct share_states_locked *share_states)
{
	CHECK(share_states->share_states != NULL);
	share_states->share_states = NULL;
	sl_unlock(&share_states_lock_instance);
}

/**
 * If the given handle is a valid handle for an allocated share state,
 * initialises `share_state_ret` to point to the share state and returns true.
 * Otherwise returns false. The caller must already hold the share states
 * lock.
 */
static bool get_share_state(struct share_states_locked share_states,
			    spci_memory_handle_t handle,
			    struct spci_memory_share_state **share_state_ret)
{
	struct spci_memory_share_state *share_state;
	uint32_t index = handle & ~SPCI_MEMORY_HANDLE_ALLOCATOR_MASK;

	if (index >= MAX_MEM_SHARES) {
		return false;
	}

	share_state = &share_states.share_states[index];

	if (share_state->share_func == 0) {
		return false;
	}

	*share_state_ret = share_state;
	return true;
}
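
/*
 * Illustrative handle encoding (sketch; the concrete values of the allocator
 * constants are defined elsewhere): a handle is the share state index with
 * the allocator bit(s) set, so the lookup above strips those bits again:
 *
 *	handle = 3 | SPCI_MEMORY_HANDLE_ALLOCATOR_HYPERVISOR;
 *	index = handle & ~SPCI_MEMORY_HANDLE_ALLOCATOR_MASK;	(gives 3)
 */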

/** Marks a share state as unallocated. */
static void share_state_free(struct share_states_locked share_states,
			     struct spci_memory_share_state *share_state,
			     struct mpool *page_pool)
{
	CHECK(share_states.share_states != NULL);
	share_state->share_func = 0;
	mpool_free(page_pool, share_state->memory_region);
	share_state->memory_region = NULL;
}

/**
 * Marks the share state with the given handle as unallocated, or returns false
 * if the handle was invalid.
 */
static bool share_state_free_handle(spci_memory_handle_t handle,
				    struct mpool *page_pool)
{
	struct share_states_locked share_states = share_states_lock();
	struct spci_memory_share_state *share_state;

	if (!get_share_state(share_states, handle, &share_state)) {
		share_states_unlock(&share_states);
		return false;
	}

	share_state_free(share_states, share_state, page_pool);
	share_states_unlock(&share_states);

	return true;
}

static void dump_memory_region(struct spci_memory_region *memory_region)
{
	uint32_t i;

	if (LOG_LEVEL < LOG_LEVEL_VERBOSE) {
		return;
	}

	dlog("from VM %d, attributes %#x, flags %#x, handle %#x, tag %d, to %d "
	     "recipients [",
	     memory_region->sender, memory_region->attributes,
	     memory_region->flags, memory_region->handle, memory_region->tag,
	     memory_region->receiver_count);
	for (i = 0; i < memory_region->receiver_count; ++i) {
		if (i != 0) {
			dlog(", ");
		}
		dlog("VM %d: %#x (offset %d)",
		     memory_region->receivers[i].receiver_permissions.receiver,
		     memory_region->receivers[i]
			     .receiver_permissions.permissions,
		     memory_region->receivers[i]
			     .composite_memory_region_offset);
	}
	dlog("]");
}

static void dump_share_states(void)
{
	uint32_t i;

	if (LOG_LEVEL < LOG_LEVEL_VERBOSE) {
		return;
	}

	dlog("Current share states:\n");
	sl_lock(&share_states_lock_instance);
	for (i = 0; i < MAX_MEM_SHARES; ++i) {
		if (share_states[i].share_func != 0) {
			dlog("%d: ", i);
			switch (share_states[i].share_func) {
			case SPCI_MEM_SHARE_32:
				dlog("SHARE");
				break;
			case SPCI_MEM_LEND_32:
				dlog("LEND");
				break;
			case SPCI_MEM_DONATE_32:
				dlog("DONATE");
				break;
			default:
				dlog("invalid share_func %#x",
				     share_states[i].share_func);
			}
			dlog(" (");
			dump_memory_region(share_states[i].memory_region);
			if (share_states[i].retrieved[0]) {
				dlog("): retrieved\n");
			} else {
				dlog("): not retrieved\n");
			}
		}
	}
	sl_unlock(&share_states_lock_instance);
}

/* TODO: Add device attributes: GRE, cacheability, shareability. */
static inline uint32_t spci_memory_permissions_to_mode(
	spci_memory_access_permissions_t permissions)
{
	uint32_t mode = 0;

	switch (spci_get_data_access_attr(permissions)) {
	case SPCI_DATA_ACCESS_RO:
		mode = MM_MODE_R;
		break;
	case SPCI_DATA_ACCESS_RW:
	case SPCI_DATA_ACCESS_NOT_SPECIFIED:
		mode = MM_MODE_R | MM_MODE_W;
		break;
	case SPCI_DATA_ACCESS_RESERVED:
		panic("Tried to convert SPCI_DATA_ACCESS_RESERVED.");
	}

	switch (spci_get_instruction_access_attr(permissions)) {
	case SPCI_INSTRUCTION_ACCESS_NX:
		break;
	case SPCI_INSTRUCTION_ACCESS_X:
	case SPCI_INSTRUCTION_ACCESS_NOT_SPECIFIED:
		mode |= MM_MODE_X;
		break;
	case SPCI_INSTRUCTION_ACCESS_RESERVED:
		panic("Tried to convert SPCI_INSTRUCTION_ACCESS_RESERVED.");
	}

	return mode;
}
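
/*
 * Example conversions (sketch, following the switches above):
 *
 *	RO data access, X instruction access   ->  MM_MODE_R | MM_MODE_X
 *	RW data access, NX instruction access  ->  MM_MODE_R | MM_MODE_W
 */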

/**
 * Gets the current mode in the given VM's stage-2 page table of all the pages
 * in the given constituents, if they all have the same mode, or returns an
 * appropriate SPCI error if not.
 */
static struct spci_value constituents_get_mode(
	struct vm_locked vm, uint32_t *orig_mode,
	struct spci_memory_region_constituent *constituents,
	uint32_t constituent_count)
{
	uint32_t i;

	if (constituent_count == 0) {
		/*
		 * Fail if there are no constituents. Otherwise we would get an
		 * uninitialised *orig_mode.
		 */
		return spci_error(SPCI_INVALID_PARAMETERS);
	}

	for (i = 0; i < constituent_count; ++i) {
		ipaddr_t begin = ipa_init(constituents[i].address);
		size_t size = constituents[i].page_count * PAGE_SIZE;
		ipaddr_t end = ipa_add(begin, size);
		uint32_t current_mode;

		/* Fail if addresses are not page-aligned. */
		if (!is_aligned(ipa_addr(begin), PAGE_SIZE) ||
		    !is_aligned(ipa_addr(end), PAGE_SIZE)) {
			return spci_error(SPCI_INVALID_PARAMETERS);
		}

		/*
		 * Ensure that this constituent memory range is all mapped with
		 * the same mode.
		 */
		if (!mm_vm_get_mode(&vm.vm->ptable, begin, end,
				    &current_mode)) {
			return spci_error(SPCI_DENIED);
		}

		/*
		 * Ensure that all constituents are mapped with the same mode.
		 */
		if (i == 0) {
			*orig_mode = current_mode;
		} else if (current_mode != *orig_mode) {
			return spci_error(SPCI_DENIED);
		}
	}

	return (struct spci_value){.func = SPCI_SUCCESS_32};
}

/**
 * Verify that all pages have the same mode, that the starting mode
 * constitutes a valid state and obtain the next mode to apply
 * to the sending VM.
 *
 * Returns:
 *  1) SPCI_DENIED if a state transition was not found;
 *  2) SPCI_DENIED if the pages being shared do not have the same mode within
 *     the <from> VM;
 *  3) SPCI_INVALID_PARAMETERS if the beginning and end IPAs are not page
 *     aligned;
 *  4) SPCI_INVALID_PARAMETERS if the requested share type was not handled.
 *  Or SPCI_SUCCESS on success.
 */
static struct spci_value spci_send_check_transition(
	struct vm_locked from, uint32_t share_func,
	spci_memory_access_permissions_t permissions, uint32_t *orig_from_mode,
	struct spci_memory_region_constituent *constituents,
	uint32_t constituent_count, uint32_t *from_mode)
{
	const uint32_t state_mask =
		MM_MODE_INVALID | MM_MODE_UNOWNED | MM_MODE_SHARED;
	const uint32_t required_from_mode =
		spci_memory_permissions_to_mode(permissions);
	struct spci_value ret;

	ret = constituents_get_mode(from, orig_from_mode, constituents,
				    constituent_count);
	if (ret.func != SPCI_SUCCESS_32) {
		return ret;
	}

	/* Ensure the address range is normal memory and not a device. */
	if (*orig_from_mode & MM_MODE_D) {
		dlog_verbose("Can't share device memory (mode is %#x).\n",
			     *orig_from_mode);
		return spci_error(SPCI_DENIED);
	}

	/*
	 * Ensure the sender is the owner and has exclusive access to the
	 * memory.
	 */
	if ((*orig_from_mode & state_mask) != 0) {
		return spci_error(SPCI_DENIED);
	}

	if ((*orig_from_mode & required_from_mode) != required_from_mode) {
		dlog_verbose(
			"Sender tried to send memory with permissions which "
			"required mode %#x but only had %#x itself.\n",
			required_from_mode, *orig_from_mode);
		return spci_error(SPCI_DENIED);
	}

	/* Find the appropriate new mode. */
	*from_mode = ~state_mask & *orig_from_mode;
	switch (share_func) {
	case SPCI_MEM_DONATE_32:
		*from_mode |= MM_MODE_INVALID | MM_MODE_UNOWNED;
		break;

	case SPCI_MEM_LEND_32:
		*from_mode |= MM_MODE_INVALID;
		break;

	case SPCI_MEM_SHARE_32:
		*from_mode |= MM_MODE_SHARED;
		break;

	default:
		return spci_error(SPCI_INVALID_PARAMETERS);
	}

	return (struct spci_value){.func = SPCI_SUCCESS_32};
}

static struct spci_value spci_relinquish_check_transition(
	struct vm_locked from, uint32_t *orig_from_mode,
	struct spci_memory_region_constituent *constituents,
	uint32_t constituent_count, uint32_t *from_mode)
{
	const uint32_t state_mask =
		MM_MODE_INVALID | MM_MODE_UNOWNED | MM_MODE_SHARED;
	uint32_t orig_from_state;
	struct spci_value ret;

	ret = constituents_get_mode(from, orig_from_mode, constituents,
				    constituent_count);
	if (ret.func != SPCI_SUCCESS_32) {
		return ret;
	}

	/* Ensure the address range is normal memory and not a device. */
	if (*orig_from_mode & MM_MODE_D) {
		dlog_verbose("Can't relinquish device memory (mode is %#x).\n",
			     *orig_from_mode);
		return spci_error(SPCI_DENIED);
	}

	/*
	 * Ensure the relinquishing VM is not the owner but has access to the
	 * memory.
	 */
	orig_from_state = *orig_from_mode & state_mask;
	if ((orig_from_state & ~MM_MODE_SHARED) != MM_MODE_UNOWNED) {
		dlog_verbose(
			"Tried to relinquish memory in state %#x (masked %#x "
			"but should be %#x).\n",
			*orig_from_mode, orig_from_state, MM_MODE_UNOWNED);
		return spci_error(SPCI_DENIED);
	}

	/* Find the appropriate new mode. */
	*from_mode = (~state_mask & *orig_from_mode) | MM_MODE_UNMAPPED_MASK;

	return (struct spci_value){.func = SPCI_SUCCESS_32};
}

/**
 * Verify that all pages have the same mode, that the starting mode
 * constitutes a valid state and obtain the next mode to apply
 * to the retrieving VM.
 *
 * Returns:
 *  1) SPCI_DENIED if a state transition was not found;
 *  2) SPCI_DENIED if the pages being shared do not have the same mode within
 *     the <to> VM;
 *  3) SPCI_INVALID_PARAMETERS if the beginning and end IPAs are not page
 *     aligned;
 *  4) SPCI_INVALID_PARAMETERS if the requested share type was not handled.
 *  Or SPCI_SUCCESS on success.
 */
static struct spci_value spci_retrieve_check_transition(
	struct vm_locked to, uint32_t share_func,
	struct spci_memory_region_constituent *constituents,
	uint32_t constituent_count, uint32_t memory_to_attributes,
	uint32_t *to_mode)
{
	uint32_t orig_to_mode;
	struct spci_value ret;

	ret = constituents_get_mode(to, &orig_to_mode, constituents,
				    constituent_count);
	if (ret.func != SPCI_SUCCESS_32) {
		return ret;
	}

	if (share_func == SPCI_MEM_RECLAIM_32) {
		const uint32_t state_mask =
			MM_MODE_INVALID | MM_MODE_UNOWNED | MM_MODE_SHARED;
		uint32_t orig_to_state = orig_to_mode & state_mask;

		if (orig_to_state != MM_MODE_INVALID &&
		    orig_to_state != MM_MODE_SHARED) {
			return spci_error(SPCI_DENIED);
		}
	} else {
		/*
		 * Ensure the retriever has the expected state. We don't care
		 * about the MM_MODE_SHARED bit; either with or without it set
		 * are both valid representations of the !O-NA state.
		 */
		if ((orig_to_mode & MM_MODE_UNMAPPED_MASK) !=
		    MM_MODE_UNMAPPED_MASK) {
			return spci_error(SPCI_DENIED);
		}
	}

	/* Find the appropriate new mode. */
	*to_mode = memory_to_attributes;
	switch (share_func) {
	case SPCI_MEM_DONATE_32:
		*to_mode |= 0;
		break;

	case SPCI_MEM_LEND_32:
		*to_mode |= MM_MODE_UNOWNED;
		break;

	case SPCI_MEM_SHARE_32:
		*to_mode |= MM_MODE_UNOWNED | MM_MODE_SHARED;
		break;

	case SPCI_MEM_RECLAIM_32:
		*to_mode |= 0;
		break;

	default:
		return spci_error(SPCI_INVALID_PARAMETERS);
	}

	return (struct spci_value){.func = SPCI_SUCCESS_32};
}

/**
 * Updates a VM's page table such that the given set of physical address ranges
 * are mapped in the address space at the corresponding address ranges, in the
 * mode provided.
 *
 * If commit is false, the page tables will be allocated from the mpool but no
 * mappings will actually be updated. This function must always be called first
 * with commit false to check that it will succeed before calling with commit
 * true, to avoid leaving the page table in a half-updated state. To make a
 * series of changes atomically you can call them all with commit false before
 * calling them all with commit true.
 *
 * mm_vm_defrag should always be called after a series of page table updates,
 * whether they succeed or fail.
 *
 * Returns true on success, or false if the update failed and no changes were
 * made to memory mappings.
 */
static bool spci_region_group_identity_map(
	struct vm_locked vm_locked,
	struct spci_memory_region_constituent *constituents,
	uint32_t constituent_count, int mode, struct mpool *ppool, bool commit)
{
	/* Iterate over the memory region constituents. */
	for (uint32_t index = 0; index < constituent_count; index++) {
		size_t size = constituents[index].page_count * PAGE_SIZE;
		paddr_t pa_begin =
			pa_from_ipa(ipa_init(constituents[index].address));
		paddr_t pa_end = pa_add(pa_begin, size);

		if (commit) {
			vm_identity_commit(vm_locked, pa_begin, pa_end, mode,
					   ppool, NULL);
		} else if (!vm_identity_prepare(vm_locked, pa_begin, pa_end,
						mode, ppool)) {
			return false;
		}
	}

	return true;
}
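
/*
 * Typical two-phase usage, as in the send/retrieve/relinquish paths below
 * (sketch; `vm`, `constituents`, `count`, `mode` and `pool` are placeholder
 * names):
 *
 *	if (!spci_region_group_identity_map(vm, constituents, count, mode,
 *					    pool, false)) {
 *		return spci_error(SPCI_NO_MEMORY);
 *	}
 *	CHECK(spci_region_group_identity_map(vm, constituents, count, mode,
 *					     pool, true));
 */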

/**
 * Clears a region of physical memory by overwriting it with zeros. The data is
 * flushed from the cache so the memory has been cleared across the system.
 */
static bool clear_memory(paddr_t begin, paddr_t end, struct mpool *ppool)
{
	/*
	 * TODO: change this to a CPU local single page window rather than a
	 *       global mapping of the whole range. Such an approach will limit
	 *       the changes to stage-1 tables and will allow only local
	 *       invalidation.
	 */
	bool ret;
	struct mm_stage1_locked stage1_locked = mm_lock_stage1();
	void *ptr =
		mm_identity_map(stage1_locked, begin, end, MM_MODE_W, ppool);
	size_t size = pa_difference(begin, end);

	if (!ptr) {
		/* TODO: partial defrag of failed range. */
		/* Recover any memory consumed in failed mapping. */
		mm_defrag(stage1_locked, ppool);
		goto fail;
	}

	memset_s(ptr, size, 0, size);
	arch_mm_flush_dcache(ptr, size);
	mm_unmap(stage1_locked, begin, end, ppool);

	ret = true;
	goto out;

fail:
	ret = false;

out:
	mm_unlock_stage1(&stage1_locked);

	return ret;
}

/**
 * Clears the memory described by the given constituents by overwriting it with
 * zeros. The data is flushed from the cache so the memory has been cleared
 * across the system.
 */
static bool spci_clear_memory_constituents(
	struct spci_memory_region_constituent *constituents,
	uint32_t constituent_count, struct mpool *page_pool)
{
	struct mpool local_page_pool;
	struct mm_stage1_locked stage1_locked;
	bool ret = false;

	/*
	 * Create a local pool so any freed memory can't be used by another
	 * thread. This is to ensure each constituent that is mapped can be
	 * unmapped again afterwards.
	 */
	mpool_init_with_fallback(&local_page_pool, page_pool);

	/* Iterate over the memory region constituents. */
	for (uint32_t i = 0; i < constituent_count; ++i) {
		size_t size = constituents[i].page_count * PAGE_SIZE;
		paddr_t begin = pa_from_ipa(ipa_init(constituents[i].address));
		paddr_t end = pa_add(begin, size);

		if (!clear_memory(begin, end, &local_page_pool)) {
			/*
			 * clear_memory will defrag on failure, so no need
			 * to do it here.
			 */
			goto out;
		}
	}

	/*
	 * Need to defrag after clearing, as it may have added extra mappings to
	 * the stage 1 page table.
	 */
	stage1_locked = mm_lock_stage1();
	mm_defrag(stage1_locked, &local_page_pool);
	mm_unlock_stage1(&stage1_locked);

	ret = true;

out:
	mpool_fini(&local_page_pool);
	return ret;
}

/**
 * Validates and prepares memory to be sent from the calling VM to another.
 *
 * This function requires the calling context to hold the <from> VM lock.
 *
 * Returns:
 *  In case of error, one of the following values is returned:
 *   1) SPCI_INVALID_PARAMETERS - The endpoint provided parameters were
 *      erroneous;
 *   2) SPCI_NO_MEMORY - Hafnium did not have sufficient memory to complete
 *      the request.
 *   3) SPCI_DENIED - The sender doesn't have sufficient access to send the
 *      memory with the given permissions.
 *  Success is indicated by SPCI_SUCCESS.
 */
static struct spci_value spci_send_memory(
	struct vm_locked from_locked,
	struct spci_memory_region_constituent *constituents,
	uint32_t constituent_count, uint32_t share_func,
	spci_memory_access_permissions_t permissions, struct mpool *page_pool,
	bool clear)
{
	struct vm *from = from_locked.vm;
	uint32_t orig_from_mode;
	uint32_t from_mode;
	struct mpool local_page_pool;
	struct spci_value ret;

	/*
	 * Make sure constituents are properly aligned to a 64-bit boundary. If
	 * not we would get alignment faults trying to read (64-bit) values.
	 */
	if (!is_aligned(constituents, 8)) {
		return spci_error(SPCI_INVALID_PARAMETERS);
	}

	/*
	 * Check if the state transition is lawful for the sender, and ensure
	 * that all constituents of a memory region being shared are at the
	 * same state.
	 */
	ret = spci_send_check_transition(from_locked, share_func, permissions,
					 &orig_from_mode, constituents,
					 constituent_count, &from_mode);
	if (ret.func != SPCI_SUCCESS_32) {
		return ret;
	}

	/*
	 * Create a local pool so any freed memory can't be used by another
	 * thread. This is to ensure the original mapping can be restored if the
	 * clear fails.
	 */
	mpool_init_with_fallback(&local_page_pool, page_pool);

	/*
	 * First reserve all required memory for the new page table entries
	 * without committing, to make sure the entire operation will succeed
	 * without exhausting the page pool.
	 */
	if (!spci_region_group_identity_map(from_locked, constituents,
					    constituent_count, from_mode,
					    page_pool, false)) {
		/* TODO: partial defrag of failed range. */
		ret = spci_error(SPCI_NO_MEMORY);
		goto out;
	}

	/*
	 * Update the mapping for the sender. This won't allocate because the
	 * transaction was already prepared above, but may free pages in the
	 * case that a whole block is being unmapped that was previously
	 * partially mapped.
	 */
	CHECK(spci_region_group_identity_map(from_locked, constituents,
					     constituent_count, from_mode,
					     &local_page_pool, true));

	/* Clear the memory so no VM or device can see the previous contents. */
	if (clear && !spci_clear_memory_constituents(
			     constituents, constituent_count, page_pool)) {
		/*
		 * On failure, roll back by returning memory to the sender. This
		 * may allocate pages which were previously freed into
		 * `local_page_pool` by the call above, but will never allocate
		 * more pages than that so can never fail.
		 */
		CHECK(spci_region_group_identity_map(
			from_locked, constituents, constituent_count,
			orig_from_mode, &local_page_pool, true));

		ret = spci_error(SPCI_NO_MEMORY);
		goto out;
	}

	ret = (struct spci_value){.func = SPCI_SUCCESS_32};

out:
	mpool_fini(&local_page_pool);

	/*
	 * Tidy up the page table by reclaiming failed mappings (if there was an
	 * error) or merging entries into blocks where possible (on success).
	 */
	mm_vm_defrag(&from->ptable, page_pool);

	return ret;
}

/**
 * Validates and maps memory shared from one VM to another.
 *
 * This function requires the calling context to hold the <to> lock.
 *
 * Returns:
 *  In case of error, one of the following values is returned:
 *   1) SPCI_INVALID_PARAMETERS - The endpoint provided parameters were
 *      erroneous;
 *   2) SPCI_NO_MEMORY - Hafnium did not have sufficient memory to complete
 *      the request.
 *   3) SPCI_DENIED - The state transition is not lawful for the recipient.
 *  Success is indicated by SPCI_SUCCESS.
 */
static struct spci_value spci_retrieve_memory(
	struct vm_locked to_locked,
	struct spci_memory_region_constituent *constituents,
	uint32_t constituent_count, uint32_t memory_to_attributes,
	uint32_t share_func, bool clear, struct mpool *page_pool)
{
	struct vm *to = to_locked.vm;
	uint32_t to_mode;
	struct mpool local_page_pool;
	struct spci_value ret;

	/*
	 * Make sure constituents are properly aligned to a 32-bit boundary. If
	 * not we would get alignment faults trying to read (32-bit) values.
	 */
	if (!is_aligned(constituents, 4)) {
		dlog_verbose("Constituents not aligned.\n");
		return spci_error(SPCI_INVALID_PARAMETERS);
	}

	/*
	 * Check if the state transition is lawful for the recipient, and
	 * ensure that all constituents of the memory region being retrieved
	 * are at the same state.
	 */
	ret = spci_retrieve_check_transition(to_locked, share_func,
					     constituents, constituent_count,
					     memory_to_attributes, &to_mode);
	if (ret.func != SPCI_SUCCESS_32) {
		dlog_verbose("Invalid transition.\n");
		return ret;
	}

	/*
	 * Create a local pool so any freed memory can't be used by another
	 * thread. This is to ensure the original mapping can be restored if the
	 * clear fails.
	 */
	mpool_init_with_fallback(&local_page_pool, page_pool);

	/*
	 * First reserve all required memory for the new page table entries in
	 * the recipient page tables without committing, to make sure the
	 * entire operation will succeed without exhausting the page pool.
	 */
	if (!spci_region_group_identity_map(to_locked, constituents,
					    constituent_count, to_mode,
					    page_pool, false)) {
		/* TODO: partial defrag of failed range. */
		dlog_verbose(
			"Insufficient memory to update recipient page "
			"table.\n");
		ret = spci_error(SPCI_NO_MEMORY);
		goto out;
	}

	/* Clear the memory so no VM or device can see the previous contents. */
	if (clear && !spci_clear_memory_constituents(
			     constituents, constituent_count, page_pool)) {
		ret = spci_error(SPCI_NO_MEMORY);
		goto out;
	}

	/*
	 * Complete the transfer by mapping the memory into the recipient. This
	 * won't allocate because the transaction was already prepared above, so
	 * it doesn't need to use the `local_page_pool`.
	 */
	CHECK(spci_region_group_identity_map(to_locked, constituents,
					     constituent_count, to_mode,
					     page_pool, true));

	ret = (struct spci_value){.func = SPCI_SUCCESS_32};

out:
	mpool_fini(&local_page_pool);

	/*
	 * Tidy up the page table by reclaiming failed mappings (if there was
	 * an error) or merging entries into blocks where possible (on success).
	 */
	mm_vm_defrag(&to->ptable, page_pool);

	return ret;
}

static struct spci_value spci_relinquish_memory(
	struct vm_locked from_locked,
	struct spci_memory_region_constituent *constituents,
	uint32_t constituent_count, struct mpool *page_pool, bool clear)
{
	uint32_t orig_from_mode;
	uint32_t from_mode;
	struct mpool local_page_pool;
	struct spci_value ret;

	ret = spci_relinquish_check_transition(from_locked, &orig_from_mode,
					       constituents, constituent_count,
					       &from_mode);
	if (ret.func != SPCI_SUCCESS_32) {
		dlog_verbose("Invalid transition.\n");
		return ret;
	}

	/*
	 * Create a local pool so any freed memory can't be used by another
	 * thread. This is to ensure the original mapping can be restored if the
	 * clear fails.
	 */
	mpool_init_with_fallback(&local_page_pool, page_pool);

	/*
	 * First reserve all required memory for the new page table entries
	 * without committing, to make sure the entire operation will succeed
	 * without exhausting the page pool.
	 */
	if (!spci_region_group_identity_map(from_locked, constituents,
					    constituent_count, from_mode,
					    page_pool, false)) {
		/* TODO: partial defrag of failed range. */
		ret = spci_error(SPCI_NO_MEMORY);
		goto out;
	}

	/*
	 * Update the mapping for the sender. This won't allocate because the
	 * transaction was already prepared above, but may free pages in the
	 * case that a whole block is being unmapped that was previously
	 * partially mapped.
	 */
	CHECK(spci_region_group_identity_map(from_locked, constituents,
					     constituent_count, from_mode,
					     &local_page_pool, true));

	/* Clear the memory so no VM or device can see the previous contents. */
	if (clear && !spci_clear_memory_constituents(
			     constituents, constituent_count, page_pool)) {
		/*
		 * On failure, roll back by returning memory to the sender. This
		 * may allocate pages which were previously freed into
		 * `local_page_pool` by the call above, but will never allocate
		 * more pages than that so can never fail.
		 */
		CHECK(spci_region_group_identity_map(
			from_locked, constituents, constituent_count,
			orig_from_mode, &local_page_pool, true));

		ret = spci_error(SPCI_NO_MEMORY);
		goto out;
	}

	ret = (struct spci_value){.func = SPCI_SUCCESS_32};

out:
	mpool_fini(&local_page_pool);

	/*
	 * Tidy up the page table by reclaiming failed mappings (if there was an
	 * error) or merging entries into blocks where possible (on success).
	 */
	mm_vm_defrag(&from_locked.vm->ptable, page_pool);

	return ret;
}

/**
 * Check that the given `memory_region` represents a valid memory send request
 * of the given `share_func` type, return the clear flag and permissions via
 * the respective output parameters, and update the permissions if necessary.
 *
 * Returns SPCI_SUCCESS if the request was valid, or the relevant SPCI_ERROR
 * if not.
 */
static struct spci_value spci_memory_send_validate(
	struct vm *to, struct vm_locked from_locked,
	struct spci_memory_region *memory_region, uint32_t memory_share_size,
	uint32_t share_func, bool *clear,
	spci_memory_access_permissions_t *permissions)
| 978 | { |
| 979 | struct spci_composite_memory_region *composite; |
| 980 | uint32_t receivers_size; |
| 981 | uint32_t constituents_size; |
| 982 | enum spci_data_access data_access; |
| 983 | enum spci_instruction_access instruction_access; |
| 984 | |
| 985 | CHECK(clear != NULL); |
| 986 | CHECK(permissions != NULL); |
| 987 | |
| 988 | /* The sender must match the message sender. */ |
| 989 | if (memory_region->sender != from_locked.vm->id) { |
| 990 | dlog_verbose("Invalid sender %d.\n", memory_region->sender); |
| 991 | return spci_error(SPCI_INVALID_PARAMETERS); |
| 992 | } |
| 993 | |
| 994 | /* We only support a single recipient. */ |
| 995 | if (memory_region->receiver_count != 1) { |
| 996 | dlog_verbose("Multiple recipients not supported.\n"); |
| 997 | return spci_error(SPCI_INVALID_PARAMETERS); |
| 998 | } |
| 999 | |
| 1000 | /* |
| 1001 | * Ensure that the composite header is within the memory bounds and |
| 1002 | * doesn't overlap the first part of the message. |
| 1003 | */ |
| 1004 | receivers_size = sizeof(struct spci_memory_access) * |
| 1005 | memory_region->receiver_count; |
| 1006 | if (memory_region->receivers[0].composite_memory_region_offset < |
| 1007 | sizeof(struct spci_memory_region) + receivers_size || |
| 1008 | memory_region->receivers[0].composite_memory_region_offset + |
| 1009 | sizeof(struct spci_composite_memory_region) >= |
| 1010 | memory_share_size) { |
| 1011 | dlog_verbose( |
| 1012 | "Invalid composite memory region descriptor offset.\n"); |
| 1013 | return spci_error(SPCI_INVALID_PARAMETERS); |
| 1014 | } |
| 1015 | |
| 1016 | composite = spci_memory_region_get_composite(memory_region, 0); |
| 1017 | |
| 1018 | /* |
| 1019 | * Ensure the number of constituents are within the memory |
| 1020 | * bounds. |
| 1021 | */ |
| 1022 | constituents_size = sizeof(struct spci_memory_region_constituent) * |
| 1023 | composite->constituent_count; |
| 1024 | if (memory_share_size != |
| 1025 | memory_region->receivers[0].composite_memory_region_offset + |
| 1026 | sizeof(struct spci_composite_memory_region) + |
| 1027 | constituents_size) { |
| 1028 | dlog_verbose("Invalid size %d or constituent offset %d.\n", |
| 1029 | memory_share_size, |
| 1030 | memory_region->receivers[0] |
| 1031 | .composite_memory_region_offset); |
| 1032 | return spci_error(SPCI_INVALID_PARAMETERS); |
| 1033 | } |
| 1034 | |
| 1035 | /* The recipient must match the message recipient. */ |
| 1036 | if (memory_region->receivers[0].receiver_permissions.receiver != |
| 1037 | to->id) { |
| 1038 | return spci_error(SPCI_INVALID_PARAMETERS); |
| 1039 | } |
| 1040 | |
| 1041 | *clear = memory_region->flags & SPCI_MEMORY_REGION_FLAG_CLEAR; |
| 1042 | /* |
| 1043 | * Clear is not allowed for memory sharing, as the sender still has |
| 1044 | * access to the memory. |
| 1045 | */ |
| 1046 | if (*clear && share_func == SPCI_MEM_SHARE_32) { |
| 1047 | dlog_verbose("Memory can't be cleared while being shared.\n"); |
| 1048 | return spci_error(SPCI_INVALID_PARAMETERS); |
| 1049 | } |
| 1050 | |
| 1051 | /* No other flags are allowed/supported here. */ |
| 1052 | if (memory_region->flags & ~SPCI_MEMORY_REGION_FLAG_CLEAR) { |
| 1053 | dlog_verbose("Invalid flags %#x.\n", memory_region->flags); |
| 1054 | return spci_error(SPCI_INVALID_PARAMETERS); |
| 1055 | } |
| 1056 | |
| 1057 | /* Check that the permissions are valid. */ |
| 1058 | *permissions = |
| 1059 | memory_region->receivers[0].receiver_permissions.permissions; |
| 1060 | data_access = spci_get_data_access_attr(*permissions); |
| 1061 | instruction_access = spci_get_instruction_access_attr(*permissions); |
| 1062 | if (data_access == SPCI_DATA_ACCESS_RESERVED || |
| 1063 | instruction_access == SPCI_INSTRUCTION_ACCESS_RESERVED) { |
| 1064 | dlog_verbose("Reserved value for receiver permissions %#x.\n", |
| 1065 | *permissions); |
| 1066 | return spci_error(SPCI_INVALID_PARAMETERS); |
| 1067 | } |
| 1068 | if (instruction_access != SPCI_INSTRUCTION_ACCESS_NOT_SPECIFIED) { |
| 1069 | dlog_verbose( |
| 1070 | "Invalid instruction access permissions %#x for " |
| 1071 | "sending memory.\n", |
| 1072 | *permissions); |
| 1073 | return spci_error(SPCI_INVALID_PARAMETERS); |
| 1074 | } |
| 1075 | if (share_func == SPCI_MEM_SHARE_32) { |
| 1076 | if (data_access == SPCI_DATA_ACCESS_NOT_SPECIFIED) { |
| 1077 | dlog_verbose( |
| 1078 | "Invalid data access permissions %#x for " |
| 1079 | "sharing memory.\n", |
| 1080 | *permissions); |
| 1081 | return spci_error(SPCI_INVALID_PARAMETERS); |
| 1082 | } |
| 1083 | /* |
| 1084 | * According to section 6.11.3 of the FF-A spec NX is required |
| 1085 | * for share operations (but must not be specified by the |
| 1086 | * sender) so set it in the copy that we store, ready to be |
| 1087 | * returned to the retriever. |
| 1088 | */ |
| 1089 | spci_set_instruction_access_attr(permissions, |
| 1090 | SPCI_INSTRUCTION_ACCESS_NX); |
| 1091 | memory_region->receivers[0].receiver_permissions.permissions = |
| 1092 | *permissions; |
| 1093 | } |
	if (share_func == SPCI_MEM_LEND_32 &&
	    data_access == SPCI_DATA_ACCESS_NOT_SPECIFIED) {
		dlog_verbose(
			"Invalid data access permissions %#x for lending "
			"memory.\n",
			*permissions);
		return spci_error(SPCI_INVALID_PARAMETERS);
	}
	if (share_func == SPCI_MEM_DONATE_32 &&
	    data_access != SPCI_DATA_ACCESS_NOT_SPECIFIED) {
		dlog_verbose(
			"Invalid data access permissions %#x for donating "
			"memory.\n",
			*permissions);
		return spci_error(SPCI_INVALID_PARAMETERS);
	}

	return (struct spci_value){.func = SPCI_SUCCESS_32};
}
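/*
 * Summary of the sender-side permission rules enforced above (derived from
 * the checks in this file, not quoted from the spec):
 *
 *			data access		instruction access
 *	SHARE		must be specified	must not be specified (NX set)
 *	LEND		must be specified	must not be specified
 *	DONATE		must not be specified	must not be specified
 */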

/**
 * Validates a call to donate, lend or share memory and then updates the
 * stage-2 page tables. Specifically, it checks that the message length and
 * the number of memory region constituents match, and that the transition is
 * valid for the type of memory sending operation.
 *
 * Assumes that the caller has already found and locked both VMs, ensured
 * that the destination RX buffer is available, and copied the memory region
 * descriptor from the sender's TX buffer to a freshly allocated page from
 * Hafnium's internal pool.
 *
 * This function takes ownership of the `memory_region` passed in; it must not
 * be freed by the caller.
 */
struct spci_value spci_memory_send(struct vm *to, struct vm_locked from_locked,
				   struct spci_memory_region *memory_region,
				   uint32_t memory_share_size,
				   uint32_t share_func, struct mpool *page_pool)
{
	struct spci_composite_memory_region *composite;
	bool clear;
	spci_memory_access_permissions_t permissions;
	struct spci_value ret;
	spci_memory_handle_t handle;

	/*
	 * If there is an error validating the `memory_region` then we need to
	 * free it because we own it, but we won't be storing it in a share
	 * state after all.
	 */
	ret = spci_memory_send_validate(to, from_locked, memory_region,
					memory_share_size, share_func, &clear,
					&permissions);
	if (ret.func != SPCI_SUCCESS_32) {
		mpool_free(page_pool, memory_region);
		return ret;
	}

	/* Set the transaction type flag, ready to be retrieved later. */
	switch (share_func) {
	case SPCI_MEM_SHARE_32:
		memory_region->flags |=
			SPCI_MEMORY_REGION_TRANSACTION_TYPE_SHARE;
		break;
	case SPCI_MEM_LEND_32:
		memory_region->flags |=
			SPCI_MEMORY_REGION_TRANSACTION_TYPE_LEND;
		break;
	case SPCI_MEM_DONATE_32:
		memory_region->flags |=
			SPCI_MEMORY_REGION_TRANSACTION_TYPE_DONATE;
		break;
	}

	/*
	 * Allocate a share state before updating the page table. Otherwise,
	 * if updating the page table succeeded but allocating the share state
	 * failed, the memory would be left in a state where nobody could get
	 * it back.
	 */
	if (to->id != HF_TEE_VM_ID &&
	    !allocate_share_state(share_func, memory_region, &handle)) {
		dlog_verbose("Failed to allocate share state.\n");
		mpool_free(page_pool, memory_region);
		return spci_error(SPCI_NO_MEMORY);
	}

	dump_share_states();

	/*
	 * Check that the state is valid in the sender's page table and
	 * update it.
	 */
	composite = spci_memory_region_get_composite(memory_region, 0);
	ret = spci_send_memory(from_locked, composite->constituents,
			       composite->constituent_count, share_func,
			       permissions, page_pool, clear);
	if (ret.func != SPCI_SUCCESS_32) {
		if (to->id != HF_TEE_VM_ID) {
			/* Free the share state. */
			bool freed = share_state_free_handle(handle, page_pool);

			CHECK(freed);
		}

		return ret;
	}

	if (to->id == HF_TEE_VM_ID) {
		/* No share state was allocated, so no handle to return. */
		return (struct spci_value){.func = SPCI_SUCCESS_32};
	}

	return (struct spci_value){.func = SPCI_SUCCESS_32, .arg2 = handle};
}
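/*
 * Illustrative caller-side sketch (hypothetical, not part of this file; the
 * real caller lives elsewhere and performs more checking). It shows the
 * ownership contract: the descriptor is copied out of the sender's TX buffer
 * into a page from Hafnium's pool before the call, and spci_memory_send()
 * then owns that copy, freeing it itself on failure.
 *
 *	struct spci_memory_region *copy = mpool_alloc(page_pool);
 *
 *	memcpy_s(copy, HF_MAILBOX_SIZE, from_locked.vm->mailbox.send,
 *		 memory_share_size);
 *	ret = spci_memory_send(to, from_locked, copy, memory_share_size,
 *			       share_func, page_pool);
 */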

struct spci_value spci_memory_retrieve(
	struct vm_locked to_locked, struct spci_memory_region *retrieve_request,
	uint32_t retrieve_request_size, struct mpool *page_pool)
{
	uint32_t expected_retrieve_request_size =
		sizeof(struct spci_memory_region) +
		retrieve_request->receiver_count *
			sizeof(struct spci_memory_access);
	spci_memory_handle_t handle = retrieve_request->handle;
	spci_memory_region_flags_t transaction_type =
		retrieve_request->flags &
		SPCI_MEMORY_REGION_TRANSACTION_TYPE_MASK;
	struct spci_memory_region *memory_region;
	spci_memory_access_permissions_t sent_permissions;
	enum spci_data_access sent_data_access;
	enum spci_instruction_access sent_instruction_access;
	spci_memory_access_permissions_t requested_permissions;
	enum spci_data_access requested_data_access;
	enum spci_instruction_access requested_instruction_access;
	spci_memory_access_permissions_t permissions;
	uint32_t memory_to_attributes;
	struct spci_composite_memory_region *composite;
	struct share_states_locked share_states;
	struct spci_memory_share_state *share_state;
	struct spci_value ret;
	uint32_t response_size;

	dump_share_states();

	if (retrieve_request_size != expected_retrieve_request_size) {
		dlog_verbose(
			"Invalid length for SPCI_MEM_RETRIEVE_REQ, expected %d "
			"but was %d.\n",
			expected_retrieve_request_size, retrieve_request_size);
		return spci_error(SPCI_INVALID_PARAMETERS);
	}

	if (retrieve_request->receiver_count != 1) {
		dlog_verbose(
			"Multi-way memory sharing not supported (got %d "
			"receiver descriptors on SPCI_MEM_RETRIEVE_REQ, "
			"expected 1).\n",
			retrieve_request->receiver_count);
		return spci_error(SPCI_INVALID_PARAMETERS);
	}

	share_states = share_states_lock();
	if (!get_share_state(share_states, handle, &share_state)) {
		dlog_verbose("Invalid handle %#x for SPCI_MEM_RETRIEVE_REQ.\n",
			     handle);
		ret = spci_error(SPCI_INVALID_PARAMETERS);
		goto out;
	}

	memory_region = share_state->memory_region;
	CHECK(memory_region != NULL);

	/*
	 * Check that the transaction type expected by the receiver is correct,
	 * if it has been specified.
	 */
	if (transaction_type !=
		    SPCI_MEMORY_REGION_TRANSACTION_TYPE_UNSPECIFIED &&
	    transaction_type != (memory_region->flags &
				 SPCI_MEMORY_REGION_TRANSACTION_TYPE_MASK)) {
		dlog_verbose(
			"Incorrect transaction type %#x for "
			"SPCI_MEM_RETRIEVE_REQ, expected %#x for handle %#x.\n",
			transaction_type,
			memory_region->flags &
				SPCI_MEMORY_REGION_TRANSACTION_TYPE_MASK,
			handle);
		ret = spci_error(SPCI_INVALID_PARAMETERS);
		goto out;
	}

	if (retrieve_request->sender != memory_region->sender) {
		dlog_verbose(
			"Incorrect sender ID %d for SPCI_MEM_RETRIEVE_REQ, "
			"expected %d for handle %#x.\n",
			retrieve_request->sender, memory_region->sender,
			handle);
		ret = spci_error(SPCI_INVALID_PARAMETERS);
		goto out;
	}

	if (retrieve_request->tag != memory_region->tag) {
		dlog_verbose(
			"Incorrect tag %d for SPCI_MEM_RETRIEVE_REQ, expected "
			"%d for handle %#x.\n",
			retrieve_request->tag, memory_region->tag, handle);
		ret = spci_error(SPCI_INVALID_PARAMETERS);
		goto out;
	}

	if (retrieve_request->receivers[0].receiver_permissions.receiver !=
	    to_locked.vm->id) {
		dlog_verbose(
			"Retrieve request receiver VM ID %d didn't match "
			"caller of SPCI_MEM_RETRIEVE_REQ.\n",
			retrieve_request->receivers[0]
				.receiver_permissions.receiver);
		ret = spci_error(SPCI_INVALID_PARAMETERS);
		goto out;
	}

	if (memory_region->receivers[0].receiver_permissions.receiver !=
	    to_locked.vm->id) {
		dlog_verbose(
			"Incorrect receiver VM ID %d for "
			"SPCI_MEM_RETRIEVE_REQ, expected %d for handle %#x.\n",
			to_locked.vm->id,
			memory_region->receivers[0]
				.receiver_permissions.receiver,
			handle);
		ret = spci_error(SPCI_INVALID_PARAMETERS);
		goto out;
	}

	if (share_state->retrieved[0]) {
		dlog_verbose("Memory with handle %#x already retrieved.\n",
			     handle);
		ret = spci_error(SPCI_DENIED);
		goto out;
	}

	if (retrieve_request->receivers[0].composite_memory_region_offset !=
	    0) {
		dlog_verbose(
			"Retriever specified address ranges not supported "
			"(got offset %d).\n",
			retrieve_request->receivers[0]
				.composite_memory_region_offset);
		ret = spci_error(SPCI_INVALID_PARAMETERS);
		goto out;
	}

	/*
	 * Check permissions from sender against permissions requested by
	 * receiver.
	 */
	/* TODO: Check attributes too. */
	sent_permissions =
		memory_region->receivers[0].receiver_permissions.permissions;
	sent_data_access = spci_get_data_access_attr(sent_permissions);
	sent_instruction_access =
		spci_get_instruction_access_attr(sent_permissions);
	requested_permissions =
		retrieve_request->receivers[0].receiver_permissions.permissions;
	requested_data_access =
		spci_get_data_access_attr(requested_permissions);
	requested_instruction_access =
		spci_get_instruction_access_attr(requested_permissions);
	permissions = 0;
	switch (sent_data_access) {
	case SPCI_DATA_ACCESS_NOT_SPECIFIED:
	case SPCI_DATA_ACCESS_RW:
		if (requested_data_access == SPCI_DATA_ACCESS_NOT_SPECIFIED ||
		    requested_data_access == SPCI_DATA_ACCESS_RW) {
			spci_set_data_access_attr(&permissions,
						  SPCI_DATA_ACCESS_RW);
			break;
		}
		/* Intentional fall-through. */
	case SPCI_DATA_ACCESS_RO:
		if (requested_data_access == SPCI_DATA_ACCESS_NOT_SPECIFIED ||
		    requested_data_access == SPCI_DATA_ACCESS_RO) {
			spci_set_data_access_attr(&permissions,
						  SPCI_DATA_ACCESS_RO);
			break;
		}
		dlog_verbose(
			"Invalid data access requested; sender specified "
			"permissions %#x but receiver requested %#x.\n",
			sent_permissions, requested_permissions);
		ret = spci_error(SPCI_DENIED);
		goto out;
	case SPCI_DATA_ACCESS_RESERVED:
		panic("Got unexpected SPCI_DATA_ACCESS_RESERVED. Should be "
		      "checked before this point.");
	}
	switch (sent_instruction_access) {
	case SPCI_INSTRUCTION_ACCESS_NOT_SPECIFIED:
	case SPCI_INSTRUCTION_ACCESS_X:
		if (requested_instruction_access ==
			    SPCI_INSTRUCTION_ACCESS_NOT_SPECIFIED ||
		    requested_instruction_access == SPCI_INSTRUCTION_ACCESS_X) {
			spci_set_instruction_access_attr(
				&permissions, SPCI_INSTRUCTION_ACCESS_X);
			break;
		}
		/* Intentional fall-through. */
	case SPCI_INSTRUCTION_ACCESS_NX:
		if (requested_instruction_access ==
			    SPCI_INSTRUCTION_ACCESS_NOT_SPECIFIED ||
		    requested_instruction_access ==
			    SPCI_INSTRUCTION_ACCESS_NX) {
			spci_set_instruction_access_attr(
				&permissions, SPCI_INSTRUCTION_ACCESS_NX);
			break;
		}
		dlog_verbose(
			"Invalid instruction access requested; sender "
			"specified permissions %#x but receiver requested "
			"%#x.\n",
			sent_permissions, requested_permissions);
		ret = spci_error(SPCI_DENIED);
		goto out;
	case SPCI_INSTRUCTION_ACCESS_RESERVED:
		panic("Got unexpected SPCI_INSTRUCTION_ACCESS_RESERVED. Should "
		      "be checked before this point.");
	}
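	/*
	 * The two switches above grant the intersection of the sent and
	 * requested permissions (a summary of the code above, not of the
	 * spec); "-" means the request is denied:
	 *
	 *	sent \ requested	unspecified	RW	RO
	 *	unspecified or RW	RW		RW	RO
	 *	RO			RO		-	RO
	 *
	 *	sent \ requested	unspecified	X	NX
	 *	unspecified or X	X		X	NX
	 *	NX			NX		-	NX
	 */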
	memory_to_attributes = spci_memory_permissions_to_mode(permissions);

	composite = spci_memory_region_get_composite(memory_region, 0);
	ret = spci_retrieve_memory(to_locked, composite->constituents,
				   composite->constituent_count,
				   memory_to_attributes,
				   share_state->share_func, false, page_pool);
	if (ret.func != SPCI_SUCCESS_32) {
		goto out;
	}

	/*
	 * Copy response to RX buffer of caller and deliver the message. This
	 * must be done before the share_state is (possibly) freed.
	 */
	/* TODO: combine attributes from sender and request. */
	response_size = spci_retrieved_memory_region_init(
		to_locked.vm->mailbox.recv, HF_MAILBOX_SIZE,
		memory_region->sender, memory_region->attributes,
		memory_region->flags, handle, to_locked.vm->id, permissions,
		composite->constituents, composite->constituent_count);
	to_locked.vm->mailbox.recv_size = response_size;
	to_locked.vm->mailbox.recv_sender = HF_HYPERVISOR_VM_ID;
	to_locked.vm->mailbox.recv_func = SPCI_MEM_RETRIEVE_RESP_32;
	to_locked.vm->mailbox.state = MAILBOX_STATE_READ;

	if (share_state->share_func == SPCI_MEM_DONATE_32) {
		/*
		 * Memory that has been donated can't be relinquished, so no
		 * need to keep the share state around.
		 */
		share_state_free(share_states, share_state, page_pool);
		dlog_verbose("Freed share state for donate.\n");
	} else {
		share_state->retrieved[0] = true;
	}

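	/*
	 * Presumably arg1 is the total length of the response and arg2 the
	 * length of the fragment sent now; they are equal here because the
	 * whole response was written to the RX buffer in one piece.
	 */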
	ret = (struct spci_value){.func = SPCI_MEM_RETRIEVE_RESP_32,
				  .arg1 = response_size,
				  .arg2 = response_size};

out:
	share_states_unlock(&share_states);
	dump_share_states();
	return ret;
}
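/*
 * Illustrative retriever-side sketch (hypothetical values, for orientation
 * only): a minimal retrieve request matching the checks above, leaving the
 * transaction type, permissions and address ranges unspecified so that
 * Hafnium picks them:
 *
 *	struct spci_memory_region *req = tx_buffer;
 *
 *	req->sender = sender_vm_id;
 *	req->handle = handle;	// returned by SPCI_MEM_SHARE/LEND/DONATE
 *	req->tag = tag;		// must match the tag used on send
 *	req->flags = 0;		// transaction type unspecified
 *	req->receiver_count = 1;
 *	req->receivers[0].receiver_permissions.receiver = own_vm_id;
 *	req->receivers[0].receiver_permissions.permissions = 0;
 *	req->receivers[0].composite_memory_region_offset = 0;
 */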

struct spci_value spci_memory_relinquish(
	struct vm_locked from_locked,
	struct spci_mem_relinquish *relinquish_request, struct mpool *page_pool)
{
	spci_memory_handle_t handle = relinquish_request->handle;
	struct share_states_locked share_states;
	struct spci_memory_share_state *share_state;
	struct spci_memory_region *memory_region;
	bool clear;
	struct spci_composite_memory_region *composite;
	struct spci_value ret;

	if (relinquish_request->endpoint_count != 1) {
		dlog_verbose(
			"Stream endpoints not supported (got %d endpoints on "
			"SPCI_MEM_RELINQUISH, expected 1).\n",
			relinquish_request->endpoint_count);
		return spci_error(SPCI_INVALID_PARAMETERS);
	}

	if (relinquish_request->endpoints[0] != from_locked.vm->id) {
		dlog_verbose(
			"VM ID %d in relinquish message doesn't match calling "
			"VM ID %d.\n",
			relinquish_request->endpoints[0], from_locked.vm->id);
		return spci_error(SPCI_INVALID_PARAMETERS);
	}

	dump_share_states();

	share_states = share_states_lock();
	if (!get_share_state(share_states, handle, &share_state)) {
		dlog_verbose("Invalid handle %#x for SPCI_MEM_RELINQUISH.\n",
			     handle);
		ret = spci_error(SPCI_INVALID_PARAMETERS);
		goto out;
	}

	memory_region = share_state->memory_region;
	CHECK(memory_region != NULL);

	if (memory_region->receivers[0].receiver_permissions.receiver !=
	    from_locked.vm->id) {
		dlog_verbose(
			"VM ID %d tried to relinquish memory region with "
			"handle %#x but receiver was %d.\n",
			from_locked.vm->id, handle,
			memory_region->receivers[0]
				.receiver_permissions.receiver);
		ret = spci_error(SPCI_INVALID_PARAMETERS);
		goto out;
	}

	if (!share_state->retrieved[0]) {
		dlog_verbose(
			"Memory with handle %#x not yet retrieved, can't "
			"relinquish.\n",
			handle);
		ret = spci_error(SPCI_INVALID_PARAMETERS);
		goto out;
	}

	clear = relinquish_request->flags & SPCI_MEMORY_REGION_FLAG_CLEAR;

	/*
	 * Clear is not allowed for memory that was shared, as the original
	 * sender still has access to the memory.
	 */
	if (clear && share_state->share_func == SPCI_MEM_SHARE_32) {
		dlog_verbose("Memory which was shared can't be cleared.\n");
		ret = spci_error(SPCI_INVALID_PARAMETERS);
		goto out;
	}

	composite = spci_memory_region_get_composite(memory_region, 0);
	ret = spci_relinquish_memory(from_locked, composite->constituents,
				     composite->constituent_count, page_pool,
				     clear);

	if (ret.func == SPCI_SUCCESS_32) {
		/*
		 * Mark memory handle as not retrieved, so it can be reclaimed
		 * (or retrieved again).
		 */
		share_state->retrieved[0] = false;
	}

out:
	share_states_unlock(&share_states);
	dump_share_states();
	return ret;
}
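/*
 * Illustrative relinquish descriptor sketch (hypothetical values, for
 * orientation only), matching the checks above:
 *
 *	struct spci_mem_relinquish *rel = tx_buffer;
 *
 *	rel->handle = handle;
 *	rel->flags = 0;		// or SPCI_MEMORY_REGION_FLAG_CLEAR, except
 *				// for memory that was shared
 *	rel->endpoint_count = 1;
 *	rel->endpoints[0] = own_vm_id;
 */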

/**
 * Validates that the reclaim transition is allowed for the given handle,
 * updates the page table of the reclaiming VM, and frees the internal state
 * associated with the handle.
 */
struct spci_value spci_memory_reclaim(struct vm_locked to_locked,
				      spci_memory_handle_t handle, bool clear,
				      struct mpool *page_pool)
{
	struct share_states_locked share_states;
	struct spci_memory_share_state *share_state;
	struct spci_memory_region *memory_region;
	struct spci_composite_memory_region *composite;
	uint32_t memory_to_attributes = MM_MODE_R | MM_MODE_W | MM_MODE_X;
	struct spci_value ret;

	dump_share_states();

	share_states = share_states_lock();
	if (!get_share_state(share_states, handle, &share_state)) {
		dlog_verbose("Invalid handle %#x for SPCI_MEM_RECLAIM.\n",
			     handle);
		ret = spci_error(SPCI_INVALID_PARAMETERS);
		goto out;
	}

	memory_region = share_state->memory_region;
	CHECK(memory_region != NULL);

	if (to_locked.vm->id != memory_region->sender) {
		dlog_verbose(
			"VM %d attempted to reclaim memory handle %#x "
			"originally sent by VM %d.\n",
			to_locked.vm->id, handle, memory_region->sender);
		ret = spci_error(SPCI_INVALID_PARAMETERS);
		goto out;
	}

	if (share_state->retrieved[0]) {
		dlog_verbose(
			"Tried to reclaim memory handle %#x that has not been "
			"relinquished.\n",
			handle);
		ret = spci_error(SPCI_DENIED);
		goto out;
	}

	composite = spci_memory_region_get_composite(memory_region, 0);
	ret = spci_retrieve_memory(to_locked, composite->constituents,
				   composite->constituent_count,
				   memory_to_attributes, SPCI_MEM_RECLAIM_32,
				   clear, page_pool);

	if (ret.func == SPCI_SUCCESS_32) {
		share_state_free(share_states, share_state, page_pool);
		dlog_verbose("Freed share state after successful reclaim.\n");
	}

out:
	share_states_unlock(&share_states);
	return ret;
}
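
/*
 * Note for orientation (a summary of the code above, not new behaviour): the
 * retrieved[0] flag in the share state drives the lifecycle checks in this
 * file. spci_memory_retrieve() refuses a second retrieve while it is set,
 * spci_memory_relinquish() requires it to be set and clears it, and
 * spci_memory_reclaim() requires it to be clear before freeing the share
 * state. Donated memory skips all of this: its share state is freed as soon
 * as it is retrieved.
 */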