/*
 * Copyright 2019 The Hafnium Authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     https://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "hf/spci_memory.h"

#include "hf/api.h"
#include "hf/check.h"
#include "hf/dlog.h"
#include "hf/mpool.h"
#include "hf/spci_internal.h"
#include "hf/std.h"
#include "hf/vm.h"

/** The maximum number of recipients a memory region may be sent to. */
#define MAX_MEM_SHARE_RECIPIENTS 1

/**
 * The maximum number of memory sharing handles which may be active at once. A
 * DONATE handle is active from when it is sent to when it is retrieved; a
 * SHARE or LEND handle is active from when it is sent to when it is
 * reclaimed.
 */
#define MAX_MEM_SHARES 100

static_assert(sizeof(struct spci_memory_region_constituent) % 16 == 0,
	      "struct spci_memory_region_constituent must be a multiple of 16 "
	      "bytes long.");
static_assert(sizeof(struct spci_memory_region_attributes) % 16 == 0,
	      "struct spci_memory_region_attributes must be a multiple of 16 "
	      "bytes long.");
static_assert(sizeof(struct spci_memory_region) % 16 == 0,
	      "struct spci_memory_region must be a multiple of 16 bytes long.");
static_assert(sizeof(struct spci_receiver_address_range) % 16 == 0,
	      "struct spci_receiver_address_range must be a multiple of 16 "
	      "bytes long.");
static_assert(sizeof(struct spci_retrieved_memory_region) % 16 == 0,
	      "struct spci_retrieved_memory_region must be a multiple of 16 "
	      "bytes long.");
static_assert(sizeof(struct spci_memory_retrieve_properties) % 16 == 0,
	      "struct spci_memory_retrieve_properties must be a multiple of 16 "
	      "bytes long.");
static_assert(sizeof(struct spci_memory_retrieve_request) % 16 == 0,
	      "struct spci_memory_retrieve_request must be a multiple of 16 "
	      "bytes long.");

struct spci_memory_share_state {
	/**
	 * The memory region being shared, or NULL if this share state is
	 * unallocated.
	 */
	struct spci_memory_region *memory_region;

	/**
	 * The SPCI function used for sharing the memory. Must be one of
	 * SPCI_MEM_DONATE_32, SPCI_MEM_LEND_32 or SPCI_MEM_SHARE_32 if the
	 * share state is allocated, or 0.
	 */
	uint32_t share_func;

	/**
	 * Whether each recipient has retrieved the memory region yet. The
	 * order of this array matches the order of the attribute descriptors
	 * in the memory region descriptor. Any entries beyond the
	 * attribute_count will always be false.
	 */
	bool retrieved[MAX_MEM_SHARE_RECIPIENTS];
};

/**
 * Encapsulates the set of share states while the `share_states_lock` is held.
 */
struct share_states_locked {
	struct spci_memory_share_state *share_states;
};

/**
 * All access to members of a `struct spci_memory_share_state` must be guarded
 * by this lock.
 */
static struct spinlock share_states_lock_instance = SPINLOCK_INIT;
static struct spci_memory_share_state share_states[MAX_MEM_SHARES];

/**
 * Initialises the next available `struct spci_memory_share_state` and sets
 * `handle` to its handle. Returns true on success or false if none are
 * available.
 */
static bool allocate_share_state(uint32_t share_func,
				 struct spci_memory_region *memory_region,
				 spci_memory_handle_t *handle)
{
	uint32_t i;

	CHECK(memory_region != NULL);

	sl_lock(&share_states_lock_instance);
	for (i = 0; i < MAX_MEM_SHARES; ++i) {
		if (share_states[i].share_func == 0) {
			uint32_t j;
			struct spci_memory_share_state *allocated_state =
				&share_states[i];
			allocated_state->share_func = share_func;
			allocated_state->memory_region = memory_region;
			for (j = 0; j < MAX_MEM_SHARE_RECIPIENTS; ++j) {
				allocated_state->retrieved[j] = false;
			}
			*handle = i | SPCI_MEMORY_HANDLE_ALLOCATOR_HYPERVISOR;
			sl_unlock(&share_states_lock_instance);
			return true;
		}
	}

	sl_unlock(&share_states_lock_instance);
	return false;
}
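
/*
 * Illustrative note on the handle encoding above (the concrete value is an
 * assumption for the example, not normative): the share state index is
 * tagged with the allocator bit, so if SPCI_MEMORY_HANDLE_ALLOCATOR_HYPERVISOR
 * were 0x80000000, index 3 would produce handle 0x80000003.
 * get_share_state() below recovers the index by masking the tag off with
 * ~SPCI_MEMORY_HANDLE_ALLOCATOR_MASK.
 */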

/** Locks the share states lock. */
struct share_states_locked share_states_lock(void)
{
	sl_lock(&share_states_lock_instance);

	return (struct share_states_locked){.share_states = share_states};
}

/** Unlocks the share states lock. */
static void share_states_unlock(struct share_states_locked *share_states)
{
	CHECK(share_states->share_states != NULL);
	share_states->share_states = NULL;
	sl_unlock(&share_states_lock_instance);
}
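
/*
 * Typical usage of the lock wrappers above, a sketch mirroring
 * share_state_free_handle() below rather than a separate API:
 *
 *	struct share_states_locked share_states = share_states_lock();
 *	...read or modify share_states.share_states[i]...
 *	share_states_unlock(&share_states);
 *
 * Nulling the pointer in share_states_unlock() makes a double unlock trip
 * the CHECK and turns any use after unlock into an obvious NULL dereference
 * rather than a silent race.
 */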

/**
 * If the given handle is a valid handle for an allocated share state then
 * initialises `share_state_ret` to point to that share state and returns
 * true. Otherwise returns false. The caller must already hold the share
 * states lock, as witnessed by the `share_states` argument.
 */
static bool get_share_state(struct share_states_locked share_states,
			    spci_memory_handle_t handle,
			    struct spci_memory_share_state **share_state_ret)
{
	struct spci_memory_share_state *share_state;
	uint32_t index = handle & ~SPCI_MEMORY_HANDLE_ALLOCATOR_MASK;

	if (index >= MAX_MEM_SHARES) {
		return false;
	}

	share_state = &share_states.share_states[index];

	if (share_state->share_func == 0) {
		return false;
	}

	*share_state_ret = share_state;
	return true;
}

/** Marks a share state as unallocated. */
static void share_state_free(struct share_states_locked share_states,
			     struct spci_memory_share_state *share_state,
			     struct mpool *page_pool)
{
	CHECK(share_states.share_states != NULL);
	share_state->share_func = 0;
	mpool_free(page_pool, share_state->memory_region);
	share_state->memory_region = NULL;
}

/**
 * Marks the share state with the given handle as unallocated, or returns
 * false if the handle was invalid.
 */
static bool share_state_free_handle(spci_memory_handle_t handle,
				    struct mpool *page_pool)
{
	struct share_states_locked share_states = share_states_lock();
	struct spci_memory_share_state *share_state;

	if (!get_share_state(share_states, handle, &share_state)) {
		share_states_unlock(&share_states);
		return false;
	}

	share_state_free(share_states, share_state, page_pool);
	share_states_unlock(&share_states);

	return true;
}

static void dump_memory_region(struct spci_memory_region *memory_region)
{
	uint32_t i;

	if (LOG_LEVEL < LOG_LEVEL_VERBOSE) {
		return;
	}

	dlog("from VM %d, tag %d, flags %#x, %d total pages in %d "
	     "constituents to %d recipients [",
	     memory_region->sender, memory_region->tag, memory_region->flags,
	     memory_region->page_count, memory_region->constituent_count,
	     memory_region->attribute_count);
	for (i = 0; i < memory_region->attribute_count; ++i) {
		if (i != 0) {
			dlog(", ");
		}
		dlog("VM %d: %#x", memory_region->attributes[i].receiver,
		     memory_region->attributes[i].memory_attributes);
	}
	dlog("]");
}

static void dump_share_states(void)
{
	uint32_t i;

	if (LOG_LEVEL < LOG_LEVEL_VERBOSE) {
		return;
	}

	dlog("Current share states:\n");
	sl_lock(&share_states_lock_instance);
	for (i = 0; i < MAX_MEM_SHARES; ++i) {
		if (share_states[i].share_func != 0) {
			dlog("%d: ", i);
			switch (share_states[i].share_func) {
			case SPCI_MEM_SHARE_32:
				dlog("SHARE");
				break;
			case SPCI_MEM_LEND_32:
				dlog("LEND");
				break;
			case SPCI_MEM_DONATE_32:
				dlog("DONATE");
				break;
			default:
				dlog("invalid share_func %#x",
				     share_states[i].share_func);
			}
			dlog(" (");
			dump_memory_region(share_states[i].memory_region);
			if (share_states[i].retrieved[0]) {
				dlog("): retrieved\n");
			} else {
				dlog("): not retrieved\n");
			}
		}
	}
	sl_unlock(&share_states_lock_instance);
}

/* TODO: Add device attributes: GRE, cacheability, shareability. */
static inline uint32_t spci_memory_attrs_to_mode(uint16_t memory_attributes)
{
	uint32_t mode = 0;

	switch (spci_get_memory_access_attr(memory_attributes)) {
	case SPCI_MEMORY_RO_NX:
		mode = MM_MODE_R;
		break;
	case SPCI_MEMORY_RO_X:
		mode = MM_MODE_R | MM_MODE_X;
		break;
	case SPCI_MEMORY_RW_NX:
		mode = MM_MODE_R | MM_MODE_W;
		break;
	case SPCI_MEMORY_RW_X:
		mode = MM_MODE_R | MM_MODE_W | MM_MODE_X;
		break;
	}

	return mode;
}
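
/*
 * Illustrative summary of the mapping performed above, from access
 * attribute to stage-2 mode bits:
 *
 *	SPCI_MEMORY_RO_NX -> MM_MODE_R
 *	SPCI_MEMORY_RO_X  -> MM_MODE_R | MM_MODE_X
 *	SPCI_MEMORY_RW_NX -> MM_MODE_R | MM_MODE_W
 *	SPCI_MEMORY_RW_X  -> MM_MODE_R | MM_MODE_W | MM_MODE_X
 */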

/**
 * Gets the current mode in the stage-2 page table of the given VM for all
 * the pages in the given constituents. Returns true if they all have the
 * same mode, or false otherwise.
 */
static bool constituents_get_mode(
	struct vm_locked vm, uint32_t *orig_mode,
	struct spci_memory_region_constituent *constituents,
	uint32_t constituent_count)
{
	uint32_t i;

	if (constituent_count == 0) {
		/*
		 * Fail if there are no constituents. Otherwise we would get
		 * an uninitialised *orig_mode.
		 */
		return false;
	}

	for (i = 0; i < constituent_count; ++i) {
		ipaddr_t begin =
			ipa_init(spci_memory_region_constituent_get_address(
				&constituents[i]));
		size_t size = constituents[i].page_count * PAGE_SIZE;
		ipaddr_t end = ipa_add(begin, size);
		uint32_t current_mode;

		/* Fail if addresses are not page-aligned. */
		if (!is_aligned(ipa_addr(begin), PAGE_SIZE) ||
		    !is_aligned(ipa_addr(end), PAGE_SIZE)) {
			return false;
		}

		/*
		 * Ensure that this constituent memory range is all mapped
		 * with the same mode.
		 */
		if (!mm_vm_get_mode(&vm.vm->ptable, begin, end,
				    &current_mode)) {
			return false;
		}

		/*
		 * Ensure that all constituents are mapped with the same mode.
		 */
		if (i == 0) {
			*orig_mode = current_mode;
		} else if (current_mode != *orig_mode) {
			return false;
		}
	}

	return true;
}

/**
 * Verifies that all pages have the same mode, that the starting mode
 * constitutes a valid state, and obtains the next mode to apply to the
 * sending VM.
 *
 * Returns false if:
 *  1) no valid state transition exists for the requested share type;
 *  2) the pages being shared do not all have the same mode within the
 *     <from> VM;
 *  3) the beginning and end IPAs are not page aligned; or
 *  4) the requested share type was not handled.
 * Returns true on success.
 */
static bool spci_send_check_transition(
	struct vm_locked from, uint32_t share_func, uint32_t *orig_from_mode,
	struct spci_memory_region_constituent *constituents,
	uint32_t constituent_count, uint32_t *from_mode)
{
	const uint32_t state_mask =
		MM_MODE_INVALID | MM_MODE_UNOWNED | MM_MODE_SHARED;

	if (!constituents_get_mode(from, orig_from_mode, constituents,
				   constituent_count)) {
		return false;
	}

	/* Ensure the address range is normal memory and not a device. */
	if (*orig_from_mode & MM_MODE_D) {
		dlog_verbose("Can't share device memory (mode is %#x).\n",
			     *orig_from_mode);
		return false;
	}

	/*
	 * Ensure the sender is the owner and has exclusive access to the
	 * memory.
	 */
	if ((*orig_from_mode & state_mask) != 0) {
		return false;
	}

	/* Find the appropriate new mode. */
	*from_mode = ~state_mask & *orig_from_mode;
	switch (share_func) {
	case SPCI_MEM_DONATE_32:
		*from_mode |= MM_MODE_INVALID | MM_MODE_UNOWNED;
		break;

	case SPCI_MEM_LEND_32:
		*from_mode |= MM_MODE_INVALID;
		break;

	case SPCI_MEM_SHARE_32:
		*from_mode |= MM_MODE_SHARED;
		break;

	default:
		return false;
	}

	return true;
}
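
/*
 * Summary of the sender-side transitions applied above, using the
 * owned/accessible shorthand that comments elsewhere in this file use
 * (e.g. "!O-NA" for not-owner, no-access):
 *
 *	DONATE: sender becomes !O-NA (MM_MODE_INVALID | MM_MODE_UNOWNED)
 *	LEND:   sender remains owner but loses access (MM_MODE_INVALID)
 *	SHARE:  sender keeps owned access, pages marked MM_MODE_SHARED
 */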

static bool spci_relinquish_check_transition(
	struct vm_locked from, uint32_t *orig_from_mode,
	struct spci_memory_region_constituent *constituents,
	uint32_t constituent_count, uint32_t *from_mode)
{
	const uint32_t state_mask =
		MM_MODE_INVALID | MM_MODE_UNOWNED | MM_MODE_SHARED;
	uint32_t orig_from_state;

	if (!constituents_get_mode(from, orig_from_mode, constituents,
				   constituent_count)) {
		return false;
	}

	/* Ensure the address range is normal memory and not a device. */
	if (*orig_from_mode & MM_MODE_D) {
		dlog_verbose("Can't relinquish device memory (mode is %#x).\n",
			     *orig_from_mode);
		return false;
	}

	/*
	 * Ensure the relinquishing VM is not the owner but has access to the
	 * memory.
	 */
	orig_from_state = *orig_from_mode & state_mask;
	if ((orig_from_state & ~MM_MODE_SHARED) != MM_MODE_UNOWNED) {
		dlog_verbose(
			"Tried to relinquish memory in state %#x (masked "
			"%#x but should be %#x).\n",
			*orig_from_mode, orig_from_state, MM_MODE_UNOWNED);
		return false;
	}

	/* Find the appropriate new mode. */
	*from_mode = (~state_mask & *orig_from_mode) | MM_MODE_UNMAPPED_MASK;

	return true;
}

/**
 * Verifies that all pages have the same mode, that the starting mode
 * constitutes a valid state, and obtains the next mode to apply to the
 * retrieving VM.
 *
 * Returns false if:
 *  1) no valid state transition exists for the requested share type;
 *  2) the pages being retrieved do not all have the same mode within the
 *     <to> VM;
 *  3) the beginning and end IPAs are not page aligned; or
 *  4) the requested share type was not handled.
 * Returns true on success.
 */
static bool spci_retrieve_check_transition(
	struct vm_locked to, uint32_t share_func,
	struct spci_memory_region_constituent *constituents,
	uint32_t constituent_count, uint32_t memory_to_attributes,
	uint32_t *to_mode)
{
	uint32_t orig_to_mode;

	if (!constituents_get_mode(to, &orig_to_mode, constituents,
				   constituent_count)) {
		return false;
	}

	if (share_func == SPCI_MEM_RECLAIM_32) {
		const uint32_t state_mask =
			MM_MODE_INVALID | MM_MODE_UNOWNED | MM_MODE_SHARED;
		uint32_t orig_to_state = orig_to_mode & state_mask;

		if (orig_to_state != MM_MODE_INVALID &&
		    orig_to_state != MM_MODE_SHARED) {
			return false;
		}
	} else {
		/*
		 * Ensure the retriever has the expected state. We don't care
		 * about the MM_MODE_SHARED bit; either with or without it set
		 * are both valid representations of the !O-NA state.
		 */
		if ((orig_to_mode & MM_MODE_UNMAPPED_MASK) !=
		    MM_MODE_UNMAPPED_MASK) {
			return false;
		}
	}

	/* Find the appropriate new mode. */
	*to_mode = memory_to_attributes;
	switch (share_func) {
	case SPCI_MEM_DONATE_32:
		*to_mode |= 0;
		break;

	case SPCI_MEM_LEND_32:
		*to_mode |= MM_MODE_UNOWNED;
		break;

	case SPCI_MEM_SHARE_32:
		*to_mode |= MM_MODE_UNOWNED | MM_MODE_SHARED;
		break;

	case SPCI_MEM_RECLAIM_32:
		*to_mode |= 0;
		break;

	default:
		return false;
	}

	return true;
}
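
/*
 * Complementary summary for the receiving side, as applied above on top of
 * the requested memory attributes:
 *
 *	DONATE:  retriever becomes the new owner (no extra state bits)
 *	LEND:    retriever gains access without ownership (MM_MODE_UNOWNED)
 *	SHARE:   retriever gains shared access (MM_MODE_UNOWNED |
 *	         MM_MODE_SHARED)
 *	RECLAIM: the original owner maps the memory back with full
 *	         ownership (no extra state bits)
 */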

/**
 * Updates a VM's page table such that the given set of physical address
 * ranges are mapped in the address space at the corresponding address
 * ranges, in the mode provided.
 *
 * If commit is false, the page tables will be allocated from the mpool but
 * no mappings will actually be updated. This function must always be called
 * first with commit false to check that it will succeed before calling with
 * commit true, to avoid leaving the page table in a half-updated state. To
 * make a series of changes atomically you can call them all with commit
 * false before calling them all with commit true.
 *
 * mm_vm_defrag should always be called after a series of page table updates,
 * whether they succeed or fail.
 *
 * Returns true on success, or false if the update failed and no changes were
 * made to memory mappings.
 */
static bool spci_region_group_identity_map(
	struct vm_locked vm_locked,
	struct spci_memory_region_constituent *constituents,
	uint32_t constituent_count, int mode, struct mpool *ppool, bool commit)
{
	/* Iterate over the memory region constituents. */
	for (uint32_t index = 0; index < constituent_count; index++) {
		size_t size = constituents[index].page_count * PAGE_SIZE;
		paddr_t pa_begin = pa_from_ipa(
			ipa_init(spci_memory_region_constituent_get_address(
				&constituents[index])));
		paddr_t pa_end = pa_add(pa_begin, size);

		if (commit) {
			vm_identity_commit(vm_locked, pa_begin, pa_end, mode,
					   ppool, NULL);
		} else if (!vm_identity_prepare(vm_locked, pa_begin, pa_end,
						mode, ppool)) {
			return false;
		}
	}

	return true;
}
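
/*
 * The prepare/commit split above gives a two-phase update used throughout
 * this file. A sketch of the pattern, mirroring spci_send_memory() below:
 *
 *	if (!spci_region_group_identity_map(vm, constituents, count, mode,
 *					    page_pool, false)) {
 *		return spci_error(SPCI_NO_MEMORY);
 *	}
 *	CHECK(spci_region_group_identity_map(vm, constituents, count, mode,
 *					     page_pool, true));
 *
 * A failed prepare leaves the mappings untouched, and the commit cannot
 * fail because the prepare already allocated every page table the mapping
 * needs, which is why the callers below wrap it in CHECK.
 */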

/**
 * Clears a region of physical memory by overwriting it with zeros. The data
 * is flushed from the cache so the memory has been cleared across the
 * system.
 */
static bool clear_memory(paddr_t begin, paddr_t end, struct mpool *ppool)
{
	/*
	 * TODO: change this to a CPU-local single page window rather than a
	 *       global mapping of the whole range. Such an approach will
	 *       limit the changes to stage-1 tables and will allow only
	 *       local invalidation.
	 */
	bool ret;
	struct mm_stage1_locked stage1_locked = mm_lock_stage1();
	void *ptr =
		mm_identity_map(stage1_locked, begin, end, MM_MODE_W, ppool);
	size_t size = pa_difference(begin, end);

	if (!ptr) {
		/* TODO: partial defrag of failed range. */
		/* Recover any memory consumed in failed mapping. */
		mm_defrag(stage1_locked, ppool);
		goto fail;
	}

	memset_s(ptr, size, 0, size);
	arch_mm_flush_dcache(ptr, size);
	mm_unmap(stage1_locked, begin, end, ppool);

	ret = true;
	goto out;

fail:
	ret = false;

out:
	mm_unlock_stage1(&stage1_locked);

	return ret;
}

/**
 * Clears the given memory constituents by overwriting each with zeros. The
 * data is flushed from the cache so the memory has been cleared across the
 * system.
 */
static bool spci_clear_memory_constituents(
	struct spci_memory_region_constituent *constituents,
	uint32_t constituent_count, struct mpool *page_pool)
{
	struct mpool local_page_pool;
	struct mm_stage1_locked stage1_locked;
	bool ret = false;

	/*
	 * Create a local pool so any freed memory can't be used by another
	 * thread. This is to ensure each constituent that is mapped can be
	 * unmapped again afterwards.
	 */
	mpool_init_with_fallback(&local_page_pool, page_pool);

	/* Iterate over the memory region constituents. */
	for (uint32_t i = 0; i < constituent_count; ++i) {
		size_t size = constituents[i].page_count * PAGE_SIZE;
		paddr_t begin = pa_from_ipa(
			ipa_init(spci_memory_region_constituent_get_address(
				&constituents[i])));
		paddr_t end = pa_add(begin, size);

		if (!clear_memory(begin, end, &local_page_pool)) {
			/*
			 * clear_memory will defrag on failure, so no need to
			 * do it here.
			 */
			goto out;
		}
	}

	/*
	 * Need to defrag after clearing, as it may have added extra mappings
	 * to the stage 1 page table.
	 */
	stage1_locked = mm_lock_stage1();
	mm_defrag(stage1_locked, &local_page_pool);
	mm_unlock_stage1(&stage1_locked);

	ret = true;

out:
	mpool_fini(&local_page_pool);
	return ret;
}

/**
 * Validates and prepares memory to be sent from the calling VM to another.
 *
 * This function requires the calling context to hold the <from> VM lock.
 *
 * Returns:
 *  In case of error, one of the following values is returned:
 *   1) SPCI_INVALID_PARAMETERS - The parameters provided by the endpoint
 *      were erroneous;
 *   2) SPCI_NO_MEMORY - Hafnium did not have sufficient memory to complete
 *      the request.
 *  Success is indicated by SPCI_SUCCESS.
 */
static struct spci_value spci_send_memory(
	struct vm_locked from_locked,
	struct spci_memory_region_constituent *constituents,
	uint32_t constituent_count, uint32_t share_func,
	struct mpool *page_pool, bool clear)
{
	struct vm *from = from_locked.vm;
	uint32_t orig_from_mode;
	uint32_t from_mode;
	struct mpool local_page_pool;
	struct spci_value ret;

	/*
	 * Make sure constituents are properly aligned to a 32-bit boundary.
	 * If not we would get alignment faults trying to read (32-bit)
	 * values.
	 */
	if (!is_aligned(constituents, 4)) {
		return spci_error(SPCI_INVALID_PARAMETERS);
	}

	/*
	 * Check that the state transition is lawful for the sender, and
	 * ensure that all constituents of the memory region being shared are
	 * in the same state.
	 */
	if (!spci_send_check_transition(from_locked, share_func,
					&orig_from_mode, constituents,
					constituent_count, &from_mode)) {
		return spci_error(SPCI_INVALID_PARAMETERS);
	}

	/*
	 * Create a local pool so any freed memory can't be used by another
	 * thread. This is to ensure the original mapping can be restored if
	 * the clear fails.
	 */
	mpool_init_with_fallback(&local_page_pool, page_pool);

	/*
	 * First reserve all required memory for the new page table entries
	 * without committing, to make sure the entire operation will succeed
	 * without exhausting the page pool.
	 */
	if (!spci_region_group_identity_map(from_locked, constituents,
					    constituent_count, from_mode,
					    page_pool, false)) {
		/* TODO: partial defrag of failed range. */
		ret = spci_error(SPCI_NO_MEMORY);
		goto out;
	}

	/*
	 * Update the mapping for the sender. This won't allocate because the
	 * transaction was already prepared above, but may free pages in the
	 * case that a whole block is being unmapped that was previously
	 * partially mapped.
	 */
	CHECK(spci_region_group_identity_map(from_locked, constituents,
					     constituent_count, from_mode,
					     &local_page_pool, true));

	/* Clear the memory so no VM or device can see the previous contents. */
	if (clear && !spci_clear_memory_constituents(
			     constituents, constituent_count, page_pool)) {
		/*
		 * On failure, roll back by returning memory to the sender.
		 * This may allocate pages which were previously freed into
		 * `local_page_pool` by the call above, but will never
		 * allocate more pages than that so can never fail.
		 */
		CHECK(spci_region_group_identity_map(
			from_locked, constituents, constituent_count,
			orig_from_mode, &local_page_pool, true));

		ret = spci_error(SPCI_NO_MEMORY);
		goto out;
	}

	ret = (struct spci_value){.func = SPCI_SUCCESS_32};

out:
	mpool_fini(&local_page_pool);

	/*
	 * Tidy up the page table by reclaiming failed mappings (if there was
	 * an error) or merging entries into blocks where possible (on
	 * success).
	 */
	mm_vm_defrag(&from->ptable, page_pool);

	return ret;
}

/**
 * Validates and maps memory shared from one VM to another.
 *
 * This function requires the calling context to hold the <to> VM lock.
 *
 * Returns:
 *  In case of error, one of the following values is returned:
 *   1) SPCI_INVALID_PARAMETERS - The parameters provided by the endpoint
 *      were erroneous;
 *   2) SPCI_NO_MEMORY - Hafnium did not have sufficient memory to complete
 *      the request.
 *  Success is indicated by SPCI_SUCCESS.
 */
static struct spci_value spci_retrieve_memory(
	struct vm_locked to_locked,
	struct spci_memory_region_constituent *constituents,
	uint32_t constituent_count, uint32_t memory_to_attributes,
	uint32_t share_func, bool clear, struct mpool *page_pool)
{
	struct vm *to = to_locked.vm;
	uint32_t to_mode;
	struct mpool local_page_pool;
	struct spci_value ret;

	/*
	 * Make sure constituents are properly aligned to a 32-bit boundary.
	 * If not we would get alignment faults trying to read (32-bit)
	 * values.
	 */
	if (!is_aligned(constituents, 4)) {
		dlog_verbose("Constituents not aligned.\n");
		return spci_error(SPCI_INVALID_PARAMETERS);
	}

	/*
	 * Check that the state transition is lawful for the recipient, and
	 * ensure that all constituents of the memory region being retrieved
	 * are in the same state.
	 */
	if (!spci_retrieve_check_transition(to_locked, share_func,
					    constituents, constituent_count,
					    memory_to_attributes, &to_mode)) {
		dlog_verbose("Invalid transition.\n");
		return spci_error(SPCI_INVALID_PARAMETERS);
	}

	/*
	 * Create a local pool so any freed memory can't be used by another
	 * thread. This is to ensure the original mapping can be restored if
	 * the clear fails.
	 */
	mpool_init_with_fallback(&local_page_pool, page_pool);

	/*
	 * First reserve all required memory for the new page table entries
	 * in the recipient page tables without committing, to make sure the
	 * entire operation will succeed without exhausting the page pool.
	 */
	if (!spci_region_group_identity_map(to_locked, constituents,
					    constituent_count, to_mode,
					    page_pool, false)) {
		/* TODO: partial defrag of failed range. */
		dlog_verbose(
			"Insufficient memory to update recipient page "
			"table.\n");
		ret = spci_error(SPCI_NO_MEMORY);
		goto out;
	}

	/* Clear the memory so no VM or device can see the previous contents. */
	if (clear && !spci_clear_memory_constituents(
			     constituents, constituent_count, page_pool)) {
		ret = spci_error(SPCI_NO_MEMORY);
		goto out;
	}

	/*
	 * Complete the transfer by mapping the memory into the recipient.
	 * This won't allocate because the transaction was already prepared
	 * above, so it doesn't need to use the `local_page_pool`.
	 */
	CHECK(spci_region_group_identity_map(to_locked, constituents,
					     constituent_count, to_mode,
					     page_pool, true));

	ret = (struct spci_value){.func = SPCI_SUCCESS_32};

out:
	mpool_fini(&local_page_pool);

	/*
	 * Tidy up the page table by reclaiming failed mappings (if there was
	 * an error) or merging entries into blocks where possible (on
	 * success).
	 */
	mm_vm_defrag(&to->ptable, page_pool);

	return ret;
}

static struct spci_value spci_relinquish_memory(
	struct vm_locked from_locked,
	struct spci_memory_region_constituent *constituents,
	uint32_t constituent_count, struct mpool *page_pool, bool clear)
{
	uint32_t orig_from_mode;
	uint32_t from_mode;
	struct mpool local_page_pool;
	struct spci_value ret;

	if (!spci_relinquish_check_transition(from_locked, &orig_from_mode,
					      constituents, constituent_count,
					      &from_mode)) {
		dlog_verbose("Invalid transition.\n");
		return spci_error(SPCI_INVALID_PARAMETERS);
	}

	/*
	 * Create a local pool so any freed memory can't be used by another
	 * thread. This is to ensure the original mapping can be restored if
	 * the clear fails.
	 */
	mpool_init_with_fallback(&local_page_pool, page_pool);

	/*
	 * First reserve all required memory for the new page table entries
	 * without committing, to make sure the entire operation will succeed
	 * without exhausting the page pool.
	 */
	if (!spci_region_group_identity_map(from_locked, constituents,
					    constituent_count, from_mode,
					    page_pool, false)) {
		/* TODO: partial defrag of failed range. */
		ret = spci_error(SPCI_NO_MEMORY);
		goto out;
	}

	/*
	 * Update the mapping for the sender. This won't allocate because the
	 * transaction was already prepared above, but may free pages in the
	 * case that a whole block is being unmapped that was previously
	 * partially mapped.
	 */
	CHECK(spci_region_group_identity_map(from_locked, constituents,
					     constituent_count, from_mode,
					     &local_page_pool, true));

	/* Clear the memory so no VM or device can see the previous contents. */
	if (clear && !spci_clear_memory_constituents(
			     constituents, constituent_count, page_pool)) {
		/*
		 * On failure, roll back by returning memory to the sender.
		 * This may allocate pages which were previously freed into
		 * `local_page_pool` by the call above, but will never
		 * allocate more pages than that so can never fail.
		 */
		CHECK(spci_region_group_identity_map(
			from_locked, constituents, constituent_count,
			orig_from_mode, &local_page_pool, true));

		ret = spci_error(SPCI_NO_MEMORY);
		goto out;
	}

	ret = (struct spci_value){.func = SPCI_SUCCESS_32};

out:
	mpool_fini(&local_page_pool);

	/*
	 * Tidy up the page table by reclaiming failed mappings (if there was
	 * an error) or merging entries into blocks where possible (on
	 * success).
	 */
	mm_vm_defrag(&from_locked.vm->ptable, page_pool);

	return ret;
}

/**
 * Validates a call to donate, lend or share memory and then updates the
 * stage-2 page tables. Specifically, checks that the message length and
 * number of memory region constituents match, and that the transition is
 * valid for the type of memory sending operation.
 *
 * Assumes that the caller has already found and locked both VMs and ensured
 * that the destination RX buffer is available, and copied the memory region
 * descriptor from the sender's TX buffer to a freshly allocated page from
 * Hafnium's internal pool.
 *
 * This function takes ownership of the `memory_region` passed in; it must
 * not be freed by the caller.
 */
struct spci_value spci_memory_send(struct vm *to, struct vm_locked from_locked,
				   struct spci_memory_region *memory_region,
				   uint32_t memory_share_size,
				   uint32_t share_func, struct mpool *page_pool)
{
	struct spci_memory_region_constituent *constituents =
		spci_memory_region_get_constituents(memory_region);
	uint32_t constituent_count = memory_region->constituent_count;
	uint32_t attributes_size;
	uint32_t constituents_size;
	bool clear;
	struct spci_value ret;
	spci_memory_handle_t handle;

	/*
	 * Ensure the number of constituents is within the memory bounds.
	 */
	attributes_size = sizeof(struct spci_memory_region_attributes) *
			  memory_region->attribute_count;
	constituents_size = sizeof(struct spci_memory_region_constituent) *
			    constituent_count;
	if (memory_region->constituent_offset <
		    sizeof(struct spci_memory_region) + attributes_size ||
	    memory_share_size !=
		    memory_region->constituent_offset + constituents_size) {
		dlog_verbose("Invalid size %d or constituent offset %d.\n",
			     memory_share_size,
			     memory_region->constituent_offset);
		mpool_free(page_pool, memory_region);
		return spci_error(SPCI_INVALID_PARAMETERS);
	}

	/* The sender must match the message sender. */
	if (memory_region->sender != from_locked.vm->id) {
		dlog_verbose("Invalid sender %d.\n", memory_region->sender);
		mpool_free(page_pool, memory_region);
		return spci_error(SPCI_INVALID_PARAMETERS);
	}

	/* We only support a single recipient. */
	if (memory_region->attribute_count != 1) {
		dlog_verbose("Multiple recipients not supported.\n");
		mpool_free(page_pool, memory_region);
		return spci_error(SPCI_NOT_SUPPORTED);
	}

	/* The recipient must match the message recipient. */
	if (memory_region->attributes[0].receiver != to->id) {
		mpool_free(page_pool, memory_region);
		return spci_error(SPCI_INVALID_PARAMETERS);
	}

	clear = memory_region->flags & SPCI_MEMORY_REGION_FLAG_CLEAR;
	/*
	 * Clear is not allowed for memory sharing, as the sender still has
	 * access to the memory.
	 */
	if (clear && share_func == SPCI_MEM_SHARE_32) {
		dlog_verbose("Memory can't be cleared while being shared.\n");
		mpool_free(page_pool, memory_region);
		return spci_error(SPCI_INVALID_PARAMETERS);
	}

	/*
	 * Allocate a share state before updating the page table. Otherwise
	 * if updating the page table succeeded but allocating the share
	 * state failed then it would leave the memory in a state where
	 * nobody could get it back.
	 */
	if (to->id != HF_TEE_VM_ID &&
	    !allocate_share_state(share_func, memory_region, &handle)) {
		dlog_verbose("Failed to allocate share state.\n");
		mpool_free(page_pool, memory_region);
		return spci_error(SPCI_NO_MEMORY);
	}

	dump_share_states();

	/* Check that state is valid in sender page table and update. */
	ret = spci_send_memory(from_locked, constituents, constituent_count,
			       share_func, page_pool, clear);
	if (ret.func != SPCI_SUCCESS_32) {
		if (to->id != HF_TEE_VM_ID) {
			/* Free share state. */
			bool freed = share_state_free_handle(handle,
							     page_pool);

			CHECK(freed);
		}

		return ret;
	}

	if (to->id == HF_TEE_VM_ID) {
		/* Return directly, no need to allocate share state. */
		return (struct spci_value){.func = SPCI_SUCCESS_32};
	}

	return (struct spci_value){.func = SPCI_SUCCESS_32, .arg2 = handle};
}
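
/*
 * Illustrative caller flow for spci_memory_send() above (a sketch; the
 * handler and pool names are assumptions, not part of this file): the
 * SPCI_MEM_* ABI handler is expected to copy the region descriptor out of
 * the sender's TX buffer into a page from Hafnium's pool, lock the sender,
 * then call:
 *
 *	struct spci_value ret =
 *		spci_memory_send(to, from_locked, region_copy, size,
 *				 SPCI_MEM_LEND_32, &api_page_pool);
 *
 * On SPCI_SUCCESS_32 (for a non-TEE recipient) ret.arg2 carries the handle
 * that the sender forwards to the retriever.
 */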
| 1052 | |
| 1053 | struct spci_value spci_memory_retrieve( |
| 1054 | struct vm_locked to_locked, |
| 1055 | struct spci_memory_retrieve_request *retrieve_request, |
| 1056 | uint32_t retrieve_request_size, struct mpool *page_pool) |
| 1057 | { |
| 1058 | uint32_t expected_retrieve_request_size = |
| 1059 | sizeof(struct spci_memory_retrieve_request) + |
| 1060 | retrieve_request->retrieve_properties_count * |
| 1061 | sizeof(struct spci_memory_retrieve_properties); |
| 1062 | spci_memory_handle_t handle = retrieve_request->handle; |
| 1063 | struct spci_memory_region *memory_region; |
| 1064 | struct spci_memory_retrieve_properties *retrieve_properties; |
| 1065 | uint32_t memory_to_attributes; |
| 1066 | struct spci_memory_region_constituent *constituents; |
| 1067 | uint32_t constituent_count; |
| 1068 | struct share_states_locked share_states; |
| 1069 | struct spci_memory_share_state *share_state; |
| 1070 | struct spci_value ret; |
| 1071 | uint32_t response_size; |
| 1072 | |
| 1073 | dump_share_states(); |
| 1074 | |
| 1075 | if (retrieve_request_size != expected_retrieve_request_size) { |
| 1076 | dlog_verbose( |
| 1077 | "Invalid length for SPCI_MEM_RETRIEVE_REQ, expected %d " |
| 1078 | "but was %d.\n", |
| 1079 | expected_retrieve_request_size, retrieve_request_size); |
| 1080 | return spci_error(SPCI_INVALID_PARAMETERS); |
| 1081 | } |
| 1082 | |
| 1083 | share_states = share_states_lock(); |
| 1084 | if (!get_share_state(share_states, handle, &share_state)) { |
| 1085 | dlog_verbose("Invalid handle %#x for SPCI_MEM_RETRIEVE_REQ.\n", |
| 1086 | handle); |
| 1087 | ret = spci_error(SPCI_INVALID_PARAMETERS); |
| 1088 | goto out; |
| 1089 | } |
| 1090 | |
| 1091 | if (retrieve_request->share_func != share_state->share_func) { |
| 1092 | dlog_verbose( |
| 1093 | "Incorrect transaction type %#x for " |
| 1094 | "SPCI_MEM_RETRIEVE_REQ, expected %#x for handle %#x.\n", |
| 1095 | retrieve_request->share_func, share_state->share_func, |
| 1096 | handle); |
| 1097 | ret = spci_error(SPCI_INVALID_PARAMETERS); |
| 1098 | goto out; |
| 1099 | } |
| 1100 | |
| 1101 | memory_region = share_state->memory_region; |
| 1102 | CHECK(memory_region != NULL); |
| 1103 | |
| 1104 | if (retrieve_request->sender != memory_region->sender) { |
| 1105 | dlog_verbose( |
| 1106 | "Incorrect sender ID %d for SPCI_MEM_RETRIEVE_REQ, " |
| 1107 | "expected %d for handle %#x.\n", |
| 1108 | retrieve_request->sender, memory_region->sender, |
| 1109 | handle); |
| 1110 | ret = spci_error(SPCI_INVALID_PARAMETERS); |
| 1111 | goto out; |
| 1112 | } |
| 1113 | |
| 1114 | if (retrieve_request->tag != memory_region->tag) { |
| 1115 | dlog_verbose( |
| 1116 | "Incorrect tag %d for SPCI_MEM_RETRIEVE_REQ, expected " |
| 1117 | "%d for handle %#x.\n", |
| 1118 | retrieve_request->tag, memory_region->tag, handle); |
| 1119 | ret = spci_error(SPCI_INVALID_PARAMETERS); |
| 1120 | goto out; |
| 1121 | } |
| 1122 | |
| 1123 | if (memory_region->attributes[0].receiver != to_locked.vm->id) { |
| 1124 | dlog_verbose( |
| 1125 | "Incorrect receiver VM ID %d for " |
| 1126 | "SPCI_MEM_RETRIEVE_REQ, expected %d for handle %#x.\n", |
| 1127 | to_locked.vm->id, memory_region->attributes[0].receiver, |
| 1128 | handle); |
| 1129 | ret = spci_error(SPCI_INVALID_PARAMETERS); |
| 1130 | goto out; |
| 1131 | } |
| 1132 | |
| 1133 | if (share_state->retrieved[0]) { |
| 1134 | dlog_verbose("Memory with handle %#x already retrieved.\n", |
| 1135 | handle); |
| 1136 | ret = spci_error(SPCI_DENIED); |
| 1137 | goto out; |
| 1138 | } |
| 1139 | |
| 1140 | if (retrieve_request->attribute_count != 0) { |
| 1141 | dlog_verbose( |
| 1142 | "Multi-way memory sharing not supported (got %d " |
| 1143 | "attribute descriptors on SPCI_MEM_RETRIEVE_REQ, " |
| 1144 | "expected 0).\n", |
| 1145 | retrieve_request->attribute_count); |
| 1146 | ret = spci_error(SPCI_NOT_SUPPORTED); |
| 1147 | goto out; |
| 1148 | } |
| 1149 | |
| 1150 | if (retrieve_request->retrieve_properties_count != 1) { |
| 1151 | dlog_verbose( |
| 1152 | "Stream endpoints not supported (got %d retrieve " |
| 1153 | "properties descriptors on SPCI_MEM_RETRIEVE_REQ, " |
| 1154 | "expected 1).\n", |
| 1155 | retrieve_request->retrieve_properties_count); |
| 1156 | ret = spci_error(SPCI_INVALID_PARAMETERS); |
| 1157 | goto out; |
| 1158 | } |
| 1159 | |
| 1160 | retrieve_properties = |
| 1161 | spci_memory_retrieve_request_first_retrieve_properties( |
| 1162 | retrieve_request); |
| 1163 | |
| 1164 | if (retrieve_properties->attributes.receiver != to_locked.vm->id) { |
| 1165 | dlog_verbose( |
| 1166 | "Retrieve properties receiver VM ID %d didn't match " |
| 1167 | "caller of SPCI_MEM_RETRIEVE_REQ.\n", |
| 1168 | retrieve_properties->attributes.receiver); |
| 1169 | ret = spci_error(SPCI_INVALID_PARAMETERS); |
| 1170 | goto out; |
| 1171 | } |
| 1172 | |
| 1173 | if (retrieve_properties->page_count != memory_region->page_count) { |
| 1174 | dlog_verbose( |
| 1175 | "Incorrect page count %d for " |
| 1176 | "SPCI_MEM_RETRIEVE_REQ, expected %d for handle %#x.\n", |
| 1177 | retrieve_properties->page_count, |
| 1178 | memory_region->page_count, handle); |
| 1179 | ret = spci_error(SPCI_INVALID_PARAMETERS); |
| 1180 | goto out; |
| 1181 | } |
| 1182 | |
| 1183 | if (retrieve_properties->constituent_count != 0) { |
| 1184 | dlog_verbose( |
| 1185 | "Retriever specified address ranges not supported (got " |
| 1186 | "%d).\n", |
| 1187 | retrieve_properties->constituent_count); |
| 1188 | ret = spci_error(SPCI_INVALID_PARAMETERS); |
| 1189 | goto out; |
| 1190 | } |
| 1191 | |
| 1192 | memory_to_attributes = spci_memory_attrs_to_mode( |
| 1193 | memory_region->attributes[0].memory_attributes); |
| 1194 | |
| 1195 | constituents = spci_memory_region_get_constituents(memory_region); |
| 1196 | constituent_count = memory_region->constituent_count; |
| 1197 | ret = spci_retrieve_memory(to_locked, constituents, constituent_count, |
| 1198 | memory_to_attributes, |
| 1199 | share_state->share_func, false, page_pool); |
| 1200 | if (ret.func != SPCI_SUCCESS_32) { |
| 1201 | goto out; |
| 1202 | } |
| 1203 | |
	/*
	 * Copy the response to the caller's RX buffer and deliver the
	 * message. This must be done before the share_state is (possibly)
	 * freed. The response comes from the hypervisor itself, so the
	 * sender is HF_HYPERVISOR_VM_ID.
	 */
	response_size = spci_retrieved_memory_region_init(
		to_locked.vm->mailbox.recv, HF_MAILBOX_SIZE, to_locked.vm->id,
		constituents, constituent_count, memory_region->page_count);
	to_locked.vm->mailbox.recv_size = response_size;
	to_locked.vm->mailbox.recv_sender = HF_HYPERVISOR_VM_ID;
	to_locked.vm->mailbox.recv_func = SPCI_MEM_RETRIEVE_RESP_32;
	to_locked.vm->mailbox.state = MAILBOX_STATE_READ;

	if (share_state->share_func == SPCI_MEM_DONATE_32) {
		/*
		 * Memory that has been donated can't be relinquished, so no
		 * need to keep the share state around.
		 */
		share_state_free(share_states, share_state, page_pool);
		dlog_verbose("Freed share state for donate.\n");
	} else {
		share_state->retrieved[0] = true;
	}

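	/*
	 * arg3 and arg4 presumably carry the total length and fragment length
	 * of the response; they are equal here because the response always
	 * fits in a single fragment.
	 */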
	ret = (struct spci_value){.func = SPCI_MEM_RETRIEVE_RESP_32,
				  .arg3 = response_size,
				  .arg4 = response_size};

out:
	share_states_unlock(&share_states);
	dump_share_states();
	return ret;
}

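/**
 * Validates that the relinquish transition is allowed for the given handle,
 * updates the page table of the relinquishing VM, and marks the handle as no
 * longer retrieved so that the memory can be reclaimed by the sender (or
 * retrieved again by the receiver).
 */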
struct spci_value spci_memory_relinquish(
	struct vm_locked from_locked,
	struct spci_mem_relinquish *relinquish_request, struct mpool *page_pool)
{
	spci_memory_handle_t handle = relinquish_request->handle;
	struct share_states_locked share_states;
	struct spci_memory_share_state *share_state;
	struct spci_memory_region *memory_region;
	bool clear;
	struct spci_memory_region_constituent *constituents;
	uint32_t constituent_count;
	struct spci_value ret;

	if (relinquish_request->endpoint_count != 0) {
		dlog_verbose(
			"Stream endpoints not supported (got %d extra "
			"endpoints on SPCI_MEM_RELINQUISH, expected 0).\n",
			relinquish_request->endpoint_count);
		return spci_error(SPCI_INVALID_PARAMETERS);
	}

	if (relinquish_request->sender != from_locked.vm->id) {
		dlog_verbose(
			"VM ID %d in relinquish message doesn't match calling "
			"VM ID %d.\n",
			relinquish_request->sender, from_locked.vm->id);
		return spci_error(SPCI_INVALID_PARAMETERS);
	}

	dump_share_states();

	share_states = share_states_lock();
	if (!get_share_state(share_states, handle, &share_state)) {
		dlog_verbose("Invalid handle %#x for SPCI_MEM_RELINQUISH.\n",
			     handle);
		ret = spci_error(SPCI_INVALID_PARAMETERS);
		goto out;
	}

	memory_region = share_state->memory_region;
	CHECK(memory_region != NULL);

	if (memory_region->attributes[0].receiver != from_locked.vm->id) {
		dlog_verbose(
			"VM ID %d tried to relinquish memory region with "
			"handle %#x but receiver was %d.\n",
			from_locked.vm->id, handle,
			memory_region->attributes[0].receiver);
		ret = spci_error(SPCI_INVALID_PARAMETERS);
		goto out;
	}

	if (!share_state->retrieved[0]) {
		dlog_verbose(
			"Memory with handle %#x not yet retrieved, can't "
			"relinquish.\n",
			handle);
		ret = spci_error(SPCI_INVALID_PARAMETERS);
		goto out;
	}

	clear = relinquish_request->flags & SPCI_MEMORY_REGION_FLAG_CLEAR;

	/*
	 * Clear is not allowed for memory that was shared, as the original
	 * sender still has access to the memory.
	 */
	if (clear && share_state->share_func == SPCI_MEM_SHARE_32) {
		dlog_verbose("Memory which was shared can't be cleared.\n");
		ret = spci_error(SPCI_INVALID_PARAMETERS);
		goto out;
	}

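	/*
	 * Remove the memory from the relinquishing VM's page table, zeroing
	 * it first if the clear flag was set.
	 */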
	constituents = spci_memory_region_get_constituents(memory_region);
	constituent_count = memory_region->constituent_count;
	ret = spci_relinquish_memory(from_locked, constituents,
				     constituent_count, page_pool, clear);

	if (ret.func == SPCI_SUCCESS_32) {
		/*
		 * Mark memory handle as not retrieved, so it can be reclaimed
		 * (or retrieved again).
		 */
		share_state->retrieved[0] = false;
	}

out:
	share_states_unlock(&share_states);
	dump_share_states();
	return ret;
}

/**
 * Validates that the reclaim transition is allowed for the given handle,
 * updates the page table of the reclaiming VM, and frees the internal state
 * associated with the handle.
 */
struct spci_value spci_memory_reclaim(struct vm_locked to_locked,
				      spci_memory_handle_t handle, bool clear,
				      struct mpool *page_pool)
{
	struct share_states_locked share_states;
	struct spci_memory_share_state *share_state;
	struct spci_memory_region *memory_region;
	struct spci_memory_region_constituent *constituents;
	uint32_t constituent_count;
	/* The original sender regains full access on reclaim. */
	uint32_t memory_to_attributes = MM_MODE_R | MM_MODE_W | MM_MODE_X;
	struct spci_value ret;

	dump_share_states();

	share_states = share_states_lock();
	if (!get_share_state(share_states, handle, &share_state)) {
		dlog_verbose("Invalid handle %#x for SPCI_MEM_RECLAIM.\n",
			     handle);
		ret = spci_error(SPCI_INVALID_PARAMETERS);
		goto out;
	}

	memory_region = share_state->memory_region;
	CHECK(memory_region != NULL);

	if (to_locked.vm->id != memory_region->sender) {
		dlog_verbose(
			"VM %d attempted to reclaim memory handle %#x "
			"originally sent by VM %d.\n",
			to_locked.vm->id, handle, memory_region->sender);
		ret = spci_error(SPCI_INVALID_PARAMETERS);
		goto out;
	}

	if (share_state->retrieved[0]) {
		dlog_verbose(
			"Tried to reclaim memory handle %#x that has not been "
			"relinquished.\n",
			handle);
		ret = spci_error(SPCI_DENIED);
		goto out;
	}

	constituents = spci_memory_region_get_constituents(memory_region);
	constituent_count = memory_region->constituent_count;
	ret = spci_retrieve_memory(to_locked, constituents, constituent_count,
				   memory_to_attributes, SPCI_MEM_RECLAIM_32,
				   clear, page_pool);

	if (ret.func == SPCI_SUCCESS_32) {
		share_state_free(share_states, share_state, page_pool);
		dlog_verbose("Freed share state after successful reclaim.\n");
	}

out:
	share_states_unlock(&share_states);
	return ret;
}
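
/*
 * A minimal sketch of the share state lifecycle implemented above, assuming a
 * single borrower (MAX_MEM_SHARE_RECIPIENTS is 1):
 *
 *   SPCI_MEM_SHARE / SPCI_MEM_LEND:
 *     send                  -> share state allocated, retrieved[0] = false
 *     SPCI_MEM_RETRIEVE_REQ -> retrieved[0] = true
 *     SPCI_MEM_RELINQUISH   -> retrieved[0] = false
 *     SPCI_MEM_RECLAIM      -> share state freed
 *
 *   SPCI_MEM_DONATE:
 *     send                  -> share state allocated, retrieved[0] = false
 *     SPCI_MEM_RETRIEVE_REQ -> share state freed immediately, since donated
 *                              memory can't be relinquished or reclaimed.
 */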