/*
 * Copyright 2019 The Hafnium Authors.
 *
 * Use of this source code is governed by a BSD-style
 * license that can be found in the LICENSE file or at
 * https://opensource.org/licenses/BSD-3-Clause.
 */

#include "hf/ffa_memory.h"

#include "hf/arch/other_world.h"

#include "hf/api.h"
#include "hf/check.h"
#include "hf/dlog.h"
#include "hf/ffa_internal.h"
#include "hf/mpool.h"
#include "hf/std.h"
#include "hf/vm.h"

/** The maximum number of recipients a memory region may be sent to. */
#define MAX_MEM_SHARE_RECIPIENTS 1

/**
 * The maximum number of memory sharing handles which may be active at once. A
 * DONATE handle is active from when it is sent to when it is retrieved; a
 * SHARE or LEND handle is active from when it is sent to when it is reclaimed.
 */
#define MAX_MEM_SHARES 100

/**
 * The maximum number of fragments into which a memory sharing message may be
 * broken.
 */
#define MAX_FRAGMENTS 20

static_assert(sizeof(struct ffa_memory_region_constituent) % 16 == 0,
	      "struct ffa_memory_region_constituent must be a multiple of 16 "
	      "bytes long.");
static_assert(sizeof(struct ffa_composite_memory_region) % 16 == 0,
	      "struct ffa_composite_memory_region must be a multiple of 16 "
	      "bytes long.");
static_assert(sizeof(struct ffa_memory_region_attributes) == 4,
	      "struct ffa_memory_region_attributes must be 4 bytes long.");
static_assert(sizeof(struct ffa_memory_access) % 16 == 0,
	      "struct ffa_memory_access must be a multiple of 16 bytes long.");
static_assert(sizeof(struct ffa_memory_region) % 16 == 0,
	      "struct ffa_memory_region must be a multiple of 16 bytes long.");
static_assert(sizeof(struct ffa_mem_relinquish) % 16 == 0,
	      "struct ffa_mem_relinquish must be a multiple of 16 "
	      "bytes long.");

struct ffa_memory_share_state {
	ffa_memory_handle_t handle;

	/**
	 * The memory region being shared, or NULL if this share state is
	 * unallocated.
	 */
	struct ffa_memory_region *memory_region;

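	/**
	 * The constituents of the memory region, stored as one array per
	 * fragment in the order the fragments were received.
	 */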
	struct ffa_memory_region_constituent *fragments[MAX_FRAGMENTS];

	/** The number of constituents in each fragment. */
	uint32_t fragment_constituent_counts[MAX_FRAGMENTS];

	/**
	 * The number of valid elements in the `fragments` and
	 * `fragment_constituent_counts` arrays.
	 */
	uint32_t fragment_count;

	/**
	 * The FF-A function used for sharing the memory. Must be one of
	 * FFA_MEM_DONATE_32, FFA_MEM_LEND_32 or FFA_MEM_SHARE_32 if the
	 * share state is allocated, or 0.
	 */
	uint32_t share_func;

	/**
	 * The sender's original mode before invoking the FF-A function for
	 * sharing the memory. This is used to restore the original
	 * configuration when the sender invokes FFA_MEM_RECLAIM_32.
	 */
	uint32_t sender_orig_mode;

	/**
	 * True if all the fragments of this sharing request have been sent and
	 * Hafnium has updated the sender page table accordingly.
	 */
	bool sending_complete;

	/**
	 * How many fragments of the memory region each recipient has retrieved
	 * so far. The order of this array matches the order of the endpoint
	 * memory access descriptors in the memory region descriptor. Any
	 * entries beyond the receiver_count will always be 0.
	 */
	uint32_t retrieved_fragment_count[MAX_MEM_SHARE_RECIPIENTS];
};

/**
 * Encapsulates the set of share states while the `share_states_lock` is held.
 */
struct share_states_locked {
	struct ffa_memory_share_state *share_states;
};

/**
 * All access to members of a `struct ffa_memory_share_state` must be guarded
 * by this lock.
 */
static struct spinlock share_states_lock_instance = SPINLOCK_INIT;
static struct ffa_memory_share_state share_states[MAX_MEM_SHARES];

/**
 * Buffer for retrieving memory region information from the TEE for when a
 * region is reclaimed by a VM. Access to this buffer must be guarded by the VM
 * lock of the TEE VM.
 */
alignas(PAGE_SIZE) static uint8_t
	tee_retrieve_buffer[HF_MAILBOX_SIZE * MAX_FRAGMENTS];

/**
 * Initialises the next available `struct ffa_memory_share_state` and sets
 * `share_state_ret` to a pointer to it. If `handle` is
 * `FFA_MEMORY_HANDLE_INVALID` then allocates an appropriate handle, otherwise
 * uses the provided handle which is assumed to be globally unique.
 *
 * Returns true on success or false if none are available.
 */
static bool allocate_share_state(
	struct share_states_locked share_states, uint32_t share_func,
	struct ffa_memory_region *memory_region, uint32_t fragment_length,
	ffa_memory_handle_t handle,
	struct ffa_memory_share_state **share_state_ret)
{
	uint64_t i;

	CHECK(share_states.share_states != NULL);
	CHECK(memory_region != NULL);

	for (i = 0; i < MAX_MEM_SHARES; ++i) {
		if (share_states.share_states[i].share_func == 0) {
			uint32_t j;
			struct ffa_memory_share_state *allocated_state =
				&share_states.share_states[i];
			struct ffa_composite_memory_region *composite =
				ffa_memory_region_get_composite(memory_region,
								0);

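			/*
			 * Handles allocated by the hypervisor encode the share
			 * state index, tagged with the hypervisor allocator
			 * bit, so that get_share_state() can look the entry up
			 * directly from the handle.
			 */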
			if (handle == FFA_MEMORY_HANDLE_INVALID) {
				allocated_state->handle =
					i |
					FFA_MEMORY_HANDLE_ALLOCATOR_HYPERVISOR;
			} else {
				allocated_state->handle = handle;
			}
			allocated_state->share_func = share_func;
			allocated_state->memory_region = memory_region;
			allocated_state->fragment_count = 1;
			allocated_state->fragments[0] = composite->constituents;
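			/*
			 * The number of constituents in the first fragment is
			 * however many fit in `fragment_length` after the
			 * descriptor headers that precede the constituent
			 * array.
			 */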
			allocated_state->fragment_constituent_counts[0] =
				(fragment_length -
				 ffa_composite_constituent_offset(memory_region,
								  0)) /
				sizeof(struct ffa_memory_region_constituent);
			allocated_state->sending_complete = false;
			for (j = 0; j < MAX_MEM_SHARE_RECIPIENTS; ++j) {
				allocated_state->retrieved_fragment_count[j] =
					0;
			}
			if (share_state_ret != NULL) {
				*share_state_ret = allocated_state;
			}
			return true;
		}
	}

	return false;
}

/** Locks the share states lock. */
struct share_states_locked share_states_lock(void)
{
	sl_lock(&share_states_lock_instance);

	return (struct share_states_locked){.share_states = share_states};
}

/** Unlocks the share states lock. */
static void share_states_unlock(struct share_states_locked *share_states)
{
	CHECK(share_states->share_states != NULL);
	share_states->share_states = NULL;
	sl_unlock(&share_states_lock_instance);
}
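
/*
 * Callers pair these, e.g. (sketch):
 *
 *	struct share_states_locked share_states = share_states_lock();
 *	...use share_states.share_states...
 *	share_states_unlock(&share_states);
 */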

/**
 * If the given handle is a valid handle for an allocated share state then
 * initialises `share_state_ret` to point to the share state and returns true.
 * Otherwise returns false.
 */
static bool get_share_state(struct share_states_locked share_states,
			    ffa_memory_handle_t handle,
			    struct ffa_memory_share_state **share_state_ret)
{
	struct ffa_memory_share_state *share_state;
	uint32_t index;

	CHECK(share_states.share_states != NULL);
	CHECK(share_state_ret != NULL);

	/*
	 * First look for a share_state allocated by us, in which case the
	 * handle is based on the index.
	 */
	if ((handle & FFA_MEMORY_HANDLE_ALLOCATOR_MASK) ==
	    FFA_MEMORY_HANDLE_ALLOCATOR_HYPERVISOR) {
		index = handle & ~FFA_MEMORY_HANDLE_ALLOCATOR_MASK;
		if (index < MAX_MEM_SHARES) {
			share_state = &share_states.share_states[index];
			if (share_state->share_func != 0) {
				*share_state_ret = share_state;
				return true;
			}
		}
	}

	/* Fall back to a linear scan. */
	for (index = 0; index < MAX_MEM_SHARES; ++index) {
		share_state = &share_states.share_states[index];
		if (share_state->handle == handle &&
		    share_state->share_func != 0) {
			*share_state_ret = share_state;
			return true;
		}
	}

	return false;
}

/** Marks a share state as unallocated. */
static void share_state_free(struct share_states_locked share_states,
			     struct ffa_memory_share_state *share_state,
			     struct mpool *page_pool)
{
	uint32_t i;

	CHECK(share_states.share_states != NULL);
	share_state->share_func = 0;
	share_state->sending_complete = false;
	mpool_free(page_pool, share_state->memory_region);
	/*
	 * The first fragment is part of the same page as the `memory_region`,
	 * so it doesn't need to be freed separately.
	 */
	share_state->fragments[0] = NULL;
	share_state->fragment_constituent_counts[0] = 0;
	for (i = 1; i < share_state->fragment_count; ++i) {
		mpool_free(page_pool, share_state->fragments[i]);
		share_state->fragments[i] = NULL;
		share_state->fragment_constituent_counts[i] = 0;
	}
	share_state->fragment_count = 0;
	share_state->memory_region = NULL;
}

/** Checks whether the given share state has been fully sent. */
static bool share_state_sending_complete(
	struct share_states_locked share_states,
	struct ffa_memory_share_state *share_state)
{
	struct ffa_composite_memory_region *composite;
	uint32_t expected_constituent_count;
	uint32_t fragment_constituent_count_total = 0;
	uint32_t i;

	/* Lock must be held. */
	CHECK(share_states.share_states != NULL);

	/*
	 * Share state must already be valid, or it's not possible to get hold
	 * of it.
	 */
	CHECK(share_state->memory_region != NULL &&
	      share_state->share_func != 0);

	composite =
		ffa_memory_region_get_composite(share_state->memory_region, 0);
	expected_constituent_count = composite->constituent_count;
	for (i = 0; i < share_state->fragment_count; ++i) {
		fragment_constituent_count_total +=
			share_state->fragment_constituent_counts[i];
	}
	dlog_verbose(
		"Checking completion: constituent count %d/%d from %d "
		"fragments.\n",
		fragment_constituent_count_total, expected_constituent_count,
		share_state->fragment_count);

	return fragment_constituent_count_total == expected_constituent_count;
}

/**
 * Calculates the offset of the next fragment expected for the given share
 * state.
 */
static uint32_t share_state_next_fragment_offset(
	struct share_states_locked share_states,
	struct ffa_memory_share_state *share_state)
{
	uint32_t next_fragment_offset;
	uint32_t i;

	/* Lock must be held. */
	CHECK(share_states.share_states != NULL);

	next_fragment_offset =
		ffa_composite_constituent_offset(share_state->memory_region, 0);
	for (i = 0; i < share_state->fragment_count; ++i) {
		next_fragment_offset +=
			share_state->fragment_constituent_counts[i] *
			sizeof(struct ffa_memory_region_constituent);
	}

	return next_fragment_offset;
}

static void dump_memory_region(struct ffa_memory_region *memory_region)
{
	uint32_t i;

	if (LOG_LEVEL < LOG_LEVEL_VERBOSE) {
		return;
	}

	dlog("from VM %#x, attributes %#x, flags %#x, handle %#x, tag %u, to "
	     "%u recipients [",
	     memory_region->sender, memory_region->attributes,
	     memory_region->flags, memory_region->handle, memory_region->tag,
	     memory_region->receiver_count);
	for (i = 0; i < memory_region->receiver_count; ++i) {
		if (i != 0) {
			dlog(", ");
		}
		dlog("VM %#x: %#x (offset %u)",
		     memory_region->receivers[i].receiver_permissions.receiver,
		     memory_region->receivers[i]
			     .receiver_permissions.permissions,
		     memory_region->receivers[i]
			     .composite_memory_region_offset);
	}
	dlog("]");
}

static void dump_share_states(void)
{
	uint32_t i;

	if (LOG_LEVEL < LOG_LEVEL_VERBOSE) {
		return;
	}

	dlog("Current share states:\n");
	sl_lock(&share_states_lock_instance);
	for (i = 0; i < MAX_MEM_SHARES; ++i) {
		if (share_states[i].share_func != 0) {
			dlog("%#x: ", share_states[i].handle);
			switch (share_states[i].share_func) {
			case FFA_MEM_SHARE_32:
				dlog("SHARE");
				break;
			case FFA_MEM_LEND_32:
				dlog("LEND");
				break;
			case FFA_MEM_DONATE_32:
				dlog("DONATE");
				break;
			default:
				dlog("invalid share_func %#x",
				     share_states[i].share_func);
			}
			dlog(" (");
			dump_memory_region(share_states[i].memory_region);
			if (share_states[i].sending_complete) {
				dlog("): fully sent");
			} else {
				dlog("): partially sent");
			}
			dlog(" with %d fragments, %d retrieved, sender's "
			     "original mode: %#x\n",
			     share_states[i].fragment_count,
			     share_states[i].retrieved_fragment_count[0],
			     share_states[i].sender_orig_mode);
		}
	}
	sl_unlock(&share_states_lock_instance);
}

/* TODO: Add device attributes: GRE, cacheability, shareability. */
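/**
 * Converts FF-A memory access permissions into Hafnium stage-2 memory modes.
 * For example, read-only, execute-never permissions map to just MM_MODE_R.
 * Permissions the sender left as NOT_SPECIFIED inherit the corresponding bits
 * from `default_mode`, which in this file is typically the sender's original
 * mode.
 */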
static inline uint32_t ffa_memory_permissions_to_mode(
	ffa_memory_access_permissions_t permissions, uint32_t default_mode)
{
	uint32_t mode = 0;

	switch (ffa_get_data_access_attr(permissions)) {
	case FFA_DATA_ACCESS_RO:
		mode = MM_MODE_R;
		break;
	case FFA_DATA_ACCESS_RW:
		mode = MM_MODE_R | MM_MODE_W;
		break;
	case FFA_DATA_ACCESS_NOT_SPECIFIED:
		mode = (default_mode & (MM_MODE_R | MM_MODE_W));
		break;
	case FFA_DATA_ACCESS_RESERVED:
		panic("Tried to convert FFA_DATA_ACCESS_RESERVED.");
	}

	switch (ffa_get_instruction_access_attr(permissions)) {
	case FFA_INSTRUCTION_ACCESS_NX:
		break;
	case FFA_INSTRUCTION_ACCESS_X:
		mode |= MM_MODE_X;
		break;
	case FFA_INSTRUCTION_ACCESS_NOT_SPECIFIED:
		mode |= (default_mode & MM_MODE_X);
		break;
	case FFA_INSTRUCTION_ACCESS_RESERVED:
		panic("Tried to convert FFA_INSTRUCTION_ACCESS_RESERVED.");
	}

	return mode;
}

/**
 * Gets the current mode in the stage-2 page table of the given VM for all the
 * pages in the given constituents, if they all have the same mode, or returns
 * an appropriate FF-A error if not.
 */
static struct ffa_value constituents_get_mode(
	struct vm_locked vm, uint32_t *orig_mode,
	struct ffa_memory_region_constituent **fragments,
	const uint32_t *fragment_constituent_counts, uint32_t fragment_count)
{
	uint32_t i;
	uint32_t j;

	if (fragment_count == 0 || fragment_constituent_counts[0] == 0) {
		/*
		 * Fail if there are no constituents. Otherwise we would get an
		 * uninitialised *orig_mode.
		 */
		return ffa_error(FFA_INVALID_PARAMETERS);
	}

	for (i = 0; i < fragment_count; ++i) {
		for (j = 0; j < fragment_constituent_counts[i]; ++j) {
			ipaddr_t begin = ipa_init(fragments[i][j].address);
			size_t size = fragments[i][j].page_count * PAGE_SIZE;
			ipaddr_t end = ipa_add(begin, size);
			uint32_t current_mode;

			/* Fail if addresses are not page-aligned. */
			if (!is_aligned(ipa_addr(begin), PAGE_SIZE) ||
			    !is_aligned(ipa_addr(end), PAGE_SIZE)) {
				return ffa_error(FFA_INVALID_PARAMETERS);
			}

			/*
			 * Ensure that this constituent memory range is all
			 * mapped with the same mode.
			 */
			if (!mm_vm_get_mode(&vm.vm->ptable, begin, end,
					    &current_mode)) {
				return ffa_error(FFA_DENIED);
			}

			/*
			 * Ensure that all constituents are mapped with the same
			 * mode.
			 */
			if (i == 0 && j == 0) {
				*orig_mode = current_mode;
			} else if (current_mode != *orig_mode) {
				dlog_verbose(
					"Expected mode %#x but was %#x for %d "
					"pages at %#x.\n",
					*orig_mode, current_mode,
					fragments[i][j].page_count,
					ipa_addr(begin));
				return ffa_error(FFA_DENIED);
			}
		}
	}

	return (struct ffa_value){.func = FFA_SUCCESS_32};
}

/**
 * Verify that all pages have the same mode, that the starting mode
 * constitutes a valid state and obtain the next mode to apply
 * to the sending VM.
 *
 * Returns:
 *  1) FFA_DENIED if a state transition was not found;
 *  2) FFA_DENIED if the pages being shared do not have the same mode within
 *     the <from> VM;
 *  3) FFA_INVALID_PARAMETERS if the beginning and end IPAs are not page
 *     aligned;
 *  4) FFA_INVALID_PARAMETERS if the requested share type was not handled.
 *  Or FFA_SUCCESS on success.
 */
static struct ffa_value ffa_send_check_transition(
	struct vm_locked from, uint32_t share_func,
	ffa_memory_access_permissions_t permissions, uint32_t *orig_from_mode,
	struct ffa_memory_region_constituent **fragments,
	uint32_t *fragment_constituent_counts, uint32_t fragment_count,
	uint32_t *from_mode)
{
	const uint32_t state_mask =
		MM_MODE_INVALID | MM_MODE_UNOWNED | MM_MODE_SHARED;
	uint32_t required_from_mode;
	struct ffa_value ret;

	ret = constituents_get_mode(from, orig_from_mode, fragments,
				    fragment_constituent_counts,
				    fragment_count);
	if (ret.func != FFA_SUCCESS_32) {
		dlog_verbose("Inconsistent modes.\n");
		return ret;
	}

	/* Ensure the address range is normal memory and not a device. */
	if (*orig_from_mode & MM_MODE_D) {
		dlog_verbose("Can't share device memory (mode is %#x).\n",
			     *orig_from_mode);
		return ffa_error(FFA_DENIED);
	}

	/*
	 * Ensure the sender is the owner and has exclusive access to the
	 * memory.
	 */
	if ((*orig_from_mode & state_mask) != 0) {
		return ffa_error(FFA_DENIED);
	}

	required_from_mode =
		ffa_memory_permissions_to_mode(permissions, *orig_from_mode);

	if ((*orig_from_mode & required_from_mode) != required_from_mode) {
		dlog_verbose(
			"Sender tried to send memory with permissions which "
			"required mode %#x but only had %#x itself.\n",
			required_from_mode, *orig_from_mode);
		return ffa_error(FFA_DENIED);
	}

	/* Find the appropriate new mode. */
	*from_mode = ~state_mask & *orig_from_mode;
	switch (share_func) {
	case FFA_MEM_DONATE_32:
		*from_mode |= MM_MODE_INVALID | MM_MODE_UNOWNED;
		break;

	case FFA_MEM_LEND_32:
		*from_mode |= MM_MODE_INVALID;
		break;

	case FFA_MEM_SHARE_32:
		*from_mode |= MM_MODE_SHARED;
		break;

	default:
		return ffa_error(FFA_INVALID_PARAMETERS);
	}

	return (struct ffa_value){.func = FFA_SUCCESS_32};
}

static struct ffa_value ffa_relinquish_check_transition(
	struct vm_locked from, uint32_t *orig_from_mode,
	struct ffa_memory_region_constituent **fragments,
	uint32_t *fragment_constituent_counts, uint32_t fragment_count,
	uint32_t *from_mode)
{
	const uint32_t state_mask =
		MM_MODE_INVALID | MM_MODE_UNOWNED | MM_MODE_SHARED;
	uint32_t orig_from_state;
	struct ffa_value ret;

	ret = constituents_get_mode(from, orig_from_mode, fragments,
				    fragment_constituent_counts,
				    fragment_count);
	if (ret.func != FFA_SUCCESS_32) {
		return ret;
	}

	/* Ensure the address range is normal memory and not a device. */
	if (*orig_from_mode & MM_MODE_D) {
		dlog_verbose("Can't relinquish device memory (mode is %#x).\n",
			     *orig_from_mode);
		return ffa_error(FFA_DENIED);
	}

	/*
	 * Ensure the relinquishing VM is not the owner but has access to the
	 * memory.
	 */
	orig_from_state = *orig_from_mode & state_mask;
	if ((orig_from_state & ~MM_MODE_SHARED) != MM_MODE_UNOWNED) {
		dlog_verbose(
			"Tried to relinquish memory in state %#x (masked %#x "
			"but should be %#x).\n",
			*orig_from_mode, orig_from_state, MM_MODE_UNOWNED);
		return ffa_error(FFA_DENIED);
	}

	/* Find the appropriate new mode. */
	*from_mode = (~state_mask & *orig_from_mode) | MM_MODE_UNMAPPED_MASK;

	return (struct ffa_value){.func = FFA_SUCCESS_32};
}

/**
 * Verify that all pages have the same mode, that the starting mode
 * constitutes a valid state and obtain the next mode to apply
 * to the retrieving VM.
 *
 * Returns:
 *  1) FFA_DENIED if a state transition was not found;
 *  2) FFA_DENIED if the pages being shared do not have the same mode within
 *     the <to> VM;
 *  3) FFA_INVALID_PARAMETERS if the beginning and end IPAs are not page
 *     aligned;
 *  4) FFA_INVALID_PARAMETERS if the requested share type was not handled.
 *  Or FFA_SUCCESS on success.
 */
static struct ffa_value ffa_retrieve_check_transition(
	struct vm_locked to, uint32_t share_func,
	struct ffa_memory_region_constituent **fragments,
	uint32_t *fragment_constituent_counts, uint32_t fragment_count,
	uint32_t memory_to_attributes, uint32_t *to_mode)
{
	uint32_t orig_to_mode;
	struct ffa_value ret;

	ret = constituents_get_mode(to, &orig_to_mode, fragments,
				    fragment_constituent_counts,
				    fragment_count);
	if (ret.func != FFA_SUCCESS_32) {
		dlog_verbose("Inconsistent modes.\n");
		return ret;
	}

	if (share_func == FFA_MEM_RECLAIM_32) {
		const uint32_t state_mask =
			MM_MODE_INVALID | MM_MODE_UNOWNED | MM_MODE_SHARED;
		uint32_t orig_to_state = orig_to_mode & state_mask;

		if (orig_to_state != MM_MODE_INVALID &&
		    orig_to_state != MM_MODE_SHARED) {
			return ffa_error(FFA_DENIED);
		}
	} else {
		/*
		 * Ensure the retriever has the expected state. We don't care
		 * about the MM_MODE_SHARED bit; either with or without it set
		 * are both valid representations of the !O-NA state.
		 */
		if ((orig_to_mode & MM_MODE_UNMAPPED_MASK) !=
		    MM_MODE_UNMAPPED_MASK) {
			return ffa_error(FFA_DENIED);
		}
	}

	/* Find the appropriate new mode. */
	*to_mode = memory_to_attributes;
	switch (share_func) {
	case FFA_MEM_DONATE_32:
		*to_mode |= 0;
		break;

	case FFA_MEM_LEND_32:
		*to_mode |= MM_MODE_UNOWNED;
		break;

	case FFA_MEM_SHARE_32:
		*to_mode |= MM_MODE_UNOWNED | MM_MODE_SHARED;
		break;

	case FFA_MEM_RECLAIM_32:
		*to_mode |= 0;
		break;

	default:
		dlog_error("Invalid share_func %#x.\n", share_func);
		return ffa_error(FFA_INVALID_PARAMETERS);
	}

	return (struct ffa_value){.func = FFA_SUCCESS_32};
}

/**
 * Updates a VM's page table such that the given set of physical address ranges
 * are mapped in the address space at the corresponding address ranges, in the
 * mode provided.
 *
 * If commit is false, the page tables will be allocated from the mpool but no
 * mappings will actually be updated. This function must always be called first
 * with commit false to check that it will succeed before calling with commit
 * true, to avoid leaving the page table in a half-updated state. To make a
 * series of changes atomically you can call them all with commit false before
 * calling them all with commit true.
 *
 * mm_vm_defrag should always be called after a series of page table updates,
 * whether they succeed or fail.
 *
 * Returns true on success, or false if the update failed and no changes were
 * made to memory mappings.
 */
static bool ffa_region_group_identity_map(
	struct vm_locked vm_locked,
	struct ffa_memory_region_constituent **fragments,
	const uint32_t *fragment_constituent_counts, uint32_t fragment_count,
	int mode, struct mpool *ppool, bool commit)
{
	uint32_t i;
	uint32_t j;

	/* Iterate over the memory region constituents within each fragment. */
	for (i = 0; i < fragment_count; ++i) {
		for (j = 0; j < fragment_constituent_counts[i]; ++j) {
			size_t size = fragments[i][j].page_count * PAGE_SIZE;
			paddr_t pa_begin =
				pa_from_ipa(ipa_init(fragments[i][j].address));
			paddr_t pa_end = pa_add(pa_begin, size);

			if (commit) {
				vm_identity_commit(vm_locked, pa_begin, pa_end,
						   mode, ppool, NULL);
			} else if (!vm_identity_prepare(vm_locked, pa_begin,
							pa_end, mode, ppool)) {
				return false;
			}
		}
	}

	return true;
}
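
/*
 * Typical usage, as in ffa_send_check_update() below (sketch with
 * illustrative argument names): call once with commit false to reserve the
 * page table memory the update needs, then call again with commit true once
 * the whole transaction is known to succeed:
 *
 *	if (!ffa_region_group_identity_map(vm, fragments, counts, count, mode,
 *					   ppool, false)) {
 *		return ffa_error(FFA_NO_MEMORY);
 *	}
 *	CHECK(ffa_region_group_identity_map(vm, fragments, counts, count, mode,
 *					    ppool, true));
 */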

/**
 * Clears a region of physical memory by overwriting it with zeros. The data is
 * flushed from the cache so the memory has been cleared across the system.
 */
static bool clear_memory(paddr_t begin, paddr_t end, struct mpool *ppool)
{
	/*
	 * TODO: change this to a CPU local single page window rather than a
	 *       global mapping of the whole range. Such an approach will limit
	 *       the changes to stage-1 tables and will allow only local
	 *       invalidation.
	 */
	bool ret;
	struct mm_stage1_locked stage1_locked = mm_lock_stage1();
	void *ptr =
		mm_identity_map(stage1_locked, begin, end, MM_MODE_W, ppool);
	size_t size = pa_difference(begin, end);

	if (!ptr) {
		/* TODO: partial defrag of failed range. */
		/* Recover any memory consumed in failed mapping. */
		mm_defrag(stage1_locked, ppool);
		goto fail;
	}

	memset_s(ptr, size, 0, size);
	arch_mm_flush_dcache(ptr, size);
	mm_unmap(stage1_locked, begin, end, ppool);

	ret = true;
	goto out;

fail:
	ret = false;

out:
	mm_unlock_stage1(&stage1_locked);

	return ret;
}

/**
 * Clears all the memory region constituents in the given fragments by
 * overwriting them with zeros. The data is flushed from the cache so the
 * memory has been cleared across the system.
 */
static bool ffa_clear_memory_constituents(
	struct ffa_memory_region_constituent **fragments,
	const uint32_t *fragment_constituent_counts, uint32_t fragment_count,
	struct mpool *page_pool)
{
	struct mpool local_page_pool;
	uint32_t i;
	struct mm_stage1_locked stage1_locked;
	bool ret = false;

	/*
	 * Create a local pool so any freed memory can't be used by another
	 * thread. This is to ensure each constituent that is mapped can be
	 * unmapped again afterwards.
	 */
	mpool_init_with_fallback(&local_page_pool, page_pool);

	/* Iterate over the memory region constituents within each fragment. */
	for (i = 0; i < fragment_count; ++i) {
		uint32_t j;

		for (j = 0; j < fragment_constituent_counts[i]; ++j) {
			size_t size = fragments[i][j].page_count * PAGE_SIZE;
			paddr_t begin =
				pa_from_ipa(ipa_init(fragments[i][j].address));
			paddr_t end = pa_add(begin, size);

			if (!clear_memory(begin, end, &local_page_pool)) {
				/*
				 * clear_memory() will defrag on failure, so
				 * no need to do it here.
				 */
				goto out;
			}
		}
	}

	/*
	 * Need to defrag after clearing, as it may have added extra mappings to
	 * the stage 1 page table.
	 */
	stage1_locked = mm_lock_stage1();
	mm_defrag(stage1_locked, &local_page_pool);
	mm_unlock_stage1(&stage1_locked);

	ret = true;

out:
	mpool_fini(&local_page_pool);
	return ret;
}

/**
 * Validates and prepares memory to be sent from the calling VM to another.
 *
 * This function requires the calling context to hold the <from> VM lock.
 *
 * Returns:
 *  In case of error, one of the following values is returned:
 *   1) FFA_INVALID_PARAMETERS - The endpoint provided parameters were
 *      erroneous;
 *   2) FFA_NO_MEMORY - Hafnium did not have sufficient memory to complete the
 *      request.
 *   3) FFA_DENIED - The sender doesn't have sufficient access to send the
 *      memory with the given permissions.
 *  Success is indicated by FFA_SUCCESS.
 */
static struct ffa_value ffa_send_check_update(
	struct vm_locked from_locked,
	struct ffa_memory_region_constituent **fragments,
	uint32_t *fragment_constituent_counts, uint32_t fragment_count,
	uint32_t share_func, ffa_memory_access_permissions_t permissions,
	struct mpool *page_pool, bool clear, uint32_t *orig_from_mode_ret)
{
	struct vm *from = from_locked.vm;
	uint32_t i;
	uint32_t orig_from_mode;
	uint32_t from_mode;
	struct mpool local_page_pool;
	struct ffa_value ret;

	/*
	 * Make sure constituents are properly aligned to a 64-bit boundary. If
	 * not we would get alignment faults trying to read (64-bit) values.
	 */
	for (i = 0; i < fragment_count; ++i) {
		if (!is_aligned(fragments[i], 8)) {
			dlog_verbose("Constituents not aligned.\n");
			return ffa_error(FFA_INVALID_PARAMETERS);
		}
	}

	/*
	 * Check if the state transition is lawful for the sender, ensure that
	 * all constituents of a memory region being shared are at the same
	 * state.
	 */
	ret = ffa_send_check_transition(from_locked, share_func, permissions,
					&orig_from_mode, fragments,
					fragment_constituent_counts,
					fragment_count, &from_mode);
	if (ret.func != FFA_SUCCESS_32) {
		dlog_verbose("Invalid transition for send.\n");
		return ret;
	}

	if (orig_from_mode_ret != NULL) {
		*orig_from_mode_ret = orig_from_mode;
	}

	/*
	 * Create a local pool so any freed memory can't be used by another
	 * thread. This is to ensure the original mapping can be restored if the
	 * clear fails.
	 */
	mpool_init_with_fallback(&local_page_pool, page_pool);

	/*
	 * First reserve all required memory for the new page table entries
	 * without committing, to make sure the entire operation will succeed
	 * without exhausting the page pool.
	 */
	if (!ffa_region_group_identity_map(
		    from_locked, fragments, fragment_constituent_counts,
		    fragment_count, from_mode, page_pool, false)) {
		/* TODO: partial defrag of failed range. */
		ret = ffa_error(FFA_NO_MEMORY);
		goto out;
	}

	/*
	 * Update the mapping for the sender. This won't allocate because the
	 * transaction was already prepared above, but may free pages in the
	 * case that a whole block is being unmapped that was previously
	 * partially mapped.
	 */
	CHECK(ffa_region_group_identity_map(
		from_locked, fragments, fragment_constituent_counts,
		fragment_count, from_mode, &local_page_pool, true));

	/* Clear the memory so no VM or device can see the previous contents. */
	if (clear && !ffa_clear_memory_constituents(
			     fragments, fragment_constituent_counts,
			     fragment_count, page_pool)) {
		/*
		 * On failure, roll back by returning memory to the sender. This
		 * may allocate pages which were previously freed into
		 * `local_page_pool` by the call above, but will never allocate
		 * more pages than that so can never fail.
		 */
		CHECK(ffa_region_group_identity_map(
			from_locked, fragments, fragment_constituent_counts,
			fragment_count, orig_from_mode, &local_page_pool,
			true));

		ret = ffa_error(FFA_NO_MEMORY);
		goto out;
	}

	ret = (struct ffa_value){.func = FFA_SUCCESS_32};

out:
	mpool_fini(&local_page_pool);

	/*
	 * Tidy up the page table by reclaiming failed mappings (if there was an
	 * error) or merging entries into blocks where possible (on success).
| 968 | */ |
| 969 | mm_vm_defrag(&from->ptable, page_pool); |
| 970 | |
| 971 | return ret; |
| 972 | } |
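| | |
| | /* |
| |  * Note: the pair of ffa_region_group_identity_map() calls above is the |
| |  * reserve-then-commit idiom used throughout this file. A minimal sketch |
| |  * of the pattern (illustrative variable names): |
| |  * |
| |  *   if (!ffa_region_group_identity_map(vm_locked, frags, counts, n, |
| |  *                                      mode, pool, false)) { |
| |  *           return ffa_error(FFA_NO_MEMORY); |
| |  *   } |
| |  *   CHECK(ffa_region_group_identity_map(vm_locked, frags, counts, n, |
| |  *                                       mode, pool, true)); |
| |  * |
| |  * The first call only reserves page table entries from the pool; the |
| |  * second applies the new mode and therefore cannot run out of memory. |
| |  */ |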
| 973 | |
| 974 | /** |
| 975 | * Validates and maps memory shared from one VM to another. |
| 976 | * |
| 977 | * This function requires the calling context to hold the <to> lock. |
| 978 | * |
| 979 | * Returns: |
| 980 | * In case of error, one of the following values is returned: |
Andrew Walbran | b5ab43c | 2020-04-30 11:32:54 +0100 | [diff] [blame] | 981 | * 1) FFA_INVALID_PARAMETERS - The endpoint provided parameters were |
Andrew Walbran | 5de9c3d | 2020-02-10 13:35:29 +0000 | [diff] [blame] | 982 | * erroneous; |
Andrew Walbran | b5ab43c | 2020-04-30 11:32:54 +0100 | [diff] [blame] | 983 | * 2) FFA_NO_MEMORY - Hafnium did not have sufficient memory to complete |
Andrew Walbran | 5de9c3d | 2020-02-10 13:35:29 +0000 | [diff] [blame] | 984 | * the request. |
Andrew Walbran | b5ab43c | 2020-04-30 11:32:54 +0100 | [diff] [blame] | 985 | * Success is indicated by FFA_SUCCESS. |
Andrew Walbran | 5de9c3d | 2020-02-10 13:35:29 +0000 | [diff] [blame] | 986 | */ |
Andrew Walbran | 996d1d1 | 2020-05-27 14:08:43 +0100 | [diff] [blame] | 987 | static struct ffa_value ffa_retrieve_check_update( |
Andrew Walbran | 5de9c3d | 2020-02-10 13:35:29 +0000 | [diff] [blame] | 988 | struct vm_locked to_locked, |
Andrew Walbran | ca808b1 | 2020-05-15 17:22:28 +0100 | [diff] [blame] | 989 | struct ffa_memory_region_constituent **fragments, |
| 990 | uint32_t *fragment_constituent_counts, uint32_t fragment_count, |
| 991 | uint32_t memory_to_attributes, uint32_t share_func, bool clear, |
| 992 | struct mpool *page_pool) |
Andrew Walbran | 5de9c3d | 2020-02-10 13:35:29 +0000 | [diff] [blame] | 993 | { |
| 994 | struct vm *to = to_locked.vm; |
Andrew Walbran | ca808b1 | 2020-05-15 17:22:28 +0100 | [diff] [blame] | 995 | uint32_t i; |
Andrew Walbran | 5de9c3d | 2020-02-10 13:35:29 +0000 | [diff] [blame] | 996 | uint32_t to_mode; |
| 997 | struct mpool local_page_pool; |
Andrew Walbran | b5ab43c | 2020-04-30 11:32:54 +0100 | [diff] [blame] | 998 | struct ffa_value ret; |
Andrew Walbran | 5de9c3d | 2020-02-10 13:35:29 +0000 | [diff] [blame] | 999 | |
| 1000 | /* |
Andrew Walbran | ca808b1 | 2020-05-15 17:22:28 +0100 | [diff] [blame] | 1001 | * Make sure constituents are properly aligned to a 64-bit boundary. If |
| 1002 | 	 * not, we would get alignment faults trying to read (64-bit) values. |
Andrew Walbran | 5de9c3d | 2020-02-10 13:35:29 +0000 | [diff] [blame] | 1003 | */ |
Andrew Walbran | ca808b1 | 2020-05-15 17:22:28 +0100 | [diff] [blame] | 1004 | for (i = 0; i < fragment_count; ++i) { |
| 1005 | if (!is_aligned(fragments[i], 8)) { |
| 1006 | return ffa_error(FFA_INVALID_PARAMETERS); |
| 1007 | } |
Andrew Walbran | 5de9c3d | 2020-02-10 13:35:29 +0000 | [diff] [blame] | 1008 | } |
| 1009 | |
| 1010 | /* |
| 1011 | * Check if the state transition is lawful for the recipient, and ensure |
| 1012 | * that all constituents of the memory region being retrieved are at the |
| 1013 | * same state. |
| 1014 | */ |
Andrew Walbran | ca808b1 | 2020-05-15 17:22:28 +0100 | [diff] [blame] | 1015 | ret = ffa_retrieve_check_transition( |
| 1016 | to_locked, share_func, fragments, fragment_constituent_counts, |
| 1017 | fragment_count, memory_to_attributes, &to_mode); |
Andrew Walbran | b5ab43c | 2020-04-30 11:32:54 +0100 | [diff] [blame] | 1018 | if (ret.func != FFA_SUCCESS_32) { |
Andrew Walbran | ca808b1 | 2020-05-15 17:22:28 +0100 | [diff] [blame] | 1019 | dlog_verbose("Invalid transition for retrieve.\n"); |
Andrew Walbran | a65a132 | 2020-04-06 19:32:32 +0100 | [diff] [blame] | 1020 | return ret; |
Andrew Walbran | 5de9c3d | 2020-02-10 13:35:29 +0000 | [diff] [blame] | 1021 | } |
| 1022 | |
| 1023 | /* |
| 1024 | * Create a local pool so any freed memory can't be used by another |
| 1025 | * thread. This is to ensure the original mapping can be restored if the |
| 1026 | * clear fails. |
| 1027 | */ |
| 1028 | mpool_init_with_fallback(&local_page_pool, page_pool); |
| 1029 | |
| 1030 | /* |
| 1031 | * First reserve all required memory for the new page table entries in |
| 1032 | * the recipient page tables without committing, to make sure the entire |
| 1033 | * operation will succeed without exhausting the page pool. |
| 1034 | */ |
Andrew Walbran | ca808b1 | 2020-05-15 17:22:28 +0100 | [diff] [blame] | 1035 | if (!ffa_region_group_identity_map( |
| 1036 | to_locked, fragments, fragment_constituent_counts, |
| 1037 | fragment_count, to_mode, page_pool, false)) { |
Andrew Walbran | 5de9c3d | 2020-02-10 13:35:29 +0000 | [diff] [blame] | 1038 | /* TODO: partial defrag of failed range. */ |
| 1039 | dlog_verbose( |
| 1040 | "Insufficient memory to update recipient page " |
| 1041 | "table.\n"); |
Andrew Walbran | b5ab43c | 2020-04-30 11:32:54 +0100 | [diff] [blame] | 1042 | ret = ffa_error(FFA_NO_MEMORY); |
Andrew Walbran | 5de9c3d | 2020-02-10 13:35:29 +0000 | [diff] [blame] | 1043 | goto out; |
| 1044 | } |
| 1045 | |
| 1046 | /* Clear the memory so no VM or device can see the previous contents. */ |
Andrew Walbran | b5ab43c | 2020-04-30 11:32:54 +0100 | [diff] [blame] | 1047 | if (clear && !ffa_clear_memory_constituents( |
Andrew Walbran | ca808b1 | 2020-05-15 17:22:28 +0100 | [diff] [blame] | 1048 | fragments, fragment_constituent_counts, |
| 1049 | fragment_count, page_pool)) { |
Andrew Walbran | b5ab43c | 2020-04-30 11:32:54 +0100 | [diff] [blame] | 1050 | ret = ffa_error(FFA_NO_MEMORY); |
Andrew Walbran | 5de9c3d | 2020-02-10 13:35:29 +0000 | [diff] [blame] | 1051 | goto out; |
| 1052 | } |
| 1053 | |
Jose Marinho | 09b1db8 | 2019-08-08 09:16:59 +0100 | [diff] [blame] | 1054 | /* |
| 1055 | * Complete the transfer by mapping the memory into the recipient. This |
| 1056 | * won't allocate because the transaction was already prepared above, so |
| 1057 | * it doesn't need to use the `local_page_pool`. |
| 1058 | */ |
Andrew Walbran | ca808b1 | 2020-05-15 17:22:28 +0100 | [diff] [blame] | 1059 | CHECK(ffa_region_group_identity_map( |
| 1060 | to_locked, fragments, fragment_constituent_counts, |
| 1061 | fragment_count, to_mode, page_pool, true)); |
Jose Marinho | 09b1db8 | 2019-08-08 09:16:59 +0100 | [diff] [blame] | 1062 | |
Andrew Walbran | b5ab43c | 2020-04-30 11:32:54 +0100 | [diff] [blame] | 1063 | ret = (struct ffa_value){.func = FFA_SUCCESS_32}; |
Jose Marinho | 09b1db8 | 2019-08-08 09:16:59 +0100 | [diff] [blame] | 1064 | |
| 1065 | out: |
| 1066 | mpool_fini(&local_page_pool); |
| 1067 | |
| 1068 | /* |
Andrew Walbran | f07f04d | 2020-05-01 18:09:00 +0100 | [diff] [blame] | 1069 | * Tidy up the page table by reclaiming failed mappings (if there was an |
| 1070 | * error) or merging entries into blocks where possible (on success). |
Jose Marinho | 09b1db8 | 2019-08-08 09:16:59 +0100 | [diff] [blame] | 1071 | */ |
Andrew Walbran | 475c145 | 2020-02-07 13:22:22 +0000 | [diff] [blame] | 1072 | mm_vm_defrag(&to->ptable, page_pool); |
Andrew Walbran | 5de9c3d | 2020-02-10 13:35:29 +0000 | [diff] [blame] | 1073 | |
| 1074 | return ret; |
| 1075 | } |
| 1076 | |
Andrew Walbran | 290b0c9 | 2020-02-03 16:37:14 +0000 | [diff] [blame] | 1077 | /** |
| 1078 |  * Reclaims the given memory from the TEE. To do this, space is first reserved in |
| 1079 | * the <to> VM's page table, then the reclaim request is sent on to the TEE, |
| 1080 | * then (if that is successful) the memory is mapped back into the <to> VM's |
| 1081 | * page table. |
| 1082 | * |
| 1083 | * This function requires the calling context to hold the <to> lock. |
| 1084 | * |
| 1085 | * Returns: |
| 1086 | * In case of error, one of the following values is returned: |
Andrew Walbran | b5ab43c | 2020-04-30 11:32:54 +0100 | [diff] [blame] | 1087 | * 1) FFA_INVALID_PARAMETERS - The endpoint provided parameters were |
Andrew Walbran | 290b0c9 | 2020-02-03 16:37:14 +0000 | [diff] [blame] | 1088 | * erroneous; |
Andrew Walbran | b5ab43c | 2020-04-30 11:32:54 +0100 | [diff] [blame] | 1089 | * 2) FFA_NO_MEMORY - Hafnium did not have sufficient memory to complete |
Andrew Walbran | 290b0c9 | 2020-02-03 16:37:14 +0000 | [diff] [blame] | 1090 | * the request. |
Andrew Walbran | b5ab43c | 2020-04-30 11:32:54 +0100 | [diff] [blame] | 1091 | * Success is indicated by FFA_SUCCESS. |
Andrew Walbran | 290b0c9 | 2020-02-03 16:37:14 +0000 | [diff] [blame] | 1092 | */ |
Andrew Walbran | 996d1d1 | 2020-05-27 14:08:43 +0100 | [diff] [blame] | 1093 | static struct ffa_value ffa_tee_reclaim_check_update( |
Andrew Walbran | b5ab43c | 2020-04-30 11:32:54 +0100 | [diff] [blame] | 1094 | struct vm_locked to_locked, ffa_memory_handle_t handle, |
| 1095 | struct ffa_memory_region_constituent *constituents, |
Andrew Walbran | 290b0c9 | 2020-02-03 16:37:14 +0000 | [diff] [blame] | 1096 | uint32_t constituent_count, uint32_t memory_to_attributes, bool clear, |
| 1097 | struct mpool *page_pool) |
| 1098 | { |
| 1099 | struct vm *to = to_locked.vm; |
| 1100 | uint32_t to_mode; |
| 1101 | struct mpool local_page_pool; |
Andrew Walbran | b5ab43c | 2020-04-30 11:32:54 +0100 | [diff] [blame] | 1102 | struct ffa_value ret; |
| 1103 | ffa_memory_region_flags_t tee_flags; |
Andrew Walbran | 290b0c9 | 2020-02-03 16:37:14 +0000 | [diff] [blame] | 1104 | |
| 1105 | /* |
Andrew Walbran | ca808b1 | 2020-05-15 17:22:28 +0100 | [diff] [blame] | 1106 | * Make sure constituents are properly aligned to a 64-bit boundary. If |
| 1107 | 	 * not, we would get alignment faults trying to read (64-bit) values. |
Andrew Walbran | 290b0c9 | 2020-02-03 16:37:14 +0000 | [diff] [blame] | 1108 | */ |
Andrew Walbran | ca808b1 | 2020-05-15 17:22:28 +0100 | [diff] [blame] | 1109 | if (!is_aligned(constituents, 8)) { |
Andrew Walbran | 290b0c9 | 2020-02-03 16:37:14 +0000 | [diff] [blame] | 1110 | dlog_verbose("Constituents not aligned.\n"); |
Andrew Walbran | b5ab43c | 2020-04-30 11:32:54 +0100 | [diff] [blame] | 1111 | return ffa_error(FFA_INVALID_PARAMETERS); |
Andrew Walbran | 290b0c9 | 2020-02-03 16:37:14 +0000 | [diff] [blame] | 1112 | } |
| 1113 | |
| 1114 | /* |
| 1115 | * Check if the state transition is lawful for the recipient, and ensure |
| 1116 | * that all constituents of the memory region being retrieved are at the |
| 1117 | * same state. |
| 1118 | */ |
Andrew Walbran | b5ab43c | 2020-04-30 11:32:54 +0100 | [diff] [blame] | 1119 | ret = ffa_retrieve_check_transition(to_locked, FFA_MEM_RECLAIM_32, |
Andrew Walbran | ca808b1 | 2020-05-15 17:22:28 +0100 | [diff] [blame] | 1120 | &constituents, &constituent_count, |
| 1121 | 1, memory_to_attributes, &to_mode); |
Andrew Walbran | b5ab43c | 2020-04-30 11:32:54 +0100 | [diff] [blame] | 1122 | if (ret.func != FFA_SUCCESS_32) { |
Andrew Walbran | 290b0c9 | 2020-02-03 16:37:14 +0000 | [diff] [blame] | 1123 | dlog_verbose("Invalid transition.\n"); |
| 1124 | return ret; |
| 1125 | } |
| 1126 | |
| 1127 | /* |
| 1128 | * Create a local pool so any freed memory can't be used by another |
| 1129 | * thread. This is to ensure the original mapping can be restored if the |
| 1130 | * clear fails. |
| 1131 | */ |
| 1132 | mpool_init_with_fallback(&local_page_pool, page_pool); |
| 1133 | |
| 1134 | /* |
| 1135 | * First reserve all required memory for the new page table entries in |
| 1136 | * the recipient page tables without committing, to make sure the entire |
| 1137 | * operation will succeed without exhausting the page pool. |
| 1138 | */ |
Andrew Walbran | ca808b1 | 2020-05-15 17:22:28 +0100 | [diff] [blame] | 1139 | if (!ffa_region_group_identity_map(to_locked, &constituents, |
| 1140 | &constituent_count, 1, to_mode, |
Andrew Walbran | b5ab43c | 2020-04-30 11:32:54 +0100 | [diff] [blame] | 1141 | page_pool, false)) { |
Andrew Walbran | 290b0c9 | 2020-02-03 16:37:14 +0000 | [diff] [blame] | 1142 | /* TODO: partial defrag of failed range. */ |
| 1143 | dlog_verbose( |
| 1144 | "Insufficient memory to update recipient page " |
| 1145 | "table.\n"); |
Andrew Walbran | b5ab43c | 2020-04-30 11:32:54 +0100 | [diff] [blame] | 1146 | ret = ffa_error(FFA_NO_MEMORY); |
Andrew Walbran | 290b0c9 | 2020-02-03 16:37:14 +0000 | [diff] [blame] | 1147 | goto out; |
| 1148 | } |
| 1149 | |
| 1150 | /* |
| 1151 | * Forward the request to the TEE and see what happens. |
| 1152 | */ |
| 1153 | tee_flags = 0; |
| 1154 | if (clear) { |
Andrew Walbran | b5ab43c | 2020-04-30 11:32:54 +0100 | [diff] [blame] | 1155 | tee_flags |= FFA_MEMORY_REGION_FLAG_CLEAR; |
Andrew Walbran | 290b0c9 | 2020-02-03 16:37:14 +0000 | [diff] [blame] | 1156 | } |
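| | 	/* |
| | 	 * The 64-bit handle is split into 32-bit halves (low word in arg1, |
| | 	 * high word in arg2), matching the FFA_MEM_RECLAIM register layout in |
| | 	 * the FF-A SMC calling convention. |
| | 	 */ |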
Olivier Deprez | 112d2b5 | 2020-09-30 07:39:23 +0200 | [diff] [blame] | 1157 | ret = arch_other_world_call( |
| 1158 | (struct ffa_value){.func = FFA_MEM_RECLAIM_32, |
| 1159 | .arg1 = (uint32_t)handle, |
| 1160 | .arg2 = (uint32_t)(handle >> 32), |
| 1161 | .arg3 = tee_flags}); |
Andrew Walbran | 290b0c9 | 2020-02-03 16:37:14 +0000 | [diff] [blame] | 1162 | |
Andrew Walbran | b5ab43c | 2020-04-30 11:32:54 +0100 | [diff] [blame] | 1163 | if (ret.func != FFA_SUCCESS_32) { |
Andrew Walbran | 290b0c9 | 2020-02-03 16:37:14 +0000 | [diff] [blame] | 1164 | dlog_verbose( |
Andrew Walbran | ca808b1 | 2020-05-15 17:22:28 +0100 | [diff] [blame] | 1165 | "Got %#x (%d) from TEE in response to FFA_MEM_RECLAIM, " |
| 1166 | "expected FFA_SUCCESS.\n", |
Andrew Walbran | 290b0c9 | 2020-02-03 16:37:14 +0000 | [diff] [blame] | 1167 | ret.func, ret.arg2); |
| 1168 | goto out; |
| 1169 | } |
| 1170 | |
| 1171 | /* |
| 1172 | * The TEE was happy with it, so complete the reclaim by mapping the |
| 1173 | * memory into the recipient. This won't allocate because the |
| 1174 | * transaction was already prepared above, so it doesn't need to use the |
| 1175 | * `local_page_pool`. |
| 1176 | */ |
Andrew Walbran | ca808b1 | 2020-05-15 17:22:28 +0100 | [diff] [blame] | 1177 | CHECK(ffa_region_group_identity_map(to_locked, &constituents, |
| 1178 | &constituent_count, 1, to_mode, |
Andrew Walbran | b5ab43c | 2020-04-30 11:32:54 +0100 | [diff] [blame] | 1179 | page_pool, true)); |
Andrew Walbran | 290b0c9 | 2020-02-03 16:37:14 +0000 | [diff] [blame] | 1180 | |
Andrew Walbran | b5ab43c | 2020-04-30 11:32:54 +0100 | [diff] [blame] | 1181 | ret = (struct ffa_value){.func = FFA_SUCCESS_32}; |
Andrew Walbran | 290b0c9 | 2020-02-03 16:37:14 +0000 | [diff] [blame] | 1182 | |
| 1183 | out: |
| 1184 | mpool_fini(&local_page_pool); |
| 1185 | |
| 1186 | /* |
Andrew Walbran | f07f04d | 2020-05-01 18:09:00 +0100 | [diff] [blame] | 1187 | * Tidy up the page table by reclaiming failed mappings (if there was an |
| 1188 | * error) or merging entries into blocks where possible (on success). |
Andrew Walbran | 290b0c9 | 2020-02-03 16:37:14 +0000 | [diff] [blame] | 1189 | */ |
| 1190 | mm_vm_defrag(&to->ptable, page_pool); |
| 1191 | |
| 1192 | return ret; |
| 1193 | } |
| 1194 | |
Andrew Walbran | 996d1d1 | 2020-05-27 14:08:43 +0100 | [diff] [blame] | 1195 | static struct ffa_value ffa_relinquish_check_update( |
Andrew Walbran | 5de9c3d | 2020-02-10 13:35:29 +0000 | [diff] [blame] | 1196 | struct vm_locked from_locked, |
Andrew Walbran | ca808b1 | 2020-05-15 17:22:28 +0100 | [diff] [blame] | 1197 | struct ffa_memory_region_constituent **fragments, |
| 1198 | uint32_t *fragment_constituent_counts, uint32_t fragment_count, |
| 1199 | struct mpool *page_pool, bool clear) |
Andrew Walbran | 5de9c3d | 2020-02-10 13:35:29 +0000 | [diff] [blame] | 1200 | { |
| 1201 | uint32_t orig_from_mode; |
| 1202 | uint32_t from_mode; |
| 1203 | struct mpool local_page_pool; |
Andrew Walbran | b5ab43c | 2020-04-30 11:32:54 +0100 | [diff] [blame] | 1204 | struct ffa_value ret; |
Andrew Walbran | 5de9c3d | 2020-02-10 13:35:29 +0000 | [diff] [blame] | 1205 | |
Andrew Walbran | ca808b1 | 2020-05-15 17:22:28 +0100 | [diff] [blame] | 1206 | ret = ffa_relinquish_check_transition( |
| 1207 | from_locked, &orig_from_mode, fragments, |
| 1208 | fragment_constituent_counts, fragment_count, &from_mode); |
Andrew Walbran | b5ab43c | 2020-04-30 11:32:54 +0100 | [diff] [blame] | 1209 | if (ret.func != FFA_SUCCESS_32) { |
Andrew Walbran | ca808b1 | 2020-05-15 17:22:28 +0100 | [diff] [blame] | 1210 | dlog_verbose("Invalid transition for relinquish.\n"); |
Andrew Walbran | a65a132 | 2020-04-06 19:32:32 +0100 | [diff] [blame] | 1211 | return ret; |
Andrew Walbran | 5de9c3d | 2020-02-10 13:35:29 +0000 | [diff] [blame] | 1212 | } |
| 1213 | |
| 1214 | /* |
| 1215 | * Create a local pool so any freed memory can't be used by another |
| 1216 | * thread. This is to ensure the original mapping can be restored if the |
| 1217 | * clear fails. |
| 1218 | */ |
| 1219 | mpool_init_with_fallback(&local_page_pool, page_pool); |
| 1220 | |
| 1221 | /* |
| 1222 | * First reserve all required memory for the new page table entries |
| 1223 | * without committing, to make sure the entire operation will succeed |
| 1224 | * without exhausting the page pool. |
| 1225 | */ |
Andrew Walbran | ca808b1 | 2020-05-15 17:22:28 +0100 | [diff] [blame] | 1226 | if (!ffa_region_group_identity_map( |
| 1227 | from_locked, fragments, fragment_constituent_counts, |
| 1228 | fragment_count, from_mode, page_pool, false)) { |
Andrew Walbran | 5de9c3d | 2020-02-10 13:35:29 +0000 | [diff] [blame] | 1229 | /* TODO: partial defrag of failed range. */ |
Andrew Walbran | b5ab43c | 2020-04-30 11:32:54 +0100 | [diff] [blame] | 1230 | ret = ffa_error(FFA_NO_MEMORY); |
Andrew Walbran | 5de9c3d | 2020-02-10 13:35:29 +0000 | [diff] [blame] | 1231 | goto out; |
| 1232 | } |
| 1233 | |
| 1234 | /* |
| 1235 | * Update the mapping for the sender. This won't allocate because the |
| 1236 | * transaction was already prepared above, but may free pages in the |
| 1237 | * case that a whole block is being unmapped that was previously |
| 1238 | * partially mapped. |
| 1239 | */ |
Andrew Walbran | ca808b1 | 2020-05-15 17:22:28 +0100 | [diff] [blame] | 1240 | CHECK(ffa_region_group_identity_map( |
| 1241 | from_locked, fragments, fragment_constituent_counts, |
| 1242 | fragment_count, from_mode, &local_page_pool, true)); |
Andrew Walbran | 5de9c3d | 2020-02-10 13:35:29 +0000 | [diff] [blame] | 1243 | |
| 1244 | /* Clear the memory so no VM or device can see the previous contents. */ |
Andrew Walbran | b5ab43c | 2020-04-30 11:32:54 +0100 | [diff] [blame] | 1245 | if (clear && !ffa_clear_memory_constituents( |
Andrew Walbran | ca808b1 | 2020-05-15 17:22:28 +0100 | [diff] [blame] | 1246 | fragments, fragment_constituent_counts, |
| 1247 | fragment_count, page_pool)) { |
Andrew Walbran | 5de9c3d | 2020-02-10 13:35:29 +0000 | [diff] [blame] | 1248 | /* |
| 1249 | * On failure, roll back by returning memory to the sender. This |
| 1250 | * may allocate pages which were previously freed into |
| 1251 | * `local_page_pool` by the call above, but will never allocate |
| 1252 | * more pages than that so can never fail. |
| 1253 | */ |
Andrew Walbran | b5ab43c | 2020-04-30 11:32:54 +0100 | [diff] [blame] | 1254 | CHECK(ffa_region_group_identity_map( |
Andrew Walbran | ca808b1 | 2020-05-15 17:22:28 +0100 | [diff] [blame] | 1255 | from_locked, fragments, fragment_constituent_counts, |
| 1256 | fragment_count, orig_from_mode, &local_page_pool, |
| 1257 | true)); |
Andrew Walbran | 5de9c3d | 2020-02-10 13:35:29 +0000 | [diff] [blame] | 1258 | |
Andrew Walbran | b5ab43c | 2020-04-30 11:32:54 +0100 | [diff] [blame] | 1259 | ret = ffa_error(FFA_NO_MEMORY); |
Andrew Walbran | 5de9c3d | 2020-02-10 13:35:29 +0000 | [diff] [blame] | 1260 | goto out; |
| 1261 | } |
| 1262 | |
Andrew Walbran | b5ab43c | 2020-04-30 11:32:54 +0100 | [diff] [blame] | 1263 | ret = (struct ffa_value){.func = FFA_SUCCESS_32}; |
Andrew Walbran | 5de9c3d | 2020-02-10 13:35:29 +0000 | [diff] [blame] | 1264 | |
| 1265 | out: |
| 1266 | mpool_fini(&local_page_pool); |
| 1267 | |
| 1268 | /* |
| 1269 | * Tidy up the page table by reclaiming failed mappings (if there was an |
| 1270 | * error) or merging entries into blocks where possible (on success). |
| 1271 | */ |
| 1272 | mm_vm_defrag(&from_locked.vm->ptable, page_pool); |
Jose Marinho | 09b1db8 | 2019-08-08 09:16:59 +0100 | [diff] [blame] | 1273 | |
| 1274 | return ret; |
| 1275 | } |
| 1276 | |
| 1277 | /** |
Andrew Walbran | ca808b1 | 2020-05-15 17:22:28 +0100 | [diff] [blame] | 1278 | * Complete a memory sending operation by checking that it is valid, updating |
| 1279 | * the sender page table, and then either marking the share state as having |
| 1280 | * completed sending (on success) or freeing it (on failure). |
| 1281 | * |
| 1282 | * Returns FFA_SUCCESS with the handle encoded, or the relevant FFA_ERROR. |
| 1283 | */ |
| 1284 | static struct ffa_value ffa_memory_send_complete( |
| 1285 | struct vm_locked from_locked, struct share_states_locked share_states, |
Andrew Walbran | 37c574e | 2020-06-03 11:45:46 +0100 | [diff] [blame] | 1286 | struct ffa_memory_share_state *share_state, struct mpool *page_pool, |
| 1287 | uint32_t *orig_from_mode_ret) |
Andrew Walbran | ca808b1 | 2020-05-15 17:22:28 +0100 | [diff] [blame] | 1288 | { |
| 1289 | struct ffa_memory_region *memory_region = share_state->memory_region; |
| 1290 | struct ffa_value ret; |
| 1291 | |
| 1292 | /* Lock must be held. */ |
| 1293 | CHECK(share_states.share_states != NULL); |
| 1294 | |
| 1295 | /* Check that state is valid in sender page table and update. */ |
| 1296 | ret = ffa_send_check_update( |
| 1297 | from_locked, share_state->fragments, |
| 1298 | share_state->fragment_constituent_counts, |
| 1299 | share_state->fragment_count, share_state->share_func, |
| 1300 | memory_region->receivers[0].receiver_permissions.permissions, |
Andrew Walbran | 37c574e | 2020-06-03 11:45:46 +0100 | [diff] [blame] | 1301 | page_pool, memory_region->flags & FFA_MEMORY_REGION_FLAG_CLEAR, |
| 1302 | orig_from_mode_ret); |
Andrew Walbran | ca808b1 | 2020-05-15 17:22:28 +0100 | [diff] [blame] | 1303 | if (ret.func != FFA_SUCCESS_32) { |
| 1304 | /* |
| 1305 | * Free share state, it failed to send so it can't be retrieved. |
| 1306 | */ |
| 1307 | dlog_verbose("Complete failed, freeing share state.\n"); |
| 1308 | share_state_free(share_states, share_state, page_pool); |
| 1309 | return ret; |
| 1310 | } |
| 1311 | |
| 1312 | share_state->sending_complete = true; |
| 1313 | dlog_verbose("Marked sending complete.\n"); |
| 1314 | |
| 1315 | return ffa_mem_success(share_state->handle); |
| 1316 | } |
| 1317 | |
| 1318 | /** |
Andrew Walbran | a65a132 | 2020-04-06 19:32:32 +0100 | [diff] [blame] | 1319 | * Check that the given `memory_region` represents a valid memory send request |
| 1320 |  * of the given `share_func` type, return the permissions via the output |
| 1321 |  * parameter, and update them in the memory region if necessary. |
Andrew Walbran | 1a86aa9 | 2020-05-15 17:22:28 +0100 | [diff] [blame] | 1322 | * |
Andrew Walbran | b5ab43c | 2020-04-30 11:32:54 +0100 | [diff] [blame] | 1323 | * Returns FFA_SUCCESS if the request was valid, or the relevant FFA_ERROR if |
Andrew Walbran | a65a132 | 2020-04-06 19:32:32 +0100 | [diff] [blame] | 1324 | * not. |
| 1325 | */ |
Andrew Walbran | b5ab43c | 2020-04-30 11:32:54 +0100 | [diff] [blame] | 1326 | static struct ffa_value ffa_memory_send_validate( |
Andrew Walbran | 1a86aa9 | 2020-05-15 17:22:28 +0100 | [diff] [blame] | 1327 | struct vm_locked from_locked, struct ffa_memory_region *memory_region, |
| 1328 | uint32_t memory_share_length, uint32_t fragment_length, |
| 1329 | uint32_t share_func, ffa_memory_access_permissions_t *permissions) |
Andrew Walbran | a65a132 | 2020-04-06 19:32:32 +0100 | [diff] [blame] | 1330 | { |
Andrew Walbran | b5ab43c | 2020-04-30 11:32:54 +0100 | [diff] [blame] | 1331 | struct ffa_composite_memory_region *composite; |
Andrew Walbran | 130a8ae | 2020-05-15 16:27:15 +0100 | [diff] [blame] | 1332 | uint32_t receivers_length; |
Andrew Walbran | 352aa3d | 2020-05-01 17:51:33 +0100 | [diff] [blame] | 1333 | uint32_t constituents_offset; |
Andrew Walbran | 130a8ae | 2020-05-15 16:27:15 +0100 | [diff] [blame] | 1334 | uint32_t constituents_length; |
Andrew Walbran | b5ab43c | 2020-04-30 11:32:54 +0100 | [diff] [blame] | 1335 | enum ffa_data_access data_access; |
| 1336 | enum ffa_instruction_access instruction_access; |
Andrew Walbran | a65a132 | 2020-04-06 19:32:32 +0100 | [diff] [blame] | 1337 | |
Andrew Walbran | a65a132 | 2020-04-06 19:32:32 +0100 | [diff] [blame] | 1338 | CHECK(permissions != NULL); |
| 1339 | |
Andrew Walbran | 1a86aa9 | 2020-05-15 17:22:28 +0100 | [diff] [blame] | 1340 | /* |
| 1341 | * This should already be checked by the caller, just making the |
| 1342 | * assumption clear here. |
| 1343 | */ |
| 1344 | CHECK(memory_region->receiver_count == 1); |
| 1345 | |
Andrew Walbran | a65a132 | 2020-04-06 19:32:32 +0100 | [diff] [blame] | 1346 | /* The sender must match the message sender. */ |
| 1347 | if (memory_region->sender != from_locked.vm->id) { |
| 1348 | dlog_verbose("Invalid sender %d.\n", memory_region->sender); |
Andrew Walbran | b5ab43c | 2020-04-30 11:32:54 +0100 | [diff] [blame] | 1349 | return ffa_error(FFA_INVALID_PARAMETERS); |
Andrew Walbran | a65a132 | 2020-04-06 19:32:32 +0100 | [diff] [blame] | 1350 | } |
| 1351 | |
Andrew Walbran | a65a132 | 2020-04-06 19:32:32 +0100 | [diff] [blame] | 1352 | /* |
| 1353 | * Ensure that the composite header is within the memory bounds and |
| 1354 | * doesn't overlap the first part of the message. |
| 1355 | */ |
Andrew Walbran | 130a8ae | 2020-05-15 16:27:15 +0100 | [diff] [blame] | 1356 | receivers_length = sizeof(struct ffa_memory_access) * |
| 1357 | memory_region->receiver_count; |
Andrew Walbran | 352aa3d | 2020-05-01 17:51:33 +0100 | [diff] [blame] | 1358 | constituents_offset = |
| 1359 | ffa_composite_constituent_offset(memory_region, 0); |
Andrew Walbran | a65a132 | 2020-04-06 19:32:32 +0100 | [diff] [blame] | 1360 | if (memory_region->receivers[0].composite_memory_region_offset < |
Andrew Walbran | 130a8ae | 2020-05-15 16:27:15 +0100 | [diff] [blame] | 1361 | sizeof(struct ffa_memory_region) + receivers_length || |
Andrew Walbran | 1a86aa9 | 2020-05-15 17:22:28 +0100 | [diff] [blame] | 1362 | constituents_offset > fragment_length) { |
Andrew Walbran | a65a132 | 2020-04-06 19:32:32 +0100 | [diff] [blame] | 1363 | dlog_verbose( |
Andrew Walbran | 352aa3d | 2020-05-01 17:51:33 +0100 | [diff] [blame] | 1364 | "Invalid composite memory region descriptor offset " |
| 1365 | "%d.\n", |
| 1366 | memory_region->receivers[0] |
| 1367 | .composite_memory_region_offset); |
Andrew Walbran | b5ab43c | 2020-04-30 11:32:54 +0100 | [diff] [blame] | 1368 | return ffa_error(FFA_INVALID_PARAMETERS); |
Andrew Walbran | a65a132 | 2020-04-06 19:32:32 +0100 | [diff] [blame] | 1369 | } |
| 1370 | |
Andrew Walbran | b5ab43c | 2020-04-30 11:32:54 +0100 | [diff] [blame] | 1371 | composite = ffa_memory_region_get_composite(memory_region, 0); |
Andrew Walbran | a65a132 | 2020-04-06 19:32:32 +0100 | [diff] [blame] | 1372 | |
| 1373 | /* |
Andrew Walbran | f07f04d | 2020-05-01 18:09:00 +0100 | [diff] [blame] | 1374 | * Ensure the number of constituents are within the memory bounds. |
Andrew Walbran | a65a132 | 2020-04-06 19:32:32 +0100 | [diff] [blame] | 1375 | */ |
Andrew Walbran | 130a8ae | 2020-05-15 16:27:15 +0100 | [diff] [blame] | 1376 | constituents_length = sizeof(struct ffa_memory_region_constituent) * |
| 1377 | composite->constituent_count; |
Andrew Walbran | 352aa3d | 2020-05-01 17:51:33 +0100 | [diff] [blame] | 1378 | if (memory_share_length != constituents_offset + constituents_length) { |
| 1379 | dlog_verbose("Invalid length %d or composite offset %d.\n", |
Andrew Walbran | 130a8ae | 2020-05-15 16:27:15 +0100 | [diff] [blame] | 1380 | memory_share_length, |
Andrew Walbran | a65a132 | 2020-04-06 19:32:32 +0100 | [diff] [blame] | 1381 | memory_region->receivers[0] |
| 1382 | .composite_memory_region_offset); |
Andrew Walbran | b5ab43c | 2020-04-30 11:32:54 +0100 | [diff] [blame] | 1383 | return ffa_error(FFA_INVALID_PARAMETERS); |
Andrew Walbran | a65a132 | 2020-04-06 19:32:32 +0100 | [diff] [blame] | 1384 | } |
Andrew Walbran | ca808b1 | 2020-05-15 17:22:28 +0100 | [diff] [blame] | 1385 | if (fragment_length < memory_share_length && |
| 1386 | fragment_length < HF_MAILBOX_SIZE) { |
| 1387 | dlog_warning( |
| 1388 | "Initial fragment length %d smaller than mailbox " |
| 1389 | "size.\n", |
| 1390 | fragment_length); |
| 1391 | } |
Andrew Walbran | a65a132 | 2020-04-06 19:32:32 +0100 | [diff] [blame] | 1392 | |
Andrew Walbran | a65a132 | 2020-04-06 19:32:32 +0100 | [diff] [blame] | 1393 | /* |
| 1394 | * Clear is not allowed for memory sharing, as the sender still has |
| 1395 | * access to the memory. |
| 1396 | */ |
Andrew Walbran | 1a86aa9 | 2020-05-15 17:22:28 +0100 | [diff] [blame] | 1397 | if ((memory_region->flags & FFA_MEMORY_REGION_FLAG_CLEAR) && |
| 1398 | share_func == FFA_MEM_SHARE_32) { |
Andrew Walbran | a65a132 | 2020-04-06 19:32:32 +0100 | [diff] [blame] | 1399 | dlog_verbose("Memory can't be cleared while being shared.\n"); |
Andrew Walbran | b5ab43c | 2020-04-30 11:32:54 +0100 | [diff] [blame] | 1400 | return ffa_error(FFA_INVALID_PARAMETERS); |
Andrew Walbran | a65a132 | 2020-04-06 19:32:32 +0100 | [diff] [blame] | 1401 | } |
| 1402 | |
| 1403 | /* No other flags are allowed/supported here. */ |
Andrew Walbran | b5ab43c | 2020-04-30 11:32:54 +0100 | [diff] [blame] | 1404 | if (memory_region->flags & ~FFA_MEMORY_REGION_FLAG_CLEAR) { |
Andrew Walbran | a65a132 | 2020-04-06 19:32:32 +0100 | [diff] [blame] | 1405 | dlog_verbose("Invalid flags %#x.\n", memory_region->flags); |
Andrew Walbran | b5ab43c | 2020-04-30 11:32:54 +0100 | [diff] [blame] | 1406 | return ffa_error(FFA_INVALID_PARAMETERS); |
Andrew Walbran | a65a132 | 2020-04-06 19:32:32 +0100 | [diff] [blame] | 1407 | } |
| 1408 | |
| 1409 | /* Check that the permissions are valid. */ |
| 1410 | *permissions = |
| 1411 | memory_region->receivers[0].receiver_permissions.permissions; |
Andrew Walbran | b5ab43c | 2020-04-30 11:32:54 +0100 | [diff] [blame] | 1412 | data_access = ffa_get_data_access_attr(*permissions); |
| 1413 | instruction_access = ffa_get_instruction_access_attr(*permissions); |
| 1414 | if (data_access == FFA_DATA_ACCESS_RESERVED || |
| 1415 | instruction_access == FFA_INSTRUCTION_ACCESS_RESERVED) { |
Andrew Walbran | a65a132 | 2020-04-06 19:32:32 +0100 | [diff] [blame] | 1416 | dlog_verbose("Reserved value for receiver permissions %#x.\n", |
| 1417 | *permissions); |
Andrew Walbran | b5ab43c | 2020-04-30 11:32:54 +0100 | [diff] [blame] | 1418 | return ffa_error(FFA_INVALID_PARAMETERS); |
Andrew Walbran | a65a132 | 2020-04-06 19:32:32 +0100 | [diff] [blame] | 1419 | } |
Andrew Walbran | b5ab43c | 2020-04-30 11:32:54 +0100 | [diff] [blame] | 1420 | if (instruction_access != FFA_INSTRUCTION_ACCESS_NOT_SPECIFIED) { |
Andrew Walbran | a65a132 | 2020-04-06 19:32:32 +0100 | [diff] [blame] | 1421 | dlog_verbose( |
| 1422 | "Invalid instruction access permissions %#x for " |
| 1423 | "sending memory.\n", |
| 1424 | *permissions); |
Andrew Walbran | b5ab43c | 2020-04-30 11:32:54 +0100 | [diff] [blame] | 1425 | return ffa_error(FFA_INVALID_PARAMETERS); |
Andrew Walbran | a65a132 | 2020-04-06 19:32:32 +0100 | [diff] [blame] | 1426 | } |
Andrew Walbran | b5ab43c | 2020-04-30 11:32:54 +0100 | [diff] [blame] | 1427 | if (share_func == FFA_MEM_SHARE_32) { |
| 1428 | if (data_access == FFA_DATA_ACCESS_NOT_SPECIFIED) { |
Andrew Walbran | a65a132 | 2020-04-06 19:32:32 +0100 | [diff] [blame] | 1429 | dlog_verbose( |
| 1430 | "Invalid data access permissions %#x for " |
| 1431 | "sharing memory.\n", |
| 1432 | *permissions); |
Andrew Walbran | b5ab43c | 2020-04-30 11:32:54 +0100 | [diff] [blame] | 1433 | return ffa_error(FFA_INVALID_PARAMETERS); |
Andrew Walbran | a65a132 | 2020-04-06 19:32:32 +0100 | [diff] [blame] | 1434 | } |
| 1435 | /* |
Andrew Walbran | dd8248f | 2020-06-22 13:39:30 +0100 | [diff] [blame] | 1436 | 		 * According to section 5.11.3 of the FF-A 1.0 spec, NX is |
| 1437 | 		 * required for share operations (but must not be specified by |
| 1438 | 		 * the sender), so set it in the copy that we store, ready to be |
Andrew Walbran | a65a132 | 2020-04-06 19:32:32 +0100 | [diff] [blame] | 1439 | * returned to the retriever. |
| 1440 | */ |
Andrew Walbran | b5ab43c | 2020-04-30 11:32:54 +0100 | [diff] [blame] | 1441 | ffa_set_instruction_access_attr(permissions, |
| 1442 | FFA_INSTRUCTION_ACCESS_NX); |
Andrew Walbran | a65a132 | 2020-04-06 19:32:32 +0100 | [diff] [blame] | 1443 | memory_region->receivers[0].receiver_permissions.permissions = |
| 1444 | *permissions; |
| 1445 | } |
Andrew Walbran | b5ab43c | 2020-04-30 11:32:54 +0100 | [diff] [blame] | 1446 | if (share_func == FFA_MEM_LEND_32 && |
| 1447 | data_access == FFA_DATA_ACCESS_NOT_SPECIFIED) { |
Andrew Walbran | a65a132 | 2020-04-06 19:32:32 +0100 | [diff] [blame] | 1448 | dlog_verbose( |
| 1449 | "Invalid data access permissions %#x for lending " |
| 1450 | "memory.\n", |
| 1451 | *permissions); |
Andrew Walbran | b5ab43c | 2020-04-30 11:32:54 +0100 | [diff] [blame] | 1452 | return ffa_error(FFA_INVALID_PARAMETERS); |
Andrew Walbran | a65a132 | 2020-04-06 19:32:32 +0100 | [diff] [blame] | 1453 | } |
Andrew Walbran | b5ab43c | 2020-04-30 11:32:54 +0100 | [diff] [blame] | 1454 | if (share_func == FFA_MEM_DONATE_32 && |
| 1455 | data_access != FFA_DATA_ACCESS_NOT_SPECIFIED) { |
Andrew Walbran | a65a132 | 2020-04-06 19:32:32 +0100 | [diff] [blame] | 1456 | dlog_verbose( |
| 1457 | "Invalid data access permissions %#x for donating " |
| 1458 | "memory.\n", |
| 1459 | *permissions); |
Andrew Walbran | b5ab43c | 2020-04-30 11:32:54 +0100 | [diff] [blame] | 1460 | return ffa_error(FFA_INVALID_PARAMETERS); |
Andrew Walbran | a65a132 | 2020-04-06 19:32:32 +0100 | [diff] [blame] | 1461 | } |
| 1462 | |
Andrew Walbran | b5ab43c | 2020-04-30 11:32:54 +0100 | [diff] [blame] | 1463 | return (struct ffa_value){.func = FFA_SUCCESS_32}; |
Andrew Walbran | a65a132 | 2020-04-06 19:32:32 +0100 | [diff] [blame] | 1464 | } |
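| | |
| | /* |
| |  * For reference, the single-receiver layout validated above is: |
| |  * |
| |  *   [struct ffa_memory_region][receivers[]][composite][constituents] |
| |  * |
| |  * so a well-formed request satisfies both |
| |  * |
| |  *   composite_memory_region_offset >= |
| |  *           sizeof(struct ffa_memory_region) + receivers_length |
| |  *   memory_share_length == constituents_offset + constituents_length |
| |  * |
| |  * where constituents_offset is derived from the composite offset by |
| |  * ffa_composite_constituent_offset(). |
| |  */ |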
| 1465 | |
Andrew Walbran | 1a86aa9 | 2020-05-15 17:22:28 +0100 | [diff] [blame] | 1466 | /** Forwards a memory send message on to the TEE. */ |
| 1467 | static struct ffa_value memory_send_tee_forward( |
| 1468 | struct vm_locked tee_locked, ffa_vm_id_t sender_vm_id, |
| 1469 | uint32_t share_func, struct ffa_memory_region *memory_region, |
| 1470 | uint32_t memory_share_length, uint32_t fragment_length) |
| 1471 | { |
| 1472 | struct ffa_value ret; |
| 1473 | |
| 1474 | memcpy_s(tee_locked.vm->mailbox.recv, FFA_MSG_PAYLOAD_MAX, |
| 1475 | memory_region, fragment_length); |
| 1476 | tee_locked.vm->mailbox.recv_size = fragment_length; |
| 1477 | tee_locked.vm->mailbox.recv_sender = sender_vm_id; |
| 1478 | tee_locked.vm->mailbox.recv_func = share_func; |
| 1479 | tee_locked.vm->mailbox.state = MAILBOX_STATE_RECEIVED; |
Olivier Deprez | 112d2b5 | 2020-09-30 07:39:23 +0200 | [diff] [blame] | 1480 | ret = arch_other_world_call( |
| 1481 | (struct ffa_value){.func = share_func, |
| 1482 | .arg1 = memory_share_length, |
| 1483 | .arg2 = fragment_length}); |
Andrew Walbran | 1a86aa9 | 2020-05-15 17:22:28 +0100 | [diff] [blame] | 1484 | /* |
| 1485 | * After the call to the TEE completes it must have finished reading its |
| 1486 | * RX buffer, so it is ready for another message. |
| 1487 | */ |
| 1488 | tee_locked.vm->mailbox.state = MAILBOX_STATE_EMPTY; |
| 1489 | |
| 1490 | return ret; |
| 1491 | } |
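| | |
| | /* |
| |  * The descriptor is handed to the TEE through the TEE's RX buffer, since |
| |  * it is an incoming message from the TEE's point of view: the buffer is |
| |  * filled and marked MAILBOX_STATE_RECEIVED before the call and marked |
| |  * MAILBOX_STATE_EMPTY again afterwards, the TEE having consumed the |
| |  * message synchronously. recv_func records which FF-A memory sending |
| |  * function the buffered descriptor belongs to. |
| |  */ |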
| 1492 | |
Andrew Walbran | a65a132 | 2020-04-06 19:32:32 +0100 | [diff] [blame] | 1493 | /** |
Andrew Walbran | ca808b1 | 2020-05-15 17:22:28 +0100 | [diff] [blame] | 1494 | * Gets the share state for continuing an operation to donate, lend or share |
| 1495 | * memory, and checks that it is a valid request. |
| 1496 | * |
| 1497 | * Returns FFA_SUCCESS if the request was valid, or the relevant FFA_ERROR if |
| 1498 | * not. |
| 1499 | */ |
| 1500 | static struct ffa_value ffa_memory_send_continue_validate( |
| 1501 | struct share_states_locked share_states, ffa_memory_handle_t handle, |
| 1502 | struct ffa_memory_share_state **share_state_ret, ffa_vm_id_t from_vm_id, |
| 1503 | struct mpool *page_pool) |
| 1504 | { |
| 1505 | struct ffa_memory_share_state *share_state; |
| 1506 | struct ffa_memory_region *memory_region; |
| 1507 | |
| 1508 | CHECK(share_state_ret != NULL); |
| 1509 | |
| 1510 | /* |
| 1511 | * Look up the share state by handle and make sure that the VM ID |
| 1512 | * matches. |
| 1513 | */ |
| 1514 | if (!get_share_state(share_states, handle, &share_state)) { |
| 1515 | dlog_verbose( |
| 1516 | "Invalid handle %#x for memory send continuation.\n", |
| 1517 | handle); |
| 1518 | return ffa_error(FFA_INVALID_PARAMETERS); |
| 1519 | } |
| 1520 | memory_region = share_state->memory_region; |
| 1521 | |
| 1522 | if (memory_region->sender != from_vm_id) { |
| 1523 | dlog_verbose("Invalid sender %d.\n", memory_region->sender); |
| 1524 | return ffa_error(FFA_INVALID_PARAMETERS); |
| 1525 | } |
| 1526 | |
| 1527 | if (share_state->sending_complete) { |
| 1528 | dlog_verbose( |
| 1529 | "Sending of memory handle %#x is already complete.\n", |
| 1530 | handle); |
| 1531 | return ffa_error(FFA_INVALID_PARAMETERS); |
| 1532 | } |
| 1533 | |
| 1534 | if (share_state->fragment_count == MAX_FRAGMENTS) { |
| 1535 | /* |
| 1536 | * Log a warning as this is a sign that MAX_FRAGMENTS should |
| 1537 | * probably be increased. |
| 1538 | */ |
| 1539 | dlog_warning( |
| 1540 | "Too many fragments for memory share with handle %#x; " |
| 1541 | "only %d supported.\n", |
| 1542 | handle, MAX_FRAGMENTS); |
| 1543 | /* Free share state, as it's not possible to complete it. */ |
| 1544 | share_state_free(share_states, share_state, page_pool); |
| 1545 | return ffa_error(FFA_NO_MEMORY); |
| 1546 | } |
| 1547 | |
| 1548 | *share_state_ret = share_state; |
| 1549 | |
| 1550 | return (struct ffa_value){.func = FFA_SUCCESS_32}; |
| 1551 | } |
| 1552 | |
| 1553 | /** |
| 1554 | * Forwards a memory send continuation message on to the TEE. |
| 1555 | */ |
| 1556 | static struct ffa_value memory_send_continue_tee_forward( |
| 1557 | struct vm_locked tee_locked, ffa_vm_id_t sender_vm_id, void *fragment, |
| 1558 | uint32_t fragment_length, ffa_memory_handle_t handle) |
| 1559 | { |
| 1560 | struct ffa_value ret; |
| 1561 | |
| 1562 | memcpy_s(tee_locked.vm->mailbox.recv, FFA_MSG_PAYLOAD_MAX, fragment, |
| 1563 | fragment_length); |
| 1564 | tee_locked.vm->mailbox.recv_size = fragment_length; |
| 1565 | tee_locked.vm->mailbox.recv_sender = sender_vm_id; |
| 1566 | tee_locked.vm->mailbox.recv_func = FFA_MEM_FRAG_TX_32; |
| 1567 | tee_locked.vm->mailbox.state = MAILBOX_STATE_RECEIVED; |
Olivier Deprez | 112d2b5 | 2020-09-30 07:39:23 +0200 | [diff] [blame] | 1568 | ret = arch_other_world_call( |
Andrew Walbran | ca808b1 | 2020-05-15 17:22:28 +0100 | [diff] [blame] | 1569 | (struct ffa_value){.func = FFA_MEM_FRAG_TX_32, |
| 1570 | .arg1 = (uint32_t)handle, |
| 1571 | .arg2 = (uint32_t)(handle >> 32), |
| 1572 | .arg3 = fragment_length, |
| 1573 | .arg4 = (uint64_t)sender_vm_id << 16}); |
| 1574 | /* |
| 1575 | * After the call to the TEE completes it must have finished reading its |
| 1576 | * RX buffer, so it is ready for another message. |
| 1577 | */ |
| 1578 | tee_locked.vm->mailbox.state = MAILBOX_STATE_EMPTY; |
| 1579 | |
| 1580 | return ret; |
| 1581 | } |
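| | |
| | /* |
| |  * In the FFA_MEM_FRAG_TX call above the handle again travels in w1/w2, |
| |  * while the sender VM ID is placed in bits [31:16] of w4; this is the |
| |  * field that ffa_frag_sender() reads back on the receive side. |
| |  */ |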
| 1582 | |
| 1583 | /** |
Andrew Walbran | 1a86aa9 | 2020-05-15 17:22:28 +0100 | [diff] [blame] | 1584 | * Validates a call to donate, lend or share memory to a non-TEE VM and then |
| 1585 | * updates the stage-2 page tables. Specifically, check if the message length |
| 1586 | * and number of memory region constituents match, and if the transition is |
| 1587 | * valid for the type of memory sending operation. |
Andrew Walbran | 475c145 | 2020-02-07 13:22:22 +0000 | [diff] [blame] | 1588 | * |
Andrew Walbran | 1a86aa9 | 2020-05-15 17:22:28 +0100 | [diff] [blame] | 1589 | * Assumes that the caller has already found and locked the sender VM and copied |
| 1590 | * the memory region descriptor from the sender's TX buffer to a freshly |
| 1591 | * allocated page from Hafnium's internal pool. The caller must have also |
| 1592 | * validated that the receiver VM ID is valid. |
Andrew Walbran | 5de9c3d | 2020-02-10 13:35:29 +0000 | [diff] [blame] | 1593 | * |
Andrew Walbran | 1a86aa9 | 2020-05-15 17:22:28 +0100 | [diff] [blame] | 1594 | * This function takes ownership of the `memory_region` passed in and will free |
| 1595 | * it when necessary; it must not be freed by the caller. |
Jose Marinho | 09b1db8 | 2019-08-08 09:16:59 +0100 | [diff] [blame] | 1596 | */ |
Andrew Walbran | 1a86aa9 | 2020-05-15 17:22:28 +0100 | [diff] [blame] | 1597 | struct ffa_value ffa_memory_send(struct vm_locked from_locked, |
Andrew Walbran | b5ab43c | 2020-04-30 11:32:54 +0100 | [diff] [blame] | 1598 | struct ffa_memory_region *memory_region, |
Andrew Walbran | 130a8ae | 2020-05-15 16:27:15 +0100 | [diff] [blame] | 1599 | uint32_t memory_share_length, |
Andrew Walbran | 1a86aa9 | 2020-05-15 17:22:28 +0100 | [diff] [blame] | 1600 | uint32_t fragment_length, uint32_t share_func, |
| 1601 | struct mpool *page_pool) |
Jose Marinho | 09b1db8 | 2019-08-08 09:16:59 +0100 | [diff] [blame] | 1602 | { |
Andrew Walbran | b5ab43c | 2020-04-30 11:32:54 +0100 | [diff] [blame] | 1603 | ffa_memory_access_permissions_t permissions; |
| 1604 | struct ffa_value ret; |
Andrew Walbran | ca808b1 | 2020-05-15 17:22:28 +0100 | [diff] [blame] | 1605 | struct share_states_locked share_states; |
| 1606 | struct ffa_memory_share_state *share_state; |
Jose Marinho | 09b1db8 | 2019-08-08 09:16:59 +0100 | [diff] [blame] | 1607 | |
| 1608 | /* |
Andrew Walbran | a65a132 | 2020-04-06 19:32:32 +0100 | [diff] [blame] | 1609 | * If there is an error validating the `memory_region` then we need to |
| 1610 | * free it because we own it but we won't be storing it in a share state |
| 1611 | * after all. |
Jose Marinho | 09b1db8 | 2019-08-08 09:16:59 +0100 | [diff] [blame] | 1612 | */ |
Andrew Walbran | 1a86aa9 | 2020-05-15 17:22:28 +0100 | [diff] [blame] | 1613 | ret = ffa_memory_send_validate(from_locked, memory_region, |
| 1614 | memory_share_length, fragment_length, |
| 1615 | share_func, &permissions); |
Andrew Walbran | b5ab43c | 2020-04-30 11:32:54 +0100 | [diff] [blame] | 1616 | if (ret.func != FFA_SUCCESS_32) { |
Andrew Walbran | 5de9c3d | 2020-02-10 13:35:29 +0000 | [diff] [blame] | 1617 | mpool_free(page_pool, memory_region); |
Andrew Walbran | a65a132 | 2020-04-06 19:32:32 +0100 | [diff] [blame] | 1618 | return ret; |
Jose Marinho | 09b1db8 | 2019-08-08 09:16:59 +0100 | [diff] [blame] | 1619 | } |
| 1620 | |
Andrew Walbran | a65a132 | 2020-04-06 19:32:32 +0100 | [diff] [blame] | 1621 | /* Set flag for share function, ready to be retrieved later. */ |
| 1622 | switch (share_func) { |
Andrew Walbran | b5ab43c | 2020-04-30 11:32:54 +0100 | [diff] [blame] | 1623 | case FFA_MEM_SHARE_32: |
Andrew Walbran | a65a132 | 2020-04-06 19:32:32 +0100 | [diff] [blame] | 1624 | memory_region->flags |= |
Andrew Walbran | b5ab43c | 2020-04-30 11:32:54 +0100 | [diff] [blame] | 1625 | FFA_MEMORY_REGION_TRANSACTION_TYPE_SHARE; |
Andrew Walbran | a65a132 | 2020-04-06 19:32:32 +0100 | [diff] [blame] | 1626 | break; |
Andrew Walbran | b5ab43c | 2020-04-30 11:32:54 +0100 | [diff] [blame] | 1627 | case FFA_MEM_LEND_32: |
| 1628 | memory_region->flags |= FFA_MEMORY_REGION_TRANSACTION_TYPE_LEND; |
Andrew Walbran | a65a132 | 2020-04-06 19:32:32 +0100 | [diff] [blame] | 1629 | break; |
Andrew Walbran | b5ab43c | 2020-04-30 11:32:54 +0100 | [diff] [blame] | 1630 | case FFA_MEM_DONATE_32: |
Andrew Walbran | a65a132 | 2020-04-06 19:32:32 +0100 | [diff] [blame] | 1631 | memory_region->flags |= |
Andrew Walbran | b5ab43c | 2020-04-30 11:32:54 +0100 | [diff] [blame] | 1632 | FFA_MEMORY_REGION_TRANSACTION_TYPE_DONATE; |
Andrew Walbran | a65a132 | 2020-04-06 19:32:32 +0100 | [diff] [blame] | 1633 | break; |
Jose Marinho | 09b1db8 | 2019-08-08 09:16:59 +0100 | [diff] [blame] | 1634 | } |
| 1635 | |
Andrew Walbran | ca808b1 | 2020-05-15 17:22:28 +0100 | [diff] [blame] | 1636 | share_states = share_states_lock(); |
Andrew Walbran | 5de9c3d | 2020-02-10 13:35:29 +0000 | [diff] [blame] | 1637 | /* |
| 1638 | * Allocate a share state before updating the page table. Otherwise if |
| 1639 | * updating the page table succeeded but allocating the share state |
| 1640 | * failed then it would leave the memory in a state where nobody could |
| 1641 | * get it back. |
| 1642 | */ |
Andrew Walbran | ca808b1 | 2020-05-15 17:22:28 +0100 | [diff] [blame] | 1643 | if (!allocate_share_state(share_states, share_func, memory_region, |
| 1644 | fragment_length, FFA_MEMORY_HANDLE_INVALID, |
| 1645 | &share_state)) { |
Andrew Walbran | 5de9c3d | 2020-02-10 13:35:29 +0000 | [diff] [blame] | 1646 | dlog_verbose("Failed to allocate share state.\n"); |
| 1647 | mpool_free(page_pool, memory_region); |
Andrew Walbran | ca808b1 | 2020-05-15 17:22:28 +0100 | [diff] [blame] | 1648 | ret = ffa_error(FFA_NO_MEMORY); |
| 1649 | goto out; |
Andrew Walbran | 5de9c3d | 2020-02-10 13:35:29 +0000 | [diff] [blame] | 1650 | } |
| 1651 | |
Andrew Walbran | ca808b1 | 2020-05-15 17:22:28 +0100 | [diff] [blame] | 1652 | if (fragment_length == memory_share_length) { |
| 1653 | /* No more fragments to come, everything fit in one message. */ |
J-Alves | 2a0d288 | 2020-10-29 14:49:50 +0000 | [diff] [blame] | 1654 | ret = ffa_memory_send_complete( |
| 1655 | from_locked, share_states, share_state, page_pool, |
| 1656 | &(share_state->sender_orig_mode)); |
Andrew Walbran | ca808b1 | 2020-05-15 17:22:28 +0100 | [diff] [blame] | 1657 | } else { |
| 1658 | ret = (struct ffa_value){ |
| 1659 | .func = FFA_MEM_FRAG_RX_32, |
| 1660 | .arg1 = (uint32_t)share_state->handle, |
| 1661 | .arg2 = (uint32_t)(share_state->handle >> 32), |
| 1662 | .arg3 = fragment_length}; |
| 1663 | } |
| 1664 | |
| 1665 | out: |
| 1666 | share_states_unlock(&share_states); |
Andrew Walbran | 5de9c3d | 2020-02-10 13:35:29 +0000 | [diff] [blame] | 1667 | dump_share_states(); |
Andrew Walbran | ca808b1 | 2020-05-15 17:22:28 +0100 | [diff] [blame] | 1668 | return ret; |
Andrew Walbran | 1a86aa9 | 2020-05-15 17:22:28 +0100 | [diff] [blame] | 1669 | } |
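| | |
| | /* |
| |  * A rough sketch of how a caller is expected to drive ffa_memory_send() |
| |  * (illustrative only; the pool and length names are placeholders, and a |
| |  * real handler also validates the receiver and routes TEE-bound sends to |
| |  * ffa_memory_tee_send() instead): |
| |  * |
| |  *   struct ffa_memory_region *region = mpool_alloc(&page_pool); |
| |  * |
| |  *   memcpy_s(region, HF_MAILBOX_SIZE, from_locked.vm->mailbox.send, |
| |  *            fragment_length); |
| |  *   ret = ffa_memory_send(from_locked, region, memory_share_length, |
| |  *                         fragment_length, share_func, &page_pool); |
| |  * |
| |  * A result of FFA_MEM_FRAG_RX_32 means further fragments are expected |
| |  * via ffa_memory_send_continue(). |
| |  */ |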
| 1670 | |
| 1671 | /** |
| 1672 | * Validates a call to donate, lend or share memory to the TEE and then updates |
| 1673 | * the stage-2 page tables. Specifically, check if the message length and number |
| 1674 | * of memory region constituents match, and if the transition is valid for the |
| 1675 | * type of memory sending operation. |
| 1676 | * |
| 1677 | * Assumes that the caller has already found and locked the sender VM and the |
| 1678 | * TEE VM, and copied the memory region descriptor from the sender's TX buffer |
| 1679 | * to a freshly allocated page from Hafnium's internal pool. The caller must |
| 1680 | * have also validated that the receiver VM ID is valid. |
| 1681 | * |
| 1682 | * This function takes ownership of the `memory_region` passed in and will free |
| 1683 | * it when necessary; it must not be freed by the caller. |
| 1684 | */ |
| 1685 | struct ffa_value ffa_memory_tee_send( |
| 1686 | struct vm_locked from_locked, struct vm_locked to_locked, |
| 1687 | struct ffa_memory_region *memory_region, uint32_t memory_share_length, |
| 1688 | uint32_t fragment_length, uint32_t share_func, struct mpool *page_pool) |
| 1689 | { |
| 1690 | ffa_memory_access_permissions_t permissions; |
| 1691 | struct ffa_value ret; |
Andrew Walbran | 1a86aa9 | 2020-05-15 17:22:28 +0100 | [diff] [blame] | 1692 | |
| 1693 | /* |
| 1694 | * If there is an error validating the `memory_region` then we need to |
| 1695 | * free it because we own it but we won't be storing it in a share state |
| 1696 | * after all. |
| 1697 | */ |
| 1698 | ret = ffa_memory_send_validate(from_locked, memory_region, |
| 1699 | memory_share_length, fragment_length, |
| 1700 | share_func, &permissions); |
| 1701 | if (ret.func != FFA_SUCCESS_32) { |
| 1702 | goto out; |
Andrew Walbran | 5de9c3d | 2020-02-10 13:35:29 +0000 | [diff] [blame] | 1703 | } |
| 1704 | |
Andrew Walbran | ca808b1 | 2020-05-15 17:22:28 +0100 | [diff] [blame] | 1705 | if (fragment_length == memory_share_length) { |
| 1706 | /* No more fragments to come, everything fit in one message. */ |
| 1707 | struct ffa_composite_memory_region *composite = |
| 1708 | ffa_memory_region_get_composite(memory_region, 0); |
| 1709 | struct ffa_memory_region_constituent *constituents = |
| 1710 | composite->constituents; |
Andrew Walbran | 37c574e | 2020-06-03 11:45:46 +0100 | [diff] [blame] | 1711 | struct mpool local_page_pool; |
| 1712 | uint32_t orig_from_mode; |
| 1713 | |
| 1714 | /* |
| 1715 | * Use a local page pool so that we can roll back if necessary. |
| 1716 | */ |
| 1717 | mpool_init_with_fallback(&local_page_pool, page_pool); |
Andrew Walbran | ca808b1 | 2020-05-15 17:22:28 +0100 | [diff] [blame] | 1718 | |
| 1719 | ret = ffa_send_check_update( |
| 1720 | from_locked, &constituents, |
| 1721 | &composite->constituent_count, 1, share_func, |
Andrew Walbran | 37c574e | 2020-06-03 11:45:46 +0100 | [diff] [blame] | 1722 | permissions, &local_page_pool, |
| 1723 | memory_region->flags & FFA_MEMORY_REGION_FLAG_CLEAR, |
| 1724 | &orig_from_mode); |
Andrew Walbran | ca808b1 | 2020-05-15 17:22:28 +0100 | [diff] [blame] | 1725 | if (ret.func != FFA_SUCCESS_32) { |
Andrew Walbran | 37c574e | 2020-06-03 11:45:46 +0100 | [diff] [blame] | 1726 | mpool_fini(&local_page_pool); |
Andrew Walbran | ca808b1 | 2020-05-15 17:22:28 +0100 | [diff] [blame] | 1727 | goto out; |
| 1728 | } |
| 1729 | |
| 1730 | /* Forward memory send message on to TEE. */ |
| 1731 | ret = memory_send_tee_forward( |
| 1732 | to_locked, from_locked.vm->id, share_func, |
| 1733 | memory_region, memory_share_length, fragment_length); |
Andrew Walbran | 37c574e | 2020-06-03 11:45:46 +0100 | [diff] [blame] | 1734 | |
| 1735 | if (ret.func != FFA_SUCCESS_32) { |
| 1736 | dlog_verbose( |
| 1737 | "TEE didn't successfully complete memory send " |
| 1738 | "operation; returned %#x (%d). Rolling back.\n", |
| 1739 | ret.func, ret.arg2); |
| 1740 | |
| 1741 | /* |
| 1742 | * The TEE failed to complete the send operation, so |
| 1743 | * roll back the page table update for the VM. This |
| 1744 | * can't fail because it won't try to allocate more |
| 1745 | * memory than was freed into the `local_page_pool` by |
| 1746 | * `ffa_send_check_update` in the initial update. |
| 1747 | */ |
| 1748 | CHECK(ffa_region_group_identity_map( |
| 1749 | from_locked, &constituents, |
| 1750 | &composite->constituent_count, 1, |
| 1751 | orig_from_mode, &local_page_pool, true)); |
| 1752 | } |
| 1753 | |
| 1754 | mpool_fini(&local_page_pool); |
Andrew Walbran | ca808b1 | 2020-05-15 17:22:28 +0100 | [diff] [blame] | 1755 | } else { |
| 1756 | struct share_states_locked share_states = share_states_lock(); |
| 1757 | ffa_memory_handle_t handle; |
| 1758 | |
| 1759 | /* |
| 1760 | * We need to wait for the rest of the fragments before we can |
| 1761 | * check whether the transaction is valid and unmap the memory. |
| 1762 | * Call the TEE so it can do its initial validation and assign a |
| 1763 | * handle, and allocate a share state to keep what we have so |
| 1764 | * far. |
| 1765 | */ |
| 1766 | ret = memory_send_tee_forward( |
| 1767 | to_locked, from_locked.vm->id, share_func, |
| 1768 | memory_region, memory_share_length, fragment_length); |
| 1769 | if (ret.func == FFA_ERROR_32) { |
| 1770 | goto out_unlock; |
| 1771 | } else if (ret.func != FFA_MEM_FRAG_RX_32) { |
| 1772 | dlog_warning( |
| 1773 | "Got %#x from TEE in response to %#x for " |
| 1774 | 				"fragment with %d/%d, expected " |
| 1775 | "FFA_MEM_FRAG_RX.\n", |
| 1776 | ret.func, share_func, fragment_length, |
| 1777 | memory_share_length); |
| 1778 | ret = ffa_error(FFA_INVALID_PARAMETERS); |
| 1779 | goto out_unlock; |
| 1780 | } |
| 1781 | handle = ffa_frag_handle(ret); |
| 1782 | if (ret.arg3 != fragment_length) { |
| 1783 | dlog_warning( |
| 1784 | "Got unexpected fragment offset %d for " |
| 1785 | "FFA_MEM_FRAG_RX from TEE (expected %d).\n", |
| 1786 | ret.arg3, fragment_length); |
| 1787 | ret = ffa_error(FFA_INVALID_PARAMETERS); |
| 1788 | goto out_unlock; |
| 1789 | } |
| 1790 | if (ffa_frag_sender(ret) != from_locked.vm->id) { |
| 1791 | dlog_warning( |
| 1792 | "Got unexpected sender ID %d for " |
| 1793 | "FFA_MEM_FRAG_RX from TEE (expected %d).\n", |
| 1794 | ffa_frag_sender(ret), from_locked.vm->id); |
| 1795 | ret = ffa_error(FFA_INVALID_PARAMETERS); |
| 1796 | goto out_unlock; |
| 1797 | } |
| 1798 | |
| 1799 | if (!allocate_share_state(share_states, share_func, |
| 1800 | memory_region, fragment_length, |
| 1801 | handle, NULL)) { |
| 1802 | dlog_verbose("Failed to allocate share state.\n"); |
| 1803 | ret = ffa_error(FFA_NO_MEMORY); |
| 1804 | goto out_unlock; |
| 1805 | } |
| 1806 | /* |
| 1807 | * Don't free the memory region fragment, as it has been stored |
| 1808 | * in the share state. |
| 1809 | */ |
| 1810 | memory_region = NULL; |
| 1811 | out_unlock: |
| 1812 | share_states_unlock(&share_states); |
Andrew Walbran | 1a86aa9 | 2020-05-15 17:22:28 +0100 | [diff] [blame] | 1813 | } |
| 1814 | |
Andrew Walbran | ca808b1 | 2020-05-15 17:22:28 +0100 | [diff] [blame] | 1815 | out: |
| 1816 | if (memory_region != NULL) { |
| 1817 | mpool_free(page_pool, memory_region); |
| 1818 | } |
| 1819 | dump_share_states(); |
| 1820 | return ret; |
| 1821 | } |
| 1822 | |
| 1823 | /** |
| 1824 | * Continues an operation to donate, lend or share memory to a non-TEE VM. If |
| 1825 | * this is the last fragment then checks that the transition is valid for the |
| 1826 | * type of memory sending operation and updates the stage-2 page tables of the |
| 1827 | * sender. |
| 1828 | * |
| 1829 |  * Assumes that the caller has already found and locked the sender VM and copied |
| 1830 |  * the fragment from the sender's TX buffer to a freshly allocated page from |
| 1831 |  * Hafnium's internal pool. |
| 1832 | * |
| 1833 | * This function takes ownership of the `fragment` passed in; it must not be |
| 1834 | * freed by the caller. |
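|      |  * |
|      |  * Returns FFA_MEM_FRAG_RX with the offset of the next expected fragment if |
|      |  * more fragments are still outstanding, the result of completing the send |
|      |  * operation once the final fragment has been added, or an FFA_ERROR if |
|      |  * validation fails (in which case the fragment is freed here). |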
| 1835 | */ |
| 1836 | struct ffa_value ffa_memory_send_continue(struct vm_locked from_locked, |
| 1837 | void *fragment, |
| 1838 | uint32_t fragment_length, |
| 1839 | ffa_memory_handle_t handle, |
| 1840 | struct mpool *page_pool) |
| 1841 | { |
| 1842 | struct share_states_locked share_states = share_states_lock(); |
| 1843 | struct ffa_memory_share_state *share_state; |
| 1844 | struct ffa_value ret; |
| 1845 | struct ffa_memory_region *memory_region; |
| 1846 | |
| 1847 | ret = ffa_memory_send_continue_validate(share_states, handle, |
| 1848 | &share_state, |
| 1849 | from_locked.vm->id, page_pool); |
| 1850 | if (ret.func != FFA_SUCCESS_32) { |
| 1851 | goto out_free_fragment; |
| 1852 | } |
| 1853 | memory_region = share_state->memory_region; |
| 1854 | |
| 1855 | if (memory_region->receivers[0].receiver_permissions.receiver == |
| 1856 | HF_TEE_VM_ID) { |
| 1857 | dlog_error( |
| 1858 | "Got hypervisor-allocated handle for memory send to " |
| 1859 | "TEE. This should never happen, and indicates a bug in " |
| 1860 | "EL3 code.\n"); |
| 1861 | ret = ffa_error(FFA_INVALID_PARAMETERS); |
| 1862 | goto out_free_fragment; |
| 1863 | } |
| 1864 | |
| 1865 | /* Add this fragment. */ |
| 1866 | share_state->fragments[share_state->fragment_count] = fragment; |
| 1867 | share_state->fragment_constituent_counts[share_state->fragment_count] = |
| 1868 | fragment_length / sizeof(struct ffa_memory_region_constituent); |
| 1869 | share_state->fragment_count++; |
| 1870 | |
| 1871 | /* Check whether the memory send operation is now ready to complete. */ |
| 1872 | if (share_state_sending_complete(share_states, share_state)) { |
J-Alves | 2a0d288 | 2020-10-29 14:49:50 +0000 | [diff] [blame] | 1873 | ret = ffa_memory_send_complete( |
| 1874 | from_locked, share_states, share_state, page_pool, |
| 1875 | &(share_state->sender_orig_mode)); |
Andrew Walbran | ca808b1 | 2020-05-15 17:22:28 +0100 | [diff] [blame] | 1876 | } else { |
| 1877 | ret = (struct ffa_value){ |
| 1878 | .func = FFA_MEM_FRAG_RX_32, |
| 1879 | .arg1 = (uint32_t)handle, |
| 1880 | .arg2 = (uint32_t)(handle >> 32), |
| 1881 | .arg3 = share_state_next_fragment_offset(share_states, |
| 1882 | share_state)}; |
| 1883 | } |
| 1884 | goto out; |
| 1885 | |
| 1886 | out_free_fragment: |
| 1887 | mpool_free(page_pool, fragment); |
Andrew Walbran | 1a86aa9 | 2020-05-15 17:22:28 +0100 | [diff] [blame] | 1888 | |
| 1889 | out: |
Andrew Walbran | ca808b1 | 2020-05-15 17:22:28 +0100 | [diff] [blame] | 1890 | share_states_unlock(&share_states); |
Andrew Walbran | 1a86aa9 | 2020-05-15 17:22:28 +0100 | [diff] [blame] | 1891 | return ret; |
Andrew Walbran | 5de9c3d | 2020-02-10 13:35:29 +0000 | [diff] [blame] | 1892 | } |
| 1893 | |
Andrew Walbran | ca808b1 | 2020-05-15 17:22:28 +0100 | [diff] [blame] | 1894 | /** |
| 1895 | * Continues an operation to donate, lend or share memory to the TEE VM. If this |
| 1896 | * is the last fragment then checks that the transition is valid for the type of |
| 1897 | * memory sending operation and updates the stage-2 page tables of the sender. |
| 1898 | * |
| 1899 |  * Assumes that the caller has already found and locked the sender VM and copied |
| 1900 |  * the fragment from the sender's TX buffer to a freshly allocated page from |
| 1901 |  * Hafnium's internal pool. |
| 1902 |  * |
| 1903 |  * This function takes ownership of the `fragment` passed in and will free it |
| 1904 |  * when necessary; it must not be freed by the caller. |
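|      |  * |
|      |  * On the final fragment the completed operation is forwarded to the TEE; if |
|      |  * the TEE rejects it, the sender's page table update is rolled back using a |
|      |  * local page pool. Intermediate fragments are forwarded to the TEE and |
|      |  * answered with FFA_MEM_FRAG_RX. |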
| 1905 | */ |
| 1906 | struct ffa_value ffa_memory_tee_send_continue(struct vm_locked from_locked, |
| 1907 | struct vm_locked to_locked, |
| 1908 | void *fragment, |
| 1909 | uint32_t fragment_length, |
| 1910 | ffa_memory_handle_t handle, |
| 1911 | struct mpool *page_pool) |
| 1912 | { |
| 1913 | struct share_states_locked share_states = share_states_lock(); |
| 1914 | struct ffa_memory_share_state *share_state; |
| 1915 | struct ffa_value ret; |
| 1916 | struct ffa_memory_region *memory_region; |
| 1917 | |
| 1918 | ret = ffa_memory_send_continue_validate(share_states, handle, |
| 1919 | &share_state, |
| 1920 | from_locked.vm->id, page_pool); |
| 1921 | if (ret.func != FFA_SUCCESS_32) { |
| 1922 | goto out_free_fragment; |
| 1923 | } |
| 1924 | memory_region = share_state->memory_region; |
| 1925 | |
| 1926 | if (memory_region->receivers[0].receiver_permissions.receiver != |
| 1927 | HF_TEE_VM_ID) { |
| 1928 | dlog_error( |
| 1929 | "Got SPM-allocated handle for memory send to non-TEE " |
| 1930 | "VM. This should never happen, and indicates a bug.\n"); |
| 1931 | ret = ffa_error(FFA_INVALID_PARAMETERS); |
| 1932 | goto out_free_fragment; |
| 1933 | } |
| 1934 | |
| 1935 | if (to_locked.vm->mailbox.state != MAILBOX_STATE_EMPTY || |
| 1936 | to_locked.vm->mailbox.recv == NULL) { |
| 1937 | /* |
| 1938 | * If the TEE RX buffer is not available, tell the sender to |
| 1939 | * retry by returning the current offset again. |
| 1940 | */ |
| 1941 | ret = (struct ffa_value){ |
| 1942 | .func = FFA_MEM_FRAG_RX_32, |
| 1943 | .arg1 = (uint32_t)handle, |
| 1944 | .arg2 = (uint32_t)(handle >> 32), |
| 1945 | .arg3 = share_state_next_fragment_offset(share_states, |
| 1946 | share_state), |
| 1947 | }; |
| 1948 | goto out_free_fragment; |
| 1949 | } |
| 1950 | |
| 1951 | /* Add this fragment. */ |
| 1952 | share_state->fragments[share_state->fragment_count] = fragment; |
| 1953 | share_state->fragment_constituent_counts[share_state->fragment_count] = |
| 1954 | fragment_length / sizeof(struct ffa_memory_region_constituent); |
| 1955 | share_state->fragment_count++; |
| 1956 | |
| 1957 | /* Check whether the memory send operation is now ready to complete. */ |
| 1958 | if (share_state_sending_complete(share_states, share_state)) { |
Andrew Walbran | 37c574e | 2020-06-03 11:45:46 +0100 | [diff] [blame] | 1959 | struct mpool local_page_pool; |
| 1960 | uint32_t orig_from_mode; |
| 1961 | |
| 1962 | /* |
| 1963 | * Use a local page pool so that we can roll back if necessary. |
| 1964 | */ |
| 1965 | mpool_init_with_fallback(&local_page_pool, page_pool); |
| 1966 | |
Andrew Walbran | ca808b1 | 2020-05-15 17:22:28 +0100 | [diff] [blame] | 1967 | ret = ffa_memory_send_complete(from_locked, share_states, |
Andrew Walbran | 37c574e | 2020-06-03 11:45:46 +0100 | [diff] [blame] | 1968 | share_state, &local_page_pool, |
| 1969 | &orig_from_mode); |
Andrew Walbran | ca808b1 | 2020-05-15 17:22:28 +0100 | [diff] [blame] | 1970 | |
| 1971 | if (ret.func == FFA_SUCCESS_32) { |
| 1972 | /* |
| 1973 | * Forward final fragment on to the TEE so that |
| 1974 | * it can complete the memory sending operation. |
| 1975 | */ |
| 1976 | ret = memory_send_continue_tee_forward( |
| 1977 | to_locked, from_locked.vm->id, fragment, |
| 1978 | fragment_length, handle); |
| 1979 | |
| 1980 | if (ret.func != FFA_SUCCESS_32) { |
| 1981 | /* |
| 1982 | * The error will be passed on to the caller, |
| 1983 | * but log it here too. |
| 1984 | */ |
| 1985 | dlog_verbose( |
| 1986 | "TEE didn't successfully complete " |
| 1987 | "memory send operation; returned %#x " |
Andrew Walbran | 37c574e | 2020-06-03 11:45:46 +0100 | [diff] [blame] | 1988 | "(%d). Rolling back.\n", |
Andrew Walbran | ca808b1 | 2020-05-15 17:22:28 +0100 | [diff] [blame] | 1989 | ret.func, ret.arg2); |
Andrew Walbran | 37c574e | 2020-06-03 11:45:46 +0100 | [diff] [blame] | 1990 | |
| 1991 | /* |
| 1992 | * The TEE failed to complete the send |
| 1993 | * operation, so roll back the page table update |
| 1994 | * for the VM. This can't fail because it won't |
| 1995 | * try to allocate more memory than was freed |
| 1996 | * into the `local_page_pool` by |
| 1997 | * `ffa_send_check_update` in the initial |
| 1998 | * update. |
| 1999 | */ |
| 2000 | CHECK(ffa_region_group_identity_map( |
| 2001 | from_locked, share_state->fragments, |
| 2002 | share_state |
| 2003 | ->fragment_constituent_counts, |
| 2004 | share_state->fragment_count, |
| 2005 | orig_from_mode, &local_page_pool, |
| 2006 | true)); |
Andrew Walbran | ca808b1 | 2020-05-15 17:22:28 +0100 | [diff] [blame] | 2007 | } |
Andrew Walbran | 37c574e | 2020-06-03 11:45:46 +0100 | [diff] [blame] | 2008 | |
Andrew Walbran | ca808b1 | 2020-05-15 17:22:28 +0100 | [diff] [blame] | 2009 | /* Free share state. */ |
| 2010 | share_state_free(share_states, share_state, page_pool); |
| 2011 | } else { |
| 2012 | /* Abort sending to TEE. */ |
| 2013 | struct ffa_value tee_ret = |
Olivier Deprez | 112d2b5 | 2020-09-30 07:39:23 +0200 | [diff] [blame] | 2014 | arch_other_world_call((struct ffa_value){ |
Andrew Walbran | ca808b1 | 2020-05-15 17:22:28 +0100 | [diff] [blame] | 2015 | .func = FFA_MEM_RECLAIM_32, |
| 2016 | .arg1 = (uint32_t)handle, |
| 2017 | .arg2 = (uint32_t)(handle >> 32)}); |
| 2018 | |
| 2019 | if (tee_ret.func != FFA_SUCCESS_32) { |
| 2020 | /* |
| 2021 | * Nothing we can do if TEE doesn't abort |
| 2022 | * properly, just log it. |
| 2023 | */ |
| 2024 | dlog_verbose( |
| 2025 | "TEE didn't successfully abort failed " |
| 2026 | "memory send operation; returned %#x " |
| 2027 | "(%d).\n", |
| 2028 | tee_ret.func, tee_ret.arg2); |
| 2029 | } |
| 2030 | /* |
| 2031 | * We don't need to free the share state in this case |
| 2032 | * because ffa_memory_send_complete does that already. |
| 2033 | */ |
| 2034 | } |
Andrew Walbran | 37c574e | 2020-06-03 11:45:46 +0100 | [diff] [blame] | 2035 | |
| 2036 | mpool_fini(&local_page_pool); |
Andrew Walbran | ca808b1 | 2020-05-15 17:22:28 +0100 | [diff] [blame] | 2037 | } else { |
| 2038 | uint32_t next_fragment_offset = |
| 2039 | share_state_next_fragment_offset(share_states, |
| 2040 | share_state); |
| 2041 | |
| 2042 | ret = memory_send_continue_tee_forward( |
| 2043 | to_locked, from_locked.vm->id, fragment, |
| 2044 | fragment_length, handle); |
| 2045 | |
| 2046 | if (ret.func != FFA_MEM_FRAG_RX_32 || |
| 2047 | ffa_frag_handle(ret) != handle || |
| 2048 | ret.arg3 != next_fragment_offset || |
| 2049 | ffa_frag_sender(ret) != from_locked.vm->id) { |
| 2050 | dlog_verbose( |
| 2051 | "Got unexpected result from forwarding " |
| 2052 | "FFA_MEM_FRAG_TX to TEE: %#x (handle %#x, " |
| 2053 | "offset %d, sender %d); expected " |
| 2054 | "FFA_MEM_FRAG_RX (handle %#x, offset %d, " |
| 2055 | "sender %d).\n", |
| 2056 | ret.func, ffa_frag_handle(ret), ret.arg3, |
| 2057 | ffa_frag_sender(ret), handle, |
| 2058 | next_fragment_offset, from_locked.vm->id); |
| 2059 | /* Free share state. */ |
| 2060 | share_state_free(share_states, share_state, page_pool); |
| 2061 | ret = ffa_error(FFA_INVALID_PARAMETERS); |
| 2062 | goto out; |
| 2063 | } |
| 2064 | |
| 2065 | ret = (struct ffa_value){.func = FFA_MEM_FRAG_RX_32, |
| 2066 | .arg1 = (uint32_t)handle, |
| 2067 | .arg2 = (uint32_t)(handle >> 32), |
| 2068 | .arg3 = next_fragment_offset}; |
| 2069 | } |
| 2070 | goto out; |
| 2071 | |
| 2072 | out_free_fragment: |
| 2073 | mpool_free(page_pool, fragment); |
| 2074 | |
| 2075 | out: |
| 2076 | share_states_unlock(&share_states); |
| 2077 | return ret; |
| 2078 | } |
| 2079 | |
| 2080 | /** Clean up after the receiver has finished retrieving a memory region. */ |
| 2081 | static void ffa_memory_retrieve_complete( |
| 2082 | struct share_states_locked share_states, |
| 2083 | struct ffa_memory_share_state *share_state, struct mpool *page_pool) |
| 2084 | { |
| 2085 | if (share_state->share_func == FFA_MEM_DONATE_32) { |
| 2086 | /* |
| 2087 | * Memory that has been donated can't be relinquished, |
| 2088 | * so no need to keep the share state around. |
| 2089 | */ |
| 2090 | share_state_free(share_states, share_state, page_pool); |
| 2091 | dlog_verbose("Freed share state for donate.\n"); |
| 2092 | } |
| 2093 | } |
| 2094 | |
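|      | /** |
|      |  * Handles an FFA_MEM_RETRIEVE_REQ from a receiver: validates the retrieve |
|      |  * request against the stored share state, maps the memory into the receiver's |
|      |  * stage-2 page tables and writes the first fragment of the |
|      |  * FFA_MEM_RETRIEVE_RESP descriptor to the receiver's RX buffer. |
|      |  */ |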
Andrew Walbran | b5ab43c | 2020-04-30 11:32:54 +0100 | [diff] [blame] | 2095 | struct ffa_value ffa_memory_retrieve(struct vm_locked to_locked, |
| 2096 | struct ffa_memory_region *retrieve_request, |
Andrew Walbran | 130a8ae | 2020-05-15 16:27:15 +0100 | [diff] [blame] | 2097 | uint32_t retrieve_request_length, |
Andrew Walbran | b5ab43c | 2020-04-30 11:32:54 +0100 | [diff] [blame] | 2098 | struct mpool *page_pool) |
Andrew Walbran | 5de9c3d | 2020-02-10 13:35:29 +0000 | [diff] [blame] | 2099 | { |
Andrew Walbran | 130a8ae | 2020-05-15 16:27:15 +0100 | [diff] [blame] | 2100 | uint32_t expected_retrieve_request_length = |
Andrew Walbran | b5ab43c | 2020-04-30 11:32:54 +0100 | [diff] [blame] | 2101 | sizeof(struct ffa_memory_region) + |
Andrew Walbran | a65a132 | 2020-04-06 19:32:32 +0100 | [diff] [blame] | 2102 | retrieve_request->receiver_count * |
Andrew Walbran | b5ab43c | 2020-04-30 11:32:54 +0100 | [diff] [blame] | 2103 | sizeof(struct ffa_memory_access); |
| 2104 | ffa_memory_handle_t handle = retrieve_request->handle; |
| 2105 | ffa_memory_region_flags_t transaction_type = |
Andrew Walbran | a65a132 | 2020-04-06 19:32:32 +0100 | [diff] [blame] | 2106 | retrieve_request->flags & |
Andrew Walbran | b5ab43c | 2020-04-30 11:32:54 +0100 | [diff] [blame] | 2107 | FFA_MEMORY_REGION_TRANSACTION_TYPE_MASK; |
| 2108 | struct ffa_memory_region *memory_region; |
| 2109 | ffa_memory_access_permissions_t sent_permissions; |
| 2110 | enum ffa_data_access sent_data_access; |
| 2111 | enum ffa_instruction_access sent_instruction_access; |
| 2112 | ffa_memory_access_permissions_t requested_permissions; |
| 2113 | enum ffa_data_access requested_data_access; |
| 2114 | enum ffa_instruction_access requested_instruction_access; |
| 2115 | ffa_memory_access_permissions_t permissions; |
Andrew Walbran | 5de9c3d | 2020-02-10 13:35:29 +0000 | [diff] [blame] | 2116 | uint32_t memory_to_attributes; |
Andrew Walbran | 5de9c3d | 2020-02-10 13:35:29 +0000 | [diff] [blame] | 2117 | struct share_states_locked share_states; |
Andrew Walbran | b5ab43c | 2020-04-30 11:32:54 +0100 | [diff] [blame] | 2118 | struct ffa_memory_share_state *share_state; |
| 2119 | struct ffa_value ret; |
Andrew Walbran | ca808b1 | 2020-05-15 17:22:28 +0100 | [diff] [blame] | 2120 | struct ffa_composite_memory_region *composite; |
| 2121 | uint32_t total_length; |
| 2122 | uint32_t fragment_length; |
Andrew Walbran | 5de9c3d | 2020-02-10 13:35:29 +0000 | [diff] [blame] | 2123 | |
| 2124 | dump_share_states(); |
| 2125 | |
Andrew Walbran | 130a8ae | 2020-05-15 16:27:15 +0100 | [diff] [blame] | 2126 | if (retrieve_request_length != expected_retrieve_request_length) { |
Andrew Walbran | 5de9c3d | 2020-02-10 13:35:29 +0000 | [diff] [blame] | 2127 | dlog_verbose( |
Andrew Walbran | b5ab43c | 2020-04-30 11:32:54 +0100 | [diff] [blame] | 2128 | "Invalid length for FFA_MEM_RETRIEVE_REQ, expected %d " |
Andrew Walbran | 5de9c3d | 2020-02-10 13:35:29 +0000 | [diff] [blame] | 2129 | "but was %d.\n", |
Andrew Walbran | 130a8ae | 2020-05-15 16:27:15 +0100 | [diff] [blame] | 2130 | expected_retrieve_request_length, |
| 2131 | retrieve_request_length); |
Andrew Walbran | b5ab43c | 2020-04-30 11:32:54 +0100 | [diff] [blame] | 2132 | return ffa_error(FFA_INVALID_PARAMETERS); |
Andrew Walbran | 5de9c3d | 2020-02-10 13:35:29 +0000 | [diff] [blame] | 2133 | } |
| 2134 | |
Andrew Walbran | a65a132 | 2020-04-06 19:32:32 +0100 | [diff] [blame] | 2135 | if (retrieve_request->receiver_count != 1) { |
| 2136 | dlog_verbose( |
| 2137 | "Multi-way memory sharing not supported (got %d " |
Andrew Walbran | b5ab43c | 2020-04-30 11:32:54 +0100 | [diff] [blame] | 2138 |                         "receiver descriptors on FFA_MEM_RETRIEVE_REQ, " |
Andrew Walbran | a65a132 | 2020-04-06 19:32:32 +0100 | [diff] [blame] | 2139 | "expected 1).\n", |
| 2140 | retrieve_request->receiver_count); |
Andrew Walbran | b5ab43c | 2020-04-30 11:32:54 +0100 | [diff] [blame] | 2141 | return ffa_error(FFA_INVALID_PARAMETERS); |
Andrew Walbran | a65a132 | 2020-04-06 19:32:32 +0100 | [diff] [blame] | 2142 | } |
| 2143 | |
Andrew Walbran | 5de9c3d | 2020-02-10 13:35:29 +0000 | [diff] [blame] | 2144 | share_states = share_states_lock(); |
| 2145 | if (!get_share_state(share_states, handle, &share_state)) { |
Andrew Walbran | b5ab43c | 2020-04-30 11:32:54 +0100 | [diff] [blame] | 2146 | dlog_verbose("Invalid handle %#x for FFA_MEM_RETRIEVE_REQ.\n", |
Andrew Walbran | 5de9c3d | 2020-02-10 13:35:29 +0000 | [diff] [blame] | 2147 | handle); |
Andrew Walbran | b5ab43c | 2020-04-30 11:32:54 +0100 | [diff] [blame] | 2148 | ret = ffa_error(FFA_INVALID_PARAMETERS); |
Andrew Walbran | 5de9c3d | 2020-02-10 13:35:29 +0000 | [diff] [blame] | 2149 | goto out; |
| 2150 | } |
| 2151 | |
Andrew Walbran | a65a132 | 2020-04-06 19:32:32 +0100 | [diff] [blame] | 2152 | memory_region = share_state->memory_region; |
| 2153 | CHECK(memory_region != NULL); |
| 2154 | |
| 2155 | /* |
| 2156 | * Check that the transaction type expected by the receiver is correct, |
| 2157 | * if it has been specified. |
| 2158 | */ |
| 2159 | if (transaction_type != |
Andrew Walbran | b5ab43c | 2020-04-30 11:32:54 +0100 | [diff] [blame] | 2160 | FFA_MEMORY_REGION_TRANSACTION_TYPE_UNSPECIFIED && |
Andrew Walbran | a65a132 | 2020-04-06 19:32:32 +0100 | [diff] [blame] | 2161 | transaction_type != (memory_region->flags & |
Andrew Walbran | b5ab43c | 2020-04-30 11:32:54 +0100 | [diff] [blame] | 2162 | FFA_MEMORY_REGION_TRANSACTION_TYPE_MASK)) { |
Andrew Walbran | 5de9c3d | 2020-02-10 13:35:29 +0000 | [diff] [blame] | 2163 | dlog_verbose( |
| 2164 | "Incorrect transaction type %#x for " |
Andrew Walbran | b5ab43c | 2020-04-30 11:32:54 +0100 | [diff] [blame] | 2165 | "FFA_MEM_RETRIEVE_REQ, expected %#x for handle %#x.\n", |
Andrew Walbran | a65a132 | 2020-04-06 19:32:32 +0100 | [diff] [blame] | 2166 | transaction_type, |
| 2167 | memory_region->flags & |
Andrew Walbran | b5ab43c | 2020-04-30 11:32:54 +0100 | [diff] [blame] | 2168 | FFA_MEMORY_REGION_TRANSACTION_TYPE_MASK, |
Andrew Walbran | 5de9c3d | 2020-02-10 13:35:29 +0000 | [diff] [blame] | 2169 | handle); |
Andrew Walbran | b5ab43c | 2020-04-30 11:32:54 +0100 | [diff] [blame] | 2170 | ret = ffa_error(FFA_INVALID_PARAMETERS); |
Andrew Walbran | 5de9c3d | 2020-02-10 13:35:29 +0000 | [diff] [blame] | 2171 | goto out; |
| 2172 | } |
| 2173 | |
Andrew Walbran | 5de9c3d | 2020-02-10 13:35:29 +0000 | [diff] [blame] | 2174 | if (retrieve_request->sender != memory_region->sender) { |
| 2175 | dlog_verbose( |
Andrew Walbran | b5ab43c | 2020-04-30 11:32:54 +0100 | [diff] [blame] | 2176 | "Incorrect sender ID %d for FFA_MEM_RETRIEVE_REQ, " |
Andrew Walbran | 5de9c3d | 2020-02-10 13:35:29 +0000 | [diff] [blame] | 2177 | "expected %d for handle %#x.\n", |
| 2178 | retrieve_request->sender, memory_region->sender, |
| 2179 | handle); |
Andrew Walbran | b5ab43c | 2020-04-30 11:32:54 +0100 | [diff] [blame] | 2180 | ret = ffa_error(FFA_INVALID_PARAMETERS); |
Andrew Walbran | 5de9c3d | 2020-02-10 13:35:29 +0000 | [diff] [blame] | 2181 | goto out; |
| 2182 | } |
| 2183 | |
| 2184 | if (retrieve_request->tag != memory_region->tag) { |
| 2185 | dlog_verbose( |
Andrew Walbran | b5ab43c | 2020-04-30 11:32:54 +0100 | [diff] [blame] | 2186 | "Incorrect tag %d for FFA_MEM_RETRIEVE_REQ, expected " |
Andrew Walbran | 5de9c3d | 2020-02-10 13:35:29 +0000 | [diff] [blame] | 2187 | "%d for handle %#x.\n", |
| 2188 | retrieve_request->tag, memory_region->tag, handle); |
Andrew Walbran | b5ab43c | 2020-04-30 11:32:54 +0100 | [diff] [blame] | 2189 | ret = ffa_error(FFA_INVALID_PARAMETERS); |
Andrew Walbran | 5de9c3d | 2020-02-10 13:35:29 +0000 | [diff] [blame] | 2190 | goto out; |
| 2191 | } |
| 2192 | |
Andrew Walbran | a65a132 | 2020-04-06 19:32:32 +0100 | [diff] [blame] | 2193 | if (retrieve_request->receivers[0].receiver_permissions.receiver != |
| 2194 | to_locked.vm->id) { |
| 2195 | dlog_verbose( |
| 2196 | "Retrieve request receiver VM ID %d didn't match " |
Andrew Walbran | b5ab43c | 2020-04-30 11:32:54 +0100 | [diff] [blame] | 2197 | "caller of FFA_MEM_RETRIEVE_REQ.\n", |
Andrew Walbran | a65a132 | 2020-04-06 19:32:32 +0100 | [diff] [blame] | 2198 | retrieve_request->receivers[0] |
| 2199 | .receiver_permissions.receiver); |
Andrew Walbran | b5ab43c | 2020-04-30 11:32:54 +0100 | [diff] [blame] | 2200 | ret = ffa_error(FFA_INVALID_PARAMETERS); |
Andrew Walbran | a65a132 | 2020-04-06 19:32:32 +0100 | [diff] [blame] | 2201 | goto out; |
| 2202 | } |
| 2203 | |
| 2204 | if (memory_region->receivers[0].receiver_permissions.receiver != |
| 2205 | to_locked.vm->id) { |
Andrew Walbran | 5de9c3d | 2020-02-10 13:35:29 +0000 | [diff] [blame] | 2206 | dlog_verbose( |
Andrew Walbran | f07f04d | 2020-05-01 18:09:00 +0100 | [diff] [blame] | 2207 | "Incorrect receiver VM ID %d for FFA_MEM_RETRIEVE_REQ, " |
| 2208 | "expected %d for handle %#x.\n", |
Andrew Walbran | a65a132 | 2020-04-06 19:32:32 +0100 | [diff] [blame] | 2209 | to_locked.vm->id, |
| 2210 | memory_region->receivers[0] |
| 2211 | .receiver_permissions.receiver, |
Andrew Walbran | 5de9c3d | 2020-02-10 13:35:29 +0000 | [diff] [blame] | 2212 | handle); |
Andrew Walbran | b5ab43c | 2020-04-30 11:32:54 +0100 | [diff] [blame] | 2213 | ret = ffa_error(FFA_INVALID_PARAMETERS); |
Andrew Walbran | 5de9c3d | 2020-02-10 13:35:29 +0000 | [diff] [blame] | 2214 | goto out; |
| 2215 | } |
| 2216 | |
Andrew Walbran | ca808b1 | 2020-05-15 17:22:28 +0100 | [diff] [blame] | 2217 | if (!share_state->sending_complete) { |
| 2218 | dlog_verbose( |
| 2219 | "Memory with handle %#x not fully sent, can't " |
| 2220 | "retrieve.\n", |
| 2221 | handle); |
| 2222 | ret = ffa_error(FFA_INVALID_PARAMETERS); |
| 2223 | goto out; |
| 2224 | } |
| 2225 | |
| 2226 | if (share_state->retrieved_fragment_count[0] != 0) { |
Andrew Walbran | 5de9c3d | 2020-02-10 13:35:29 +0000 | [diff] [blame] | 2227 | dlog_verbose("Memory with handle %#x already retrieved.\n", |
| 2228 | handle); |
Andrew Walbran | b5ab43c | 2020-04-30 11:32:54 +0100 | [diff] [blame] | 2229 | ret = ffa_error(FFA_DENIED); |
Andrew Walbran | 5de9c3d | 2020-02-10 13:35:29 +0000 | [diff] [blame] | 2230 | goto out; |
| 2231 | } |
| 2232 | |
Andrew Walbran | a65a132 | 2020-04-06 19:32:32 +0100 | [diff] [blame] | 2233 | if (retrieve_request->receivers[0].composite_memory_region_offset != |
| 2234 | 0) { |
Andrew Walbran | 5de9c3d | 2020-02-10 13:35:29 +0000 | [diff] [blame] | 2235 | dlog_verbose( |
| 2236 | "Retriever specified address ranges not supported (got " |
Andrew Walbran | f07f04d | 2020-05-01 18:09:00 +0100 | [diff] [blame] | 2237 | "offset %d).\n", |
Andrew Walbran | a65a132 | 2020-04-06 19:32:32 +0100 | [diff] [blame] | 2238 | retrieve_request->receivers[0] |
| 2239 | .composite_memory_region_offset); |
Andrew Walbran | b5ab43c | 2020-04-30 11:32:54 +0100 | [diff] [blame] | 2240 | ret = ffa_error(FFA_INVALID_PARAMETERS); |
Andrew Walbran | 5de9c3d | 2020-02-10 13:35:29 +0000 | [diff] [blame] | 2241 | goto out; |
| 2242 | } |
| 2243 | |
Andrew Walbran | a65a132 | 2020-04-06 19:32:32 +0100 | [diff] [blame] | 2244 | /* |
| 2245 | * Check permissions from sender against permissions requested by |
| 2246 | * receiver. |
| 2247 | */ |
| 2248 | /* TODO: Check attributes too. */ |
| 2249 | sent_permissions = |
| 2250 | memory_region->receivers[0].receiver_permissions.permissions; |
Andrew Walbran | b5ab43c | 2020-04-30 11:32:54 +0100 | [diff] [blame] | 2251 | sent_data_access = ffa_get_data_access_attr(sent_permissions); |
Andrew Walbran | a65a132 | 2020-04-06 19:32:32 +0100 | [diff] [blame] | 2252 | sent_instruction_access = |
Andrew Walbran | b5ab43c | 2020-04-30 11:32:54 +0100 | [diff] [blame] | 2253 | ffa_get_instruction_access_attr(sent_permissions); |
Andrew Walbran | a65a132 | 2020-04-06 19:32:32 +0100 | [diff] [blame] | 2254 | requested_permissions = |
| 2255 | retrieve_request->receivers[0].receiver_permissions.permissions; |
Andrew Walbran | b5ab43c | 2020-04-30 11:32:54 +0100 | [diff] [blame] | 2256 | requested_data_access = ffa_get_data_access_attr(requested_permissions); |
Andrew Walbran | a65a132 | 2020-04-06 19:32:32 +0100 | [diff] [blame] | 2257 | requested_instruction_access = |
Andrew Walbran | b5ab43c | 2020-04-30 11:32:54 +0100 | [diff] [blame] | 2258 | ffa_get_instruction_access_attr(requested_permissions); |
Andrew Walbran | a65a132 | 2020-04-06 19:32:32 +0100 | [diff] [blame] | 2259 | permissions = 0; |
| 2260 | switch (sent_data_access) { |
Andrew Walbran | b5ab43c | 2020-04-30 11:32:54 +0100 | [diff] [blame] | 2261 | case FFA_DATA_ACCESS_NOT_SPECIFIED: |
| 2262 | case FFA_DATA_ACCESS_RW: |
| 2263 | if (requested_data_access == FFA_DATA_ACCESS_NOT_SPECIFIED || |
| 2264 | requested_data_access == FFA_DATA_ACCESS_RW) { |
| 2265 | ffa_set_data_access_attr(&permissions, |
| 2266 | FFA_DATA_ACCESS_RW); |
Andrew Walbran | a65a132 | 2020-04-06 19:32:32 +0100 | [diff] [blame] | 2267 | break; |
| 2268 | } |
| 2269 | /* Intentional fall-through. */ |
Andrew Walbran | b5ab43c | 2020-04-30 11:32:54 +0100 | [diff] [blame] | 2270 | case FFA_DATA_ACCESS_RO: |
| 2271 | if (requested_data_access == FFA_DATA_ACCESS_NOT_SPECIFIED || |
| 2272 | requested_data_access == FFA_DATA_ACCESS_RO) { |
| 2273 | ffa_set_data_access_attr(&permissions, |
| 2274 | FFA_DATA_ACCESS_RO); |
Andrew Walbran | a65a132 | 2020-04-06 19:32:32 +0100 | [diff] [blame] | 2275 | break; |
| 2276 | } |
| 2277 | dlog_verbose( |
| 2278 | "Invalid data access requested; sender specified " |
| 2279 | "permissions %#x but receiver requested %#x.\n", |
| 2280 | sent_permissions, requested_permissions); |
Andrew Walbran | b5ab43c | 2020-04-30 11:32:54 +0100 | [diff] [blame] | 2281 | ret = ffa_error(FFA_DENIED); |
Andrew Walbran | a65a132 | 2020-04-06 19:32:32 +0100 | [diff] [blame] | 2282 | goto out; |
Andrew Walbran | b5ab43c | 2020-04-30 11:32:54 +0100 | [diff] [blame] | 2283 | case FFA_DATA_ACCESS_RESERVED: |
| 2284 | panic("Got unexpected FFA_DATA_ACCESS_RESERVED. Should be " |
Andrew Walbran | a65a132 | 2020-04-06 19:32:32 +0100 | [diff] [blame] | 2285 | "checked before this point."); |
| 2286 | } |
| 2287 | switch (sent_instruction_access) { |
Andrew Walbran | b5ab43c | 2020-04-30 11:32:54 +0100 | [diff] [blame] | 2288 | case FFA_INSTRUCTION_ACCESS_NOT_SPECIFIED: |
| 2289 | case FFA_INSTRUCTION_ACCESS_X: |
Andrew Walbran | a65a132 | 2020-04-06 19:32:32 +0100 | [diff] [blame] | 2290 | if (requested_instruction_access == |
Andrew Walbran | b5ab43c | 2020-04-30 11:32:54 +0100 | [diff] [blame] | 2291 | FFA_INSTRUCTION_ACCESS_NOT_SPECIFIED || |
| 2292 | requested_instruction_access == FFA_INSTRUCTION_ACCESS_X) { |
| 2293 | ffa_set_instruction_access_attr( |
| 2294 | &permissions, FFA_INSTRUCTION_ACCESS_X); |
Andrew Walbran | a65a132 | 2020-04-06 19:32:32 +0100 | [diff] [blame] | 2295 | break; |
| 2296 | } |
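|      |                 /* Intentional fall-through. */ |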
Andrew Walbran | b5ab43c | 2020-04-30 11:32:54 +0100 | [diff] [blame] | 2297 | case FFA_INSTRUCTION_ACCESS_NX: |
Andrew Walbran | a65a132 | 2020-04-06 19:32:32 +0100 | [diff] [blame] | 2298 | if (requested_instruction_access == |
Andrew Walbran | b5ab43c | 2020-04-30 11:32:54 +0100 | [diff] [blame] | 2299 | FFA_INSTRUCTION_ACCESS_NOT_SPECIFIED || |
| 2300 | requested_instruction_access == FFA_INSTRUCTION_ACCESS_NX) { |
| 2301 | ffa_set_instruction_access_attr( |
| 2302 | &permissions, FFA_INSTRUCTION_ACCESS_NX); |
Andrew Walbran | a65a132 | 2020-04-06 19:32:32 +0100 | [diff] [blame] | 2303 | break; |
| 2304 | } |
| 2305 | dlog_verbose( |
| 2306 | "Invalid instruction access requested; sender " |
Andrew Walbran | f07f04d | 2020-05-01 18:09:00 +0100 | [diff] [blame] | 2307 | "specified permissions %#x but receiver requested " |
| 2308 | "%#x.\n", |
Andrew Walbran | a65a132 | 2020-04-06 19:32:32 +0100 | [diff] [blame] | 2309 | sent_permissions, requested_permissions); |
Andrew Walbran | b5ab43c | 2020-04-30 11:32:54 +0100 | [diff] [blame] | 2310 | ret = ffa_error(FFA_DENIED); |
Andrew Walbran | a65a132 | 2020-04-06 19:32:32 +0100 | [diff] [blame] | 2311 | goto out; |
Andrew Walbran | b5ab43c | 2020-04-30 11:32:54 +0100 | [diff] [blame] | 2312 | case FFA_INSTRUCTION_ACCESS_RESERVED: |
| 2313 | panic("Got unexpected FFA_INSTRUCTION_ACCESS_RESERVED. Should " |
Andrew Walbran | a65a132 | 2020-04-06 19:32:32 +0100 | [diff] [blame] | 2314 | "be checked before this point."); |
| 2315 | } |
J-Alves | 7cd5eb3 | 2020-10-16 19:06:10 +0100 | [diff] [blame^] | 2316 | memory_to_attributes = ffa_memory_permissions_to_mode( |
| 2317 | permissions, share_state->sender_orig_mode); |
Andrew Walbran | 996d1d1 | 2020-05-27 14:08:43 +0100 | [diff] [blame] | 2318 | ret = ffa_retrieve_check_update( |
Andrew Walbran | ca808b1 | 2020-05-15 17:22:28 +0100 | [diff] [blame] | 2319 | to_locked, share_state->fragments, |
| 2320 | share_state->fragment_constituent_counts, |
| 2321 | share_state->fragment_count, memory_to_attributes, |
Andrew Walbran | 996d1d1 | 2020-05-27 14:08:43 +0100 | [diff] [blame] | 2322 | share_state->share_func, false, page_pool); |
Andrew Walbran | b5ab43c | 2020-04-30 11:32:54 +0100 | [diff] [blame] | 2323 | if (ret.func != FFA_SUCCESS_32) { |
Andrew Walbran | 5de9c3d | 2020-02-10 13:35:29 +0000 | [diff] [blame] | 2324 | goto out; |
| 2325 | } |
| 2326 | |
| 2327 | /* |
| 2328 | * Copy response to RX buffer of caller and deliver the message. This |
| 2329 | * must be done before the share_state is (possibly) freed. |
| 2330 | */ |
Andrew Walbran | a65a132 | 2020-04-06 19:32:32 +0100 | [diff] [blame] | 2331 | /* TODO: combine attributes from sender and request. */ |
Andrew Walbran | ca808b1 | 2020-05-15 17:22:28 +0100 | [diff] [blame] | 2332 | composite = ffa_memory_region_get_composite(memory_region, 0); |
| 2333 | /* |
| 2334 | * Constituents which we received in the first fragment should always |
| 2335 | * fit in the first fragment we are sending, because the header is the |
| 2336 | * same size in both cases and we have a fixed message buffer size. So |
| 2337 | * `ffa_retrieved_memory_region_init` should never fail. |
| 2338 | */ |
| 2339 | CHECK(ffa_retrieved_memory_region_init( |
Andrew Walbran | a65a132 | 2020-04-06 19:32:32 +0100 | [diff] [blame] | 2340 | to_locked.vm->mailbox.recv, HF_MAILBOX_SIZE, |
| 2341 | memory_region->sender, memory_region->attributes, |
| 2342 | memory_region->flags, handle, to_locked.vm->id, permissions, |
Andrew Walbran | ca808b1 | 2020-05-15 17:22:28 +0100 | [diff] [blame] | 2343 | composite->page_count, composite->constituent_count, |
| 2344 | share_state->fragments[0], |
| 2345 | share_state->fragment_constituent_counts[0], &total_length, |
| 2346 | &fragment_length)); |
| 2347 | to_locked.vm->mailbox.recv_size = fragment_length; |
Andrew Walbran | 5de9c3d | 2020-02-10 13:35:29 +0000 | [diff] [blame] | 2348 | to_locked.vm->mailbox.recv_sender = HF_HYPERVISOR_VM_ID; |
Andrew Walbran | b5ab43c | 2020-04-30 11:32:54 +0100 | [diff] [blame] | 2349 | to_locked.vm->mailbox.recv_func = FFA_MEM_RETRIEVE_RESP_32; |
Andrew Walbran | 5de9c3d | 2020-02-10 13:35:29 +0000 | [diff] [blame] | 2350 | to_locked.vm->mailbox.state = MAILBOX_STATE_READ; |
| 2351 | |
Andrew Walbran | ca808b1 | 2020-05-15 17:22:28 +0100 | [diff] [blame] | 2352 | share_state->retrieved_fragment_count[0] = 1; |
| 2353 | if (share_state->retrieved_fragment_count[0] == |
| 2354 | share_state->fragment_count) { |
| 2355 | ffa_memory_retrieve_complete(share_states, share_state, |
| 2356 | page_pool); |
Andrew Walbran | 5de9c3d | 2020-02-10 13:35:29 +0000 | [diff] [blame] | 2357 | } |
| 2358 | |
Andrew Walbran | b5ab43c | 2020-04-30 11:32:54 +0100 | [diff] [blame] | 2359 | ret = (struct ffa_value){.func = FFA_MEM_RETRIEVE_RESP_32, |
Andrew Walbran | ca808b1 | 2020-05-15 17:22:28 +0100 | [diff] [blame] | 2360 | .arg1 = total_length, |
| 2361 | .arg2 = fragment_length}; |
| 2362 | |
| 2363 | out: |
| 2364 | share_states_unlock(&share_states); |
| 2365 | dump_share_states(); |
| 2366 | return ret; |
| 2367 | } |
| 2368 | |
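|      | /** |
|      |  * Continues a memory retrieve operation in response to FFA_MEM_FRAG_RX from a |
|      |  * receiver: validates the requested fragment offset, writes the next fragment |
|      |  * of constituents to the receiver's RX buffer and responds with |
|      |  * FFA_MEM_FRAG_TX. |
|      |  */ |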
| 2369 | struct ffa_value ffa_memory_retrieve_continue(struct vm_locked to_locked, |
| 2370 | ffa_memory_handle_t handle, |
| 2371 | uint32_t fragment_offset, |
| 2372 | struct mpool *page_pool) |
| 2373 | { |
| 2374 | struct ffa_memory_region *memory_region; |
| 2375 | struct share_states_locked share_states; |
| 2376 | struct ffa_memory_share_state *share_state; |
| 2377 | struct ffa_value ret; |
| 2378 | uint32_t fragment_index; |
| 2379 | uint32_t retrieved_constituents_count; |
| 2380 | uint32_t i; |
| 2381 | uint32_t expected_fragment_offset; |
| 2382 | uint32_t remaining_constituent_count; |
| 2383 | uint32_t fragment_length; |
| 2384 | |
| 2385 | dump_share_states(); |
| 2386 | |
| 2387 | share_states = share_states_lock(); |
| 2388 | if (!get_share_state(share_states, handle, &share_state)) { |
| 2389 | dlog_verbose("Invalid handle %#x for FFA_MEM_FRAG_RX.\n", |
| 2390 | handle); |
| 2391 | ret = ffa_error(FFA_INVALID_PARAMETERS); |
| 2392 | goto out; |
| 2393 | } |
| 2394 | |
| 2395 | memory_region = share_state->memory_region; |
| 2396 | CHECK(memory_region != NULL); |
| 2397 | |
| 2398 | if (memory_region->receivers[0].receiver_permissions.receiver != |
| 2399 | to_locked.vm->id) { |
| 2400 | dlog_verbose( |
| 2401 | "Caller of FFA_MEM_FRAG_RX (%d) is not receiver (%d) " |
| 2402 | "of handle %#x.\n", |
| 2403 | to_locked.vm->id, |
| 2404 | memory_region->receivers[0] |
| 2405 | .receiver_permissions.receiver, |
| 2406 | handle); |
| 2407 | ret = ffa_error(FFA_INVALID_PARAMETERS); |
| 2408 | goto out; |
| 2409 | } |
| 2410 | |
| 2411 | if (!share_state->sending_complete) { |
| 2412 | dlog_verbose( |
| 2413 | "Memory with handle %#x not fully sent, can't " |
| 2414 | "retrieve.\n", |
| 2415 | handle); |
| 2416 | ret = ffa_error(FFA_INVALID_PARAMETERS); |
| 2417 | goto out; |
| 2418 | } |
| 2419 | |
| 2420 | if (share_state->retrieved_fragment_count[0] == 0 || |
| 2421 | share_state->retrieved_fragment_count[0] >= |
| 2422 | share_state->fragment_count) { |
| 2423 | dlog_verbose( |
| 2424 | "Retrieval of memory with handle %#x not yet started " |
| 2425 | "or already completed (%d/%d fragments retrieved).\n", |
| 2426 | handle, share_state->retrieved_fragment_count[0], |
| 2427 | share_state->fragment_count); |
| 2428 | ret = ffa_error(FFA_INVALID_PARAMETERS); |
| 2429 | goto out; |
| 2430 | } |
| 2431 | |
| 2432 | fragment_index = share_state->retrieved_fragment_count[0]; |
| 2433 | |
| 2434 | /* |
| 2435 | * Check that the given fragment offset is correct by counting how many |
| 2436 | * constituents were in the fragments previously sent. |
| 2437 | */ |
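|      |         /* |
|      |          * Illustrative example: if the fragments retrieved so far held 10 |
|      |          * constituents between them, the only valid offset for the next |
|      |          * fragment is the offset of the composite's constituent array plus |
|      |          * 10 * sizeof(struct ffa_memory_region_constituent). |
|      |          */ |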
| 2438 | retrieved_constituents_count = 0; |
| 2439 | for (i = 0; i < fragment_index; ++i) { |
| 2440 | retrieved_constituents_count += |
| 2441 | share_state->fragment_constituent_counts[i]; |
| 2442 | } |
| 2443 | expected_fragment_offset = |
| 2444 | ffa_composite_constituent_offset(memory_region, 0) + |
| 2445 | retrieved_constituents_count * |
| 2446 | sizeof(struct ffa_memory_region_constituent); |
| 2447 | if (fragment_offset != expected_fragment_offset) { |
| 2448 | dlog_verbose("Fragment offset was %d but expected %d.\n", |
| 2449 | fragment_offset, expected_fragment_offset); |
| 2450 | ret = ffa_error(FFA_INVALID_PARAMETERS); |
| 2451 | goto out; |
| 2452 | } |
| 2453 | |
| 2454 | remaining_constituent_count = ffa_memory_fragment_init( |
| 2455 | to_locked.vm->mailbox.recv, HF_MAILBOX_SIZE, |
| 2456 | share_state->fragments[fragment_index], |
| 2457 | share_state->fragment_constituent_counts[fragment_index], |
| 2458 | &fragment_length); |
| 2459 | CHECK(remaining_constituent_count == 0); |
| 2460 | to_locked.vm->mailbox.recv_size = fragment_length; |
| 2461 | to_locked.vm->mailbox.recv_sender = HF_HYPERVISOR_VM_ID; |
| 2462 | to_locked.vm->mailbox.recv_func = FFA_MEM_FRAG_TX_32; |
| 2463 | to_locked.vm->mailbox.state = MAILBOX_STATE_READ; |
| 2464 | share_state->retrieved_fragment_count[0]++; |
| 2465 | if (share_state->retrieved_fragment_count[0] == |
| 2466 | share_state->fragment_count) { |
| 2467 | ffa_memory_retrieve_complete(share_states, share_state, |
| 2468 | page_pool); |
| 2469 | } |
| 2470 | |
| 2471 | ret = (struct ffa_value){.func = FFA_MEM_FRAG_TX_32, |
| 2472 | .arg1 = (uint32_t)handle, |
| 2473 | .arg2 = (uint32_t)(handle >> 32), |
| 2474 | .arg3 = fragment_length}; |
Andrew Walbran | 5de9c3d | 2020-02-10 13:35:29 +0000 | [diff] [blame] | 2475 | |
| 2476 | out: |
| 2477 | share_states_unlock(&share_states); |
| 2478 | dump_share_states(); |
| 2479 | return ret; |
| 2480 | } |
| 2481 | |
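|      | /** |
|      |  * Handles an FFA_MEM_RELINQUISH request: checks that the caller is the |
|      |  * receiver which fully retrieved the memory region, unmaps it from the |
|      |  * caller's stage-2 page tables (optionally clearing the memory first) and |
|      |  * marks the handle as no longer retrieved so that it can be reclaimed or |
|      |  * retrieved again. |
|      |  */ |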
Andrew Walbran | b5ab43c | 2020-04-30 11:32:54 +0100 | [diff] [blame] | 2482 | struct ffa_value ffa_memory_relinquish( |
Andrew Walbran | 5de9c3d | 2020-02-10 13:35:29 +0000 | [diff] [blame] | 2483 | struct vm_locked from_locked, |
Andrew Walbran | b5ab43c | 2020-04-30 11:32:54 +0100 | [diff] [blame] | 2484 | struct ffa_mem_relinquish *relinquish_request, struct mpool *page_pool) |
Andrew Walbran | 5de9c3d | 2020-02-10 13:35:29 +0000 | [diff] [blame] | 2485 | { |
Andrew Walbran | b5ab43c | 2020-04-30 11:32:54 +0100 | [diff] [blame] | 2486 | ffa_memory_handle_t handle = relinquish_request->handle; |
Andrew Walbran | 5de9c3d | 2020-02-10 13:35:29 +0000 | [diff] [blame] | 2487 | struct share_states_locked share_states; |
Andrew Walbran | b5ab43c | 2020-04-30 11:32:54 +0100 | [diff] [blame] | 2488 | struct ffa_memory_share_state *share_state; |
| 2489 | struct ffa_memory_region *memory_region; |
Andrew Walbran | 5de9c3d | 2020-02-10 13:35:29 +0000 | [diff] [blame] | 2490 | bool clear; |
Andrew Walbran | b5ab43c | 2020-04-30 11:32:54 +0100 | [diff] [blame] | 2491 | struct ffa_value ret; |
Andrew Walbran | 5de9c3d | 2020-02-10 13:35:29 +0000 | [diff] [blame] | 2492 | |
Andrew Walbran | a65a132 | 2020-04-06 19:32:32 +0100 | [diff] [blame] | 2493 | if (relinquish_request->endpoint_count != 1) { |
Andrew Walbran | 5de9c3d | 2020-02-10 13:35:29 +0000 | [diff] [blame] | 2494 | dlog_verbose( |
Andrew Walbran | a65a132 | 2020-04-06 19:32:32 +0100 | [diff] [blame] | 2495 | "Stream endpoints not supported (got %d endpoints on " |
Andrew Walbran | b5ab43c | 2020-04-30 11:32:54 +0100 | [diff] [blame] | 2496 | "FFA_MEM_RELINQUISH, expected 1).\n", |
Andrew Walbran | 5de9c3d | 2020-02-10 13:35:29 +0000 | [diff] [blame] | 2497 | relinquish_request->endpoint_count); |
Andrew Walbran | b5ab43c | 2020-04-30 11:32:54 +0100 | [diff] [blame] | 2498 | return ffa_error(FFA_INVALID_PARAMETERS); |
Andrew Walbran | 5de9c3d | 2020-02-10 13:35:29 +0000 | [diff] [blame] | 2499 | } |
| 2500 | |
Andrew Walbran | a65a132 | 2020-04-06 19:32:32 +0100 | [diff] [blame] | 2501 | if (relinquish_request->endpoints[0] != from_locked.vm->id) { |
Andrew Walbran | 5de9c3d | 2020-02-10 13:35:29 +0000 | [diff] [blame] | 2502 | dlog_verbose( |
| 2503 | "VM ID %d in relinquish message doesn't match calling " |
| 2504 | "VM ID %d.\n", |
Andrew Walbran | a65a132 | 2020-04-06 19:32:32 +0100 | [diff] [blame] | 2505 | relinquish_request->endpoints[0], from_locked.vm->id); |
Andrew Walbran | b5ab43c | 2020-04-30 11:32:54 +0100 | [diff] [blame] | 2506 | return ffa_error(FFA_INVALID_PARAMETERS); |
Andrew Walbran | 5de9c3d | 2020-02-10 13:35:29 +0000 | [diff] [blame] | 2507 | } |
| 2508 | |
| 2509 | dump_share_states(); |
| 2510 | |
| 2511 | share_states = share_states_lock(); |
| 2512 | if (!get_share_state(share_states, handle, &share_state)) { |
Andrew Walbran | b5ab43c | 2020-04-30 11:32:54 +0100 | [diff] [blame] | 2513 | dlog_verbose("Invalid handle %#x for FFA_MEM_RELINQUISH.\n", |
Andrew Walbran | 5de9c3d | 2020-02-10 13:35:29 +0000 | [diff] [blame] | 2514 | handle); |
Andrew Walbran | b5ab43c | 2020-04-30 11:32:54 +0100 | [diff] [blame] | 2515 | ret = ffa_error(FFA_INVALID_PARAMETERS); |
Andrew Walbran | 5de9c3d | 2020-02-10 13:35:29 +0000 | [diff] [blame] | 2516 | goto out; |
| 2517 | } |
| 2518 | |
Andrew Walbran | ca808b1 | 2020-05-15 17:22:28 +0100 | [diff] [blame] | 2519 | if (!share_state->sending_complete) { |
| 2520 | dlog_verbose( |
| 2521 | "Memory with handle %#x not fully sent, can't " |
| 2522 | "relinquish.\n", |
| 2523 | handle); |
| 2524 | ret = ffa_error(FFA_INVALID_PARAMETERS); |
| 2525 | goto out; |
| 2526 | } |
| 2527 | |
Andrew Walbran | 5de9c3d | 2020-02-10 13:35:29 +0000 | [diff] [blame] | 2528 | memory_region = share_state->memory_region; |
| 2529 | CHECK(memory_region != NULL); |
| 2530 | |
Andrew Walbran | a65a132 | 2020-04-06 19:32:32 +0100 | [diff] [blame] | 2531 | if (memory_region->receivers[0].receiver_permissions.receiver != |
| 2532 | from_locked.vm->id) { |
Andrew Walbran | 5de9c3d | 2020-02-10 13:35:29 +0000 | [diff] [blame] | 2533 | dlog_verbose( |
| 2534 | "VM ID %d tried to relinquish memory region with " |
| 2535 | "handle %#x but receiver was %d.\n", |
| 2536 | from_locked.vm->id, handle, |
Andrew Walbran | a65a132 | 2020-04-06 19:32:32 +0100 | [diff] [blame] | 2537 | memory_region->receivers[0] |
| 2538 | .receiver_permissions.receiver); |
Andrew Walbran | b5ab43c | 2020-04-30 11:32:54 +0100 | [diff] [blame] | 2539 | ret = ffa_error(FFA_INVALID_PARAMETERS); |
Andrew Walbran | 5de9c3d | 2020-02-10 13:35:29 +0000 | [diff] [blame] | 2540 | goto out; |
| 2541 | } |
| 2542 | |
Andrew Walbran | ca808b1 | 2020-05-15 17:22:28 +0100 | [diff] [blame] | 2543 | if (share_state->retrieved_fragment_count[0] != |
| 2544 | share_state->fragment_count) { |
Andrew Walbran | 5de9c3d | 2020-02-10 13:35:29 +0000 | [diff] [blame] | 2545 | dlog_verbose( |
Andrew Walbran | ca808b1 | 2020-05-15 17:22:28 +0100 | [diff] [blame] | 2546 | "Memory with handle %#x not yet fully retrieved, can't " |
Andrew Walbran | 5de9c3d | 2020-02-10 13:35:29 +0000 | [diff] [blame] | 2547 | "relinquish.\n", |
| 2548 | handle); |
Andrew Walbran | b5ab43c | 2020-04-30 11:32:54 +0100 | [diff] [blame] | 2549 | ret = ffa_error(FFA_INVALID_PARAMETERS); |
Andrew Walbran | 5de9c3d | 2020-02-10 13:35:29 +0000 | [diff] [blame] | 2550 | goto out; |
| 2551 | } |
| 2552 | |
Andrew Walbran | b5ab43c | 2020-04-30 11:32:54 +0100 | [diff] [blame] | 2553 | clear = relinquish_request->flags & FFA_MEMORY_REGION_FLAG_CLEAR; |
Andrew Walbran | 5de9c3d | 2020-02-10 13:35:29 +0000 | [diff] [blame] | 2554 | |
| 2555 | /* |
| 2556 | * Clear is not allowed for memory that was shared, as the original |
| 2557 | * sender still has access to the memory. |
| 2558 | */ |
Andrew Walbran | b5ab43c | 2020-04-30 11:32:54 +0100 | [diff] [blame] | 2559 | if (clear && share_state->share_func == FFA_MEM_SHARE_32) { |
Andrew Walbran | 5de9c3d | 2020-02-10 13:35:29 +0000 | [diff] [blame] | 2560 | dlog_verbose("Memory which was shared can't be cleared.\n"); |
Andrew Walbran | b5ab43c | 2020-04-30 11:32:54 +0100 | [diff] [blame] | 2561 | ret = ffa_error(FFA_INVALID_PARAMETERS); |
Andrew Walbran | 5de9c3d | 2020-02-10 13:35:29 +0000 | [diff] [blame] | 2562 | goto out; |
| 2563 | } |
| 2564 | |
Andrew Walbran | ca808b1 | 2020-05-15 17:22:28 +0100 | [diff] [blame] | 2565 | ret = ffa_relinquish_check_update( |
| 2566 | from_locked, share_state->fragments, |
| 2567 | share_state->fragment_constituent_counts, |
| 2568 | share_state->fragment_count, page_pool, clear); |
Andrew Walbran | 5de9c3d | 2020-02-10 13:35:29 +0000 | [diff] [blame] | 2569 | |
Andrew Walbran | b5ab43c | 2020-04-30 11:32:54 +0100 | [diff] [blame] | 2570 | if (ret.func == FFA_SUCCESS_32) { |
Andrew Walbran | 5de9c3d | 2020-02-10 13:35:29 +0000 | [diff] [blame] | 2571 | /* |
| 2572 | * Mark memory handle as not retrieved, so it can be reclaimed |
| 2573 | * (or retrieved again). |
| 2574 | */ |
Andrew Walbran | ca808b1 | 2020-05-15 17:22:28 +0100 | [diff] [blame] | 2575 | share_state->retrieved_fragment_count[0] = 0; |
Andrew Walbran | 5de9c3d | 2020-02-10 13:35:29 +0000 | [diff] [blame] | 2576 | } |
| 2577 | |
| 2578 | out: |
| 2579 | share_states_unlock(&share_states); |
| 2580 | dump_share_states(); |
| 2581 | return ret; |
| 2582 | } |
| 2583 | |
| 2584 | /** |
| 2585 | * Validates that the reclaim transition is allowed for the given handle, |
| 2586 | * updates the page table of the reclaiming VM, and frees the internal state |
| 2587 | * associated with the handle. |
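|      |  * |
|      |  * Reclaim is only allowed once the region has been fully sent and is not |
|      |  * currently retrieved by the receiver. |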
| 2588 | */ |
Andrew Walbran | b5ab43c | 2020-04-30 11:32:54 +0100 | [diff] [blame] | 2589 | struct ffa_value ffa_memory_reclaim(struct vm_locked to_locked, |
Andrew Walbran | ca808b1 | 2020-05-15 17:22:28 +0100 | [diff] [blame] | 2590 | ffa_memory_handle_t handle, |
| 2591 | ffa_memory_region_flags_t flags, |
Andrew Walbran | b5ab43c | 2020-04-30 11:32:54 +0100 | [diff] [blame] | 2592 | struct mpool *page_pool) |
Andrew Walbran | 5de9c3d | 2020-02-10 13:35:29 +0000 | [diff] [blame] | 2593 | { |
| 2594 | struct share_states_locked share_states; |
Andrew Walbran | b5ab43c | 2020-04-30 11:32:54 +0100 | [diff] [blame] | 2595 | struct ffa_memory_share_state *share_state; |
| 2596 | struct ffa_memory_region *memory_region; |
Andrew Walbran | b5ab43c | 2020-04-30 11:32:54 +0100 | [diff] [blame] | 2597 | struct ffa_value ret; |
Andrew Walbran | 5de9c3d | 2020-02-10 13:35:29 +0000 | [diff] [blame] | 2598 | |
| 2599 | dump_share_states(); |
| 2600 | |
| 2601 | share_states = share_states_lock(); |
| 2602 | if (!get_share_state(share_states, handle, &share_state)) { |
Andrew Walbran | b5ab43c | 2020-04-30 11:32:54 +0100 | [diff] [blame] | 2603 | dlog_verbose("Invalid handle %#x for FFA_MEM_RECLAIM.\n", |
Andrew Walbran | 5de9c3d | 2020-02-10 13:35:29 +0000 | [diff] [blame] | 2604 | handle); |
Andrew Walbran | b5ab43c | 2020-04-30 11:32:54 +0100 | [diff] [blame] | 2605 | ret = ffa_error(FFA_INVALID_PARAMETERS); |
Andrew Walbran | 5de9c3d | 2020-02-10 13:35:29 +0000 | [diff] [blame] | 2606 | goto out; |
| 2607 | } |
| 2608 | |
| 2609 | memory_region = share_state->memory_region; |
| 2610 | CHECK(memory_region != NULL); |
| 2611 | |
| 2612 | if (to_locked.vm->id != memory_region->sender) { |
| 2613 | dlog_verbose( |
Olivier Deprez | f92e5d4 | 2020-11-13 16:00:54 +0100 | [diff] [blame] | 2614 | "VM %#x attempted to reclaim memory handle %#x " |
| 2615 | "originally sent by VM %#x.\n", |
Andrew Walbran | 5de9c3d | 2020-02-10 13:35:29 +0000 | [diff] [blame] | 2616 | to_locked.vm->id, handle, memory_region->sender); |
Andrew Walbran | b5ab43c | 2020-04-30 11:32:54 +0100 | [diff] [blame] | 2617 | ret = ffa_error(FFA_INVALID_PARAMETERS); |
Andrew Walbran | 5de9c3d | 2020-02-10 13:35:29 +0000 | [diff] [blame] | 2618 | goto out; |
| 2619 | } |
| 2620 | |
Andrew Walbran | ca808b1 | 2020-05-15 17:22:28 +0100 | [diff] [blame] | 2621 | if (!share_state->sending_complete) { |
| 2622 | dlog_verbose( |
| 2623 | "Memory with handle %#x not fully sent, can't " |
| 2624 | "reclaim.\n", |
| 2625 | handle); |
| 2626 | ret = ffa_error(FFA_INVALID_PARAMETERS); |
| 2627 | goto out; |
| 2628 | } |
| 2629 | |
| 2630 | if (share_state->retrieved_fragment_count[0] != 0) { |
Andrew Walbran | 5de9c3d | 2020-02-10 13:35:29 +0000 | [diff] [blame] | 2631 | dlog_verbose( |
| 2632 | "Tried to reclaim memory handle %#x that has not been " |
| 2633 | "relinquished.\n", |
| 2634 | handle); |
Andrew Walbran | b5ab43c | 2020-04-30 11:32:54 +0100 | [diff] [blame] | 2635 | ret = ffa_error(FFA_DENIED); |
Andrew Walbran | 5de9c3d | 2020-02-10 13:35:29 +0000 | [diff] [blame] | 2636 | goto out; |
| 2637 | } |
| 2638 | |
Andrew Walbran | ca808b1 | 2020-05-15 17:22:28 +0100 | [diff] [blame] | 2639 | ret = ffa_retrieve_check_update( |
| 2640 | to_locked, share_state->fragments, |
| 2641 | share_state->fragment_constituent_counts, |
J-Alves | 2a0d288 | 2020-10-29 14:49:50 +0000 | [diff] [blame] | 2642 | share_state->fragment_count, share_state->sender_orig_mode, |
Andrew Walbran | ca808b1 | 2020-05-15 17:22:28 +0100 | [diff] [blame] | 2643 | FFA_MEM_RECLAIM_32, flags & FFA_MEM_RECLAIM_CLEAR, page_pool); |
Andrew Walbran | 5de9c3d | 2020-02-10 13:35:29 +0000 | [diff] [blame] | 2644 | |
Andrew Walbran | b5ab43c | 2020-04-30 11:32:54 +0100 | [diff] [blame] | 2645 | if (ret.func == FFA_SUCCESS_32) { |
Andrew Walbran | 5de9c3d | 2020-02-10 13:35:29 +0000 | [diff] [blame] | 2646 | share_state_free(share_states, share_state, page_pool); |
| 2647 | dlog_verbose("Freed share state after successful reclaim.\n"); |
| 2648 | } |
| 2649 | |
| 2650 | out: |
| 2651 | share_states_unlock(&share_states); |
| 2652 | return ret; |
Jose Marinho | 09b1db8 | 2019-08-08 09:16:59 +0100 | [diff] [blame] | 2653 | } |
Andrew Walbran | 290b0c9 | 2020-02-03 16:37:14 +0000 | [diff] [blame] | 2654 | |
| 2655 | /** |
Andrew Walbran | ca808b1 | 2020-05-15 17:22:28 +0100 | [diff] [blame] | 2656 | * Validates that the reclaim transition is allowed for the memory region with |
| 2657 | * the given handle which was previously shared with the TEE, tells the TEE to |
| 2658 | * mark it as reclaimed, and updates the page table of the reclaiming VM. |
| 2659 | * |
| 2660 |  * To do this, information about the memory region is first fetched from the TEE. |
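|      |  * The descriptor may span several fragments, so it is fetched with an |
|      |  * FFA_MEM_RETRIEVE_REQ followed by FFA_MEM_FRAG_RX calls and reassembled in an |
|      |  * internal buffer before being validated. |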
Andrew Walbran | 290b0c9 | 2020-02-03 16:37:14 +0000 | [diff] [blame] | 2661 | */ |
Andrew Walbran | b5ab43c | 2020-04-30 11:32:54 +0100 | [diff] [blame] | 2662 | struct ffa_value ffa_memory_tee_reclaim(struct vm_locked to_locked, |
Andrew Walbran | ca808b1 | 2020-05-15 17:22:28 +0100 | [diff] [blame] | 2663 | struct vm_locked from_locked, |
Andrew Walbran | b5ab43c | 2020-04-30 11:32:54 +0100 | [diff] [blame] | 2664 | ffa_memory_handle_t handle, |
Andrew Walbran | ca808b1 | 2020-05-15 17:22:28 +0100 | [diff] [blame] | 2665 | ffa_memory_region_flags_t flags, |
| 2666 | struct mpool *page_pool) |
Andrew Walbran | 290b0c9 | 2020-02-03 16:37:14 +0000 | [diff] [blame] | 2667 | { |
Andrew Walbran | ca808b1 | 2020-05-15 17:22:28 +0100 | [diff] [blame] | 2668 | uint32_t request_length = ffa_memory_lender_retrieve_request_init( |
| 2669 | from_locked.vm->mailbox.recv, handle, to_locked.vm->id); |
| 2670 | struct ffa_value tee_ret; |
| 2671 | uint32_t length; |
| 2672 | uint32_t fragment_length; |
| 2673 | uint32_t fragment_offset; |
| 2674 | struct ffa_memory_region *memory_region; |
Andrew Walbran | b5ab43c | 2020-04-30 11:32:54 +0100 | [diff] [blame] | 2675 | struct ffa_composite_memory_region *composite; |
Andrew Walbran | ca808b1 | 2020-05-15 17:22:28 +0100 | [diff] [blame] | 2676 | uint32_t memory_to_attributes = MM_MODE_R | MM_MODE_W | MM_MODE_X; |
| 2677 | |
| 2678 | CHECK(request_length <= HF_MAILBOX_SIZE); |
| 2679 | CHECK(from_locked.vm->id == HF_TEE_VM_ID); |
| 2680 | |
| 2681 | /* Retrieve memory region information from the TEE. */ |
Olivier Deprez | 112d2b5 | 2020-09-30 07:39:23 +0200 | [diff] [blame] | 2682 | tee_ret = arch_other_world_call( |
Andrew Walbran | ca808b1 | 2020-05-15 17:22:28 +0100 | [diff] [blame] | 2683 | (struct ffa_value){.func = FFA_MEM_RETRIEVE_REQ_32, |
| 2684 | .arg1 = request_length, |
| 2685 | .arg2 = request_length}); |
| 2686 | if (tee_ret.func == FFA_ERROR_32) { |
| 2687 | dlog_verbose("Got error %d from EL3.\n", tee_ret.arg2); |
| 2688 | return tee_ret; |
| 2689 | } |
| 2690 | if (tee_ret.func != FFA_MEM_RETRIEVE_RESP_32) { |
| 2691 | dlog_verbose( |
| 2692 | "Got %#x from EL3, expected FFA_MEM_RETRIEVE_RESP.\n", |
| 2693 | tee_ret.func); |
| 2694 | return ffa_error(FFA_INVALID_PARAMETERS); |
| 2695 | } |
| 2696 | |
| 2697 | length = tee_ret.arg1; |
| 2698 | fragment_length = tee_ret.arg2; |
| 2699 | |
| 2700 | if (fragment_length > HF_MAILBOX_SIZE || fragment_length > length || |
| 2701 | length > sizeof(tee_retrieve_buffer)) { |
| 2702 | dlog_verbose("Invalid fragment length %d/%d (max %d/%d).\n", |
| 2703 | fragment_length, length, HF_MAILBOX_SIZE, |
| 2704 | sizeof(tee_retrieve_buffer)); |
| 2705 | return ffa_error(FFA_INVALID_PARAMETERS); |
| 2706 | } |
| 2707 | |
| 2708 | /* |
| 2709 | * Copy the first fragment of the memory region descriptor to an |
| 2710 | * internal buffer. |
| 2711 | */ |
| 2712 | memcpy_s(tee_retrieve_buffer, sizeof(tee_retrieve_buffer), |
| 2713 | from_locked.vm->mailbox.send, fragment_length); |
| 2714 | |
| 2715 | /* Fetch the remaining fragments into the same buffer. */ |
| 2716 | fragment_offset = fragment_length; |
| 2717 | while (fragment_offset < length) { |
Olivier Deprez | 112d2b5 | 2020-09-30 07:39:23 +0200 | [diff] [blame] | 2718 | tee_ret = arch_other_world_call( |
Andrew Walbran | ca808b1 | 2020-05-15 17:22:28 +0100 | [diff] [blame] | 2719 | (struct ffa_value){.func = FFA_MEM_FRAG_RX_32, |
| 2720 | .arg1 = (uint32_t)handle, |
| 2721 | .arg2 = (uint32_t)(handle >> 32), |
| 2722 | .arg3 = fragment_offset}); |
| 2723 | if (tee_ret.func != FFA_MEM_FRAG_TX_32) { |
| 2724 | dlog_verbose( |
| 2725 | "Got %#x (%d) from TEE in response to " |
| 2726 | "FFA_MEM_FRAG_RX, expected FFA_MEM_FRAG_TX.\n", |
| 2727 | tee_ret.func, tee_ret.arg2); |
| 2728 | return tee_ret; |
| 2729 | } |
| 2730 | if (ffa_frag_handle(tee_ret) != handle) { |
| 2731 | dlog_verbose( |
| 2732 | "Got FFA_MEM_FRAG_TX for unexpected handle %#x " |
| 2733 | "in response to FFA_MEM_FRAG_RX for handle " |
| 2734 | "%#x.\n", |
| 2735 | ffa_frag_handle(tee_ret), handle); |
| 2736 | return ffa_error(FFA_INVALID_PARAMETERS); |
| 2737 | } |
| 2738 | if (ffa_frag_sender(tee_ret) != 0) { |
| 2739 | dlog_verbose( |
| 2740 | "Got FFA_MEM_FRAG_TX with unexpected sender %d " |
| 2741 | "(expected 0).\n", |
| 2742 | ffa_frag_sender(tee_ret)); |
| 2743 | return ffa_error(FFA_INVALID_PARAMETERS); |
| 2744 | } |
| 2745 | fragment_length = tee_ret.arg3; |
| 2746 | if (fragment_length > HF_MAILBOX_SIZE || |
| 2747 | fragment_offset + fragment_length > length) { |
| 2748 | dlog_verbose( |
| 2749 | "Invalid fragment length %d at offset %d (max " |
| 2750 | "%d).\n", |
| 2751 | fragment_length, fragment_offset, |
| 2752 | HF_MAILBOX_SIZE); |
| 2753 | return ffa_error(FFA_INVALID_PARAMETERS); |
| 2754 | } |
| 2755 | memcpy_s(tee_retrieve_buffer + fragment_offset, |
| 2756 | sizeof(tee_retrieve_buffer) - fragment_offset, |
| 2757 | from_locked.vm->mailbox.send, fragment_length); |
| 2758 | |
| 2759 | fragment_offset += fragment_length; |
| 2760 | } |
| 2761 | |
| 2762 | memory_region = (struct ffa_memory_region *)tee_retrieve_buffer; |
Andrew Walbran | 290b0c9 | 2020-02-03 16:37:14 +0000 | [diff] [blame] | 2763 | |
| 2764 | if (memory_region->receiver_count != 1) { |
| 2765 | /* Only one receiver supported by Hafnium for now. */ |
| 2766 | dlog_verbose( |
| 2767 | "Multiple recipients not supported (got %d, expected " |
| 2768 | "1).\n", |
| 2769 | memory_region->receiver_count); |
Andrew Walbran | ca808b1 | 2020-05-15 17:22:28 +0100 | [diff] [blame] | 2770 | return ffa_error(FFA_INVALID_PARAMETERS); |
Andrew Walbran | 290b0c9 | 2020-02-03 16:37:14 +0000 | [diff] [blame] | 2771 | } |
| 2772 | |
| 2773 | if (memory_region->handle != handle) { |
| 2774 | dlog_verbose( |
| 2775 | "Got memory region handle %#x from TEE but requested " |
| 2776 | "handle %#x.\n", |
| 2777 | memory_region->handle, handle); |
Andrew Walbran | b5ab43c | 2020-04-30 11:32:54 +0100 | [diff] [blame] | 2778 | return ffa_error(FFA_INVALID_PARAMETERS); |
Andrew Walbran | 290b0c9 | 2020-02-03 16:37:14 +0000 | [diff] [blame] | 2779 | } |
| 2780 | |
| 2781 | /* The original sender must match the caller. */ |
| 2782 | if (to_locked.vm->id != memory_region->sender) { |
| 2783 | dlog_verbose( |
Olivier Deprez | f92e5d4 | 2020-11-13 16:00:54 +0100 | [diff] [blame] | 2784 | "VM %#x attempted to reclaim memory handle %#x " |
| 2785 | "originally sent by VM %#x.\n", |
Andrew Walbran | 290b0c9 | 2020-02-03 16:37:14 +0000 | [diff] [blame] | 2786 | to_locked.vm->id, handle, memory_region->sender); |
Andrew Walbran | b5ab43c | 2020-04-30 11:32:54 +0100 | [diff] [blame] | 2787 | return ffa_error(FFA_INVALID_PARAMETERS); |
Andrew Walbran | 290b0c9 | 2020-02-03 16:37:14 +0000 | [diff] [blame] | 2788 | } |
| 2789 | |
Andrew Walbran | b5ab43c | 2020-04-30 11:32:54 +0100 | [diff] [blame] | 2790 | composite = ffa_memory_region_get_composite(memory_region, 0); |
Andrew Walbran | 290b0c9 | 2020-02-03 16:37:14 +0000 | [diff] [blame] | 2791 | |
| 2792 | /* |
Andrew Walbran | ca808b1 | 2020-05-15 17:22:28 +0100 | [diff] [blame] | 2793 | * Validate that the reclaim transition is allowed for the given memory |
| 2794 | * region, forward the request to the TEE and then map the memory back |
| 2795 | * into the caller's stage-2 page table. |
Andrew Walbran | 290b0c9 | 2020-02-03 16:37:14 +0000 | [diff] [blame] | 2796 | */ |
Andrew Walbran | 996d1d1 | 2020-05-27 14:08:43 +0100 | [diff] [blame] | 2797 | return ffa_tee_reclaim_check_update( |
| 2798 | to_locked, handle, composite->constituents, |
Andrew Walbran | ca808b1 | 2020-05-15 17:22:28 +0100 | [diff] [blame] | 2799 | composite->constituent_count, memory_to_attributes, |
| 2800 | flags & FFA_MEM_RECLAIM_CLEAR, page_pool); |
Andrew Walbran | 290b0c9 | 2020-02-03 16:37:14 +0000 | [diff] [blame] | 2801 | } |