/*
 * Copyright 2019 The Hafnium Authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     https://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "hf/spci_memory.h"

#include "hf/api.h"
#include "hf/check.h"
#include "hf/dlog.h"
#include "hf/mpool.h"
#include "hf/spci_internal.h"
#include "hf/std.h"
#include "hf/vm.h"

static_assert(sizeof(struct spci_memory_region_constituent) % 16 == 0,
	      "struct spci_memory_region_constituent must be a multiple of "
	      "16 bytes.");
static_assert(sizeof(struct spci_memory_region_attributes) % 16 == 0,
	      "struct spci_memory_region_attributes must be a multiple of "
	      "16 bytes.");
static_assert(sizeof(struct spci_memory_region) % 16 == 0,
	      "struct spci_memory_region must be a multiple of 16 bytes.");

struct spci_mem_transitions {
	uint32_t orig_from_mode;
	uint32_t orig_to_mode;
	uint32_t from_mode;
	uint32_t to_mode;
};

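/*
 * Note on the notation used in the transition tables below (added for
 * illustration; the encoding is inferred from the tables and their comments):
 * states are written O-EA (owner, exclusive access), O-NA (owner, no access)
 * and O-SA (owner, shared access), with a leading "!" for a non-owner. In
 * mode bits, for example:
 *
 *	O-EA  -> 0 (no state bits set)
 *	!O-NA -> MM_MODE_INVALID | MM_MODE_UNOWNED
 *		 (or MM_MODE_INVALID | MM_MODE_UNOWNED | MM_MODE_SHARED)
 *	O-SA  -> MM_MODE_SHARED
 */
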
/* TODO: Add device attributes: GRE, cacheability, shareability. */
static inline uint32_t spci_memory_attrs_to_mode(uint16_t memory_attributes)
{
	uint32_t mode = 0;

	switch (spci_get_memory_access_attr(memory_attributes)) {
	case SPCI_MEMORY_RO_NX:
		mode = MM_MODE_R;
		break;
	case SPCI_MEMORY_RO_X:
		mode = MM_MODE_R | MM_MODE_X;
		break;
	case SPCI_MEMORY_RW_NX:
		mode = MM_MODE_R | MM_MODE_W;
		break;
	case SPCI_MEMORY_RW_X:
		mode = MM_MODE_R | MM_MODE_W | MM_MODE_X;
		break;
	}

	return mode;
}

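/*
 * For illustration (not part of the original file): a region granted
 * SPCI_MEMORY_RW_NX access yields MM_MODE_R | MM_MODE_W above; that value is
 * later passed through spci_share_memory() as memory_to_attributes and merged
 * into the receiver's new stage-2 mode by spci_msg_get_next_state().
 */
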
/**
 * Obtains the next mode to apply to the two VMs.
 *
 * Returns true iff a state transition was found.
 */
static bool spci_msg_get_next_state(
	const struct spci_mem_transitions *transitions,
	uint32_t transition_count, uint32_t memory_to_attributes,
	uint32_t orig_from_mode, uint32_t orig_to_mode, uint32_t *from_mode,
	uint32_t *to_mode)
{
	const uint32_t state_mask =
		MM_MODE_INVALID | MM_MODE_UNOWNED | MM_MODE_SHARED;
	const uint32_t orig_from_state = orig_from_mode & state_mask;

	for (uint32_t index = 0; index < transition_count; index++) {
		uint32_t table_orig_from_mode =
			transitions[index].orig_from_mode;
		uint32_t table_orig_to_mode = transitions[index].orig_to_mode;

		if (orig_from_state == table_orig_from_mode &&
		    (orig_to_mode & state_mask) == table_orig_to_mode) {
			*to_mode = transitions[index].to_mode |
				   memory_to_attributes;

			*from_mode = transitions[index].from_mode |
				     (~state_mask & orig_from_mode);

			return true;
		}
	}
	return false;
}

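/*
 * Illustrative walk-through (not part of the original file): for a donate
 * where the sender maps the pages RWX and owns them exclusively
 * (orig_from_mode = MM_MODE_R | MM_MODE_W | MM_MODE_X, state O-EA) and the
 * receiver has no access (orig_to_mode = MM_MODE_INVALID | MM_MODE_UNOWNED),
 * the first entry of the donate table defined below matches and yields:
 *
 *	*from_mode = MM_MODE_INVALID | MM_MODE_UNOWNED |
 *		     MM_MODE_R | MM_MODE_W | MM_MODE_X;
 *	*to_mode   = memory_to_attributes;
 */
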
/**
 * Verifies that all pages have the same mode, that the starting mode
 * constitutes a valid state, and obtains the next mode to apply to the two
 * VMs.
 *
 * Returns:
 *  false when:
 *   1) no state transition was found;
 *   2) the pages being shared do not have the same mode within the <to>
 *     or <from> VMs;
 *   3) the beginning and end IPAs are not page aligned;
 *   4) the requested share type was not handled.
 *  true on success.
 */
static bool spci_msg_check_transition(
	struct vm *to, struct vm *from, uint32_t share_func,
	uint32_t *orig_from_mode,
	struct spci_memory_region_constituent *constituents,
	uint32_t constituent_count, uint32_t memory_to_attributes,
	uint32_t *from_mode, uint32_t *to_mode)
{
	uint32_t orig_to_mode;
	const struct spci_mem_transitions *mem_transition_table;
	uint32_t transition_table_size;
	uint32_t i;

	/*
	 * TODO: The transition table does not currently consider the
	 * multiple-shared case.
	 */
	static const struct spci_mem_transitions donate_transitions[] = {
		{
			/* 1) {O-EA, !O-NA} -> {!O-NA, O-EA} */
			.orig_from_mode = 0,
			.orig_to_mode = MM_MODE_INVALID | MM_MODE_UNOWNED,
			.from_mode = MM_MODE_INVALID | MM_MODE_UNOWNED,
			.to_mode = 0,
		},
		{
			/*
			 * Duplicate of 1) in order to cater for an alternative
			 * representation of !O-NA:
			 * (INVALID | UNOWNED | SHARED) and (INVALID | UNOWNED)
			 * are both alternate representations of !O-NA.
			 */
			/* 2) {O-EA, !O-NA} -> {!O-NA, O-EA} */
			.orig_from_mode = 0,
			.orig_to_mode = MM_MODE_INVALID | MM_MODE_UNOWNED |
					MM_MODE_SHARED,
			.from_mode = MM_MODE_INVALID | MM_MODE_UNOWNED |
				     MM_MODE_SHARED,
			.to_mode = 0,
		},
	};

	static const uint32_t size_donate_transitions =
		ARRAY_SIZE(donate_transitions);

	/*
	 * This data structure holds the allowed state transitions for the
	 * "lend" state machine. In this state machine the owner keeps
	 * ownership but loses access to the lent pages.
	 */
	static const struct spci_mem_transitions lend_transitions[] = {
		{
			/* 1) {O-EA, !O-NA} -> {O-NA, !O-EA} */
			.orig_from_mode = 0,
			.orig_to_mode = MM_MODE_INVALID | MM_MODE_UNOWNED |
					MM_MODE_SHARED,
			.from_mode = MM_MODE_INVALID,
			.to_mode = MM_MODE_UNOWNED,
		},
		{
			/*
			 * Duplicate of 1) in order to cater for an alternative
			 * representation of !O-NA:
			 * (INVALID | UNOWNED | SHARED) and (INVALID | UNOWNED)
			 * are both alternate representations of !O-NA.
			 */
			/* 2) {O-EA, !O-NA} -> {O-NA, !O-EA} */
			.orig_from_mode = 0,
			.orig_to_mode = MM_MODE_INVALID | MM_MODE_UNOWNED,
			.from_mode = MM_MODE_INVALID,
			.to_mode = MM_MODE_UNOWNED,
		},
	};

	static const uint32_t size_lend_transitions =
		ARRAY_SIZE(lend_transitions);

	/*
	 * This data structure holds the allowed state transitions for the
	 * "share" state machine. In this state machine the owner keeps the
	 * shared pages mapped on its stage-2 table and keeps access as well.
	 */
	static const struct spci_mem_transitions share_transitions[] = {
		{
			/* 1) {O-EA, !O-NA} -> {O-SA, !O-SA} */
			.orig_from_mode = 0,
			.orig_to_mode = MM_MODE_INVALID | MM_MODE_UNOWNED |
					MM_MODE_SHARED,
			.from_mode = MM_MODE_SHARED,
			.to_mode = MM_MODE_UNOWNED | MM_MODE_SHARED,
		},
		{
			/*
			 * Duplicate of 1) in order to cater for an alternative
			 * representation of !O-NA:
			 * (INVALID | UNOWNED | SHARED) and (INVALID | UNOWNED)
			 * are both alternate representations of !O-NA.
			 */
			/* 2) {O-EA, !O-NA} -> {O-SA, !O-SA} */
			.orig_from_mode = 0,
			.orig_to_mode = MM_MODE_INVALID | MM_MODE_UNOWNED,
			.from_mode = MM_MODE_SHARED,
			.to_mode = MM_MODE_UNOWNED | MM_MODE_SHARED,
		},
	};

	static const uint32_t size_share_transitions =
		ARRAY_SIZE(share_transitions);

	static const struct spci_mem_transitions relinquish_transitions[] = {
		{
			/* 1) {!O-EA, O-NA} -> {!O-NA, O-EA} */
			.orig_from_mode = MM_MODE_UNOWNED,
			.orig_to_mode = MM_MODE_INVALID,
			.from_mode = MM_MODE_INVALID | MM_MODE_UNOWNED |
				     MM_MODE_SHARED,
			.to_mode = 0,
		},
		{
			/* 2) {!O-SA, O-SA} -> {!O-NA, O-EA} */
			.orig_from_mode = MM_MODE_UNOWNED | MM_MODE_SHARED,
			.orig_to_mode = MM_MODE_SHARED,
			.from_mode = MM_MODE_INVALID | MM_MODE_UNOWNED |
				     MM_MODE_SHARED,
			.to_mode = 0,
		},
	};

	static const uint32_t size_relinquish_transitions =
		ARRAY_SIZE(relinquish_transitions);

	if (constituent_count == 0) {
		/*
		 * Fail if there are no constituents. Otherwise
		 * spci_msg_get_next_state would get an uninitialised
		 * *orig_from_mode and orig_to_mode.
		 */
		return false;
	}

	for (i = 0; i < constituent_count; ++i) {
		ipaddr_t begin =
			ipa_init(spci_memory_region_constituent_get_address(
				&constituents[i]));
		size_t size = constituents[i].page_count * PAGE_SIZE;
		ipaddr_t end = ipa_add(begin, size);
		uint32_t current_from_mode;
		uint32_t current_to_mode;

		/* Fail if addresses are not page-aligned. */
		if (!is_aligned(ipa_addr(begin), PAGE_SIZE) ||
		    !is_aligned(ipa_addr(end), PAGE_SIZE)) {
			return false;
		}

		/*
		 * Ensure that this constituent memory range is all mapped with
		 * the same mode.
		 */
		if (!mm_vm_get_mode(&from->ptable, begin, end,
				    &current_from_mode) ||
		    !mm_vm_get_mode(&to->ptable, begin, end,
				    &current_to_mode)) {
			return false;
		}

		/*
		 * Ensure that all constituents are mapped with the same mode.
		 */
		if (i == 0) {
			*orig_from_mode = current_from_mode;
			orig_to_mode = current_to_mode;
		} else if (current_from_mode != *orig_from_mode ||
			   current_to_mode != orig_to_mode) {
			return false;
		}
	}

	/* Ensure the address range is normal memory and not a device. */
	if (*orig_from_mode & MM_MODE_D) {
		return false;
	}

	switch (share_func) {
	case SPCI_MEM_DONATE_32:
		mem_transition_table = donate_transitions;
		transition_table_size = size_donate_transitions;
		break;

	case SPCI_MEM_LEND_32:
		mem_transition_table = lend_transitions;
		transition_table_size = size_lend_transitions;
		break;

	case SPCI_MEM_SHARE_32:
		mem_transition_table = share_transitions;
		transition_table_size = size_share_transitions;
		break;

	case HF_SPCI_MEM_RELINQUISH:
		mem_transition_table = relinquish_transitions;
		transition_table_size = size_relinquish_transitions;
		break;

	default:
		return false;
	}

	return spci_msg_get_next_state(mem_transition_table,
				       transition_table_size,
				       memory_to_attributes, *orig_from_mode,
				       orig_to_mode, from_mode, to_mode);
}

/**
 * Updates a VM's page table such that the given set of physical address ranges
 * are mapped in the address space at the corresponding address ranges, in the
 * mode provided.
 *
 * If commit is false, the page tables will be allocated from the mpool but no
 * mappings will actually be updated. This function must always be called first
 * with commit false to check that it will succeed before calling with commit
 * true, to avoid leaving the page table in a half-updated state. To make a
 * series of changes atomically you can call them all with commit false before
 * calling them all with commit true.
 *
 * mm_vm_defrag should always be called after a series of page table updates,
 * whether they succeed or fail.
 *
 * Returns true on success, or false if the update failed and no changes were
 * made to memory mappings.
 */
static bool spci_region_group_identity_map(
	struct vm_locked vm_locked,
	struct spci_memory_region_constituent *constituents,
	uint32_t constituent_count, int mode, struct mpool *ppool, bool commit)
{
	/* Iterate over the memory region constituents. */
	for (uint32_t index = 0; index < constituent_count; index++) {
		size_t size = constituents[index].page_count * PAGE_SIZE;
		paddr_t pa_begin = pa_from_ipa(
			ipa_init(spci_memory_region_constituent_get_address(
				&constituents[index])));
		paddr_t pa_end = pa_add(pa_begin, size);

		if (commit) {
			vm_identity_commit(vm_locked, pa_begin, pa_end, mode,
					   ppool, NULL);
		} else if (!vm_identity_prepare(vm_locked, pa_begin, pa_end,
						mode, ppool)) {
			return false;
		}
	}

	return true;
}

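/*
 * Illustrative sketch (not part of the original file) of the prepare/commit
 * pattern described above, as used by spci_share_memory() below; `count` and
 * `ppool` are placeholder names:
 *
 *	if (!spci_region_group_identity_map(from_locked, constituents, count,
 *					    from_mode, ppool, false) ||
 *	    !spci_region_group_identity_map(to_locked, constituents, count,
 *					    to_mode, ppool, false)) {
 *		return spci_error(SPCI_NO_MEMORY);
 *	}
 *	CHECK(spci_region_group_identity_map(from_locked, constituents, count,
 *					     from_mode, ppool, true));
 */
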
/**
 * Clears a region of physical memory by overwriting it with zeros. The data is
 * flushed from the cache so the memory has been cleared across the system.
 */
static bool clear_memory(paddr_t begin, paddr_t end, struct mpool *ppool)
{
	/*
	 * TODO: change this to a CPU-local single-page window rather than a
	 * global mapping of the whole range. Such an approach will limit the
	 * changes to stage-1 tables and will allow only local invalidation.
	 */
	bool ret;
	struct mm_stage1_locked stage1_locked = mm_lock_stage1();
	void *ptr =
		mm_identity_map(stage1_locked, begin, end, MM_MODE_W, ppool);
	size_t size = pa_difference(begin, end);

	if (!ptr) {
		/* TODO: partial defrag of failed range. */
		/* Recover any memory consumed in failed mapping. */
		mm_defrag(stage1_locked, ppool);
		goto fail;
	}

	memset_s(ptr, size, 0, size);
	arch_mm_flush_dcache(ptr, size);
	mm_unmap(stage1_locked, begin, end, ppool);

	ret = true;
	goto out;

fail:
	ret = false;

out:
	mm_unlock_stage1(&stage1_locked);

	return ret;
}

/**
 * Clears each of the given memory region constituents by overwriting them
 * with zeros. The data is flushed from the cache so the memory has been
 * cleared across the system.
 */
static bool spci_clear_memory_constituents(
	struct spci_memory_region_constituent *constituents,
	uint32_t constituent_count, struct mpool *page_pool)
{
	struct mpool local_page_pool;
	struct mm_stage1_locked stage1_locked;
	bool ret = false;

	/*
	 * Create a local pool so any freed memory can't be used by another
	 * thread. This is to ensure each constituent that is mapped can be
	 * unmapped again afterwards.
	 */
	mpool_init_with_fallback(&local_page_pool, page_pool);

	/* Iterate over the memory region constituents. */
	for (uint32_t i = 0; i < constituent_count; ++i) {
		size_t size = constituents[i].page_count * PAGE_SIZE;
		paddr_t begin = pa_from_ipa(
			ipa_init(spci_memory_region_constituent_get_address(
				&constituents[i])));
		paddr_t end = pa_add(begin, size);

		if (!clear_memory(begin, end, &local_page_pool)) {
			/*
			 * clear_memory will defrag on failure, so no need to
			 * do it here.
			 */
			goto out;
		}
	}

	/*
	 * Need to defrag after clearing, as it may have added extra mappings
	 * to the stage-1 page table.
	 */
	stage1_locked = mm_lock_stage1();
	mm_defrag(stage1_locked, &local_page_pool);
	mm_unlock_stage1(&stage1_locked);

	ret = true;

out:
	mpool_fini(&local_page_pool);
	return ret;
}

/**
 * Shares memory from the calling VM with another. The memory can be shared in
 * different modes.
 *
 * This function requires the calling context to hold the <to> and <from>
 * locks.
 *
 * Returns:
 *  In case of error, one of the following values is returned:
 *   1) SPCI_INVALID_PARAMETERS - The endpoint provided parameters were
 *     erroneous;
 *   2) SPCI_NO_MEMORY - Hafnium did not have sufficient memory to complete
 *     the request.
 *  Success is indicated by SPCI_SUCCESS.
 */
static struct spci_value spci_share_memory(
	struct vm_locked to_locked, struct vm_locked from_locked,
	struct spci_memory_region_constituent *constituents,
	uint32_t constituent_count, uint32_t memory_to_attributes,
	uint32_t share_func, struct mpool *page_pool, bool clear)
{
	struct vm *to = to_locked.vm;
	struct vm *from = from_locked.vm;
	uint32_t orig_from_mode;
	uint32_t from_mode;
	uint32_t to_mode;
	struct mpool local_page_pool;
	struct spci_value ret;

	/*
	 * Make sure constituents are properly aligned to a 32-bit boundary. If
	 * not we would get alignment faults trying to read (32-bit) values.
	 */
	if (!is_aligned(constituents, 4)) {
		dlog_verbose("Constituents not aligned.\n");
		return spci_error(SPCI_INVALID_PARAMETERS);
	}

	/* Disallow reflexive shares as this suggests an error in the VM. */
	if (to == from) {
		dlog_verbose("Reflexive share.\n");
		return spci_error(SPCI_INVALID_PARAMETERS);
	}

	/*
	 * Check that the state transition is lawful for both VMs involved in
	 * the memory exchange, and that all constituents of the memory region
	 * being shared are in the same state.
	 */
	if (!spci_msg_check_transition(to, from, share_func, &orig_from_mode,
				       constituents, constituent_count,
				       memory_to_attributes, &from_mode,
				       &to_mode)) {
		dlog_verbose("Invalid transition.\n");
		return spci_error(SPCI_INVALID_PARAMETERS);
	}

	/*
	 * Create a local pool so any freed memory can't be used by another
	 * thread. This is to ensure the original mapping can be restored if
	 * the clear fails.
	 */
	mpool_init_with_fallback(&local_page_pool, page_pool);

	/*
	 * First reserve all required memory for the new page table entries in
	 * both sender and recipient page tables without committing, to make
	 * sure the entire operation will succeed without exhausting the page
	 * pool.
	 */
	if (!spci_region_group_identity_map(from_locked, constituents,
					    constituent_count, from_mode,
					    page_pool, false) ||
	    !spci_region_group_identity_map(to_locked, constituents,
					    constituent_count, to_mode,
					    page_pool, false)) {
		/* TODO: partial defrag of failed range. */
		ret = spci_error(SPCI_NO_MEMORY);
		goto out;
	}

	/*
	 * First update the mapping for the sender so there is no overlap with
	 * the recipient. This won't allocate because the transaction was
	 * already prepared above, but may free pages in the case that a whole
	 * block is being unmapped that was previously partially mapped.
	 */
	CHECK(spci_region_group_identity_map(from_locked, constituents,
					     constituent_count, from_mode,
					     &local_page_pool, true));

	/* Clear the memory so no VM or device can see the previous contents. */
	if (clear && !spci_clear_memory_constituents(
			     constituents, constituent_count, page_pool)) {
		/*
		 * On failure, roll back by returning memory to the sender.
		 * This may allocate pages which were previously freed into
		 * `local_page_pool` by the call above, but will never allocate
		 * more pages than that so can never fail.
		 */
		CHECK(spci_region_group_identity_map(
			from_locked, constituents, constituent_count,
			orig_from_mode, &local_page_pool, true));

		ret = spci_error(SPCI_NO_MEMORY);
		goto out;
	}

	/*
	 * Complete the transfer by mapping the memory into the recipient. This
	 * won't allocate because the transaction was already prepared above,
	 * so it doesn't need to use the `local_page_pool`.
	 */
	CHECK(spci_region_group_identity_map(to_locked, constituents,
					     constituent_count, to_mode,
					     page_pool, true));

	ret = (struct spci_value){.func = SPCI_SUCCESS_32};

out:
	mpool_fini(&local_page_pool);

	/*
	 * Tidy up the page tables by reclaiming failed mappings (if there was
	 * an error) or merging entries into blocks where possible (on
	 * success).
	 */
	mm_vm_defrag(&to->ptable, page_pool);
	mm_vm_defrag(&from->ptable, page_pool);

	return ret;
}

/**
 * Validates a call to donate, lend or share memory and then updates the
 * stage-2 page tables. Specifically, checks that the message length matches
 * the number of memory region constituents, and that the transition is valid
 * for the type of memory sending operation.
 *
 * Assumes that the caller has already found and locked both VMs and ensured
 * that the destination RX buffer is available, and copied the memory region
 * descriptor from the sender's TX buffer to a trusted internal buffer.
 */
struct spci_value spci_memory_send(struct vm_locked to_locked,
				   struct vm_locked from_locked,
				   struct spci_memory_region *memory_region,
				   uint32_t memory_share_size,
				   uint32_t share_func, struct mpool *page_pool)
{
	uint32_t memory_to_attributes;
	uint32_t attributes_size;
	uint32_t constituents_size;
	struct spci_memory_region_constituent *constituents;
	uint32_t constituent_count = memory_region->constituent_count;

	/*
	 * Ensure the number of constituents is within the memory bounds.
	 */
	attributes_size = sizeof(struct spci_memory_region_attributes) *
			  memory_region->attribute_count;
	constituents_size = sizeof(struct spci_memory_region_constituent) *
			    constituent_count;
	if (memory_region->constituent_offset <
		    sizeof(struct spci_memory_region) + attributes_size ||
	    memory_share_size !=
		    memory_region->constituent_offset + constituents_size) {
		return spci_error(SPCI_INVALID_PARAMETERS);
	}

	/* The sender must match the message sender. */
	if (memory_region->sender != from_locked.vm->id) {
		return spci_error(SPCI_INVALID_PARAMETERS);
	}

	/* We only support a single recipient. */
	if (memory_region->attribute_count != 1) {
		return spci_error(SPCI_NOT_SUPPORTED);
	}

	/* The recipient must match the message recipient. */
	if (memory_region->attributes[0].receiver != to_locked.vm->id) {
		return spci_error(SPCI_INVALID_PARAMETERS);
	}

	switch (share_func) {
	case SPCI_MEM_DONATE_32:
	case SPCI_MEM_LEND_32:
	case SPCI_MEM_SHARE_32:
		memory_to_attributes = spci_memory_attrs_to_mode(
			memory_region->attributes[0].memory_attributes);
		break;
	case HF_SPCI_MEM_RELINQUISH:
		memory_to_attributes = MM_MODE_R | MM_MODE_W | MM_MODE_X;
		break;
	default:
		dlog_error("Invalid memory sharing message.\n");
		return spci_error(SPCI_INVALID_PARAMETERS);
	}

	constituents = spci_memory_region_get_constituents(memory_region);
	return spci_share_memory(
		to_locked, from_locked, constituents, constituent_count,
		memory_to_attributes, share_func, page_pool,
		memory_region->flags & SPCI_MEMORY_REGION_FLAG_CLEAR);
}
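
/*
 * Illustrative layout (not part of the original file) of the memory region
 * descriptor validated by spci_memory_send() above, for a single recipient.
 * The size check requires
 *
 *	memory_share_size ==
 *		constituent_offset +
 *		constituent_count *
 *			sizeof(struct spci_memory_region_constituent)
 *
 * with constituent_offset at or beyond the header and attributes:
 *
 *	struct spci_memory_region header
 *	struct spci_memory_region_attributes attributes[1]
 *		(attributes[0].receiver == destination VM ID)
 *	struct spci_memory_region_constituent constituents[...]
 *		(each a page-aligned address plus page_count, located via
 *		constituent_offset)
 */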