Jose Marinho75509b42019-04-09 09:34:59 +01001/*
2 * Copyright 2019 The Hafnium Authors.
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * https://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
Andrew Walbran475c1452020-02-07 13:22:22 +000017#include "hf/spci_memory.h"
18
Andrew Walbran290b0c92020-02-03 16:37:14 +000019#include "hf/arch/tee.h"
20
Jose Marinho75509b42019-04-09 09:34:59 +010021#include "hf/api.h"
Jose Marinho09b1db82019-08-08 09:16:59 +010022#include "hf/check.h"
Jose Marinho75509b42019-04-09 09:34:59 +010023#include "hf/dlog.h"
Andrew Walbran475c1452020-02-07 13:22:22 +000024#include "hf/mpool.h"
Jose Marinho75509b42019-04-09 09:34:59 +010025#include "hf/spci_internal.h"
26#include "hf/std.h"
Andrew Scull3c257452019-11-26 13:32:50 +000027#include "hf/vm.h"
Jose Marinho75509b42019-04-09 09:34:59 +010028
Andrew Walbran5de9c3d2020-02-10 13:35:29 +000029/** The maximum number of recipients a memory region may be sent to. */
30#define MAX_MEM_SHARE_RECIPIENTS 1
31
32/**
33 * The maximum number of memory sharing handles which may be active at once. A
34 * DONATE handle is active from when it is sent to when it is retrieved; a SHARE
35 * or LEND handle is active from when it is sent to when it is reclaimed.
36 */
37#define MAX_MEM_SHARES 100
38
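/*
 * These descriptors are exchanged with other endpoints through the shared
 * message buffers, so check at build time that their layout matches what the
 * code in this file assumes.
 */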
Andrew Walbranc34c7b22020-02-28 11:16:59 +000039static_assert(sizeof(struct spci_memory_region_constituent) % 16 == 0,
40 "struct spci_memory_region_constituent must be a multiple of 16 "
41 "bytes long.");
Andrew Walbrana65a1322020-04-06 19:32:32 +010042static_assert(sizeof(struct spci_composite_memory_region) % 16 == 0,
43 "struct spci_composite_memory_region must be a multiple of 16 "
Andrew Walbranc34c7b22020-02-28 11:16:59 +000044 "bytes long.");
Andrew Walbrana65a1322020-04-06 19:32:32 +010045static_assert(sizeof(struct spci_memory_region_attributes) == 4,
46 "struct spci_memory_region_attributes must be 4bytes long.");
47static_assert(sizeof(struct spci_memory_access) % 16 == 0,
48 "struct spci_memory_access must be a multiple of 16 bytes long.");
Andrew Walbranc34c7b22020-02-28 11:16:59 +000049static_assert(sizeof(struct spci_memory_region) % 16 == 0,
50 "struct spci_memory_region must be a multiple of 16 bytes long.");
Andrew Walbrana65a1322020-04-06 19:32:32 +010051static_assert(sizeof(struct spci_mem_relinquish) % 16 == 0,
52 "struct spci_mem_relinquish must be a multiple of 16 "
Andrew Walbran5de9c3d2020-02-10 13:35:29 +000053 "bytes long.");
Andrew Walbranc34c7b22020-02-28 11:16:59 +000054
Andrew Walbran5de9c3d2020-02-10 13:35:29 +000055struct spci_memory_share_state {
56 /**
57 * The memory region being shared, or NULL if this share state is
58 * unallocated.
59 */
60 struct spci_memory_region *memory_region;
61
62 /**
63 * The SPCI function used for sharing the memory. Must be one of
64 * SPCI_MEM_DONATE_32, SPCI_MEM_LEND_32 or SPCI_MEM_SHARE_32 if the
65 * share state is allocated, or 0.
66 */
67 uint32_t share_func;
68
69 /**
70 * Whether each recipient has retrieved the memory region yet. The order
 71 * of this array matches the order of the memory access descriptors in the
 72 * memory region descriptor. Any entries beyond the receiver_count will
73 * always be false.
74 */
75 bool retrieved[MAX_MEM_SHARE_RECIPIENTS];
Andrew Walbran475c1452020-02-07 13:22:22 +000076};
77
Andrew Walbran5de9c3d2020-02-10 13:35:29 +000078/**
79 * Encapsulates the set of share states while the `share_states_lock` is held.
80 */
81struct share_states_locked {
82 struct spci_memory_share_state *share_states;
83};
84
85/**
86 * All access to members of a `struct spci_memory_share_state` must be guarded
87 * by this lock.
88 */
89static struct spinlock share_states_lock_instance = SPINLOCK_INIT;
90static struct spci_memory_share_state share_states[MAX_MEM_SHARES];
91
92/**
93 * Initialises the next available `struct spci_memory_share_state` and sets
 94 * `handle` to its handle. Returns true on success or false if none are
95 * available.
96 */
97static bool allocate_share_state(uint32_t share_func,
98 struct spci_memory_region *memory_region,
99 spci_memory_handle_t *handle)
100{
Andrew Walbrana65a1322020-04-06 19:32:32 +0100101 uint64_t i;
Andrew Walbran5de9c3d2020-02-10 13:35:29 +0000102
103 CHECK(memory_region != NULL);
104
105 sl_lock(&share_states_lock_instance);
106 for (i = 0; i < MAX_MEM_SHARES; ++i) {
107 if (share_states[i].share_func == 0) {
108 uint32_t j;
109 struct spci_memory_share_state *allocated_state =
110 &share_states[i];
111 allocated_state->share_func = share_func;
112 allocated_state->memory_region = memory_region;
113 for (j = 0; j < MAX_MEM_SHARE_RECIPIENTS; ++j) {
114 allocated_state->retrieved[j] = false;
115 }
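			/*
			 * The handle encodes the index of the share state in
			 * the array, tagged so it can be told apart from
			 * handles allocated elsewhere (e.g. by the TEE).
			 */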
116 *handle = i | SPCI_MEMORY_HANDLE_ALLOCATOR_HYPERVISOR;
117 sl_unlock(&share_states_lock_instance);
118 return true;
119 }
120 }
121
122 sl_unlock(&share_states_lock_instance);
123 return false;
124}
125
126/** Locks the share states lock. */
127struct share_states_locked share_states_lock(void)
128{
129 sl_lock(&share_states_lock_instance);
130
131 return (struct share_states_locked){.share_states = share_states};
132}
133
134/** Unlocks the share states lock. */
135static void share_states_unlock(struct share_states_locked *share_states)
136{
137 CHECK(share_states->share_states != NULL);
138 share_states->share_states = NULL;
139 sl_unlock(&share_states_lock_instance);
140}
141
142/**
 143 * If the given handle is a valid handle for an allocated share state then
 144 * initialises `share_state_ret` to point to the share state and returns true.
 145 * Otherwise returns false. The caller must already hold the share states lock.
146 */
147static bool get_share_state(struct share_states_locked share_states,
148 spci_memory_handle_t handle,
149 struct spci_memory_share_state **share_state_ret)
150{
151 struct spci_memory_share_state *share_state;
152 uint32_t index = handle & ~SPCI_MEMORY_HANDLE_ALLOCATOR_MASK;
153
154 if (index >= MAX_MEM_SHARES) {
155 return false;
156 }
157
158 share_state = &share_states.share_states[index];
159
160 if (share_state->share_func == 0) {
161 return false;
162 }
163
164 *share_state_ret = share_state;
165 return true;
166}
167
168/** Marks a share state as unallocated. */
169static void share_state_free(struct share_states_locked share_states,
170 struct spci_memory_share_state *share_state,
171 struct mpool *page_pool)
172{
173 CHECK(share_states.share_states != NULL);
174 share_state->share_func = 0;
175 mpool_free(page_pool, share_state->memory_region);
176 share_state->memory_region = NULL;
177}
178
179/**
180 * Marks the share state with the given handle as unallocated, or returns false
181 * if the handle was invalid.
182 */
183static bool share_state_free_handle(spci_memory_handle_t handle,
184 struct mpool *page_pool)
185{
186 struct share_states_locked share_states = share_states_lock();
187 struct spci_memory_share_state *share_state;
188
189 if (!get_share_state(share_states, handle, &share_state)) {
190 share_states_unlock(&share_states);
191 return false;
192 }
193
194 share_state_free(share_states, share_state, page_pool);
195 share_states_unlock(&share_states);
196
197 return true;
198}
199
200static void dump_memory_region(struct spci_memory_region *memory_region)
201{
202 uint32_t i;
203
204 if (LOG_LEVEL < LOG_LEVEL_VERBOSE) {
205 return;
206 }
207
Andrew Walbrana65a1322020-04-06 19:32:32 +0100208 dlog("from VM %d, attributes %#x, flags %#x, handle %#x, tag %d, to %d "
209 "recipients [",
210 memory_region->sender, memory_region->attributes,
211 memory_region->flags, memory_region->handle, memory_region->tag,
212 memory_region->receiver_count);
213 for (i = 0; i < memory_region->receiver_count; ++i) {
Andrew Walbran5de9c3d2020-02-10 13:35:29 +0000214 if (i != 0) {
215 dlog(", ");
216 }
Andrew Walbrana65a1322020-04-06 19:32:32 +0100217 dlog("VM %d: %#x (offset %d)",
218 memory_region->receivers[i].receiver_permissions.receiver,
219 memory_region->receivers[i]
220 .receiver_permissions.permissions,
221 memory_region->receivers[i]
222 .composite_memory_region_offset);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +0000223 }
224 dlog("]");
225}
226
227static void dump_share_states(void)
228{
229 uint32_t i;
230
231 if (LOG_LEVEL < LOG_LEVEL_VERBOSE) {
232 return;
233 }
234
235 dlog("Current share states:\n");
236 sl_lock(&share_states_lock_instance);
237 for (i = 0; i < MAX_MEM_SHARES; ++i) {
238 if (share_states[i].share_func != 0) {
239 dlog("%d: ", i);
240 switch (share_states[i].share_func) {
241 case SPCI_MEM_SHARE_32:
242 dlog("SHARE");
243 break;
244 case SPCI_MEM_LEND_32:
245 dlog("LEND");
246 break;
247 case SPCI_MEM_DONATE_32:
248 dlog("DONATE");
249 break;
250 default:
251 dlog("invalid share_func %#x",
252 share_states[i].share_func);
253 }
254 dlog(" (");
255 dump_memory_region(share_states[i].memory_region);
256 if (share_states[i].retrieved[0]) {
257 dlog("): retrieved\n");
258 } else {
259 dlog("): not retrieved\n");
260 }
261 break;
262 }
263 }
264 sl_unlock(&share_states_lock_instance);
265}
266
Andrew Walbran475c1452020-02-07 13:22:22 +0000267/* TODO: Add device attributes: GRE, cacheability, shareability. */
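/**
 * Converts SPCI memory access permissions into Hafnium memory mode bits.
 * Permissions left unspecified by the sender default to the most permissive
 * interpretation (read-write, executable).
 */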
Andrew Walbrana65a1322020-04-06 19:32:32 +0100268static inline uint32_t spci_memory_permissions_to_mode(
269 spci_memory_access_permissions_t permissions)
Andrew Walbran475c1452020-02-07 13:22:22 +0000270{
271 uint32_t mode = 0;
272
Andrew Walbrana65a1322020-04-06 19:32:32 +0100273 switch (spci_get_data_access_attr(permissions)) {
274 case SPCI_DATA_ACCESS_RO:
Andrew Walbran475c1452020-02-07 13:22:22 +0000275 mode = MM_MODE_R;
276 break;
Andrew Walbrana65a1322020-04-06 19:32:32 +0100277 case SPCI_DATA_ACCESS_RW:
278 case SPCI_DATA_ACCESS_NOT_SPECIFIED:
Andrew Walbran475c1452020-02-07 13:22:22 +0000279 mode = MM_MODE_R | MM_MODE_W;
280 break;
Andrew Walbrana65a1322020-04-06 19:32:32 +0100281 case SPCI_DATA_ACCESS_RESERVED:
282 panic("Tried to convert SPCI_DATA_ACCESS_RESERVED.");
283 }
284
285 switch (spci_get_instruction_access_attr(permissions)) {
286 case SPCI_INSTRUCTION_ACCESS_NX:
Andrew Walbran475c1452020-02-07 13:22:22 +0000287 break;
Andrew Walbrana65a1322020-04-06 19:32:32 +0100288 case SPCI_INSTRUCTION_ACCESS_X:
289 case SPCI_INSTRUCTION_ACCESS_NOT_SPECIFIED:
290 mode |= MM_MODE_X;
291 break;
292 case SPCI_INSTRUCTION_ACCESS_RESERVED:
293 panic("Tried to convert SPCI_INSTRUCTION_ACCESS_RESVERVED.");
Andrew Walbran475c1452020-02-07 13:22:22 +0000294 }
295
296 return mode;
297}
298
Jose Marinho75509b42019-04-09 09:34:59 +0100299/**
Andrew Walbran5de9c3d2020-02-10 13:35:29 +0000300 * Get the current mode in the stage-2 page table of the given vm of all the
301 * pages in the given constituents, if they all have the same mode, or return
Andrew Walbrana65a1322020-04-06 19:32:32 +0100302 * an appropriate SPCI error if not.
Jose Marinho75509b42019-04-09 09:34:59 +0100303 */
Andrew Walbrana65a1322020-04-06 19:32:32 +0100304static struct spci_value constituents_get_mode(
Andrew Walbran5de9c3d2020-02-10 13:35:29 +0000305 struct vm_locked vm, uint32_t *orig_mode,
Andrew Walbranf4b51af2020-02-03 14:44:54 +0000306 struct spci_memory_region_constituent *constituents,
Andrew Walbran5de9c3d2020-02-10 13:35:29 +0000307 uint32_t constituent_count)
Jose Marinho75509b42019-04-09 09:34:59 +0100308{
Jose Marinho7fbbf2e2019-08-05 13:19:58 +0100309 uint32_t i;
Jose Marinho75509b42019-04-09 09:34:59 +0100310
Andrew Walbranf4b51af2020-02-03 14:44:54 +0000311 if (constituent_count == 0) {
Jose Marinho7fbbf2e2019-08-05 13:19:58 +0100312 /*
Andrew Walbran5de9c3d2020-02-10 13:35:29 +0000313 * Fail if there are no constituents. Otherwise we would get an
314 * uninitialised *orig_mode.
Jose Marinho7fbbf2e2019-08-05 13:19:58 +0100315 */
Andrew Walbrana65a1322020-04-06 19:32:32 +0100316 return spci_error(SPCI_INVALID_PARAMETERS);
Jose Marinho75509b42019-04-09 09:34:59 +0100317 }
318
Andrew Walbranf4b51af2020-02-03 14:44:54 +0000319 for (i = 0; i < constituent_count; ++i) {
Andrew Walbrana65a1322020-04-06 19:32:32 +0100320 ipaddr_t begin = ipa_init(constituents[i].address);
Jose Marinho7fbbf2e2019-08-05 13:19:58 +0100321 size_t size = constituents[i].page_count * PAGE_SIZE;
322 ipaddr_t end = ipa_add(begin, size);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +0000323 uint32_t current_mode;
Jose Marinho7fbbf2e2019-08-05 13:19:58 +0100324
325 /* Fail if addresses are not page-aligned. */
326 if (!is_aligned(ipa_addr(begin), PAGE_SIZE) ||
327 !is_aligned(ipa_addr(end), PAGE_SIZE)) {
Andrew Walbrana65a1322020-04-06 19:32:32 +0100328 return spci_error(SPCI_INVALID_PARAMETERS);
Jose Marinho7fbbf2e2019-08-05 13:19:58 +0100329 }
330
331 /*
332 * Ensure that this constituent memory range is all mapped with
333 * the same mode.
334 */
Andrew Walbran5de9c3d2020-02-10 13:35:29 +0000335 if (!mm_vm_get_mode(&vm.vm->ptable, begin, end,
336 &current_mode)) {
Andrew Walbrana65a1322020-04-06 19:32:32 +0100337 return spci_error(SPCI_DENIED);
Jose Marinho7fbbf2e2019-08-05 13:19:58 +0100338 }
339
340 /*
341 * Ensure that all constituents are mapped with the same mode.
342 */
343 if (i == 0) {
Andrew Walbran5de9c3d2020-02-10 13:35:29 +0000344 *orig_mode = current_mode;
345 } else if (current_mode != *orig_mode) {
Andrew Walbrana65a1322020-04-06 19:32:32 +0100346 return spci_error(SPCI_DENIED);
Jose Marinho7fbbf2e2019-08-05 13:19:58 +0100347 }
Jose Marinho75509b42019-04-09 09:34:59 +0100348 }
349
Andrew Walbrana65a1322020-04-06 19:32:32 +0100350 return (struct spci_value){.func = SPCI_SUCCESS_32};
Andrew Walbran5de9c3d2020-02-10 13:35:29 +0000351}
352
353/**
354 * Verify that all pages have the same mode, that the starting mode
355 * constitutes a valid state and obtain the next mode to apply
356 * to the sending VM.
357 *
358 * Returns:
Andrew Walbrana65a1322020-04-06 19:32:32 +0100359 * 1) SPCI_DENIED if a state transition was not found;
360 * 2) SPCI_DENIED if the pages being shared do not have the same mode within
361 * the <from> VM;
362 * 3) SPCI_INVALID_PARAMETERS if the beginning and end IPAs are not page
363 * aligned;
364 * 4) SPCI_INVALID_PARAMETERS if the requested share type was not handled.
365 * Or SPCI_SUCCESS on success.
Andrew Walbran5de9c3d2020-02-10 13:35:29 +0000366 */
Andrew Walbrana65a1322020-04-06 19:32:32 +0100367static struct spci_value spci_send_check_transition(
368 struct vm_locked from, uint32_t share_func,
369 spci_memory_access_permissions_t permissions, uint32_t *orig_from_mode,
Andrew Walbran5de9c3d2020-02-10 13:35:29 +0000370 struct spci_memory_region_constituent *constituents,
371 uint32_t constituent_count, uint32_t *from_mode)
372{
373 const uint32_t state_mask =
374 MM_MODE_INVALID | MM_MODE_UNOWNED | MM_MODE_SHARED;
Andrew Walbrana65a1322020-04-06 19:32:32 +0100375 const uint32_t required_from_mode =
376 spci_memory_permissions_to_mode(permissions);
377 struct spci_value ret;
Andrew Walbran5de9c3d2020-02-10 13:35:29 +0000378
Andrew Walbrana65a1322020-04-06 19:32:32 +0100379 ret = constituents_get_mode(from, orig_from_mode, constituents,
380 constituent_count);
381 if (ret.func != SPCI_SUCCESS_32) {
382 return ret;
Andrew Scullb5f49e02019-10-02 13:20:47 +0100383 }
384
Andrew Walbran5de9c3d2020-02-10 13:35:29 +0000385 /* Ensure the address range is normal memory and not a device. */
386 if (*orig_from_mode & MM_MODE_D) {
387 dlog_verbose("Can't share device memory (mode is %#x).\n",
388 *orig_from_mode);
Andrew Walbrana65a1322020-04-06 19:32:32 +0100389 return spci_error(SPCI_DENIED);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +0000390 }
391
392 /*
393 * Ensure the sender is the owner and has exclusive access to the
394 * memory.
395 */
396 if ((*orig_from_mode & state_mask) != 0) {
Andrew Walbrana65a1322020-04-06 19:32:32 +0100397 return spci_error(SPCI_DENIED);
398 }
399
400 if ((*orig_from_mode & required_from_mode) != required_from_mode) {
401 dlog_verbose(
402 "Sender tried to send memory with permissions which "
403 "required mode %#x but only had %#x itself.\n",
404 required_from_mode, *orig_from_mode);
405 return spci_error(SPCI_DENIED);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +0000406 }
407
408 /* Find the appropriate new mode. */
409 *from_mode = ~state_mask & *orig_from_mode;
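	/*
	 * From the sender's point of view: donated memory is no longer owned
	 * or accessible, lent memory is still owned but no longer accessible,
	 * and shared memory remains owned and accessible but is marked as
	 * shared.
	 */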
Andrew Walbrane7ad3c02019-12-24 17:03:04 +0000410 switch (share_func) {
411 case SPCI_MEM_DONATE_32:
Andrew Walbran5de9c3d2020-02-10 13:35:29 +0000412 *from_mode |= MM_MODE_INVALID | MM_MODE_UNOWNED;
Jose Marinho75509b42019-04-09 09:34:59 +0100413 break;
414
Andrew Walbrane7ad3c02019-12-24 17:03:04 +0000415 case SPCI_MEM_LEND_32:
Andrew Walbran5de9c3d2020-02-10 13:35:29 +0000416 *from_mode |= MM_MODE_INVALID;
Andrew Walbran648fc3e2019-10-22 16:23:05 +0100417 break;
418
Andrew Walbrane7ad3c02019-12-24 17:03:04 +0000419 case SPCI_MEM_SHARE_32:
Andrew Walbran5de9c3d2020-02-10 13:35:29 +0000420 *from_mode |= MM_MODE_SHARED;
Jose Marinho56c25732019-05-20 09:48:53 +0100421 break;
422
Jose Marinho75509b42019-04-09 09:34:59 +0100423 default:
Andrew Walbrana65a1322020-04-06 19:32:32 +0100424 return spci_error(SPCI_INVALID_PARAMETERS);
Jose Marinho75509b42019-04-09 09:34:59 +0100425 }
426
Andrew Walbrana65a1322020-04-06 19:32:32 +0100427 return (struct spci_value){.func = SPCI_SUCCESS_32};
Andrew Walbran5de9c3d2020-02-10 13:35:29 +0000428}
429
Andrew Walbrana65a1322020-04-06 19:32:32 +0100430static struct spci_value spci_relinquish_check_transition(
Andrew Walbran5de9c3d2020-02-10 13:35:29 +0000431 struct vm_locked from, uint32_t *orig_from_mode,
432 struct spci_memory_region_constituent *constituents,
433 uint32_t constituent_count, uint32_t *from_mode)
434{
435 const uint32_t state_mask =
436 MM_MODE_INVALID | MM_MODE_UNOWNED | MM_MODE_SHARED;
437 uint32_t orig_from_state;
Andrew Walbrana65a1322020-04-06 19:32:32 +0100438 struct spci_value ret;
Andrew Walbran5de9c3d2020-02-10 13:35:29 +0000439
Andrew Walbrana65a1322020-04-06 19:32:32 +0100440 ret = constituents_get_mode(from, orig_from_mode, constituents,
441 constituent_count);
442 if (ret.func != SPCI_SUCCESS_32) {
443 return ret;
Andrew Walbran5de9c3d2020-02-10 13:35:29 +0000444 }
445
446 /* Ensure the address range is normal memory and not a device. */
447 if (*orig_from_mode & MM_MODE_D) {
448 dlog_verbose("Can't relinquish device memory (mode is %#x).\n",
449 *orig_from_mode);
Andrew Walbrana65a1322020-04-06 19:32:32 +0100450 return spci_error(SPCI_DENIED);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +0000451 }
452
453 /*
454 * Ensure the relinquishing VM is not the owner but has access to the
455 * memory.
456 */
457 orig_from_state = *orig_from_mode & state_mask;
458 if ((orig_from_state & ~MM_MODE_SHARED) != MM_MODE_UNOWNED) {
459 dlog_verbose(
460 "Tried to relinquish memory in state %#x (masked %#x "
461 "but "
462 "should be %#x).\n",
463 *orig_from_mode, orig_from_state, MM_MODE_UNOWNED);
Andrew Walbrana65a1322020-04-06 19:32:32 +0100464 return spci_error(SPCI_DENIED);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +0000465 }
466
467 /* Find the appropriate new mode. */
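	/*
	 * The relinquishing borrower simply loses access: the pages are
	 * unmapped from its stage-2 tables so that the owner can later
	 * reclaim them.
	 */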
468 *from_mode = (~state_mask & *orig_from_mode) | MM_MODE_UNMAPPED_MASK;
469
Andrew Walbrana65a1322020-04-06 19:32:32 +0100470 return (struct spci_value){.func = SPCI_SUCCESS_32};
Andrew Walbran5de9c3d2020-02-10 13:35:29 +0000471}
472
473/**
474 * Verify that all pages have the same mode, that the starting mode
475 * constitutes a valid state and obtain the next mode to apply
476 * to the retrieving VM.
477 *
478 * Returns:
Andrew Walbrana65a1322020-04-06 19:32:32 +0100479 * 1) SPCI_DENIED if a state transition was not found;
480 * 2) SPCI_DENIED if the pages being shared do not have the same mode within
481 * the <to> VM;
482 * 3) SPCI_INVALID_PARAMETERS if the beginning and end IPAs are not page
483 * aligned;
484 * 4) SPCI_INVALID_PARAMETERS if the requested share type was not handled.
485 * Or SPCI_SUCCESS on success.
Andrew Walbran5de9c3d2020-02-10 13:35:29 +0000486 */
Andrew Walbrana65a1322020-04-06 19:32:32 +0100487static struct spci_value spci_retrieve_check_transition(
Andrew Walbran5de9c3d2020-02-10 13:35:29 +0000488 struct vm_locked to, uint32_t share_func,
489 struct spci_memory_region_constituent *constituents,
490 uint32_t constituent_count, uint32_t memory_to_attributes,
491 uint32_t *to_mode)
492{
493 uint32_t orig_to_mode;
Andrew Walbrana65a1322020-04-06 19:32:32 +0100494 struct spci_value ret;
Andrew Walbran5de9c3d2020-02-10 13:35:29 +0000495
Andrew Walbrana65a1322020-04-06 19:32:32 +0100496 ret = constituents_get_mode(to, &orig_to_mode, constituents,
497 constituent_count);
498 if (ret.func != SPCI_SUCCESS_32) {
499 return ret;
Andrew Walbran5de9c3d2020-02-10 13:35:29 +0000500 }
501
502 if (share_func == SPCI_MEM_RECLAIM_32) {
503 const uint32_t state_mask =
504 MM_MODE_INVALID | MM_MODE_UNOWNED | MM_MODE_SHARED;
505 uint32_t orig_to_state = orig_to_mode & state_mask;
506
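		/*
		 * The caller must still own the pages, either with no access
		 * (they were lent) or with shared access (they were shared);
		 * memory that was donated cannot be reclaimed.
		 */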
507 if (orig_to_state != MM_MODE_INVALID &&
508 orig_to_state != MM_MODE_SHARED) {
Andrew Walbrana65a1322020-04-06 19:32:32 +0100509 return spci_error(SPCI_DENIED);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +0000510 }
511 } else {
512 /*
513 * Ensure the retriever has the expected state. We don't care
514 * about the MM_MODE_SHARED bit; either with or without it set
515 * are both valid representations of the !O-NA state.
516 */
517 if ((orig_to_mode & MM_MODE_UNMAPPED_MASK) !=
518 MM_MODE_UNMAPPED_MASK) {
Andrew Walbrana65a1322020-04-06 19:32:32 +0100519 return spci_error(SPCI_DENIED);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +0000520 }
521 }
522
523 /* Find the appropriate new mode. */
524 *to_mode = memory_to_attributes;
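	/*
	 * From the retriever's point of view: donated or reclaimed memory is
	 * fully owned, lent memory is accessible but not owned, and shared
	 * memory is accessible, not owned and marked as shared.
	 */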
525 switch (share_func) {
526 case SPCI_MEM_DONATE_32:
527 *to_mode |= 0;
528 break;
529
530 case SPCI_MEM_LEND_32:
531 *to_mode |= MM_MODE_UNOWNED;
532 break;
533
534 case SPCI_MEM_SHARE_32:
535 *to_mode |= MM_MODE_UNOWNED | MM_MODE_SHARED;
536 break;
537
538 case SPCI_MEM_RECLAIM_32:
539 *to_mode |= 0;
540 break;
541
542 default:
Andrew Walbrana65a1322020-04-06 19:32:32 +0100543 return spci_error(SPCI_INVALID_PARAMETERS);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +0000544 }
545
Andrew Walbrana65a1322020-04-06 19:32:32 +0100546 return (struct spci_value){.func = SPCI_SUCCESS_32};
Jose Marinho75509b42019-04-09 09:34:59 +0100547}
Jose Marinho09b1db82019-08-08 09:16:59 +0100548
549/**
550 * Updates a VM's page table such that the given set of physical address ranges
551 * are mapped in the address space at the corresponding address ranges, in the
552 * mode provided.
553 *
554 * If commit is false, the page tables will be allocated from the mpool but no
555 * mappings will actually be updated. This function must always be called first
556 * with commit false to check that it will succeed before calling with commit
557 * true, to avoid leaving the page table in a half-updated state. To make a
558 * series of changes atomically you can call them all with commit false before
559 * calling them all with commit true.
560 *
561 * mm_vm_defrag should always be called after a series of page table updates,
562 * whether they succeed or fail.
563 *
564 * Returns true on success, or false if the update failed and no changes were
565 * made to memory mappings.
566 */
567static bool spci_region_group_identity_map(
Andrew Walbranf4b51af2020-02-03 14:44:54 +0000568 struct vm_locked vm_locked,
569 struct spci_memory_region_constituent *constituents,
570 uint32_t constituent_count, int mode, struct mpool *ppool, bool commit)
Jose Marinho09b1db82019-08-08 09:16:59 +0100571{
Jose Marinho09b1db82019-08-08 09:16:59 +0100572 /* Iterate over the memory region constituents. */
Andrew Walbranf4b51af2020-02-03 14:44:54 +0000573 for (uint32_t index = 0; index < constituent_count; index++) {
Jose Marinho09b1db82019-08-08 09:16:59 +0100574 size_t size = constituents[index].page_count * PAGE_SIZE;
Andrew Walbrana65a1322020-04-06 19:32:32 +0100575 paddr_t pa_begin =
576 pa_from_ipa(ipa_init(constituents[index].address));
Jose Marinho09b1db82019-08-08 09:16:59 +0100577 paddr_t pa_end = pa_add(pa_begin, size);
578
579 if (commit) {
Andrew Scull3c257452019-11-26 13:32:50 +0000580 vm_identity_commit(vm_locked, pa_begin, pa_end, mode,
581 ppool, NULL);
582 } else if (!vm_identity_prepare(vm_locked, pa_begin, pa_end,
583 mode, ppool)) {
Jose Marinho09b1db82019-08-08 09:16:59 +0100584 return false;
585 }
586 }
587
588 return true;
589}
590
591/**
592 * Clears a region of physical memory by overwriting it with zeros. The data is
593 * flushed from the cache so the memory has been cleared across the system.
594 */
595static bool clear_memory(paddr_t begin, paddr_t end, struct mpool *ppool)
596{
597 /*
Fuad Tabbaed294af2019-12-20 10:43:01 +0000598 * TODO: change this to a CPU local single page window rather than a
Jose Marinho09b1db82019-08-08 09:16:59 +0100599 * global mapping of the whole range. Such an approach will limit
600 * the changes to stage-1 tables and will allow only local
601 * invalidation.
602 */
603 bool ret;
604 struct mm_stage1_locked stage1_locked = mm_lock_stage1();
605 void *ptr =
606 mm_identity_map(stage1_locked, begin, end, MM_MODE_W, ppool);
607 size_t size = pa_difference(begin, end);
608
609 if (!ptr) {
610 /* TODO: partial defrag of failed range. */
611 /* Recover any memory consumed in failed mapping. */
612 mm_defrag(stage1_locked, ppool);
613 goto fail;
614 }
615
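	/*
	 * Zero the mapped range and flush the data cache so the memory is
	 * cleared across the system, not just in this CPU's cache.
	 */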
616 memset_s(ptr, size, 0, size);
617 arch_mm_flush_dcache(ptr, size);
618 mm_unmap(stage1_locked, begin, end, ppool);
619
620 ret = true;
621 goto out;
622
623fail:
624 ret = false;
625
626out:
627 mm_unlock_stage1(&stage1_locked);
628
629 return ret;
630}
631
632/**
 633 * Clears each constituent of a memory region by overwriting it with zeros.
 634 * The data is flushed from the cache so the memory has been cleared across
 635 * the system.
635 */
Andrew Walbranf4b51af2020-02-03 14:44:54 +0000636static bool spci_clear_memory_constituents(
637 struct spci_memory_region_constituent *constituents,
Andrew Walbran475c1452020-02-07 13:22:22 +0000638 uint32_t constituent_count, struct mpool *page_pool)
Jose Marinho09b1db82019-08-08 09:16:59 +0100639{
640 struct mpool local_page_pool;
Jose Marinho09b1db82019-08-08 09:16:59 +0100641 struct mm_stage1_locked stage1_locked;
642 bool ret = false;
643
644 /*
645 * Create a local pool so any freed memory can't be used by another
646 * thread. This is to ensure each constituent that is mapped can be
647 * unmapped again afterwards.
648 */
Andrew Walbran475c1452020-02-07 13:22:22 +0000649 mpool_init_with_fallback(&local_page_pool, page_pool);
Jose Marinho09b1db82019-08-08 09:16:59 +0100650
651 /* Iterate over the memory region constituents. */
Andrew Walbranf4b51af2020-02-03 14:44:54 +0000652 for (uint32_t i = 0; i < constituent_count; ++i) {
Jose Marinho09b1db82019-08-08 09:16:59 +0100653 size_t size = constituents[i].page_count * PAGE_SIZE;
Andrew Walbrana65a1322020-04-06 19:32:32 +0100654 paddr_t begin = pa_from_ipa(ipa_init(constituents[i].address));
Jose Marinho09b1db82019-08-08 09:16:59 +0100655 paddr_t end = pa_add(begin, size);
656
657 if (!clear_memory(begin, end, &local_page_pool)) {
658 /*
659 * api_clear_memory will defrag on failure, so no need
660 * to do it here.
661 */
662 goto out;
663 }
664 }
665
666 /*
667 * Need to defrag after clearing, as it may have added extra mappings to
668 * the stage 1 page table.
669 */
670 stage1_locked = mm_lock_stage1();
671 mm_defrag(stage1_locked, &local_page_pool);
672 mm_unlock_stage1(&stage1_locked);
673
674 ret = true;
675
676out:
677 mpool_fini(&local_page_pool);
678 return ret;
679}
680
681/**
Andrew Walbran5de9c3d2020-02-10 13:35:29 +0000682 * Validates and prepares memory to be sent from the calling VM to another.
Jose Marinho09b1db82019-08-08 09:16:59 +0100683 *
Andrew Walbran5de9c3d2020-02-10 13:35:29 +0000684 * This function requires the calling context to hold the <from> VM lock.
Jose Marinho09b1db82019-08-08 09:16:59 +0100685 *
686 * Returns:
Andrew Walbran5de9c3d2020-02-10 13:35:29 +0000687 * In case of error, one of the following values is returned:
Jose Marinho09b1db82019-08-08 09:16:59 +0100688 * 1) SPCI_INVALID_PARAMETERS - The endpoint provided parameters were
689 * erroneous;
690 * 2) SPCI_NO_MEMORY - Hafnium did not have sufficient memory to complete
691 * the request.
Andrew Walbrana65a1322020-04-06 19:32:32 +0100692 * 3) SPCI_DENIED - The sender doesn't have sufficient access to send the
693 * memory with the given permissions.
Jose Marinho09b1db82019-08-08 09:16:59 +0100694 * Success is indicated by SPCI_SUCCESS.
695 */
Andrew Walbran5de9c3d2020-02-10 13:35:29 +0000696static struct spci_value spci_send_memory(
697 struct vm_locked from_locked,
Andrew Walbranf4b51af2020-02-03 14:44:54 +0000698 struct spci_memory_region_constituent *constituents,
Andrew Walbran5de9c3d2020-02-10 13:35:29 +0000699 uint32_t constituent_count, uint32_t share_func,
Andrew Walbrana65a1322020-04-06 19:32:32 +0100700 spci_memory_access_permissions_t permissions, struct mpool *page_pool,
701 bool clear)
Jose Marinho09b1db82019-08-08 09:16:59 +0100702{
Jose Marinho09b1db82019-08-08 09:16:59 +0100703 struct vm *from = from_locked.vm;
704 uint32_t orig_from_mode;
705 uint32_t from_mode;
Jose Marinho09b1db82019-08-08 09:16:59 +0100706 struct mpool local_page_pool;
707 struct spci_value ret;
Jose Marinho09b1db82019-08-08 09:16:59 +0100708
709 /*
Andrew Walbrana65a1322020-04-06 19:32:32 +0100710 * Make sure constituents are properly aligned to a 64-bit boundary. If
711 * not we would get alignment faults trying to read (64-bit) values.
Jose Marinho09b1db82019-08-08 09:16:59 +0100712 */
Andrew Walbrana65a1322020-04-06 19:32:32 +0100713 if (!is_aligned(constituents, 8)) {
Jose Marinho09b1db82019-08-08 09:16:59 +0100714 return spci_error(SPCI_INVALID_PARAMETERS);
715 }
716
717 /*
Andrew Walbran5de9c3d2020-02-10 13:35:29 +0000718 * Check if the state transition is lawful for the sender, ensure that
719 * all constituents of a memory region being shared are at the same
720 * state.
Jose Marinho09b1db82019-08-08 09:16:59 +0100721 */
Andrew Walbrana65a1322020-04-06 19:32:32 +0100722 ret = spci_send_check_transition(from_locked, share_func, permissions,
723 &orig_from_mode, constituents,
724 constituent_count, &from_mode);
725 if (ret.func != SPCI_SUCCESS_32) {
726 return ret;
Jose Marinho09b1db82019-08-08 09:16:59 +0100727 }
728
729 /*
730 * Create a local pool so any freed memory can't be used by another
731 * thread. This is to ensure the original mapping can be restored if the
732 * clear fails.
733 */
Andrew Walbran475c1452020-02-07 13:22:22 +0000734 mpool_init_with_fallback(&local_page_pool, page_pool);
Jose Marinho09b1db82019-08-08 09:16:59 +0100735
736 /*
Andrew Walbran5de9c3d2020-02-10 13:35:29 +0000737 * First reserve all required memory for the new page table entries
738 * without committing, to make sure the entire operation will succeed
739 * without exhausting the page pool.
Jose Marinho09b1db82019-08-08 09:16:59 +0100740 */
Andrew Walbranf4b51af2020-02-03 14:44:54 +0000741 if (!spci_region_group_identity_map(from_locked, constituents,
742 constituent_count, from_mode,
Andrew Walbran475c1452020-02-07 13:22:22 +0000743 page_pool, false)) {
Jose Marinho09b1db82019-08-08 09:16:59 +0100744 /* TODO: partial defrag of failed range. */
745 ret = spci_error(SPCI_NO_MEMORY);
746 goto out;
747 }
748
749 /*
Andrew Walbran5de9c3d2020-02-10 13:35:29 +0000750 * Update the mapping for the sender. This won't allocate because the
751 * transaction was already prepared above, but may free pages in the
752 * case that a whole block is being unmapped that was previously
753 * partially mapped.
Jose Marinho09b1db82019-08-08 09:16:59 +0100754 */
Andrew Walbranf4b51af2020-02-03 14:44:54 +0000755 CHECK(spci_region_group_identity_map(from_locked, constituents,
756 constituent_count, from_mode,
757 &local_page_pool, true));
Jose Marinho09b1db82019-08-08 09:16:59 +0100758
759 /* Clear the memory so no VM or device can see the previous contents. */
Andrew Walbranf4b51af2020-02-03 14:44:54 +0000760 if (clear && !spci_clear_memory_constituents(
Andrew Walbran475c1452020-02-07 13:22:22 +0000761 constituents, constituent_count, page_pool)) {
Jose Marinho09b1db82019-08-08 09:16:59 +0100762 /*
763 * On failure, roll back by returning memory to the sender. This
764 * may allocate pages which were previously freed into
765 * `local_page_pool` by the call above, but will never allocate
766 * more pages than that so can never fail.
767 */
Andrew Walbranf4b51af2020-02-03 14:44:54 +0000768 CHECK(spci_region_group_identity_map(
769 from_locked, constituents, constituent_count,
770 orig_from_mode, &local_page_pool, true));
Jose Marinho09b1db82019-08-08 09:16:59 +0100771
772 ret = spci_error(SPCI_NO_MEMORY);
773 goto out;
774 }
775
Andrew Walbran5de9c3d2020-02-10 13:35:29 +0000776 ret = (struct spci_value){.func = SPCI_SUCCESS_32};
777
778out:
779 mpool_fini(&local_page_pool);
780
781 /*
782 * Tidy up the page table by reclaiming failed mappings (if there was an
783 * error) or merging entries into blocks where possible (on success).
784 */
785 mm_vm_defrag(&from->ptable, page_pool);
786
787 return ret;
788}
789
790/**
791 * Validates and maps memory shared from one VM to another.
792 *
793 * This function requires the calling context to hold the <to> lock.
794 *
795 * Returns:
796 * In case of error, one of the following values is returned:
797 * 1) SPCI_INVALID_PARAMETERS - The endpoint provided parameters were
798 * erroneous;
799 * 2) SPCI_NO_MEMORY - Hafnium did not have sufficient memory to complete
800 * the request.
801 * Success is indicated by SPCI_SUCCESS.
802 */
803static struct spci_value spci_retrieve_memory(
804 struct vm_locked to_locked,
805 struct spci_memory_region_constituent *constituents,
806 uint32_t constituent_count, uint32_t memory_to_attributes,
807 uint32_t share_func, bool clear, struct mpool *page_pool)
808{
809 struct vm *to = to_locked.vm;
810 uint32_t to_mode;
811 struct mpool local_page_pool;
812 struct spci_value ret;
813
814 /*
815 * Make sure constituents are properly aligned to a 32-bit boundary. If
816 * not we would get alignment faults trying to read (32-bit) values.
817 */
818 if (!is_aligned(constituents, 4)) {
819 dlog_verbose("Constituents not aligned.\n");
820 return spci_error(SPCI_INVALID_PARAMETERS);
821 }
822
823 /*
824 * Check if the state transition is lawful for the recipient, and ensure
825 * that all constituents of the memory region being retrieved are at the
826 * same state.
827 */
Andrew Walbrana65a1322020-04-06 19:32:32 +0100828 ret = spci_retrieve_check_transition(to_locked, share_func,
829 constituents, constituent_count,
830 memory_to_attributes, &to_mode);
831 if (ret.func != SPCI_SUCCESS_32) {
Andrew Walbran5de9c3d2020-02-10 13:35:29 +0000832 dlog_verbose("Invalid transition.\n");
Andrew Walbrana65a1322020-04-06 19:32:32 +0100833 return ret;
Andrew Walbran5de9c3d2020-02-10 13:35:29 +0000834 }
835
836 /*
837 * Create a local pool so any freed memory can't be used by another
838 * thread. This is to ensure the original mapping can be restored if the
839 * clear fails.
840 */
841 mpool_init_with_fallback(&local_page_pool, page_pool);
842
843 /*
844 * First reserve all required memory for the new page table entries in
845 * the recipient page tables without committing, to make sure the entire
846 * operation will succeed without exhausting the page pool.
847 */
848 if (!spci_region_group_identity_map(to_locked, constituents,
849 constituent_count, to_mode,
850 page_pool, false)) {
851 /* TODO: partial defrag of failed range. */
852 dlog_verbose(
853 "Insufficient memory to update recipient page "
854 "table.\n");
855 ret = spci_error(SPCI_NO_MEMORY);
856 goto out;
857 }
858
859 /* Clear the memory so no VM or device can see the previous contents. */
860 if (clear && !spci_clear_memory_constituents(
861 constituents, constituent_count, page_pool)) {
862 ret = spci_error(SPCI_NO_MEMORY);
863 goto out;
864 }
865
Jose Marinho09b1db82019-08-08 09:16:59 +0100866 /*
867 * Complete the transfer by mapping the memory into the recipient. This
868 * won't allocate because the transaction was already prepared above, so
869 * it doesn't need to use the `local_page_pool`.
870 */
Andrew Walbranf4b51af2020-02-03 14:44:54 +0000871 CHECK(spci_region_group_identity_map(to_locked, constituents,
872 constituent_count, to_mode,
Andrew Walbran475c1452020-02-07 13:22:22 +0000873 page_pool, true));
Jose Marinho09b1db82019-08-08 09:16:59 +0100874
875 ret = (struct spci_value){.func = SPCI_SUCCESS_32};
876
877out:
878 mpool_fini(&local_page_pool);
879
880 /*
Andrew Walbran5de9c3d2020-02-10 13:35:29 +0000881 * Tidy up the page table by reclaiming failed mappings (if there was
Jose Marinho09b1db82019-08-08 09:16:59 +0100882 * an error) or merging entries into blocks where possible (on success).
883 */
Andrew Walbran475c1452020-02-07 13:22:22 +0000884 mm_vm_defrag(&to->ptable, page_pool);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +0000885
886 return ret;
887}
888
Andrew Walbran290b0c92020-02-03 16:37:14 +0000889/**
 890 * Reclaims the given memory from the TEE. To do this, space is first reserved in
891 * the <to> VM's page table, then the reclaim request is sent on to the TEE,
892 * then (if that is successful) the memory is mapped back into the <to> VM's
893 * page table.
894 *
895 * This function requires the calling context to hold the <to> lock.
896 *
897 * Returns:
898 * In case of error, one of the following values is returned:
899 * 1) SPCI_INVALID_PARAMETERS - The endpoint provided parameters were
900 * erroneous;
901 * 2) SPCI_NO_MEMORY - Hafnium did not have sufficient memory to complete
902 * the request.
903 * Success is indicated by SPCI_SUCCESS.
904 */
905static struct spci_value spci_tee_reclaim_memory(
906 struct vm_locked to_locked, spci_memory_handle_t handle,
907 struct spci_memory_region_constituent *constituents,
908 uint32_t constituent_count, uint32_t memory_to_attributes, bool clear,
909 struct mpool *page_pool)
910{
911 struct vm *to = to_locked.vm;
912 uint32_t to_mode;
913 struct mpool local_page_pool;
914 struct spci_value ret;
915 spci_memory_region_flags_t tee_flags;
916
917 /*
918 * Make sure constituents are properly aligned to a 32-bit boundary. If
919 * not we would get alignment faults trying to read (32-bit) values.
920 */
921 if (!is_aligned(constituents, 4)) {
922 dlog_verbose("Constituents not aligned.\n");
923 return spci_error(SPCI_INVALID_PARAMETERS);
924 }
925
926 /*
927 * Check if the state transition is lawful for the recipient, and ensure
928 * that all constituents of the memory region being retrieved are at the
929 * same state.
930 */
931 ret = spci_retrieve_check_transition(to_locked, SPCI_MEM_RECLAIM_32,
932 constituents, constituent_count,
933 memory_to_attributes, &to_mode);
934 if (ret.func != SPCI_SUCCESS_32) {
935 dlog_verbose("Invalid transition.\n");
936 return ret;
937 }
938
939 /*
940 * Create a local pool so any freed memory can't be used by another
941 * thread. This is to ensure the original mapping can be restored if the
942 * clear fails.
943 */
944 mpool_init_with_fallback(&local_page_pool, page_pool);
945
946 /*
947 * First reserve all required memory for the new page table entries in
948 * the recipient page tables without committing, to make sure the entire
949 * operation will succeed without exhausting the page pool.
950 */
951 if (!spci_region_group_identity_map(to_locked, constituents,
952 constituent_count, to_mode,
953 page_pool, false)) {
954 /* TODO: partial defrag of failed range. */
955 dlog_verbose(
956 "Insufficient memory to update recipient page "
957 "table.\n");
958 ret = spci_error(SPCI_NO_MEMORY);
959 goto out;
960 }
961
962 /*
963 * Forward the request to the TEE and see what happens.
964 */
965 tee_flags = 0;
966 if (clear) {
967 tee_flags |= SPCI_MEMORY_REGION_FLAG_CLEAR;
968 }
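	/*
	 * The 64-bit handle is passed to the TEE split across two 32-bit
	 * arguments, low half first, along with the clear flag.
	 */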
969 ret = arch_tee_call(
970 (struct spci_value){.func = SPCI_MEM_RECLAIM_32,
971 .arg1 = (uint32_t)handle,
972 .arg2 = (uint32_t)(handle >> 32),
973 .arg3 = tee_flags});
974
975 if (ret.func != SPCI_SUCCESS_32) {
976 dlog_verbose(
977 "Got %#x (%d) from EL3 in response to "
978 "SPCI_MEM_RECLAIM_32, expected SPCI_SUCCESS_32.\n",
979 ret.func, ret.arg2);
980 goto out;
981 }
982
983 /*
984 * The TEE was happy with it, so complete the reclaim by mapping the
985 * memory into the recipient. This won't allocate because the
986 * transaction was already prepared above, so it doesn't need to use the
987 * `local_page_pool`.
988 */
989 CHECK(spci_region_group_identity_map(to_locked, constituents,
990 constituent_count, to_mode,
991 page_pool, true));
992
993 ret = (struct spci_value){.func = SPCI_SUCCESS_32};
994
995out:
996 mpool_fini(&local_page_pool);
997
998 /*
999 * Tidy up the page table by reclaiming failed mappings (if there was
1000 * an error) or merging entries into blocks where possible (on success).
1001 */
1002 mm_vm_defrag(&to->ptable, page_pool);
1003
1004 return ret;
1005}
1006
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001007static struct spci_value spci_relinquish_memory(
1008 struct vm_locked from_locked,
1009 struct spci_memory_region_constituent *constituents,
1010 uint32_t constituent_count, struct mpool *page_pool, bool clear)
1011{
1012 uint32_t orig_from_mode;
1013 uint32_t from_mode;
1014 struct mpool local_page_pool;
1015 struct spci_value ret;
1016
Andrew Walbrana65a1322020-04-06 19:32:32 +01001017 ret = spci_relinquish_check_transition(from_locked, &orig_from_mode,
1018 constituents, constituent_count,
1019 &from_mode);
1020 if (ret.func != SPCI_SUCCESS_32) {
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001021 dlog_verbose("Invalid transition.\n");
Andrew Walbrana65a1322020-04-06 19:32:32 +01001022 return ret;
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001023 }
1024
1025 /*
1026 * Create a local pool so any freed memory can't be used by another
1027 * thread. This is to ensure the original mapping can be restored if the
1028 * clear fails.
1029 */
1030 mpool_init_with_fallback(&local_page_pool, page_pool);
1031
1032 /*
1033 * First reserve all required memory for the new page table entries
1034 * without committing, to make sure the entire operation will succeed
1035 * without exhausting the page pool.
1036 */
1037 if (!spci_region_group_identity_map(from_locked, constituents,
1038 constituent_count, from_mode,
1039 page_pool, false)) {
1040 /* TODO: partial defrag of failed range. */
1041 ret = spci_error(SPCI_NO_MEMORY);
1042 goto out;
1043 }
1044
1045 /*
1046 * Update the mapping for the sender. This won't allocate because the
1047 * transaction was already prepared above, but may free pages in the
1048 * case that a whole block is being unmapped that was previously
1049 * partially mapped.
1050 */
1051 CHECK(spci_region_group_identity_map(from_locked, constituents,
1052 constituent_count, from_mode,
1053 &local_page_pool, true));
1054
1055 /* Clear the memory so no VM or device can see the previous contents. */
1056 if (clear && !spci_clear_memory_constituents(
1057 constituents, constituent_count, page_pool)) {
1058 /*
1059 * On failure, roll back by returning memory to the sender. This
1060 * may allocate pages which were previously freed into
1061 * `local_page_pool` by the call above, but will never allocate
1062 * more pages than that so can never fail.
1063 */
1064 CHECK(spci_region_group_identity_map(
1065 from_locked, constituents, constituent_count,
1066 orig_from_mode, &local_page_pool, true));
1067
1068 ret = spci_error(SPCI_NO_MEMORY);
1069 goto out;
1070 }
1071
1072 ret = (struct spci_value){.func = SPCI_SUCCESS_32};
1073
1074out:
1075 mpool_fini(&local_page_pool);
1076
1077 /*
1078 * Tidy up the page table by reclaiming failed mappings (if there was an
1079 * error) or merging entries into blocks where possible (on success).
1080 */
1081 mm_vm_defrag(&from_locked.vm->ptable, page_pool);
Jose Marinho09b1db82019-08-08 09:16:59 +01001082
1083 return ret;
1084}
1085
1086/**
Andrew Walbrana65a1322020-04-06 19:32:32 +01001087 * Check that the given `memory_region` represents a valid memory send request
1088 * of the given `share_func` type, return the clear flag and permissions via the
1089 * respective output parameters, and update the permissions if necessary.
1090 * Returns SPCI_SUCCESS if the request was valid, or the relevant SPCI_ERROR if
1091 * not.
1092 */
1093static struct spci_value spci_memory_send_validate(
1094 struct vm *to, struct vm_locked from_locked,
1095 struct spci_memory_region *memory_region, uint32_t memory_share_size,
1096 uint32_t share_func, bool *clear,
1097 spci_memory_access_permissions_t *permissions)
1098{
1099 struct spci_composite_memory_region *composite;
1100 uint32_t receivers_size;
1101 uint32_t constituents_size;
1102 enum spci_data_access data_access;
1103 enum spci_instruction_access instruction_access;
1104
1105 CHECK(clear != NULL);
1106 CHECK(permissions != NULL);
1107
1108 /* The sender must match the message sender. */
1109 if (memory_region->sender != from_locked.vm->id) {
1110 dlog_verbose("Invalid sender %d.\n", memory_region->sender);
1111 return spci_error(SPCI_INVALID_PARAMETERS);
1112 }
1113
1114 /* We only support a single recipient. */
1115 if (memory_region->receiver_count != 1) {
1116 dlog_verbose("Multiple recipients not supported.\n");
1117 return spci_error(SPCI_INVALID_PARAMETERS);
1118 }
1119
1120 /*
1121 * Ensure that the composite header is within the memory bounds and
1122 * doesn't overlap the first part of the message.
1123 */
1124 receivers_size = sizeof(struct spci_memory_access) *
1125 memory_region->receiver_count;
1126 if (memory_region->receivers[0].composite_memory_region_offset <
1127 sizeof(struct spci_memory_region) + receivers_size ||
1128 memory_region->receivers[0].composite_memory_region_offset +
1129 sizeof(struct spci_composite_memory_region) >=
1130 memory_share_size) {
1131 dlog_verbose(
1132 "Invalid composite memory region descriptor offset.\n");
1133 return spci_error(SPCI_INVALID_PARAMETERS);
1134 }
1135
1136 composite = spci_memory_region_get_composite(memory_region, 0);
1137
1138 /*
1139 * Ensure the number of constituents are within the memory
1140 * bounds.
1141 */
1142 constituents_size = sizeof(struct spci_memory_region_constituent) *
1143 composite->constituent_count;
1144 if (memory_share_size !=
1145 memory_region->receivers[0].composite_memory_region_offset +
1146 sizeof(struct spci_composite_memory_region) +
1147 constituents_size) {
1148 dlog_verbose("Invalid size %d or constituent offset %d.\n",
1149 memory_share_size,
1150 memory_region->receivers[0]
1151 .composite_memory_region_offset);
1152 return spci_error(SPCI_INVALID_PARAMETERS);
1153 }
1154
1155 /* The recipient must match the message recipient. */
1156 if (memory_region->receivers[0].receiver_permissions.receiver !=
1157 to->id) {
1158 return spci_error(SPCI_INVALID_PARAMETERS);
1159 }
1160
1161 *clear = memory_region->flags & SPCI_MEMORY_REGION_FLAG_CLEAR;
1162 /*
1163 * Clear is not allowed for memory sharing, as the sender still has
1164 * access to the memory.
1165 */
1166 if (*clear && share_func == SPCI_MEM_SHARE_32) {
1167 dlog_verbose("Memory can't be cleared while being shared.\n");
1168 return spci_error(SPCI_INVALID_PARAMETERS);
1169 }
1170
1171 /* No other flags are allowed/supported here. */
1172 if (memory_region->flags & ~SPCI_MEMORY_REGION_FLAG_CLEAR) {
1173 dlog_verbose("Invalid flags %#x.\n", memory_region->flags);
1174 return spci_error(SPCI_INVALID_PARAMETERS);
1175 }
1176
1177 /* Check that the permissions are valid. */
1178 *permissions =
1179 memory_region->receivers[0].receiver_permissions.permissions;
1180 data_access = spci_get_data_access_attr(*permissions);
1181 instruction_access = spci_get_instruction_access_attr(*permissions);
1182 if (data_access == SPCI_DATA_ACCESS_RESERVED ||
1183 instruction_access == SPCI_INSTRUCTION_ACCESS_RESERVED) {
1184 dlog_verbose("Reserved value for receiver permissions %#x.\n",
1185 *permissions);
1186 return spci_error(SPCI_INVALID_PARAMETERS);
1187 }
1188 if (instruction_access != SPCI_INSTRUCTION_ACCESS_NOT_SPECIFIED) {
1189 dlog_verbose(
1190 "Invalid instruction access permissions %#x for "
1191 "sending memory.\n",
1192 *permissions);
1193 return spci_error(SPCI_INVALID_PARAMETERS);
1194 }
1195 if (share_func == SPCI_MEM_SHARE_32) {
1196 if (data_access == SPCI_DATA_ACCESS_NOT_SPECIFIED) {
1197 dlog_verbose(
1198 "Invalid data access permissions %#x for "
1199 "sharing memory.\n",
1200 *permissions);
1201 return spci_error(SPCI_INVALID_PARAMETERS);
1202 }
1203 /*
1204 * According to section 6.11.3 of the FF-A spec NX is required
1205 * for share operations (but must not be specified by the
1206 * sender) so set it in the copy that we store, ready to be
1207 * returned to the retriever.
1208 */
1209 spci_set_instruction_access_attr(permissions,
1210 SPCI_INSTRUCTION_ACCESS_NX);
1211 memory_region->receivers[0].receiver_permissions.permissions =
1212 *permissions;
1213 }
1214 if (share_func == SPCI_MEM_LEND_32 &&
1215 data_access == SPCI_DATA_ACCESS_NOT_SPECIFIED) {
1216 dlog_verbose(
1217 "Invalid data access permissions %#x for lending "
1218 "memory.\n",
1219 *permissions);
1220 return spci_error(SPCI_INVALID_PARAMETERS);
1221 }
1222 if (share_func == SPCI_MEM_DONATE_32 &&
1223 data_access != SPCI_DATA_ACCESS_NOT_SPECIFIED) {
1224 dlog_verbose(
1225 "Invalid data access permissions %#x for donating "
1226 "memory.\n",
1227 *permissions);
1228 return spci_error(SPCI_INVALID_PARAMETERS);
1229 }
1230
1231 return (struct spci_value){.func = SPCI_SUCCESS_32};
1232}
1233
1234/**
Andrew Walbran475c1452020-02-07 13:22:22 +00001235 * Validates a call to donate, lend or share memory and then updates the stage-2
 1236 * page tables. Specifically, checks whether the message length and number of memory
1237 * region constituents match, and if the transition is valid for the type of
1238 * memory sending operation.
1239 *
1240 * Assumes that the caller has already found and locked both VMs and ensured
1241 * that the destination RX buffer is available, and copied the memory region
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001242 * descriptor from the sender's TX buffer to a freshly allocated page from
1243 * Hafnium's internal pool.
1244 *
1245 * This function takes ownership of the `memory_region` passed in; it must not
1246 * be freed by the caller.
Jose Marinho09b1db82019-08-08 09:16:59 +01001247 */
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001248struct spci_value spci_memory_send(struct vm *to, struct vm_locked from_locked,
Andrew Walbran475c1452020-02-07 13:22:22 +00001249 struct spci_memory_region *memory_region,
1250 uint32_t memory_share_size,
1251 uint32_t share_func, struct mpool *page_pool)
Jose Marinho09b1db82019-08-08 09:16:59 +01001252{
Andrew Walbrana65a1322020-04-06 19:32:32 +01001253 struct spci_composite_memory_region *composite;
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001254 bool clear;
Andrew Walbrana65a1322020-04-06 19:32:32 +01001255 spci_memory_access_permissions_t permissions;
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001256 struct spci_value ret;
1257 spci_memory_handle_t handle;
Jose Marinho09b1db82019-08-08 09:16:59 +01001258
1259 /*
Andrew Walbrana65a1322020-04-06 19:32:32 +01001260 * If there is an error validating the `memory_region` then we need to
1261 * free it because we own it but we won't be storing it in a share state
1262 * after all.
Jose Marinho09b1db82019-08-08 09:16:59 +01001263 */
Andrew Walbrana65a1322020-04-06 19:32:32 +01001264 ret = spci_memory_send_validate(to, from_locked, memory_region,
1265 memory_share_size, share_func, &clear,
1266 &permissions);
1267 if (ret.func != SPCI_SUCCESS_32) {
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001268 mpool_free(page_pool, memory_region);
Andrew Walbrana65a1322020-04-06 19:32:32 +01001269 return ret;
Jose Marinho09b1db82019-08-08 09:16:59 +01001270 }
1271
Andrew Walbrana65a1322020-04-06 19:32:32 +01001272 /* Set flag for share function, ready to be retrieved later. */
1273 switch (share_func) {
1274 case SPCI_MEM_SHARE_32:
1275 memory_region->flags |=
1276 SPCI_MEMORY_REGION_TRANSACTION_TYPE_SHARE;
1277 break;
1278 case SPCI_MEM_LEND_32:
1279 memory_region->flags |=
1280 SPCI_MEMORY_REGION_TRANSACTION_TYPE_LEND;
1281 break;
1282 case SPCI_MEM_DONATE_32:
1283 memory_region->flags |=
1284 SPCI_MEMORY_REGION_TRANSACTION_TYPE_DONATE;
1285 break;
Jose Marinho09b1db82019-08-08 09:16:59 +01001286 }
1287
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001288 /*
1289 * Allocate a share state before updating the page table. Otherwise if
1290 * updating the page table succeeded but allocating the share state
1291 * failed then it would leave the memory in a state where nobody could
1292 * get it back.
1293 */
1294 if (to->id != HF_TEE_VM_ID &&
1295 !allocate_share_state(share_func, memory_region, &handle)) {
1296 dlog_verbose("Failed to allocate share state.\n");
1297 mpool_free(page_pool, memory_region);
1298 return spci_error(SPCI_NO_MEMORY);
1299 }
1300
1301 dump_share_states();
1302
1303 /* Check that state is valid in sender page table and update. */
Andrew Walbrana65a1322020-04-06 19:32:32 +01001304 composite = spci_memory_region_get_composite(memory_region, 0);
1305 ret = spci_send_memory(from_locked, composite->constituents,
1306 composite->constituent_count, share_func,
1307 permissions, page_pool, clear);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001308 if (ret.func != SPCI_SUCCESS_32) {
1309 if (to->id != HF_TEE_VM_ID) {
1310 /* Free share state. */
1311 bool freed = share_state_free_handle(handle, page_pool);
1312
1313 CHECK(freed);
1314 }
1315
1316 return ret;
1317 }
1318
1319 if (to->id == HF_TEE_VM_ID) {
Andrew Walbrana65a1322020-04-06 19:32:32 +01001320 /* No share state allocated here so no handle to return. */
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001321 return (struct spci_value){.func = SPCI_SUCCESS_32};
1322 }
1323
1324 return (struct spci_value){.func = SPCI_SUCCESS_32, .arg2 = handle};
1325}
1326
1327struct spci_value spci_memory_retrieve(
Andrew Walbrana65a1322020-04-06 19:32:32 +01001328 struct vm_locked to_locked, struct spci_memory_region *retrieve_request,
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001329 uint32_t retrieve_request_size, struct mpool *page_pool)
1330{
1331 uint32_t expected_retrieve_request_size =
Andrew Walbrana65a1322020-04-06 19:32:32 +01001332 sizeof(struct spci_memory_region) +
1333 retrieve_request->receiver_count *
1334 sizeof(struct spci_memory_access);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001335 spci_memory_handle_t handle = retrieve_request->handle;
Andrew Walbrana65a1322020-04-06 19:32:32 +01001336 spci_memory_region_flags_t transaction_type =
1337 retrieve_request->flags &
1338 SPCI_MEMORY_REGION_TRANSACTION_TYPE_MASK;
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001339 struct spci_memory_region *memory_region;
Andrew Walbrana65a1322020-04-06 19:32:32 +01001340 spci_memory_access_permissions_t sent_permissions;
1341 enum spci_data_access sent_data_access;
1342 enum spci_instruction_access sent_instruction_access;
1343 spci_memory_access_permissions_t requested_permissions;
1344 enum spci_data_access requested_data_access;
1345 enum spci_instruction_access requested_instruction_access;
1346 spci_memory_access_permissions_t permissions;
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001347 uint32_t memory_to_attributes;
Andrew Walbrana65a1322020-04-06 19:32:32 +01001348 struct spci_composite_memory_region *composite;
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001349 struct share_states_locked share_states;
1350 struct spci_memory_share_state *share_state;
1351 struct spci_value ret;
1352 uint32_t response_size;
1353
1354 dump_share_states();
1355
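	/*
	 * A retrieve request is expected to contain only the memory region
	 * header and one memory access descriptor per receiver; the retriever
	 * does not specify its own address ranges.
	 */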
1356 if (retrieve_request_size != expected_retrieve_request_size) {
1357 dlog_verbose(
1358 "Invalid length for SPCI_MEM_RETRIEVE_REQ, expected %d "
1359 "but was %d.\n",
1360 expected_retrieve_request_size, retrieve_request_size);
1361 return spci_error(SPCI_INVALID_PARAMETERS);
1362 }
1363
Andrew Walbrana65a1322020-04-06 19:32:32 +01001364 if (retrieve_request->receiver_count != 1) {
1365 dlog_verbose(
1366 "Multi-way memory sharing not supported (got %d "
1367 "receivers descriptors on SPCI_MEM_RETRIEVE_REQ, "
1368 "expected 1).\n",
1369 retrieve_request->receiver_count);
1370 return spci_error(SPCI_INVALID_PARAMETERS);
1371 }
1372
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001373 share_states = share_states_lock();
1374 if (!get_share_state(share_states, handle, &share_state)) {
1375 dlog_verbose("Invalid handle %#x for SPCI_MEM_RETRIEVE_REQ.\n",
1376 handle);
1377 ret = spci_error(SPCI_INVALID_PARAMETERS);
1378 goto out;
1379 }
1380
Andrew Walbrana65a1322020-04-06 19:32:32 +01001381 memory_region = share_state->memory_region;
1382 CHECK(memory_region != NULL);
1383
1384 /*
1385 * Check that the transaction type expected by the receiver is correct,
1386 * if it has been specified.
1387 */
1388 if (transaction_type !=
1389 SPCI_MEMORY_REGION_TRANSACTION_TYPE_UNSPECIFIED &&
1390 transaction_type != (memory_region->flags &
1391 SPCI_MEMORY_REGION_TRANSACTION_TYPE_MASK)) {
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001392 dlog_verbose(
1393 "Incorrect transaction type %#x for "
1394 "SPCI_MEM_RETRIEVE_REQ, expected %#x for handle %#x.\n",
Andrew Walbrana65a1322020-04-06 19:32:32 +01001395 transaction_type,
1396 memory_region->flags &
1397 SPCI_MEMORY_REGION_TRANSACTION_TYPE_MASK,
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001398 handle);
1399 ret = spci_error(SPCI_INVALID_PARAMETERS);
1400 goto out;
1401 }
1402
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001403 if (retrieve_request->sender != memory_region->sender) {
1404 dlog_verbose(
1405 "Incorrect sender ID %d for SPCI_MEM_RETRIEVE_REQ, "
1406 "expected %d for handle %#x.\n",
1407 retrieve_request->sender, memory_region->sender,
1408 handle);
1409 ret = spci_error(SPCI_INVALID_PARAMETERS);
1410 goto out;
1411 }
1412
1413 if (retrieve_request->tag != memory_region->tag) {
1414 dlog_verbose(
1415 "Incorrect tag %d for SPCI_MEM_RETRIEVE_REQ, expected "
1416 "%d for handle %#x.\n",
1417 retrieve_request->tag, memory_region->tag, handle);
1418 ret = spci_error(SPCI_INVALID_PARAMETERS);
1419 goto out;
1420 }
1421
Andrew Walbrana65a1322020-04-06 19:32:32 +01001422 if (retrieve_request->receivers[0].receiver_permissions.receiver !=
1423 to_locked.vm->id) {
1424 dlog_verbose(
1425 "Retrieve request receiver VM ID %d didn't match "
1426 "caller of SPCI_MEM_RETRIEVE_REQ.\n",
1427 retrieve_request->receivers[0]
1428 .receiver_permissions.receiver);
1429 ret = spci_error(SPCI_INVALID_PARAMETERS);
1430 goto out;
1431 }
1432
1433 if (memory_region->receivers[0].receiver_permissions.receiver !=
1434 to_locked.vm->id) {
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001435 dlog_verbose(
1436 "Incorrect receiver VM ID %d for "
1437 "SPCI_MEM_RETRIEVE_REQ, expected %d for handle %#x.\n",
Andrew Walbrana65a1322020-04-06 19:32:32 +01001438 to_locked.vm->id,
1439 memory_region->receivers[0]
1440 .receiver_permissions.receiver,
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001441 handle);
1442 ret = spci_error(SPCI_INVALID_PARAMETERS);
1443 goto out;
1444 }
1445
1446 if (share_state->retrieved[0]) {
1447 dlog_verbose("Memory with handle %#x already retrieved.\n",
1448 handle);
1449 ret = spci_error(SPCI_DENIED);
1450 goto out;
1451 }
1452
Andrew Walbrana65a1322020-04-06 19:32:32 +01001453 if (retrieve_request->receivers[0].composite_memory_region_offset !=
1454 0) {
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001455 dlog_verbose(
1456 "Retriever specified address ranges not supported (got "
Andrew Walbrana65a1322020-04-06 19:32:32 +01001457 "offset "
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001458 "%d).\n",
Andrew Walbrana65a1322020-04-06 19:32:32 +01001459 retrieve_request->receivers[0]
1460 .composite_memory_region_offset);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001461 ret = spci_error(SPCI_INVALID_PARAMETERS);
1462 goto out;
1463 }
1464
Andrew Walbrana65a1322020-04-06 19:32:32 +01001465 /*
1466 * Check permissions from sender against permissions requested by
1467 * receiver.
1468 */
1469 /* TODO: Check attributes too. */
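	/*
	 * The granted permissions are the intersection of what the sender
	 * offered and what the receiver requested; "not specified" on either
	 * side imposes no restriction, and requesting more access than was
	 * offered is denied.
	 */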
1470 sent_permissions =
1471 memory_region->receivers[0].receiver_permissions.permissions;
1472 sent_data_access = spci_get_data_access_attr(sent_permissions);
1473 sent_instruction_access =
1474 spci_get_instruction_access_attr(sent_permissions);
1475 requested_permissions =
1476 retrieve_request->receivers[0].receiver_permissions.permissions;
1477 requested_data_access =
1478 spci_get_data_access_attr(requested_permissions);
1479 requested_instruction_access =
1480 spci_get_instruction_access_attr(requested_permissions);
1481 permissions = 0;
1482 switch (sent_data_access) {
1483 case SPCI_DATA_ACCESS_NOT_SPECIFIED:
1484 case SPCI_DATA_ACCESS_RW:
1485 if (requested_data_access == SPCI_DATA_ACCESS_NOT_SPECIFIED ||
1486 requested_data_access == SPCI_DATA_ACCESS_RW) {
1487 spci_set_data_access_attr(&permissions,
1488 SPCI_DATA_ACCESS_RW);
1489 break;
1490 }
1491 /* Intentional fall-through. */
1492 case SPCI_DATA_ACCESS_RO:
1493 if (requested_data_access == SPCI_DATA_ACCESS_NOT_SPECIFIED ||
1494 requested_data_access == SPCI_DATA_ACCESS_RO) {
1495 spci_set_data_access_attr(&permissions,
1496 SPCI_DATA_ACCESS_RO);
1497 break;
1498 }
1499 dlog_verbose(
1500 "Invalid data access requested; sender specified "
1501 "permissions %#x but receiver requested %#x.\n",
1502 sent_permissions, requested_permissions);
1503 ret = spci_error(SPCI_DENIED);
1504 goto out;
1505 case SPCI_DATA_ACCESS_RESERVED:
1506 panic("Got unexpected SPCI_DATA_ACCESS_RESERVED. Should be "
1507 "checked before this point.");
1508 }
1509 switch (sent_instruction_access) {
1510 case SPCI_INSTRUCTION_ACCESS_NOT_SPECIFIED:
1511 case SPCI_INSTRUCTION_ACCESS_X:
1512 if (requested_instruction_access ==
1513 SPCI_INSTRUCTION_ACCESS_NOT_SPECIFIED ||
1514 requested_instruction_access == SPCI_INSTRUCTION_ACCESS_X) {
1515 spci_set_instruction_access_attr(
1516 &permissions, SPCI_INSTRUCTION_ACCESS_X);
1517 break;
1518 }
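		/* Intentional fall-through. */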
1519 case SPCI_INSTRUCTION_ACCESS_NX:
1520 if (requested_instruction_access ==
1521 SPCI_INSTRUCTION_ACCESS_NOT_SPECIFIED ||
1522 requested_instruction_access ==
1523 SPCI_INSTRUCTION_ACCESS_NX) {
1524 spci_set_instruction_access_attr(
1525 &permissions, SPCI_INSTRUCTION_ACCESS_NX);
1526 break;
1527 }
1528 dlog_verbose(
1529 "Invalid instruction access requested; sender "
1530 "specified "
1531 "permissions %#x but receiver requested %#x.\n",
1532 sent_permissions, requested_permissions);
1533 ret = spci_error(SPCI_DENIED);
1534 goto out;
1535 case SPCI_INSTRUCTION_ACCESS_RESERVED:
1536 panic("Got unexpected SPCI_INSTRUCTION_ACCESS_RESERVED. Should "
1537 "be checked before this point.");
1538 }
1539 memory_to_attributes = spci_memory_permissions_to_mode(permissions);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001540
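	/*
	 * Map the memory into the retriever's page table with the mode derived
	 * from the granted permissions.
	 */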
Andrew Walbrana65a1322020-04-06 19:32:32 +01001541 composite = spci_memory_region_get_composite(memory_region, 0);
1542 ret = spci_retrieve_memory(to_locked, composite->constituents,
1543 composite->constituent_count,
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001544 memory_to_attributes,
1545 share_state->share_func, false, page_pool);
1546 if (ret.func != SPCI_SUCCESS_32) {
1547 goto out;
1548 }
1549
1550 /*
1551 * Copy response to RX buffer of caller and deliver the message. This
1552 * must be done before the share_state is (possibly) freed.
1553 */
Andrew Walbrana65a1322020-04-06 19:32:32 +01001554 /* TODO: combine attributes from sender and request. */
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001555 response_size = spci_retrieved_memory_region_init(
Andrew Walbrana65a1322020-04-06 19:32:32 +01001556 to_locked.vm->mailbox.recv, HF_MAILBOX_SIZE,
1557 memory_region->sender, memory_region->attributes,
1558 memory_region->flags, handle, to_locked.vm->id, permissions,
1559 composite->constituents, composite->constituent_count);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001560 to_locked.vm->mailbox.recv_size = response_size;
1561 to_locked.vm->mailbox.recv_sender = HF_HYPERVISOR_VM_ID;
1562 to_locked.vm->mailbox.recv_func = SPCI_MEM_RETRIEVE_RESP_32;
1563 to_locked.vm->mailbox.state = MAILBOX_STATE_READ;
1564
1565 if (share_state->share_func == SPCI_MEM_DONATE_32) {
1566 /*
1567 * Memory that has been donated can't be relinquished, so no
1568 * need to keep the share state around.
1569 */
1570 share_state_free(share_states, share_state, page_pool);
1571 dlog_verbose("Freed share state for donate.\n");
1572 } else {
1573 share_state->retrieved[0] = true;
1574 }
1575
1576 ret = (struct spci_value){.func = SPCI_MEM_RETRIEVE_RESP_32,
Andrew Walbrana65a1322020-04-06 19:32:32 +01001577 .arg1 = response_size,
1578 .arg2 = response_size};
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001579
1580out:
1581 share_states_unlock(&share_states);
1582 dump_share_states();
1583 return ret;
1584}
1585
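/**
 * Validates an SPCI_MEM_RELINQUISH request against the share state identified
 * by its handle, unmaps the memory from the caller's page table, and marks the
 * handle as not retrieved so that the memory can be reclaimed by the sender or
 * retrieved again.
 */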
1586struct spci_value spci_memory_relinquish(
1587 struct vm_locked from_locked,
1588 struct spci_mem_relinquish *relinquish_request, struct mpool *page_pool)
1589{
1590 spci_memory_handle_t handle = relinquish_request->handle;
1591 struct share_states_locked share_states;
1592 struct spci_memory_share_state *share_state;
1593 struct spci_memory_region *memory_region;
1594 bool clear;
Andrew Walbrana65a1322020-04-06 19:32:32 +01001595 struct spci_composite_memory_region *composite;
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001596 struct spci_value ret;
1597
Andrew Walbrana65a1322020-04-06 19:32:32 +01001598 if (relinquish_request->endpoint_count != 1) {
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001599 dlog_verbose(
Andrew Walbrana65a1322020-04-06 19:32:32 +01001600 "Stream endpoints not supported (got %d endpoints on "
1601 "SPCI_MEM_RELINQUISH, expected 1).\n",
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001602 relinquish_request->endpoint_count);
1603 return spci_error(SPCI_INVALID_PARAMETERS);
1604 }
1605
Andrew Walbrana65a1322020-04-06 19:32:32 +01001606 if (relinquish_request->endpoints[0] != from_locked.vm->id) {
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001607 dlog_verbose(
1608 "VM ID %d in relinquish message doesn't match calling "
1609 "VM ID %d.\n",
Andrew Walbrana65a1322020-04-06 19:32:32 +01001610 relinquish_request->endpoints[0], from_locked.vm->id);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001611 return spci_error(SPCI_INVALID_PARAMETERS);
1612 }
1613
1614 dump_share_states();
1615
1616 share_states = share_states_lock();
1617 if (!get_share_state(share_states, handle, &share_state)) {
1618 dlog_verbose("Invalid handle %#x for SPCI_MEM_RELINQUISH.\n",
1619 handle);
1620 ret = spci_error(SPCI_INVALID_PARAMETERS);
1621 goto out;
1622 }
1623
1624 memory_region = share_state->memory_region;
1625 CHECK(memory_region != NULL);
1626
Andrew Walbrana65a1322020-04-06 19:32:32 +01001627 if (memory_region->receivers[0].receiver_permissions.receiver !=
1628 from_locked.vm->id) {
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001629 dlog_verbose(
1630 "VM ID %d tried to relinquish memory region with "
1631 "handle %#x but receiver was %d.\n",
1632 from_locked.vm->id, handle,
Andrew Walbrana65a1322020-04-06 19:32:32 +01001633 memory_region->receivers[0]
1634 .receiver_permissions.receiver);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001635 ret = spci_error(SPCI_INVALID_PARAMETERS);
1636 goto out;
1637 }
1638
1639 if (!share_state->retrieved[0]) {
1640 dlog_verbose(
1641 "Memory with handle %#x not yet retrieved, can't "
1642 "relinquish.\n",
1643 handle);
1644 ret = spci_error(SPCI_INVALID_PARAMETERS);
1645 goto out;
1646 }
1647
1648 clear = relinquish_request->flags & SPCI_MEMORY_REGION_FLAG_CLEAR;
1649
1650 /*
1651 * Clear is not allowed for memory that was shared, as the original
1652 * sender still has access to the memory.
1653 */
1654 if (clear && share_state->share_func == SPCI_MEM_SHARE_32) {
1655 dlog_verbose("Memory which was shared can't be cleared.\n");
1656 ret = spci_error(SPCI_INVALID_PARAMETERS);
1657 goto out;
1658 }
1659
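	/*
	 * Unmap the memory from the relinquishing VM's page table, clearing it
	 * first if requested.
	 */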
Andrew Walbrana65a1322020-04-06 19:32:32 +01001660 composite = spci_memory_region_get_composite(memory_region, 0);
1661 ret = spci_relinquish_memory(from_locked, composite->constituents,
1662 composite->constituent_count, page_pool,
1663 clear);
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001664
1665 if (ret.func == SPCI_SUCCESS_32) {
1666 /*
1667 * Mark memory handle as not retrieved, so it can be reclaimed
1668 * (or retrieved again).
1669 */
1670 share_state->retrieved[0] = false;
1671 }
1672
1673out:
1674 share_states_unlock(&share_states);
1675 dump_share_states();
1676 return ret;
1677}
1678
1679/**
1680 * Validates that the reclaim transition is allowed for the given handle,
1681 * updates the page table of the reclaiming VM, and frees the internal state
1682 * associated with the handle.
1683 */
1684struct spci_value spci_memory_reclaim(struct vm_locked to_locked,
1685 spci_memory_handle_t handle, bool clear,
1686 struct mpool *page_pool)
1687{
1688 struct share_states_locked share_states;
1689 struct spci_memory_share_state *share_state;
1690 struct spci_memory_region *memory_region;
Andrew Walbrana65a1322020-04-06 19:32:32 +01001691 struct spci_composite_memory_region *composite;
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001692 uint32_t memory_to_attributes = MM_MODE_R | MM_MODE_W | MM_MODE_X;
1693 struct spci_value ret;
1694
1695 dump_share_states();
1696
1697 share_states = share_states_lock();
1698 if (!get_share_state(share_states, handle, &share_state)) {
1699 dlog_verbose("Invalid handle %#x for SPCI_MEM_RECLAIM.\n",
1700 handle);
1701 ret = spci_error(SPCI_INVALID_PARAMETERS);
1702 goto out;
1703 }
1704
1705 memory_region = share_state->memory_region;
1706 CHECK(memory_region != NULL);
1707
1708 if (to_locked.vm->id != memory_region->sender) {
1709 dlog_verbose(
1710 "VM %d attempted to reclaim memory handle %#x "
1711 "originally sent by VM %d.\n",
1712 to_locked.vm->id, handle, memory_region->sender);
1713 ret = spci_error(SPCI_INVALID_PARAMETERS);
1714 goto out;
1715 }
1716
1717 if (share_state->retrieved[0]) {
1718 dlog_verbose(
1719 "Tried to reclaim memory handle %#x that has not been "
1720 "relinquished.\n",
1721 handle);
1722 ret = spci_error(SPCI_DENIED);
1723 goto out;
1724 }
1725
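	/*
	 * Map the memory back into the sender's page table with read, write
	 * and execute access.
	 */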
Andrew Walbrana65a1322020-04-06 19:32:32 +01001726 composite = spci_memory_region_get_composite(memory_region, 0);
1727 ret = spci_retrieve_memory(to_locked, composite->constituents,
1728 composite->constituent_count,
Andrew Walbran5de9c3d2020-02-10 13:35:29 +00001729 memory_to_attributes, SPCI_MEM_RECLAIM_32,
1730 clear, page_pool);
1731
1732 if (ret.func == SPCI_SUCCESS_32) {
1733 share_state_free(share_states, share_state, page_pool);
1734 dlog_verbose("Freed share state after successful reclaim.\n");
1735 }
1736
1737out:
1738 share_states_unlock(&share_states);
1739 return ret;
Jose Marinho09b1db82019-08-08 09:16:59 +01001740}
Andrew Walbran290b0c92020-02-03 16:37:14 +00001741
1742/**
1743 * Validates that the reclaim transition is allowed for the given memory region
1744 * and updates the page table of the reclaiming VM.
1745 */
1746struct spci_value spci_memory_tee_reclaim(
1747 struct vm_locked to_locked, spci_memory_handle_t handle,
1748 struct spci_memory_region *memory_region, bool clear,
1749 struct mpool *page_pool)
1750{
1751 uint32_t memory_to_attributes = MM_MODE_R | MM_MODE_W | MM_MODE_X;
1752 struct spci_composite_memory_region *composite;
1753
1754 if (memory_region->receiver_count != 1) {
1755 /* Only one receiver supported by Hafnium for now. */
1756 dlog_verbose(
1757 "Multiple recipients not supported (got %d, expected "
1758 "1).\n",
1759 memory_region->receiver_count);
1760 return spci_error(SPCI_NOT_SUPPORTED);
1761 }
1762
1763 if (memory_region->handle != handle) {
1764 dlog_verbose(
1765 "Got memory region handle %#x from TEE but requested "
1766 "handle %#x.\n",
1767 memory_region->handle, handle);
1768 return spci_error(SPCI_INVALID_PARAMETERS);
1769 }
1770
1771 /* The original sender must match the caller. */
1772 if (to_locked.vm->id != memory_region->sender) {
1773 dlog_verbose(
1774 "VM %d attempted to reclaim memory handle %#x "
1775 "originally sent by VM %d.\n",
1776 to_locked.vm->id, handle, memory_region->sender);
1777 return spci_error(SPCI_INVALID_PARAMETERS);
1778 }
1779
1780 composite = spci_memory_region_get_composite(memory_region, 0);
1781
1782 /*
1783 * Forward the request to the TEE and then map the memory back into the
1784 * caller's stage-2 page table.
1785 */
1786 return spci_tee_reclaim_memory(to_locked, handle,
1787 composite->constituents,
1788 composite->constituent_count,
1789 memory_to_attributes, clear, page_pool);
1790}