/*
 * Copyright 2019 The Hafnium Authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     https://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "hf/api.h"
#include "hf/check.h"
#include "hf/dlog.h"
#include "hf/spci_internal.h"
#include "hf/std.h"
#include "hf/vm.h"

/**
 * Obtain the next mode to apply to the two VMs.
 *
 * Returns true iff a state transition was found.
 */
static bool spci_msg_get_next_state(
	const struct spci_mem_transitions *transitions,
	uint32_t transition_count, uint32_t memory_to_attributes,
	uint32_t orig_from_mode, uint32_t orig_to_mode, uint32_t *from_mode,
	uint32_t *to_mode)
{
	const uint32_t state_mask =
		MM_MODE_INVALID | MM_MODE_UNOWNED | MM_MODE_SHARED;
	const uint32_t orig_from_state = orig_from_mode & state_mask;

	for (uint32_t index = 0; index < transition_count; index++) {
		uint32_t table_orig_from_mode =
			transitions[index].orig_from_mode;
		uint32_t table_orig_to_mode = transitions[index].orig_to_mode;

		if ((orig_from_state == table_orig_from_mode) &&
		    ((orig_to_mode & state_mask) == table_orig_to_mode)) {
			*to_mode = transitions[index].to_mode |
				   memory_to_attributes;

			*from_mode = transitions[index].from_mode |
				     (~state_mask & orig_from_mode);

			return true;
		}
	}
	return false;
}

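/*
 * For example (using the donate table defined in spci_msg_check_transition
 * below): a sender whose state bits are 0 (owner with exclusive access) and
 * a receiver whose state bits are MM_MODE_INVALID | MM_MODE_UNOWNED (not
 * owner, no access) match donate transition 1), so the lookup yields
 * from_mode = MM_MODE_INVALID | MM_MODE_UNOWNED and to_mode = 0: ownership
 * and exclusive access move to the receiver. memory_to_attributes and the
 * sender's non-state bits are then ORed back into the results.
 */
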
/**
 * Verify that all pages have the same mode, that the starting mode
 * constitutes a valid state, and obtain the next mode to apply to the two
 * VMs.
 *
 * Returns false if any of the following hold:
 *  1) a state transition was not found;
 *  2) the pages being shared do not have the same mode within the <to>
 *     or <from> VMs;
 *  3) the beginning and end IPAs are not page aligned;
 *  4) the requested share type was not handled.
 * Returns true on success.
 */
static bool spci_msg_check_transition(struct vm *to, struct vm *from,
				      uint32_t share_type,
				      uint32_t *orig_from_mode,
				      struct spci_memory_region *memory_region,
				      uint32_t memory_to_attributes,
				      uint32_t *from_mode, uint32_t *to_mode)
{
	uint32_t orig_to_mode;
	const struct spci_mem_transitions *mem_transition_table;
	uint32_t transition_table_size;
	uint32_t i;

	/*
	 * TODO: Transition table does not currently consider the multiple
	 * shared case.
	 */
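	/*
	 * Key to the state notation used in the tables below, in terms of the
	 * MM_MODE_* state bits: O = owner, !O = not owner (MM_MODE_UNOWNED);
	 * EA = exclusive access (neither MM_MODE_INVALID nor MM_MODE_SHARED
	 * set), SA = shared access (MM_MODE_SHARED), NA = no access
	 * (MM_MODE_INVALID). Each entry maps the {sender, receiver} states
	 * before the operation to the states after it.
	 */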
	static const struct spci_mem_transitions donate_transitions[] = {
		{
			/* 1) {O-EA, !O-NA} -> {!O-NA, O-EA} */
			.orig_from_mode = 0,
			.orig_to_mode = MM_MODE_INVALID | MM_MODE_UNOWNED,
			.from_mode = MM_MODE_INVALID | MM_MODE_UNOWNED,
			.to_mode = 0,
		},
		{
			/* 2) {O-NA, !O-EA} -> {!O-NA, O-EA} */
			.orig_from_mode = MM_MODE_INVALID,
			.orig_to_mode = MM_MODE_UNOWNED,
			.from_mode = MM_MODE_INVALID | MM_MODE_UNOWNED,
			.to_mode = 0,
		},
		{
			/* 3) {O-SA, !O-SA} -> {!O-NA, O-EA} */
			.orig_from_mode = MM_MODE_SHARED,
			.orig_to_mode = MM_MODE_UNOWNED | MM_MODE_SHARED,
			.from_mode = MM_MODE_INVALID | MM_MODE_UNOWNED,
			.to_mode = 0,
		},
		{
			/*
			 * Duplicate of 1) in order to cater for an alternative
			 * representation of !O-NA:
			 * (INVALID | UNOWNED | SHARED) and (INVALID | UNOWNED)
			 * are both alternate representations of !O-NA.
			 */
			/* 4) {O-EA, !O-NA} -> {!O-NA, O-EA} */
			.orig_from_mode = 0,
			.orig_to_mode = MM_MODE_INVALID | MM_MODE_UNOWNED |
					MM_MODE_SHARED,
			.from_mode = MM_MODE_INVALID | MM_MODE_UNOWNED |
				     MM_MODE_SHARED,
			.to_mode = 0,
		},
	};

	static const uint32_t size_donate_transitions =
		ARRAY_SIZE(donate_transitions);

	/*
	 * This data structure holds the allowed state transitions for the
	 * "lend" state machine. In this state machine the owner keeps
	 * ownership but loses access to the lent pages.
	 */
	static const struct spci_mem_transitions lend_transitions[] = {
		{
			/* 1) {O-EA, !O-NA} -> {O-NA, !O-EA} */
			.orig_from_mode = 0,
			.orig_to_mode = MM_MODE_INVALID | MM_MODE_UNOWNED |
					MM_MODE_SHARED,
			.from_mode = MM_MODE_INVALID,
			.to_mode = MM_MODE_UNOWNED,
		},
		{
			/*
			 * Duplicate of 1) in order to cater for an alternative
			 * representation of !O-NA:
			 * (INVALID | UNOWNED | SHARED) and (INVALID | UNOWNED)
			 * are both alternate representations of !O-NA.
			 */
			/* 2) {O-EA, !O-NA} -> {O-NA, !O-EA} */
			.orig_from_mode = 0,
			.orig_to_mode = MM_MODE_INVALID | MM_MODE_UNOWNED,
			.from_mode = MM_MODE_INVALID,
			.to_mode = MM_MODE_UNOWNED,
		},
	};

	static const uint32_t size_lend_transitions =
		ARRAY_SIZE(lend_transitions);

	/*
	 * This data structure holds the allowed state transitions for the
	 * "share" state machine. In this state machine the owner keeps the
	 * shared pages mapped in its stage-2 table and retains access to
	 * them.
	 */
	static const struct spci_mem_transitions share_transitions[] = {
		{
			/* 1) {O-EA, !O-NA} -> {O-SA, !O-SA} */
			.orig_from_mode = 0,
			.orig_to_mode = MM_MODE_INVALID | MM_MODE_UNOWNED |
					MM_MODE_SHARED,
			.from_mode = MM_MODE_SHARED,
			.to_mode = MM_MODE_UNOWNED | MM_MODE_SHARED,
		},
		{
			/*
			 * Duplicate of 1) in order to cater for an alternative
			 * representation of !O-NA:
			 * (INVALID | UNOWNED | SHARED) and (INVALID | UNOWNED)
			 * are both alternate representations of !O-NA.
			 */
			/* 2) {O-EA, !O-NA} -> {O-SA, !O-SA} */
			.orig_from_mode = 0,
			.orig_to_mode = MM_MODE_INVALID | MM_MODE_UNOWNED,
			.from_mode = MM_MODE_SHARED,
			.to_mode = MM_MODE_UNOWNED | MM_MODE_SHARED,
		},
	};

	static const uint32_t size_share_transitions =
		ARRAY_SIZE(share_transitions);

	static const struct spci_mem_transitions relinquish_transitions[] = {
		{
			/* 1) {!O-EA, O-NA} -> {!O-NA, O-EA} */
			.orig_from_mode = MM_MODE_UNOWNED,
			.orig_to_mode = MM_MODE_INVALID,
			.from_mode = MM_MODE_INVALID | MM_MODE_UNOWNED |
				     MM_MODE_SHARED,
			.to_mode = 0,
		},
		{
			/* 2) {!O-SA, O-SA} -> {!O-NA, O-EA} */
			.orig_from_mode = MM_MODE_UNOWNED | MM_MODE_SHARED,
			.orig_to_mode = MM_MODE_SHARED,
			.from_mode = MM_MODE_INVALID | MM_MODE_UNOWNED |
				     MM_MODE_SHARED,
			.to_mode = 0,
		},
	};

	static const uint32_t size_relinquish_transitions =
		ARRAY_SIZE(relinquish_transitions);

	struct spci_memory_region_constituent *constituents =
		spci_memory_region_get_constituents(memory_region);

	if (memory_region->constituent_count == 0) {
		/*
		 * Fail if there are no constituents. Otherwise
		 * spci_msg_get_next_state would be called with uninitialised
		 * values for *orig_from_mode and orig_to_mode.
		 */
		return false;
	}

	for (i = 0; i < memory_region->constituent_count; ++i) {
		ipaddr_t begin = ipa_init(constituents[i].address);
		size_t size = constituents[i].page_count * PAGE_SIZE;
		ipaddr_t end = ipa_add(begin, size);
		uint32_t current_from_mode;
		uint32_t current_to_mode;

		/* Fail if addresses are not page-aligned. */
		if (!is_aligned(ipa_addr(begin), PAGE_SIZE) ||
		    !is_aligned(ipa_addr(end), PAGE_SIZE)) {
			return false;
		}

		/*
		 * Ensure that this constituent memory range is all mapped with
		 * the same mode.
		 */
		if (!mm_vm_get_mode(&from->ptable, begin, end,
				    &current_from_mode) ||
		    !mm_vm_get_mode(&to->ptable, begin, end,
				    &current_to_mode)) {
			return false;
		}

		/*
		 * Ensure that all constituents are mapped with the same mode.
		 */
		if (i == 0) {
			*orig_from_mode = current_from_mode;
			orig_to_mode = current_to_mode;
		} else if (current_from_mode != *orig_from_mode ||
			   current_to_mode != orig_to_mode) {
			return false;
		}
	}

	/* Ensure the address range is normal memory and not a device. */
	if (*orig_from_mode & MM_MODE_D) {
		return false;
	}

	switch (share_type) {
	case SPCI_MSG_SEND_LEGACY_MEMORY_DONATE:
		mem_transition_table = donate_transitions;
		transition_table_size = size_donate_transitions;
		break;

	case SPCI_MSG_SEND_LEGACY_MEMORY_LEND:
		mem_transition_table = lend_transitions;
		transition_table_size = size_lend_transitions;
		break;

	case SPCI_MSG_SEND_LEGACY_MEMORY_SHARE:
		mem_transition_table = share_transitions;
		transition_table_size = size_share_transitions;
		break;

	case SPCI_MSG_SEND_LEGACY_MEMORY_RELINQUISH:
		mem_transition_table = relinquish_transitions;
		transition_table_size = size_relinquish_transitions;
		break;

	default:
		return false;
	}

	return spci_msg_get_next_state(mem_transition_table,
				       transition_table_size,
				       memory_to_attributes, *orig_from_mode,
				       orig_to_mode, from_mode, to_mode);
}

/**
 * Updates a VM's page table such that the given set of physical address ranges
 * are mapped in the address space at the corresponding address ranges, in the
 * mode provided.
 *
 * If commit is false, the page tables will be allocated from the mpool but no
 * mappings will actually be updated. This function must always be called first
 * with commit false to check that it will succeed before calling with commit
 * true, to avoid leaving the page table in a half-updated state. To make a
 * series of changes atomically you can call them all with commit false before
 * calling them all with commit true.
 *
 * mm_vm_defrag should always be called after a series of page table updates,
 * whether they succeed or fail.
 *
 * Returns true on success, or false if the update failed and no changes were
 * made to memory mappings.
 */
static bool spci_region_group_identity_map(
	struct vm_locked vm_locked, struct spci_memory_region *memory_region,
	int mode, struct mpool *ppool, bool commit)
{
	struct spci_memory_region_constituent *constituents =
		spci_memory_region_get_constituents(memory_region);
	uint32_t memory_constituent_count = memory_region->constituent_count;

	/* Iterate over the memory region constituents. */
	for (uint32_t index = 0; index < memory_constituent_count; index++) {
		size_t size = constituents[index].page_count * PAGE_SIZE;
		paddr_t pa_begin =
			pa_from_ipa(ipa_init(constituents[index].address));
		paddr_t pa_end = pa_add(pa_begin, size);

		if (commit) {
			vm_identity_commit(vm_locked, pa_begin, pa_end, mode,
					   ppool, NULL);
		} else if (!vm_identity_prepare(vm_locked, pa_begin, pa_end,
						mode, ppool)) {
			return false;
		}
	}

	return true;
}
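
/*
 * A minimal sketch of the prepare/commit pattern described above, mirroring
 * the calls made by spci_share_memory below (the arguments are placeholders):
 *
 *	// Prepare: allocate tables from the pool, change no mappings.
 *	if (!spci_region_group_identity_map(vm, region, mode, pool, false)) {
 *		return spci_error(SPCI_NO_MEMORY);
 *	}
 *	// Commit: cannot fail after a successful prepare.
 *	CHECK(spci_region_group_identity_map(vm, region, mode, pool, true));
 */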

/**
 * Clears a region of physical memory by overwriting it with zeros. The data is
 * flushed from the cache so the memory has been cleared across the system.
 */
static bool clear_memory(paddr_t begin, paddr_t end, struct mpool *ppool)
{
	/*
	 * TODO: change this to a CPU local single page window rather than a
	 *       global mapping of the whole range. Such an approach will limit
	 *       the changes to stage-1 tables and will allow only local
	 *       invalidation.
	 */
	bool ret;
	struct mm_stage1_locked stage1_locked = mm_lock_stage1();
	void *ptr =
		mm_identity_map(stage1_locked, begin, end, MM_MODE_W, ppool);
	size_t size = pa_difference(begin, end);

	if (!ptr) {
		/* TODO: partial defrag of failed range. */
		/* Recover any memory consumed in failed mapping. */
		mm_defrag(stage1_locked, ppool);
		goto fail;
	}

	memset_s(ptr, size, 0, size);
	arch_mm_flush_dcache(ptr, size);
	mm_unmap(stage1_locked, begin, end, ppool);

	ret = true;
	goto out;

fail:
	ret = false;

out:
	mm_unlock_stage1(&stage1_locked);

	return ret;
}

/**
 * Clears each constituent of the given memory region by overwriting it with
 * zeros. The data is flushed from the cache so the memory has been cleared
 * across the system.
 */
static bool spci_clear_memory_region(struct spci_memory_region *memory_region,
				     struct mpool *api_page_pool)
{
	struct mpool local_page_pool;
	struct spci_memory_region_constituent *constituents =
		spci_memory_region_get_constituents(memory_region);
	uint32_t memory_constituent_count = memory_region->constituent_count;
	struct mm_stage1_locked stage1_locked;
	bool ret = false;

	/*
	 * Create a local pool so any freed memory can't be used by another
	 * thread. This is to ensure each constituent that is mapped can be
	 * unmapped again afterwards.
	 */
	mpool_init_with_fallback(&local_page_pool, api_page_pool);

	/* Iterate over the memory region constituents. */
	for (uint32_t i = 0; i < memory_constituent_count; ++i) {
		size_t size = constituents[i].page_count * PAGE_SIZE;
		paddr_t begin = pa_from_ipa(ipa_init(constituents[i].address));
		paddr_t end = pa_add(begin, size);

		if (!clear_memory(begin, end, &local_page_pool)) {
			/*
			 * clear_memory will defrag on failure, so no need to
			 * do it here.
			 */
			goto out;
		}
	}

	/*
	 * Need to defrag after clearing, as it may have added extra mappings
	 * to the stage 1 page table.
	 */
	stage1_locked = mm_lock_stage1();
	mm_defrag(stage1_locked, &local_page_pool);
	mm_unlock_stage1(&stage1_locked);

	ret = true;

out:
	mpool_fini(&local_page_pool);
	return ret;
}

/**
 * Shares memory from the calling VM with another. The memory can be shared in
 * different modes.
 *
 * This function requires the calling context to hold the <to> and <from>
 * locks.
 *
 * Returns:
 *  In case of error one of the following values is returned:
 *   1) SPCI_INVALID_PARAMETERS - The endpoint provided parameters were
 *      erroneous;
 *   2) SPCI_NO_MEMORY - Hafnium did not have sufficient memory to complete
 *      the request.
 *  Success is indicated by SPCI_SUCCESS.
 */
static struct spci_value spci_share_memory(
	struct vm_locked to_locked, struct vm_locked from_locked,
	struct spci_memory_region *memory_region, uint32_t memory_to_attributes,
	uint32_t share_type, struct mpool *api_page_pool)
{
	struct vm *to = to_locked.vm;
	struct vm *from = from_locked.vm;
	uint32_t orig_from_mode;
	uint32_t from_mode;
	uint32_t to_mode;
	struct mpool local_page_pool;
	struct spci_value ret;
	struct spci_memory_region_constituent *constituents =
		spci_memory_region_get_constituents(memory_region);

	/*
	 * Make sure constituents are properly aligned to a 64-bit boundary. If
	 * not we would get alignment faults trying to read (64-bit) page
	 * addresses.
	 */
	if (!is_aligned(constituents, 8)) {
		return spci_error(SPCI_INVALID_PARAMETERS);
	}

	/* Disallow reflexive shares as this suggests an error in the VM. */
	if (to == from) {
		return spci_error(SPCI_INVALID_PARAMETERS);
	}

	/*
	 * Check if the state transition is lawful for both VMs involved in the
	 * memory exchange, and ensure that all constituents of the memory
	 * region being shared are in the same state.
	 */
	if (!spci_msg_check_transition(to, from, share_type, &orig_from_mode,
				       memory_region, memory_to_attributes,
				       &from_mode, &to_mode)) {
		return spci_error(SPCI_INVALID_PARAMETERS);
	}

	/*
	 * Create a local pool so any freed memory can't be used by another
	 * thread. This is to ensure the original mapping can be restored if
	 * the clear fails.
	 */
	mpool_init_with_fallback(&local_page_pool, api_page_pool);

	/*
	 * First reserve all required memory for the new page table entries in
	 * both sender and recipient page tables without committing, to make
	 * sure the entire operation will succeed without exhausting the page
	 * pool.
	 */
	if (!spci_region_group_identity_map(from_locked, memory_region,
					    from_mode, api_page_pool, false) ||
	    !spci_region_group_identity_map(to_locked, memory_region, to_mode,
					    api_page_pool, false)) {
		/* TODO: partial defrag of failed range. */
		ret = spci_error(SPCI_NO_MEMORY);
		goto out;
	}

	/*
	 * First update the mapping for the sender so there is no overlap with
	 * the recipient. This won't allocate because the transaction was
	 * already prepared above, but may free pages in the case that a whole
	 * block is being unmapped that was previously partially mapped.
	 */
	CHECK(spci_region_group_identity_map(
		from_locked, memory_region, from_mode, &local_page_pool, true));

	/* Clear the memory so no VM or device can see the previous contents. */
	if ((memory_region->flags & SPCI_MEMORY_REGION_FLAG_CLEAR) &&
	    !spci_clear_memory_region(memory_region, api_page_pool)) {
		/*
		 * On failure, roll back by returning memory to the sender. This
		 * may allocate pages which were previously freed into
		 * `local_page_pool` by the call above, but will never allocate
		 * more pages than that so can never fail.
		 */
		CHECK(spci_region_group_identity_map(from_locked, memory_region,
						     orig_from_mode,
						     &local_page_pool, true));

		ret = spci_error(SPCI_NO_MEMORY);
		goto out;
	}

	/*
	 * Complete the transfer by mapping the memory into the recipient. This
	 * won't allocate because the transaction was already prepared above, so
	 * it doesn't need to use the `local_page_pool`.
	 */
	CHECK(spci_region_group_identity_map(to_locked, memory_region, to_mode,
					     api_page_pool, true));

	ret = (struct spci_value){.func = SPCI_SUCCESS_32};

out:
	mpool_fini(&local_page_pool);

	/*
	 * Tidy up the page tables by reclaiming failed mappings (if there was
	 * an error) or merging entries into blocks where possible (on success).
	 */
	mm_vm_defrag(&to->ptable, api_page_pool);
	mm_vm_defrag(&from->ptable, api_page_pool);

	return ret;
}
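
/*
 * To summarise the sequence above: prepare both mappings (failure at this
 * stage leaves the page tables untouched), commit the sender's new mapping,
 * optionally clear the memory (rolling the sender back to its original mode
 * on failure), then commit the recipient's mapping.
 */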

/**
 * Checks that the message length and the number of memory region constituents
 * match; if the check passes, calls the memory sharing routine.
 */
static struct spci_value spci_validate_call_share_memory(
	struct vm_locked to_locked, struct vm_locked from_locked,
	struct spci_memory_region *memory_region, uint32_t memory_share_size,
	uint32_t share_type, struct mpool *api_page_pool)
{
	uint32_t memory_to_attributes;
	uint32_t attributes_size;
	uint32_t constituents_size;

	/*
	 * Ensure the specified attributes and constituents fit within the
	 * message.
	 */
	attributes_size = sizeof(struct spci_memory_region_attributes) *
			  memory_region->attribute_count;
	constituents_size = sizeof(struct spci_memory_region_constituent) *
			    memory_region->constituent_count;
	if (memory_region->constituent_offset <
		    sizeof(struct spci_memory_region) + attributes_size ||
	    memory_share_size !=
		    memory_region->constituent_offset + constituents_size) {
		return spci_error(SPCI_INVALID_PARAMETERS);
	}
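
	/*
	 * For illustration (sizes symbolic): with attribute_count == 1 and
	 * constituent_count == 2, a well-formed message must have
	 * constituent_offset >= sizeof(struct spci_memory_region) +
	 * 1 * sizeof(struct spci_memory_region_attributes) and
	 * memory_share_size == constituent_offset +
	 * 2 * sizeof(struct spci_memory_region_constituent).
	 */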

	/* The sender must match the message sender. */
	if (memory_region->sender != from_locked.vm->id) {
		return spci_error(SPCI_INVALID_PARAMETERS);
	}

	/* We only support a single recipient. */
	if (memory_region->attribute_count != 1) {
		return spci_error(SPCI_INVALID_PARAMETERS);
	}

	/* The recipient must match the message recipient. */
	if (memory_region->attributes[0].receiver != to_locked.vm->id) {
		return spci_error(SPCI_INVALID_PARAMETERS);
	}

	switch (share_type) {
	case SPCI_MSG_SEND_LEGACY_MEMORY_DONATE:
	case SPCI_MSG_SEND_LEGACY_MEMORY_LEND:
	case SPCI_MSG_SEND_LEGACY_MEMORY_SHARE:
		memory_to_attributes = spci_memory_attrs_to_mode(
			memory_region->attributes[0].memory_attributes);
		break;
	case SPCI_MSG_SEND_LEGACY_MEMORY_RELINQUISH:
		memory_to_attributes = MM_MODE_R | MM_MODE_W | MM_MODE_X;
		break;
	default:
		dlog("Invalid memory sharing message.\n");
		return spci_error(SPCI_INVALID_PARAMETERS);
	}

	return spci_share_memory(to_locked, from_locked, memory_region,
				 memory_to_attributes, share_type,
				 api_page_pool);
}

/**
 * Performs initial parsing of the architected message and calls the
 * corresponding API functions implementing the functionality requested in
 * the architected message.
 */
struct spci_value spci_msg_handle_architected_message(
	struct vm_locked to_locked, struct vm_locked from_locked,
	struct spci_memory_region *memory_region, uint32_t size,
	uint32_t attributes, struct mpool *api_page_pool)
{
	uint32_t share_type = attributes & SPCI_MSG_SEND_LEGACY_MEMORY_MASK;
	struct spci_value ret = spci_validate_call_share_memory(
		to_locked, from_locked, memory_region, size, share_type,
		api_page_pool);

	/* Copy data to the destination Rx. */
	/*
	 * TODO: Translate the <from> IPA addresses to <to> IPA addresses.
	 * Currently we assume identity mapping of the stage 2 translation.
	 * Removing this assumption relies on a mechanism to handle scenarios
	 * where the memory region fits in the source Tx buffer but cannot fit
	 * in the destination Rx buffer. This mechanism will be defined at the
	 * spec level.
	 */
	if (ret.func == SPCI_SUCCESS_32) {
		memcpy_s(to_locked.vm->mailbox.recv, SPCI_MSG_PAYLOAD_MAX,
			 memory_region, size);
		to_locked.vm->mailbox.recv_size = size;
		to_locked.vm->mailbox.recv_sender = from_locked.vm->id;
		to_locked.vm->mailbox.recv_attributes = share_type;
	}

	return ret;
}