blob: befc88df404244689fc259e6ba9ef1cbe4803f94 [file] [log] [blame]
Soby Mathewb4c6df42022-11-09 11:13:29 +00001/*
2 * SPDX-License-Identifier: BSD-3-Clause
3 *
4 * SPDX-FileCopyrightText: Copyright TF-RMM Contributors.
5 */
6
7#include <buffer.h>
8#include <granule.h>
9#include <measurement.h>
10#include <realm.h>
11#include <ripas.h>
12#include <smc-handler.h>
13#include <smc-rmi.h>
14#include <smc.h>
15#include <stddef.h>
16#include <string.h>
17#include <table.h>
18
19/*
20 * Validate the map_addr value passed to RMI_RTT_* and RMI_DATA_* commands.
21 */
22static bool validate_map_addr(unsigned long map_addr,
23 unsigned long level,
24 struct rd *rd)
25{
26
27 if (map_addr >= realm_ipa_size(rd)) {
28 return false;
29 }
30 if (!addr_is_level_aligned(map_addr, level)) {
31 return false;
32 }
33 return true;
34}
35
36/*
37 * Structure commands can operate on all RTTs except for the root RTT so
38 * the minimal valid level is the stage 2 starting level + 1.
39 */
40static bool validate_rtt_structure_cmds(unsigned long map_addr,
41 long level,
42 struct rd *rd)
43{
44 int min_level = realm_rtt_starting_level(rd) + 1;
45
46 if ((level < min_level) || (level > RTT_PAGE_LEVEL)) {
47 return false;
48 }
49 return validate_map_addr(map_addr, level, rd);
50}
51
52/*
53 * Map/Unmap commands can operate up to a level 2 block entry so min_level is
54 * the smallest block size.
55 */
56static bool validate_rtt_map_cmds(unsigned long map_addr,
57 long level,
58 struct rd *rd)
59{
60 if ((level < RTT_MIN_BLOCK_LEVEL) || (level > RTT_PAGE_LEVEL)) {
61 return false;
62 }
63 return validate_map_addr(map_addr, level, rd);
64}
65
66/*
67 * Entry commands can operate on any entry so the minimal valid level is the
68 * stage 2 starting level.
69 */
70static bool validate_rtt_entry_cmds(unsigned long map_addr,
71 long level,
72 struct rd *rd)
73{
74 if ((level < realm_rtt_starting_level(rd)) ||
75 (level > RTT_PAGE_LEVEL)) {
76 return false;
77 }
78 return validate_map_addr(map_addr, level, rd);
79}
80
/*
 * RMI_RTT_CREATE handler.
 *
 * Installs the DELEGATED granule @rtt_addr as a new RTT at @ulevel for the
 * IPA @map_addr in the realm described by @rd_addr. The parent entry at
 * (ulevel - 1) is replaced by a table descriptor; if the parent was a block
 * mapping, it is unfolded into 512 entries of the same type (RTT unfolding).
 *
 * Returns RMI_SUCCESS, RMI_ERROR_INPUT, or a packed RMI_ERROR_RTT code
 * carrying the level at which the walk terminated.
 */
unsigned long smc_rtt_create(unsigned long rd_addr,
			     unsigned long rtt_addr,
			     unsigned long map_addr,
			     unsigned long ulevel)
{
	struct granule *g_rd;
	struct granule *g_tbl;
	struct rd *rd;
	struct granule *g_table_root;
	struct rtt_walk wi;
	unsigned long *s2tt, *parent_s2tt, parent_s2tte;
	long level = (long)ulevel;
	unsigned long ipa_bits;
	unsigned long ret;
	struct realm_s2_context s2_ctx;
	int sl;

	/* Atomically lock both the new RTT granule and the RD */
	if (!find_lock_two_granules(rtt_addr,
				    GRANULE_STATE_DELEGATED,
				    &g_tbl,
				    rd_addr,
				    GRANULE_STATE_RD,
				    &g_rd)) {
		return RMI_ERROR_INPUT;
	}

	rd = granule_map(g_rd, SLOT_RD);

	if (!validate_rtt_structure_cmds(map_addr, level, rd)) {
		buffer_unmap(rd);
		granule_unlock(g_rd);
		granule_unlock(g_tbl);
		return RMI_ERROR_INPUT;
	}

	/* Snapshot everything needed from the RD before unmapping it */
	g_table_root = rd->s2_ctx.g_rtt;
	sl = realm_rtt_starting_level(rd);
	ipa_bits = realm_ipa_bits(rd);
	s2_ctx = rd->s2_ctx;
	buffer_unmap(rd);

	/*
	 * Lock the RTT root. Enforcing locking order RD->RTT is enough to
	 * ensure deadlock free locking guarantee.
	 */
	granule_lock(g_table_root, GRANULE_STATE_RTT);

	/* Unlock RD after locking RTT Root */
	granule_unlock(g_rd);

	/* Walk to the parent table that will reference the new RTT */
	rtt_walk_lock_unlock(g_table_root, sl, ipa_bits,
			     map_addr, level - 1L, &wi);
	if (wi.last_level != level - 1L) {
		/* Walk ended early: report the level actually reached */
		ret = pack_return_code(RMI_ERROR_RTT, wi.last_level);
		goto out_unlock_llt;
	}

	parent_s2tt = granule_map(wi.g_llt, SLOT_RTT);
	parent_s2tte = s2tte_read(&parent_s2tt[wi.index]);
	s2tt = granule_map(g_tbl, SLOT_DELEGATED);

	/* Initialize the child RTT according to the parent entry's type */
	if (s2tte_is_unassigned(parent_s2tte)) {
		/*
		 * Note that if map_addr is an Unprotected IPA, the RIPAS field
		 * is guaranteed to be zero, in both parent and child s2ttes.
		 */
		enum ripas ripas = s2tte_get_ripas(parent_s2tte);

		s2tt_init_unassigned(s2tt, ripas);

		/*
		 * Increase the refcount of the parent, the granule was
		 * locked while table walking and hand-over-hand locking.
		 * Atomicity and acquire/release semantics not required because
		 * the table is accessed always locked.
		 */
		__granule_get(wi.g_llt);

	} else if (s2tte_is_destroyed(parent_s2tte)) {
		s2tt_init_destroyed(s2tt);
		__granule_get(wi.g_llt);

	} else if (s2tte_is_assigned(parent_s2tte, level - 1L)) {
		unsigned long block_pa;

		/*
		 * We should observe parent assigned s2tte only when
		 * we create tables above this level.
		 */
		assert(level > RTT_MIN_BLOCK_LEVEL);

		block_pa = s2tte_pa(parent_s2tte, level - 1L);

		s2tt_init_assigned_empty(s2tt, block_pa, level);

		/*
		 * Increase the refcount to mark the granule as in-use. refcount
		 * is incremented by S2TTES_PER_S2TT (ref RTT unfolding).
		 */
		__granule_refcount_inc(g_tbl, S2TTES_PER_S2TT);

	} else if (s2tte_is_valid(parent_s2tte, level - 1L)) {
		unsigned long block_pa;

		/*
		 * We should observe parent valid s2tte only when
		 * we create tables above this level.
		 */
		assert(level > RTT_MIN_BLOCK_LEVEL);

		/*
		 * Break before make. This may cause spurious S2 aborts.
		 */
		s2tte_write(&parent_s2tt[wi.index], 0UL);
		invalidate_block(&s2_ctx, map_addr);

		block_pa = s2tte_pa(parent_s2tte, level - 1L);

		s2tt_init_valid(s2tt, block_pa, level);

		/*
		 * Increase the refcount to mark the granule as in-use. refcount
		 * is incremented by S2TTES_PER_S2TT (ref RTT unfolding).
		 */
		__granule_refcount_inc(g_tbl, S2TTES_PER_S2TT);

	} else if (s2tte_is_valid_ns(parent_s2tte, level - 1L)) {
		unsigned long block_pa;

		/*
		 * We should observe parent valid_ns s2tte only when
		 * we create tables above this level.
		 */
		assert(level > RTT_MIN_BLOCK_LEVEL);

		/*
		 * Break before make. This may cause spurious S2 aborts.
		 */
		s2tte_write(&parent_s2tt[wi.index], 0UL);
		invalidate_block(&s2_ctx, map_addr);

		block_pa = s2tte_pa(parent_s2tte, level - 1L);

		s2tt_init_valid_ns(s2tt, block_pa, level);

		/*
		 * Increase the refcount to mark the granule as in-use. refcount
		 * is incremented by S2TTES_PER_S2TT (ref RTT unfolding).
		 */
		__granule_refcount_inc(g_tbl, S2TTES_PER_S2TT);

	} else if (s2tte_is_table(parent_s2tte, level - 1L)) {
		/* An RTT already exists at this position */
		ret = pack_return_code(RMI_ERROR_RTT,
					(unsigned int)(level - 1L));
		goto out_unmap_table;

	} else {
		/* A locked parent entry cannot hold any other s2tte type */
		assert(false);
	}

	ret = RMI_SUCCESS;

	granule_set_state(g_tbl, GRANULE_STATE_RTT);

	/* Finally link the new RTT into the parent table */
	parent_s2tte = s2tte_create_table(rtt_addr, level - 1L);
	s2tte_write(&parent_s2tt[wi.index], parent_s2tte);

out_unmap_table:
	buffer_unmap(s2tt);
	buffer_unmap(parent_s2tt);
out_unlock_llt:
	granule_unlock(wi.g_llt);
	granule_unlock(g_tbl);
	return ret;
}
256
/*
 * RMI_RTT_FOLD handler.
 *
 * Folds the RTT at @rtt_addr (mapped at @map_addr, @ulevel) back into a
 * single entry in its parent table. The fold succeeds only if all 512
 * entries of the child table are homogeneous: all destroyed, all
 * unassigned with the same RIPAS, or a contiguous block of the same
 * mapping type. On success the child granule is scrubbed and returned to
 * the DELEGATED state.
 *
 * Returns RMI_SUCCESS, RMI_ERROR_INPUT, or a packed RMI_ERROR_RTT code.
 */
unsigned long smc_rtt_fold(unsigned long rtt_addr,
			   unsigned long rd_addr,
			   unsigned long map_addr,
			   unsigned long ulevel)
{
	struct granule *g_rd;
	struct granule *g_tbl;
	struct rd *rd;
	struct granule *g_table_root;
	struct rtt_walk wi;
	unsigned long *table, *parent_s2tt, parent_s2tte;
	long level = (long)ulevel;
	unsigned long ipa_bits;
	unsigned long ret;
	struct realm_s2_context s2_ctx;
	int sl;
	enum ripas ripas;

	g_rd = find_lock_granule(rd_addr, GRANULE_STATE_RD);
	if (g_rd == NULL) {
		return RMI_ERROR_INPUT;
	}

	rd = granule_map(g_rd, SLOT_RD);

	if (!validate_rtt_structure_cmds(map_addr, level, rd)) {
		buffer_unmap(rd);
		granule_unlock(g_rd);
		return RMI_ERROR_INPUT;
	}

	/* Snapshot walk parameters, then swap the RD lock for the RTT root lock */
	g_table_root = rd->s2_ctx.g_rtt;
	sl = realm_rtt_starting_level(rd);
	ipa_bits = realm_ipa_bits(rd);
	s2_ctx = rd->s2_ctx;
	buffer_unmap(rd);
	granule_lock(g_table_root, GRANULE_STATE_RTT);
	granule_unlock(g_rd);

	/* Walk to the parent table holding the entry to be folded */
	rtt_walk_lock_unlock(g_table_root, sl, ipa_bits,
			     map_addr, level - 1L, &wi);
	if (wi.last_level != level - 1UL) {
		ret = pack_return_code(RMI_ERROR_RTT, wi.last_level);
		goto out_unlock_parent_table;
	}

	parent_s2tt = granule_map(wi.g_llt, SLOT_RTT);
	parent_s2tte = s2tte_read(&parent_s2tt[wi.index]);
	if (!s2tte_is_table(parent_s2tte, level - 1L)) {
		ret = pack_return_code(RMI_ERROR_RTT,
					(unsigned int)(level - 1L));
		goto out_unmap_parent_table;
	}

	/*
	 * Check that the 'rtt_addr' RTT is used at (map_addr, level).
	 * Note that this also verifies that the rtt_addr is properly aligned.
	 */
	if (rtt_addr != s2tte_pa_table(parent_s2tte, level - 1L)) {
		ret = pack_return_code(RMI_ERROR_RTT,
					(unsigned int)(level - 1L));
		goto out_unmap_parent_table;
	}

	g_tbl = find_lock_granule(rtt_addr, GRANULE_STATE_RTT);

	/*
	 * A table descriptor S2TTE always points to a TABLE granule.
	 */
	assert(g_tbl);

	table = granule_map(g_tbl, SLOT_RTT2);

	/*
	 * The command can succeed only if all 512 S2TTEs are of the same type.
	 * We first check the table's ref. counter to speed up the case when
	 * the host makes a guess whether a memory region can be folded.
	 */
	if (g_tbl->refcount == 0UL) {
		/* refcount 0: no live mappings, entries are unassigned/destroyed */
		if (table_is_destroyed_block(table)) {
			parent_s2tte = s2tte_create_destroyed();
			__granule_put(wi.g_llt);

		} else if (table_is_unassigned_block(table, &ripas)) {
			/*
			 * Note that if map_addr is an Unprotected IPA, the
			 * RIPAS field is guaranteed to be zero, in both parent
			 * and child s2ttes.
			 */
			parent_s2tte = s2tte_create_unassigned(ripas);
			__granule_put(wi.g_llt);
		} else {
			/*
			 * The table holds a mixture of destroyed and
			 * unassigned entries.
			 */
			ret = pack_return_code(RMI_ERROR_RTT, level);
			goto out_unmap_table;
		}

	} else if (g_tbl->refcount == S2TTES_PER_S2TT) {
		/* refcount == 512: fully populated, may fold to a block */

		unsigned long s2tte, block_pa;

		/* The RMM specification does not allow creating block
		 * entries less than RTT_MIN_BLOCK_LEVEL even though
		 * permitted by the Arm Architecture.
		 * Hence ensure that the table being folded is at a level
		 * higher than the RTT_MIN_BLOCK_LEVEL.
		 *
		 * A fully populated table cannot be destroyed if that
		 * would create a block mapping below RTT_MIN_BLOCK_LEVEL.
		 */
		if (level <= RTT_MIN_BLOCK_LEVEL) {
			ret = pack_return_code(RMI_ERROR_RTT, wi.last_level);
			goto out_unmap_table;
		}

		s2tte = s2tte_read(&table[0]);
		block_pa = s2tte_pa(s2tte, level);

		/*
		 * The table must also refer to a contiguous block through
		 * the same type of s2tte, either Assigned, Valid or Valid_NS.
		 */
		if (table_maps_assigned_block(table, level)) {
			parent_s2tte = s2tte_create_assigned_empty(block_pa, level - 1L);
		} else if (table_maps_valid_block(table, level)) {
			parent_s2tte = s2tte_create_valid(block_pa, level - 1L);
		} else if (table_maps_valid_ns_block(table, level)) {
			parent_s2tte = s2tte_create_valid_ns(block_pa, level - 1L);
		/* The table contains mixed entries that cannot be folded */
		} else {
			ret = pack_return_code(RMI_ERROR_RTT, level);
			goto out_unmap_table;
		}

		__granule_refcount_dec(g_tbl, S2TTES_PER_S2TT);
	} else {
		/*
		 * The table holds a mixture of different types of s2ttes.
		 */
		ret = pack_return_code(RMI_ERROR_RTT, level);
		goto out_unmap_table;
	}

	ret = RMI_SUCCESS;

	/*
	 * Break before make.
	 */
	s2tte_write(&parent_s2tt[wi.index], 0UL);

	/* Invalidate per-page for live mappings, one block TLB entry otherwise */
	if (s2tte_is_valid(parent_s2tte, level - 1L) ||
	    s2tte_is_valid_ns(parent_s2tte, level - 1L)) {
		invalidate_pages_in_block(&s2_ctx, map_addr);
	} else {
		invalidate_block(&s2_ctx, map_addr);
	}

	s2tte_write(&parent_s2tt[wi.index], parent_s2tte);

	/* Scrub and return the folded RTT granule to the delegated pool */
	granule_memzero_mapped(table);
	granule_set_state(g_tbl, GRANULE_STATE_DELEGATED);

out_unmap_table:
	buffer_unmap(table);
	granule_unlock(g_tbl);
out_unmap_parent_table:
	buffer_unmap(parent_s2tt);
out_unlock_parent_table:
	granule_unlock(wi.g_llt);
	return ret;
}
431
/*
 * RMI_RTT_DESTROY handler.
 *
 * Destroys the RTT at @rtt_addr mapped at (@map_addr, @ulevel) in the realm
 * @rd_addr. The RTT must be unreferenced (refcount == 0). The parent entry
 * becomes DESTROYED for Protected IPAs or an invalid NS entry otherwise,
 * and the RTT granule is scrubbed and returned to the DELEGATED state.
 *
 * Returns RMI_SUCCESS, RMI_ERROR_INPUT, or a packed RMI_ERROR_RTT code.
 */
unsigned long smc_rtt_destroy(unsigned long rtt_addr,
			      unsigned long rd_addr,
			      unsigned long map_addr,
			      unsigned long ulevel)
{
	struct granule *g_rd;
	struct granule *g_tbl;
	struct rd *rd;
	struct granule *g_table_root;
	struct rtt_walk wi;
	unsigned long *table, *parent_s2tt, parent_s2tte;
	long level = (long)ulevel;
	unsigned long ipa_bits;
	unsigned long ret;
	struct realm_s2_context s2_ctx;
	int sl;
	bool in_par;

	g_rd = find_lock_granule(rd_addr, GRANULE_STATE_RD);
	if (g_rd == NULL) {
		return RMI_ERROR_INPUT;
	}

	rd = granule_map(g_rd, SLOT_RD);

	if (!validate_rtt_structure_cmds(map_addr, level, rd)) {
		buffer_unmap(rd);
		granule_unlock(g_rd);
		return RMI_ERROR_INPUT;
	}

	/* Snapshot walk parameters, then swap the RD lock for the RTT root lock */
	g_table_root = rd->s2_ctx.g_rtt;
	sl = realm_rtt_starting_level(rd);
	ipa_bits = realm_ipa_bits(rd);
	s2_ctx = rd->s2_ctx;
	in_par = addr_in_par(rd, map_addr);
	buffer_unmap(rd);
	granule_lock(g_table_root, GRANULE_STATE_RTT);
	granule_unlock(g_rd);

	/* Walk to the parent table holding the entry to be destroyed */
	rtt_walk_lock_unlock(g_table_root, sl, ipa_bits,
			     map_addr, level - 1L, &wi);
	if (wi.last_level != level - 1UL) {
		ret = pack_return_code(RMI_ERROR_RTT, wi.last_level);
		goto out_unlock_parent_table;
	}

	parent_s2tt = granule_map(wi.g_llt, SLOT_RTT);
	parent_s2tte = s2tte_read(&parent_s2tt[wi.index]);
	if (!s2tte_is_table(parent_s2tte, level - 1L)) {
		ret = pack_return_code(RMI_ERROR_RTT,
					(unsigned int)(level - 1L));
		goto out_unmap_parent_table;
	}

	/*
	 * Check that the 'rtt_addr' RTT is used at (map_addr, level).
	 * Note that this also verifies that the rtt_addr is properly aligned.
	 */
	if (rtt_addr != s2tte_pa_table(parent_s2tte, level - 1L)) {
		ret = RMI_ERROR_INPUT;
		goto out_unmap_parent_table;
	}

	/*
	 * Lock the RTT granule. The 'rtt_addr' is verified, thus can be treated
	 * as an internal granule.
	 */
	g_tbl = find_lock_granule(rtt_addr, GRANULE_STATE_RTT);

	/*
	 * A table descriptor S2TTE always points to a TABLE granule.
	 */
	assert(g_tbl != NULL);

	/*
	 * Read the refcount value. RTT granule is always accessed locked, thus
	 * the refcount can be accessed without atomic operations.
	 */
	if (g_tbl->refcount != 0UL) {
		/* Live entries remain in the RTT; it cannot be destroyed */
		ret = pack_return_code(RMI_ERROR_RTT, level);
		goto out_unlock_table;
	}

	ret = RMI_SUCCESS;

	table = granule_map(g_tbl, SLOT_RTT2);

	/* Protected IPA range collapses to DESTROYED; Unprotected to invalid NS */
	if (in_par) {
		parent_s2tte = s2tte_create_destroyed();
	} else {
		parent_s2tte = s2tte_create_invalid_ns();
	}

	/* Drop the reference the table descriptor held on the parent */
	__granule_put(wi.g_llt);

	/*
	 * Break before make. Note that this may cause spurious S2 aborts.
	 */
	s2tte_write(&parent_s2tt[wi.index], 0UL);
	invalidate_block(&s2_ctx, map_addr);
	s2tte_write(&parent_s2tt[wi.index], parent_s2tte);

	/* Scrub and return the destroyed RTT granule to the delegated pool */
	granule_memzero_mapped(table);
	granule_set_state(g_tbl, GRANULE_STATE_DELEGATED);

	buffer_unmap(table);
out_unlock_table:
	granule_unlock(g_tbl);
out_unmap_parent_table:
	buffer_unmap(parent_s2tt);
out_unlock_parent_table:
	granule_unlock(wi.g_llt);
	return ret;
}
547
/* Operation selector for map_unmap_ns() */
enum map_unmap_ns_op {
	MAP_NS,		/* install a Valid_NS mapping for an Unprotected IPA */
	UNMAP_NS	/* remove an existing Valid_NS mapping */
};
552
/*
 * We don't hold a reference on the NS granule when it is
 * mapped into a realm. Instead we rely on the guarantees
 * provided by the architecture to ensure that a NS access
 * to a protected granule is prohibited even within the realm.
 */
/*
 * Common implementation of RMI_RTT_MAP_UNPROTECTED (op == MAP_NS) and
 * RMI_RTT_UNMAP_UNPROTECTED (op == UNMAP_NS).
 *
 * @host_s2tte carries the host-provided output address and attributes for
 * MAP_NS; it is ignored for UNMAP_NS.
 *
 * Returns RMI_SUCCESS, RMI_ERROR_INPUT, or a packed RMI_ERROR_RTT code.
 */
static unsigned long map_unmap_ns(unsigned long rd_addr,
				  unsigned long map_addr,
				  long level,
				  unsigned long host_s2tte,
				  enum map_unmap_ns_op op)
{
	struct granule *g_rd;
	struct rd *rd;
	struct granule *g_table_root;
	unsigned long *s2tt, s2tte;
	struct rtt_walk wi;
	unsigned long ipa_bits;
	unsigned long ret;
	struct realm_s2_context s2_ctx;
	int sl;

	g_rd = find_lock_granule(rd_addr, GRANULE_STATE_RD);
	if (g_rd == NULL) {
		return RMI_ERROR_INPUT;
	}

	rd = granule_map(g_rd, SLOT_RD);

	if (!validate_rtt_map_cmds(map_addr, level, rd)) {
		buffer_unmap(rd);
		granule_unlock(g_rd);
		return RMI_ERROR_INPUT;
	}

	g_table_root = rd->s2_ctx.g_rtt;
	sl = realm_rtt_starting_level(rd);
	ipa_bits = realm_ipa_bits(rd);

	/*
	 * We don't have to check PAR boundaries for unmap_ns
	 * operation because we already test that the s2tte is Valid_NS
	 * and only outside-PAR IPAs can be translated by such s2tte.
	 *
	 * For "map_ns", however, the s2tte is verified to be Unassigned
	 * but both inside & outside PAR IPAs can be translated by such s2ttes.
	 */
	if ((op == MAP_NS) && addr_in_par(rd, map_addr)) {
		buffer_unmap(rd);
		granule_unlock(g_rd);
		return RMI_ERROR_INPUT;
	}

	s2_ctx = rd->s2_ctx;
	buffer_unmap(rd);

	/* Swap the RD lock for the RTT root lock (RD->RTT lock order) */
	granule_lock(g_table_root, GRANULE_STATE_RTT);
	granule_unlock(g_rd);

	rtt_walk_lock_unlock(g_table_root, sl, ipa_bits,
			     map_addr, level, &wi);
	if (wi.last_level != level) {
		ret = pack_return_code(RMI_ERROR_RTT, wi.last_level);
		goto out_unlock_llt;
	}

	s2tt = granule_map(wi.g_llt, SLOT_RTT);
	s2tte = s2tte_read(&s2tt[wi.index]);

	if (op == MAP_NS) {
		/* Only an Unassigned entry may receive a Valid_NS mapping */
		if (!s2tte_is_unassigned(s2tte)) {
			ret = pack_return_code(RMI_ERROR_RTT,
						(unsigned int)level);
			goto out_unmap_table;
		}

		s2tte = s2tte_create_valid_ns(host_s2tte, level);
		s2tte_write(&s2tt[wi.index], s2tte);
		__granule_get(wi.g_llt);

	} else if (op == UNMAP_NS) {
		/*
		 * The following check also verifies that map_addr is outside
		 * PAR, as valid_NS s2tte may only cover outside PAR IPA range.
		 */
		if (!s2tte_is_valid_ns(s2tte, level)) {
			ret = pack_return_code(RMI_ERROR_RTT,
						(unsigned int)level);
			goto out_unmap_table;
		}

		s2tte = s2tte_create_invalid_ns();
		s2tte_write(&s2tt[wi.index], s2tte);
		__granule_put(wi.g_llt);
		/* Invalidate at page or block granularity to match the entry */
		if (level == RTT_PAGE_LEVEL) {
			invalidate_page(&s2_ctx, map_addr);
		} else {
			invalidate_block(&s2_ctx, map_addr);
		}
	}

	ret = RMI_SUCCESS;

out_unmap_table:
	buffer_unmap(s2tt);
out_unlock_llt:
	granule_unlock(wi.g_llt);
	return ret;
}
662
663unsigned long smc_rtt_map_unprotected(unsigned long rd_addr,
664 unsigned long map_addr,
665 unsigned long ulevel,
666 unsigned long s2tte)
667{
668 long level = (long)ulevel;
669
670 if (!host_ns_s2tte_is_valid(s2tte, level)) {
671 return RMI_ERROR_INPUT;
672 }
673
674 return map_unmap_ns(rd_addr, map_addr, level, s2tte, MAP_NS);
675}
676
677unsigned long smc_rtt_unmap_unprotected(unsigned long rd_addr,
678 unsigned long map_addr,
679 unsigned long ulevel)
680{
681 return map_unmap_ns(rd_addr, map_addr, (long)ulevel, 0UL, UNMAP_NS);
682}
683
/*
 * RMI_RTT_READ_ENTRY handler.
 *
 * Walks the realm's stage 2 tables for @map_addr down to at most @ulevel
 * and reports the reached entry through @ret:
 *   x[0] - RMI status
 *   x[1] - level at which the walk terminated
 *   x[2] - entry state (RMI_UNASSIGNED/RMI_DESTROYED/RMI_ASSIGNED/
 *          RMI_VALID_NS/RMI_TABLE)
 *   x[3] - output address (PA / host s2tte / next-level table), when any
 *   x[4] - RIPAS value, when meaningful for the entry state
 */
void smc_rtt_read_entry(unsigned long rd_addr,
			unsigned long map_addr,
			unsigned long ulevel,
			struct smc_result *ret)
{
	struct granule *g_rd, *g_rtt_root;
	struct rd *rd;
	struct rtt_walk wi;
	unsigned long *s2tt, s2tte;
	unsigned long ipa_bits;
	long level = (long)ulevel;
	int sl;

	g_rd = find_lock_granule(rd_addr, GRANULE_STATE_RD);
	if (g_rd == NULL) {
		ret->x[0] = RMI_ERROR_INPUT;
		return;
	}

	rd = granule_map(g_rd, SLOT_RD);

	if (!validate_rtt_entry_cmds(map_addr, level, rd)) {
		buffer_unmap(rd);
		granule_unlock(g_rd);
		ret->x[0] = RMI_ERROR_INPUT;
		return;
	}

	g_rtt_root = rd->s2_ctx.g_rtt;
	sl = realm_rtt_starting_level(rd);
	ipa_bits = realm_ipa_bits(rd);
	buffer_unmap(rd);

	/* Swap the RD lock for the RTT root lock (RD->RTT lock order) */
	granule_lock(g_rtt_root, GRANULE_STATE_RTT);
	granule_unlock(g_rd);

	/* The walk may legitimately stop above @level; report what was found */
	rtt_walk_lock_unlock(g_rtt_root, sl, ipa_bits,
			     map_addr, level, &wi);
	s2tt = granule_map(wi.g_llt, SLOT_RTT);
	s2tte = s2tte_read(&s2tt[wi.index]);
	ret->x[1] = wi.last_level;
	ret->x[3] = 0UL;
	ret->x[4] = 0UL;

	if (s2tte_is_unassigned(s2tte)) {
		enum ripas ripas = s2tte_get_ripas(s2tte);

		ret->x[2] = RMI_UNASSIGNED;
		ret->x[4] = (unsigned long)ripas;
	} else if (s2tte_is_destroyed(s2tte)) {
		ret->x[2] = RMI_DESTROYED;
	} else if (s2tte_is_assigned(s2tte, wi.last_level)) {
		/* HIPAS=ASSIGNED implies RIPAS=EMPTY */
		ret->x[2] = RMI_ASSIGNED;
		ret->x[3] = s2tte_pa(s2tte, wi.last_level);
		ret->x[4] = RIPAS_EMPTY;
	} else if (s2tte_is_valid(s2tte, wi.last_level)) {
		/* A valid protected mapping implies RIPAS=RAM */
		ret->x[2] = RMI_ASSIGNED;
		ret->x[3] = s2tte_pa(s2tte, wi.last_level);
		ret->x[4] = RIPAS_RAM;
	} else if (s2tte_is_valid_ns(s2tte, wi.last_level)) {
		ret->x[2] = RMI_VALID_NS;
		ret->x[3] = host_ns_s2tte(s2tte, wi.last_level);
	} else if (s2tte_is_table(s2tte, wi.last_level)) {
		ret->x[2] = RMI_TABLE;
		ret->x[3] = s2tte_pa_table(s2tte, wi.last_level);
	} else {
		/* No other s2tte encoding is possible */
		assert(false);
	}

	buffer_unmap(s2tt);
	granule_unlock(wi.g_llt);

	ret->x[0] = RMI_SUCCESS;
}
758
759static void data_granule_measure(struct rd *rd, void *data,
760 unsigned long ipa,
761 unsigned long flags)
762{
763 struct measurement_desc_data measure_desc = {0};
764
765 /* Initialize the measurement descriptior structure */
766 measure_desc.desc_type = MEASURE_DESC_TYPE_DATA;
767 measure_desc.len = sizeof(struct measurement_desc_data);
768 measure_desc.ipa = ipa;
769 measure_desc.flags = flags;
770 memcpy(measure_desc.rim,
771 &rd->measurement[RIM_MEASUREMENT_SLOT],
772 measurement_get_size(rd->algorithm));
773
774 if (flags == RMI_MEASURE_CONTENT) {
775 /*
776 * Hashing the data granules and store the result in the
777 * measurement descriptor structure.
778 */
779 measurement_hash_compute(rd->algorithm,
780 data,
781 GRANULE_SIZE,
782 measure_desc.content);
783 }
784
785 /*
786 * Hashing the measurement descriptor structure; the result is the
787 * updated RIM.
788 */
789 measurement_hash_compute(rd->algorithm,
790 &measure_desc,
791 sizeof(measure_desc),
792 rd->measurement[RIM_MEASUREMENT_SLOT]);
793}
794
795static unsigned long validate_data_create_unknown(unsigned long map_addr,
796 struct rd *rd)
797{
798 if (!addr_in_par(rd, map_addr)) {
799 return RMI_ERROR_INPUT;
800 }
801
802 if (!validate_map_addr(map_addr, RTT_PAGE_LEVEL, rd)) {
803 return RMI_ERROR_INPUT;
804 }
805
806 return RMI_SUCCESS;
807}
808
809static unsigned long validate_data_create(unsigned long map_addr,
810 struct rd *rd)
811{
812 if (get_rd_state_locked(rd) != REALM_STATE_NEW) {
813 return RMI_ERROR_REALM;
814 }
815
816 return validate_data_create_unknown(map_addr, rd);
817}
818
/*
 * Implements both Data.Create and Data.CreateUnknown
 *
 * if @g_src == NULL, this implemented Data.CreateUnknown
 * and otherwise this implemented Data.Create.
 *
 * For Data.Create, the contents of the NS granule @g_src are copied into
 * the new DATA granule and (optionally, per @flags) measured into the RIM.
 * The target s2tte must be Unassigned; its RIPAS selects whether the new
 * entry becomes Assigned-empty or Valid.
 *
 * Returns RMI_SUCCESS, RMI_ERROR_INPUT, RMI_ERROR_REALM, or a packed
 * RMI_ERROR_RTT code.
 */
static unsigned long data_create(unsigned long rd_addr,
				 unsigned long data_addr,
				 unsigned long map_addr,
				 struct granule *g_src,
				 unsigned long flags)
{
	struct granule *g_data;
	struct granule *g_rd;
	struct granule *g_table_root;
	struct rd *rd;
	struct rtt_walk wi;
	unsigned long s2tte, *s2tt;
	enum ripas ripas;
	/* On failure the granule stays DELEGATED; on success it becomes DATA */
	enum granule_state new_data_state = GRANULE_STATE_DELEGATED;
	unsigned long ipa_bits;
	unsigned long ret;
	int __unused meas_ret;
	int sl;

	/* Atomically lock both the target data granule and the RD */
	if (!find_lock_two_granules(data_addr,
				    GRANULE_STATE_DELEGATED,
				    &g_data,
				    rd_addr,
				    GRANULE_STATE_RD,
				    &g_rd)) {
		return RMI_ERROR_INPUT;
	}

	rd = granule_map(g_rd, SLOT_RD);

	ret = (g_src != NULL) ?
		validate_data_create(map_addr, rd) :
		validate_data_create_unknown(map_addr, rd);

	if (ret != RMI_SUCCESS) {
		goto out_unmap_rd;
	}

	g_table_root = rd->s2_ctx.g_rtt;
	sl = realm_rtt_starting_level(rd);
	ipa_bits = realm_ipa_bits(rd);
	granule_lock(g_table_root, GRANULE_STATE_RTT);
	/* Data granules map at page granularity; walk must reach page level */
	rtt_walk_lock_unlock(g_table_root, sl, ipa_bits,
			     map_addr, RTT_PAGE_LEVEL, &wi);
	if (wi.last_level != RTT_PAGE_LEVEL) {
		ret = pack_return_code(RMI_ERROR_RTT, wi.last_level);
		goto out_unlock_ll_table;
	}

	s2tt = granule_map(wi.g_llt, SLOT_RTT);
	s2tte = s2tte_read(&s2tt[wi.index]);
	if (!s2tte_is_unassigned(s2tte)) {
		ret = pack_return_code(RMI_ERROR_RTT, RTT_PAGE_LEVEL);
		goto out_unmap_ll_table;
	}

	ripas = s2tte_get_ripas(s2tte);

	if (g_src != NULL) {
		/* Data.Create: copy and optionally measure the NS source */
		bool ns_access_ok;
		void *data = granule_map(g_data, SLOT_DELEGATED);

		ns_access_ok = ns_buffer_read(SLOT_NS, g_src, 0U,
					      GRANULE_SIZE, data);

		if (!ns_access_ok) {
			/*
			 * Some data may be copied before the failure. Zero
			 * g_data granule as it will remain in delegated state.
			 */
			(void)memset(data, 0, GRANULE_SIZE);
			buffer_unmap(data);
			ret = RMI_ERROR_INPUT;
			goto out_unmap_ll_table;
		}


		data_granule_measure(rd, data, map_addr, flags);

		buffer_unmap(data);
	}

	new_data_state = GRANULE_STATE_DATA;

	/* RIPAS=EMPTY keeps the mapping invalid (HIPAS=ASSIGNED); else Valid */
	s2tte = (ripas == RIPAS_EMPTY) ?
		s2tte_create_assigned_empty(data_addr, RTT_PAGE_LEVEL) :
		s2tte_create_valid(data_addr, RTT_PAGE_LEVEL);

	s2tte_write(&s2tt[wi.index], s2tte);
	__granule_get(wi.g_llt);

	ret = RMI_SUCCESS;

out_unmap_ll_table:
	buffer_unmap(s2tt);
out_unlock_ll_table:
	granule_unlock(wi.g_llt);
out_unmap_rd:
	buffer_unmap(rd);
	granule_unlock(g_rd);
	granule_unlock_transition(g_data, new_data_state);
	return ret;
}
928
AlexeiFedorovac923c82023-04-06 15:12:04 +0100929unsigned long smc_data_create(unsigned long rd_addr,
930 unsigned long data_addr,
Soby Mathewb4c6df42022-11-09 11:13:29 +0000931 unsigned long map_addr,
932 unsigned long src_addr,
933 unsigned long flags)
934{
935 struct granule *g_src;
Soby Mathewb4c6df42022-11-09 11:13:29 +0000936
937 if (flags != RMI_NO_MEASURE_CONTENT && flags != RMI_MEASURE_CONTENT) {
938 return RMI_ERROR_INPUT;
939 }
940
941 g_src = find_granule(src_addr);
942 if ((g_src == NULL) || (g_src->state != GRANULE_STATE_NS)) {
943 return RMI_ERROR_INPUT;
944 }
945
AlexeiFedorovac923c82023-04-06 15:12:04 +0100946 return data_create(rd_addr, data_addr, map_addr, g_src, flags);
Soby Mathewb4c6df42022-11-09 11:13:29 +0000947}
948
AlexeiFedorovac923c82023-04-06 15:12:04 +0100949unsigned long smc_data_create_unknown(unsigned long rd_addr,
950 unsigned long data_addr,
Soby Mathewb4c6df42022-11-09 11:13:29 +0000951 unsigned long map_addr)
952{
AlexeiFedorovac923c82023-04-06 15:12:04 +0100953 return data_create(rd_addr, data_addr, map_addr, NULL, 0);
Soby Mathewb4c6df42022-11-09 11:13:29 +0000954}
955
/*
 * RMI_DATA_DESTROY handler.
 *
 * Destroys the DATA granule mapped at @map_addr in the realm @rd_addr.
 * A Valid entry transitions to DESTROYED; an Assigned (HIPAS=ASSIGNED)
 * entry transitions to UNASSIGNED with RIPAS=EMPTY. The data granule is
 * scrubbed and returned to the DELEGATED state.
 *
 * Returns RMI_SUCCESS, RMI_ERROR_INPUT, or a packed RMI_ERROR_RTT code.
 */
unsigned long smc_data_destroy(unsigned long rd_addr,
			       unsigned long map_addr)
{
	struct granule *g_data;
	struct granule *g_rd;
	struct granule *g_table_root;
	struct rtt_walk wi;
	unsigned long data_addr, s2tte, *s2tt;
	struct rd *rd;
	unsigned long ipa_bits;
	unsigned long ret;
	struct realm_s2_context s2_ctx;
	bool valid;
	int sl;

	g_rd = find_lock_granule(rd_addr, GRANULE_STATE_RD);
	if (g_rd == NULL) {
		return RMI_ERROR_INPUT;
	}

	rd = granule_map(g_rd, SLOT_RD);

	if (!validate_map_addr(map_addr, RTT_PAGE_LEVEL, rd)) {
		buffer_unmap(rd);
		granule_unlock(g_rd);
		return RMI_ERROR_INPUT;
	}

	/* Snapshot walk parameters, then swap the RD lock for the RTT root lock */
	g_table_root = rd->s2_ctx.g_rtt;
	sl = realm_rtt_starting_level(rd);
	ipa_bits = realm_ipa_bits(rd);
	s2_ctx = rd->s2_ctx;
	buffer_unmap(rd);

	granule_lock(g_table_root, GRANULE_STATE_RTT);
	granule_unlock(g_rd);

	rtt_walk_lock_unlock(g_table_root, sl, ipa_bits,
			     map_addr, RTT_PAGE_LEVEL, &wi);
	if (wi.last_level != RTT_PAGE_LEVEL) {
		ret = pack_return_code(RMI_ERROR_RTT, wi.last_level);
		goto out_unlock_ll_table;
	}

	s2tt = granule_map(wi.g_llt, SLOT_RTT);
	s2tte = s2tte_read(&s2tt[wi.index]);

	valid = s2tte_is_valid(s2tte, RTT_PAGE_LEVEL);

	/*
	 * Check if either HIPAS=ASSIGNED or map_addr is a
	 * valid Protected IPA.
	 */
	if (!valid && !s2tte_is_assigned(s2tte, RTT_PAGE_LEVEL)) {
		ret = pack_return_code(RMI_ERROR_RTT, RTT_PAGE_LEVEL);
		goto out_unmap_ll_table;
	}

	data_addr = s2tte_pa(s2tte, RTT_PAGE_LEVEL);

	/*
	 * We have already established either HIPAS=ASSIGNED or a valid mapping.
	 * If valid, transition HIPAS to DESTROYED and if HIPAS=ASSIGNED,
	 * transition to UNASSIGNED.
	 */
	s2tte = valid ? s2tte_create_destroyed() :
			s2tte_create_unassigned(RIPAS_EMPTY);

	s2tte_write(&s2tt[wi.index], s2tte);

	/* Only a previously valid mapping needs TLB invalidation */
	if (valid) {
		invalidate_page(&s2_ctx, map_addr);
	}

	/* Drop the reference the destroyed entry held on its RTT */
	__granule_put(wi.g_llt);

	/*
	 * Lock the data granule and check expected state. Correct locking order
	 * is guaranteed because granule address is obtained from a locked
	 * granule by table walk. This lock needs to be acquired before a state
	 * transition to or from GRANULE_STATE_DATA for granule address can happen.
	 */
	g_data = find_lock_granule(data_addr, GRANULE_STATE_DATA);
	assert(g_data);
	granule_memzero(g_data, SLOT_DELEGATED);
	granule_unlock_transition(g_data, GRANULE_STATE_DELEGATED);

	ret = RMI_SUCCESS;

out_unmap_ll_table:
	buffer_unmap(s2tt);
out_unlock_ll_table:
	granule_unlock(wi.g_llt);

	return ret;
}
1052
1053static bool update_ripas(unsigned long *s2tte, unsigned long level,
1054 enum ripas ripas)
1055{
1056 if (s2tte_is_table(*s2tte, level)) {
1057 return false;
1058 }
1059
1060 if (s2tte_is_valid(*s2tte, level)) {
Yousuf A62808152022-10-31 10:35:42 +00001061 if (ripas == RIPAS_EMPTY) {
Soby Mathewb4c6df42022-11-09 11:13:29 +00001062 unsigned long pa = s2tte_pa(*s2tte, level);
1063 *s2tte = s2tte_create_assigned_empty(pa, level);
1064 }
1065 return true;
1066 }
1067
1068 if (s2tte_is_unassigned(*s2tte) || s2tte_is_assigned(*s2tte, level)) {
1069 *s2tte |= s2tte_create_ripas(ripas);
1070 return true;
1071 }
1072
1073 return false;
1074}
1075
1076static void ripas_granule_measure(struct rd *rd,
1077 unsigned long ipa,
1078 unsigned long level)
1079{
1080 struct measurement_desc_ripas measure_desc = {0};
1081
1082 /* Initialize the measurement descriptior structure */
1083 measure_desc.desc_type = MEASURE_DESC_TYPE_RIPAS;
1084 measure_desc.len = sizeof(struct measurement_desc_ripas);
1085 measure_desc.ipa = ipa;
1086 measure_desc.level = level;
1087 memcpy(measure_desc.rim,
1088 &rd->measurement[RIM_MEASUREMENT_SLOT],
1089 measurement_get_size(rd->algorithm));
1090
1091 /*
1092 * Hashing the measurement descriptor structure; the result is the
1093 * updated RIM.
1094 */
1095 measurement_hash_compute(rd->algorithm,
1096 &measure_desc,
1097 sizeof(measure_desc),
1098 rd->measurement[RIM_MEASUREMENT_SLOT]);
1099}
1100
/*
 * Handle RMI_RTT_INIT_RIPAS: set the RIPAS of an unassigned RTT entry to
 * RAM before Realm activation, extending the RIM accordingly.
 *
 * rd_addr   - PA of the RD granule.
 * map_addr  - base IPA of the target region (must be level-aligned and
 *             inside the protected address range).
 * ulevel    - requested RTT level for the entry.
 *
 * Returns RMI_SUCCESS, RMI_ERROR_INPUT, RMI_ERROR_REALM, or a packed
 * RMI_ERROR_RTT carrying the faulting level.
 */
unsigned long smc_rtt_init_ripas(unsigned long rd_addr,
				 unsigned long map_addr,
				 unsigned long ulevel)
{
	struct granule *g_rd, *g_rtt_root;
	struct rd *rd;
	unsigned long ipa_bits;
	struct rtt_walk wi;
	unsigned long s2tte, *s2tt;
	unsigned long ret;
	long level = (long)ulevel;
	int sl;

	g_rd = find_lock_granule(rd_addr, GRANULE_STATE_RD);
	if (g_rd == NULL) {
		return RMI_ERROR_INPUT;
	}

	rd = granule_map(g_rd, SLOT_RD);

	/* RIPAS may only be initialized while the Realm is still NEW. */
	if (get_rd_state_locked(rd) != REALM_STATE_NEW) {
		buffer_unmap(rd);
		granule_unlock(g_rd);
		return RMI_ERROR_REALM;
	}

	if (!validate_rtt_entry_cmds(map_addr, level, rd)) {
		buffer_unmap(rd);
		granule_unlock(g_rd);
		return RMI_ERROR_INPUT;
	}

	/* The target IPA must lie in the protected address range. */
	if (!addr_in_par(rd, map_addr)) {
		buffer_unmap(rd);
		granule_unlock(g_rd);
		return RMI_ERROR_INPUT;
	}

	g_rtt_root = rd->s2_ctx.g_rtt;
	sl = realm_rtt_starting_level(rd);
	ipa_bits = realm_ipa_bits(rd);

	/*
	 * Lock the root RTT before releasing the RD lock (lock handover);
	 * the rd mapping stays live past the unlock and is released on the
	 * common exit path below.
	 */
	granule_lock(g_rtt_root, GRANULE_STATE_RTT);
	granule_unlock(g_rd);

	rtt_walk_lock_unlock(g_rtt_root, sl, ipa_bits,
				map_addr, level, &wi);

	/* The walk must terminate exactly at the requested level. */
	if (wi.last_level != level) {
		ret = pack_return_code(RMI_ERROR_RTT, wi.last_level);
		goto out_unlock_llt;
	}

	s2tt = granule_map(wi.g_llt, SLOT_RTT);
	s2tte = s2tte_read(&s2tt[wi.index]);

	/* Allowed only for HIPAS=UNASSIGNED */
	if (s2tte_is_table(s2tte, level) || !s2tte_is_unassigned(s2tte)) {
		ret = pack_return_code(RMI_ERROR_RTT, (unsigned int)level);
		goto out_unmap_llt;
	}

	/* Mark the entry as RIPAS=RAM and record the change in the RIM. */
	s2tte |= s2tte_create_ripas(RIPAS_RAM);

	s2tte_write(&s2tt[wi.index], s2tte);

	ripas_granule_measure(rd, map_addr, level);

	ret = RMI_SUCCESS;

out_unmap_llt:
	buffer_unmap(s2tt);
out_unlock_llt:
	buffer_unmap(rd);
	granule_unlock(wi.g_llt);
	return ret;
}
1178
/*
 * Handle RMI_RTT_SET_RIPAS: apply the RIPAS change previously requested by
 * the Realm (recorded in the REC's set_ripas window) to one RTT entry, and
 * advance the REC's progress pointer.
 *
 * rd_addr   - PA of the RD granule.
 * rec_addr  - PA of the REC granule that issued the RIPAS change request.
 * map_addr  - base IPA of the target entry; must equal the REC's current
 *             set_ripas.addr (requests are applied in address order).
 * ulevel    - requested RTT level.
 * uripas    - requested RIPAS value (must match the REC's pending request).
 *
 * Returns RMI_SUCCESS, RMI_ERROR_INPUT, RMI_ERROR_REC, or a packed
 * RMI_ERROR_RTT carrying the faulting level.
 */
unsigned long smc_rtt_set_ripas(unsigned long rd_addr,
				unsigned long rec_addr,
				unsigned long map_addr,
				unsigned long ulevel,
				unsigned long uripas)
{
	struct granule *g_rd, *g_rec, *g_rtt_root;
	struct rec *rec;
	struct rd *rd;
	unsigned long map_size, ipa_bits;
	struct rtt_walk wi;
	unsigned long s2tte, *s2tt;
	struct realm_s2_context s2_ctx;
	long level = (long)ulevel;
	enum ripas ripas = (enum ripas)uripas;
	unsigned long ret;
	bool valid;
	int sl;

	/* Only EMPTY and RAM are valid RIPAS values. */
	if (ripas > RIPAS_RAM) {
		return RMI_ERROR_INPUT;
	}

	/* Lock RD and REC together in the architected order. */
	if (!find_lock_two_granules(rd_addr,
				    GRANULE_STATE_RD,
				    &g_rd,
				    rec_addr,
				    GRANULE_STATE_REC,
				    &g_rec)) {
		return RMI_ERROR_INPUT;
	}

	/* The REC must not be running on another PE. */
	if (granule_refcount_read_acquire(g_rec) != 0UL) {
		ret = RMI_ERROR_REC;
		goto out_unlock_rec_rd;
	}

	rec = granule_map(g_rec, SLOT_REC);

	/* The REC must belong to the Realm identified by rd_addr. */
	if (g_rd != rec->realm_info.g_rd) {
		ret = RMI_ERROR_REC;
		goto out_unmap_rec;
	}

	/* The RIPAS value must match what the Realm actually requested. */
	if (ripas != rec->set_ripas.ripas) {
		ret = RMI_ERROR_INPUT;
		goto out_unmap_rec;
	}

	if (map_addr != rec->set_ripas.addr) {
		/* Target region is not next chunk of requested region */
		ret = RMI_ERROR_INPUT;
		goto out_unmap_rec;
	}

	rd = granule_map(g_rd, SLOT_RD);

	if (!validate_rtt_entry_cmds(map_addr, level, rd)) {
		ret = RMI_ERROR_INPUT;
		goto out_unmap_rd;
	}

	map_size = s2tte_map_size(level);
	if (map_addr + map_size > rec->set_ripas.end) {
		/* Target region extends beyond end of requested region */
		ret = RMI_ERROR_INPUT;
		goto out_unmap_rd;
	}

	g_rtt_root = rd->s2_ctx.g_rtt;
	sl = realm_rtt_starting_level(rd);
	ipa_bits = realm_ipa_bits(rd);
	/* Keep a copy of the S2 context for TLB invalidation after unmap. */
	s2_ctx = rd->s2_ctx;

	/* RD and REC stay locked; root RTT is locked on top of them. */
	granule_lock(g_rtt_root, GRANULE_STATE_RTT);

	rtt_walk_lock_unlock(g_rtt_root, sl, ipa_bits,
			     map_addr, level, &wi);
	/* The walk must terminate exactly at the requested level. */
	if (wi.last_level != level) {
		ret = pack_return_code(RMI_ERROR_RTT, wi.last_level);
		goto out_unlock_llt;
	}

	s2tt = granule_map(wi.g_llt, SLOT_RTT);
	s2tte = s2tte_read(&s2tt[wi.index]);

	/* Remember whether the entry was a valid mapping before the update. */
	valid = s2tte_is_valid(s2tte, level);

	if (!update_ripas(&s2tte, level, ripas)) {
		ret = pack_return_code(RMI_ERROR_RTT, (unsigned int)level);
		goto out_unmap_llt;
	}

	s2tte_write(&s2tt[wi.index], s2tte);

	/*
	 * A valid mapping transitioned to EMPTY was unmapped; invalidate
	 * the stale stage 2 TLB entries for the region.
	 */
	if (valid && (ripas == RIPAS_EMPTY)) {
		if (level == RTT_PAGE_LEVEL) {
			invalidate_page(&s2_ctx, map_addr);
		} else {
			invalidate_block(&s2_ctx, map_addr);
		}
	}

	/* Advance the REC's progress pointer past the handled chunk. */
	rec->set_ripas.addr += map_size;

	ret = RMI_SUCCESS;

out_unmap_llt:
	buffer_unmap(s2tt);
out_unlock_llt:
	granule_unlock(wi.g_llt);
out_unmap_rd:
	buffer_unmap(rd);
out_unmap_rec:
	buffer_unmap(rec);
out_unlock_rec_rd:
	granule_unlock(g_rec);
	granule_unlock(g_rd);
	return ret;
}