blob: 0e367e0c48e692bc8520ce09b6f6fb1db1ab3647 [file] [log] [blame]
Soby Mathewb4c6df42022-11-09 11:13:29 +00001/*
2 * SPDX-License-Identifier: BSD-3-Clause
3 *
4 * SPDX-FileCopyrightText: Copyright TF-RMM Contributors.
5 */
6
7#include <buffer.h>
8#include <granule.h>
9#include <measurement.h>
10#include <realm.h>
11#include <ripas.h>
12#include <smc-handler.h>
13#include <smc-rmi.h>
14#include <smc.h>
15#include <stddef.h>
16#include <string.h>
17#include <table.h>
18
/*
 * Validate the map_addr value passed to RMI_RTT_* and RMI_DATA_* commands.
 * The address must fall inside the realm's IPA space and be aligned to the
 * block/page size of the requested RTT level.
 */
static bool validate_map_addr(unsigned long map_addr,
			      unsigned long level,
			      struct rd *rd)
{
	return (map_addr < realm_ipa_size(rd)) &&
	       addr_is_level_aligned(map_addr, level);
}
35
36/*
37 * Structure commands can operate on all RTTs except for the root RTT so
38 * the minimal valid level is the stage 2 starting level + 1.
39 */
40static bool validate_rtt_structure_cmds(unsigned long map_addr,
41 long level,
42 struct rd *rd)
43{
44 int min_level = realm_rtt_starting_level(rd) + 1;
45
46 if ((level < min_level) || (level > RTT_PAGE_LEVEL)) {
47 return false;
48 }
49 return validate_map_addr(map_addr, level, rd);
50}
51
52/*
53 * Map/Unmap commands can operate up to a level 2 block entry so min_level is
54 * the smallest block size.
55 */
56static bool validate_rtt_map_cmds(unsigned long map_addr,
57 long level,
58 struct rd *rd)
59{
60 if ((level < RTT_MIN_BLOCK_LEVEL) || (level > RTT_PAGE_LEVEL)) {
61 return false;
62 }
63 return validate_map_addr(map_addr, level, rd);
64}
65
66/*
67 * Entry commands can operate on any entry so the minimal valid level is the
68 * stage 2 starting level.
69 */
70static bool validate_rtt_entry_cmds(unsigned long map_addr,
71 long level,
72 struct rd *rd)
73{
74 if ((level < realm_rtt_starting_level(rd)) ||
75 (level > RTT_PAGE_LEVEL)) {
76 return false;
77 }
78 return validate_map_addr(map_addr, level, rd);
79}
80
/*
 * Handles RMI_RTT_CREATE: turns the DELEGATED granule at 'rtt_addr' into a
 * new RTT covering 'map_addr' at 'ulevel', and links it into the realm's
 * stage 2 tree by replacing the parent entry at 'ulevel - 1'.
 *
 * The new table is initialized to mirror the parent S2TTE it replaces
 * (unassigned/destroyed state, or an unfolded block mapping).
 *
 * Returns RMI_SUCCESS, RMI_ERROR_INPUT, or a packed RMI_ERROR_RTT code
 * carrying the level at which the walk ended.
 */
unsigned long smc_rtt_create(unsigned long rd_addr,
			     unsigned long rtt_addr,
			     unsigned long map_addr,
			     unsigned long ulevel)
{
	struct granule *g_rd;
	struct granule *g_tbl;
	struct rd *rd;
	struct granule *g_table_root;
	struct rtt_walk wi;
	unsigned long *s2tt, *parent_s2tt, parent_s2tte;
	long level = (long)ulevel;
	unsigned long ipa_bits;
	unsigned long ret;
	struct realm_s2_context s2_ctx;
	int sl;

	/* Lock both input granules and verify their expected states */
	if (!find_lock_two_granules(rtt_addr,
				    GRANULE_STATE_DELEGATED,
				    &g_tbl,
				    rd_addr,
				    GRANULE_STATE_RD,
				    &g_rd)) {
		return RMI_ERROR_INPUT;
	}

	rd = granule_map(g_rd, SLOT_RD);

	if (!validate_rtt_structure_cmds(map_addr, level, rd)) {
		buffer_unmap(rd);
		granule_unlock(g_rd);
		granule_unlock(g_tbl);
		return RMI_ERROR_INPUT;
	}

	/* Snapshot what we need from the RD before unmapping it */
	g_table_root = rd->s2_ctx.g_rtt;
	sl = realm_rtt_starting_level(rd);
	ipa_bits = realm_ipa_bits(rd);
	s2_ctx = rd->s2_ctx;
	buffer_unmap(rd);

	/*
	 * Lock the RTT root. Enforcing locking order RD->RTT is enough to
	 * ensure deadlock free locking guarantee.
	 */
	granule_lock(g_table_root, GRANULE_STATE_RTT);

	/* Unlock RD after locking RTT Root */
	granule_unlock(g_rd);

	/* Walk to the parent level; the new table plugs in one level below */
	rtt_walk_lock_unlock(g_table_root, sl, ipa_bits,
			     map_addr, level - 1L, &wi);
	if (wi.last_level != level - 1L) {
		ret = pack_return_code(RMI_ERROR_RTT, wi.last_level);
		goto out_unlock_llt;
	}

	parent_s2tt = granule_map(wi.g_llt, SLOT_RTT);
	parent_s2tte = s2tte_read(&parent_s2tt[wi.index]);
	s2tt = granule_map(g_tbl, SLOT_DELEGATED);

	/* Initialize the new table to be equivalent to the entry it replaces */
	if (s2tte_is_unassigned(parent_s2tte)) {
		enum ripas ripas = s2tte_get_ripas(parent_s2tte);

		s2tt_init_unassigned(s2tt, ripas);

		/*
		 * Increase the refcount of the parent, the granule was
		 * locked while table walking and hand-over-hand locking.
		 * Atomicity and acquire/release semantics not required because
		 * the table is accessed always locked.
		 */
		__granule_get(wi.g_llt);

	} else if (s2tte_is_unassigned_ns(parent_s2tte)) {
		s2tt_init_unassigned_ns(s2tt);
		__granule_get(wi.g_llt);

	} else if (s2tte_is_destroyed(parent_s2tte)) {
		s2tt_init_destroyed(s2tt);
		__granule_get(wi.g_llt);

	} else if (s2tte_is_assigned(parent_s2tte, level - 1L)) {
		unsigned long block_pa;

		/*
		 * We should observe parent assigned s2tte only when
		 * we create tables above this level.
		 */
		assert(level > RTT_MIN_BLOCK_LEVEL);

		block_pa = s2tte_pa(parent_s2tte, level - 1L);

		/* Unfold the assigned block into page-granular entries */
		s2tt_init_assigned_empty(s2tt, block_pa, level);

		/*
		 * Increase the refcount to mark the granule as in-use. refcount
		 * is incremented by S2TTES_PER_S2TT (ref RTT unfolding).
		 */
		__granule_refcount_inc(g_tbl, S2TTES_PER_S2TT);

	} else if (s2tte_is_valid(parent_s2tte, level - 1L)) {
		unsigned long block_pa;

		/*
		 * We should observe parent valid s2tte only when
		 * we create tables above this level.
		 */
		assert(level > RTT_MIN_BLOCK_LEVEL);

		/*
		 * Break before make. This may cause spurious S2 aborts.
		 */
		s2tte_write(&parent_s2tt[wi.index], 0UL);
		invalidate_block(&s2_ctx, map_addr);

		block_pa = s2tte_pa(parent_s2tte, level - 1L);

		s2tt_init_valid(s2tt, block_pa, level);

		/*
		 * Increase the refcount to mark the granule as in-use. refcount
		 * is incremented by S2TTES_PER_S2TT (ref RTT unfolding).
		 */
		__granule_refcount_inc(g_tbl, S2TTES_PER_S2TT);

	} else if (s2tte_is_valid_ns(parent_s2tte, level - 1L)) {
		unsigned long block_pa;

		/*
		 * We should observe parent valid_ns s2tte only when
		 * we create tables above this level.
		 */
		assert(level > RTT_MIN_BLOCK_LEVEL);

		/*
		 * Break before make. This may cause spurious S2 aborts.
		 */
		s2tte_write(&parent_s2tt[wi.index], 0UL);
		invalidate_block(&s2_ctx, map_addr);

		block_pa = s2tte_pa(parent_s2tte, level - 1L);

		s2tt_init_valid_ns(s2tt, block_pa, level);

		/*
		 * Increase the refcount to mark the granule as in-use. refcount
		 * is incremented by S2TTES_PER_S2TT (ref RTT unfolding).
		 */
		__granule_refcount_inc(g_tbl, S2TTES_PER_S2TT);

	} else if (s2tte_is_table(parent_s2tte, level - 1L)) {
		/* A table already exists here; report the conflicting level */
		ret = pack_return_code(RMI_ERROR_RTT,
					(unsigned int)(level - 1L));
		goto out_unmap_table;

	} else {
		assert(false);
	}

	ret = RMI_SUCCESS;

	granule_set_state(g_tbl, GRANULE_STATE_RTT);

	/* Finally link the new table into the parent */
	parent_s2tte = s2tte_create_table(rtt_addr, level - 1L);
	s2tte_write(&parent_s2tt[wi.index], parent_s2tte);

out_unmap_table:
	buffer_unmap(s2tt);
	buffer_unmap(parent_s2tt);
out_unlock_llt:
	granule_unlock(wi.g_llt);
	granule_unlock(g_tbl);
	return ret;
}
256
/*
 * Handles RMI_RTT_FOLD: collapses the homogeneous RTT at 'rtt_addr'
 * (reached at 'ulevel' for 'map_addr') back into a single parent S2TTE and
 * returns the table granule to the DELEGATED state.
 *
 * The fold only succeeds when every entry in the table is of the same type:
 * either all unassigned/destroyed (refcount == 0), or a fully-populated
 * contiguous block of the same mapping type (refcount == S2TTES_PER_S2TT).
 *
 * Returns RMI_SUCCESS, RMI_ERROR_INPUT, or a packed RMI_ERROR_RTT code.
 */
unsigned long smc_rtt_fold(unsigned long rtt_addr,
			   unsigned long rd_addr,
			   unsigned long map_addr,
			   unsigned long ulevel)
{
	struct granule *g_rd;
	struct granule *g_tbl;
	struct rd *rd;
	struct granule *g_table_root;
	struct rtt_walk wi;
	unsigned long *table, *parent_s2tt, parent_s2tte;
	long level = (long)ulevel;
	unsigned long ipa_bits;
	unsigned long ret;
	struct realm_s2_context s2_ctx;
	int sl;
	enum ripas ripas;

	g_rd = find_lock_granule(rd_addr, GRANULE_STATE_RD);
	if (g_rd == NULL) {
		return RMI_ERROR_INPUT;
	}

	rd = granule_map(g_rd, SLOT_RD);

	if (!validate_rtt_structure_cmds(map_addr, level, rd)) {
		buffer_unmap(rd);
		granule_unlock(g_rd);
		return RMI_ERROR_INPUT;
	}

	/* Snapshot the RD state, then lock RTT root (RD->RTT lock order) */
	g_table_root = rd->s2_ctx.g_rtt;
	sl = realm_rtt_starting_level(rd);
	ipa_bits = realm_ipa_bits(rd);
	s2_ctx = rd->s2_ctx;
	buffer_unmap(rd);
	granule_lock(g_table_root, GRANULE_STATE_RTT);
	granule_unlock(g_rd);

	/* Walk to the parent of the table being folded */
	rtt_walk_lock_unlock(g_table_root, sl, ipa_bits,
			     map_addr, level - 1L, &wi);
	if (wi.last_level != level - 1UL) {
		ret = pack_return_code(RMI_ERROR_RTT, wi.last_level);
		goto out_unlock_parent_table;
	}

	parent_s2tt = granule_map(wi.g_llt, SLOT_RTT);
	parent_s2tte = s2tte_read(&parent_s2tt[wi.index]);
	if (!s2tte_is_table(parent_s2tte, level - 1L)) {
		ret = pack_return_code(RMI_ERROR_RTT,
					(unsigned int)(level - 1L));
		goto out_unmap_parent_table;
	}

	/*
	 * Check that the 'rtt_addr' RTT is used at (map_addr, level).
	 * Note that this also verifies that the rtt_addr is properly aligned.
	 */
	if (rtt_addr != s2tte_pa_table(parent_s2tte, level - 1L)) {
		ret = pack_return_code(RMI_ERROR_RTT,
					(unsigned int)(level - 1L));
		goto out_unmap_parent_table;
	}

	g_tbl = find_lock_granule(rtt_addr, GRANULE_STATE_RTT);

	/*
	 * A table descriptor S2TTE always points to a TABLE granule.
	 */
	assert(g_tbl);

	table = granule_map(g_tbl, SLOT_RTT2);

	/*
	 * The command can succeed only if all 512 S2TTEs are of the same type.
	 * We first check the table's ref. counter to speed up the case when
	 * the host makes a guess whether a memory region can be folded.
	 */
	if (g_tbl->refcount == 0UL) {
		/* No live mappings: fold into a non-present parent entry */
		if (table_is_destroyed_block(table)) {
			parent_s2tte = s2tte_create_destroyed();
			__granule_put(wi.g_llt);

		} else if (table_is_unassigned_block(table, &ripas)) {
			parent_s2tte = s2tte_create_unassigned(ripas);
			__granule_put(wi.g_llt);
		} else if (table_is_unassigned_ns_block(table)) {
			parent_s2tte = s2tte_create_unassigned_ns();
			__granule_put(wi.g_llt);
		} else {
			/*
			 * The table holds a mixture of destroyed and
			 * unassigned entries.
			 */
			ret = pack_return_code(RMI_ERROR_RTT, level);
			goto out_unmap_table;
		}

	} else if (g_tbl->refcount == S2TTES_PER_S2TT) {

		unsigned long s2tte, block_pa;

		/* The RMM specification does not allow creating block
		 * entries less than RTT_MIN_BLOCK_LEVEL even though
		 * permitted by the Arm Architecture.
		 * Hence ensure that the table being folded is at a level
		 * higher than the RTT_MIN_BLOCK_LEVEL.
		 *
		 * A fully populated table cannot be destroyed if that
		 * would create a block mapping below RTT_MIN_BLOCK_LEVEL.
		 */
		if (level <= RTT_MIN_BLOCK_LEVEL) {
			ret = pack_return_code(RMI_ERROR_RTT, wi.last_level);
			goto out_unmap_table;
		}

		/* First entry gives the base PA of the would-be block */
		s2tte = s2tte_read(&table[0]);
		block_pa = s2tte_pa(s2tte, level);

		/*
		 * The table must also refer to a contiguous block through
		 * the same type of s2tte, either Assigned, Valid or Valid_NS.
		 */
		if (table_maps_assigned_block(table, level)) {
			parent_s2tte = s2tte_create_assigned_empty(block_pa, level - 1L);
		} else if (table_maps_valid_block(table, level)) {
			parent_s2tte = s2tte_create_valid(block_pa, level - 1L);
		} else if (table_maps_valid_ns_block(table, level)) {
			parent_s2tte = s2tte_create_valid_ns(block_pa, level - 1L);
		/* The table contains mixed entries that cannot be folded */
		} else {
			ret = pack_return_code(RMI_ERROR_RTT, level);
			goto out_unmap_table;
		}

		__granule_refcount_dec(g_tbl, S2TTES_PER_S2TT);
	} else {
		/*
		 * The table holds a mixture of different types of s2ttes.
		 */
		ret = pack_return_code(RMI_ERROR_RTT, level);
		goto out_unmap_table;
	}

	ret = RMI_SUCCESS;

	/*
	 * Break before make.
	 */
	s2tte_write(&parent_s2tt[wi.index], 0UL);

	if (s2tte_is_valid(parent_s2tte, level - 1L) ||
	    s2tte_is_valid_ns(parent_s2tte, level - 1L)) {
		invalidate_pages_in_block(&s2_ctx, map_addr);
	} else {
		invalidate_block(&s2_ctx, map_addr);
	}

	s2tte_write(&parent_s2tt[wi.index], parent_s2tte);

	/* Scrub and return the folded table to the DELEGATED pool */
	granule_memzero_mapped(table);
	granule_set_state(g_tbl, GRANULE_STATE_DELEGATED);

out_unmap_table:
	buffer_unmap(table);
	granule_unlock(g_tbl);
out_unmap_parent_table:
	buffer_unmap(parent_s2tt);
out_unlock_parent_table:
	granule_unlock(wi.g_llt);
	return ret;
}
429
/*
 * Handles RMI_RTT_DESTROY: unlinks the RTT at 'rtt_addr' (reached at
 * 'ulevel' for 'map_addr') from its parent and returns it to the DELEGATED
 * state. Only an empty table (refcount == 0) can be destroyed.
 *
 * The parent entry is rewritten as DESTROYED when map_addr is inside the
 * PAR, or as unassigned-NS otherwise.
 *
 * Returns RMI_SUCCESS, RMI_ERROR_INPUT, or a packed RMI_ERROR_RTT code.
 */
unsigned long smc_rtt_destroy(unsigned long rtt_addr,
			      unsigned long rd_addr,
			      unsigned long map_addr,
			      unsigned long ulevel)
{
	struct granule *g_rd;
	struct granule *g_tbl;
	struct rd *rd;
	struct granule *g_table_root;
	struct rtt_walk wi;
	unsigned long *table, *parent_s2tt, parent_s2tte;
	long level = (long)ulevel;
	unsigned long ipa_bits;
	unsigned long ret;
	struct realm_s2_context s2_ctx;
	int sl;
	bool in_par;

	g_rd = find_lock_granule(rd_addr, GRANULE_STATE_RD);
	if (g_rd == NULL) {
		return RMI_ERROR_INPUT;
	}

	rd = granule_map(g_rd, SLOT_RD);

	if (!validate_rtt_structure_cmds(map_addr, level, rd)) {
		buffer_unmap(rd);
		granule_unlock(g_rd);
		return RMI_ERROR_INPUT;
	}

	/* Snapshot RD state, then lock RTT root (RD->RTT lock order) */
	g_table_root = rd->s2_ctx.g_rtt;
	sl = realm_rtt_starting_level(rd);
	ipa_bits = realm_ipa_bits(rd);
	s2_ctx = rd->s2_ctx;
	in_par = addr_in_par(rd, map_addr);
	buffer_unmap(rd);
	granule_lock(g_table_root, GRANULE_STATE_RTT);
	granule_unlock(g_rd);

	/* Walk to the parent of the table being destroyed */
	rtt_walk_lock_unlock(g_table_root, sl, ipa_bits,
			     map_addr, level - 1L, &wi);
	if (wi.last_level != level - 1UL) {
		ret = pack_return_code(RMI_ERROR_RTT, wi.last_level);
		goto out_unlock_parent_table;
	}

	parent_s2tt = granule_map(wi.g_llt, SLOT_RTT);
	parent_s2tte = s2tte_read(&parent_s2tt[wi.index]);
	if (!s2tte_is_table(parent_s2tte, level - 1L)) {
		ret = pack_return_code(RMI_ERROR_RTT,
					(unsigned int)(level - 1L));
		goto out_unmap_parent_table;
	}

	/*
	 * Check that the 'rtt_addr' RTT is used at (map_addr, level).
	 * Note that this also verifies that the rtt_addr is properly aligned.
	 */
	if (rtt_addr != s2tte_pa_table(parent_s2tte, level - 1L)) {
		ret = RMI_ERROR_INPUT;
		goto out_unmap_parent_table;
	}

	/*
	 * Lock the RTT granule. The 'rtt_addr' is verified, thus can be treated
	 * as an internal granule.
	 */
	g_tbl = find_lock_granule(rtt_addr, GRANULE_STATE_RTT);

	/*
	 * A table descriptor S2TTE always points to a TABLE granule.
	 */
	assert(g_tbl != NULL);

	/*
	 * Read the refcount value. RTT granule is always accessed locked, thus
	 * the refcount can be accessed without atomic operations.
	 */
	if (g_tbl->refcount != 0UL) {
		ret = pack_return_code(RMI_ERROR_RTT, level);
		goto out_unlock_table;
	}

	ret = RMI_SUCCESS;

	table = granule_map(g_tbl, SLOT_RTT2);

	/* Replacement entry depends on whether the IPA is protected */
	if (in_par) {
		parent_s2tte = s2tte_create_destroyed();
	} else {
		parent_s2tte = s2tte_create_unassigned_ns();
	}

	__granule_put(wi.g_llt);

	/*
	 * Break before make. Note that this may cause spurious S2 aborts.
	 */
	s2tte_write(&parent_s2tt[wi.index], 0UL);
	invalidate_block(&s2_ctx, map_addr);
	s2tte_write(&parent_s2tt[wi.index], parent_s2tte);

	/* Scrub and return the destroyed table to the DELEGATED pool */
	granule_memzero_mapped(table);
	granule_set_state(g_tbl, GRANULE_STATE_DELEGATED);

	buffer_unmap(table);
out_unlock_table:
	granule_unlock(g_tbl);
out_unmap_parent_table:
	buffer_unmap(parent_s2tt);
out_unlock_parent_table:
	granule_unlock(wi.g_llt);
	return ret;
}
545
/* Operation selector for the shared NS map/unmap path (map_unmap_ns) */
enum map_unmap_ns_op {
	MAP_NS,		/* install a host-provided valid_NS descriptor */
	UNMAP_NS	/* remove a valid_NS descriptor */
};
550
/*
 * Shared worker for RMI_RTT_MAP_UNPROTECTED / RMI_RTT_UNMAP_UNPROTECTED.
 *
 * We don't hold a reference on the NS granule when it is
 * mapped into a realm. Instead we rely on the guarantees
 * provided by the architecture to ensure that a NS access
 * to a protected granule is prohibited even within the realm.
 *
 * 'host_s2tte' is only used for MAP_NS (the host-provided descriptor);
 * callers pass 0UL on UNMAP_NS.
 */
static unsigned long map_unmap_ns(unsigned long rd_addr,
				  unsigned long map_addr,
				  long level,
				  unsigned long host_s2tte,
				  enum map_unmap_ns_op op)
{
	struct granule *g_rd;
	struct rd *rd;
	struct granule *g_table_root;
	unsigned long *s2tt, s2tte;
	struct rtt_walk wi;
	unsigned long ipa_bits;
	unsigned long ret;
	struct realm_s2_context s2_ctx;
	int sl;

	g_rd = find_lock_granule(rd_addr, GRANULE_STATE_RD);
	if (g_rd == NULL) {
		return RMI_ERROR_INPUT;
	}

	rd = granule_map(g_rd, SLOT_RD);

	if (!validate_rtt_map_cmds(map_addr, level, rd)) {
		buffer_unmap(rd);
		granule_unlock(g_rd);
		return RMI_ERROR_INPUT;
	}

	g_table_root = rd->s2_ctx.g_rtt;
	sl = realm_rtt_starting_level(rd);
	ipa_bits = realm_ipa_bits(rd);

	/* map_addr must be outside the PAR; reject protected addresses */
	if (addr_in_par(rd, map_addr)) {
		buffer_unmap(rd);
		granule_unlock(g_rd);
		return RMI_ERROR_INPUT;
	}

	s2_ctx = rd->s2_ctx;
	buffer_unmap(rd);

	/* Lock RTT root before releasing the RD (RD->RTT lock order) */
	granule_lock(g_table_root, GRANULE_STATE_RTT);
	granule_unlock(g_rd);

	rtt_walk_lock_unlock(g_table_root, sl, ipa_bits,
			     map_addr, level, &wi);
	if (wi.last_level != level) {
		ret = pack_return_code(RMI_ERROR_RTT, wi.last_level);
		goto out_unlock_llt;
	}

	s2tt = granule_map(wi.g_llt, SLOT_RTT);
	s2tte = s2tte_read(&s2tt[wi.index]);

	if (op == MAP_NS) {
		/* Only an unassigned-NS entry may accept a new NS mapping */
		if (!s2tte_is_unassigned_ns(s2tte)) {
			ret = pack_return_code(RMI_ERROR_RTT,
						(unsigned int)level);
			goto out_unmap_table;
		}

		s2tte = s2tte_create_valid_ns(host_s2tte, level);
		s2tte_write(&s2tt[wi.index], s2tte);
		__granule_get(wi.g_llt);

	} else if (op == UNMAP_NS) {
		/*
		 * The following check also verifies that map_addr is outside
		 * PAR, as valid_NS s2tte may only cover outside PAR IPA range.
		 */
		if (!s2tte_is_valid_ns(s2tte, level)) {
			ret = pack_return_code(RMI_ERROR_RTT,
						(unsigned int)level);
			goto out_unmap_table;
		}

		s2tte = s2tte_create_unassigned_ns();
		s2tte_write(&s2tt[wi.index], s2tte);
		__granule_put(wi.g_llt);
		/* Invalidate the stale translation for the unmapped range */
		if (level == RTT_PAGE_LEVEL) {
			invalidate_page(&s2_ctx, map_addr);
		} else {
			invalidate_block(&s2_ctx, map_addr);
		}
	}

	ret = RMI_SUCCESS;

out_unmap_table:
	buffer_unmap(s2tt);
out_unlock_llt:
	granule_unlock(wi.g_llt);
	return ret;
}
653
654unsigned long smc_rtt_map_unprotected(unsigned long rd_addr,
655 unsigned long map_addr,
656 unsigned long ulevel,
657 unsigned long s2tte)
658{
659 long level = (long)ulevel;
660
661 if (!host_ns_s2tte_is_valid(s2tte, level)) {
662 return RMI_ERROR_INPUT;
663 }
664
665 return map_unmap_ns(rd_addr, map_addr, level, s2tte, MAP_NS);
666}
667
/*
 * Handles RMI_RTT_UNMAP_UNPROTECTED: removes the NS mapping at
 * (map_addr, ulevel). The host S2TTE argument is unused on unmap,
 * so 0UL is passed through to map_unmap_ns().
 */
unsigned long smc_rtt_unmap_unprotected(unsigned long rd_addr,
					unsigned long map_addr,
					unsigned long ulevel)
{
	return map_unmap_ns(rd_addr, map_addr, (long)ulevel, 0UL, UNMAP_NS);
}
674
/*
 * Handles RMI_RTT_READ_ENTRY: walks to (map_addr, ulevel) and reports the
 * S2TTE found there through 'ret':
 *   x[0] - status (RMI_SUCCESS / RMI_ERROR_INPUT)
 *   x[1] - level at which the walk terminated
 *   x[2] - entry state (RMI_UNASSIGNED / RMI_DESTROYED / RMI_ASSIGNED /
 *          RMI_VALID_NS / RMI_TABLE)
 *   x[3] - output PA / host descriptor / next-level table PA (0 otherwise)
 *   x[4] - RIPAS value where applicable (0 otherwise)
 */
void smc_rtt_read_entry(unsigned long rd_addr,
			unsigned long map_addr,
			unsigned long ulevel,
			struct smc_result *ret)
{
	struct granule *g_rd, *g_rtt_root;
	struct rd *rd;
	struct rtt_walk wi;
	unsigned long *s2tt, s2tte;
	unsigned long ipa_bits;
	long level = (long)ulevel;
	int sl;

	g_rd = find_lock_granule(rd_addr, GRANULE_STATE_RD);
	if (g_rd == NULL) {
		ret->x[0] = RMI_ERROR_INPUT;
		return;
	}

	rd = granule_map(g_rd, SLOT_RD);

	if (!validate_rtt_entry_cmds(map_addr, level, rd)) {
		buffer_unmap(rd);
		granule_unlock(g_rd);
		ret->x[0] = RMI_ERROR_INPUT;
		return;
	}

	g_rtt_root = rd->s2_ctx.g_rtt;
	sl = realm_rtt_starting_level(rd);
	ipa_bits = realm_ipa_bits(rd);
	buffer_unmap(rd);

	/* Lock RTT root before releasing the RD (RD->RTT lock order) */
	granule_lock(g_rtt_root, GRANULE_STATE_RTT);
	granule_unlock(g_rd);

	/* The walk may legitimately stop above 'level'; report where */
	rtt_walk_lock_unlock(g_rtt_root, sl, ipa_bits,
			     map_addr, level, &wi);
	s2tt = granule_map(wi.g_llt, SLOT_RTT);
	s2tte = s2tte_read(&s2tt[wi.index]);
	ret->x[1] = wi.last_level;
	ret->x[3] = 0UL;
	ret->x[4] = 0UL;

	if (s2tte_is_unassigned(s2tte)) {
		enum ripas ripas = s2tte_get_ripas(s2tte);

		ret->x[2] = RMI_UNASSIGNED;
		ret->x[4] = (unsigned long)ripas;
	} else if (s2tte_is_destroyed(s2tte)) {
		ret->x[2] = RMI_DESTROYED;
	} else if (s2tte_is_assigned(s2tte, wi.last_level)) {
		ret->x[2] = RMI_ASSIGNED;
		ret->x[3] = s2tte_pa(s2tte, wi.last_level);
		ret->x[4] = RIPAS_EMPTY;
	} else if (s2tte_is_valid(s2tte, wi.last_level)) {
		ret->x[2] = RMI_ASSIGNED;
		ret->x[3] = s2tte_pa(s2tte, wi.last_level);
		ret->x[4] = RIPAS_RAM;
	} else if (s2tte_is_unassigned_ns(s2tte)) {
		ret->x[2] = RMI_UNASSIGNED;
		ret->x[4] = RIPAS_EMPTY;
	} else if (s2tte_is_valid_ns(s2tte, wi.last_level)) {
		ret->x[2] = RMI_VALID_NS;
		/* Return the descriptor in the host-visible format */
		ret->x[3] = host_ns_s2tte(s2tte, wi.last_level);
	} else if (s2tte_is_table(s2tte, wi.last_level)) {
		ret->x[2] = RMI_TABLE;
		ret->x[3] = s2tte_pa_table(s2tte, wi.last_level);
	} else {
		assert(false);
	}

	buffer_unmap(s2tt);
	granule_unlock(wi.g_llt);

	ret->x[0] = RMI_SUCCESS;
}
752
753static void data_granule_measure(struct rd *rd, void *data,
754 unsigned long ipa,
755 unsigned long flags)
756{
757 struct measurement_desc_data measure_desc = {0};
758
759 /* Initialize the measurement descriptior structure */
760 measure_desc.desc_type = MEASURE_DESC_TYPE_DATA;
761 measure_desc.len = sizeof(struct measurement_desc_data);
762 measure_desc.ipa = ipa;
763 measure_desc.flags = flags;
764 memcpy(measure_desc.rim,
765 &rd->measurement[RIM_MEASUREMENT_SLOT],
766 measurement_get_size(rd->algorithm));
767
768 if (flags == RMI_MEASURE_CONTENT) {
769 /*
770 * Hashing the data granules and store the result in the
771 * measurement descriptor structure.
772 */
773 measurement_hash_compute(rd->algorithm,
774 data,
775 GRANULE_SIZE,
776 measure_desc.content);
777 }
778
779 /*
780 * Hashing the measurement descriptor structure; the result is the
781 * updated RIM.
782 */
783 measurement_hash_compute(rd->algorithm,
784 &measure_desc,
785 sizeof(measure_desc),
786 rd->measurement[RIM_MEASUREMENT_SLOT]);
787}
788
789static unsigned long validate_data_create_unknown(unsigned long map_addr,
790 struct rd *rd)
791{
792 if (!addr_in_par(rd, map_addr)) {
793 return RMI_ERROR_INPUT;
794 }
795
796 if (!validate_map_addr(map_addr, RTT_PAGE_LEVEL, rd)) {
797 return RMI_ERROR_INPUT;
798 }
799
800 return RMI_SUCCESS;
801}
802
803static unsigned long validate_data_create(unsigned long map_addr,
804 struct rd *rd)
805{
806 if (get_rd_state_locked(rd) != REALM_STATE_NEW) {
807 return RMI_ERROR_REALM;
808 }
809
810 return validate_data_create_unknown(map_addr, rd);
811}
812
/*
 * Implements both Data.Create and Data.CreateUnknown.
 *
 * If @g_src == NULL, this implements Data.CreateUnknown,
 * otherwise this implements Data.Create (the NS source granule is copied
 * into the new DATA granule and measured into the RIM).
 *
 * On success the delegated granule at 'data_addr' transitions to the DATA
 * state and is mapped at 'map_addr' (assigned-empty or valid, depending on
 * the entry's RIPAS). Returns RMI_SUCCESS, RMI_ERROR_INPUT,
 * RMI_ERROR_REALM, or a packed RMI_ERROR_RTT code.
 */
static unsigned long data_create(unsigned long rd_addr,
				 unsigned long data_addr,
				 unsigned long map_addr,
				 struct granule *g_src,
				 unsigned long flags)
{
	struct granule *g_data;
	struct granule *g_rd;
	struct granule *g_table_root;
	struct rd *rd;
	struct rtt_walk wi;
	unsigned long s2tte, *s2tt;
	enum ripas ripas;
	/* Fallback state used on error paths by the final unlock-transition */
	enum granule_state new_data_state = GRANULE_STATE_DELEGATED;
	unsigned long ipa_bits;
	unsigned long ret;
	int __unused meas_ret;
	int sl;

	if (!find_lock_two_granules(data_addr,
				    GRANULE_STATE_DELEGATED,
				    &g_data,
				    rd_addr,
				    GRANULE_STATE_RD,
				    &g_rd)) {
		return RMI_ERROR_INPUT;
	}

	rd = granule_map(g_rd, SLOT_RD);

	/* Data.Create additionally requires the realm to be NEW */
	ret = (g_src != NULL) ?
		validate_data_create(map_addr, rd) :
		validate_data_create_unknown(map_addr, rd);

	if (ret != RMI_SUCCESS) {
		goto out_unmap_rd;
	}

	g_table_root = rd->s2_ctx.g_rtt;
	sl = realm_rtt_starting_level(rd);
	ipa_bits = realm_ipa_bits(rd);
	granule_lock(g_table_root, GRANULE_STATE_RTT);
	rtt_walk_lock_unlock(g_table_root, sl, ipa_bits,
			     map_addr, RTT_PAGE_LEVEL, &wi);
	if (wi.last_level != RTT_PAGE_LEVEL) {
		ret = pack_return_code(RMI_ERROR_RTT, wi.last_level);
		goto out_unlock_ll_table;
	}

	s2tt = granule_map(wi.g_llt, SLOT_RTT);
	s2tte = s2tte_read(&s2tt[wi.index]);
	if (!s2tte_is_unassigned(s2tte)) {
		ret = pack_return_code(RMI_ERROR_RTT, RTT_PAGE_LEVEL);
		goto out_unmap_ll_table;
	}

	/* Preserved RIPAS decides the final mapping type below */
	ripas = s2tte_get_ripas(s2tte);

	if (g_src != NULL) {
		bool ns_access_ok;
		void *data = granule_map(g_data, SLOT_DELEGATED);

		/* Copy the source contents from NS memory */
		ns_access_ok = ns_buffer_read(SLOT_NS, g_src, 0U,
					      GRANULE_SIZE, data);

		if (!ns_access_ok) {
			/*
			 * Some data may be copied before the failure. Zero
			 * g_data granule as it will remain in delegated state.
			 */
			(void)memset(data, 0, GRANULE_SIZE);
			buffer_unmap(data);
			ret = RMI_ERROR_INPUT;
			goto out_unmap_ll_table;
		}


		data_granule_measure(rd, data, map_addr, flags);

		buffer_unmap(data);
	}

	new_data_state = GRANULE_STATE_DATA;

	s2tte = (ripas == RIPAS_EMPTY) ?
		s2tte_create_assigned_empty(data_addr, RTT_PAGE_LEVEL) :
		s2tte_create_valid(data_addr, RTT_PAGE_LEVEL);

	s2tte_write(&s2tt[wi.index], s2tte);
	__granule_get(wi.g_llt);

	ret = RMI_SUCCESS;

out_unmap_ll_table:
	buffer_unmap(s2tt);
out_unlock_ll_table:
	granule_unlock(wi.g_llt);
out_unmap_rd:
	buffer_unmap(rd);
	granule_unlock(g_rd);
	/* DATA on success, back to DELEGATED on any failure */
	granule_unlock_transition(g_data, new_data_state);
	return ret;
}
922
AlexeiFedorovac923c82023-04-06 15:12:04 +0100923unsigned long smc_data_create(unsigned long rd_addr,
924 unsigned long data_addr,
Soby Mathewb4c6df42022-11-09 11:13:29 +0000925 unsigned long map_addr,
926 unsigned long src_addr,
927 unsigned long flags)
928{
929 struct granule *g_src;
Soby Mathewb4c6df42022-11-09 11:13:29 +0000930
931 if (flags != RMI_NO_MEASURE_CONTENT && flags != RMI_MEASURE_CONTENT) {
932 return RMI_ERROR_INPUT;
933 }
934
935 g_src = find_granule(src_addr);
936 if ((g_src == NULL) || (g_src->state != GRANULE_STATE_NS)) {
937 return RMI_ERROR_INPUT;
938 }
939
AlexeiFedorovac923c82023-04-06 15:12:04 +0100940 return data_create(rd_addr, data_addr, map_addr, g_src, flags);
Soby Mathewb4c6df42022-11-09 11:13:29 +0000941}
942
/*
 * Handles RMI_DATA_CREATE_UNKNOWN: creates a DATA granule with
 * unmeasured, unspecified content (no NS source, so g_src == NULL
 * selects the CreateUnknown path in data_create()).
 */
unsigned long smc_data_create_unknown(unsigned long rd_addr,
				      unsigned long data_addr,
				      unsigned long map_addr)
{
	return data_create(rd_addr, data_addr, map_addr, NULL, 0);
}
949
/*
 * Handles RMI_DATA_DESTROY: unmaps the DATA granule at 'map_addr', scrubs
 * it, and returns it to the DELEGATED state.
 *
 * The entry transitions to DESTROYED if it was a valid mapping, or to
 * UNASSIGNED (RIPAS_EMPTY) if it was HIPAS=ASSIGNED.
 *
 * Returns RMI_SUCCESS, RMI_ERROR_INPUT, or a packed RMI_ERROR_RTT code.
 */
unsigned long smc_data_destroy(unsigned long rd_addr,
			       unsigned long map_addr)
{
	struct granule *g_data;
	struct granule *g_rd;
	struct granule *g_table_root;
	struct rtt_walk wi;
	unsigned long data_addr, s2tte, *s2tt;
	struct rd *rd;
	unsigned long ipa_bits;
	unsigned long ret;
	struct realm_s2_context s2_ctx;
	bool valid;
	int sl;

	g_rd = find_lock_granule(rd_addr, GRANULE_STATE_RD);
	if (g_rd == NULL) {
		return RMI_ERROR_INPUT;
	}

	rd = granule_map(g_rd, SLOT_RD);

	if (!validate_map_addr(map_addr, RTT_PAGE_LEVEL, rd)) {
		buffer_unmap(rd);
		granule_unlock(g_rd);
		return RMI_ERROR_INPUT;
	}

	/* Snapshot RD state, then lock RTT root (RD->RTT lock order) */
	g_table_root = rd->s2_ctx.g_rtt;
	sl = realm_rtt_starting_level(rd);
	ipa_bits = realm_ipa_bits(rd);
	s2_ctx = rd->s2_ctx;
	buffer_unmap(rd);

	granule_lock(g_table_root, GRANULE_STATE_RTT);
	granule_unlock(g_rd);

	rtt_walk_lock_unlock(g_table_root, sl, ipa_bits,
			     map_addr, RTT_PAGE_LEVEL, &wi);
	if (wi.last_level != RTT_PAGE_LEVEL) {
		ret = pack_return_code(RMI_ERROR_RTT, wi.last_level);
		goto out_unlock_ll_table;
	}

	s2tt = granule_map(wi.g_llt, SLOT_RTT);
	s2tte = s2tte_read(&s2tt[wi.index]);

	valid = s2tte_is_valid(s2tte, RTT_PAGE_LEVEL);

	/*
	 * Check if either HIPAS=ASSIGNED or map_addr is a
	 * valid Protected IPA.
	 */
	if (!valid && !s2tte_is_assigned(s2tte, RTT_PAGE_LEVEL)) {
		ret = pack_return_code(RMI_ERROR_RTT, RTT_PAGE_LEVEL);
		goto out_unmap_ll_table;
	}

	/* PA of the DATA granule being destroyed, taken from the S2TTE */
	data_addr = s2tte_pa(s2tte, RTT_PAGE_LEVEL);

	/*
	 * We have already established either HIPAS=ASSIGNED or a valid mapping.
	 * If valid, transition HIPAS to DESTROYED and if HIPAS=ASSIGNED,
	 * transition to UNASSIGNED.
	 */
	s2tte = valid ? s2tte_create_destroyed() :
			s2tte_create_unassigned(RIPAS_EMPTY);

	s2tte_write(&s2tt[wi.index], s2tte);

	/* Only a valid mapping had a live translation to invalidate */
	if (valid) {
		invalidate_page(&s2_ctx, map_addr);
	}

	__granule_put(wi.g_llt);

	/*
	 * Lock the data granule and check expected state. Correct locking order
	 * is guaranteed because granule address is obtained from a locked
	 * granule by table walk. This lock needs to be acquired before a state
	 * transition to or from GRANULE_STATE_DATA for granule address can happen.
	 */
	g_data = find_lock_granule(data_addr, GRANULE_STATE_DATA);
	assert(g_data);
	granule_memzero(g_data, SLOT_DELEGATED);
	granule_unlock_transition(g_data, GRANULE_STATE_DELEGATED);

	ret = RMI_SUCCESS;

out_unmap_ll_table:
	buffer_unmap(s2tt);
out_unlock_ll_table:
	granule_unlock(wi.g_llt);

	return ret;
}
1046
1047static bool update_ripas(unsigned long *s2tte, unsigned long level,
1048 enum ripas ripas)
1049{
1050 if (s2tte_is_table(*s2tte, level)) {
1051 return false;
1052 }
1053
1054 if (s2tte_is_valid(*s2tte, level)) {
Yousuf A62808152022-10-31 10:35:42 +00001055 if (ripas == RIPAS_EMPTY) {
Soby Mathewb4c6df42022-11-09 11:13:29 +00001056 unsigned long pa = s2tte_pa(*s2tte, level);
1057 *s2tte = s2tte_create_assigned_empty(pa, level);
1058 }
1059 return true;
1060 }
1061
1062 if (s2tte_is_unassigned(*s2tte) || s2tte_is_assigned(*s2tte, level)) {
1063 *s2tte |= s2tte_create_ripas(ripas);
1064 return true;
1065 }
1066
1067 return false;
1068}
1069
1070static void ripas_granule_measure(struct rd *rd,
1071 unsigned long ipa,
1072 unsigned long level)
1073{
1074 struct measurement_desc_ripas measure_desc = {0};
1075
1076 /* Initialize the measurement descriptior structure */
1077 measure_desc.desc_type = MEASURE_DESC_TYPE_RIPAS;
1078 measure_desc.len = sizeof(struct measurement_desc_ripas);
1079 measure_desc.ipa = ipa;
1080 measure_desc.level = level;
1081 memcpy(measure_desc.rim,
1082 &rd->measurement[RIM_MEASUREMENT_SLOT],
1083 measurement_get_size(rd->algorithm));
1084
1085 /*
1086 * Hashing the measurement descriptor structure; the result is the
1087 * updated RIM.
1088 */
1089 measurement_hash_compute(rd->algorithm,
1090 &measure_desc,
1091 sizeof(measure_desc),
1092 rd->measurement[RIM_MEASUREMENT_SLOT]);
1093}
1094
/*
 * Handler for RMI_RTT_INIT_RIPAS.
 *
 * Sets RIPAS=RAM on the RTT entry covering map_addr at the requested
 * level, and extends the Realm Initial Measurement with the change.
 * Only permitted while the Realm is in the NEW state, for a protected
 * (PAR) address, and only on an entry with HIPAS=UNASSIGNED.
 *
 * Arguments:
 *   rd_addr  - PA of the RD granule.
 *   map_addr - base IPA of the target region.
 *   ulevel   - RTT level of the target entry.
 *
 * Returns RMI_SUCCESS, RMI_ERROR_INPUT, RMI_ERROR_REALM, or a packed
 * RMI_ERROR_RTT code carrying the offending level.
 */
unsigned long smc_rtt_init_ripas(unsigned long rd_addr,
				 unsigned long map_addr,
				 unsigned long ulevel)
{
	struct granule *g_rd, *g_rtt_root;
	struct rd *rd;
	unsigned long ipa_bits;
	struct rtt_walk wi;
	unsigned long s2tte, *s2tt;
	unsigned long ret;
	long level = (long)ulevel;
	int sl;

	g_rd = find_lock_granule(rd_addr, GRANULE_STATE_RD);
	if (g_rd == NULL) {
		return RMI_ERROR_INPUT;
	}

	rd = granule_map(g_rd, SLOT_RD);

	/* RIPAS may only be initialized before the Realm is activated */
	if (get_rd_state_locked(rd) != REALM_STATE_NEW) {
		buffer_unmap(rd);
		granule_unlock(g_rd);
		return RMI_ERROR_REALM;
	}

	/* level/alignment check against this Realm's IPA space */
	if (!validate_rtt_entry_cmds(map_addr, level, rd)) {
		buffer_unmap(rd);
		granule_unlock(g_rd);
		return RMI_ERROR_INPUT;
	}

	/* Only protected (PAR) addresses can have their RIPAS set */
	if (!addr_in_par(rd, map_addr)) {
		buffer_unmap(rd);
		granule_unlock(g_rd);
		return RMI_ERROR_INPUT;
	}

	g_rtt_root = rd->s2_ctx.g_rtt;
	sl = realm_rtt_starting_level(rd);
	ipa_bits = realm_ipa_bits(rd);

	/*
	 * Lock the root RTT before releasing the RD granule lock.
	 * Note: rd stays mapped; it is needed for the measurement below
	 * and is unmapped on the common exit path.
	 */
	granule_lock(g_rtt_root, GRANULE_STATE_RTT);
	granule_unlock(g_rd);

	rtt_walk_lock_unlock(g_rtt_root, sl, ipa_bits,
			     map_addr, level, &wi);

	/* The walk must terminate exactly at the requested level */
	if (wi.last_level != level) {
		ret = pack_return_code(RMI_ERROR_RTT, wi.last_level);
		goto out_unlock_llt;
	}

	s2tt = granule_map(wi.g_llt, SLOT_RTT);
	s2tte = s2tte_read(&s2tt[wi.index]);

	/* Allowed only for HIPAS=UNASSIGNED */
	if (s2tte_is_table(s2tte, level) || !s2tte_is_unassigned(s2tte)) {
		ret = pack_return_code(RMI_ERROR_RTT, (unsigned int)level);
		goto out_unmap_llt;
	}

	s2tte |= s2tte_create_ripas(RIPAS_RAM);

	s2tte_write(&s2tt[wi.index], s2tte);

	/* Record the RIPAS change in the Realm Initial Measurement */
	ripas_granule_measure(rd, map_addr, level);

	ret = RMI_SUCCESS;

out_unmap_llt:
	buffer_unmap(s2tt);
out_unlock_llt:
	buffer_unmap(rd);
	granule_unlock(wi.g_llt);
	return ret;
}
1172
/*
 * Handler for RMI_RTT_SET_RIPAS.
 *
 * Completes a Realm-requested RIPAS change (recorded in rec->set_ripas by
 * an earlier REC exit) for the RTT entry covering map_addr at the given
 * level. The request is validated against the REC's pending set_ripas
 * window (same ripas value, map_addr is the next chunk, and the chunk does
 * not extend past the requested end). On a valid->EMPTY transition the
 * stage 2 TLB entries for the region are invalidated.
 *
 * Arguments:
 *   rd_addr  - PA of the RD granule.
 *   rec_addr - PA of the REC granule that made the request.
 *   map_addr - base IPA of the target region.
 *   ulevel   - RTT level of the target entry.
 *   uripas   - requested RIPAS value (must not exceed RIPAS_RAM).
 *
 * Returns RMI_SUCCESS, RMI_ERROR_INPUT, RMI_ERROR_REC, or a packed
 * RMI_ERROR_RTT code carrying the offending level.
 */
unsigned long smc_rtt_set_ripas(unsigned long rd_addr,
				unsigned long rec_addr,
				unsigned long map_addr,
				unsigned long ulevel,
				unsigned long uripas)
{
	struct granule *g_rd, *g_rec, *g_rtt_root;
	struct rec *rec;
	struct rd *rd;
	unsigned long map_size, ipa_bits;
	struct rtt_walk wi;
	unsigned long s2tte, *s2tt;
	struct realm_s2_context s2_ctx;
	long level = (long)ulevel;
	enum ripas ripas = (enum ripas)uripas;
	unsigned long ret;
	bool valid;
	int sl;

	if (ripas > RIPAS_RAM) {
		return RMI_ERROR_INPUT;
	}

	/* Lock RD and REC together (helper enforces the locking order) */
	if (!find_lock_two_granules(rd_addr,
				    GRANULE_STATE_RD,
				    &g_rd,
				    rec_addr,
				    GRANULE_STATE_REC,
				    &g_rec)) {
		return RMI_ERROR_INPUT;
	}

	/* A REC with a non-zero refcount is in use (e.g. running) */
	if (granule_refcount_read_acquire(g_rec) != 0UL) {
		ret = RMI_ERROR_REC;
		goto out_unlock_rec_rd;
	}

	rec = granule_map(g_rec, SLOT_REC);

	/* The REC must belong to the Realm identified by rd_addr */
	if (g_rd != rec->realm_info.g_rd) {
		ret = RMI_ERROR_REC;
		goto out_unmap_rec;
	}

	/* Must match the RIPAS value the Realm asked for */
	if (ripas != rec->set_ripas.ripas) {
		ret = RMI_ERROR_INPUT;
		goto out_unmap_rec;
	}

	if (map_addr != rec->set_ripas.addr) {
		/* Target region is not next chunk of requested region */
		ret = RMI_ERROR_INPUT;
		goto out_unmap_rec;
	}

	rd = granule_map(g_rd, SLOT_RD);

	if (!validate_rtt_entry_cmds(map_addr, level, rd)) {
		ret = RMI_ERROR_INPUT;
		goto out_unmap_rd;
	}

	map_size = s2tte_map_size(level);
	if (map_addr + map_size > rec->set_ripas.end) {
		/* Target region extends beyond end of requested region */
		ret = RMI_ERROR_INPUT;
		goto out_unmap_rd;
	}

	g_rtt_root = rd->s2_ctx.g_rtt;
	sl = realm_rtt_starting_level(rd);
	ipa_bits = realm_ipa_bits(rd);
	/* Copy the s2 context for TLB invalidation after unmapping rd */
	s2_ctx = rd->s2_ctx;

	granule_lock(g_rtt_root, GRANULE_STATE_RTT);

	rtt_walk_lock_unlock(g_rtt_root, sl, ipa_bits,
			     map_addr, level, &wi);
	/* The walk must terminate exactly at the requested level */
	if (wi.last_level != level) {
		ret = pack_return_code(RMI_ERROR_RTT, wi.last_level);
		goto out_unlock_llt;
	}

	s2tt = granule_map(wi.g_llt, SLOT_RTT);
	s2tte = s2tte_read(&s2tt[wi.index]);

	/* Remember whether the entry was a valid mapping before the update */
	valid = s2tte_is_valid(s2tte, level);

	if (!update_ripas(&s2tte, level, ripas)) {
		ret = pack_return_code(RMI_ERROR_RTT, (unsigned int)level);
		goto out_unmap_llt;
	}

	s2tte_write(&s2tt[wi.index], s2tte);

	/*
	 * A valid mapping was torn down by the RIPAS_EMPTY transition, so
	 * the stale stage 2 translation must be invalidated.
	 */
	if (valid && (ripas == RIPAS_EMPTY)) {
		if (level == RTT_PAGE_LEVEL) {
			invalidate_page(&s2_ctx, map_addr);
		} else {
			invalidate_block(&s2_ctx, map_addr);
		}
	}

	/* Advance the pending window to the next chunk */
	rec->set_ripas.addr += map_size;

	ret = RMI_SUCCESS;

out_unmap_llt:
	buffer_unmap(s2tt);
out_unlock_llt:
	granule_unlock(wi.g_llt);
out_unmap_rd:
	buffer_unmap(rd);
out_unmap_rec:
	buffer_unmap(rec);
out_unlock_rec_rd:
	granule_unlock(g_rec);
	granule_unlock(g_rd);
	return ret;
}