blob: b255fb9100d19339379f00100a203ec9c61b34f0 [file] [log] [blame]
Soby Mathewb4c6df42022-11-09 11:13:29 +00001/*
2 * SPDX-License-Identifier: BSD-3-Clause
3 *
4 * SPDX-FileCopyrightText: Copyright TF-RMM Contributors.
5 */
6
7#include <buffer.h>
8#include <granule.h>
9#include <measurement.h>
10#include <realm.h>
11#include <ripas.h>
12#include <smc-handler.h>
13#include <smc-rmi.h>
14#include <smc.h>
15#include <stddef.h>
16#include <string.h>
17#include <table.h>
18
/*
 * Check that 'map_addr' is a valid target address for RMI_RTT_* and
 * RMI_DATA_* commands: it must fall inside the realm's IPA space and
 * be aligned to the size mapped at 'level'.
 */
static bool validate_map_addr(unsigned long map_addr,
			      unsigned long level,
			      struct rd *rd)
{
	return (map_addr < realm_ipa_size(rd)) &&
		addr_is_level_aligned(map_addr, level);
}
35
36/*
37 * Structure commands can operate on all RTTs except for the root RTT so
38 * the minimal valid level is the stage 2 starting level + 1.
39 */
40static bool validate_rtt_structure_cmds(unsigned long map_addr,
41 long level,
42 struct rd *rd)
43{
44 int min_level = realm_rtt_starting_level(rd) + 1;
45
46 if ((level < min_level) || (level > RTT_PAGE_LEVEL)) {
47 return false;
48 }
49 return validate_map_addr(map_addr, level, rd);
50}
51
52/*
53 * Map/Unmap commands can operate up to a level 2 block entry so min_level is
54 * the smallest block size.
55 */
56static bool validate_rtt_map_cmds(unsigned long map_addr,
57 long level,
58 struct rd *rd)
59{
60 if ((level < RTT_MIN_BLOCK_LEVEL) || (level > RTT_PAGE_LEVEL)) {
61 return false;
62 }
63 return validate_map_addr(map_addr, level, rd);
64}
65
66/*
67 * Entry commands can operate on any entry so the minimal valid level is the
68 * stage 2 starting level.
69 */
70static bool validate_rtt_entry_cmds(unsigned long map_addr,
71 long level,
72 struct rd *rd)
73{
74 if ((level < realm_rtt_starting_level(rd)) ||
75 (level > RTT_PAGE_LEVEL)) {
76 return false;
77 }
78 return validate_map_addr(map_addr, level, rd);
79}
80
/*
 * RMI_RTT_CREATE handler.
 *
 * Installs the DELEGATED granule at 'rtt_addr' as a new RTT covering IPA
 * 'map_addr' at level 'ulevel', replacing the parent S2TTE at 'ulevel' - 1
 * with a table descriptor. The new table is initialized so that it maps
 * the same state the parent entry described (RTT unfolding).
 *
 * rd_addr  - PA of the target realm's RD granule.
 * rtt_addr - PA of a granule in the DELEGATED state to become the new RTT.
 * map_addr - base IPA to be covered by the new RTT.
 * ulevel   - RTT level of the new table.
 *
 * Returns RMI_SUCCESS, RMI_ERROR_INPUT, or a packed RMI_ERROR_RTT code
 * carrying the RTT level at which the failure was detected.
 */
unsigned long smc_rtt_create(unsigned long rd_addr,
			     unsigned long rtt_addr,
			     unsigned long map_addr,
			     unsigned long ulevel)
{
	struct granule *g_rd;
	struct granule *g_tbl;
	struct rd *rd;
	struct granule *g_table_root;
	struct rtt_walk wi;
	unsigned long *s2tt, *parent_s2tt, parent_s2tte;
	long level = (long)ulevel;
	unsigned long ipa_bits;
	unsigned long ret;
	struct realm_s2_context s2_ctx;
	int sl;

	/* Lock both granules and verify their expected states in one step */
	if (!find_lock_two_granules(rtt_addr,
				    GRANULE_STATE_DELEGATED,
				    &g_tbl,
				    rd_addr,
				    GRANULE_STATE_RD,
				    &g_rd)) {
		return RMI_ERROR_INPUT;
	}

	rd = granule_map(g_rd, SLOT_RD);

	if (!validate_rtt_structure_cmds(map_addr, level, rd)) {
		buffer_unmap(rd);
		granule_unlock(g_rd);
		granule_unlock(g_tbl);
		return RMI_ERROR_INPUT;
	}

	/* Snapshot what we need from the RD before unmapping it */
	g_table_root = rd->s2_ctx.g_rtt;
	sl = realm_rtt_starting_level(rd);
	ipa_bits = realm_ipa_bits(rd);
	s2_ctx = rd->s2_ctx;
	buffer_unmap(rd);

	/*
	 * Lock the RTT root. Enforcing locking order RD->RTT is enough to
	 * ensure deadlock free locking guarantee.
	 */
	granule_lock(g_table_root, GRANULE_STATE_RTT);

	/* Unlock RD after locking RTT Root */
	granule_unlock(g_rd);

	/* Walk down to the parent table of the level being created */
	rtt_walk_lock_unlock(g_table_root, sl, ipa_bits,
			     map_addr, level - 1L, &wi);
	if (wi.last_level != level - 1L) {
		/* Walk stopped early: report the level actually reached */
		ret = pack_return_code(RMI_ERROR_RTT, wi.last_level);
		goto out_unlock_llt;
	}

	parent_s2tt = granule_map(wi.g_llt, SLOT_RTT);
	parent_s2tte = s2tte_read(&parent_s2tt[wi.index]);
	s2tt = granule_map(g_tbl, SLOT_DELEGATED);

	/* Initialize the new table to be equivalent to the parent entry */
	if (s2tte_is_unassigned_empty(parent_s2tte)) {
		s2tt_init_unassigned_empty(s2tt);

		/*
		 * Increase the refcount of the parent, the granule was
		 * locked while table walking and hand-over-hand locking.
		 * Atomicity and acquire/release semantics not required because
		 * the table is accessed always locked.
		 */
		__granule_get(wi.g_llt);

	} else if (s2tte_is_unassigned_ram(parent_s2tte)) {
		s2tt_init_unassigned_ram(s2tt);
		__granule_get(wi.g_llt);

	} else if (s2tte_is_unassigned_ns(parent_s2tte)) {
		s2tt_init_unassigned_ns(s2tt);
		__granule_get(wi.g_llt);

	} else if (s2tte_is_destroyed(parent_s2tte)) {
		s2tt_init_destroyed(s2tt);
		__granule_get(wi.g_llt);

	} else if (s2tte_is_assigned_empty(parent_s2tte, level - 1L)) {
		unsigned long block_pa;

		/*
		 * We should observe parent assigned s2tte only when
		 * we create tables above this level.
		 */
		assert(level > RTT_MIN_BLOCK_LEVEL);

		block_pa = s2tte_pa(parent_s2tte, level - 1L);

		s2tt_init_assigned_empty(s2tt, block_pa, level);

		/*
		 * Increase the refcount to mark the granule as in-use. refcount
		 * is incremented by S2TTES_PER_S2TT (ref RTT unfolding).
		 */
		__granule_refcount_inc(g_tbl, S2TTES_PER_S2TT);

	} else if (s2tte_is_assigned_ram(parent_s2tte, level - 1L)) {
		unsigned long block_pa;

		/*
		 * We should observe parent valid s2tte only when
		 * we create tables above this level.
		 */
		assert(level > RTT_MIN_BLOCK_LEVEL);

		/*
		 * Break before make. This may cause spurious S2 aborts.
		 */
		s2tte_write(&parent_s2tt[wi.index], 0UL);
		invalidate_block(&s2_ctx, map_addr);

		block_pa = s2tte_pa(parent_s2tte, level - 1L);

		s2tt_init_assigned_ram(s2tt, block_pa, level);

		/*
		 * Increase the refcount to mark the granule as in-use. refcount
		 * is incremented by S2TTES_PER_S2TT (ref RTT unfolding).
		 */
		__granule_refcount_inc(g_tbl, S2TTES_PER_S2TT);

	} else if (s2tte_is_assigned_ns(parent_s2tte, level - 1L)) {
		unsigned long block_pa;

		/*
		 * We should observe parent assigned_ns s2tte only when
		 * we create tables above this level.
		 */
		assert(level > RTT_MIN_BLOCK_LEVEL);

		/*
		 * Break before make. This may cause spurious S2 aborts.
		 */
		s2tte_write(&parent_s2tt[wi.index], 0UL);
		invalidate_block(&s2_ctx, map_addr);

		block_pa = s2tte_pa(parent_s2tte, level - 1L);

		s2tt_init_assigned_ns(s2tt, block_pa, level);

		/*
		 * Increase the refcount to mark the granule as in-use. refcount
		 * is incremented by S2TTES_PER_S2TT (ref RTT unfolding).
		 */
		__granule_refcount_inc(g_tbl, S2TTES_PER_S2TT);

	} else if (s2tte_is_table(parent_s2tte, level - 1L)) {
		/* An RTT already exists at this entry */
		ret = pack_return_code(RMI_ERROR_RTT,
					(unsigned int)(level - 1L));
		goto out_unmap_table;

	} else {
		assert(false);
	}

	ret = RMI_SUCCESS;

	granule_set_state(g_tbl, GRANULE_STATE_RTT);

	/* Point the parent entry at the newly created table */
	parent_s2tte = s2tte_create_table(rtt_addr, level - 1L);
	s2tte_write(&parent_s2tt[wi.index], parent_s2tte);

out_unmap_table:
	buffer_unmap(s2tt);
	buffer_unmap(parent_s2tt);
out_unlock_llt:
	granule_unlock(wi.g_llt);
	granule_unlock(g_tbl);
	return ret;
}
258
/*
 * RMI_RTT_FOLD handler.
 *
 * Folds the RTT at 'rtt_addr' (a table at level 'ulevel' covering IPA
 * 'map_addr') back into its parent: the parent table descriptor at
 * 'ulevel' - 1 is replaced by a single entry equivalent to the folded
 * table's homogeneous contents, and the RTT granule returns to the
 * DELEGATED state.
 *
 * Folding succeeds only when all entries of the table are of the same
 * type: either all unassigned/destroyed (refcount == 0) or a fully
 * populated, physically contiguous block mapping
 * (refcount == S2TTES_PER_S2TT).
 *
 * Returns RMI_SUCCESS, RMI_ERROR_INPUT, or a packed RMI_ERROR_RTT code.
 */
unsigned long smc_rtt_fold(unsigned long rtt_addr,
			   unsigned long rd_addr,
			   unsigned long map_addr,
			   unsigned long ulevel)
{
	struct granule *g_rd;
	struct granule *g_tbl;
	struct rd *rd;
	struct granule *g_table_root;
	struct rtt_walk wi;
	unsigned long *table, *parent_s2tt, parent_s2tte;
	long level = (long)ulevel;
	unsigned long ipa_bits;
	unsigned long ret;
	struct realm_s2_context s2_ctx;
	int sl;

	g_rd = find_lock_granule(rd_addr, GRANULE_STATE_RD);
	if (g_rd == NULL) {
		return RMI_ERROR_INPUT;
	}

	rd = granule_map(g_rd, SLOT_RD);

	if (!validate_rtt_structure_cmds(map_addr, level, rd)) {
		buffer_unmap(rd);
		granule_unlock(g_rd);
		return RMI_ERROR_INPUT;
	}

	/* Snapshot realm parameters, then swap the RD lock for the root RTT
	 * lock (RD->RTT order keeps locking deadlock free). */
	g_table_root = rd->s2_ctx.g_rtt;
	sl = realm_rtt_starting_level(rd);
	ipa_bits = realm_ipa_bits(rd);
	s2_ctx = rd->s2_ctx;
	buffer_unmap(rd);
	granule_lock(g_table_root, GRANULE_STATE_RTT);
	granule_unlock(g_rd);

	/* Walk to the parent table of the level being folded */
	rtt_walk_lock_unlock(g_table_root, sl, ipa_bits,
			     map_addr, level - 1L, &wi);
	if (wi.last_level != level - 1UL) {
		ret = pack_return_code(RMI_ERROR_RTT, wi.last_level);
		goto out_unlock_parent_table;
	}

	parent_s2tt = granule_map(wi.g_llt, SLOT_RTT);
	parent_s2tte = s2tte_read(&parent_s2tt[wi.index]);
	if (!s2tte_is_table(parent_s2tte, level - 1L)) {
		ret = pack_return_code(RMI_ERROR_RTT,
					(unsigned int)(level - 1L));
		goto out_unmap_parent_table;
	}

	/*
	 * Check that the 'rtt_addr' RTT is used at (map_addr, level).
	 * Note that this also verifies that the rtt_addr is properly aligned.
	 */
	if (rtt_addr != s2tte_pa_table(parent_s2tte, level - 1L)) {
		ret = pack_return_code(RMI_ERROR_RTT,
					(unsigned int)(level - 1L));
		goto out_unmap_parent_table;
	}

	g_tbl = find_lock_granule(rtt_addr, GRANULE_STATE_RTT);

	/*
	 * A table descriptor S2TTE always points to a TABLE granule.
	 */
	assert(g_tbl);

	table = granule_map(g_tbl, SLOT_RTT2);

	/*
	 * The command can succeed only if all 512 S2TTEs are of the same type.
	 * We first check the table's ref. counter to speed up the case when
	 * the host makes a guess whether a memory region can be folded.
	 */
	if (g_tbl->refcount == 0UL) {
		/* No live mappings: fold to a single non-assigned entry */
		if (table_is_destroyed_block(table)) {
			parent_s2tte = s2tte_create_destroyed();
			__granule_put(wi.g_llt);

		} else if (table_is_unassigned_empty_block(table)) {
			parent_s2tte = s2tte_create_unassigned_empty();
			__granule_put(wi.g_llt);

		} else if (table_is_unassigned_ram_block(table)) {
			parent_s2tte = s2tte_create_unassigned_ram();
			__granule_put(wi.g_llt);

		} else if (table_is_unassigned_ns_block(table)) {
			parent_s2tte = s2tte_create_unassigned_ns();
			__granule_put(wi.g_llt);

		} else {
			/*
			 * The table holds a mixture of destroyed and
			 * unassigned entries.
			 */
			ret = pack_return_code(RMI_ERROR_RTT, level);
			goto out_unmap_table;
		}

	} else if (g_tbl->refcount == S2TTES_PER_S2TT) {

		unsigned long s2tte, block_pa;

		/* The RMM specification does not allow creating block
		 * entries less than RTT_MIN_BLOCK_LEVEL even though
		 * permitted by the Arm Architecture.
		 * Hence ensure that the table being folded is at a level
		 * higher than the RTT_MIN_BLOCK_LEVEL.
		 *
		 * A fully populated table cannot be destroyed if that
		 * would create a block mapping below RTT_MIN_BLOCK_LEVEL.
		 */
		if (level <= RTT_MIN_BLOCK_LEVEL) {
			ret = pack_return_code(RMI_ERROR_RTT, wi.last_level);
			goto out_unmap_table;
		}

		s2tte = s2tte_read(&table[0]);
		block_pa = s2tte_pa(s2tte, level);

		/*
		 * The table must also refer to a contiguous block through the
		 * same type of s2tte, either Assigned, Valid or Assigned_NS.
		 */
		if (table_maps_assigned_empty_block(table, level)) {
			parent_s2tte = s2tte_create_assigned_empty(block_pa,
								   level - 1L);
		} else if (table_maps_assigned_ram_block(table, level)) {
			parent_s2tte = s2tte_create_assigned_ram(block_pa,
								 level - 1L);
		} else if (table_maps_assigned_ns_block(table, level)) {
			parent_s2tte = s2tte_create_assigned_ns(block_pa,
								level - 1L);
		/* The table contains mixed entries that cannot be folded */
		} else {
			ret = pack_return_code(RMI_ERROR_RTT, level);
			goto out_unmap_table;
		}

		__granule_refcount_dec(g_tbl, S2TTES_PER_S2TT);
	} else {
		/*
		 * The table holds a mixture of different types of s2ttes.
		 */
		ret = pack_return_code(RMI_ERROR_RTT, level);
		goto out_unmap_table;
	}

	ret = RMI_SUCCESS;

	/*
	 * Break before make.
	 */
	s2tte_write(&parent_s2tt[wi.index], 0UL);

	/* Live mappings need per-page invalidation; otherwise one block */
	if (s2tte_is_assigned_ram(parent_s2tte, level - 1L) ||
	    s2tte_is_assigned_ns(parent_s2tte, level - 1L)) {
		invalidate_pages_in_block(&s2_ctx, map_addr);
	} else {
		invalidate_block(&s2_ctx, map_addr);
	}

	s2tte_write(&parent_s2tt[wi.index], parent_s2tte);

	/* Scrub the folded table and return it to the DELEGATED state */
	granule_memzero_mapped(table);
	granule_set_state(g_tbl, GRANULE_STATE_DELEGATED);

out_unmap_table:
	buffer_unmap(table);
	granule_unlock(g_tbl);
out_unmap_parent_table:
	buffer_unmap(parent_s2tt);
out_unlock_parent_table:
	granule_unlock(wi.g_llt);
	return ret;
}
439
/*
 * RMI_RTT_DESTROY handler.
 *
 * Destroys the (unreferenced) RTT at 'rtt_addr' mapped at
 * (map_addr, ulevel): the parent entry becomes DESTROYED for protected
 * IPAs or unassigned-NS for unprotected IPAs, and the RTT granule is
 * scrubbed and returned to the DELEGATED state.
 *
 * Fails with a packed RMI_ERROR_RTT code if the RTT still maps anything
 * (non-zero refcount) or the walk does not reach the expected table.
 */
unsigned long smc_rtt_destroy(unsigned long rtt_addr,
			      unsigned long rd_addr,
			      unsigned long map_addr,
			      unsigned long ulevel)
{
	struct granule *g_rd;
	struct granule *g_tbl;
	struct rd *rd;
	struct granule *g_table_root;
	struct rtt_walk wi;
	unsigned long *table, *parent_s2tt, parent_s2tte;
	long level = (long)ulevel;
	unsigned long ipa_bits;
	unsigned long ret;
	struct realm_s2_context s2_ctx;
	int sl;
	bool in_par;

	g_rd = find_lock_granule(rd_addr, GRANULE_STATE_RD);
	if (g_rd == NULL) {
		return RMI_ERROR_INPUT;
	}

	rd = granule_map(g_rd, SLOT_RD);

	if (!validate_rtt_structure_cmds(map_addr, level, rd)) {
		buffer_unmap(rd);
		granule_unlock(g_rd);
		return RMI_ERROR_INPUT;
	}

	/* Snapshot realm parameters; PAR membership decides the
	 * replacement entry type written to the parent below. */
	g_table_root = rd->s2_ctx.g_rtt;
	sl = realm_rtt_starting_level(rd);
	ipa_bits = realm_ipa_bits(rd);
	s2_ctx = rd->s2_ctx;
	in_par = addr_in_par(rd, map_addr);
	buffer_unmap(rd);
	granule_lock(g_table_root, GRANULE_STATE_RTT);
	granule_unlock(g_rd);

	/* Walk to the parent table of the level being destroyed */
	rtt_walk_lock_unlock(g_table_root, sl, ipa_bits,
			     map_addr, level - 1L, &wi);
	if (wi.last_level != level - 1UL) {
		ret = pack_return_code(RMI_ERROR_RTT, wi.last_level);
		goto out_unlock_parent_table;
	}

	parent_s2tt = granule_map(wi.g_llt, SLOT_RTT);
	parent_s2tte = s2tte_read(&parent_s2tt[wi.index]);
	if (!s2tte_is_table(parent_s2tte, level - 1L)) {
		ret = pack_return_code(RMI_ERROR_RTT,
					(unsigned int)(level - 1L));
		goto out_unmap_parent_table;
	}

	/*
	 * Check that the 'rtt_addr' RTT is used at (map_addr, level).
	 * Note that this also verifies that the rtt_addr is properly aligned.
	 */
	if (rtt_addr != s2tte_pa_table(parent_s2tte, level - 1L)) {
		ret = RMI_ERROR_INPUT;
		goto out_unmap_parent_table;
	}

	/*
	 * Lock the RTT granule. The 'rtt_addr' is verified, thus can be treated
	 * as an internal granule.
	 */
	g_tbl = find_lock_granule(rtt_addr, GRANULE_STATE_RTT);

	/*
	 * A table descriptor S2TTE always points to a TABLE granule.
	 */
	assert(g_tbl != NULL);

	/*
	 * Read the refcount value. RTT granule is always accessed locked, thus
	 * the refcount can be accessed without atomic operations.
	 */
	if (g_tbl->refcount != 0UL) {
		/* The RTT still maps granules; it cannot be destroyed */
		ret = pack_return_code(RMI_ERROR_RTT, level);
		goto out_unlock_table;
	}

	ret = RMI_SUCCESS;

	table = granule_map(g_tbl, SLOT_RTT2);

	if (in_par) {
		parent_s2tte = s2tte_create_destroyed();
	} else {
		parent_s2tte = s2tte_create_unassigned_ns();
	}

	/* Drop the reference the destroyed table held on its parent */
	__granule_put(wi.g_llt);

	/*
	 * Break before make. Note that this may cause spurious S2 aborts.
	 */
	s2tte_write(&parent_s2tt[wi.index], 0UL);
	invalidate_block(&s2_ctx, map_addr);
	s2tte_write(&parent_s2tt[wi.index], parent_s2tte);

	/* Scrub the table and return it to the DELEGATED state */
	granule_memzero_mapped(table);
	granule_set_state(g_tbl, GRANULE_STATE_DELEGATED);

	buffer_unmap(table);
out_unlock_table:
	granule_unlock(g_tbl);
out_unmap_parent_table:
	buffer_unmap(parent_s2tt);
out_unlock_parent_table:
	granule_unlock(wi.g_llt);
	return ret;
}
555
/* Operation selector for map_unmap_ns() */
enum map_unmap_ns_op {
	MAP_NS,
	UNMAP_NS
};
560
/*
 * Common implementation of RMI_RTT_MAP_UNPROTECTED and
 * RMI_RTT_UNMAP_UNPROTECTED.
 *
 * We don't hold a reference on the NS granule when it is
 * mapped into a realm. Instead we rely on the guarantees
 * provided by the architecture to ensure that a NS access
 * to a protected granule is prohibited even within the realm.
 *
 * rd_addr    - PA of the realm's RD granule.
 * map_addr   - unprotected IPA to map or unmap.
 * level      - RTT level of the target entry.
 * host_s2tte - host-provided S2TTE attributes (MAP_NS only).
 * op         - MAP_NS or UNMAP_NS.
 */
static unsigned long map_unmap_ns(unsigned long rd_addr,
				  unsigned long map_addr,
				  long level,
				  unsigned long host_s2tte,
				  enum map_unmap_ns_op op)
{
	struct granule *g_rd;
	struct rd *rd;
	struct granule *g_table_root;
	unsigned long *s2tt, s2tte;
	struct rtt_walk wi;
	unsigned long ipa_bits;
	unsigned long ret;
	struct realm_s2_context s2_ctx;
	int sl;

	g_rd = find_lock_granule(rd_addr, GRANULE_STATE_RD);
	if (g_rd == NULL) {
		return RMI_ERROR_INPUT;
	}

	rd = granule_map(g_rd, SLOT_RD);

	if (!validate_rtt_map_cmds(map_addr, level, rd)) {
		buffer_unmap(rd);
		granule_unlock(g_rd);
		return RMI_ERROR_INPUT;
	}

	g_table_root = rd->s2_ctx.g_rtt;
	sl = realm_rtt_starting_level(rd);
	ipa_bits = realm_ipa_bits(rd);

	/* Check if map_addr is outside PAR */
	if (addr_in_par(rd, map_addr)) {
		buffer_unmap(rd);
		granule_unlock(g_rd);
		return RMI_ERROR_INPUT;
	}

	s2_ctx = rd->s2_ctx;
	buffer_unmap(rd);

	/* RD->RTT locking order keeps locking deadlock free */
	granule_lock(g_table_root, GRANULE_STATE_RTT);
	granule_unlock(g_rd);

	rtt_walk_lock_unlock(g_table_root, sl, ipa_bits,
			     map_addr, level, &wi);
	if (wi.last_level != level) {
		ret = pack_return_code(RMI_ERROR_RTT, wi.last_level);
		goto out_unlock_llt;
	}

	s2tt = granule_map(wi.g_llt, SLOT_RTT);
	s2tte = s2tte_read(&s2tt[wi.index]);

	if (op == MAP_NS) {
		/* Only an unassigned-NS entry can accept a new NS mapping */
		if (!s2tte_is_unassigned_ns(s2tte)) {
			ret = pack_return_code(RMI_ERROR_RTT,
						(unsigned int)level);
			goto out_unmap_table;
		}

		s2tte = s2tte_create_assigned_ns(host_s2tte, level);
		s2tte_write(&s2tt[wi.index], s2tte);
		__granule_get(wi.g_llt);

	} else if (op == UNMAP_NS) {
		/*
		 * The following check also verifies that map_addr is outside
		 * PAR, as valid_NS s2tte may only cover outside PAR IPA range.
		 */
		if (!s2tte_is_assigned_ns(s2tte, level)) {
			ret = pack_return_code(RMI_ERROR_RTT,
						(unsigned int)level);
			goto out_unmap_table;
		}

		s2tte = s2tte_create_unassigned_ns();
		s2tte_write(&s2tt[wi.index], s2tte);
		__granule_put(wi.g_llt);
		/* TLB maintenance scope depends on the entry's level */
		if (level == RTT_PAGE_LEVEL) {
			invalidate_page(&s2_ctx, map_addr);
		} else {
			invalidate_block(&s2_ctx, map_addr);
		}
	}

	ret = RMI_SUCCESS;

out_unmap_table:
	buffer_unmap(s2tt);
out_unlock_llt:
	granule_unlock(wi.g_llt);
	return ret;
}
663
664unsigned long smc_rtt_map_unprotected(unsigned long rd_addr,
665 unsigned long map_addr,
666 unsigned long ulevel,
667 unsigned long s2tte)
668{
669 long level = (long)ulevel;
670
671 if (!host_ns_s2tte_is_valid(s2tte, level)) {
672 return RMI_ERROR_INPUT;
673 }
674
675 return map_unmap_ns(rd_addr, map_addr, level, s2tte, MAP_NS);
676}
677
678unsigned long smc_rtt_unmap_unprotected(unsigned long rd_addr,
679 unsigned long map_addr,
680 unsigned long ulevel)
681{
682 return map_unmap_ns(rd_addr, map_addr, (long)ulevel, 0UL, UNMAP_NS);
683}
684
/*
 * RMI_RTT_READ_ENTRY handler.
 *
 * Walks to (map_addr, ulevel) and reports the S2TTE found there through
 * 'ret':
 *   x[0] - RMI_SUCCESS or RMI_ERROR_INPUT.
 *   x[1] - RTT level actually reached by the walk.
 *   x[2] - entry state (RMI_UNASSIGNED/RMI_ASSIGNED/RMI_DESTROYED/
 *          RMI_VALID_NS/RMI_TABLE).
 *   x[3] - output address (PA, host S2TTE, or next-level table PA),
 *          0 when not applicable.
 *   x[4] - RIPAS value, 0 when not applicable.
 */
void smc_rtt_read_entry(unsigned long rd_addr,
			unsigned long map_addr,
			unsigned long ulevel,
			struct smc_result *ret)
{
	struct granule *g_rd, *g_rtt_root;
	struct rd *rd;
	struct rtt_walk wi;
	unsigned long *s2tt, s2tte;
	unsigned long ipa_bits;
	long level = (long)ulevel;
	int sl;

	g_rd = find_lock_granule(rd_addr, GRANULE_STATE_RD);
	if (g_rd == NULL) {
		ret->x[0] = RMI_ERROR_INPUT;
		return;
	}

	rd = granule_map(g_rd, SLOT_RD);

	if (!validate_rtt_entry_cmds(map_addr, level, rd)) {
		buffer_unmap(rd);
		granule_unlock(g_rd);
		ret->x[0] = RMI_ERROR_INPUT;
		return;
	}

	g_rtt_root = rd->s2_ctx.g_rtt;
	sl = realm_rtt_starting_level(rd);
	ipa_bits = realm_ipa_bits(rd);
	buffer_unmap(rd);

	/* RD->RTT locking order keeps locking deadlock free */
	granule_lock(g_rtt_root, GRANULE_STATE_RTT);
	granule_unlock(g_rd);

	/* The walk may legitimately stop above 'level'; report x[1] */
	rtt_walk_lock_unlock(g_rtt_root, sl, ipa_bits,
			     map_addr, level, &wi);
	s2tt = granule_map(wi.g_llt, SLOT_RTT);
	s2tte = s2tte_read(&s2tt[wi.index]);
	ret->x[1] = wi.last_level;
	ret->x[3] = 0UL;
	ret->x[4] = 0UL;

	/* Translate the S2TTE type into the RMI entry state and outputs */
	if (s2tte_is_unassigned_empty(s2tte)) {
		ret->x[2] = RMI_UNASSIGNED;
		ret->x[4] = RIPAS_EMPTY;
	} else if (s2tte_is_unassigned_ram(s2tte)) {
		ret->x[2] = RMI_UNASSIGNED;
		ret->x[4] = RIPAS_RAM;
	} else if (s2tte_is_destroyed(s2tte)) {
		ret->x[2] = RMI_DESTROYED;
	} else if (s2tte_is_assigned_empty(s2tte, wi.last_level)) {
		ret->x[2] = RMI_ASSIGNED;
		ret->x[3] = s2tte_pa(s2tte, wi.last_level);
		ret->x[4] = RIPAS_EMPTY;
	} else if (s2tte_is_assigned_ram(s2tte, wi.last_level)) {
		ret->x[2] = RMI_ASSIGNED;
		ret->x[3] = s2tte_pa(s2tte, wi.last_level);
		ret->x[4] = RIPAS_RAM;
	} else if (s2tte_is_unassigned_ns(s2tte)) {
		ret->x[2] = RMI_UNASSIGNED;
		ret->x[4] = RIPAS_EMPTY;
	} else if (s2tte_is_assigned_ns(s2tte, wi.last_level)) {
		ret->x[2] = RMI_VALID_NS;
		ret->x[3] = host_ns_s2tte(s2tte, wi.last_level);
	} else if (s2tte_is_table(s2tte, wi.last_level)) {
		ret->x[2] = RMI_TABLE;
		ret->x[3] = s2tte_pa_table(s2tte, wi.last_level);
	} else {
		assert(false);
	}

	buffer_unmap(s2tt);
	granule_unlock(wi.g_llt);

	ret->x[0] = RMI_SUCCESS;
}
763
/*
 * Extend the realm's RIM (Realm Initial Measurement) with a DATA granule
 * contribution, per the RMM measurement flow: build a data measurement
 * descriptor (current RIM, IPA, flags and — when requested — the hash of
 * the granule contents), then hash the descriptor to produce the new RIM.
 *
 * rd    - mapped RD of the realm (RIM is updated in place).
 * data  - mapped contents of the data granule being measured.
 * ipa   - IPA at which the granule is mapped.
 * flags - RMI_MEASURE_CONTENT to include the contents hash,
 *         RMI_NO_MEASURE_CONTENT otherwise.
 */
static void data_granule_measure(struct rd *rd, void *data,
				 unsigned long ipa,
				 unsigned long flags)
{
	struct measurement_desc_data measure_desc = {0};

	/* Initialize the measurement descriptor structure */
	measure_desc.desc_type = MEASURE_DESC_TYPE_DATA;
	measure_desc.len = sizeof(struct measurement_desc_data);
	measure_desc.ipa = ipa;
	measure_desc.flags = flags;
	memcpy(measure_desc.rim,
	       &rd->measurement[RIM_MEASUREMENT_SLOT],
	       measurement_get_size(rd->algorithm));

	if (flags == RMI_MEASURE_CONTENT) {
		/*
		 * Hashing the data granules and store the result in the
		 * measurement descriptor structure.
		 */
		measurement_hash_compute(rd->algorithm,
					 data,
					 GRANULE_SIZE,
					 measure_desc.content);
	}

	/*
	 * Hashing the measurement descriptor structure; the result is the
	 * updated RIM.
	 */
	measurement_hash_compute(rd->algorithm,
				 &measure_desc,
				 sizeof(measure_desc),
				 rd->measurement[RIM_MEASUREMENT_SLOT]);
}
799
800static unsigned long validate_data_create_unknown(unsigned long map_addr,
801 struct rd *rd)
802{
803 if (!addr_in_par(rd, map_addr)) {
804 return RMI_ERROR_INPUT;
805 }
806
807 if (!validate_map_addr(map_addr, RTT_PAGE_LEVEL, rd)) {
808 return RMI_ERROR_INPUT;
809 }
810
811 return RMI_SUCCESS;
812}
813
814static unsigned long validate_data_create(unsigned long map_addr,
815 struct rd *rd)
816{
817 if (get_rd_state_locked(rd) != REALM_STATE_NEW) {
818 return RMI_ERROR_REALM;
819 }
820
821 return validate_data_create_unknown(map_addr, rd);
822}
823
824/*
825 * Implements both Data.Create and Data.CreateUnknown
826 *
827 * if @g_src == NULL, this implemented Data.CreateUnknown
828 * and otherwise this implemented Data.Create.
829 */
AlexeiFedorovac923c82023-04-06 15:12:04 +0100830static unsigned long data_create(unsigned long rd_addr,
831 unsigned long data_addr,
Soby Mathewb4c6df42022-11-09 11:13:29 +0000832 unsigned long map_addr,
833 struct granule *g_src,
834 unsigned long flags)
835{
836 struct granule *g_data;
837 struct granule *g_rd;
838 struct granule *g_table_root;
839 struct rd *rd;
840 struct rtt_walk wi;
841 unsigned long s2tte, *s2tt;
AlexeiFedorov0fb44552023-04-14 15:37:58 +0100842 enum ripas ripas_val;
Soby Mathewb4c6df42022-11-09 11:13:29 +0000843 enum granule_state new_data_state = GRANULE_STATE_DELEGATED;
844 unsigned long ipa_bits;
845 unsigned long ret;
846 int __unused meas_ret;
847 int sl;
848
849 if (!find_lock_two_granules(data_addr,
850 GRANULE_STATE_DELEGATED,
851 &g_data,
852 rd_addr,
853 GRANULE_STATE_RD,
854 &g_rd)) {
855 return RMI_ERROR_INPUT;
856 }
857
858 rd = granule_map(g_rd, SLOT_RD);
859
860 ret = (g_src != NULL) ?
861 validate_data_create(map_addr, rd) :
862 validate_data_create_unknown(map_addr, rd);
863
864 if (ret != RMI_SUCCESS) {
865 goto out_unmap_rd;
866 }
867
868 g_table_root = rd->s2_ctx.g_rtt;
869 sl = realm_rtt_starting_level(rd);
870 ipa_bits = realm_ipa_bits(rd);
871 granule_lock(g_table_root, GRANULE_STATE_RTT);
872 rtt_walk_lock_unlock(g_table_root, sl, ipa_bits,
873 map_addr, RTT_PAGE_LEVEL, &wi);
874 if (wi.last_level != RTT_PAGE_LEVEL) {
875 ret = pack_return_code(RMI_ERROR_RTT, wi.last_level);
876 goto out_unlock_ll_table;
877 }
878
879 s2tt = granule_map(wi.g_llt, SLOT_RTT);
880 s2tte = s2tte_read(&s2tt[wi.index]);
AlexeiFedorovc07a6382023-04-14 11:59:18 +0100881 if (s2tte_is_unassigned_empty(s2tte)) {
AlexeiFedorov0fb44552023-04-14 15:37:58 +0100882 ripas_val = RIPAS_EMPTY;
AlexeiFedorovc07a6382023-04-14 11:59:18 +0100883 } else if (s2tte_is_unassigned_ram(s2tte)) {
AlexeiFedorov0fb44552023-04-14 15:37:58 +0100884 ripas_val = RIPAS_RAM;
AlexeiFedorovc07a6382023-04-14 11:59:18 +0100885 } else {
Soby Mathewb4c6df42022-11-09 11:13:29 +0000886 ret = pack_return_code(RMI_ERROR_RTT, RTT_PAGE_LEVEL);
887 goto out_unmap_ll_table;
888 }
889
AlexeiFedorov0fb44552023-04-14 15:37:58 +0100890 ripas_val = s2tte_get_ripas(s2tte);
Soby Mathewb4c6df42022-11-09 11:13:29 +0000891
892 if (g_src != NULL) {
893 bool ns_access_ok;
894 void *data = granule_map(g_data, SLOT_DELEGATED);
895
896 ns_access_ok = ns_buffer_read(SLOT_NS, g_src, 0U,
897 GRANULE_SIZE, data);
898
899 if (!ns_access_ok) {
900 /*
901 * Some data may be copied before the failure. Zero
902 * g_data granule as it will remain in delegated state.
903 */
904 (void)memset(data, 0, GRANULE_SIZE);
905 buffer_unmap(data);
906 ret = RMI_ERROR_INPUT;
907 goto out_unmap_ll_table;
908 }
909
910
911 data_granule_measure(rd, data, map_addr, flags);
912
913 buffer_unmap(data);
914 }
915
916 new_data_state = GRANULE_STATE_DATA;
917
AlexeiFedorov0fb44552023-04-14 15:37:58 +0100918 s2tte = (ripas_val == RIPAS_EMPTY) ?
Soby Mathewb4c6df42022-11-09 11:13:29 +0000919 s2tte_create_assigned_empty(data_addr, RTT_PAGE_LEVEL) :
AlexeiFedorov3a739332023-04-13 13:54:04 +0100920 s2tte_create_assigned_ram(data_addr, RTT_PAGE_LEVEL);
Soby Mathewb4c6df42022-11-09 11:13:29 +0000921
922 s2tte_write(&s2tt[wi.index], s2tte);
923 __granule_get(wi.g_llt);
924
925 ret = RMI_SUCCESS;
926
927out_unmap_ll_table:
928 buffer_unmap(s2tt);
929out_unlock_ll_table:
930 granule_unlock(wi.g_llt);
931out_unmap_rd:
932 buffer_unmap(rd);
933 granule_unlock(g_rd);
934 granule_unlock_transition(g_data, new_data_state);
935 return ret;
936}
937
AlexeiFedorovac923c82023-04-06 15:12:04 +0100938unsigned long smc_data_create(unsigned long rd_addr,
939 unsigned long data_addr,
Soby Mathewb4c6df42022-11-09 11:13:29 +0000940 unsigned long map_addr,
941 unsigned long src_addr,
942 unsigned long flags)
943{
944 struct granule *g_src;
Soby Mathewb4c6df42022-11-09 11:13:29 +0000945
946 if (flags != RMI_NO_MEASURE_CONTENT && flags != RMI_MEASURE_CONTENT) {
947 return RMI_ERROR_INPUT;
948 }
949
950 g_src = find_granule(src_addr);
951 if ((g_src == NULL) || (g_src->state != GRANULE_STATE_NS)) {
952 return RMI_ERROR_INPUT;
953 }
954
AlexeiFedorovac923c82023-04-06 15:12:04 +0100955 return data_create(rd_addr, data_addr, map_addr, g_src, flags);
Soby Mathewb4c6df42022-11-09 11:13:29 +0000956}
957
/*
 * RMI_DATA_CREATE_UNKNOWN handler: no source granule, so the contents of
 * the new data granule are not measured.
 */
unsigned long smc_data_create_unknown(unsigned long rd_addr,
				      unsigned long data_addr,
				      unsigned long map_addr)
{
	return data_create(rd_addr, data_addr, map_addr, NULL, 0);
}
964
/*
 * RMI_DATA_DESTROY handler.
 *
 * Removes the DATA granule mapped at protected IPA 'map_addr': the
 * page-level S2TTE becomes DESTROYED (if it was a valid mapping) or
 * unassigned-empty (if it was assigned-empty), and the data granule is
 * scrubbed and returned to the DELEGATED state.
 *
 * Returns RMI_SUCCESS, RMI_ERROR_INPUT, or a packed RMI_ERROR_RTT code.
 */
unsigned long smc_data_destroy(unsigned long rd_addr,
			       unsigned long map_addr)
{
	struct granule *g_data;
	struct granule *g_rd;
	struct granule *g_table_root;
	struct rtt_walk wi;
	unsigned long data_addr, s2tte, *s2tt;
	struct rd *rd;
	unsigned long ipa_bits;
	unsigned long ret;
	struct realm_s2_context s2_ctx;
	bool valid;
	int sl;

	g_rd = find_lock_granule(rd_addr, GRANULE_STATE_RD);
	if (g_rd == NULL) {
		return RMI_ERROR_INPUT;
	}

	rd = granule_map(g_rd, SLOT_RD);

	if (!validate_map_addr(map_addr, RTT_PAGE_LEVEL, rd)) {
		buffer_unmap(rd);
		granule_unlock(g_rd);
		return RMI_ERROR_INPUT;
	}

	g_table_root = rd->s2_ctx.g_rtt;
	sl = realm_rtt_starting_level(rd);
	ipa_bits = realm_ipa_bits(rd);
	s2_ctx = rd->s2_ctx;
	buffer_unmap(rd);

	/* RD->RTT locking order keeps locking deadlock free */
	granule_lock(g_table_root, GRANULE_STATE_RTT);
	granule_unlock(g_rd);

	rtt_walk_lock_unlock(g_table_root, sl, ipa_bits,
			     map_addr, RTT_PAGE_LEVEL, &wi);
	if (wi.last_level != RTT_PAGE_LEVEL) {
		ret = pack_return_code(RMI_ERROR_RTT, wi.last_level);
		goto out_unlock_ll_table;
	}

	s2tt = granule_map(wi.g_llt, SLOT_RTT);
	s2tte = s2tte_read(&s2tt[wi.index]);

	valid = s2tte_is_assigned_ram(s2tte, RTT_PAGE_LEVEL);

	/*
	 * Check if either HIPAS=ASSIGNED or map_addr is a
	 * valid Protected IPA.
	 */
	if (!valid && !s2tte_is_assigned_empty(s2tte, RTT_PAGE_LEVEL)) {
		ret = pack_return_code(RMI_ERROR_RTT, RTT_PAGE_LEVEL);
		goto out_unmap_ll_table;
	}

	/* Recover the PA of the data granule from the S2TTE */
	data_addr = s2tte_pa(s2tte, RTT_PAGE_LEVEL);

	/*
	 * We have already established either HIPAS=ASSIGNED or a valid mapping.
	 * If valid, transition HIPAS to DESTROYED and if HIPAS=ASSIGNED,
	 * transition to UNASSIGNED.
	 */
	s2tte = valid ? s2tte_create_destroyed() :
			s2tte_create_unassigned_empty();

	s2tte_write(&s2tt[wi.index], s2tte);

	/* Only a previously valid mapping needs TLB invalidation */
	if (valid) {
		invalidate_page(&s2_ctx, map_addr);
	}

	__granule_put(wi.g_llt);

	/*
	 * Lock the data granule and check expected state. Correct locking order
	 * is guaranteed because granule address is obtained from a locked
	 * granule by table walk. This lock needs to be acquired before a state
	 * transition to or from GRANULE_STATE_DATA for granule address can happen.
	 */
	g_data = find_lock_granule(data_addr, GRANULE_STATE_DATA);
	assert(g_data);
	granule_memzero(g_data, SLOT_DELEGATED);
	granule_unlock_transition(g_data, GRANULE_STATE_DELEGATED);

	ret = RMI_SUCCESS;

out_unmap_ll_table:
	buffer_unmap(s2tt);
out_unlock_ll_table:
	granule_unlock(wi.g_llt);

	return ret;
}
1061
AlexeiFedorov0fb44552023-04-14 15:37:58 +01001062/*
1063 * Sets new ripas value in @s2tte.
1064 *
1065 * It returns false if the @s2tte has no ripas value.
1066 * It sets @(*do_tlbi) to 'true' if the TLBs have to be invalidated.
1067 */
1068static bool update_ripas(unsigned long *s2tte, bool *do_tlbi,
1069 unsigned long level, enum ripas ripas_val)
Soby Mathewb4c6df42022-11-09 11:13:29 +00001070{
AlexeiFedorov0fb44552023-04-14 15:37:58 +01001071 unsigned long pa;
1072
1073 *do_tlbi = false;
1074
1075 if (!s2tte_has_ripas(*s2tte, level)) {
Soby Mathewb4c6df42022-11-09 11:13:29 +00001076 return false;
1077 }
1078
AlexeiFedorov0fb44552023-04-14 15:37:58 +01001079 if (ripas_val == RIPAS_RAM) {
1080 if (s2tte_is_unassigned_empty(*s2tte)) {
1081 *s2tte = s2tte_create_unassigned_ram();
1082 } else if (s2tte_is_assigned_empty(*s2tte, level)) {
1083 pa = s2tte_pa(*s2tte, level);
1084 *s2tte = s2tte_create_assigned_ram(pa, level);
Soby Mathewb4c6df42022-11-09 11:13:29 +00001085 }
AlexeiFedorov0fb44552023-04-14 15:37:58 +01001086 } else if (ripas_val == RIPAS_EMPTY) {
1087 if (s2tte_is_unassigned_ram(*s2tte)) {
1088 *s2tte = s2tte_create_unassigned_empty();
1089 } else if (s2tte_is_assigned_ram(*s2tte, level)) {
1090 pa = s2tte_pa(*s2tte, level);
1091 *s2tte = s2tte_create_assigned_empty(pa, level);
1092 *do_tlbi = true;
1093 }
1094 } else {
1095 assert(false);
Soby Mathewb4c6df42022-11-09 11:13:29 +00001096 }
1097
AlexeiFedorov0fb44552023-04-14 15:37:58 +01001098 return true;
Soby Mathewb4c6df42022-11-09 11:13:29 +00001099}
1100
1101static void ripas_granule_measure(struct rd *rd,
1102 unsigned long ipa,
1103 unsigned long level)
1104{
1105 struct measurement_desc_ripas measure_desc = {0};
1106
1107 /* Initialize the measurement descriptior structure */
1108 measure_desc.desc_type = MEASURE_DESC_TYPE_RIPAS;
1109 measure_desc.len = sizeof(struct measurement_desc_ripas);
1110 measure_desc.ipa = ipa;
1111 measure_desc.level = level;
1112 memcpy(measure_desc.rim,
1113 &rd->measurement[RIM_MEASUREMENT_SLOT],
1114 measurement_get_size(rd->algorithm));
1115
1116 /*
1117 * Hashing the measurement descriptor structure; the result is the
1118 * updated RIM.
1119 */
1120 measurement_hash_compute(rd->algorithm,
1121 &measure_desc,
1122 sizeof(measure_desc),
1123 rd->measurement[RIM_MEASUREMENT_SLOT]);
1124}
1125
/*
 * Handles RMI_RTT_INIT_RIPAS: set the RIPAS of the RTT entry for @map_addr
 * at @ulevel to RAM, for a realm that is still in the NEW state, extending
 * the Realm Initial Measurement accordingly.
 *
 * Returns RMI_SUCCESS, RMI_ERROR_INPUT, RMI_ERROR_REALM, or a packed
 * RMI_ERROR_RTT code carrying the faulting level.
 */
unsigned long smc_rtt_init_ripas(unsigned long rd_addr,
				 unsigned long map_addr,
				 unsigned long ulevel)
{
	struct granule *g_rd, *g_rtt_root;
	struct rd *rd;
	unsigned long ipa_bits;
	struct rtt_walk wi;
	unsigned long s2tte, *s2tt;
	unsigned long ret;
	long level = (long)ulevel;
	int sl;

	g_rd = find_lock_granule(rd_addr, GRANULE_STATE_RD);
	if (g_rd == NULL) {
		return RMI_ERROR_INPUT;
	}

	rd = granule_map(g_rd, SLOT_RD);

	/* RIPAS may only be initialized before the realm is activated */
	if (get_rd_state_locked(rd) != REALM_STATE_NEW) {
		buffer_unmap(rd);
		granule_unlock(g_rd);
		return RMI_ERROR_REALM;
	}

	if (!validate_rtt_entry_cmds(map_addr, level, rd)) {
		buffer_unmap(rd);
		granule_unlock(g_rd);
		return RMI_ERROR_INPUT;
	}

	/* Only Protected IPAs carry a RIPAS */
	if (!addr_in_par(rd, map_addr)) {
		buffer_unmap(rd);
		granule_unlock(g_rd);
		return RMI_ERROR_INPUT;
	}

	g_rtt_root = rd->s2_ctx.g_rtt;
	sl = realm_rtt_starting_level(rd);
	ipa_bits = realm_ipa_bits(rd);

	/*
	 * Lock the root RTT before dropping the RD lock; the RD stays
	 * mapped because it is needed later for the measurement update.
	 */
	granule_lock(g_rtt_root, GRANULE_STATE_RTT);
	granule_unlock(g_rd);

	rtt_walk_lock_unlock(g_rtt_root, sl, ipa_bits,
				map_addr, level, &wi);

	/* The walk must terminate exactly at the requested level */
	if (wi.last_level != level) {
		ret = pack_return_code(RMI_ERROR_RTT, wi.last_level);
		goto out_unlock_llt;
	}

	s2tt = granule_map(wi.g_llt, SLOT_RTT);
	s2tte = s2tte_read(&s2tt[wi.index]);

	/*
	 * EMPTY -> RAM transition is performed and measured; an entry
	 * already at RAM is only re-measured (idempotent); anything else
	 * is an RTT error.
	 */
	if (s2tte_is_unassigned_empty(s2tte)) {
		s2tte = s2tte_create_unassigned_ram();
		s2tte_write(&s2tt[wi.index], s2tte);
		ripas_granule_measure(rd, map_addr, level);
	} else if (s2tte_is_unassigned_ram(s2tte)) {
		ripas_granule_measure(rd, map_addr, level);
	} else {
		ret = pack_return_code(RMI_ERROR_RTT, level);
		goto out_unmap_llt;
	}

	ret = RMI_SUCCESS;

out_unmap_llt:
	buffer_unmap(s2tt);
out_unlock_llt:
	buffer_unmap(rd);
	granule_unlock(wi.g_llt);
	return ret;
}
1202
/*
 * Handles RMI_RTT_SET_RIPAS: apply the RIPAS change requested by the realm
 * (recorded in the REC at @rec_addr) to the RTT entry for @map_addr at
 * @ulevel, advancing the REC's pending set_ripas region on success.
 *
 * Returns RMI_SUCCESS, RMI_ERROR_INPUT, RMI_ERROR_REC, or a packed
 * RMI_ERROR_RTT code carrying the faulting level.
 */
unsigned long smc_rtt_set_ripas(unsigned long rd_addr,
				unsigned long rec_addr,
				unsigned long map_addr,
				unsigned long ulevel,
				unsigned long uripas)
{
	struct granule *g_rd, *g_rec, *g_rtt_root;
	struct rec *rec;
	struct rd *rd;
	unsigned long map_size, ipa_bits;
	struct rtt_walk wi;
	unsigned long s2tte, *s2tt;
	struct realm_s2_context s2_ctx;
	long level = (long)ulevel;
	enum ripas ripas_val = (enum ripas)uripas;
	unsigned long ret;
	bool tlbi_required;
	int sl;

	/* Only EMPTY and RAM are valid RIPAS values for this command */
	if (ripas_val > RIPAS_RAM) {
		return RMI_ERROR_INPUT;
	}

	/* Lock RD and REC together in the canonical order */
	if (!find_lock_two_granules(rd_addr,
					GRANULE_STATE_RD,
					&g_rd,
					rec_addr,
					GRANULE_STATE_REC,
					&g_rec)) {
		return RMI_ERROR_INPUT;
	}

	/* The REC must not be in use by another PE */
	if (granule_refcount_read_acquire(g_rec) != 0UL) {
		ret = RMI_ERROR_REC;
		goto out_unlock_rec_rd;
	}

	rec = granule_map(g_rec, SLOT_REC);

	/* The REC must belong to the realm owned by this RD */
	if (g_rd != rec->realm_info.g_rd) {
		ret = RMI_ERROR_REC;
		goto out_unmap_rec;
	}

	/* The requested RIPAS must match what the realm asked for */
	if (ripas_val != rec->set_ripas.ripas_val) {
		ret = RMI_ERROR_INPUT;
		goto out_unmap_rec;
	}

	if (map_addr != rec->set_ripas.addr) {
		/* Target region is not next chunk of requested region */
		ret = RMI_ERROR_INPUT;
		goto out_unmap_rec;
	}

	rd = granule_map(g_rd, SLOT_RD);

	if (!validate_rtt_entry_cmds(map_addr, level, rd)) {
		ret = RMI_ERROR_INPUT;
		goto out_unmap_rd;
	}

	map_size = s2tte_map_size(level);
	if (map_addr + map_size > rec->set_ripas.end) {
		/* Target region extends beyond end of requested region */
		ret = RMI_ERROR_INPUT;
		goto out_unmap_rd;
	}

	g_rtt_root = rd->s2_ctx.g_rtt;
	sl = realm_rtt_starting_level(rd);
	ipa_bits = realm_ipa_bits(rd);
	s2_ctx = rd->s2_ctx;

	granule_lock(g_rtt_root, GRANULE_STATE_RTT);

	rtt_walk_lock_unlock(g_rtt_root, sl, ipa_bits,
				map_addr, level, &wi);
	/* The walk must terminate exactly at the requested level */
	if (wi.last_level != level) {
		ret = pack_return_code(RMI_ERROR_RTT, wi.last_level);
		goto out_unlock_llt;
	}

	s2tt = granule_map(wi.g_llt, SLOT_RTT);
	s2tte = s2tte_read(&s2tt[wi.index]);

	/* Fails only when the entry carries no RIPAS information */
	if (!update_ripas(&s2tte, &tlbi_required, level, ripas_val)) {
		ret = pack_return_code(RMI_ERROR_RTT, (unsigned int)level);
		goto out_unmap_llt;
	}

	s2tte_write(&s2tt[wi.index], s2tte);

	/* Invalidate the stage 2 TLBs if a valid mapping was torn down */
	if (tlbi_required) {
		if (level == RTT_PAGE_LEVEL) {
			invalidate_page(&s2_ctx, map_addr);
		} else {
			invalidate_block(&s2_ctx, map_addr);
		}
	}

	/* Advance the pending region so the next call continues from here */
	rec->set_ripas.addr += map_size;

	ret = RMI_SUCCESS;

out_unmap_llt:
	buffer_unmap(s2tt);
out_unlock_llt:
	granule_unlock(wi.g_llt);
out_unmap_rd:
	buffer_unmap(rd);
out_unmap_rec:
	buffer_unmap(rec);
out_unlock_rec_rd:
	granule_unlock(g_rec);
	granule_unlock(g_rd);
	return ret;
}