blob: c1fbdd5d5d29a3e3176c4307974b8f7106210c75 [file] [log] [blame]
/*
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * SPDX-FileCopyrightText: Copyright TF-RMM Contributors.
 */
6
#include <buffer.h>
#include <granule.h>
#include <measurement.h>
#include <realm.h>
#include <ripas.h>
#include <smc-handler.h>
#include <smc-rmi.h>
#include <smc.h>
#include <stddef.h>
#include <string.h>
#include <table.h>
18
/*
 * Validate the map_addr value passed to RMI_RTT_* and RMI_DATA_* commands:
 * the address must fall inside the realm's IPA space and be aligned to the
 * entry size of @level.
 */
static bool validate_map_addr(unsigned long map_addr,
			      unsigned long level,
			      struct rd *rd)
{
	return (map_addr < realm_ipa_size(rd)) &&
	       addr_is_level_aligned(map_addr, level);
}
35
36/*
37 * Structure commands can operate on all RTTs except for the root RTT so
38 * the minimal valid level is the stage 2 starting level + 1.
39 */
40static bool validate_rtt_structure_cmds(unsigned long map_addr,
41 long level,
42 struct rd *rd)
43{
44 int min_level = realm_rtt_starting_level(rd) + 1;
45
46 if ((level < min_level) || (level > RTT_PAGE_LEVEL)) {
47 return false;
48 }
49 return validate_map_addr(map_addr, level, rd);
50}
51
52/*
53 * Map/Unmap commands can operate up to a level 2 block entry so min_level is
54 * the smallest block size.
55 */
56static bool validate_rtt_map_cmds(unsigned long map_addr,
57 long level,
58 struct rd *rd)
59{
60 if ((level < RTT_MIN_BLOCK_LEVEL) || (level > RTT_PAGE_LEVEL)) {
61 return false;
62 }
63 return validate_map_addr(map_addr, level, rd);
64}
65
66/*
67 * Entry commands can operate on any entry so the minimal valid level is the
68 * stage 2 starting level.
69 */
70static bool validate_rtt_entry_cmds(unsigned long map_addr,
71 long level,
72 struct rd *rd)
73{
74 if ((level < realm_rtt_starting_level(rd)) ||
75 (level > RTT_PAGE_LEVEL)) {
76 return false;
77 }
78 return validate_map_addr(map_addr, level, rd);
79}
80
/*
 * smc_rtt_create - handler for RMI_RTT_CREATE.
 *
 * Creates a new RTT at depth @ulevel for the IPA @map_addr and installs it
 * in the parent RTT entry at @ulevel - 1, which becomes a table descriptor
 * pointing at @rtt_addr.  The new table inherits the state of the parent
 * entry: an unassigned parent yields a table of identical unassigned
 * entries, while an assigned (block) parent is "unfolded" into
 * S2TTES_PER_S2TT entries covering the same contiguous PA range.
 *
 * rd_addr:  PA of the realm's RD granule.
 * rtt_addr: PA of a DELEGATED granule that becomes the new RTT.
 * map_addr: base IPA of the region described by the new RTT.
 * ulevel:   RTT level of the new table.
 *
 * Returns RMI_SUCCESS, RMI_ERROR_INPUT, or a packed RMI_ERROR_RTT code
 * carrying the level at which the table walk terminated.
 */
unsigned long smc_rtt_create(unsigned long rd_addr,
			     unsigned long rtt_addr,
			     unsigned long map_addr,
			     unsigned long ulevel)
{
	struct granule *g_rd;
	struct granule *g_tbl;
	struct rd *rd;
	struct granule *g_table_root;
	struct rtt_walk wi;
	unsigned long *s2tt, *parent_s2tt, parent_s2tte;
	long level = (long)ulevel;
	unsigned long ipa_bits;
	unsigned long ret;
	struct realm_s2_context s2_ctx;
	int sl;

	/* Lock both granules together to keep the locking order consistent */
	if (!find_lock_two_granules(rtt_addr,
				    GRANULE_STATE_DELEGATED,
				    &g_tbl,
				    rd_addr,
				    GRANULE_STATE_RD,
				    &g_rd)) {
		return RMI_ERROR_INPUT;
	}

	rd = granule_map(g_rd, SLOT_RD);

	if (!validate_rtt_structure_cmds(map_addr, level, rd)) {
		buffer_unmap(rd);
		granule_unlock(g_rd);
		granule_unlock(g_tbl);
		return RMI_ERROR_INPUT;
	}

	/* Capture the realm's stage 2 parameters before unmapping the RD */
	g_table_root = rd->s2_ctx.g_rtt;
	sl = realm_rtt_starting_level(rd);
	ipa_bits = realm_ipa_bits(rd);
	s2_ctx = rd->s2_ctx;
	buffer_unmap(rd);

	/*
	 * Lock the RTT root. Enforcing locking order RD->RTT is enough to
	 * ensure a deadlock free locking guarantee.
	 */
	granule_lock(g_table_root, GRANULE_STATE_RTT);

	/* Unlock RD after locking RTT Root */
	granule_unlock(g_rd);

	/* Walk to the parent of the new table (level - 1) */
	rtt_walk_lock_unlock(g_table_root, sl, ipa_bits,
			     map_addr, level - 1L, &wi);
	if (wi.last_level != level - 1L) {
		ret = pack_return_code(RMI_ERROR_RTT,
					(unsigned int)wi.last_level);
		goto out_unlock_llt;
	}

	parent_s2tt = granule_map(wi.g_llt, SLOT_RTT);
	parent_s2tte = s2tte_read(&parent_s2tt[wi.index]);
	s2tt = granule_map(g_tbl, SLOT_DELEGATED);

	/* Initialize the new table according to the parent entry's state */
	if (s2tte_is_unassigned_empty(parent_s2tte)) {
		s2tt_init_unassigned_empty(s2tt);

		/*
		 * Increase the refcount of the parent, the granule was
		 * locked while table walking and hand-over-hand locking.
		 * Atomicity and acquire/release semantics not required because
		 * the table is accessed always locked.
		 */
		__granule_get(wi.g_llt);

	} else if (s2tte_is_unassigned_ram(parent_s2tte)) {
		s2tt_init_unassigned_ram(s2tt);
		__granule_get(wi.g_llt);

	} else if (s2tte_is_unassigned_ns(parent_s2tte)) {
		s2tt_init_unassigned_ns(s2tt);
		__granule_get(wi.g_llt);

	} else if (s2tte_is_unassigned_destroyed(parent_s2tte)) {
		s2tt_init_unassigned_destroyed(s2tt);
		__granule_get(wi.g_llt);

	} else if (s2tte_is_assigned_destroyed(parent_s2tte, level - 1L)) {
		unsigned long block_pa;

		/*
		 * We should observe parent assigned s2tte only when
		 * we create tables above this level.
		 */
		assert(level > RTT_MIN_BLOCK_LEVEL);

		block_pa = s2tte_pa(parent_s2tte, level - 1L);

		/* Unfold the block into a table of contiguous entries */
		s2tt_init_assigned_destroyed(s2tt, block_pa, level);

		/*
		 * Increase the refcount to mark the granule as in-use. refcount
		 * is incremented by S2TTES_PER_S2TT (ref RTT unfolding).
		 */
		__granule_refcount_inc(g_tbl, S2TTES_PER_S2TT);

	} else if (s2tte_is_assigned_empty(parent_s2tte, level - 1L)) {
		unsigned long block_pa;

		/*
		 * We should observe parent assigned s2tte only when
		 * we create tables above this level.
		 */
		assert(level > RTT_MIN_BLOCK_LEVEL);

		block_pa = s2tte_pa(parent_s2tte, level - 1L);

		s2tt_init_assigned_empty(s2tt, block_pa, level);

		/*
		 * Increase the refcount to mark the granule as in-use. refcount
		 * is incremented by S2TTES_PER_S2TT (ref RTT unfolding).
		 */
		__granule_refcount_inc(g_tbl, S2TTES_PER_S2TT);

	} else if (s2tte_is_assigned_ram(parent_s2tte, level - 1L)) {
		unsigned long block_pa;

		/*
		 * We should observe parent valid s2tte only when
		 * we create tables above this level.
		 */
		assert(level > RTT_MIN_BLOCK_LEVEL);

		/*
		 * Break before make. This may cause spurious S2 aborts.
		 */
		s2tte_write(&parent_s2tt[wi.index], 0UL);
		invalidate_block(&s2_ctx, map_addr);

		block_pa = s2tte_pa(parent_s2tte, level - 1L);

		s2tt_init_assigned_ram(s2tt, block_pa, level);

		/*
		 * Increase the refcount to mark the granule as in-use. refcount
		 * is incremented by S2TTES_PER_S2TT (ref RTT unfolding).
		 */
		__granule_refcount_inc(g_tbl, S2TTES_PER_S2TT);

	} else if (s2tte_is_assigned_ns(parent_s2tte, level - 1L)) {
		unsigned long block_pa;

		/*
		 * We should observe parent assigned_ns s2tte only when
		 * we create tables above this level.
		 */
		assert(level > RTT_MIN_BLOCK_LEVEL);

		/*
		 * Break before make. This may cause spurious S2 aborts.
		 */
		s2tte_write(&parent_s2tt[wi.index], 0UL);
		invalidate_block(&s2_ctx, map_addr);

		block_pa = s2tte_pa(parent_s2tte, level - 1L);

		s2tt_init_assigned_ns(s2tt, block_pa, level);

		/*
		 * Increase the refcount to mark the granule as in-use. refcount
		 * is incremented by S2TTES_PER_S2TT (ref RTT unfolding).
		 */
		__granule_refcount_inc(g_tbl, S2TTES_PER_S2TT);

	} else if (s2tte_is_table(parent_s2tte, level - 1L)) {
		/* A table already exists at this position */
		ret = pack_return_code(RMI_ERROR_RTT,
					(unsigned int)(level - 1L));
		goto out_unmap_table;

	} else {
		assert(false);
	}

	ret = RMI_SUCCESS;

	granule_set_state(g_tbl, GRANULE_STATE_RTT);

	/* Finally, link the new table into the parent entry */
	parent_s2tte = s2tte_create_table(rtt_addr, level - 1L);
	s2tte_write(&parent_s2tt[wi.index], parent_s2tte);

out_unmap_table:
	buffer_unmap(s2tt);
	buffer_unmap(parent_s2tt);
out_unlock_llt:
	granule_unlock(wi.g_llt);
	granule_unlock(g_tbl);
	return ret;
}
278
/*
 * smc_rtt_fold - handler for RMI_RTT_FOLD.
 *
 * Folds the RTT at depth @ulevel for @map_addr back into its parent entry.
 * This succeeds only when every entry of the table is of one foldable
 * type: either all unassigned with the same RIPAS (table refcount 0), or a
 * fully populated table (refcount == S2TTES_PER_S2TT) mapping a single
 * contiguous block of the same assigned type.
 *
 * res->x[0]: return code (RMI_SUCCESS / RMI_ERROR_INPUT / RMI_ERROR_RTT).
 * res->x[1]: on success, PA of the folded RTT, which is handed back to the
 *            host in DELEGATED state.
 */
void smc_rtt_fold(unsigned long rd_addr,
		  unsigned long map_addr,
		  unsigned long ulevel,
		  struct smc_result *res)
{
	struct granule *g_rd;
	struct granule *g_tbl;
	struct rd *rd;
	struct granule *g_table_root;
	struct rtt_walk wi;
	unsigned long *table, *parent_s2tt, parent_s2tte;
	long level = (long)ulevel;
	unsigned long ipa_bits, rtt_addr;
	unsigned long ret;
	struct realm_s2_context s2_ctx;
	int sl;

	g_rd = find_lock_granule(rd_addr, GRANULE_STATE_RD);
	if (g_rd == NULL) {
		res->x[0] = RMI_ERROR_INPUT;
		return;
	}

	rd = granule_map(g_rd, SLOT_RD);

	if (!validate_rtt_structure_cmds(map_addr, level, rd)) {
		buffer_unmap(rd);
		granule_unlock(g_rd);
		res->x[0] = RMI_ERROR_INPUT;
		return;
	}

	/* Capture stage 2 parameters, then drop the RD (locking order RD->RTT) */
	g_table_root = rd->s2_ctx.g_rtt;
	sl = realm_rtt_starting_level(rd);
	ipa_bits = realm_ipa_bits(rd);
	s2_ctx = rd->s2_ctx;
	buffer_unmap(rd);
	granule_lock(g_table_root, GRANULE_STATE_RTT);
	granule_unlock(g_rd);

	/* Walk to the parent entry of the table being folded */
	rtt_walk_lock_unlock(g_table_root, sl, ipa_bits,
			     map_addr, level - 1L, &wi);
	if (wi.last_level != level - 1L) {
		ret = pack_return_code(RMI_ERROR_RTT,
					(unsigned int)wi.last_level);
		goto out_unlock_parent_table;
	}

	parent_s2tt = granule_map(wi.g_llt, SLOT_RTT);
	parent_s2tte = s2tte_read(&parent_s2tt[wi.index]);
	if (!s2tte_is_table(parent_s2tte, level - 1L)) {
		ret = pack_return_code(RMI_ERROR_RTT,
					(unsigned int)(level - 1L));
		goto out_unmap_parent_table;
	}

	rtt_addr = s2tte_pa_table(parent_s2tte, level - 1L);
	g_tbl = find_lock_granule(rtt_addr, GRANULE_STATE_RTT);

	/*
	 * A table descriptor S2TTE always points to a TABLE granule.
	 */
	assert(g_tbl != NULL);

	table = granule_map(g_tbl, SLOT_RTT2);

	/*
	 * The command can succeed only if all 512 S2TTEs are of the same type.
	 * We first check the table's ref. counter to speed up the case when
	 * the host makes a guess whether a memory region can be folded.
	 */
	if (g_tbl->refcount == 0UL) {
		/* refcount 0: all entries must be unassigned with one RIPAS */
		if (table_is_unassigned_destroyed_block(table)) {
			parent_s2tte = s2tte_create_unassigned_destroyed();
		} else if (table_is_unassigned_empty_block(table)) {
			parent_s2tte = s2tte_create_unassigned_empty();
		} else if (table_is_unassigned_ram_block(table)) {
			parent_s2tte = s2tte_create_unassigned_ram();
		} else if (table_is_unassigned_ns_block(table)) {
			parent_s2tte = s2tte_create_unassigned_ns();
		} else if (table_maps_assigned_ns_block(table, level)) {
			unsigned long s2tte = s2tte_read(&table[0]);
			unsigned long block_pa = s2tte_pa(s2tte, level);

			parent_s2tte = s2tte_create_assigned_ns(block_pa,
								level - 1L);
		} else {
			/*
			 * The table holds a mixture of destroyed and
			 * unassigned entries.
			 */
			ret = pack_return_code(RMI_ERROR_RTT,
						(unsigned int)level);
			goto out_unmap_table;
		}
		/* Drop the reference the folded table held on its parent */
		__granule_put(wi.g_llt);
	} else if (g_tbl->refcount == S2TTES_PER_S2TT) {

		unsigned long s2tte, block_pa;

		/* The RMM specification does not allow creating block
		 * entries less than RTT_MIN_BLOCK_LEVEL even though
		 * permitted by the Arm Architecture.
		 * Hence ensure that the table being folded is at a level
		 * higher than the RTT_MIN_BLOCK_LEVEL.
		 *
		 * A fully populated table cannot be destroyed if that
		 * would create a block mapping below RTT_MIN_BLOCK_LEVEL.
		 */
		if (level <= RTT_MIN_BLOCK_LEVEL) {
			ret = pack_return_code(RMI_ERROR_RTT,
						(unsigned int)wi.last_level);
			goto out_unmap_table;
		}

		s2tte = s2tte_read(&table[0]);
		block_pa = s2tte_pa(s2tte, level);

		/*
		 * The table must also refer to a contiguous block through the
		 * same type of s2tte, either Assigned, Valid or Assigned_NS.
		 */
		if (table_maps_assigned_empty_block(table, level)) {
			parent_s2tte = s2tte_create_assigned_empty(block_pa,
								   level - 1L);
		} else if (table_maps_assigned_ram_block(table, level)) {
			parent_s2tte = s2tte_create_assigned_ram(block_pa,
								 level - 1L);
		/* The table contains mixed entries that cannot be folded */
		} else {
			ret = pack_return_code(RMI_ERROR_RTT,
						(unsigned int)level);
			goto out_unmap_table;
		}

		__granule_refcount_dec(g_tbl, S2TTES_PER_S2TT);
	} else {
		/*
		 * The table holds a mixture of different types of s2ttes.
		 */
		ret = pack_return_code(RMI_ERROR_RTT,
					(unsigned int)level);
		goto out_unmap_table;
	}

	ret = RMI_SUCCESS;
	res->x[1] = rtt_addr;

	/*
	 * Break before make.
	 */
	s2tte_write(&parent_s2tt[wi.index], 0UL);

	/* Assigned mappings were live: invalidate every page in the block */
	if (s2tte_is_assigned_ram(parent_s2tte, level - 1L) ||
	    s2tte_is_assigned_ns(parent_s2tte, level - 1L)) {
		invalidate_pages_in_block(&s2_ctx, map_addr);
	} else {
		invalidate_block(&s2_ctx, map_addr);
	}

	s2tte_write(&parent_s2tt[wi.index], parent_s2tte);

	/* Scrub the folded table and hand it back as DELEGATED */
	granule_memzero_mapped(table);
	granule_set_state(g_tbl, GRANULE_STATE_DELEGATED);

out_unmap_table:
	buffer_unmap(table);
	granule_unlock(g_tbl);
out_unmap_parent_table:
	buffer_unmap(parent_s2tt);
out_unlock_parent_table:
	granule_unlock(wi.g_llt);
	res->x[0] = ret;
}
453
/*
 * smc_rtt_destroy - handler for RMI_RTT_DESTROY.
 *
 * Destroys the unused (refcount == 0) RTT at depth @ulevel for @map_addr.
 * The parent entry becomes unassigned: RIPAS "destroyed" when @map_addr is
 * inside the PAR, unassigned-NS otherwise.
 *
 * res->x[0]: return code.
 * res->x[1]: on success, PA of the destroyed RTT (returned to DELEGATED).
 * res->x[2]: on RTT error or success, top of the non-live region (see
 *            skip_non_live_entries()), letting the host skip ahead.
 */
void smc_rtt_destroy(unsigned long rd_addr,
		     unsigned long map_addr,
		     unsigned long ulevel,
		     struct smc_result *res)
{
	struct granule *g_rd;
	struct granule *g_tbl;
	struct rd *rd;
	struct granule *g_table_root;
	struct rtt_walk wi;
	unsigned long *table, *parent_s2tt, parent_s2tte;
	long level = (long)ulevel;
	unsigned long ipa_bits, rtt_addr;
	unsigned long ret;
	struct realm_s2_context s2_ctx;
	int sl;
	bool in_par, skip_non_live = false;

	g_rd = find_lock_granule(rd_addr, GRANULE_STATE_RD);
	if (g_rd == NULL) {
		res->x[0] = RMI_ERROR_INPUT;
		return;
	}

	rd = granule_map(g_rd, SLOT_RD);

	if (!validate_rtt_structure_cmds(map_addr, level, rd)) {
		buffer_unmap(rd);
		granule_unlock(g_rd);
		res->x[0] = RMI_ERROR_INPUT;
		return;
	}

	/* Capture stage 2 parameters, then drop the RD (locking order RD->RTT) */
	g_table_root = rd->s2_ctx.g_rtt;
	sl = realm_rtt_starting_level(rd);
	ipa_bits = realm_ipa_bits(rd);
	s2_ctx = rd->s2_ctx;
	in_par = addr_in_par(rd, map_addr);
	buffer_unmap(rd);
	granule_lock(g_table_root, GRANULE_STATE_RTT);
	granule_unlock(g_rd);

	/* Walk to the parent entry of the table being destroyed */
	rtt_walk_lock_unlock(g_table_root, sl, ipa_bits,
			     map_addr, level - 1L, &wi);

	parent_s2tt = granule_map(wi.g_llt, SLOT_RTT);
	parent_s2tte = s2tte_read(&parent_s2tt[wi.index]);

	if ((wi.last_level != level - 1L) ||
	    !s2tte_is_table(parent_s2tte, level - 1L)) {
		ret = pack_return_code(RMI_ERROR_RTT,
					(unsigned int)wi.last_level);
		skip_non_live = true;
		goto out_unmap_parent_table;
	}

	rtt_addr = s2tte_pa_table(parent_s2tte, level - 1L);

	/*
	 * Lock the RTT granule. The 'rtt_addr' is verified, thus can be treated
	 * as an internal granule.
	 */
	g_tbl = find_lock_granule(rtt_addr, GRANULE_STATE_RTT);

	/*
	 * A table descriptor S2TTE always points to a TABLE granule.
	 */
	assert(g_tbl != NULL);

	/*
	 * Read the refcount value. RTT granule is always accessed locked, thus
	 * the refcount can be accessed without atomic operations.
	 */
	if (g_tbl->refcount != 0UL) {
		/* A live (in-use) table cannot be destroyed */
		ret = pack_return_code(RMI_ERROR_RTT, (unsigned int)level);
		goto out_unlock_table;
	}

	ret = RMI_SUCCESS;
	res->x[1] = rtt_addr;
	skip_non_live = true;

	table = granule_map(g_tbl, SLOT_RTT2);

	/* Parent entry state depends on whether the IPA is protected */
	if (in_par) {
		parent_s2tte = s2tte_create_unassigned_destroyed();
	} else {
		parent_s2tte = s2tte_create_unassigned_ns();
	}

	/* Drop the reference the destroyed table held on its parent */
	__granule_put(wi.g_llt);

	/*
	 * Break before make. Note that this may cause spurious S2 aborts.
	 */
	s2tte_write(&parent_s2tt[wi.index], 0UL);
	invalidate_block(&s2_ctx, map_addr);
	s2tte_write(&parent_s2tt[wi.index], parent_s2tte);

	/* Scrub the destroyed table and hand it back as DELEGATED */
	granule_memzero_mapped(table);
	granule_set_state(g_tbl, GRANULE_STATE_DELEGATED);

	buffer_unmap(table);
out_unlock_table:
	granule_unlock(g_tbl);
out_unmap_parent_table:
	if (skip_non_live) {
		res->x[2] = skip_non_live_entries(map_addr, parent_s2tt, &wi);
	}
	buffer_unmap(parent_s2tt);
	granule_unlock(wi.g_llt);
	res->x[0] = ret;
}
567
/* Selects the operation performed by map_unmap_ns(). */
enum map_unmap_ns_op {
	MAP_NS,
	UNMAP_NS
};
572
/*
 * Common worker for RMI_RTT_MAP_UNPROTECTED and RMI_RTT_UNMAP_UNPROTECTED.
 *
 * We don't hold a reference on the NS granule when it is
 * mapped into a realm. Instead we rely on the guarantees
 * provided by the architecture to ensure that a NS access
 * to a protected granule is prohibited even within the realm.
 *
 * host_s2tte: host-provided NS attributes/PA (ignored for UNMAP_NS).
 * op:         MAP_NS or UNMAP_NS.
 * res->x[0]:  return code.
 * res->x[1]:  for UNMAP_NS, top of the non-live region
 *             (see skip_non_live_entries()).
 */
static void map_unmap_ns(unsigned long rd_addr,
			 unsigned long map_addr,
			 long level,
			 unsigned long host_s2tte,
			 enum map_unmap_ns_op op,
			 struct smc_result *res)
{
	struct granule *g_rd;
	struct rd *rd;
	struct granule *g_table_root;
	unsigned long *s2tt, s2tte;
	struct rtt_walk wi;
	unsigned long ipa_bits;
	struct realm_s2_context s2_ctx;
	int sl;

	g_rd = find_lock_granule(rd_addr, GRANULE_STATE_RD);
	if (g_rd == NULL) {
		res->x[0] = RMI_ERROR_INPUT;
		return;
	}

	rd = granule_map(g_rd, SLOT_RD);

	if (!validate_rtt_map_cmds(map_addr, level, rd)) {
		buffer_unmap(rd);
		granule_unlock(g_rd);
		res->x[0] = RMI_ERROR_INPUT;
		return;
	}

	g_table_root = rd->s2_ctx.g_rtt;
	sl = realm_rtt_starting_level(rd);
	ipa_bits = realm_ipa_bits(rd);

	/* Check if map_addr is outside PAR (NS mappings must be outside) */
	if (addr_in_par(rd, map_addr)) {
		buffer_unmap(rd);
		granule_unlock(g_rd);
		res->x[0] = RMI_ERROR_INPUT;
		return;
	}

	s2_ctx = rd->s2_ctx;
	buffer_unmap(rd);

	/* Locking order RD->RTT root, then the RD can be released */
	granule_lock(g_table_root, GRANULE_STATE_RTT);
	granule_unlock(g_rd);

	rtt_walk_lock_unlock(g_table_root, sl, ipa_bits,
			     map_addr, level, &wi);

	/*
	 * For UNMAP_NS, we need to map the table and look
	 * for the end of the non-live region.
	 */
	if (op == MAP_NS && wi.last_level != level) {
		res->x[0] = pack_return_code(RMI_ERROR_RTT,
					     (unsigned int)wi.last_level);
		goto out_unlock_llt;
	}

	s2tt = granule_map(wi.g_llt, SLOT_RTT);
	s2tte = s2tte_read(&s2tt[wi.index]);

	if (op == MAP_NS) {
		/* Only an unassigned-NS entry may be mapped */
		if (!s2tte_is_unassigned_ns(s2tte)) {
			res->x[0] = pack_return_code(RMI_ERROR_RTT,
						     (unsigned int)level);
			goto out_unmap_table;
		}

		s2tte = s2tte_create_assigned_ns(host_s2tte, level);
		s2tte_write(&s2tt[wi.index], s2tte);

	} else if (op == UNMAP_NS) {
		/*
		 * The following check also verifies that map_addr is outside
		 * PAR, as valid_NS s2tte may only cover outside PAR IPA range.
		 */
		bool assigned_ns = s2tte_is_assigned_ns(s2tte, wi.last_level);

		if ((wi.last_level != level) || !assigned_ns) {
			res->x[0] = pack_return_code(RMI_ERROR_RTT,
						     (unsigned int)wi.last_level);
			goto out_unmap_table;
		}

		s2tte = s2tte_create_unassigned_ns();
		s2tte_write(&s2tt[wi.index], s2tte);
		/* Invalidate the stale translation for the unmapped range */
		if (level == RTT_PAGE_LEVEL) {
			invalidate_page(&s2_ctx, map_addr);
		} else {
			invalidate_block(&s2_ctx, map_addr);
		}
	}

	res->x[0] = RMI_SUCCESS;

out_unmap_table:
	if (op == UNMAP_NS) {
		res->x[1] = skip_non_live_entries(map_addr, s2tt, &wi);
	}
	buffer_unmap(s2tt);
out_unlock_llt:
	granule_unlock(wi.g_llt);
}
686
687unsigned long smc_rtt_map_unprotected(unsigned long rd_addr,
688 unsigned long map_addr,
689 unsigned long ulevel,
690 unsigned long s2tte)
691{
692 long level = (long)ulevel;
AlexeiFedorov917eabf2023-04-24 12:20:41 +0100693 struct smc_result res;
Soby Mathewb4c6df42022-11-09 11:13:29 +0000694
695 if (!host_ns_s2tte_is_valid(s2tte, level)) {
696 return RMI_ERROR_INPUT;
697 }
698
AlexeiFedorov917eabf2023-04-24 12:20:41 +0100699 map_unmap_ns(rd_addr, map_addr, level, s2tte, MAP_NS, &res);
700 return res.x[0];
Soby Mathewb4c6df42022-11-09 11:13:29 +0000701}
702
AlexeiFedorov917eabf2023-04-24 12:20:41 +0100703void smc_rtt_unmap_unprotected(unsigned long rd_addr,
704 unsigned long map_addr,
705 unsigned long ulevel,
706 struct smc_result *res)
Soby Mathewb4c6df42022-11-09 11:13:29 +0000707{
AlexeiFedorov917eabf2023-04-24 12:20:41 +0100708 return map_unmap_ns(rd_addr, map_addr, (long)ulevel, 0UL, UNMAP_NS, res);
Soby Mathewb4c6df42022-11-09 11:13:29 +0000709}
710
/*
 * smc_rtt_read_entry - handler for RMI_RTT_READ_ENTRY.
 *
 * Walks the realm's stage 2 tables for @map_addr down to @ulevel (or the
 * deepest existing level) and reports the entry found:
 *   ret->x[0]: RMI_SUCCESS or RMI_ERROR_INPUT.
 *   ret->x[1]: level at which the walk terminated.
 *   ret->x[2]: entry state (RMI_UNASSIGNED / RMI_ASSIGNED / RMI_TABLE).
 *   ret->x[3]: output address (0UL where not applicable).
 *   ret->x[4]: reported RIPAS; entries with no realm-visible RIPAS
 *              (NS, table) report RIPAS_EMPTY.
 */
void smc_rtt_read_entry(unsigned long rd_addr,
			unsigned long map_addr,
			unsigned long ulevel,
			struct smc_result *ret)
{
	struct granule *g_rd, *g_rtt_root;
	struct rd *rd;
	struct rtt_walk wi;
	unsigned long *s2tt, s2tte;
	unsigned long ipa_bits;
	long level = (long)ulevel;
	int sl;

	g_rd = find_lock_granule(rd_addr, GRANULE_STATE_RD);
	if (g_rd == NULL) {
		ret->x[0] = RMI_ERROR_INPUT;
		return;
	}

	rd = granule_map(g_rd, SLOT_RD);

	if (!validate_rtt_entry_cmds(map_addr, level, rd)) {
		buffer_unmap(rd);
		granule_unlock(g_rd);
		ret->x[0] = RMI_ERROR_INPUT;
		return;
	}

	g_rtt_root = rd->s2_ctx.g_rtt;
	sl = realm_rtt_starting_level(rd);
	ipa_bits = realm_ipa_bits(rd);
	buffer_unmap(rd);

	/* Locking order RD->RTT root, then the RD can be released */
	granule_lock(g_rtt_root, GRANULE_STATE_RTT);
	granule_unlock(g_rd);

	/* The walk may stop above @level; report wi.last_level to the host */
	rtt_walk_lock_unlock(g_rtt_root, sl, ipa_bits,
			     map_addr, level, &wi);
	s2tt = granule_map(wi.g_llt, SLOT_RTT);
	s2tte = s2tte_read(&s2tt[wi.index]);
	ret->x[1] = wi.last_level;

	/* Decode the entry: state, output address and RIPAS */
	if (s2tte_is_unassigned_empty(s2tte)) {
		ret->x[2] = RMI_UNASSIGNED;
		ret->x[3] = 0UL;
		ret->x[4] = RIPAS_EMPTY;
	} else if (s2tte_is_unassigned_ram(s2tte)) {
		ret->x[2] = RMI_UNASSIGNED;
		ret->x[3] = 0UL;
		ret->x[4] = RIPAS_RAM;
	} else if (s2tte_is_unassigned_destroyed(s2tte)) {
		ret->x[2] = RMI_UNASSIGNED;
		ret->x[3] = 0UL;
		ret->x[4] = RIPAS_DESTROYED;
	} else if (s2tte_is_assigned_empty(s2tte, wi.last_level)) {
		ret->x[2] = RMI_ASSIGNED;
		ret->x[3] = s2tte_pa(s2tte, wi.last_level);
		ret->x[4] = RIPAS_EMPTY;
	} else if (s2tte_is_assigned_ram(s2tte, wi.last_level)) {
		ret->x[2] = RMI_ASSIGNED;
		ret->x[3] = s2tte_pa(s2tte, wi.last_level);
		ret->x[4] = RIPAS_RAM;
	} else if (s2tte_is_assigned_destroyed(s2tte, wi.last_level)) {
		ret->x[2] = RMI_ASSIGNED;
		ret->x[3] = 0UL;
		ret->x[4] = RIPAS_DESTROYED;
	} else if (s2tte_is_unassigned_ns(s2tte)) {
		ret->x[2] = RMI_UNASSIGNED;
		ret->x[3] = 0UL;
		ret->x[4] = RIPAS_EMPTY;
	} else if (s2tte_is_assigned_ns(s2tte, wi.last_level)) {
		ret->x[2] = RMI_ASSIGNED;
		/* NS entries return the host-supplied attributes/PA */
		ret->x[3] = host_ns_s2tte(s2tte, wi.last_level);
		ret->x[4] = RIPAS_EMPTY;
	} else if (s2tte_is_table(s2tte, wi.last_level)) {
		ret->x[2] = RMI_TABLE;
		ret->x[3] = s2tte_pa_table(s2tte, wi.last_level);
		ret->x[4] = RIPAS_EMPTY;
	} else {
		assert(false);
	}

	buffer_unmap(s2tt);
	granule_unlock(wi.g_llt);

	ret->x[0] = RMI_SUCCESS;
}
798
799static void data_granule_measure(struct rd *rd, void *data,
800 unsigned long ipa,
801 unsigned long flags)
802{
803 struct measurement_desc_data measure_desc = {0};
804
805 /* Initialize the measurement descriptior structure */
806 measure_desc.desc_type = MEASURE_DESC_TYPE_DATA;
807 measure_desc.len = sizeof(struct measurement_desc_data);
808 measure_desc.ipa = ipa;
809 measure_desc.flags = flags;
810 memcpy(measure_desc.rim,
811 &rd->measurement[RIM_MEASUREMENT_SLOT],
812 measurement_get_size(rd->algorithm));
813
814 if (flags == RMI_MEASURE_CONTENT) {
815 /*
816 * Hashing the data granules and store the result in the
817 * measurement descriptor structure.
818 */
819 measurement_hash_compute(rd->algorithm,
820 data,
821 GRANULE_SIZE,
822 measure_desc.content);
823 }
824
825 /*
826 * Hashing the measurement descriptor structure; the result is the
827 * updated RIM.
828 */
829 measurement_hash_compute(rd->algorithm,
830 &measure_desc,
831 sizeof(measure_desc),
832 rd->measurement[RIM_MEASUREMENT_SLOT]);
833}
834
835static unsigned long validate_data_create_unknown(unsigned long map_addr,
836 struct rd *rd)
837{
838 if (!addr_in_par(rd, map_addr)) {
839 return RMI_ERROR_INPUT;
840 }
841
842 if (!validate_map_addr(map_addr, RTT_PAGE_LEVEL, rd)) {
843 return RMI_ERROR_INPUT;
844 }
845
846 return RMI_SUCCESS;
847}
848
849static unsigned long validate_data_create(unsigned long map_addr,
850 struct rd *rd)
851{
852 if (get_rd_state_locked(rd) != REALM_STATE_NEW) {
853 return RMI_ERROR_REALM;
854 }
855
856 return validate_data_create_unknown(map_addr, rd);
857}
858
/*
 * Implements both RMI_DATA_CREATE and RMI_DATA_CREATE_UNKNOWN.
 *
 * If @g_src == NULL, implements RMI_DATA_CREATE_UNKNOWN,
 * and RMI_DATA_CREATE otherwise.
 */
AlexeiFedorovac923c82023-04-06 15:12:04 +0100865static unsigned long data_create(unsigned long rd_addr,
866 unsigned long data_addr,
Soby Mathewb4c6df42022-11-09 11:13:29 +0000867 unsigned long map_addr,
868 struct granule *g_src,
869 unsigned long flags)
870{
871 struct granule *g_data;
872 struct granule *g_rd;
873 struct granule *g_table_root;
874 struct rd *rd;
875 struct rtt_walk wi;
876 unsigned long s2tte, *s2tt;
Soby Mathewb4c6df42022-11-09 11:13:29 +0000877 enum granule_state new_data_state = GRANULE_STATE_DELEGATED;
878 unsigned long ipa_bits;
879 unsigned long ret;
880 int __unused meas_ret;
881 int sl;
882
883 if (!find_lock_two_granules(data_addr,
884 GRANULE_STATE_DELEGATED,
885 &g_data,
886 rd_addr,
887 GRANULE_STATE_RD,
888 &g_rd)) {
889 return RMI_ERROR_INPUT;
890 }
891
892 rd = granule_map(g_rd, SLOT_RD);
893
894 ret = (g_src != NULL) ?
895 validate_data_create(map_addr, rd) :
896 validate_data_create_unknown(map_addr, rd);
897
898 if (ret != RMI_SUCCESS) {
899 goto out_unmap_rd;
900 }
901
902 g_table_root = rd->s2_ctx.g_rtt;
903 sl = realm_rtt_starting_level(rd);
904 ipa_bits = realm_ipa_bits(rd);
905 granule_lock(g_table_root, GRANULE_STATE_RTT);
906 rtt_walk_lock_unlock(g_table_root, sl, ipa_bits,
907 map_addr, RTT_PAGE_LEVEL, &wi);
908 if (wi.last_level != RTT_PAGE_LEVEL) {
AlexeiFedorovbe37dee2023-07-18 10:44:01 +0100909 ret = pack_return_code(RMI_ERROR_RTT,
910 (unsigned int)wi.last_level);
Soby Mathewb4c6df42022-11-09 11:13:29 +0000911 goto out_unlock_ll_table;
912 }
913
914 s2tt = granule_map(wi.g_llt, SLOT_RTT);
915 s2tte = s2tte_read(&s2tt[wi.index]);
Soby Mathewb4c6df42022-11-09 11:13:29 +0000916
Soby Mathewb4c6df42022-11-09 11:13:29 +0000917 if (g_src != NULL) {
918 bool ns_access_ok;
AlexeiFedorov0f9cd1f2023-07-10 17:04:58 +0100919 void *data;
Soby Mathewb4c6df42022-11-09 11:13:29 +0000920
AlexeiFedorov0f9cd1f2023-07-10 17:04:58 +0100921 if (!s2tte_is_unassigned_ram(s2tte)) {
922 ret = pack_return_code(RMI_ERROR_RTT, RTT_PAGE_LEVEL);
923 goto out_unmap_ll_table;
924 }
925
926 data = granule_map(g_data, SLOT_DELEGATED);
Soby Mathewb4c6df42022-11-09 11:13:29 +0000927 ns_access_ok = ns_buffer_read(SLOT_NS, g_src, 0U,
928 GRANULE_SIZE, data);
Soby Mathewb4c6df42022-11-09 11:13:29 +0000929 if (!ns_access_ok) {
930 /*
931 * Some data may be copied before the failure. Zero
932 * g_data granule as it will remain in delegated state.
933 */
934 (void)memset(data, 0, GRANULE_SIZE);
935 buffer_unmap(data);
936 ret = RMI_ERROR_INPUT;
937 goto out_unmap_ll_table;
938 }
939
Soby Mathewb4c6df42022-11-09 11:13:29 +0000940 data_granule_measure(rd, data, map_addr, flags);
Soby Mathewb4c6df42022-11-09 11:13:29 +0000941 buffer_unmap(data);
AlexeiFedorov0f9cd1f2023-07-10 17:04:58 +0100942
943 } else if (!s2tte_is_unassigned(s2tte)) {
944 ret = pack_return_code(RMI_ERROR_RTT, RTT_PAGE_LEVEL);
945 goto out_unmap_ll_table;
Soby Mathewb4c6df42022-11-09 11:13:29 +0000946 }
947
948 new_data_state = GRANULE_STATE_DATA;
949
AlexeiFedorovcde1fdc2023-04-18 16:37:25 +0100950 s2tte = s2tte_create_assigned_ram(data_addr, RTT_PAGE_LEVEL);
Soby Mathewb4c6df42022-11-09 11:13:29 +0000951
952 s2tte_write(&s2tt[wi.index], s2tte);
953 __granule_get(wi.g_llt);
954
955 ret = RMI_SUCCESS;
956
957out_unmap_ll_table:
958 buffer_unmap(s2tt);
959out_unlock_ll_table:
960 granule_unlock(wi.g_llt);
961out_unmap_rd:
962 buffer_unmap(rd);
963 granule_unlock(g_rd);
964 granule_unlock_transition(g_data, new_data_state);
965 return ret;
966}
967
AlexeiFedorovac923c82023-04-06 15:12:04 +0100968unsigned long smc_data_create(unsigned long rd_addr,
969 unsigned long data_addr,
Soby Mathewb4c6df42022-11-09 11:13:29 +0000970 unsigned long map_addr,
971 unsigned long src_addr,
972 unsigned long flags)
973{
974 struct granule *g_src;
Soby Mathewb4c6df42022-11-09 11:13:29 +0000975
976 if (flags != RMI_NO_MEASURE_CONTENT && flags != RMI_MEASURE_CONTENT) {
977 return RMI_ERROR_INPUT;
978 }
979
980 g_src = find_granule(src_addr);
981 if ((g_src == NULL) || (g_src->state != GRANULE_STATE_NS)) {
982 return RMI_ERROR_INPUT;
983 }
984
AlexeiFedorovac923c82023-04-06 15:12:04 +0100985 return data_create(rd_addr, data_addr, map_addr, g_src, flags);
Soby Mathewb4c6df42022-11-09 11:13:29 +0000986}
987
AlexeiFedorovac923c82023-04-06 15:12:04 +0100988unsigned long smc_data_create_unknown(unsigned long rd_addr,
989 unsigned long data_addr,
Soby Mathewb4c6df42022-11-09 11:13:29 +0000990 unsigned long map_addr)
991{
AlexeiFedorovac923c82023-04-06 15:12:04 +0100992 return data_create(rd_addr, data_addr, map_addr, NULL, 0);
Soby Mathewb4c6df42022-11-09 11:13:29 +0000993}
994
AlexeiFedorove2002be2023-04-19 17:20:12 +0100995void smc_data_destroy(unsigned long rd_addr,
996 unsigned long map_addr,
997 struct smc_result *res)
Soby Mathewb4c6df42022-11-09 11:13:29 +0000998{
999 struct granule *g_data;
1000 struct granule *g_rd;
1001 struct granule *g_table_root;
1002 struct rtt_walk wi;
1003 unsigned long data_addr, s2tte, *s2tt;
1004 struct rd *rd;
AlexeiFedorov917eabf2023-04-24 12:20:41 +01001005 unsigned long ipa_bits;
Soby Mathewb4c6df42022-11-09 11:13:29 +00001006 struct realm_s2_context s2_ctx;
Soby Mathewb4c6df42022-11-09 11:13:29 +00001007 int sl;
1008
1009 g_rd = find_lock_granule(rd_addr, GRANULE_STATE_RD);
1010 if (g_rd == NULL) {
AlexeiFedorove2002be2023-04-19 17:20:12 +01001011 res->x[0] = RMI_ERROR_INPUT;
AlexeiFedorova1b2a1d2023-07-18 15:08:47 +01001012 res->x[2] = 0UL;
AlexeiFedorove2002be2023-04-19 17:20:12 +01001013 return;
Soby Mathewb4c6df42022-11-09 11:13:29 +00001014 }
1015
1016 rd = granule_map(g_rd, SLOT_RD);
1017
1018 if (!validate_map_addr(map_addr, RTT_PAGE_LEVEL, rd)) {
1019 buffer_unmap(rd);
1020 granule_unlock(g_rd);
AlexeiFedorove2002be2023-04-19 17:20:12 +01001021 res->x[0] = RMI_ERROR_INPUT;
AlexeiFedorova1b2a1d2023-07-18 15:08:47 +01001022 res->x[2] = 0UL;
AlexeiFedorove2002be2023-04-19 17:20:12 +01001023 return;
Soby Mathewb4c6df42022-11-09 11:13:29 +00001024 }
1025
1026 g_table_root = rd->s2_ctx.g_rtt;
1027 sl = realm_rtt_starting_level(rd);
1028 ipa_bits = realm_ipa_bits(rd);
1029 s2_ctx = rd->s2_ctx;
1030 buffer_unmap(rd);
1031
1032 granule_lock(g_table_root, GRANULE_STATE_RTT);
1033 granule_unlock(g_rd);
1034
1035 rtt_walk_lock_unlock(g_table_root, sl, ipa_bits,
1036 map_addr, RTT_PAGE_LEVEL, &wi);
Soby Mathewb4c6df42022-11-09 11:13:29 +00001037
1038 s2tt = granule_map(wi.g_llt, SLOT_RTT);
AlexeiFedorov917eabf2023-04-24 12:20:41 +01001039 if (wi.last_level != RTT_PAGE_LEVEL) {
AlexeiFedorovbe37dee2023-07-18 10:44:01 +01001040 res->x[0] = pack_return_code(RMI_ERROR_RTT,
1041 (unsigned int)wi.last_level);
AlexeiFedorov917eabf2023-04-24 12:20:41 +01001042 goto out_unmap_ll_table;
1043 }
Soby Mathewb4c6df42022-11-09 11:13:29 +00001044
AlexeiFedorov917eabf2023-04-24 12:20:41 +01001045 s2tte = s2tte_read(&s2tt[wi.index]);
AlexeiFedorovc53b1f72023-07-04 15:37:03 +01001046
AlexeiFedorova43cd312023-04-17 11:42:25 +01001047 if (s2tte_is_assigned_ram(s2tte, RTT_PAGE_LEVEL)) {
1048 data_addr = s2tte_pa(s2tte, RTT_PAGE_LEVEL);
AlexeiFedorovc53b1f72023-07-04 15:37:03 +01001049 s2tte = s2tte_create_unassigned_destroyed();
AlexeiFedorova43cd312023-04-17 11:42:25 +01001050 s2tte_write(&s2tt[wi.index], s2tte);
1051 invalidate_page(&s2_ctx, map_addr);
1052 } else if (s2tte_is_assigned_empty(s2tte, RTT_PAGE_LEVEL)) {
1053 data_addr = s2tte_pa(s2tte, RTT_PAGE_LEVEL);
1054 s2tte = s2tte_create_unassigned_empty();
1055 s2tte_write(&s2tt[wi.index], s2tte);
1056 } else {
AlexeiFedorov917eabf2023-04-24 12:20:41 +01001057 res->x[0] = pack_return_code(RMI_ERROR_RTT, RTT_PAGE_LEVEL);
Soby Mathewb4c6df42022-11-09 11:13:29 +00001058 goto out_unmap_ll_table;
1059 }
1060
Soby Mathewb4c6df42022-11-09 11:13:29 +00001061 __granule_put(wi.g_llt);
1062
1063 /*
1064 * Lock the data granule and check expected state. Correct locking order
1065 * is guaranteed because granule address is obtained from a locked
1066 * granule by table walk. This lock needs to be acquired before a state
1067 * transition to or from GRANULE_STATE_DATA for granule address can happen.
1068 */
1069 g_data = find_lock_granule(data_addr, GRANULE_STATE_DATA);
AlexeiFedorov63b71692023-04-19 11:18:42 +01001070 assert(g_data != NULL);
Soby Mathewb4c6df42022-11-09 11:13:29 +00001071 granule_memzero(g_data, SLOT_DELEGATED);
1072 granule_unlock_transition(g_data, GRANULE_STATE_DELEGATED);
1073
AlexeiFedorov917eabf2023-04-24 12:20:41 +01001074 res->x[0] = RMI_SUCCESS;
AlexeiFedorove2002be2023-04-19 17:20:12 +01001075 res->x[1] = data_addr;
Soby Mathewb4c6df42022-11-09 11:13:29 +00001076out_unmap_ll_table:
AlexeiFedorov917eabf2023-04-24 12:20:41 +01001077 res->x[2] = skip_non_live_entries(map_addr, s2tt, &wi);
Soby Mathewb4c6df42022-11-09 11:13:29 +00001078 buffer_unmap(s2tt);
Soby Mathewb4c6df42022-11-09 11:13:29 +00001079 granule_unlock(wi.g_llt);
Soby Mathewb4c6df42022-11-09 11:13:29 +00001080}
1081
AlexeiFedorov0fb44552023-04-14 15:37:58 +01001082/*
AlexeiFedorov5cf35ba2023-04-25 10:02:20 +01001083 * Update the ripas value for the entry pointed by @s2ttep.
AlexeiFedorov0fb44552023-04-14 15:37:58 +01001084 *
AlexeiFedorov5cf35ba2023-04-25 10:02:20 +01001085 * Returns:
1086 * < 0 - On error and the operation was aborted,
1087 * e.g., entry cannot have a ripas.
1088 * 0 - Operation was success and no TLBI is required.
1089 * > 0 - Operation was success and TLBI is required.
1090 * Sets:
1091 * @(*do_tlbi) to 'true' if the TLBs have to be invalidated.
AlexeiFedorov0fb44552023-04-14 15:37:58 +01001092 */
AlexeiFedorov5cf35ba2023-04-25 10:02:20 +01001093static int update_ripas(unsigned long *s2ttep, unsigned long level,
AlexeiFedorov63614ea2023-07-14 17:07:20 +01001094 enum ripas ripas_val,
1095 enum ripas_change_destroyed change_destroyed)
Soby Mathewb4c6df42022-11-09 11:13:29 +00001096{
AlexeiFedorov5cf35ba2023-04-25 10:02:20 +01001097 unsigned long pa, s2tte = s2tte_read(s2ttep);
1098 int ret = 0;
AlexeiFedorov0fb44552023-04-14 15:37:58 +01001099
AlexeiFedorov5cf35ba2023-04-25 10:02:20 +01001100 if (!s2tte_has_ripas(s2tte, level)) {
1101 return -1;
Soby Mathewb4c6df42022-11-09 11:13:29 +00001102 }
1103
AlexeiFedorov0fb44552023-04-14 15:37:58 +01001104 if (ripas_val == RIPAS_RAM) {
AlexeiFedorov5cf35ba2023-04-25 10:02:20 +01001105 if (s2tte_is_unassigned_empty(s2tte)) {
1106 s2tte = s2tte_create_unassigned_ram();
AlexeiFedorov63614ea2023-07-14 17:07:20 +01001107 } else if (s2tte_is_unassigned_destroyed(s2tte)) {
1108 if (change_destroyed == CHANGE_DESTROYED) {
1109 s2tte = s2tte_create_unassigned_ram();
1110 } else {
1111 return -1;
1112 }
AlexeiFedorov5cf35ba2023-04-25 10:02:20 +01001113 } else if (s2tte_is_assigned_empty(s2tte, level)) {
1114 pa = s2tte_pa(s2tte, level);
1115 s2tte = s2tte_create_assigned_ram(pa, level);
AlexeiFedorov63614ea2023-07-14 17:07:20 +01001116 } else if (s2tte_is_assigned_destroyed(s2tte, level)) {
1117 if (change_destroyed == CHANGE_DESTROYED) {
1118 pa = s2tte_pa(s2tte, level);
1119 s2tte = s2tte_create_assigned_ram(pa, level);
1120 } else {
1121 return -1;
1122 }
AlexeiFedorov5cf35ba2023-04-25 10:02:20 +01001123 } else {
1124 /* No action is required */
1125 return 0;
Soby Mathewb4c6df42022-11-09 11:13:29 +00001126 }
AlexeiFedorov0fb44552023-04-14 15:37:58 +01001127 } else if (ripas_val == RIPAS_EMPTY) {
AlexeiFedorov5cf35ba2023-04-25 10:02:20 +01001128 if (s2tte_is_unassigned_ram(s2tte)) {
1129 s2tte = s2tte_create_unassigned_empty();
AlexeiFedorov63614ea2023-07-14 17:07:20 +01001130 } else if (s2tte_is_unassigned_destroyed(s2tte)) {
1131 if (change_destroyed == CHANGE_DESTROYED) {
1132 s2tte = s2tte_create_unassigned_empty();
1133 } else {
1134 return -1;
1135 }
AlexeiFedorov5cf35ba2023-04-25 10:02:20 +01001136 } else if (s2tte_is_assigned_ram(s2tte, level)) {
1137 pa = s2tte_pa(s2tte, level);
1138 s2tte = s2tte_create_assigned_empty(pa, level);
1139 /* TLBI is required */
1140 ret = 1;
AlexeiFedorov63614ea2023-07-14 17:07:20 +01001141 } else if (s2tte_is_assigned_destroyed(s2tte, level)) {
1142 if (change_destroyed == CHANGE_DESTROYED) {
1143 pa = s2tte_pa(s2tte, level);
1144 s2tte = s2tte_create_assigned_empty(pa, level);
1145 /* TLBI is required */
1146 ret = 1;
1147 } else {
1148 return -1;
1149 }
AlexeiFedorov5cf35ba2023-04-25 10:02:20 +01001150 } else {
1151 /* No action is required */
1152 return 0;
AlexeiFedorov0fb44552023-04-14 15:37:58 +01001153 }
Soby Mathewb4c6df42022-11-09 11:13:29 +00001154 }
AlexeiFedorov5cf35ba2023-04-25 10:02:20 +01001155 s2tte_write(s2ttep, s2tte);
1156 return ret;
Soby Mathewb4c6df42022-11-09 11:13:29 +00001157}
1158
1159static void ripas_granule_measure(struct rd *rd,
AlexeiFedorov960d1612023-04-25 13:23:39 +01001160 unsigned long base,
1161 unsigned long top)
Soby Mathewb4c6df42022-11-09 11:13:29 +00001162{
1163 struct measurement_desc_ripas measure_desc = {0};
1164
1165 /* Initialize the measurement descriptior structure */
1166 measure_desc.desc_type = MEASURE_DESC_TYPE_RIPAS;
1167 measure_desc.len = sizeof(struct measurement_desc_ripas);
AlexeiFedorov960d1612023-04-25 13:23:39 +01001168 measure_desc.base = base;
1169 measure_desc.top = top;
AlexeiFedorov5cf35ba2023-04-25 10:02:20 +01001170 (void)memcpy(measure_desc.rim,
1171 &rd->measurement[RIM_MEASUREMENT_SLOT],
1172 measurement_get_size(rd->algorithm));
Soby Mathewb4c6df42022-11-09 11:13:29 +00001173
1174 /*
1175 * Hashing the measurement descriptor structure; the result is the
1176 * updated RIM.
1177 */
1178 measurement_hash_compute(rd->algorithm,
1179 &measure_desc,
1180 sizeof(measure_desc),
1181 rd->measurement[RIM_MEASUREMENT_SLOT]);
1182}
1183
AlexeiFedorov960d1612023-04-25 13:23:39 +01001184void smc_rtt_init_ripas(unsigned long rd_addr,
1185 unsigned long base,
1186 unsigned long top,
1187 struct smc_result *res)
Soby Mathewb4c6df42022-11-09 11:13:29 +00001188{
1189 struct granule *g_rd, *g_rtt_root;
1190 struct rd *rd;
AlexeiFedorov960d1612023-04-25 13:23:39 +01001191 unsigned long ipa_bits, addr, map_size;
Soby Mathewb4c6df42022-11-09 11:13:29 +00001192 struct rtt_walk wi;
1193 unsigned long s2tte, *s2tt;
AlexeiFedorov960d1612023-04-25 13:23:39 +01001194 long level;
1195 unsigned int index;
Soby Mathewb4c6df42022-11-09 11:13:29 +00001196 int sl;
1197
1198 g_rd = find_lock_granule(rd_addr, GRANULE_STATE_RD);
1199 if (g_rd == NULL) {
AlexeiFedorov960d1612023-04-25 13:23:39 +01001200 res->x[0] = RMI_ERROR_INPUT;
1201 return;
Soby Mathewb4c6df42022-11-09 11:13:29 +00001202 }
1203
1204 rd = granule_map(g_rd, SLOT_RD);
1205
1206 if (get_rd_state_locked(rd) != REALM_STATE_NEW) {
1207 buffer_unmap(rd);
1208 granule_unlock(g_rd);
AlexeiFedorov960d1612023-04-25 13:23:39 +01001209 res->x[0] = RMI_ERROR_REALM;
1210 return;
Soby Mathewb4c6df42022-11-09 11:13:29 +00001211 }
1212
AlexeiFedorov960d1612023-04-25 13:23:39 +01001213 if (!validate_map_addr(base, RTT_PAGE_LEVEL, rd) ||
1214 !validate_rtt_entry_cmds(top, RTT_PAGE_LEVEL, rd)) {
Soby Mathewb4c6df42022-11-09 11:13:29 +00001215 buffer_unmap(rd);
1216 granule_unlock(g_rd);
AlexeiFedorov960d1612023-04-25 13:23:39 +01001217 res->x[0] = RMI_ERROR_INPUT;
1218 return;
Soby Mathewb4c6df42022-11-09 11:13:29 +00001219 }
1220
AlexeiFedorov960d1612023-04-25 13:23:39 +01001221 if (!addr_in_par(rd, base) || !addr_in_par(rd, top)) {
Soby Mathewb4c6df42022-11-09 11:13:29 +00001222 buffer_unmap(rd);
1223 granule_unlock(g_rd);
AlexeiFedorov960d1612023-04-25 13:23:39 +01001224 res->x[0] = RMI_ERROR_INPUT;
1225 return;
Soby Mathewb4c6df42022-11-09 11:13:29 +00001226 }
1227
1228 g_rtt_root = rd->s2_ctx.g_rtt;
1229 sl = realm_rtt_starting_level(rd);
1230 ipa_bits = realm_ipa_bits(rd);
1231
1232 granule_lock(g_rtt_root, GRANULE_STATE_RTT);
Soby Mathewb4c6df42022-11-09 11:13:29 +00001233
1234 rtt_walk_lock_unlock(g_rtt_root, sl, ipa_bits,
AlexeiFedorov960d1612023-04-25 13:23:39 +01001235 base, RTT_PAGE_LEVEL, &wi);
1236 level = wi.last_level;
Soby Mathewb4c6df42022-11-09 11:13:29 +00001237 s2tt = granule_map(wi.g_llt, SLOT_RTT);
AlexeiFedorov960d1612023-04-25 13:23:39 +01001238 map_size = s2tte_map_size(level);
1239 addr = base & ~(map_size - 1UL);
Soby Mathewb4c6df42022-11-09 11:13:29 +00001240
AlexeiFedorov960d1612023-04-25 13:23:39 +01001241 /*
1242 * If the RTTE covers a range below "base", we need to
1243 * go deeper.
1244 */
1245 if (addr != base) {
1246 res->x[0] = pack_return_code(RMI_ERROR_RTT,
1247 (unsigned int)level);
Soby Mathewb4c6df42022-11-09 11:13:29 +00001248 goto out_unmap_llt;
1249 }
1250
AlexeiFedorov960d1612023-04-25 13:23:39 +01001251 for (index = wi.index; index < S2TTES_PER_S2TT;
1252 index++, addr += map_size) {
1253 unsigned long next = addr + map_size;
1254
1255 if (next > top) {
1256 break;
1257 }
1258
1259 s2tte = s2tte_read(&s2tt[index]);
1260 if (s2tte_is_unassigned_empty(s2tte)) {
1261 s2tte = s2tte_create_unassigned_ram();
1262 s2tte_write(&s2tt[index], s2tte);
1263 } else if (!s2tte_is_unassigned_ram(s2tte)) {
1264 break;
1265 }
1266 ripas_granule_measure(rd, addr, next);
1267 }
1268
1269 if (addr > base) {
1270 res->x[0] = RMI_SUCCESS;
1271 res->x[1] = addr;
1272 } else {
1273 res->x[0] = pack_return_code(RMI_ERROR_RTT,
1274 (unsigned int)level);
1275 }
Soby Mathewb4c6df42022-11-09 11:13:29 +00001276
1277out_unmap_llt:
1278 buffer_unmap(s2tt);
Soby Mathewb4c6df42022-11-09 11:13:29 +00001279 buffer_unmap(rd);
1280 granule_unlock(wi.g_llt);
AlexeiFedorov80295e42023-07-10 13:11:14 +01001281 granule_unlock(g_rd);
Soby Mathewb4c6df42022-11-09 11:13:29 +00001282}
1283
AlexeiFedorov5cf35ba2023-04-25 10:02:20 +01001284static void rtt_set_ripas_range(struct realm_s2_context *s2_ctx,
1285 unsigned long *s2tt,
1286 unsigned long base,
1287 unsigned long top,
1288 struct rtt_walk *wi,
1289 unsigned long ripas_val,
AlexeiFedorov63614ea2023-07-14 17:07:20 +01001290 enum ripas_change_destroyed change_destroyed,
AlexeiFedorov5cf35ba2023-04-25 10:02:20 +01001291 struct smc_result *res)
1292{
AlexeiFedorov63614ea2023-07-14 17:07:20 +01001293 unsigned long index = wi->index;
AlexeiFedorov5cf35ba2023-04-25 10:02:20 +01001294 long level = wi->last_level;
1295 unsigned long map_size = s2tte_map_size(level);
1296
1297 /* Align to the RTT level */
AlexeiFedorov63614ea2023-07-14 17:07:20 +01001298 unsigned long addr = base & ~(map_size - 1UL);
AlexeiFedorov5cf35ba2023-04-25 10:02:20 +01001299
1300 /* Make sure we don't touch a range below the requested range */
1301 if (addr != base) {
AlexeiFedorovbe37dee2023-07-18 10:44:01 +01001302 res->x[0] = pack_return_code(RMI_ERROR_RTT,
1303 (unsigned int)level);
AlexeiFedorov5cf35ba2023-04-25 10:02:20 +01001304 return;
1305 }
1306
AlexeiFedorov63614ea2023-07-14 17:07:20 +01001307 for (index = wi->index; index < S2TTES_PER_S2TT; addr += map_size) {
AlexeiFedorov5cf35ba2023-04-25 10:02:20 +01001308 int ret;
1309
AlexeiFedorov63614ea2023-07-14 17:07:20 +01001310 /* If this entry crosses the range, break. */
1311 if (addr + map_size > top) {
AlexeiFedorov5cf35ba2023-04-25 10:02:20 +01001312 break;
1313 }
1314
AlexeiFedorov63614ea2023-07-14 17:07:20 +01001315 ret = update_ripas(&s2tt[index++], level,
1316 ripas_val, change_destroyed);
AlexeiFedorov5cf35ba2023-04-25 10:02:20 +01001317 if (ret < 0) {
1318 break;
1319 }
1320
1321 /* Handle TLBI */
1322 if (ret != 0) {
1323 if (level == RTT_PAGE_LEVEL) {
1324 invalidate_page(s2_ctx, addr);
1325 } else {
1326 invalidate_block(s2_ctx, addr);
1327 }
1328 }
1329 }
1330
1331 if (addr > base) {
1332 res->x[0] = RMI_SUCCESS;
1333 res->x[1] = addr;
1334 } else {
AlexeiFedorovbe37dee2023-07-18 10:44:01 +01001335 res->x[0] = pack_return_code(RMI_ERROR_RTT,
1336 (unsigned int)level);
AlexeiFedorov5cf35ba2023-04-25 10:02:20 +01001337 }
1338}
1339
/*
 * Implements RMI_RTT_SET_RIPAS: apply the RIPAS change requested by the
 * realm (recorded in the REC's set_ripas state) to the IPA range
 * [base, top). @res reports:
 * x[0] = status, x[1] = first IPA not processed (on success).
 */
void smc_rtt_set_ripas(unsigned long rd_addr,
		       unsigned long rec_addr,
		       unsigned long base,
		       unsigned long top,
		       struct smc_result *res)
{
	struct granule *g_rd, *g_rec, *g_rtt_root;
	struct rec *rec;
	struct rd *rd;
	unsigned long ipa_bits;
	struct rtt_walk wi;
	unsigned long *s2tt;
	struct realm_s2_context s2_ctx;
	enum ripas ripas_val;
	enum ripas_change_destroyed change_destroyed;
	int sl;

	if (!find_lock_two_granules(rd_addr,
				    GRANULE_STATE_RD,
				    &g_rd,
				    rec_addr,
				    GRANULE_STATE_REC,
				    &g_rec)) {
		res->x[0] = RMI_ERROR_INPUT;
		return;
	}

	/* The REC must not be in use elsewhere */
	if (granule_refcount_read_acquire(g_rec) != 0UL) {
		res->x[0] = RMI_ERROR_REC;
		goto out_unlock_rec_rd;
	}

	rec = granule_map(g_rec, SLOT_REC);

	/* The REC must belong to the supplied realm */
	if (g_rd != rec->realm_info.g_rd) {
		res->x[0] = RMI_ERROR_REC;
		goto out_unmap_rec;
	}

	/* The requested change was recorded by the realm in the REC */
	ripas_val = rec->set_ripas.ripas_val;
	change_destroyed = rec->set_ripas.change_destroyed;

	/*
	 * Return error in case of target region:
	 * - is not the next chunk of requested region
	 * - extends beyond the end of requested region
	 */
	if ((base != rec->set_ripas.addr) || (top > rec->set_ripas.top)) {
		res->x[0] = RMI_ERROR_INPUT;
		goto out_unmap_rec;
	}

	rd = granule_map(g_rd, SLOT_RD);

	/*
	 * At this point, we know base == rec->set_ripas.addr,
	 * which is therefore already aligned to the granule size.
	 */
	assert(validate_map_addr(base, RTT_PAGE_LEVEL, rd));

	if (!validate_map_addr(top, RTT_PAGE_LEVEL, rd)) {
		res->x[0] = RMI_ERROR_INPUT;
		goto out_unmap_rd;
	}

	g_rtt_root = rd->s2_ctx.g_rtt;
	sl = realm_rtt_starting_level(rd);
	ipa_bits = realm_ipa_bits(rd);
	s2_ctx = rd->s2_ctx;

	granule_lock(g_rtt_root, GRANULE_STATE_RTT);

	/* Walk to the deepest level possible */
	rtt_walk_lock_unlock(g_rtt_root, sl, ipa_bits,
			     base, RTT_PAGE_LEVEL, &wi);

	s2tt = granule_map(wi.g_llt, SLOT_RTT);

	rtt_set_ripas_range(&s2_ctx, s2tt, base, top, &wi,
			    ripas_val, change_destroyed, res);

	/* Record progress so the realm can continue from res->x[1] */
	if (res->x[0] == RMI_SUCCESS) {
		rec->set_ripas.addr = res->x[1];
	}

	buffer_unmap(s2tt);
	granule_unlock(wi.g_llt);
out_unmap_rd:
	buffer_unmap(rd);
out_unmap_rec:
	buffer_unmap(rec);
out_unlock_rec_rd:
	granule_unlock(g_rec);
	granule_unlock(g_rd);
}