blob: 915b99886b999051021b343b4089c537ba14be01 [file] [log] [blame]
/*
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * SPDX-FileCopyrightText: Copyright TF-RMM Contributors.
 */
6
#include <assert.h>
#include <buffer.h>
#include <granule.h>
#include <measurement.h>
#include <realm.h>
#include <ripas.h>
#include <smc-handler.h>
#include <smc-rmi.h>
#include <smc.h>
#include <stddef.h>
#include <string.h>
#include <table.h>
18
19/*
20 * Validate the map_addr value passed to RMI_RTT_* and RMI_DATA_* commands.
21 */
22static bool validate_map_addr(unsigned long map_addr,
23 unsigned long level,
24 struct rd *rd)
25{
26
27 if (map_addr >= realm_ipa_size(rd)) {
28 return false;
29 }
30 if (!addr_is_level_aligned(map_addr, level)) {
31 return false;
32 }
33 return true;
34}
35
36/*
37 * Structure commands can operate on all RTTs except for the root RTT so
38 * the minimal valid level is the stage 2 starting level + 1.
39 */
40static bool validate_rtt_structure_cmds(unsigned long map_addr,
41 long level,
42 struct rd *rd)
43{
44 int min_level = realm_rtt_starting_level(rd) + 1;
45
46 if ((level < min_level) || (level > RTT_PAGE_LEVEL)) {
47 return false;
48 }
49 return validate_map_addr(map_addr, level, rd);
50}
51
52/*
53 * Map/Unmap commands can operate up to a level 2 block entry so min_level is
54 * the smallest block size.
55 */
56static bool validate_rtt_map_cmds(unsigned long map_addr,
57 long level,
58 struct rd *rd)
59{
60 if ((level < RTT_MIN_BLOCK_LEVEL) || (level > RTT_PAGE_LEVEL)) {
61 return false;
62 }
63 return validate_map_addr(map_addr, level, rd);
64}
65
66/*
67 * Entry commands can operate on any entry so the minimal valid level is the
68 * stage 2 starting level.
69 */
70static bool validate_rtt_entry_cmds(unsigned long map_addr,
71 long level,
72 struct rd *rd)
73{
74 if ((level < realm_rtt_starting_level(rd)) ||
75 (level > RTT_PAGE_LEVEL)) {
76 return false;
77 }
78 return validate_map_addr(map_addr, level, rd);
79}
80
AlexeiFedorovac923c82023-04-06 15:12:04 +010081unsigned long smc_rtt_create(unsigned long rd_addr,
82 unsigned long rtt_addr,
Soby Mathewb4c6df42022-11-09 11:13:29 +000083 unsigned long map_addr,
84 unsigned long ulevel)
85{
86 struct granule *g_rd;
87 struct granule *g_tbl;
88 struct rd *rd;
89 struct granule *g_table_root;
90 struct rtt_walk wi;
91 unsigned long *s2tt, *parent_s2tt, parent_s2tte;
92 long level = (long)ulevel;
93 unsigned long ipa_bits;
94 unsigned long ret;
95 struct realm_s2_context s2_ctx;
96 int sl;
97
98 if (!find_lock_two_granules(rtt_addr,
99 GRANULE_STATE_DELEGATED,
100 &g_tbl,
101 rd_addr,
102 GRANULE_STATE_RD,
103 &g_rd)) {
104 return RMI_ERROR_INPUT;
105 }
106
107 rd = granule_map(g_rd, SLOT_RD);
108
109 if (!validate_rtt_structure_cmds(map_addr, level, rd)) {
110 buffer_unmap(rd);
111 granule_unlock(g_rd);
112 granule_unlock(g_tbl);
113 return RMI_ERROR_INPUT;
114 }
115
116 g_table_root = rd->s2_ctx.g_rtt;
117 sl = realm_rtt_starting_level(rd);
118 ipa_bits = realm_ipa_bits(rd);
119 s2_ctx = rd->s2_ctx;
120 buffer_unmap(rd);
121
122 /*
123 * Lock the RTT root. Enforcing locking order RD->RTT is enough to
124 * ensure deadlock free locking guarentee.
125 */
126 granule_lock(g_table_root, GRANULE_STATE_RTT);
127
128 /* Unlock RD after locking RTT Root */
129 granule_unlock(g_rd);
130
131 rtt_walk_lock_unlock(g_table_root, sl, ipa_bits,
132 map_addr, level - 1L, &wi);
133 if (wi.last_level != level - 1L) {
AlexeiFedorovbe37dee2023-07-18 10:44:01 +0100134 ret = pack_return_code(RMI_ERROR_RTT,
135 (unsigned int)wi.last_level);
Soby Mathewb4c6df42022-11-09 11:13:29 +0000136 goto out_unlock_llt;
137 }
138
139 parent_s2tt = granule_map(wi.g_llt, SLOT_RTT);
140 parent_s2tte = s2tte_read(&parent_s2tt[wi.index]);
141 s2tt = granule_map(g_tbl, SLOT_DELEGATED);
142
AlexeiFedorovc07a6382023-04-14 11:59:18 +0100143 if (s2tte_is_unassigned_empty(parent_s2tte)) {
144 s2tt_init_unassigned_empty(s2tt);
Soby Mathewb4c6df42022-11-09 11:13:29 +0000145
146 /*
147 * Increase the refcount of the parent, the granule was
148 * locked while table walking and hand-over-hand locking.
149 * Atomicity and acquire/release semantics not required because
150 * the table is accessed always locked.
151 */
152 __granule_get(wi.g_llt);
153
AlexeiFedorovc07a6382023-04-14 11:59:18 +0100154 } else if (s2tte_is_unassigned_ram(parent_s2tte)) {
155 s2tt_init_unassigned_ram(s2tt);
156 __granule_get(wi.g_llt);
157
AlexeiFedorov5ceff352023-04-12 16:17:00 +0100158 } else if (s2tte_is_unassigned_ns(parent_s2tte)) {
159 s2tt_init_unassigned_ns(s2tt);
160 __granule_get(wi.g_llt);
161
AlexeiFedorovc53b1f72023-07-04 15:37:03 +0100162 } else if (s2tte_is_unassigned_destroyed(parent_s2tte)) {
163 s2tt_init_unassigned_destroyed(s2tt);
Soby Mathewb4c6df42022-11-09 11:13:29 +0000164 __granule_get(wi.g_llt);
165
AlexeiFedorovc53b1f72023-07-04 15:37:03 +0100166 } else if (s2tte_is_assigned_destroyed(parent_s2tte, level - 1L)) {
167 unsigned long block_pa;
168
169 /*
170 * We should observe parent assigned s2tte only when
171 * we create tables above this level.
172 */
173 assert(level > RTT_MIN_BLOCK_LEVEL);
174
175 block_pa = s2tte_pa(parent_s2tte, level - 1L);
176
177 s2tt_init_assigned_destroyed(s2tt, block_pa, level);
178
179 /*
180 * Increase the refcount to mark the granule as in-use. refcount
181 * is incremented by S2TTES_PER_S2TT (ref RTT unfolding).
182 */
183 __granule_refcount_inc(g_tbl, S2TTES_PER_S2TT);
184
AlexeiFedorov3a739332023-04-13 13:54:04 +0100185 } else if (s2tte_is_assigned_empty(parent_s2tte, level - 1L)) {
Soby Mathewb4c6df42022-11-09 11:13:29 +0000186 unsigned long block_pa;
187
188 /*
189 * We should observe parent assigned s2tte only when
190 * we create tables above this level.
191 */
192 assert(level > RTT_MIN_BLOCK_LEVEL);
193
194 block_pa = s2tte_pa(parent_s2tte, level - 1L);
195
196 s2tt_init_assigned_empty(s2tt, block_pa, level);
197
198 /*
199 * Increase the refcount to mark the granule as in-use. refcount
200 * is incremented by S2TTES_PER_S2TT (ref RTT unfolding).
201 */
202 __granule_refcount_inc(g_tbl, S2TTES_PER_S2TT);
203
AlexeiFedorov3a739332023-04-13 13:54:04 +0100204 } else if (s2tte_is_assigned_ram(parent_s2tte, level - 1L)) {
Soby Mathewb4c6df42022-11-09 11:13:29 +0000205 unsigned long block_pa;
206
207 /*
208 * We should observe parent valid s2tte only when
209 * we create tables above this level.
210 */
211 assert(level > RTT_MIN_BLOCK_LEVEL);
212
213 /*
214 * Break before make. This may cause spurious S2 aborts.
215 */
216 s2tte_write(&parent_s2tt[wi.index], 0UL);
217 invalidate_block(&s2_ctx, map_addr);
218
219 block_pa = s2tte_pa(parent_s2tte, level - 1L);
220
AlexeiFedorov3a739332023-04-13 13:54:04 +0100221 s2tt_init_assigned_ram(s2tt, block_pa, level);
Soby Mathewb4c6df42022-11-09 11:13:29 +0000222
223 /*
224 * Increase the refcount to mark the granule as in-use. refcount
225 * is incremented by S2TTES_PER_S2TT (ref RTT unfolding).
226 */
227 __granule_refcount_inc(g_tbl, S2TTES_PER_S2TT);
228
AlexeiFedorov3a739332023-04-13 13:54:04 +0100229 } else if (s2tte_is_assigned_ns(parent_s2tte, level - 1L)) {
Soby Mathewb4c6df42022-11-09 11:13:29 +0000230 unsigned long block_pa;
231
232 /*
AlexeiFedorov3a739332023-04-13 13:54:04 +0100233 * We should observe parent assigned_ns s2tte only when
Soby Mathewb4c6df42022-11-09 11:13:29 +0000234 * we create tables above this level.
235 */
236 assert(level > RTT_MIN_BLOCK_LEVEL);
237
238 /*
239 * Break before make. This may cause spurious S2 aborts.
240 */
241 s2tte_write(&parent_s2tt[wi.index], 0UL);
242 invalidate_block(&s2_ctx, map_addr);
243
244 block_pa = s2tte_pa(parent_s2tte, level - 1L);
245
AlexeiFedorov3a739332023-04-13 13:54:04 +0100246 s2tt_init_assigned_ns(s2tt, block_pa, level);
Soby Mathewb4c6df42022-11-09 11:13:29 +0000247
248 /*
249 * Increase the refcount to mark the granule as in-use. refcount
250 * is incremented by S2TTES_PER_S2TT (ref RTT unfolding).
251 */
252 __granule_refcount_inc(g_tbl, S2TTES_PER_S2TT);
253
254 } else if (s2tte_is_table(parent_s2tte, level - 1L)) {
255 ret = pack_return_code(RMI_ERROR_RTT,
256 (unsigned int)(level - 1L));
257 goto out_unmap_table;
258
259 } else {
260 assert(false);
261 }
262
263 ret = RMI_SUCCESS;
264
265 granule_set_state(g_tbl, GRANULE_STATE_RTT);
266
267 parent_s2tte = s2tte_create_table(rtt_addr, level - 1L);
268 s2tte_write(&parent_s2tt[wi.index], parent_s2tte);
269
270out_unmap_table:
271 buffer_unmap(s2tt);
272 buffer_unmap(parent_s2tt);
273out_unlock_llt:
274 granule_unlock(wi.g_llt);
275 granule_unlock(g_tbl);
276 return ret;
277}
278
AlexeiFedorove2002be2023-04-19 17:20:12 +0100279void smc_rtt_fold(unsigned long rd_addr,
280 unsigned long map_addr,
281 unsigned long ulevel,
282 struct smc_result *res)
Soby Mathewb4c6df42022-11-09 11:13:29 +0000283{
284 struct granule *g_rd;
285 struct granule *g_tbl;
286 struct rd *rd;
287 struct granule *g_table_root;
288 struct rtt_walk wi;
289 unsigned long *table, *parent_s2tt, parent_s2tte;
290 long level = (long)ulevel;
AlexeiFedorove2002be2023-04-19 17:20:12 +0100291 unsigned long ipa_bits, rtt_addr;
Soby Mathewb4c6df42022-11-09 11:13:29 +0000292 unsigned long ret;
293 struct realm_s2_context s2_ctx;
294 int sl;
Soby Mathewb4c6df42022-11-09 11:13:29 +0000295
296 g_rd = find_lock_granule(rd_addr, GRANULE_STATE_RD);
297 if (g_rd == NULL) {
AlexeiFedorove2002be2023-04-19 17:20:12 +0100298 res->x[0] = RMI_ERROR_INPUT;
299 return;
Soby Mathewb4c6df42022-11-09 11:13:29 +0000300 }
301
302 rd = granule_map(g_rd, SLOT_RD);
303
304 if (!validate_rtt_structure_cmds(map_addr, level, rd)) {
305 buffer_unmap(rd);
306 granule_unlock(g_rd);
AlexeiFedorove2002be2023-04-19 17:20:12 +0100307 res->x[0] = RMI_ERROR_INPUT;
308 return;
Soby Mathewb4c6df42022-11-09 11:13:29 +0000309 }
310
311 g_table_root = rd->s2_ctx.g_rtt;
312 sl = realm_rtt_starting_level(rd);
313 ipa_bits = realm_ipa_bits(rd);
314 s2_ctx = rd->s2_ctx;
315 buffer_unmap(rd);
316 granule_lock(g_table_root, GRANULE_STATE_RTT);
317 granule_unlock(g_rd);
318
319 rtt_walk_lock_unlock(g_table_root, sl, ipa_bits,
320 map_addr, level - 1L, &wi);
AlexeiFedorovbe37dee2023-07-18 10:44:01 +0100321 if (wi.last_level != level - 1L) {
322 ret = pack_return_code(RMI_ERROR_RTT,
323 (unsigned int)wi.last_level);
Soby Mathewb4c6df42022-11-09 11:13:29 +0000324 goto out_unlock_parent_table;
325 }
326
327 parent_s2tt = granule_map(wi.g_llt, SLOT_RTT);
328 parent_s2tte = s2tte_read(&parent_s2tt[wi.index]);
329 if (!s2tte_is_table(parent_s2tte, level - 1L)) {
330 ret = pack_return_code(RMI_ERROR_RTT,
331 (unsigned int)(level - 1L));
332 goto out_unmap_parent_table;
333 }
334
AlexeiFedorove2002be2023-04-19 17:20:12 +0100335 rtt_addr = s2tte_pa_table(parent_s2tte, level - 1L);
Soby Mathewb4c6df42022-11-09 11:13:29 +0000336 g_tbl = find_lock_granule(rtt_addr, GRANULE_STATE_RTT);
337
338 /*
339 * A table descriptor S2TTE always points to a TABLE granule.
340 */
AlexeiFedorov63b71692023-04-19 11:18:42 +0100341 assert(g_tbl != NULL);
Soby Mathewb4c6df42022-11-09 11:13:29 +0000342
343 table = granule_map(g_tbl, SLOT_RTT2);
344
345 /*
346 * The command can succeed only if all 512 S2TTEs are of the same type.
347 * We first check the table's ref. counter to speed up the case when
348 * the host makes a guess whether a memory region can be folded.
349 */
350 if (g_tbl->refcount == 0UL) {
AlexeiFedorovc53b1f72023-07-04 15:37:03 +0100351 if (table_is_unassigned_destroyed_block(table)) {
352 parent_s2tte = s2tte_create_unassigned_destroyed();
AlexeiFedorovc07a6382023-04-14 11:59:18 +0100353 } else if (table_is_unassigned_empty_block(table)) {
354 parent_s2tte = s2tte_create_unassigned_empty();
AlexeiFedorovc07a6382023-04-14 11:59:18 +0100355 } else if (table_is_unassigned_ram_block(table)) {
356 parent_s2tte = s2tte_create_unassigned_ram();
AlexeiFedorov5ceff352023-04-12 16:17:00 +0100357 } else if (table_is_unassigned_ns_block(table)) {
358 parent_s2tte = s2tte_create_unassigned_ns();
AlexeiFedorov49752c62023-04-24 14:31:14 +0100359 } else if (table_maps_assigned_ns_block(table, level)) {
360 unsigned long s2tte = s2tte_read(&table[0]);
361 unsigned long block_pa = s2tte_pa(s2tte, level);
AlexeiFedorovc07a6382023-04-14 11:59:18 +0100362
AlexeiFedorovbe37dee2023-07-18 10:44:01 +0100363 parent_s2tte = s2tte_create_assigned_ns(block_pa,
364 level - 1L);
Soby Mathewb4c6df42022-11-09 11:13:29 +0000365 } else {
366 /*
367 * The table holds a mixture of destroyed and
368 * unassigned entries.
369 */
AlexeiFedorovbe37dee2023-07-18 10:44:01 +0100370 ret = pack_return_code(RMI_ERROR_RTT,
371 (unsigned int)level);
Soby Mathewb4c6df42022-11-09 11:13:29 +0000372 goto out_unmap_table;
373 }
AlexeiFedorov49752c62023-04-24 14:31:14 +0100374 __granule_put(wi.g_llt);
Soby Mathewb4c6df42022-11-09 11:13:29 +0000375 } else if (g_tbl->refcount == S2TTES_PER_S2TT) {
376
377 unsigned long s2tte, block_pa;
378
379 /* The RMM specification does not allow creating block
380 * entries less than RTT_MIN_BLOCK_LEVEL even though
381 * permitted by the Arm Architecture.
382 * Hence ensure that the table being folded is at a level
383 * higher than the RTT_MIN_BLOCK_LEVEL.
384 *
385 * A fully populated table cannot be destroyed if that
386 * would create a block mapping below RTT_MIN_BLOCK_LEVEL.
387 */
388 if (level <= RTT_MIN_BLOCK_LEVEL) {
AlexeiFedorovbe37dee2023-07-18 10:44:01 +0100389 ret = pack_return_code(RMI_ERROR_RTT,
390 (unsigned int)wi.last_level);
Soby Mathewb4c6df42022-11-09 11:13:29 +0000391 goto out_unmap_table;
392 }
393
394 s2tte = s2tte_read(&table[0]);
AlexeiFedorov80c2f042022-11-25 14:54:46 +0000395 block_pa = s2tte_pa(s2tte, level);
Soby Mathewb4c6df42022-11-09 11:13:29 +0000396
397 /*
AlexeiFedorov3a739332023-04-13 13:54:04 +0100398 * The table must also refer to a contiguous block through the
AlexeiFedorov3f840a02023-07-19 10:55:05 +0100399 * same type of s2tte, either Assigned or Valid.
Soby Mathewb4c6df42022-11-09 11:13:29 +0000400 */
AlexeiFedorov3a739332023-04-13 13:54:04 +0100401 if (table_maps_assigned_empty_block(table, level)) {
402 parent_s2tte = s2tte_create_assigned_empty(block_pa,
403 level - 1L);
404 } else if (table_maps_assigned_ram_block(table, level)) {
405 parent_s2tte = s2tte_create_assigned_ram(block_pa,
406 level - 1L);
AlexeiFedorov3f840a02023-07-19 10:55:05 +0100407 } else if (table_maps_assigned_destroyed_block(table, level)) {
408 parent_s2tte = s2tte_create_assigned_destroyed(block_pa,
409 level - 1L);
AlexeiFedorov80c2f042022-11-25 14:54:46 +0000410 /* The table contains mixed entries that cannot be folded */
Soby Mathewb4c6df42022-11-09 11:13:29 +0000411 } else {
AlexeiFedorovbe37dee2023-07-18 10:44:01 +0100412 ret = pack_return_code(RMI_ERROR_RTT,
413 (unsigned int)level);
AlexeiFedorov80c2f042022-11-25 14:54:46 +0000414 goto out_unmap_table;
Soby Mathewb4c6df42022-11-09 11:13:29 +0000415 }
416
417 __granule_refcount_dec(g_tbl, S2TTES_PER_S2TT);
418 } else {
419 /*
420 * The table holds a mixture of different types of s2ttes.
421 */
AlexeiFedorovbe37dee2023-07-18 10:44:01 +0100422 ret = pack_return_code(RMI_ERROR_RTT,
423 (unsigned int)level);
Soby Mathewb4c6df42022-11-09 11:13:29 +0000424 goto out_unmap_table;
425 }
426
427 ret = RMI_SUCCESS;
AlexeiFedorove2002be2023-04-19 17:20:12 +0100428 res->x[1] = rtt_addr;
Soby Mathewb4c6df42022-11-09 11:13:29 +0000429
430 /*
431 * Break before make.
432 */
433 s2tte_write(&parent_s2tt[wi.index], 0UL);
434
AlexeiFedorov3a739332023-04-13 13:54:04 +0100435 if (s2tte_is_assigned_ram(parent_s2tte, level - 1L) ||
436 s2tte_is_assigned_ns(parent_s2tte, level - 1L)) {
Soby Mathewb4c6df42022-11-09 11:13:29 +0000437 invalidate_pages_in_block(&s2_ctx, map_addr);
438 } else {
439 invalidate_block(&s2_ctx, map_addr);
440 }
441
442 s2tte_write(&parent_s2tt[wi.index], parent_s2tte);
443
444 granule_memzero_mapped(table);
445 granule_set_state(g_tbl, GRANULE_STATE_DELEGATED);
446
447out_unmap_table:
448 buffer_unmap(table);
449 granule_unlock(g_tbl);
450out_unmap_parent_table:
451 buffer_unmap(parent_s2tt);
452out_unlock_parent_table:
453 granule_unlock(wi.g_llt);
AlexeiFedorove2002be2023-04-19 17:20:12 +0100454 res->x[0] = ret;
Soby Mathewb4c6df42022-11-09 11:13:29 +0000455}
456
AlexeiFedorove2002be2023-04-19 17:20:12 +0100457void smc_rtt_destroy(unsigned long rd_addr,
458 unsigned long map_addr,
459 unsigned long ulevel,
460 struct smc_result *res)
Soby Mathewb4c6df42022-11-09 11:13:29 +0000461{
462 struct granule *g_rd;
463 struct granule *g_tbl;
464 struct rd *rd;
465 struct granule *g_table_root;
466 struct rtt_walk wi;
467 unsigned long *table, *parent_s2tt, parent_s2tte;
468 long level = (long)ulevel;
AlexeiFedorove2002be2023-04-19 17:20:12 +0100469 unsigned long ipa_bits, rtt_addr;
Soby Mathewb4c6df42022-11-09 11:13:29 +0000470 unsigned long ret;
471 struct realm_s2_context s2_ctx;
472 int sl;
AlexeiFedorov917eabf2023-04-24 12:20:41 +0100473 bool in_par, skip_non_live = false;
Soby Mathewb4c6df42022-11-09 11:13:29 +0000474
475 g_rd = find_lock_granule(rd_addr, GRANULE_STATE_RD);
476 if (g_rd == NULL) {
AlexeiFedorove2002be2023-04-19 17:20:12 +0100477 res->x[0] = RMI_ERROR_INPUT;
AlexeiFedorov3ebd4622023-07-18 16:27:39 +0100478 res->x[2] = 0UL;
AlexeiFedorove2002be2023-04-19 17:20:12 +0100479 return;
Soby Mathewb4c6df42022-11-09 11:13:29 +0000480 }
481
482 rd = granule_map(g_rd, SLOT_RD);
483
484 if (!validate_rtt_structure_cmds(map_addr, level, rd)) {
485 buffer_unmap(rd);
486 granule_unlock(g_rd);
AlexeiFedorove2002be2023-04-19 17:20:12 +0100487 res->x[0] = RMI_ERROR_INPUT;
AlexeiFedorov3ebd4622023-07-18 16:27:39 +0100488 res->x[2] = 0UL;
AlexeiFedorove2002be2023-04-19 17:20:12 +0100489 return;
Soby Mathewb4c6df42022-11-09 11:13:29 +0000490 }
491
492 g_table_root = rd->s2_ctx.g_rtt;
493 sl = realm_rtt_starting_level(rd);
494 ipa_bits = realm_ipa_bits(rd);
495 s2_ctx = rd->s2_ctx;
496 in_par = addr_in_par(rd, map_addr);
497 buffer_unmap(rd);
498 granule_lock(g_table_root, GRANULE_STATE_RTT);
499 granule_unlock(g_rd);
500
501 rtt_walk_lock_unlock(g_table_root, sl, ipa_bits,
502 map_addr, level - 1L, &wi);
Soby Mathewb4c6df42022-11-09 11:13:29 +0000503
504 parent_s2tt = granule_map(wi.g_llt, SLOT_RTT);
505 parent_s2tte = s2tte_read(&parent_s2tt[wi.index]);
AlexeiFedorov917eabf2023-04-24 12:20:41 +0100506
AlexeiFedorovbe37dee2023-07-18 10:44:01 +0100507 if ((wi.last_level != level - 1L) ||
508 !s2tte_is_table(parent_s2tte, level - 1L)) {
509 ret = pack_return_code(RMI_ERROR_RTT,
510 (unsigned int)wi.last_level);
AlexeiFedorov917eabf2023-04-24 12:20:41 +0100511 skip_non_live = true;
Soby Mathewb4c6df42022-11-09 11:13:29 +0000512 goto out_unmap_parent_table;
513 }
514
AlexeiFedorove2002be2023-04-19 17:20:12 +0100515 rtt_addr = s2tte_pa_table(parent_s2tte, level - 1L);
Soby Mathewb4c6df42022-11-09 11:13:29 +0000516
517 /*
518 * Lock the RTT granule. The 'rtt_addr' is verified, thus can be treated
519 * as an internal granule.
520 */
521 g_tbl = find_lock_granule(rtt_addr, GRANULE_STATE_RTT);
522
523 /*
524 * A table descriptor S2TTE always points to a TABLE granule.
525 */
526 assert(g_tbl != NULL);
527
528 /*
529 * Read the refcount value. RTT granule is always accessed locked, thus
530 * the refcount can be accessed without atomic operations.
531 */
532 if (g_tbl->refcount != 0UL) {
AlexeiFedorovbe37dee2023-07-18 10:44:01 +0100533 ret = pack_return_code(RMI_ERROR_RTT, (unsigned int)level);
Soby Mathewb4c6df42022-11-09 11:13:29 +0000534 goto out_unlock_table;
535 }
536
537 ret = RMI_SUCCESS;
AlexeiFedorove2002be2023-04-19 17:20:12 +0100538 res->x[1] = rtt_addr;
AlexeiFedorov917eabf2023-04-24 12:20:41 +0100539 skip_non_live = true;
Soby Mathewb4c6df42022-11-09 11:13:29 +0000540
541 table = granule_map(g_tbl, SLOT_RTT2);
542
543 if (in_par) {
AlexeiFedorovc53b1f72023-07-04 15:37:03 +0100544 parent_s2tte = s2tte_create_unassigned_destroyed();
Soby Mathewb4c6df42022-11-09 11:13:29 +0000545 } else {
AlexeiFedorov5ceff352023-04-12 16:17:00 +0100546 parent_s2tte = s2tte_create_unassigned_ns();
Soby Mathewb4c6df42022-11-09 11:13:29 +0000547 }
548
549 __granule_put(wi.g_llt);
550
551 /*
552 * Break before make. Note that this may cause spurious S2 aborts.
553 */
554 s2tte_write(&parent_s2tt[wi.index], 0UL);
555 invalidate_block(&s2_ctx, map_addr);
556 s2tte_write(&parent_s2tt[wi.index], parent_s2tte);
557
558 granule_memzero_mapped(table);
559 granule_set_state(g_tbl, GRANULE_STATE_DELEGATED);
560
561 buffer_unmap(table);
562out_unlock_table:
563 granule_unlock(g_tbl);
564out_unmap_parent_table:
AlexeiFedorov917eabf2023-04-24 12:20:41 +0100565 if (skip_non_live) {
566 res->x[2] = skip_non_live_entries(map_addr, parent_s2tt, &wi);
AlexeiFedorov3ebd4622023-07-18 16:27:39 +0100567 } else {
568 res->x[2] = map_addr;
AlexeiFedorov917eabf2023-04-24 12:20:41 +0100569 }
Soby Mathewb4c6df42022-11-09 11:13:29 +0000570 buffer_unmap(parent_s2tt);
Soby Mathewb4c6df42022-11-09 11:13:29 +0000571 granule_unlock(wi.g_llt);
AlexeiFedorove2002be2023-04-19 17:20:12 +0100572 res->x[0] = ret;
Soby Mathewb4c6df42022-11-09 11:13:29 +0000573}
574
/* Operation selector for map_unmap_ns(). */
enum map_unmap_ns_op {
	MAP_NS,
	UNMAP_NS
};
579
580/*
581 * We don't hold a reference on the NS granule when it is
582 * mapped into a realm. Instead we rely on the guarantees
583 * provided by the architecture to ensure that a NS access
584 * to a protected granule is prohibited even within the realm.
585 */
AlexeiFedorov917eabf2023-04-24 12:20:41 +0100586static void map_unmap_ns(unsigned long rd_addr,
587 unsigned long map_addr,
588 long level,
589 unsigned long host_s2tte,
590 enum map_unmap_ns_op op,
591 struct smc_result *res)
Soby Mathewb4c6df42022-11-09 11:13:29 +0000592{
593 struct granule *g_rd;
594 struct rd *rd;
595 struct granule *g_table_root;
596 unsigned long *s2tt, s2tte;
597 struct rtt_walk wi;
598 unsigned long ipa_bits;
Soby Mathewb4c6df42022-11-09 11:13:29 +0000599 struct realm_s2_context s2_ctx;
600 int sl;
601
602 g_rd = find_lock_granule(rd_addr, GRANULE_STATE_RD);
603 if (g_rd == NULL) {
AlexeiFedorov917eabf2023-04-24 12:20:41 +0100604 res->x[0] = RMI_ERROR_INPUT;
605 return;
Soby Mathewb4c6df42022-11-09 11:13:29 +0000606 }
607
608 rd = granule_map(g_rd, SLOT_RD);
609
610 if (!validate_rtt_map_cmds(map_addr, level, rd)) {
611 buffer_unmap(rd);
612 granule_unlock(g_rd);
AlexeiFedorov917eabf2023-04-24 12:20:41 +0100613 res->x[0] = RMI_ERROR_INPUT;
614 return;
Soby Mathewb4c6df42022-11-09 11:13:29 +0000615 }
616
617 g_table_root = rd->s2_ctx.g_rtt;
618 sl = realm_rtt_starting_level(rd);
619 ipa_bits = realm_ipa_bits(rd);
620
AlexeiFedorovc34e3242023-04-12 11:30:33 +0100621 /* Check if map_addr is outside PAR */
622 if (addr_in_par(rd, map_addr)) {
Soby Mathewb4c6df42022-11-09 11:13:29 +0000623 buffer_unmap(rd);
624 granule_unlock(g_rd);
AlexeiFedorov917eabf2023-04-24 12:20:41 +0100625 res->x[0] = RMI_ERROR_INPUT;
626 return;
Soby Mathewb4c6df42022-11-09 11:13:29 +0000627 }
628
629 s2_ctx = rd->s2_ctx;
630 buffer_unmap(rd);
631
632 granule_lock(g_table_root, GRANULE_STATE_RTT);
633 granule_unlock(g_rd);
634
635 rtt_walk_lock_unlock(g_table_root, sl, ipa_bits,
636 map_addr, level, &wi);
AlexeiFedorov917eabf2023-04-24 12:20:41 +0100637
638 /*
639 * For UNMAP_NS, we need to map the table and look
640 * for the end of the non-live region.
641 */
642 if (op == MAP_NS && wi.last_level != level) {
AlexeiFedorovbe37dee2023-07-18 10:44:01 +0100643 res->x[0] = pack_return_code(RMI_ERROR_RTT,
644 (unsigned int)wi.last_level);
Soby Mathewb4c6df42022-11-09 11:13:29 +0000645 goto out_unlock_llt;
646 }
647
648 s2tt = granule_map(wi.g_llt, SLOT_RTT);
649 s2tte = s2tte_read(&s2tt[wi.index]);
650
651 if (op == MAP_NS) {
AlexeiFedorov5ceff352023-04-12 16:17:00 +0100652 if (!s2tte_is_unassigned_ns(s2tte)) {
AlexeiFedorov917eabf2023-04-24 12:20:41 +0100653 res->x[0] = pack_return_code(RMI_ERROR_RTT,
Soby Mathewb4c6df42022-11-09 11:13:29 +0000654 (unsigned int)level);
655 goto out_unmap_table;
656 }
657
AlexeiFedorov3a739332023-04-13 13:54:04 +0100658 s2tte = s2tte_create_assigned_ns(host_s2tte, level);
Soby Mathewb4c6df42022-11-09 11:13:29 +0000659 s2tte_write(&s2tt[wi.index], s2tte);
Soby Mathewb4c6df42022-11-09 11:13:29 +0000660
661 } else if (op == UNMAP_NS) {
662 /*
663 * The following check also verifies that map_addr is outside
664 * PAR, as valid_NS s2tte may only cover outside PAR IPA range.
665 */
AlexeiFedorov917eabf2023-04-24 12:20:41 +0100666 bool assigned_ns = s2tte_is_assigned_ns(s2tte, wi.last_level);
667
668 if ((wi.last_level != level) || !assigned_ns) {
669 res->x[0] = pack_return_code(RMI_ERROR_RTT,
670 (unsigned int)wi.last_level);
Soby Mathewb4c6df42022-11-09 11:13:29 +0000671 goto out_unmap_table;
672 }
673
AlexeiFedorov5ceff352023-04-12 16:17:00 +0100674 s2tte = s2tte_create_unassigned_ns();
Soby Mathewb4c6df42022-11-09 11:13:29 +0000675 s2tte_write(&s2tt[wi.index], s2tte);
Soby Mathewb4c6df42022-11-09 11:13:29 +0000676 if (level == RTT_PAGE_LEVEL) {
677 invalidate_page(&s2_ctx, map_addr);
678 } else {
679 invalidate_block(&s2_ctx, map_addr);
680 }
681 }
682
AlexeiFedorov917eabf2023-04-24 12:20:41 +0100683 res->x[0] = RMI_SUCCESS;
Soby Mathewb4c6df42022-11-09 11:13:29 +0000684
685out_unmap_table:
AlexeiFedorov917eabf2023-04-24 12:20:41 +0100686 if (op == UNMAP_NS) {
687 res->x[1] = skip_non_live_entries(map_addr, s2tt, &wi);
688 }
Soby Mathewb4c6df42022-11-09 11:13:29 +0000689 buffer_unmap(s2tt);
690out_unlock_llt:
691 granule_unlock(wi.g_llt);
Soby Mathewb4c6df42022-11-09 11:13:29 +0000692}
693
694unsigned long smc_rtt_map_unprotected(unsigned long rd_addr,
695 unsigned long map_addr,
696 unsigned long ulevel,
697 unsigned long s2tte)
698{
699 long level = (long)ulevel;
AlexeiFedorov917eabf2023-04-24 12:20:41 +0100700 struct smc_result res;
Soby Mathewb4c6df42022-11-09 11:13:29 +0000701
702 if (!host_ns_s2tte_is_valid(s2tte, level)) {
703 return RMI_ERROR_INPUT;
704 }
705
AlexeiFedorov917eabf2023-04-24 12:20:41 +0100706 map_unmap_ns(rd_addr, map_addr, level, s2tte, MAP_NS, &res);
707 return res.x[0];
Soby Mathewb4c6df42022-11-09 11:13:29 +0000708}
709
AlexeiFedorov917eabf2023-04-24 12:20:41 +0100710void smc_rtt_unmap_unprotected(unsigned long rd_addr,
711 unsigned long map_addr,
712 unsigned long ulevel,
713 struct smc_result *res)
Soby Mathewb4c6df42022-11-09 11:13:29 +0000714{
AlexeiFedorov917eabf2023-04-24 12:20:41 +0100715 return map_unmap_ns(rd_addr, map_addr, (long)ulevel, 0UL, UNMAP_NS, res);
Soby Mathewb4c6df42022-11-09 11:13:29 +0000716}
717
718void smc_rtt_read_entry(unsigned long rd_addr,
719 unsigned long map_addr,
720 unsigned long ulevel,
721 struct smc_result *ret)
722{
723 struct granule *g_rd, *g_rtt_root;
724 struct rd *rd;
725 struct rtt_walk wi;
726 unsigned long *s2tt, s2tte;
727 unsigned long ipa_bits;
728 long level = (long)ulevel;
729 int sl;
730
731 g_rd = find_lock_granule(rd_addr, GRANULE_STATE_RD);
732 if (g_rd == NULL) {
733 ret->x[0] = RMI_ERROR_INPUT;
734 return;
735 }
736
737 rd = granule_map(g_rd, SLOT_RD);
738
739 if (!validate_rtt_entry_cmds(map_addr, level, rd)) {
740 buffer_unmap(rd);
741 granule_unlock(g_rd);
742 ret->x[0] = RMI_ERROR_INPUT;
743 return;
744 }
745
746 g_rtt_root = rd->s2_ctx.g_rtt;
747 sl = realm_rtt_starting_level(rd);
748 ipa_bits = realm_ipa_bits(rd);
749 buffer_unmap(rd);
750
751 granule_lock(g_rtt_root, GRANULE_STATE_RTT);
752 granule_unlock(g_rd);
753
754 rtt_walk_lock_unlock(g_rtt_root, sl, ipa_bits,
755 map_addr, level, &wi);
756 s2tt = granule_map(wi.g_llt, SLOT_RTT);
757 s2tte = s2tte_read(&s2tt[wi.index]);
758 ret->x[1] = wi.last_level;
Soby Mathewb4c6df42022-11-09 11:13:29 +0000759
AlexeiFedorovc07a6382023-04-14 11:59:18 +0100760 if (s2tte_is_unassigned_empty(s2tte)) {
Yousuf A3daed822022-10-13 16:06:00 +0100761 ret->x[2] = RMI_UNASSIGNED;
AlexeiFedorov20afb5c2023-04-18 11:44:19 +0100762 ret->x[3] = 0UL;
AlexeiFedorovc07a6382023-04-14 11:59:18 +0100763 ret->x[4] = RIPAS_EMPTY;
764 } else if (s2tte_is_unassigned_ram(s2tte)) {
765 ret->x[2] = RMI_UNASSIGNED;
AlexeiFedorov20afb5c2023-04-18 11:44:19 +0100766 ret->x[3] = 0UL;
AlexeiFedorovc07a6382023-04-14 11:59:18 +0100767 ret->x[4] = RIPAS_RAM;
AlexeiFedorovc53b1f72023-07-04 15:37:03 +0100768 } else if (s2tte_is_unassigned_destroyed(s2tte)) {
769 ret->x[2] = RMI_UNASSIGNED;
AlexeiFedorov20afb5c2023-04-18 11:44:19 +0100770 ret->x[3] = 0UL;
AlexeiFedorovc53b1f72023-07-04 15:37:03 +0100771 ret->x[4] = RIPAS_DESTROYED;
AlexeiFedorov3a739332023-04-13 13:54:04 +0100772 } else if (s2tte_is_assigned_empty(s2tte, wi.last_level)) {
Yousuf A3daed822022-10-13 16:06:00 +0100773 ret->x[2] = RMI_ASSIGNED;
Soby Mathewb4c6df42022-11-09 11:13:29 +0000774 ret->x[3] = s2tte_pa(s2tte, wi.last_level);
Yousuf A62808152022-10-31 10:35:42 +0000775 ret->x[4] = RIPAS_EMPTY;
AlexeiFedorov3a739332023-04-13 13:54:04 +0100776 } else if (s2tte_is_assigned_ram(s2tte, wi.last_level)) {
Yousuf A3daed822022-10-13 16:06:00 +0100777 ret->x[2] = RMI_ASSIGNED;
Soby Mathewb4c6df42022-11-09 11:13:29 +0000778 ret->x[3] = s2tte_pa(s2tte, wi.last_level);
Yousuf A62808152022-10-31 10:35:42 +0000779 ret->x[4] = RIPAS_RAM;
AlexeiFedorovc53b1f72023-07-04 15:37:03 +0100780 } else if (s2tte_is_assigned_destroyed(s2tte, wi.last_level)) {
781 ret->x[2] = RMI_ASSIGNED;
782 ret->x[3] = 0UL;
783 ret->x[4] = RIPAS_DESTROYED;
AlexeiFedorov5ceff352023-04-12 16:17:00 +0100784 } else if (s2tte_is_unassigned_ns(s2tte)) {
785 ret->x[2] = RMI_UNASSIGNED;
AlexeiFedorov20afb5c2023-04-18 11:44:19 +0100786 ret->x[3] = 0UL;
AlexeiFedorovc53b1f72023-07-04 15:37:03 +0100787 ret->x[4] = RIPAS_EMPTY;
AlexeiFedorov3a739332023-04-13 13:54:04 +0100788 } else if (s2tte_is_assigned_ns(s2tte, wi.last_level)) {
AlexeiFedorov20afb5c2023-04-18 11:44:19 +0100789 ret->x[2] = RMI_ASSIGNED;
Soby Mathewb4c6df42022-11-09 11:13:29 +0000790 ret->x[3] = host_ns_s2tte(s2tte, wi.last_level);
AlexeiFedorovc53b1f72023-07-04 15:37:03 +0100791 ret->x[4] = RIPAS_EMPTY;
Soby Mathewb4c6df42022-11-09 11:13:29 +0000792 } else if (s2tte_is_table(s2tte, wi.last_level)) {
Yousuf A3daed822022-10-13 16:06:00 +0100793 ret->x[2] = RMI_TABLE;
Soby Mathewb4c6df42022-11-09 11:13:29 +0000794 ret->x[3] = s2tte_pa_table(s2tte, wi.last_level);
AlexeiFedorovc53b1f72023-07-04 15:37:03 +0100795 ret->x[4] = RIPAS_EMPTY;
Soby Mathewb4c6df42022-11-09 11:13:29 +0000796 } else {
797 assert(false);
798 }
799
800 buffer_unmap(s2tt);
801 granule_unlock(wi.g_llt);
802
803 ret->x[0] = RMI_SUCCESS;
804}
805
806static void data_granule_measure(struct rd *rd, void *data,
807 unsigned long ipa,
808 unsigned long flags)
809{
810 struct measurement_desc_data measure_desc = {0};
811
812 /* Initialize the measurement descriptior structure */
813 measure_desc.desc_type = MEASURE_DESC_TYPE_DATA;
814 measure_desc.len = sizeof(struct measurement_desc_data);
815 measure_desc.ipa = ipa;
816 measure_desc.flags = flags;
817 memcpy(measure_desc.rim,
818 &rd->measurement[RIM_MEASUREMENT_SLOT],
819 measurement_get_size(rd->algorithm));
820
821 if (flags == RMI_MEASURE_CONTENT) {
822 /*
823 * Hashing the data granules and store the result in the
824 * measurement descriptor structure.
825 */
826 measurement_hash_compute(rd->algorithm,
827 data,
828 GRANULE_SIZE,
829 measure_desc.content);
830 }
831
832 /*
833 * Hashing the measurement descriptor structure; the result is the
834 * updated RIM.
835 */
836 measurement_hash_compute(rd->algorithm,
837 &measure_desc,
838 sizeof(measure_desc),
839 rd->measurement[RIM_MEASUREMENT_SLOT]);
840}
841
842static unsigned long validate_data_create_unknown(unsigned long map_addr,
843 struct rd *rd)
844{
845 if (!addr_in_par(rd, map_addr)) {
846 return RMI_ERROR_INPUT;
847 }
848
849 if (!validate_map_addr(map_addr, RTT_PAGE_LEVEL, rd)) {
850 return RMI_ERROR_INPUT;
851 }
852
853 return RMI_SUCCESS;
854}
855
856static unsigned long validate_data_create(unsigned long map_addr,
857 struct rd *rd)
858{
859 if (get_rd_state_locked(rd) != REALM_STATE_NEW) {
860 return RMI_ERROR_REALM;
861 }
862
863 return validate_data_create_unknown(map_addr, rd);
864}
865
/*
 * Implements both RMI_DATA_CREATE and RMI_DATA_CREATE_UNKNOWN
 *
 * if @g_src == NULL, implements RMI_DATA_CREATE_UNKNOWN
 * and RMI_DATA_CREATE otherwise.
 *
 * Locks @data_addr (DELEGATED) and @rd_addr (RD) together, walks the
 * stage 2 RTT to the page level for @map_addr and, on success,
 * transitions the data granule to GRANULE_STATE_DATA and installs an
 * ASSIGNED_RAM s2tte for it. For RMI_DATA_CREATE, the contents of
 * @g_src (an NS granule) are copied in first and measured into the RIM
 * according to @flags.
 *
 * Returns RMI_SUCCESS or an RMI error code (possibly packed with the
 * RTT level via pack_return_code() on a walk mismatch).
 */
static unsigned long data_create(unsigned long rd_addr,
				 unsigned long data_addr,
				 unsigned long map_addr,
				 struct granule *g_src,
				 unsigned long flags)
{
	struct granule *g_data;
	struct granule *g_rd;
	struct granule *g_table_root;
	struct rd *rd;
	struct rtt_walk wi;
	unsigned long s2tte, *s2tt;
	/*
	 * Unless the command succeeds, the data granule is released back
	 * in its original DELEGATED state at the exit path below.
	 */
	enum granule_state new_data_state = GRANULE_STATE_DELEGATED;
	unsigned long ipa_bits;
	unsigned long ret;
	int __unused meas_ret;
	int sl;

	/* Lock both granules in a deadlock-free order and check states */
	if (!find_lock_two_granules(data_addr,
				    GRANULE_STATE_DELEGATED,
				    &g_data,
				    rd_addr,
				    GRANULE_STATE_RD,
				    &g_rd)) {
		return RMI_ERROR_INPUT;
	}

	rd = granule_map(g_rd, SLOT_RD);

	/* DATA_CREATE additionally requires the realm to be NEW */
	ret = (g_src != NULL) ?
		validate_data_create(map_addr, rd) :
		validate_data_create_unknown(map_addr, rd);

	if (ret != RMI_SUCCESS) {
		goto out_unmap_rd;
	}

	g_table_root = rd->s2_ctx.g_rtt;
	sl = realm_rtt_starting_level(rd);
	ipa_bits = realm_ipa_bits(rd);
	granule_lock(g_table_root, GRANULE_STATE_RTT);
	rtt_walk_lock_unlock(g_table_root, sl, ipa_bits,
			     map_addr, RTT_PAGE_LEVEL, &wi);
	/* The walk must reach a page-level entry to map a single granule */
	if (wi.last_level != RTT_PAGE_LEVEL) {
		ret = pack_return_code(RMI_ERROR_RTT,
					(unsigned int)wi.last_level);
		goto out_unlock_ll_table;
	}

	s2tt = granule_map(wi.g_llt, SLOT_RTT);
	s2tte = s2tte_read(&s2tt[wi.index]);

	if (g_src != NULL) {
		bool ns_access_ok;
		void *data;

		/* DATA_CREATE requires an UNASSIGNED_RAM entry */
		if (!s2tte_is_unassigned_ram(s2tte)) {
			ret = pack_return_code(RMI_ERROR_RTT, RTT_PAGE_LEVEL);
			goto out_unmap_ll_table;
		}

		/* Copy the initial contents from the NS source granule */
		data = granule_map(g_data, SLOT_DELEGATED);
		ns_access_ok = ns_buffer_read(SLOT_NS, g_src, 0U,
					      GRANULE_SIZE, data);
		if (!ns_access_ok) {
			/*
			 * Some data may be copied before the failure. Zero
			 * g_data granule as it will remain in delegated state.
			 */
			(void)memset(data, 0, GRANULE_SIZE);
			buffer_unmap(data);
			ret = RMI_ERROR_INPUT;
			goto out_unmap_ll_table;
		}

		/* Fold this granule's contribution into the RIM */
		data_granule_measure(rd, data, map_addr, flags);
		buffer_unmap(data);

	} else if (!s2tte_is_unassigned(s2tte)) {
		/* DATA_CREATE_UNKNOWN accepts any UNASSIGNED entry */
		ret = pack_return_code(RMI_ERROR_RTT, RTT_PAGE_LEVEL);
		goto out_unmap_ll_table;
	}

	/* Commit: the granule leaves this function in the DATA state */
	new_data_state = GRANULE_STATE_DATA;

	s2tte = s2tte_create_assigned_ram(data_addr, RTT_PAGE_LEVEL);

	s2tte_write(&s2tt[wi.index], s2tte);
	/* The RTT now holds one more live entry */
	__granule_get(wi.g_llt);

	ret = RMI_SUCCESS;

out_unmap_ll_table:
	buffer_unmap(s2tt);
out_unlock_ll_table:
	granule_unlock(wi.g_llt);
out_unmap_rd:
	buffer_unmap(rd);
	granule_unlock(g_rd);
	granule_unlock_transition(g_data, new_data_state);
	return ret;
}
974
AlexeiFedorovac923c82023-04-06 15:12:04 +0100975unsigned long smc_data_create(unsigned long rd_addr,
976 unsigned long data_addr,
Soby Mathewb4c6df42022-11-09 11:13:29 +0000977 unsigned long map_addr,
978 unsigned long src_addr,
979 unsigned long flags)
980{
981 struct granule *g_src;
Soby Mathewb4c6df42022-11-09 11:13:29 +0000982
983 if (flags != RMI_NO_MEASURE_CONTENT && flags != RMI_MEASURE_CONTENT) {
984 return RMI_ERROR_INPUT;
985 }
986
987 g_src = find_granule(src_addr);
988 if ((g_src == NULL) || (g_src->state != GRANULE_STATE_NS)) {
989 return RMI_ERROR_INPUT;
990 }
991
AlexeiFedorovac923c82023-04-06 15:12:04 +0100992 return data_create(rd_addr, data_addr, map_addr, g_src, flags);
Soby Mathewb4c6df42022-11-09 11:13:29 +0000993}
994
AlexeiFedorovac923c82023-04-06 15:12:04 +0100995unsigned long smc_data_create_unknown(unsigned long rd_addr,
996 unsigned long data_addr,
Soby Mathewb4c6df42022-11-09 11:13:29 +0000997 unsigned long map_addr)
998{
AlexeiFedorovac923c82023-04-06 15:12:04 +0100999 return data_create(rd_addr, data_addr, map_addr, NULL, 0);
Soby Mathewb4c6df42022-11-09 11:13:29 +00001000}
1001
/*
 * Implements RMI_DATA_DESTROY.
 *
 * Unmaps the DATA granule at @map_addr from the realm owned by @rd_addr,
 * transitions that granule back to the DELEGATED state (zeroing it) and
 * reports results through @res:
 *   x[0] - RMI status (RMI_ERROR_RTT is packed with the walk level),
 *   x[1] - on success, the PA of the destroyed data granule,
 *   x[2] - next IPA to consider (skip_non_live_entries()), 0 on
 *          early input errors.
 */
void smc_data_destroy(unsigned long rd_addr,
		      unsigned long map_addr,
		      struct smc_result *res)
{
	struct granule *g_data;
	struct granule *g_rd;
	struct granule *g_table_root;
	struct rtt_walk wi;
	unsigned long data_addr, s2tte, *s2tt;
	struct rd *rd;
	unsigned long ipa_bits;
	struct realm_s2_context s2_ctx;
	int sl;

	g_rd = find_lock_granule(rd_addr, GRANULE_STATE_RD);
	if (g_rd == NULL) {
		res->x[0] = RMI_ERROR_INPUT;
		res->x[2] = 0UL;
		return;
	}

	rd = granule_map(g_rd, SLOT_RD);

	if (!validate_map_addr(map_addr, RTT_PAGE_LEVEL, rd)) {
		buffer_unmap(rd);
		granule_unlock(g_rd);
		res->x[0] = RMI_ERROR_INPUT;
		res->x[2] = 0UL;
		return;
	}

	/* Capture what we need from the RD before dropping its mapping */
	g_table_root = rd->s2_ctx.g_rtt;
	sl = realm_rtt_starting_level(rd);
	ipa_bits = realm_ipa_bits(rd);
	s2_ctx = rd->s2_ctx;
	buffer_unmap(rd);

	/* Take the RTT root lock before releasing the RD lock */
	granule_lock(g_table_root, GRANULE_STATE_RTT);
	granule_unlock(g_rd);

	rtt_walk_lock_unlock(g_table_root, sl, ipa_bits,
			     map_addr, RTT_PAGE_LEVEL, &wi);

	/*
	 * The table is mapped before the level check so that the exit path
	 * can always compute x[2] via skip_non_live_entries().
	 */
	s2tt = granule_map(wi.g_llt, SLOT_RTT);
	if (wi.last_level != RTT_PAGE_LEVEL) {
		res->x[0] = pack_return_code(RMI_ERROR_RTT,
						(unsigned int)wi.last_level);
		goto out_unmap_ll_table;
	}

	s2tte = s2tte_read(&s2tt[wi.index]);

	if (s2tte_is_assigned_ram(s2tte, RTT_PAGE_LEVEL)) {
		data_addr = s2tte_pa(s2tte, RTT_PAGE_LEVEL);
		s2tte = s2tte_create_unassigned_destroyed();
		s2tte_write(&s2tt[wi.index], s2tte);
		/* The mapping was live: the stale TLB entry must go */
		invalidate_page(&s2_ctx, map_addr);
	} else if (s2tte_is_assigned_empty(s2tte, RTT_PAGE_LEVEL)) {
		data_addr = s2tte_pa(s2tte, RTT_PAGE_LEVEL);
		s2tte = s2tte_create_unassigned_empty();
		s2tte_write(&s2tt[wi.index], s2tte);
	} else {
		/* Nothing destroyable is mapped at this IPA */
		res->x[0] = pack_return_code(RMI_ERROR_RTT, RTT_PAGE_LEVEL);
		goto out_unmap_ll_table;
	}

	/* One fewer live entry in this RTT */
	__granule_put(wi.g_llt);

	/*
	 * Lock the data granule and check expected state. Correct locking order
	 * is guaranteed because granule address is obtained from a locked
	 * granule by table walk. This lock needs to be acquired before a state
	 * transition to or from GRANULE_STATE_DATA for granule address can happen.
	 */
	g_data = find_lock_granule(data_addr, GRANULE_STATE_DATA);
	assert(g_data != NULL);
	granule_memzero(g_data, SLOT_DELEGATED);
	granule_unlock_transition(g_data, GRANULE_STATE_DELEGATED);

	res->x[0] = RMI_SUCCESS;
	res->x[1] = data_addr;
out_unmap_ll_table:
	res->x[2] = skip_non_live_entries(map_addr, s2tt, &wi);
	buffer_unmap(s2tt);
	granule_unlock(wi.g_llt);
}
1088
AlexeiFedorov0fb44552023-04-14 15:37:58 +01001089/*
AlexeiFedorov5cf35ba2023-04-25 10:02:20 +01001090 * Update the ripas value for the entry pointed by @s2ttep.
AlexeiFedorov0fb44552023-04-14 15:37:58 +01001091 *
AlexeiFedorov5cf35ba2023-04-25 10:02:20 +01001092 * Returns:
1093 * < 0 - On error and the operation was aborted,
1094 * e.g., entry cannot have a ripas.
1095 * 0 - Operation was success and no TLBI is required.
1096 * > 0 - Operation was success and TLBI is required.
1097 * Sets:
1098 * @(*do_tlbi) to 'true' if the TLBs have to be invalidated.
AlexeiFedorov0fb44552023-04-14 15:37:58 +01001099 */
AlexeiFedorov5cf35ba2023-04-25 10:02:20 +01001100static int update_ripas(unsigned long *s2ttep, unsigned long level,
AlexeiFedorov63614ea2023-07-14 17:07:20 +01001101 enum ripas ripas_val,
1102 enum ripas_change_destroyed change_destroyed)
Soby Mathewb4c6df42022-11-09 11:13:29 +00001103{
AlexeiFedorov5cf35ba2023-04-25 10:02:20 +01001104 unsigned long pa, s2tte = s2tte_read(s2ttep);
1105 int ret = 0;
AlexeiFedorov0fb44552023-04-14 15:37:58 +01001106
AlexeiFedorov5cf35ba2023-04-25 10:02:20 +01001107 if (!s2tte_has_ripas(s2tte, level)) {
1108 return -1;
Soby Mathewb4c6df42022-11-09 11:13:29 +00001109 }
1110
AlexeiFedorov0fb44552023-04-14 15:37:58 +01001111 if (ripas_val == RIPAS_RAM) {
AlexeiFedorov5cf35ba2023-04-25 10:02:20 +01001112 if (s2tte_is_unassigned_empty(s2tte)) {
1113 s2tte = s2tte_create_unassigned_ram();
AlexeiFedorov63614ea2023-07-14 17:07:20 +01001114 } else if (s2tte_is_unassigned_destroyed(s2tte)) {
1115 if (change_destroyed == CHANGE_DESTROYED) {
1116 s2tte = s2tte_create_unassigned_ram();
1117 } else {
1118 return -1;
1119 }
AlexeiFedorov5cf35ba2023-04-25 10:02:20 +01001120 } else if (s2tte_is_assigned_empty(s2tte, level)) {
1121 pa = s2tte_pa(s2tte, level);
1122 s2tte = s2tte_create_assigned_ram(pa, level);
AlexeiFedorov63614ea2023-07-14 17:07:20 +01001123 } else if (s2tte_is_assigned_destroyed(s2tte, level)) {
1124 if (change_destroyed == CHANGE_DESTROYED) {
1125 pa = s2tte_pa(s2tte, level);
1126 s2tte = s2tte_create_assigned_ram(pa, level);
1127 } else {
1128 return -1;
1129 }
AlexeiFedorov5cf35ba2023-04-25 10:02:20 +01001130 } else {
1131 /* No action is required */
1132 return 0;
Soby Mathewb4c6df42022-11-09 11:13:29 +00001133 }
AlexeiFedorov0fb44552023-04-14 15:37:58 +01001134 } else if (ripas_val == RIPAS_EMPTY) {
AlexeiFedorov5cf35ba2023-04-25 10:02:20 +01001135 if (s2tte_is_unassigned_ram(s2tte)) {
1136 s2tte = s2tte_create_unassigned_empty();
AlexeiFedorov63614ea2023-07-14 17:07:20 +01001137 } else if (s2tte_is_unassigned_destroyed(s2tte)) {
1138 if (change_destroyed == CHANGE_DESTROYED) {
1139 s2tte = s2tte_create_unassigned_empty();
1140 } else {
1141 return -1;
1142 }
AlexeiFedorov5cf35ba2023-04-25 10:02:20 +01001143 } else if (s2tte_is_assigned_ram(s2tte, level)) {
1144 pa = s2tte_pa(s2tte, level);
1145 s2tte = s2tte_create_assigned_empty(pa, level);
1146 /* TLBI is required */
1147 ret = 1;
AlexeiFedorov63614ea2023-07-14 17:07:20 +01001148 } else if (s2tte_is_assigned_destroyed(s2tte, level)) {
1149 if (change_destroyed == CHANGE_DESTROYED) {
1150 pa = s2tte_pa(s2tte, level);
1151 s2tte = s2tte_create_assigned_empty(pa, level);
1152 /* TLBI is required */
1153 ret = 1;
1154 } else {
1155 return -1;
1156 }
AlexeiFedorov5cf35ba2023-04-25 10:02:20 +01001157 } else {
1158 /* No action is required */
1159 return 0;
AlexeiFedorov0fb44552023-04-14 15:37:58 +01001160 }
Soby Mathewb4c6df42022-11-09 11:13:29 +00001161 }
AlexeiFedorov5cf35ba2023-04-25 10:02:20 +01001162 s2tte_write(s2ttep, s2tte);
1163 return ret;
Soby Mathewb4c6df42022-11-09 11:13:29 +00001164}
1165
1166static void ripas_granule_measure(struct rd *rd,
AlexeiFedorov960d1612023-04-25 13:23:39 +01001167 unsigned long base,
1168 unsigned long top)
Soby Mathewb4c6df42022-11-09 11:13:29 +00001169{
1170 struct measurement_desc_ripas measure_desc = {0};
1171
1172 /* Initialize the measurement descriptior structure */
1173 measure_desc.desc_type = MEASURE_DESC_TYPE_RIPAS;
1174 measure_desc.len = sizeof(struct measurement_desc_ripas);
AlexeiFedorov960d1612023-04-25 13:23:39 +01001175 measure_desc.base = base;
1176 measure_desc.top = top;
AlexeiFedorov5cf35ba2023-04-25 10:02:20 +01001177 (void)memcpy(measure_desc.rim,
1178 &rd->measurement[RIM_MEASUREMENT_SLOT],
1179 measurement_get_size(rd->algorithm));
Soby Mathewb4c6df42022-11-09 11:13:29 +00001180
1181 /*
1182 * Hashing the measurement descriptor structure; the result is the
1183 * updated RIM.
1184 */
1185 measurement_hash_compute(rd->algorithm,
1186 &measure_desc,
1187 sizeof(measure_desc),
1188 rd->measurement[RIM_MEASUREMENT_SLOT]);
1189}
1190
/*
 * Implements RMI_RTT_INIT_RIPAS.
 *
 * For a realm in the NEW state, transitions UNASSIGNED_EMPTY entries in
 * [@base, @top) to UNASSIGNED_RAM, measuring each updated range into the
 * RIM. Processing stops at the end of the single RTT reached by the walk,
 * at the first entry that is neither UNASSIGNED_EMPTY nor already
 * UNASSIGNED_RAM, or when the next entry would cross @top.
 *
 * Results via @res:
 *   x[0] - RMI status (RMI_ERROR_RTT packed with the walk level if no
 *          progress was possible),
 *   x[1] - on success, the first IPA not processed (the caller re-issues
 *          the command from there).
 */
void smc_rtt_init_ripas(unsigned long rd_addr,
			unsigned long base,
			unsigned long top,
			struct smc_result *res)
{
	struct granule *g_rd, *g_rtt_root;
	struct rd *rd;
	unsigned long ipa_bits, addr, map_size;
	struct rtt_walk wi;
	unsigned long s2tte, *s2tt;
	long level;
	unsigned int index;
	int sl;

	g_rd = find_lock_granule(rd_addr, GRANULE_STATE_RD);
	if (g_rd == NULL) {
		res->x[0] = RMI_ERROR_INPUT;
		return;
	}

	rd = granule_map(g_rd, SLOT_RD);

	/* The RIM can only be extended before the realm is activated */
	if (get_rd_state_locked(rd) != REALM_STATE_NEW) {
		buffer_unmap(rd);
		granule_unlock(g_rd);
		res->x[0] = RMI_ERROR_REALM;
		return;
	}

	if (!validate_map_addr(base, RTT_PAGE_LEVEL, rd) ||
	    !validate_rtt_entry_cmds(top, RTT_PAGE_LEVEL, rd)) {
		buffer_unmap(rd);
		granule_unlock(g_rd);
		res->x[0] = RMI_ERROR_INPUT;
		return;
	}

	/* Both range bounds must lie in the protected address range */
	if (!addr_in_par(rd, base) || !addr_in_par(rd, top)) {
		buffer_unmap(rd);
		granule_unlock(g_rd);
		res->x[0] = RMI_ERROR_INPUT;
		return;
	}

	g_rtt_root = rd->s2_ctx.g_rtt;
	sl = realm_rtt_starting_level(rd);
	ipa_bits = realm_ipa_bits(rd);

	granule_lock(g_rtt_root, GRANULE_STATE_RTT);

	/* Walk as deep as the existing tables allow */
	rtt_walk_lock_unlock(g_rtt_root, sl, ipa_bits,
			     base, RTT_PAGE_LEVEL, &wi);
	level = wi.last_level;
	s2tt = granule_map(wi.g_llt, SLOT_RTT);
	map_size = s2tte_map_size(level);
	/* Align down to the entry size at the level reached */
	addr = base & ~(map_size - 1UL);

	/*
	 * If the RTTE covers a range below "base", we need to
	 * go deeper.
	 */
	if (addr != base) {
		res->x[0] = pack_return_code(RMI_ERROR_RTT,
						(unsigned int)level);
		goto out_unmap_llt;
	}

	/* Process consecutive entries of this table until a stop condition */
	for (index = wi.index; index < S2TTES_PER_S2TT;
	     index++, addr += map_size) {
		unsigned long next = addr + map_size;

		/* Stop before the entry that would cross @top */
		if (next > top) {
			break;
		}

		s2tte = s2tte_read(&s2tt[index]);
		if (s2tte_is_unassigned_empty(s2tte)) {
			s2tte = s2tte_create_unassigned_ram();
			s2tte_write(&s2tt[index], s2tte);
		} else if (!s2tte_is_unassigned_ram(s2tte)) {
			/* Any other state halts the operation */
			break;
		}
		/* Measure the range covered by this entry into the RIM */
		ripas_granule_measure(rd, addr, next);
	}

	/* Success iff at least one entry was processed */
	if (addr > base) {
		res->x[0] = RMI_SUCCESS;
		res->x[1] = addr;
	} else {
		res->x[0] = pack_return_code(RMI_ERROR_RTT,
						(unsigned int)level);
	}

out_unmap_llt:
	buffer_unmap(s2tt);
	buffer_unmap(rd);
	granule_unlock(wi.g_llt);
	granule_unlock(g_rd);
}
1290
/*
 * Apply @ripas_val to the entries of the single table @s2tt that cover
 * [@base, @top), starting at the entry found by the walk described in
 * @wi. Invalidates TLBs for each entry whose update requires it.
 *
 * Results via @res:
 *   x[0] - RMI_SUCCESS if at least one entry was updated, otherwise
 *          RMI_ERROR_RTT packed with the walk level,
 *   x[1] - on success, the first IPA not processed.
 */
static void rtt_set_ripas_range(struct realm_s2_context *s2_ctx,
				unsigned long *s2tt,
				unsigned long base,
				unsigned long top,
				struct rtt_walk *wi,
				unsigned long ripas_val,
				enum ripas_change_destroyed change_destroyed,
				struct smc_result *res)
{
	unsigned long index = wi->index;
	long level = wi->last_level;
	unsigned long map_size = s2tte_map_size(level);

	/* Align to the RTT level */
	unsigned long addr = base & ~(map_size - 1UL);

	/* Make sure we don't touch a range below the requested range */
	if (addr != base) {
		res->x[0] = pack_return_code(RMI_ERROR_RTT,
						(unsigned int)level);
		return;
	}

	for (index = wi->index; index < S2TTES_PER_S2TT; addr += map_size) {
		int ret;

		/* If this entry crosses the range, break. */
		if (addr + map_size > top) {
			break;
		}

		/* Negative return: update refused; stop here */
		ret = update_ripas(&s2tt[index++], level,
				   ripas_val, change_destroyed);
		if (ret < 0) {
			break;
		}

		/* Handle TLBI */
		if (ret != 0) {
			if (level == RTT_PAGE_LEVEL) {
				invalidate_page(s2_ctx, addr);
			} else {
				invalidate_block(s2_ctx, addr);
			}
		}
	}

	/* Success iff at least one entry was processed */
	if (addr > base) {
		res->x[0] = RMI_SUCCESS;
		res->x[1] = addr;
	} else {
		res->x[0] = pack_return_code(RMI_ERROR_RTT,
						(unsigned int)level);
	}
}
1346
/*
 * Implements RMI_RTT_SET_RIPAS.
 *
 * Applies the RIPAS change pending on the REC at @rec_addr (recorded in
 * rec->set_ripas by a previous REC exit) to the range [@base, @top) of
 * the realm owned by @rd_addr. On success, advances the REC's progress
 * cursor (rec->set_ripas.addr) so the host can re-issue the command for
 * the remainder of the range.
 *
 * Results via @res: x[0] is the RMI status; x[1], on success, is the
 * first IPA not processed (see rtt_set_ripas_range()).
 */
void smc_rtt_set_ripas(unsigned long rd_addr,
		       unsigned long rec_addr,
		       unsigned long base,
		       unsigned long top,
		       struct smc_result *res)
{
	struct granule *g_rd, *g_rec, *g_rtt_root;
	struct rec *rec;
	struct rd *rd;
	unsigned long ipa_bits;
	struct rtt_walk wi;
	unsigned long *s2tt;
	struct realm_s2_context s2_ctx;
	enum ripas ripas_val;
	enum ripas_change_destroyed change_destroyed;
	int sl;

	/* Lock both granules in a deadlock-free order and check states */
	if (!find_lock_two_granules(rd_addr,
				    GRANULE_STATE_RD,
				    &g_rd,
				    rec_addr,
				    GRANULE_STATE_REC,
				    &g_rec)) {
		res->x[0] = RMI_ERROR_INPUT;
		return;
	}

	/* The REC must not be running on another PE */
	if (granule_refcount_read_acquire(g_rec) != 0UL) {
		res->x[0] = RMI_ERROR_REC;
		goto out_unlock_rec_rd;
	}

	rec = granule_map(g_rec, SLOT_REC);

	/* The REC must belong to the realm identified by @rd_addr */
	if (g_rd != rec->realm_info.g_rd) {
		res->x[0] = RMI_ERROR_REC;
		goto out_unmap_rec;
	}

	ripas_val = rec->set_ripas.ripas_val;
	change_destroyed = rec->set_ripas.change_destroyed;

	/*
	 * Return error in case of target region:
	 * - is not the next chunk of requested region
	 * - extends beyond the end of requested region
	 */
	if ((base != rec->set_ripas.addr) || (top > rec->set_ripas.top)) {
		res->x[0] = RMI_ERROR_INPUT;
		goto out_unmap_rec;
	}

	rd = granule_map(g_rd, SLOT_RD);

	/*
	 * At this point, we know base == rec->set_ripas.addr
	 * and thus must be aligned to GRANULE size.
	 */
	assert(validate_map_addr(base, RTT_PAGE_LEVEL, rd));

	if (!validate_map_addr(top, RTT_PAGE_LEVEL, rd)) {
		res->x[0] = RMI_ERROR_INPUT;
		goto out_unmap_rd;
	}

	g_rtt_root = rd->s2_ctx.g_rtt;
	sl = realm_rtt_starting_level(rd);
	ipa_bits = realm_ipa_bits(rd);
	s2_ctx = rd->s2_ctx;

	granule_lock(g_rtt_root, GRANULE_STATE_RTT);

	/* Walk to the deepest level possible */
	rtt_walk_lock_unlock(g_rtt_root, sl, ipa_bits,
			     base, RTT_PAGE_LEVEL, &wi);

	s2tt = granule_map(wi.g_llt, SLOT_RTT);

	rtt_set_ripas_range(&s2_ctx, s2tt, base, top, &wi,
			    ripas_val, change_destroyed, res);

	/* Record progress so the host can continue from res->x[1] */
	if (res->x[0] == RMI_SUCCESS) {
		rec->set_ripas.addr = res->x[1];
	}

	buffer_unmap(s2tt);
	granule_unlock(wi.g_llt);
out_unmap_rd:
	buffer_unmap(rd);
out_unmap_rec:
	buffer_unmap(rec);
out_unlock_rec_rd:
	granule_unlock(g_rec);
	granule_unlock(g_rd);
}