blob: 8ee0b78b5d7997d50be9f2aa1ff92df8bc2c1f1a [file] [log] [blame]
Soby Mathewb4c6df42022-11-09 11:13:29 +00001/*
2 * SPDX-License-Identifier: BSD-3-Clause
3 *
4 * SPDX-FileCopyrightText: Copyright TF-RMM Contributors.
5 */
6
7#include <buffer.h>
8#include <granule.h>
9#include <measurement.h>
10#include <realm.h>
11#include <ripas.h>
12#include <smc-handler.h>
13#include <smc-rmi.h>
14#include <smc.h>
15#include <stddef.h>
16#include <string.h>
17#include <table.h>
18
19/*
20 * Validate the map_addr value passed to RMI_RTT_* and RMI_DATA_* commands.
21 */
22static bool validate_map_addr(unsigned long map_addr,
23 unsigned long level,
24 struct rd *rd)
25{
AlexeiFedorov14d47ae2023-07-19 15:26:50 +010026 return ((map_addr < realm_ipa_size(rd)) &&
27 addr_is_level_aligned(map_addr, level));
Soby Mathewb4c6df42022-11-09 11:13:29 +000028}
29
30/*
31 * Structure commands can operate on all RTTs except for the root RTT so
32 * the minimal valid level is the stage 2 starting level + 1.
33 */
34static bool validate_rtt_structure_cmds(unsigned long map_addr,
35 long level,
36 struct rd *rd)
37{
38 int min_level = realm_rtt_starting_level(rd) + 1;
39
40 if ((level < min_level) || (level > RTT_PAGE_LEVEL)) {
41 return false;
42 }
43 return validate_map_addr(map_addr, level, rd);
44}
45
46/*
47 * Map/Unmap commands can operate up to a level 2 block entry so min_level is
48 * the smallest block size.
49 */
50static bool validate_rtt_map_cmds(unsigned long map_addr,
51 long level,
52 struct rd *rd)
53{
54 if ((level < RTT_MIN_BLOCK_LEVEL) || (level > RTT_PAGE_LEVEL)) {
55 return false;
56 }
57 return validate_map_addr(map_addr, level, rd);
58}
59
60/*
61 * Entry commands can operate on any entry so the minimal valid level is the
62 * stage 2 starting level.
63 */
64static bool validate_rtt_entry_cmds(unsigned long map_addr,
65 long level,
66 struct rd *rd)
67{
68 if ((level < realm_rtt_starting_level(rd)) ||
69 (level > RTT_PAGE_LEVEL)) {
70 return false;
71 }
72 return validate_map_addr(map_addr, level, rd);
73}
74
AlexeiFedorovac923c82023-04-06 15:12:04 +010075unsigned long smc_rtt_create(unsigned long rd_addr,
76 unsigned long rtt_addr,
Soby Mathewb4c6df42022-11-09 11:13:29 +000077 unsigned long map_addr,
78 unsigned long ulevel)
79{
80 struct granule *g_rd;
81 struct granule *g_tbl;
82 struct rd *rd;
83 struct granule *g_table_root;
84 struct rtt_walk wi;
85 unsigned long *s2tt, *parent_s2tt, parent_s2tte;
86 long level = (long)ulevel;
87 unsigned long ipa_bits;
88 unsigned long ret;
89 struct realm_s2_context s2_ctx;
90 int sl;
91
92 if (!find_lock_two_granules(rtt_addr,
93 GRANULE_STATE_DELEGATED,
94 &g_tbl,
95 rd_addr,
96 GRANULE_STATE_RD,
97 &g_rd)) {
98 return RMI_ERROR_INPUT;
99 }
100
101 rd = granule_map(g_rd, SLOT_RD);
102
103 if (!validate_rtt_structure_cmds(map_addr, level, rd)) {
104 buffer_unmap(rd);
105 granule_unlock(g_rd);
106 granule_unlock(g_tbl);
107 return RMI_ERROR_INPUT;
108 }
109
110 g_table_root = rd->s2_ctx.g_rtt;
111 sl = realm_rtt_starting_level(rd);
112 ipa_bits = realm_ipa_bits(rd);
113 s2_ctx = rd->s2_ctx;
114 buffer_unmap(rd);
115
116 /*
117 * Lock the RTT root. Enforcing locking order RD->RTT is enough to
118 * ensure deadlock free locking guarentee.
119 */
120 granule_lock(g_table_root, GRANULE_STATE_RTT);
121
122 /* Unlock RD after locking RTT Root */
123 granule_unlock(g_rd);
124
125 rtt_walk_lock_unlock(g_table_root, sl, ipa_bits,
126 map_addr, level - 1L, &wi);
127 if (wi.last_level != level - 1L) {
AlexeiFedorovbe37dee2023-07-18 10:44:01 +0100128 ret = pack_return_code(RMI_ERROR_RTT,
129 (unsigned int)wi.last_level);
Soby Mathewb4c6df42022-11-09 11:13:29 +0000130 goto out_unlock_llt;
131 }
132
133 parent_s2tt = granule_map(wi.g_llt, SLOT_RTT);
134 parent_s2tte = s2tte_read(&parent_s2tt[wi.index]);
135 s2tt = granule_map(g_tbl, SLOT_DELEGATED);
136
AlexeiFedorovc07a6382023-04-14 11:59:18 +0100137 if (s2tte_is_unassigned_empty(parent_s2tte)) {
138 s2tt_init_unassigned_empty(s2tt);
Soby Mathewb4c6df42022-11-09 11:13:29 +0000139
140 /*
141 * Increase the refcount of the parent, the granule was
142 * locked while table walking and hand-over-hand locking.
143 * Atomicity and acquire/release semantics not required because
144 * the table is accessed always locked.
145 */
146 __granule_get(wi.g_llt);
147
AlexeiFedorovc07a6382023-04-14 11:59:18 +0100148 } else if (s2tte_is_unassigned_ram(parent_s2tte)) {
149 s2tt_init_unassigned_ram(s2tt);
150 __granule_get(wi.g_llt);
151
AlexeiFedorov5ceff352023-04-12 16:17:00 +0100152 } else if (s2tte_is_unassigned_ns(parent_s2tte)) {
153 s2tt_init_unassigned_ns(s2tt);
154 __granule_get(wi.g_llt);
155
AlexeiFedorovc53b1f72023-07-04 15:37:03 +0100156 } else if (s2tte_is_unassigned_destroyed(parent_s2tte)) {
157 s2tt_init_unassigned_destroyed(s2tt);
Soby Mathewb4c6df42022-11-09 11:13:29 +0000158 __granule_get(wi.g_llt);
159
AlexeiFedorovc53b1f72023-07-04 15:37:03 +0100160 } else if (s2tte_is_assigned_destroyed(parent_s2tte, level - 1L)) {
161 unsigned long block_pa;
162
163 /*
164 * We should observe parent assigned s2tte only when
165 * we create tables above this level.
166 */
167 assert(level > RTT_MIN_BLOCK_LEVEL);
168
169 block_pa = s2tte_pa(parent_s2tte, level - 1L);
170
171 s2tt_init_assigned_destroyed(s2tt, block_pa, level);
172
173 /*
174 * Increase the refcount to mark the granule as in-use. refcount
175 * is incremented by S2TTES_PER_S2TT (ref RTT unfolding).
176 */
177 __granule_refcount_inc(g_tbl, S2TTES_PER_S2TT);
178
AlexeiFedorov3a739332023-04-13 13:54:04 +0100179 } else if (s2tte_is_assigned_empty(parent_s2tte, level - 1L)) {
Soby Mathewb4c6df42022-11-09 11:13:29 +0000180 unsigned long block_pa;
181
182 /*
183 * We should observe parent assigned s2tte only when
184 * we create tables above this level.
185 */
186 assert(level > RTT_MIN_BLOCK_LEVEL);
187
188 block_pa = s2tte_pa(parent_s2tte, level - 1L);
189
190 s2tt_init_assigned_empty(s2tt, block_pa, level);
191
192 /*
193 * Increase the refcount to mark the granule as in-use. refcount
194 * is incremented by S2TTES_PER_S2TT (ref RTT unfolding).
195 */
196 __granule_refcount_inc(g_tbl, S2TTES_PER_S2TT);
197
AlexeiFedorov3a739332023-04-13 13:54:04 +0100198 } else if (s2tte_is_assigned_ram(parent_s2tte, level - 1L)) {
Soby Mathewb4c6df42022-11-09 11:13:29 +0000199 unsigned long block_pa;
200
201 /*
202 * We should observe parent valid s2tte only when
203 * we create tables above this level.
204 */
205 assert(level > RTT_MIN_BLOCK_LEVEL);
206
207 /*
208 * Break before make. This may cause spurious S2 aborts.
209 */
210 s2tte_write(&parent_s2tt[wi.index], 0UL);
211 invalidate_block(&s2_ctx, map_addr);
212
213 block_pa = s2tte_pa(parent_s2tte, level - 1L);
214
AlexeiFedorov3a739332023-04-13 13:54:04 +0100215 s2tt_init_assigned_ram(s2tt, block_pa, level);
Soby Mathewb4c6df42022-11-09 11:13:29 +0000216
217 /*
218 * Increase the refcount to mark the granule as in-use. refcount
219 * is incremented by S2TTES_PER_S2TT (ref RTT unfolding).
220 */
221 __granule_refcount_inc(g_tbl, S2TTES_PER_S2TT);
222
AlexeiFedorov3a739332023-04-13 13:54:04 +0100223 } else if (s2tte_is_assigned_ns(parent_s2tte, level - 1L)) {
Soby Mathewb4c6df42022-11-09 11:13:29 +0000224 unsigned long block_pa;
225
226 /*
AlexeiFedorov3a739332023-04-13 13:54:04 +0100227 * We should observe parent assigned_ns s2tte only when
Soby Mathewb4c6df42022-11-09 11:13:29 +0000228 * we create tables above this level.
229 */
230 assert(level > RTT_MIN_BLOCK_LEVEL);
231
232 /*
233 * Break before make. This may cause spurious S2 aborts.
234 */
235 s2tte_write(&parent_s2tt[wi.index], 0UL);
236 invalidate_block(&s2_ctx, map_addr);
237
238 block_pa = s2tte_pa(parent_s2tte, level - 1L);
239
AlexeiFedorov3a739332023-04-13 13:54:04 +0100240 s2tt_init_assigned_ns(s2tt, block_pa, level);
Soby Mathewb4c6df42022-11-09 11:13:29 +0000241
242 /*
243 * Increase the refcount to mark the granule as in-use. refcount
244 * is incremented by S2TTES_PER_S2TT (ref RTT unfolding).
245 */
246 __granule_refcount_inc(g_tbl, S2TTES_PER_S2TT);
247
248 } else if (s2tte_is_table(parent_s2tte, level - 1L)) {
249 ret = pack_return_code(RMI_ERROR_RTT,
250 (unsigned int)(level - 1L));
251 goto out_unmap_table;
252
253 } else {
254 assert(false);
255 }
256
257 ret = RMI_SUCCESS;
258
259 granule_set_state(g_tbl, GRANULE_STATE_RTT);
260
261 parent_s2tte = s2tte_create_table(rtt_addr, level - 1L);
262 s2tte_write(&parent_s2tt[wi.index], parent_s2tte);
263
264out_unmap_table:
265 buffer_unmap(s2tt);
266 buffer_unmap(parent_s2tt);
267out_unlock_llt:
268 granule_unlock(wi.g_llt);
269 granule_unlock(g_tbl);
270 return ret;
271}
272
AlexeiFedorove2002be2023-04-19 17:20:12 +0100273void smc_rtt_fold(unsigned long rd_addr,
274 unsigned long map_addr,
275 unsigned long ulevel,
276 struct smc_result *res)
Soby Mathewb4c6df42022-11-09 11:13:29 +0000277{
278 struct granule *g_rd;
279 struct granule *g_tbl;
280 struct rd *rd;
281 struct granule *g_table_root;
282 struct rtt_walk wi;
283 unsigned long *table, *parent_s2tt, parent_s2tte;
284 long level = (long)ulevel;
AlexeiFedorove2002be2023-04-19 17:20:12 +0100285 unsigned long ipa_bits, rtt_addr;
Soby Mathewb4c6df42022-11-09 11:13:29 +0000286 unsigned long ret;
287 struct realm_s2_context s2_ctx;
288 int sl;
Soby Mathewb4c6df42022-11-09 11:13:29 +0000289
290 g_rd = find_lock_granule(rd_addr, GRANULE_STATE_RD);
291 if (g_rd == NULL) {
AlexeiFedorove2002be2023-04-19 17:20:12 +0100292 res->x[0] = RMI_ERROR_INPUT;
293 return;
Soby Mathewb4c6df42022-11-09 11:13:29 +0000294 }
295
296 rd = granule_map(g_rd, SLOT_RD);
297
298 if (!validate_rtt_structure_cmds(map_addr, level, rd)) {
299 buffer_unmap(rd);
300 granule_unlock(g_rd);
AlexeiFedorove2002be2023-04-19 17:20:12 +0100301 res->x[0] = RMI_ERROR_INPUT;
302 return;
Soby Mathewb4c6df42022-11-09 11:13:29 +0000303 }
304
305 g_table_root = rd->s2_ctx.g_rtt;
306 sl = realm_rtt_starting_level(rd);
307 ipa_bits = realm_ipa_bits(rd);
308 s2_ctx = rd->s2_ctx;
309 buffer_unmap(rd);
310 granule_lock(g_table_root, GRANULE_STATE_RTT);
311 granule_unlock(g_rd);
312
313 rtt_walk_lock_unlock(g_table_root, sl, ipa_bits,
314 map_addr, level - 1L, &wi);
AlexeiFedorovbe37dee2023-07-18 10:44:01 +0100315 if (wi.last_level != level - 1L) {
316 ret = pack_return_code(RMI_ERROR_RTT,
317 (unsigned int)wi.last_level);
Soby Mathewb4c6df42022-11-09 11:13:29 +0000318 goto out_unlock_parent_table;
319 }
320
321 parent_s2tt = granule_map(wi.g_llt, SLOT_RTT);
322 parent_s2tte = s2tte_read(&parent_s2tt[wi.index]);
323 if (!s2tte_is_table(parent_s2tte, level - 1L)) {
324 ret = pack_return_code(RMI_ERROR_RTT,
325 (unsigned int)(level - 1L));
326 goto out_unmap_parent_table;
327 }
328
AlexeiFedorove2002be2023-04-19 17:20:12 +0100329 rtt_addr = s2tte_pa_table(parent_s2tte, level - 1L);
Soby Mathewb4c6df42022-11-09 11:13:29 +0000330 g_tbl = find_lock_granule(rtt_addr, GRANULE_STATE_RTT);
331
332 /*
333 * A table descriptor S2TTE always points to a TABLE granule.
334 */
AlexeiFedorov63b71692023-04-19 11:18:42 +0100335 assert(g_tbl != NULL);
Soby Mathewb4c6df42022-11-09 11:13:29 +0000336
337 table = granule_map(g_tbl, SLOT_RTT2);
338
339 /*
340 * The command can succeed only if all 512 S2TTEs are of the same type.
341 * We first check the table's ref. counter to speed up the case when
342 * the host makes a guess whether a memory region can be folded.
343 */
344 if (g_tbl->refcount == 0UL) {
AlexeiFedorovc53b1f72023-07-04 15:37:03 +0100345 if (table_is_unassigned_destroyed_block(table)) {
346 parent_s2tte = s2tte_create_unassigned_destroyed();
AlexeiFedorovc07a6382023-04-14 11:59:18 +0100347 } else if (table_is_unassigned_empty_block(table)) {
348 parent_s2tte = s2tte_create_unassigned_empty();
AlexeiFedorovc07a6382023-04-14 11:59:18 +0100349 } else if (table_is_unassigned_ram_block(table)) {
350 parent_s2tte = s2tte_create_unassigned_ram();
AlexeiFedorov5ceff352023-04-12 16:17:00 +0100351 } else if (table_is_unassigned_ns_block(table)) {
352 parent_s2tte = s2tte_create_unassigned_ns();
AlexeiFedorov49752c62023-04-24 14:31:14 +0100353 } else if (table_maps_assigned_ns_block(table, level)) {
354 unsigned long s2tte = s2tte_read(&table[0]);
355 unsigned long block_pa = s2tte_pa(s2tte, level);
AlexeiFedorovc07a6382023-04-14 11:59:18 +0100356
AlexeiFedorovbe37dee2023-07-18 10:44:01 +0100357 parent_s2tte = s2tte_create_assigned_ns(block_pa,
358 level - 1L);
Soby Mathewb4c6df42022-11-09 11:13:29 +0000359 } else {
360 /*
361 * The table holds a mixture of destroyed and
362 * unassigned entries.
363 */
AlexeiFedorovbe37dee2023-07-18 10:44:01 +0100364 ret = pack_return_code(RMI_ERROR_RTT,
365 (unsigned int)level);
Soby Mathewb4c6df42022-11-09 11:13:29 +0000366 goto out_unmap_table;
367 }
AlexeiFedorov49752c62023-04-24 14:31:14 +0100368 __granule_put(wi.g_llt);
Soby Mathewb4c6df42022-11-09 11:13:29 +0000369 } else if (g_tbl->refcount == S2TTES_PER_S2TT) {
370
371 unsigned long s2tte, block_pa;
372
373 /* The RMM specification does not allow creating block
374 * entries less than RTT_MIN_BLOCK_LEVEL even though
375 * permitted by the Arm Architecture.
376 * Hence ensure that the table being folded is at a level
377 * higher than the RTT_MIN_BLOCK_LEVEL.
378 *
379 * A fully populated table cannot be destroyed if that
380 * would create a block mapping below RTT_MIN_BLOCK_LEVEL.
381 */
382 if (level <= RTT_MIN_BLOCK_LEVEL) {
AlexeiFedorovbe37dee2023-07-18 10:44:01 +0100383 ret = pack_return_code(RMI_ERROR_RTT,
384 (unsigned int)wi.last_level);
Soby Mathewb4c6df42022-11-09 11:13:29 +0000385 goto out_unmap_table;
386 }
387
388 s2tte = s2tte_read(&table[0]);
AlexeiFedorov80c2f042022-11-25 14:54:46 +0000389 block_pa = s2tte_pa(s2tte, level);
Soby Mathewb4c6df42022-11-09 11:13:29 +0000390
391 /*
AlexeiFedorov3a739332023-04-13 13:54:04 +0100392 * The table must also refer to a contiguous block through the
AlexeiFedorov3f840a02023-07-19 10:55:05 +0100393 * same type of s2tte, either Assigned or Valid.
Soby Mathewb4c6df42022-11-09 11:13:29 +0000394 */
AlexeiFedorov3a739332023-04-13 13:54:04 +0100395 if (table_maps_assigned_empty_block(table, level)) {
396 parent_s2tte = s2tte_create_assigned_empty(block_pa,
397 level - 1L);
398 } else if (table_maps_assigned_ram_block(table, level)) {
399 parent_s2tte = s2tte_create_assigned_ram(block_pa,
400 level - 1L);
AlexeiFedorov3f840a02023-07-19 10:55:05 +0100401 } else if (table_maps_assigned_destroyed_block(table, level)) {
402 parent_s2tte = s2tte_create_assigned_destroyed(block_pa,
403 level - 1L);
AlexeiFedorov80c2f042022-11-25 14:54:46 +0000404 /* The table contains mixed entries that cannot be folded */
Soby Mathewb4c6df42022-11-09 11:13:29 +0000405 } else {
AlexeiFedorovbe37dee2023-07-18 10:44:01 +0100406 ret = pack_return_code(RMI_ERROR_RTT,
407 (unsigned int)level);
AlexeiFedorov80c2f042022-11-25 14:54:46 +0000408 goto out_unmap_table;
Soby Mathewb4c6df42022-11-09 11:13:29 +0000409 }
410
411 __granule_refcount_dec(g_tbl, S2TTES_PER_S2TT);
412 } else {
413 /*
414 * The table holds a mixture of different types of s2ttes.
415 */
AlexeiFedorovbe37dee2023-07-18 10:44:01 +0100416 ret = pack_return_code(RMI_ERROR_RTT,
417 (unsigned int)level);
Soby Mathewb4c6df42022-11-09 11:13:29 +0000418 goto out_unmap_table;
419 }
420
421 ret = RMI_SUCCESS;
AlexeiFedorove2002be2023-04-19 17:20:12 +0100422 res->x[1] = rtt_addr;
Soby Mathewb4c6df42022-11-09 11:13:29 +0000423
424 /*
425 * Break before make.
426 */
427 s2tte_write(&parent_s2tt[wi.index], 0UL);
428
AlexeiFedorov3a739332023-04-13 13:54:04 +0100429 if (s2tte_is_assigned_ram(parent_s2tte, level - 1L) ||
430 s2tte_is_assigned_ns(parent_s2tte, level - 1L)) {
Soby Mathewb4c6df42022-11-09 11:13:29 +0000431 invalidate_pages_in_block(&s2_ctx, map_addr);
432 } else {
433 invalidate_block(&s2_ctx, map_addr);
434 }
435
436 s2tte_write(&parent_s2tt[wi.index], parent_s2tte);
437
438 granule_memzero_mapped(table);
439 granule_set_state(g_tbl, GRANULE_STATE_DELEGATED);
440
441out_unmap_table:
442 buffer_unmap(table);
443 granule_unlock(g_tbl);
444out_unmap_parent_table:
445 buffer_unmap(parent_s2tt);
446out_unlock_parent_table:
447 granule_unlock(wi.g_llt);
AlexeiFedorove2002be2023-04-19 17:20:12 +0100448 res->x[0] = ret;
Soby Mathewb4c6df42022-11-09 11:13:29 +0000449}
450
AlexeiFedorove2002be2023-04-19 17:20:12 +0100451void smc_rtt_destroy(unsigned long rd_addr,
452 unsigned long map_addr,
453 unsigned long ulevel,
454 struct smc_result *res)
Soby Mathewb4c6df42022-11-09 11:13:29 +0000455{
456 struct granule *g_rd;
457 struct granule *g_tbl;
458 struct rd *rd;
459 struct granule *g_table_root;
460 struct rtt_walk wi;
461 unsigned long *table, *parent_s2tt, parent_s2tte;
462 long level = (long)ulevel;
AlexeiFedorove2002be2023-04-19 17:20:12 +0100463 unsigned long ipa_bits, rtt_addr;
Soby Mathewb4c6df42022-11-09 11:13:29 +0000464 unsigned long ret;
465 struct realm_s2_context s2_ctx;
466 int sl;
AlexeiFedorov917eabf2023-04-24 12:20:41 +0100467 bool in_par, skip_non_live = false;
Soby Mathewb4c6df42022-11-09 11:13:29 +0000468
469 g_rd = find_lock_granule(rd_addr, GRANULE_STATE_RD);
470 if (g_rd == NULL) {
AlexeiFedorove2002be2023-04-19 17:20:12 +0100471 res->x[0] = RMI_ERROR_INPUT;
AlexeiFedorov3ebd4622023-07-18 16:27:39 +0100472 res->x[2] = 0UL;
AlexeiFedorove2002be2023-04-19 17:20:12 +0100473 return;
Soby Mathewb4c6df42022-11-09 11:13:29 +0000474 }
475
476 rd = granule_map(g_rd, SLOT_RD);
477
478 if (!validate_rtt_structure_cmds(map_addr, level, rd)) {
479 buffer_unmap(rd);
480 granule_unlock(g_rd);
AlexeiFedorove2002be2023-04-19 17:20:12 +0100481 res->x[0] = RMI_ERROR_INPUT;
AlexeiFedorov3ebd4622023-07-18 16:27:39 +0100482 res->x[2] = 0UL;
AlexeiFedorove2002be2023-04-19 17:20:12 +0100483 return;
Soby Mathewb4c6df42022-11-09 11:13:29 +0000484 }
485
486 g_table_root = rd->s2_ctx.g_rtt;
487 sl = realm_rtt_starting_level(rd);
488 ipa_bits = realm_ipa_bits(rd);
489 s2_ctx = rd->s2_ctx;
490 in_par = addr_in_par(rd, map_addr);
491 buffer_unmap(rd);
492 granule_lock(g_table_root, GRANULE_STATE_RTT);
493 granule_unlock(g_rd);
494
495 rtt_walk_lock_unlock(g_table_root, sl, ipa_bits,
496 map_addr, level - 1L, &wi);
Soby Mathewb4c6df42022-11-09 11:13:29 +0000497
498 parent_s2tt = granule_map(wi.g_llt, SLOT_RTT);
499 parent_s2tte = s2tte_read(&parent_s2tt[wi.index]);
AlexeiFedorov917eabf2023-04-24 12:20:41 +0100500
AlexeiFedorovbe37dee2023-07-18 10:44:01 +0100501 if ((wi.last_level != level - 1L) ||
502 !s2tte_is_table(parent_s2tte, level - 1L)) {
503 ret = pack_return_code(RMI_ERROR_RTT,
504 (unsigned int)wi.last_level);
AlexeiFedorov917eabf2023-04-24 12:20:41 +0100505 skip_non_live = true;
Soby Mathewb4c6df42022-11-09 11:13:29 +0000506 goto out_unmap_parent_table;
507 }
508
AlexeiFedorove2002be2023-04-19 17:20:12 +0100509 rtt_addr = s2tte_pa_table(parent_s2tte, level - 1L);
Soby Mathewb4c6df42022-11-09 11:13:29 +0000510
511 /*
512 * Lock the RTT granule. The 'rtt_addr' is verified, thus can be treated
513 * as an internal granule.
514 */
515 g_tbl = find_lock_granule(rtt_addr, GRANULE_STATE_RTT);
516
517 /*
518 * A table descriptor S2TTE always points to a TABLE granule.
519 */
520 assert(g_tbl != NULL);
521
522 /*
523 * Read the refcount value. RTT granule is always accessed locked, thus
524 * the refcount can be accessed without atomic operations.
525 */
526 if (g_tbl->refcount != 0UL) {
AlexeiFedorovbe37dee2023-07-18 10:44:01 +0100527 ret = pack_return_code(RMI_ERROR_RTT, (unsigned int)level);
Soby Mathewb4c6df42022-11-09 11:13:29 +0000528 goto out_unlock_table;
529 }
530
531 ret = RMI_SUCCESS;
AlexeiFedorove2002be2023-04-19 17:20:12 +0100532 res->x[1] = rtt_addr;
AlexeiFedorov917eabf2023-04-24 12:20:41 +0100533 skip_non_live = true;
Soby Mathewb4c6df42022-11-09 11:13:29 +0000534
535 table = granule_map(g_tbl, SLOT_RTT2);
536
537 if (in_par) {
AlexeiFedorovc53b1f72023-07-04 15:37:03 +0100538 parent_s2tte = s2tte_create_unassigned_destroyed();
Soby Mathewb4c6df42022-11-09 11:13:29 +0000539 } else {
AlexeiFedorov5ceff352023-04-12 16:17:00 +0100540 parent_s2tte = s2tte_create_unassigned_ns();
Soby Mathewb4c6df42022-11-09 11:13:29 +0000541 }
542
543 __granule_put(wi.g_llt);
544
545 /*
546 * Break before make. Note that this may cause spurious S2 aborts.
547 */
548 s2tte_write(&parent_s2tt[wi.index], 0UL);
549 invalidate_block(&s2_ctx, map_addr);
550 s2tte_write(&parent_s2tt[wi.index], parent_s2tte);
551
552 granule_memzero_mapped(table);
553 granule_set_state(g_tbl, GRANULE_STATE_DELEGATED);
554
555 buffer_unmap(table);
556out_unlock_table:
557 granule_unlock(g_tbl);
558out_unmap_parent_table:
AlexeiFedorov917eabf2023-04-24 12:20:41 +0100559 if (skip_non_live) {
560 res->x[2] = skip_non_live_entries(map_addr, parent_s2tt, &wi);
AlexeiFedorov3ebd4622023-07-18 16:27:39 +0100561 } else {
562 res->x[2] = map_addr;
AlexeiFedorov917eabf2023-04-24 12:20:41 +0100563 }
Soby Mathewb4c6df42022-11-09 11:13:29 +0000564 buffer_unmap(parent_s2tt);
Soby Mathewb4c6df42022-11-09 11:13:29 +0000565 granule_unlock(wi.g_llt);
AlexeiFedorove2002be2023-04-19 17:20:12 +0100566 res->x[0] = ret;
Soby Mathewb4c6df42022-11-09 11:13:29 +0000567}
568
569enum map_unmap_ns_op {
570 MAP_NS,
571 UNMAP_NS
572};
573
574/*
575 * We don't hold a reference on the NS granule when it is
576 * mapped into a realm. Instead we rely on the guarantees
577 * provided by the architecture to ensure that a NS access
578 * to a protected granule is prohibited even within the realm.
579 */
AlexeiFedorov917eabf2023-04-24 12:20:41 +0100580static void map_unmap_ns(unsigned long rd_addr,
581 unsigned long map_addr,
582 long level,
583 unsigned long host_s2tte,
584 enum map_unmap_ns_op op,
585 struct smc_result *res)
Soby Mathewb4c6df42022-11-09 11:13:29 +0000586{
587 struct granule *g_rd;
588 struct rd *rd;
589 struct granule *g_table_root;
590 unsigned long *s2tt, s2tte;
591 struct rtt_walk wi;
592 unsigned long ipa_bits;
Soby Mathewb4c6df42022-11-09 11:13:29 +0000593 struct realm_s2_context s2_ctx;
594 int sl;
595
596 g_rd = find_lock_granule(rd_addr, GRANULE_STATE_RD);
597 if (g_rd == NULL) {
AlexeiFedorov917eabf2023-04-24 12:20:41 +0100598 res->x[0] = RMI_ERROR_INPUT;
599 return;
Soby Mathewb4c6df42022-11-09 11:13:29 +0000600 }
601
602 rd = granule_map(g_rd, SLOT_RD);
603
604 if (!validate_rtt_map_cmds(map_addr, level, rd)) {
605 buffer_unmap(rd);
606 granule_unlock(g_rd);
AlexeiFedorov917eabf2023-04-24 12:20:41 +0100607 res->x[0] = RMI_ERROR_INPUT;
608 return;
Soby Mathewb4c6df42022-11-09 11:13:29 +0000609 }
610
611 g_table_root = rd->s2_ctx.g_rtt;
612 sl = realm_rtt_starting_level(rd);
613 ipa_bits = realm_ipa_bits(rd);
614
AlexeiFedorovc34e3242023-04-12 11:30:33 +0100615 /* Check if map_addr is outside PAR */
616 if (addr_in_par(rd, map_addr)) {
Soby Mathewb4c6df42022-11-09 11:13:29 +0000617 buffer_unmap(rd);
618 granule_unlock(g_rd);
AlexeiFedorov917eabf2023-04-24 12:20:41 +0100619 res->x[0] = RMI_ERROR_INPUT;
620 return;
Soby Mathewb4c6df42022-11-09 11:13:29 +0000621 }
622
623 s2_ctx = rd->s2_ctx;
624 buffer_unmap(rd);
625
626 granule_lock(g_table_root, GRANULE_STATE_RTT);
627 granule_unlock(g_rd);
628
629 rtt_walk_lock_unlock(g_table_root, sl, ipa_bits,
630 map_addr, level, &wi);
AlexeiFedorov917eabf2023-04-24 12:20:41 +0100631
632 /*
633 * For UNMAP_NS, we need to map the table and look
634 * for the end of the non-live region.
635 */
AlexeiFedorov14d47ae2023-07-19 15:26:50 +0100636 if ((op == MAP_NS) && (wi.last_level != level)) {
AlexeiFedorovbe37dee2023-07-18 10:44:01 +0100637 res->x[0] = pack_return_code(RMI_ERROR_RTT,
638 (unsigned int)wi.last_level);
Soby Mathewb4c6df42022-11-09 11:13:29 +0000639 goto out_unlock_llt;
640 }
641
642 s2tt = granule_map(wi.g_llt, SLOT_RTT);
643 s2tte = s2tte_read(&s2tt[wi.index]);
644
645 if (op == MAP_NS) {
AlexeiFedorov5ceff352023-04-12 16:17:00 +0100646 if (!s2tte_is_unassigned_ns(s2tte)) {
AlexeiFedorov917eabf2023-04-24 12:20:41 +0100647 res->x[0] = pack_return_code(RMI_ERROR_RTT,
Soby Mathewb4c6df42022-11-09 11:13:29 +0000648 (unsigned int)level);
649 goto out_unmap_table;
650 }
651
AlexeiFedorov3a739332023-04-13 13:54:04 +0100652 s2tte = s2tte_create_assigned_ns(host_s2tte, level);
Soby Mathewb4c6df42022-11-09 11:13:29 +0000653 s2tte_write(&s2tt[wi.index], s2tte);
Soby Mathewb4c6df42022-11-09 11:13:29 +0000654
655 } else if (op == UNMAP_NS) {
656 /*
657 * The following check also verifies that map_addr is outside
658 * PAR, as valid_NS s2tte may only cover outside PAR IPA range.
659 */
AlexeiFedorov917eabf2023-04-24 12:20:41 +0100660 bool assigned_ns = s2tte_is_assigned_ns(s2tte, wi.last_level);
661
662 if ((wi.last_level != level) || !assigned_ns) {
663 res->x[0] = pack_return_code(RMI_ERROR_RTT,
664 (unsigned int)wi.last_level);
Soby Mathewb4c6df42022-11-09 11:13:29 +0000665 goto out_unmap_table;
666 }
667
AlexeiFedorov5ceff352023-04-12 16:17:00 +0100668 s2tte = s2tte_create_unassigned_ns();
Soby Mathewb4c6df42022-11-09 11:13:29 +0000669 s2tte_write(&s2tt[wi.index], s2tte);
Soby Mathewb4c6df42022-11-09 11:13:29 +0000670 if (level == RTT_PAGE_LEVEL) {
671 invalidate_page(&s2_ctx, map_addr);
672 } else {
673 invalidate_block(&s2_ctx, map_addr);
674 }
675 }
676
AlexeiFedorov917eabf2023-04-24 12:20:41 +0100677 res->x[0] = RMI_SUCCESS;
Soby Mathewb4c6df42022-11-09 11:13:29 +0000678
679out_unmap_table:
AlexeiFedorov917eabf2023-04-24 12:20:41 +0100680 if (op == UNMAP_NS) {
681 res->x[1] = skip_non_live_entries(map_addr, s2tt, &wi);
682 }
Soby Mathewb4c6df42022-11-09 11:13:29 +0000683 buffer_unmap(s2tt);
684out_unlock_llt:
685 granule_unlock(wi.g_llt);
Soby Mathewb4c6df42022-11-09 11:13:29 +0000686}
687
688unsigned long smc_rtt_map_unprotected(unsigned long rd_addr,
689 unsigned long map_addr,
690 unsigned long ulevel,
691 unsigned long s2tte)
692{
693 long level = (long)ulevel;
AlexeiFedorov917eabf2023-04-24 12:20:41 +0100694 struct smc_result res;
Soby Mathewb4c6df42022-11-09 11:13:29 +0000695
696 if (!host_ns_s2tte_is_valid(s2tte, level)) {
697 return RMI_ERROR_INPUT;
698 }
699
AlexeiFedorov917eabf2023-04-24 12:20:41 +0100700 map_unmap_ns(rd_addr, map_addr, level, s2tte, MAP_NS, &res);
701 return res.x[0];
Soby Mathewb4c6df42022-11-09 11:13:29 +0000702}
703
AlexeiFedorov917eabf2023-04-24 12:20:41 +0100704void smc_rtt_unmap_unprotected(unsigned long rd_addr,
705 unsigned long map_addr,
706 unsigned long ulevel,
707 struct smc_result *res)
Soby Mathewb4c6df42022-11-09 11:13:29 +0000708{
AlexeiFedorov917eabf2023-04-24 12:20:41 +0100709 return map_unmap_ns(rd_addr, map_addr, (long)ulevel, 0UL, UNMAP_NS, res);
Soby Mathewb4c6df42022-11-09 11:13:29 +0000710}
711
712void smc_rtt_read_entry(unsigned long rd_addr,
713 unsigned long map_addr,
714 unsigned long ulevel,
715 struct smc_result *ret)
716{
717 struct granule *g_rd, *g_rtt_root;
718 struct rd *rd;
719 struct rtt_walk wi;
720 unsigned long *s2tt, s2tte;
721 unsigned long ipa_bits;
722 long level = (long)ulevel;
723 int sl;
724
725 g_rd = find_lock_granule(rd_addr, GRANULE_STATE_RD);
726 if (g_rd == NULL) {
727 ret->x[0] = RMI_ERROR_INPUT;
728 return;
729 }
730
731 rd = granule_map(g_rd, SLOT_RD);
732
733 if (!validate_rtt_entry_cmds(map_addr, level, rd)) {
734 buffer_unmap(rd);
735 granule_unlock(g_rd);
736 ret->x[0] = RMI_ERROR_INPUT;
737 return;
738 }
739
740 g_rtt_root = rd->s2_ctx.g_rtt;
741 sl = realm_rtt_starting_level(rd);
742 ipa_bits = realm_ipa_bits(rd);
743 buffer_unmap(rd);
744
745 granule_lock(g_rtt_root, GRANULE_STATE_RTT);
746 granule_unlock(g_rd);
747
748 rtt_walk_lock_unlock(g_rtt_root, sl, ipa_bits,
749 map_addr, level, &wi);
750 s2tt = granule_map(wi.g_llt, SLOT_RTT);
751 s2tte = s2tte_read(&s2tt[wi.index]);
752 ret->x[1] = wi.last_level;
Soby Mathewb4c6df42022-11-09 11:13:29 +0000753
AlexeiFedorovc07a6382023-04-14 11:59:18 +0100754 if (s2tte_is_unassigned_empty(s2tte)) {
Yousuf A3daed822022-10-13 16:06:00 +0100755 ret->x[2] = RMI_UNASSIGNED;
AlexeiFedorov20afb5c2023-04-18 11:44:19 +0100756 ret->x[3] = 0UL;
AlexeiFedorovc07a6382023-04-14 11:59:18 +0100757 ret->x[4] = RIPAS_EMPTY;
758 } else if (s2tte_is_unassigned_ram(s2tte)) {
759 ret->x[2] = RMI_UNASSIGNED;
AlexeiFedorov20afb5c2023-04-18 11:44:19 +0100760 ret->x[3] = 0UL;
AlexeiFedorovc07a6382023-04-14 11:59:18 +0100761 ret->x[4] = RIPAS_RAM;
AlexeiFedorovc53b1f72023-07-04 15:37:03 +0100762 } else if (s2tte_is_unassigned_destroyed(s2tte)) {
763 ret->x[2] = RMI_UNASSIGNED;
AlexeiFedorov20afb5c2023-04-18 11:44:19 +0100764 ret->x[3] = 0UL;
AlexeiFedorovc53b1f72023-07-04 15:37:03 +0100765 ret->x[4] = RIPAS_DESTROYED;
AlexeiFedorov3a739332023-04-13 13:54:04 +0100766 } else if (s2tte_is_assigned_empty(s2tte, wi.last_level)) {
Yousuf A3daed822022-10-13 16:06:00 +0100767 ret->x[2] = RMI_ASSIGNED;
Soby Mathewb4c6df42022-11-09 11:13:29 +0000768 ret->x[3] = s2tte_pa(s2tte, wi.last_level);
Yousuf A62808152022-10-31 10:35:42 +0000769 ret->x[4] = RIPAS_EMPTY;
AlexeiFedorov3a739332023-04-13 13:54:04 +0100770 } else if (s2tte_is_assigned_ram(s2tte, wi.last_level)) {
Yousuf A3daed822022-10-13 16:06:00 +0100771 ret->x[2] = RMI_ASSIGNED;
Soby Mathewb4c6df42022-11-09 11:13:29 +0000772 ret->x[3] = s2tte_pa(s2tte, wi.last_level);
Yousuf A62808152022-10-31 10:35:42 +0000773 ret->x[4] = RIPAS_RAM;
AlexeiFedorovc53b1f72023-07-04 15:37:03 +0100774 } else if (s2tte_is_assigned_destroyed(s2tte, wi.last_level)) {
775 ret->x[2] = RMI_ASSIGNED;
776 ret->x[3] = 0UL;
777 ret->x[4] = RIPAS_DESTROYED;
AlexeiFedorov5ceff352023-04-12 16:17:00 +0100778 } else if (s2tte_is_unassigned_ns(s2tte)) {
779 ret->x[2] = RMI_UNASSIGNED;
AlexeiFedorov20afb5c2023-04-18 11:44:19 +0100780 ret->x[3] = 0UL;
AlexeiFedorovc53b1f72023-07-04 15:37:03 +0100781 ret->x[4] = RIPAS_EMPTY;
AlexeiFedorov3a739332023-04-13 13:54:04 +0100782 } else if (s2tte_is_assigned_ns(s2tte, wi.last_level)) {
AlexeiFedorov20afb5c2023-04-18 11:44:19 +0100783 ret->x[2] = RMI_ASSIGNED;
Soby Mathewb4c6df42022-11-09 11:13:29 +0000784 ret->x[3] = host_ns_s2tte(s2tte, wi.last_level);
AlexeiFedorovc53b1f72023-07-04 15:37:03 +0100785 ret->x[4] = RIPAS_EMPTY;
Soby Mathewb4c6df42022-11-09 11:13:29 +0000786 } else if (s2tte_is_table(s2tte, wi.last_level)) {
Yousuf A3daed822022-10-13 16:06:00 +0100787 ret->x[2] = RMI_TABLE;
Soby Mathewb4c6df42022-11-09 11:13:29 +0000788 ret->x[3] = s2tte_pa_table(s2tte, wi.last_level);
AlexeiFedorovc53b1f72023-07-04 15:37:03 +0100789 ret->x[4] = RIPAS_EMPTY;
Soby Mathewb4c6df42022-11-09 11:13:29 +0000790 } else {
791 assert(false);
792 }
793
794 buffer_unmap(s2tt);
795 granule_unlock(wi.g_llt);
796
797 ret->x[0] = RMI_SUCCESS;
798}
799
800static void data_granule_measure(struct rd *rd, void *data,
801 unsigned long ipa,
802 unsigned long flags)
803{
804 struct measurement_desc_data measure_desc = {0};
805
806 /* Initialize the measurement descriptior structure */
807 measure_desc.desc_type = MEASURE_DESC_TYPE_DATA;
808 measure_desc.len = sizeof(struct measurement_desc_data);
809 measure_desc.ipa = ipa;
810 measure_desc.flags = flags;
811 memcpy(measure_desc.rim,
812 &rd->measurement[RIM_MEASUREMENT_SLOT],
813 measurement_get_size(rd->algorithm));
814
815 if (flags == RMI_MEASURE_CONTENT) {
816 /*
817 * Hashing the data granules and store the result in the
818 * measurement descriptor structure.
819 */
820 measurement_hash_compute(rd->algorithm,
821 data,
822 GRANULE_SIZE,
823 measure_desc.content);
824 }
825
826 /*
827 * Hashing the measurement descriptor structure; the result is the
828 * updated RIM.
829 */
830 measurement_hash_compute(rd->algorithm,
831 &measure_desc,
832 sizeof(measure_desc),
833 rd->measurement[RIM_MEASUREMENT_SLOT]);
834}
835
836static unsigned long validate_data_create_unknown(unsigned long map_addr,
837 struct rd *rd)
838{
839 if (!addr_in_par(rd, map_addr)) {
840 return RMI_ERROR_INPUT;
841 }
842
843 if (!validate_map_addr(map_addr, RTT_PAGE_LEVEL, rd)) {
844 return RMI_ERROR_INPUT;
845 }
846
847 return RMI_SUCCESS;
848}
849
850static unsigned long validate_data_create(unsigned long map_addr,
851 struct rd *rd)
852{
853 if (get_rd_state_locked(rd) != REALM_STATE_NEW) {
854 return RMI_ERROR_REALM;
855 }
856
857 return validate_data_create_unknown(map_addr, rd);
858}
859
860/*
AlexeiFedorov0f9cd1f2023-07-10 17:04:58 +0100861 * Implements both RMI_DATA_CREATE and RMI_DATA_CREATE_UNKNOWN
Soby Mathewb4c6df42022-11-09 11:13:29 +0000862 *
AlexeiFedorov0f9cd1f2023-07-10 17:04:58 +0100863 * if @g_src == NULL, implements RMI_DATA_CREATE_UNKNOWN
864 * and RMI_DATA_CREATE otherwise.
Soby Mathewb4c6df42022-11-09 11:13:29 +0000865 */
AlexeiFedorovac923c82023-04-06 15:12:04 +0100866static unsigned long data_create(unsigned long rd_addr,
867 unsigned long data_addr,
Soby Mathewb4c6df42022-11-09 11:13:29 +0000868 unsigned long map_addr,
869 struct granule *g_src,
870 unsigned long flags)
871{
872 struct granule *g_data;
873 struct granule *g_rd;
874 struct granule *g_table_root;
875 struct rd *rd;
876 struct rtt_walk wi;
877 unsigned long s2tte, *s2tt;
Soby Mathewb4c6df42022-11-09 11:13:29 +0000878 enum granule_state new_data_state = GRANULE_STATE_DELEGATED;
879 unsigned long ipa_bits;
880 unsigned long ret;
881 int __unused meas_ret;
882 int sl;
883
884 if (!find_lock_two_granules(data_addr,
885 GRANULE_STATE_DELEGATED,
886 &g_data,
887 rd_addr,
888 GRANULE_STATE_RD,
889 &g_rd)) {
890 return RMI_ERROR_INPUT;
891 }
892
893 rd = granule_map(g_rd, SLOT_RD);
894
895 ret = (g_src != NULL) ?
896 validate_data_create(map_addr, rd) :
897 validate_data_create_unknown(map_addr, rd);
898
899 if (ret != RMI_SUCCESS) {
900 goto out_unmap_rd;
901 }
902
903 g_table_root = rd->s2_ctx.g_rtt;
904 sl = realm_rtt_starting_level(rd);
905 ipa_bits = realm_ipa_bits(rd);
906 granule_lock(g_table_root, GRANULE_STATE_RTT);
907 rtt_walk_lock_unlock(g_table_root, sl, ipa_bits,
908 map_addr, RTT_PAGE_LEVEL, &wi);
909 if (wi.last_level != RTT_PAGE_LEVEL) {
AlexeiFedorovbe37dee2023-07-18 10:44:01 +0100910 ret = pack_return_code(RMI_ERROR_RTT,
911 (unsigned int)wi.last_level);
Soby Mathewb4c6df42022-11-09 11:13:29 +0000912 goto out_unlock_ll_table;
913 }
914
915 s2tt = granule_map(wi.g_llt, SLOT_RTT);
916 s2tte = s2tte_read(&s2tt[wi.index]);
Soby Mathewb4c6df42022-11-09 11:13:29 +0000917
Soby Mathewb4c6df42022-11-09 11:13:29 +0000918 if (g_src != NULL) {
919 bool ns_access_ok;
AlexeiFedorov0f9cd1f2023-07-10 17:04:58 +0100920 void *data;
Soby Mathewb4c6df42022-11-09 11:13:29 +0000921
AlexeiFedorov0f9cd1f2023-07-10 17:04:58 +0100922 if (!s2tte_is_unassigned_ram(s2tte)) {
923 ret = pack_return_code(RMI_ERROR_RTT, RTT_PAGE_LEVEL);
924 goto out_unmap_ll_table;
925 }
926
927 data = granule_map(g_data, SLOT_DELEGATED);
Soby Mathewb4c6df42022-11-09 11:13:29 +0000928 ns_access_ok = ns_buffer_read(SLOT_NS, g_src, 0U,
929 GRANULE_SIZE, data);
Soby Mathewb4c6df42022-11-09 11:13:29 +0000930 if (!ns_access_ok) {
931 /*
932 * Some data may be copied before the failure. Zero
933 * g_data granule as it will remain in delegated state.
934 */
935 (void)memset(data, 0, GRANULE_SIZE);
936 buffer_unmap(data);
937 ret = RMI_ERROR_INPUT;
938 goto out_unmap_ll_table;
939 }
940
Soby Mathewb4c6df42022-11-09 11:13:29 +0000941 data_granule_measure(rd, data, map_addr, flags);
Soby Mathewb4c6df42022-11-09 11:13:29 +0000942 buffer_unmap(data);
AlexeiFedorov0f9cd1f2023-07-10 17:04:58 +0100943
944 } else if (!s2tte_is_unassigned(s2tte)) {
945 ret = pack_return_code(RMI_ERROR_RTT, RTT_PAGE_LEVEL);
946 goto out_unmap_ll_table;
Soby Mathewb4c6df42022-11-09 11:13:29 +0000947 }
948
949 new_data_state = GRANULE_STATE_DATA;
950
AlexeiFedorovcde1fdc2023-04-18 16:37:25 +0100951 s2tte = s2tte_create_assigned_ram(data_addr, RTT_PAGE_LEVEL);
Soby Mathewb4c6df42022-11-09 11:13:29 +0000952
953 s2tte_write(&s2tt[wi.index], s2tte);
954 __granule_get(wi.g_llt);
955
956 ret = RMI_SUCCESS;
957
958out_unmap_ll_table:
959 buffer_unmap(s2tt);
960out_unlock_ll_table:
961 granule_unlock(wi.g_llt);
962out_unmap_rd:
963 buffer_unmap(rd);
964 granule_unlock(g_rd);
965 granule_unlock_transition(g_data, new_data_state);
966 return ret;
967}
968
AlexeiFedorovac923c82023-04-06 15:12:04 +0100969unsigned long smc_data_create(unsigned long rd_addr,
970 unsigned long data_addr,
Soby Mathewb4c6df42022-11-09 11:13:29 +0000971 unsigned long map_addr,
972 unsigned long src_addr,
973 unsigned long flags)
974{
975 struct granule *g_src;
Soby Mathewb4c6df42022-11-09 11:13:29 +0000976
977 if (flags != RMI_NO_MEASURE_CONTENT && flags != RMI_MEASURE_CONTENT) {
978 return RMI_ERROR_INPUT;
979 }
980
981 g_src = find_granule(src_addr);
982 if ((g_src == NULL) || (g_src->state != GRANULE_STATE_NS)) {
983 return RMI_ERROR_INPUT;
984 }
985
AlexeiFedorovac923c82023-04-06 15:12:04 +0100986 return data_create(rd_addr, data_addr, map_addr, g_src, flags);
Soby Mathewb4c6df42022-11-09 11:13:29 +0000987}
988
/*
 * Handle RMI_DATA_CREATE_UNKNOWN: map a delegated granule with unmeasured
 * contents at @map_addr in the realm owned by @rd_addr.
 */
unsigned long smc_data_create_unknown(unsigned long rd_addr,
				      unsigned long data_addr,
				      unsigned long map_addr)
{
	/* NULL source selects the "unknown contents" path in data_create() */
	return data_create(rd_addr, data_addr, map_addr, NULL, 0);
}
995
/*
 * Handle RMI_DATA_DESTROY: unmap the DATA granule at IPA @map_addr from the
 * realm owned by @rd_addr and return it to the DELEGATED state (zeroed).
 *
 * Results:
 *   res->x[0] - command return code.
 *   res->x[1] - on success, PA of the destroyed granule.
 *   res->x[2] - next IPA to walk (skipping non-live entries), or 0 on
 *		 input error.
 */
void smc_data_destroy(unsigned long rd_addr,
		      unsigned long map_addr,
		      struct smc_result *res)
{
	struct granule *g_data;
	struct granule *g_rd;
	struct granule *g_table_root;
	struct rtt_walk wi;
	unsigned long data_addr, s2tte, *s2tt;
	struct rd *rd;
	unsigned long ipa_bits;
	struct realm_s2_context s2_ctx;
	int sl;

	g_rd = find_lock_granule(rd_addr, GRANULE_STATE_RD);
	if (g_rd == NULL) {
		res->x[0] = RMI_ERROR_INPUT;
		res->x[2] = 0UL;
		return;
	}

	rd = granule_map(g_rd, SLOT_RD);

	/* @map_addr must be page aligned and inside the realm IPA space */
	if (!validate_map_addr(map_addr, RTT_PAGE_LEVEL, rd)) {
		buffer_unmap(rd);
		granule_unlock(g_rd);
		res->x[0] = RMI_ERROR_INPUT;
		res->x[2] = 0UL;
		return;
	}

	/* Copy out everything needed from @rd before dropping its mapping */
	g_table_root = rd->s2_ctx.g_rtt;
	sl = realm_rtt_starting_level(rd);
	ipa_bits = realm_ipa_bits(rd);
	s2_ctx = rd->s2_ctx;
	buffer_unmap(rd);

	/* Lock the RTT root before releasing the RD lock */
	granule_lock(g_table_root, GRANULE_STATE_RTT);
	granule_unlock(g_rd);

	rtt_walk_lock_unlock(g_table_root, sl, ipa_bits,
			     map_addr, RTT_PAGE_LEVEL, &wi);

	s2tt = granule_map(wi.g_llt, SLOT_RTT);
	/* The walk must reach an RTTE at the page level */
	if (wi.last_level != RTT_PAGE_LEVEL) {
		res->x[0] = pack_return_code(RMI_ERROR_RTT,
						(unsigned int)wi.last_level);
		goto out_unmap_ll_table;
	}

	s2tte = s2tte_read(&s2tt[wi.index]);

	if (s2tte_is_assigned_ram(s2tte, RTT_PAGE_LEVEL)) {
		data_addr = s2tte_pa(s2tte, RTT_PAGE_LEVEL);
		s2tte = s2tte_create_unassigned_destroyed();
		s2tte_write(&s2tt[wi.index], s2tte);
		/* The entry was valid in stage 2, so invalidate the TLB */
		invalidate_page(&s2_ctx, map_addr);
	} else if (s2tte_is_assigned_empty(s2tte, RTT_PAGE_LEVEL)) {
		data_addr = s2tte_pa(s2tte, RTT_PAGE_LEVEL);
		s2tte = s2tte_create_unassigned_empty();
		s2tte_write(&s2tt[wi.index], s2tte);
	} else {
		/* Only assigned entries reference a DATA granule */
		res->x[0] = pack_return_code(RMI_ERROR_RTT, RTT_PAGE_LEVEL);
		goto out_unmap_ll_table;
	}

	/* The last-level table lost one live entry */
	__granule_put(wi.g_llt);

	/*
	 * Lock the data granule and check expected state. Correct locking order
	 * is guaranteed because granule address is obtained from a locked
	 * granule by table walk. This lock needs to be acquired before a state
	 * transition to or from GRANULE_STATE_DATA for granule address can happen.
	 */
	g_data = find_lock_granule(data_addr, GRANULE_STATE_DATA);
	assert(g_data != NULL);
	/* Scrub the contents before handing the granule back as DELEGATED */
	granule_memzero(g_data, SLOT_DELEGATED);
	granule_unlock_transition(g_data, GRANULE_STATE_DELEGATED);

	res->x[0] = RMI_SUCCESS;
	res->x[1] = data_addr;
out_unmap_ll_table:
	res->x[2] = skip_non_live_entries(map_addr, s2tt, &wi);
	buffer_unmap(s2tt);
	granule_unlock(wi.g_llt);
}
1082
/*
 * Update the ripas value for the entry pointed by @s2ttep.
 *
 * Returns:
 *  < 0  - On error and the operation was aborted,
 *	   e.g., entry cannot have a ripas.
 *    0  - Operation was success and no TLBI is required.
 *  > 0  - Operation was success and TLBI is required.
 */
static int update_ripas(unsigned long *s2ttep, unsigned long level,
			enum ripas ripas_val,
			enum ripas_change_destroyed change_destroyed)
{
	unsigned long pa, s2tte = s2tte_read(s2ttep);
	int ret = 0;

	/* Table entries and other RIPAS-less entries cannot be updated */
	if (!s2tte_has_ripas(s2tte, level)) {
		return -1;
	}

	if (ripas_val == RIPAS_RAM) {
		if (s2tte_is_unassigned_empty(s2tte)) {
			s2tte = s2tte_create_unassigned_ram();
		} else if (s2tte_is_unassigned_destroyed(s2tte)) {
			/* DESTROYED may only change if explicitly permitted */
			if (change_destroyed == CHANGE_DESTROYED) {
				s2tte = s2tte_create_unassigned_ram();
			} else {
				return -1;
			}
		} else if (s2tte_is_assigned_empty(s2tte, level)) {
			/* Keep the existing PA, only the RIPAS changes */
			pa = s2tte_pa(s2tte, level);
			s2tte = s2tte_create_assigned_ram(pa, level);
		} else if (s2tte_is_assigned_destroyed(s2tte, level)) {
			if (change_destroyed == CHANGE_DESTROYED) {
				pa = s2tte_pa(s2tte, level);
				s2tte = s2tte_create_assigned_ram(pa, level);
			} else {
				return -1;
			}
		} else {
			/* No action is required */
			return 0;
		}
	} else if (ripas_val == RIPAS_EMPTY) {
		if (s2tte_is_unassigned_ram(s2tte)) {
			s2tte = s2tte_create_unassigned_empty();
		} else if (s2tte_is_unassigned_destroyed(s2tte)) {
			/* DESTROYED may only change if explicitly permitted */
			if (change_destroyed == CHANGE_DESTROYED) {
				s2tte = s2tte_create_unassigned_empty();
			} else {
				return -1;
			}
		} else if (s2tte_is_assigned_ram(s2tte, level)) {
			/* RAM -> EMPTY removes a valid mapping */
			pa = s2tte_pa(s2tte, level);
			s2tte = s2tte_create_assigned_empty(pa, level);
			/* TLBI is required */
			ret = 1;
		} else if (s2tte_is_assigned_destroyed(s2tte, level)) {
			if (change_destroyed == CHANGE_DESTROYED) {
				pa = s2tte_pa(s2tte, level);
				s2tte = s2tte_create_assigned_empty(pa, level);
				/* TLBI is required */
				ret = 1;
			} else {
				return -1;
			}
		} else {
			/* No action is required */
			return 0;
		}
	}
	s2tte_write(s2ttep, s2tte);
	return ret;
}
1159
1160static void ripas_granule_measure(struct rd *rd,
AlexeiFedorov960d1612023-04-25 13:23:39 +01001161 unsigned long base,
1162 unsigned long top)
Soby Mathewb4c6df42022-11-09 11:13:29 +00001163{
1164 struct measurement_desc_ripas measure_desc = {0};
1165
1166 /* Initialize the measurement descriptior structure */
1167 measure_desc.desc_type = MEASURE_DESC_TYPE_RIPAS;
1168 measure_desc.len = sizeof(struct measurement_desc_ripas);
AlexeiFedorov960d1612023-04-25 13:23:39 +01001169 measure_desc.base = base;
1170 measure_desc.top = top;
AlexeiFedorov5cf35ba2023-04-25 10:02:20 +01001171 (void)memcpy(measure_desc.rim,
1172 &rd->measurement[RIM_MEASUREMENT_SLOT],
1173 measurement_get_size(rd->algorithm));
Soby Mathewb4c6df42022-11-09 11:13:29 +00001174
1175 /*
1176 * Hashing the measurement descriptor structure; the result is the
1177 * updated RIM.
1178 */
1179 measurement_hash_compute(rd->algorithm,
1180 &measure_desc,
1181 sizeof(measure_desc),
1182 rd->measurement[RIM_MEASUREMENT_SLOT]);
1183}
1184
/*
 * Handle RMI_RTT_INIT_RIPAS: initialize the RIPAS of the IPA range
 * [@base, @top) to RAM for a realm still in the NEW state, extending the
 * RIM for each processed chunk.
 *
 * Results:
 *   res->x[0] - command return code.
 *   res->x[1] - on success, the address up to which RIPAS was initialized;
 *		 the walk stops early at the end of the RTT or at an entry
 *		 that is neither unassigned-empty nor unassigned-ram.
 */
void smc_rtt_init_ripas(unsigned long rd_addr,
			unsigned long base,
			unsigned long top,
			struct smc_result *res)
{
	struct granule *g_rd, *g_rtt_root;
	struct rd *rd;
	unsigned long ipa_bits, addr, map_size;
	struct rtt_walk wi;
	unsigned long s2tte, *s2tt;
	long level;
	unsigned long index;
	int sl;

	/* The requested range must be non-empty */
	if (top <= base) {
		res->x[0] = RMI_ERROR_INPUT;
		return;
	}

	g_rd = find_lock_granule(rd_addr, GRANULE_STATE_RD);
	if (g_rd == NULL) {
		res->x[0] = RMI_ERROR_INPUT;
		return;
	}

	rd = granule_map(g_rd, SLOT_RD);

	/* Both range bounds must be page aligned and inside the PAR */
	if (!validate_map_addr(base, RTT_PAGE_LEVEL, rd) ||
	    !validate_rtt_entry_cmds(top, RTT_PAGE_LEVEL, rd) ||
	    !addr_in_par(rd, base) || !addr_in_par(rd, top - GRANULE_SIZE)) {
		buffer_unmap(rd);
		granule_unlock(g_rd);
		res->x[0] = RMI_ERROR_INPUT;
		return;
	}

	/* RIPAS can only be initialized before the realm is activated */
	if (get_rd_state_locked(rd) != REALM_STATE_NEW) {
		buffer_unmap(rd);
		granule_unlock(g_rd);
		res->x[0] = RMI_ERROR_REALM;
		return;
	}

	g_rtt_root = rd->s2_ctx.g_rtt;
	sl = realm_rtt_starting_level(rd);
	ipa_bits = realm_ipa_bits(rd);

	granule_lock(g_rtt_root, GRANULE_STATE_RTT);

	/* Walk as deep as possible towards @base */
	rtt_walk_lock_unlock(g_rtt_root, sl, ipa_bits,
			     base, RTT_PAGE_LEVEL, &wi);
	level = wi.last_level;
	s2tt = granule_map(wi.g_llt, SLOT_RTT);
	map_size = s2tte_map_size(level);
	addr = base & ~(map_size - 1UL);

	/*
	 * If the RTTE covers a range below "base", we need to go deeper.
	 */
	if (addr != base) {
		res->x[0] = pack_return_code(RMI_ERROR_RTT,
					     (unsigned int)level);
		goto out_unmap_llt;
	}

	/* Process entries in this RTT until the range or table ends */
	for (index = wi.index; index < S2TTES_PER_S2TT;
	     index++, addr += map_size) {
		unsigned long next = addr + map_size;

		/*
		 * Break on "top_align" failure condition,
		 * or if this entry crosses the range.
		 */
		if (next > top) {
			break;
		}

		s2tte = s2tte_read(&s2tt[index]);
		if (s2tte_is_unassigned_empty(s2tte)) {
			s2tte = s2tte_create_unassigned_ram();
			s2tte_write(&s2tt[index], s2tte);
		} else if (!s2tte_is_unassigned_ram(s2tte)) {
			/* Any other entry state stops the walk */
			break;
		}
		/* Extend the RIM for this chunk */
		ripas_granule_measure(rd, addr, next);
	}

	/* Success only if at least one entry was processed */
	if (addr > base) {
		res->x[0] = RMI_SUCCESS;
		res->x[1] = addr;
	} else {
		res->x[0] = pack_return_code(RMI_ERROR_RTT,
					     (unsigned int)level);
	}

out_unmap_llt:
	buffer_unmap(s2tt);
	buffer_unmap(rd);
	granule_unlock(wi.g_llt);
	granule_unlock(g_rd);
}
1286
/*
 * Apply RIPAS value @ripas_val to the IPA range [@base, @top) within the
 * single RTT described by walk result @wi, invalidating TLB entries where
 * a valid mapping was changed.
 *
 * Results:
 *   res->x[0] - RMI_SUCCESS if at least one entry was updated, otherwise
 *		 an RMI_ERROR_RTT code carrying the walk level.
 *   res->x[1] - on success, the address after the last updated entry.
 */
static void rtt_set_ripas_range(struct realm_s2_context *s2_ctx,
				unsigned long *s2tt,
				unsigned long base,
				unsigned long top,
				struct rtt_walk *wi,
				unsigned long ripas_val,
				enum ripas_change_destroyed change_destroyed,
				struct smc_result *res)
{
	unsigned long index;
	long level = wi->last_level;
	unsigned long map_size = s2tte_map_size(level);

	/* Align to the RTT level */
	unsigned long addr = base & ~(map_size - 1UL);

	/* Make sure we don't touch a range below the requested range */
	if (addr != base) {
		res->x[0] = pack_return_code(RMI_ERROR_RTT,
					     (unsigned int)level);
		return;
	}

	for (index = wi->index; index < S2TTES_PER_S2TT; addr += map_size) {
		int ret;

		/* If this entry crosses the range, break. */
		if (addr + map_size > top) {
			break;
		}

		ret = update_ripas(&s2tt[index++], level,
				   ripas_val, change_destroyed);
		if (ret < 0) {
			/* This entry cannot take the requested RIPAS */
			break;
		}

		/* Handle TLBI */
		if (ret != 0) {
			if (level == RTT_PAGE_LEVEL) {
				invalidate_page(s2_ctx, addr);
			} else {
				invalidate_block(s2_ctx, addr);
			}
		}
	}

	/* Success only if at least one entry was updated */
	if (addr > base) {
		res->x[0] = RMI_SUCCESS;
		res->x[1] = addr;
	} else {
		res->x[0] = pack_return_code(RMI_ERROR_RTT,
					     (unsigned int)level);
	}
}
1342
/*
 * Handle RMI_RTT_SET_RIPAS: apply the RIPAS change requested by the realm
 * (recorded in REC @rec_addr) to the IPA range [@base, @top).
 *
 * The range must be the next chunk of the region stored in the REC; on
 * success the REC progress pointer is advanced to the address returned
 * in res->x[1].
 */
void smc_rtt_set_ripas(unsigned long rd_addr,
		       unsigned long rec_addr,
		       unsigned long base,
		       unsigned long top,
		       struct smc_result *res)
{
	struct granule *g_rd, *g_rec, *g_rtt_root;
	struct rec *rec;
	struct rd *rd;
	unsigned long ipa_bits;
	struct rtt_walk wi;
	unsigned long *s2tt;
	struct realm_s2_context s2_ctx;
	enum ripas ripas_val;
	enum ripas_change_destroyed change_destroyed;
	int sl;

	/* The requested range must be non-empty */
	if (top <= base) {
		res->x[0] = RMI_ERROR_INPUT;
		return;
	}

	/* Lock both granules in the order defined by their addresses */
	if (!find_lock_two_granules(rd_addr,
				    GRANULE_STATE_RD,
				    &g_rd,
				    rec_addr,
				    GRANULE_STATE_REC,
				    &g_rec)) {
		res->x[0] = RMI_ERROR_INPUT;
		return;
	}

	/* A REC with a non-zero refcount is in use */
	if (granule_refcount_read_acquire(g_rec) != 0UL) {
		res->x[0] = RMI_ERROR_REC;
		goto out_unlock_rec_rd;
	}

	rec = granule_map(g_rec, SLOT_REC);

	/* The REC must belong to the realm owned by @rd_addr */
	if (g_rd != rec->realm_info.g_rd) {
		res->x[0] = RMI_ERROR_REC;
		goto out_unmap_rec;
	}

	/* The requested RIPAS change was recorded by the realm via RSI */
	ripas_val = rec->set_ripas.ripas_val;
	change_destroyed = rec->set_ripas.change_destroyed;

	/*
	 * Return error in case of target region:
	 * - is not the next chunk of requested region
	 * - extends beyond the end of requested region
	 */
	if ((base != rec->set_ripas.addr) || (top > rec->set_ripas.top)) {
		res->x[0] = RMI_ERROR_INPUT;
		goto out_unmap_rec;
	}

	rd = granule_map(g_rd, SLOT_RD);

	/*
	 * At this point, we know base == rec->set_ripas.addr
	 * and thus must be aligned to GRANULE size.
	 */
	assert(validate_map_addr(base, RTT_PAGE_LEVEL, rd));

	if (!validate_map_addr(top, RTT_PAGE_LEVEL, rd)) {
		res->x[0] = RMI_ERROR_INPUT;
		goto out_unmap_rd;
	}

	g_rtt_root = rd->s2_ctx.g_rtt;
	sl = realm_rtt_starting_level(rd);
	ipa_bits = realm_ipa_bits(rd);
	s2_ctx = rd->s2_ctx;

	granule_lock(g_rtt_root, GRANULE_STATE_RTT);

	/* Walk to the deepest level possible */
	rtt_walk_lock_unlock(g_rtt_root, sl, ipa_bits,
			     base, RTT_PAGE_LEVEL, &wi);

	s2tt = granule_map(wi.g_llt, SLOT_RTT);

	rtt_set_ripas_range(&s2_ctx, s2tt, base, top, &wi,
			    ripas_val, change_destroyed, res);

	/* Advance the REC progress pointer on success */
	if (res->x[0] == RMI_SUCCESS) {
		rec->set_ripas.addr = res->x[1];
	}

	buffer_unmap(s2tt);
	granule_unlock(wi.g_llt);
out_unmap_rd:
	buffer_unmap(rd);
out_unmap_rec:
	buffer_unmap(rec);
out_unlock_rec_rd:
	granule_unlock(g_rec);
	granule_unlock(g_rd);
}