/*
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * SPDX-FileCopyrightText: Copyright TF-RMM Contributors.
 */

#include <assert.h>
#include <buffer.h>
#include <granule.h>
#include <measurement.h>
#include <realm.h>
#include <ripas.h>
#include <smc-handler.h>
#include <smc-rmi.h>
#include <smc.h>
#include <stddef.h>
#include <string.h>
#include <table.h>

/*
 * Validate the map_addr value passed to RMI_RTT_* and RMI_DATA_* commands.
 */
static bool validate_map_addr(unsigned long map_addr,
			      unsigned long level,
			      struct rd *rd)
{
	if (map_addr >= realm_ipa_size(rd)) {
		return false;
	}
	if (!addr_is_level_aligned(map_addr, level)) {
		return false;
	}
	return true;
}
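
/*
 * Illustrative examples (an assumption for clarity, not quoted from the
 * spec: RMM uses a 4KB translation granule, so a level 3 entry maps a
 * 4KB page and a level 2 entry maps a 2MB block):
 *
 *   validate_map_addr(0x200000, 2UL, rd) -> true  (2MB aligned)
 *   validate_map_addr(0x201000, 2UL, rd) -> false (only 4KB aligned)
 *   validate_map_addr(0x201000, 3UL, rd) -> true  (4KB aligned)
 *
 * assuming each address is below realm_ipa_size(rd).
 */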

/*
 * Structure commands can operate on all RTTs except for the root RTT so
 * the minimal valid level is the stage 2 starting level + 1.
 */
static bool validate_rtt_structure_cmds(unsigned long map_addr,
					long level,
					struct rd *rd)
{
	int min_level = realm_rtt_starting_level(rd) + 1;

	if ((level < min_level) || (level > RTT_PAGE_LEVEL)) {
		return false;
	}
	return validate_map_addr(map_addr, level, rd);
}

/*
 * Map/Unmap commands can operate up to a level 2 block entry, so the
 * minimal valid level is the smallest block level, RTT_MIN_BLOCK_LEVEL.
 */
static bool validate_rtt_map_cmds(unsigned long map_addr,
				  long level,
				  struct rd *rd)
{
	if ((level < RTT_MIN_BLOCK_LEVEL) || (level > RTT_PAGE_LEVEL)) {
		return false;
	}
	return validate_map_addr(map_addr, level, rd);
}

/*
 * Entry commands can operate on any entry so the minimal valid level is the
 * stage 2 starting level.
 */
static bool validate_rtt_entry_cmds(unsigned long map_addr,
				    long level,
				    struct rd *rd)
{
	if ((level < realm_rtt_starting_level(rd)) ||
	    (level > RTT_PAGE_LEVEL)) {
		return false;
	}
	return validate_map_addr(map_addr, level, rd);
}

unsigned long smc_rtt_create(unsigned long rd_addr,
			     unsigned long rtt_addr,
			     unsigned long map_addr,
			     unsigned long ulevel)
{
	struct granule *g_rd;
	struct granule *g_tbl;
	struct rd *rd;
	struct granule *g_table_root;
	struct rtt_walk wi;
	unsigned long *s2tt, *parent_s2tt, parent_s2tte;
	long level = (long)ulevel;
	unsigned long ipa_bits;
	unsigned long ret;
	struct realm_s2_context s2_ctx;
	int sl;

	if (!find_lock_two_granules(rtt_addr,
				    GRANULE_STATE_DELEGATED,
				    &g_tbl,
				    rd_addr,
				    GRANULE_STATE_RD,
				    &g_rd)) {
		return RMI_ERROR_INPUT;
	}

	rd = granule_map(g_rd, SLOT_RD);

	if (!validate_rtt_structure_cmds(map_addr, level, rd)) {
		buffer_unmap(rd);
		granule_unlock(g_rd);
		granule_unlock(g_tbl);
		return RMI_ERROR_INPUT;
	}

	g_table_root = rd->s2_ctx.g_rtt;
	sl = realm_rtt_starting_level(rd);
	ipa_bits = realm_ipa_bits(rd);
	s2_ctx = rd->s2_ctx;
	buffer_unmap(rd);

	/*
	 * Lock the RTT root. Enforcing the RD->RTT locking order is enough
	 * to guarantee deadlock-free locking.
	 */
	granule_lock(g_table_root, GRANULE_STATE_RTT);

	/* Unlock RD after locking RTT Root */
	granule_unlock(g_rd);

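	/*
	 * From here on the walk uses hand-over-hand (lock-coupling)
	 * locking: each RTT is locked before its parent is released, so
	 * at every step at least one granule on the walked path is held.
	 * Sketch of the pattern used by rtt_walk_lock_unlock():
	 *
	 *   lock(child);
	 *   unlock(parent);	/- parent was locked on the previous step -/
	 *
	 * Combined with the global RD->RTT ordering above, this keeps the
	 * walk deadlock-free.
	 */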
	rtt_walk_lock_unlock(g_table_root, sl, ipa_bits,
			     map_addr, level - 1L, &wi);
	if (wi.last_level != level - 1L) {
		ret = pack_return_code(RMI_ERROR_RTT, wi.last_level);
		goto out_unlock_llt;
	}

	parent_s2tt = granule_map(wi.g_llt, SLOT_RTT);
	parent_s2tte = s2tte_read(&parent_s2tt[wi.index]);
	s2tt = granule_map(g_tbl, SLOT_DELEGATED);

	if (s2tte_is_unassigned_empty(parent_s2tte)) {
		s2tt_init_unassigned_empty(s2tt);

		/*
		 * Increase the refcount of the parent; the granule was
		 * locked during the table walk's hand-over-hand locking.
		 * Atomicity and acquire/release semantics are not required
		 * because the table is always accessed while locked.
		 */
		__granule_get(wi.g_llt);

	} else if (s2tte_is_unassigned_ram(parent_s2tte)) {
		s2tt_init_unassigned_ram(s2tt);
		__granule_get(wi.g_llt);

	} else if (s2tte_is_unassigned_ns(parent_s2tte)) {
		s2tt_init_unassigned_ns(s2tt);
		__granule_get(wi.g_llt);

	} else if (s2tte_is_destroyed(parent_s2tte)) {
		s2tt_init_destroyed(s2tt);
		__granule_get(wi.g_llt);

	} else if (s2tte_is_assigned_empty(parent_s2tte, level - 1L)) {
		unsigned long block_pa;

		/*
		 * A parent assigned s2tte can only be observed when
		 * creating a table below an existing block mapping.
		 */
		assert(level > RTT_MIN_BLOCK_LEVEL);

		block_pa = s2tte_pa(parent_s2tte, level - 1L);

		s2tt_init_assigned_empty(s2tt, block_pa, level);

		/*
		 * Increase the refcount to mark the granule as in-use.
		 * The refcount is incremented by S2TTES_PER_S2TT
		 * (see RTT unfolding).
		 */
		__granule_refcount_inc(g_tbl, S2TTES_PER_S2TT);

	} else if (s2tte_is_assigned_ram(parent_s2tte, level - 1L)) {
		unsigned long block_pa;

		/*
		 * A parent valid (assigned_ram) s2tte can only be observed
		 * when creating a table below an existing block mapping.
		 */
		assert(level > RTT_MIN_BLOCK_LEVEL);

		/*
		 * Break before make. This may cause spurious S2 aborts.
		 */
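		/*
		 * Illustrative summary of the break-before-make sequence
		 * used here (derived from the code, not a spec quote):
		 *   1. s2tte_write(..., 0UL)  - "break": invalidate the entry
		 *   2. invalidate_block(...)  - flush stale TLB entries
		 *   3. s2tte_write(..., new)  - "make": the table descriptor
		 *      is installed at the end of this function.
		 */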
		s2tte_write(&parent_s2tt[wi.index], 0UL);
		invalidate_block(&s2_ctx, map_addr);

		block_pa = s2tte_pa(parent_s2tte, level - 1L);

		s2tt_init_assigned_ram(s2tt, block_pa, level);

		/*
		 * Increase the refcount to mark the granule as in-use.
		 * The refcount is incremented by S2TTES_PER_S2TT
		 * (see RTT unfolding).
		 */
		__granule_refcount_inc(g_tbl, S2TTES_PER_S2TT);

	} else if (s2tte_is_assigned_ns(parent_s2tte, level - 1L)) {
		unsigned long block_pa;

		/*
		 * A parent assigned_ns s2tte can only be observed when
		 * creating a table below an existing block mapping.
		 */
		assert(level > RTT_MIN_BLOCK_LEVEL);

		/*
		 * Break before make. This may cause spurious S2 aborts.
		 */
		s2tte_write(&parent_s2tt[wi.index], 0UL);
		invalidate_block(&s2_ctx, map_addr);

		block_pa = s2tte_pa(parent_s2tte, level - 1L);

		s2tt_init_assigned_ns(s2tt, block_pa, level);

		/*
		 * Increase the refcount to mark the granule as in-use.
		 * The refcount is incremented by S2TTES_PER_S2TT
		 * (see RTT unfolding).
		 */
		__granule_refcount_inc(g_tbl, S2TTES_PER_S2TT);

	} else if (s2tte_is_table(parent_s2tte, level - 1L)) {
		ret = pack_return_code(RMI_ERROR_RTT,
				       (unsigned int)(level - 1L));
		goto out_unmap_table;

	} else {
		assert(false);
	}

	ret = RMI_SUCCESS;

	granule_set_state(g_tbl, GRANULE_STATE_RTT);

	parent_s2tte = s2tte_create_table(rtt_addr, level - 1L);
	s2tte_write(&parent_s2tt[wi.index], parent_s2tte);

out_unmap_table:
	buffer_unmap(s2tt);
	buffer_unmap(parent_s2tt);
out_unlock_llt:
	granule_unlock(wi.g_llt);
	granule_unlock(g_tbl);
	return ret;
}

void smc_rtt_fold(unsigned long rd_addr,
		  unsigned long map_addr,
		  unsigned long ulevel,
		  struct smc_result *res)
{
	struct granule *g_rd;
	struct granule *g_tbl;
	struct rd *rd;
	struct granule *g_table_root;
	struct rtt_walk wi;
	unsigned long *table, *parent_s2tt, parent_s2tte;
	long level = (long)ulevel;
	unsigned long ipa_bits, rtt_addr;
	unsigned long ret;
	struct realm_s2_context s2_ctx;
	int sl;

	g_rd = find_lock_granule(rd_addr, GRANULE_STATE_RD);
	if (g_rd == NULL) {
		res->x[0] = RMI_ERROR_INPUT;
		return;
	}

	rd = granule_map(g_rd, SLOT_RD);

	if (!validate_rtt_structure_cmds(map_addr, level, rd)) {
		buffer_unmap(rd);
		granule_unlock(g_rd);
		res->x[0] = RMI_ERROR_INPUT;
		return;
	}

	g_table_root = rd->s2_ctx.g_rtt;
	sl = realm_rtt_starting_level(rd);
	ipa_bits = realm_ipa_bits(rd);
	s2_ctx = rd->s2_ctx;
	buffer_unmap(rd);
	granule_lock(g_table_root, GRANULE_STATE_RTT);
	granule_unlock(g_rd);

	rtt_walk_lock_unlock(g_table_root, sl, ipa_bits,
			     map_addr, level - 1L, &wi);
	if (wi.last_level != level - 1UL) {
		ret = pack_return_code(RMI_ERROR_RTT, wi.last_level);
		goto out_unlock_parent_table;
	}

	parent_s2tt = granule_map(wi.g_llt, SLOT_RTT);
	parent_s2tte = s2tte_read(&parent_s2tt[wi.index]);
	if (!s2tte_is_table(parent_s2tte, level - 1L)) {
		ret = pack_return_code(RMI_ERROR_RTT,
				       (unsigned int)(level - 1L));
		goto out_unmap_parent_table;
	}

	rtt_addr = s2tte_pa_table(parent_s2tte, level - 1L);
	g_tbl = find_lock_granule(rtt_addr, GRANULE_STATE_RTT);

	/*
	 * A table descriptor S2TTE always points to a TABLE granule.
	 */
	assert(g_tbl != NULL);

	table = granule_map(g_tbl, SLOT_RTT2);

	/*
	 * The command can succeed only if all 512 S2TTEs are of the same type.
	 * We first check the table's ref. counter to speed up the case when
	 * the host makes a guess whether a memory region can be folded.
	 */
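	/*
	 * Interpretation of the refcount checks below (as maintained by
	 * this file): 0 means no entry in the child table is assigned,
	 * S2TTES_PER_S2TT means every entry is assigned, and any other
	 * value is a mixture that cannot be folded.
	 */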
	if (g_tbl->refcount == 0UL) {
		if (table_is_destroyed_block(table)) {
			parent_s2tte = s2tte_create_destroyed();
		} else if (table_is_unassigned_empty_block(table)) {
			parent_s2tte = s2tte_create_unassigned_empty();
		} else if (table_is_unassigned_ram_block(table)) {
			parent_s2tte = s2tte_create_unassigned_ram();
		} else if (table_is_unassigned_ns_block(table)) {
			parent_s2tte = s2tte_create_unassigned_ns();
		} else if (table_maps_assigned_ns_block(table, level)) {
			unsigned long s2tte = s2tte_read(&table[0]);
			unsigned long block_pa = s2tte_pa(s2tte, level);

			parent_s2tte = s2tte_create_assigned_ns(block_pa, level - 1L);
		} else {
			/*
			 * The table holds a mixture of destroyed and
			 * unassigned entries.
			 */
			ret = pack_return_code(RMI_ERROR_RTT, level);
			goto out_unmap_table;
		}
		__granule_put(wi.g_llt);
	} else if (g_tbl->refcount == S2TTES_PER_S2TT) {

		unsigned long s2tte, block_pa;

		/*
		 * The RMM specification does not allow creating block
		 * entries at a level lower than RTT_MIN_BLOCK_LEVEL even
		 * though the Arm architecture permits it.
		 * Hence ensure that the table being folded is at a level
		 * higher than RTT_MIN_BLOCK_LEVEL.
		 *
		 * A fully populated table cannot be destroyed if that
		 * would create a block mapping below RTT_MIN_BLOCK_LEVEL.
		 */
		if (level <= RTT_MIN_BLOCK_LEVEL) {
			ret = pack_return_code(RMI_ERROR_RTT, wi.last_level);
			goto out_unmap_table;
		}

		s2tte = s2tte_read(&table[0]);
		block_pa = s2tte_pa(s2tte, level);

		/*
		 * The table must also map a contiguous block through the
		 * same type of s2tte, either assigned_empty or assigned_ram.
		 */
		if (table_maps_assigned_empty_block(table, level)) {
			parent_s2tte = s2tte_create_assigned_empty(block_pa,
								   level - 1L);
		} else if (table_maps_assigned_ram_block(table, level)) {
			parent_s2tte = s2tte_create_assigned_ram(block_pa,
								 level - 1L);
		} else {
			/* The table contains mixed entries that cannot be folded */
			ret = pack_return_code(RMI_ERROR_RTT, level);
			goto out_unmap_table;
		}

		__granule_refcount_dec(g_tbl, S2TTES_PER_S2TT);
	} else {
		/*
		 * The table holds a mixture of different types of s2ttes.
		 */
		ret = pack_return_code(RMI_ERROR_RTT, level);
		goto out_unmap_table;
	}

	ret = RMI_SUCCESS;
	res->x[1] = rtt_addr;

	/*
	 * Break before make.
	 */
	s2tte_write(&parent_s2tt[wi.index], 0UL);

	if (s2tte_is_assigned_ram(parent_s2tte, level - 1L) ||
	    s2tte_is_assigned_ns(parent_s2tte, level - 1L)) {
		invalidate_pages_in_block(&s2_ctx, map_addr);
	} else {
		invalidate_block(&s2_ctx, map_addr);
	}

	s2tte_write(&parent_s2tt[wi.index], parent_s2tte);

	granule_memzero_mapped(table);
	granule_set_state(g_tbl, GRANULE_STATE_DELEGATED);

out_unmap_table:
	buffer_unmap(table);
	granule_unlock(g_tbl);
out_unmap_parent_table:
	buffer_unmap(parent_s2tt);
out_unlock_parent_table:
	granule_unlock(wi.g_llt);
	res->x[0] = ret;
}

void smc_rtt_destroy(unsigned long rd_addr,
		     unsigned long map_addr,
		     unsigned long ulevel,
		     struct smc_result *res)
{
	struct granule *g_rd;
	struct granule *g_tbl;
	struct rd *rd;
	struct granule *g_table_root;
	struct rtt_walk wi;
	unsigned long *table, *parent_s2tt, parent_s2tte;
	long level = (long)ulevel;
	unsigned long ipa_bits, rtt_addr;
	unsigned long ret;
	struct realm_s2_context s2_ctx;
	int sl;
	bool in_par, skip_non_live = false;

	g_rd = find_lock_granule(rd_addr, GRANULE_STATE_RD);
	if (g_rd == NULL) {
		res->x[0] = RMI_ERROR_INPUT;
		return;
	}

	rd = granule_map(g_rd, SLOT_RD);

	if (!validate_rtt_structure_cmds(map_addr, level, rd)) {
		buffer_unmap(rd);
		granule_unlock(g_rd);
		res->x[0] = RMI_ERROR_INPUT;
		return;
	}

	g_table_root = rd->s2_ctx.g_rtt;
	sl = realm_rtt_starting_level(rd);
	ipa_bits = realm_ipa_bits(rd);
	s2_ctx = rd->s2_ctx;
	in_par = addr_in_par(rd, map_addr);
	buffer_unmap(rd);
	granule_lock(g_table_root, GRANULE_STATE_RTT);
	granule_unlock(g_rd);

	rtt_walk_lock_unlock(g_table_root, sl, ipa_bits,
			     map_addr, level - 1L, &wi);

	parent_s2tt = granule_map(wi.g_llt, SLOT_RTT);
	parent_s2tte = s2tte_read(&parent_s2tt[wi.index]);

	if ((wi.last_level != level - 1UL) ||
	    !s2tte_is_table(parent_s2tte, level - 1UL)) {
		ret = pack_return_code(RMI_ERROR_RTT, wi.last_level);
		skip_non_live = true;
		goto out_unmap_parent_table;
	}

	rtt_addr = s2tte_pa_table(parent_s2tte, level - 1L);

	/*
	 * Lock the RTT granule. The 'rtt_addr' is verified, thus can be treated
	 * as an internal granule.
	 */
	g_tbl = find_lock_granule(rtt_addr, GRANULE_STATE_RTT);

	/*
	 * A table descriptor S2TTE always points to a TABLE granule.
	 */
	assert(g_tbl != NULL);

	/*
	 * Read the refcount value. The RTT granule is always accessed locked,
	 * thus the refcount can be accessed without atomic operations.
	 */
	if (g_tbl->refcount != 0UL) {
		ret = pack_return_code(RMI_ERROR_RTT, level);
		goto out_unlock_table;
	}

	ret = RMI_SUCCESS;
	res->x[1] = rtt_addr;
	skip_non_live = true;

	table = granule_map(g_tbl, SLOT_RTT2);

	if (in_par) {
		parent_s2tte = s2tte_create_destroyed();
	} else {
		parent_s2tte = s2tte_create_unassigned_ns();
	}

	__granule_put(wi.g_llt);

	/*
	 * Break before make. Note that this may cause spurious S2 aborts.
	 */
	s2tte_write(&parent_s2tt[wi.index], 0UL);
	invalidate_block(&s2_ctx, map_addr);
	s2tte_write(&parent_s2tt[wi.index], parent_s2tte);

	granule_memzero_mapped(table);
	granule_set_state(g_tbl, GRANULE_STATE_DELEGATED);

	buffer_unmap(table);
out_unlock_table:
	granule_unlock(g_tbl);
out_unmap_parent_table:
	if (skip_non_live) {
		res->x[2] = skip_non_live_entries(map_addr, parent_s2tt, &wi);
	}
	buffer_unmap(parent_s2tt);
	granule_unlock(wi.g_llt);
	res->x[0] = ret;
}

enum map_unmap_ns_op {
	MAP_NS,
	UNMAP_NS
};

/*
 * We don't hold a reference on the NS granule when it is
 * mapped into a realm. Instead we rely on the guarantees
 * provided by the architecture to ensure that a NS access
 * to a protected granule is prohibited even within the realm.
 */
static void map_unmap_ns(unsigned long rd_addr,
			 unsigned long map_addr,
			 long level,
			 unsigned long host_s2tte,
			 enum map_unmap_ns_op op,
			 struct smc_result *res)
{
	struct granule *g_rd;
	struct rd *rd;
	struct granule *g_table_root;
	unsigned long *s2tt, s2tte;
	struct rtt_walk wi;
	unsigned long ipa_bits;
	struct realm_s2_context s2_ctx;
	int sl;

	g_rd = find_lock_granule(rd_addr, GRANULE_STATE_RD);
	if (g_rd == NULL) {
		res->x[0] = RMI_ERROR_INPUT;
		return;
	}

	rd = granule_map(g_rd, SLOT_RD);

	if (!validate_rtt_map_cmds(map_addr, level, rd)) {
		buffer_unmap(rd);
		granule_unlock(g_rd);
		res->x[0] = RMI_ERROR_INPUT;
		return;
	}

	g_table_root = rd->s2_ctx.g_rtt;
	sl = realm_rtt_starting_level(rd);
	ipa_bits = realm_ipa_bits(rd);

	/* map_addr must lie outside the PAR */
	if (addr_in_par(rd, map_addr)) {
		buffer_unmap(rd);
		granule_unlock(g_rd);
		res->x[0] = RMI_ERROR_INPUT;
		return;
	}

	s2_ctx = rd->s2_ctx;
	buffer_unmap(rd);

	granule_lock(g_table_root, GRANULE_STATE_RTT);
	granule_unlock(g_rd);

	rtt_walk_lock_unlock(g_table_root, sl, ipa_bits,
			     map_addr, level, &wi);

	/*
	 * For UNMAP_NS we proceed even if the walk stopped early: we need
	 * to map the table and look for the end of the non-live region.
	 */
	if (op == MAP_NS && wi.last_level != level) {
		res->x[0] = pack_return_code(RMI_ERROR_RTT, wi.last_level);
		goto out_unlock_llt;
	}

	s2tt = granule_map(wi.g_llt, SLOT_RTT);
	s2tte = s2tte_read(&s2tt[wi.index]);

	if (op == MAP_NS) {
		if (!s2tte_is_unassigned_ns(s2tte)) {
			res->x[0] = pack_return_code(RMI_ERROR_RTT,
						     (unsigned int)level);
			goto out_unmap_table;
		}

		s2tte = s2tte_create_assigned_ns(host_s2tte, level);
		s2tte_write(&s2tt[wi.index], s2tte);

	} else if (op == UNMAP_NS) {
		/*
		 * The following check also verifies that map_addr is outside
		 * the PAR, as an assigned_ns s2tte may only cover an IPA
		 * range outside the PAR.
		 */
		bool assigned_ns = s2tte_is_assigned_ns(s2tte, wi.last_level);

		if ((wi.last_level != level) || !assigned_ns) {
			res->x[0] = pack_return_code(RMI_ERROR_RTT,
						     (unsigned int)wi.last_level);
			goto out_unmap_table;
		}

		s2tte = s2tte_create_unassigned_ns();
		s2tte_write(&s2tt[wi.index], s2tte);
		if (level == RTT_PAGE_LEVEL) {
			invalidate_page(&s2_ctx, map_addr);
		} else {
			invalidate_block(&s2_ctx, map_addr);
		}
	}

	res->x[0] = RMI_SUCCESS;

out_unmap_table:
	if (op == UNMAP_NS) {
		res->x[1] = skip_non_live_entries(map_addr, s2tt, &wi);
	}
	buffer_unmap(s2tt);
out_unlock_llt:
	granule_unlock(wi.g_llt);
}

unsigned long smc_rtt_map_unprotected(unsigned long rd_addr,
				      unsigned long map_addr,
				      unsigned long ulevel,
				      unsigned long s2tte)
{
	long level = (long)ulevel;
	struct smc_result res;

	if (!host_ns_s2tte_is_valid(s2tte, level)) {
		return RMI_ERROR_INPUT;
	}

	map_unmap_ns(rd_addr, map_addr, level, s2tte, MAP_NS, &res);
	return res.x[0];
}

void smc_rtt_unmap_unprotected(unsigned long rd_addr,
			       unsigned long map_addr,
			       unsigned long ulevel,
			       struct smc_result *res)
{
	map_unmap_ns(rd_addr, map_addr, (long)ulevel, 0UL, UNMAP_NS, res);
}

void smc_rtt_read_entry(unsigned long rd_addr,
			unsigned long map_addr,
			unsigned long ulevel,
			struct smc_result *ret)
{
	struct granule *g_rd, *g_rtt_root;
	struct rd *rd;
	struct rtt_walk wi;
	unsigned long *s2tt, s2tte;
	unsigned long ipa_bits;
	long level = (long)ulevel;
	int sl;

	g_rd = find_lock_granule(rd_addr, GRANULE_STATE_RD);
	if (g_rd == NULL) {
		ret->x[0] = RMI_ERROR_INPUT;
		return;
	}

	rd = granule_map(g_rd, SLOT_RD);

	if (!validate_rtt_entry_cmds(map_addr, level, rd)) {
		buffer_unmap(rd);
		granule_unlock(g_rd);
		ret->x[0] = RMI_ERROR_INPUT;
		return;
	}

	g_rtt_root = rd->s2_ctx.g_rtt;
	sl = realm_rtt_starting_level(rd);
	ipa_bits = realm_ipa_bits(rd);
	buffer_unmap(rd);

	granule_lock(g_rtt_root, GRANULE_STATE_RTT);
	granule_unlock(g_rd);

	rtt_walk_lock_unlock(g_rtt_root, sl, ipa_bits,
			     map_addr, level, &wi);
	s2tt = granule_map(wi.g_llt, SLOT_RTT);
	s2tte = s2tte_read(&s2tt[wi.index]);
	ret->x[1] = wi.last_level;

	if (s2tte_is_unassigned_empty(s2tte)) {
		ret->x[2] = RMI_UNASSIGNED;
		ret->x[3] = 0UL;
		ret->x[4] = RIPAS_EMPTY;
	} else if (s2tte_is_unassigned_ram(s2tte)) {
		ret->x[2] = RMI_UNASSIGNED;
		ret->x[3] = 0UL;
		ret->x[4] = RIPAS_RAM;
	} else if (s2tte_is_destroyed(s2tte)) {
		ret->x[2] = RMI_DESTROYED;
		ret->x[3] = 0UL;
		ret->x[4] = RIPAS_UNDEFINED;
	} else if (s2tte_is_assigned_empty(s2tte, wi.last_level)) {
		ret->x[2] = RMI_ASSIGNED;
		ret->x[3] = s2tte_pa(s2tte, wi.last_level);
		ret->x[4] = RIPAS_EMPTY;
	} else if (s2tte_is_assigned_ram(s2tte, wi.last_level)) {
		ret->x[2] = RMI_ASSIGNED;
		ret->x[3] = s2tte_pa(s2tte, wi.last_level);
		ret->x[4] = RIPAS_RAM;
	} else if (s2tte_is_unassigned_ns(s2tte)) {
		ret->x[2] = RMI_UNASSIGNED;
		ret->x[3] = 0UL;
		ret->x[4] = RIPAS_UNDEFINED;
	} else if (s2tte_is_assigned_ns(s2tte, wi.last_level)) {
		ret->x[2] = RMI_ASSIGNED;
		ret->x[3] = host_ns_s2tte(s2tte, wi.last_level);
		ret->x[4] = RIPAS_UNDEFINED;
	} else if (s2tte_is_table(s2tte, wi.last_level)) {
		ret->x[2] = RMI_TABLE;
		ret->x[3] = s2tte_pa_table(s2tte, wi.last_level);
		ret->x[4] = RIPAS_UNDEFINED;
	} else {
		assert(false);
	}

	buffer_unmap(s2tt);
	granule_unlock(wi.g_llt);

	ret->x[0] = RMI_SUCCESS;
}

static void data_granule_measure(struct rd *rd, void *data,
				 unsigned long ipa,
				 unsigned long flags)
{
	struct measurement_desc_data measure_desc = {0};

	/* Initialize the measurement descriptor structure */
	measure_desc.desc_type = MEASURE_DESC_TYPE_DATA;
	measure_desc.len = sizeof(struct measurement_desc_data);
	measure_desc.ipa = ipa;
	measure_desc.flags = flags;
	memcpy(measure_desc.rim,
	       &rd->measurement[RIM_MEASUREMENT_SLOT],
	       measurement_get_size(rd->algorithm));

	if (flags == RMI_MEASURE_CONTENT) {
		/*
		 * Hash the data granule and store the result in the
		 * measurement descriptor structure.
		 */
		measurement_hash_compute(rd->algorithm,
					 data,
					 GRANULE_SIZE,
					 measure_desc.content);
	}

	/*
	 * Hash the measurement descriptor structure; the result is the
	 * updated RIM.
	 */
	measurement_hash_compute(rd->algorithm,
				 &measure_desc,
				 sizeof(measure_desc),
				 rd->measurement[RIM_MEASUREMENT_SLOT]);
}
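
/*
 * In effect, a Data.Create measurement extends the RIM as (informal
 * sketch of the code above, with H = the realm's hash algorithm):
 *
 *   RIM' = H(desc), where desc = { type, len, old RIM, ipa, flags,
 *                                  H(data) if RMI_MEASURE_CONTENT }
 *
 * When RMI_NO_MEASURE_CONTENT is used, the content field stays zeroed.
 */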

static unsigned long validate_data_create_unknown(unsigned long map_addr,
						  struct rd *rd)
{
	if (!addr_in_par(rd, map_addr)) {
		return RMI_ERROR_INPUT;
	}

	if (!validate_map_addr(map_addr, RTT_PAGE_LEVEL, rd)) {
		return RMI_ERROR_INPUT;
	}

	return RMI_SUCCESS;
}

static unsigned long validate_data_create(unsigned long map_addr,
					  struct rd *rd)
{
	if (get_rd_state_locked(rd) != REALM_STATE_NEW) {
		return RMI_ERROR_REALM;
	}

	return validate_data_create_unknown(map_addr, rd);
}

/*
 * Implements both Data.Create and Data.CreateUnknown.
 *
 * If @g_src == NULL, this implements Data.CreateUnknown;
 * otherwise it implements Data.Create.
 */
static unsigned long data_create(unsigned long rd_addr,
				 unsigned long data_addr,
				 unsigned long map_addr,
				 struct granule *g_src,
				 unsigned long flags)
{
	struct granule *g_data;
	struct granule *g_rd;
	struct granule *g_table_root;
	struct rd *rd;
	struct rtt_walk wi;
	unsigned long s2tte, *s2tt;
	enum granule_state new_data_state = GRANULE_STATE_DELEGATED;
	unsigned long ipa_bits;
	unsigned long ret;
	int __unused meas_ret;
	int sl;

	if (!find_lock_two_granules(data_addr,
				    GRANULE_STATE_DELEGATED,
				    &g_data,
				    rd_addr,
				    GRANULE_STATE_RD,
				    &g_rd)) {
		return RMI_ERROR_INPUT;
	}

	rd = granule_map(g_rd, SLOT_RD);

	ret = (g_src != NULL) ?
		validate_data_create(map_addr, rd) :
		validate_data_create_unknown(map_addr, rd);

	if (ret != RMI_SUCCESS) {
		goto out_unmap_rd;
	}

	g_table_root = rd->s2_ctx.g_rtt;
	sl = realm_rtt_starting_level(rd);
	ipa_bits = realm_ipa_bits(rd);
	granule_lock(g_table_root, GRANULE_STATE_RTT);
	rtt_walk_lock_unlock(g_table_root, sl, ipa_bits,
			     map_addr, RTT_PAGE_LEVEL, &wi);
	if (wi.last_level != RTT_PAGE_LEVEL) {
		ret = pack_return_code(RMI_ERROR_RTT, wi.last_level);
		goto out_unlock_ll_table;
	}

	s2tt = granule_map(wi.g_llt, SLOT_RTT);
	s2tte = s2tte_read(&s2tt[wi.index]);
	if (!s2tte_is_unassigned_ram(s2tte)) {
		ret = pack_return_code(RMI_ERROR_RTT, RTT_PAGE_LEVEL);
		goto out_unmap_ll_table;
	}

	if (g_src != NULL) {
		bool ns_access_ok;
		void *data = granule_map(g_data, SLOT_DELEGATED);

		ns_access_ok = ns_buffer_read(SLOT_NS, g_src, 0U,
					      GRANULE_SIZE, data);

		if (!ns_access_ok) {
			/*
			 * Some data may be copied before the failure. Zero
			 * g_data granule as it will remain in delegated state.
			 */
			(void)memset(data, 0, GRANULE_SIZE);
			buffer_unmap(data);
			ret = RMI_ERROR_INPUT;
			goto out_unmap_ll_table;
		}

		data_granule_measure(rd, data, map_addr, flags);

		buffer_unmap(data);
	}

	new_data_state = GRANULE_STATE_DATA;

	s2tte = s2tte_create_assigned_ram(data_addr, RTT_PAGE_LEVEL);

	s2tte_write(&s2tt[wi.index], s2tte);
	__granule_get(wi.g_llt);

	ret = RMI_SUCCESS;

out_unmap_ll_table:
	buffer_unmap(s2tt);
out_unlock_ll_table:
	granule_unlock(wi.g_llt);
out_unmap_rd:
	buffer_unmap(rd);
	granule_unlock(g_rd);
	granule_unlock_transition(g_data, new_data_state);
	return ret;
}

unsigned long smc_data_create(unsigned long rd_addr,
			      unsigned long data_addr,
			      unsigned long map_addr,
			      unsigned long src_addr,
			      unsigned long flags)
{
	struct granule *g_src;

	if (flags != RMI_NO_MEASURE_CONTENT && flags != RMI_MEASURE_CONTENT) {
		return RMI_ERROR_INPUT;
	}

	g_src = find_granule(src_addr);
	if ((g_src == NULL) || (g_src->state != GRANULE_STATE_NS)) {
		return RMI_ERROR_INPUT;
	}

	return data_create(rd_addr, data_addr, map_addr, g_src, flags);
}

unsigned long smc_data_create_unknown(unsigned long rd_addr,
				      unsigned long data_addr,
				      unsigned long map_addr)
{
	return data_create(rd_addr, data_addr, map_addr, NULL, 0);
}
958
AlexeiFedorove2002be2023-04-19 17:20:12 +0100959void smc_data_destroy(unsigned long rd_addr,
960 unsigned long map_addr,
961 struct smc_result *res)
Soby Mathewb4c6df42022-11-09 11:13:29 +0000962{
963 struct granule *g_data;
964 struct granule *g_rd;
965 struct granule *g_table_root;
966 struct rtt_walk wi;
967 unsigned long data_addr, s2tte, *s2tt;
968 struct rd *rd;
AlexeiFedorov917eabf2023-04-24 12:20:41 +0100969 unsigned long ipa_bits;
Soby Mathewb4c6df42022-11-09 11:13:29 +0000970 struct realm_s2_context s2_ctx;
Soby Mathewb4c6df42022-11-09 11:13:29 +0000971 int sl;
972
973 g_rd = find_lock_granule(rd_addr, GRANULE_STATE_RD);
974 if (g_rd == NULL) {
AlexeiFedorove2002be2023-04-19 17:20:12 +0100975 res->x[0] = RMI_ERROR_INPUT;
976 return;
Soby Mathewb4c6df42022-11-09 11:13:29 +0000977 }
978
979 rd = granule_map(g_rd, SLOT_RD);
980
981 if (!validate_map_addr(map_addr, RTT_PAGE_LEVEL, rd)) {
982 buffer_unmap(rd);
983 granule_unlock(g_rd);
AlexeiFedorove2002be2023-04-19 17:20:12 +0100984 res->x[0] = RMI_ERROR_INPUT;
985 return;
Soby Mathewb4c6df42022-11-09 11:13:29 +0000986 }
987
988 g_table_root = rd->s2_ctx.g_rtt;
989 sl = realm_rtt_starting_level(rd);
990 ipa_bits = realm_ipa_bits(rd);
991 s2_ctx = rd->s2_ctx;
992 buffer_unmap(rd);
993
994 granule_lock(g_table_root, GRANULE_STATE_RTT);
995 granule_unlock(g_rd);
996
997 rtt_walk_lock_unlock(g_table_root, sl, ipa_bits,
998 map_addr, RTT_PAGE_LEVEL, &wi);
Soby Mathewb4c6df42022-11-09 11:13:29 +0000999
1000 s2tt = granule_map(wi.g_llt, SLOT_RTT);
AlexeiFedorov917eabf2023-04-24 12:20:41 +01001001 if (wi.last_level != RTT_PAGE_LEVEL) {
1002 res->x[0] = pack_return_code(RMI_ERROR_RTT, wi.last_level);
1003 goto out_unmap_ll_table;
1004 }
Soby Mathewb4c6df42022-11-09 11:13:29 +00001005
AlexeiFedorov917eabf2023-04-24 12:20:41 +01001006 s2tte = s2tte_read(&s2tt[wi.index]);
AlexeiFedorova43cd312023-04-17 11:42:25 +01001007 if (s2tte_is_assigned_ram(s2tte, RTT_PAGE_LEVEL)) {
1008 data_addr = s2tte_pa(s2tte, RTT_PAGE_LEVEL);
1009 s2tte = s2tte_create_destroyed();
1010 s2tte_write(&s2tt[wi.index], s2tte);
1011 invalidate_page(&s2_ctx, map_addr);
1012 } else if (s2tte_is_assigned_empty(s2tte, RTT_PAGE_LEVEL)) {
1013 data_addr = s2tte_pa(s2tte, RTT_PAGE_LEVEL);
1014 s2tte = s2tte_create_unassigned_empty();
1015 s2tte_write(&s2tt[wi.index], s2tte);
1016 } else {
AlexeiFedorov917eabf2023-04-24 12:20:41 +01001017 res->x[0] = pack_return_code(RMI_ERROR_RTT, RTT_PAGE_LEVEL);
Soby Mathewb4c6df42022-11-09 11:13:29 +00001018 goto out_unmap_ll_table;
1019 }
1020
Soby Mathewb4c6df42022-11-09 11:13:29 +00001021 __granule_put(wi.g_llt);
1022
1023 /*
1024 * Lock the data granule and check expected state. Correct locking order
1025 * is guaranteed because granule address is obtained from a locked
1026 * granule by table walk. This lock needs to be acquired before a state
1027 * transition to or from GRANULE_STATE_DATA for granule address can happen.
1028 */
1029 g_data = find_lock_granule(data_addr, GRANULE_STATE_DATA);
AlexeiFedorov63b71692023-04-19 11:18:42 +01001030 assert(g_data != NULL);
Soby Mathewb4c6df42022-11-09 11:13:29 +00001031 granule_memzero(g_data, SLOT_DELEGATED);
1032 granule_unlock_transition(g_data, GRANULE_STATE_DELEGATED);
1033
AlexeiFedorov917eabf2023-04-24 12:20:41 +01001034 res->x[0] = RMI_SUCCESS;
AlexeiFedorove2002be2023-04-19 17:20:12 +01001035 res->x[1] = data_addr;
Soby Mathewb4c6df42022-11-09 11:13:29 +00001036out_unmap_ll_table:
AlexeiFedorov917eabf2023-04-24 12:20:41 +01001037 res->x[2] = skip_non_live_entries(map_addr, s2tt, &wi);
Soby Mathewb4c6df42022-11-09 11:13:29 +00001038 buffer_unmap(s2tt);
Soby Mathewb4c6df42022-11-09 11:13:29 +00001039 granule_unlock(wi.g_llt);
Soby Mathewb4c6df42022-11-09 11:13:29 +00001040}

/*
 * Update the ripas value for the entry pointed to by @s2ttep.
 *
 * Returns:
 * < 0 - The operation was aborted with an error,
 *       e.g. the entry cannot have a RIPAS.
 *   0 - The operation succeeded and no TLBI is required.
 * > 0 - The operation succeeded and a TLBI is required.
 */
static int update_ripas(unsigned long *s2ttep, unsigned long level,
			enum ripas ripas_val)
{
	unsigned long pa, s2tte = s2tte_read(s2ttep);
	int ret = 0;

	if (!s2tte_has_ripas(s2tte, level)) {
		return -1;
	}

	if (ripas_val == RIPAS_RAM) {
		if (s2tte_is_unassigned_empty(s2tte)) {
			s2tte = s2tte_create_unassigned_ram();
		} else if (s2tte_is_assigned_empty(s2tte, level)) {
			pa = s2tte_pa(s2tte, level);
			s2tte = s2tte_create_assigned_ram(pa, level);
		} else {
			/* No action is required */
			return 0;
		}
	} else if (ripas_val == RIPAS_EMPTY) {
		if (s2tte_is_unassigned_ram(s2tte)) {
			s2tte = s2tte_create_unassigned_empty();
		} else if (s2tte_is_assigned_ram(s2tte, level)) {
			pa = s2tte_pa(s2tte, level);
			s2tte = s2tte_create_assigned_empty(pa, level);
			/* TLBI is required */
			ret = 1;
		} else {
			/* No action is required */
			return 0;
		}
	}
	s2tte_write(s2ttep, s2tte);
	return ret;
}
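
/*
 * Transition summary for update_ripas() (derived from the code above,
 * not quoted from the RMM specification):
 *
 *   ripas_val    old s2tte             new s2tte            return
 *   RIPAS_RAM    unassigned_empty  ->  unassigned_ram       0
 *   RIPAS_RAM    assigned_empty    ->  assigned_ram         0
 *   RIPAS_EMPTY  unassigned_ram    ->  unassigned_empty     0
 *   RIPAS_EMPTY  assigned_ram      ->  assigned_empty       1 (TLBI)
 *   any          entry w/o RIPAS   ->  unchanged           -1
 */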

static void ripas_granule_measure(struct rd *rd,
				  unsigned long base,
				  unsigned long top)
{
	struct measurement_desc_ripas measure_desc = {0};

	/* Initialize the measurement descriptor structure */
	measure_desc.desc_type = MEASURE_DESC_TYPE_RIPAS;
	measure_desc.len = sizeof(struct measurement_desc_ripas);
	measure_desc.base = base;
	measure_desc.top = top;
	(void)memcpy(measure_desc.rim,
		     &rd->measurement[RIM_MEASUREMENT_SLOT],
		     measurement_get_size(rd->algorithm));

	/*
	 * Hash the measurement descriptor structure; the result is the
	 * updated RIM.
	 */
	measurement_hash_compute(rd->algorithm,
				 &measure_desc,
				 sizeof(measure_desc),
				 rd->measurement[RIM_MEASUREMENT_SLOT]);
}

void smc_rtt_init_ripas(unsigned long rd_addr,
			unsigned long base,
			unsigned long top,
			struct smc_result *res)
{
	struct granule *g_rd, *g_rtt_root;
	struct rd *rd;
	unsigned long ipa_bits, addr, map_size;
	struct rtt_walk wi;
	unsigned long s2tte, *s2tt;
	long level;
	unsigned int index;
	int sl;

	g_rd = find_lock_granule(rd_addr, GRANULE_STATE_RD);
	if (g_rd == NULL) {
		res->x[0] = RMI_ERROR_INPUT;
		return;
	}

	rd = granule_map(g_rd, SLOT_RD);

	if (get_rd_state_locked(rd) != REALM_STATE_NEW) {
		buffer_unmap(rd);
		granule_unlock(g_rd);
		res->x[0] = RMI_ERROR_REALM;
		return;
	}

	if (!validate_map_addr(base, RTT_PAGE_LEVEL, rd) ||
	    !validate_rtt_entry_cmds(top, RTT_PAGE_LEVEL, rd)) {
		buffer_unmap(rd);
		granule_unlock(g_rd);
		res->x[0] = RMI_ERROR_INPUT;
		return;
	}

	if (!addr_in_par(rd, base) || !addr_in_par(rd, top)) {
		buffer_unmap(rd);
		granule_unlock(g_rd);
		res->x[0] = RMI_ERROR_INPUT;
		return;
	}

	g_rtt_root = rd->s2_ctx.g_rtt;
	sl = realm_rtt_starting_level(rd);
	ipa_bits = realm_ipa_bits(rd);

	granule_lock(g_rtt_root, GRANULE_STATE_RTT);
	granule_unlock(g_rd);

	rtt_walk_lock_unlock(g_rtt_root, sl, ipa_bits,
			     base, RTT_PAGE_LEVEL, &wi);
	level = wi.last_level;
	s2tt = granule_map(wi.g_llt, SLOT_RTT);
	map_size = s2tte_map_size(level);
	addr = base & ~(map_size - 1UL);

	/*
	 * If the RTTE covers a range below "base", we need to
	 * go deeper.
	 */
	if (addr != base) {
		res->x[0] = pack_return_code(RMI_ERROR_RTT,
					     (unsigned int)level);
		goto out_unmap_llt;
	}

	for (index = wi.index; index < S2TTES_PER_S2TT;
	     index++, addr += map_size) {
		unsigned long next = addr + map_size;

		if (next > top) {
			break;
		}

		s2tte = s2tte_read(&s2tt[index]);
		if (s2tte_is_unassigned_empty(s2tte)) {
			s2tte = s2tte_create_unassigned_ram();
			s2tte_write(&s2tt[index], s2tte);
		} else if (!s2tte_is_unassigned_ram(s2tte)) {
			break;
		}
		ripas_granule_measure(rd, addr, next);
	}

	if (addr > base) {
		res->x[0] = RMI_SUCCESS;
		res->x[1] = addr;
	} else {
		res->x[0] = pack_return_code(RMI_ERROR_RTT,
					     (unsigned int)level);
	}

out_unmap_llt:
	buffer_unmap(s2tt);
	buffer_unmap(rd);
	granule_unlock(wi.g_llt);
}

static void rtt_set_ripas_range(struct realm_s2_context *s2_ctx,
				unsigned long *s2tt,
				unsigned long base,
				unsigned long top,
				struct rtt_walk *wi,
				unsigned long ripas_val,
				struct smc_result *res)
{
	unsigned long addr;
	unsigned int index = wi->index;
	long level = wi->last_level;
	unsigned long map_size = s2tte_map_size(level);

	/* Align to the RTT level */
	addr = base & ~(map_size - 1UL);

	/* Make sure we don't touch a range below the requested range */
	if (addr != base) {
		res->x[0] = pack_return_code(RMI_ERROR_RTT, level);
		return;
	}
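
	/*
	 * Example for the check above (assuming a 4KB granule): if the
	 * walk stopped at level 2 (map_size = 2MB) and base = 0x300000,
	 * then addr = 0x200000 != base, so the host must first create a
	 * deeper RTT before the RIPAS of 0x300000 can be changed.
	 */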

	for (index = wi->index; index < S2TTES_PER_S2TT;
	     index++, addr += map_size) {
		unsigned long next = addr + map_size;
		int ret;

		/* If this entry crosses the requested range, stop. */
		if (next > top) {
			break;
		}

		ret = update_ripas(&s2tt[index], level, ripas_val);
		if (ret < 0) {
			break;
		}

		/* Handle TLBI */
		if (ret != 0) {
			if (level == RTT_PAGE_LEVEL) {
				invalidate_page(s2_ctx, addr);
			} else {
				invalidate_block(s2_ctx, addr);
			}
		}
	}

	if (addr > base) {
		res->x[0] = RMI_SUCCESS;
		res->x[1] = addr;
	} else {
		res->x[0] = pack_return_code(RMI_ERROR_RTT, level);
	}
}

void smc_rtt_set_ripas(unsigned long rd_addr,
		       unsigned long rec_addr,
		       unsigned long base,
		       unsigned long top,
		       struct smc_result *res)
{
	struct granule *g_rd, *g_rec, *g_rtt_root;
	struct rec *rec;
	struct rd *rd;
	unsigned long ipa_bits;
	struct rtt_walk wi;
	unsigned long *s2tt;
	struct realm_s2_context s2_ctx;
	enum ripas ripas_val;
	int sl;

	if (!find_lock_two_granules(rd_addr,
				    GRANULE_STATE_RD,
				    &g_rd,
				    rec_addr,
				    GRANULE_STATE_REC,
				    &g_rec)) {
		res->x[0] = RMI_ERROR_INPUT;
		return;
	}

	if (granule_refcount_read_acquire(g_rec) != 0UL) {
		res->x[0] = RMI_ERROR_REC;
		goto out_unlock_rec_rd;
	}

	rec = granule_map(g_rec, SLOT_REC);

	if (g_rd != rec->realm_info.g_rd) {
		res->x[0] = RMI_ERROR_REC;
		goto out_unmap_rec;
	}

	ripas_val = rec->set_ripas.ripas_val;

	/*
	 * Return an error if the target region:
	 * - is not the next chunk of the requested region, or
	 * - extends beyond the end of the requested region.
	 */
	if ((base != rec->set_ripas.addr) || (top > rec->set_ripas.top)) {
		res->x[0] = RMI_ERROR_INPUT;
		goto out_unmap_rec;
	}

	rd = granule_map(g_rd, SLOT_RD);

	/*
	 * At this point we know that base == rec->set_ripas.addr
	 * and thus it must be aligned to the granule size.
	 */
	assert(validate_map_addr(base, RTT_PAGE_LEVEL, rd));

	if (!validate_map_addr(top, RTT_PAGE_LEVEL, rd)) {
		res->x[0] = RMI_ERROR_INPUT;
		goto out_unmap_rd;
	}

	g_rtt_root = rd->s2_ctx.g_rtt;
	sl = realm_rtt_starting_level(rd);
	ipa_bits = realm_ipa_bits(rd);
	s2_ctx = rd->s2_ctx;

	granule_lock(g_rtt_root, GRANULE_STATE_RTT);

	/* Walk to the deepest level possible */
	rtt_walk_lock_unlock(g_rtt_root, sl, ipa_bits,
			     base, RTT_PAGE_LEVEL, &wi);

	s2tt = granule_map(wi.g_llt, SLOT_RTT);

	rtt_set_ripas_range(&s2_ctx, s2tt, base, top, &wi, ripas_val, res);
	if (res->x[0] == RMI_SUCCESS) {
		rec->set_ripas.addr = res->x[1];
	}

	buffer_unmap(s2tt);
	granule_unlock(wi.g_llt);
out_unmap_rd:
	buffer_unmap(rd);
out_unmap_rec:
	buffer_unmap(rec);
out_unlock_rec_rd:
	granule_unlock(g_rec);
	granule_unlock(g_rd);
}