/*
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * SPDX-FileCopyrightText: Copyright TF-RMM Contributors.
 */
6
7#include <buffer.h>
8#include <granule.h>
9#include <measurement.h>
10#include <realm.h>
11#include <ripas.h>
12#include <smc-handler.h>
13#include <smc-rmi.h>
14#include <smc.h>
15#include <stddef.h>
16#include <string.h>
17#include <table.h>
18
19/*
20 * Validate the map_addr value passed to RMI_RTT_* and RMI_DATA_* commands.
21 */
22static bool validate_map_addr(unsigned long map_addr,
23 unsigned long level,
24 struct rd *rd)
25{
26
27 if (map_addr >= realm_ipa_size(rd)) {
28 return false;
29 }
30 if (!addr_is_level_aligned(map_addr, level)) {
31 return false;
32 }
33 return true;
34}
35
36/*
37 * Structure commands can operate on all RTTs except for the root RTT so
38 * the minimal valid level is the stage 2 starting level + 1.
39 */
40static bool validate_rtt_structure_cmds(unsigned long map_addr,
41 long level,
42 struct rd *rd)
43{
44 int min_level = realm_rtt_starting_level(rd) + 1;
45
46 if ((level < min_level) || (level > RTT_PAGE_LEVEL)) {
47 return false;
48 }
49 return validate_map_addr(map_addr, level, rd);
50}
51
52/*
53 * Map/Unmap commands can operate up to a level 2 block entry so min_level is
54 * the smallest block size.
55 */
56static bool validate_rtt_map_cmds(unsigned long map_addr,
57 long level,
58 struct rd *rd)
59{
60 if ((level < RTT_MIN_BLOCK_LEVEL) || (level > RTT_PAGE_LEVEL)) {
61 return false;
62 }
63 return validate_map_addr(map_addr, level, rd);
64}
65
66/*
67 * Entry commands can operate on any entry so the minimal valid level is the
68 * stage 2 starting level.
69 */
70static bool validate_rtt_entry_cmds(unsigned long map_addr,
71 long level,
72 struct rd *rd)
73{
74 if ((level < realm_rtt_starting_level(rd)) ||
75 (level > RTT_PAGE_LEVEL)) {
76 return false;
77 }
78 return validate_map_addr(map_addr, level, rd);
79}
80
/*
 * RMI_RTT_CREATE handler.
 *
 * Turns the DELEGATED granule at @rtt_addr into a new RTT mapped at
 * @map_addr / @ulevel, unfolding whatever the parent entry currently
 * describes (unassigned, destroyed or an assigned block) into the new
 * table, and finally installs a table descriptor in the parent.
 *
 * Returns RMI_SUCCESS, RMI_ERROR_INPUT, or a packed RMI_ERROR_RTT code
 * carrying the level at which the walk or the parent-entry check failed.
 */
unsigned long smc_rtt_create(unsigned long rd_addr,
			     unsigned long rtt_addr,
			     unsigned long map_addr,
			     unsigned long ulevel)
{
	struct granule *g_rd;
	struct granule *g_tbl;
	struct rd *rd;
	struct granule *g_table_root;
	struct rtt_walk wi;
	unsigned long *s2tt, *parent_s2tt, parent_s2tte;
	long level = (long)ulevel;
	unsigned long ipa_bits;
	unsigned long ret;
	struct realm_s2_context s2_ctx;
	int sl;

	/* Both the new table granule and the RD are locked together */
	if (!find_lock_two_granules(rtt_addr,
				    GRANULE_STATE_DELEGATED,
				    &g_tbl,
				    rd_addr,
				    GRANULE_STATE_RD,
				    &g_rd)) {
		return RMI_ERROR_INPUT;
	}

	rd = granule_map(g_rd, SLOT_RD);

	if (!validate_rtt_structure_cmds(map_addr, level, rd)) {
		buffer_unmap(rd);
		granule_unlock(g_rd);
		granule_unlock(g_tbl);
		return RMI_ERROR_INPUT;
	}

	/* Snapshot everything needed from the RD before unmapping it */
	g_table_root = rd->s2_ctx.g_rtt;
	sl = realm_rtt_starting_level(rd);
	ipa_bits = realm_ipa_bits(rd);
	s2_ctx = rd->s2_ctx;
	buffer_unmap(rd);

	/*
	 * Lock the RTT root. Enforcing locking order RD->RTT is enough to
	 * ensure deadlock free locking guarantee.
	 */
	granule_lock(g_table_root, GRANULE_STATE_RTT);

	/* Unlock RD after locking RTT Root */
	granule_unlock(g_rd);

	/* Walk to the parent of the level being created */
	rtt_walk_lock_unlock(g_table_root, sl, ipa_bits,
			     map_addr, level - 1L, &wi);
	if (wi.last_level != level - 1L) {
		ret = pack_return_code(RMI_ERROR_RTT, wi.last_level);
		goto out_unlock_llt;
	}

	parent_s2tt = granule_map(wi.g_llt, SLOT_RTT);
	parent_s2tte = s2tte_read(&parent_s2tt[wi.index]);
	s2tt = granule_map(g_tbl, SLOT_DELEGATED);

	/* Initialize the new table to match the parent entry's state */
	if (s2tte_is_unassigned_empty(parent_s2tte)) {
		s2tt_init_unassigned_empty(s2tt);

		/*
		 * Increase the refcount of the parent, the granule was
		 * locked while table walking and hand-over-hand locking.
		 * Atomicity and acquire/release semantics not required because
		 * the table is accessed always locked.
		 */
		__granule_get(wi.g_llt);

	} else if (s2tte_is_unassigned_ram(parent_s2tte)) {
		s2tt_init_unassigned_ram(s2tt);
		__granule_get(wi.g_llt);

	} else if (s2tte_is_unassigned_ns(parent_s2tte)) {
		s2tt_init_unassigned_ns(s2tt);
		__granule_get(wi.g_llt);

	} else if (s2tte_is_destroyed(parent_s2tte)) {
		s2tt_init_destroyed(s2tt);
		__granule_get(wi.g_llt);

	} else if (s2tte_is_assigned_empty(parent_s2tte, level - 1L)) {
		unsigned long block_pa;

		/*
		 * We should observe parent assigned s2tte only when
		 * we create tables above this level.
		 */
		assert(level > RTT_MIN_BLOCK_LEVEL);

		block_pa = s2tte_pa(parent_s2tte, level - 1L);

		s2tt_init_assigned_empty(s2tt, block_pa, level);

		/*
		 * Increase the refcount to mark the granule as in-use. refcount
		 * is incremented by S2TTES_PER_S2TT (ref RTT unfolding).
		 */
		__granule_refcount_inc(g_tbl, S2TTES_PER_S2TT);

	} else if (s2tte_is_assigned_ram(parent_s2tte, level - 1L)) {
		unsigned long block_pa;

		/*
		 * We should observe parent valid s2tte only when
		 * we create tables above this level.
		 */
		assert(level > RTT_MIN_BLOCK_LEVEL);

		/*
		 * Break before make. This may cause spurious S2 aborts.
		 */
		s2tte_write(&parent_s2tt[wi.index], 0UL);
		invalidate_block(&s2_ctx, map_addr);

		block_pa = s2tte_pa(parent_s2tte, level - 1L);

		s2tt_init_assigned_ram(s2tt, block_pa, level);

		/*
		 * Increase the refcount to mark the granule as in-use. refcount
		 * is incremented by S2TTES_PER_S2TT (ref RTT unfolding).
		 */
		__granule_refcount_inc(g_tbl, S2TTES_PER_S2TT);

	} else if (s2tte_is_assigned_ns(parent_s2tte, level - 1L)) {
		unsigned long block_pa;

		/*
		 * We should observe parent assigned_ns s2tte only when
		 * we create tables above this level.
		 */
		assert(level > RTT_MIN_BLOCK_LEVEL);

		/*
		 * Break before make. This may cause spurious S2 aborts.
		 */
		s2tte_write(&parent_s2tt[wi.index], 0UL);
		invalidate_block(&s2_ctx, map_addr);

		block_pa = s2tte_pa(parent_s2tte, level - 1L);

		s2tt_init_assigned_ns(s2tt, block_pa, level);

		/*
		 * Increase the refcount to mark the granule as in-use. refcount
		 * is incremented by S2TTES_PER_S2TT (ref RTT unfolding).
		 */
		__granule_refcount_inc(g_tbl, S2TTES_PER_S2TT);

	} else if (s2tte_is_table(parent_s2tte, level - 1L)) {
		/* A table already exists at this position */
		ret = pack_return_code(RMI_ERROR_RTT,
					(unsigned int)(level - 1L));
		goto out_unmap_table;

	} else {
		assert(false);
	}

	ret = RMI_SUCCESS;

	granule_set_state(g_tbl, GRANULE_STATE_RTT);

	/* Plumb the new table into its parent */
	parent_s2tte = s2tte_create_table(rtt_addr, level - 1L);
	s2tte_write(&parent_s2tt[wi.index], parent_s2tte);

out_unmap_table:
	buffer_unmap(s2tt);
	buffer_unmap(parent_s2tt);
out_unlock_llt:
	granule_unlock(wi.g_llt);
	granule_unlock(g_tbl);
	return ret;
}
258
/*
 * RMI_RTT_FOLD handler.
 *
 * Collapses a homogeneous RTT at @map_addr / @ulevel back into a single
 * parent entry, returning the folded table's PA in res->x[1] so the host
 * can undelegate it. The fold succeeds only when every entry of the table
 * is of the same type (all-unassigned/destroyed when refcount is 0, or a
 * fully-assigned contiguous block when refcount is S2TTES_PER_S2TT).
 *
 * res->x[0] carries RMI_SUCCESS, RMI_ERROR_INPUT or a packed
 * RMI_ERROR_RTT code.
 */
void smc_rtt_fold(unsigned long rd_addr,
		  unsigned long map_addr,
		  unsigned long ulevel,
		  struct smc_result *res)
{
	struct granule *g_rd;
	struct granule *g_tbl;
	struct rd *rd;
	struct granule *g_table_root;
	struct rtt_walk wi;
	unsigned long *table, *parent_s2tt, parent_s2tte;
	long level = (long)ulevel;
	unsigned long ipa_bits, rtt_addr;
	unsigned long ret;
	struct realm_s2_context s2_ctx;
	int sl;

	g_rd = find_lock_granule(rd_addr, GRANULE_STATE_RD);
	if (g_rd == NULL) {
		res->x[0] = RMI_ERROR_INPUT;
		return;
	}

	rd = granule_map(g_rd, SLOT_RD);

	if (!validate_rtt_structure_cmds(map_addr, level, rd)) {
		buffer_unmap(rd);
		granule_unlock(g_rd);
		res->x[0] = RMI_ERROR_INPUT;
		return;
	}

	/* Snapshot the RD state, then move the lock from RD to RTT root */
	g_table_root = rd->s2_ctx.g_rtt;
	sl = realm_rtt_starting_level(rd);
	ipa_bits = realm_ipa_bits(rd);
	s2_ctx = rd->s2_ctx;
	buffer_unmap(rd);
	granule_lock(g_table_root, GRANULE_STATE_RTT);
	granule_unlock(g_rd);

	/* Walk to the parent of the table being folded */
	rtt_walk_lock_unlock(g_table_root, sl, ipa_bits,
			     map_addr, level - 1L, &wi);
	if (wi.last_level != level - 1UL) {
		ret = pack_return_code(RMI_ERROR_RTT, wi.last_level);
		goto out_unlock_parent_table;
	}

	parent_s2tt = granule_map(wi.g_llt, SLOT_RTT);
	parent_s2tte = s2tte_read(&parent_s2tt[wi.index]);
	if (!s2tte_is_table(parent_s2tte, level - 1L)) {
		ret = pack_return_code(RMI_ERROR_RTT,
					(unsigned int)(level - 1L));
		goto out_unmap_parent_table;
	}

	rtt_addr = s2tte_pa_table(parent_s2tte, level - 1L);
	g_tbl = find_lock_granule(rtt_addr, GRANULE_STATE_RTT);

	/*
	 * A table descriptor S2TTE always points to a TABLE granule.
	 */
	assert(g_tbl != NULL);

	table = granule_map(g_tbl, SLOT_RTT2);

	/*
	 * The command can succeed only if all 512 S2TTEs are of the same type.
	 * We first check the table's ref. counter to speed up the case when
	 * the host makes a guess whether a memory region can be folded.
	 */
	if (g_tbl->refcount == 0UL) {
		if (table_is_destroyed_block(table)) {
			parent_s2tte = s2tte_create_destroyed();
			__granule_put(wi.g_llt);

		} else if (table_is_unassigned_empty_block(table)) {
			parent_s2tte = s2tte_create_unassigned_empty();
			__granule_put(wi.g_llt);

		} else if (table_is_unassigned_ram_block(table)) {
			parent_s2tte = s2tte_create_unassigned_ram();
			__granule_put(wi.g_llt);

		} else if (table_is_unassigned_ns_block(table)) {
			parent_s2tte = s2tte_create_unassigned_ns();
			__granule_put(wi.g_llt);

		} else {
			/*
			 * The table holds a mixture of destroyed and
			 * unassigned entries.
			 */
			ret = pack_return_code(RMI_ERROR_RTT, level);
			goto out_unmap_table;
		}

	} else if (g_tbl->refcount == S2TTES_PER_S2TT) {

		unsigned long s2tte, block_pa;

		/* The RMM specification does not allow creating block
		 * entries less than RTT_MIN_BLOCK_LEVEL even though
		 * permitted by the Arm Architecture.
		 * Hence ensure that the table being folded is at a level
		 * higher than the RTT_MIN_BLOCK_LEVEL.
		 *
		 * A fully populated table cannot be destroyed if that
		 * would create a block mapping below RTT_MIN_BLOCK_LEVEL.
		 */
		if (level <= RTT_MIN_BLOCK_LEVEL) {
			ret = pack_return_code(RMI_ERROR_RTT, wi.last_level);
			goto out_unmap_table;
		}

		s2tte = s2tte_read(&table[0]);
		block_pa = s2tte_pa(s2tte, level);

		/*
		 * The table must also refer to a contiguous block through the
		 * same type of s2tte, either Assigned, Valid or Assigned_NS.
		 */
		if (table_maps_assigned_empty_block(table, level)) {
			parent_s2tte = s2tte_create_assigned_empty(block_pa,
								   level - 1L);
		} else if (table_maps_assigned_ram_block(table, level)) {
			parent_s2tte = s2tte_create_assigned_ram(block_pa,
								 level - 1L);
		} else if (table_maps_assigned_ns_block(table, level)) {
			parent_s2tte = s2tte_create_assigned_ns(block_pa,
								level - 1L);
		/* The table contains mixed entries that cannot be folded */
		} else {
			ret = pack_return_code(RMI_ERROR_RTT, level);
			goto out_unmap_table;
		}

		__granule_refcount_dec(g_tbl, S2TTES_PER_S2TT);
	} else {
		/*
		 * The table holds a mixture of different types of s2ttes.
		 */
		ret = pack_return_code(RMI_ERROR_RTT, level);
		goto out_unmap_table;
	}

	ret = RMI_SUCCESS;
	res->x[1] = rtt_addr;

	/*
	 * Break before make.
	 */
	s2tte_write(&parent_s2tt[wi.index], 0UL);

	if (s2tte_is_assigned_ram(parent_s2tte, level - 1L) ||
	    s2tte_is_assigned_ns(parent_s2tte, level - 1L)) {
		invalidate_pages_in_block(&s2_ctx, map_addr);
	} else {
		invalidate_block(&s2_ctx, map_addr);
	}

	s2tte_write(&parent_s2tt[wi.index], parent_s2tte);

	/* Scrub the folded table and hand it back as DELEGATED */
	granule_memzero_mapped(table);
	granule_set_state(g_tbl, GRANULE_STATE_DELEGATED);

out_unmap_table:
	buffer_unmap(table);
	granule_unlock(g_tbl);
out_unmap_parent_table:
	buffer_unmap(parent_s2tt);
out_unlock_parent_table:
	granule_unlock(wi.g_llt);
	res->x[0] = ret;
}
433
/*
 * RMI_RTT_DESTROY handler.
 *
 * Destroys the (unreferenced) RTT mapped at @map_addr / @ulevel and
 * replaces the parent entry with a DESTROYED s2tte (inside PAR) or an
 * unassigned-NS s2tte (outside PAR). On success the destroyed table's PA
 * is reported in res->x[1]; res->x[2] reports the next live IPA via
 * skip_non_live_entries() on both the success and the walk-failure paths.
 *
 * res->x[0] carries RMI_SUCCESS, RMI_ERROR_INPUT or a packed
 * RMI_ERROR_RTT code.
 */
void smc_rtt_destroy(unsigned long rd_addr,
		     unsigned long map_addr,
		     unsigned long ulevel,
		     struct smc_result *res)
{
	struct granule *g_rd;
	struct granule *g_tbl;
	struct rd *rd;
	struct granule *g_table_root;
	struct rtt_walk wi;
	unsigned long *table, *parent_s2tt, parent_s2tte;
	long level = (long)ulevel;
	unsigned long ipa_bits, rtt_addr;
	unsigned long ret;
	struct realm_s2_context s2_ctx;
	int sl;
	bool in_par, skip_non_live = false;

	g_rd = find_lock_granule(rd_addr, GRANULE_STATE_RD);
	if (g_rd == NULL) {
		res->x[0] = RMI_ERROR_INPUT;
		return;
	}

	rd = granule_map(g_rd, SLOT_RD);

	if (!validate_rtt_structure_cmds(map_addr, level, rd)) {
		buffer_unmap(rd);
		granule_unlock(g_rd);
		res->x[0] = RMI_ERROR_INPUT;
		return;
	}

	/* Snapshot the RD state, then move the lock from RD to RTT root */
	g_table_root = rd->s2_ctx.g_rtt;
	sl = realm_rtt_starting_level(rd);
	ipa_bits = realm_ipa_bits(rd);
	s2_ctx = rd->s2_ctx;
	in_par = addr_in_par(rd, map_addr);
	buffer_unmap(rd);
	granule_lock(g_table_root, GRANULE_STATE_RTT);
	granule_unlock(g_rd);

	/* Walk to the parent of the table being destroyed */
	rtt_walk_lock_unlock(g_table_root, sl, ipa_bits,
			     map_addr, level - 1L, &wi);

	parent_s2tt = granule_map(wi.g_llt, SLOT_RTT);
	parent_s2tte = s2tte_read(&parent_s2tt[wi.index]);

	if ((wi.last_level != level - 1UL) ||
	    !s2tte_is_table(parent_s2tte, level - 1UL)) {
		ret = pack_return_code(RMI_ERROR_RTT, wi.last_level);
		skip_non_live = true;
		goto out_unmap_parent_table;
	}

	rtt_addr = s2tte_pa_table(parent_s2tte, level - 1L);

	/*
	 * Lock the RTT granule. The 'rtt_addr' is verified, thus can be treated
	 * as an internal granule.
	 */
	g_tbl = find_lock_granule(rtt_addr, GRANULE_STATE_RTT);

	/*
	 * A table descriptor S2TTE always points to a TABLE granule.
	 */
	assert(g_tbl != NULL);

	/*
	 * Read the refcount value. RTT granule is always accessed locked, thus
	 * the refcount can be accessed without atomic operations.
	 */
	if (g_tbl->refcount != 0UL) {
		/* Live entries remain in the table; cannot destroy it */
		ret = pack_return_code(RMI_ERROR_RTT, level);
		goto out_unlock_table;
	}

	ret = RMI_SUCCESS;
	res->x[1] = rtt_addr;
	skip_non_live = true;

	table = granule_map(g_tbl, SLOT_RTT2);

	if (in_par) {
		parent_s2tte = s2tte_create_destroyed();
	} else {
		parent_s2tte = s2tte_create_unassigned_ns();
	}

	__granule_put(wi.g_llt);

	/*
	 * Break before make. Note that this may cause spurious S2 aborts.
	 */
	s2tte_write(&parent_s2tt[wi.index], 0UL);
	invalidate_block(&s2_ctx, map_addr);
	s2tte_write(&parent_s2tt[wi.index], parent_s2tte);

	/* Scrub the destroyed table and hand it back as DELEGATED */
	granule_memzero_mapped(table);
	granule_set_state(g_tbl, GRANULE_STATE_DELEGATED);

	buffer_unmap(table);
out_unlock_table:
	granule_unlock(g_tbl);
out_unmap_parent_table:
	if (skip_non_live) {
		res->x[2] = skip_non_live_entries(map_addr, parent_s2tt, &wi);
	}
	buffer_unmap(parent_s2tt);
	granule_unlock(wi.g_llt);
	res->x[0] = ret;
}
546
/* Operation selector for map_unmap_ns(): create or clear a NS mapping. */
enum map_unmap_ns_op {
	MAP_NS,
	UNMAP_NS
};
551
552/*
553 * We don't hold a reference on the NS granule when it is
554 * mapped into a realm. Instead we rely on the guarantees
555 * provided by the architecture to ensure that a NS access
556 * to a protected granule is prohibited even within the realm.
557 */
/*
 * Common implementation of RMI_RTT_MAP_UNPROTECTED (op == MAP_NS) and
 * RMI_RTT_UNMAP_UNPROTECTED (op == UNMAP_NS).
 *
 * @host_s2tte is the host-supplied NS descriptor (only used for MAP_NS).
 * Results are reported through @res: x[0] is the status code and, for
 * UNMAP_NS, x[1] is the next live IPA from skip_non_live_entries().
 */
static void map_unmap_ns(unsigned long rd_addr,
			 unsigned long map_addr,
			 long level,
			 unsigned long host_s2tte,
			 enum map_unmap_ns_op op,
			 struct smc_result *res)
{
	struct granule *g_rd;
	struct rd *rd;
	struct granule *g_table_root;
	unsigned long *s2tt, s2tte;
	struct rtt_walk wi;
	unsigned long ipa_bits;
	struct realm_s2_context s2_ctx;
	int sl;

	g_rd = find_lock_granule(rd_addr, GRANULE_STATE_RD);
	if (g_rd == NULL) {
		res->x[0] = RMI_ERROR_INPUT;
		return;
	}

	rd = granule_map(g_rd, SLOT_RD);

	if (!validate_rtt_map_cmds(map_addr, level, rd)) {
		buffer_unmap(rd);
		granule_unlock(g_rd);
		res->x[0] = RMI_ERROR_INPUT;
		return;
	}

	g_table_root = rd->s2_ctx.g_rtt;
	sl = realm_rtt_starting_level(rd);
	ipa_bits = realm_ipa_bits(rd);

	/* Check if map_addr is outside PAR */
	if (addr_in_par(rd, map_addr)) {
		buffer_unmap(rd);
		granule_unlock(g_rd);
		res->x[0] = RMI_ERROR_INPUT;
		return;
	}

	s2_ctx = rd->s2_ctx;
	buffer_unmap(rd);

	/* Locking order RD->RTT; release RD once the root is held */
	granule_lock(g_table_root, GRANULE_STATE_RTT);
	granule_unlock(g_rd);

	rtt_walk_lock_unlock(g_table_root, sl, ipa_bits,
			     map_addr, level, &wi);

	/*
	 * For UNMAP_NS, we need to map the table and look
	 * for the end of the non-live region.
	 */
	if (op == MAP_NS && wi.last_level != level) {
		res->x[0] = pack_return_code(RMI_ERROR_RTT, wi.last_level);
		goto out_unlock_llt;
	}

	s2tt = granule_map(wi.g_llt, SLOT_RTT);
	s2tte = s2tte_read(&s2tt[wi.index]);

	if (op == MAP_NS) {
		if (!s2tte_is_unassigned_ns(s2tte)) {
			res->x[0] = pack_return_code(RMI_ERROR_RTT,
						     (unsigned int)level);
			goto out_unmap_table;
		}

		s2tte = s2tte_create_assigned_ns(host_s2tte, level);
		s2tte_write(&s2tt[wi.index], s2tte);
		__granule_get(wi.g_llt);

	} else if (op == UNMAP_NS) {
		/*
		 * The following check also verifies that map_addr is outside
		 * PAR, as valid_NS s2tte may only cover outside PAR IPA range.
		 */
		bool assigned_ns = s2tte_is_assigned_ns(s2tte, wi.last_level);

		if ((wi.last_level != level) || !assigned_ns) {
			res->x[0] = pack_return_code(RMI_ERROR_RTT,
						     (unsigned int)wi.last_level);
			goto out_unmap_table;
		}

		s2tte = s2tte_create_unassigned_ns();
		s2tte_write(&s2tt[wi.index], s2tte);
		__granule_put(wi.g_llt);
		/* Flush the stale NS translation at the right granularity */
		if (level == RTT_PAGE_LEVEL) {
			invalidate_page(&s2_ctx, map_addr);
		} else {
			invalidate_block(&s2_ctx, map_addr);
		}
	}

	res->x[0] = RMI_SUCCESS;

out_unmap_table:
	if (op == UNMAP_NS) {
		res->x[1] = skip_non_live_entries(map_addr, s2tt, &wi);
	}
	buffer_unmap(s2tt);
out_unlock_llt:
	granule_unlock(wi.g_llt);
}
666
667unsigned long smc_rtt_map_unprotected(unsigned long rd_addr,
668 unsigned long map_addr,
669 unsigned long ulevel,
670 unsigned long s2tte)
671{
672 long level = (long)ulevel;
AlexeiFedorov917eabf2023-04-24 12:20:41 +0100673 struct smc_result res;
Soby Mathewb4c6df42022-11-09 11:13:29 +0000674
675 if (!host_ns_s2tte_is_valid(s2tte, level)) {
676 return RMI_ERROR_INPUT;
677 }
678
AlexeiFedorov917eabf2023-04-24 12:20:41 +0100679 map_unmap_ns(rd_addr, map_addr, level, s2tte, MAP_NS, &res);
680 return res.x[0];
Soby Mathewb4c6df42022-11-09 11:13:29 +0000681}
682
AlexeiFedorov917eabf2023-04-24 12:20:41 +0100683void smc_rtt_unmap_unprotected(unsigned long rd_addr,
684 unsigned long map_addr,
685 unsigned long ulevel,
686 struct smc_result *res)
Soby Mathewb4c6df42022-11-09 11:13:29 +0000687{
AlexeiFedorov917eabf2023-04-24 12:20:41 +0100688 return map_unmap_ns(rd_addr, map_addr, (long)ulevel, 0UL, UNMAP_NS, res);
Soby Mathewb4c6df42022-11-09 11:13:29 +0000689}
690
/*
 * RMI_RTT_READ_ENTRY handler.
 *
 * Walks to @map_addr at (up to) @ulevel and reports the entry found:
 *   ret->x[1] = level actually reached by the walk,
 *   ret->x[2] = entry state (RMI_UNASSIGNED/ASSIGNED/DESTROYED/TABLE),
 *   ret->x[3] = output address (0 when there is none),
 *   ret->x[4] = RIPAS value (RIPAS_UNDEFINED where not applicable).
 * ret->x[0] is RMI_SUCCESS or RMI_ERROR_INPUT.
 */
void smc_rtt_read_entry(unsigned long rd_addr,
			unsigned long map_addr,
			unsigned long ulevel,
			struct smc_result *ret)
{
	struct granule *g_rd, *g_rtt_root;
	struct rd *rd;
	struct rtt_walk wi;
	unsigned long *s2tt, s2tte;
	unsigned long ipa_bits;
	long level = (long)ulevel;
	int sl;

	g_rd = find_lock_granule(rd_addr, GRANULE_STATE_RD);
	if (g_rd == NULL) {
		ret->x[0] = RMI_ERROR_INPUT;
		return;
	}

	rd = granule_map(g_rd, SLOT_RD);

	if (!validate_rtt_entry_cmds(map_addr, level, rd)) {
		buffer_unmap(rd);
		granule_unlock(g_rd);
		ret->x[0] = RMI_ERROR_INPUT;
		return;
	}

	g_rtt_root = rd->s2_ctx.g_rtt;
	sl = realm_rtt_starting_level(rd);
	ipa_bits = realm_ipa_bits(rd);
	buffer_unmap(rd);

	/* Locking order RD->RTT; release RD once the root is held */
	granule_lock(g_rtt_root, GRANULE_STATE_RTT);
	granule_unlock(g_rd);

	rtt_walk_lock_unlock(g_rtt_root, sl, ipa_bits,
			     map_addr, level, &wi);
	s2tt = granule_map(wi.g_llt, SLOT_RTT);
	s2tte = s2tte_read(&s2tt[wi.index]);
	ret->x[1] = wi.last_level;

	/* Discriminate the entry type and fill in the result registers */
	if (s2tte_is_unassigned_empty(s2tte)) {
		ret->x[2] = RMI_UNASSIGNED;
		ret->x[3] = 0UL;
		ret->x[4] = RIPAS_EMPTY;
	} else if (s2tte_is_unassigned_ram(s2tte)) {
		ret->x[2] = RMI_UNASSIGNED;
		ret->x[3] = 0UL;
		ret->x[4] = RIPAS_RAM;
	} else if (s2tte_is_destroyed(s2tte)) {
		ret->x[2] = RMI_DESTROYED;
		ret->x[3] = 0UL;
		ret->x[4] = RIPAS_UNDEFINED;
	} else if (s2tte_is_assigned_empty(s2tte, wi.last_level)) {
		ret->x[2] = RMI_ASSIGNED;
		ret->x[3] = s2tte_pa(s2tte, wi.last_level);
		ret->x[4] = RIPAS_EMPTY;
	} else if (s2tte_is_assigned_ram(s2tte, wi.last_level)) {
		ret->x[2] = RMI_ASSIGNED;
		ret->x[3] = s2tte_pa(s2tte, wi.last_level);
		ret->x[4] = RIPAS_RAM;
	} else if (s2tte_is_unassigned_ns(s2tte)) {
		ret->x[2] = RMI_UNASSIGNED;
		ret->x[3] = 0UL;
		ret->x[4] = RIPAS_UNDEFINED;
	} else if (s2tte_is_assigned_ns(s2tte, wi.last_level)) {
		ret->x[2] = RMI_ASSIGNED;
		ret->x[3] = host_ns_s2tte(s2tte, wi.last_level);
		ret->x[4] = RIPAS_UNDEFINED;
	} else if (s2tte_is_table(s2tte, wi.last_level)) {
		ret->x[2] = RMI_TABLE;
		ret->x[3] = s2tte_pa_table(s2tte, wi.last_level);
		ret->x[4] = RIPAS_UNDEFINED;
	} else {
		assert(false);
	}

	buffer_unmap(s2tt);
	granule_unlock(wi.g_llt);

	ret->x[0] = RMI_SUCCESS;
}
774
775static void data_granule_measure(struct rd *rd, void *data,
776 unsigned long ipa,
777 unsigned long flags)
778{
779 struct measurement_desc_data measure_desc = {0};
780
781 /* Initialize the measurement descriptior structure */
782 measure_desc.desc_type = MEASURE_DESC_TYPE_DATA;
783 measure_desc.len = sizeof(struct measurement_desc_data);
784 measure_desc.ipa = ipa;
785 measure_desc.flags = flags;
786 memcpy(measure_desc.rim,
787 &rd->measurement[RIM_MEASUREMENT_SLOT],
788 measurement_get_size(rd->algorithm));
789
790 if (flags == RMI_MEASURE_CONTENT) {
791 /*
792 * Hashing the data granules and store the result in the
793 * measurement descriptor structure.
794 */
795 measurement_hash_compute(rd->algorithm,
796 data,
797 GRANULE_SIZE,
798 measure_desc.content);
799 }
800
801 /*
802 * Hashing the measurement descriptor structure; the result is the
803 * updated RIM.
804 */
805 measurement_hash_compute(rd->algorithm,
806 &measure_desc,
807 sizeof(measure_desc),
808 rd->measurement[RIM_MEASUREMENT_SLOT]);
809}
810
811static unsigned long validate_data_create_unknown(unsigned long map_addr,
812 struct rd *rd)
813{
814 if (!addr_in_par(rd, map_addr)) {
815 return RMI_ERROR_INPUT;
816 }
817
818 if (!validate_map_addr(map_addr, RTT_PAGE_LEVEL, rd)) {
819 return RMI_ERROR_INPUT;
820 }
821
822 return RMI_SUCCESS;
823}
824
825static unsigned long validate_data_create(unsigned long map_addr,
826 struct rd *rd)
827{
828 if (get_rd_state_locked(rd) != REALM_STATE_NEW) {
829 return RMI_ERROR_REALM;
830 }
831
832 return validate_data_create_unknown(map_addr, rd);
833}
834
835/*
836 * Implements both Data.Create and Data.CreateUnknown
837 *
838 * if @g_src == NULL, this implemented Data.CreateUnknown
839 * and otherwise this implemented Data.Create.
840 */
static unsigned long data_create(unsigned long rd_addr,
				 unsigned long data_addr,
				 unsigned long map_addr,
				 struct granule *g_src,
				 unsigned long flags)
{
	struct granule *g_data;
	struct granule *g_rd;
	struct granule *g_table_root;
	struct rd *rd;
	struct rtt_walk wi;
	unsigned long s2tte, *s2tt;
	/* On failure the data granule stays DELEGATED */
	enum granule_state new_data_state = GRANULE_STATE_DELEGATED;
	unsigned long ipa_bits;
	unsigned long ret;
	int __unused meas_ret;
	int sl;

	/* The data granule and the RD are locked together */
	if (!find_lock_two_granules(data_addr,
				    GRANULE_STATE_DELEGATED,
				    &g_data,
				    rd_addr,
				    GRANULE_STATE_RD,
				    &g_rd)) {
		return RMI_ERROR_INPUT;
	}

	rd = granule_map(g_rd, SLOT_RD);

	/* Data.Create additionally requires the realm to be NEW */
	ret = (g_src != NULL) ?
		validate_data_create(map_addr, rd) :
		validate_data_create_unknown(map_addr, rd);

	if (ret != RMI_SUCCESS) {
		goto out_unmap_rd;
	}

	/* Walk to the page-level entry that will map the data granule */
	g_table_root = rd->s2_ctx.g_rtt;
	sl = realm_rtt_starting_level(rd);
	ipa_bits = realm_ipa_bits(rd);
	granule_lock(g_table_root, GRANULE_STATE_RTT);
	rtt_walk_lock_unlock(g_table_root, sl, ipa_bits,
			     map_addr, RTT_PAGE_LEVEL, &wi);
	if (wi.last_level != RTT_PAGE_LEVEL) {
		ret = pack_return_code(RMI_ERROR_RTT, wi.last_level);
		goto out_unlock_ll_table;
	}

	s2tt = granule_map(wi.g_llt, SLOT_RTT);
	s2tte = s2tte_read(&s2tt[wi.index]);
	if (!s2tte_is_unassigned_ram(s2tte)) {
		ret = pack_return_code(RMI_ERROR_RTT, RTT_PAGE_LEVEL);
		goto out_unmap_ll_table;
	}

	/* Data.Create: copy the NS source contents and measure them */
	if (g_src != NULL) {
		bool ns_access_ok;
		void *data = granule_map(g_data, SLOT_DELEGATED);

		ns_access_ok = ns_buffer_read(SLOT_NS, g_src, 0U,
					      GRANULE_SIZE, data);

		if (!ns_access_ok) {
			/*
			 * Some data may be copied before the failure. Zero
			 * g_data granule as it will remain in delegated state.
			 */
			(void)memset(data, 0, GRANULE_SIZE);
			buffer_unmap(data);
			ret = RMI_ERROR_INPUT;
			goto out_unmap_ll_table;
		}


		data_granule_measure(rd, data, map_addr, flags);

		buffer_unmap(data);
	}

	new_data_state = GRANULE_STATE_DATA;

	s2tte = s2tte_create_assigned_ram(data_addr, RTT_PAGE_LEVEL);

	s2tte_write(&s2tt[wi.index], s2tte);
	__granule_get(wi.g_llt);

	ret = RMI_SUCCESS;

out_unmap_ll_table:
	buffer_unmap(s2tt);
out_unlock_ll_table:
	granule_unlock(wi.g_llt);
out_unmap_rd:
	buffer_unmap(rd);
	granule_unlock(g_rd);
	/* Transition the data granule to DATA on success, keep DELEGATED on failure */
	granule_unlock_transition(g_data, new_data_state);
	return ret;
}
939
AlexeiFedorovac923c82023-04-06 15:12:04 +0100940unsigned long smc_data_create(unsigned long rd_addr,
941 unsigned long data_addr,
Soby Mathewb4c6df42022-11-09 11:13:29 +0000942 unsigned long map_addr,
943 unsigned long src_addr,
944 unsigned long flags)
945{
946 struct granule *g_src;
Soby Mathewb4c6df42022-11-09 11:13:29 +0000947
948 if (flags != RMI_NO_MEASURE_CONTENT && flags != RMI_MEASURE_CONTENT) {
949 return RMI_ERROR_INPUT;
950 }
951
952 g_src = find_granule(src_addr);
953 if ((g_src == NULL) || (g_src->state != GRANULE_STATE_NS)) {
954 return RMI_ERROR_INPUT;
955 }
956
AlexeiFedorovac923c82023-04-06 15:12:04 +0100957 return data_create(rd_addr, data_addr, map_addr, g_src, flags);
Soby Mathewb4c6df42022-11-09 11:13:29 +0000958}
959
AlexeiFedorovac923c82023-04-06 15:12:04 +0100960unsigned long smc_data_create_unknown(unsigned long rd_addr,
961 unsigned long data_addr,
Soby Mathewb4c6df42022-11-09 11:13:29 +0000962 unsigned long map_addr)
963{
AlexeiFedorovac923c82023-04-06 15:12:04 +0100964 return data_create(rd_addr, data_addr, map_addr, NULL, 0);
Soby Mathewb4c6df42022-11-09 11:13:29 +0000965}
966
/*
 * Handler for RMI_DATA_DESTROY.
 *
 * Unmaps the page-level entry at @map_addr in the realm owned by @rd_addr
 * and transitions the backing data granule back to DELEGATED state,
 * scrubbing its contents.
 *
 * Results (via @res):
 *   x[0] - RMI_SUCCESS, RMI_ERROR_INPUT, or a packed RMI_ERROR_RTT code
 *          carrying the faulting walk level.
 *   x[1] - on success, the PA of the destroyed data granule.
 *   x[2] - address of the next live entry (set on both success and RTT
 *          error paths, to let the caller skip non-live entries).
 */
void smc_data_destroy(unsigned long rd_addr,
		      unsigned long map_addr,
		      struct smc_result *res)
{
	struct granule *g_data;
	struct granule *g_rd;
	struct granule *g_table_root;
	struct rtt_walk wi;
	unsigned long data_addr, s2tte, *s2tt;
	struct rd *rd;
	unsigned long ipa_bits;
	struct realm_s2_context s2_ctx;
	int sl;

	g_rd = find_lock_granule(rd_addr, GRANULE_STATE_RD);
	if (g_rd == NULL) {
		res->x[0] = RMI_ERROR_INPUT;
		return;
	}

	rd = granule_map(g_rd, SLOT_RD);

	if (!validate_map_addr(map_addr, RTT_PAGE_LEVEL, rd)) {
		buffer_unmap(rd);
		granule_unlock(g_rd);
		res->x[0] = RMI_ERROR_INPUT;
		return;
	}

	/* Snapshot everything needed from the RD before unmapping it */
	g_table_root = rd->s2_ctx.g_rtt;
	sl = realm_rtt_starting_level(rd);
	ipa_bits = realm_ipa_bits(rd);
	s2_ctx = rd->s2_ctx;
	buffer_unmap(rd);

	/*
	 * Lock hand-over: take the root RTT lock before releasing the RD
	 * lock, so the realm's table hierarchy cannot change in between.
	 */
	granule_lock(g_table_root, GRANULE_STATE_RTT);
	granule_unlock(g_rd);

	rtt_walk_lock_unlock(g_table_root, sl, ipa_bits,
				map_addr, RTT_PAGE_LEVEL, &wi);

	/*
	 * Map the last-level table unconditionally: even on the error path
	 * below, x[2] is populated from this table via
	 * skip_non_live_entries().
	 */
	s2tt = granule_map(wi.g_llt, SLOT_RTT);
	if (wi.last_level != RTT_PAGE_LEVEL) {
		res->x[0] = pack_return_code(RMI_ERROR_RTT, wi.last_level);
		goto out_unmap_ll_table;
	}

	s2tte = s2tte_read(&s2tt[wi.index]);
	if (s2tte_is_assigned_ram(s2tte, RTT_PAGE_LEVEL)) {
		/* Page was mapped: mark it DESTROYED and invalidate the TLB */
		data_addr = s2tte_pa(s2tte, RTT_PAGE_LEVEL);
		s2tte = s2tte_create_destroyed();
		s2tte_write(&s2tt[wi.index], s2tte);
		invalidate_page(&s2_ctx, map_addr);
	} else if (s2tte_is_assigned_empty(s2tte, RTT_PAGE_LEVEL)) {
		/* Page was assigned but not accessible: no TLB entry to zap */
		data_addr = s2tte_pa(s2tte, RTT_PAGE_LEVEL);
		s2tte = s2tte_create_unassigned_empty();
		s2tte_write(&s2tt[wi.index], s2tte);
	} else {
		res->x[0] = pack_return_code(RMI_ERROR_RTT, RTT_PAGE_LEVEL);
		goto out_unmap_ll_table;
	}

	/* One fewer live entry in the last-level table */
	__granule_put(wi.g_llt);

	/*
	 * Lock the data granule and check expected state. Correct locking order
	 * is guaranteed because granule address is obtained from a locked
	 * granule by table walk. This lock needs to be acquired before a state
	 * transition to or from GRANULE_STATE_DATA for granule address can happen.
	 */
	g_data = find_lock_granule(data_addr, GRANULE_STATE_DATA);
	assert(g_data != NULL);
	granule_memzero(g_data, SLOT_DELEGATED);
	granule_unlock_transition(g_data, GRANULE_STATE_DELEGATED);

	res->x[0] = RMI_SUCCESS;
	res->x[1] = data_addr;
out_unmap_ll_table:
	res->x[2] = skip_non_live_entries(map_addr, s2tt, &wi);
	buffer_unmap(s2tt);
	granule_unlock(wi.g_llt);
}
1049
AlexeiFedorov0fb44552023-04-14 15:37:58 +01001050/*
1051 * Sets new ripas value in @s2tte.
1052 *
1053 * It returns false if the @s2tte has no ripas value.
1054 * It sets @(*do_tlbi) to 'true' if the TLBs have to be invalidated.
1055 */
1056static bool update_ripas(unsigned long *s2tte, bool *do_tlbi,
1057 unsigned long level, enum ripas ripas_val)
Soby Mathewb4c6df42022-11-09 11:13:29 +00001058{
AlexeiFedorov0fb44552023-04-14 15:37:58 +01001059 unsigned long pa;
1060
1061 *do_tlbi = false;
1062
1063 if (!s2tte_has_ripas(*s2tte, level)) {
Soby Mathewb4c6df42022-11-09 11:13:29 +00001064 return false;
1065 }
1066
AlexeiFedorov0fb44552023-04-14 15:37:58 +01001067 if (ripas_val == RIPAS_RAM) {
1068 if (s2tte_is_unassigned_empty(*s2tte)) {
1069 *s2tte = s2tte_create_unassigned_ram();
1070 } else if (s2tte_is_assigned_empty(*s2tte, level)) {
1071 pa = s2tte_pa(*s2tte, level);
1072 *s2tte = s2tte_create_assigned_ram(pa, level);
Soby Mathewb4c6df42022-11-09 11:13:29 +00001073 }
AlexeiFedorov0fb44552023-04-14 15:37:58 +01001074 } else if (ripas_val == RIPAS_EMPTY) {
1075 if (s2tte_is_unassigned_ram(*s2tte)) {
1076 *s2tte = s2tte_create_unassigned_empty();
1077 } else if (s2tte_is_assigned_ram(*s2tte, level)) {
1078 pa = s2tte_pa(*s2tte, level);
1079 *s2tte = s2tte_create_assigned_empty(pa, level);
1080 *do_tlbi = true;
1081 }
1082 } else {
1083 assert(false);
Soby Mathewb4c6df42022-11-09 11:13:29 +00001084 }
1085
AlexeiFedorov0fb44552023-04-14 15:37:58 +01001086 return true;
Soby Mathewb4c6df42022-11-09 11:13:29 +00001087}
1088
1089static void ripas_granule_measure(struct rd *rd,
1090 unsigned long ipa,
1091 unsigned long level)
1092{
1093 struct measurement_desc_ripas measure_desc = {0};
1094
1095 /* Initialize the measurement descriptior structure */
1096 measure_desc.desc_type = MEASURE_DESC_TYPE_RIPAS;
1097 measure_desc.len = sizeof(struct measurement_desc_ripas);
1098 measure_desc.ipa = ipa;
1099 measure_desc.level = level;
1100 memcpy(measure_desc.rim,
1101 &rd->measurement[RIM_MEASUREMENT_SLOT],
1102 measurement_get_size(rd->algorithm));
1103
1104 /*
1105 * Hashing the measurement descriptor structure; the result is the
1106 * updated RIM.
1107 */
1108 measurement_hash_compute(rd->algorithm,
1109 &measure_desc,
1110 sizeof(measure_desc),
1111 rd->measurement[RIM_MEASUREMENT_SLOT]);
1112}
1113
/*
 * Handler for RMI_RTT_INIT_RIPAS.
 *
 * Sets the initial RIPAS of the region at @map_addr / @ulevel to RAM and
 * extends the realm's initial measurement accordingly. Only permitted
 * while the realm is in the NEW state and the address is inside the
 * protected address range.
 *
 * Returns RMI_SUCCESS, RMI_ERROR_INPUT, RMI_ERROR_REALM, or a packed
 * RMI_ERROR_RTT code carrying the faulting level.
 */
unsigned long smc_rtt_init_ripas(unsigned long rd_addr,
				 unsigned long map_addr,
				 unsigned long ulevel)
{
	struct granule *g_rd, *g_rtt_root;
	struct rd *rd;
	unsigned long ipa_bits;
	struct rtt_walk wi;
	unsigned long s2tte, *s2tt;
	unsigned long ret;
	long level = (long)ulevel;
	int sl;

	g_rd = find_lock_granule(rd_addr, GRANULE_STATE_RD);
	if (g_rd == NULL) {
		return RMI_ERROR_INPUT;
	}

	rd = granule_map(g_rd, SLOT_RD);

	/* RIPAS can only be initialized before the realm is activated */
	if (get_rd_state_locked(rd) != REALM_STATE_NEW) {
		buffer_unmap(rd);
		granule_unlock(g_rd);
		return RMI_ERROR_REALM;
	}

	if (!validate_rtt_entry_cmds(map_addr, level, rd)) {
		buffer_unmap(rd);
		granule_unlock(g_rd);
		return RMI_ERROR_INPUT;
	}

	/* Only the protected address range has a RIPAS */
	if (!addr_in_par(rd, map_addr)) {
		buffer_unmap(rd);
		granule_unlock(g_rd);
		return RMI_ERROR_INPUT;
	}

	g_rtt_root = rd->s2_ctx.g_rtt;
	sl = realm_rtt_starting_level(rd);
	ipa_bits = realm_ipa_bits(rd);

	/*
	 * Lock hand-over: take the root RTT lock before dropping the RD
	 * lock. Note that rd stays mapped past this point because the
	 * measurement below writes through it.
	 */
	granule_lock(g_rtt_root, GRANULE_STATE_RTT);
	granule_unlock(g_rd);

	rtt_walk_lock_unlock(g_rtt_root, sl, ipa_bits,
			     map_addr, level, &wi);

	if (wi.last_level != level) {
		ret = pack_return_code(RMI_ERROR_RTT, wi.last_level);
		goto out_unlock_llt;
	}

	s2tt = granule_map(wi.g_llt, SLOT_RTT);
	s2tte = s2tte_read(&s2tt[wi.index]);

	if (s2tte_is_unassigned_empty(s2tte)) {
		/* EMPTY -> RAM transition; measure the change */
		s2tte = s2tte_create_unassigned_ram();
		s2tte_write(&s2tt[wi.index], s2tte);
		ripas_granule_measure(rd, map_addr, level);
	} else if (s2tte_is_unassigned_ram(s2tte)) {
		/* Already RAM: no table update, but still measured */
		ripas_granule_measure(rd, map_addr, level);
	} else {
		ret = pack_return_code(RMI_ERROR_RTT, level);
		goto out_unmap_llt;
	}

	ret = RMI_SUCCESS;

out_unmap_llt:
	buffer_unmap(s2tt);
out_unlock_llt:
	buffer_unmap(rd);
	granule_unlock(wi.g_llt);
	return ret;
}
1190
/*
 * Handler for RMI_RTT_SET_RIPAS.
 *
 * Applies the next chunk of a RIPAS change that the realm requested via
 * its REC (@rec_addr): the target @map_addr / @ulevel must match the
 * pending request recorded in rec->set_ripas, and the REC must not be
 * running (refcount is zero while it is unscheduled).
 *
 * On success, advances rec->set_ripas.addr past the updated region and
 * invalidates TLBs when the update removed RAM access.
 *
 * Returns RMI_SUCCESS, RMI_ERROR_INPUT, RMI_ERROR_REC, or a packed
 * RMI_ERROR_RTT code carrying the faulting level.
 */
unsigned long smc_rtt_set_ripas(unsigned long rd_addr,
				unsigned long rec_addr,
				unsigned long map_addr,
				unsigned long ulevel,
				unsigned long uripas)
{
	struct granule *g_rd, *g_rec, *g_rtt_root;
	struct rec *rec;
	struct rd *rd;
	unsigned long map_size, ipa_bits;
	struct rtt_walk wi;
	unsigned long s2tte, *s2tt;
	struct realm_s2_context s2_ctx;
	long level = (long)ulevel;
	enum ripas ripas_val = (enum ripas)uripas;
	unsigned long ret;
	bool tlbi_required;
	int sl;

	/* Only EMPTY and RAM are valid RIPAS values here */
	if (ripas_val > RIPAS_RAM) {
		return RMI_ERROR_INPUT;
	}

	if (!find_lock_two_granules(rd_addr,
				    GRANULE_STATE_RD,
				    &g_rd,
				    rec_addr,
				    GRANULE_STATE_REC,
				    &g_rec)) {
		return RMI_ERROR_INPUT;
	}

	/* A non-zero refcount means the REC is currently in use */
	if (granule_refcount_read_acquire(g_rec) != 0UL) {
		ret = RMI_ERROR_REC;
		goto out_unlock_rec_rd;
	}

	rec = granule_map(g_rec, SLOT_REC);

	/* The REC must belong to the realm identified by @rd_addr */
	if (g_rd != rec->realm_info.g_rd) {
		ret = RMI_ERROR_REC;
		goto out_unmap_rec;
	}

	/* Requested value must match what the realm asked for */
	if (ripas_val != rec->set_ripas.ripas_val) {
		ret = RMI_ERROR_INPUT;
		goto out_unmap_rec;
	}

	if (map_addr != rec->set_ripas.addr) {
		/* Target region is not next chunk of requested region */
		ret = RMI_ERROR_INPUT;
		goto out_unmap_rec;
	}

	rd = granule_map(g_rd, SLOT_RD);

	if (!validate_rtt_entry_cmds(map_addr, level, rd)) {
		ret = RMI_ERROR_INPUT;
		goto out_unmap_rd;
	}

	map_size = s2tte_map_size(level);
	if (map_addr + map_size > rec->set_ripas.end) {
		/* Target region extends beyond end of requested region */
		ret = RMI_ERROR_INPUT;
		goto out_unmap_rd;
	}

	g_rtt_root = rd->s2_ctx.g_rtt;
	sl = realm_rtt_starting_level(rd);
	ipa_bits = realm_ipa_bits(rd);
	/* Copy the S2 context so TLBI can run after rd is unmapped */
	s2_ctx = rd->s2_ctx;

	granule_lock(g_rtt_root, GRANULE_STATE_RTT);

	rtt_walk_lock_unlock(g_rtt_root, sl, ipa_bits,
			     map_addr, level, &wi);
	if (wi.last_level != level) {
		ret = pack_return_code(RMI_ERROR_RTT, wi.last_level);
		goto out_unlock_llt;
	}

	s2tt = granule_map(wi.g_llt, SLOT_RTT);
	s2tte = s2tte_read(&s2tt[wi.index]);

	/* Fails when the entry carries no RIPAS (e.g. destroyed) */
	if (!update_ripas(&s2tte, &tlbi_required, level, ripas_val)) {
		ret = pack_return_code(RMI_ERROR_RTT, (unsigned int)level);
		goto out_unmap_llt;
	}

	s2tte_write(&s2tt[wi.index], s2tte);

	/* RAM access was removed: invalidate at the matching granularity */
	if (tlbi_required) {
		if (level == RTT_PAGE_LEVEL) {
			invalidate_page(&s2_ctx, map_addr);
		} else {
			invalidate_block(&s2_ctx, map_addr);
		}
	}

	/* Record progress so the next call continues from here */
	rec->set_ripas.addr += map_size;

	ret = RMI_SUCCESS;

out_unmap_llt:
	buffer_unmap(s2tt);
out_unlock_llt:
	granule_unlock(wi.g_llt);
out_unmap_rd:
	buffer_unmap(rd);
out_unmap_rec:
	buffer_unmap(rec);
out_unlock_rec_rd:
	granule_unlock(g_rec);
	granule_unlock(g_rd);
	return ret;
}