blob: 58eeb479421293431053e5bafc0ae41a8501087a [file] [log] [blame]
Soby Mathewb4c6df42022-11-09 11:13:29 +00001/*
2 * SPDX-License-Identifier: BSD-3-Clause
3 *
4 * SPDX-FileCopyrightText: Copyright TF-RMM Contributors.
5 */
6
7#include <buffer.h>
8#include <granule.h>
9#include <measurement.h>
10#include <realm.h>
11#include <ripas.h>
12#include <smc-handler.h>
13#include <smc-rmi.h>
14#include <smc.h>
15#include <stddef.h>
16#include <string.h>
17#include <table.h>
18
19/*
20 * Validate the map_addr value passed to RMI_RTT_* and RMI_DATA_* commands.
21 */
22static bool validate_map_addr(unsigned long map_addr,
23 unsigned long level,
24 struct rd *rd)
25{
26
27 if (map_addr >= realm_ipa_size(rd)) {
28 return false;
29 }
30 if (!addr_is_level_aligned(map_addr, level)) {
31 return false;
32 }
33 return true;
34}
35
36/*
37 * Structure commands can operate on all RTTs except for the root RTT so
38 * the minimal valid level is the stage 2 starting level + 1.
39 */
40static bool validate_rtt_structure_cmds(unsigned long map_addr,
41 long level,
42 struct rd *rd)
43{
44 int min_level = realm_rtt_starting_level(rd) + 1;
45
46 if ((level < min_level) || (level > RTT_PAGE_LEVEL)) {
47 return false;
48 }
49 return validate_map_addr(map_addr, level, rd);
50}
51
52/*
53 * Map/Unmap commands can operate up to a level 2 block entry so min_level is
54 * the smallest block size.
55 */
56static bool validate_rtt_map_cmds(unsigned long map_addr,
57 long level,
58 struct rd *rd)
59{
60 if ((level < RTT_MIN_BLOCK_LEVEL) || (level > RTT_PAGE_LEVEL)) {
61 return false;
62 }
63 return validate_map_addr(map_addr, level, rd);
64}
65
66/*
67 * Entry commands can operate on any entry so the minimal valid level is the
68 * stage 2 starting level.
69 */
70static bool validate_rtt_entry_cmds(unsigned long map_addr,
71 long level,
72 struct rd *rd)
73{
74 if ((level < realm_rtt_starting_level(rd)) ||
75 (level > RTT_PAGE_LEVEL)) {
76 return false;
77 }
78 return validate_map_addr(map_addr, level, rd);
79}
80
/*
 * RMI_RTT_CREATE handler.
 *
 * Installs the delegated granule @rtt_addr as a new RTT at @ulevel, replacing
 * the parent s2tte for @map_addr at (level - 1) with a table descriptor.
 * The new table is initialized so that it describes exactly the same state
 * (unassigned/assigned, RIPAS) as the parent entry it replaces ("unfolding").
 *
 * rd_addr   - PA of the RD granule.
 * rtt_addr  - PA of a DELEGATED granule that becomes the new RTT.
 * map_addr  - base IPA covered by the new table.
 * ulevel    - RTT level of the new table.
 *
 * Returns RMI_SUCCESS, RMI_ERROR_INPUT, or a packed RMI_ERROR_RTT code
 * carrying the level at which the walk terminated.
 */
unsigned long smc_rtt_create(unsigned long rd_addr,
			     unsigned long rtt_addr,
			     unsigned long map_addr,
			     unsigned long ulevel)
{
	struct granule *g_rd;
	struct granule *g_tbl;
	struct rd *rd;
	struct granule *g_table_root;
	struct rtt_walk wi;
	unsigned long *s2tt, *parent_s2tt, parent_s2tte;
	long level = (long)ulevel;
	unsigned long ipa_bits;
	unsigned long ret;
	struct realm_s2_context s2_ctx;
	int sl;

	/* Lock both granules in a deadlock-free order and verify states */
	if (!find_lock_two_granules(rtt_addr,
				    GRANULE_STATE_DELEGATED,
				    &g_tbl,
				    rd_addr,
				    GRANULE_STATE_RD,
				    &g_rd)) {
		return RMI_ERROR_INPUT;
	}

	rd = granule_map(g_rd, SLOT_RD);

	if (!validate_rtt_structure_cmds(map_addr, level, rd)) {
		buffer_unmap(rd);
		granule_unlock(g_rd);
		granule_unlock(g_tbl);
		return RMI_ERROR_INPUT;
	}

	/* Snapshot everything needed from the RD before unmapping it */
	g_table_root = rd->s2_ctx.g_rtt;
	sl = realm_rtt_starting_level(rd);
	ipa_bits = realm_ipa_bits(rd);
	s2_ctx = rd->s2_ctx;
	buffer_unmap(rd);

	/*
	 * Lock the RTT root. Enforcing locking order RD->RTT is enough to
	 * ensure deadlock free locking guarantee.
	 */
	granule_lock(g_table_root, GRANULE_STATE_RTT);

	/* Unlock RD after locking RTT Root */
	granule_unlock(g_rd);

	/* Walk to the would-be parent of the new table (level - 1) */
	rtt_walk_lock_unlock(g_table_root, sl, ipa_bits,
			     map_addr, level - 1L, &wi);
	if (wi.last_level != level - 1L) {
		/* Walk stopped early: report the level actually reached */
		ret = pack_return_code(RMI_ERROR_RTT, wi.last_level);
		goto out_unlock_llt;
	}

	parent_s2tt = granule_map(wi.g_llt, SLOT_RTT);
	parent_s2tte = s2tte_read(&parent_s2tt[wi.index]);
	s2tt = granule_map(g_tbl, SLOT_DELEGATED);

	/*
	 * Initialize the new table to mirror the parent entry's state, one
	 * case per possible parent s2tte encoding.
	 */
	if (s2tte_is_unassigned_empty(parent_s2tte)) {
		s2tt_init_unassigned_empty(s2tt);

		/*
		 * Increase the refcount of the parent, the granule was
		 * locked while table walking and hand-over-hand locking.
		 * Atomicity and acquire/release semantics not required because
		 * the table is accessed always locked.
		 */
		__granule_get(wi.g_llt);

	} else if (s2tte_is_unassigned_ram(parent_s2tte)) {
		s2tt_init_unassigned_ram(s2tt);
		__granule_get(wi.g_llt);

	} else if (s2tte_is_unassigned_ns(parent_s2tte)) {
		s2tt_init_unassigned_ns(s2tt);
		__granule_get(wi.g_llt);

	} else if (s2tte_is_unassigned_destroyed(parent_s2tte)) {
		s2tt_init_unassigned_destroyed(s2tt);
		__granule_get(wi.g_llt);

	} else if (s2tte_is_assigned_destroyed(parent_s2tte, level - 1L)) {
		unsigned long block_pa;

		/*
		 * We should observe parent assigned s2tte only when
		 * we create tables above this level.
		 */
		assert(level > RTT_MIN_BLOCK_LEVEL);

		block_pa = s2tte_pa(parent_s2tte, level - 1L);

		s2tt_init_assigned_destroyed(s2tt, block_pa, level);

		/*
		 * Increase the refcount to mark the granule as in-use. refcount
		 * is incremented by S2TTES_PER_S2TT (ref RTT unfolding).
		 */
		__granule_refcount_inc(g_tbl, S2TTES_PER_S2TT);

	} else if (s2tte_is_assigned_empty(parent_s2tte, level - 1L)) {
		unsigned long block_pa;

		/*
		 * We should observe parent assigned s2tte only when
		 * we create tables above this level.
		 */
		assert(level > RTT_MIN_BLOCK_LEVEL);

		block_pa = s2tte_pa(parent_s2tte, level - 1L);

		s2tt_init_assigned_empty(s2tt, block_pa, level);

		/*
		 * Increase the refcount to mark the granule as in-use. refcount
		 * is incremented by S2TTES_PER_S2TT (ref RTT unfolding).
		 */
		__granule_refcount_inc(g_tbl, S2TTES_PER_S2TT);

	} else if (s2tte_is_assigned_ram(parent_s2tte, level - 1L)) {
		unsigned long block_pa;

		/*
		 * We should observe parent valid s2tte only when
		 * we create tables above this level.
		 */
		assert(level > RTT_MIN_BLOCK_LEVEL);

		/*
		 * Break before make. This may cause spurious S2 aborts.
		 */
		s2tte_write(&parent_s2tt[wi.index], 0UL);
		invalidate_block(&s2_ctx, map_addr);

		block_pa = s2tte_pa(parent_s2tte, level - 1L);

		s2tt_init_assigned_ram(s2tt, block_pa, level);

		/*
		 * Increase the refcount to mark the granule as in-use. refcount
		 * is incremented by S2TTES_PER_S2TT (ref RTT unfolding).
		 */
		__granule_refcount_inc(g_tbl, S2TTES_PER_S2TT);

	} else if (s2tte_is_assigned_ns(parent_s2tte, level - 1L)) {
		unsigned long block_pa;

		/*
		 * We should observe parent assigned_ns s2tte only when
		 * we create tables above this level.
		 */
		assert(level > RTT_MIN_BLOCK_LEVEL);

		/*
		 * Break before make. This may cause spurious S2 aborts.
		 */
		s2tte_write(&parent_s2tt[wi.index], 0UL);
		invalidate_block(&s2_ctx, map_addr);

		block_pa = s2tte_pa(parent_s2tte, level - 1L);

		s2tt_init_assigned_ns(s2tt, block_pa, level);

		/*
		 * Increase the refcount to mark the granule as in-use. refcount
		 * is incremented by S2TTES_PER_S2TT (ref RTT unfolding).
		 */
		__granule_refcount_inc(g_tbl, S2TTES_PER_S2TT);

	} else if (s2tte_is_table(parent_s2tte, level - 1L)) {
		/* A table already exists at this position */
		ret = pack_return_code(RMI_ERROR_RTT,
					(unsigned int)(level - 1L));
		goto out_unmap_table;

	} else {
		/* All s2tte encodings must be covered by the cases above */
		assert(false);
	}

	ret = RMI_SUCCESS;

	granule_set_state(g_tbl, GRANULE_STATE_RTT);

	/* Link the new table into its parent */
	parent_s2tte = s2tte_create_table(rtt_addr, level - 1L);
	s2tte_write(&parent_s2tt[wi.index], parent_s2tte);

out_unmap_table:
	buffer_unmap(s2tt);
	buffer_unmap(parent_s2tt);
out_unlock_llt:
	granule_unlock(wi.g_llt);
	granule_unlock(g_tbl);
	return ret;
}
277
/*
 * RMI_RTT_FOLD handler.
 *
 * Collapses a homogeneous RTT at @ulevel back into a single s2tte in its
 * parent table at (level - 1), returning the folded RTT's PA to the host in
 * res->x[1] so it can be undelegated.
 *
 * The fold can only succeed when all S2TTEs in the child table are of the
 * same type: either all-unassigned (refcount == 0) or fully assigned
 * (refcount == S2TTES_PER_S2TT) and physically contiguous.
 *
 * res->x[0] - RMI_SUCCESS, RMI_ERROR_INPUT or a packed RMI_ERROR_RTT code.
 * res->x[1] - PA of the folded RTT on success.
 */
void smc_rtt_fold(unsigned long rd_addr,
		  unsigned long map_addr,
		  unsigned long ulevel,
		  struct smc_result *res)
{
	struct granule *g_rd;
	struct granule *g_tbl;
	struct rd *rd;
	struct granule *g_table_root;
	struct rtt_walk wi;
	unsigned long *table, *parent_s2tt, parent_s2tte;
	long level = (long)ulevel;
	unsigned long ipa_bits, rtt_addr;
	unsigned long ret;
	struct realm_s2_context s2_ctx;
	int sl;

	g_rd = find_lock_granule(rd_addr, GRANULE_STATE_RD);
	if (g_rd == NULL) {
		res->x[0] = RMI_ERROR_INPUT;
		return;
	}

	rd = granule_map(g_rd, SLOT_RD);

	if (!validate_rtt_structure_cmds(map_addr, level, rd)) {
		buffer_unmap(rd);
		granule_unlock(g_rd);
		res->x[0] = RMI_ERROR_INPUT;
		return;
	}

	/* Snapshot RD state, then enforce RD->RTT locking order */
	g_table_root = rd->s2_ctx.g_rtt;
	sl = realm_rtt_starting_level(rd);
	ipa_bits = realm_ipa_bits(rd);
	s2_ctx = rd->s2_ctx;
	buffer_unmap(rd);
	granule_lock(g_table_root, GRANULE_STATE_RTT);
	granule_unlock(g_rd);

	/* Walk to the parent of the table being folded (level - 1) */
	rtt_walk_lock_unlock(g_table_root, sl, ipa_bits,
			     map_addr, level - 1L, &wi);
	if (wi.last_level != level - 1UL) {
		ret = pack_return_code(RMI_ERROR_RTT, wi.last_level);
		goto out_unlock_parent_table;
	}

	parent_s2tt = granule_map(wi.g_llt, SLOT_RTT);
	parent_s2tte = s2tte_read(&parent_s2tt[wi.index]);
	if (!s2tte_is_table(parent_s2tte, level - 1L)) {
		/* Nothing to fold at this position */
		ret = pack_return_code(RMI_ERROR_RTT,
					(unsigned int)(level - 1L));
		goto out_unmap_parent_table;
	}

	rtt_addr = s2tte_pa_table(parent_s2tte, level - 1L);
	g_tbl = find_lock_granule(rtt_addr, GRANULE_STATE_RTT);

	/*
	 * A table descriptor S2TTE always points to a TABLE granule.
	 */
	assert(g_tbl != NULL);

	table = granule_map(g_tbl, SLOT_RTT2);

	/*
	 * The command can succeed only if all 512 S2TTEs are of the same type.
	 * We first check the table's ref. counter to speed up the case when
	 * the host makes a guess whether a memory region can be folded.
	 */
	if (g_tbl->refcount == 0UL) {
		/* No live entries: fold into the matching unassigned type */
		if (table_is_unassigned_destroyed_block(table)) {
			parent_s2tte = s2tte_create_unassigned_destroyed();
		} else if (table_is_unassigned_empty_block(table)) {
			parent_s2tte = s2tte_create_unassigned_empty();
		} else if (table_is_unassigned_ram_block(table)) {
			parent_s2tte = s2tte_create_unassigned_ram();
		} else if (table_is_unassigned_ns_block(table)) {
			parent_s2tte = s2tte_create_unassigned_ns();
		} else if (table_maps_assigned_ns_block(table, level)) {
			/* Assigned_NS entries do not contribute to refcount */
			unsigned long s2tte = s2tte_read(&table[0]);
			unsigned long block_pa = s2tte_pa(s2tte, level);

			parent_s2tte = s2tte_create_assigned_ns(block_pa, level - 1L);
		} else {
			/*
			 * The table holds a mixture of destroyed and
			 * unassigned entries.
			 */
			ret = pack_return_code(RMI_ERROR_RTT, level);
			goto out_unmap_table;
		}
		/* Drop the reference the folded table held on its parent */
		__granule_put(wi.g_llt);
	} else if (g_tbl->refcount == S2TTES_PER_S2TT) {

		unsigned long s2tte, block_pa;

		/* The RMM specification does not allow creating block
		 * entries less than RTT_MIN_BLOCK_LEVEL even though
		 * permitted by the Arm Architecture.
		 * Hence ensure that the table being folded is at a level
		 * higher than the RTT_MIN_BLOCK_LEVEL.
		 *
		 * A fully populated table cannot be destroyed if that
		 * would create a block mapping below RTT_MIN_BLOCK_LEVEL.
		 */
		if (level <= RTT_MIN_BLOCK_LEVEL) {
			ret = pack_return_code(RMI_ERROR_RTT, wi.last_level);
			goto out_unmap_table;
		}

		s2tte = s2tte_read(&table[0]);
		block_pa = s2tte_pa(s2tte, level);

		/*
		 * The table must also refer to a contiguous block through the
		 * same type of s2tte, either Assigned-Empty or Assigned-RAM.
		 */
		if (table_maps_assigned_empty_block(table, level)) {
			parent_s2tte = s2tte_create_assigned_empty(block_pa,
								   level - 1L);
		} else if (table_maps_assigned_ram_block(table, level)) {
			parent_s2tte = s2tte_create_assigned_ram(block_pa,
								 level - 1L);
		/* The table contains mixed entries that cannot be folded */
		} else {
			ret = pack_return_code(RMI_ERROR_RTT, level);
			goto out_unmap_table;
		}

		__granule_refcount_dec(g_tbl, S2TTES_PER_S2TT);
	} else {
		/*
		 * The table holds a mixture of different types of s2ttes.
		 */
		ret = pack_return_code(RMI_ERROR_RTT, level);
		goto out_unmap_table;
	}

	ret = RMI_SUCCESS;
	res->x[1] = rtt_addr;

	/*
	 * Break before make.
	 */
	s2tte_write(&parent_s2tt[wi.index], 0UL);

	/* Valid mappings need per-page TLB invalidation of the whole block */
	if (s2tte_is_assigned_ram(parent_s2tte, level - 1L) ||
	    s2tte_is_assigned_ns(parent_s2tte, level - 1L)) {
		invalidate_pages_in_block(&s2_ctx, map_addr);
	} else {
		invalidate_block(&s2_ctx, map_addr);
	}

	s2tte_write(&parent_s2tt[wi.index], parent_s2tte);

	/* Scrub the folded table before handing it back as DELEGATED */
	granule_memzero_mapped(table);
	granule_set_state(g_tbl, GRANULE_STATE_DELEGATED);

out_unmap_table:
	buffer_unmap(table);
	granule_unlock(g_tbl);
out_unmap_parent_table:
	buffer_unmap(parent_s2tt);
out_unlock_parent_table:
	granule_unlock(wi.g_llt);
	res->x[0] = ret;
}
446
/*
 * RMI_RTT_DESTROY handler.
 *
 * Removes an (empty, refcount == 0) RTT at @ulevel, replacing the parent
 * table descriptor at (level - 1) with an unassigned s2tte: DESTROYED for
 * protected IPAs, NS for unprotected IPAs.
 *
 * res->x[0] - RMI_SUCCESS, RMI_ERROR_INPUT or a packed RMI_ERROR_RTT code.
 * res->x[1] - PA of the destroyed RTT on success.
 * res->x[2] - next IPA past the non-live region (set on success and on
 *             walk-failure paths where skip_non_live is set).
 */
void smc_rtt_destroy(unsigned long rd_addr,
		     unsigned long map_addr,
		     unsigned long ulevel,
		     struct smc_result *res)
{
	struct granule *g_rd;
	struct granule *g_tbl;
	struct rd *rd;
	struct granule *g_table_root;
	struct rtt_walk wi;
	unsigned long *table, *parent_s2tt, parent_s2tte;
	long level = (long)ulevel;
	unsigned long ipa_bits, rtt_addr;
	unsigned long ret;
	struct realm_s2_context s2_ctx;
	int sl;
	bool in_par, skip_non_live = false;

	g_rd = find_lock_granule(rd_addr, GRANULE_STATE_RD);
	if (g_rd == NULL) {
		res->x[0] = RMI_ERROR_INPUT;
		return;
	}

	rd = granule_map(g_rd, SLOT_RD);

	if (!validate_rtt_structure_cmds(map_addr, level, rd)) {
		buffer_unmap(rd);
		granule_unlock(g_rd);
		res->x[0] = RMI_ERROR_INPUT;
		return;
	}

	/* Snapshot RD state, then enforce RD->RTT locking order */
	g_table_root = rd->s2_ctx.g_rtt;
	sl = realm_rtt_starting_level(rd);
	ipa_bits = realm_ipa_bits(rd);
	s2_ctx = rd->s2_ctx;
	in_par = addr_in_par(rd, map_addr);
	buffer_unmap(rd);
	granule_lock(g_table_root, GRANULE_STATE_RTT);
	granule_unlock(g_rd);

	/* Walk to the parent of the table being destroyed (level - 1) */
	rtt_walk_lock_unlock(g_table_root, sl, ipa_bits,
			     map_addr, level - 1L, &wi);

	parent_s2tt = granule_map(wi.g_llt, SLOT_RTT);
	parent_s2tte = s2tte_read(&parent_s2tt[wi.index]);

	if ((wi.last_level != level - 1UL) ||
	    !s2tte_is_table(parent_s2tte, level - 1UL)) {
		/* No table at this position; still report non-live skip */
		ret = pack_return_code(RMI_ERROR_RTT, wi.last_level);
		skip_non_live = true;
		goto out_unmap_parent_table;
	}

	rtt_addr = s2tte_pa_table(parent_s2tte, level - 1L);

	/*
	 * Lock the RTT granule. The 'rtt_addr' is verified, thus can be treated
	 * as an internal granule.
	 */
	g_tbl = find_lock_granule(rtt_addr, GRANULE_STATE_RTT);

	/*
	 * A table descriptor S2TTE always points to a TABLE granule.
	 */
	assert(g_tbl != NULL);

	/*
	 * Read the refcount value. RTT granule is always accessed locked, thus
	 * the refcount can be accessed without atomic operations.
	 */
	if (g_tbl->refcount != 0UL) {
		/* The table still has live entries and cannot be destroyed */
		ret = pack_return_code(RMI_ERROR_RTT, level);
		goto out_unlock_table;
	}

	ret = RMI_SUCCESS;
	res->x[1] = rtt_addr;
	skip_non_live = true;

	table = granule_map(g_tbl, SLOT_RTT2);

	/* Protected IPA folds to DESTROYED; unprotected folds to NS */
	if (in_par) {
		parent_s2tte = s2tte_create_unassigned_destroyed();
	} else {
		parent_s2tte = s2tte_create_unassigned_ns();
	}

	/* Drop the reference the destroyed table held on its parent */
	__granule_put(wi.g_llt);

	/*
	 * Break before make. Note that this may cause spurious S2 aborts.
	 */
	s2tte_write(&parent_s2tt[wi.index], 0UL);
	invalidate_block(&s2_ctx, map_addr);
	s2tte_write(&parent_s2tt[wi.index], parent_s2tte);

	/* Scrub the destroyed table before handing it back as DELEGATED */
	granule_memzero_mapped(table);
	granule_set_state(g_tbl, GRANULE_STATE_DELEGATED);

	buffer_unmap(table);
out_unlock_table:
	granule_unlock(g_tbl);
out_unmap_parent_table:
	if (skip_non_live) {
		res->x[2] = skip_non_live_entries(map_addr, parent_s2tt, &wi);
	}
	buffer_unmap(parent_s2tt);
	granule_unlock(wi.g_llt);
	res->x[0] = ret;
}
559
/* Operation selector for map_unmap_ns() */
enum map_unmap_ns_op {
	MAP_NS,
	UNMAP_NS
};
564
/*
 * Common implementation of RMI_RTT_MAP_UNPROTECTED and
 * RMI_RTT_UNMAP_UNPROTECTED.
 *
 * We don't hold a reference on the NS granule when it is
 * mapped into a realm. Instead we rely on the guarantees
 * provided by the architecture to ensure that a NS access
 * to a protected granule is prohibited even within the realm.
 *
 * rd_addr    - PA of the RD granule.
 * map_addr   - unprotected IPA to map/unmap.
 * level      - RTT level of the target entry.
 * host_s2tte - host-provided s2tte attributes (MAP_NS only; 0 for UNMAP_NS).
 * op         - MAP_NS or UNMAP_NS.
 * res->x[0]  - RMI_SUCCESS, RMI_ERROR_INPUT or packed RMI_ERROR_RTT.
 * res->x[1]  - (UNMAP_NS only) next IPA past the non-live region.
 */
static void map_unmap_ns(unsigned long rd_addr,
			 unsigned long map_addr,
			 long level,
			 unsigned long host_s2tte,
			 enum map_unmap_ns_op op,
			 struct smc_result *res)
{
	struct granule *g_rd;
	struct rd *rd;
	struct granule *g_table_root;
	unsigned long *s2tt, s2tte;
	struct rtt_walk wi;
	unsigned long ipa_bits;
	struct realm_s2_context s2_ctx;
	int sl;

	g_rd = find_lock_granule(rd_addr, GRANULE_STATE_RD);
	if (g_rd == NULL) {
		res->x[0] = RMI_ERROR_INPUT;
		return;
	}

	rd = granule_map(g_rd, SLOT_RD);

	if (!validate_rtt_map_cmds(map_addr, level, rd)) {
		buffer_unmap(rd);
		granule_unlock(g_rd);
		res->x[0] = RMI_ERROR_INPUT;
		return;
	}

	g_table_root = rd->s2_ctx.g_rtt;
	sl = realm_rtt_starting_level(rd);
	ipa_bits = realm_ipa_bits(rd);

	/* Check if map_addr is outside PAR (NS mappings must be) */
	if (addr_in_par(rd, map_addr)) {
		buffer_unmap(rd);
		granule_unlock(g_rd);
		res->x[0] = RMI_ERROR_INPUT;
		return;
	}

	s2_ctx = rd->s2_ctx;
	buffer_unmap(rd);

	/* Enforce RD->RTT locking order */
	granule_lock(g_table_root, GRANULE_STATE_RTT);
	granule_unlock(g_rd);

	rtt_walk_lock_unlock(g_table_root, sl, ipa_bits,
			     map_addr, level, &wi);

	/*
	 * For UNMAP_NS, we need to map the table and look
	 * for the end of the non-live region.
	 */
	if (op == MAP_NS && wi.last_level != level) {
		res->x[0] = pack_return_code(RMI_ERROR_RTT, wi.last_level);
		goto out_unlock_llt;
	}

	s2tt = granule_map(wi.g_llt, SLOT_RTT);
	s2tte = s2tte_read(&s2tt[wi.index]);

	if (op == MAP_NS) {
		/* Only an UNASSIGNED_NS entry may be mapped */
		if (!s2tte_is_unassigned_ns(s2tte)) {
			res->x[0] = pack_return_code(RMI_ERROR_RTT,
							(unsigned int)level);
			goto out_unmap_table;
		}

		s2tte = s2tte_create_assigned_ns(host_s2tte, level);
		s2tte_write(&s2tt[wi.index], s2tte);

	} else if (op == UNMAP_NS) {
		/*
		 * The following check also verifies that map_addr is outside
		 * PAR, as valid_NS s2tte may only cover outside PAR IPA range.
		 */
		bool assigned_ns = s2tte_is_assigned_ns(s2tte, wi.last_level);

		if ((wi.last_level != level) || !assigned_ns) {
			res->x[0] = pack_return_code(RMI_ERROR_RTT,
						(unsigned int)wi.last_level);
			goto out_unmap_table;
		}

		s2tte = s2tte_create_unassigned_ns();
		s2tte_write(&s2tt[wi.index], s2tte);
		/* TLBI scope depends on the size of the removed mapping */
		if (level == RTT_PAGE_LEVEL) {
			invalidate_page(&s2_ctx, map_addr);
		} else {
			invalidate_block(&s2_ctx, map_addr);
		}
	}

	res->x[0] = RMI_SUCCESS;

out_unmap_table:
	if (op == UNMAP_NS) {
		res->x[1] = skip_non_live_entries(map_addr, s2tt, &wi);
	}
	buffer_unmap(s2tt);
out_unlock_llt:
	granule_unlock(wi.g_llt);
}
677
678unsigned long smc_rtt_map_unprotected(unsigned long rd_addr,
679 unsigned long map_addr,
680 unsigned long ulevel,
681 unsigned long s2tte)
682{
683 long level = (long)ulevel;
AlexeiFedorov917eabf2023-04-24 12:20:41 +0100684 struct smc_result res;
Soby Mathewb4c6df42022-11-09 11:13:29 +0000685
686 if (!host_ns_s2tte_is_valid(s2tte, level)) {
687 return RMI_ERROR_INPUT;
688 }
689
AlexeiFedorov917eabf2023-04-24 12:20:41 +0100690 map_unmap_ns(rd_addr, map_addr, level, s2tte, MAP_NS, &res);
691 return res.x[0];
Soby Mathewb4c6df42022-11-09 11:13:29 +0000692}
693
AlexeiFedorov917eabf2023-04-24 12:20:41 +0100694void smc_rtt_unmap_unprotected(unsigned long rd_addr,
695 unsigned long map_addr,
696 unsigned long ulevel,
697 struct smc_result *res)
Soby Mathewb4c6df42022-11-09 11:13:29 +0000698{
AlexeiFedorov917eabf2023-04-24 12:20:41 +0100699 return map_unmap_ns(rd_addr, map_addr, (long)ulevel, 0UL, UNMAP_NS, res);
Soby Mathewb4c6df42022-11-09 11:13:29 +0000700}
701
/*
 * RMI_RTT_READ_ENTRY handler.
 *
 * Walks to @map_addr up to @ulevel and reports the s2tte found there:
 *   ret->x[0] - RMI_SUCCESS or RMI_ERROR_INPUT.
 *   ret->x[1] - level at which the walk terminated.
 *   ret->x[2] - entry state (RMI_UNASSIGNED / RMI_ASSIGNED / RMI_TABLE).
 *   ret->x[3] - output address (PA / host s2tte / table PA), 0 when none.
 *   ret->x[4] - RIPAS value (RIPAS_EMPTY reported where the entry has no
 *               meaningful RIPAS, e.g. NS and table entries).
 */
void smc_rtt_read_entry(unsigned long rd_addr,
			unsigned long map_addr,
			unsigned long ulevel,
			struct smc_result *ret)
{
	struct granule *g_rd, *g_rtt_root;
	struct rd *rd;
	struct rtt_walk wi;
	unsigned long *s2tt, s2tte;
	unsigned long ipa_bits;
	long level = (long)ulevel;
	int sl;

	g_rd = find_lock_granule(rd_addr, GRANULE_STATE_RD);
	if (g_rd == NULL) {
		ret->x[0] = RMI_ERROR_INPUT;
		return;
	}

	rd = granule_map(g_rd, SLOT_RD);

	if (!validate_rtt_entry_cmds(map_addr, level, rd)) {
		buffer_unmap(rd);
		granule_unlock(g_rd);
		ret->x[0] = RMI_ERROR_INPUT;
		return;
	}

	g_rtt_root = rd->s2_ctx.g_rtt;
	sl = realm_rtt_starting_level(rd);
	ipa_bits = realm_ipa_bits(rd);
	buffer_unmap(rd);

	/* Enforce RD->RTT locking order */
	granule_lock(g_rtt_root, GRANULE_STATE_RTT);
	granule_unlock(g_rd);

	rtt_walk_lock_unlock(g_rtt_root, sl, ipa_bits,
			     map_addr, level, &wi);
	s2tt = granule_map(wi.g_llt, SLOT_RTT);
	s2tte = s2tte_read(&s2tt[wi.index]);
	ret->x[1] = wi.last_level;

	/* Decode the s2tte into (state, output address, RIPAS) */
	if (s2tte_is_unassigned_empty(s2tte)) {
		ret->x[2] = RMI_UNASSIGNED;
		ret->x[3] = 0UL;
		ret->x[4] = RIPAS_EMPTY;
	} else if (s2tte_is_unassigned_ram(s2tte)) {
		ret->x[2] = RMI_UNASSIGNED;
		ret->x[3] = 0UL;
		ret->x[4] = RIPAS_RAM;
	} else if (s2tte_is_unassigned_destroyed(s2tte)) {
		ret->x[2] = RMI_UNASSIGNED;
		ret->x[3] = 0UL;
		ret->x[4] = RIPAS_DESTROYED;
	} else if (s2tte_is_assigned_empty(s2tte, wi.last_level)) {
		ret->x[2] = RMI_ASSIGNED;
		ret->x[3] = s2tte_pa(s2tte, wi.last_level);
		ret->x[4] = RIPAS_EMPTY;
	} else if (s2tte_is_assigned_ram(s2tte, wi.last_level)) {
		ret->x[2] = RMI_ASSIGNED;
		ret->x[3] = s2tte_pa(s2tte, wi.last_level);
		ret->x[4] = RIPAS_RAM;
	} else if (s2tte_is_assigned_destroyed(s2tte, wi.last_level)) {
		/* PA is not reported for destroyed entries */
		ret->x[2] = RMI_ASSIGNED;
		ret->x[3] = 0UL;
		ret->x[4] = RIPAS_DESTROYED;
	} else if (s2tte_is_unassigned_ns(s2tte)) {
		ret->x[2] = RMI_UNASSIGNED;
		ret->x[3] = 0UL;
		ret->x[4] = RIPAS_EMPTY;
	} else if (s2tte_is_assigned_ns(s2tte, wi.last_level)) {
		/* Report the host-visible portion of the NS s2tte */
		ret->x[2] = RMI_ASSIGNED;
		ret->x[3] = host_ns_s2tte(s2tte, wi.last_level);
		ret->x[4] = RIPAS_EMPTY;
	} else if (s2tte_is_table(s2tte, wi.last_level)) {
		ret->x[2] = RMI_TABLE;
		ret->x[3] = s2tte_pa_table(s2tte, wi.last_level);
		ret->x[4] = RIPAS_EMPTY;
	} else {
		/* All s2tte encodings must be covered by the cases above */
		assert(false);
	}

	buffer_unmap(s2tt);
	granule_unlock(wi.g_llt);

	ret->x[0] = RMI_SUCCESS;
}
789
/*
 * Extend the realm's RIM (Realm Initial Measurement) with a DATA granule.
 *
 * Builds a measurement descriptor containing the target IPA, the flags and
 * the current RIM, optionally hashes the granule contents into it, then
 * hashes the descriptor to produce the updated RIM in place.
 *
 * rd    - mapped RD of the target realm (rd->measurement is updated).
 * data  - mapped contents of the DATA granule being measured.
 * ipa   - IPA at which the granule is mapped into the realm.
 * flags - RMI_MEASURE_CONTENT to include the granule contents in the
 *         measurement; otherwise only the descriptor is measured.
 */
static void data_granule_measure(struct rd *rd, void *data,
				 unsigned long ipa,
				 unsigned long flags)
{
	struct measurement_desc_data measure_desc = {0};

	/* Initialize the measurement descriptor structure */
	measure_desc.desc_type = MEASURE_DESC_TYPE_DATA;
	measure_desc.len = sizeof(struct measurement_desc_data);
	measure_desc.ipa = ipa;
	measure_desc.flags = flags;
	memcpy(measure_desc.rim,
	       &rd->measurement[RIM_MEASUREMENT_SLOT],
	       measurement_get_size(rd->algorithm));

	if (flags == RMI_MEASURE_CONTENT) {
		/*
		 * Hash the data granule and store the result in the
		 * measurement descriptor structure.
		 */
		measurement_hash_compute(rd->algorithm,
					data,
					GRANULE_SIZE,
					measure_desc.content);
	}

	/*
	 * Hash the measurement descriptor structure; the result is the
	 * updated RIM.
	 */
	measurement_hash_compute(rd->algorithm,
			       &measure_desc,
			       sizeof(measure_desc),
			       rd->measurement[RIM_MEASUREMENT_SLOT]);
}
825
826static unsigned long validate_data_create_unknown(unsigned long map_addr,
827 struct rd *rd)
828{
829 if (!addr_in_par(rd, map_addr)) {
830 return RMI_ERROR_INPUT;
831 }
832
833 if (!validate_map_addr(map_addr, RTT_PAGE_LEVEL, rd)) {
834 return RMI_ERROR_INPUT;
835 }
836
837 return RMI_SUCCESS;
838}
839
840static unsigned long validate_data_create(unsigned long map_addr,
841 struct rd *rd)
842{
843 if (get_rd_state_locked(rd) != REALM_STATE_NEW) {
844 return RMI_ERROR_REALM;
845 }
846
847 return validate_data_create_unknown(map_addr, rd);
848}
849
850/*
AlexeiFedorov0f9cd1f2023-07-10 17:04:58 +0100851 * Implements both RMI_DATA_CREATE and RMI_DATA_CREATE_UNKNOWN
Soby Mathewb4c6df42022-11-09 11:13:29 +0000852 *
AlexeiFedorov0f9cd1f2023-07-10 17:04:58 +0100853 * if @g_src == NULL, implements RMI_DATA_CREATE_UNKNOWN
854 * and RMI_DATA_CREATE otherwise.
Soby Mathewb4c6df42022-11-09 11:13:29 +0000855 */
AlexeiFedorovac923c82023-04-06 15:12:04 +0100856static unsigned long data_create(unsigned long rd_addr,
857 unsigned long data_addr,
Soby Mathewb4c6df42022-11-09 11:13:29 +0000858 unsigned long map_addr,
859 struct granule *g_src,
860 unsigned long flags)
861{
862 struct granule *g_data;
863 struct granule *g_rd;
864 struct granule *g_table_root;
865 struct rd *rd;
866 struct rtt_walk wi;
867 unsigned long s2tte, *s2tt;
Soby Mathewb4c6df42022-11-09 11:13:29 +0000868 enum granule_state new_data_state = GRANULE_STATE_DELEGATED;
869 unsigned long ipa_bits;
870 unsigned long ret;
871 int __unused meas_ret;
872 int sl;
873
874 if (!find_lock_two_granules(data_addr,
875 GRANULE_STATE_DELEGATED,
876 &g_data,
877 rd_addr,
878 GRANULE_STATE_RD,
879 &g_rd)) {
880 return RMI_ERROR_INPUT;
881 }
882
883 rd = granule_map(g_rd, SLOT_RD);
884
885 ret = (g_src != NULL) ?
886 validate_data_create(map_addr, rd) :
887 validate_data_create_unknown(map_addr, rd);
888
889 if (ret != RMI_SUCCESS) {
890 goto out_unmap_rd;
891 }
892
893 g_table_root = rd->s2_ctx.g_rtt;
894 sl = realm_rtt_starting_level(rd);
895 ipa_bits = realm_ipa_bits(rd);
896 granule_lock(g_table_root, GRANULE_STATE_RTT);
897 rtt_walk_lock_unlock(g_table_root, sl, ipa_bits,
898 map_addr, RTT_PAGE_LEVEL, &wi);
899 if (wi.last_level != RTT_PAGE_LEVEL) {
900 ret = pack_return_code(RMI_ERROR_RTT, wi.last_level);
901 goto out_unlock_ll_table;
902 }
903
904 s2tt = granule_map(wi.g_llt, SLOT_RTT);
905 s2tte = s2tte_read(&s2tt[wi.index]);
Soby Mathewb4c6df42022-11-09 11:13:29 +0000906
Soby Mathewb4c6df42022-11-09 11:13:29 +0000907 if (g_src != NULL) {
908 bool ns_access_ok;
AlexeiFedorov0f9cd1f2023-07-10 17:04:58 +0100909 void *data;
Soby Mathewb4c6df42022-11-09 11:13:29 +0000910
AlexeiFedorov0f9cd1f2023-07-10 17:04:58 +0100911 if (!s2tte_is_unassigned_ram(s2tte)) {
912 ret = pack_return_code(RMI_ERROR_RTT, RTT_PAGE_LEVEL);
913 goto out_unmap_ll_table;
914 }
915
916 data = granule_map(g_data, SLOT_DELEGATED);
Soby Mathewb4c6df42022-11-09 11:13:29 +0000917 ns_access_ok = ns_buffer_read(SLOT_NS, g_src, 0U,
918 GRANULE_SIZE, data);
Soby Mathewb4c6df42022-11-09 11:13:29 +0000919 if (!ns_access_ok) {
920 /*
921 * Some data may be copied before the failure. Zero
922 * g_data granule as it will remain in delegated state.
923 */
924 (void)memset(data, 0, GRANULE_SIZE);
925 buffer_unmap(data);
926 ret = RMI_ERROR_INPUT;
927 goto out_unmap_ll_table;
928 }
929
Soby Mathewb4c6df42022-11-09 11:13:29 +0000930 data_granule_measure(rd, data, map_addr, flags);
Soby Mathewb4c6df42022-11-09 11:13:29 +0000931 buffer_unmap(data);
AlexeiFedorov0f9cd1f2023-07-10 17:04:58 +0100932
933 } else if (!s2tte_is_unassigned(s2tte)) {
934 ret = pack_return_code(RMI_ERROR_RTT, RTT_PAGE_LEVEL);
935 goto out_unmap_ll_table;
Soby Mathewb4c6df42022-11-09 11:13:29 +0000936 }
937
938 new_data_state = GRANULE_STATE_DATA;
939
AlexeiFedorovcde1fdc2023-04-18 16:37:25 +0100940 s2tte = s2tte_create_assigned_ram(data_addr, RTT_PAGE_LEVEL);
Soby Mathewb4c6df42022-11-09 11:13:29 +0000941
942 s2tte_write(&s2tt[wi.index], s2tte);
943 __granule_get(wi.g_llt);
944
945 ret = RMI_SUCCESS;
946
947out_unmap_ll_table:
948 buffer_unmap(s2tt);
949out_unlock_ll_table:
950 granule_unlock(wi.g_llt);
951out_unmap_rd:
952 buffer_unmap(rd);
953 granule_unlock(g_rd);
954 granule_unlock_transition(g_data, new_data_state);
955 return ret;
956}
957
AlexeiFedorovac923c82023-04-06 15:12:04 +0100958unsigned long smc_data_create(unsigned long rd_addr,
959 unsigned long data_addr,
Soby Mathewb4c6df42022-11-09 11:13:29 +0000960 unsigned long map_addr,
961 unsigned long src_addr,
962 unsigned long flags)
963{
964 struct granule *g_src;
Soby Mathewb4c6df42022-11-09 11:13:29 +0000965
966 if (flags != RMI_NO_MEASURE_CONTENT && flags != RMI_MEASURE_CONTENT) {
967 return RMI_ERROR_INPUT;
968 }
969
970 g_src = find_granule(src_addr);
971 if ((g_src == NULL) || (g_src->state != GRANULE_STATE_NS)) {
972 return RMI_ERROR_INPUT;
973 }
974
AlexeiFedorovac923c82023-04-06 15:12:04 +0100975 return data_create(rd_addr, data_addr, map_addr, g_src, flags);
Soby Mathewb4c6df42022-11-09 11:13:29 +0000976}
977
AlexeiFedorovac923c82023-04-06 15:12:04 +0100978unsigned long smc_data_create_unknown(unsigned long rd_addr,
979 unsigned long data_addr,
Soby Mathewb4c6df42022-11-09 11:13:29 +0000980 unsigned long map_addr)
981{
AlexeiFedorovac923c82023-04-06 15:12:04 +0100982 return data_create(rd_addr, data_addr, map_addr, NULL, 0);
Soby Mathewb4c6df42022-11-09 11:13:29 +0000983}
984
AlexeiFedorove2002be2023-04-19 17:20:12 +0100985void smc_data_destroy(unsigned long rd_addr,
986 unsigned long map_addr,
987 struct smc_result *res)
Soby Mathewb4c6df42022-11-09 11:13:29 +0000988{
989 struct granule *g_data;
990 struct granule *g_rd;
991 struct granule *g_table_root;
992 struct rtt_walk wi;
993 unsigned long data_addr, s2tte, *s2tt;
994 struct rd *rd;
AlexeiFedorov917eabf2023-04-24 12:20:41 +0100995 unsigned long ipa_bits;
Soby Mathewb4c6df42022-11-09 11:13:29 +0000996 struct realm_s2_context s2_ctx;
Soby Mathewb4c6df42022-11-09 11:13:29 +0000997 int sl;
998
999 g_rd = find_lock_granule(rd_addr, GRANULE_STATE_RD);
1000 if (g_rd == NULL) {
AlexeiFedorove2002be2023-04-19 17:20:12 +01001001 res->x[0] = RMI_ERROR_INPUT;
1002 return;
Soby Mathewb4c6df42022-11-09 11:13:29 +00001003 }
1004
1005 rd = granule_map(g_rd, SLOT_RD);
1006
1007 if (!validate_map_addr(map_addr, RTT_PAGE_LEVEL, rd)) {
1008 buffer_unmap(rd);
1009 granule_unlock(g_rd);
AlexeiFedorove2002be2023-04-19 17:20:12 +01001010 res->x[0] = RMI_ERROR_INPUT;
1011 return;
Soby Mathewb4c6df42022-11-09 11:13:29 +00001012 }
1013
1014 g_table_root = rd->s2_ctx.g_rtt;
1015 sl = realm_rtt_starting_level(rd);
1016 ipa_bits = realm_ipa_bits(rd);
1017 s2_ctx = rd->s2_ctx;
1018 buffer_unmap(rd);
1019
1020 granule_lock(g_table_root, GRANULE_STATE_RTT);
1021 granule_unlock(g_rd);
1022
1023 rtt_walk_lock_unlock(g_table_root, sl, ipa_bits,
1024 map_addr, RTT_PAGE_LEVEL, &wi);
Soby Mathewb4c6df42022-11-09 11:13:29 +00001025
1026 s2tt = granule_map(wi.g_llt, SLOT_RTT);
AlexeiFedorov917eabf2023-04-24 12:20:41 +01001027 if (wi.last_level != RTT_PAGE_LEVEL) {
1028 res->x[0] = pack_return_code(RMI_ERROR_RTT, wi.last_level);
1029 goto out_unmap_ll_table;
1030 }
Soby Mathewb4c6df42022-11-09 11:13:29 +00001031
AlexeiFedorov917eabf2023-04-24 12:20:41 +01001032 s2tte = s2tte_read(&s2tt[wi.index]);
AlexeiFedorovc53b1f72023-07-04 15:37:03 +01001033
AlexeiFedorova43cd312023-04-17 11:42:25 +01001034 if (s2tte_is_assigned_ram(s2tte, RTT_PAGE_LEVEL)) {
1035 data_addr = s2tte_pa(s2tte, RTT_PAGE_LEVEL);
AlexeiFedorovc53b1f72023-07-04 15:37:03 +01001036 s2tte = s2tte_create_unassigned_destroyed();
AlexeiFedorova43cd312023-04-17 11:42:25 +01001037 s2tte_write(&s2tt[wi.index], s2tte);
1038 invalidate_page(&s2_ctx, map_addr);
1039 } else if (s2tte_is_assigned_empty(s2tte, RTT_PAGE_LEVEL)) {
1040 data_addr = s2tte_pa(s2tte, RTT_PAGE_LEVEL);
1041 s2tte = s2tte_create_unassigned_empty();
1042 s2tte_write(&s2tt[wi.index], s2tte);
1043 } else {
AlexeiFedorov917eabf2023-04-24 12:20:41 +01001044 res->x[0] = pack_return_code(RMI_ERROR_RTT, RTT_PAGE_LEVEL);
Soby Mathewb4c6df42022-11-09 11:13:29 +00001045 goto out_unmap_ll_table;
1046 }
1047
Soby Mathewb4c6df42022-11-09 11:13:29 +00001048 __granule_put(wi.g_llt);
1049
1050 /*
1051 * Lock the data granule and check expected state. Correct locking order
1052 * is guaranteed because granule address is obtained from a locked
1053 * granule by table walk. This lock needs to be acquired before a state
1054 * transition to or from GRANULE_STATE_DATA for granule address can happen.
1055 */
1056 g_data = find_lock_granule(data_addr, GRANULE_STATE_DATA);
AlexeiFedorov63b71692023-04-19 11:18:42 +01001057 assert(g_data != NULL);
Soby Mathewb4c6df42022-11-09 11:13:29 +00001058 granule_memzero(g_data, SLOT_DELEGATED);
1059 granule_unlock_transition(g_data, GRANULE_STATE_DELEGATED);
1060
AlexeiFedorov917eabf2023-04-24 12:20:41 +01001061 res->x[0] = RMI_SUCCESS;
AlexeiFedorove2002be2023-04-19 17:20:12 +01001062 res->x[1] = data_addr;
Soby Mathewb4c6df42022-11-09 11:13:29 +00001063out_unmap_ll_table:
AlexeiFedorov917eabf2023-04-24 12:20:41 +01001064 res->x[2] = skip_non_live_entries(map_addr, s2tt, &wi);
Soby Mathewb4c6df42022-11-09 11:13:29 +00001065 buffer_unmap(s2tt);
Soby Mathewb4c6df42022-11-09 11:13:29 +00001066 granule_unlock(wi.g_llt);
Soby Mathewb4c6df42022-11-09 11:13:29 +00001067}
1068
AlexeiFedorov0fb44552023-04-14 15:37:58 +01001069/*
AlexeiFedorov5cf35ba2023-04-25 10:02:20 +01001070 * Update the ripas value for the entry pointed by @s2ttep.
AlexeiFedorov0fb44552023-04-14 15:37:58 +01001071 *
AlexeiFedorov5cf35ba2023-04-25 10:02:20 +01001072 * Returns:
1073 * < 0 - On error and the operation was aborted,
1074 * e.g., entry cannot have a ripas.
1075 * 0 - Operation was success and no TLBI is required.
1076 * > 0 - Operation was success and TLBI is required.
1077 * Sets:
1078 * @(*do_tlbi) to 'true' if the TLBs have to be invalidated.
AlexeiFedorov0fb44552023-04-14 15:37:58 +01001079 */
AlexeiFedorov5cf35ba2023-04-25 10:02:20 +01001080static int update_ripas(unsigned long *s2ttep, unsigned long level,
1081 enum ripas ripas_val)
Soby Mathewb4c6df42022-11-09 11:13:29 +00001082{
AlexeiFedorov5cf35ba2023-04-25 10:02:20 +01001083 unsigned long pa, s2tte = s2tte_read(s2ttep);
1084 int ret = 0;
AlexeiFedorov0fb44552023-04-14 15:37:58 +01001085
AlexeiFedorov5cf35ba2023-04-25 10:02:20 +01001086 if (!s2tte_has_ripas(s2tte, level)) {
1087 return -1;
Soby Mathewb4c6df42022-11-09 11:13:29 +00001088 }
1089
AlexeiFedorov0fb44552023-04-14 15:37:58 +01001090 if (ripas_val == RIPAS_RAM) {
AlexeiFedorov5cf35ba2023-04-25 10:02:20 +01001091 if (s2tte_is_unassigned_empty(s2tte)) {
1092 s2tte = s2tte_create_unassigned_ram();
1093 } else if (s2tte_is_assigned_empty(s2tte, level)) {
1094 pa = s2tte_pa(s2tte, level);
1095 s2tte = s2tte_create_assigned_ram(pa, level);
1096 } else {
1097 /* No action is required */
1098 return 0;
Soby Mathewb4c6df42022-11-09 11:13:29 +00001099 }
AlexeiFedorov0fb44552023-04-14 15:37:58 +01001100 } else if (ripas_val == RIPAS_EMPTY) {
AlexeiFedorov5cf35ba2023-04-25 10:02:20 +01001101 if (s2tte_is_unassigned_ram(s2tte)) {
1102 s2tte = s2tte_create_unassigned_empty();
1103 } else if (s2tte_is_assigned_ram(s2tte, level)) {
1104 pa = s2tte_pa(s2tte, level);
1105 s2tte = s2tte_create_assigned_empty(pa, level);
1106 /* TLBI is required */
1107 ret = 1;
1108 } else {
1109 /* No action is required */
1110 return 0;
AlexeiFedorov0fb44552023-04-14 15:37:58 +01001111 }
Soby Mathewb4c6df42022-11-09 11:13:29 +00001112 }
AlexeiFedorov5cf35ba2023-04-25 10:02:20 +01001113 s2tte_write(s2ttep, s2tte);
1114 return ret;
Soby Mathewb4c6df42022-11-09 11:13:29 +00001115}
1116
1117static void ripas_granule_measure(struct rd *rd,
AlexeiFedorov960d1612023-04-25 13:23:39 +01001118 unsigned long base,
1119 unsigned long top)
Soby Mathewb4c6df42022-11-09 11:13:29 +00001120{
1121 struct measurement_desc_ripas measure_desc = {0};
1122
1123 /* Initialize the measurement descriptior structure */
1124 measure_desc.desc_type = MEASURE_DESC_TYPE_RIPAS;
1125 measure_desc.len = sizeof(struct measurement_desc_ripas);
AlexeiFedorov960d1612023-04-25 13:23:39 +01001126 measure_desc.base = base;
1127 measure_desc.top = top;
AlexeiFedorov5cf35ba2023-04-25 10:02:20 +01001128 (void)memcpy(measure_desc.rim,
1129 &rd->measurement[RIM_MEASUREMENT_SLOT],
1130 measurement_get_size(rd->algorithm));
Soby Mathewb4c6df42022-11-09 11:13:29 +00001131
1132 /*
1133 * Hashing the measurement descriptor structure; the result is the
1134 * updated RIM.
1135 */
1136 measurement_hash_compute(rd->algorithm,
1137 &measure_desc,
1138 sizeof(measure_desc),
1139 rd->measurement[RIM_MEASUREMENT_SLOT]);
1140}
1141
AlexeiFedorov960d1612023-04-25 13:23:39 +01001142void smc_rtt_init_ripas(unsigned long rd_addr,
1143 unsigned long base,
1144 unsigned long top,
1145 struct smc_result *res)
Soby Mathewb4c6df42022-11-09 11:13:29 +00001146{
1147 struct granule *g_rd, *g_rtt_root;
1148 struct rd *rd;
AlexeiFedorov960d1612023-04-25 13:23:39 +01001149 unsigned long ipa_bits, addr, map_size;
Soby Mathewb4c6df42022-11-09 11:13:29 +00001150 struct rtt_walk wi;
1151 unsigned long s2tte, *s2tt;
AlexeiFedorov960d1612023-04-25 13:23:39 +01001152 long level;
1153 unsigned int index;
Soby Mathewb4c6df42022-11-09 11:13:29 +00001154 int sl;
1155
1156 g_rd = find_lock_granule(rd_addr, GRANULE_STATE_RD);
1157 if (g_rd == NULL) {
AlexeiFedorov960d1612023-04-25 13:23:39 +01001158 res->x[0] = RMI_ERROR_INPUT;
1159 return;
Soby Mathewb4c6df42022-11-09 11:13:29 +00001160 }
1161
1162 rd = granule_map(g_rd, SLOT_RD);
1163
1164 if (get_rd_state_locked(rd) != REALM_STATE_NEW) {
1165 buffer_unmap(rd);
1166 granule_unlock(g_rd);
AlexeiFedorov960d1612023-04-25 13:23:39 +01001167 res->x[0] = RMI_ERROR_REALM;
1168 return;
Soby Mathewb4c6df42022-11-09 11:13:29 +00001169 }
1170
AlexeiFedorov960d1612023-04-25 13:23:39 +01001171 if (!validate_map_addr(base, RTT_PAGE_LEVEL, rd) ||
1172 !validate_rtt_entry_cmds(top, RTT_PAGE_LEVEL, rd)) {
Soby Mathewb4c6df42022-11-09 11:13:29 +00001173 buffer_unmap(rd);
1174 granule_unlock(g_rd);
AlexeiFedorov960d1612023-04-25 13:23:39 +01001175 res->x[0] = RMI_ERROR_INPUT;
1176 return;
Soby Mathewb4c6df42022-11-09 11:13:29 +00001177 }
1178
AlexeiFedorov960d1612023-04-25 13:23:39 +01001179 if (!addr_in_par(rd, base) || !addr_in_par(rd, top)) {
Soby Mathewb4c6df42022-11-09 11:13:29 +00001180 buffer_unmap(rd);
1181 granule_unlock(g_rd);
AlexeiFedorov960d1612023-04-25 13:23:39 +01001182 res->x[0] = RMI_ERROR_INPUT;
1183 return;
Soby Mathewb4c6df42022-11-09 11:13:29 +00001184 }
1185
1186 g_rtt_root = rd->s2_ctx.g_rtt;
1187 sl = realm_rtt_starting_level(rd);
1188 ipa_bits = realm_ipa_bits(rd);
1189
1190 granule_lock(g_rtt_root, GRANULE_STATE_RTT);
Soby Mathewb4c6df42022-11-09 11:13:29 +00001191
1192 rtt_walk_lock_unlock(g_rtt_root, sl, ipa_bits,
AlexeiFedorov960d1612023-04-25 13:23:39 +01001193 base, RTT_PAGE_LEVEL, &wi);
1194 level = wi.last_level;
Soby Mathewb4c6df42022-11-09 11:13:29 +00001195 s2tt = granule_map(wi.g_llt, SLOT_RTT);
AlexeiFedorov960d1612023-04-25 13:23:39 +01001196 map_size = s2tte_map_size(level);
1197 addr = base & ~(map_size - 1UL);
Soby Mathewb4c6df42022-11-09 11:13:29 +00001198
AlexeiFedorov960d1612023-04-25 13:23:39 +01001199 /*
1200 * If the RTTE covers a range below "base", we need to
1201 * go deeper.
1202 */
1203 if (addr != base) {
1204 res->x[0] = pack_return_code(RMI_ERROR_RTT,
1205 (unsigned int)level);
Soby Mathewb4c6df42022-11-09 11:13:29 +00001206 goto out_unmap_llt;
1207 }
1208
AlexeiFedorov960d1612023-04-25 13:23:39 +01001209 for (index = wi.index; index < S2TTES_PER_S2TT;
1210 index++, addr += map_size) {
1211 unsigned long next = addr + map_size;
1212
1213 if (next > top) {
1214 break;
1215 }
1216
1217 s2tte = s2tte_read(&s2tt[index]);
1218 if (s2tte_is_unassigned_empty(s2tte)) {
1219 s2tte = s2tte_create_unassigned_ram();
1220 s2tte_write(&s2tt[index], s2tte);
1221 } else if (!s2tte_is_unassigned_ram(s2tte)) {
1222 break;
1223 }
1224 ripas_granule_measure(rd, addr, next);
1225 }
1226
1227 if (addr > base) {
1228 res->x[0] = RMI_SUCCESS;
1229 res->x[1] = addr;
1230 } else {
1231 res->x[0] = pack_return_code(RMI_ERROR_RTT,
1232 (unsigned int)level);
1233 }
Soby Mathewb4c6df42022-11-09 11:13:29 +00001234
1235out_unmap_llt:
1236 buffer_unmap(s2tt);
Soby Mathewb4c6df42022-11-09 11:13:29 +00001237 buffer_unmap(rd);
1238 granule_unlock(wi.g_llt);
AlexeiFedorov80295e42023-07-10 13:11:14 +01001239 granule_unlock(g_rd);
Soby Mathewb4c6df42022-11-09 11:13:29 +00001240}
1241
AlexeiFedorov5cf35ba2023-04-25 10:02:20 +01001242static void rtt_set_ripas_range(struct realm_s2_context *s2_ctx,
1243 unsigned long *s2tt,
1244 unsigned long base,
1245 unsigned long top,
1246 struct rtt_walk *wi,
1247 unsigned long ripas_val,
1248 struct smc_result *res)
1249{
1250 unsigned long addr;
1251 unsigned int index = wi->index;
1252 long level = wi->last_level;
1253 unsigned long map_size = s2tte_map_size(level);
1254
1255 /* Align to the RTT level */
1256 addr = base & ~(map_size - 1UL);
1257
1258 /* Make sure we don't touch a range below the requested range */
1259 if (addr != base) {
1260 res->x[0] = pack_return_code(RMI_ERROR_RTT, level);
1261 return;
1262 }
1263
1264 for (index = wi->index; index < S2TTES_PER_S2TT;
1265 index++, addr += map_size) {
1266 unsigned long next = addr + map_size;
1267 int ret;
1268
1269 /* If this entry crosses the range, abort. */
1270 if (next > top) {
1271 break;
1272 }
1273
1274 ret = update_ripas(&s2tt[index], level, ripas_val);
1275 if (ret < 0) {
1276 break;
1277 }
1278
1279 /* Handle TLBI */
1280 if (ret != 0) {
1281 if (level == RTT_PAGE_LEVEL) {
1282 invalidate_page(s2_ctx, addr);
1283 } else {
1284 invalidate_block(s2_ctx, addr);
1285 }
1286 }
1287 }
1288
1289 if (addr > base) {
1290 res->x[0] = RMI_SUCCESS;
1291 res->x[1] = addr;
1292 } else {
1293 res->x[0] = pack_return_code(RMI_ERROR_RTT, level);
1294 }
1295}
1296
/*
 * Handler for RMI_RTT_SET_RIPAS: applies the RIPAS change requested by
 * the realm (recorded in the REC by a preceding RSI call) to the IPA
 * range [@base, @top), and advances the REC's progress cursor.
 *
 * Results (in @res):
 *   x[0] - RMI status code.
 *   x[1] - first IPA not processed by this call (on success).
 */
void smc_rtt_set_ripas(unsigned long rd_addr,
		       unsigned long rec_addr,
		       unsigned long base,
		       unsigned long top,
		       struct smc_result *res)
{
	struct granule *g_rd, *g_rec, *g_rtt_root;
	struct rec *rec;
	struct rd *rd;
	unsigned long ipa_bits;
	struct rtt_walk wi;
	unsigned long *s2tt;
	struct realm_s2_context s2_ctx;
	enum ripas ripas_val;
	int sl;

	if (!find_lock_two_granules(rd_addr,
				    GRANULE_STATE_RD,
				    &g_rd,
				    rec_addr,
				    GRANULE_STATE_REC,
				    &g_rec)) {
		res->x[0] = RMI_ERROR_INPUT;
		return;
	}

	/* A REC with a non-zero refcount is in use by another CPU */
	if (granule_refcount_read_acquire(g_rec) != 0UL) {
		res->x[0] = RMI_ERROR_REC;
		goto out_unlock_rec_rd;
	}

	rec = granule_map(g_rec, SLOT_REC);

	/* The REC must belong to the realm described by @rd_addr */
	if (g_rd != rec->realm_info.g_rd) {
		res->x[0] = RMI_ERROR_REC;
		goto out_unmap_rec;
	}

	/* The RIPAS value to apply was recorded by the realm's RSI call */
	ripas_val = rec->set_ripas.ripas_val;

	/*
	 * Return error in case of target region:
	 * - is not the next chunk of requested region
	 * - extends beyond the end of requested region
	 */
	if ((base != rec->set_ripas.addr) || (top > rec->set_ripas.top)) {
		res->x[0] = RMI_ERROR_INPUT;
		goto out_unmap_rec;
	}

	rd = granule_map(g_rd, SLOT_RD);

	/*
	 * At this point, we know base == rec->set_ripas.addr
	 * and thus must be aligned to GRANULE size.
	 */
	assert(validate_map_addr(base, RTT_PAGE_LEVEL, rd));

	if (!validate_map_addr(top, RTT_PAGE_LEVEL, rd)) {
		res->x[0] = RMI_ERROR_INPUT;
		goto out_unmap_rd;
	}

	g_rtt_root = rd->s2_ctx.g_rtt;
	sl = realm_rtt_starting_level(rd);
	ipa_bits = realm_ipa_bits(rd);
	s2_ctx = rd->s2_ctx;

	granule_lock(g_rtt_root, GRANULE_STATE_RTT);

	/* Walk to the deepest level possible */
	rtt_walk_lock_unlock(g_rtt_root, sl, ipa_bits,
			     base, RTT_PAGE_LEVEL, &wi);

	s2tt = granule_map(wi.g_llt, SLOT_RTT);

	rtt_set_ripas_range(&s2_ctx, s2tt, base, top, &wi, ripas_val, res);
	/* On success, record progress so the realm can resume from x[1] */
	if (res->x[0] == RMI_SUCCESS) {
		rec->set_ripas.addr = res->x[1];
	}

	buffer_unmap(s2tt);
	granule_unlock(wi.g_llt);
out_unmap_rd:
	buffer_unmap(rd);
out_unmap_rec:
	buffer_unmap(rec);
out_unlock_rec_rd:
	granule_unlock(g_rec);
	granule_unlock(g_rd);
}