/*
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * SPDX-FileCopyrightText: Copyright TF-RMM Contributors.
 */

#include <buffer.h>
#include <granule.h>
#include <measurement.h>
#include <realm.h>
#include <ripas.h>
#include <smc-handler.h>
#include <smc-rmi.h>
#include <smc.h>
#include <stddef.h>
#include <string.h>
#include <table.h>

/*
 * Validate the map_addr value passed to RMI_RTT_* and RMI_DATA_* commands.
 */
static bool validate_map_addr(unsigned long map_addr,
			      long level,
			      struct rd *rd)
{
	return ((map_addr < realm_ipa_size(rd)) &&
		addr_is_level_aligned(map_addr, level));
}

/*
 * Structure commands can operate on all RTTs except for the root RTT so
 * the minimal valid level is the stage 2 starting level + 1.
 */
static bool validate_rtt_structure_cmds(unsigned long map_addr,
					long level,
					struct rd *rd)
{
	int min_level = realm_rtt_starting_level(rd) + 1;

	if ((level < min_level) || (level > RTT_PAGE_LEVEL)) {
		return false;
	}
	return validate_map_addr(map_addr, level - 1L, rd);
}

/*
 * Map/Unmap commands can operate up to a level 2 block entry so min_level is
 * the smallest block size.
 */
static bool validate_rtt_map_cmds(unsigned long map_addr,
				  long level,
				  struct rd *rd)
{
	if ((level < RTT_MIN_BLOCK_LEVEL) || (level > RTT_PAGE_LEVEL)) {
		return false;
	}
	return validate_map_addr(map_addr, level, rd);
}

/*
 * Entry commands can operate on any entry so the minimal valid level is the
 * stage 2 starting level.
 */
static bool validate_rtt_entry_cmds(unsigned long map_addr,
				    long level,
				    struct rd *rd)
{
	if ((level < realm_rtt_starting_level(rd)) ||
	    (level > RTT_PAGE_LEVEL)) {
		return false;
	}
	return validate_map_addr(map_addr, level, rd);
}

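/*
 * Creates an RTT at level @ulevel from a DELEGATED granule and installs it
 * in the parent RTT entry which maps @map_addr at level @ulevel - 1. The new
 * table is initialised to describe the same state (unassigned or assigned,
 * with the same RIPAS) as the parent entry it replaces, i.e. the parent
 * entry is unfolded into the child table.
 */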
unsigned long smc_rtt_create(unsigned long rd_addr,
			     unsigned long rtt_addr,
			     unsigned long map_addr,
			     unsigned long ulevel)
{
	struct granule *g_rd;
	struct granule *g_tbl;
	struct rd *rd;
	struct granule *g_table_root;
	struct rtt_walk wi;
	unsigned long *s2tt, *parent_s2tt, parent_s2tte;
	long level = (long)ulevel;
	unsigned long ipa_bits;
	unsigned long ret;
	struct realm_s2_context s2_ctx;
	int sl;

	if (!find_lock_two_granules(rtt_addr,
				    GRANULE_STATE_DELEGATED,
				    &g_tbl,
				    rd_addr,
				    GRANULE_STATE_RD,
				    &g_rd)) {
		return RMI_ERROR_INPUT;
	}

	rd = granule_map(g_rd, SLOT_RD);
	assert(rd != NULL);

	if (!validate_rtt_structure_cmds(map_addr, level, rd)) {
		buffer_unmap(rd);
		granule_unlock(g_rd);
		granule_unlock(g_tbl);
		return RMI_ERROR_INPUT;
	}

	g_table_root = rd->s2_ctx.g_rtt;
	sl = realm_rtt_starting_level(rd);
	ipa_bits = realm_ipa_bits(rd);
	s2_ctx = rd->s2_ctx;
	buffer_unmap(rd);

	/*
	 * Lock the RTT root. Enforcing locking order RD->RTT is enough to
	 * ensure a deadlock-free locking guarantee.
	 */
	granule_lock(g_table_root, GRANULE_STATE_RTT);

	/* Unlock RD after locking RTT Root */
	granule_unlock(g_rd);

	rtt_walk_lock_unlock(g_table_root, sl, ipa_bits,
			     map_addr, level - 1L, &wi);
	if (wi.last_level != (level - 1L)) {
		ret = pack_return_code(RMI_ERROR_RTT,
					(unsigned int)wi.last_level);
		goto out_unlock_llt;
	}

	parent_s2tt = granule_map(wi.g_llt, SLOT_RTT);
	assert(parent_s2tt != NULL);

	parent_s2tte = s2tte_read(&parent_s2tt[wi.index]);
	s2tt = granule_map(g_tbl, SLOT_DELEGATED);
	assert(s2tt != NULL);

	if (s2tte_is_unassigned_empty(parent_s2tte)) {
		s2tt_init_unassigned_empty(s2tt);

		/*
		 * Increase the refcount of the parent, the granule was
		 * locked while table walking and hand-over-hand locking.
		 * Atomicity and acquire/release semantics not required because
		 * the table is accessed always locked.
		 */
		__granule_get(wi.g_llt);

	} else if (s2tte_is_unassigned_ram(parent_s2tte)) {
		s2tt_init_unassigned_ram(s2tt);
		__granule_get(wi.g_llt);

	} else if (s2tte_is_unassigned_ns(parent_s2tte)) {
		s2tt_init_unassigned_ns(s2tt);
		__granule_get(wi.g_llt);

	} else if (s2tte_is_unassigned_destroyed(parent_s2tte)) {
		s2tt_init_unassigned_destroyed(s2tt);
		__granule_get(wi.g_llt);

	} else if (s2tte_is_assigned_destroyed(parent_s2tte, level - 1L)) {
		unsigned long block_pa;

		/*
		 * We should observe parent assigned s2tte only when
		 * we create tables above this level.
		 */
		assert(level > RTT_MIN_BLOCK_LEVEL);

		block_pa = s2tte_pa(parent_s2tte, level - 1L);

		s2tt_init_assigned_destroyed(s2tt, block_pa, level);

		/*
		 * Increase the refcount to mark the granule as in-use. refcount
		 * is incremented by S2TTES_PER_S2TT (ref RTT unfolding).
		 */
		__granule_refcount_inc(g_tbl, S2TTES_PER_S2TT);

	} else if (s2tte_is_assigned_empty(parent_s2tte, level - 1L)) {
		unsigned long block_pa;

		/*
		 * We should observe parent assigned s2tte only when
		 * we create tables above this level.
		 */
		assert(level > RTT_MIN_BLOCK_LEVEL);

		block_pa = s2tte_pa(parent_s2tte, level - 1L);

		s2tt_init_assigned_empty(s2tt, block_pa, level);

		/*
		 * Increase the refcount to mark the granule as in-use. refcount
		 * is incremented by S2TTES_PER_S2TT (ref RTT unfolding).
		 */
		__granule_refcount_inc(g_tbl, S2TTES_PER_S2TT);

	} else if (s2tte_is_assigned_ram(parent_s2tte, level - 1L)) {
		unsigned long block_pa;

		/*
		 * We should observe parent valid s2tte only when
		 * we create tables above this level.
		 */
		assert(level > RTT_MIN_BLOCK_LEVEL);

		/*
		 * Break before make. This may cause spurious S2 aborts.
		 */
		s2tte_write(&parent_s2tt[wi.index], 0UL);
		invalidate_block(&s2_ctx, map_addr);

		block_pa = s2tte_pa(parent_s2tte, level - 1L);

		s2tt_init_assigned_ram(s2tt, block_pa, level);

		/*
		 * Increase the refcount to mark the granule as in-use. refcount
		 * is incremented by S2TTES_PER_S2TT (ref RTT unfolding).
		 */
		__granule_refcount_inc(g_tbl, S2TTES_PER_S2TT);

	} else if (s2tte_is_assigned_ns(parent_s2tte, level - 1L)) {
		unsigned long block_pa;

		/*
		 * We should observe parent assigned_ns s2tte only when
		 * we create tables above this level.
		 */
		assert(level > RTT_MIN_BLOCK_LEVEL);

		/*
		 * Break before make. This may cause spurious S2 aborts.
		 */
		s2tte_write(&parent_s2tt[wi.index], 0UL);
		invalidate_block(&s2_ctx, map_addr);

		block_pa = s2tte_pa(parent_s2tte, level - 1L);

		s2tt_init_assigned_ns(s2tt, parent_s2tte, block_pa, level);

		/*
		 * Increment the refcount on the parent for the new RTT we are
		 * about to add. The NS block entry doesn't have a refcount
		 * on the parent RTT.
		 */
		__granule_get(wi.g_llt);

	} else if (s2tte_is_table(parent_s2tte, level - 1L)) {
		ret = pack_return_code(RMI_ERROR_RTT,
					(unsigned int)(level - 1L));
		goto out_unmap_table;

	} else {
		assert(false);
	}

	ret = RMI_SUCCESS;

	granule_set_state(g_tbl, GRANULE_STATE_RTT);

	parent_s2tte = s2tte_create_table(rtt_addr, level - 1L);
	s2tte_write(&parent_s2tt[wi.index], parent_s2tte);

out_unmap_table:
	buffer_unmap(s2tt);
	buffer_unmap(parent_s2tt);
out_unlock_llt:
	granule_unlock(wi.g_llt);
	granule_unlock(g_tbl);
	return ret;
}

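/*
 * Folds an RTT at level @ulevel back into a single entry of its parent
 * table. The fold can only succeed when all entries of the child RTT are of
 * the same type, either all unassigned with the same RIPAS or all mapping a
 * contiguous block of the same kind. On success the child RTT is zeroed,
 * returned to the DELEGATED state and its address is reported in x1.
 */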
void smc_rtt_fold(unsigned long rd_addr,
		  unsigned long map_addr,
		  unsigned long ulevel,
		  struct smc_result *res)
{
	struct granule *g_rd;
	struct granule *g_tbl;
	struct rd *rd;
	struct granule *g_table_root;
	struct rtt_walk wi;
	unsigned long *table, *parent_s2tt, parent_s2tte;
	long level = (long)ulevel;
	unsigned long ipa_bits, rtt_addr;
	unsigned long ret;
	struct realm_s2_context s2_ctx;
	int sl;

	g_rd = find_lock_granule(rd_addr, GRANULE_STATE_RD);
	if (g_rd == NULL) {
		res->x[0] = RMI_ERROR_INPUT;
		return;
	}

	rd = granule_map(g_rd, SLOT_RD);
	assert(rd != NULL);

	if (!validate_rtt_structure_cmds(map_addr, level, rd)) {
		buffer_unmap(rd);
		granule_unlock(g_rd);
		res->x[0] = RMI_ERROR_INPUT;
		return;
	}

	g_table_root = rd->s2_ctx.g_rtt;
	sl = realm_rtt_starting_level(rd);
	ipa_bits = realm_ipa_bits(rd);
	s2_ctx = rd->s2_ctx;
	buffer_unmap(rd);
	granule_lock(g_table_root, GRANULE_STATE_RTT);
	granule_unlock(g_rd);

	rtt_walk_lock_unlock(g_table_root, sl, ipa_bits,
			     map_addr, level - 1L, &wi);
	if (wi.last_level != (level - 1L)) {
		ret = pack_return_code(RMI_ERROR_RTT,
					(unsigned int)wi.last_level);
		goto out_unlock_parent_table;
	}

	parent_s2tt = granule_map(wi.g_llt, SLOT_RTT);
	assert(parent_s2tt != NULL);

	parent_s2tte = s2tte_read(&parent_s2tt[wi.index]);
	if (!s2tte_is_table(parent_s2tte, level - 1L)) {
		ret = pack_return_code(RMI_ERROR_RTT,
					(unsigned int)(level - 1L));
		goto out_unmap_parent_table;
	}

	rtt_addr = s2tte_pa_table(parent_s2tte, level - 1L);
	g_tbl = find_lock_granule(rtt_addr, GRANULE_STATE_RTT);

	/*
	 * A table descriptor S2TTE always points to a TABLE granule.
	 */
	assert(g_tbl != NULL);

	table = granule_map(g_tbl, SLOT_RTT2);
	assert(table != NULL);

	/*
	 * The command can succeed only if all 512 S2TTEs are of the same type.
	 * We first check the table's ref. counter to speed up the case when
	 * the host makes a guess whether a memory region can be folded.
	 */
	if (g_tbl->refcount == 0UL) {
		if (table_is_unassigned_destroyed_block(table)) {
			parent_s2tte = s2tte_create_unassigned_destroyed();
		} else if (table_is_unassigned_empty_block(table)) {
			parent_s2tte = s2tte_create_unassigned_empty();
		} else if (table_is_unassigned_ram_block(table)) {
			parent_s2tte = s2tte_create_unassigned_ram();
		} else if (table_is_unassigned_ns_block(table)) {
			parent_s2tte = s2tte_create_unassigned_ns();
		} else if (table_maps_assigned_ns_block(table, level)) {
			unsigned long s2tte = s2tte_read(&table[0]);

			/*
			 * Since table_maps_assigned_ns_block() has succeeded,
			 * the PA in the first entry of the table is aligned at
			 * the parent level. Use the TTE from the first entry
			 * directly as it also has the NS attributes to be used
			 * for the parent block entry.
			 */
			parent_s2tte = s2tte_create_assigned_ns(s2tte, level - 1L);
		} else {
			/*
			 * The table holds a mixture of destroyed and
			 * unassigned entries.
			 */
			ret = pack_return_code(RMI_ERROR_RTT,
						(unsigned int)level);
			goto out_unmap_table;
		}
		__granule_put(wi.g_llt);
	} else if (g_tbl->refcount == S2TTES_PER_S2TT) {

		unsigned long s2tte, block_pa;

		/* The RMM specification does not allow creating block
		 * entries less than RTT_MIN_BLOCK_LEVEL even though
		 * permitted by the Arm Architecture.
		 * Hence ensure that the table being folded is at a level
		 * higher than the RTT_MIN_BLOCK_LEVEL.
		 *
		 * A fully populated table cannot be destroyed if that
		 * would create a block mapping below RTT_MIN_BLOCK_LEVEL.
		 */
		if (level <= RTT_MIN_BLOCK_LEVEL) {
			ret = pack_return_code(RMI_ERROR_RTT,
						(unsigned int)wi.last_level);
			goto out_unmap_table;
		}

		s2tte = s2tte_read(&table[0]);
		block_pa = s2tte_pa(s2tte, level);

		/*
		 * The table must also refer to a contiguous block through the
		 * same type of s2tte: assigned_empty, assigned_ram or
		 * assigned_destroyed.
		 */
		if (table_maps_assigned_empty_block(table, level)) {
			parent_s2tte = s2tte_create_assigned_empty(block_pa,
								   level - 1L);
		} else if (table_maps_assigned_ram_block(table, level)) {
			parent_s2tte = s2tte_create_assigned_ram(block_pa,
								 level - 1L);
		} else if (table_maps_assigned_destroyed_block(table, level)) {
			parent_s2tte = s2tte_create_assigned_destroyed(block_pa,
								       level - 1L);
		/* The table contains mixed entries that cannot be folded */
		} else {
			ret = pack_return_code(RMI_ERROR_RTT,
						(unsigned int)level);
			goto out_unmap_table;
		}

		__granule_refcount_dec(g_tbl, S2TTES_PER_S2TT);
	} else {
		/*
		 * The table holds a mixture of different types of s2ttes.
		 */
		ret = pack_return_code(RMI_ERROR_RTT,
					(unsigned int)level);
		goto out_unmap_table;
	}

	ret = RMI_SUCCESS;
	res->x[1] = rtt_addr;

	/*
	 * Break before make.
	 */
	s2tte_write(&parent_s2tt[wi.index], 0UL);

	if (s2tte_is_assigned_ram(parent_s2tte, level - 1L) ||
	    s2tte_is_assigned_ns(parent_s2tte, level - 1L)) {
		invalidate_pages_in_block(&s2_ctx, map_addr);
	} else {
		invalidate_block(&s2_ctx, map_addr);
	}

	s2tte_write(&parent_s2tt[wi.index], parent_s2tte);

	granule_memzero_mapped(table);
	granule_set_state(g_tbl, GRANULE_STATE_DELEGATED);

out_unmap_table:
	buffer_unmap(table);
	granule_unlock(g_tbl);
out_unmap_parent_table:
	buffer_unmap(parent_s2tt);
out_unlock_parent_table:
	granule_unlock(wi.g_llt);
	res->x[0] = ret;
}
465
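
/*
 * Destroys the RTT which maps @map_addr at level @ulevel, provided that it
 * holds no live entries (its refcount is zero). The parent entry becomes
 * unassigned_destroyed for a protected IPA or unassigned_ns otherwise, the
 * RTT granule is zeroed and returned to the DELEGATED state, its address is
 * reported in x1 and the next address to walk from in x2.
 */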
void smc_rtt_destroy(unsigned long rd_addr,
		     unsigned long map_addr,
		     unsigned long ulevel,
		     struct smc_result *res)
{
	struct granule *g_rd;
	struct granule *g_tbl;
	struct rd *rd;
	struct granule *g_table_root;
	struct rtt_walk wi;
	unsigned long *table, *parent_s2tt, parent_s2tte;
	long level = (long)ulevel;
	unsigned long ipa_bits, rtt_addr;
	unsigned long ret;
	struct realm_s2_context s2_ctx;
	int sl;
	bool in_par, skip_non_live = false;

	g_rd = find_lock_granule(rd_addr, GRANULE_STATE_RD);
	if (g_rd == NULL) {
		res->x[0] = RMI_ERROR_INPUT;
		res->x[2] = 0UL;
		return;
	}

	rd = granule_map(g_rd, SLOT_RD);
	assert(rd != NULL);

	if (!validate_rtt_structure_cmds(map_addr, level, rd)) {
		buffer_unmap(rd);
		granule_unlock(g_rd);
		res->x[0] = RMI_ERROR_INPUT;
		res->x[2] = 0UL;
		return;
	}

	g_table_root = rd->s2_ctx.g_rtt;
	sl = realm_rtt_starting_level(rd);
	ipa_bits = realm_ipa_bits(rd);
	s2_ctx = rd->s2_ctx;
	in_par = addr_in_par(rd, map_addr);
	buffer_unmap(rd);
	granule_lock(g_table_root, GRANULE_STATE_RTT);
	granule_unlock(g_rd);

	rtt_walk_lock_unlock(g_table_root, sl, ipa_bits,
			     map_addr, level - 1L, &wi);

	parent_s2tt = granule_map(wi.g_llt, SLOT_RTT);
	assert(parent_s2tt != NULL);

	parent_s2tte = s2tte_read(&parent_s2tt[wi.index]);

	if ((wi.last_level != (level - 1L)) ||
	    !s2tte_is_table(parent_s2tte, level - 1L)) {
		ret = pack_return_code(RMI_ERROR_RTT,
					(unsigned int)wi.last_level);
		skip_non_live = true;
		goto out_unmap_parent_table;
	}

	rtt_addr = s2tte_pa_table(parent_s2tte, level - 1L);

	/*
	 * Lock the RTT granule. The 'rtt_addr' is verified, thus can be treated
	 * as an internal granule.
	 */
	g_tbl = find_lock_granule(rtt_addr, GRANULE_STATE_RTT);

	/*
	 * A table descriptor S2TTE always points to a TABLE granule.
	 */
	assert(g_tbl != NULL);

	/*
	 * Read the refcount value. RTT granule is always accessed locked, thus
	 * the refcount can be accessed without atomic operations.
	 */
	if (g_tbl->refcount != 0UL) {
		ret = pack_return_code(RMI_ERROR_RTT, (unsigned int)level);
		goto out_unlock_table;
	}

	ret = RMI_SUCCESS;
	res->x[1] = rtt_addr;
	skip_non_live = true;

	table = granule_map(g_tbl, SLOT_RTT2);
	assert(table != NULL);

	if (in_par) {
		parent_s2tte = s2tte_create_unassigned_destroyed();
	} else {
		parent_s2tte = s2tte_create_unassigned_ns();
	}

	__granule_put(wi.g_llt);

	/*
	 * Break before make. Note that this may cause spurious S2 aborts.
	 */
	s2tte_write(&parent_s2tt[wi.index], 0UL);

	if (in_par) {
		/* For protected IPA, all S2TTEs in the RTT will be invalid */
		invalidate_block(&s2_ctx, map_addr);
	} else {
		/*
		 * For unprotected IPA, invalidate the TLB for the entire range
		 * mapped by the RTT as it may have valid NS mappings.
		 */
		invalidate_pages_in_block(&s2_ctx, map_addr);
	}

	s2tte_write(&parent_s2tt[wi.index], parent_s2tte);

	granule_memzero_mapped(table);
	granule_set_state(g_tbl, GRANULE_STATE_DELEGATED);

	buffer_unmap(table);
out_unlock_table:
	granule_unlock(g_tbl);
out_unmap_parent_table:
	if (skip_non_live) {
		res->x[2] = skip_non_live_entries(map_addr, parent_s2tt, &wi);
	} else {
		res->x[2] = map_addr;
	}
	buffer_unmap(parent_s2tt);
	granule_unlock(wi.g_llt);
	res->x[0] = ret;
}

enum map_unmap_ns_op {
	MAP_NS,
	UNMAP_NS
};

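/*
 * Common implementation of RMI_RTT_MAP_UNPROTECTED and
 * RMI_RTT_UNMAP_UNPROTECTED for IPAs outside the PAR.
 */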
/*
 * We don't hold a reference on the NS granule when it is
 * mapped into a realm. Instead we rely on the guarantees
 * provided by the architecture to ensure that a NS access
 * to a protected granule is prohibited even within the realm.
 */
static void map_unmap_ns(unsigned long rd_addr,
			 unsigned long map_addr,
			 long level,
			 unsigned long host_s2tte,
			 enum map_unmap_ns_op op,
			 struct smc_result *res)
{
	struct granule *g_rd;
	struct rd *rd;
	struct granule *g_table_root;
	unsigned long *s2tt, s2tte;
	struct rtt_walk wi;
	unsigned long ipa_bits;
	struct realm_s2_context s2_ctx;
	int sl;

	g_rd = find_lock_granule(rd_addr, GRANULE_STATE_RD);
	if (g_rd == NULL) {
		res->x[0] = RMI_ERROR_INPUT;
		return;
	}

	rd = granule_map(g_rd, SLOT_RD);
	assert(rd != NULL);

	if (!validate_rtt_map_cmds(map_addr, level, rd)) {
		buffer_unmap(rd);
		granule_unlock(g_rd);
		res->x[0] = RMI_ERROR_INPUT;
		return;
	}

	g_table_root = rd->s2_ctx.g_rtt;
	sl = realm_rtt_starting_level(rd);
	ipa_bits = realm_ipa_bits(rd);

	/* Check if map_addr is outside PAR */
	if (addr_in_par(rd, map_addr)) {
		buffer_unmap(rd);
		granule_unlock(g_rd);
		res->x[0] = RMI_ERROR_INPUT;
		return;
	}

	s2_ctx = rd->s2_ctx;
	buffer_unmap(rd);

	granule_lock(g_table_root, GRANULE_STATE_RTT);
	granule_unlock(g_rd);

	rtt_walk_lock_unlock(g_table_root, sl, ipa_bits,
			     map_addr, level, &wi);

	/*
	 * For UNMAP_NS, we need to map the table and look
	 * for the end of the non-live region.
	 */
	if ((op == MAP_NS) && (wi.last_level != level)) {
		res->x[0] = pack_return_code(RMI_ERROR_RTT,
					     (unsigned int)wi.last_level);
		goto out_unlock_llt;
	}

	s2tt = granule_map(wi.g_llt, SLOT_RTT);
	assert(s2tt != NULL);

	s2tte = s2tte_read(&s2tt[wi.index]);

	if (op == MAP_NS) {
		if (!s2tte_is_unassigned_ns(s2tte)) {
			res->x[0] = pack_return_code(RMI_ERROR_RTT,
						     (unsigned int)level);
			goto out_unmap_table;
		}

		s2tte = s2tte_create_assigned_ns(host_s2tte, level);
		s2tte_write(&s2tt[wi.index], s2tte);

	} else if (op == UNMAP_NS) {
		/*
		 * The following check also verifies that map_addr is outside
		 * PAR, as an assigned_ns s2tte may only cover an IPA range
		 * outside the PAR.
		 */
		bool assigned_ns = s2tte_is_assigned_ns(s2tte, wi.last_level);

		if ((wi.last_level != level) || !assigned_ns) {
			res->x[0] = pack_return_code(RMI_ERROR_RTT,
						     (unsigned int)wi.last_level);
			goto out_unmap_table;
		}

		s2tte = s2tte_create_unassigned_ns();
		s2tte_write(&s2tt[wi.index], s2tte);
		if (level == RTT_PAGE_LEVEL) {
			invalidate_page(&s2_ctx, map_addr);
		} else {
			invalidate_block(&s2_ctx, map_addr);
		}
	}

	res->x[0] = RMI_SUCCESS;

out_unmap_table:
	if (op == UNMAP_NS) {
		res->x[1] = skip_non_live_entries(map_addr, s2tt, &wi);
	}
	buffer_unmap(s2tt);
out_unlock_llt:
	granule_unlock(wi.g_llt);
}

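/*
 * Maps an unprotected IPA to the NS PA and attributes provided by the host
 * in @s2tte, after checking that the host-controlled fields are valid.
 */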
unsigned long smc_rtt_map_unprotected(unsigned long rd_addr,
				      unsigned long map_addr,
				      unsigned long ulevel,
				      unsigned long s2tte)
{
	long level = (long)ulevel;
	struct smc_result res;

	if ((level < RTT_MIN_BLOCK_LEVEL) || (level > RTT_PAGE_LEVEL)) {
		return RMI_ERROR_INPUT;
	}

	if (!host_ns_s2tte_is_valid(s2tte, level)) {
		return RMI_ERROR_INPUT;
	}

	map_unmap_ns(rd_addr, map_addr, level, s2tte, MAP_NS, &res);
	return res.x[0];
}

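/*
 * Removes the mapping of an unprotected IPA. The next non-live address is
 * reported in x1 so the host can continue the walk from there.
 */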
void smc_rtt_unmap_unprotected(unsigned long rd_addr,
			       unsigned long map_addr,
			       unsigned long ulevel,
			       struct smc_result *res)
{
	long level = (long)ulevel;

	if ((level < RTT_MIN_BLOCK_LEVEL) || (level > RTT_PAGE_LEVEL)) {
		res->x[0] = RMI_ERROR_INPUT;
		return;
	}

	map_unmap_ns(rd_addr, map_addr, level, 0UL, UNMAP_NS, res);
}

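/*
 * Reads the RTT entry which maps @map_addr, walking to at most @ulevel.
 * Returns the walk level reached in x1, the entry state in x2, the output
 * address (or 0) in x3 and the RIPAS in x4.
 */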
void smc_rtt_read_entry(unsigned long rd_addr,
			unsigned long map_addr,
			unsigned long ulevel,
			struct smc_result *res)
{
	struct granule *g_rd, *g_rtt_root;
	struct rd *rd;
	struct rtt_walk wi;
	unsigned long *s2tt, s2tte;
	unsigned long ipa_bits;
	long level = (long)ulevel;
	int sl;

	g_rd = find_lock_granule(rd_addr, GRANULE_STATE_RD);
	if (g_rd == NULL) {
		res->x[0] = RMI_ERROR_INPUT;
		return;
	}

	rd = granule_map(g_rd, SLOT_RD);
	assert(rd != NULL);

	if (!validate_rtt_entry_cmds(map_addr, level, rd)) {
		buffer_unmap(rd);
		granule_unlock(g_rd);
		res->x[0] = RMI_ERROR_INPUT;
		return;
	}

	g_rtt_root = rd->s2_ctx.g_rtt;
	sl = realm_rtt_starting_level(rd);
	ipa_bits = realm_ipa_bits(rd);
	buffer_unmap(rd);

	granule_lock(g_rtt_root, GRANULE_STATE_RTT);
	granule_unlock(g_rd);

	rtt_walk_lock_unlock(g_rtt_root, sl, ipa_bits,
			     map_addr, level, &wi);
	s2tt = granule_map(wi.g_llt, SLOT_RTT);
	assert(s2tt != NULL);

	s2tte = s2tte_read(&s2tt[wi.index]);
	res->x[1] = (unsigned long)wi.last_level;

	if (s2tte_is_unassigned_empty(s2tte)) {
		res->x[2] = RMI_UNASSIGNED;
		res->x[3] = 0UL;
		res->x[4] = (unsigned long)RIPAS_EMPTY;
	} else if (s2tte_is_unassigned_ram(s2tte)) {
		res->x[2] = RMI_UNASSIGNED;
		res->x[3] = 0UL;
		res->x[4] = (unsigned long)RIPAS_RAM;
	} else if (s2tte_is_unassigned_destroyed(s2tte)) {
		res->x[2] = RMI_UNASSIGNED;
		res->x[3] = 0UL;
		res->x[4] = (unsigned long)RIPAS_DESTROYED;
	} else if (s2tte_is_assigned_empty(s2tte, wi.last_level)) {
		res->x[2] = RMI_ASSIGNED;
		res->x[3] = s2tte_pa(s2tte, wi.last_level);
		res->x[4] = (unsigned long)RIPAS_EMPTY;
	} else if (s2tte_is_assigned_ram(s2tte, wi.last_level)) {
		res->x[2] = RMI_ASSIGNED;
		res->x[3] = s2tte_pa(s2tte, wi.last_level);
		res->x[4] = (unsigned long)RIPAS_RAM;
	} else if (s2tte_is_assigned_destroyed(s2tte, wi.last_level)) {
		res->x[2] = RMI_ASSIGNED;
		res->x[3] = 0UL;
		res->x[4] = (unsigned long)RIPAS_DESTROYED;
	} else if (s2tte_is_unassigned_ns(s2tte)) {
		res->x[2] = RMI_UNASSIGNED;
		res->x[3] = 0UL;
		res->x[4] = (unsigned long)RIPAS_EMPTY;
	} else if (s2tte_is_assigned_ns(s2tte, wi.last_level)) {
		res->x[2] = RMI_ASSIGNED;
		res->x[3] = host_ns_s2tte(s2tte, wi.last_level);
		res->x[4] = (unsigned long)RIPAS_EMPTY;
	} else if (s2tte_is_table(s2tte, wi.last_level)) {
		res->x[2] = RMI_TABLE;
		res->x[3] = s2tte_pa_table(s2tte, wi.last_level);
		res->x[4] = (unsigned long)RIPAS_EMPTY;
	} else {
		assert(false);
	}

	buffer_unmap(s2tt);
	granule_unlock(wi.g_llt);

	res->x[0] = RMI_SUCCESS;
}

static unsigned long validate_data_create_unknown(unsigned long map_addr,
						  struct rd *rd)
{
	if (!addr_in_par(rd, map_addr)) {
		return RMI_ERROR_INPUT;
	}

	if (!validate_map_addr(map_addr, RTT_PAGE_LEVEL, rd)) {
		return RMI_ERROR_INPUT;
	}

	return RMI_SUCCESS;
}

static unsigned long validate_data_create(unsigned long map_addr,
					  struct rd *rd)
{
	if (get_rd_state_locked(rd) != REALM_NEW) {
		return RMI_ERROR_REALM;
	}

	return validate_data_create_unknown(map_addr, rd);
}

/*
 * Implements both RMI_DATA_CREATE and RMI_DATA_CREATE_UNKNOWN
 *
 * if @g_src == NULL, implements RMI_DATA_CREATE_UNKNOWN
 * and RMI_DATA_CREATE otherwise.
 */
static unsigned long data_create(unsigned long rd_addr,
				 unsigned long data_addr,
				 unsigned long map_addr,
				 struct granule *g_src,
				 unsigned long flags)
{
	struct granule *g_data;
	struct granule *g_rd;
	struct granule *g_table_root;
	struct rd *rd;
	struct rtt_walk wi;
	unsigned long s2tte, *s2tt;
	enum granule_state new_data_state = GRANULE_STATE_DELEGATED;
	unsigned long ipa_bits;
	unsigned long ret;
	int sl;

	if (!find_lock_two_granules(data_addr,
				    GRANULE_STATE_DELEGATED,
				    &g_data,
				    rd_addr,
				    GRANULE_STATE_RD,
				    &g_rd)) {
		return RMI_ERROR_INPUT;
	}

	rd = granule_map(g_rd, SLOT_RD);
	assert(rd != NULL);

	ret = (g_src != NULL) ?
		validate_data_create(map_addr, rd) :
		validate_data_create_unknown(map_addr, rd);

	if (ret != RMI_SUCCESS) {
		goto out_unmap_rd;
	}

	g_table_root = rd->s2_ctx.g_rtt;
	sl = realm_rtt_starting_level(rd);
	ipa_bits = realm_ipa_bits(rd);
	granule_lock(g_table_root, GRANULE_STATE_RTT);
	rtt_walk_lock_unlock(g_table_root, sl, ipa_bits,
			     map_addr, RTT_PAGE_LEVEL, &wi);
	if (wi.last_level != RTT_PAGE_LEVEL) {
		ret = pack_return_code(RMI_ERROR_RTT,
					(unsigned int)wi.last_level);
		goto out_unlock_ll_table;
	}

	s2tt = granule_map(wi.g_llt, SLOT_RTT);
	assert(s2tt != NULL);

	s2tte = s2tte_read(&s2tt[wi.index]);
	if (!s2tte_is_unassigned(s2tte)) {
		ret = pack_return_code(RMI_ERROR_RTT, RTT_PAGE_LEVEL);
		goto out_unmap_ll_table;
	}

	if (g_src != NULL) {
		bool ns_access_ok;
		void *data = granule_map(g_data, SLOT_DELEGATED);

		assert(data != NULL);

		ns_access_ok = ns_buffer_read(SLOT_NS, g_src, 0U,
					      GRANULE_SIZE, data);
		if (!ns_access_ok) {
			/*
			 * Some data may be copied before the failure. Zero
			 * g_data granule as it will remain in delegated state.
			 */
			(void)memset(data, 0, GRANULE_SIZE);
			buffer_unmap(data);
			ret = RMI_ERROR_INPUT;
			goto out_unmap_ll_table;
		}

		measurement_data_granule_measure(
			rd->measurement[RIM_MEASUREMENT_SLOT],
			rd->algorithm,
			data,
			map_addr,
			flags);
		buffer_unmap(data);

		s2tte = s2tte_create_assigned_ram(data_addr, RTT_PAGE_LEVEL);
	} else {
		s2tte = s2tte_create_assigned_unchanged(s2tte, data_addr,
							RTT_PAGE_LEVEL);
	}

	new_data_state = GRANULE_STATE_DATA;

	s2tte_write(&s2tt[wi.index], s2tte);
	__granule_get(wi.g_llt);

	ret = RMI_SUCCESS;

out_unmap_ll_table:
	buffer_unmap(s2tt);
out_unlock_ll_table:
	granule_unlock(wi.g_llt);
out_unmap_rd:
	buffer_unmap(rd);
	granule_unlock(g_rd);
	granule_unlock_transition(g_data, new_data_state);
	return ret;
}

unsigned long smc_data_create(unsigned long rd_addr,
			      unsigned long data_addr,
			      unsigned long map_addr,
			      unsigned long src_addr,
			      unsigned long flags)
{
	struct granule *g_src;

	if ((flags != RMI_NO_MEASURE_CONTENT) &&
	    (flags != RMI_MEASURE_CONTENT)) {
		return RMI_ERROR_INPUT;
	}

	g_src = find_granule(src_addr);
	if ((g_src == NULL) || (g_src->state != GRANULE_STATE_NS)) {
		return RMI_ERROR_INPUT;
	}

	return data_create(rd_addr, data_addr, map_addr, g_src, flags);
}

unsigned long smc_data_create_unknown(unsigned long rd_addr,
				      unsigned long data_addr,
				      unsigned long map_addr)
{
	return data_create(rd_addr, data_addr, map_addr, NULL, 0);
}

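/*
 * Destroys the DATA granule mapped at @map_addr: the mapping is removed
 * from the RTT, the granule contents are wiped and the granule returns to
 * the DELEGATED state. The PA of the destroyed granule is reported in x1
 * and the next non-live address in x2.
 */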
void smc_data_destroy(unsigned long rd_addr,
		      unsigned long map_addr,
		      struct smc_result *res)
{
	struct granule *g_data;
	struct granule *g_rd;
	struct granule *g_table_root;
	struct rtt_walk wi;
	unsigned long data_addr, s2tte, *s2tt;
	struct rd *rd;
	unsigned long ipa_bits;
	struct realm_s2_context s2_ctx;
	int sl;

	g_rd = find_lock_granule(rd_addr, GRANULE_STATE_RD);
	if (g_rd == NULL) {
		res->x[0] = RMI_ERROR_INPUT;
		res->x[2] = 0UL;
		return;
	}

	rd = granule_map(g_rd, SLOT_RD);
	assert(rd != NULL);

	if (!addr_in_par(rd, map_addr) ||
	    !validate_map_addr(map_addr, RTT_PAGE_LEVEL, rd)) {
		buffer_unmap(rd);
		granule_unlock(g_rd);
		res->x[0] = RMI_ERROR_INPUT;
		res->x[2] = 0UL;
		return;
	}

	g_table_root = rd->s2_ctx.g_rtt;
	sl = realm_rtt_starting_level(rd);
	ipa_bits = realm_ipa_bits(rd);
	s2_ctx = rd->s2_ctx;
	buffer_unmap(rd);

	granule_lock(g_table_root, GRANULE_STATE_RTT);
	granule_unlock(g_rd);

	rtt_walk_lock_unlock(g_table_root, sl, ipa_bits,
			     map_addr, RTT_PAGE_LEVEL, &wi);

	s2tt = granule_map(wi.g_llt, SLOT_RTT);
	assert(s2tt != NULL);

	if (wi.last_level != RTT_PAGE_LEVEL) {
		res->x[0] = pack_return_code(RMI_ERROR_RTT,
					     (unsigned int)wi.last_level);
		goto out_unmap_ll_table;
	}

	s2tte = s2tte_read(&s2tt[wi.index]);

	if (s2tte_is_assigned_ram(s2tte, RTT_PAGE_LEVEL)) {
		data_addr = s2tte_pa(s2tte, RTT_PAGE_LEVEL);
		s2tte = s2tte_create_unassigned_destroyed();
		s2tte_write(&s2tt[wi.index], s2tte);
		invalidate_page(&s2_ctx, map_addr);
	} else if (s2tte_is_assigned_empty(s2tte, RTT_PAGE_LEVEL)) {
		data_addr = s2tte_pa(s2tte, RTT_PAGE_LEVEL);
		s2tte = s2tte_create_unassigned_empty();
		s2tte_write(&s2tt[wi.index], s2tte);
	} else if (s2tte_is_assigned_destroyed(s2tte, RTT_PAGE_LEVEL)) {
		data_addr = s2tte_pa(s2tte, RTT_PAGE_LEVEL);
		s2tte = s2tte_create_unassigned_destroyed();
		s2tte_write(&s2tt[wi.index], s2tte);
	} else {
		res->x[0] = pack_return_code(RMI_ERROR_RTT, RTT_PAGE_LEVEL);
		goto out_unmap_ll_table;
	}

	__granule_put(wi.g_llt);

	/*
	 * Lock the data granule and check expected state. Correct locking order
	 * is guaranteed because granule address is obtained from a locked
	 * granule by table walk. This lock needs to be acquired before a state
	 * transition to or from GRANULE_STATE_DATA for granule address can happen.
	 */
	g_data = find_lock_granule(data_addr, GRANULE_STATE_DATA);
	assert(g_data != NULL);
	granule_memzero(g_data, SLOT_DELEGATED);
	granule_unlock_transition(g_data, GRANULE_STATE_DELEGATED);

	res->x[0] = RMI_SUCCESS;
	res->x[1] = data_addr;
out_unmap_ll_table:
	res->x[2] = skip_non_live_entries(map_addr, s2tt, &wi);
	buffer_unmap(s2tt);
	granule_unlock(wi.g_llt);
}

/*
 * Update the RIPAS value for the entry pointed to by @s2ttep.
 *
 * Returns:
 *  < 0 - On error, the operation was aborted,
 *	  e.g. the entry cannot have a RIPAS.
 *    0 - The operation succeeded and no TLBI is required.
 *  > 0 - The operation succeeded and a TLBI is required.
 */
static int update_ripas(unsigned long *s2ttep, long level,
			enum ripas ripas_val,
			enum ripas_change_destroyed change_destroyed)
{
	unsigned long pa, s2tte = s2tte_read(s2ttep);
	int ret = 0;

	if (!s2tte_has_ripas(s2tte, level)) {
		return -1;
	}

	if (ripas_val == RIPAS_RAM) {
		if (s2tte_is_unassigned_empty(s2tte)) {
			s2tte = s2tte_create_unassigned_ram();
		} else if (s2tte_is_unassigned_destroyed(s2tte)) {
			if (change_destroyed == CHANGE_DESTROYED) {
				s2tte = s2tte_create_unassigned_ram();
			} else {
				return -1;
			}
		} else if (s2tte_is_assigned_empty(s2tte, level)) {
			pa = s2tte_pa(s2tte, level);
			s2tte = s2tte_create_assigned_ram(pa, level);
		} else if (s2tte_is_assigned_destroyed(s2tte, level)) {
			if (change_destroyed == CHANGE_DESTROYED) {
				pa = s2tte_pa(s2tte, level);
				s2tte = s2tte_create_assigned_ram(pa, level);
			} else {
				return -1;
			}
		} else {
			/* No action is required */
			return 0;
		}
	} else if (ripas_val == RIPAS_EMPTY) {
		if (s2tte_is_unassigned_ram(s2tte)) {
			s2tte = s2tte_create_unassigned_empty();
		} else if (s2tte_is_unassigned_destroyed(s2tte)) {
			if (change_destroyed == CHANGE_DESTROYED) {
				s2tte = s2tte_create_unassigned_empty();
			} else {
				return -1;
			}
		} else if (s2tte_is_assigned_ram(s2tte, level)) {
			pa = s2tte_pa(s2tte, level);
			s2tte = s2tte_create_assigned_empty(pa, level);
			/* TLBI is required */
			ret = 1;
		} else if (s2tte_is_assigned_destroyed(s2tte, level)) {
			if (change_destroyed == CHANGE_DESTROYED) {
				pa = s2tte_pa(s2tte, level);
				s2tte = s2tte_create_assigned_empty(pa, level);
				/* TLBI is required */
				ret = 1;
			} else {
				return -1;
			}
		} else {
			/* No action is required */
			return 0;
		}
	}
	s2tte_write(s2ttep, s2tte);
	return ret;
}

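/*
 * Sets the RIPAS of the range [base, top) to RAM for a Realm in the NEW
 * state, extending the RIM with each completed entry. The walk stops at the
 * first entry which is neither unassigned_empty nor unassigned_ram; the top
 * of the completed region is reported in x1.
 */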
void smc_rtt_init_ripas(unsigned long rd_addr,
			unsigned long base,
			unsigned long top,
			struct smc_result *res)
{
	struct granule *g_rd, *g_rtt_root;
	struct rd *rd;
	unsigned long ipa_bits, addr, map_size;
	struct rtt_walk wi;
	unsigned long s2tte, *s2tt;
	long level;
	unsigned long index;
	int sl;

	if (top <= base) {
		res->x[0] = RMI_ERROR_INPUT;
		return;
	}

	g_rd = find_lock_granule(rd_addr, GRANULE_STATE_RD);
	if (g_rd == NULL) {
		res->x[0] = RMI_ERROR_INPUT;
		return;
	}

	rd = granule_map(g_rd, SLOT_RD);
	assert(rd != NULL);

	if (!validate_map_addr(base, RTT_PAGE_LEVEL, rd) ||
	    !validate_map_addr(top, RTT_PAGE_LEVEL, rd) ||
	    !addr_in_par(rd, base) || !addr_in_par(rd, top - GRANULE_SIZE)) {
		buffer_unmap(rd);
		granule_unlock(g_rd);
		res->x[0] = RMI_ERROR_INPUT;
		return;
	}

	if (get_rd_state_locked(rd) != REALM_NEW) {
		buffer_unmap(rd);
		granule_unlock(g_rd);
		res->x[0] = RMI_ERROR_REALM;
		return;
	}

	g_rtt_root = rd->s2_ctx.g_rtt;
	sl = realm_rtt_starting_level(rd);
	ipa_bits = realm_ipa_bits(rd);

	granule_lock(g_rtt_root, GRANULE_STATE_RTT);

	rtt_walk_lock_unlock(g_rtt_root, sl, ipa_bits,
			     base, RTT_PAGE_LEVEL, &wi);
	level = wi.last_level;
	s2tt = granule_map(wi.g_llt, SLOT_RTT);
	assert(s2tt != NULL);

	map_size = s2tte_map_size(level);
	addr = base & ~(map_size - 1UL);

	/*
	 * If the RTTE covers a range below "base", we need to go deeper.
	 */
	if (addr != base) {
		res->x[0] = pack_return_code(RMI_ERROR_RTT,
					     (unsigned int)level);
		goto out_unmap_llt;
	}

	for (index = wi.index; index < S2TTES_PER_S2TT; index++) {
		unsigned long next = addr + map_size;

		/*
		 * Break on "top_align" failure condition,
		 * or if this entry crosses the range.
		 */
		if (next > top) {
			break;
		}

		s2tte = s2tte_read(&s2tt[index]);
		if (s2tte_is_unassigned_empty(s2tte)) {
			s2tte = s2tte_create_unassigned_ram();
			s2tte_write(&s2tt[index], s2tte);
		} else if (!s2tte_is_unassigned_ram(s2tte)) {
			break;
		}
		measurement_init_ripas_measure(rd->measurement[RIM_MEASUREMENT_SLOT],
					       rd->algorithm,
					       addr,
					       next);
		addr = next;
	}

	if (addr > base) {
		res->x[0] = RMI_SUCCESS;
		res->x[1] = addr;
	} else {
		res->x[0] = pack_return_code(RMI_ERROR_RTT,
					     (unsigned int)level);
	}

out_unmap_llt:
	buffer_unmap(s2tt);
	buffer_unmap(rd);
	granule_unlock(wi.g_llt);
	granule_unlock(g_rd);
}

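/*
 * Applies the pending RIPAS change to the entries of @s2tt which fall
 * within [base, top), invalidating TLB entries where required. On success,
 * res->x[1] holds the first address which was not processed.
 */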
static void rtt_set_ripas_range(struct realm_s2_context *s2_ctx,
				unsigned long *s2tt,
				unsigned long base,
				unsigned long top,
				struct rtt_walk *wi,
				enum ripas ripas_val,
				enum ripas_change_destroyed change_destroyed,
				struct smc_result *res)
{
	unsigned long index;
	long level = wi->last_level;
	unsigned long map_size = s2tte_map_size((int)level);

	/* Align to the RTT level */
	unsigned long addr = base & ~(map_size - 1UL);

	/* Make sure we don't touch a range below the requested range */
	if (addr != base) {
		res->x[0] = pack_return_code(RMI_ERROR_RTT,
					     (unsigned int)level);
		return;
	}

	for (index = wi->index; index < S2TTES_PER_S2TT; index++) {
		int ret;

		/*
		 * Break on "top_align" failure condition,
		 * or if this entry crosses the range.
		 */
		if ((addr + map_size) > top) {
			break;
		}

		ret = update_ripas(&s2tt[index], level,
				   ripas_val, change_destroyed);
		if (ret < 0) {
			break;
		}

		/* Handle TLBI */
		if (ret != 0) {
			if (level == RTT_PAGE_LEVEL) {
				invalidate_page(s2_ctx, addr);
			} else {
				invalidate_block(s2_ctx, addr);
			}
		}

		addr += map_size;
	}

	if (addr > base) {
		res->x[0] = RMI_SUCCESS;
		res->x[1] = addr;
	} else {
		res->x[0] = pack_return_code(RMI_ERROR_RTT,
					     (unsigned int)level);
	}
}

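/*
 * Completes a RIPAS change requested by the Realm via the REC: applies the
 * change recorded in rec->set_ripas to the range [base, top) and advances
 * rec->set_ripas.addr past the completed region.
 */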
void smc_rtt_set_ripas(unsigned long rd_addr,
		       unsigned long rec_addr,
		       unsigned long base,
		       unsigned long top,
		       struct smc_result *res)
{
	struct granule *g_rd, *g_rec, *g_rtt_root;
	struct rec *rec;
	struct rd *rd;
	unsigned long ipa_bits;
	struct rtt_walk wi;
	unsigned long *s2tt;
	struct realm_s2_context s2_ctx;
	enum ripas ripas_val;
	enum ripas_change_destroyed change_destroyed;
	int sl;

	if (top <= base) {
		res->x[0] = RMI_ERROR_INPUT;
		return;
	}

	if (!find_lock_two_granules(rd_addr,
				    GRANULE_STATE_RD,
				    &g_rd,
				    rec_addr,
				    GRANULE_STATE_REC,
				    &g_rec)) {
		res->x[0] = RMI_ERROR_INPUT;
		return;
	}

	if (granule_refcount_read_acquire(g_rec) != 0UL) {
		res->x[0] = RMI_ERROR_REC;
		goto out_unlock_rec_rd;
	}

	rec = granule_map(g_rec, SLOT_REC);
	assert(rec != NULL);

	if (g_rd != rec->realm_info.g_rd) {
		res->x[0] = RMI_ERROR_REC;
		goto out_unmap_rec;
	}

	ripas_val = rec->set_ripas.ripas_val;
	change_destroyed = rec->set_ripas.change_destroyed;

	/*
	 * Return error in case of target region:
	 * - is not the next chunk of requested region
	 * - extends beyond the end of requested region
	 */
	if ((base != rec->set_ripas.addr) || (top > rec->set_ripas.top)) {
		res->x[0] = RMI_ERROR_INPUT;
		goto out_unmap_rec;
	}

	rd = granule_map(g_rd, SLOT_RD);
	assert(rd != NULL);

	/*
	 * At this point, we know base == rec->set_ripas.addr
	 * and thus it must be aligned to GRANULE size.
	 */
	assert(validate_map_addr(base, RTT_PAGE_LEVEL, rd));

	g_rtt_root = rd->s2_ctx.g_rtt;
	sl = realm_rtt_starting_level(rd);
	ipa_bits = realm_ipa_bits(rd);
	s2_ctx = rd->s2_ctx;

	granule_lock(g_rtt_root, GRANULE_STATE_RTT);

	/* Walk to the deepest level possible */
	rtt_walk_lock_unlock(g_rtt_root, sl, ipa_bits,
			     base, RTT_PAGE_LEVEL, &wi);

	/*
	 * Base has to be aligned to the level at which
	 * it is mapped in RTT.
	 */
	if (!validate_map_addr(base, wi.last_level, rd)) {
		res->x[0] = pack_return_code(RMI_ERROR_RTT,
					     (unsigned int)wi.last_level);
		goto out_unlock_llt;
	}

	s2tt = granule_map(wi.g_llt, SLOT_RTT);
	assert(s2tt != NULL);

	rtt_set_ripas_range(&s2_ctx, s2tt, base, top, &wi,
			    ripas_val, change_destroyed, res);

	if (res->x[0] == RMI_SUCCESS) {
		rec->set_ripas.addr = res->x[1];
	}

	buffer_unmap(s2tt);
out_unlock_llt:
	granule_unlock(wi.g_llt);
	buffer_unmap(rd);
out_unmap_rec:
	buffer_unmap(rec);
out_unlock_rec_rd:
	granule_unlock(g_rec);
	granule_unlock(g_rd);
}