blob: c7a416d61a05da91542b0bec1b7a86d6f82510b9 [file] [log] [blame]
Soby Mathewb4c6df42022-11-09 11:13:29 +00001/*
2 * SPDX-License-Identifier: BSD-3-Clause
3 *
4 * SPDX-FileCopyrightText: Copyright TF-RMM Contributors.
5 */
6
7#include <buffer.h>
8#include <granule.h>
9#include <measurement.h>
10#include <realm.h>
11#include <ripas.h>
12#include <smc-handler.h>
13#include <smc-rmi.h>
14#include <smc.h>
15#include <stddef.h>
16#include <string.h>
17#include <table.h>
18
19/*
20 * Validate the map_addr value passed to RMI_RTT_* and RMI_DATA_* commands.
21 */
22static bool validate_map_addr(unsigned long map_addr,
AlexeiFedorov4faab852023-08-30 15:06:49 +010023 long level,
Soby Mathewb4c6df42022-11-09 11:13:29 +000024 struct rd *rd)
25{
AlexeiFedorov14d47ae2023-07-19 15:26:50 +010026 return ((map_addr < realm_ipa_size(rd)) &&
27 addr_is_level_aligned(map_addr, level));
Soby Mathewb4c6df42022-11-09 11:13:29 +000028}
29
30/*
31 * Structure commands can operate on all RTTs except for the root RTT so
32 * the minimal valid level is the stage 2 starting level + 1.
33 */
34static bool validate_rtt_structure_cmds(unsigned long map_addr,
35 long level,
36 struct rd *rd)
37{
38 int min_level = realm_rtt_starting_level(rd) + 1;
39
40 if ((level < min_level) || (level > RTT_PAGE_LEVEL)) {
41 return false;
42 }
AlexeiFedorovf85f8102023-09-11 16:14:18 +010043 return validate_map_addr(map_addr, level - 1L, rd);
Soby Mathewb4c6df42022-11-09 11:13:29 +000044}
45
46/*
47 * Map/Unmap commands can operate up to a level 2 block entry so min_level is
48 * the smallest block size.
49 */
50static bool validate_rtt_map_cmds(unsigned long map_addr,
51 long level,
52 struct rd *rd)
53{
54 if ((level < RTT_MIN_BLOCK_LEVEL) || (level > RTT_PAGE_LEVEL)) {
55 return false;
56 }
57 return validate_map_addr(map_addr, level, rd);
58}
59
60/*
61 * Entry commands can operate on any entry so the minimal valid level is the
62 * stage 2 starting level.
63 */
64static bool validate_rtt_entry_cmds(unsigned long map_addr,
65 long level,
66 struct rd *rd)
67{
68 if ((level < realm_rtt_starting_level(rd)) ||
69 (level > RTT_PAGE_LEVEL)) {
70 return false;
71 }
72 return validate_map_addr(map_addr, level, rd);
73}
74
/*
 * RMI_RTT_CREATE: install a new (delegated) granule as an RTT at @ulevel,
 * covering @map_addr, for the realm described by @rd_addr.
 *
 * The new table inherits the state of the parent entry it replaces
 * (unassigned variants are replicated; assigned blocks are unfolded into
 * 512 entries of the same type). Returns RMI_SUCCESS, RMI_ERROR_INPUT, or
 * a packed RMI_ERROR_RTT code carrying the faulting walk level.
 */
unsigned long smc_rtt_create(unsigned long rd_addr,
			     unsigned long rtt_addr,
			     unsigned long map_addr,
			     unsigned long ulevel)
{
	struct granule *g_rd;
	struct granule *g_tbl;
	struct rd *rd;
	struct granule *g_table_root;
	struct rtt_walk wi;
	unsigned long *s2tt, *parent_s2tt, parent_s2tte;
	long level = (long)ulevel;
	unsigned long ipa_bits;
	unsigned long ret;
	struct realm_s2_context s2_ctx;
	int sl;

	/*
	 * Both granules are locked together (in a canonical order enforced
	 * by the helper) to avoid deadlock against concurrent commands.
	 */
	if (!find_lock_two_granules(rtt_addr,
				    GRANULE_STATE_DELEGATED,
				    &g_tbl,
				    rd_addr,
				    GRANULE_STATE_RD,
				    &g_rd)) {
		return RMI_ERROR_INPUT;
	}

	rd = granule_map(g_rd, SLOT_RD);
	assert(rd != NULL);

	if (!validate_rtt_structure_cmds(map_addr, level, rd)) {
		buffer_unmap(rd);
		granule_unlock(g_rd);
		granule_unlock(g_tbl);
		return RMI_ERROR_INPUT;
	}

	/* Snapshot what is needed from the RD before unmapping it */
	g_table_root = rd->s2_ctx.g_rtt;
	sl = realm_rtt_starting_level(rd);
	ipa_bits = realm_ipa_bits(rd);
	s2_ctx = rd->s2_ctx;
	buffer_unmap(rd);

	/*
	 * Lock the RTT root. Enforcing locking order RD->RTT is enough to
	 * ensure deadlock free locking guarantee.
	 */
	granule_lock(g_table_root, GRANULE_STATE_RTT);

	/* Unlock RD after locking RTT Root */
	granule_unlock(g_rd);

	/* Walk to the parent table (level - 1) of the level being created */
	rtt_walk_lock_unlock(g_table_root, sl, ipa_bits,
			     map_addr, level - 1L, &wi);
	if (wi.last_level != level - 1L) {
		/* Walk terminated early: report the level actually reached */
		ret = pack_return_code(RMI_ERROR_RTT,
					(unsigned int)wi.last_level);
		goto out_unlock_llt;
	}

	parent_s2tt = granule_map(wi.g_llt, SLOT_RTT);
	assert(parent_s2tt != NULL);

	parent_s2tte = s2tte_read(&parent_s2tt[wi.index]);
	s2tt = granule_map(g_tbl, SLOT_DELEGATED);
	assert(s2tt != NULL);

	/*
	 * Initialize the new table according to the parent entry's current
	 * state, so the realm's view of the IPA range is unchanged.
	 */
	if (s2tte_is_unassigned_empty(parent_s2tte)) {
		s2tt_init_unassigned_empty(s2tt);

		/*
		 * Increase the refcount of the parent, the granule was
		 * locked while table walking and hand-over-hand locking.
		 * Atomicity and acquire/release semantics not required because
		 * the table is accessed always locked.
		 */
		__granule_get(wi.g_llt);

	} else if (s2tte_is_unassigned_ram(parent_s2tte)) {
		s2tt_init_unassigned_ram(s2tt);
		__granule_get(wi.g_llt);

	} else if (s2tte_is_unassigned_ns(parent_s2tte)) {
		s2tt_init_unassigned_ns(s2tt);
		__granule_get(wi.g_llt);

	} else if (s2tte_is_unassigned_destroyed(parent_s2tte)) {
		s2tt_init_unassigned_destroyed(s2tt);
		__granule_get(wi.g_llt);

	} else if (s2tte_is_assigned_destroyed(parent_s2tte, level - 1L)) {
		unsigned long block_pa;

		/*
		 * We should observe parent assigned s2tte only when
		 * we create tables above this level.
		 */
		assert(level > RTT_MIN_BLOCK_LEVEL);

		block_pa = s2tte_pa(parent_s2tte, level - 1L);

		s2tt_init_assigned_destroyed(s2tt, block_pa, level);

		/*
		 * Increase the refcount to mark the granule as in-use. refcount
		 * is incremented by S2TTES_PER_S2TT (ref RTT unfolding).
		 */
		__granule_refcount_inc(g_tbl, S2TTES_PER_S2TT);

	} else if (s2tte_is_assigned_empty(parent_s2tte, level - 1L)) {
		unsigned long block_pa;

		/*
		 * We should observe parent assigned s2tte only when
		 * we create tables above this level.
		 */
		assert(level > RTT_MIN_BLOCK_LEVEL);

		block_pa = s2tte_pa(parent_s2tte, level - 1L);

		s2tt_init_assigned_empty(s2tt, block_pa, level);

		/*
		 * Increase the refcount to mark the granule as in-use. refcount
		 * is incremented by S2TTES_PER_S2TT (ref RTT unfolding).
		 */
		__granule_refcount_inc(g_tbl, S2TTES_PER_S2TT);

	} else if (s2tte_is_assigned_ram(parent_s2tte, level - 1L)) {
		unsigned long block_pa;

		/*
		 * We should observe parent valid s2tte only when
		 * we create tables above this level.
		 */
		assert(level > RTT_MIN_BLOCK_LEVEL);

		/*
		 * Break before make. This may cause spurious S2 aborts.
		 */
		s2tte_write(&parent_s2tt[wi.index], 0UL);
		invalidate_block(&s2_ctx, map_addr);

		block_pa = s2tte_pa(parent_s2tte, level - 1L);

		s2tt_init_assigned_ram(s2tt, block_pa, level);

		/*
		 * Increase the refcount to mark the granule as in-use. refcount
		 * is incremented by S2TTES_PER_S2TT (ref RTT unfolding).
		 */
		__granule_refcount_inc(g_tbl, S2TTES_PER_S2TT);

	} else if (s2tte_is_assigned_ns(parent_s2tte, level - 1L)) {
		unsigned long block_pa;

		/*
		 * We should observe parent assigned_ns s2tte only when
		 * we create tables above this level.
		 */
		assert(level > RTT_MIN_BLOCK_LEVEL);

		/*
		 * Break before make. This may cause spurious S2 aborts.
		 */
		s2tte_write(&parent_s2tt[wi.index], 0UL);
		invalidate_block(&s2_ctx, map_addr);

		block_pa = s2tte_pa(parent_s2tte, level - 1L);

		s2tt_init_assigned_ns(s2tt, block_pa, level);

		/*
		 * Increase the refcount to mark the granule as in-use. refcount
		 * is incremented by S2TTES_PER_S2TT (ref RTT unfolding).
		 */
		__granule_refcount_inc(g_tbl, S2TTES_PER_S2TT);

	} else if (s2tte_is_table(parent_s2tte, level - 1L)) {
		/* An RTT already exists at this position */
		ret = pack_return_code(RMI_ERROR_RTT,
					(unsigned int)(level - 1L));
		goto out_unmap_table;

	} else {
		assert(false);
	}

	ret = RMI_SUCCESS;

	granule_set_state(g_tbl, GRANULE_STATE_RTT);

	/* Finally link the new table into the parent entry */
	parent_s2tte = s2tte_create_table(rtt_addr, level - 1L);
	s2tte_write(&parent_s2tt[wi.index], parent_s2tte);

out_unmap_table:
	buffer_unmap(s2tt);
	buffer_unmap(parent_s2tt);
out_unlock_llt:
	granule_unlock(wi.g_llt);
	granule_unlock(g_tbl);
	return ret;
}
277
/*
 * RMI_RTT_FOLD: collapse a homogeneous RTT at @ulevel back into a single
 * entry in its parent table, returning the folded RTT's address in
 * res->x[1] so the host can undelegate it.
 *
 * Folding succeeds only when all S2TTEs in the table are of the same type;
 * the table's refcount is used as a fast pre-filter (0 => all unassigned
 * variants, S2TTES_PER_S2TT => all assigned, anything else => mixed).
 * res->x[0] carries RMI_SUCCESS / RMI_ERROR_INPUT / packed RMI_ERROR_RTT.
 */
void smc_rtt_fold(unsigned long rd_addr,
		  unsigned long map_addr,
		  unsigned long ulevel,
		  struct smc_result *res)
{
	struct granule *g_rd;
	struct granule *g_tbl;
	struct rd *rd;
	struct granule *g_table_root;
	struct rtt_walk wi;
	unsigned long *table, *parent_s2tt, parent_s2tte;
	long level = (long)ulevel;
	unsigned long ipa_bits, rtt_addr;
	unsigned long ret;
	struct realm_s2_context s2_ctx;
	int sl;

	g_rd = find_lock_granule(rd_addr, GRANULE_STATE_RD);
	if (g_rd == NULL) {
		res->x[0] = RMI_ERROR_INPUT;
		return;
	}

	rd = granule_map(g_rd, SLOT_RD);
	assert(rd != NULL);

	if (!validate_rtt_structure_cmds(map_addr, level, rd)) {
		buffer_unmap(rd);
		granule_unlock(g_rd);
		res->x[0] = RMI_ERROR_INPUT;
		return;
	}

	/* Snapshot the RD fields we need, then honor RD->RTT lock order */
	g_table_root = rd->s2_ctx.g_rtt;
	sl = realm_rtt_starting_level(rd);
	ipa_bits = realm_ipa_bits(rd);
	s2_ctx = rd->s2_ctx;
	buffer_unmap(rd);
	granule_lock(g_table_root, GRANULE_STATE_RTT);
	granule_unlock(g_rd);

	/* Walk to the parent table holding the descriptor to be folded */
	rtt_walk_lock_unlock(g_table_root, sl, ipa_bits,
			     map_addr, level - 1L, &wi);
	if (wi.last_level != level - 1L) {
		ret = pack_return_code(RMI_ERROR_RTT,
					(unsigned int)wi.last_level);
		goto out_unlock_parent_table;
	}

	parent_s2tt = granule_map(wi.g_llt, SLOT_RTT);
	assert(parent_s2tt != NULL);

	parent_s2tte = s2tte_read(&parent_s2tt[wi.index]);
	if (!s2tte_is_table(parent_s2tte, level - 1L)) {
		/* Nothing to fold: the parent entry is not a table */
		ret = pack_return_code(RMI_ERROR_RTT,
					(unsigned int)(level - 1L));
		goto out_unmap_parent_table;
	}

	rtt_addr = s2tte_pa_table(parent_s2tte, level - 1L);
	g_tbl = find_lock_granule(rtt_addr, GRANULE_STATE_RTT);

	/*
	 * A table descriptor S2TTE always points to a TABLE granule.
	 */
	assert(g_tbl != NULL);

	table = granule_map(g_tbl, SLOT_RTT2);
	assert(table != NULL);

	/*
	 * The command can succeed only if all 512 S2TTEs are of the same type.
	 * We first check the table's ref. counter to speed up the case when
	 * the host makes a guess whether a memory region can be folded.
	 */
	if (g_tbl->refcount == 0UL) {
		/* No live entries: fold to the matching unassigned type */
		if (table_is_unassigned_destroyed_block(table)) {
			parent_s2tte = s2tte_create_unassigned_destroyed();
		} else if (table_is_unassigned_empty_block(table)) {
			parent_s2tte = s2tte_create_unassigned_empty();
		} else if (table_is_unassigned_ram_block(table)) {
			parent_s2tte = s2tte_create_unassigned_ram();
		} else if (table_is_unassigned_ns_block(table)) {
			parent_s2tte = s2tte_create_unassigned_ns();
		} else if (table_maps_assigned_ns_block(table, level)) {
			/* NS-assigned entries do not take a refcount */
			unsigned long s2tte = s2tte_read(&table[0]);
			unsigned long block_pa = s2tte_pa(s2tte, level);

			parent_s2tte = s2tte_create_assigned_ns(block_pa,
								level - 1L);
		} else {
			/*
			 * The table holds a mixture of destroyed and
			 * unassigned entries.
			 */
			ret = pack_return_code(RMI_ERROR_RTT,
						(unsigned int)level);
			goto out_unmap_table;
		}
		__granule_put(wi.g_llt);
	} else if (g_tbl->refcount == S2TTES_PER_S2TT) {

		unsigned long s2tte, block_pa;

		/* The RMM specification does not allow creating block
		 * entries less than RTT_MIN_BLOCK_LEVEL even though
		 * permitted by the Arm Architecture.
		 * Hence ensure that the table being folded is at a level
		 * higher than the RTT_MIN_BLOCK_LEVEL.
		 *
		 * A fully populated table cannot be destroyed if that
		 * would create a block mapping below RTT_MIN_BLOCK_LEVEL.
		 */
		if (level <= RTT_MIN_BLOCK_LEVEL) {
			ret = pack_return_code(RMI_ERROR_RTT,
						(unsigned int)wi.last_level);
			goto out_unmap_table;
		}

		s2tte = s2tte_read(&table[0]);
		block_pa = s2tte_pa(s2tte, level);

		/*
		 * The table must also refer to a contiguous block through the
		 * same type of s2tte, either Assigned or Valid.
		 */
		if (table_maps_assigned_empty_block(table, level)) {
			parent_s2tte = s2tte_create_assigned_empty(block_pa,
								   level - 1L);
		} else if (table_maps_assigned_ram_block(table, level)) {
			parent_s2tte = s2tte_create_assigned_ram(block_pa,
								 level - 1L);
		} else if (table_maps_assigned_destroyed_block(table, level)) {
			parent_s2tte = s2tte_create_assigned_destroyed(block_pa,
								       level - 1L);
		/* The table contains mixed entries that cannot be folded */
		} else {
			ret = pack_return_code(RMI_ERROR_RTT,
						(unsigned int)level);
			goto out_unmap_table;
		}

		__granule_refcount_dec(g_tbl, S2TTES_PER_S2TT);
	} else {
		/*
		 * The table holds a mixture of different types of s2ttes.
		 */
		ret = pack_return_code(RMI_ERROR_RTT,
					(unsigned int)level);
		goto out_unmap_table;
	}

	ret = RMI_SUCCESS;
	res->x[1] = rtt_addr;

	/*
	 * Break before make.
	 */
	s2tte_write(&parent_s2tt[wi.index], 0UL);

	/* Assigned entries may be live in the TLB at page granularity */
	if (s2tte_is_assigned_ram(parent_s2tte, level - 1L) ||
	    s2tte_is_assigned_ns(parent_s2tte, level - 1L)) {
		invalidate_pages_in_block(&s2_ctx, map_addr);
	} else {
		invalidate_block(&s2_ctx, map_addr);
	}

	s2tte_write(&parent_s2tt[wi.index], parent_s2tte);

	/* Scrub the folded table before handing it back as DELEGATED */
	granule_memzero_mapped(table);
	granule_set_state(g_tbl, GRANULE_STATE_DELEGATED);

out_unmap_table:
	buffer_unmap(table);
	granule_unlock(g_tbl);
out_unmap_parent_table:
	buffer_unmap(parent_s2tt);
out_unlock_parent_table:
	granule_unlock(wi.g_llt);
	res->x[0] = ret;
}
459
/*
 * RMI_RTT_DESTROY: unlink an unreferenced RTT at @ulevel and return it to
 * the DELEGATED state.
 *
 * Outputs: res->x[0] = status, res->x[1] = destroyed RTT address on
 * success, res->x[2] = next address to walk (top of the non-live region,
 * or map_addr when the walk info is not valid for skipping).
 */
void smc_rtt_destroy(unsigned long rd_addr,
		     unsigned long map_addr,
		     unsigned long ulevel,
		     struct smc_result *res)
{
	struct granule *g_rd;
	struct granule *g_tbl;
	struct rd *rd;
	struct granule *g_table_root;
	struct rtt_walk wi;
	unsigned long *table, *parent_s2tt, parent_s2tte;
	long level = (long)ulevel;
	unsigned long ipa_bits, rtt_addr;
	unsigned long ret;
	struct realm_s2_context s2_ctx;
	int sl;
	/* skip_non_live: true once wi/parent_s2tt are valid for skipping */
	bool in_par, skip_non_live = false;

	g_rd = find_lock_granule(rd_addr, GRANULE_STATE_RD);
	if (g_rd == NULL) {
		res->x[0] = RMI_ERROR_INPUT;
		res->x[2] = 0UL;
		return;
	}

	rd = granule_map(g_rd, SLOT_RD);
	assert(rd != NULL);

	if (!validate_rtt_structure_cmds(map_addr, level, rd)) {
		buffer_unmap(rd);
		granule_unlock(g_rd);
		res->x[0] = RMI_ERROR_INPUT;
		res->x[2] = 0UL;
		return;
	}

	/* Snapshot RD state, then honor RD->RTT lock ordering */
	g_table_root = rd->s2_ctx.g_rtt;
	sl = realm_rtt_starting_level(rd);
	ipa_bits = realm_ipa_bits(rd);
	s2_ctx = rd->s2_ctx;
	in_par = addr_in_par(rd, map_addr);
	buffer_unmap(rd);
	granule_lock(g_table_root, GRANULE_STATE_RTT);
	granule_unlock(g_rd);

	rtt_walk_lock_unlock(g_table_root, sl, ipa_bits,
			     map_addr, level - 1L, &wi);

	parent_s2tt = granule_map(wi.g_llt, SLOT_RTT);
	assert(parent_s2tt != NULL);

	parent_s2tte = s2tte_read(&parent_s2tt[wi.index]);

	if ((wi.last_level != level - 1L) ||
	    !s2tte_is_table(parent_s2tte, level - 1L)) {
		/* No table to destroy at the requested position */
		ret = pack_return_code(RMI_ERROR_RTT,
					(unsigned int)wi.last_level);
		skip_non_live = true;
		goto out_unmap_parent_table;
	}

	rtt_addr = s2tte_pa_table(parent_s2tte, level - 1L);

	/*
	 * Lock the RTT granule. The 'rtt_addr' is verified, thus can be treated
	 * as an internal granule.
	 */
	g_tbl = find_lock_granule(rtt_addr, GRANULE_STATE_RTT);

	/*
	 * A table descriptor S2TTE always points to a TABLE granule.
	 */
	assert(g_tbl != NULL);

	/*
	 * Read the refcount value. RTT granule is always accessed locked, thus
	 * the refcount can be accessed without atomic operations.
	 */
	if (g_tbl->refcount != 0UL) {
		/* Live entries remain; the RTT cannot be destroyed */
		ret = pack_return_code(RMI_ERROR_RTT, (unsigned int)level);
		goto out_unlock_table;
	}

	ret = RMI_SUCCESS;
	res->x[1] = rtt_addr;
	skip_non_live = true;

	table = granule_map(g_tbl, SLOT_RTT2);
	assert(table != NULL);

	/* Protected IPA range folds to DESTROYED, unprotected to NS */
	if (in_par) {
		parent_s2tte = s2tte_create_unassigned_destroyed();
	} else {
		parent_s2tte = s2tte_create_unassigned_ns();
	}

	__granule_put(wi.g_llt);

	/*
	 * Break before make. Note that this may cause spurious S2 aborts.
	 */
	s2tte_write(&parent_s2tt[wi.index], 0UL);
	invalidate_block(&s2_ctx, map_addr);
	s2tte_write(&parent_s2tt[wi.index], parent_s2tte);

	/* Scrub the table before returning it to the DELEGATED state */
	granule_memzero_mapped(table);
	granule_set_state(g_tbl, GRANULE_STATE_DELEGATED);

	buffer_unmap(table);
out_unlock_table:
	granule_unlock(g_tbl);
out_unmap_parent_table:
	if (skip_non_live) {
		res->x[2] = skip_non_live_entries(map_addr, parent_s2tt, &wi);
	} else {
		res->x[2] = map_addr;
	}
	buffer_unmap(parent_s2tt);
	granule_unlock(wi.g_llt);
	res->x[0] = ret;
}
581
/* Operation selector for map_unmap_ns() */
enum map_unmap_ns_op {
	MAP_NS,		/* install an NS mapping (RMI_RTT_MAP_UNPROTECTED) */
	UNMAP_NS	/* remove an NS mapping (RMI_RTT_UNMAP_UNPROTECTED) */
};
586
/*
 * We don't hold a reference on the NS granule when it is
 * mapped into a realm. Instead we rely on the guarantees
 * provided by the architecture to ensure that a NS access
 * to a protected granule is prohibited even within the realm.
 */
/*
 * Common worker for RMI_RTT_MAP_UNPROTECTED / RMI_RTT_UNMAP_UNPROTECTED.
 *
 * @host_s2tte is the host-provided NS descriptor (MAP_NS only; 0 for
 * UNMAP_NS). res->x[0] = status; for UNMAP_NS, res->x[1] = top of the
 * non-live region (also set on the RTT-walk error path).
 */
static void map_unmap_ns(unsigned long rd_addr,
			 unsigned long map_addr,
			 long level,
			 unsigned long host_s2tte,
			 enum map_unmap_ns_op op,
			 struct smc_result *res)
{
	struct granule *g_rd;
	struct rd *rd;
	struct granule *g_table_root;
	unsigned long *s2tt, s2tte;
	struct rtt_walk wi;
	unsigned long ipa_bits;
	struct realm_s2_context s2_ctx;
	int sl;

	g_rd = find_lock_granule(rd_addr, GRANULE_STATE_RD);
	if (g_rd == NULL) {
		res->x[0] = RMI_ERROR_INPUT;
		return;
	}

	rd = granule_map(g_rd, SLOT_RD);
	assert(rd != NULL);

	if (!validate_rtt_map_cmds(map_addr, level, rd)) {
		buffer_unmap(rd);
		granule_unlock(g_rd);
		res->x[0] = RMI_ERROR_INPUT;
		return;
	}

	g_table_root = rd->s2_ctx.g_rtt;
	sl = realm_rtt_starting_level(rd);
	ipa_bits = realm_ipa_bits(rd);

	/* Check if map_addr is outside PAR */
	if (addr_in_par(rd, map_addr)) {
		buffer_unmap(rd);
		granule_unlock(g_rd);
		res->x[0] = RMI_ERROR_INPUT;
		return;
	}

	s2_ctx = rd->s2_ctx;
	buffer_unmap(rd);

	/* RD->RTT locking order guarantees deadlock freedom */
	granule_lock(g_table_root, GRANULE_STATE_RTT);
	granule_unlock(g_rd);

	rtt_walk_lock_unlock(g_table_root, sl, ipa_bits,
			     map_addr, level, &wi);

	/*
	 * For UNMAP_NS, we need to map the table and look
	 * for the end of the non-live region.
	 */
	if ((op == MAP_NS) && (wi.last_level != level)) {
		res->x[0] = pack_return_code(RMI_ERROR_RTT,
					     (unsigned int)wi.last_level);
		goto out_unlock_llt;
	}

	s2tt = granule_map(wi.g_llt, SLOT_RTT);
	assert(s2tt != NULL);

	s2tte = s2tte_read(&s2tt[wi.index]);

	if (op == MAP_NS) {
		/* Only an unassigned-NS entry may take a host mapping */
		if (!s2tte_is_unassigned_ns(s2tte)) {
			res->x[0] = pack_return_code(RMI_ERROR_RTT,
						(unsigned int)level);
			goto out_unmap_table;
		}

		s2tte = s2tte_create_assigned_ns(host_s2tte, level);
		s2tte_write(&s2tt[wi.index], s2tte);

	} else if (op == UNMAP_NS) {
		/*
		 * The following check also verifies that map_addr is outside
		 * PAR, as valid_NS s2tte may only cover outside PAR IPA range.
		 */
		bool assigned_ns = s2tte_is_assigned_ns(s2tte, wi.last_level);

		if ((wi.last_level != level) || !assigned_ns) {
			res->x[0] = pack_return_code(RMI_ERROR_RTT,
						(unsigned int)wi.last_level);
			goto out_unmap_table;
		}

		s2tte = s2tte_create_unassigned_ns();
		s2tte_write(&s2tt[wi.index], s2tte);
		/* Invalidate at the granularity of the removed mapping */
		if (level == RTT_PAGE_LEVEL) {
			invalidate_page(&s2_ctx, map_addr);
		} else {
			invalidate_block(&s2_ctx, map_addr);
		}
	}

	res->x[0] = RMI_SUCCESS;

out_unmap_table:
	if (op == UNMAP_NS) {
		res->x[1] = skip_non_live_entries(map_addr, s2tt, &wi);
	}
	buffer_unmap(s2tt);
out_unlock_llt:
	granule_unlock(wi.g_llt);
}
703
704unsigned long smc_rtt_map_unprotected(unsigned long rd_addr,
705 unsigned long map_addr,
706 unsigned long ulevel,
707 unsigned long s2tte)
708{
709 long level = (long)ulevel;
AlexeiFedorov917eabf2023-04-24 12:20:41 +0100710 struct smc_result res;
Soby Mathewb4c6df42022-11-09 11:13:29 +0000711
AlexeiFedorov7a2f5882023-09-14 11:41:32 +0100712 if ((level < RTT_MIN_BLOCK_LEVEL) || (level > RTT_PAGE_LEVEL)) {
713 return RMI_ERROR_INPUT;
714 }
715
Soby Mathewb4c6df42022-11-09 11:13:29 +0000716 if (!host_ns_s2tte_is_valid(s2tte, level)) {
717 return RMI_ERROR_INPUT;
718 }
719
AlexeiFedorov917eabf2023-04-24 12:20:41 +0100720 map_unmap_ns(rd_addr, map_addr, level, s2tte, MAP_NS, &res);
721 return res.x[0];
Soby Mathewb4c6df42022-11-09 11:13:29 +0000722}
723
AlexeiFedorov917eabf2023-04-24 12:20:41 +0100724void smc_rtt_unmap_unprotected(unsigned long rd_addr,
725 unsigned long map_addr,
726 unsigned long ulevel,
727 struct smc_result *res)
Soby Mathewb4c6df42022-11-09 11:13:29 +0000728{
AlexeiFedorov7a2f5882023-09-14 11:41:32 +0100729 long level = (long)ulevel;
730
731 if ((level < RTT_MIN_BLOCK_LEVEL) || (level > RTT_PAGE_LEVEL)) {
732 res->x[0] = RMI_ERROR_INPUT;
733 return;
734 }
735
736 map_unmap_ns(rd_addr, map_addr, level, 0UL, UNMAP_NS, res);
Soby Mathewb4c6df42022-11-09 11:13:29 +0000737}
738
/*
 * RMI_RTT_READ_ENTRY: report the S2TTE that maps @map_addr, walking no
 * deeper than @ulevel.
 *
 * Outputs: res->x[0] = status, res->x[1] = level reached by the walk,
 * res->x[2] = entry class (RMI_UNASSIGNED/RMI_ASSIGNED/RMI_TABLE),
 * res->x[3] = output address (0 when not applicable), res->x[4] = RIPAS.
 */
void smc_rtt_read_entry(unsigned long rd_addr,
			unsigned long map_addr,
			unsigned long ulevel,
			struct smc_result *res)
{
	struct granule *g_rd, *g_rtt_root;
	struct rd *rd;
	struct rtt_walk wi;
	unsigned long *s2tt, s2tte;
	unsigned long ipa_bits;
	long level = (long)ulevel;
	int sl;

	g_rd = find_lock_granule(rd_addr, GRANULE_STATE_RD);
	if (g_rd == NULL) {
		res->x[0] = RMI_ERROR_INPUT;
		return;
	}

	rd = granule_map(g_rd, SLOT_RD);
	assert(rd != NULL);

	if (!validate_rtt_entry_cmds(map_addr, level, rd)) {
		buffer_unmap(rd);
		granule_unlock(g_rd);
		res->x[0] = RMI_ERROR_INPUT;
		return;
	}

	g_rtt_root = rd->s2_ctx.g_rtt;
	sl = realm_rtt_starting_level(rd);
	ipa_bits = realm_ipa_bits(rd);
	buffer_unmap(rd);

	/* RD->RTT locking order guarantees deadlock freedom */
	granule_lock(g_rtt_root, GRANULE_STATE_RTT);
	granule_unlock(g_rd);

	rtt_walk_lock_unlock(g_rtt_root, sl, ipa_bits,
			     map_addr, level, &wi);
	s2tt = granule_map(wi.g_llt, SLOT_RTT);
	assert(s2tt != NULL);

	s2tte = s2tte_read(&s2tt[wi.index]);
	res->x[1] = (unsigned long)wi.last_level;

	/* Classify the entry and fill in output address and RIPAS */
	if (s2tte_is_unassigned_empty(s2tte)) {
		res->x[2] = RMI_UNASSIGNED;
		res->x[3] = 0UL;
		res->x[4] = (unsigned long)RIPAS_EMPTY;
	} else if (s2tte_is_unassigned_ram(s2tte)) {
		res->x[2] = RMI_UNASSIGNED;
		res->x[3] = 0UL;
		res->x[4] = (unsigned long)RIPAS_RAM;
	} else if (s2tte_is_unassigned_destroyed(s2tte)) {
		res->x[2] = RMI_UNASSIGNED;
		res->x[3] = 0UL;
		res->x[4] = (unsigned long)RIPAS_DESTROYED;
	} else if (s2tte_is_assigned_empty(s2tte, wi.last_level)) {
		res->x[2] = RMI_ASSIGNED;
		res->x[3] = s2tte_pa(s2tte, wi.last_level);
		res->x[4] = (unsigned long)RIPAS_EMPTY;
	} else if (s2tte_is_assigned_ram(s2tte, wi.last_level)) {
		res->x[2] = RMI_ASSIGNED;
		res->x[3] = s2tte_pa(s2tte, wi.last_level);
		res->x[4] = (unsigned long)RIPAS_RAM;
	} else if (s2tte_is_assigned_destroyed(s2tte, wi.last_level)) {
		res->x[2] = RMI_ASSIGNED;
		res->x[3] = 0UL;
		res->x[4] = (unsigned long)RIPAS_DESTROYED;
	} else if (s2tte_is_unassigned_ns(s2tte)) {
		res->x[2] = RMI_UNASSIGNED;
		res->x[3] = 0UL;
		res->x[4] = (unsigned long)RIPAS_EMPTY;
	} else if (s2tte_is_assigned_ns(s2tte, wi.last_level)) {
		res->x[2] = RMI_ASSIGNED;
		res->x[3] = host_ns_s2tte(s2tte, wi.last_level);
		res->x[4] = (unsigned long)RIPAS_EMPTY;
	} else if (s2tte_is_table(s2tte, wi.last_level)) {
		res->x[2] = RMI_TABLE;
		res->x[3] = s2tte_pa_table(s2tte, wi.last_level);
		res->x[4] = (unsigned long)RIPAS_EMPTY;
	} else {
		assert(false);
	}

	buffer_unmap(s2tt);
	granule_unlock(wi.g_llt);

	res->x[0] = RMI_SUCCESS;
}
829
Soby Mathewb4c6df42022-11-09 11:13:29 +0000830static unsigned long validate_data_create_unknown(unsigned long map_addr,
831 struct rd *rd)
832{
833 if (!addr_in_par(rd, map_addr)) {
834 return RMI_ERROR_INPUT;
835 }
836
837 if (!validate_map_addr(map_addr, RTT_PAGE_LEVEL, rd)) {
838 return RMI_ERROR_INPUT;
839 }
840
841 return RMI_SUCCESS;
842}
843
844static unsigned long validate_data_create(unsigned long map_addr,
845 struct rd *rd)
846{
847 if (get_rd_state_locked(rd) != REALM_STATE_NEW) {
848 return RMI_ERROR_REALM;
849 }
850
851 return validate_data_create_unknown(map_addr, rd);
852}
853
/*
 * Implements both RMI_DATA_CREATE and RMI_DATA_CREATE_UNKNOWN.
 *
 * If @g_src == NULL, this implements RMI_DATA_CREATE_UNKNOWN;
 * otherwise it implements RMI_DATA_CREATE.
 */
static unsigned long data_create(unsigned long rd_addr,
				 unsigned long data_addr,
				 unsigned long map_addr,
				 struct granule *g_src,
				 unsigned long flags)
{
	struct granule *g_data;
	struct granule *g_rd;
	struct granule *g_table_root;
	struct rd *rd;
	struct rtt_walk wi;
	unsigned long s2tte, *s2tt;
	/*
	 * Unless the command succeeds, the data granule is unlocked back
	 * into the DELEGATED state (i.e. the transition is rolled back).
	 */
	enum granule_state new_data_state = GRANULE_STATE_DELEGATED;
	unsigned long ipa_bits;
	unsigned long ret;
	int __unused meas_ret;
	int sl;

	/* Lock both granules together to keep the locking order consistent */
	if (!find_lock_two_granules(data_addr,
				    GRANULE_STATE_DELEGATED,
				    &g_data,
				    rd_addr,
				    GRANULE_STATE_RD,
				    &g_rd)) {
		return RMI_ERROR_INPUT;
	}

	rd = granule_map(g_rd, SLOT_RD);
	assert(rd != NULL);

	/* DATA_CREATE (g_src != NULL) additionally requires a NEW realm */
	ret = (g_src != NULL) ?
		validate_data_create(map_addr, rd) :
		validate_data_create_unknown(map_addr, rd);

	if (ret != RMI_SUCCESS) {
		goto out_unmap_rd;
	}

	/* Walk the stage 2 tables down to the page level for map_addr */
	g_table_root = rd->s2_ctx.g_rtt;
	sl = realm_rtt_starting_level(rd);
	ipa_bits = realm_ipa_bits(rd);
	granule_lock(g_table_root, GRANULE_STATE_RTT);
	rtt_walk_lock_unlock(g_table_root, sl, ipa_bits,
			     map_addr, RTT_PAGE_LEVEL, &wi);
	if (wi.last_level != RTT_PAGE_LEVEL) {
		/* Report the level the walk actually terminated at */
		ret = pack_return_code(RMI_ERROR_RTT,
					(unsigned int)wi.last_level);
		goto out_unlock_ll_table;
	}

	s2tt = granule_map(wi.g_llt, SLOT_RTT);
	assert(s2tt != NULL);

	s2tte = s2tte_read(&s2tt[wi.index]);

	if (g_src != NULL) {
		/* RMI_DATA_CREATE: copy and measure the source content */
		bool ns_access_ok;
		void *data;

		if (!s2tte_is_unassigned_ram(s2tte)) {
			ret = pack_return_code(RMI_ERROR_RTT, RTT_PAGE_LEVEL);
			goto out_unmap_ll_table;
		}

		data = granule_map(g_data, SLOT_DELEGATED);
		assert(data != NULL);

		/* Copy from the NS source granule; this access may fault */
		ns_access_ok = ns_buffer_read(SLOT_NS, g_src, 0U,
					      GRANULE_SIZE, data);
		if (!ns_access_ok) {
			/*
			 * Some data may be copied before the failure. Zero
			 * g_data granule as it will remain in delegated state.
			 */
			(void)memset(data, 0, GRANULE_SIZE);
			buffer_unmap(data);
			ret = RMI_ERROR_INPUT;
			goto out_unmap_ll_table;
		}

		/* Extend the RIM with this granule (content use per flags) */
		measurement_data_granule_measure(
			rd->measurement[RIM_MEASUREMENT_SLOT],
			rd->algorithm,
			data,
			map_addr,
			flags);
		buffer_unmap(data);

	} else if (!s2tte_is_unassigned(s2tte)) {
		/* RMI_DATA_CREATE_UNKNOWN: entry must be unassigned */
		ret = pack_return_code(RMI_ERROR_RTT, RTT_PAGE_LEVEL);
		goto out_unmap_ll_table;
	}

	/* Commit: the data granule transitions to the DATA state */
	new_data_state = GRANULE_STATE_DATA;

	s2tte = s2tte_create_assigned_ram(data_addr, RTT_PAGE_LEVEL);

	s2tte_write(&s2tt[wi.index], s2tte);
	/* The last-level table now holds one more live entry */
	__granule_get(wi.g_llt);

	ret = RMI_SUCCESS;

out_unmap_ll_table:
	buffer_unmap(s2tt);
out_unlock_ll_table:
	granule_unlock(wi.g_llt);
out_unmap_rd:
	buffer_unmap(rd);
	granule_unlock(g_rd);
	granule_unlock_transition(g_data, new_data_state);
	return ret;
}
972
AlexeiFedorovac923c82023-04-06 15:12:04 +0100973unsigned long smc_data_create(unsigned long rd_addr,
974 unsigned long data_addr,
Soby Mathewb4c6df42022-11-09 11:13:29 +0000975 unsigned long map_addr,
976 unsigned long src_addr,
977 unsigned long flags)
978{
979 struct granule *g_src;
Soby Mathewb4c6df42022-11-09 11:13:29 +0000980
AlexeiFedorov93f5ec52023-08-31 14:26:53 +0100981 if ((flags != RMI_NO_MEASURE_CONTENT) &&
982 (flags != RMI_MEASURE_CONTENT)) {
Soby Mathewb4c6df42022-11-09 11:13:29 +0000983 return RMI_ERROR_INPUT;
984 }
985
986 g_src = find_granule(src_addr);
987 if ((g_src == NULL) || (g_src->state != GRANULE_STATE_NS)) {
988 return RMI_ERROR_INPUT;
989 }
990
AlexeiFedorovac923c82023-04-06 15:12:04 +0100991 return data_create(rd_addr, data_addr, map_addr, g_src, flags);
Soby Mathewb4c6df42022-11-09 11:13:29 +0000992}
993
/*
 * Handles RMI_DATA_CREATE_UNKNOWN: delegates to data_create() with
 * @g_src == NULL, so no content is copied from NS memory and no
 * measurement flags apply (flags passed as 0).
 */
unsigned long smc_data_create_unknown(unsigned long rd_addr,
				      unsigned long data_addr,
				      unsigned long map_addr)
{
	return data_create(rd_addr, data_addr, map_addr, NULL, 0);
}
1000
/*
 * Handles RMI_DATA_DESTROY: removes the DATA granule mapped at @map_addr
 * from the realm described by @rd_addr and returns it to the DELEGATED
 * state (after wiping its contents).
 *
 * Results reported through @res:
 *   x[0] - RMI status code
 *   x[1] - PA of the destroyed data granule (on success)
 *   x[2] - next live IPA to walk (skip_non_live_entries), 0 on early error
 */
void smc_data_destroy(unsigned long rd_addr,
		      unsigned long map_addr,
		      struct smc_result *res)
{
	struct granule *g_data;
	struct granule *g_rd;
	struct granule *g_table_root;
	struct rtt_walk wi;
	unsigned long data_addr, s2tte, *s2tt;
	struct rd *rd;
	unsigned long ipa_bits;
	struct realm_s2_context s2_ctx;
	int sl;

	g_rd = find_lock_granule(rd_addr, GRANULE_STATE_RD);
	if (g_rd == NULL) {
		res->x[0] = RMI_ERROR_INPUT;
		res->x[2] = 0UL;
		return;
	}

	rd = granule_map(g_rd, SLOT_RD);
	assert(rd != NULL);

	/* The IPA must be protected, in range and page aligned */
	if (!addr_in_par(rd, map_addr) ||
	    !validate_map_addr(map_addr, RTT_PAGE_LEVEL, rd)) {
		buffer_unmap(rd);
		granule_unlock(g_rd);
		res->x[0] = RMI_ERROR_INPUT;
		res->x[2] = 0UL;
		return;
	}

	/* Snapshot what we need from the RD before unmapping it */
	g_table_root = rd->s2_ctx.g_rtt;
	sl = realm_rtt_starting_level(rd);
	ipa_bits = realm_ipa_bits(rd);
	s2_ctx = rd->s2_ctx;
	buffer_unmap(rd);

	/* Lock the root RTT before releasing the RD lock */
	granule_lock(g_table_root, GRANULE_STATE_RTT);
	granule_unlock(g_rd);

	rtt_walk_lock_unlock(g_table_root, sl, ipa_bits,
			     map_addr, RTT_PAGE_LEVEL, &wi);

	s2tt = granule_map(wi.g_llt, SLOT_RTT);
	assert(s2tt != NULL);

	if (wi.last_level != RTT_PAGE_LEVEL) {
		res->x[0] = pack_return_code(RMI_ERROR_RTT,
						(unsigned int)wi.last_level);
		goto out_unmap_ll_table;
	}

	s2tte = s2tte_read(&s2tt[wi.index]);

	if (s2tte_is_assigned_ram(s2tte, RTT_PAGE_LEVEL)) {
		/* Live mapping: transition to UNASSIGNED/DESTROYED + TLBI */
		data_addr = s2tte_pa(s2tte, RTT_PAGE_LEVEL);
		s2tte = s2tte_create_unassigned_destroyed();
		s2tte_write(&s2tt[wi.index], s2tte);
		invalidate_page(&s2_ctx, map_addr);
	} else if (s2tte_is_assigned_empty(s2tte, RTT_PAGE_LEVEL)) {
		/* Not mapped for access: no TLB invalidation needed */
		data_addr = s2tte_pa(s2tte, RTT_PAGE_LEVEL);
		s2tte = s2tte_create_unassigned_empty();
		s2tte_write(&s2tt[wi.index], s2tte);
	} else {
		res->x[0] = pack_return_code(RMI_ERROR_RTT, RTT_PAGE_LEVEL);
		goto out_unmap_ll_table;
	}

	/* One fewer live entry in the last-level table */
	__granule_put(wi.g_llt);

	/*
	 * Lock the data granule and check expected state. Correct locking order
	 * is guaranteed because granule address is obtained from a locked
	 * granule by table walk. This lock needs to be acquired before a state
	 * transition to or from GRANULE_STATE_DATA for granule address can happen.
	 */
	g_data = find_lock_granule(data_addr, GRANULE_STATE_DATA);
	assert(g_data != NULL);
	granule_memzero(g_data, SLOT_DELEGATED);
	granule_unlock_transition(g_data, GRANULE_STATE_DELEGATED);

	res->x[0] = RMI_SUCCESS;
	res->x[1] = data_addr;
out_unmap_ll_table:
	res->x[2] = skip_non_live_entries(map_addr, s2tt, &wi);
	buffer_unmap(s2tt);
	granule_unlock(wi.g_llt);
}
1091
/*
 * Update the ripas value for the entry pointed by @s2ttep.
 *
 * Returns:
 *  < 0  - On error and the operation was aborted,
 *	   e.g., entry cannot have a ripas.
 *    0  - Operation was success and no TLBI is required.
 *  > 0  - Operation was success and TLBI is required.
 */
static int update_ripas(unsigned long *s2ttep, long level,
			enum ripas ripas_val,
			enum ripas_change_destroyed change_destroyed)
{
	unsigned long pa, s2tte = s2tte_read(s2ttep);
	int ret = 0;

	/* Entries which cannot hold a RIPAS value cannot be updated */
	if (!s2tte_has_ripas(s2tte, level)) {
		return -1;
	}

	if (ripas_val == RIPAS_RAM) {
		if (s2tte_is_unassigned_empty(s2tte)) {
			s2tte = s2tte_create_unassigned_ram();
		} else if (s2tte_is_unassigned_destroyed(s2tte)) {
			/*
			 * DESTROYED entries may only change RIPAS when the
			 * caller explicitly allowed it (CHANGE_DESTROYED).
			 */
			if (change_destroyed == CHANGE_DESTROYED) {
				s2tte = s2tte_create_unassigned_ram();
			} else {
				return -1;
			}
		} else if (s2tte_is_assigned_empty(s2tte, level)) {
			/* The PA is preserved; only the RIPAS changes */
			pa = s2tte_pa(s2tte, level);
			s2tte = s2tte_create_assigned_ram(pa, level);
		} else if (s2tte_is_assigned_destroyed(s2tte, level)) {
			if (change_destroyed == CHANGE_DESTROYED) {
				pa = s2tte_pa(s2tte, level);
				s2tte = s2tte_create_assigned_ram(pa, level);
			} else {
				return -1;
			}
		} else {
			/* No action is required */
			return 0;
		}
	} else if (ripas_val == RIPAS_EMPTY) {
		if (s2tte_is_unassigned_ram(s2tte)) {
			s2tte = s2tte_create_unassigned_empty();
		} else if (s2tte_is_unassigned_destroyed(s2tte)) {
			if (change_destroyed == CHANGE_DESTROYED) {
				s2tte = s2tte_create_unassigned_empty();
			} else {
				return -1;
			}
		} else if (s2tte_is_assigned_ram(s2tte, level)) {
			/* Removing a live RAM mapping: the TLBs must be purged */
			pa = s2tte_pa(s2tte, level);
			s2tte = s2tte_create_assigned_empty(pa, level);
			/* TLBI is required */
			ret = 1;
		} else if (s2tte_is_assigned_destroyed(s2tte, level)) {
			if (change_destroyed == CHANGE_DESTROYED) {
				pa = s2tte_pa(s2tte, level);
				s2tte = s2tte_create_assigned_empty(pa, level);
				/* TLBI is required */
				ret = 1;
			} else {
				return -1;
			}
		} else {
			/* No action is required */
			return 0;
		}
	}
	/* Commit the updated entry */
	s2tte_write(s2ttep, s2tte);
	return ret;
}
1168
/*
 * Handles RMI_RTT_INIT_RIPAS: initialises the RIPAS of the IPA range
 * [@base, @top) of a NEW realm to RAM, extending the RIM for each entry
 * that is updated.
 *
 * Results reported through @res:
 *   x[0] - RMI status code
 *   x[1] - first IPA not processed in this call (on success)
 */
void smc_rtt_init_ripas(unsigned long rd_addr,
			unsigned long base,
			unsigned long top,
			struct smc_result *res)
{
	struct granule *g_rd, *g_rtt_root;
	struct rd *rd;
	unsigned long ipa_bits, addr, map_size;
	struct rtt_walk wi;
	unsigned long s2tte, *s2tt;
	long level;
	unsigned long index;
	int sl;

	/* The range must be non-empty */
	if (top <= base) {
		res->x[0] = RMI_ERROR_INPUT;
		return;
	}

	g_rd = find_lock_granule(rd_addr, GRANULE_STATE_RD);
	if (g_rd == NULL) {
		res->x[0] = RMI_ERROR_INPUT;
		return;
	}

	rd = granule_map(g_rd, SLOT_RD);
	assert(rd != NULL);

	/* Both ends must be valid, page-aligned IPAs inside the PAR */
	if (!validate_map_addr(base, RTT_PAGE_LEVEL, rd) ||
	    !validate_map_addr(top, RTT_PAGE_LEVEL, rd) ||
	    !addr_in_par(rd, base) || !addr_in_par(rd, top - GRANULE_SIZE)) {
		buffer_unmap(rd);
		granule_unlock(g_rd);
		res->x[0] = RMI_ERROR_INPUT;
		return;
	}

	/* RIPAS can only be initialised before realm activation */
	if (get_rd_state_locked(rd) != REALM_STATE_NEW) {
		buffer_unmap(rd);
		granule_unlock(g_rd);
		res->x[0] = RMI_ERROR_REALM;
		return;
	}

	g_rtt_root = rd->s2_ctx.g_rtt;
	sl = realm_rtt_starting_level(rd);
	ipa_bits = realm_ipa_bits(rd);

	granule_lock(g_rtt_root, GRANULE_STATE_RTT);

	/* Walk as deep as possible; process at the level actually reached */
	rtt_walk_lock_unlock(g_rtt_root, sl, ipa_bits,
			     base, RTT_PAGE_LEVEL, &wi);
	level = wi.last_level;
	s2tt = granule_map(wi.g_llt, SLOT_RTT);
	assert(s2tt != NULL);

	map_size = s2tte_map_size(level);
	addr = base & ~(map_size - 1UL);

	/*
	 * If the RTTE covers a range below "base", we need to go deeper.
	 */
	if (addr != base) {
		res->x[0] = pack_return_code(RMI_ERROR_RTT,
						(unsigned int)level);
		goto out_unmap_llt;
	}

	for (index = wi.index; index < S2TTES_PER_S2TT;
	     index++, addr += map_size) {
		unsigned long next = addr + map_size;

		/*
		 * Break on "top_align" failure condition,
		 * or if this entry crosses the range.
		 */
		if (next > top) {
			break;
		}

		s2tte = s2tte_read(&s2tt[index]);
		if (s2tte_is_unassigned_empty(s2tte)) {
			s2tte = s2tte_create_unassigned_ram();
			s2tte_write(&s2tt[index], s2tte);
		} else if (!s2tte_is_unassigned_ram(s2tte)) {
			/* Any other state stops the scan */
			break;
		}
		/* Extend the RIM with the [addr, next) RIPAS change */
		measurement_init_ripas_measure(rd->measurement[RIM_MEASUREMENT_SLOT],
					       rd->algorithm,
					       addr,
					       next);
	}

	/* Success only if at least one entry was processed */
	if (addr > base) {
		res->x[0] = RMI_SUCCESS;
		res->x[1] = addr;
	} else {
		res->x[0] = pack_return_code(RMI_ERROR_RTT,
						(unsigned int)level);
	}

out_unmap_llt:
	buffer_unmap(s2tt);
	buffer_unmap(rd);
	granule_unlock(wi.g_llt);
	granule_unlock(g_rd);
}
1276
/*
 * Apply @ripas_val to the IPA range [@base, @top) within the single RTT
 * pointed to by @s2tt, starting at the walk position @wi, performing TLB
 * invalidation where update_ripas() requests it.
 *
 * Results reported through @res:
 *   x[0] - RMI status code
 *   x[1] - first IPA not processed in this call (on success)
 */
static void rtt_set_ripas_range(struct realm_s2_context *s2_ctx,
				unsigned long *s2tt,
				unsigned long base,
				unsigned long top,
				struct rtt_walk *wi,
				enum ripas ripas_val,
				enum ripas_change_destroyed change_destroyed,
				struct smc_result *res)
{
	unsigned long index;
	long level = wi->last_level;
	unsigned long map_size = s2tte_map_size((int)level);

	/* Align to the RTT level */
	unsigned long addr = base & ~(map_size - 1UL);

	/* Make sure we don't touch a range below the requested range */
	if (addr != base) {
		res->x[0] = pack_return_code(RMI_ERROR_RTT,
					     (unsigned int)level);
		return;
	}

	for (index = wi->index; index < S2TTES_PER_S2TT; addr += map_size) {
		int ret;

		/*
		 * Break on "top_align" failure condition,
		 * or if this entry crosses the range.
		 */
		if ((addr + map_size) > top) {
			break;
		}

		/* NB: index advances here, inside the loop body */
		ret = update_ripas(&s2tt[index++], level,
				   ripas_val, change_destroyed);
		if (ret < 0) {
			/* Entry cannot take this RIPAS change; stop here */
			break;
		}

		/* Handle TLBI */
		if (ret != 0) {
			if (level == RTT_PAGE_LEVEL) {
				invalidate_page(s2_ctx, addr);
			} else {
				invalidate_block(s2_ctx, addr);
			}
		}
	}

	/* Success only if at least one entry was updated */
	if (addr > base) {
		res->x[0] = RMI_SUCCESS;
		res->x[1] = addr;
	} else {
		res->x[0] = pack_return_code(RMI_ERROR_RTT,
					     (unsigned int)level);
	}
}
1335
/*
 * Handles RMI_RTT_SET_RIPAS: applies the RIPAS change that the realm
 * requested through the REC (rec->set_ripas) to the IPA range
 * [@base, @top).
 *
 * Results reported through @res:
 *   x[0] - RMI status code
 *   x[1] - first IPA not processed in this call (on success); also
 *          written back to rec->set_ripas.addr to record progress
 */
void smc_rtt_set_ripas(unsigned long rd_addr,
		       unsigned long rec_addr,
		       unsigned long base,
		       unsigned long top,
		       struct smc_result *res)
{
	struct granule *g_rd, *g_rec, *g_rtt_root;
	struct rec *rec;
	struct rd *rd;
	unsigned long ipa_bits;
	struct rtt_walk wi;
	unsigned long *s2tt;
	struct realm_s2_context s2_ctx;
	enum ripas ripas_val;
	enum ripas_change_destroyed change_destroyed;
	int sl;

	/* The range must be non-empty */
	if (top <= base) {
		res->x[0] = RMI_ERROR_INPUT;
		return;
	}

	if (!find_lock_two_granules(rd_addr,
				    GRANULE_STATE_RD,
				    &g_rd,
				    rec_addr,
				    GRANULE_STATE_REC,
				    &g_rec)) {
		res->x[0] = RMI_ERROR_INPUT;
		return;
	}

	/* A REC with a non-zero refcount is in use; reject */
	if (granule_refcount_read_acquire(g_rec) != 0UL) {
		res->x[0] = RMI_ERROR_REC;
		goto out_unlock_rec_rd;
	}

	rec = granule_map(g_rec, SLOT_REC);
	assert(rec != NULL);

	/* The REC must belong to the realm identified by @rd_addr */
	if (g_rd != rec->realm_info.g_rd) {
		res->x[0] = RMI_ERROR_REC;
		goto out_unmap_rec;
	}

	/* The target RIPAS and policy were recorded in the REC by the realm */
	ripas_val = rec->set_ripas.ripas_val;
	change_destroyed = rec->set_ripas.change_destroyed;

	/*
	 * Return error in case of target region:
	 * - is not the next chunk of requested region
	 * - extends beyond the end of requested region
	 */
	if ((base != rec->set_ripas.addr) || (top > rec->set_ripas.top)) {
		res->x[0] = RMI_ERROR_INPUT;
		goto out_unmap_rec;
	}

	rd = granule_map(g_rd, SLOT_RD);
	assert(rd != NULL);

	/*
	 * At this point, we know base == rec->set_ripas.addr
	 * and thus must be aligned to GRANULE size.
	 */
	assert(validate_map_addr(base, RTT_PAGE_LEVEL, rd));

	g_rtt_root = rd->s2_ctx.g_rtt;
	sl = realm_rtt_starting_level(rd);
	ipa_bits = realm_ipa_bits(rd);
	s2_ctx = rd->s2_ctx;

	granule_lock(g_rtt_root, GRANULE_STATE_RTT);

	/* Walk to the deepest level possible */
	rtt_walk_lock_unlock(g_rtt_root, sl, ipa_bits,
			     base, RTT_PAGE_LEVEL, &wi);

	/*
	 * Base has to be aligned to the level at which
	 * it is mapped in RTT.
	 */
	if (!validate_map_addr(base, wi.last_level, rd)) {
		res->x[0] = pack_return_code(RMI_ERROR_RTT,
					     (unsigned int)wi.last_level);
		goto out_unlock_llt;
	}

	s2tt = granule_map(wi.g_llt, SLOT_RTT);
	assert(s2tt != NULL);

	rtt_set_ripas_range(&s2_ctx, s2tt, base, top, &wi,
			    ripas_val, change_destroyed, res);

	/* Record progress so the realm can continue from res->x[1] */
	if (res->x[0] == RMI_SUCCESS) {
		rec->set_ripas.addr = res->x[1];
	}

	buffer_unmap(s2tt);
out_unlock_llt:
	granule_unlock(wi.g_llt);
	buffer_unmap(rd);
out_unmap_rec:
	buffer_unmap(rec);
out_unlock_rec_rd:
	granule_unlock(g_rec);
	granule_unlock(g_rd);
}