blob: 3c21d564212e43b68aab14c73e21660e83eb24c1 [file] [log] [blame]
Soby Mathewb4c6df42022-11-09 11:13:29 +00001/*
2 * SPDX-License-Identifier: BSD-3-Clause
3 *
4 * SPDX-FileCopyrightText: Copyright TF-RMM Contributors.
5 */
6
7#include <buffer.h>
8#include <granule.h>
9#include <measurement.h>
10#include <realm.h>
11#include <ripas.h>
12#include <smc-handler.h>
13#include <smc-rmi.h>
14#include <smc.h>
15#include <stddef.h>
16#include <string.h>
17#include <table.h>
18
/*
 * Validate the map_addr value passed to RMI_RTT_* and RMI_DATA_* commands.
 *
 * A map_addr is valid when it lies inside the realm's IPA space and is
 * aligned to the block/page size of the requested RTT level.
 */
static bool validate_map_addr(unsigned long map_addr,
			      unsigned long level,
			      struct rd *rd)
{
	return (map_addr < realm_ipa_size(rd)) &&
	       addr_is_level_aligned(map_addr, level);
}
35
36/*
37 * Structure commands can operate on all RTTs except for the root RTT so
38 * the minimal valid level is the stage 2 starting level + 1.
39 */
40static bool validate_rtt_structure_cmds(unsigned long map_addr,
41 long level,
42 struct rd *rd)
43{
44 int min_level = realm_rtt_starting_level(rd) + 1;
45
46 if ((level < min_level) || (level > RTT_PAGE_LEVEL)) {
47 return false;
48 }
49 return validate_map_addr(map_addr, level, rd);
50}
51
52/*
53 * Map/Unmap commands can operate up to a level 2 block entry so min_level is
54 * the smallest block size.
55 */
56static bool validate_rtt_map_cmds(unsigned long map_addr,
57 long level,
58 struct rd *rd)
59{
60 if ((level < RTT_MIN_BLOCK_LEVEL) || (level > RTT_PAGE_LEVEL)) {
61 return false;
62 }
63 return validate_map_addr(map_addr, level, rd);
64}
65
66/*
67 * Entry commands can operate on any entry so the minimal valid level is the
68 * stage 2 starting level.
69 */
70static bool validate_rtt_entry_cmds(unsigned long map_addr,
71 long level,
72 struct rd *rd)
73{
74 if ((level < realm_rtt_starting_level(rd)) ||
75 (level > RTT_PAGE_LEVEL)) {
76 return false;
77 }
78 return validate_map_addr(map_addr, level, rd);
79}
80
/*
 * RMI_RTT_CREATE: turn the DELEGATED granule at @rtt_addr into a child RTT
 * of the parent entry that maps @map_addr at @ulevel - 1. The new table
 * inherits the state of the parent S2TTE (unassigned/assigned, empty/ram/
 * ns/destroyed), i.e. this implements RTT "unfolding".
 *
 * Returns RMI_SUCCESS, RMI_ERROR_INPUT, or a packed RMI_ERROR_RTT code
 * carrying the level at which the walk terminated.
 */
unsigned long smc_rtt_create(unsigned long rd_addr,
			     unsigned long rtt_addr,
			     unsigned long map_addr,
			     unsigned long ulevel)
{
	struct granule *g_rd;
	struct granule *g_tbl;
	struct rd *rd;
	struct granule *g_table_root;
	struct rtt_walk wi;
	unsigned long *s2tt, *parent_s2tt, parent_s2tte;
	long level = (long)ulevel;
	unsigned long ipa_bits;
	unsigned long ret;
	struct realm_s2_context s2_ctx;
	int sl;

	/* Both the new RTT granule and the RD must be locked together */
	if (!find_lock_two_granules(rtt_addr,
				    GRANULE_STATE_DELEGATED,
				    &g_tbl,
				    rd_addr,
				    GRANULE_STATE_RD,
				    &g_rd)) {
		return RMI_ERROR_INPUT;
	}

	rd = granule_map(g_rd, SLOT_RD);

	if (!validate_rtt_structure_cmds(map_addr, level, rd)) {
		buffer_unmap(rd);
		granule_unlock(g_rd);
		granule_unlock(g_tbl);
		return RMI_ERROR_INPUT;
	}

	/* Snapshot the realm's S2 context before dropping the RD mapping */
	g_table_root = rd->s2_ctx.g_rtt;
	sl = realm_rtt_starting_level(rd);
	ipa_bits = realm_ipa_bits(rd);
	s2_ctx = rd->s2_ctx;
	buffer_unmap(rd);

	/*
	 * Lock the RTT root. Enforcing locking order RD->RTT is enough to
	 * ensure deadlock free locking guarantee.
	 */
	granule_lock(g_table_root, GRANULE_STATE_RTT);

	/* Unlock RD after locking RTT Root */
	granule_unlock(g_rd);

	/* Walk to the parent level; the parent entry must be the leaf */
	rtt_walk_lock_unlock(g_table_root, sl, ipa_bits,
			     map_addr, level - 1L, &wi);
	if (wi.last_level != level - 1L) {
		ret = pack_return_code(RMI_ERROR_RTT,
					(unsigned int)wi.last_level);
		goto out_unlock_llt;
	}

	parent_s2tt = granule_map(wi.g_llt, SLOT_RTT);
	parent_s2tte = s2tte_read(&parent_s2tt[wi.index]);
	s2tt = granule_map(g_tbl, SLOT_DELEGATED);

	/* Initialize the child table to match the parent entry's state */
	if (s2tte_is_unassigned_empty(parent_s2tte)) {
		s2tt_init_unassigned_empty(s2tt);

		/*
		 * Increase the refcount of the parent, the granule was
		 * locked while table walking and hand-over-hand locking.
		 * Atomicity and acquire/release semantics not required because
		 * the table is accessed always locked.
		 */
		__granule_get(wi.g_llt);

	} else if (s2tte_is_unassigned_ram(parent_s2tte)) {
		s2tt_init_unassigned_ram(s2tt);
		__granule_get(wi.g_llt);

	} else if (s2tte_is_unassigned_ns(parent_s2tte)) {
		s2tt_init_unassigned_ns(s2tt);
		__granule_get(wi.g_llt);

	} else if (s2tte_is_unassigned_destroyed(parent_s2tte)) {
		s2tt_init_unassigned_destroyed(s2tt);
		__granule_get(wi.g_llt);

	} else if (s2tte_is_assigned_destroyed(parent_s2tte, level - 1L)) {
		unsigned long block_pa;

		/*
		 * We should observe parent assigned s2tte only when
		 * we create tables above this level.
		 */
		assert(level > RTT_MIN_BLOCK_LEVEL);

		block_pa = s2tte_pa(parent_s2tte, level - 1L);

		s2tt_init_assigned_destroyed(s2tt, block_pa, level);

		/*
		 * Increase the refcount to mark the granule as in-use. refcount
		 * is incremented by S2TTES_PER_S2TT (ref RTT unfolding).
		 */
		__granule_refcount_inc(g_tbl, S2TTES_PER_S2TT);

	} else if (s2tte_is_assigned_empty(parent_s2tte, level - 1L)) {
		unsigned long block_pa;

		/*
		 * We should observe parent assigned s2tte only when
		 * we create tables above this level.
		 */
		assert(level > RTT_MIN_BLOCK_LEVEL);

		block_pa = s2tte_pa(parent_s2tte, level - 1L);

		s2tt_init_assigned_empty(s2tt, block_pa, level);

		/*
		 * Increase the refcount to mark the granule as in-use. refcount
		 * is incremented by S2TTES_PER_S2TT (ref RTT unfolding).
		 */
		__granule_refcount_inc(g_tbl, S2TTES_PER_S2TT);

	} else if (s2tte_is_assigned_ram(parent_s2tte, level - 1L)) {
		unsigned long block_pa;

		/*
		 * We should observe parent valid s2tte only when
		 * we create tables above this level.
		 */
		assert(level > RTT_MIN_BLOCK_LEVEL);

		/*
		 * Break before make. This may cause spurious S2 aborts.
		 */
		s2tte_write(&parent_s2tt[wi.index], 0UL);
		invalidate_block(&s2_ctx, map_addr);

		block_pa = s2tte_pa(parent_s2tte, level - 1L);

		s2tt_init_assigned_ram(s2tt, block_pa, level);

		/*
		 * Increase the refcount to mark the granule as in-use. refcount
		 * is incremented by S2TTES_PER_S2TT (ref RTT unfolding).
		 */
		__granule_refcount_inc(g_tbl, S2TTES_PER_S2TT);

	} else if (s2tte_is_assigned_ns(parent_s2tte, level - 1L)) {
		unsigned long block_pa;

		/*
		 * We should observe parent assigned_ns s2tte only when
		 * we create tables above this level.
		 */
		assert(level > RTT_MIN_BLOCK_LEVEL);

		/*
		 * Break before make. This may cause spurious S2 aborts.
		 */
		s2tte_write(&parent_s2tt[wi.index], 0UL);
		invalidate_block(&s2_ctx, map_addr);

		block_pa = s2tte_pa(parent_s2tte, level - 1L);

		s2tt_init_assigned_ns(s2tt, block_pa, level);

		/*
		 * Increase the refcount to mark the granule as in-use. refcount
		 * is incremented by S2TTES_PER_S2TT (ref RTT unfolding).
		 */
		__granule_refcount_inc(g_tbl, S2TTES_PER_S2TT);

	} else if (s2tte_is_table(parent_s2tte, level - 1L)) {
		/* A child table already exists at this position */
		ret = pack_return_code(RMI_ERROR_RTT,
					(unsigned int)(level - 1L));
		goto out_unmap_table;

	} else {
		assert(false);
	}

	ret = RMI_SUCCESS;

	granule_set_state(g_tbl, GRANULE_STATE_RTT);

	/* Install the new table descriptor into the parent entry */
	parent_s2tte = s2tte_create_table(rtt_addr, level - 1L);
	s2tte_write(&parent_s2tt[wi.index], parent_s2tte);

out_unmap_table:
	buffer_unmap(s2tt);
	buffer_unmap(parent_s2tt);
out_unlock_llt:
	granule_unlock(wi.g_llt);
	granule_unlock(g_tbl);
	return ret;
}
278
/*
 * RMI_RTT_FOLD: collapse a homogeneous child RTT at @ulevel back into a
 * single parent S2TTE at @ulevel - 1, releasing the child table granule
 * (its address is returned in res->x[1]) to the DELEGATED state.
 *
 * res->x[0] holds RMI_SUCCESS, RMI_ERROR_INPUT, or a packed RMI_ERROR_RTT
 * code carrying the offending level.
 */
void smc_rtt_fold(unsigned long rd_addr,
		  unsigned long map_addr,
		  unsigned long ulevel,
		  struct smc_result *res)
{
	struct granule *g_rd;
	struct granule *g_tbl;
	struct rd *rd;
	struct granule *g_table_root;
	struct rtt_walk wi;
	unsigned long *table, *parent_s2tt, parent_s2tte;
	long level = (long)ulevel;
	unsigned long ipa_bits, rtt_addr;
	unsigned long ret;
	struct realm_s2_context s2_ctx;
	int sl;

	g_rd = find_lock_granule(rd_addr, GRANULE_STATE_RD);
	if (g_rd == NULL) {
		res->x[0] = RMI_ERROR_INPUT;
		return;
	}

	rd = granule_map(g_rd, SLOT_RD);

	if (!validate_rtt_structure_cmds(map_addr, level, rd)) {
		buffer_unmap(rd);
		granule_unlock(g_rd);
		res->x[0] = RMI_ERROR_INPUT;
		return;
	}

	/* Snapshot the realm's S2 context; lock order is RD -> RTT root */
	g_table_root = rd->s2_ctx.g_rtt;
	sl = realm_rtt_starting_level(rd);
	ipa_bits = realm_ipa_bits(rd);
	s2_ctx = rd->s2_ctx;
	buffer_unmap(rd);
	granule_lock(g_table_root, GRANULE_STATE_RTT);
	granule_unlock(g_rd);

	/* Walk to the parent level; its entry must be a table descriptor */
	rtt_walk_lock_unlock(g_table_root, sl, ipa_bits,
			     map_addr, level - 1L, &wi);
	if (wi.last_level != level - 1L) {
		ret = pack_return_code(RMI_ERROR_RTT,
					(unsigned int)wi.last_level);
		goto out_unlock_parent_table;
	}

	parent_s2tt = granule_map(wi.g_llt, SLOT_RTT);
	parent_s2tte = s2tte_read(&parent_s2tt[wi.index]);
	if (!s2tte_is_table(parent_s2tte, level - 1L)) {
		ret = pack_return_code(RMI_ERROR_RTT,
					(unsigned int)(level - 1L));
		goto out_unmap_parent_table;
	}

	rtt_addr = s2tte_pa_table(parent_s2tte, level - 1L);
	g_tbl = find_lock_granule(rtt_addr, GRANULE_STATE_RTT);

	/*
	 * A table descriptor S2TTE always points to a TABLE granule.
	 */
	assert(g_tbl != NULL);

	table = granule_map(g_tbl, SLOT_RTT2);

	/*
	 * The command can succeed only if all 512 S2TTEs are of the same type.
	 * We first check the table's ref. counter to speed up the case when
	 * the host makes a guess whether a memory region can be folded.
	 */
	if (g_tbl->refcount == 0UL) {
		/* No live entries: fold into an unassigned parent entry */
		if (table_is_unassigned_destroyed_block(table)) {
			parent_s2tte = s2tte_create_unassigned_destroyed();
		} else if (table_is_unassigned_empty_block(table)) {
			parent_s2tte = s2tte_create_unassigned_empty();
		} else if (table_is_unassigned_ram_block(table)) {
			parent_s2tte = s2tte_create_unassigned_ram();
		} else if (table_is_unassigned_ns_block(table)) {
			parent_s2tte = s2tte_create_unassigned_ns();
		} else if (table_maps_assigned_ns_block(table, level)) {
			unsigned long s2tte = s2tte_read(&table[0]);
			unsigned long block_pa = s2tte_pa(s2tte, level);

			parent_s2tte = s2tte_create_assigned_ns(block_pa,
								level - 1L);
		} else {
			/*
			 * The table holds a mixture of destroyed and
			 * unassigned entries.
			 */
			ret = pack_return_code(RMI_ERROR_RTT,
						(unsigned int)level);
			goto out_unmap_table;
		}
		/* Parent loses one child table */
		__granule_put(wi.g_llt);
	} else if (g_tbl->refcount == S2TTES_PER_S2TT) {

		unsigned long s2tte, block_pa;

		/* The RMM specification does not allow creating block
		 * entries less than RTT_MIN_BLOCK_LEVEL even though
		 * permitted by the Arm Architecture.
		 * Hence ensure that the table being folded is at a level
		 * higher than the RTT_MIN_BLOCK_LEVEL.
		 *
		 * A fully populated table cannot be destroyed if that
		 * would create a block mapping below RTT_MIN_BLOCK_LEVEL.
		 */
		if (level <= RTT_MIN_BLOCK_LEVEL) {
			ret = pack_return_code(RMI_ERROR_RTT,
						(unsigned int)wi.last_level);
			goto out_unmap_table;
		}

		s2tte = s2tte_read(&table[0]);
		block_pa = s2tte_pa(s2tte, level);

		/*
		 * The table must also refer to a contiguous block through the
		 * same type of s2tte, either Assigned, Valid or Assigned_NS.
		 */
		if (table_maps_assigned_empty_block(table, level)) {
			parent_s2tte = s2tte_create_assigned_empty(block_pa,
								   level - 1L);
		} else if (table_maps_assigned_ram_block(table, level)) {
			parent_s2tte = s2tte_create_assigned_ram(block_pa,
								 level - 1L);
		/* The table contains mixed entries that cannot be folded */
		} else {
			ret = pack_return_code(RMI_ERROR_RTT,
						(unsigned int)level);
			goto out_unmap_table;
		}

		__granule_refcount_dec(g_tbl, S2TTES_PER_S2TT);
	} else {
		/*
		 * The table holds a mixture of different types of s2ttes.
		 */
		ret = pack_return_code(RMI_ERROR_RTT,
					(unsigned int)level);
		goto out_unmap_table;
	}

	ret = RMI_SUCCESS;
	res->x[1] = rtt_addr;

	/*
	 * Break before make.
	 */
	s2tte_write(&parent_s2tt[wi.index], 0UL);

	/* Valid mappings need a finer-grained TLB invalidation */
	if (s2tte_is_assigned_ram(parent_s2tte, level - 1L) ||
	    s2tte_is_assigned_ns(parent_s2tte, level - 1L)) {
		invalidate_pages_in_block(&s2_ctx, map_addr);
	} else {
		invalidate_block(&s2_ctx, map_addr);
	}

	s2tte_write(&parent_s2tt[wi.index], parent_s2tte);

	/* Scrub and return the folded table to the DELEGATED state */
	granule_memzero_mapped(table);
	granule_set_state(g_tbl, GRANULE_STATE_DELEGATED);

out_unmap_table:
	buffer_unmap(table);
	granule_unlock(g_tbl);
out_unmap_parent_table:
	buffer_unmap(parent_s2tt);
out_unlock_parent_table:
	granule_unlock(wi.g_llt);
	res->x[0] = ret;
}
453
/*
 * RMI_RTT_DESTROY: remove an unreferenced child RTT at @ulevel, replacing
 * the parent entry with an unassigned S2TTE (destroyed inside the PAR,
 * ns outside) and returning the freed table's address in res->x[1].
 *
 * res->x[0] holds the status; res->x[2] holds the next address to walk
 * (end of the non-live region on success / walk failure, else map_addr).
 */
void smc_rtt_destroy(unsigned long rd_addr,
		     unsigned long map_addr,
		     unsigned long ulevel,
		     struct smc_result *res)
{
	struct granule *g_rd;
	struct granule *g_tbl;
	struct rd *rd;
	struct granule *g_table_root;
	struct rtt_walk wi;
	unsigned long *table, *parent_s2tt, parent_s2tte;
	long level = (long)ulevel;
	unsigned long ipa_bits, rtt_addr;
	unsigned long ret;
	struct realm_s2_context s2_ctx;
	int sl;
	bool in_par, skip_non_live = false;

	g_rd = find_lock_granule(rd_addr, GRANULE_STATE_RD);
	if (g_rd == NULL) {
		res->x[0] = RMI_ERROR_INPUT;
		res->x[2] = 0UL;
		return;
	}

	rd = granule_map(g_rd, SLOT_RD);

	if (!validate_rtt_structure_cmds(map_addr, level, rd)) {
		buffer_unmap(rd);
		granule_unlock(g_rd);
		res->x[0] = RMI_ERROR_INPUT;
		res->x[2] = 0UL;
		return;
	}

	/* Snapshot the realm's S2 context; lock order is RD -> RTT root */
	g_table_root = rd->s2_ctx.g_rtt;
	sl = realm_rtt_starting_level(rd);
	ipa_bits = realm_ipa_bits(rd);
	s2_ctx = rd->s2_ctx;
	in_par = addr_in_par(rd, map_addr);
	buffer_unmap(rd);
	granule_lock(g_table_root, GRANULE_STATE_RTT);
	granule_unlock(g_rd);

	rtt_walk_lock_unlock(g_table_root, sl, ipa_bits,
			     map_addr, level - 1L, &wi);

	parent_s2tt = granule_map(wi.g_llt, SLOT_RTT);
	parent_s2tte = s2tte_read(&parent_s2tt[wi.index]);

	/* The parent entry must be a table descriptor at level - 1 */
	if ((wi.last_level != level - 1L) ||
	    !s2tte_is_table(parent_s2tte, level - 1L)) {
		ret = pack_return_code(RMI_ERROR_RTT,
					(unsigned int)wi.last_level);
		skip_non_live = true;
		goto out_unmap_parent_table;
	}

	rtt_addr = s2tte_pa_table(parent_s2tte, level - 1L);

	/*
	 * Lock the RTT granule. The 'rtt_addr' is verified, thus can be treated
	 * as an internal granule.
	 */
	g_tbl = find_lock_granule(rtt_addr, GRANULE_STATE_RTT);

	/*
	 * A table descriptor S2TTE always points to a TABLE granule.
	 */
	assert(g_tbl != NULL);

	/*
	 * Read the refcount value. RTT granule is always accessed locked, thus
	 * the refcount can be accessed without atomic operations.
	 */
	if (g_tbl->refcount != 0UL) {
		ret = pack_return_code(RMI_ERROR_RTT, (unsigned int)level);
		goto out_unlock_table;
	}

	ret = RMI_SUCCESS;
	res->x[1] = rtt_addr;
	skip_non_live = true;

	table = granule_map(g_tbl, SLOT_RTT2);

	/* Inside the PAR the destroyed RIPAS is recorded; outside it is NS */
	if (in_par) {
		parent_s2tte = s2tte_create_unassigned_destroyed();
	} else {
		parent_s2tte = s2tte_create_unassigned_ns();
	}

	/* Parent loses one child table */
	__granule_put(wi.g_llt);

	/*
	 * Break before make. Note that this may cause spurious S2 aborts.
	 */
	s2tte_write(&parent_s2tt[wi.index], 0UL);
	invalidate_block(&s2_ctx, map_addr);
	s2tte_write(&parent_s2tt[wi.index], parent_s2tte);

	/* Scrub and return the destroyed table to the DELEGATED state */
	granule_memzero_mapped(table);
	granule_set_state(g_tbl, GRANULE_STATE_DELEGATED);

	buffer_unmap(table);
out_unlock_table:
	granule_unlock(g_tbl);
out_unmap_parent_table:
	if (skip_non_live) {
		res->x[2] = skip_non_live_entries(map_addr, parent_s2tt, &wi);
	} else {
		res->x[2] = map_addr;
	}
	buffer_unmap(parent_s2tt);
	granule_unlock(wi.g_llt);
	res->x[0] = ret;
}
571
/* Operation selector for the shared map_unmap_ns() handler. */
enum map_unmap_ns_op {
	MAP_NS,
	UNMAP_NS
};
576
/*
 * We don't hold a reference on the NS granule when it is
 * mapped into a realm. Instead we rely on the guarantees
 * provided by the architecture to ensure that a NS access
 * to a protected granule is prohibited even within the realm.
 */
/*
 * Shared implementation of RMI_RTT_MAP_UNPROTECTED (op == MAP_NS, writes
 * @host_s2tte) and RMI_RTT_UNMAP_UNPROTECTED (op == UNMAP_NS).
 * @map_addr must lie outside the realm's PAR. Status goes to res->x[0];
 * for UNMAP_NS, res->x[1] receives the end of the non-live region.
 */
static void map_unmap_ns(unsigned long rd_addr,
			 unsigned long map_addr,
			 long level,
			 unsigned long host_s2tte,
			 enum map_unmap_ns_op op,
			 struct smc_result *res)
{
	struct granule *g_rd;
	struct rd *rd;
	struct granule *g_table_root;
	unsigned long *s2tt, s2tte;
	struct rtt_walk wi;
	unsigned long ipa_bits;
	struct realm_s2_context s2_ctx;
	int sl;

	g_rd = find_lock_granule(rd_addr, GRANULE_STATE_RD);
	if (g_rd == NULL) {
		res->x[0] = RMI_ERROR_INPUT;
		return;
	}

	rd = granule_map(g_rd, SLOT_RD);

	if (!validate_rtt_map_cmds(map_addr, level, rd)) {
		buffer_unmap(rd);
		granule_unlock(g_rd);
		res->x[0] = RMI_ERROR_INPUT;
		return;
	}

	g_table_root = rd->s2_ctx.g_rtt;
	sl = realm_rtt_starting_level(rd);
	ipa_bits = realm_ipa_bits(rd);

	/* Check if map_addr is outside PAR */
	if (addr_in_par(rd, map_addr)) {
		buffer_unmap(rd);
		granule_unlock(g_rd);
		res->x[0] = RMI_ERROR_INPUT;
		return;
	}

	s2_ctx = rd->s2_ctx;
	buffer_unmap(rd);

	/* Lock order is RD -> RTT root */
	granule_lock(g_table_root, GRANULE_STATE_RTT);
	granule_unlock(g_rd);

	rtt_walk_lock_unlock(g_table_root, sl, ipa_bits,
			     map_addr, level, &wi);

	/*
	 * For UNMAP_NS, we need to map the table and look
	 * for the end of the non-live region.
	 */
	if (op == MAP_NS && wi.last_level != level) {
		res->x[0] = pack_return_code(RMI_ERROR_RTT,
						(unsigned int)wi.last_level);
		goto out_unlock_llt;
	}

	s2tt = granule_map(wi.g_llt, SLOT_RTT);
	s2tte = s2tte_read(&s2tt[wi.index]);

	if (op == MAP_NS) {
		/* Only an unassigned_ns entry may be mapped */
		if (!s2tte_is_unassigned_ns(s2tte)) {
			res->x[0] = pack_return_code(RMI_ERROR_RTT,
						(unsigned int)level);
			goto out_unmap_table;
		}

		s2tte = s2tte_create_assigned_ns(host_s2tte, level);
		s2tte_write(&s2tt[wi.index], s2tte);

	} else if (op == UNMAP_NS) {
		/*
		 * The following check also verifies that map_addr is outside
		 * PAR, as valid_NS s2tte may only cover outside PAR IPA range.
		 */
		bool assigned_ns = s2tte_is_assigned_ns(s2tte, wi.last_level);

		if ((wi.last_level != level) || !assigned_ns) {
			res->x[0] = pack_return_code(RMI_ERROR_RTT,
						(unsigned int)wi.last_level);
			goto out_unmap_table;
		}

		s2tte = s2tte_create_unassigned_ns();
		s2tte_write(&s2tt[wi.index], s2tte);
		/* Invalidate the TLB for the granularity just unmapped */
		if (level == RTT_PAGE_LEVEL) {
			invalidate_page(&s2_ctx, map_addr);
		} else {
			invalidate_block(&s2_ctx, map_addr);
		}
	}

	res->x[0] = RMI_SUCCESS;

out_unmap_table:
	if (op == UNMAP_NS) {
		res->x[1] = skip_non_live_entries(map_addr, s2tt, &wi);
	}
	buffer_unmap(s2tt);
out_unlock_llt:
	granule_unlock(wi.g_llt);
}
690
691unsigned long smc_rtt_map_unprotected(unsigned long rd_addr,
692 unsigned long map_addr,
693 unsigned long ulevel,
694 unsigned long s2tte)
695{
696 long level = (long)ulevel;
AlexeiFedorov917eabf2023-04-24 12:20:41 +0100697 struct smc_result res;
Soby Mathewb4c6df42022-11-09 11:13:29 +0000698
699 if (!host_ns_s2tte_is_valid(s2tte, level)) {
700 return RMI_ERROR_INPUT;
701 }
702
AlexeiFedorov917eabf2023-04-24 12:20:41 +0100703 map_unmap_ns(rd_addr, map_addr, level, s2tte, MAP_NS, &res);
704 return res.x[0];
Soby Mathewb4c6df42022-11-09 11:13:29 +0000705}
706
AlexeiFedorov917eabf2023-04-24 12:20:41 +0100707void smc_rtt_unmap_unprotected(unsigned long rd_addr,
708 unsigned long map_addr,
709 unsigned long ulevel,
710 struct smc_result *res)
Soby Mathewb4c6df42022-11-09 11:13:29 +0000711{
AlexeiFedorov917eabf2023-04-24 12:20:41 +0100712 return map_unmap_ns(rd_addr, map_addr, (long)ulevel, 0UL, UNMAP_NS, res);
Soby Mathewb4c6df42022-11-09 11:13:29 +0000713}
714
/*
 * RMI_RTT_READ_ENTRY: walk to @map_addr at (up to) @ulevel and report the
 * S2TTE found there.
 *
 * ret->x[0] = status, x[1] = level the walk reached, x[2] = entry state
 * (RMI_UNASSIGNED/RMI_ASSIGNED/RMI_TABLE), x[3] = output address (PA,
 * host S2TTE, or next-level table PA; 0 when not applicable),
 * x[4] = RIPAS value for the entry.
 */
void smc_rtt_read_entry(unsigned long rd_addr,
			unsigned long map_addr,
			unsigned long ulevel,
			struct smc_result *ret)
{
	struct granule *g_rd, *g_rtt_root;
	struct rd *rd;
	struct rtt_walk wi;
	unsigned long *s2tt, s2tte;
	unsigned long ipa_bits;
	long level = (long)ulevel;
	int sl;

	g_rd = find_lock_granule(rd_addr, GRANULE_STATE_RD);
	if (g_rd == NULL) {
		ret->x[0] = RMI_ERROR_INPUT;
		return;
	}

	rd = granule_map(g_rd, SLOT_RD);

	if (!validate_rtt_entry_cmds(map_addr, level, rd)) {
		buffer_unmap(rd);
		granule_unlock(g_rd);
		ret->x[0] = RMI_ERROR_INPUT;
		return;
	}

	/* Snapshot walk parameters; lock order is RD -> RTT root */
	g_rtt_root = rd->s2_ctx.g_rtt;
	sl = realm_rtt_starting_level(rd);
	ipa_bits = realm_ipa_bits(rd);
	buffer_unmap(rd);

	granule_lock(g_rtt_root, GRANULE_STATE_RTT);
	granule_unlock(g_rd);

	rtt_walk_lock_unlock(g_rtt_root, sl, ipa_bits,
			     map_addr, level, &wi);
	s2tt = granule_map(wi.g_llt, SLOT_RTT);
	s2tte = s2tte_read(&s2tt[wi.index]);
	ret->x[1] = wi.last_level;

	/* Translate the raw S2TTE into (state, address, ripas) outputs */
	if (s2tte_is_unassigned_empty(s2tte)) {
		ret->x[2] = RMI_UNASSIGNED;
		ret->x[3] = 0UL;
		ret->x[4] = RIPAS_EMPTY;
	} else if (s2tte_is_unassigned_ram(s2tte)) {
		ret->x[2] = RMI_UNASSIGNED;
		ret->x[3] = 0UL;
		ret->x[4] = RIPAS_RAM;
	} else if (s2tte_is_unassigned_destroyed(s2tte)) {
		ret->x[2] = RMI_UNASSIGNED;
		ret->x[3] = 0UL;
		ret->x[4] = RIPAS_DESTROYED;
	} else if (s2tte_is_assigned_empty(s2tte, wi.last_level)) {
		ret->x[2] = RMI_ASSIGNED;
		ret->x[3] = s2tte_pa(s2tte, wi.last_level);
		ret->x[4] = RIPAS_EMPTY;
	} else if (s2tte_is_assigned_ram(s2tte, wi.last_level)) {
		ret->x[2] = RMI_ASSIGNED;
		ret->x[3] = s2tte_pa(s2tte, wi.last_level);
		ret->x[4] = RIPAS_RAM;
	} else if (s2tte_is_assigned_destroyed(s2tte, wi.last_level)) {
		ret->x[2] = RMI_ASSIGNED;
		ret->x[3] = 0UL;
		ret->x[4] = RIPAS_DESTROYED;
	} else if (s2tte_is_unassigned_ns(s2tte)) {
		ret->x[2] = RMI_UNASSIGNED;
		ret->x[3] = 0UL;
		ret->x[4] = RIPAS_EMPTY;
	} else if (s2tte_is_assigned_ns(s2tte, wi.last_level)) {
		ret->x[2] = RMI_ASSIGNED;
		ret->x[3] = host_ns_s2tte(s2tte, wi.last_level);
		ret->x[4] = RIPAS_EMPTY;
	} else if (s2tte_is_table(s2tte, wi.last_level)) {
		ret->x[2] = RMI_TABLE;
		ret->x[3] = s2tte_pa_table(s2tte, wi.last_level);
		ret->x[4] = RIPAS_EMPTY;
	} else {
		assert(false);
	}

	buffer_unmap(s2tt);
	granule_unlock(wi.g_llt);

	ret->x[0] = RMI_SUCCESS;
}
802
/*
 * Extend the realm's RIM with a DATA-granule measurement.
 *
 * Builds a measurement descriptor (type, length, IPA, flags, current RIM,
 * and - when flags == RMI_MEASURE_CONTENT - a hash of @data), then hashes
 * the descriptor itself to produce the updated RIM in-place.
 *
 * NOTE: the descriptor's raw bytes (including zero-initialized fields) are
 * hashed, so initialization order/content must not be altered.
 */
static void data_granule_measure(struct rd *rd, void *data,
				 unsigned long ipa,
				 unsigned long flags)
{
	struct measurement_desc_data measure_desc = {0};

	/* Initialize the measurement descriptior structure */
	measure_desc.desc_type = MEASURE_DESC_TYPE_DATA;
	measure_desc.len = sizeof(struct measurement_desc_data);
	measure_desc.ipa = ipa;
	measure_desc.flags = flags;
	/* Fold the current RIM value into the descriptor */
	memcpy(measure_desc.rim,
	       &rd->measurement[RIM_MEASUREMENT_SLOT],
	       measurement_get_size(rd->algorithm));

	if (flags == RMI_MEASURE_CONTENT) {
		/*
		 * Hashing the data granules and store the result in the
		 * measurement descriptor structure.
		 */
		measurement_hash_compute(rd->algorithm,
					 data,
					 GRANULE_SIZE,
					 measure_desc.content);
	}

	/*
	 * Hashing the measurement descriptor structure; the result is the
	 * updated RIM.
	 */
	measurement_hash_compute(rd->algorithm,
				 &measure_desc,
				 sizeof(measure_desc),
				 rd->measurement[RIM_MEASUREMENT_SLOT]);
}
838
839static unsigned long validate_data_create_unknown(unsigned long map_addr,
840 struct rd *rd)
841{
842 if (!addr_in_par(rd, map_addr)) {
843 return RMI_ERROR_INPUT;
844 }
845
846 if (!validate_map_addr(map_addr, RTT_PAGE_LEVEL, rd)) {
847 return RMI_ERROR_INPUT;
848 }
849
850 return RMI_SUCCESS;
851}
852
853static unsigned long validate_data_create(unsigned long map_addr,
854 struct rd *rd)
855{
856 if (get_rd_state_locked(rd) != REALM_STATE_NEW) {
857 return RMI_ERROR_REALM;
858 }
859
860 return validate_data_create_unknown(map_addr, rd);
861}
862
863/*
AlexeiFedorov0f9cd1f2023-07-10 17:04:58 +0100864 * Implements both RMI_DATA_CREATE and RMI_DATA_CREATE_UNKNOWN
Soby Mathewb4c6df42022-11-09 11:13:29 +0000865 *
AlexeiFedorov0f9cd1f2023-07-10 17:04:58 +0100866 * if @g_src == NULL, implements RMI_DATA_CREATE_UNKNOWN
867 * and RMI_DATA_CREATE otherwise.
Soby Mathewb4c6df42022-11-09 11:13:29 +0000868 */
AlexeiFedorovac923c82023-04-06 15:12:04 +0100869static unsigned long data_create(unsigned long rd_addr,
870 unsigned long data_addr,
Soby Mathewb4c6df42022-11-09 11:13:29 +0000871 unsigned long map_addr,
872 struct granule *g_src,
873 unsigned long flags)
874{
875 struct granule *g_data;
876 struct granule *g_rd;
877 struct granule *g_table_root;
878 struct rd *rd;
879 struct rtt_walk wi;
880 unsigned long s2tte, *s2tt;
Soby Mathewb4c6df42022-11-09 11:13:29 +0000881 enum granule_state new_data_state = GRANULE_STATE_DELEGATED;
882 unsigned long ipa_bits;
883 unsigned long ret;
884 int __unused meas_ret;
885 int sl;
886
887 if (!find_lock_two_granules(data_addr,
888 GRANULE_STATE_DELEGATED,
889 &g_data,
890 rd_addr,
891 GRANULE_STATE_RD,
892 &g_rd)) {
893 return RMI_ERROR_INPUT;
894 }
895
896 rd = granule_map(g_rd, SLOT_RD);
897
898 ret = (g_src != NULL) ?
899 validate_data_create(map_addr, rd) :
900 validate_data_create_unknown(map_addr, rd);
901
902 if (ret != RMI_SUCCESS) {
903 goto out_unmap_rd;
904 }
905
906 g_table_root = rd->s2_ctx.g_rtt;
907 sl = realm_rtt_starting_level(rd);
908 ipa_bits = realm_ipa_bits(rd);
909 granule_lock(g_table_root, GRANULE_STATE_RTT);
910 rtt_walk_lock_unlock(g_table_root, sl, ipa_bits,
911 map_addr, RTT_PAGE_LEVEL, &wi);
912 if (wi.last_level != RTT_PAGE_LEVEL) {
AlexeiFedorovbe37dee2023-07-18 10:44:01 +0100913 ret = pack_return_code(RMI_ERROR_RTT,
914 (unsigned int)wi.last_level);
Soby Mathewb4c6df42022-11-09 11:13:29 +0000915 goto out_unlock_ll_table;
916 }
917
918 s2tt = granule_map(wi.g_llt, SLOT_RTT);
919 s2tte = s2tte_read(&s2tt[wi.index]);
Soby Mathewb4c6df42022-11-09 11:13:29 +0000920
Soby Mathewb4c6df42022-11-09 11:13:29 +0000921 if (g_src != NULL) {
922 bool ns_access_ok;
AlexeiFedorov0f9cd1f2023-07-10 17:04:58 +0100923 void *data;
Soby Mathewb4c6df42022-11-09 11:13:29 +0000924
AlexeiFedorov0f9cd1f2023-07-10 17:04:58 +0100925 if (!s2tte_is_unassigned_ram(s2tte)) {
926 ret = pack_return_code(RMI_ERROR_RTT, RTT_PAGE_LEVEL);
927 goto out_unmap_ll_table;
928 }
929
930 data = granule_map(g_data, SLOT_DELEGATED);
Soby Mathewb4c6df42022-11-09 11:13:29 +0000931 ns_access_ok = ns_buffer_read(SLOT_NS, g_src, 0U,
932 GRANULE_SIZE, data);
Soby Mathewb4c6df42022-11-09 11:13:29 +0000933 if (!ns_access_ok) {
934 /*
935 * Some data may be copied before the failure. Zero
936 * g_data granule as it will remain in delegated state.
937 */
938 (void)memset(data, 0, GRANULE_SIZE);
939 buffer_unmap(data);
940 ret = RMI_ERROR_INPUT;
941 goto out_unmap_ll_table;
942 }
943
Soby Mathewb4c6df42022-11-09 11:13:29 +0000944 data_granule_measure(rd, data, map_addr, flags);
Soby Mathewb4c6df42022-11-09 11:13:29 +0000945 buffer_unmap(data);
AlexeiFedorov0f9cd1f2023-07-10 17:04:58 +0100946
947 } else if (!s2tte_is_unassigned(s2tte)) {
948 ret = pack_return_code(RMI_ERROR_RTT, RTT_PAGE_LEVEL);
949 goto out_unmap_ll_table;
Soby Mathewb4c6df42022-11-09 11:13:29 +0000950 }
951
952 new_data_state = GRANULE_STATE_DATA;
953
AlexeiFedorovcde1fdc2023-04-18 16:37:25 +0100954 s2tte = s2tte_create_assigned_ram(data_addr, RTT_PAGE_LEVEL);
Soby Mathewb4c6df42022-11-09 11:13:29 +0000955
956 s2tte_write(&s2tt[wi.index], s2tte);
957 __granule_get(wi.g_llt);
958
959 ret = RMI_SUCCESS;
960
961out_unmap_ll_table:
962 buffer_unmap(s2tt);
963out_unlock_ll_table:
964 granule_unlock(wi.g_llt);
965out_unmap_rd:
966 buffer_unmap(rd);
967 granule_unlock(g_rd);
968 granule_unlock_transition(g_data, new_data_state);
969 return ret;
970}
971
AlexeiFedorovac923c82023-04-06 15:12:04 +0100972unsigned long smc_data_create(unsigned long rd_addr,
973 unsigned long data_addr,
Soby Mathewb4c6df42022-11-09 11:13:29 +0000974 unsigned long map_addr,
975 unsigned long src_addr,
976 unsigned long flags)
977{
978 struct granule *g_src;
Soby Mathewb4c6df42022-11-09 11:13:29 +0000979
980 if (flags != RMI_NO_MEASURE_CONTENT && flags != RMI_MEASURE_CONTENT) {
981 return RMI_ERROR_INPUT;
982 }
983
984 g_src = find_granule(src_addr);
985 if ((g_src == NULL) || (g_src->state != GRANULE_STATE_NS)) {
986 return RMI_ERROR_INPUT;
987 }
988
AlexeiFedorovac923c82023-04-06 15:12:04 +0100989 return data_create(rd_addr, data_addr, map_addr, g_src, flags);
Soby Mathewb4c6df42022-11-09 11:13:29 +0000990}
991
AlexeiFedorovac923c82023-04-06 15:12:04 +0100992unsigned long smc_data_create_unknown(unsigned long rd_addr,
993 unsigned long data_addr,
Soby Mathewb4c6df42022-11-09 11:13:29 +0000994 unsigned long map_addr)
995{
AlexeiFedorovac923c82023-04-06 15:12:04 +0100996 return data_create(rd_addr, data_addr, map_addr, NULL, 0);
Soby Mathewb4c6df42022-11-09 11:13:29 +0000997}
998
AlexeiFedorove2002be2023-04-19 17:20:12 +0100999void smc_data_destroy(unsigned long rd_addr,
1000 unsigned long map_addr,
1001 struct smc_result *res)
Soby Mathewb4c6df42022-11-09 11:13:29 +00001002{
1003 struct granule *g_data;
1004 struct granule *g_rd;
1005 struct granule *g_table_root;
1006 struct rtt_walk wi;
1007 unsigned long data_addr, s2tte, *s2tt;
1008 struct rd *rd;
AlexeiFedorov917eabf2023-04-24 12:20:41 +01001009 unsigned long ipa_bits;
Soby Mathewb4c6df42022-11-09 11:13:29 +00001010 struct realm_s2_context s2_ctx;
Soby Mathewb4c6df42022-11-09 11:13:29 +00001011 int sl;
1012
1013 g_rd = find_lock_granule(rd_addr, GRANULE_STATE_RD);
1014 if (g_rd == NULL) {
AlexeiFedorove2002be2023-04-19 17:20:12 +01001015 res->x[0] = RMI_ERROR_INPUT;
AlexeiFedorova1b2a1d2023-07-18 15:08:47 +01001016 res->x[2] = 0UL;
AlexeiFedorove2002be2023-04-19 17:20:12 +01001017 return;
Soby Mathewb4c6df42022-11-09 11:13:29 +00001018 }
1019
1020 rd = granule_map(g_rd, SLOT_RD);
1021
1022 if (!validate_map_addr(map_addr, RTT_PAGE_LEVEL, rd)) {
1023 buffer_unmap(rd);
1024 granule_unlock(g_rd);
AlexeiFedorove2002be2023-04-19 17:20:12 +01001025 res->x[0] = RMI_ERROR_INPUT;
AlexeiFedorova1b2a1d2023-07-18 15:08:47 +01001026 res->x[2] = 0UL;
AlexeiFedorove2002be2023-04-19 17:20:12 +01001027 return;
Soby Mathewb4c6df42022-11-09 11:13:29 +00001028 }
1029
1030 g_table_root = rd->s2_ctx.g_rtt;
1031 sl = realm_rtt_starting_level(rd);
1032 ipa_bits = realm_ipa_bits(rd);
1033 s2_ctx = rd->s2_ctx;
1034 buffer_unmap(rd);
1035
1036 granule_lock(g_table_root, GRANULE_STATE_RTT);
1037 granule_unlock(g_rd);
1038
1039 rtt_walk_lock_unlock(g_table_root, sl, ipa_bits,
1040 map_addr, RTT_PAGE_LEVEL, &wi);
Soby Mathewb4c6df42022-11-09 11:13:29 +00001041
1042 s2tt = granule_map(wi.g_llt, SLOT_RTT);
AlexeiFedorov917eabf2023-04-24 12:20:41 +01001043 if (wi.last_level != RTT_PAGE_LEVEL) {
AlexeiFedorovbe37dee2023-07-18 10:44:01 +01001044 res->x[0] = pack_return_code(RMI_ERROR_RTT,
1045 (unsigned int)wi.last_level);
AlexeiFedorov917eabf2023-04-24 12:20:41 +01001046 goto out_unmap_ll_table;
1047 }
Soby Mathewb4c6df42022-11-09 11:13:29 +00001048
AlexeiFedorov917eabf2023-04-24 12:20:41 +01001049 s2tte = s2tte_read(&s2tt[wi.index]);
AlexeiFedorovc53b1f72023-07-04 15:37:03 +01001050
AlexeiFedorova43cd312023-04-17 11:42:25 +01001051 if (s2tte_is_assigned_ram(s2tte, RTT_PAGE_LEVEL)) {
1052 data_addr = s2tte_pa(s2tte, RTT_PAGE_LEVEL);
AlexeiFedorovc53b1f72023-07-04 15:37:03 +01001053 s2tte = s2tte_create_unassigned_destroyed();
AlexeiFedorova43cd312023-04-17 11:42:25 +01001054 s2tte_write(&s2tt[wi.index], s2tte);
1055 invalidate_page(&s2_ctx, map_addr);
1056 } else if (s2tte_is_assigned_empty(s2tte, RTT_PAGE_LEVEL)) {
1057 data_addr = s2tte_pa(s2tte, RTT_PAGE_LEVEL);
1058 s2tte = s2tte_create_unassigned_empty();
1059 s2tte_write(&s2tt[wi.index], s2tte);
1060 } else {
AlexeiFedorov917eabf2023-04-24 12:20:41 +01001061 res->x[0] = pack_return_code(RMI_ERROR_RTT, RTT_PAGE_LEVEL);
Soby Mathewb4c6df42022-11-09 11:13:29 +00001062 goto out_unmap_ll_table;
1063 }
1064
Soby Mathewb4c6df42022-11-09 11:13:29 +00001065 __granule_put(wi.g_llt);
1066
1067 /*
1068 * Lock the data granule and check expected state. Correct locking order
1069 * is guaranteed because granule address is obtained from a locked
1070 * granule by table walk. This lock needs to be acquired before a state
1071 * transition to or from GRANULE_STATE_DATA for granule address can happen.
1072 */
1073 g_data = find_lock_granule(data_addr, GRANULE_STATE_DATA);
AlexeiFedorov63b71692023-04-19 11:18:42 +01001074 assert(g_data != NULL);
Soby Mathewb4c6df42022-11-09 11:13:29 +00001075 granule_memzero(g_data, SLOT_DELEGATED);
1076 granule_unlock_transition(g_data, GRANULE_STATE_DELEGATED);
1077
AlexeiFedorov917eabf2023-04-24 12:20:41 +01001078 res->x[0] = RMI_SUCCESS;
AlexeiFedorove2002be2023-04-19 17:20:12 +01001079 res->x[1] = data_addr;
Soby Mathewb4c6df42022-11-09 11:13:29 +00001080out_unmap_ll_table:
AlexeiFedorov917eabf2023-04-24 12:20:41 +01001081 res->x[2] = skip_non_live_entries(map_addr, s2tt, &wi);
Soby Mathewb4c6df42022-11-09 11:13:29 +00001082 buffer_unmap(s2tt);
Soby Mathewb4c6df42022-11-09 11:13:29 +00001083 granule_unlock(wi.g_llt);
Soby Mathewb4c6df42022-11-09 11:13:29 +00001084}
1085
AlexeiFedorov0fb44552023-04-14 15:37:58 +01001086/*
AlexeiFedorov5cf35ba2023-04-25 10:02:20 +01001087 * Update the ripas value for the entry pointed by @s2ttep.
AlexeiFedorov0fb44552023-04-14 15:37:58 +01001088 *
AlexeiFedorov5cf35ba2023-04-25 10:02:20 +01001089 * Returns:
 * < 0 - On error and the operation was aborted, e.g. the entry
 *	 cannot have a RIPAS, or it is in the DESTROYED state and
 *	 @change_destroyed does not permit changing it.
 * 0 - Operation was successful and no TLBI is required.
 * > 0 - Operation was successful and TLBI is required.
AlexeiFedorov0fb44552023-04-14 15:37:58 +01001096 */
AlexeiFedorov5cf35ba2023-04-25 10:02:20 +01001097static int update_ripas(unsigned long *s2ttep, unsigned long level,
AlexeiFedorov63614ea2023-07-14 17:07:20 +01001098 enum ripas ripas_val,
1099 enum ripas_change_destroyed change_destroyed)
Soby Mathewb4c6df42022-11-09 11:13:29 +00001100{
AlexeiFedorov5cf35ba2023-04-25 10:02:20 +01001101 unsigned long pa, s2tte = s2tte_read(s2ttep);
1102 int ret = 0;
AlexeiFedorov0fb44552023-04-14 15:37:58 +01001103
AlexeiFedorov5cf35ba2023-04-25 10:02:20 +01001104 if (!s2tte_has_ripas(s2tte, level)) {
1105 return -1;
Soby Mathewb4c6df42022-11-09 11:13:29 +00001106 }
1107
AlexeiFedorov0fb44552023-04-14 15:37:58 +01001108 if (ripas_val == RIPAS_RAM) {
AlexeiFedorov5cf35ba2023-04-25 10:02:20 +01001109 if (s2tte_is_unassigned_empty(s2tte)) {
1110 s2tte = s2tte_create_unassigned_ram();
AlexeiFedorov63614ea2023-07-14 17:07:20 +01001111 } else if (s2tte_is_unassigned_destroyed(s2tte)) {
1112 if (change_destroyed == CHANGE_DESTROYED) {
1113 s2tte = s2tte_create_unassigned_ram();
1114 } else {
1115 return -1;
1116 }
AlexeiFedorov5cf35ba2023-04-25 10:02:20 +01001117 } else if (s2tte_is_assigned_empty(s2tte, level)) {
1118 pa = s2tte_pa(s2tte, level);
1119 s2tte = s2tte_create_assigned_ram(pa, level);
AlexeiFedorov63614ea2023-07-14 17:07:20 +01001120 } else if (s2tte_is_assigned_destroyed(s2tte, level)) {
1121 if (change_destroyed == CHANGE_DESTROYED) {
1122 pa = s2tte_pa(s2tte, level);
1123 s2tte = s2tte_create_assigned_ram(pa, level);
1124 } else {
1125 return -1;
1126 }
AlexeiFedorov5cf35ba2023-04-25 10:02:20 +01001127 } else {
1128 /* No action is required */
1129 return 0;
Soby Mathewb4c6df42022-11-09 11:13:29 +00001130 }
AlexeiFedorov0fb44552023-04-14 15:37:58 +01001131 } else if (ripas_val == RIPAS_EMPTY) {
AlexeiFedorov5cf35ba2023-04-25 10:02:20 +01001132 if (s2tte_is_unassigned_ram(s2tte)) {
1133 s2tte = s2tte_create_unassigned_empty();
AlexeiFedorov63614ea2023-07-14 17:07:20 +01001134 } else if (s2tte_is_unassigned_destroyed(s2tte)) {
1135 if (change_destroyed == CHANGE_DESTROYED) {
1136 s2tte = s2tte_create_unassigned_empty();
1137 } else {
1138 return -1;
1139 }
AlexeiFedorov5cf35ba2023-04-25 10:02:20 +01001140 } else if (s2tte_is_assigned_ram(s2tte, level)) {
1141 pa = s2tte_pa(s2tte, level);
1142 s2tte = s2tte_create_assigned_empty(pa, level);
1143 /* TLBI is required */
1144 ret = 1;
AlexeiFedorov63614ea2023-07-14 17:07:20 +01001145 } else if (s2tte_is_assigned_destroyed(s2tte, level)) {
1146 if (change_destroyed == CHANGE_DESTROYED) {
1147 pa = s2tte_pa(s2tte, level);
1148 s2tte = s2tte_create_assigned_empty(pa, level);
1149 /* TLBI is required */
1150 ret = 1;
1151 } else {
1152 return -1;
1153 }
AlexeiFedorov5cf35ba2023-04-25 10:02:20 +01001154 } else {
1155 /* No action is required */
1156 return 0;
AlexeiFedorov0fb44552023-04-14 15:37:58 +01001157 }
Soby Mathewb4c6df42022-11-09 11:13:29 +00001158 }
AlexeiFedorov5cf35ba2023-04-25 10:02:20 +01001159 s2tte_write(s2ttep, s2tte);
1160 return ret;
Soby Mathewb4c6df42022-11-09 11:13:29 +00001161}
1162
1163static void ripas_granule_measure(struct rd *rd,
AlexeiFedorov960d1612023-04-25 13:23:39 +01001164 unsigned long base,
1165 unsigned long top)
Soby Mathewb4c6df42022-11-09 11:13:29 +00001166{
1167 struct measurement_desc_ripas measure_desc = {0};
1168
1169 /* Initialize the measurement descriptior structure */
1170 measure_desc.desc_type = MEASURE_DESC_TYPE_RIPAS;
1171 measure_desc.len = sizeof(struct measurement_desc_ripas);
AlexeiFedorov960d1612023-04-25 13:23:39 +01001172 measure_desc.base = base;
1173 measure_desc.top = top;
AlexeiFedorov5cf35ba2023-04-25 10:02:20 +01001174 (void)memcpy(measure_desc.rim,
1175 &rd->measurement[RIM_MEASUREMENT_SLOT],
1176 measurement_get_size(rd->algorithm));
Soby Mathewb4c6df42022-11-09 11:13:29 +00001177
1178 /*
1179 * Hashing the measurement descriptor structure; the result is the
1180 * updated RIM.
1181 */
1182 measurement_hash_compute(rd->algorithm,
1183 &measure_desc,
1184 sizeof(measure_desc),
1185 rd->measurement[RIM_MEASUREMENT_SLOT]);
1186}
1187
AlexeiFedorov960d1612023-04-25 13:23:39 +01001188void smc_rtt_init_ripas(unsigned long rd_addr,
1189 unsigned long base,
1190 unsigned long top,
1191 struct smc_result *res)
Soby Mathewb4c6df42022-11-09 11:13:29 +00001192{
1193 struct granule *g_rd, *g_rtt_root;
1194 struct rd *rd;
AlexeiFedorov960d1612023-04-25 13:23:39 +01001195 unsigned long ipa_bits, addr, map_size;
Soby Mathewb4c6df42022-11-09 11:13:29 +00001196 struct rtt_walk wi;
1197 unsigned long s2tte, *s2tt;
AlexeiFedorov960d1612023-04-25 13:23:39 +01001198 long level;
1199 unsigned int index;
Soby Mathewb4c6df42022-11-09 11:13:29 +00001200 int sl;
1201
1202 g_rd = find_lock_granule(rd_addr, GRANULE_STATE_RD);
1203 if (g_rd == NULL) {
AlexeiFedorov960d1612023-04-25 13:23:39 +01001204 res->x[0] = RMI_ERROR_INPUT;
1205 return;
Soby Mathewb4c6df42022-11-09 11:13:29 +00001206 }
1207
1208 rd = granule_map(g_rd, SLOT_RD);
1209
1210 if (get_rd_state_locked(rd) != REALM_STATE_NEW) {
1211 buffer_unmap(rd);
1212 granule_unlock(g_rd);
AlexeiFedorov960d1612023-04-25 13:23:39 +01001213 res->x[0] = RMI_ERROR_REALM;
1214 return;
Soby Mathewb4c6df42022-11-09 11:13:29 +00001215 }
1216
AlexeiFedorov960d1612023-04-25 13:23:39 +01001217 if (!validate_map_addr(base, RTT_PAGE_LEVEL, rd) ||
1218 !validate_rtt_entry_cmds(top, RTT_PAGE_LEVEL, rd)) {
Soby Mathewb4c6df42022-11-09 11:13:29 +00001219 buffer_unmap(rd);
1220 granule_unlock(g_rd);
AlexeiFedorov960d1612023-04-25 13:23:39 +01001221 res->x[0] = RMI_ERROR_INPUT;
1222 return;
Soby Mathewb4c6df42022-11-09 11:13:29 +00001223 }
1224
AlexeiFedorov960d1612023-04-25 13:23:39 +01001225 if (!addr_in_par(rd, base) || !addr_in_par(rd, top)) {
Soby Mathewb4c6df42022-11-09 11:13:29 +00001226 buffer_unmap(rd);
1227 granule_unlock(g_rd);
AlexeiFedorov960d1612023-04-25 13:23:39 +01001228 res->x[0] = RMI_ERROR_INPUT;
1229 return;
Soby Mathewb4c6df42022-11-09 11:13:29 +00001230 }
1231
1232 g_rtt_root = rd->s2_ctx.g_rtt;
1233 sl = realm_rtt_starting_level(rd);
1234 ipa_bits = realm_ipa_bits(rd);
1235
1236 granule_lock(g_rtt_root, GRANULE_STATE_RTT);
Soby Mathewb4c6df42022-11-09 11:13:29 +00001237
1238 rtt_walk_lock_unlock(g_rtt_root, sl, ipa_bits,
AlexeiFedorov960d1612023-04-25 13:23:39 +01001239 base, RTT_PAGE_LEVEL, &wi);
1240 level = wi.last_level;
Soby Mathewb4c6df42022-11-09 11:13:29 +00001241 s2tt = granule_map(wi.g_llt, SLOT_RTT);
AlexeiFedorov960d1612023-04-25 13:23:39 +01001242 map_size = s2tte_map_size(level);
1243 addr = base & ~(map_size - 1UL);
Soby Mathewb4c6df42022-11-09 11:13:29 +00001244
AlexeiFedorov960d1612023-04-25 13:23:39 +01001245 /*
1246 * If the RTTE covers a range below "base", we need to
1247 * go deeper.
1248 */
1249 if (addr != base) {
1250 res->x[0] = pack_return_code(RMI_ERROR_RTT,
1251 (unsigned int)level);
Soby Mathewb4c6df42022-11-09 11:13:29 +00001252 goto out_unmap_llt;
1253 }
1254
AlexeiFedorov960d1612023-04-25 13:23:39 +01001255 for (index = wi.index; index < S2TTES_PER_S2TT;
1256 index++, addr += map_size) {
1257 unsigned long next = addr + map_size;
1258
1259 if (next > top) {
1260 break;
1261 }
1262
1263 s2tte = s2tte_read(&s2tt[index]);
1264 if (s2tte_is_unassigned_empty(s2tte)) {
1265 s2tte = s2tte_create_unassigned_ram();
1266 s2tte_write(&s2tt[index], s2tte);
1267 } else if (!s2tte_is_unassigned_ram(s2tte)) {
1268 break;
1269 }
1270 ripas_granule_measure(rd, addr, next);
1271 }
1272
1273 if (addr > base) {
1274 res->x[0] = RMI_SUCCESS;
1275 res->x[1] = addr;
1276 } else {
1277 res->x[0] = pack_return_code(RMI_ERROR_RTT,
1278 (unsigned int)level);
1279 }
Soby Mathewb4c6df42022-11-09 11:13:29 +00001280
1281out_unmap_llt:
1282 buffer_unmap(s2tt);
Soby Mathewb4c6df42022-11-09 11:13:29 +00001283 buffer_unmap(rd);
1284 granule_unlock(wi.g_llt);
AlexeiFedorov80295e42023-07-10 13:11:14 +01001285 granule_unlock(g_rd);
Soby Mathewb4c6df42022-11-09 11:13:29 +00001286}
1287
AlexeiFedorov5cf35ba2023-04-25 10:02:20 +01001288static void rtt_set_ripas_range(struct realm_s2_context *s2_ctx,
1289 unsigned long *s2tt,
1290 unsigned long base,
1291 unsigned long top,
1292 struct rtt_walk *wi,
1293 unsigned long ripas_val,
AlexeiFedorov63614ea2023-07-14 17:07:20 +01001294 enum ripas_change_destroyed change_destroyed,
AlexeiFedorov5cf35ba2023-04-25 10:02:20 +01001295 struct smc_result *res)
1296{
AlexeiFedorov63614ea2023-07-14 17:07:20 +01001297 unsigned long index = wi->index;
AlexeiFedorov5cf35ba2023-04-25 10:02:20 +01001298 long level = wi->last_level;
1299 unsigned long map_size = s2tte_map_size(level);
1300
1301 /* Align to the RTT level */
AlexeiFedorov63614ea2023-07-14 17:07:20 +01001302 unsigned long addr = base & ~(map_size - 1UL);
AlexeiFedorov5cf35ba2023-04-25 10:02:20 +01001303
1304 /* Make sure we don't touch a range below the requested range */
1305 if (addr != base) {
AlexeiFedorovbe37dee2023-07-18 10:44:01 +01001306 res->x[0] = pack_return_code(RMI_ERROR_RTT,
1307 (unsigned int)level);
AlexeiFedorov5cf35ba2023-04-25 10:02:20 +01001308 return;
1309 }
1310
AlexeiFedorov63614ea2023-07-14 17:07:20 +01001311 for (index = wi->index; index < S2TTES_PER_S2TT; addr += map_size) {
AlexeiFedorov5cf35ba2023-04-25 10:02:20 +01001312 int ret;
1313
AlexeiFedorov63614ea2023-07-14 17:07:20 +01001314 /* If this entry crosses the range, break. */
1315 if (addr + map_size > top) {
AlexeiFedorov5cf35ba2023-04-25 10:02:20 +01001316 break;
1317 }
1318
AlexeiFedorov63614ea2023-07-14 17:07:20 +01001319 ret = update_ripas(&s2tt[index++], level,
1320 ripas_val, change_destroyed);
AlexeiFedorov5cf35ba2023-04-25 10:02:20 +01001321 if (ret < 0) {
1322 break;
1323 }
1324
1325 /* Handle TLBI */
1326 if (ret != 0) {
1327 if (level == RTT_PAGE_LEVEL) {
1328 invalidate_page(s2_ctx, addr);
1329 } else {
1330 invalidate_block(s2_ctx, addr);
1331 }
1332 }
1333 }
1334
1335 if (addr > base) {
1336 res->x[0] = RMI_SUCCESS;
1337 res->x[1] = addr;
1338 } else {
AlexeiFedorovbe37dee2023-07-18 10:44:01 +01001339 res->x[0] = pack_return_code(RMI_ERROR_RTT,
1340 (unsigned int)level);
AlexeiFedorov5cf35ba2023-04-25 10:02:20 +01001341 }
1342}
1343
/*
 * Handle RMI_RTT_SET_RIPAS: apply the RIPAS change pending on the REC
 * (requested by the realm via RSI) to the IPA range [base, top).
 *
 * The requested range must be the next chunk of the region recorded in
 * rec->set_ripas and must not extend past its end. Progress is written
 * back to rec->set_ripas.addr so a subsequent call can continue.
 */
void smc_rtt_set_ripas(unsigned long rd_addr,
		       unsigned long rec_addr,
		       unsigned long base,
		       unsigned long top,
		       struct smc_result *res)
{
	struct granule *g_rd, *g_rec, *g_rtt_root;
	struct rec *rec;
	struct rd *rd;
	unsigned long ipa_bits;
	struct rtt_walk wi;
	unsigned long *s2tt;
	struct realm_s2_context s2_ctx;
	enum ripas ripas_val;
	enum ripas_change_destroyed change_destroyed;
	int sl;

	/* Lock the RD and the REC, checking their expected states */
	if (!find_lock_two_granules(rd_addr,
				    GRANULE_STATE_RD,
				    &g_rd,
				    rec_addr,
				    GRANULE_STATE_REC,
				    &g_rec)) {
		res->x[0] = RMI_ERROR_INPUT;
		return;
	}

	/* A REC with a non-zero refcount is in use; reject the command */
	if (granule_refcount_read_acquire(g_rec) != 0UL) {
		res->x[0] = RMI_ERROR_REC;
		goto out_unlock_rec_rd;
	}

	rec = granule_map(g_rec, SLOT_REC);

	/* The REC must belong to the realm identified by rd_addr */
	if (g_rd != rec->realm_info.g_rd) {
		res->x[0] = RMI_ERROR_REC;
		goto out_unmap_rec;
	}

	/* Pick up the RIPAS change parameters recorded on the REC */
	ripas_val = rec->set_ripas.ripas_val;
	change_destroyed = rec->set_ripas.change_destroyed;

	/*
	 * Return error in case of target region:
	 * - is not the next chunk of requested region
	 * - extends beyond the end of requested region
	 */
	if ((base != rec->set_ripas.addr) || (top > rec->set_ripas.top)) {
		res->x[0] = RMI_ERROR_INPUT;
		goto out_unmap_rec;
	}

	rd = granule_map(g_rd, SLOT_RD);

	/*
	 * At this point, we know base == rec->set_ripas.addr
	 * and thus must be aligned to GRANULE size.
	 */
	assert(validate_map_addr(base, RTT_PAGE_LEVEL, rd));

	if (!validate_map_addr(top, RTT_PAGE_LEVEL, rd)) {
		res->x[0] = RMI_ERROR_INPUT;
		goto out_unmap_rd;
	}

	g_rtt_root = rd->s2_ctx.g_rtt;
	sl = realm_rtt_starting_level(rd);
	ipa_bits = realm_ipa_bits(rd);
	s2_ctx = rd->s2_ctx;

	granule_lock(g_rtt_root, GRANULE_STATE_RTT);

	/* Walk to the deepest level possible */
	rtt_walk_lock_unlock(g_rtt_root, sl, ipa_bits,
			     base, RTT_PAGE_LEVEL, &wi);

	s2tt = granule_map(wi.g_llt, SLOT_RTT);

	/* Apply the change across the leaf table; result goes into *res */
	rtt_set_ripas_range(&s2_ctx, s2tt, base, top, &wi,
			    ripas_val, change_destroyed, res);

	/* Record progress so the next call resumes where this one stopped */
	if (res->x[0] == RMI_SUCCESS) {
		rec->set_ripas.addr = res->x[1];
	}

	buffer_unmap(s2tt);
	granule_unlock(wi.g_llt);
out_unmap_rd:
	buffer_unmap(rd);
out_unmap_rec:
	buffer_unmap(rec);
out_unlock_rec_rd:
	granule_unlock(g_rec);
	granule_unlock(g_rd);
}