/*
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * SPDX-FileCopyrightText: Copyright TF-RMM Contributors.
 */

#include <buffer.h>
#include <granule.h>
#include <measurement.h>
#include <realm.h>
#include <ripas.h>
#include <smc-handler.h>
#include <smc-rmi.h>
#include <smc.h>
#include <stddef.h>
#include <string.h>
#include <table.h>

/*
 * Validate the map_addr value passed to RMI_RTT_* and RMI_DATA_* commands.
 */
static bool validate_map_addr(unsigned long map_addr,
			      unsigned long level,
			      struct rd *rd)
{
	if (map_addr >= realm_ipa_size(rd)) {
		return false;
	}
	if (!addr_is_level_aligned(map_addr, level)) {
		return false;
	}
	return true;
}

/*
 * Structure commands can operate on all RTTs except for the root RTT, so
 * the minimum valid level is the stage 2 starting level + 1.
 */
static bool validate_rtt_structure_cmds(unsigned long map_addr,
					long level,
					struct rd *rd)
{
	int min_level = realm_rtt_starting_level(rd) + 1;

	if ((level < min_level) || (level > RTT_PAGE_LEVEL)) {
		return false;
	}
	return validate_map_addr(map_addr, level, rd);
}

/*
 * Map/Unmap commands can operate up to a level 2 block entry, so the
 * minimum valid level is RTT_MIN_BLOCK_LEVEL (the level of the largest
 * supported block size).
 */
static bool validate_rtt_map_cmds(unsigned long map_addr,
				  long level,
				  struct rd *rd)
{
	if ((level < RTT_MIN_BLOCK_LEVEL) || (level > RTT_PAGE_LEVEL)) {
		return false;
	}
	return validate_map_addr(map_addr, level, rd);
}

/*
 * Entry commands can operate on any entry, so the minimum valid level is
 * the stage 2 starting level.
 */
static bool validate_rtt_entry_cmds(unsigned long map_addr,
				    long level,
				    struct rd *rd)
{
	if ((level < realm_rtt_starting_level(rd)) ||
	    (level > RTT_PAGE_LEVEL)) {
		return false;
	}
	return validate_map_addr(map_addr, level, rd);
}

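/*
 * Implements RTT.Create.
 *
 * Transitions the DELEGATED granule at 'rtt_addr' into a new RTT mapping
 * 'map_addr' at 'ulevel', and installs it as a table entry in the parent
 * RTT. A block or page entry found in the parent is unfolded into the
 * new table.
 */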
unsigned long smc_rtt_create(unsigned long rd_addr,
			     unsigned long rtt_addr,
			     unsigned long map_addr,
			     unsigned long ulevel)
{
	struct granule *g_rd;
	struct granule *g_tbl;
	struct rd *rd;
	struct granule *g_table_root;
	struct rtt_walk wi;
	unsigned long *s2tt, *parent_s2tt, parent_s2tte;
	long level = (long)ulevel;
	unsigned long ipa_bits;
	unsigned long ret;
	struct realm_s2_context s2_ctx;
	int sl;

	if (!find_lock_two_granules(rtt_addr,
				    GRANULE_STATE_DELEGATED,
				    &g_tbl,
				    rd_addr,
				    GRANULE_STATE_RD,
				    &g_rd)) {
		return RMI_ERROR_INPUT;
	}

	rd = granule_map(g_rd, SLOT_RD);

	if (!validate_rtt_structure_cmds(map_addr, level, rd)) {
		buffer_unmap(rd);
		granule_unlock(g_rd);
		granule_unlock(g_tbl);
		return RMI_ERROR_INPUT;
	}

	g_table_root = rd->s2_ctx.g_rtt;
	sl = realm_rtt_starting_level(rd);
	ipa_bits = realm_ipa_bits(rd);
	s2_ctx = rd->s2_ctx;
	buffer_unmap(rd);

	/*
	 * Lock the RTT root. Enforcing the locking order RD->RTT is enough
	 * to guarantee deadlock-free locking.
	 */
	granule_lock(g_table_root, GRANULE_STATE_RTT);

	/* Unlock the RD after locking the RTT root */
	granule_unlock(g_rd);

	rtt_walk_lock_unlock(g_table_root, sl, ipa_bits,
			     map_addr, level - 1L, &wi);
	if (wi.last_level != level - 1L) {
		ret = pack_return_code(RMI_ERROR_RTT, wi.last_level);
		goto out_unlock_llt;
	}

	parent_s2tt = granule_map(wi.g_llt, SLOT_RTT);
	parent_s2tte = s2tte_read(&parent_s2tt[wi.index]);
	s2tt = granule_map(g_tbl, SLOT_DELEGATED);

	if (s2tte_is_unassigned(parent_s2tte)) {
		enum ripas ripas = s2tte_get_ripas(parent_s2tte);

		s2tt_init_unassigned(s2tt, ripas);

		/*
		 * Increase the refcount of the parent. The granule was
		 * locked during the table walk with hand-over-hand locking.
		 * Atomicity and acquire/release semantics are not required
		 * because the table is always accessed while locked.
		 */
		__granule_get(wi.g_llt);

	} else if (s2tte_is_unassigned_ns(parent_s2tte)) {
		s2tt_init_unassigned_ns(s2tt);
		__granule_get(wi.g_llt);

	} else if (s2tte_is_destroyed(parent_s2tte)) {
		s2tt_init_destroyed(s2tt);
		__granule_get(wi.g_llt);

	} else if (s2tte_is_assigned_empty(parent_s2tte, level - 1L)) {
		unsigned long block_pa;

		/*
		 * We should observe a parent assigned s2tte only when
		 * we create tables above this level.
		 */
		assert(level > RTT_MIN_BLOCK_LEVEL);

		block_pa = s2tte_pa(parent_s2tte, level - 1L);

		s2tt_init_assigned_empty(s2tt, block_pa, level);

		/*
		 * Increase the refcount to mark the granule as in-use. The
		 * refcount is incremented by S2TTES_PER_S2TT (ref RTT
		 * unfolding).
		 */
		__granule_refcount_inc(g_tbl, S2TTES_PER_S2TT);

	} else if (s2tte_is_assigned_ram(parent_s2tte, level - 1L)) {
		unsigned long block_pa;

		/*
		 * We should observe a parent valid s2tte only when
		 * we create tables above this level.
		 */
		assert(level > RTT_MIN_BLOCK_LEVEL);

		/*
		 * Break before make. This may cause spurious S2 aborts.
		 */
		s2tte_write(&parent_s2tt[wi.index], 0UL);
		invalidate_block(&s2_ctx, map_addr);

		block_pa = s2tte_pa(parent_s2tte, level - 1L);

		s2tt_init_assigned_ram(s2tt, block_pa, level);

		/*
		 * Increase the refcount to mark the granule as in-use. The
		 * refcount is incremented by S2TTES_PER_S2TT (ref RTT
		 * unfolding).
		 */
		__granule_refcount_inc(g_tbl, S2TTES_PER_S2TT);

	} else if (s2tte_is_assigned_ns(parent_s2tte, level - 1L)) {
		unsigned long block_pa;

		/*
		 * We should observe a parent assigned_ns s2tte only when
		 * we create tables above this level.
		 */
		assert(level > RTT_MIN_BLOCK_LEVEL);

		/*
		 * Break before make. This may cause spurious S2 aborts.
		 */
		s2tte_write(&parent_s2tt[wi.index], 0UL);
		invalidate_block(&s2_ctx, map_addr);

		block_pa = s2tte_pa(parent_s2tte, level - 1L);

		s2tt_init_assigned_ns(s2tt, block_pa, level);

		/*
		 * Increase the refcount to mark the granule as in-use. The
		 * refcount is incremented by S2TTES_PER_S2TT (ref RTT
		 * unfolding).
		 */
		__granule_refcount_inc(g_tbl, S2TTES_PER_S2TT);

	} else if (s2tte_is_table(parent_s2tte, level - 1L)) {
		ret = pack_return_code(RMI_ERROR_RTT,
				       (unsigned int)(level - 1L));
		goto out_unmap_table;

	} else {
		assert(false);
	}

	ret = RMI_SUCCESS;

	granule_set_state(g_tbl, GRANULE_STATE_RTT);

	parent_s2tte = s2tte_create_table(rtt_addr, level - 1L);
	s2tte_write(&parent_s2tt[wi.index], parent_s2tte);

out_unmap_table:
	buffer_unmap(s2tt);
	buffer_unmap(parent_s2tt);
out_unlock_llt:
	granule_unlock(wi.g_llt);
	granule_unlock(g_tbl);
	return ret;
}

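/*
 * Implements RTT.Fold.
 *
 * Folds the homogeneous RTT at 'rtt_addr', mapped at (map_addr, ulevel),
 * back into a single entry in its parent RTT and returns the folded
 * granule to the DELEGATED state.
 */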
unsigned long smc_rtt_fold(unsigned long rtt_addr,
			   unsigned long rd_addr,
			   unsigned long map_addr,
			   unsigned long ulevel)
{
	struct granule *g_rd;
	struct granule *g_tbl;
	struct rd *rd;
	struct granule *g_table_root;
	struct rtt_walk wi;
	unsigned long *table, *parent_s2tt, parent_s2tte;
	long level = (long)ulevel;
	unsigned long ipa_bits;
	unsigned long ret;
	struct realm_s2_context s2_ctx;
	int sl;
	enum ripas ripas;

	g_rd = find_lock_granule(rd_addr, GRANULE_STATE_RD);
	if (g_rd == NULL) {
		return RMI_ERROR_INPUT;
	}

	rd = granule_map(g_rd, SLOT_RD);

	if (!validate_rtt_structure_cmds(map_addr, level, rd)) {
		buffer_unmap(rd);
		granule_unlock(g_rd);
		return RMI_ERROR_INPUT;
	}

	g_table_root = rd->s2_ctx.g_rtt;
	sl = realm_rtt_starting_level(rd);
	ipa_bits = realm_ipa_bits(rd);
	s2_ctx = rd->s2_ctx;
	buffer_unmap(rd);
	granule_lock(g_table_root, GRANULE_STATE_RTT);
	granule_unlock(g_rd);

	rtt_walk_lock_unlock(g_table_root, sl, ipa_bits,
			     map_addr, level - 1L, &wi);
	if (wi.last_level != level - 1L) {
		ret = pack_return_code(RMI_ERROR_RTT, wi.last_level);
		goto out_unlock_parent_table;
	}

	parent_s2tt = granule_map(wi.g_llt, SLOT_RTT);
	parent_s2tte = s2tte_read(&parent_s2tt[wi.index]);
	if (!s2tte_is_table(parent_s2tte, level - 1L)) {
		ret = pack_return_code(RMI_ERROR_RTT,
				       (unsigned int)(level - 1L));
		goto out_unmap_parent_table;
	}

	/*
	 * Check that the 'rtt_addr' RTT is used at (map_addr, level).
	 * Note that this also verifies that rtt_addr is properly aligned.
	 */
	if (rtt_addr != s2tte_pa_table(parent_s2tte, level - 1L)) {
		ret = pack_return_code(RMI_ERROR_RTT,
				       (unsigned int)(level - 1L));
		goto out_unmap_parent_table;
	}

	g_tbl = find_lock_granule(rtt_addr, GRANULE_STATE_RTT);

	/*
	 * A table descriptor S2TTE always points to a TABLE granule.
	 */
	assert(g_tbl);

	table = granule_map(g_tbl, SLOT_RTT2);

	/*
	 * The command can succeed only if all 512 S2TTEs are of the same
	 * type. Check the table's reference counter first to speed up the
	 * case where the host merely guesses that a memory region can be
	 * folded.
	 */
	if (g_tbl->refcount == 0UL) {
		if (table_is_destroyed_block(table)) {
			parent_s2tte = s2tte_create_destroyed();
			__granule_put(wi.g_llt);

		} else if (table_is_unassigned_block(table, &ripas)) {
			parent_s2tte = s2tte_create_unassigned(ripas);
			__granule_put(wi.g_llt);
		} else if (table_is_unassigned_ns_block(table)) {
			parent_s2tte = s2tte_create_unassigned_ns();
			__granule_put(wi.g_llt);
		} else {
			/*
			 * The table holds a mixture of destroyed and
			 * unassigned entries.
			 */
			ret = pack_return_code(RMI_ERROR_RTT, level);
			goto out_unmap_table;
		}

	} else if (g_tbl->refcount == S2TTES_PER_S2TT) {
		unsigned long s2tte, block_pa;

		/*
		 * The RMM specification does not allow creating block
		 * entries at a level lower than RTT_MIN_BLOCK_LEVEL, even
		 * though this is permitted by the Arm Architecture. Hence
		 * ensure that the table being folded is at a level higher
		 * than RTT_MIN_BLOCK_LEVEL.
		 *
		 * A fully populated table cannot be destroyed if that
		 * would create a block mapping below RTT_MIN_BLOCK_LEVEL.
		 */
		if (level <= RTT_MIN_BLOCK_LEVEL) {
			ret = pack_return_code(RMI_ERROR_RTT, wi.last_level);
			goto out_unmap_table;
		}

		s2tte = s2tte_read(&table[0]);
		block_pa = s2tte_pa(s2tte, level);

		/*
		 * The table must also refer to a contiguous block through
		 * the same type of s2tte: Assigned_Empty, Assigned_RAM or
		 * Assigned_NS.
		 */
		if (table_maps_assigned_empty_block(table, level)) {
			parent_s2tte = s2tte_create_assigned_empty(block_pa,
								   level - 1L);
		} else if (table_maps_assigned_ram_block(table, level)) {
			parent_s2tte = s2tte_create_assigned_ram(block_pa,
								 level - 1L);
		} else if (table_maps_assigned_ns_block(table, level)) {
			parent_s2tte = s2tte_create_assigned_ns(block_pa,
								level - 1L);
		} else {
			/* The table contains mixed entries that cannot be folded */
			ret = pack_return_code(RMI_ERROR_RTT, level);
			goto out_unmap_table;
		}

		__granule_refcount_dec(g_tbl, S2TTES_PER_S2TT);
	} else {
		/*
		 * The table holds a mixture of different types of s2ttes.
		 */
		ret = pack_return_code(RMI_ERROR_RTT, level);
		goto out_unmap_table;
	}

	ret = RMI_SUCCESS;

	/*
	 * Break before make.
	 */
	s2tte_write(&parent_s2tt[wi.index], 0UL);

	if (s2tte_is_assigned_ram(parent_s2tte, level - 1L) ||
	    s2tte_is_assigned_ns(parent_s2tte, level - 1L)) {
		invalidate_pages_in_block(&s2_ctx, map_addr);
	} else {
		invalidate_block(&s2_ctx, map_addr);
	}

	s2tte_write(&parent_s2tt[wi.index], parent_s2tte);

	granule_memzero_mapped(table);
	granule_set_state(g_tbl, GRANULE_STATE_DELEGATED);

out_unmap_table:
	buffer_unmap(table);
	granule_unlock(g_tbl);
out_unmap_parent_table:
	buffer_unmap(parent_s2tt);
out_unlock_parent_table:
	granule_unlock(wi.g_llt);
	return ret;
}

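/*
 * Implements RTT.Destroy.
 *
 * Removes the unused RTT at 'rtt_addr', mapped at (map_addr, ulevel),
 * from its parent RTT and returns the granule to the DELEGATED state.
 */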
unsigned long smc_rtt_destroy(unsigned long rtt_addr,
			      unsigned long rd_addr,
			      unsigned long map_addr,
			      unsigned long ulevel)
{
	struct granule *g_rd;
	struct granule *g_tbl;
	struct rd *rd;
	struct granule *g_table_root;
	struct rtt_walk wi;
	unsigned long *table, *parent_s2tt, parent_s2tte;
	long level = (long)ulevel;
	unsigned long ipa_bits;
	unsigned long ret;
	struct realm_s2_context s2_ctx;
	int sl;
	bool in_par;

	g_rd = find_lock_granule(rd_addr, GRANULE_STATE_RD);
	if (g_rd == NULL) {
		return RMI_ERROR_INPUT;
	}

	rd = granule_map(g_rd, SLOT_RD);

	if (!validate_rtt_structure_cmds(map_addr, level, rd)) {
		buffer_unmap(rd);
		granule_unlock(g_rd);
		return RMI_ERROR_INPUT;
	}

	g_table_root = rd->s2_ctx.g_rtt;
	sl = realm_rtt_starting_level(rd);
	ipa_bits = realm_ipa_bits(rd);
	s2_ctx = rd->s2_ctx;
	in_par = addr_in_par(rd, map_addr);
	buffer_unmap(rd);
	granule_lock(g_table_root, GRANULE_STATE_RTT);
	granule_unlock(g_rd);

	rtt_walk_lock_unlock(g_table_root, sl, ipa_bits,
			     map_addr, level - 1L, &wi);
	if (wi.last_level != level - 1L) {
		ret = pack_return_code(RMI_ERROR_RTT, wi.last_level);
		goto out_unlock_parent_table;
	}

	parent_s2tt = granule_map(wi.g_llt, SLOT_RTT);
	parent_s2tte = s2tte_read(&parent_s2tt[wi.index]);
	if (!s2tte_is_table(parent_s2tte, level - 1L)) {
		ret = pack_return_code(RMI_ERROR_RTT,
				       (unsigned int)(level - 1L));
		goto out_unmap_parent_table;
	}

	/*
	 * Check that the 'rtt_addr' RTT is used at (map_addr, level).
	 * Note that this also verifies that rtt_addr is properly aligned.
	 */
	if (rtt_addr != s2tte_pa_table(parent_s2tte, level - 1L)) {
		ret = RMI_ERROR_INPUT;
		goto out_unmap_parent_table;
	}

	/*
	 * Lock the RTT granule. 'rtt_addr' has been verified above, so it
	 * can be treated as an internal granule.
	 */
	g_tbl = find_lock_granule(rtt_addr, GRANULE_STATE_RTT);

	/*
	 * A table descriptor S2TTE always points to a TABLE granule.
	 */
	assert(g_tbl != NULL);

	/*
	 * Read the refcount value. The RTT granule is always accessed
	 * locked, thus the refcount can be read without atomic operations.
	 */
	if (g_tbl->refcount != 0UL) {
		ret = pack_return_code(RMI_ERROR_RTT, level);
		goto out_unlock_table;
	}

	ret = RMI_SUCCESS;

	table = granule_map(g_tbl, SLOT_RTT2);

	if (in_par) {
		parent_s2tte = s2tte_create_destroyed();
	} else {
		parent_s2tte = s2tte_create_unassigned_ns();
	}

	__granule_put(wi.g_llt);

	/*
	 * Break before make. Note that this may cause spurious S2 aborts.
	 */
	s2tte_write(&parent_s2tt[wi.index], 0UL);
	invalidate_block(&s2_ctx, map_addr);
	s2tte_write(&parent_s2tt[wi.index], parent_s2tte);

	granule_memzero_mapped(table);
	granule_set_state(g_tbl, GRANULE_STATE_DELEGATED);

	buffer_unmap(table);
out_unlock_table:
	granule_unlock(g_tbl);
out_unmap_parent_table:
	buffer_unmap(parent_s2tt);
out_unlock_parent_table:
	granule_unlock(wi.g_llt);
	return ret;
}

enum map_unmap_ns_op {
	MAP_NS,
	UNMAP_NS
};

/*
 * We don't hold a reference on the NS granule when it is mapped into a
 * realm. Instead we rely on the guarantees provided by the architecture
 * to ensure that an NS access to a protected granule is prohibited even
 * within the realm.
 */
static unsigned long map_unmap_ns(unsigned long rd_addr,
				  unsigned long map_addr,
				  long level,
				  unsigned long host_s2tte,
				  enum map_unmap_ns_op op)
{
	struct granule *g_rd;
	struct rd *rd;
	struct granule *g_table_root;
	unsigned long *s2tt, s2tte;
	struct rtt_walk wi;
	unsigned long ipa_bits;
	unsigned long ret;
	struct realm_s2_context s2_ctx;
	int sl;

	g_rd = find_lock_granule(rd_addr, GRANULE_STATE_RD);
	if (g_rd == NULL) {
		return RMI_ERROR_INPUT;
	}

	rd = granule_map(g_rd, SLOT_RD);

	if (!validate_rtt_map_cmds(map_addr, level, rd)) {
		buffer_unmap(rd);
		granule_unlock(g_rd);
		return RMI_ERROR_INPUT;
	}

	g_table_root = rd->s2_ctx.g_rtt;
	sl = realm_rtt_starting_level(rd);
	ipa_bits = realm_ipa_bits(rd);

	/* map_addr must be outside the PAR */
	if (addr_in_par(rd, map_addr)) {
		buffer_unmap(rd);
		granule_unlock(g_rd);
		return RMI_ERROR_INPUT;
	}

	s2_ctx = rd->s2_ctx;
	buffer_unmap(rd);

	granule_lock(g_table_root, GRANULE_STATE_RTT);
	granule_unlock(g_rd);

	rtt_walk_lock_unlock(g_table_root, sl, ipa_bits,
			     map_addr, level, &wi);
	if (wi.last_level != level) {
		ret = pack_return_code(RMI_ERROR_RTT, wi.last_level);
		goto out_unlock_llt;
	}

	s2tt = granule_map(wi.g_llt, SLOT_RTT);
	s2tte = s2tte_read(&s2tt[wi.index]);

	if (op == MAP_NS) {
		if (!s2tte_is_unassigned_ns(s2tte)) {
			ret = pack_return_code(RMI_ERROR_RTT,
					       (unsigned int)level);
			goto out_unmap_table;
		}

		s2tte = s2tte_create_assigned_ns(host_s2tte, level);
		s2tte_write(&s2tt[wi.index], s2tte);
		__granule_get(wi.g_llt);

	} else if (op == UNMAP_NS) {
		/*
		 * The following check also verifies that map_addr is
		 * outside the PAR, as an assigned_ns s2tte may only cover
		 * an IPA range outside the PAR.
		 */
		if (!s2tte_is_assigned_ns(s2tte, level)) {
			ret = pack_return_code(RMI_ERROR_RTT,
					       (unsigned int)level);
			goto out_unmap_table;
		}

		s2tte = s2tte_create_unassigned_ns();
		s2tte_write(&s2tt[wi.index], s2tte);
		__granule_put(wi.g_llt);
		if (level == RTT_PAGE_LEVEL) {
			invalidate_page(&s2_ctx, map_addr);
		} else {
			invalidate_block(&s2_ctx, map_addr);
		}
	}

	ret = RMI_SUCCESS;

out_unmap_table:
	buffer_unmap(s2tt);
out_unlock_llt:
	granule_unlock(wi.g_llt);
	return ret;
}

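/*
 * Implements RTT.MapUnprotected.
 *
 * Creates an assigned_ns mapping at (map_addr, ulevel) from the
 * host-provided 's2tte', provided host_ns_s2tte_is_valid() accepts it.
 */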
unsigned long smc_rtt_map_unprotected(unsigned long rd_addr,
				      unsigned long map_addr,
				      unsigned long ulevel,
				      unsigned long s2tte)
{
	long level = (long)ulevel;

	if (!host_ns_s2tte_is_valid(s2tte, level)) {
		return RMI_ERROR_INPUT;
	}

	return map_unmap_ns(rd_addr, map_addr, level, s2tte, MAP_NS);
}

unsigned long smc_rtt_unmap_unprotected(unsigned long rd_addr,
					unsigned long map_addr,
					unsigned long ulevel)
{
	return map_unmap_ns(rd_addr, map_addr, (long)ulevel, 0UL, UNMAP_NS);
}

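/*
 * Implements RTT.ReadEntry.
 *
 * Reports the walk results through 'ret': the reached level in x[1], the
 * entry state in x[2], the output address (or host S2TTE) in x[3] and the
 * RIPAS in x[4].
 */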
void smc_rtt_read_entry(unsigned long rd_addr,
			unsigned long map_addr,
			unsigned long ulevel,
			struct smc_result *ret)
{
	struct granule *g_rd, *g_rtt_root;
	struct rd *rd;
	struct rtt_walk wi;
	unsigned long *s2tt, s2tte;
	unsigned long ipa_bits;
	long level = (long)ulevel;
	int sl;

	g_rd = find_lock_granule(rd_addr, GRANULE_STATE_RD);
	if (g_rd == NULL) {
		ret->x[0] = RMI_ERROR_INPUT;
		return;
	}

	rd = granule_map(g_rd, SLOT_RD);

	if (!validate_rtt_entry_cmds(map_addr, level, rd)) {
		buffer_unmap(rd);
		granule_unlock(g_rd);
		ret->x[0] = RMI_ERROR_INPUT;
		return;
	}

	g_rtt_root = rd->s2_ctx.g_rtt;
	sl = realm_rtt_starting_level(rd);
	ipa_bits = realm_ipa_bits(rd);
	buffer_unmap(rd);

	granule_lock(g_rtt_root, GRANULE_STATE_RTT);
	granule_unlock(g_rd);

	rtt_walk_lock_unlock(g_rtt_root, sl, ipa_bits,
			     map_addr, level, &wi);
	s2tt = granule_map(wi.g_llt, SLOT_RTT);
	s2tte = s2tte_read(&s2tt[wi.index]);
	ret->x[1] = wi.last_level;
	ret->x[3] = 0UL;
	ret->x[4] = 0UL;

	if (s2tte_is_unassigned(s2tte)) {
		enum ripas ripas = s2tte_get_ripas(s2tte);

		ret->x[2] = RMI_UNASSIGNED;
		ret->x[4] = (unsigned long)ripas;
	} else if (s2tte_is_destroyed(s2tte)) {
		ret->x[2] = RMI_DESTROYED;
	} else if (s2tte_is_assigned_empty(s2tte, wi.last_level)) {
		ret->x[2] = RMI_ASSIGNED;
		ret->x[3] = s2tte_pa(s2tte, wi.last_level);
		ret->x[4] = RIPAS_EMPTY;
	} else if (s2tte_is_assigned_ram(s2tte, wi.last_level)) {
		ret->x[2] = RMI_ASSIGNED;
		ret->x[3] = s2tte_pa(s2tte, wi.last_level);
		ret->x[4] = RIPAS_RAM;
	} else if (s2tte_is_unassigned_ns(s2tte)) {
		ret->x[2] = RMI_UNASSIGNED;
		ret->x[4] = RIPAS_EMPTY;
	} else if (s2tte_is_assigned_ns(s2tte, wi.last_level)) {
		ret->x[2] = RMI_VALID_NS;
		ret->x[3] = host_ns_s2tte(s2tte, wi.last_level);
	} else if (s2tte_is_table(s2tte, wi.last_level)) {
		ret->x[2] = RMI_TABLE;
		ret->x[3] = s2tte_pa_table(s2tte, wi.last_level);
	} else {
		assert(false);
	}

	buffer_unmap(s2tt);
	granule_unlock(wi.g_llt);

	ret->x[0] = RMI_SUCCESS;
}

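/*
 * Extends the RIM with a measurement of the data granule mapped at 'ipa'.
 * The granule contents are hashed only when 'flags' requests it.
 */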
static void data_granule_measure(struct rd *rd, void *data,
				 unsigned long ipa,
				 unsigned long flags)
{
	struct measurement_desc_data measure_desc = {0};

	/* Initialize the measurement descriptor structure */
	measure_desc.desc_type = MEASURE_DESC_TYPE_DATA;
	measure_desc.len = sizeof(struct measurement_desc_data);
	measure_desc.ipa = ipa;
	measure_desc.flags = flags;
	memcpy(measure_desc.rim,
	       &rd->measurement[RIM_MEASUREMENT_SLOT],
	       measurement_get_size(rd->algorithm));

	if (flags == RMI_MEASURE_CONTENT) {
		/*
		 * Hash the data granule and store the result in the
		 * measurement descriptor structure.
		 */
		measurement_hash_compute(rd->algorithm,
					 data,
					 GRANULE_SIZE,
					 measure_desc.content);
	}

	/*
	 * Hash the measurement descriptor structure; the result is the
	 * updated RIM.
	 */
	measurement_hash_compute(rd->algorithm,
				 &measure_desc,
				 sizeof(measure_desc),
				 rd->measurement[RIM_MEASUREMENT_SLOT]);
}

static unsigned long validate_data_create_unknown(unsigned long map_addr,
						  struct rd *rd)
{
	if (!addr_in_par(rd, map_addr)) {
		return RMI_ERROR_INPUT;
	}

	if (!validate_map_addr(map_addr, RTT_PAGE_LEVEL, rd)) {
		return RMI_ERROR_INPUT;
	}

	return RMI_SUCCESS;
}

static unsigned long validate_data_create(unsigned long map_addr,
					  struct rd *rd)
{
	if (get_rd_state_locked(rd) != REALM_STATE_NEW) {
		return RMI_ERROR_REALM;
	}

	return validate_data_create_unknown(map_addr, rd);
}

/*
 * Implements both Data.Create and Data.CreateUnknown.
 *
 * If @g_src == NULL, this implements Data.CreateUnknown;
 * otherwise it implements Data.Create.
 */
static unsigned long data_create(unsigned long rd_addr,
				 unsigned long data_addr,
				 unsigned long map_addr,
				 struct granule *g_src,
				 unsigned long flags)
{
	struct granule *g_data;
	struct granule *g_rd;
	struct granule *g_table_root;
	struct rd *rd;
	struct rtt_walk wi;
	unsigned long s2tte, *s2tt;
	enum ripas ripas;
	enum granule_state new_data_state = GRANULE_STATE_DELEGATED;
	unsigned long ipa_bits;
	unsigned long ret;
	int __unused meas_ret;
	int sl;

	if (!find_lock_two_granules(data_addr,
				    GRANULE_STATE_DELEGATED,
				    &g_data,
				    rd_addr,
				    GRANULE_STATE_RD,
				    &g_rd)) {
		return RMI_ERROR_INPUT;
	}

	rd = granule_map(g_rd, SLOT_RD);

	ret = (g_src != NULL) ?
		validate_data_create(map_addr, rd) :
		validate_data_create_unknown(map_addr, rd);

	if (ret != RMI_SUCCESS) {
		goto out_unmap_rd;
	}

	g_table_root = rd->s2_ctx.g_rtt;
	sl = realm_rtt_starting_level(rd);
	ipa_bits = realm_ipa_bits(rd);
	granule_lock(g_table_root, GRANULE_STATE_RTT);
	rtt_walk_lock_unlock(g_table_root, sl, ipa_bits,
			     map_addr, RTT_PAGE_LEVEL, &wi);
	if (wi.last_level != RTT_PAGE_LEVEL) {
		ret = pack_return_code(RMI_ERROR_RTT, wi.last_level);
		goto out_unlock_ll_table;
	}

	s2tt = granule_map(wi.g_llt, SLOT_RTT);
	s2tte = s2tte_read(&s2tt[wi.index]);
	if (!s2tte_is_unassigned(s2tte)) {
		ret = pack_return_code(RMI_ERROR_RTT, RTT_PAGE_LEVEL);
		goto out_unmap_ll_table;
	}

	ripas = s2tte_get_ripas(s2tte);

	if (g_src != NULL) {
		bool ns_access_ok;
		void *data = granule_map(g_data, SLOT_DELEGATED);

		ns_access_ok = ns_buffer_read(SLOT_NS, g_src, 0U,
					      GRANULE_SIZE, data);

		if (!ns_access_ok) {
			/*
			 * Some data may have been copied before the
			 * failure. Zero the g_data granule as it will
			 * remain in the delegated state.
			 */
			(void)memset(data, 0, GRANULE_SIZE);
			buffer_unmap(data);
			ret = RMI_ERROR_INPUT;
			goto out_unmap_ll_table;
		}

		data_granule_measure(rd, data, map_addr, flags);

		buffer_unmap(data);
	}

	new_data_state = GRANULE_STATE_DATA;

	s2tte = (ripas == RIPAS_EMPTY) ?
		s2tte_create_assigned_empty(data_addr, RTT_PAGE_LEVEL) :
		s2tte_create_assigned_ram(data_addr, RTT_PAGE_LEVEL);

	s2tte_write(&s2tt[wi.index], s2tte);
	__granule_get(wi.g_llt);

	ret = RMI_SUCCESS;

out_unmap_ll_table:
	buffer_unmap(s2tt);
out_unlock_ll_table:
	granule_unlock(wi.g_llt);
out_unmap_rd:
	buffer_unmap(rd);
	granule_unlock(g_rd);
	granule_unlock_transition(g_data, new_data_state);
	return ret;
}

unsigned long smc_data_create(unsigned long rd_addr,
			      unsigned long data_addr,
			      unsigned long map_addr,
			      unsigned long src_addr,
			      unsigned long flags)
{
	struct granule *g_src;

	if (flags != RMI_NO_MEASURE_CONTENT && flags != RMI_MEASURE_CONTENT) {
		return RMI_ERROR_INPUT;
	}

	g_src = find_granule(src_addr);
	if ((g_src == NULL) || (g_src->state != GRANULE_STATE_NS)) {
		return RMI_ERROR_INPUT;
	}

	return data_create(rd_addr, data_addr, map_addr, g_src, flags);
}

unsigned long smc_data_create_unknown(unsigned long rd_addr,
				      unsigned long data_addr,
				      unsigned long map_addr)
{
	return data_create(rd_addr, data_addr, map_addr, NULL, 0);
}

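/*
 * Implements Data.Destroy.
 *
 * Unmaps the DATA granule at 'map_addr', scrubs its contents and returns
 * it to the DELEGATED state. The RTT entry transitions to DESTROYED for a
 * valid mapping, or to UNASSIGNED when HIPAS=ASSIGNED.
 */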
unsigned long smc_data_destroy(unsigned long rd_addr,
			       unsigned long map_addr)
{
	struct granule *g_data;
	struct granule *g_rd;
	struct granule *g_table_root;
	struct rtt_walk wi;
	unsigned long data_addr, s2tte, *s2tt;
	struct rd *rd;
	unsigned long ipa_bits;
	unsigned long ret;
	struct realm_s2_context s2_ctx;
	bool valid;
	int sl;

	g_rd = find_lock_granule(rd_addr, GRANULE_STATE_RD);
	if (g_rd == NULL) {
		return RMI_ERROR_INPUT;
	}

	rd = granule_map(g_rd, SLOT_RD);

	if (!validate_map_addr(map_addr, RTT_PAGE_LEVEL, rd)) {
		buffer_unmap(rd);
		granule_unlock(g_rd);
		return RMI_ERROR_INPUT;
	}

	g_table_root = rd->s2_ctx.g_rtt;
	sl = realm_rtt_starting_level(rd);
	ipa_bits = realm_ipa_bits(rd);
	s2_ctx = rd->s2_ctx;
	buffer_unmap(rd);

	granule_lock(g_table_root, GRANULE_STATE_RTT);
	granule_unlock(g_rd);

	rtt_walk_lock_unlock(g_table_root, sl, ipa_bits,
			     map_addr, RTT_PAGE_LEVEL, &wi);
	if (wi.last_level != RTT_PAGE_LEVEL) {
		ret = pack_return_code(RMI_ERROR_RTT, wi.last_level);
		goto out_unlock_ll_table;
	}

	s2tt = granule_map(wi.g_llt, SLOT_RTT);
	s2tte = s2tte_read(&s2tt[wi.index]);

	valid = s2tte_is_assigned_ram(s2tte, RTT_PAGE_LEVEL);

	/*
	 * The entry must be either a valid (RAM) mapping or have
	 * HIPAS=ASSIGNED with RIPAS=EMPTY.
	 */
	if (!valid && !s2tte_is_assigned_empty(s2tte, RTT_PAGE_LEVEL)) {
		ret = pack_return_code(RMI_ERROR_RTT, RTT_PAGE_LEVEL);
		goto out_unmap_ll_table;
	}

	data_addr = s2tte_pa(s2tte, RTT_PAGE_LEVEL);

	/*
	 * We have already established either HIPAS=ASSIGNED or a valid
	 * mapping. If valid, transition HIPAS to DESTROYED; if
	 * HIPAS=ASSIGNED, transition to UNASSIGNED.
	 */
	s2tte = valid ? s2tte_create_destroyed() :
			s2tte_create_unassigned(RIPAS_EMPTY);

	s2tte_write(&s2tt[wi.index], s2tte);

	if (valid) {
		invalidate_page(&s2_ctx, map_addr);
	}

	__granule_put(wi.g_llt);

	/*
	 * Lock the data granule and check the expected state. Correct
	 * locking order is guaranteed because the granule address was
	 * obtained from a locked granule by table walk. This lock needs to
	 * be acquired before a state transition to or from
	 * GRANULE_STATE_DATA for the granule address can happen.
	 */
	g_data = find_lock_granule(data_addr, GRANULE_STATE_DATA);
	assert(g_data);
	granule_memzero(g_data, SLOT_DELEGATED);
	granule_unlock_transition(g_data, GRANULE_STATE_DELEGATED);

	ret = RMI_SUCCESS;

out_unmap_ll_table:
	buffer_unmap(s2tt);
out_unlock_ll_table:
	granule_unlock(wi.g_llt);

	return ret;
}

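/*
 * Updates the RIPAS encoded in '*s2tte' to 'ripas' where the entry type
 * allows it. Returns false for table entries and for any other entry
 * type that cannot carry a RIPAS.
 */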
static bool update_ripas(unsigned long *s2tte, unsigned long level,
			 enum ripas ripas)
{
	if (s2tte_is_table(*s2tte, level)) {
		return false;
	}

	if (s2tte_is_assigned_ram(*s2tte, level)) {
		if (ripas == RIPAS_EMPTY) {
			unsigned long pa = s2tte_pa(*s2tte, level);
			*s2tte = s2tte_create_assigned_empty(pa, level);
		}
		return true;
	}

	if (s2tte_is_unassigned(*s2tte) ||
	    s2tte_is_assigned_empty(*s2tte, level)) {
		*s2tte |= s2tte_create_ripas(ripas);
		return true;
	}

	return false;
}

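/*
 * Extends the RIM with a measurement descriptor recording the RIPAS
 * change at (ipa, level).
 */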
static void ripas_granule_measure(struct rd *rd,
				  unsigned long ipa,
				  unsigned long level)
{
	struct measurement_desc_ripas measure_desc = {0};

	/* Initialize the measurement descriptor structure */
	measure_desc.desc_type = MEASURE_DESC_TYPE_RIPAS;
	measure_desc.len = sizeof(struct measurement_desc_ripas);
	measure_desc.ipa = ipa;
	measure_desc.level = level;
	memcpy(measure_desc.rim,
	       &rd->measurement[RIM_MEASUREMENT_SLOT],
	       measurement_get_size(rd->algorithm));

	/*
	 * Hash the measurement descriptor structure; the result is the
	 * updated RIM.
	 */
	measurement_hash_compute(rd->algorithm,
				 &measure_desc,
				 sizeof(measure_desc),
				 rd->measurement[RIM_MEASUREMENT_SLOT]);
}

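/*
 * Implements RTT.InitRipas.
 *
 * Sets RIPAS=RAM for an unassigned entry at (map_addr, ulevel) of a realm
 * in the NEW state and extends the RIM accordingly.
 */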
unsigned long smc_rtt_init_ripas(unsigned long rd_addr,
				 unsigned long map_addr,
				 unsigned long ulevel)
{
	struct granule *g_rd, *g_rtt_root;
	struct rd *rd;
	unsigned long ipa_bits;
	struct rtt_walk wi;
	unsigned long s2tte, *s2tt;
	unsigned long ret;
	long level = (long)ulevel;
	int sl;

	g_rd = find_lock_granule(rd_addr, GRANULE_STATE_RD);
	if (g_rd == NULL) {
		return RMI_ERROR_INPUT;
	}

	rd = granule_map(g_rd, SLOT_RD);

	if (get_rd_state_locked(rd) != REALM_STATE_NEW) {
		buffer_unmap(rd);
		granule_unlock(g_rd);
		return RMI_ERROR_REALM;
	}

	if (!validate_rtt_entry_cmds(map_addr, level, rd)) {
		buffer_unmap(rd);
		granule_unlock(g_rd);
		return RMI_ERROR_INPUT;
	}

	if (!addr_in_par(rd, map_addr)) {
		buffer_unmap(rd);
		granule_unlock(g_rd);
		return RMI_ERROR_INPUT;
	}

	g_rtt_root = rd->s2_ctx.g_rtt;
	sl = realm_rtt_starting_level(rd);
	ipa_bits = realm_ipa_bits(rd);

	granule_lock(g_rtt_root, GRANULE_STATE_RTT);
	granule_unlock(g_rd);

	rtt_walk_lock_unlock(g_rtt_root, sl, ipa_bits,
			     map_addr, level, &wi);

	if (wi.last_level != level) {
		ret = pack_return_code(RMI_ERROR_RTT, wi.last_level);
		goto out_unlock_llt;
	}

	s2tt = granule_map(wi.g_llt, SLOT_RTT);
	s2tte = s2tte_read(&s2tt[wi.index]);

	/* Allowed only for HIPAS=UNASSIGNED */
	if (s2tte_is_table(s2tte, level) || !s2tte_is_unassigned(s2tte)) {
		ret = pack_return_code(RMI_ERROR_RTT, (unsigned int)level);
		goto out_unmap_llt;
	}

	s2tte |= s2tte_create_ripas(RIPAS_RAM);

	s2tte_write(&s2tt[wi.index], s2tte);

	ripas_granule_measure(rd, map_addr, level);

	ret = RMI_SUCCESS;

out_unmap_llt:
	buffer_unmap(s2tt);
out_unlock_llt:
	buffer_unmap(rd);
	granule_unlock(wi.g_llt);
	return ret;
}

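/*
 * Implements RTT.SetRipas.
 *
 * Applies the RIPAS change requested by the realm through 'rec_addr' to
 * the entry at (map_addr, ulevel) and advances rec->set_ripas.addr past
 * the updated region.
 */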
unsigned long smc_rtt_set_ripas(unsigned long rd_addr,
				unsigned long rec_addr,
				unsigned long map_addr,
				unsigned long ulevel,
				unsigned long uripas)
{
	struct granule *g_rd, *g_rec, *g_rtt_root;
	struct rec *rec;
	struct rd *rd;
	unsigned long map_size, ipa_bits;
	struct rtt_walk wi;
	unsigned long s2tte, *s2tt;
	struct realm_s2_context s2_ctx;
	long level = (long)ulevel;
	enum ripas ripas = (enum ripas)uripas;
	unsigned long ret;
	bool valid;
	int sl;

	if (ripas > RIPAS_RAM) {
		return RMI_ERROR_INPUT;
	}

	if (!find_lock_two_granules(rd_addr,
				    GRANULE_STATE_RD,
				    &g_rd,
				    rec_addr,
				    GRANULE_STATE_REC,
				    &g_rec)) {
		return RMI_ERROR_INPUT;
	}

	if (granule_refcount_read_acquire(g_rec) != 0UL) {
		ret = RMI_ERROR_REC;
		goto out_unlock_rec_rd;
	}

	rec = granule_map(g_rec, SLOT_REC);

	if (g_rd != rec->realm_info.g_rd) {
		ret = RMI_ERROR_REC;
		goto out_unmap_rec;
	}

	if (ripas != rec->set_ripas.ripas) {
		ret = RMI_ERROR_INPUT;
		goto out_unmap_rec;
	}

	if (map_addr != rec->set_ripas.addr) {
		/* Target region is not the next chunk of the requested region */
		ret = RMI_ERROR_INPUT;
		goto out_unmap_rec;
	}

	rd = granule_map(g_rd, SLOT_RD);

	if (!validate_rtt_entry_cmds(map_addr, level, rd)) {
		ret = RMI_ERROR_INPUT;
		goto out_unmap_rd;
	}

	map_size = s2tte_map_size(level);
	if (map_addr + map_size > rec->set_ripas.end) {
		/* Target region extends beyond the end of the requested region */
		ret = RMI_ERROR_INPUT;
		goto out_unmap_rd;
	}

	g_rtt_root = rd->s2_ctx.g_rtt;
	sl = realm_rtt_starting_level(rd);
	ipa_bits = realm_ipa_bits(rd);
	s2_ctx = rd->s2_ctx;

	granule_lock(g_rtt_root, GRANULE_STATE_RTT);

	rtt_walk_lock_unlock(g_rtt_root, sl, ipa_bits,
			     map_addr, level, &wi);
	if (wi.last_level != level) {
		ret = pack_return_code(RMI_ERROR_RTT, wi.last_level);
		goto out_unlock_llt;
	}

	s2tt = granule_map(wi.g_llt, SLOT_RTT);
	s2tte = s2tte_read(&s2tt[wi.index]);

	valid = s2tte_is_assigned_ram(s2tte, level);

	if (!update_ripas(&s2tte, level, ripas)) {
		ret = pack_return_code(RMI_ERROR_RTT, (unsigned int)level);
		goto out_unmap_llt;
	}

	s2tte_write(&s2tt[wi.index], s2tte);

	if (valid && (ripas == RIPAS_EMPTY)) {
		if (level == RTT_PAGE_LEVEL) {
			invalidate_page(&s2_ctx, map_addr);
		} else {
			invalidate_block(&s2_ctx, map_addr);
		}
	}

	rec->set_ripas.addr += map_size;

	ret = RMI_SUCCESS;

out_unmap_llt:
	buffer_unmap(s2tt);
out_unlock_llt:
	granule_unlock(wi.g_llt);
out_unmap_rd:
	buffer_unmap(rd);
out_unmap_rec:
	buffer_unmap(rec);
out_unlock_rec_rd:
	granule_unlock(g_rec);
	granule_unlock(g_rd);
	return ret;
}