blob: e474e5dbe0b627e2454c5485ea060da146c2a107 [file] [log] [blame]
/*
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * SPDX-FileCopyrightText: Copyright TF-RMM Contributors.
 */
6
7#include <buffer.h>
8#include <granule.h>
9#include <measurement.h>
10#include <realm.h>
11#include <ripas.h>
12#include <smc-handler.h>
13#include <smc-rmi.h>
14#include <smc.h>
15#include <stddef.h>
16#include <string.h>
17#include <table.h>
18
19/*
20 * Validate the map_addr value passed to RMI_RTT_* and RMI_DATA_* commands.
21 */
22static bool validate_map_addr(unsigned long map_addr,
23 unsigned long level,
24 struct rd *rd)
25{
26
27 if (map_addr >= realm_ipa_size(rd)) {
28 return false;
29 }
30 if (!addr_is_level_aligned(map_addr, level)) {
31 return false;
32 }
33 return true;
34}
35
36/*
37 * Structure commands can operate on all RTTs except for the root RTT so
38 * the minimal valid level is the stage 2 starting level + 1.
39 */
40static bool validate_rtt_structure_cmds(unsigned long map_addr,
41 long level,
42 struct rd *rd)
43{
44 int min_level = realm_rtt_starting_level(rd) + 1;
45
46 if ((level < min_level) || (level > RTT_PAGE_LEVEL)) {
47 return false;
48 }
49 return validate_map_addr(map_addr, level, rd);
50}
51
52/*
53 * Map/Unmap commands can operate up to a level 2 block entry so min_level is
54 * the smallest block size.
55 */
56static bool validate_rtt_map_cmds(unsigned long map_addr,
57 long level,
58 struct rd *rd)
59{
60 if ((level < RTT_MIN_BLOCK_LEVEL) || (level > RTT_PAGE_LEVEL)) {
61 return false;
62 }
63 return validate_map_addr(map_addr, level, rd);
64}
65
66/*
67 * Entry commands can operate on any entry so the minimal valid level is the
68 * stage 2 starting level.
69 */
70static bool validate_rtt_entry_cmds(unsigned long map_addr,
71 long level,
72 struct rd *rd)
73{
74 if ((level < realm_rtt_starting_level(rd)) ||
75 (level > RTT_PAGE_LEVEL)) {
76 return false;
77 }
78 return validate_map_addr(map_addr, level, rd);
79}
80
/*
 * RMI_RTT_CREATE handler: install the DELEGATED granule at 'rtt_addr' as a
 * new RTT at 'ulevel' translating 'map_addr' for the realm owning 'rd_addr'.
 *
 * The new table is initialized from the parent S2TTE: unassigned/destroyed
 * entries are propagated to every child entry, while assigned/valid/valid_ns
 * block entries are unfolded into a full table of same-type entries (with
 * break-before-make applied for live mappings).
 *
 * Returns RMI_SUCCESS, RMI_ERROR_INPUT, or a packed RMI_ERROR_RTT code
 * carrying the level at which the walk or the parent check failed.
 */
unsigned long smc_rtt_create(unsigned long rtt_addr,
			     unsigned long rd_addr,
			     unsigned long map_addr,
			     unsigned long ulevel)
{
	struct granule *g_rd;
	struct granule *g_tbl;
	struct rd *rd;
	struct granule *g_table_root;
	struct rtt_walk wi;
	unsigned long *s2tt, *parent_s2tt, parent_s2tte;
	long level = (long)ulevel;
	unsigned long ipa_bits;
	unsigned long ret;
	struct realm_s2_context s2_ctx;
	int sl;

	/* The new RTT granule and the RD must both be locked up front. */
	if (!find_lock_two_granules(rtt_addr,
				    GRANULE_STATE_DELEGATED,
				    &g_tbl,
				    rd_addr,
				    GRANULE_STATE_RD,
				    &g_rd)) {
		return RMI_ERROR_INPUT;
	}

	rd = granule_map(g_rd, SLOT_RD);

	if (!validate_rtt_structure_cmds(map_addr, level, rd)) {
		buffer_unmap(rd);
		granule_unlock(g_rd);
		granule_unlock(g_tbl);
		return RMI_ERROR_INPUT;
	}

	/* Snapshot what is needed from the RD before unmapping it. */
	g_table_root = rd->s2_ctx.g_rtt;
	sl = realm_rtt_starting_level(rd);
	ipa_bits = realm_ipa_bits(rd);
	s2_ctx = rd->s2_ctx;
	buffer_unmap(rd);

	/*
	 * Lock the RTT root. Enforcing locking order RD->RTT is enough to
	 * ensure a deadlock-free locking guarantee.
	 */
	granule_lock(g_table_root, GRANULE_STATE_RTT);

	/* Unlock RD after locking RTT Root */
	granule_unlock(g_rd);

	/* Walk to the parent table: the new RTT hangs off level - 1. */
	rtt_walk_lock_unlock(g_table_root, sl, ipa_bits,
			     map_addr, level - 1L, &wi);
	if (wi.last_level != level - 1L) {
		ret = pack_return_code(RMI_ERROR_RTT, wi.last_level);
		goto out_unlock_llt;
	}

	parent_s2tt = granule_map(wi.g_llt, SLOT_RTT);
	parent_s2tte = s2tte_read(&parent_s2tt[wi.index]);
	s2tt = granule_map(g_tbl, SLOT_DELEGATED);

	if (s2tte_is_unassigned(parent_s2tte)) {
		/*
		 * Note that if map_addr is an Unprotected IPA, the RIPAS field
		 * is guaranteed to be zero, in both parent and child s2ttes.
		 */
		enum ripas ripas = s2tte_get_ripas(parent_s2tte);

		s2tt_init_unassigned(s2tt, ripas);

		/*
		 * Increase the refcount of the parent, the granule was
		 * locked while table walking and hand-over-hand locking.
		 * Atomicity and acquire/release semantics not required because
		 * the table is accessed always locked.
		 */
		__granule_get(wi.g_llt);

	} else if (s2tte_is_destroyed(parent_s2tte)) {
		s2tt_init_destroyed(s2tt);
		__granule_get(wi.g_llt);

	} else if (s2tte_is_assigned(parent_s2tte, level - 1L)) {
		unsigned long block_pa;

		/*
		 * We should observe parent assigned s2tte only when
		 * we create tables above this level.
		 */
		assert(level > RTT_MIN_BLOCK_LEVEL);

		block_pa = s2tte_pa(parent_s2tte, level - 1L);

		s2tt_init_assigned_empty(s2tt, block_pa, level);

		/*
		 * Increase the refcount to mark the granule as in-use. refcount
		 * is incremented by S2TTES_PER_S2TT (ref RTT unfolding).
		 */
		__granule_refcount_inc(g_tbl, S2TTES_PER_S2TT);

	} else if (s2tte_is_valid(parent_s2tte, level - 1L)) {
		unsigned long block_pa;

		/*
		 * We should observe parent valid s2tte only when
		 * we create tables above this level.
		 */
		assert(level > RTT_MIN_BLOCK_LEVEL);

		/*
		 * Break before make. This may cause spurious S2 aborts.
		 */
		s2tte_write(&parent_s2tt[wi.index], 0UL);
		invalidate_block(&s2_ctx, map_addr);

		block_pa = s2tte_pa(parent_s2tte, level - 1L);

		s2tt_init_valid(s2tt, block_pa, level);

		/*
		 * Increase the refcount to mark the granule as in-use. refcount
		 * is incremented by S2TTES_PER_S2TT (ref RTT unfolding).
		 */
		__granule_refcount_inc(g_tbl, S2TTES_PER_S2TT);

	} else if (s2tte_is_valid_ns(parent_s2tte, level - 1L)) {
		unsigned long block_pa;

		/*
		 * We should observe parent valid_ns s2tte only when
		 * we create tables above this level.
		 */
		assert(level > RTT_MIN_BLOCK_LEVEL);

		/*
		 * Break before make. This may cause spurious S2 aborts.
		 */
		s2tte_write(&parent_s2tt[wi.index], 0UL);
		invalidate_block(&s2_ctx, map_addr);

		block_pa = s2tte_pa(parent_s2tte, level - 1L);

		s2tt_init_valid_ns(s2tt, block_pa, level);

		/*
		 * Increase the refcount to mark the granule as in-use. refcount
		 * is incremented by S2TTES_PER_S2TT (ref RTT unfolding).
		 */
		__granule_refcount_inc(g_tbl, S2TTES_PER_S2TT);

	} else if (s2tte_is_table(parent_s2tte, level - 1L)) {
		/* A table already exists here; report the parent level. */
		ret = pack_return_code(RMI_ERROR_RTT,
					(unsigned int)(level - 1L));
		goto out_unmap_table;

	} else {
		assert(false);
	}

	ret = RMI_SUCCESS;

	granule_set_state(g_tbl, GRANULE_STATE_RTT);

	/* Link the new table into the parent entry. */
	parent_s2tte = s2tte_create_table(rtt_addr, level - 1L);
	s2tte_write(&parent_s2tt[wi.index], parent_s2tte);

out_unmap_table:
	buffer_unmap(s2tt);
	buffer_unmap(parent_s2tt);
out_unlock_llt:
	granule_unlock(wi.g_llt);
	granule_unlock(g_tbl);
	return ret;
}
256
/*
 * RMI_RTT_FOLD handler: collapse the RTT at 'rtt_addr' (mapped at
 * (map_addr, ulevel) in the realm owning 'rd_addr') back into a single
 * parent entry, returning the table granule to DELEGATED state.
 *
 * Folding succeeds only if all entries in the table are homogeneous:
 * refcount == 0 means all destroyed or all unassigned; refcount ==
 * S2TTES_PER_S2TT means a candidate contiguous assigned/valid/valid_ns
 * block. Any mixture yields RMI_ERROR_IN_USE.
 */
unsigned long smc_rtt_fold(unsigned long rtt_addr,
			   unsigned long rd_addr,
			   unsigned long map_addr,
			   unsigned long ulevel)
{
	struct granule *g_rd;
	struct granule *g_tbl;
	struct rd *rd;
	struct granule *g_table_root;
	struct rtt_walk wi;
	unsigned long *table, *parent_s2tt, parent_s2tte;
	long level = (long)ulevel;
	unsigned long ipa_bits;
	unsigned long ret;
	struct realm_s2_context s2_ctx;
	int sl;
	enum ripas ripas;

	g_rd = find_lock_granule(rd_addr, GRANULE_STATE_RD);
	if (g_rd == NULL) {
		return RMI_ERROR_INPUT;
	}

	rd = granule_map(g_rd, SLOT_RD);

	if (!validate_rtt_structure_cmds(map_addr, level, rd)) {
		buffer_unmap(rd);
		granule_unlock(g_rd);
		return RMI_ERROR_INPUT;
	}

	/* Snapshot RD state, then enforce RD->RTT locking order. */
	g_table_root = rd->s2_ctx.g_rtt;
	sl = realm_rtt_starting_level(rd);
	ipa_bits = realm_ipa_bits(rd);
	s2_ctx = rd->s2_ctx;
	buffer_unmap(rd);
	granule_lock(g_table_root, GRANULE_STATE_RTT);
	granule_unlock(g_rd);

	/* Walk to the parent table holding the table descriptor to fold. */
	rtt_walk_lock_unlock(g_table_root, sl, ipa_bits,
			     map_addr, level - 1L, &wi);
	/* NOTE(review): '1UL' here vs '1L' in smc_rtt_create — confirm intended. */
	if (wi.last_level != level - 1UL) {
		ret = pack_return_code(RMI_ERROR_RTT, wi.last_level);
		goto out_unlock_parent_table;
	}

	parent_s2tt = granule_map(wi.g_llt, SLOT_RTT);
	parent_s2tte = s2tte_read(&parent_s2tt[wi.index]);
	if (!s2tte_is_table(parent_s2tte, level - 1L)) {
		ret = pack_return_code(RMI_ERROR_RTT,
					(unsigned int)(level - 1L));
		goto out_unmap_parent_table;
	}

	/*
	 * Check that the 'rtt_addr' RTT is used at (map_addr, level).
	 * Note that this also verifies that the rtt_addr is properly aligned.
	 */
	if (rtt_addr != s2tte_pa_table(parent_s2tte, level - 1L)) {
		ret = pack_return_code(RMI_ERROR_RTT,
					(unsigned int)(level - 1L));
		goto out_unmap_parent_table;
	}

	g_tbl = find_lock_granule(rtt_addr, GRANULE_STATE_RTT);

	/*
	 * A table descriptor S2TTE always points to a TABLE granule.
	 */
	assert(g_tbl);

	table = granule_map(g_tbl, SLOT_RTT2);

	/*
	 * The command can succeed only if all 512 S2TTEs are of the same type.
	 * We first check the table's ref. counter to speed up the case when
	 * the host makes a guess whether a memory region can be folded.
	 */
	if (g_tbl->refcount == 0UL) {
		if (table_is_destroyed_block(table)) {
			parent_s2tte = s2tte_create_destroyed();
			__granule_put(wi.g_llt);

		} else if (table_is_unassigned_block(table, &ripas)) {
			/*
			 * Note that if map_addr is an Unprotected IPA, the
			 * RIPAS field is guaranteed to be zero, in both parent
			 * and child s2ttes.
			 */
			parent_s2tte = s2tte_create_unassigned(ripas);
			__granule_put(wi.g_llt);
		} else {
			/*
			 * The table holds a mixture of destroyed and
			 * unassigned entries.
			 */
			ret = RMI_ERROR_IN_USE;
			goto out_unmap_table;
		}

	} else if (g_tbl->refcount == S2TTES_PER_S2TT) {

		unsigned long s2tte, block_pa;

		/* The RMM specification does not allow creating block
		 * entries less than RTT_MIN_BLOCK_LEVEL even though
		 * permitted by the Arm Architecture.
		 * Hence ensure that the table being folded is at a level
		 * higher than the RTT_MIN_BLOCK_LEVEL.
		 *
		 * A fully populated table cannot be destroyed if that
		 * would create a block mapping below RTT_MIN_BLOCK_LEVEL.
		 */
		if (level <= RTT_MIN_BLOCK_LEVEL) {
			ret = RMI_ERROR_IN_USE;
			goto out_unmap_table;
		}

		/* The first entry's PA is the base of the candidate block. */
		s2tte = s2tte_read(&table[0]);
		block_pa = s2tte_pa(s2tte, level - 1L);

		/*
		 * The table must also refer to a contiguous block through
		 * the same type of s2tte, either Assigned, Valid or Valid_NS.
		 */
		if (table_maps_assigned_block(table, level)) {
			parent_s2tte = s2tte_create_assigned_empty(block_pa, level - 1L);
		} else if (table_maps_valid_block(table, level)) {
			parent_s2tte = s2tte_create_valid(block_pa, level - 1L);
		} else if (table_maps_valid_ns_block(table, level)) {
			parent_s2tte = s2tte_create_valid_ns(block_pa, level - 1L);
		/* This 'else' case should not happen */
		} else {
			assert(false);
		}

		__granule_refcount_dec(g_tbl, S2TTES_PER_S2TT);
	} else {
		/*
		 * The table holds a mixture of different types of s2ttes.
		 */
		ret = RMI_ERROR_IN_USE;
		goto out_unmap_table;
	}

	ret = RMI_SUCCESS;

	/*
	 * Break before make.
	 */
	s2tte_write(&parent_s2tt[wi.index], 0UL);

	/* Live mappings need per-page invalidation; others a block one. */
	if (s2tte_is_valid(parent_s2tte, level - 1L) ||
	    s2tte_is_valid_ns(parent_s2tte, level - 1L)) {
		invalidate_pages_in_block(&s2_ctx, map_addr);
	} else {
		invalidate_block(&s2_ctx, map_addr);
	}

	s2tte_write(&parent_s2tt[wi.index], parent_s2tte);

	/* Scrub the old table and return it to DELEGATED state. */
	granule_memzero_mapped(table);
	granule_set_state(g_tbl, GRANULE_STATE_DELEGATED);

out_unmap_table:
	buffer_unmap(table);
	granule_unlock(g_tbl);
out_unmap_parent_table:
	buffer_unmap(parent_s2tt);
out_unlock_parent_table:
	granule_unlock(wi.g_llt);
	return ret;
}
430
/*
 * RMI_RTT_DESTROY handler: unlink the unused RTT at 'rtt_addr' from
 * (map_addr, ulevel) of the realm owning 'rd_addr' and return the granule
 * to DELEGATED state. The table must have refcount zero (no live child
 * mappings). The parent entry becomes DESTROYED for protected IPAs, or
 * invalid-NS for unprotected IPAs.
 */
unsigned long smc_rtt_destroy(unsigned long rtt_addr,
			      unsigned long rd_addr,
			      unsigned long map_addr,
			      unsigned long ulevel)
{
	struct granule *g_rd;
	struct granule *g_tbl;
	struct rd *rd;
	struct granule *g_table_root;
	struct rtt_walk wi;
	unsigned long *table, *parent_s2tt, parent_s2tte;
	long level = (long)ulevel;
	unsigned long ipa_bits;
	unsigned long ret;
	struct realm_s2_context s2_ctx;
	int sl;
	bool in_par;

	g_rd = find_lock_granule(rd_addr, GRANULE_STATE_RD);
	if (g_rd == NULL) {
		return RMI_ERROR_INPUT;
	}

	rd = granule_map(g_rd, SLOT_RD);

	if (!validate_rtt_structure_cmds(map_addr, level, rd)) {
		buffer_unmap(rd);
		granule_unlock(g_rd);
		return RMI_ERROR_INPUT;
	}

	/* Snapshot RD state, then enforce RD->RTT locking order. */
	g_table_root = rd->s2_ctx.g_rtt;
	sl = realm_rtt_starting_level(rd);
	ipa_bits = realm_ipa_bits(rd);
	s2_ctx = rd->s2_ctx;
	in_par = addr_in_par(rd, map_addr);
	buffer_unmap(rd);
	granule_lock(g_table_root, GRANULE_STATE_RTT);
	granule_unlock(g_rd);

	/* Walk to the parent table holding the descriptor to destroy. */
	rtt_walk_lock_unlock(g_table_root, sl, ipa_bits,
			     map_addr, level - 1L, &wi);
	if (wi.last_level != level - 1UL) {
		ret = pack_return_code(RMI_ERROR_RTT, wi.last_level);
		goto out_unlock_parent_table;
	}

	parent_s2tt = granule_map(wi.g_llt, SLOT_RTT);
	parent_s2tte = s2tte_read(&parent_s2tt[wi.index]);
	if (!s2tte_is_table(parent_s2tte, level - 1L)) {
		ret = pack_return_code(RMI_ERROR_RTT,
					(unsigned int)(level - 1L));
		goto out_unmap_parent_table;
	}

	/*
	 * Check that the 'rtt_addr' RTT is used at (map_addr, level).
	 * Note that this also verifies that the rtt_addr is properly aligned.
	 */
	if (rtt_addr != s2tte_pa_table(parent_s2tte, level - 1L)) {
		ret = RMI_ERROR_INPUT;
		goto out_unmap_parent_table;
	}

	/*
	 * Lock the RTT granule. The 'rtt_addr' is verified, thus can be treated
	 * as an internal granule.
	 */
	g_tbl = find_lock_granule(rtt_addr, GRANULE_STATE_RTT);

	/*
	 * A table descriptor S2TTE always points to a TABLE granule.
	 */
	assert(g_tbl != NULL);

	/*
	 * Read the refcount value. RTT granule is always accessed locked, thus
	 * the refcount can be accessed without atomic operations.
	 */
	if (g_tbl->refcount != 0UL) {
		ret = RMI_ERROR_IN_USE;
		goto out_unlock_table;
	}

	ret = RMI_SUCCESS;

	table = granule_map(g_tbl, SLOT_RTT2);

	/* Protected IPA -> DESTROYED; unprotected IPA -> invalid NS. */
	if (in_par) {
		parent_s2tte = s2tte_create_destroyed();
	} else {
		parent_s2tte = s2tte_create_invalid_ns();
	}

	__granule_put(wi.g_llt);

	/*
	 * Break before make. Note that this may cause spurious S2 aborts.
	 */
	s2tte_write(&parent_s2tt[wi.index], 0UL);
	invalidate_block(&s2_ctx, map_addr);
	s2tte_write(&parent_s2tt[wi.index], parent_s2tte);

	/* Scrub the old table and return it to DELEGATED state. */
	granule_memzero_mapped(table);
	granule_set_state(g_tbl, GRANULE_STATE_DELEGATED);

	buffer_unmap(table);
out_unlock_table:
	granule_unlock(g_tbl);
out_unmap_parent_table:
	buffer_unmap(parent_s2tt);
out_unlock_parent_table:
	granule_unlock(wi.g_llt);
	return ret;
}
546
/*
 * Operation selector for map_unmap_ns(): create (MAP_NS) or remove
 * (UNMAP_NS) a Valid_NS mapping in a realm's stage 2 tables.
 */
enum map_unmap_ns_op {
	MAP_NS,
	UNMAP_NS
};
551
/*
 * Common implementation of RMI_RTT_MAP_UNPROTECTED and
 * RMI_RTT_UNMAP_UNPROTECTED.
 *
 * We don't hold a reference on the NS granule when it is
 * mapped into a realm. Instead we rely on the guarantees
 * provided by the architecture to ensure that a NS access
 * to a protected granule is prohibited even within the realm.
 *
 * @host_s2tte carries the host-provided descriptor for MAP_NS and is
 * ignored for UNMAP_NS. Returns RMI_SUCCESS, RMI_ERROR_INPUT, or a packed
 * RMI_ERROR_RTT code with the faulting level.
 */
static unsigned long map_unmap_ns(unsigned long rd_addr,
				  unsigned long map_addr,
				  long level,
				  unsigned long host_s2tte,
				  enum map_unmap_ns_op op)
{
	struct granule *g_rd;
	struct rd *rd;
	struct granule *g_table_root;
	unsigned long *s2tt, s2tte;
	struct rtt_walk wi;
	unsigned long ipa_bits;
	unsigned long ret;
	struct realm_s2_context s2_ctx;
	int sl;

	g_rd = find_lock_granule(rd_addr, GRANULE_STATE_RD);
	if (g_rd == NULL) {
		return RMI_ERROR_INPUT;
	}

	rd = granule_map(g_rd, SLOT_RD);

	if (!validate_rtt_map_cmds(map_addr, level, rd)) {
		buffer_unmap(rd);
		granule_unlock(g_rd);
		return RMI_ERROR_INPUT;
	}

	g_table_root = rd->s2_ctx.g_rtt;
	sl = realm_rtt_starting_level(rd);
	ipa_bits = realm_ipa_bits(rd);

	/*
	 * We don't have to check PAR boundaries for unmap_ns
	 * operation because we already test that the s2tte is Valid_NS
	 * and only outside-PAR IPAs can be translated by such s2tte.
	 *
	 * For "map_ns", however, the s2tte is verified to be Unassigned
	 * but both inside & outside PAR IPAs can be translated by such s2ttes.
	 */
	if ((op == MAP_NS) && addr_in_par(rd, map_addr)) {
		buffer_unmap(rd);
		granule_unlock(g_rd);
		return RMI_ERROR_INPUT;
	}

	s2_ctx = rd->s2_ctx;
	buffer_unmap(rd);

	/* Enforce RD->RTT locking order, then release the RD. */
	granule_lock(g_table_root, GRANULE_STATE_RTT);
	granule_unlock(g_rd);

	/* The walk must reach exactly the requested level. */
	rtt_walk_lock_unlock(g_table_root, sl, ipa_bits,
			     map_addr, level, &wi);
	if (wi.last_level != level) {
		ret = pack_return_code(RMI_ERROR_RTT, wi.last_level);
		goto out_unlock_llt;
	}

	s2tt = granule_map(wi.g_llt, SLOT_RTT);
	s2tte = s2tte_read(&s2tt[wi.index]);

	if (op == MAP_NS) {
		/* Only an Unassigned entry may become Valid_NS. */
		if (!s2tte_is_unassigned(s2tte)) {
			ret = pack_return_code(RMI_ERROR_RTT,
						(unsigned int)level);
			goto out_unmap_table;
		}

		s2tte = s2tte_create_valid_ns(host_s2tte, level);
		s2tte_write(&s2tt[wi.index], s2tte);
		__granule_get(wi.g_llt);

	} else if (op == UNMAP_NS) {
		/*
		 * The following check also verifies that map_addr is outside
		 * PAR, as valid_NS s2tte may only cover outside PAR IPA range.
		 */
		if (!s2tte_is_valid_ns(s2tte, level)) {
			ret = pack_return_code(RMI_ERROR_RTT,
						(unsigned int)level);
			goto out_unmap_table;
		}

		s2tte = s2tte_create_invalid_ns();
		s2tte_write(&s2tt[wi.index], s2tte);
		__granule_put(wi.g_llt);
		if (level == RTT_PAGE_LEVEL) {
			invalidate_page(&s2_ctx, map_addr);
		} else {
			invalidate_block(&s2_ctx, map_addr);
		}
	}

	ret = RMI_SUCCESS;

out_unmap_table:
	buffer_unmap(s2tt);
out_unlock_llt:
	granule_unlock(wi.g_llt);
	return ret;
}
661
662unsigned long smc_rtt_map_unprotected(unsigned long rd_addr,
663 unsigned long map_addr,
664 unsigned long ulevel,
665 unsigned long s2tte)
666{
667 long level = (long)ulevel;
668
669 if (!host_ns_s2tte_is_valid(s2tte, level)) {
670 return RMI_ERROR_INPUT;
671 }
672
673 return map_unmap_ns(rd_addr, map_addr, level, s2tte, MAP_NS);
674}
675
676unsigned long smc_rtt_unmap_unprotected(unsigned long rd_addr,
677 unsigned long map_addr,
678 unsigned long ulevel)
679{
680 return map_unmap_ns(rd_addr, map_addr, (long)ulevel, 0UL, UNMAP_NS);
681}
682
/*
 * RMI_RTT_READ_ENTRY handler: walk to (map_addr, ulevel) and report the
 * S2TTE found there via 'ret':
 *   x[0] - RMI_SUCCESS or RMI_ERROR_INPUT
 *   x[1] - level at which the walk terminated
 *   x[2] - entry state (UNASSIGNED/DESTROYED/ASSIGNED/VALID_NS/TABLE)
 *   x[3] - output PA / host S2TTE / table PA, depending on state (else 0)
 *   x[4] - RIPAS for unassigned/assigned/valid entries (else 0)
 */
void smc_rtt_read_entry(unsigned long rd_addr,
			unsigned long map_addr,
			unsigned long ulevel,
			struct smc_result *ret)
{
	struct granule *g_rd, *g_rtt_root;
	struct rd *rd;
	struct rtt_walk wi;
	unsigned long *s2tt, s2tte;
	unsigned long ipa_bits;
	long level = (long)ulevel;
	int sl;

	g_rd = find_lock_granule(rd_addr, GRANULE_STATE_RD);
	if (g_rd == NULL) {
		ret->x[0] = RMI_ERROR_INPUT;
		return;
	}

	rd = granule_map(g_rd, SLOT_RD);

	if (!validate_rtt_entry_cmds(map_addr, level, rd)) {
		buffer_unmap(rd);
		granule_unlock(g_rd);
		ret->x[0] = RMI_ERROR_INPUT;
		return;
	}

	g_rtt_root = rd->s2_ctx.g_rtt;
	sl = realm_rtt_starting_level(rd);
	ipa_bits = realm_ipa_bits(rd);
	buffer_unmap(rd);

	/* Enforce RD->RTT locking order, then release the RD. */
	granule_lock(g_rtt_root, GRANULE_STATE_RTT);
	granule_unlock(g_rd);

	/*
	 * The walk may terminate above the requested level; the entry at
	 * wi.last_level is reported as-is.
	 */
	rtt_walk_lock_unlock(g_rtt_root, sl, ipa_bits,
			     map_addr, level, &wi);
	s2tt = granule_map(wi.g_llt, SLOT_RTT);
	s2tte = s2tte_read(&s2tt[wi.index]);
	ret->x[1] = wi.last_level;
	ret->x[3] = 0UL;
	ret->x[4] = 0UL;

	if (s2tte_is_unassigned(s2tte)) {
		enum ripas ripas = s2tte_get_ripas(s2tte);

		ret->x[2] = RMI_RTT_STATE_UNASSIGNED;
		ret->x[4] = (unsigned long)ripas;
	} else if (s2tte_is_destroyed(s2tte)) {
		ret->x[2] = RMI_RTT_STATE_DESTROYED;
	} else if (s2tte_is_assigned(s2tte, wi.last_level)) {
		ret->x[2] = RMI_RTT_STATE_ASSIGNED;
		ret->x[3] = s2tte_pa(s2tte, wi.last_level);
		ret->x[4] = RMI_EMPTY;
	} else if (s2tte_is_valid(s2tte, wi.last_level)) {
		/* A valid protected mapping is reported as ASSIGNED/RAM. */
		ret->x[2] = RMI_RTT_STATE_ASSIGNED;
		ret->x[3] = s2tte_pa(s2tte, wi.last_level);
		ret->x[4] = RMI_RAM;
	} else if (s2tte_is_valid_ns(s2tte, wi.last_level)) {
		ret->x[2] = RMI_RTT_STATE_VALID_NS;
		ret->x[3] = host_ns_s2tte(s2tte, wi.last_level);
	} else if (s2tte_is_table(s2tte, wi.last_level)) {
		ret->x[2] = RMI_RTT_STATE_TABLE;
		ret->x[3] = s2tte_pa_table(s2tte, wi.last_level);
	} else {
		assert(false);
	}

	buffer_unmap(s2tt);
	granule_unlock(wi.g_llt);

	ret->x[0] = RMI_SUCCESS;
}
757
/*
 * Extend the realm's RIM with a DATA measurement descriptor for the granule
 * contents mapped at 'data', loaded at IPA 'ipa'. If 'flags' is
 * RMI_MEASURE_CONTENT, the granule contents are hashed into the descriptor
 * first; otherwise the content hash field is left zeroed.
 */
static void data_granule_measure(struct rd *rd, void *data,
				 unsigned long ipa,
				 unsigned long flags)
{
	struct measurement_desc_data measure_desc = {0};

	/* Initialize the measurement descriptor structure */
	measure_desc.desc_type = MEASURE_DESC_TYPE_DATA;
	measure_desc.len = sizeof(struct measurement_desc_data);
	measure_desc.ipa = ipa;
	measure_desc.flags = flags;
	/* Seed the descriptor with the current RIM value. */
	memcpy(measure_desc.rim,
	       &rd->measurement[RIM_MEASUREMENT_SLOT],
	       measurement_get_size(rd->algorithm));

	if (flags == RMI_MEASURE_CONTENT) {
		/*
		 * Hash the data granule and store the result in the
		 * measurement descriptor structure.
		 */
		measurement_hash_compute(rd->algorithm,
					 data,
					 GRANULE_SIZE,
					 measure_desc.content);
	}

	/*
	 * Hashing the measurement descriptor structure; the result is the
	 * updated RIM.
	 */
	measurement_hash_compute(rd->algorithm,
				 &measure_desc,
				 sizeof(measure_desc),
				 rd->measurement[RIM_MEASUREMENT_SLOT]);
}
793
794static unsigned long validate_data_create_unknown(unsigned long map_addr,
795 struct rd *rd)
796{
797 if (!addr_in_par(rd, map_addr)) {
798 return RMI_ERROR_INPUT;
799 }
800
801 if (!validate_map_addr(map_addr, RTT_PAGE_LEVEL, rd)) {
802 return RMI_ERROR_INPUT;
803 }
804
805 return RMI_SUCCESS;
806}
807
808static unsigned long validate_data_create(unsigned long map_addr,
809 struct rd *rd)
810{
811 if (get_rd_state_locked(rd) != REALM_STATE_NEW) {
812 return RMI_ERROR_REALM;
813 }
814
815 return validate_data_create_unknown(map_addr, rd);
816}
817
/*
 * Implements both Data.Create and Data.CreateUnknown.
 *
 * If @g_src == NULL, this implements Data.CreateUnknown;
 * otherwise this implements Data.Create (copying and optionally
 * measuring the source granule contents).
 *
 * On success the DELEGATED granule at 'data_addr' transitions to DATA and
 * is mapped at 'map_addr' as assigned-empty or valid, depending on the
 * RIPAS recorded in the unassigned S2TTE.
 */
static unsigned long data_create(unsigned long data_addr,
				 unsigned long rd_addr,
				 unsigned long map_addr,
				 struct granule *g_src,
				 unsigned long flags)
{
	struct granule *g_data;
	struct granule *g_rd;
	struct granule *g_table_root;
	struct rd *rd;
	struct rtt_walk wi;
	unsigned long s2tte, *s2tt;
	enum ripas ripas;
	/* Stays DELEGATED on failure so the unlock-transition is a no-op. */
	enum granule_state new_data_state = GRANULE_STATE_DELEGATED;
	unsigned long ipa_bits;
	unsigned long ret;
	int __unused meas_ret;
	int sl;

	if (!find_lock_two_granules(data_addr,
				    GRANULE_STATE_DELEGATED,
				    &g_data,
				    rd_addr,
				    GRANULE_STATE_RD,
				    &g_rd)) {
		return RMI_ERROR_INPUT;
	}

	rd = granule_map(g_rd, SLOT_RD);

	/* Data.Create additionally requires the realm to be NEW. */
	ret = (g_src != NULL) ?
		validate_data_create(map_addr, rd) :
		validate_data_create_unknown(map_addr, rd);

	if (ret != RMI_SUCCESS) {
		goto out_unmap_rd;
	}

	g_table_root = rd->s2_ctx.g_rtt;
	sl = realm_rtt_starting_level(rd);
	ipa_bits = realm_ipa_bits(rd);
	granule_lock(g_table_root, GRANULE_STATE_RTT);
	/* The walk must reach the page level to install a page mapping. */
	rtt_walk_lock_unlock(g_table_root, sl, ipa_bits,
			     map_addr, RTT_PAGE_LEVEL, &wi);
	if (wi.last_level != RTT_PAGE_LEVEL) {
		ret = pack_return_code(RMI_ERROR_RTT, wi.last_level);
		goto out_unlock_ll_table;
	}

	s2tt = granule_map(wi.g_llt, SLOT_RTT);
	s2tte = s2tte_read(&s2tt[wi.index]);
	if (!s2tte_is_unassigned(s2tte)) {
		ret = pack_return_code(RMI_ERROR_RTT, RTT_PAGE_LEVEL);
		goto out_unmap_ll_table;
	}

	ripas = s2tte_get_ripas(s2tte);

	if (g_src != NULL) {
		bool ns_access_ok;
		void *data = granule_map(g_data, SLOT_DELEGATED);

		/* Copy the NS source contents into the new data granule. */
		ns_access_ok = ns_buffer_read(SLOT_NS, g_src, 0U,
					      GRANULE_SIZE, data);

		if (!ns_access_ok) {
			/*
			 * Some data may be copied before the failure. Zero
			 * g_data granule as it will remain in delegated state.
			 */
			(void)memset(data, 0, GRANULE_SIZE);
			buffer_unmap(data);
			ret = RMI_ERROR_INPUT;
			goto out_unmap_ll_table;
		}


		/* Extend the RIM with this data granule's measurement. */
		data_granule_measure(rd, data, map_addr, flags);

		buffer_unmap(data);
	}

	new_data_state = GRANULE_STATE_DATA;

	/* RIPAS EMPTY yields assigned-empty; RAM yields a valid mapping. */
	s2tte = (ripas == RMI_EMPTY) ?
		s2tte_create_assigned_empty(data_addr, RTT_PAGE_LEVEL) :
		s2tte_create_valid(data_addr, RTT_PAGE_LEVEL);

	s2tte_write(&s2tt[wi.index], s2tte);
	__granule_get(wi.g_llt);

	ret = RMI_SUCCESS;

out_unmap_ll_table:
	buffer_unmap(s2tt);
out_unlock_ll_table:
	granule_unlock(wi.g_llt);
out_unmap_rd:
	buffer_unmap(rd);
	granule_unlock(g_rd);
	granule_unlock_transition(g_data, new_data_state);
	return ret;
}
927
928unsigned long smc_data_create(unsigned long data_addr,
929 unsigned long rd_addr,
930 unsigned long map_addr,
931 unsigned long src_addr,
932 unsigned long flags)
933{
934 struct granule *g_src;
935 unsigned long ret;
936
937 if (flags != RMI_NO_MEASURE_CONTENT && flags != RMI_MEASURE_CONTENT) {
938 return RMI_ERROR_INPUT;
939 }
940
941 g_src = find_granule(src_addr);
942 if ((g_src == NULL) || (g_src->state != GRANULE_STATE_NS)) {
943 return RMI_ERROR_INPUT;
944 }
945
946 ret = data_create(data_addr, rd_addr, map_addr, g_src, flags);
947
948 return ret;
949}
950
951unsigned long smc_data_create_unknown(unsigned long data_addr,
952 unsigned long rd_addr,
953 unsigned long map_addr)
954{
955 return data_create(data_addr, rd_addr, map_addr, NULL, 0);
956}
957
/*
 * RMI_DATA_DESTROY handler: remove the DATA granule mapped at 'map_addr'
 * in the realm owning 'rd_addr', scrub it and return it to DELEGATED
 * state. A valid mapping becomes DESTROYED; an assigned (HIPAS=ASSIGNED)
 * entry becomes unassigned-empty.
 */
unsigned long smc_data_destroy(unsigned long rd_addr,
			       unsigned long map_addr)
{
	struct granule *g_data;
	struct granule *g_rd;
	struct granule *g_table_root;
	struct rtt_walk wi;
	unsigned long data_addr, s2tte, *s2tt;
	struct rd *rd;
	unsigned long ipa_bits;
	unsigned long ret;
	struct realm_s2_context s2_ctx;
	bool valid;
	int sl;

	g_rd = find_lock_granule(rd_addr, GRANULE_STATE_RD);
	if (g_rd == NULL) {
		return RMI_ERROR_INPUT;
	}

	rd = granule_map(g_rd, SLOT_RD);

	if (!validate_map_addr(map_addr, RTT_PAGE_LEVEL, rd)) {
		buffer_unmap(rd);
		granule_unlock(g_rd);
		return RMI_ERROR_INPUT;
	}

	/* Snapshot RD state, then enforce RD->RTT locking order. */
	g_table_root = rd->s2_ctx.g_rtt;
	sl = realm_rtt_starting_level(rd);
	ipa_bits = realm_ipa_bits(rd);
	s2_ctx = rd->s2_ctx;
	buffer_unmap(rd);

	granule_lock(g_table_root, GRANULE_STATE_RTT);
	granule_unlock(g_rd);

	/* The page must be mapped at the last level. */
	rtt_walk_lock_unlock(g_table_root, sl, ipa_bits,
			     map_addr, RTT_PAGE_LEVEL, &wi);
	if (wi.last_level != RTT_PAGE_LEVEL) {
		ret = pack_return_code(RMI_ERROR_RTT, wi.last_level);
		goto out_unlock_ll_table;
	}

	s2tt = granule_map(wi.g_llt, SLOT_RTT);
	s2tte = s2tte_read(&s2tt[wi.index]);

	valid = s2tte_is_valid(s2tte, RTT_PAGE_LEVEL);

	/*
	 * Check if either HIPAS=ASSIGNED or map_addr is a
	 * valid Protected IPA.
	 */
	if (!valid && !s2tte_is_assigned(s2tte, RTT_PAGE_LEVEL)) {
		ret = pack_return_code(RMI_ERROR_RTT, RTT_PAGE_LEVEL);
		goto out_unmap_ll_table;
	}

	/* Recover the data granule's PA from the S2TTE. */
	data_addr = s2tte_pa(s2tte, RTT_PAGE_LEVEL);

	/*
	 * We have already established either HIPAS=ASSIGNED or a valid mapping.
	 * If valid, transition HIPAS to DESTROYED and if HIPAS=ASSIGNED,
	 * transition to UNASSIGNED.
	 */
	s2tte = valid ? s2tte_create_destroyed() :
			s2tte_create_unassigned(RMI_EMPTY);

	s2tte_write(&s2tt[wi.index], s2tte);

	/* Only a live (valid) mapping requires TLB invalidation. */
	if (valid) {
		invalidate_page(&s2_ctx, map_addr);
	}

	__granule_put(wi.g_llt);

	/*
	 * Lock the data granule and check expected state. Correct locking order
	 * is guaranteed because granule address is obtained from a locked
	 * granule by table walk. This lock needs to be acquired before a state
	 * transition to or from GRANULE_STATE_DATA for granule address can happen.
	 */
	g_data = find_lock_granule(data_addr, GRANULE_STATE_DATA);
	assert(g_data);
	granule_memzero(g_data, SLOT_DELEGATED);
	granule_unlock_transition(g_data, GRANULE_STATE_DELEGATED);

	ret = RMI_SUCCESS;

out_unmap_ll_table:
	buffer_unmap(s2tt);
out_unlock_ll_table:
	granule_unlock(wi.g_llt);

	return ret;
}
1054
/*
 * Update the RIPAS recorded in *s2tte to 'ripas' at the given level.
 *
 * Returns false when the entry is a table descriptor or of a type whose
 * RIPAS cannot be changed (e.g. valid_ns). A valid entry downgraded to
 * RMI_EMPTY is rewritten as assigned-empty — the caller is responsible
 * for the required TLB invalidation. For unassigned/assigned entries the
 * RIPAS bits are set via bitwise OR.
 * NOTE(review): the OR sets but never clears RIPAS bits — confirm the
 * encoding makes this a pure overwrite for the values passed here.
 */
static bool update_ripas(unsigned long *s2tte, unsigned long level,
			 enum ripas ripas)
{
	if (s2tte_is_table(*s2tte, level)) {
		return false;
	}

	if (s2tte_is_valid(*s2tte, level)) {
		if (ripas == RMI_EMPTY) {
			unsigned long pa = s2tte_pa(*s2tte, level);
			*s2tte = s2tte_create_assigned_empty(pa, level);
		}
		return true;
	}

	if (s2tte_is_unassigned(*s2tte) || s2tte_is_assigned(*s2tte, level)) {
		*s2tte |= s2tte_create_ripas(ripas);
		return true;
	}

	return false;
}
1077
/*
 * Extend the realm's RIM with a RIPAS measurement descriptor for the
 * region at (ipa, level).
 */
static void ripas_granule_measure(struct rd *rd,
				  unsigned long ipa,
				  unsigned long level)
{
	struct measurement_desc_ripas measure_desc = {0};

	/* Initialize the measurement descriptor structure */
	measure_desc.desc_type = MEASURE_DESC_TYPE_RIPAS;
	measure_desc.len = sizeof(struct measurement_desc_ripas);
	measure_desc.ipa = ipa;
	measure_desc.level = level;
	/* Seed the descriptor with the current RIM value. */
	memcpy(measure_desc.rim,
	       &rd->measurement[RIM_MEASUREMENT_SLOT],
	       measurement_get_size(rd->algorithm));

	/*
	 * Hashing the measurement descriptor structure; the result is the
	 * updated RIM.
	 */
	measurement_hash_compute(rd->algorithm,
				 &measure_desc,
				 sizeof(measure_desc),
				 rd->measurement[RIM_MEASUREMENT_SLOT]);
}
1102
/*
 * RMI_RTT_INIT_RIPAS handler: set RIPAS=RAM on the unassigned S2TTE at
 * (map_addr, ulevel) of a realm still in the NEW state, and extend the
 * RIM with a RIPAS measurement. Only protected (inside-PAR) IPAs with
 * HIPAS=UNASSIGNED are accepted.
 *
 * Note: the RD stays mapped across the walk because ripas_granule_measure()
 * needs it; it is unmapped on the common exit path (out_unlock_llt).
 */
unsigned long smc_rtt_init_ripas(unsigned long rd_addr,
				 unsigned long map_addr,
				 unsigned long ulevel)
{
	struct granule *g_rd, *g_rtt_root;
	struct rd *rd;
	unsigned long ipa_bits;
	struct rtt_walk wi;
	unsigned long s2tte, *s2tt;
	unsigned long ret;
	long level = (long)ulevel;
	int sl;

	g_rd = find_lock_granule(rd_addr, GRANULE_STATE_RD);
	if (g_rd == NULL) {
		return RMI_ERROR_INPUT;
	}

	rd = granule_map(g_rd, SLOT_RD);

	/* The RIM may only be extended before the realm is activated. */
	if (get_rd_state_locked(rd) != REALM_STATE_NEW) {
		buffer_unmap(rd);
		granule_unlock(g_rd);
		return RMI_ERROR_REALM;
	}

	if (!validate_rtt_entry_cmds(map_addr, level, rd)) {
		buffer_unmap(rd);
		granule_unlock(g_rd);
		return RMI_ERROR_INPUT;
	}

	/* Only protected IPAs carry a RIPAS. */
	if (!addr_in_par(rd, map_addr)) {
		buffer_unmap(rd);
		granule_unlock(g_rd);
		return RMI_ERROR_INPUT;
	}

	g_rtt_root = rd->s2_ctx.g_rtt;
	sl = realm_rtt_starting_level(rd);
	ipa_bits = realm_ipa_bits(rd);

	/* Enforce RD->RTT locking order; rd stays mapped for measurement. */
	granule_lock(g_rtt_root, GRANULE_STATE_RTT);
	granule_unlock(g_rd);

	rtt_walk_lock_unlock(g_rtt_root, sl, ipa_bits,
			     map_addr, level, &wi);
	if (wi.last_level != level) {
		ret = pack_return_code(RMI_ERROR_RTT, wi.last_level);
		goto out_unlock_llt;
	}

	s2tt = granule_map(wi.g_llt, SLOT_RTT);
	s2tte = s2tte_read(&s2tt[wi.index]);

	/* Allowed only for HIPAS=UNASSIGNED */
	if (s2tte_is_table(s2tte, level) || !s2tte_is_unassigned(s2tte)) {
		ret = pack_return_code(RMI_ERROR_RTT, (unsigned int)level);
		goto out_unmap_llt;
	}

	s2tte |= s2tte_create_ripas(RMI_RAM);

	s2tte_write(&s2tt[wi.index], s2tte);

	/* Record the RIPAS change in the realm's initial measurement. */
	ripas_granule_measure(rd, map_addr, level);

	ret = RMI_SUCCESS;

out_unmap_llt:
	buffer_unmap(s2tt);
out_unlock_llt:
	buffer_unmap(rd);
	granule_unlock(wi.g_llt);
	return ret;
}
1179
/*
 * RMI_RTT_SET_RIPAS handler: apply the RIPAS change requested by the realm
 * (recorded in the REC's set_ripas window) to the S2TTE at
 * (map_addr, ulevel). The target region must be exactly the next chunk of
 * the requested [addr, end) window and must not extend beyond it; on
 * success the REC's progress pointer (set_ripas.addr) is advanced.
 */
unsigned long smc_rtt_set_ripas(unsigned long rd_addr,
				unsigned long rec_addr,
				unsigned long map_addr,
				unsigned long ulevel,
				unsigned long uripas)
{
	struct granule *g_rd, *g_rec, *g_rtt_root;
	struct rec *rec;
	struct rd *rd;
	unsigned long map_size, ipa_bits;
	struct rtt_walk wi;
	unsigned long s2tte, *s2tt;
	struct realm_s2_context s2_ctx;
	long level = (long)ulevel;
	enum ripas ripas = (enum ripas)uripas;
	unsigned long ret;
	bool valid;
	int sl;

	/* Only RIPAS values up to RMI_RAM are defined. */
	if (ripas > RMI_RAM) {
		return RMI_ERROR_INPUT;
	}

	if (!find_lock_two_granules(rd_addr,
				    GRANULE_STATE_RD,
				    &g_rd,
				    rec_addr,
				    GRANULE_STATE_REC,
				    &g_rec)) {
		return RMI_ERROR_INPUT;
	}

	/* The REC must not be running on another PE. */
	if (granule_refcount_read_acquire(g_rec) != 0UL) {
		ret = RMI_ERROR_IN_USE;
		goto out_unlock_rec_rd;
	}

	rec = granule_map(g_rec, SLOT_REC);

	/* The REC must belong to the realm described by g_rd. */
	if (g_rd != rec->realm_info.g_rd) {
		ret = RMI_ERROR_REC;
		goto out_unmap_rec;
	}

	/* The RIPAS must match the value the realm asked for. */
	if (ripas != rec->set_ripas.ripas) {
		ret = RMI_ERROR_INPUT;
		goto out_unmap_rec;
	}

	if (map_addr != rec->set_ripas.addr) {
		/* Target region is not next chunk of requested region */
		ret = RMI_ERROR_INPUT;
		goto out_unmap_rec;
	}

	rd = granule_map(g_rd, SLOT_RD);

	if (!validate_rtt_entry_cmds(map_addr, level, rd)) {
		ret = RMI_ERROR_INPUT;
		goto out_unmap_rd;
	}

	map_size = s2tte_map_size(level);
	if (map_addr + map_size > rec->set_ripas.end) {
		/* Target region extends beyond end of requested region */
		ret = RMI_ERROR_INPUT;
		goto out_unmap_rd;
	}

	g_rtt_root = rd->s2_ctx.g_rtt;
	sl = realm_rtt_starting_level(rd);
	ipa_bits = realm_ipa_bits(rd);
	s2_ctx = rd->s2_ctx;

	/* RD->RTT locking order; RD lock is held until the common exit. */
	granule_lock(g_rtt_root, GRANULE_STATE_RTT);

	rtt_walk_lock_unlock(g_rtt_root, sl, ipa_bits,
			     map_addr, level, &wi);
	if (wi.last_level != level) {
		ret = pack_return_code(RMI_ERROR_RTT, wi.last_level);
		goto out_unlock_llt;
	}

	s2tt = granule_map(wi.g_llt, SLOT_RTT);
	s2tte = s2tte_read(&s2tt[wi.index]);

	/* Remember whether the entry was live before the update. */
	valid = s2tte_is_valid(s2tte, level);

	if (!update_ripas(&s2tte, level, ripas)) {
		ret = pack_return_code(RMI_ERROR_RTT, (unsigned int)level);
		goto out_unmap_llt;
	}

	s2tte_write(&s2tt[wi.index], s2tte);

	/* A live mapping downgraded to EMPTY must be invalidated. */
	if (valid && (ripas == RMI_EMPTY)) {
		if (level == RTT_PAGE_LEVEL) {
			invalidate_page(&s2_ctx, map_addr);
		} else {
			invalidate_block(&s2_ctx, map_addr);
		}
	}

	/* Advance the REC's progress through the requested window. */
	rec->set_ripas.addr += map_size;

	ret = RMI_SUCCESS;

out_unmap_llt:
	buffer_unmap(s2tt);
out_unlock_llt:
	granule_unlock(wi.g_llt);
out_unmap_rd:
	buffer_unmap(rd);
out_unmap_rec:
	buffer_unmap(rec);
out_unlock_rec_rd:
	granule_unlock(g_rec);
	granule_unlock(g_rd);
	return ret;
}
1299}