/*
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * SPDX-FileCopyrightText: Copyright TF-RMM Contributors.
 */

#include <assert.h>
#include <buffer.h>
#include <dev_granule.h>
#include <errno.h>
#include <granule.h>
#include <measurement.h>
#include <realm.h>
#include <ripas.h>
#include <s2tt.h>
#include <smc-handler.h>
#include <smc-rmi.h>
#include <smc.h>
#include <status.h>
#include <stddef.h>
#include <string.h>

/*
 * Validate the map_addr value passed to
 * RMI_RTT_*, RMI_DATA_* and RMI_DEV_MEM_* commands.
 */
static bool validate_map_addr(unsigned long map_addr,
                              long level,
                              struct rd *rd)
{
        return ((map_addr < realm_ipa_size(rd)) &&
                s2tte_is_addr_lvl_aligned(&(rd->s2_ctx), map_addr, level));
}
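
/*
 * Illustrative example (assuming a 4KB translation granule and a realm with
 * a 40-bit IPA space): map_addr 0x1000 at level S2TT_PAGE_LEVEL passes both
 * checks, whereas 0x1001 fails the alignment check and any address at or
 * above the realm IPA size fails the range check.
 */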

/*
 * Structure commands can operate on all RTTs except for the root RTT so
 * the minimal valid level is the stage 2 starting level + 1.
 */
static bool validate_rtt_structure_cmds(unsigned long map_addr,
                                        long level,
                                        struct rd *rd)
{
        int min_level = realm_rtt_starting_level(rd) + 1;

        if ((level < min_level) || (level > S2TT_PAGE_LEVEL)) {
                return false;
        }
        return validate_map_addr(map_addr, level - 1L, rd);
}

/*
 * Map/Unmap commands can operate up to a level 1 block entry, so the minimum
 * valid level is the one that maps the largest supported block size.
 */
static bool validate_rtt_map_cmds(unsigned long map_addr,
                                  long level,
                                  struct rd *rd)
{
        if ((level < S2TT_MIN_BLOCK_LEVEL) || (level > S2TT_PAGE_LEVEL)) {
                return false;
        }
        return validate_map_addr(map_addr, level, rd);
}

/*
 * Entry commands can operate on any entry so the minimal valid level is the
 * stage 2 starting level.
 */
static bool validate_rtt_entry_cmds(unsigned long map_addr,
                                    long level,
                                    struct rd *rd)
{
        if ((level < realm_rtt_starting_level(rd)) ||
            (level > S2TT_PAGE_LEVEL)) {
                return false;
        }
        return validate_map_addr(map_addr, level, rd);
}

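/*
 * Note: when an RTT walk stops short of the requested level, the handlers
 * below return RMI_ERROR_RTT packed (via pack_return_code()) with the level
 * at which the walk terminated, so the caller can tell how far it progressed.
 */
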
unsigned long smc_rtt_create(unsigned long rd_addr,
                             unsigned long rtt_addr,
                             unsigned long map_addr,
                             unsigned long ulevel)
{
        struct granule *g_rd;
        struct granule *g_tbl;
        struct rd *rd;
        struct s2tt_walk wi;
        unsigned long *s2tt, *parent_s2tt, parent_s2tte;
        long level = (long)ulevel;
        unsigned long ret;
        struct s2tt_context s2_ctx;

        if (!find_lock_two_granules(rtt_addr,
                                    GRANULE_STATE_DELEGATED,
                                    &g_tbl,
                                    rd_addr,
                                    GRANULE_STATE_RD,
                                    &g_rd)) {
                return RMI_ERROR_INPUT;
        }

        rd = buffer_granule_map(g_rd, SLOT_RD);
        assert(rd != NULL);

        if (!validate_rtt_structure_cmds(map_addr, level, rd)) {
                buffer_unmap(rd);
                granule_unlock(g_rd);
                granule_unlock(g_tbl);
                return RMI_ERROR_INPUT;
        }

        s2_ctx = rd->s2_ctx;
        buffer_unmap(rd);

        /*
         * If LPA2 is disabled for the realm, then `rtt_addr` must not be
         * more than 48 bits wide.
         */
        if (!s2_ctx.enable_lpa2) {
                if ((rtt_addr >= (UL(1) << S2TT_MAX_PA_BITS))) {
                        granule_unlock(g_rd);
                        granule_unlock(g_tbl);
                        return RMI_ERROR_INPUT;
                }
        }

        /*
         * Lock the RTT root. Enforcing the RD->RTT locking order is enough
         * to guarantee deadlock-free locking.
         */
        granule_lock(s2_ctx.g_rtt, GRANULE_STATE_RTT);

        /* Unlock RD after locking RTT Root */
        granule_unlock(g_rd);

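        /*
         * Walk to (level - 1), i.e. to the prospective parent of the new RTT.
         * The walk returns with wi.g_llt (the last-level table reached)
         * locked via hand-over-hand locking.
         */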
        s2tt_walk_lock_unlock(&s2_ctx, map_addr, level - 1L, &wi);
        if (wi.last_level != (level - 1L)) {
                ret = pack_return_code(RMI_ERROR_RTT,
                                       (unsigned char)wi.last_level);
                goto out_unlock_llt;
        }

        parent_s2tt = buffer_granule_map(wi.g_llt, SLOT_RTT);
        assert(parent_s2tt != NULL);

        parent_s2tte = s2tte_read(&parent_s2tt[wi.index]);
        s2tt = buffer_granule_map(g_tbl, SLOT_DELEGATED);
        assert(s2tt != NULL);

        if (s2tte_is_unassigned_empty(&s2_ctx, parent_s2tte)) {
                s2tt_init_unassigned_empty(&s2_ctx, s2tt);

                /*
                 * Atomically increase the refcount of the parent. The granule
                 * was locked during the table walk (hand-over-hand locking).
                 * Acquire/release semantics are not required because the
                 * table is always accessed while locked.
                 */
                atomic_granule_get(wi.g_llt);

        } else if (s2tte_is_unassigned_ram(&s2_ctx, parent_s2tte)) {
                s2tt_init_unassigned_ram(&s2_ctx, s2tt);
                atomic_granule_get(wi.g_llt);

        } else if (s2tte_is_unassigned_ns(&s2_ctx, parent_s2tte)) {
                s2tt_init_unassigned_ns(&s2_ctx, s2tt);
                atomic_granule_get(wi.g_llt);

        } else if (s2tte_is_unassigned_destroyed(&s2_ctx, parent_s2tte)) {
                s2tt_init_unassigned_destroyed(&s2_ctx, s2tt);
                atomic_granule_get(wi.g_llt);

        } else if (s2tte_is_assigned_destroyed(&s2_ctx, parent_s2tte,
                                               level - 1L)) {
                unsigned long block_pa;

                /*
                 * We should observe parent assigned s2tte only when
                 * we create tables above this level.
                 */
                assert(level > S2TT_MIN_BLOCK_LEVEL);

                block_pa = s2tte_pa(&s2_ctx, parent_s2tte, level - 1L);

                s2tt_init_assigned_destroyed(&s2_ctx, s2tt, block_pa, level);

                /*
                 * Increase the refcount to mark the granule as in-use. refcount
                 * is incremented by S2TTES_PER_S2TT (ref RTT unfolding).
                 */
                granule_refcount_inc(g_tbl, (unsigned short)S2TTES_PER_S2TT);

        } else if (s2tte_is_assigned_empty(&s2_ctx, parent_s2tte, level - 1L)) {
                unsigned long block_pa;

                /*
                 * We should observe parent assigned s2tte only when
                 * we create tables above this level.
                 */
                assert(level > S2TT_MIN_BLOCK_LEVEL);

                block_pa = s2tte_pa(&s2_ctx, parent_s2tte, level - 1L);

                s2tt_init_assigned_empty(&s2_ctx, s2tt, block_pa, level);

                /*
                 * Increase the refcount to mark the granule as in-use. refcount
                 * is incremented by S2TTES_PER_S2TT (ref RTT unfolding).
                 */
                granule_refcount_inc(g_tbl, (unsigned short)S2TTES_PER_S2TT);

        } else if (s2tte_is_assigned_ram(&s2_ctx, parent_s2tte, level - 1L)) {
                unsigned long block_pa;

                /*
                 * We should observe parent valid s2tte only when
                 * we create tables above this level.
                 */
                assert(level > S2TT_MIN_BLOCK_LEVEL);

                /*
                 * Break before make. This may cause spurious S2 aborts.
                 */
                s2tte_write(&parent_s2tt[wi.index], 0UL);
                s2tt_invalidate_block(&s2_ctx, map_addr);

                block_pa = s2tte_pa(&s2_ctx, parent_s2tte, level - 1L);

                s2tt_init_assigned_ram(&s2_ctx, s2tt, block_pa, level);

                /*
                 * Increase the refcount to mark the granule as in-use. refcount
                 * is incremented by S2TTES_PER_S2TT (ref RTT unfolding).
                 */
                granule_refcount_inc(g_tbl, (unsigned short)S2TTES_PER_S2TT);

        } else if (s2tte_is_assigned_ns(&s2_ctx, parent_s2tte, level - 1L)) {
                unsigned long block_pa;

                /*
                 * We should observe parent assigned_ns s2tte only when
                 * we create tables above this level.
                 */
                assert(level > S2TT_MIN_BLOCK_LEVEL);

                /*
                 * Break before make. This may cause spurious S2 aborts.
                 */
                s2tte_write(&parent_s2tt[wi.index], 0UL);
                s2tt_invalidate_block(&s2_ctx, map_addr);

                block_pa = s2tte_pa(&s2_ctx, parent_s2tte, level - 1L);

                s2tt_init_assigned_ns(&s2_ctx, s2tt, parent_s2tte,
                                      block_pa, level);

                /*
                 * Increment the refcount on the parent for the new RTT we are
                 * about to add. The NS block entry doesn't have a refcount
                 * on the parent RTT.
                 */
                atomic_granule_get(wi.g_llt);

        } else if (s2tte_is_assigned_dev_empty(&s2_ctx, parent_s2tte, level - 1L)) {
                unsigned long block_pa;

                /*
                 * We should observe parent assigned s2tte only when
                 * we create tables above this level.
                 */
                assert(level > S2TT_MIN_DEV_BLOCK_LEVEL);

                block_pa = s2tte_pa(&s2_ctx, parent_s2tte, level - 1L);

                s2tt_init_assigned_dev_empty(&s2_ctx, s2tt, block_pa, level);

                /*
                 * Increase the refcount to mark the granule as in-use. refcount
                 * is incremented by S2TTES_PER_S2TT (ref RTT unfolding).
                 */
                granule_refcount_inc(g_tbl, (unsigned short)S2TTES_PER_S2TT);

        } else if (s2tte_is_assigned_dev_destroyed(&s2_ctx, parent_s2tte, level - 1L)) {
                unsigned long block_pa;

                /*
                 * We should observe parent assigned s2tte only when
                 * we create tables above this level.
                 */
                assert(level > S2TT_MIN_DEV_BLOCK_LEVEL);

                block_pa = s2tte_pa(&s2_ctx, parent_s2tte, level - 1L);

                s2tt_init_assigned_dev_destroyed(&s2_ctx, s2tt, block_pa, level);

                /*
                 * Increase the refcount to mark the granule as in-use. refcount
                 * is incremented by S2TTES_PER_S2TT (ref RTT unfolding).
                 */
                granule_refcount_inc(g_tbl, (unsigned short)S2TTES_PER_S2TT);

        } else if (s2tte_is_assigned_dev_dev(&s2_ctx, parent_s2tte, level - 1L)) {
                unsigned long block_pa;

                /*
                 * We should observe parent valid s2tte only when
                 * we create tables above this level.
                 */
                assert(level > S2TT_MIN_DEV_BLOCK_LEVEL);

                /*
                 * Break before make. This may cause spurious S2 aborts.
                 */
                s2tte_write(&parent_s2tt[wi.index], 0UL);
                s2tt_invalidate_block(&s2_ctx, map_addr);

                block_pa = s2tte_pa(&s2_ctx, parent_s2tte, level - 1L);

                s2tt_init_assigned_dev_dev(&s2_ctx, s2tt, parent_s2tte, block_pa, level);

                /*
                 * Increase the refcount to mark the granule as in-use. refcount
                 * is incremented by S2TTES_PER_S2TT (ref RTT unfolding).
                 */
                granule_refcount_inc(g_tbl, (unsigned short)S2TTES_PER_S2TT);

        } else if (s2tte_is_table(&s2_ctx, parent_s2tte, level - 1L)) {
                ret = pack_return_code(RMI_ERROR_RTT,
                                       (unsigned char)(level - 1L));
                goto out_unmap_table;

        } else {
                assert(false);
        }

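        /*
         * At this point the new table mirrors the parent entry; link it into
         * the RTT tree by replacing the parent entry with a table descriptor.
         */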
        ret = RMI_SUCCESS;

        granule_set_state(g_tbl, GRANULE_STATE_RTT);

        parent_s2tte = s2tte_create_table(&s2_ctx, rtt_addr, level - 1L);
        s2tte_write(&parent_s2tt[wi.index], parent_s2tte);

out_unmap_table:
        buffer_unmap(s2tt);
        buffer_unmap(parent_s2tt);
out_unlock_llt:
        granule_unlock(wi.g_llt);
        granule_unlock(g_tbl);
        return ret;
}

void smc_rtt_fold(unsigned long rd_addr,
                  unsigned long map_addr,
                  unsigned long ulevel,
                  struct smc_result *res)
{
        struct granule *g_rd;
        struct granule *g_tbl;
        struct rd *rd;
        struct s2tt_walk wi;
        unsigned long *table, *parent_s2tt, parent_s2tte;
        long level = (long)ulevel;
        unsigned long rtt_addr;
        unsigned long ret;
        struct s2tt_context s2_ctx;

        g_rd = find_lock_granule(rd_addr, GRANULE_STATE_RD);
        if (g_rd == NULL) {
                res->x[0] = RMI_ERROR_INPUT;
                return;
        }

        rd = buffer_granule_map(g_rd, SLOT_RD);
        assert(rd != NULL);

        if (!validate_rtt_structure_cmds(map_addr, level, rd)) {
                buffer_unmap(rd);
                granule_unlock(g_rd);
                res->x[0] = RMI_ERROR_INPUT;
                return;
        }

        s2_ctx = rd->s2_ctx;
        buffer_unmap(rd);
        granule_lock(s2_ctx.g_rtt, GRANULE_STATE_RTT);
        granule_unlock(g_rd);

        s2tt_walk_lock_unlock(&s2_ctx, map_addr, level - 1L, &wi);
        if (wi.last_level != (level - 1L)) {
                ret = pack_return_code(RMI_ERROR_RTT,
                                       (unsigned char)wi.last_level);
                goto out_unlock_parent_table;
        }

        parent_s2tt = buffer_granule_map(wi.g_llt, SLOT_RTT);
        assert(parent_s2tt != NULL);

        parent_s2tte = s2tte_read(&parent_s2tt[wi.index]);
        if (!s2tte_is_table(&s2_ctx, parent_s2tte, level - 1L)) {
                ret = pack_return_code(RMI_ERROR_RTT,
                                       (unsigned char)(level - 1L));
                goto out_unmap_parent_table;
        }

        rtt_addr = s2tte_pa(&s2_ctx, parent_s2tte, level - 1L);
        g_tbl = find_lock_granule(rtt_addr, GRANULE_STATE_RTT);

        /*
         * A table descriptor S2TTE always points to a TABLE granule.
         */
        assert(g_tbl != NULL);

        table = buffer_granule_map(g_tbl, SLOT_RTT2);
        assert(table != NULL);

        /*
         * The command can succeed only if all 512 S2TTEs are of the same type.
         * We check the table's reference counter first to speed up the case
         * where the host is merely guessing whether a memory region can be
         * folded.
         */
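        /*
         * A refcount of zero means the table holds no refcounted (assigned,
         * protected) entries; it may still be a homogeneous block of
         * unassigned or ASSIGNED_NS entries. A refcount of S2TTES_PER_S2TT
         * means every entry is a live assigned entry. Any other value implies
         * mixed content, which cannot be folded.
         */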
        if (granule_refcount_read(g_tbl) == 0U) {
                if (s2tt_is_unassigned_destroyed_block(&s2_ctx, table)) {
                        parent_s2tte = s2tte_create_unassigned_destroyed(&s2_ctx);
                } else if (s2tt_is_unassigned_empty_block(&s2_ctx, table)) {
                        parent_s2tte = s2tte_create_unassigned_empty(&s2_ctx);
                } else if (s2tt_is_unassigned_ram_block(&s2_ctx, table)) {
                        parent_s2tte = s2tte_create_unassigned_ram(&s2_ctx);
                } else if (s2tt_is_unassigned_ns_block(&s2_ctx, table)) {
                        parent_s2tte = s2tte_create_unassigned_ns(&s2_ctx);
                } else if (s2tt_maps_assigned_ns_block(&s2_ctx, table, level)) {

                        /*
                         * The RMM specification does not allow creating block
                         * entries below S2TT_MIN_BLOCK_LEVEL for the
                         * ASSIGNED_NS state.
                         */
                        if (level <= S2TT_MIN_BLOCK_LEVEL) {
                                ret = pack_return_code(RMI_ERROR_RTT,
                                                       (unsigned char)wi.last_level);
                                goto out_unmap_table;
                        }
                        unsigned long s2tte = s2tte_read(&table[0]);

                        /*
                         * Since s2tt_maps_assigned_ns_block() has succeeded,
                         * the PA in the first entry of the table is aligned
                         * at the parent level. Use the TTE from the first
                         * entry directly as it also has the NS attributes to
                         * be used for the parent block entry.
                         */
                        parent_s2tte = s2tte_create_assigned_ns(&s2_ctx, s2tte, level - 1L);
                } else {
                        /*
                         * The table holds a mixture of destroyed and
                         * unassigned entries.
                         */
                        ret = pack_return_code(RMI_ERROR_RTT,
                                               (unsigned char)level);
                        goto out_unmap_table;
                }
                atomic_granule_put(wi.g_llt);
        } else if (granule_refcount_read(g_tbl) ==
                                        (unsigned short)S2TTES_PER_S2TT) {

                unsigned long s2tte, block_pa;

                /*
                 * The RMM specification does not allow creating block
                 * entries below S2TT_MIN_BLOCK_LEVEL even though this is
                 * permitted by the Arm architecture.
                 * Hence ensure that the table being folded is at a level
                 * higher than S2TT_MIN_BLOCK_LEVEL.
                 *
                 * A fully populated table cannot be folded if that would
                 * create a block mapping below S2TT_MIN_BLOCK_LEVEL.
                 */
                if (level <= S2TT_MIN_BLOCK_LEVEL) {
                        ret = pack_return_code(RMI_ERROR_RTT,
                                               (unsigned char)wi.last_level);
                        goto out_unmap_table;
                }

                s2tte = s2tte_read(&table[0]);
                block_pa = s2tte_pa(&s2_ctx, s2tte, level);

                /*
                 * The table must also refer to a contiguous block through the
                 * same type of s2tte, either Assigned or Valid.
                 */
                if (s2tt_maps_assigned_empty_block(&s2_ctx, table, level)) {
                        parent_s2tte = s2tte_create_assigned_empty(&s2_ctx,
                                                                   block_pa,
                                                                   level - 1L);
                } else if (s2tt_maps_assigned_ram_block(&s2_ctx,
                                                        table, level)) {
                        parent_s2tte = s2tte_create_assigned_ram(&s2_ctx,
                                                                 block_pa,
                                                                 level - 1L);
                } else if (s2tt_maps_assigned_destroyed_block(&s2_ctx,
                                                              table, level)) {
                        parent_s2tte = s2tte_create_assigned_destroyed(&s2_ctx,
                                                                       block_pa,
                                                                       level - 1L);
                } else if (s2tt_maps_assigned_dev_empty_block(&s2_ctx,
                                                              table, level)) {
                        parent_s2tte = s2tte_create_assigned_dev_empty(&s2_ctx,
                                                                       block_pa,
                                                                       level - 1L);
                } else if (s2tt_maps_assigned_dev_destroyed_block(&s2_ctx,
                                                                  table, level)) {
                        parent_s2tte = s2tte_create_assigned_dev_destroyed(&s2_ctx,
                                                                           block_pa,
                                                                           level - 1L);
                } else if (s2tt_maps_assigned_dev_dev_block(&s2_ctx, table, level)) {
                        parent_s2tte = s2tte_create_assigned_dev_dev(&s2_ctx,
                                                                     s2tte,
                                                                     level - 1L);

                /* The table contains mixed entries that cannot be folded */
                } else {
                        ret = pack_return_code(RMI_ERROR_RTT,
                                               (unsigned char)level);
                        goto out_unmap_table;
                }

                granule_refcount_dec(g_tbl, (unsigned short)S2TTES_PER_S2TT);
        } else {
                /*
                 * The table holds a mixture of different types of s2ttes.
                 */
                ret = pack_return_code(RMI_ERROR_RTT,
                                       (unsigned char)level);
                goto out_unmap_table;
        }

        ret = RMI_SUCCESS;
        res->x[1] = rtt_addr;

        /*
         * Break before make.
         */
        s2tte_write(&parent_s2tt[wi.index], 0UL);

        if (s2tte_is_assigned_ram(&s2_ctx, parent_s2tte, level - 1L) ||
            s2tte_is_assigned_ns(&s2_ctx, parent_s2tte, level - 1L) ||
            s2tte_is_assigned_dev_dev(&s2_ctx, parent_s2tte, level - 1L)) {
                s2tt_invalidate_pages_in_block(&s2_ctx, map_addr);
        } else {
                s2tt_invalidate_block(&s2_ctx, map_addr);
        }

        s2tte_write(&parent_s2tt[wi.index], parent_s2tte);

        granule_memzero_mapped(table);
        granule_set_state(g_tbl, GRANULE_STATE_DELEGATED);

out_unmap_table:
        buffer_unmap(table);
        granule_unlock(g_tbl);
out_unmap_parent_table:
        buffer_unmap(parent_s2tt);
out_unlock_parent_table:
        granule_unlock(wi.g_llt);
        res->x[0] = ret;
}

void smc_rtt_destroy(unsigned long rd_addr,
                     unsigned long map_addr,
                     unsigned long ulevel,
                     struct smc_result *res)
{
        struct granule *g_rd;
        struct granule *g_tbl;
        struct rd *rd;
        struct s2tt_walk wi;
        unsigned long *table, *parent_s2tt, parent_s2tte;
        long level = (long)ulevel;
        unsigned long rtt_addr;
        unsigned long ret;
        struct s2tt_context s2_ctx;
        bool in_par, skip_non_live = false;

        g_rd = find_lock_granule(rd_addr, GRANULE_STATE_RD);
        if (g_rd == NULL) {
                res->x[0] = RMI_ERROR_INPUT;
                res->x[2] = 0UL;
                return;
        }

        rd = buffer_granule_map(g_rd, SLOT_RD);
        assert(rd != NULL);

        if (!validate_rtt_structure_cmds(map_addr, level, rd)) {
                buffer_unmap(rd);
                granule_unlock(g_rd);
                res->x[0] = RMI_ERROR_INPUT;
                res->x[2] = 0UL;
                return;
        }

        s2_ctx = rd->s2_ctx;
        in_par = addr_in_par(rd, map_addr);
        buffer_unmap(rd);
        granule_lock(s2_ctx.g_rtt, GRANULE_STATE_RTT);
        granule_unlock(g_rd);

        s2tt_walk_lock_unlock(&s2_ctx, map_addr, level - 1L, &wi);

        parent_s2tt = buffer_granule_map(wi.g_llt, SLOT_RTT);
        assert(parent_s2tt != NULL);

        parent_s2tte = s2tte_read(&parent_s2tt[wi.index]);

        if ((wi.last_level != (level - 1L)) ||
            !s2tte_is_table(&s2_ctx, parent_s2tte, level - 1L)) {
                ret = pack_return_code(RMI_ERROR_RTT,
                                       (unsigned char)wi.last_level);
                skip_non_live = true;
                goto out_unmap_parent_table;
        }

        rtt_addr = s2tte_pa(&s2_ctx, parent_s2tte, level - 1L);

        /*
         * Lock the RTT granule. The 'rtt_addr' is verified, thus can be treated
         * as an internal granule.
         */
        g_tbl = find_lock_granule(rtt_addr, GRANULE_STATE_RTT);

        /*
         * A table descriptor S2TTE always points to a TABLE granule.
         */
        assert(g_tbl != NULL);

        /*
         * Read the refcount value. RTT granule is always accessed locked, thus
         * the refcount can be accessed without atomic operations.
         */
        if (granule_refcount_read(g_tbl) != 0U) {
                ret = pack_return_code(RMI_ERROR_RTT, (unsigned char)level);
                goto out_unlock_table;
        }

        ret = RMI_SUCCESS;
        res->x[1] = rtt_addr;
        skip_non_live = true;

        table = buffer_granule_map(g_tbl, SLOT_RTT2);
        assert(table != NULL);

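        /*
         * Refill the parent entry according to the IPA space: protected IPAs
         * collapse to UNASSIGNED_DESTROYED, unprotected IPAs to UNASSIGNED_NS.
         */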
        if (in_par) {
                parent_s2tte = s2tte_create_unassigned_destroyed(&s2_ctx);
        } else {
                parent_s2tte = s2tte_create_unassigned_ns(&s2_ctx);
        }

        atomic_granule_put(wi.g_llt);

        /*
         * Break before make. Note that this may cause spurious S2 aborts.
         */
        s2tte_write(&parent_s2tt[wi.index], 0UL);

        if (in_par) {
                /* For protected IPA, all S2TTEs in the RTT will be invalid */
                s2tt_invalidate_block(&s2_ctx, map_addr);
        } else {
                /*
                 * For unprotected IPA, invalidate the TLB for the entire range
                 * mapped by the RTT as it may have valid NS mappings.
                 */
                s2tt_invalidate_pages_in_block(&s2_ctx, map_addr);
        }

        s2tte_write(&parent_s2tt[wi.index], parent_s2tte);

        granule_memzero_mapped(table);
        granule_set_state(g_tbl, GRANULE_STATE_DELEGATED);

        buffer_unmap(table);
out_unlock_table:
        granule_unlock(g_tbl);
out_unmap_parent_table:
        if (skip_non_live) {
                res->x[2] = s2tt_skip_non_live_entries(&s2_ctx, map_addr,
                                                       parent_s2tt, &wi);
        } else {
                res->x[2] = map_addr;
        }
        buffer_unmap(parent_s2tt);
        granule_unlock(wi.g_llt);
        res->x[0] = ret;
}

enum map_unmap_ns_op {
        MAP_NS,
        UNMAP_NS
};
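
/*
 * MAP_NS installs a host-provided NS S2TTE (validated first) at an
 * UNASSIGNED_NS entry; UNMAP_NS reverts an ASSIGNED_NS entry back to
 * UNASSIGNED_NS and invalidates the corresponding TLB entries.
 */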

/*
 * We don't hold a reference on the NS granule when it is
 * mapped into a realm. Instead we rely on the guarantees
 * provided by the architecture to ensure that an NS access
 * to a protected granule is prohibited even within the realm.
 */
static void map_unmap_ns(unsigned long rd_addr,
                         unsigned long map_addr,
                         long level,
                         unsigned long host_s2tte,
                         enum map_unmap_ns_op op,
                         struct smc_result *res)
{
        struct granule *g_rd;
        struct rd *rd;
        unsigned long *s2tt, s2tte;
        struct s2tt_walk wi;
        struct s2tt_context s2_ctx;

        g_rd = find_lock_granule(rd_addr, GRANULE_STATE_RD);
        if (g_rd == NULL) {
                res->x[0] = RMI_ERROR_INPUT;
                return;
        }

        rd = buffer_granule_map(g_rd, SLOT_RD);
        assert(rd != NULL);

        s2_ctx = rd->s2_ctx;

        if (op == MAP_NS) {
                if (!host_ns_s2tte_is_valid(&s2_ctx, host_s2tte, level)) {
                        buffer_unmap(rd);
                        granule_unlock(g_rd);
                        res->x[0] = RMI_ERROR_INPUT;
                        return;
                }
        }

        if (!validate_rtt_map_cmds(map_addr, level, rd)) {
                buffer_unmap(rd);
                granule_unlock(g_rd);
                res->x[0] = RMI_ERROR_INPUT;
                return;
        }

        /* Check if map_addr is outside PAR */
        if (addr_in_par(rd, map_addr)) {
                buffer_unmap(rd);
                granule_unlock(g_rd);
                res->x[0] = RMI_ERROR_INPUT;
                return;
        }

        buffer_unmap(rd);
        granule_lock(s2_ctx.g_rtt, GRANULE_STATE_RTT);
        granule_unlock(g_rd);

        s2tt_walk_lock_unlock(&s2_ctx, map_addr, level, &wi);

        /*
         * For UNMAP_NS, we need to map the table and look
         * for the end of the non-live region.
         */
        if ((op == MAP_NS) && (wi.last_level != level)) {
                res->x[0] = pack_return_code(RMI_ERROR_RTT,
                                             (unsigned char)wi.last_level);
                goto out_unlock_llt;
        }

        s2tt = buffer_granule_map(wi.g_llt, SLOT_RTT);
        assert(s2tt != NULL);

        s2tte = s2tte_read(&s2tt[wi.index]);

        if (op == MAP_NS) {
                if (!s2tte_is_unassigned_ns(&s2_ctx, s2tte)) {
                        res->x[0] = pack_return_code(RMI_ERROR_RTT,
                                                     (unsigned char)level);
                        goto out_unmap_table;
                }

                s2tte = s2tte_create_assigned_ns(&s2_ctx, host_s2tte, level);
                s2tte_write(&s2tt[wi.index], s2tte);

        } else if (op == UNMAP_NS) {
                /*
                 * The following check also verifies that map_addr is outside
                 * PAR, as valid_NS s2tte may only cover outside PAR IPA range.
                 */
                bool assigned_ns = s2tte_is_assigned_ns(&s2_ctx, s2tte,
                                                        wi.last_level);

                if ((wi.last_level != level) || !assigned_ns) {
                        res->x[0] = pack_return_code(RMI_ERROR_RTT,
                                                     (unsigned char)wi.last_level);
                        goto out_unmap_table;
                }

                s2tte = s2tte_create_unassigned_ns(&s2_ctx);
                s2tte_write(&s2tt[wi.index], s2tte);
                if (level == S2TT_PAGE_LEVEL) {
                        s2tt_invalidate_page(&s2_ctx, map_addr);
                } else {
                        s2tt_invalidate_block(&s2_ctx, map_addr);
                }
        }

        res->x[0] = RMI_SUCCESS;

out_unmap_table:
        if (op == UNMAP_NS) {
                res->x[1] = s2tt_skip_non_live_entries(&s2_ctx, map_addr,
                                                       s2tt, &wi);
        }
        buffer_unmap(s2tt);
out_unlock_llt:
        granule_unlock(wi.g_llt);
}

unsigned long smc_rtt_map_unprotected(unsigned long rd_addr,
                                      unsigned long map_addr,
                                      unsigned long ulevel,
                                      unsigned long s2tte)
{
        long level = (long)ulevel;
        struct smc_result res;

        (void)memset(&res, 0, sizeof(struct smc_result));
        if ((level < S2TT_MIN_BLOCK_LEVEL) || (level > S2TT_PAGE_LEVEL)) {
                return RMI_ERROR_INPUT;
        }

        map_unmap_ns(rd_addr, map_addr, level, s2tte, MAP_NS, &res);

        return res.x[0];
}

void smc_rtt_unmap_unprotected(unsigned long rd_addr,
                               unsigned long map_addr,
                               unsigned long ulevel,
                               struct smc_result *res)
{
        long level = (long)ulevel;

        if ((level < S2TT_MIN_BLOCK_LEVEL) || (level > S2TT_PAGE_LEVEL)) {
                res->x[0] = RMI_ERROR_INPUT;
                return;
        }

        map_unmap_ns(rd_addr, map_addr, level, 0UL, UNMAP_NS, res);
}

void smc_rtt_read_entry(unsigned long rd_addr,
                        unsigned long map_addr,
                        unsigned long ulevel,
                        struct smc_result *res)
{
        struct granule *g_rd;
        struct rd *rd;
        struct s2tt_walk wi;
        unsigned long *s2tt, s2tte;
        long level = (long)ulevel;
        struct s2tt_context s2_ctx;

        g_rd = find_lock_granule(rd_addr, GRANULE_STATE_RD);
        if (g_rd == NULL) {
                res->x[0] = RMI_ERROR_INPUT;
                return;
        }

        rd = buffer_granule_map(g_rd, SLOT_RD);
        assert(rd != NULL);

        if (!validate_rtt_entry_cmds(map_addr, level, rd)) {
                buffer_unmap(rd);
                granule_unlock(g_rd);
                res->x[0] = RMI_ERROR_INPUT;
                return;
        }

        s2_ctx = rd->s2_ctx;
        buffer_unmap(rd);

        granule_lock(s2_ctx.g_rtt, GRANULE_STATE_RTT);
        granule_unlock(g_rd);

        s2tt_walk_lock_unlock(&s2_ctx, map_addr, level, &wi);
        s2tt = buffer_granule_map(wi.g_llt, SLOT_RTT);
        assert(s2tt != NULL);

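        /*
         * Output register layout: x[1] = walk level, x[2] = entry state,
         * x[3] = output descriptor/PA (0 when not applicable), x[4] = RIPAS.
         */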
        s2tte = s2tte_read(&s2tt[wi.index]);
        res->x[1] = (unsigned long)wi.last_level;

        if (s2tte_is_unassigned_empty(&s2_ctx, s2tte)) {
                res->x[2] = RMI_UNASSIGNED;
                res->x[3] = 0UL;
                res->x[4] = (unsigned long)RIPAS_EMPTY;
        } else if (s2tte_is_unassigned_ram(&s2_ctx, s2tte)) {
                res->x[2] = RMI_UNASSIGNED;
                res->x[3] = 0UL;
                res->x[4] = (unsigned long)RIPAS_RAM;
        } else if (s2tte_is_unassigned_destroyed(&s2_ctx, s2tte)) {
                res->x[2] = RMI_UNASSIGNED;
                res->x[3] = 0UL;
                res->x[4] = (unsigned long)RIPAS_DESTROYED;
        } else if (s2tte_is_assigned_empty(&s2_ctx, s2tte, wi.last_level)) {
                res->x[2] = RMI_ASSIGNED;
                res->x[3] = s2tte_pa(&s2_ctx, s2tte, wi.last_level);
                res->x[4] = (unsigned long)RIPAS_EMPTY;
        } else if (s2tte_is_assigned_ram(&s2_ctx, s2tte, wi.last_level)) {
                res->x[2] = RMI_ASSIGNED;
                res->x[3] = s2tte_pa(&s2_ctx, s2tte, wi.last_level);
                res->x[4] = (unsigned long)RIPAS_RAM;
        } else if (s2tte_is_assigned_destroyed(&s2_ctx, s2tte, wi.last_level)) {
                res->x[2] = RMI_ASSIGNED;
                res->x[3] = s2tte_pa(&s2_ctx, s2tte, wi.last_level);
                res->x[4] = (unsigned long)RIPAS_DESTROYED;
        } else if (s2tte_is_assigned_dev_empty(&s2_ctx, s2tte, wi.last_level)) {
                res->x[2] = RMI_ASSIGNED_DEV;
                res->x[3] = s2tte_pa(&s2_ctx, s2tte, wi.last_level);
                res->x[4] = (unsigned long)RIPAS_EMPTY;
        } else if (s2tte_is_assigned_dev_destroyed(&s2_ctx, s2tte,
                                                   wi.last_level)) {
                res->x[2] = RMI_ASSIGNED_DEV;
                res->x[3] = 0UL;
                res->x[4] = (unsigned long)RIPAS_DESTROYED;
        } else if (s2tte_is_assigned_dev_dev(&s2_ctx, s2tte, wi.last_level)) {
                res->x[2] = RMI_ASSIGNED_DEV;
                res->x[3] = s2tte_pa(&s2_ctx, s2tte, wi.last_level);
                res->x[4] = (unsigned long)RIPAS_DEV;
        } else if (s2tte_is_unassigned_ns(&s2_ctx, s2tte)) {
                res->x[2] = RMI_UNASSIGNED;
                res->x[3] = 0UL;
                res->x[4] = (unsigned long)RIPAS_EMPTY;
        } else if (s2tte_is_assigned_ns(&s2_ctx, s2tte, wi.last_level)) {
                res->x[2] = RMI_ASSIGNED;
                res->x[3] = host_ns_s2tte(&s2_ctx, s2tte, wi.last_level);
                res->x[4] = (unsigned long)RIPAS_EMPTY;
        } else if (s2tte_is_table(&s2_ctx, s2tte, wi.last_level)) {
                res->x[2] = RMI_TABLE;
                res->x[3] = s2tte_pa(&s2_ctx, s2tte, wi.last_level);
                res->x[4] = (unsigned long)RIPAS_EMPTY;
        } else {
                assert(false);
        }

        buffer_unmap(s2tt);
        granule_unlock(wi.g_llt);

        res->x[0] = RMI_SUCCESS;
}

static unsigned long validate_data_create_unknown(unsigned long map_addr,
                                                  struct rd *rd)
{
        if (!addr_in_par(rd, map_addr)) {
                return RMI_ERROR_INPUT;
        }

        if (!validate_map_addr(map_addr, S2TT_PAGE_LEVEL, rd)) {
                return RMI_ERROR_INPUT;
        }

        return RMI_SUCCESS;
}

static unsigned long validate_data_create(unsigned long map_addr,
                                          struct rd *rd)
{
        if (get_rd_state_locked(rd) != REALM_NEW) {
                return RMI_ERROR_REALM;
        }

        return validate_data_create_unknown(map_addr, rd);
}

/*
 * Implements both RMI_DATA_CREATE and RMI_DATA_CREATE_UNKNOWN.
 *
 * If @g_src == NULL, implements RMI_DATA_CREATE_UNKNOWN;
 * otherwise implements RMI_DATA_CREATE.
 */
static unsigned long data_create(unsigned long rd_addr,
                                 unsigned long data_addr,
                                 unsigned long map_addr,
                                 struct granule *g_src,
                                 unsigned long flags)
{
        struct granule *g_data;
        struct granule *g_rd;
        struct rd *rd;
        struct s2tt_walk wi;
        struct s2tt_context *s2_ctx;
        unsigned long s2tte, *s2tt;
        unsigned char new_data_state = GRANULE_STATE_DELEGATED;
        unsigned long ret;

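        /*
         * new_data_state starts as GRANULE_STATE_DELEGATED and is only
         * promoted to GRANULE_STATE_DATA once the S2TTE has been written;
         * on any error path the granule is unlocked back in the delegated
         * state.
         */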
        if (!find_lock_two_granules(data_addr,
                                    GRANULE_STATE_DELEGATED,
                                    &g_data,
                                    rd_addr,
                                    GRANULE_STATE_RD,
                                    &g_rd)) {
                return RMI_ERROR_INPUT;
        }

        rd = buffer_granule_map(g_rd, SLOT_RD);
        assert(rd != NULL);

        ret = (g_src != NULL) ?
                validate_data_create(map_addr, rd) :
                validate_data_create_unknown(map_addr, rd);

        if (ret != RMI_SUCCESS) {
                goto out_unmap_rd;
        }

        s2_ctx = &(rd->s2_ctx);

        /*
         * If LPA2 is disabled for the realm, then `data_addr` must not be
         * more than 48 bits wide.
         */
        if (!s2_ctx->enable_lpa2) {
                if ((data_addr >= (UL(1) << S2TT_MAX_PA_BITS))) {
                        ret = RMI_ERROR_INPUT;
                        goto out_unmap_rd;
                }
        }

        granule_lock(s2_ctx->g_rtt, GRANULE_STATE_RTT);

        s2tt_walk_lock_unlock(s2_ctx, map_addr, S2TT_PAGE_LEVEL, &wi);
        if (wi.last_level != S2TT_PAGE_LEVEL) {
                ret = pack_return_code(RMI_ERROR_RTT,
                                       (unsigned char)wi.last_level);
                goto out_unlock_ll_table;
        }

        s2tt = buffer_granule_map(wi.g_llt, SLOT_RTT);
        assert(s2tt != NULL);

        s2tte = s2tte_read(&s2tt[wi.index]);
        if (!s2tte_is_unassigned(s2_ctx, s2tte)) {
                ret = pack_return_code(RMI_ERROR_RTT,
                                       (unsigned char)S2TT_PAGE_LEVEL);
                goto out_unmap_ll_table;
        }

        if (g_src != NULL) {
                bool ns_access_ok;
                void *data = buffer_granule_map(g_data, SLOT_DELEGATED);

                assert(data != NULL);

                ns_access_ok = ns_buffer_read(SLOT_NS, g_src, 0U,
                                              GRANULE_SIZE, data);
                if (!ns_access_ok) {
                        /*
                         * Some data may be copied before the failure. Zero
                         * g_data granule as it will remain in delegated state.
                         */
                        granule_memzero_mapped(data);
                        buffer_unmap(data);
                        ret = RMI_ERROR_INPUT;
                        goto out_unmap_ll_table;
                }

                measurement_data_granule_measure(
                        rd->measurement[RIM_MEASUREMENT_SLOT],
                        rd->algorithm,
                        data,
                        map_addr,
                        flags);
                buffer_unmap(data);

                s2tte = s2tte_create_assigned_ram(s2_ctx, data_addr,
                                                  S2TT_PAGE_LEVEL);
        } else {
                s2tte = s2tte_create_assigned_unchanged(s2_ctx, s2tte,
                                                        data_addr,
                                                        S2TT_PAGE_LEVEL);
        }

        new_data_state = GRANULE_STATE_DATA;

        s2tte_write(&s2tt[wi.index], s2tte);
        atomic_granule_get(wi.g_llt);

        ret = RMI_SUCCESS;

out_unmap_ll_table:
        buffer_unmap(s2tt);
out_unlock_ll_table:
        granule_unlock(wi.g_llt);
out_unmap_rd:
        buffer_unmap(rd);
        granule_unlock(g_rd);
        granule_unlock_transition(g_data, new_data_state);
        return ret;
}

unsigned long smc_data_create(unsigned long rd_addr,
                              unsigned long data_addr,
                              unsigned long map_addr,
                              unsigned long src_addr,
                              unsigned long flags)
{
        struct granule *g_src;

        if ((flags != RMI_NO_MEASURE_CONTENT) &&
            (flags != RMI_MEASURE_CONTENT)) {
                return RMI_ERROR_INPUT;
        }

        g_src = find_granule(src_addr);
        if ((g_src == NULL) ||
            (granule_unlocked_state(g_src) != GRANULE_STATE_NS)) {
                return RMI_ERROR_INPUT;
        }

        return data_create(rd_addr, data_addr, map_addr, g_src, flags);
}

unsigned long smc_data_create_unknown(unsigned long rd_addr,
                                      unsigned long data_addr,
                                      unsigned long map_addr)
{
        return data_create(rd_addr, data_addr, map_addr, NULL, 0);
}

void smc_data_destroy(unsigned long rd_addr,
                      unsigned long map_addr,
                      struct smc_result *res)
{
        struct granule *g_data;
        struct granule *g_rd;
        struct s2tt_walk wi;
        unsigned long data_addr, s2tte, *s2tt;
        struct rd *rd;
        struct s2tt_context s2_ctx;

        g_rd = find_lock_granule(rd_addr, GRANULE_STATE_RD);
        if (g_rd == NULL) {
                res->x[0] = RMI_ERROR_INPUT;
                res->x[2] = 0UL;
                return;
        }

        rd = buffer_granule_map(g_rd, SLOT_RD);
        assert(rd != NULL);

        if (!addr_in_par(rd, map_addr) ||
            !validate_map_addr(map_addr, S2TT_PAGE_LEVEL, rd)) {
                buffer_unmap(rd);
                granule_unlock(g_rd);
                res->x[0] = RMI_ERROR_INPUT;
                res->x[2] = 0UL;
                return;
        }

        s2_ctx = rd->s2_ctx;
        buffer_unmap(rd);

        granule_lock(s2_ctx.g_rtt, GRANULE_STATE_RTT);
        granule_unlock(g_rd);

        s2tt_walk_lock_unlock(&s2_ctx, map_addr, S2TT_PAGE_LEVEL, &wi);
        s2tt = buffer_granule_map(wi.g_llt, SLOT_RTT);
        assert(s2tt != NULL);

        if (wi.last_level != S2TT_PAGE_LEVEL) {
                res->x[0] = pack_return_code(RMI_ERROR_RTT,
                                             (unsigned char)wi.last_level);
                goto out_unmap_ll_table;
        }

        s2tte = s2tte_read(&s2tt[wi.index]);

Javier Almansa Sobrino2595cd82024-01-25 18:25:12 +00001178 if (s2tte_is_assigned_ram(&s2_ctx, s2tte, S2TT_PAGE_LEVEL)) {
1179 data_addr = s2tte_pa(&s2_ctx, s2tte, S2TT_PAGE_LEVEL);
1180 s2tte = s2tte_create_unassigned_destroyed(&s2_ctx);
AlexeiFedorova43cd312023-04-17 11:42:25 +01001181 s2tte_write(&s2tt[wi.index], s2tte);
Javier Almansa Sobrino1e1781e2023-10-18 18:25:56 +01001182 s2tt_invalidate_page(&s2_ctx, map_addr);
Javier Almansa Sobrino2595cd82024-01-25 18:25:12 +00001183 } else if (s2tte_is_assigned_empty(&s2_ctx, s2tte, S2TT_PAGE_LEVEL)) {
1184 data_addr = s2tte_pa(&s2_ctx, s2tte, S2TT_PAGE_LEVEL);
1185 s2tte = s2tte_create_unassigned_empty(&s2_ctx);
AlexeiFedorova43cd312023-04-17 11:42:25 +01001186 s2tte_write(&s2tt[wi.index], s2tte);
Javier Almansa Sobrino2595cd82024-01-25 18:25:12 +00001187 } else if (s2tte_is_assigned_destroyed(&s2_ctx, s2tte,
1188 S2TT_PAGE_LEVEL)) {
1189 data_addr = s2tte_pa(&s2_ctx, s2tte, S2TT_PAGE_LEVEL);
1190 s2tte = s2tte_create_unassigned_destroyed(&s2_ctx);
Javier Almansa Sobrino84a1b162023-09-26 17:32:44 +01001191 s2tte_write(&s2tt[wi.index], s2tte);
AlexeiFedorova43cd312023-04-17 11:42:25 +01001192 } else {
Javier Almansa Sobrinof6fff692024-02-02 17:13:57 +00001193 res->x[0] = pack_return_code(RMI_ERROR_RTT,
1194 (unsigned char)S2TT_PAGE_LEVEL);
Soby Mathewb4c6df42022-11-09 11:13:29 +00001195 goto out_unmap_ll_table;
1196 }
1197
AlexeiFedorov745499d2024-04-25 16:52:44 +01001198 atomic_granule_put(wi.g_llt);
Soby Mathewb4c6df42022-11-09 11:13:29 +00001199
1200 /*
1201 * Lock the data granule and check expected state. Correct locking order
1202 * is guaranteed because granule address is obtained from a locked
1203 * granule by table walk. This lock needs to be acquired before a state
1204 * transition to or from GRANULE_STATE_DATA for granule address can happen.
1205 */
1206 g_data = find_lock_granule(data_addr, GRANULE_STATE_DATA);
AlexeiFedorov63b71692023-04-19 11:18:42 +01001207 assert(g_data != NULL);
Javier Almansa Sobrino2f717dd2024-02-12 20:49:46 +00001208 buffer_granule_memzero(g_data, SLOT_DELEGATED);
Soby Mathewb4c6df42022-11-09 11:13:29 +00001209 granule_unlock_transition(g_data, GRANULE_STATE_DELEGATED);
1210
AlexeiFedorov917eabf2023-04-24 12:20:41 +01001211 res->x[0] = RMI_SUCCESS;
AlexeiFedorove2002be2023-04-19 17:20:12 +01001212 res->x[1] = data_addr;
Soby Mathewb4c6df42022-11-09 11:13:29 +00001213out_unmap_ll_table:
Javier Almansa Sobrino2595cd82024-01-25 18:25:12 +00001214 res->x[2] = s2tt_skip_non_live_entries(&s2_ctx, map_addr, s2tt, &wi);
Soby Mathewb4c6df42022-11-09 11:13:29 +00001215 buffer_unmap(s2tt);
Soby Mathewb4c6df42022-11-09 11:13:29 +00001216 granule_unlock(wi.g_llt);
Soby Mathewb4c6df42022-11-09 11:13:29 +00001217}
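
/*
 * Host-side view, as a hedged sketch (not built): on success, res.x[1]
 * returns the PA of the reclaimed DATA granule and res.x[2] returns the
 * top of a non-live region, which lets teardown skip IPA ranges with no
 * live entries. 'rmi_data_destroy', 'rmi_granule_undelegate' and
 * 'struct rmi_result' below are hypothetical host helpers, not TF-RMM
 * code.
 */
#if 0
static void host_reclaim_range(unsigned long rd, unsigned long base,
			       unsigned long end)
{
	unsigned long ipa = base;

	while (ipa < end) {
		struct rmi_result res;

		rmi_data_destroy(rd, ipa, &res);
		if (res.x[0] == RMI_SUCCESS) {
			/* res.x[1] holds the PA of the reclaimed granule */
			rmi_granule_undelegate(res.x[1]);
			ipa += GRANULE_SIZE;
		} else if (res.x[2] > ipa) {
			/* Skip ahead past non-live entries */
			ipa = res.x[2];
		} else {
			/* e.g. the walk stopped early: repair the RTT first */
			break;
		}
	}
}
#endif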

/*
 * Update the RIPAS value for the entry pointed to by @s2ttep.
 *
 * Returns:
 * < 0  - On error, the operation was aborted
 *	  (e.g. the entry cannot have a RIPAS).
 *   0  - The operation succeeded and no TLBI is required.
 * > 0  - The operation succeeded and a TLBI is required.
 */
static int update_ripas(const struct s2tt_context *s2_ctx,
			unsigned long *s2ttep, long level,
			enum ripas ripas_val,
			enum ripas_change_destroyed change_destroyed)
{
	unsigned long pa, s2tte = s2tte_read(s2ttep);
	int ret = 0;

	assert(s2_ctx != NULL);

	if (!s2tte_has_ripas(s2_ctx, s2tte, level)) {
		return -EPERM;
	}

	if (ripas_val == RIPAS_RAM) {
		if (s2tte_is_unassigned_empty(s2_ctx, s2tte)) {
			s2tte = s2tte_create_unassigned_ram(s2_ctx);
		} else if (s2tte_is_unassigned_destroyed(s2_ctx, s2tte)) {
			if (change_destroyed == CHANGE_DESTROYED) {
				s2tte = s2tte_create_unassigned_ram(s2_ctx);
			} else {
				return -EINVAL;
			}
		} else if (s2tte_is_assigned_empty(s2_ctx, s2tte, level)) {
			pa = s2tte_pa(s2_ctx, s2tte, level);
			s2tte = s2tte_create_assigned_ram(s2_ctx, pa, level);
		} else if (s2tte_is_assigned_destroyed(s2_ctx, s2tte, level)) {
			if (change_destroyed == CHANGE_DESTROYED) {
				pa = s2tte_pa(s2_ctx, s2tte, level);
				s2tte = s2tte_create_assigned_ram(s2_ctx, pa,
								  level);
			} else {
				return -EINVAL;
			}
		} else {
			/* No action is required */
			return 0;
		}
	} else if (ripas_val == RIPAS_EMPTY) {
		if (s2tte_is_unassigned_ram(s2_ctx, s2tte)) {
			s2tte = s2tte_create_unassigned_empty(s2_ctx);
		} else if (s2tte_is_unassigned_destroyed(s2_ctx, s2tte)) {
			if (change_destroyed == CHANGE_DESTROYED) {
				s2tte = s2tte_create_unassigned_empty(s2_ctx);
			} else {
				return -EINVAL;
			}
		} else if (s2tte_is_assigned_ram(s2_ctx, s2tte, level)) {
			pa = s2tte_pa(s2_ctx, s2tte, level);
			s2tte = s2tte_create_assigned_empty(s2_ctx, pa, level);
			/* TLBI is required */
			ret = 1;
		} else if (s2tte_is_assigned_destroyed(s2_ctx, s2tte, level)) {
			if (change_destroyed == CHANGE_DESTROYED) {
				pa = s2tte_pa(s2_ctx, s2tte, level);
				s2tte = s2tte_create_assigned_empty(s2_ctx,
								    pa, level);
				/* TLBI is required */
				ret = 1;
			} else {
				return -EINVAL;
			}
		} else {
			/* No action is required */
			return 0;
		}
	}
	s2tte_write(s2ttep, s2tte);
	return ret;
}
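
/*
 * Summary of the transitions implemented by update_ripas() above, derived
 * from the if/else chain. DESTROYED source states are only converted when
 * change_destroyed == CHANGE_DESTROYED, otherwise the call fails with
 * -EINVAL, and entries that cannot have a RIPAS fail with -EPERM.
 *
 *   target RIPAS_RAM:
 *     UNASSIGNED_EMPTY      -> UNASSIGNED_RAM
 *     UNASSIGNED_DESTROYED  -> UNASSIGNED_RAM
 *     ASSIGNED_EMPTY        -> ASSIGNED_RAM
 *     ASSIGNED_DESTROYED    -> ASSIGNED_RAM
 *
 *   target RIPAS_EMPTY:
 *     UNASSIGNED_RAM        -> UNASSIGNED_EMPTY
 *     UNASSIGNED_DESTROYED  -> UNASSIGNED_EMPTY
 *     ASSIGNED_RAM          -> ASSIGNED_EMPTY      (TLBI required)
 *     ASSIGNED_DESTROYED    -> ASSIGNED_EMPTY      (TLBI required)
 *
 * Entries that already have the requested RIPAS are left untouched and
 * the function returns 0.
 */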

void smc_rtt_init_ripas(unsigned long rd_addr,
			unsigned long base,
			unsigned long top,
			struct smc_result *res)
{
	struct granule *g_rd;
	struct rd *rd;
	unsigned long addr, map_size;
	struct s2tt_walk wi;
	struct s2tt_context *s2_ctx;
	unsigned long s2tte, *s2tt;
	long level;
	unsigned long index;
	unsigned int s2ttes_per_s2tt;

	if (top <= base) {
		res->x[0] = RMI_ERROR_INPUT;
		return;
	}

	g_rd = find_lock_granule(rd_addr, GRANULE_STATE_RD);
	if (g_rd == NULL) {
		res->x[0] = RMI_ERROR_INPUT;
		return;
	}

	rd = buffer_granule_map(g_rd, SLOT_RD);
	assert(rd != NULL);

	if (!validate_map_addr(base, S2TT_PAGE_LEVEL, rd) ||
	    !validate_map_addr(top, S2TT_PAGE_LEVEL, rd) ||
	    !addr_in_par(rd, base) || !addr_in_par(rd, top - GRANULE_SIZE)) {
		buffer_unmap(rd);
		granule_unlock(g_rd);
		res->x[0] = RMI_ERROR_INPUT;
		return;
	}

	if (get_rd_state_locked(rd) != REALM_NEW) {
		buffer_unmap(rd);
		granule_unlock(g_rd);
		res->x[0] = RMI_ERROR_REALM;
		return;
	}

	s2_ctx = &(rd->s2_ctx);
	granule_lock(s2_ctx->g_rtt, GRANULE_STATE_RTT);

	s2tt_walk_lock_unlock(s2_ctx, base, S2TT_PAGE_LEVEL, &wi);
	level = wi.last_level;
	s2tt = buffer_granule_map(wi.g_llt, SLOT_RTT);
	assert(s2tt != NULL);

	map_size = s2tte_map_size(level);
	addr = base & ~(map_size - 1UL);

	/*
	 * If the RTTE covers a range below "base", we need to go deeper.
	 */
	if (addr != base) {
		res->x[0] = pack_return_code(RMI_ERROR_RTT,
					     (unsigned char)level);
		goto out_unmap_llt;
	}

	s2ttes_per_s2tt =
		(unsigned int)((level == S2TT_MIN_STARTING_LEVEL_LPA2) ?
			S2TTES_PER_S2TT_LM1 : S2TTES_PER_S2TT);
	for (index = wi.index; index < s2ttes_per_s2tt; index++) {
		unsigned long next = addr + map_size;

		/*
		 * Stop if this entry would cross "top", either because
		 * "top" is not aligned to this level or because the
		 * requested range has been fully covered.
		 */
		if (next > top) {
			break;
		}

		s2tte = s2tte_read(&s2tt[index]);
		if (s2tte_is_unassigned_empty(s2_ctx, s2tte)) {
			s2tte = s2tte_create_unassigned_ram(s2_ctx);
			s2tte_write(&s2tt[index], s2tte);
		} else if (!s2tte_is_unassigned_ram(s2_ctx, s2tte)) {
			break;
		}
		measurement_init_ripas_measure(rd->measurement[RIM_MEASUREMENT_SLOT],
					       rd->algorithm,
					       addr,
					       next);
		addr = next;
	}

	if (addr > base) {
		res->x[0] = RMI_SUCCESS;
		res->x[1] = addr;
	} else {
		res->x[0] = pack_return_code(RMI_ERROR_RTT,
					     (unsigned char)level);
	}

out_unmap_llt:
	buffer_unmap(s2tt);
	buffer_unmap(rd);
	granule_unlock(wi.g_llt);
	granule_unlock(g_rd);
}
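
/*
 * A worked example of the alignment check above, assuming 4KB granules:
 * if the walk stops at level 2, s2tte_map_size() returns 2MB, so for
 * base == 0x80100000 the aligned address is 0x80000000 != base and the
 * command returns RMI_ERROR_RTT with level 2; the host would typically
 * create a level 3 RTT covering that IPA and retry.
 */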

static void rtt_set_ripas_range(struct s2tt_context *s2_ctx,
				unsigned long *s2tt,
				unsigned long base,
				unsigned long top,
				struct s2tt_walk *wi,
				enum ripas ripas_val,
				enum ripas_change_destroyed change_destroyed,
				struct smc_result *res)
{
	unsigned long index;
	long level = wi->last_level;
	unsigned long map_size = s2tte_map_size((int)level);

	/* Align to the RTT level */
	unsigned long addr = base & ~(map_size - 1UL);

	/* Make sure we don't touch a range below the requested range */
	if (addr != base) {
		res->x[0] = pack_return_code(RMI_ERROR_RTT,
					     (unsigned char)level);
		return;
	}

	for (index = wi->index; index < S2TTES_PER_S2TT; index++) {
		int ret;

		/*
		 * Stop if this entry would cross "top", either because
		 * "top" is not aligned to this level or because the
		 * requested range has been fully covered.
		 */
		if ((addr + map_size) > top) {
			break;
		}

		ret = update_ripas(s2_ctx, &s2tt[index], level,
				   ripas_val, change_destroyed);
		if (ret < 0) {
			break;
		}

		/* Handle TLBI */
		if (ret != 0) {
			if (level == S2TT_PAGE_LEVEL) {
				s2tt_invalidate_page(s2_ctx, addr);
			} else {
				s2tt_invalidate_block(s2_ctx, addr);
			}
		}

		addr += map_size;
	}

	if (addr > base) {
		res->x[0] = RMI_SUCCESS;
		res->x[1] = addr;
	} else {
		res->x[0] = pack_return_code(RMI_ERROR_RTT,
					     (unsigned char)level);
	}
}

void smc_rtt_set_ripas(unsigned long rd_addr,
		       unsigned long rec_addr,
		       unsigned long base,
		       unsigned long top,
		       struct smc_result *res)
{
	struct granule *g_rd, *g_rec;
	struct rec *rec;
	struct rd *rd;
	struct s2tt_walk wi;
	unsigned long *s2tt;
	struct s2tt_context *s2_ctx;
	enum ripas ripas_val;
	enum ripas_change_destroyed change_destroyed;

	if ((top <= base) || !GRANULE_ALIGNED(top)) {
		res->x[0] = RMI_ERROR_INPUT;
		return;
	}

	if (!find_lock_two_granules(rd_addr,
				    GRANULE_STATE_RD,
				    &g_rd,
				    rec_addr,
				    GRANULE_STATE_REC,
				    &g_rec)) {
		res->x[0] = RMI_ERROR_INPUT;
		return;
	}

	if (granule_refcount_read_acquire(g_rec) != 0U) {
		res->x[0] = RMI_ERROR_REC;
		goto out_unlock_rec_rd;
	}

	rec = buffer_granule_map(g_rec, SLOT_REC);
	assert(rec != NULL);

	if (g_rd != rec->realm_info.g_rd) {
		res->x[0] = RMI_ERROR_REC;
		goto out_unmap_rec;
	}

	ripas_val = rec->set_ripas.ripas_val;
	change_destroyed = rec->set_ripas.change_destroyed;

	/*
	 * Return an error if the target region:
	 * - is not the next chunk of the requested region, or
	 * - extends beyond the end of the requested region.
	 */
	if ((base != rec->set_ripas.addr) || (top > rec->set_ripas.top)) {
		res->x[0] = RMI_ERROR_INPUT;
		goto out_unmap_rec;
	}

	rd = buffer_granule_map(g_rd, SLOT_RD);
	assert(rd != NULL);

	/*
	 * At this point we know that base == rec->set_ripas.addr,
	 * so it must be aligned to the granule size.
	 */
	assert(validate_map_addr(base, S2TT_PAGE_LEVEL, rd));

	s2_ctx = &(rd->s2_ctx);
	granule_lock(s2_ctx->g_rtt, GRANULE_STATE_RTT);

	/* Walk to the deepest level possible */
	s2tt_walk_lock_unlock(s2_ctx, base, S2TT_PAGE_LEVEL, &wi);

	/*
	 * Base has to be aligned to the level at which
	 * it is mapped in the RTT.
	 */
	if (!validate_map_addr(base, wi.last_level, rd)) {
		res->x[0] = pack_return_code(RMI_ERROR_RTT,
					     (unsigned char)wi.last_level);
		goto out_unlock_llt;
	}

	s2tt = buffer_granule_map(wi.g_llt, SLOT_RTT);
	assert(s2tt != NULL);

	rtt_set_ripas_range(s2_ctx, s2tt, base, top, &wi,
			    ripas_val, change_destroyed, res);

	if (res->x[0] == RMI_SUCCESS) {
		rec->set_ripas.addr = res->x[1];
	}

	buffer_unmap(s2tt);
out_unlock_llt:
	granule_unlock(wi.g_llt);
	buffer_unmap(rd);
out_unmap_rec:
	buffer_unmap(rec);
out_unlock_rec_rd:
	granule_unlock(g_rec);
	granule_unlock(g_rd);
}
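
/*
 * Illustrative host-side loop (not built): after a REC typically exits
 * requesting a RIPAS change for [base, top), the host applies it chunk
 * by chunk until the whole range has been accepted or an RTT error
 * forces it to refine the tables first. The 'rmi_rtt_set_ripas' wrapper
 * and 'struct rmi_result' below are hypothetical, not TF-RMM code.
 */
#if 0
static void host_apply_ripas_change(unsigned long rd, unsigned long rec,
				    unsigned long base, unsigned long top)
{
	while (base < top) {
		struct rmi_result res;

		rmi_rtt_set_ripas(rd, rec, base, top, &res);
		if (res.x[0] != RMI_SUCCESS) {
			/* e.g. RMI_ERROR_RTT: create a deeper RTT and retry */
			break;
		}
		base = res.x[1];	/* first unprocessed address */
	}
}
#endif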

unsigned long smc_dev_mem_map(unsigned long rd_addr,
			      unsigned long map_addr,
			      unsigned long ulevel,
			      unsigned long dev_mem_addr)
{
	struct dev_granule *g_dev;
	struct granule *g_rd;
	struct rd *rd;
	struct s2tt_walk wi;
	struct s2tt_context s2_ctx;
	unsigned long s2tte, *s2tt, num_granules;
	long level = (long)ulevel;
	unsigned long ret;
	__unused enum dev_coh_type type;

	/* Dev_Mem_Map/Unmap commands can operate up to a level 2 block entry */
	if ((level < S2TT_MIN_DEV_BLOCK_LEVEL) || (level > S2TT_PAGE_LEVEL)) {
		return RMI_ERROR_INPUT;
	}

	/*
	 * The code below assumes that "external" granules are always
	 * locked before "external" dev_granules, hence the RD granule is
	 * locked before the DELEGATED dev_granule.
	 *
	 * The alternative scheme is that all external granules and device
	 * granules are locked together in the order of their physical
	 * addresses. For that scheme, however, we would need primitives
	 * similar to 'find_lock_two_granules' that work with different
	 * object types (struct granule and struct dev_granule).
	 */

	g_rd = find_lock_granule(rd_addr, GRANULE_STATE_RD);
	if (g_rd == NULL) {
		return RMI_ERROR_INPUT;
	}

	if (level == S2TT_PAGE_LEVEL) {
		num_granules = 1UL;
	} else {
		assert(level == (S2TT_PAGE_LEVEL - 1L));
		num_granules = S2TTES_PER_S2TT;
	}

	g_dev = find_lock_dev_granules(dev_mem_addr,
				       DEV_GRANULE_STATE_DELEGATED,
				       num_granules,
				       &type);
	if (g_dev == NULL) {
		granule_unlock(g_rd);
		return RMI_ERROR_INPUT;
	}

	rd = buffer_granule_map(g_rd, SLOT_RD);
	assert(rd != NULL);

	if (!addr_in_par(rd, map_addr) ||
	    !validate_map_addr(map_addr, level, rd)) {
		buffer_unmap(rd);
		granule_unlock(g_rd);
		ret = RMI_ERROR_INPUT;
		goto out_unlock_granules;
	}

	s2_ctx = rd->s2_ctx;
	buffer_unmap(rd);
	granule_lock(s2_ctx.g_rtt, GRANULE_STATE_RTT);
	granule_unlock(g_rd);

	s2tt_walk_lock_unlock(&s2_ctx, map_addr, level, &wi);
	if (wi.last_level != level) {
		ret = pack_return_code(RMI_ERROR_RTT,
				       (unsigned char)wi.last_level);
		goto out_unlock_ll_table;
	}

	s2tt = buffer_granule_map(wi.g_llt, SLOT_RTT);
	assert(s2tt != NULL);

	s2tte = s2tte_read(&s2tt[wi.index]);

	if (!s2tte_is_unassigned(&s2_ctx, s2tte)) {
		ret = pack_return_code(RMI_ERROR_RTT, (unsigned char)level);
		goto out_unmap_ll_table;
	}

	s2tte = s2tte_create_assigned_dev_unchanged(&s2_ctx, s2tte,
						    dev_mem_addr, level);
	s2tte_write(&s2tt[wi.index], s2tte);
	atomic_granule_get(wi.g_llt);

	ret = RMI_SUCCESS;

out_unmap_ll_table:
	buffer_unmap(s2tt);
out_unlock_ll_table:
	granule_unlock(wi.g_llt);
out_unlock_granules:
	while (num_granules != 0UL) {
		if (ret == RMI_SUCCESS) {
			dev_granule_unlock_transition(&g_dev[--num_granules],
						      DEV_GRANULE_STATE_MAPPED);
		} else {
			dev_granule_unlock(&g_dev[--num_granules]);
		}
	}
	return ret;
}
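
/*
 * Note on the granule count above, assuming 4KB granules: a level 3
 * (page) mapping covers exactly one device granule, while a level 2
 * block mapping covers S2TTES_PER_S2TT (512) contiguous device granules,
 * i.e. 2MB of device memory. All of them must be in the DELEGATED
 * dev_granule state; on success they are all transitioned to MAPPED,
 * otherwise they are simply unlocked unchanged.
 */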

void smc_dev_mem_unmap(unsigned long rd_addr,
		       unsigned long map_addr,
		       unsigned long ulevel,
		       struct smc_result *res)
{
	struct granule *g_rd;
	struct rd *rd;
	struct s2tt_walk wi;
	struct s2tt_context s2_ctx;
	unsigned long dev_mem_addr, dev_addr, s2tte, *s2tt, num_granules;
	long level = (long)ulevel;
	__unused enum dev_coh_type type;

	/* Dev_Mem_Map/Unmap commands can operate up to a level 2 block entry */
	if ((level < S2TT_MIN_DEV_BLOCK_LEVEL) || (level > S2TT_PAGE_LEVEL)) {
		res->x[0] = RMI_ERROR_INPUT;
		res->x[2] = 0UL;
		return;
	}

	g_rd = find_lock_granule(rd_addr, GRANULE_STATE_RD);
	if (g_rd == NULL) {
		res->x[0] = RMI_ERROR_INPUT;
		res->x[2] = 0UL;
		return;
	}

	rd = buffer_granule_map(g_rd, SLOT_RD);
	assert(rd != NULL);

	if (!addr_in_par(rd, map_addr) ||
	    !validate_map_addr(map_addr, level, rd)) {
		buffer_unmap(rd);
		granule_unlock(g_rd);
		res->x[0] = RMI_ERROR_INPUT;
		res->x[2] = 0UL;
		return;
	}

	s2_ctx = rd->s2_ctx;
	buffer_unmap(rd);

	granule_lock(s2_ctx.g_rtt, GRANULE_STATE_RTT);
	granule_unlock(g_rd);

	s2tt_walk_lock_unlock(&s2_ctx, map_addr, level, &wi);
	s2tt = buffer_granule_map(wi.g_llt, SLOT_RTT);
	assert(s2tt != NULL);

	if (wi.last_level != level) {
		res->x[0] = pack_return_code(RMI_ERROR_RTT,
					     (unsigned char)wi.last_level);
		goto out_unmap_ll_table;
	}

	s2tte = s2tte_read(&s2tt[wi.index]);

	if (s2tte_is_assigned_dev_dev(&s2_ctx, s2tte, level)) {
		dev_mem_addr = s2tte_pa(&s2_ctx, s2tte, level);
		s2tte = s2tte_create_unassigned_destroyed(&s2_ctx);
		s2tte_write(&s2tt[wi.index], s2tte);
		if (level == S2TT_PAGE_LEVEL) {
			s2tt_invalidate_page(&s2_ctx, map_addr);
		} else {
			s2tt_invalidate_block(&s2_ctx, map_addr);
		}
	} else if (s2tte_is_assigned_dev_empty(&s2_ctx, s2tte, level)) {
		dev_mem_addr = s2tte_pa(&s2_ctx, s2tte, level);
		s2tte = s2tte_create_unassigned_empty(&s2_ctx);
		s2tte_write(&s2tt[wi.index], s2tte);
	} else if (s2tte_is_assigned_dev_destroyed(&s2_ctx, s2tte, level)) {
		dev_mem_addr = s2tte_pa(&s2_ctx, s2tte, level);
		s2tte = s2tte_create_unassigned_destroyed(&s2_ctx);
		s2tte_write(&s2tt[wi.index], s2tte);
	} else {
		res->x[0] = pack_return_code(RMI_ERROR_RTT,
					     (unsigned char)level);
		goto out_unmap_ll_table;
	}

	atomic_granule_put(wi.g_llt);

	num_granules = (level == S2TT_PAGE_LEVEL) ? 1UL : S2TTES_PER_S2TT;
	dev_addr = dev_mem_addr;

	for (unsigned long i = 0UL; i < num_granules; i++) {
		struct dev_granule *g_dev;

		g_dev = find_lock_dev_granule(dev_addr, DEV_GRANULE_STATE_MAPPED, &type);
		assert(g_dev != NULL);
		dev_granule_unlock_transition(g_dev, DEV_GRANULE_STATE_DELEGATED);
		dev_addr += GRANULE_SIZE;
	}

	res->x[0] = RMI_SUCCESS;
	res->x[1] = dev_mem_addr;
out_unmap_ll_table:
	res->x[2] = s2tt_skip_non_live_entries(&s2_ctx, map_addr, s2tt, &wi);
	buffer_unmap(s2tt);
	granule_unlock(wi.g_llt);
}
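
/*
 * Summary of the S2TTE transitions performed by smc_dev_mem_unmap()
 * above (mirroring smc_data_destroy() for device memory):
 *
 *   ASSIGNED_DEV_DEV        -> UNASSIGNED_DESTROYED  (TLBI required)
 *   ASSIGNED_DEV_EMPTY      -> UNASSIGNED_EMPTY
 *   ASSIGNED_DEV_DESTROYED  -> UNASSIGNED_DESTROYED
 *   anything else           -> RMI_ERROR_RTT at the requested level
 *
 * The backing device granules (1 or S2TTES_PER_S2TT of them, depending
 * on the level) are moved back from MAPPED to DELEGATED, and res->x[1]
 * returns the base PA of that device memory range.
 */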