/*
 * SPDX-License-Identifier: BSD-3-Clause
 * SPDX-FileCopyrightText: Copyright TF-RMM Contributors.
 */

#include <arch.h>
#include <arch_helpers.h>
#include <assert.h>
#include <attestation_token.h>
#include <buffer.h>
#include <buffer_private.h>
#include <cpuid.h>
#include <debug.h>
#include <errno.h>
#include <gic.h>
#include <granule.h>
#include <memory_alloc.h>
#include <sizes.h>
#include <slot_buf_arch.h>
#include <stdbool.h>
#include <stdint.h>
#include <table.h>
#include <xlat_contexts.h>
#include <xlat_tables.h>

/*
 * All the slot buffers for a given PE must be mapped by a single translation
 * table, which means the max VA size should be <= 4KB * 512.
 */
COMPILER_ASSERT((RMM_SLOT_BUF_VA_SIZE) <= (GRANULE_SIZE * XLAT_TABLE_ENTRIES));

/*
 * For all translation stages, if FEAT_TTST is implemented, then while the PE
 * is executing in AArch64 state and using 4KB translation granules, the
 * minimum address space size is 64KB.
 */
COMPILER_ASSERT((RMM_SLOT_BUF_VA_SIZE) >= (1 << 16U));

#define RMM_SLOT_BUF_MMAP	MAP_REGION_TRANSIENT(		\
					SLOT_VIRT,		\
					RMM_SLOT_BUF_VA_SIZE,	\
					PAGE_SIZE)

#define SLOT_BUF_MMAP_REGIONS		UL(1)

/*
 * Attributes for a buffer slot page descriptor.
 * Note that the AF bit on the descriptor is handled by the translation
 * library (it assumes that access faults are not handled) so it does not
 * need to be specified here.
 */
#define SLOT_DESC_ATTR \
	(MT_RW_DATA | MT_SHAREABILITY_ISH | MT_NG)

/*
 * The base tables for all the contexts are manually allocated as a contiguous
 * block of memory (one L3 table per PE).
 */
static uint64_t slot_buf_s1tt[XLAT_TABLE_ENTRIES * MAX_CPUS]
				__aligned(XLAT_TABLES_ALIGNMENT);

/* Allocate per-cpu xlat_ctx_tbls */
static struct xlat_ctx_tbls slot_buf_tbls[MAX_CPUS];

/* Allocate xlat_ctx_cfg for high VA which will be common to all PEs */
static struct xlat_ctx_cfg slot_buf_xlat_ctx_cfg;

/* context definition */
static struct xlat_ctx slot_buf_xlat_ctx[MAX_CPUS];

/*
 * Allocate a cache to store the last level table info where the slot buffers
 * are mapped to avoid needing to perform a table walk every time a buffer
 * slot operation has to be done.
 */
static struct xlat_tbl_info tbl_info_cache[MAX_CPUS];

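/* Return the virtual address assigned to a buffer slot. */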
uintptr_t slot_to_va(enum buffer_slot slot)
{
	assert(slot < NR_CPU_SLOTS);

	return (uintptr_t)(SLOT_VIRT + (GRANULE_SIZE * slot));
}

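/* Return the slot buffer translation context of the current CPU. */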
static inline struct xlat_ctx *get_slot_buf_xlat_ctx(void)
{
	return &slot_buf_xlat_ctx[my_cpuid()];
}

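/* Return the cached table info for the current CPU's slot buffers. */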
struct xlat_tbl_info *get_cached_tbl_info(void)
{
	return &tbl_info_cache[my_cpuid()];
}

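/* Read the translation table descriptor that currently maps @slot. */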
__unused static uint64_t slot_to_descriptor(enum buffer_slot slot)
{
	uint64_t *entry = xlat_get_tte_ptr(get_cached_tbl_info(),
					   slot_to_va(slot));

	return xlat_read_tte(entry);
}

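/*
 * Set up the translation context configuration shared by all PEs.
 * Called once during cold boot.
 */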
int slot_buf_coldboot_init(void)
{
	static struct xlat_mmap_region slot_buf_regions[] = {
		RMM_SLOT_BUF_MMAP,
	};

	/*
	 * Initialize the common configuration used for all
	 * translation contexts
	 */
	return xlat_ctx_cfg_init(&slot_buf_xlat_ctx_cfg, VA_HIGH_REGION,
				 &slot_buf_regions[0], 1U,
				 RMM_SLOT_BUF_VA_SIZE);
}

/*
 * Set up the xlat tables for the slot buffer mechanism on each PE.
 * Must be called for every PE in the system.
 */
void slot_buf_setup_xlat(void)
{
	unsigned int cpuid = my_cpuid();
	struct xlat_ctx *slot_buf_ctx = get_slot_buf_xlat_ctx();

	/*
	 * Initialize the translation tables for the current context.
	 * This is done on the first boot of each PE.
	 */
	int ret = xlat_ctx_init(slot_buf_ctx,
				&slot_buf_xlat_ctx_cfg,
				&slot_buf_tbls[cpuid],
				&slot_buf_s1tt[XLAT_TABLE_ENTRIES * cpuid], 1U);

	/*
	 * If the context was already created, carry on with the
	 * initialization. If it cannot be created, panic.
	 */
	if (!((ret == 0) || (ret == -EALREADY))) {
		ERROR("%s (%u): Failed to initialize the xlat context for the slot buffers (-%i)\n",
					__func__, __LINE__, ret);
		panic();
	}

	/* Configure MMU registers */
	if (xlat_arch_setup_mmu_cfg(get_slot_buf_xlat_ctx())) {
		ERROR("%s (%u): MMU registers failed to initialize\n",
		      __func__, __LINE__);
		panic();
	}
}

/*
 * Finishes initializing the slot buffer mechanism.
 * This function must be called after the MMU is enabled.
 */
void slot_buf_finish_warmboot_init(void)
{
	assert(is_mmu_enabled() == true);

	/*
	 * Initialize (if not done yet) the internal cache with the last level
	 * translation table that holds the MMU descriptors for the slot
	 * buffers, so we can access them faster when we need to map/unmap.
	 */
	if ((get_cached_tbl_info())->table == NULL) {
		if (xlat_get_table_from_va(get_cached_tbl_info(),
					   get_slot_buf_xlat_ctx(),
					   slot_to_va(SLOT_NS)) != 0) {
			ERROR("%s (%u): Failed to initialize table entry cache for CPU %u\n",
					__func__, __LINE__, my_cpuid());
			panic();
		}
	}
}

/*
 * Buffer slots are intended to be transient, and should not be live at
 * entry/exit of the RMM.
 */
void assert_cpu_slots_empty(void)
{
	unsigned int i;

	for (i = 0; i < NR_CPU_SLOTS; i++) {
		assert(slot_to_descriptor(i) == INVALID_DESC);
	}
}

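/* Helpers to classify a slot as the NS slot or one of the Realm slots. */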
static inline bool is_ns_slot(enum buffer_slot slot)
{
	return slot == SLOT_NS;
}

static inline bool is_realm_slot(enum buffer_slot slot)
{
	return (slot != SLOT_NS) && (slot < NR_CPU_SLOTS);
}

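/* Map a Non-secure granule into the NS buffer slot and return its VA. */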
static void *ns_granule_map(enum buffer_slot slot, struct granule *granule)
{
	unsigned long addr = granule_addr(granule);

	assert(is_ns_slot(slot));
	return buffer_arch_map(slot, addr);
}

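/* Unmap a Non-secure buffer previously mapped with ns_granule_map(). */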
static inline void ns_buffer_unmap(void *buf)
{
	buffer_arch_unmap(buf);
}

/*
 * Maps a granule @g into the provided @slot, returning
 * the virtual address.
 *
 * The caller must either hold @g::lock or hold a reference.
 */
void *granule_map(struct granule *g, enum buffer_slot slot)
{
	unsigned long addr = granule_addr(g);

	assert(is_realm_slot(slot));

	return buffer_arch_map(slot, addr);
}

void buffer_unmap(void *buf)
{
	buffer_arch_unmap(buf);
}
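
/*
 * Illustrative usage sketch (not code from this file): a caller that owns a
 * locked granule maps it into one of the Realm slots, accesses it through the
 * returned VA, and unmaps it before releasing the lock. The slot name below
 * is only an example of an enum buffer_slot value:
 *
 *	void *data = granule_map(g, SLOT_DELEGATED);
 *
 *	... read or write the granule contents through 'data' ...
 *
 *	buffer_unmap(data);
 */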

bool memcpy_ns_read(void *dest, const void *ns_src, unsigned long size);
bool memcpy_ns_write(void *ns_dest, const void *src, unsigned long size);

/*
 * Map a Non-secure granule @ns_gr into the slot @slot and read data from
 * this granule to @dest. Unmap the granule once the read is done.
 *
 * It returns 'true' on success or 'false' if not all data are copied.
 * Only the least significant bits of @offset are considered, which allows the
 * full PA of a non-granule aligned buffer to be used for the @offset parameter.
 */
bool ns_buffer_read(enum buffer_slot slot,
		    struct granule *ns_gr,
		    unsigned int offset,
		    unsigned int size,
		    void *dest)
{
	uintptr_t src;
	bool retval;

	assert(is_ns_slot(slot));
	assert(ns_gr != NULL);
	assert(dest != NULL);

	/*
	 * To simplify the trapping mechanism around NS access,
	 * memcpy_ns_read uses a single 8-byte LDR instruction and
	 * all parameters must be aligned accordingly.
	 */
	assert(ALIGNED(size, 8));
	assert(ALIGNED(offset, 8));
	assert(ALIGNED(dest, 8));

	offset &= ~GRANULE_MASK;
	assert(offset + size <= GRANULE_SIZE);

	src = (uintptr_t)ns_granule_map(slot, ns_gr);
	retval = memcpy_ns_read(dest, (void *)(src + offset), size);
	ns_buffer_unmap((void *)src);

	return retval;
}

/*
 * Map a Non-secure granule @ns_gr into the slot @slot and write data from
 * @src to this granule. Unmap the granule once the write is done.
 *
 * It returns 'true' on success or 'false' if not all data are copied.
 * Only the least significant bits of @offset are considered, which allows the
 * full PA of a non-granule aligned buffer to be used for the @offset parameter.
 */
bool ns_buffer_write(enum buffer_slot slot,
		     struct granule *ns_gr,
		     unsigned int offset,
		     unsigned int size,
		     void *src)
{
	uintptr_t dest;
	bool retval;

	assert(is_ns_slot(slot));
	assert(ns_gr != NULL);
	assert(src != NULL);

	/*
	 * To simplify the trapping mechanism around NS access,
	 * memcpy_ns_write uses a single 8-byte STR instruction and
	 * all parameters must be aligned accordingly.
	 */
	assert(ALIGNED(size, 8));
	assert(ALIGNED(offset, 8));
	assert(ALIGNED(src, 8));

	offset &= ~GRANULE_MASK;
	assert(offset + size <= GRANULE_SIZE);

	dest = (uintptr_t)ns_granule_map(slot, ns_gr);
	retval = memcpy_ns_write((void *)(dest + offset), src, size);
	ns_buffer_unmap((void *)dest);

	return retval;
}

/******************************************************************************
 * Internal helpers
 ******************************************************************************/

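/*
 * Map the physical address @addr to the virtual address assigned to @slot,
 * using the cached table info. Returns the slot VA on success or NULL if the
 * mapping could not be created.
 */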
void *buffer_map_internal(enum buffer_slot slot, unsigned long addr)
{
	uint64_t attr = SLOT_DESC_ATTR;
	uintptr_t va = slot_to_va(slot);
	struct xlat_tbl_info *entry = get_cached_tbl_info();

	assert(GRANULE_ALIGNED(addr));

	attr |= (slot == SLOT_NS ? MT_NS : MT_REALM);

	if (xlat_map_memory_page_with_attrs(entry, va,
					    (uintptr_t)addr, attr) != 0) {
		/* Error mapping the buffer */
		return NULL;
	}

	return (void *)va;
}

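/* Unmap the slot buffer that backs the virtual address @buf. */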
void buffer_unmap_internal(void *buf)
{
	/*
	 * Prevent the compiler from moving prior loads/stores to buf after the
	 * update to the translation table. Otherwise, those could fault.
	 */
	COMPILER_BARRIER();

	xlat_unmap_memory_page(get_cached_tbl_info(), (uintptr_t)buf);
}