/*
 * SPDX-License-Identifier: BSD-3-Clause
 * SPDX-FileCopyrightText: Copyright TF-RMM Contributors.
 */

#include <arch.h>
#include <arch_helpers.h>
#include <assert.h>
#include <attestation_token.h>
#include <buffer.h>
#include <buffer_private.h>
#include <cpuid.h>
#include <debug.h>
#include <errno.h>
#include <gic.h>
#include <granule.h>
#include <memory_alloc.h>
#include <sizes.h>
#include <slot_buf_arch.h>
#include <stdbool.h>
#include <stdint.h>
#include <table.h>
#include <xlat_contexts.h>
#include <xlat_tables.h>

/*
 * All the slot buffers for a given PE must be mapped by a single translation
 * table, which means the max VA size should be <= 4KB * 512 (2MB).
 */
COMPILER_ASSERT((RMM_SLOT_BUF_VA_SIZE) <= (GRANULE_SIZE * XLAT_TABLE_ENTRIES));

/*
 * For all translation stages, if FEAT_TTST is implemented, the minimum
 * address space size is 64KB while the PE is executing in AArch64 state
 * using 4KB translation granules.
 */
COMPILER_ASSERT((RMM_SLOT_BUF_VA_SIZE) >= (1 << 16U));

#define RMM_SLOT_BUF_MMAP	MAP_REGION_TRANSIENT(		\
					SLOT_VIRT,		\
					RMM_SLOT_BUF_VA_SIZE,	\
					PAGE_SIZE)

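/*
 * The region above is registered as a transient mmap region: the
 * translation library only reserves the VA range, filling the
 * corresponding TTEs with transient invalid descriptors (TRANSIENT_DESC),
 * so that individual slots can be mapped and unmapped dynamically at
 * runtime (see buffer_map_internal() and buffer_unmap_internal() below).
 */
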
#define SLOT_BUF_MMAP_REGIONS		UL(1)

/*
 * Attributes for a buffer slot page descriptor.
 * Note that the AF bit on the descriptor is handled by the translation
 * library (it assumes that access faults are not handled) so it does not
 * need to be specified here.
 */
#define SLOT_DESC_ATTR		(MT_RW_DATA | MT_NG)

/*
 * The base tables for all the contexts are manually allocated as a contiguous
 * block of memory (one L3 table per PE).
 */
static uint64_t slot_buf_s1tt[XLAT_TABLE_ENTRIES * MAX_CPUS]
				__aligned(XLAT_TABLES_ALIGNMENT);

/* Allocate per-CPU xlat_ctx_tbls */
static struct xlat_ctx_tbls slot_buf_tbls[MAX_CPUS];

/* Allocate the xlat_ctx_cfg for the high VA region, common to all PEs */
static struct xlat_ctx_cfg slot_buf_xlat_ctx_cfg;

/* Per-CPU translation context definitions */
static struct xlat_ctx slot_buf_xlat_ctx[MAX_CPUS];

/*
 * Cache of the last level table info where the slot buffers are mapped,
 * kept to avoid a full table walk every time a buffer slot operation
 * has to be done.
 */
static struct xlat_llt_info llt_info_cache[MAX_CPUS];

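/* Return the per-CPU VA at which @slot is (or will be) mapped. */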
uintptr_t slot_to_va(enum buffer_slot slot)
{
	assert(slot < NR_CPU_SLOTS);

	return (uintptr_t)(SLOT_VIRT + (GRANULE_SIZE * slot));
}

static inline struct xlat_ctx *get_slot_buf_xlat_ctx(void)
{
	return &slot_buf_xlat_ctx[my_cpuid()];
}

struct xlat_llt_info *get_cached_llt_info(void)
{
	return &llt_info_cache[my_cpuid()];
}

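/*
 * Return the raw translation table entry (TTE) for @slot. This is only
 * used by assertions, hence the __unused attribute for builds where
 * asserts are compiled out.
 */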
__unused static uint64_t slot_to_descriptor(enum buffer_slot slot)
{
	uint64_t *entry = xlat_get_tte_ptr(get_cached_llt_info(),
					   slot_to_va(slot));

	return xlat_read_tte(entry);
}

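/*
 * The slot buffer mechanism is initialized in three stages:
 *
 *  1) slot_buf_coldboot_init() sets up, once during cold boot, the
 *     translation context configuration shared by all PEs.
 *  2) slot_buf_setup_xlat() initializes the translation tables and MMU
 *     registers of each PE on its first boot.
 *  3) slot_buf_finish_warmboot_init() populates the per-PE cached table
 *     info once the MMU is enabled.
 */
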
int slot_buf_coldboot_init(void)
{
	static struct xlat_mmap_region slot_buf_regions[] = {
		RMM_SLOT_BUF_MMAP,
	};

	/*
	 * Initialize the common configuration used for all
	 * translation contexts.
	 */
	return xlat_ctx_cfg_init(&slot_buf_xlat_ctx_cfg, VA_HIGH_REGION,
				 &slot_buf_regions[0], 1U,
				 RMM_SLOT_BUF_VA_SIZE);
}

/*
 * Set up the translation tables for the slot buffer mechanism on a PE.
 * Must be called for every PE in the system.
 */
void slot_buf_setup_xlat(void)
{
	unsigned int cpuid = my_cpuid();
	struct xlat_ctx *slot_buf_ctx = get_slot_buf_xlat_ctx();

	/*
	 * Initialize the translation tables for the current context.
	 * This is done on the first boot of each PE.
	 */
	int ret = xlat_ctx_init(slot_buf_ctx,
				&slot_buf_xlat_ctx_cfg,
				&slot_buf_tbls[cpuid],
				&slot_buf_s1tt[XLAT_TABLE_ENTRIES * cpuid], 1U);

	/*
	 * If the context was already created, carry on with the
	 * initialization. If it cannot be created, panic.
	 */
	if (!((ret == 0) || (ret == -EALREADY))) {
		ERROR("%s (%u): Failed to initialize the xlat context for the slot buffers (%i)\n",
			__func__, __LINE__, ret);
		panic();
	}

	/* Configure MMU registers */
	if (xlat_arch_setup_mmu_cfg(get_slot_buf_xlat_ctx())) {
		ERROR("%s (%u): Failed to configure the MMU registers\n",
			__func__, __LINE__);
		panic();
	}
}

/*
 * Finishes initializing the slot buffer mechanism.
 * This function must be called after the MMU is enabled.
 */
void slot_buf_finish_warmboot_init(void)
{
	assert(is_mmu_enabled() == true);

	/*
	 * Initialize (if not done yet) the internal cache with the last level
	 * translation table that holds the MMU descriptors for the slot
	 * buffers, so we can access them faster when we need to map/unmap.
	 */
	if ((get_cached_llt_info())->table == NULL) {
		if (xlat_get_llt_from_va(get_cached_llt_info(),
					 get_slot_buf_xlat_ctx(),
					 slot_to_va(SLOT_NS)) != 0) {
			ERROR("%s (%u): Failed to initialize table entry cache for CPU %u\n",
			      __func__, __LINE__, my_cpuid());
			panic();
		}
	}
}

/*
 * Buffer slots are intended to be transient, and should not be live at
 * entry/exit of the RMM.
 */
void assert_cpu_slots_empty(void)
{
	unsigned int i;

	for (i = 0; i < NR_CPU_SLOTS; i++) {
		assert(slot_to_descriptor(i) == TRANSIENT_DESC);
	}
}

static inline bool is_ns_slot(enum buffer_slot slot)
{
	return slot == SLOT_NS;
}

static inline bool is_realm_slot(enum buffer_slot slot)
{
	return (slot != SLOT_NS) && (slot < NR_CPU_SLOTS);
}

static void *ns_granule_map(enum buffer_slot slot, struct granule *granule)
{
	unsigned long addr = granule_addr(granule);

	assert(is_ns_slot(slot));
	return buffer_arch_map(slot, addr);
}

static inline void ns_buffer_unmap(void *buf)
{
	buffer_arch_unmap(buf);
}

/*
 * Maps a granule @g into the provided @slot, returning
 * the virtual address.
 *
 * The caller must either hold @g::lock or hold a reference.
 */
void *granule_map(struct granule *g, enum buffer_slot slot)
{
	unsigned long addr = granule_addr(g);

	assert(is_realm_slot(slot));

	return buffer_arch_map(slot, addr);
}

void buffer_unmap(void *buf)
{
	buffer_arch_unmap(buf);
}
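
/*
 * Typical usage of the mapping API above (an illustrative sketch only;
 * find_lock_granule(), granule_unlock() and struct rd are assumed from
 * the granule/realm APIs and may differ in detail):
 *
 *	struct granule *g = find_lock_granule(addr, GRANULE_STATE_RD);
 *	struct rd *rd = granule_map(g, SLOT_RD);
 *
 *	... access the granule contents through the transient mapping ...
 *
 *	buffer_unmap(rd);
 *	granule_unlock(g);
 */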
233
234bool memcpy_ns_read(void *dest, const void *ns_src, unsigned long size);
235bool memcpy_ns_write(void *ns_dest, const void *src, unsigned long size);

/*
 * Map a Non-secure granule @ns_gr into the slot @slot and read data from
 * this granule into @dest. Unmap the granule once the read is done.
 *
 * It returns 'true' on success or 'false' if not all data are copied.
 * Only the least significant bits of @offset are considered, which allows the
 * full PA of a non-granule-aligned buffer to be used for the @offset parameter.
 */
bool ns_buffer_read(enum buffer_slot slot,
		    struct granule *ns_gr,
		    unsigned int offset,
		    unsigned int size,
		    void *dest)
{
	uintptr_t src;
	bool retval;

	assert(is_ns_slot(slot));
	assert(ns_gr != NULL);
	assert(dest != NULL);

	/*
	 * To simplify the trapping mechanism around NS access,
	 * memcpy_ns_read uses a single 8-byte LDR instruction and
	 * all parameters must be aligned accordingly.
	 */
	assert(ALIGNED(size, 8));
	assert(ALIGNED(offset, 8));
	assert(ALIGNED(dest, 8));

	offset &= ~GRANULE_MASK;
	assert(offset + size <= GRANULE_SIZE);

	src = (uintptr_t)ns_granule_map(slot, ns_gr);
	retval = memcpy_ns_read(dest, (void *)(src + offset), size);
	ns_buffer_unmap((void *)src);

	return retval;
}

/*
 * Map a Non-secure granule @ns_gr into the slot @slot and write data from
 * @src into this granule. Unmap the granule once the write is done.
 *
 * It returns 'true' on success or 'false' if not all data are copied.
 * Only the least significant bits of @offset are considered, which allows the
 * full PA of a non-granule-aligned buffer to be used for the @offset parameter.
 */
bool ns_buffer_write(enum buffer_slot slot,
		     struct granule *ns_gr,
		     unsigned int offset,
		     unsigned int size,
		     void *src)
{
	uintptr_t dest;
	bool retval;

	assert(is_ns_slot(slot));
	assert(ns_gr != NULL);
	assert(src != NULL);

	/*
	 * To simplify the trapping mechanism around NS access,
	 * memcpy_ns_write uses a single 8-byte STR instruction and
	 * all parameters must be aligned accordingly.
	 */
	assert(ALIGNED(size, 8));
	assert(ALIGNED(offset, 8));
	assert(ALIGNED(src, 8));

	offset &= ~GRANULE_MASK;
	assert(offset + size <= GRANULE_SIZE);

	dest = (uintptr_t)ns_granule_map(slot, ns_gr);
	retval = memcpy_ns_write((void *)(dest + offset), src, size);
	ns_buffer_unmap((void *)dest);

	return retval;
}

/******************************************************************************
 * Internal helpers
 ******************************************************************************/

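/*
 * Arch-independent implementation of the map operation. On regular
 * builds, buffer_arch_map()/buffer_arch_unmap() are assumed to resolve
 * to buffer_map_internal()/buffer_unmap_internal() via slot_buf_arch.h
 * (test builds may provide alternative implementations).
 */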
void *buffer_map_internal(enum buffer_slot slot, unsigned long addr)
{
	uint64_t attr = SLOT_DESC_ATTR;
	uintptr_t va = slot_to_va(slot);
	struct xlat_llt_info *entry = get_cached_llt_info();

	assert(GRANULE_ALIGNED(addr));

	attr |= (slot == SLOT_NS ? MT_NS : MT_REALM);

	if (xlat_map_memory_page_with_attrs(entry, va,
					    (uintptr_t)addr, attr) != 0) {
		/* Error mapping the buffer */
		return NULL;
	}

	return (void *)va;
}

void buffer_unmap_internal(void *buf)
{
	/*
	 * Prevent the compiler from moving prior loads/stores to buf after the
	 * update to the translation table. Otherwise, those could fault.
	 */
	COMPILER_BARRIER();

	xlat_unmap_memory_page(get_cached_llt_info(), (uintptr_t)buf);
}