/*
 * Copyright 2019 The Hafnium Authors.
 *
 * Use of this source code is governed by a BSD-style
 * license that can be found in the LICENSE file or at
 * https://opensource.org/licenses/BSD-3-Clause.
 */

#include "hf/manifest.h"

#include <stddef.h>
#include <stdint.h>

#include "hf/arch/types.h"
#include "hf/arch/vmid_base.h"

#include "hf/addr.h"
#include "hf/assert.h"
#include "hf/boot_info.h"
#include "hf/boot_params.h"
#include "hf/check.h"
#include "hf/dlog.h"
#include "hf/fdt.h"
#include "hf/ffa.h"
#include "hf/mm.h"
#include "hf/mpool.h"
#include "hf/sp_pkg.h"
#include "hf/static_assert.h"
#include "hf/std.h"

#define TRY(expr)                                            \
	do {                                                 \
		enum manifest_return_code ret_code = (expr); \
		if (ret_code != MANIFEST_SUCCESS) {          \
			return ret_code;                     \
		}                                            \
	} while (0)

#define VM_ID_MAX (HF_VM_ID_OFFSET + MAX_VMS - 1)
#define VM_ID_MAX_DIGITS (5)
#define VM_NAME_EXTRA_CHARS (3) /* "vm" + number + '\0' */
#define VM_NAME_MAX_SIZE (VM_ID_MAX_DIGITS + VM_NAME_EXTRA_CHARS)
static_assert(VM_NAME_MAX_SIZE <= STRING_MAX_SIZE,
	      "VM name does not fit into a struct string.");
static_assert(VM_ID_MAX <= 99999, "Insufficient VM_NAME_BUF_SIZE");
static_assert((HF_OTHER_WORLD_ID > VM_ID_MAX) ||
		      (HF_OTHER_WORLD_ID < HF_VM_ID_BASE),
	      "TrustZone VM ID clashes with normal VM range.");

/* Bitmap to track boot order values in use. */
#define BOOT_ORDER_ENTRY_BITS (sizeof(uint64_t) * 8)
#define BOOT_ORDER_MAP_ENTRIES                                 \
	((DEFAULT_BOOT_ORDER + (BOOT_ORDER_ENTRY_BITS - 1)) / \
	 BOOT_ORDER_ENTRY_BITS)

/**
 * A struct to keep track of the partitions' properties during early boot
 * manifest parsing:
 * - Interrupt IDs.
 * - Physical memory ranges.
 */
struct manifest_data {
	struct manifest manifest;
	struct interrupt_bitmap intids;
	/*
	 * Allocate enough for the maximum number of memory regions and device
	 * regions defined via the partition manifests, plus one region for
	 * each partition's address space.
	 */
	struct mem_range mem_regions[PARTITION_MAX_MEMORY_REGIONS * MAX_VMS +
				     PARTITION_MAX_DEVICE_REGIONS * MAX_VMS +
				     MAX_VMS];
	uint64_t boot_order_values[BOOT_ORDER_MAP_ENTRIES];
};

/**
 * Calculate the number of entries in the ppool that are required to
 * store the manifest_data struct.
 */
static const size_t manifest_data_ppool_entries =
	(align_up(sizeof(struct manifest_data), MM_PPOOL_ENTRY_SIZE) /
	 MM_PPOOL_ENTRY_SIZE);

static struct manifest_data *manifest_data;
/* Index used to track the number of memory regions allocated. */
static size_t allocated_mem_regions_index = 0;

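/**
 * Validates a partition's boot order value: it must not exceed
 * DEFAULT_BOOT_ORDER and must not already be in use by another partition.
 * Values other than the default are recorded in the boot order bitmap.
 */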
static bool check_boot_order(uint16_t boot_order)
{
	uint16_t i;
	uint64_t boot_order_mask;

	if (boot_order == DEFAULT_BOOT_ORDER) {
		return true;
	}
	if (boot_order > DEFAULT_BOOT_ORDER) {
		dlog_error("Boot order should not exceed %x\n",
			   DEFAULT_BOOT_ORDER);
		return false;
	}

	i = boot_order / BOOT_ORDER_ENTRY_BITS;
	boot_order_mask = UINT64_C(1) << (boot_order % BOOT_ORDER_ENTRY_BITS);

	if ((boot_order_mask & manifest_data->boot_order_values[i]) != 0U) {
		dlog_error("Boot order must be a unique value.\n");
		return false;
	}

	manifest_data->boot_order_values[i] |= boot_order_mask;

	return true;
}

/**
 * Allocates and clears memory for the manifest data in the given memory pool.
 * Returns true if the memory is successfully allocated.
 */
static bool manifest_data_init(struct mpool *ppool)
{
	manifest_data = (struct manifest_data *)mpool_alloc_contiguous(
		ppool, manifest_data_ppool_entries, 1);

	assert(manifest_data != NULL);

	memset_s(manifest_data, sizeof(struct manifest_data), 0,
		 sizeof(struct manifest_data));

	return manifest_data != NULL;
}

/**
 * Frees the memory used for the manifest data in the given memory pool.
 */
static void manifest_data_deinit(struct mpool *ppool)
{
	/*
	 * Clear and return the memory used for the manifest_data struct to
	 * the memory pool.
	 */
	memset_s(manifest_data, sizeof(struct manifest_data), 0,
		 sizeof(struct manifest_data));
	mpool_add_chunk(ppool, manifest_data, manifest_data_ppool_entries);

	/*
	 * Reset the index used for tracking the number of memory regions
	 * allocated.
	 */
	allocated_mem_regions_index = 0;
}

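/** Returns the number of decimal digits needed to print `vm_id`. */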
static inline size_t count_digits(ffa_id_t vm_id)
{
	size_t digits = 0;

	do {
		digits++;
		vm_id /= 10;
	} while (vm_id);
	return digits;
}

/**
 * Generates a string with the two letters "vm" followed by an integer.
 * Assumes `str` can hold VM_NAME_MAX_SIZE characters.
 */
static void generate_vm_node_name(struct string *str, ffa_id_t vm_id)
{
	static const char *digits = "0123456789";
	size_t vm_id_digits = count_digits(vm_id);
	char *base = str->data;
	char *ptr = base + (VM_NAME_EXTRA_CHARS + vm_id_digits);

	assert(vm_id_digits <= VM_ID_MAX_DIGITS);
	*(--ptr) = '\0';
	do {
		*(--ptr) = digits[vm_id % 10];
		vm_id /= 10;
	} while (vm_id);
	*(--ptr) = 'm';
	*(--ptr) = 'v';
	assert(ptr == base);
}

/**
 * Reads a boolean property: true if present; false if not. If present, the
 * value of the property must be empty, otherwise the property is considered
 * malformed.
 */
static enum manifest_return_code read_bool(const struct fdt_node *node,
					   const char *property, bool *out)
{
	struct memiter data;
	bool present = fdt_read_property(node, property, &data);

	if (present && memiter_size(&data) != 0) {
		return MANIFEST_ERROR_MALFORMED_BOOLEAN;
	}

	*out = present;
	return MANIFEST_SUCCESS;
}

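/**
 * Reads a string property into `out`. Fails if the property is missing,
 * malformed, or longer than a struct string can hold.
 */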
static enum manifest_return_code read_string(const struct fdt_node *node,
					     const char *property,
					     struct string *out)
{
	struct memiter data;

	if (!fdt_read_property(node, property, &data)) {
		return MANIFEST_ERROR_PROPERTY_NOT_FOUND;
	}

	switch (string_init(out, &data)) {
	case STRING_SUCCESS:
		return MANIFEST_SUCCESS;
	case STRING_ERROR_INVALID_INPUT:
		return MANIFEST_ERROR_MALFORMED_STRING;
	case STRING_ERROR_TOO_LONG:
		return MANIFEST_ERROR_STRING_TOO_LONG;
	}
}

static enum manifest_return_code read_optional_string(
	const struct fdt_node *node, const char *property, struct string *out)
{
	enum manifest_return_code ret;

	ret = read_string(node, property, out);
	if (ret == MANIFEST_ERROR_PROPERTY_NOT_FOUND) {
		string_init_empty(out);
		ret = MANIFEST_SUCCESS;
	}
	return ret;
}

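/** Reads an integer property of up to 64 bits into `out`. */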
static enum manifest_return_code read_uint64(const struct fdt_node *node,
					     const char *property,
					     uint64_t *out)
{
	struct memiter data;

	if (!fdt_read_property(node, property, &data)) {
		return MANIFEST_ERROR_PROPERTY_NOT_FOUND;
	}

	if (!fdt_parse_number(&data, memiter_size(&data), out)) {
		return MANIFEST_ERROR_MALFORMED_INTEGER;
	}

	return MANIFEST_SUCCESS;
}

static enum manifest_return_code read_optional_uint64(
	const struct fdt_node *node, const char *property,
	uint64_t default_value, uint64_t *out)
{
	enum manifest_return_code ret;

	ret = read_uint64(node, property, out);
	if (ret == MANIFEST_ERROR_PROPERTY_NOT_FOUND) {
		*out = default_value;
		return MANIFEST_SUCCESS;
	}
	return ret;
}

static enum manifest_return_code read_uint32(const struct fdt_node *node,
					     const char *property,
					     uint32_t *out)
{
	uint64_t value;

	TRY(read_uint64(node, property, &value));

	if (value > UINT32_MAX) {
		return MANIFEST_ERROR_INTEGER_OVERFLOW;
	}

	*out = (uint32_t)value;
	return MANIFEST_SUCCESS;
}

static enum manifest_return_code read_optional_uint32(
	const struct fdt_node *node, const char *property,
	uint32_t default_value, uint32_t *out)
{
	enum manifest_return_code ret;

	ret = read_uint32(node, property, out);
	if (ret == MANIFEST_ERROR_PROPERTY_NOT_FOUND) {
		*out = default_value;
		return MANIFEST_SUCCESS;
	}
	return ret;
}

static enum manifest_return_code read_uint16(const struct fdt_node *node,
					     const char *property,
					     uint16_t *out)
{
	uint64_t value;

	TRY(read_uint64(node, property, &value));
	if (value > UINT16_MAX) {
		return MANIFEST_ERROR_INTEGER_OVERFLOW;
	}

	*out = (uint16_t)value;
	return MANIFEST_SUCCESS;
}

static enum manifest_return_code read_optional_uint16(
	const struct fdt_node *node, const char *property,
	uint16_t default_value, uint16_t *out)
{
	enum manifest_return_code ret;

	ret = read_uint16(node, property, out);
	if (ret == MANIFEST_ERROR_PROPERTY_NOT_FOUND) {
		*out = default_value;
		return MANIFEST_SUCCESS;
	}

	return ret;
}

static enum manifest_return_code read_uint8(const struct fdt_node *node,
					    const char *property, uint8_t *out)
{
	uint64_t value;

	TRY(read_uint64(node, property, &value));

	if (value > UINT8_MAX) {
		return MANIFEST_ERROR_INTEGER_OVERFLOW;
	}

	*out = (uint8_t)value;
	return MANIFEST_SUCCESS;
}

static enum manifest_return_code read_optional_uint8(
	const struct fdt_node *node, const char *property,
	uint8_t default_value, uint8_t *out)
{
	enum manifest_return_code ret;

	ret = read_uint8(node, property, out);
	if (ret == MANIFEST_ERROR_PROPERTY_NOT_FOUND) {
		*out = default_value;
		return MANIFEST_SUCCESS;
	}

	return ret;
}

struct uint32list_iter {
	struct memiter mem_it;
};

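/**
 * Reads a property containing a list of 32-bit values and returns an iterator
 * over it. The property size must be a multiple of sizeof(uint32_t).
 */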
static enum manifest_return_code read_uint32list(const struct fdt_node *node,
						 const char *property,
						 struct uint32list_iter *out)
{
	struct memiter data;

	if (!fdt_read_property(node, property, &data)) {
		memiter_init(&out->mem_it, NULL, 0);
		return MANIFEST_ERROR_PROPERTY_NOT_FOUND;
	}

	if ((memiter_size(&data) % sizeof(uint32_t)) != 0) {
		return MANIFEST_ERROR_MALFORMED_INTEGER_LIST;
	}

	out->mem_it = data;
	return MANIFEST_SUCCESS;
}

static enum manifest_return_code read_optional_uint32list(
	const struct fdt_node *node, const char *property,
	struct uint32list_iter *out)
{
	enum manifest_return_code ret = read_uint32list(node, property, out);

	if (ret == MANIFEST_ERROR_PROPERTY_NOT_FOUND) {
		return MANIFEST_SUCCESS;
	}
	return ret;
}

static bool uint32list_has_next(const struct uint32list_iter *list)
{
	return memiter_size(&list->mem_it) > 0;
}

static enum manifest_return_code uint32list_get_next(
	struct uint32list_iter *list, uint32_t *out)
{
	uint64_t num;

	CHECK(uint32list_has_next(list));
	if (!fdt_parse_number(&list->mem_it, sizeof(uint32_t), &num)) {
		return MANIFEST_ERROR_MALFORMED_INTEGER;
	}

	*out = (uint32_t)num;
	return MANIFEST_SUCCESS;
}

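/**
 * Parses the properties common to primary and secondary VM nodes of the
 * hypervisor manifest: FF-A partition flags, debug name, SMC whitelist and,
 * for secondary VMs, memory size, vCPU count and FDT file name.
 */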
static enum manifest_return_code parse_vm_common(const struct fdt_node *node,
						 struct manifest_vm *vm,
						 ffa_id_t vm_id)
{
	struct uint32list_iter smcs;
	size_t idx;

	TRY(read_bool(node, "is_ffa_partition", &vm->is_ffa_partition));

	TRY(read_bool(node, "hyp_loaded", &vm->is_hyp_loaded));

	TRY(read_string(node, "debug_name", &vm->debug_name));

	TRY(read_optional_uint32list(node, "smc_whitelist", &smcs));
	while (uint32list_has_next(&smcs) &&
	       vm->smc_whitelist.smc_count < MAX_SMCS) {
		idx = vm->smc_whitelist.smc_count++;
		TRY(uint32list_get_next(&smcs, &vm->smc_whitelist.smcs[idx]));
	}

	if (uint32list_has_next(&smcs)) {
		dlog_warning("%s SMC whitelist too long.\n",
			     vm->debug_name.data);
	}

	TRY(read_bool(node, "smc_whitelist_permissive",
		      &vm->smc_whitelist.permissive));

	if (vm_id != HF_PRIMARY_VM_ID) {
		TRY(read_uint64(node, "mem_size", &vm->secondary.mem_size));
		TRY(read_uint16(node, "vcpu_count", &vm->secondary.vcpu_count));
		TRY(read_optional_string(node, "fdt_filename",
					 &vm->secondary.fdt_filename));
	}

	return MANIFEST_SUCCESS;
}

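/**
 * Parses a VM node of the hypervisor manifest: kernel file name, ramdisk file
 * name and optional boot address for the primary VM, and the exception level
 * the partition runs at.
 */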
static enum manifest_return_code parse_vm(struct fdt_node *node,
					  struct manifest_vm *vm,
					  ffa_id_t vm_id)
{
	TRY(read_optional_string(node, "kernel_filename",
				 &vm->kernel_filename));

	if (vm_id == HF_PRIMARY_VM_ID) {
		TRY(read_optional_string(node, "ramdisk_filename",
					 &vm->primary.ramdisk_filename));
		TRY(read_optional_uint64(node, "boot_address",
					 MANIFEST_INVALID_ADDRESS,
					 &vm->primary.boot_address));
	}
	TRY(read_optional_uint8(node, "exception-level", (uint8_t)EL1,
				(uint8_t *)&vm->partition.run_time_el));

	return MANIFEST_SUCCESS;
}

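/**
 * Returns true if the region starting at `base_address` and spanning
 * `page_count` pages starts or ends inside one of the given memory ranges.
 */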
static bool is_memory_region_within_ranges(uintptr_t base_address,
					   uint32_t page_count,
					   const struct mem_range *ranges,
					   const size_t ranges_size)
{
	uintptr_t region_end =
		base_address + ((uintptr_t)page_count * PAGE_SIZE - 1);

	for (size_t i = 0; i < ranges_size; i++) {
		uintptr_t base = (uintptr_t)pa_addr(ranges[i].begin);
		uintptr_t end = (uintptr_t)pa_addr(ranges[i].end);

		if ((base_address >= base && base_address <= end) ||
		    (region_end >= base && region_end <= end)) {
			return true;
		}
	}

	return false;
}

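/** Logs the given memory ranges when verbose logging is enabled. */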
void dump_memory_ranges(const struct mem_range *ranges,
			const size_t ranges_size, bool ns)
{
	if (LOG_LEVEL < LOG_LEVEL_VERBOSE) {
		return;
	}

	dlog("%s Memory ranges:\n", ns ? "NS" : "S");

	for (size_t i = 0; i < ranges_size; i++) {
		uintptr_t begin = pa_addr(ranges[i].begin);
		uintptr_t end = pa_addr(ranges[i].end);
		size_t page_count =
			align_up(pa_difference(ranges[i].begin, ranges[i].end),
				 PAGE_SIZE) /
			PAGE_SIZE;

		dlog("  [%lx - %lx (%zu pages)]\n", begin, end, page_count);
	}
}

/**
 * Checks that the partition's assigned memory is contained in the memory
 * ranges configured for the SWd in the SPMC's manifest.
 */
static enum manifest_return_code check_partition_memory_is_valid(
	uintptr_t base_address, uint32_t page_count, uint32_t attributes,
	const struct boot_params *params)
{
	bool is_secure_region =
		(attributes & MANIFEST_REGION_ATTR_SECURITY) == 0U;
	bool is_device_region =
		(attributes & MANIFEST_REGION_ATTR_MEMORY_TYPE_DEVICE) != 0U;
	const struct mem_range *ranges_from_manifest;
	size_t ranges_count;
	bool within_ranges;
	enum manifest_return_code error_return;

	if (!is_device_region) {
		ranges_from_manifest = is_secure_region ? params->mem_ranges
							: params->ns_mem_ranges;
		ranges_count = is_secure_region ? params->mem_ranges_count
						: params->ns_mem_ranges_count;
		error_return = MANIFEST_ERROR_MEM_REGION_INVALID;
	} else {
		ranges_from_manifest = is_secure_region
					       ? params->device_mem_ranges
					       : params->ns_device_mem_ranges;
		ranges_count = is_secure_region
				       ? params->device_mem_ranges_count
				       : params->ns_device_mem_ranges_count;
		error_return = MANIFEST_ERROR_DEVICE_MEM_REGION_INVALID;
	}

	within_ranges = is_memory_region_within_ranges(
		base_address, page_count, ranges_from_manifest, ranges_count);

	return within_ranges ? MANIFEST_SUCCESS : error_return;
}

/*
 * Keep track of the memory allocated by partitions. This includes memory
 * region nodes and device region nodes defined in their respective partition
 * manifests, as well as the address space defined from their load address.
 */
static enum manifest_return_code check_and_record_memory_used(
	uintptr_t base_address, uint32_t page_count)
{
	bool overlap_of_regions;

	if (page_count == 0U) {
		dlog_error(
			"Empty memory region defined with base address: "
			"%#lx.\n",
			base_address);
		return MANIFEST_ERROR_MEM_REGION_EMPTY;
	}

	if (!is_aligned(base_address, PAGE_SIZE)) {
		dlog_error("base_address (%#lx) is not aligned to page size.\n",
			   base_address);
		return MANIFEST_ERROR_MEM_REGION_UNALIGNED;
	}

	overlap_of_regions = is_memory_region_within_ranges(
		base_address, page_count, manifest_data->mem_regions,
		allocated_mem_regions_index);

	if (!overlap_of_regions) {
		paddr_t begin = pa_init(base_address);

		manifest_data->mem_regions[allocated_mem_regions_index].begin =
			begin;
		manifest_data->mem_regions[allocated_mem_regions_index].end =
			pa_add(begin, page_count * PAGE_SIZE - 1);
		allocated_mem_regions_index++;

		return MANIFEST_SUCCESS;
	}

	return MANIFEST_ERROR_MEM_REGION_OVERLAP;
}

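/**
 * Parses the "memory-regions" node of an FF-A partition manifest: base or
 * load-relative address, page count, attributes, optional DMA stream IDs and
 * RX/TX buffer assignment for each region. Each region is validated against
 * the boot parameters and recorded so overlaps can be detected.
 */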
static enum manifest_return_code parse_ffa_memory_region_node(
	struct fdt_node *mem_node, uintptr_t load_address,
	struct memory_region *mem_regions, uint16_t *count, struct rx_tx *rxtx,
	const struct boot_params *boot_params)
{
	uint32_t phandle;
	uint16_t i = 0;
	uint32_t j = 0;
	uintptr_t relative_address;
	struct uint32list_iter list;

	dlog_verbose("  Partition memory regions\n");

	if (!fdt_is_compatible(mem_node, "arm,ffa-manifest-memory-regions")) {
		return MANIFEST_ERROR_NOT_COMPATIBLE;
	}

	if (!fdt_first_child(mem_node)) {
		return MANIFEST_ERROR_MEMORY_REGION_NODE_EMPTY;
	}

	do {
		dlog_verbose("    Memory Region[%u]\n", i);

		TRY(read_optional_string(mem_node, "description",
					 &mem_regions[i].name));
		dlog_verbose("      Name: %s\n",
			     string_data(&mem_regions[i].name));

		TRY(read_optional_uint64(mem_node, "base-address",
					 MANIFEST_INVALID_ADDRESS,
					 &mem_regions[i].base_address));
		dlog_verbose("      Base address: %#lx\n",
			     mem_regions[i].base_address);

		TRY(read_optional_uint64(mem_node, "relative-address",
					 MANIFEST_INVALID_ADDRESS,
					 &relative_address));
		if (relative_address != MANIFEST_INVALID_ADDRESS) {
			dlog_verbose("      Relative address: %#lx\n",
				     relative_address);
		}

		if (mem_regions[i].base_address == MANIFEST_INVALID_ADDRESS &&
		    relative_address == MANIFEST_INVALID_ADDRESS) {
			return MANIFEST_ERROR_PROPERTY_NOT_FOUND;
		}

		if (mem_regions[i].base_address != MANIFEST_INVALID_ADDRESS &&
		    relative_address != MANIFEST_INVALID_ADDRESS) {
			return MANIFEST_ERROR_BASE_ADDRESS_AND_RELATIVE_ADDRESS;
		}

		if (relative_address != MANIFEST_INVALID_ADDRESS &&
		    relative_address > UINT64_MAX - load_address) {
			return MANIFEST_ERROR_INTEGER_OVERFLOW;
		}

		if (relative_address != MANIFEST_INVALID_ADDRESS) {
			mem_regions[i].base_address =
				load_address + relative_address;
		}

		TRY(read_uint32(mem_node, "pages-count",
				&mem_regions[i].page_count));
		dlog_verbose("      Pages_count: %u\n",
			     mem_regions[i].page_count);

		TRY(read_uint32(mem_node, "attributes",
				&mem_regions[i].attributes));

		/*
		 * Check RWX permission attributes.
		 * Security attribute is checked at load phase.
		 */
		uint32_t permissions = mem_regions[i].attributes &
				       (MANIFEST_REGION_ATTR_READ |
					MANIFEST_REGION_ATTR_WRITE |
					MANIFEST_REGION_ATTR_EXEC);
		if (permissions != MANIFEST_REGION_ATTR_READ &&
		    permissions != (MANIFEST_REGION_ATTR_READ |
				    MANIFEST_REGION_ATTR_WRITE) &&
		    permissions != (MANIFEST_REGION_ATTR_READ |
				    MANIFEST_REGION_ATTR_EXEC)) {
			return MANIFEST_ERROR_INVALID_MEM_PERM;
		}

		/* Filter memory region attributes. */
		mem_regions[i].attributes &= MANIFEST_REGION_ALL_ATTR_MASK;

		dlog_verbose("      Attributes: %#x\n",
			     mem_regions[i].attributes);

		TRY(check_partition_memory_is_valid(
			mem_regions[i].base_address, mem_regions[i].page_count,
			mem_regions[i].attributes, boot_params));

		TRY(check_and_record_memory_used(mem_regions[i].base_address,
						 mem_regions[i].page_count));

		TRY(read_optional_uint32(mem_node, "smmu-id",
					 MANIFEST_INVALID_ID,
					 &mem_regions[i].dma_prop.smmu_id));
		if (mem_regions[i].dma_prop.smmu_id != MANIFEST_INVALID_ID) {
			dlog_verbose("      smmu-id: %u\n",
				     mem_regions[i].dma_prop.smmu_id);
		}

		TRY(read_optional_uint32list(mem_node, "stream-ids", &list));
		dlog_verbose("      Stream IDs assigned:\n");

		j = 0;
		while (uint32list_has_next(&list)) {
			if (j == PARTITION_MAX_STREAMS_PER_DEVICE) {
				return MANIFEST_ERROR_STREAM_IDS_OVERFLOW;
			}

			TRY(uint32list_get_next(
				&list, &mem_regions[i].dma_prop.stream_ids[j]));
			dlog_verbose("        %u\n",
				     mem_regions[i].dma_prop.stream_ids[j]);
			j++;
		}
		if (j == 0) {
			dlog_verbose("        None\n");
		} else if (mem_regions[i].dma_prop.smmu_id ==
			   MANIFEST_INVALID_ID) {
			/*
			 * SMMU ID must be specified if the partition specifies
			 * Stream IDs for any device upstream of SMMU.
			 */
			return MANIFEST_ERROR_MISSING_SMMU_ID;
		}

		mem_regions[i].dma_prop.stream_count = j;

		TRY(read_optional_uint32list(
			mem_node, "stream-ids-access-permissions", &list));
		dlog_verbose("      Access permissions of Stream IDs:\n");

		j = 0;
		while (uint32list_has_next(&list)) {
			uint32_t permissions;

			if (j == PARTITION_MAX_STREAMS_PER_DEVICE) {
				return MANIFEST_ERROR_DMA_ACCESS_PERMISSIONS_OVERFLOW;
			}

			TRY(uint32list_get_next(&list, &permissions));
			dlog_verbose("        %u\n", permissions);

			if (j == 0) {
				mem_regions[i].dma_prop.dma_access_permissions =
					permissions;
			}

			/*
			 * All stream IDs belonging to a DMA device must
			 * specify the same access permissions.
			 */
			if (permissions !=
			    mem_regions[i].dma_prop.dma_access_permissions) {
				return MANIFEST_ERROR_MISMATCH_DMA_ACCESS_PERMISSIONS;
			}

			j++;
		}

		if (j == 0) {
			dlog_verbose("        None\n");
		} else if (j != mem_regions[i].dma_prop.stream_count) {
			return MANIFEST_ERROR_MISMATCH_DMA_ACCESS_PERMISSIONS;
		}

		if (j > 0) {
			/* Filter the DMA access permissions. */
			mem_regions[i].dma_prop.dma_access_permissions &=
				MANIFEST_REGION_ALL_ATTR_MASK;
		}

		if (rxtx->available) {
			TRY(read_optional_uint32(
				mem_node, "phandle",
				(uint32_t)MANIFEST_INVALID_ADDRESS, &phandle));
			if (phandle == rxtx->rx_phandle) {
				dlog_verbose("      Assigned as RX buffer\n");
				rxtx->rx_buffer = &mem_regions[i];
			} else if (phandle == rxtx->tx_phandle) {
				dlog_verbose("      Assigned as TX buffer\n");
				rxtx->tx_buffer = &mem_regions[i];
			}
		}

		i++;
	} while (fdt_next_sibling(mem_node) &&
		 (i < PARTITION_MAX_MEMORY_REGIONS));

	if (rxtx->available &&
	    (rxtx->rx_buffer->page_count != rxtx->tx_buffer->page_count)) {
		return MANIFEST_ERROR_RXTX_SIZE_MISMATCH;
	}

	*count = i;

	return MANIFEST_SUCCESS;
}

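/**
 * Returns the interrupt_info entry of the device region matching the given
 * interrupt ID, or NULL if the ID is not declared by the region.
 */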
static struct interrupt_info *device_region_get_interrupt_info(
	struct device_region *dev_regions, uint32_t intid)
{
	for (uint32_t i = 0; i < ARRAY_SIZE(dev_regions->interrupts); i++) {
		if (dev_regions->interrupts[i].id == intid) {
			return &(dev_regions->interrupts[i]);
		}
	}
	return NULL;
}

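/**
 * Parses the "device-regions" node of an FF-A partition manifest: base
 * address, page count, attributes, interrupt lists and targets, SMMU stream
 * IDs and exclusive-access flag for each device region. Also assigns a DMA
 * device ID to regions that declare stream IDs behind an SMMU.
 */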
static enum manifest_return_code parse_ffa_device_region_node(
	struct fdt_node *dev_node, struct device_region *dev_regions,
	uint16_t *count, uint8_t *dma_device_count,
	const struct boot_params *boot_params)
{
	struct uint32list_iter list;
	uint16_t i = 0;
	uint32_t j = 0;
	struct interrupt_bitmap allocated_intids = manifest_data->intids;
	static uint8_t dma_device_id = 0;

	dlog_verbose("  Partition Device Regions\n");

	if (!fdt_is_compatible(dev_node, "arm,ffa-manifest-device-regions")) {
		return MANIFEST_ERROR_NOT_COMPATIBLE;
	}

	if (!fdt_first_child(dev_node)) {
		return MANIFEST_ERROR_DEVICE_REGION_NODE_EMPTY;
	}

	*dma_device_count = 0;

	do {
		dlog_verbose("    Device Region[%u]\n", i);

		TRY(read_optional_string(dev_node, "description",
					 &dev_regions[i].name));
		dlog_verbose("      Name: %s\n",
			     string_data(&dev_regions[i].name));

		TRY(read_uint64(dev_node, "base-address",
				&dev_regions[i].base_address));
		dlog_verbose("      Base address: %#lx\n",
			     dev_regions[i].base_address);

		TRY(read_uint32(dev_node, "pages-count",
				&dev_regions[i].page_count));
		dlog_verbose("      Pages_count: %u\n",
			     dev_regions[i].page_count);

		TRY(check_and_record_memory_used(dev_regions[i].base_address,
						 dev_regions[i].page_count));

		TRY(read_uint32(dev_node, "attributes",
				&dev_regions[i].attributes));
		/* Set the memory type attribute to device. */
		dev_regions[i].attributes =
			dev_regions[i].attributes |
			MANIFEST_REGION_ATTR_MEMORY_TYPE_DEVICE;

		/*
		 * Check RWX permission attributes.
		 * Security attribute is checked at load phase.
		 */
		uint32_t permissions = dev_regions[i].attributes &
				       (MANIFEST_REGION_ATTR_READ |
					MANIFEST_REGION_ATTR_WRITE |
					MANIFEST_REGION_ATTR_EXEC);

		if (permissions != MANIFEST_REGION_ATTR_READ &&
		    permissions != (MANIFEST_REGION_ATTR_READ |
				    MANIFEST_REGION_ATTR_WRITE)) {
			return MANIFEST_ERROR_INVALID_MEM_PERM;
		}

		/* Filter device region attributes. */
		dev_regions[i].attributes = dev_regions[i].attributes &
					    MANIFEST_REGION_ALL_ATTR_MASK;

		dlog_verbose("      Attributes: %#x\n",
			     dev_regions[i].attributes);

		TRY(check_partition_memory_is_valid(
			dev_regions[i].base_address, dev_regions[i].page_count,
			dev_regions[i].attributes, boot_params));

		TRY(read_optional_uint32list(dev_node, "interrupts", &list));
		dlog_verbose("      Interrupt List:\n");
		j = 0;
		while (uint32list_has_next(&list) &&
		       j < PARTITION_MAX_INTERRUPTS_PER_DEVICE) {
			uint32_t intid;

			TRY(uint32list_get_next(
				&list, &dev_regions[i].interrupts[j].id));
			intid = dev_regions[i].interrupts[j].id;

			dlog_verbose("        ID = %u\n", intid);

			if (interrupt_bitmap_get_value(&allocated_intids,
						       intid) == 1U) {
				return MANIFEST_ERROR_INTERRUPT_ID_REPEATED;
			}

			interrupt_bitmap_set_value(&allocated_intids, intid);

			if (uint32list_has_next(&list)) {
				TRY(uint32list_get_next(&list,
							&dev_regions[i]
								 .interrupts[j]
								 .attributes));
			} else {
				return MANIFEST_ERROR_MALFORMED_INTEGER_LIST;
			}

			dev_regions[i].interrupts[j].mpidr_valid = false;
			dev_regions[i].interrupts[j].mpidr = 0;

			dlog_verbose("        attributes = %u\n",
				     dev_regions[i].interrupts[j].attributes);
			j++;
		}

		dev_regions[i].interrupt_count = j;
		if (j == 0) {
			dlog_verbose("        Empty\n");
		} else {
			TRY(read_optional_uint32list(
				dev_node, "interrupts-target", &list));
			dlog_verbose("      Interrupt Target List:\n");

			while (uint32list_has_next(&list)) {
				uint32_t intid;
				uint64_t mpidr = 0;
				uint32_t mpidr_lower = 0;
				uint32_t mpidr_upper = 0;
				struct interrupt_info *info = NULL;

				TRY(uint32list_get_next(&list, &intid));

				dlog_verbose("        ID = %u\n", intid);

				if (interrupt_bitmap_get_value(
					    &allocated_intids, intid) != 1U) {
					return MANIFEST_ERROR_INTERRUPT_ID_NOT_IN_LIST;
				}

				TRY(uint32list_get_next(&list, &mpidr_upper));
				TRY(uint32list_get_next(&list, &mpidr_lower));
				mpidr = mpidr_upper;
				mpidr <<= 32;
				mpidr |= mpidr_lower;

				info = device_region_get_interrupt_info(
					&dev_regions[i], intid);
				/*
				 * We should find info since
				 * interrupt_bitmap_get_value already ensures
				 * that we saw the interrupt and allocated ids
				 * for it.
				 */
				assert(info != NULL);
				info->mpidr = mpidr;
				info->mpidr_valid = true;
				dlog_verbose("        MPIDR = %#lx\n", mpidr);
			}
		}

		TRY(read_optional_uint32(dev_node, "smmu-id",
					 MANIFEST_INVALID_ID,
					 &dev_regions[i].smmu_id));
		if (dev_regions[i].smmu_id != MANIFEST_INVALID_ID) {
			dlog_verbose("      smmu-id: %u\n",
				     dev_regions[i].smmu_id);
		}

		TRY(read_optional_uint32list(dev_node, "stream-ids", &list));
		dlog_verbose("      Stream IDs assigned:\n");

		j = 0;
		while (uint32list_has_next(&list)) {
			if (j == PARTITION_MAX_STREAMS_PER_DEVICE) {
				return MANIFEST_ERROR_STREAM_IDS_OVERFLOW;
			}

			TRY(uint32list_get_next(&list,
						&dev_regions[i].stream_ids[j]));
			dlog_verbose("        %u\n",
				     dev_regions[i].stream_ids[j]);
			j++;
		}

		if (j == 0) {
			dlog_verbose("        None\n");
		} else if (dev_regions[i].smmu_id != MANIFEST_INVALID_ID) {
			dev_regions[i].dma_device_id = dma_device_id++;
			*dma_device_count = dma_device_id;

			if (*dma_device_count > PARTITION_MAX_DMA_DEVICES) {
				return MANIFEST_ERROR_DMA_DEVICE_OVERFLOW;
			}

			dlog_verbose("      dma peripheral device id: %u\n",
				     dev_regions[i].dma_device_id);
		} else {
			/*
			 * SMMU ID must be specified if the partition specifies
			 * Stream IDs for any device upstream of SMMU.
			 */
			return MANIFEST_ERROR_MISSING_SMMU_ID;
		}

		dev_regions[i].stream_count = j;

		TRY(read_bool(dev_node, "exclusive-access",
			      &dev_regions[i].exclusive_access));
		dlog_verbose("      Exclusive_access: %u\n",
			     dev_regions[i].exclusive_access);

		i++;
	} while (fdt_next_sibling(dev_node) &&
		 (i < PARTITION_MAX_DEVICE_REGIONS));

	*count = i;

	return MANIFEST_SUCCESS;
}

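/**
 * Checks that the parsed FF-A partition manifest is consistent with what the
 * SPMC supports: compatible FF-A version, translation granule, execution
 * state, exception level, messaging methods, interrupt counts and boot GP
 * register.
 */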
static enum manifest_return_code sanity_check_ffa_manifest(
	struct manifest_vm *vm)
{
	enum ffa_version ffa_version;
	enum manifest_return_code ret_code = MANIFEST_SUCCESS;
	const char *error_string = "specified in manifest is unsupported";
	uint32_t k = 0;
	bool using_req2 = (vm->partition.messaging_method &
			   (FFA_PARTITION_DIRECT_REQ2_RECV |
			    FFA_PARTITION_DIRECT_REQ2_SEND)) != 0;

	/* Ensure that the partition's FF-A version is compatible. */
	ffa_version = vm->partition.ffa_version;
	if (!ffa_versions_are_compatible(ffa_version, FFA_VERSION_COMPILED)) {
		dlog_error("FF-A partition manifest version %s: %u.%u\n",
			   error_string, ffa_version_get_major(ffa_version),
			   ffa_version_get_minor(ffa_version));
		ret_code = MANIFEST_ERROR_NOT_COMPATIBLE;
	}

	if (vm->partition.xlat_granule != PAGE_4KB) {
		dlog_error("Translation granule %s: %u\n", error_string,
			   vm->partition.xlat_granule);
		ret_code = MANIFEST_ERROR_NOT_COMPATIBLE;
	}

	if (vm->partition.execution_state != AARCH64) {
		dlog_error("Execution state %s: %u\n", error_string,
			   vm->partition.execution_state);
		ret_code = MANIFEST_ERROR_NOT_COMPATIBLE;
	}

	if (vm->partition.run_time_el != EL1 &&
	    vm->partition.run_time_el != S_EL1 &&
	    vm->partition.run_time_el != S_EL0 &&
	    vm->partition.run_time_el != EL0) {
		dlog_error("Exception level %s: %d\n", error_string,
			   vm->partition.run_time_el);
		ret_code = MANIFEST_ERROR_NOT_COMPATIBLE;
	}

	if (vm->partition.ffa_version < FFA_VERSION_1_2 && using_req2) {
		dlog_error("Messaging method %s: %x\n", error_string,
			   vm->partition.messaging_method);
		ret_code = MANIFEST_ERROR_NOT_COMPATIBLE;
	}

	if ((vm->partition.messaging_method &
	     ~(FFA_PARTITION_DIRECT_REQ_RECV | FFA_PARTITION_DIRECT_REQ_SEND |
	       FFA_PARTITION_INDIRECT_MSG | FFA_PARTITION_DIRECT_REQ2_RECV |
	       FFA_PARTITION_DIRECT_REQ2_SEND)) != 0U) {
		dlog_error("Messaging method %s: %x\n", error_string,
			   vm->partition.messaging_method);
		ret_code = MANIFEST_ERROR_NOT_COMPATIBLE;
	}

	if ((vm->partition.run_time_el == S_EL0 ||
	     vm->partition.run_time_el == EL0) &&
	    vm->partition.execution_ctx_count != 1) {
		dlog_error(
			"Exception level and execution context count %s: %d "
			"%d\n",
			error_string, vm->partition.run_time_el,
			vm->partition.execution_ctx_count);
		ret_code = MANIFEST_ERROR_NOT_COMPATIBLE;
	}

	for (uint16_t i = 0; i < vm->partition.dev_region_count; i++) {
		struct device_region dev_region;

		dev_region = vm->partition.dev_regions[i];

		if (dev_region.interrupt_count >
		    PARTITION_MAX_INTERRUPTS_PER_DEVICE) {
			dlog_error(
				"Interrupt count for device region exceeds "
				"limit.\n");
			ret_code = MANIFEST_ERROR_NOT_COMPATIBLE;
			continue;
		}

		for (uint8_t j = 0; j < dev_region.interrupt_count; j++) {
			k++;
			if (k > VM_MANIFEST_MAX_INTERRUPTS) {
				dlog_error(
					"Interrupt count for VM exceeds "
					"limit.\n");
				ret_code = MANIFEST_ERROR_NOT_COMPATIBLE;
				continue;
			}
		}
	}

	/* GP register is restricted to one of x0 - x3. */
	if (vm->partition.gp_register_num != DEFAULT_BOOT_GP_REGISTER &&
	    vm->partition.gp_register_num > 3) {
		dlog_error("GP register number %s: %u\n", error_string,
			   vm->partition.gp_register_num);
		ret_code = MANIFEST_ERROR_NOT_COMPATIBLE;
	}

	return ret_code;
}

/**
 * Find the device id allocated to the device region node corresponding to the
 * specified stream id.
 */
static bool find_dma_device_id_from_dev_region_nodes(
	const struct manifest_vm *manifest_vm, uint32_t sid, uint8_t *device_id)
{
	for (uint16_t i = 0; i < manifest_vm->partition.dev_region_count; i++) {
		struct device_region dev_region =
			manifest_vm->partition.dev_regions[i];

		for (uint8_t j = 0; j < dev_region.stream_count; j++) {
			if (sid == dev_region.stream_ids[j]) {
				*device_id = dev_region.dma_device_id;
				return true;
			}
		}
	}
	return false;
}

/**
 * Identify the device id of a DMA device node corresponding to a stream id
 * specified in the memory region node.
 */
static bool map_dma_device_id_to_stream_ids(struct manifest_vm *vm)
{
	for (uint16_t i = 0; i < vm->partition.mem_region_count; i++) {
		struct memory_region *mem_region =
			&vm->partition.mem_regions[i];

		for (uint8_t j = 0; j < mem_region->dma_prop.stream_count;
		     j++) {
			uint32_t sid = mem_region->dma_prop.stream_ids[j];
			uint8_t device_id = 0;

			/*
			 * Every stream id must have been declared in the
			 * device node as well.
			 */
			if (!find_dma_device_id_from_dev_region_nodes(
				    vm, sid, &device_id)) {
				dlog_verbose(
					"Stream ID %d not found in any device "
					"region node of partition manifest\n",
					sid);
				return false;
			}

			mem_region->dma_prop.dma_device_id = device_id;
		}
	}

	return true;
}

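/**
 * Parses the root of an FF-A partition manifest: UUIDs, FF-A version,
 * execution context count, exception level, load address, boot order, RX/TX
 * buffer phandles, messaging methods, interrupt handling policies, boot
 * information and power management messages.
 */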
J-Alves77b6f4f2023-03-15 11:34:49 +00001188enum manifest_return_code parse_ffa_manifest(
1189 struct fdt *fdt, struct manifest_vm *vm,
1190 struct fdt_node *boot_info_node, const struct boot_params *boot_params)
Olivier Deprez62d99e32020-01-09 15:58:07 +01001191{
1192 unsigned int i = 0;
Kathleen Capella422b10b2023-06-30 18:28:27 -04001193 unsigned int j = 0;
Olivier Deprez62d99e32020-01-09 15:58:07 +01001194 struct uint32list_iter uuid;
1195 uint32_t uuid_word;
1196 struct fdt_node root;
1197 struct fdt_node ffa_node;
1198 struct string rxtx_node_name = STRING_INIT("rx_tx-info");
Manish Pandey6542f5c2020-04-27 14:37:46 +01001199 struct string mem_region_node_name = STRING_INIT("memory-regions");
Manish Pandeye68e7932020-04-23 15:29:28 +01001200 struct string dev_region_node_name = STRING_INIT("device-regions");
J-Alves35315782022-01-25 17:58:32 +00001201 struct string boot_info_node_name = STRING_INIT("boot-info");
Madhukar Pappireddy84154052022-06-21 18:30:25 -05001202 bool managed_exit_field_present = false;
Olivier Deprez62d99e32020-01-09 15:58:07 +01001203
1204 if (!fdt_find_node(fdt, "/", &root)) {
1205 return MANIFEST_ERROR_NO_ROOT_NODE;
1206 }
1207
1208 /* Check "compatible" property. */
1209 if (!fdt_is_compatible(&root, "arm,ffa-manifest-1.0")) {
1210 return MANIFEST_ERROR_NOT_COMPATIBLE;
1211 }
1212
J-Alves4369bd92020-08-07 16:35:36 +01001213 TRY(read_uint32list(&root, "uuid", &uuid));
Olivier Deprez62d99e32020-01-09 15:58:07 +01001214
Kathleen Capella422b10b2023-06-30 18:28:27 -04001215 while (uint32list_has_next(&uuid) && j < PARTITION_MAX_UUIDS) {
1216 while (uint32list_has_next(&uuid) && i < 4) {
1217 TRY(uint32list_get_next(&uuid, &uuid_word));
1218 vm->partition.uuids[j].uuid[i] = uuid_word;
1219 i++;
1220 }
1221
1222 if (ffa_uuid_is_null(&vm->partition.uuids[j])) {
1223 return MANIFEST_ERROR_UUID_ALL_ZEROS;
1224 }
1225 dlog_verbose(" UUID %#x-%x-%x-%x\n",
1226 vm->partition.uuids[j].uuid[0],
1227 vm->partition.uuids[j].uuid[1],
1228 vm->partition.uuids[j].uuid[2],
1229 vm->partition.uuids[j].uuid[3]);
1230 j++;
1231 i = 0;
Olivier Deprez62d99e32020-01-09 15:58:07 +01001232 }
Kathleen Capella422b10b2023-06-30 18:28:27 -04001233
1234 vm->partition.uuid_count = j;
1235 dlog_verbose(" Number of UUIDs %u\n", vm->partition.uuid_count);
Olivier Deprez62d99e32020-01-09 15:58:07 +01001236
Olivier Deprezb7aa0c22023-06-29 10:46:26 +02001237 TRY(read_uint32(&root, "ffa-version", &vm->partition.ffa_version));
1238 dlog_verbose(" Expected FF-A version %u.%u\n",
Karl Meakin0e617d92024-04-05 12:55:22 +01001239 ffa_version_get_major(vm->partition.ffa_version),
1240 ffa_version_get_minor(vm->partition.ffa_version));
Olivier Deprezb7aa0c22023-06-29 10:46:26 +02001241
Olivier Deprez62d99e32020-01-09 15:58:07 +01001242 TRY(read_uint16(&root, "execution-ctx-count",
Raghu Krishnamurthy8c250a92021-07-02 12:16:42 -07001243 &vm->partition.execution_ctx_count));
J-Alves4369bd92020-08-07 16:35:36 +01001244 dlog_verbose(" Number of execution context %u\n",
Raghu Krishnamurthy8c250a92021-07-02 12:16:42 -07001245 vm->partition.execution_ctx_count);
Olivier Deprez62d99e32020-01-09 15:58:07 +01001246
1247 TRY(read_uint8(&root, "exception-level",
Raghu Krishnamurthy8c250a92021-07-02 12:16:42 -07001248 (uint8_t *)&vm->partition.run_time_el));
1249 dlog_verbose(" Run-time EL %u\n", vm->partition.run_time_el);
Olivier Deprez62d99e32020-01-09 15:58:07 +01001250
1251 TRY(read_uint8(&root, "execution-state",
Raghu Krishnamurthy8c250a92021-07-02 12:16:42 -07001252 (uint8_t *)&vm->partition.execution_state));
1253 dlog_verbose(" Execution state %u\n", vm->partition.execution_state);
Olivier Deprez62d99e32020-01-09 15:58:07 +01001254
Raghu Krishnamurthy8c250a92021-07-02 12:16:42 -07001255 TRY(read_optional_uint64(&root, "load-address", 0,
1256 &vm->partition.load_addr));
Karl Meakine8937d92024-03-19 16:04:25 +00001257 dlog_verbose(" Load address %#lx\n", vm->partition.load_addr);
Olivier Deprez62d99e32020-01-09 15:58:07 +01001258
J-Alves4369bd92020-08-07 16:35:36 +01001259 TRY(read_optional_uint64(&root, "entrypoint-offset", 0,
Raghu Krishnamurthy8c250a92021-07-02 12:16:42 -07001260 &vm->partition.ep_offset));
Karl Meakine8937d92024-03-19 16:04:25 +00001261 dlog_verbose(" Entry point offset %#zx\n", vm->partition.ep_offset);
Olivier Deprez62d99e32020-01-09 15:58:07 +01001262
J-Alves35315782022-01-25 17:58:32 +00001263 TRY(read_optional_uint32(&root, "gp-register-num",
1264 DEFAULT_BOOT_GP_REGISTER,
1265 &vm->partition.gp_register_num));
Olivier Deprezb7aa0c22023-06-29 10:46:26 +02001266
1267 if (vm->partition.gp_register_num != DEFAULT_BOOT_GP_REGISTER) {
1268 dlog_verbose(" Boot GP register: x%u\n",
1269 vm->partition.gp_register_num);
1270 }
J-Alves35315782022-01-25 17:58:32 +00001271
J-Alvesb37fd082020-10-22 12:29:21 +01001272 TRY(read_optional_uint16(&root, "boot-order", DEFAULT_BOOT_ORDER,
Raghu Krishnamurthy8c250a92021-07-02 12:16:42 -07001273 &vm->partition.boot_order));
Kathleen Capella4a2a6e72023-04-21 14:43:26 -04001274 if (vm->partition.boot_order != DEFAULT_BOOT_ORDER) {
Karl Meakine8937d92024-03-19 16:04:25 +00001275 dlog_verbose(" Boot order %u\n", vm->partition.boot_order);
Kathleen Capella4a2a6e72023-04-21 14:43:26 -04001276 }
1277
1278 if (!check_boot_order(vm->partition.boot_order)) {
1279 return MANIFEST_ERROR_INVALID_BOOT_ORDER;
1280 }
J-Alvesb37fd082020-10-22 12:29:21 +01001281
J-Alves4369bd92020-08-07 16:35:36 +01001282 TRY(read_optional_uint8(&root, "xlat-granule", 0,
Raghu Krishnamurthy8c250a92021-07-02 12:16:42 -07001283 (uint8_t *)&vm->partition.xlat_granule));
1284 dlog_verbose(" Translation granule %u\n", vm->partition.xlat_granule);
Olivier Deprez62d99e32020-01-09 15:58:07 +01001285
1286 ffa_node = root;
1287 if (fdt_find_child(&ffa_node, &rxtx_node_name)) {
1288 if (!fdt_is_compatible(&ffa_node,
1289 "arm,ffa-manifest-rx_tx-buffer")) {
1290 return MANIFEST_ERROR_NOT_COMPATIBLE;
1291 }
1292
Manish Pandeyfa1f2912020-05-05 12:57:01 +01001293 /*
1294 * Read only phandles for now, it will be used to update buffers
1295 * while parsing memory regions.
1296 */
1297 TRY(read_uint32(&ffa_node, "rx-buffer",
Raghu Krishnamurthy8c250a92021-07-02 12:16:42 -07001298 &vm->partition.rxtx.rx_phandle));
Olivier Deprez62d99e32020-01-09 15:58:07 +01001299
Manish Pandeyfa1f2912020-05-05 12:57:01 +01001300 TRY(read_uint32(&ffa_node, "tx-buffer",
Raghu Krishnamurthy8c250a92021-07-02 12:16:42 -07001301 &vm->partition.rxtx.tx_phandle));
Olivier Deprez62d99e32020-01-09 15:58:07 +01001302
Raghu Krishnamurthy8c250a92021-07-02 12:16:42 -07001303 vm->partition.rxtx.available = true;
Olivier Deprez62d99e32020-01-09 15:58:07 +01001304 }

        TRY(read_uint16(&root, "messaging-method",
                        (uint16_t *)&vm->partition.messaging_method));
        dlog_verbose(" Messaging method %u\n", vm->partition.messaging_method);

        TRY(read_bool(&root, "managed-exit", &managed_exit_field_present));

        TRY(read_optional_uint8(
                &root, "ns-interrupts-action", NS_ACTION_SIGNALED,
                (uint8_t *)&vm->partition.ns_interrupts_action));

        /*
         * An SP manifest can specify one of the fields listed below:
         * `managed-exit`: Introduced in FF-A v1.0 spec.
         * `ns-interrupts-action`: Introduced in FF-A v1.1 EAC0 spec.
         * If both are missing from the manifest, the default response is
         * NS_ACTION_SIGNALED.
         */
        if (managed_exit_field_present) {
                vm->partition.ns_interrupts_action = NS_ACTION_ME;
        }

        if (vm->partition.ns_interrupts_action != NS_ACTION_QUEUED &&
            vm->partition.ns_interrupts_action != NS_ACTION_ME &&
            vm->partition.ns_interrupts_action != NS_ACTION_SIGNALED) {
                return MANIFEST_ERROR_ILLEGAL_NS_INT_ACTION;
        }

        dlog_verbose(
                " NS Interrupts %s\n",
                (vm->partition.ns_interrupts_action == NS_ACTION_QUEUED)
                        ? "Queued"
                        : (vm->partition.ns_interrupts_action == NS_ACTION_SIGNALED)
                                  ? "Signaled"
                                  : "Managed exit");

        if (vm->partition.ns_interrupts_action == NS_ACTION_ME) {
                /* Managed exit only supported by S_EL1 partitions. */
                if (vm->partition.run_time_el != S_EL1) {
                        dlog_error(
                                "Managed exit cannot be supported by this "
                                "partition\n");
                        return MANIFEST_ERROR_ILLEGAL_NS_INT_ACTION;
                }

                TRY(read_bool(&root, "managed-exit-virq",
                              &vm->partition.me_signal_virq));
                if (vm->partition.me_signal_virq) {
                        dlog_verbose(" Managed Exit signaled through vIRQ\n");
                }
        }

        TRY(read_bool(&root, "notification-support",
                      &vm->partition.notification_support));
        if (vm->partition.notification_support) {
                dlog_verbose(" Notifications Receipt Supported\n");
        }

        TRY(read_optional_uint8(
                &root, "other-s-interrupts-action", OTHER_S_INT_ACTION_SIGNALED,
                (uint8_t *)&vm->partition.other_s_interrupts_action));

        if (vm->partition.other_s_interrupts_action ==
            OTHER_S_INT_ACTION_QUEUED) {
                if (vm->partition.ns_interrupts_action != NS_ACTION_QUEUED) {
                        dlog_error(
                                "Choice of the fields 'ns-interrupts-action' "
                                "and 'other-s-interrupts-action' not "
                                "compatible\n");
                        return MANIFEST_ERROR_NOT_COMPATIBLE;
                }
        } else if (vm->partition.other_s_interrupts_action >
                   OTHER_S_INT_ACTION_SIGNALED) {
                dlog_error(
                        "Illegal value specified for the field "
                        "'other-s-interrupts-action': %u\n",
                        vm->partition.other_s_interrupts_action);
                return MANIFEST_ERROR_ILLEGAL_OTHER_S_INT_ACTION;
        }
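        /*
         * Summary of the interrupt-action combinations accepted by the
         * checks above (a reading aid, not an additional constraint):
         *
         *   ns-interrupts-action   | other-s-interrupts-action
         *   -----------------------+---------------------------
         *   NS_ACTION_QUEUED       | QUEUED or SIGNALED
         *   NS_ACTION_SIGNALED     | SIGNALED only
         *   NS_ACTION_ME (S_EL1)   | SIGNALED only
         */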

        /* Parse boot info node. */
        if (boot_info_node != NULL) {
                ffa_node = root;
                vm->partition.boot_info =
                        fdt_find_child(&ffa_node, &boot_info_node_name);
                if (vm->partition.boot_info) {
                        *boot_info_node = ffa_node;
                }
        } else {
                vm->partition.boot_info = false;
        }

        TRY(read_optional_uint32(
                &root, "power-management-messages",
                MANIFEST_POWER_MANAGEMENT_CPU_OFF_SUPPORTED |
                        MANIFEST_POWER_MANAGEMENT_CPU_ON_SUPPORTED,
                &vm->partition.power_management));
        vm->partition.power_management &= MANIFEST_POWER_MANAGEMENT_ALL_MASK;
        if (vm->partition.execution_ctx_count == 1 ||
            vm->partition.run_time_el == S_EL0 ||
            vm->partition.run_time_el == EL0) {
                vm->partition.power_management =
                        MANIFEST_POWER_MANAGEMENT_NONE_MASK;
        }

        dlog_verbose(" Power management messages %#x\n",
                     vm->partition.power_management);
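        /*
         * Descriptive note on the logic above: when the manifest omits
         * "power-management-messages", CPU_OFF and CPU_ON support is assumed
         * by default; the value is then clamped to the bits in
         * MANIFEST_POWER_MANAGEMENT_ALL_MASK, and single-vCPU or EL0/S_EL0
         * partitions have power management messages disabled altogether.
         */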

        /* Parse memory-regions */
        ffa_node = root;
        if (fdt_find_child(&ffa_node, &mem_region_node_name)) {
                TRY(parse_ffa_memory_region_node(
                        &ffa_node, vm->partition.load_addr,
                        vm->partition.mem_regions,
                        &vm->partition.mem_region_count, &vm->partition.rxtx,
                        boot_params));
        }
        dlog_verbose(" Total %u memory regions found\n",
                     vm->partition.mem_region_count);

        /* Parse Device-regions */
        ffa_node = root;
        if (fdt_find_child(&ffa_node, &dev_region_node_name)) {
                TRY(parse_ffa_device_region_node(
                        &ffa_node, vm->partition.dev_regions,
                        &vm->partition.dev_region_count,
                        &vm->partition.dma_device_count, boot_params));
        }
        dlog_verbose(" Total %u device regions found\n",
                     vm->partition.dev_region_count);

        if (!map_dma_device_id_to_stream_ids(vm)) {
                return MANIFEST_ERROR_NOT_COMPATIBLE;
        }

        return sanity_check_ffa_manifest(vm);
}

static enum manifest_return_code parse_ffa_partition_package(
        struct mm_stage1_locked stage1_locked, struct fdt_node *node,
        struct manifest_vm *vm, ffa_id_t vm_id,
        const struct boot_params *boot_params, struct mpool *ppool)
{
        enum manifest_return_code ret = MANIFEST_ERROR_NOT_COMPATIBLE;
        uintpaddr_t load_address;
        struct sp_pkg_header header;
        struct fdt sp_fdt;
        vaddr_t pkg_start;
        vaddr_t manifest_address;
        struct fdt_node boot_info_node;

        /*
         * This must have been hinted as being an FF-A partition; return
         * failure straight away if this is not the case.
         */
        if (!vm->is_ffa_partition) {
                return ret;
        }

        TRY(read_uint64(node, "load_address", &load_address));
        if (!is_aligned(load_address, PAGE_SIZE)) {
                return MANIFEST_ERROR_NOT_COMPATIBLE;
        }

        assert(load_address != 0U);

        if (!sp_pkg_init(stage1_locked, pa_init(load_address), &header,
                         ppool)) {
                return ret;
        }

        pkg_start = va_init(load_address);

        if (vm_id != HF_PRIMARY_VM_ID &&
            sp_pkg_get_mem_size(&header) >= vm->secondary.mem_size) {
                dlog_error("Invalid package header or DT size.\n");
                goto out;
        }

        manifest_address = va_add(va_init(load_address), header.pm_offset);
        if (!fdt_init_from_ptr(&sp_fdt, ptr_from_va(manifest_address),
                               header.pm_size)) {
                dlog_error("manifest.c: FDT failed validation.\n");
                goto out;
        }

        ret = parse_ffa_manifest(&sp_fdt, vm, &boot_info_node, boot_params);
        if (ret != MANIFEST_SUCCESS) {
                dlog_error("Error parsing partition manifest.\n");
                goto out;
        }

        if (vm->partition.load_addr != load_address) {
                dlog_warning(
                        "Partition's load address in its manifest differs "
                        "from the one specified in the partition's "
                        "package.\n");
                vm->partition.load_addr = load_address;
        }

        if (vm->partition.gp_register_num != DEFAULT_BOOT_GP_REGISTER) {
                if (header.version == SP_PKG_HEADER_VERSION_2 &&
                    vm->partition.boot_info &&
                    !ffa_boot_info_node(&boot_info_node, pkg_start, &header)) {
                        dlog_error("Failed to process boot information.\n");
                }
        }
out:
        sp_pkg_deinit(stage1_locked, pkg_start, &header, ppool);
        return ret;
}
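/*
 * Illustrative layout of a secondary partition package as consumed by
 * parse_ffa_partition_package() above (offsets relative to "load_address";
 * the authoritative header definition lives in "hf/sp_pkg.h", so the exact
 * contents beyond the fields used here are an assumption):
 *
 *   +------------------------+  load_address
 *   | struct sp_pkg_header   |
 *   +------------------------+  load_address + header.pm_offset
 *   | partition manifest DTB |  header.pm_size bytes, parsed by
 *   +------------------------+  parse_ffa_manifest()
 *   | rest of the package    |  e.g. the partition image
 *   +------------------------+
 */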

/**
 * Parse manifest from FDT.
 */
enum manifest_return_code manifest_init(struct mm_stage1_locked stage1_locked,
                                        struct manifest **manifest_ret,
                                        struct memiter *manifest_fdt,
                                        struct boot_params *boot_params,
                                        struct mpool *ppool)
{
        struct manifest *manifest;
        struct string vm_name;
        struct fdt fdt;
        struct fdt_node hyp_node;
        size_t i = 0;
        bool found_primary_vm = false;

        if (boot_params->mem_ranges_count == 0 &&
            boot_params->ns_mem_ranges_count == 0) {
                return MANIFEST_ERROR_MEMORY_MISSING;
        }

        dump_memory_ranges(boot_params->mem_ranges,
                           boot_params->mem_ranges_count, false);
        dump_memory_ranges(boot_params->ns_mem_ranges,
                           boot_params->ns_mem_ranges_count, true);

        /* Allocate space in the ppool for the manifest data. */
        if (!manifest_data_init(ppool)) {
                panic("Unable to allocate manifest data.\n");
        }

        manifest = &manifest_data->manifest;
        *manifest_ret = manifest;

        if (!fdt_init_from_memiter(&fdt, manifest_fdt)) {
                return MANIFEST_ERROR_FILE_SIZE; /* TODO */
        }

        /* Find hypervisor node. */
        if (!fdt_find_node(&fdt, "/hypervisor", &hyp_node)) {
                return MANIFEST_ERROR_NO_HYPERVISOR_FDT_NODE;
        }

        /* Check "compatible" property. */
        if (!fdt_is_compatible(&hyp_node, "hafnium,hafnium")) {
                return MANIFEST_ERROR_NOT_COMPATIBLE;
        }

        TRY(read_bool(&hyp_node, "ffa_tee_enabled",
                      &manifest->ffa_tee_enabled));
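        /*
         * Illustrative sketch of the hypervisor manifest structure walked
         * below; the child node names are produced by generate_vm_node_name()
         * and the exact spelling shown here is an assumption for the example
         * only:
         *
         *     hypervisor {
         *         compatible = "hafnium,hafnium";
         *         ffa_tee_enabled;
         *         vm1 { ... };
         *         vm2 { ... };
         *     };
         */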

        /* Iterate over reserved VM IDs and check no such nodes exist. */
        for (i = HF_VM_ID_BASE; i < HF_VM_ID_OFFSET; i++) {
                ffa_id_t vm_id = (ffa_id_t)i - HF_VM_ID_BASE;
                struct fdt_node vm_node = hyp_node;

                generate_vm_node_name(&vm_name, vm_id);
                if (fdt_find_child(&vm_node, &vm_name)) {
                        return MANIFEST_ERROR_RESERVED_VM_ID;
                }
        }

        /* Iterate over VM nodes until we find one that does not exist. */
        for (i = 0; i <= MAX_VMS; ++i) {
                ffa_id_t vm_id = HF_VM_ID_OFFSET + i;
                struct fdt_node vm_node = hyp_node;

                generate_vm_node_name(&vm_name, vm_id - HF_VM_ID_BASE);
                if (!fdt_find_child(&vm_node, &vm_name)) {
                        break;
                }

                if (i == MAX_VMS) {
                        return MANIFEST_ERROR_TOO_MANY_VMS;
                }

                if (vm_id == HF_PRIMARY_VM_ID) {
                        CHECK(found_primary_vm == false); /* sanity check */
                        found_primary_vm = true;
                }

                manifest->vm_count = i + 1;

                TRY(parse_vm_common(&vm_node, &manifest->vm[i], vm_id));

                CHECK(!manifest->vm[i].is_hyp_loaded ||
                      manifest->vm[i].is_ffa_partition);

                if (manifest->vm[i].is_ffa_partition &&
                    !manifest->vm[i].is_hyp_loaded) {
                        TRY(parse_ffa_partition_package(stage1_locked, &vm_node,
                                                        &manifest->vm[i], vm_id,
                                                        boot_params, ppool));
                        size_t page_count =
                                align_up(manifest->vm[i].secondary.mem_size,
                                         PAGE_SIZE) /
                                PAGE_SIZE;
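                        /*
                         * page_count is mem_size rounded up to whole pages;
                         * e.g. (illustrative, assuming 4KiB pages) a mem_size
                         * of 0x42800 gives a page_count of 0x43.
                         */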

                        if (vm_id == HF_PRIMARY_VM_ID) {
                                continue;
                        }

                        TRY(check_partition_memory_is_valid(
                                manifest->vm[i].partition.load_addr,
                                page_count, 0, boot_params));

                        /*
                         * Check if memory from load-address until
                         * (load-address + memory size) has been used by
                         * another partition.
                         */
                        TRY(check_and_record_memory_used(
                                manifest->vm[i].partition.load_addr,
                                page_count));
                } else {
                        TRY(parse_vm(&vm_node, &manifest->vm[i], vm_id));
                }
        }

        if (!found_primary_vm && vm_id_is_current_world(HF_PRIMARY_VM_ID)) {
                return MANIFEST_ERROR_NO_PRIMARY_VM;
        }

        return MANIFEST_SUCCESS;
}

/**
 * Free manifest data resources; called once manifest parsing has
 * completed and VMs are loaded.
 */
void manifest_deinit(struct mpool *ppool)
{
        manifest_data_deinit(ppool);
}
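/*
 * Typical usage from the boot flow (an illustrative sketch, not the
 * authoritative call sites): manifest_init() is called once with the
 * hypervisor/SPMC manifest FDT and the boot parameters, the returned
 * manifest is used to load the VMs/SPs, and manifest_deinit() then releases
 * the parsing-time data back to the page pool.
 */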

const char *manifest_strerror(enum manifest_return_code ret_code)
{
        switch (ret_code) {
        case MANIFEST_SUCCESS:
                return "Success";
        case MANIFEST_ERROR_FILE_SIZE:
                return "Total size in header does not match file size";
        case MANIFEST_ERROR_MALFORMED_DTB:
                return "Malformed device tree blob";
        case MANIFEST_ERROR_NO_ROOT_NODE:
                return "Could not find root node in manifest";
        case MANIFEST_ERROR_NO_HYPERVISOR_FDT_NODE:
                return "Could not find \"hypervisor\" node in manifest";
        case MANIFEST_ERROR_NOT_COMPATIBLE:
                return "Hypervisor manifest entry not compatible with Hafnium";
        case MANIFEST_ERROR_RESERVED_VM_ID:
                return "Manifest defines a VM with a reserved ID";
        case MANIFEST_ERROR_NO_PRIMARY_VM:
                return "Manifest does not contain a primary VM entry";
        case MANIFEST_ERROR_TOO_MANY_VMS:
                return "Manifest specifies more VMs than Hafnium has "
                       "statically allocated space for";
        case MANIFEST_ERROR_PROPERTY_NOT_FOUND:
                return "Property not found";
        case MANIFEST_ERROR_MALFORMED_STRING:
                return "Malformed string property";
        case MANIFEST_ERROR_STRING_TOO_LONG:
                return "String too long";
        case MANIFEST_ERROR_MALFORMED_INTEGER:
                return "Malformed integer property";
        case MANIFEST_ERROR_INTEGER_OVERFLOW:
                return "Integer overflow";
        case MANIFEST_ERROR_MALFORMED_INTEGER_LIST:
                return "Malformed integer list property";
        case MANIFEST_ERROR_MALFORMED_BOOLEAN:
                return "Malformed boolean property";
        case MANIFEST_ERROR_MEMORY_REGION_NODE_EMPTY:
                return "Memory-region node should have at least one entry";
        case MANIFEST_ERROR_DEVICE_REGION_NODE_EMPTY:
                return "Device-region node should have at least one entry";
        case MANIFEST_ERROR_RXTX_SIZE_MISMATCH:
                return "RX and TX buffers should be of the same size";
        case MANIFEST_ERROR_MEM_REGION_EMPTY:
                return "Memory region should have at least one page";
        case MANIFEST_ERROR_BASE_ADDRESS_AND_RELATIVE_ADDRESS:
                return "Base and relative addresses are mutually exclusive";
        case MANIFEST_ERROR_MEM_REGION_OVERLAP:
                return "Memory region overlaps with one already allocated";
        case MANIFEST_ERROR_MEM_REGION_UNALIGNED:
                return "Memory region is not aligned to a page boundary";
        case MANIFEST_ERROR_INVALID_MEM_PERM:
                return "Memory permission should be RO, RW or RX";
        case MANIFEST_ERROR_ARGUMENTS_LIST_EMPTY:
                return "Arguments-list node should have at least one argument";
        case MANIFEST_ERROR_INTERRUPT_ID_REPEATED:
                return "Interrupt ID already assigned to another endpoint";
        case MANIFEST_ERROR_ILLEGAL_NS_INT_ACTION:
                return "Illegal value specified for the field: Action in "
                       "response to NS Interrupt";
        case MANIFEST_ERROR_INTERRUPT_ID_NOT_IN_LIST:
                return "Interrupt ID is not in the list of interrupts";
        case MANIFEST_ERROR_ILLEGAL_OTHER_S_INT_ACTION:
                return "Illegal value specified for the field: Action in "
                       "response to Other-S Interrupt";
        case MANIFEST_ERROR_MEMORY_MISSING:
                return "Memory nodes must be defined in the SPMC manifest "
                       "('memory' and 'ns-memory')";
        case MANIFEST_ERROR_PARTITION_ADDRESS_OVERLAP:
                return "Partition's memory [load address, load address + "
                       "memory size[ overlaps with other allocated "
                       "regions";
        case MANIFEST_ERROR_MEM_REGION_INVALID:
                return "Invalid memory region range";
        case MANIFEST_ERROR_DEVICE_MEM_REGION_INVALID:
                return "Invalid device memory region range";
        case MANIFEST_ERROR_INVALID_BOOT_ORDER:
                return "Boot order should be a unique value less than the "
                       "default largest value";
        case MANIFEST_ERROR_UUID_ALL_ZEROS:
                return "UUID should not be NIL";
        case MANIFEST_ERROR_MISSING_SMMU_ID:
                return "SMMU ID must be specified for the given Stream IDs";
        case MANIFEST_ERROR_MISMATCH_DMA_ACCESS_PERMISSIONS:
                return "DMA device access permissions must match memory region "
                       "attributes";
        case MANIFEST_ERROR_STREAM_IDS_OVERFLOW:
                return "DMA device stream ID count exceeds predefined limit";
        case MANIFEST_ERROR_DMA_ACCESS_PERMISSIONS_OVERFLOW:
                return "DMA access permissions count exceeds predefined limit";
        case MANIFEST_ERROR_DMA_DEVICE_OVERFLOW:
                return "Number of device regions with DMA peripheral exceeds "
                       "limit";
        }

        panic("Unexpected manifest return code.");
}