/*
 * Copyright 2019 The Hafnium Authors.
 *
 * Use of this source code is governed by a BSD-style
 * license that can be found in the LICENSE file or at
 * https://opensource.org/licenses/BSD-3-Clause.
 */

#include "hf/manifest.h"

#include <stddef.h>
#include <stdint.h>

#include "hf/arch/types.h"
#include "hf/arch/vmid_base.h"

#include "hf/addr.h"
#include "hf/assert.h"
#include "hf/boot_info.h"
#include "hf/boot_params.h"
#include "hf/check.h"
#include "hf/dlog.h"
#include "hf/fdt.h"
#include "hf/mm.h"
#include "hf/mpool.h"
#include "hf/sp_pkg.h"
#include "hf/static_assert.h"
#include "hf/std.h"

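/*
 * Evaluates `expr` and propagates any manifest parsing error to the caller:
 * the enclosing function returns early unless the result is MANIFEST_SUCCESS,
 * e.g. TRY(read_uint32(node, "pages-count", &page_count)).
 */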
#define TRY(expr)                                            \
	do {                                                 \
		enum manifest_return_code ret_code = (expr); \
		if (ret_code != MANIFEST_SUCCESS) {          \
			return ret_code;                     \
		}                                            \
	} while (0)

#define VM_ID_MAX (HF_VM_ID_OFFSET + MAX_VMS - 1)
#define VM_ID_MAX_DIGITS (5)
#define VM_NAME_EXTRA_CHARS (3) /* "vm" + number + '\0' */
#define VM_NAME_MAX_SIZE (VM_ID_MAX_DIGITS + VM_NAME_EXTRA_CHARS)
static_assert(VM_NAME_MAX_SIZE <= STRING_MAX_SIZE,
	      "VM name does not fit into a struct string.");
static_assert(VM_ID_MAX <= 99999, "Insufficient VM_ID_MAX_DIGITS");
static_assert((HF_OTHER_WORLD_ID > VM_ID_MAX) ||
		      (HF_OTHER_WORLD_ID < HF_VM_ID_BASE),
	      "TrustZone VM ID clashes with normal VM range.");

/* Bitmap to track boot order values in use. */
#define BOOT_ORDER_ENTRY_BITS (sizeof(uint64_t) * 8)
#define BOOT_ORDER_MAP_ENTRIES                                \
	((DEFAULT_BOOT_ORDER + (BOOT_ORDER_ENTRY_BITS - 1)) / \
	 BOOT_ORDER_ENTRY_BITS)

/**
 * Keeps track of the partitions' properties during early boot manifest
 * parsing:
 * - Interrupt IDs.
 * - Physical memory ranges.
 */
struct manifest_data {
	struct manifest manifest;
	struct interrupt_bitmap intids;
	/*
	 * Allocate enough entries for the maximum number of memory regions
	 * defined in the partitions' manifests, plus one region for each
	 * partition's address space.
	 */
	struct mem_range
		mem_regions[PARTITION_MAX_MEMORY_REGIONS * MAX_VMS + MAX_VMS];
	uint64_t boot_order_values[BOOT_ORDER_MAP_ENTRIES];
};

/**
 * Calculate the number of entries in the ppool that are required to
 * store the manifest_data struct.
 */
static const size_t manifest_data_ppool_entries =
	(align_up(sizeof(struct manifest_data), MM_PPOOL_ENTRY_SIZE) /
	 MM_PPOOL_ENTRY_SIZE);

static struct manifest_data *manifest_data;
/* Index used to track the number of memory regions allocated. */
static size_t allocated_mem_regions_index = 0;

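/**
 * Validates a partition's boot-order value: the default value is accepted
 * as-is, any other value must not exceed DEFAULT_BOOT_ORDER and must not
 * already be claimed by another partition. Claimed values are recorded in
 * the boot_order_values bitmap.
 */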
static bool check_boot_order(uint16_t boot_order)
{
	uint16_t i;
	uint64_t boot_order_mask;

	if (boot_order == DEFAULT_BOOT_ORDER) {
		return true;
	}
	if (boot_order > DEFAULT_BOOT_ORDER) {
		dlog_error("Boot order should not exceed %x\n",
			   DEFAULT_BOOT_ORDER);
		return false;
	}

	i = boot_order / BOOT_ORDER_ENTRY_BITS;
	boot_order_mask = UINT64_C(1) << (boot_order % BOOT_ORDER_ENTRY_BITS);

	if ((boot_order_mask & manifest_data->boot_order_values[i]) != 0U) {
		dlog_error("Boot order must be a unique value.\n");
		return false;
	}

	manifest_data->boot_order_values[i] |= boot_order_mask;

	return true;
}

/**
 * Allocates and clears memory for the manifest data in the given memory pool.
 * Returns true if the memory is successfully allocated.
 */
static bool manifest_data_init(struct mpool *ppool)
{
	manifest_data = (struct manifest_data *)mpool_alloc_contiguous(
		ppool, manifest_data_ppool_entries, 1);

	assert(manifest_data != NULL);

	memset_s(manifest_data, sizeof(struct manifest_data), 0,
		 sizeof(struct manifest_data));

	return manifest_data != NULL;
}

/**
 * Frees the memory used for the manifest data in the given memory pool.
 */
static void manifest_data_deinit(struct mpool *ppool)
{
	/**
	 * Clear and return the memory used for the manifest_data struct to the
	 * memory pool.
	 */
	memset_s(manifest_data, sizeof(struct manifest_data), 0,
		 sizeof(struct manifest_data));
	mpool_add_chunk(ppool, manifest_data, manifest_data_ppool_entries);

	/**
	 * Reset the index used for tracking the number of memory regions
	 * allocated.
	 */
	allocated_mem_regions_index = 0;
}

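/** Returns the number of decimal digits needed to print `vm_id`. */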
static inline size_t count_digits(ffa_id_t vm_id)
{
	size_t digits = 0;

	do {
		digits++;
		vm_id /= 10;
	} while (vm_id);
	return digits;
}

/**
 * Generates a string with the two letters "vm" followed by an integer, e.g.
 * "vm5" for VM ID 5. Assumes `str` can hold at least VM_NAME_MAX_SIZE
 * characters.
 */
static void generate_vm_node_name(struct string *str, ffa_id_t vm_id)
{
	static const char *digits = "0123456789";
	size_t vm_id_digits = count_digits(vm_id);
	char *base = str->data;
	char *ptr = base + (VM_NAME_EXTRA_CHARS + vm_id_digits);

	assert(vm_id_digits <= VM_ID_MAX_DIGITS);
	*(--ptr) = '\0';
	do {
		*(--ptr) = digits[vm_id % 10];
		vm_id /= 10;
	} while (vm_id);
	*(--ptr) = 'm';
	*(--ptr) = 'v';
	assert(ptr == base);
}

/**
 * Read a boolean property: true if present; false if not. If present, the
 * value of the property must be empty, otherwise it is considered malformed.
 */
static enum manifest_return_code read_bool(const struct fdt_node *node,
					   const char *property, bool *out)
{
	struct memiter data;
	bool present = fdt_read_property(node, property, &data);

	if (present && memiter_size(&data) != 0) {
		return MANIFEST_ERROR_MALFORMED_BOOLEAN;
	}

	*out = present;
	return MANIFEST_SUCCESS;
}

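/**
 * Reads a string property into `out`. Fails if the property is missing,
 * malformed, or too long to fit in a struct string.
 */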
static enum manifest_return_code read_string(const struct fdt_node *node,
					     const char *property,
					     struct string *out)
{
	struct memiter data;

	if (!fdt_read_property(node, property, &data)) {
		return MANIFEST_ERROR_PROPERTY_NOT_FOUND;
	}

	switch (string_init(out, &data)) {
	case STRING_SUCCESS:
		return MANIFEST_SUCCESS;
	case STRING_ERROR_INVALID_INPUT:
		return MANIFEST_ERROR_MALFORMED_STRING;
	case STRING_ERROR_TOO_LONG:
		return MANIFEST_ERROR_STRING_TOO_LONG;
	}
}

static enum manifest_return_code read_optional_string(
	const struct fdt_node *node, const char *property, struct string *out)
{
	enum manifest_return_code ret;

	ret = read_string(node, property, out);
	if (ret == MANIFEST_ERROR_PROPERTY_NOT_FOUND) {
		string_init_empty(out);
		ret = MANIFEST_SUCCESS;
	}
	return ret;
}

static enum manifest_return_code read_uint64(const struct fdt_node *node,
					     const char *property,
					     uint64_t *out)
{
	struct memiter data;

	if (!fdt_read_property(node, property, &data)) {
		return MANIFEST_ERROR_PROPERTY_NOT_FOUND;
	}

	if (!fdt_parse_number(&data, memiter_size(&data), out)) {
		return MANIFEST_ERROR_MALFORMED_INTEGER;
	}

	return MANIFEST_SUCCESS;
}

static enum manifest_return_code read_optional_uint64(
	const struct fdt_node *node, const char *property,
	uint64_t default_value, uint64_t *out)
{
	enum manifest_return_code ret;

	ret = read_uint64(node, property, out);
	if (ret == MANIFEST_ERROR_PROPERTY_NOT_FOUND) {
		*out = default_value;
		return MANIFEST_SUCCESS;
	}
	return ret;
}

static enum manifest_return_code read_uint32(const struct fdt_node *node,
					     const char *property,
					     uint32_t *out)
{
	uint64_t value;

	TRY(read_uint64(node, property, &value));

	if (value > UINT32_MAX) {
		return MANIFEST_ERROR_INTEGER_OVERFLOW;
	}

	*out = (uint32_t)value;
	return MANIFEST_SUCCESS;
}

static enum manifest_return_code read_optional_uint32(
	const struct fdt_node *node, const char *property,
	uint32_t default_value, uint32_t *out)
{
	enum manifest_return_code ret;

	ret = read_uint32(node, property, out);
	if (ret == MANIFEST_ERROR_PROPERTY_NOT_FOUND) {
		*out = default_value;
		return MANIFEST_SUCCESS;
	}
	return ret;
}

static enum manifest_return_code read_uint16(const struct fdt_node *node,
					     const char *property,
					     uint16_t *out)
{
	uint64_t value;

	TRY(read_uint64(node, property, &value));
	if (value > UINT16_MAX) {
		return MANIFEST_ERROR_INTEGER_OVERFLOW;
	}

	*out = (uint16_t)value;
	return MANIFEST_SUCCESS;
}

static enum manifest_return_code read_optional_uint16(
	const struct fdt_node *node, const char *property,
	uint16_t default_value, uint16_t *out)
{
	enum manifest_return_code ret;

	ret = read_uint16(node, property, out);
	if (ret == MANIFEST_ERROR_PROPERTY_NOT_FOUND) {
		*out = default_value;
		return MANIFEST_SUCCESS;
	}

	return ret;
}

static enum manifest_return_code read_uint8(const struct fdt_node *node,
					    const char *property, uint8_t *out)
{
	uint64_t value;

	TRY(read_uint64(node, property, &value));

	if (value > UINT8_MAX) {
		return MANIFEST_ERROR_INTEGER_OVERFLOW;
	}

	*out = (uint8_t)value;
	return MANIFEST_SUCCESS;
}

static enum manifest_return_code read_optional_uint8(
	const struct fdt_node *node, const char *property,
	uint8_t default_value, uint8_t *out)
{
	enum manifest_return_code ret;

	ret = read_uint8(node, property, out);
	if (ret == MANIFEST_ERROR_PROPERTY_NOT_FOUND) {
		*out = default_value;
		return MANIFEST_SUCCESS;
	}

	return ret;
}

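/* Iterator over a devicetree property holding a list of 32-bit values. */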
struct uint32list_iter {
	struct memiter mem_it;
};

static enum manifest_return_code read_uint32list(const struct fdt_node *node,
						 const char *property,
						 struct uint32list_iter *out)
{
	struct memiter data;

	if (!fdt_read_property(node, property, &data)) {
		memiter_init(&out->mem_it, NULL, 0);
		return MANIFEST_ERROR_PROPERTY_NOT_FOUND;
	}

	if ((memiter_size(&data) % sizeof(uint32_t)) != 0) {
		return MANIFEST_ERROR_MALFORMED_INTEGER_LIST;
	}

	out->mem_it = data;
	return MANIFEST_SUCCESS;
}

static enum manifest_return_code read_optional_uint32list(
	const struct fdt_node *node, const char *property,
	struct uint32list_iter *out)
{
	enum manifest_return_code ret = read_uint32list(node, property, out);

	if (ret == MANIFEST_ERROR_PROPERTY_NOT_FOUND) {
		return MANIFEST_SUCCESS;
	}
	return ret;
}

static bool uint32list_has_next(const struct uint32list_iter *list)
{
	return memiter_size(&list->mem_it) > 0;
}

static enum manifest_return_code uint32list_get_next(
	struct uint32list_iter *list, uint32_t *out)
{
	uint64_t num;

	CHECK(uint32list_has_next(list));
	if (!fdt_parse_number(&list->mem_it, sizeof(uint32_t), &num)) {
		return MANIFEST_ERROR_MALFORMED_INTEGER;
	}

	*out = (uint32_t)num;
	return MANIFEST_SUCCESS;
}

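/**
 * Parses the properties shared by primary and secondary VM nodes: FF-A
 * partition and hypervisor-loaded flags, debug name and SMC whitelist, plus
 * memory size, vCPU count and FDT filename for secondary VMs.
 */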
static enum manifest_return_code parse_vm_common(const struct fdt_node *node,
						 struct manifest_vm *vm,
						 ffa_id_t vm_id)
{
	struct uint32list_iter smcs;
	size_t idx;

	TRY(read_bool(node, "is_ffa_partition", &vm->is_ffa_partition));

	TRY(read_bool(node, "hyp_loaded", &vm->is_hyp_loaded));

	TRY(read_string(node, "debug_name", &vm->debug_name));

	TRY(read_optional_uint32list(node, "smc_whitelist", &smcs));
	while (uint32list_has_next(&smcs) &&
	       vm->smc_whitelist.smc_count < MAX_SMCS) {
		idx = vm->smc_whitelist.smc_count++;
		TRY(uint32list_get_next(&smcs, &vm->smc_whitelist.smcs[idx]));
	}

	if (uint32list_has_next(&smcs)) {
		dlog_warning("%s SMC whitelist too long.\n",
			     string_data(&vm->debug_name));
	}

	TRY(read_bool(node, "smc_whitelist_permissive",
		      &vm->smc_whitelist.permissive));

	if (vm_id != HF_PRIMARY_VM_ID) {
		TRY(read_uint64(node, "mem_size", &vm->secondary.mem_size));
		TRY(read_uint16(node, "vcpu_count", &vm->secondary.vcpu_count));
		TRY(read_optional_string(node, "fdt_filename",
					 &vm->secondary.fdt_filename));
	}

	return MANIFEST_SUCCESS;
}

static enum manifest_return_code parse_vm(struct fdt_node *node,
					  struct manifest_vm *vm,
					  ffa_id_t vm_id)
{
	TRY(read_optional_string(node, "kernel_filename",
				 &vm->kernel_filename));

	if (vm_id == HF_PRIMARY_VM_ID) {
		TRY(read_optional_string(node, "ramdisk_filename",
					 &vm->primary.ramdisk_filename));
		TRY(read_optional_uint64(node, "boot_address",
					 MANIFEST_INVALID_ADDRESS,
					 &vm->primary.boot_address));
	}
	TRY(read_optional_uint8(node, "exception-level", (uint8_t)EL1,
				(uint8_t *)&vm->partition.run_time_el));

	return MANIFEST_SUCCESS;
}

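/**
 * Returns true if either the start or the end of the region defined by
 * `base_address` and `page_count` falls within one of the given ranges.
 */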
static bool is_memory_region_within_ranges(uintptr_t base_address,
					   uint32_t page_count,
					   const struct mem_range *ranges,
					   const size_t ranges_size)
{
	uintptr_t region_end =
		base_address + ((uintptr_t)page_count * PAGE_SIZE - 1);

	for (size_t i = 0; i < ranges_size; i++) {
		uintptr_t base = (uintptr_t)pa_addr(ranges[i].begin);
		uintptr_t end = (uintptr_t)pa_addr(ranges[i].end);

		if ((base_address >= base && base_address <= end) ||
		    (region_end >= base && region_end <= end)) {
			return true;
		}
	}

	return false;
}

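/**
 * Logs the given memory ranges at verbose level, tagged as secure (S) or
 * non-secure (NS).
 */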
void dump_memory_ranges(const struct mem_range *ranges,
			const size_t ranges_size, bool ns)
{
	if (LOG_LEVEL < LOG_LEVEL_VERBOSE) {
		return;
	}

	dlog("%s Memory ranges:\n", ns ? "NS" : "S");

	for (size_t i = 0; i < ranges_size; i++) {
		uintptr_t begin = pa_addr(ranges[i].begin);
		uintptr_t end = pa_addr(ranges[i].end);
		size_t page_count =
			align_up(pa_difference(ranges[i].begin, ranges[i].end),
				 PAGE_SIZE) /
			PAGE_SIZE;

		dlog(" [%x - %x (%u pages)]\n", begin, end, page_count);
	}
}

/**
 * Check that the partition's assigned memory is contained in the memory
 * ranges configured for the SWd, in the SPMC's manifest.
 */
static enum manifest_return_code check_partition_memory_is_valid(
	uintptr_t base_address, uint32_t page_count, uint32_t attributes,
	const struct boot_params *params)
{
	bool is_secure_region =
		(attributes & MANIFEST_REGION_ATTR_SECURITY) == 0U;
	const struct mem_range *ranges_from_manifest =
		is_secure_region ? params->mem_ranges : params->ns_mem_ranges;
	size_t ranges_count = is_secure_region ? params->mem_ranges_count
					       : params->ns_mem_ranges_count;
	bool within_ranges = is_memory_region_within_ranges(
		base_address, page_count, ranges_from_manifest, ranges_count);

	return within_ranges ? MANIFEST_SUCCESS
			     : MANIFEST_ERROR_MEM_REGION_INVALID;
}

/*
 * Keep track of the memory allocated by partitions. This includes memory
 * region nodes defined in their respective partition manifests, as well as
 * the address space derived from their load address.
 */
static enum manifest_return_code check_and_record_memory_used(
	uintptr_t base_address, uint32_t page_count)
{
	bool overlap_of_regions;

	if (page_count == 0U) {
		dlog_error(
			"Empty memory region defined with base address: %#x.\n",
			base_address);
		return MANIFEST_ERROR_MEM_REGION_EMPTY;
	}

	if (!is_aligned(base_address, PAGE_SIZE)) {
		dlog_error("base_address (%#x) is not aligned to page size.\n",
			   base_address);
		return MANIFEST_ERROR_MEM_REGION_UNALIGNED;
	}

	overlap_of_regions = is_memory_region_within_ranges(
		base_address, page_count, manifest_data->mem_regions,
		allocated_mem_regions_index);

	if (!overlap_of_regions) {
		paddr_t begin = pa_init(base_address);

		manifest_data->mem_regions[allocated_mem_regions_index].begin =
			begin;
		manifest_data->mem_regions[allocated_mem_regions_index].end =
			pa_add(begin, page_count * PAGE_SIZE - 1);
		allocated_mem_regions_index++;

		return MANIFEST_SUCCESS;
	}

	return MANIFEST_ERROR_MEM_REGION_OVERLAP;
}

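/**
 * Parses the "memory-regions" node of an FF-A partition manifest, validating
 * each region against the boot parameters, recording it so overlaps can be
 * detected, and resolving DMA stream IDs and RX/TX buffer phandles.
 */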
static enum manifest_return_code parse_ffa_memory_region_node(
	struct fdt_node *mem_node, uintptr_t load_address,
	struct memory_region *mem_regions, uint16_t *count, struct rx_tx *rxtx,
	const struct boot_params *boot_params)
{
	uint32_t phandle;
	uint16_t i = 0;
	uint32_t j = 0;
	uintptr_t relative_address;
	struct uint32list_iter list;

	dlog_verbose(" Partition memory regions\n");

	if (!fdt_is_compatible(mem_node, "arm,ffa-manifest-memory-regions")) {
		return MANIFEST_ERROR_NOT_COMPATIBLE;
	}

	if (!fdt_first_child(mem_node)) {
		return MANIFEST_ERROR_MEMORY_REGION_NODE_EMPTY;
	}

	do {
		dlog_verbose(" Memory Region[%u]\n", i);

		TRY(read_optional_string(mem_node, "description",
					 &mem_regions[i].name));
		dlog_verbose(" Name: %s\n",
			     string_data(&mem_regions[i].name));

		TRY(read_optional_uint64(mem_node, "base-address",
					 MANIFEST_INVALID_ADDRESS,
					 &mem_regions[i].base_address));
		dlog_verbose(" Base address: %#x\n",
			     mem_regions[i].base_address);

		TRY(read_optional_uint64(mem_node, "relative-address",
					 MANIFEST_INVALID_ADDRESS,
					 &relative_address));
		if (relative_address != MANIFEST_INVALID_ADDRESS) {
			dlog_verbose(" Relative address: %#x\n",
				     relative_address);
		}

		if (mem_regions[i].base_address == MANIFEST_INVALID_ADDRESS &&
		    relative_address == MANIFEST_INVALID_ADDRESS) {
			return MANIFEST_ERROR_PROPERTY_NOT_FOUND;
		}

		if (mem_regions[i].base_address != MANIFEST_INVALID_ADDRESS &&
		    relative_address != MANIFEST_INVALID_ADDRESS) {
			return MANIFEST_ERROR_BASE_ADDRESS_AND_RELATIVE_ADDRESS;
		}

		if (relative_address != MANIFEST_INVALID_ADDRESS &&
		    relative_address > UINT64_MAX - load_address) {
			return MANIFEST_ERROR_INTEGER_OVERFLOW;
		}

		if (relative_address != MANIFEST_INVALID_ADDRESS) {
			mem_regions[i].base_address =
				load_address + relative_address;
		}

		TRY(read_uint32(mem_node, "pages-count",
				&mem_regions[i].page_count));
		dlog_verbose(" Pages_count: %u\n",
			     mem_regions[i].page_count);

		TRY(read_uint32(mem_node, "attributes",
				&mem_regions[i].attributes));

		/*
		 * Check RWX permission attributes.
		 * Security attribute is checked at load phase.
		 */
		uint32_t permissions = mem_regions[i].attributes &
				       (MANIFEST_REGION_ATTR_READ |
					MANIFEST_REGION_ATTR_WRITE |
					MANIFEST_REGION_ATTR_EXEC);
		if (permissions != MANIFEST_REGION_ATTR_READ &&
		    permissions != (MANIFEST_REGION_ATTR_READ |
				    MANIFEST_REGION_ATTR_WRITE) &&
		    permissions != (MANIFEST_REGION_ATTR_READ |
				    MANIFEST_REGION_ATTR_EXEC)) {
			return MANIFEST_ERROR_INVALID_MEM_PERM;
		}

		/* Filter memory region attributes. */
		mem_regions[i].attributes &= MANIFEST_REGION_ALL_ATTR_MASK;

		dlog_verbose(" Attributes: %#x\n",
			     mem_regions[i].attributes);

		TRY(check_partition_memory_is_valid(
			mem_regions[i].base_address, mem_regions[i].page_count,
			mem_regions[i].attributes, boot_params));

		TRY(check_and_record_memory_used(mem_regions[i].base_address,
						 mem_regions[i].page_count));

		TRY(read_optional_uint32(mem_node, "smmu-id",
					 MANIFEST_INVALID_ID,
					 &mem_regions[i].dma_prop.smmu_id));
		if (mem_regions[i].dma_prop.smmu_id != MANIFEST_INVALID_ID) {
			dlog_verbose(" smmu-id: %u\n",
				     mem_regions[i].dma_prop.smmu_id);
		}

		TRY(read_optional_uint32list(mem_node, "stream-ids", &list));
		dlog_verbose(" Stream IDs assigned:\n");

		j = 0;
		while (uint32list_has_next(&list)) {
			if (j == PARTITION_MAX_STREAMS_PER_DEVICE) {
				return MANIFEST_ERROR_STREAM_IDS_OVERFLOW;
			}

			TRY(uint32list_get_next(
				&list, &mem_regions[i].dma_prop.stream_ids[j]));
			dlog_verbose(" %u\n",
				     mem_regions[i].dma_prop.stream_ids[j]);
			j++;
		}
		if (j == 0) {
			dlog_verbose(" None\n");
		} else if (mem_regions[i].dma_prop.smmu_id ==
			   MANIFEST_INVALID_ID) {
			/*
			 * SMMU ID must be specified if the partition specifies
			 * Stream IDs for any device upstream of SMMU.
			 */
			return MANIFEST_ERROR_MISSING_SMMU_ID;
		}

		mem_regions[i].dma_prop.stream_count = j;

		TRY(read_optional_uint32list(
			mem_node, "stream-ids-access-permissions", &list));
		dlog_verbose(" Access permissions of Stream IDs:\n");

		j = 0;
		while (uint32list_has_next(&list)) {
			uint32_t permissions;

			if (j == PARTITION_MAX_STREAMS_PER_DEVICE) {
				return MANIFEST_ERROR_DMA_ACCESS_PERMISSIONS_OVERFLOW;
			}

			TRY(uint32list_get_next(&list, &permissions));
			dlog_verbose(" %u\n", permissions);

			if (j == 0) {
				mem_regions[i].dma_prop.dma_access_permissions =
					permissions;
			}

			/*
			 * All stream ids belonging to a dma device must specify
			 * the same access permissions.
			 */
			if (permissions !=
			    mem_regions[i].dma_prop.dma_access_permissions) {
				return MANIFEST_ERROR_MISMATCH_DMA_ACCESS_PERMISSIONS;
			}

			j++;
		}

		if (j == 0) {
			dlog_verbose(" None\n");
		} else if (j != mem_regions[i].dma_prop.stream_count) {
			return MANIFEST_ERROR_MISMATCH_DMA_ACCESS_PERMISSIONS;
		}

		if (j > 0) {
			/* Filter the dma access permissions. */
			mem_regions[i].dma_prop.dma_access_permissions &=
				MANIFEST_REGION_ALL_ATTR_MASK;
		}

		if (rxtx->available) {
			TRY(read_optional_uint32(
				mem_node, "phandle",
				(uint32_t)MANIFEST_INVALID_ADDRESS, &phandle));
			if (phandle == rxtx->rx_phandle) {
				dlog_verbose(" Assigned as RX buffer\n");
				rxtx->rx_buffer = &mem_regions[i];
			} else if (phandle == rxtx->tx_phandle) {
				dlog_verbose(" Assigned as TX buffer\n");
				rxtx->tx_buffer = &mem_regions[i];
			}
		}

		i++;
	} while (fdt_next_sibling(mem_node) &&
		 (i < PARTITION_MAX_MEMORY_REGIONS));

	if (rxtx->available &&
	    (rxtx->rx_buffer->page_count != rxtx->tx_buffer->page_count)) {
		return MANIFEST_ERROR_RXTX_SIZE_MISMATCH;
	}

	*count = i;

	return MANIFEST_SUCCESS;
}

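/**
 * Returns the interrupt_info entry with the given ID within a device region,
 * or NULL if it is not found.
 */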
static struct interrupt_info *device_region_get_interrupt_info(
	struct device_region *dev_regions, uint32_t intid)
{
	for (uint32_t i = 0; i < ARRAY_SIZE(dev_regions->interrupts); i++) {
		if (dev_regions->interrupts[i].id == intid) {
			return &(dev_regions->interrupts[i]);
		}
	}
	return NULL;
}

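/**
 * Parses the "device-regions" node of an FF-A partition manifest: MMIO
 * ranges, interrupts and their target MPIDRs, and SMMU stream IDs for
 * DMA-capable devices.
 */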
static enum manifest_return_code parse_ffa_device_region_node(
	struct fdt_node *dev_node, struct device_region *dev_regions,
	uint16_t *count, uint8_t *dma_device_count)
{
	struct uint32list_iter list;
	uint16_t i = 0;
	uint32_t j = 0;
	struct interrupt_bitmap allocated_intids = manifest_data->intids;
	static uint8_t dma_device_id = 0;

	dlog_verbose(" Partition Device Regions\n");

	if (!fdt_is_compatible(dev_node, "arm,ffa-manifest-device-regions")) {
		return MANIFEST_ERROR_NOT_COMPATIBLE;
	}

	if (!fdt_first_child(dev_node)) {
		return MANIFEST_ERROR_DEVICE_REGION_NODE_EMPTY;
	}

	*dma_device_count = 0;

	do {
		dlog_verbose(" Device Region[%u]\n", i);

		TRY(read_optional_string(dev_node, "description",
					 &dev_regions[i].name));
		dlog_verbose(" Name: %s\n",
			     string_data(&dev_regions[i].name));

		TRY(read_uint64(dev_node, "base-address",
				&dev_regions[i].base_address));
		dlog_verbose(" Base address: %#x\n",
			     dev_regions[i].base_address);

		TRY(read_uint32(dev_node, "pages-count",
				&dev_regions[i].page_count));
		dlog_verbose(" Pages_count: %u\n",
			     dev_regions[i].page_count);

		TRY(read_uint32(dev_node, "attributes",
				&dev_regions[i].attributes));

		/*
		 * Check RWX permission attributes.
		 * Security attribute is checked at load phase.
		 */
		uint32_t permissions = dev_regions[i].attributes &
				       (MANIFEST_REGION_ATTR_READ |
					MANIFEST_REGION_ATTR_WRITE |
					MANIFEST_REGION_ATTR_EXEC);

		if (permissions != MANIFEST_REGION_ATTR_READ &&
		    permissions != (MANIFEST_REGION_ATTR_READ |
				    MANIFEST_REGION_ATTR_WRITE)) {
			return MANIFEST_ERROR_INVALID_MEM_PERM;
		}

		/* Filter device region attributes. */
		dev_regions[i].attributes = dev_regions[i].attributes &
					    MANIFEST_REGION_ALL_ATTR_MASK;

		dlog_verbose(" Attributes: %#x\n",
			     dev_regions[i].attributes);

		TRY(read_optional_uint32list(dev_node, "interrupts", &list));
		dlog_verbose(" Interrupt List:\n");
		j = 0;
		while (uint32list_has_next(&list) &&
		       j < PARTITION_MAX_INTERRUPTS_PER_DEVICE) {
			uint32_t intid;

			TRY(uint32list_get_next(
				&list, &dev_regions[i].interrupts[j].id));
			intid = dev_regions[i].interrupts[j].id;

			dlog_verbose(" ID = %u\n", intid);

			if (interrupt_bitmap_get_value(&allocated_intids,
						       intid) == 1U) {
				return MANIFEST_ERROR_INTERRUPT_ID_REPEATED;
			}

			interrupt_bitmap_set_value(&allocated_intids, intid);

			if (uint32list_has_next(&list)) {
				TRY(uint32list_get_next(&list,
							&dev_regions[i]
								 .interrupts[j]
								 .attributes));
			} else {
				return MANIFEST_ERROR_MALFORMED_INTEGER_LIST;
			}

			dev_regions[i].interrupts[j].mpidr_valid = false;
			dev_regions[i].interrupts[j].mpidr = 0;

			dlog_verbose(" attributes = %u\n",
				     dev_regions[i].interrupts[j].attributes);
			j++;
		}

		dev_regions[i].interrupt_count = j;
		if (j == 0) {
			dlog_verbose(" Empty\n");
		} else {
			TRY(read_optional_uint32list(
				dev_node, "interrupts-target", &list));
			dlog_verbose(" Interrupt Target List:\n");

			while (uint32list_has_next(&list)) {
				uint32_t intid;
				uint64_t mpidr = 0;
				uint32_t mpidr_lower = 0;
				uint32_t mpidr_upper = 0;
				struct interrupt_info *info = NULL;

				TRY(uint32list_get_next(&list, &intid));

				dlog_verbose(" ID = %u\n", intid);

				if (interrupt_bitmap_get_value(
					    &allocated_intids, intid) != 1U) {
					return MANIFEST_ERROR_INTERRUPT_ID_NOT_IN_LIST;
				}

				TRY(uint32list_get_next(&list, &mpidr_upper));
				TRY(uint32list_get_next(&list, &mpidr_lower));
				mpidr = mpidr_upper;
				mpidr <<= 32;
				mpidr |= mpidr_lower;

				info = device_region_get_interrupt_info(
					&dev_regions[i], intid);
				/*
				 * We should find info since
				 * interrupt_bitmap_get_value already ensures
				 * that we saw the interrupt and allocated ids
				 * for it.
				 */
				assert(info != NULL);
				info->mpidr = mpidr;
				info->mpidr_valid = true;
				dlog_verbose(" MPIDR = %#x\n", mpidr);
			}
		}

		TRY(read_optional_uint32(dev_node, "smmu-id",
					 MANIFEST_INVALID_ID,
					 &dev_regions[i].smmu_id));
		if (dev_regions[i].smmu_id != MANIFEST_INVALID_ID) {
			dlog_verbose(" smmu-id: %u\n",
				     dev_regions[i].smmu_id);
		}

		TRY(read_optional_uint32list(dev_node, "stream-ids", &list));
		dlog_verbose(" Stream IDs assigned:\n");

		j = 0;
		while (uint32list_has_next(&list)) {
			if (j == PARTITION_MAX_STREAMS_PER_DEVICE) {
				return MANIFEST_ERROR_STREAM_IDS_OVERFLOW;
			}

			TRY(uint32list_get_next(&list,
						&dev_regions[i].stream_ids[j]));
			dlog_verbose(" %u\n",
				     dev_regions[i].stream_ids[j]);
			j++;
		}

		if (j == 0) {
			dlog_verbose(" None\n");
		} else if (dev_regions[i].smmu_id != MANIFEST_INVALID_ID) {
			dev_regions[i].dma_device_id = dma_device_id++;
			*dma_device_count = dma_device_id;

			if (*dma_device_count > PARTITION_MAX_DMA_DEVICES) {
				return MANIFEST_ERROR_DMA_DEVICE_OVERFLOW;
			}

			dlog_verbose(" dma peripheral device id: %u\n",
				     dev_regions[i].dma_device_id);
		} else {
			/*
			 * SMMU ID must be specified if the partition specifies
			 * Stream IDs for any device upstream of SMMU.
			 */
			return MANIFEST_ERROR_MISSING_SMMU_ID;
		}

		dev_regions[i].stream_count = j;

		TRY(read_bool(dev_node, "exclusive-access",
			      &dev_regions[i].exclusive_access));
		dlog_verbose(" Exclusive_access: %u\n",
			     dev_regions[i].exclusive_access);

		i++;
	} while (fdt_next_sibling(dev_node) &&
		 (i < PARTITION_MAX_DEVICE_REGIONS));

	*count = i;

	return MANIFEST_SUCCESS;
}

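/**
 * Checks that the parsed FF-A manifest values are mutually consistent and
 * supported by this SPMC build.
 */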
static enum manifest_return_code sanity_check_ffa_manifest(
	struct manifest_vm *vm)
{
	uint16_t ffa_version_major;
	uint16_t ffa_version_minor;
	enum manifest_return_code ret_code = MANIFEST_SUCCESS;
	const char *error_string = "specified in manifest is unsupported";
	uint32_t k = 0;
	bool using_req2 = (vm->partition.messaging_method &
			   (FFA_PARTITION_DIRECT_REQ2_RECV |
			    FFA_PARTITION_DIRECT_REQ2_SEND)) != 0;

	/* ensure that the SPM version is compatible */
	ffa_version_major = (vm->partition.ffa_version & 0xffff0000) >>
			    FFA_VERSION_MAJOR_OFFSET;
	ffa_version_minor = vm->partition.ffa_version & 0xffff;

	if (ffa_version_major != FFA_VERSION_MAJOR ||
	    ffa_version_minor > FFA_VERSION_MINOR) {
		dlog_error("FF-A partition manifest version %s: %u.%u\n",
			   error_string, ffa_version_major, ffa_version_minor);
		ret_code = MANIFEST_ERROR_NOT_COMPATIBLE;
	}

	if (vm->partition.xlat_granule != PAGE_4KB) {
		dlog_error("Translation granule %s: %u\n", error_string,
			   vm->partition.xlat_granule);
		ret_code = MANIFEST_ERROR_NOT_COMPATIBLE;
	}

	if (vm->partition.execution_state != AARCH64) {
		dlog_error("Execution state %s: %u\n", error_string,
			   vm->partition.execution_state);
		ret_code = MANIFEST_ERROR_NOT_COMPATIBLE;
	}

	if (vm->partition.run_time_el != EL1 &&
	    vm->partition.run_time_el != S_EL1 &&
	    vm->partition.run_time_el != S_EL0 &&
	    vm->partition.run_time_el != EL0) {
		dlog_error("Exception level %s: %d\n", error_string,
			   vm->partition.run_time_el);
		ret_code = MANIFEST_ERROR_NOT_COMPATIBLE;
	}

	if (vm->partition.ffa_version < MAKE_FFA_VERSION(1, 2) && using_req2) {
		dlog_error("Messaging method %s: %x\n", error_string,
			   vm->partition.messaging_method);
		ret_code = MANIFEST_ERROR_NOT_COMPATIBLE;
	}

	if ((vm->partition.messaging_method &
	     ~(FFA_PARTITION_DIRECT_REQ_RECV | FFA_PARTITION_DIRECT_REQ_SEND |
	       FFA_PARTITION_INDIRECT_MSG | FFA_PARTITION_DIRECT_REQ2_RECV |
	       FFA_PARTITION_DIRECT_REQ2_SEND)) != 0U) {
		dlog_error("Messaging method %s: %x\n", error_string,
			   vm->partition.messaging_method);
		ret_code = MANIFEST_ERROR_NOT_COMPATIBLE;
	}

	if ((vm->partition.run_time_el == S_EL0 ||
	     vm->partition.run_time_el == EL0) &&
	    vm->partition.execution_ctx_count != 1) {
		dlog_error(
			"Exception level and execution context count %s: %d "
			"%d\n",
			error_string, vm->partition.run_time_el,
			vm->partition.execution_ctx_count);
		ret_code = MANIFEST_ERROR_NOT_COMPATIBLE;
	}

	for (uint16_t i = 0; i < vm->partition.dev_region_count; i++) {
		struct device_region dev_region;

		dev_region = vm->partition.dev_regions[i];

		if (dev_region.interrupt_count >
		    PARTITION_MAX_INTERRUPTS_PER_DEVICE) {
			dlog_error(
				"Interrupt count for device region exceeds "
				"limit.\n");
			ret_code = MANIFEST_ERROR_NOT_COMPATIBLE;
			continue;
		}

		for (uint8_t j = 0; j < dev_region.interrupt_count; j++) {
			k++;
			if (k > VM_MANIFEST_MAX_INTERRUPTS) {
				dlog_error(
					"Interrupt count for VM exceeds "
					"limit.\n");
				ret_code = MANIFEST_ERROR_NOT_COMPATIBLE;
				continue;
			}
		}
	}

	/* GP register is restricted to one of x0 - x3. */
	if (vm->partition.gp_register_num != -1 &&
	    vm->partition.gp_register_num > 3) {
		dlog_error("GP register number %s: %u\n", error_string,
			   vm->partition.gp_register_num);
		ret_code = MANIFEST_ERROR_NOT_COMPATIBLE;
	}

	return ret_code;
}

/**
 * Find the device id allocated to the device region node corresponding to the
 * specified stream id.
 */
static bool find_dma_device_id_from_dev_region_nodes(
	const struct manifest_vm *manifest_vm, uint32_t sid, uint8_t *device_id)
{
	for (uint16_t i = 0; i < manifest_vm->partition.dev_region_count; i++) {
		struct device_region dev_region =
			manifest_vm->partition.dev_regions[i];

		for (uint8_t j = 0; j < dev_region.stream_count; j++) {
			if (sid == dev_region.stream_ids[j]) {
				*device_id = dev_region.dma_device_id;
				return true;
			}
		}
	}
	return false;
}

/**
 * Identify the device id of a DMA device node corresponding to a stream id
 * specified in the memory region node.
 */
static bool map_dma_device_id_to_stream_ids(struct manifest_vm *vm)
{
	for (uint16_t i = 0; i < vm->partition.mem_region_count; i++) {
		struct memory_region *mem_region =
			&vm->partition.mem_regions[i];

		for (uint8_t j = 0; j < mem_region->dma_prop.stream_count;
		     j++) {
			uint32_t sid = mem_region->dma_prop.stream_ids[j];
			uint8_t device_id = 0;

			/*
			 * Every stream id must have been declared in the
			 * device node as well.
			 */
			if (!find_dma_device_id_from_dev_region_nodes(
				    vm, sid, &device_id)) {
				dlog_verbose(
					"Stream ID %u not found in any device "
					"region node of partition manifest\n",
					sid);
				return false;
			}

			mem_region->dma_prop.dma_device_id = device_id;
		}
	}

	return true;
}

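/**
 * Parses a complete FF-A partition manifest from `fdt` into `vm`: UUIDs,
 * execution properties, RX/TX buffers, boot info, memory and device regions.
 */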
J-Alves77b6f4f2023-03-15 11:34:49 +00001158enum manifest_return_code parse_ffa_manifest(
1159 struct fdt *fdt, struct manifest_vm *vm,
1160 struct fdt_node *boot_info_node, const struct boot_params *boot_params)
Olivier Deprez62d99e32020-01-09 15:58:07 +01001161{
1162 unsigned int i = 0;
Kathleen Capella422b10b2023-06-30 18:28:27 -04001163 unsigned int j = 0;
Olivier Deprez62d99e32020-01-09 15:58:07 +01001164 struct uint32list_iter uuid;
1165 uint32_t uuid_word;
1166 struct fdt_node root;
1167 struct fdt_node ffa_node;
1168 struct string rxtx_node_name = STRING_INIT("rx_tx-info");
Manish Pandey6542f5c2020-04-27 14:37:46 +01001169 struct string mem_region_node_name = STRING_INIT("memory-regions");
Manish Pandeye68e7932020-04-23 15:29:28 +01001170 struct string dev_region_node_name = STRING_INIT("device-regions");
J-Alves35315782022-01-25 17:58:32 +00001171 struct string boot_info_node_name = STRING_INIT("boot-info");
Madhukar Pappireddy84154052022-06-21 18:30:25 -05001172 bool managed_exit_field_present = false;
Olivier Deprez62d99e32020-01-09 15:58:07 +01001173
1174 if (!fdt_find_node(fdt, "/", &root)) {
1175 return MANIFEST_ERROR_NO_ROOT_NODE;
1176 }
1177
1178 /* Check "compatible" property. */
1179 if (!fdt_is_compatible(&root, "arm,ffa-manifest-1.0")) {
1180 return MANIFEST_ERROR_NOT_COMPATIBLE;
1181 }
1182
J-Alves4369bd92020-08-07 16:35:36 +01001183 TRY(read_uint32list(&root, "uuid", &uuid));
Olivier Deprez62d99e32020-01-09 15:58:07 +01001184
Kathleen Capella422b10b2023-06-30 18:28:27 -04001185 while (uint32list_has_next(&uuid) && j < PARTITION_MAX_UUIDS) {
1186 while (uint32list_has_next(&uuid) && i < 4) {
1187 TRY(uint32list_get_next(&uuid, &uuid_word));
1188 vm->partition.uuids[j].uuid[i] = uuid_word;
1189 i++;
1190 }
1191
1192 if (ffa_uuid_is_null(&vm->partition.uuids[j])) {
1193 return MANIFEST_ERROR_UUID_ALL_ZEROS;
1194 }
1195 dlog_verbose(" UUID %#x-%x-%x-%x\n",
1196 vm->partition.uuids[j].uuid[0],
1197 vm->partition.uuids[j].uuid[1],
1198 vm->partition.uuids[j].uuid[2],
1199 vm->partition.uuids[j].uuid[3]);
1200 j++;
1201 i = 0;
Olivier Deprez62d99e32020-01-09 15:58:07 +01001202 }
Kathleen Capella422b10b2023-06-30 18:28:27 -04001203
1204 vm->partition.uuid_count = j;
1205 dlog_verbose(" Number of UUIDs %u\n", vm->partition.uuid_count);
Olivier Deprez62d99e32020-01-09 15:58:07 +01001206
Olivier Deprezb7aa0c22023-06-29 10:46:26 +02001207 TRY(read_uint32(&root, "ffa-version", &vm->partition.ffa_version));
1208 dlog_verbose(" Expected FF-A version %u.%u\n",
1209 vm->partition.ffa_version >> 16,
1210 vm->partition.ffa_version & 0xffff);
1211
Olivier Deprez62d99e32020-01-09 15:58:07 +01001212 TRY(read_uint16(&root, "execution-ctx-count",
Raghu Krishnamurthy8c250a92021-07-02 12:16:42 -07001213 &vm->partition.execution_ctx_count));
J-Alves4369bd92020-08-07 16:35:36 +01001214 dlog_verbose(" Number of execution context %u\n",
Raghu Krishnamurthy8c250a92021-07-02 12:16:42 -07001215 vm->partition.execution_ctx_count);
Olivier Deprez62d99e32020-01-09 15:58:07 +01001216
1217 TRY(read_uint8(&root, "exception-level",
Raghu Krishnamurthy8c250a92021-07-02 12:16:42 -07001218 (uint8_t *)&vm->partition.run_time_el));
1219 dlog_verbose(" Run-time EL %u\n", vm->partition.run_time_el);
Olivier Deprez62d99e32020-01-09 15:58:07 +01001220
1221 TRY(read_uint8(&root, "execution-state",
Raghu Krishnamurthy8c250a92021-07-02 12:16:42 -07001222 (uint8_t *)&vm->partition.execution_state));
1223 dlog_verbose(" Execution state %u\n", vm->partition.execution_state);
Olivier Deprez62d99e32020-01-09 15:58:07 +01001224
Raghu Krishnamurthy8c250a92021-07-02 12:16:42 -07001225 TRY(read_optional_uint64(&root, "load-address", 0,
1226 &vm->partition.load_addr));
1227 dlog_verbose(" Load address %#x\n", vm->partition.load_addr);
Olivier Deprez62d99e32020-01-09 15:58:07 +01001228
J-Alves4369bd92020-08-07 16:35:36 +01001229 TRY(read_optional_uint64(&root, "entrypoint-offset", 0,
Raghu Krishnamurthy8c250a92021-07-02 12:16:42 -07001230 &vm->partition.ep_offset));
1231 dlog_verbose(" Entry point offset %#x\n", vm->partition.ep_offset);
Olivier Deprez62d99e32020-01-09 15:58:07 +01001232
J-Alves35315782022-01-25 17:58:32 +00001233 TRY(read_optional_uint32(&root, "gp-register-num",
1234 DEFAULT_BOOT_GP_REGISTER,
1235 &vm->partition.gp_register_num));
Olivier Deprezb7aa0c22023-06-29 10:46:26 +02001236
1237 if (vm->partition.gp_register_num != DEFAULT_BOOT_GP_REGISTER) {
1238 dlog_verbose(" Boot GP register: x%u\n",
1239 vm->partition.gp_register_num);
1240 }
J-Alves35315782022-01-25 17:58:32 +00001241
J-Alvesb37fd082020-10-22 12:29:21 +01001242 TRY(read_optional_uint16(&root, "boot-order", DEFAULT_BOOT_ORDER,
Raghu Krishnamurthy8c250a92021-07-02 12:16:42 -07001243 &vm->partition.boot_order));
Kathleen Capella4a2a6e72023-04-21 14:43:26 -04001244 if (vm->partition.boot_order != DEFAULT_BOOT_ORDER) {
1245 dlog_verbose(" Boot order %#u\n", vm->partition.boot_order);
1246 }
1247
1248 if (!check_boot_order(vm->partition.boot_order)) {
1249 return MANIFEST_ERROR_INVALID_BOOT_ORDER;
1250 }
J-Alvesb37fd082020-10-22 12:29:21 +01001251
J-Alves4369bd92020-08-07 16:35:36 +01001252 TRY(read_optional_uint8(&root, "xlat-granule", 0,
Raghu Krishnamurthy8c250a92021-07-02 12:16:42 -07001253 (uint8_t *)&vm->partition.xlat_granule));
1254 dlog_verbose(" Translation granule %u\n", vm->partition.xlat_granule);
Olivier Deprez62d99e32020-01-09 15:58:07 +01001255
1256 ffa_node = root;
1257 if (fdt_find_child(&ffa_node, &rxtx_node_name)) {
1258 if (!fdt_is_compatible(&ffa_node,
1259 "arm,ffa-manifest-rx_tx-buffer")) {
1260 return MANIFEST_ERROR_NOT_COMPATIBLE;
1261 }
1262
Manish Pandeyfa1f2912020-05-05 12:57:01 +01001263 /*
1264 * Read only phandles for now, it will be used to update buffers
1265 * while parsing memory regions.
1266 */
1267 TRY(read_uint32(&ffa_node, "rx-buffer",
Raghu Krishnamurthy8c250a92021-07-02 12:16:42 -07001268 &vm->partition.rxtx.rx_phandle));
Olivier Deprez62d99e32020-01-09 15:58:07 +01001269
Manish Pandeyfa1f2912020-05-05 12:57:01 +01001270 TRY(read_uint32(&ffa_node, "tx-buffer",
Raghu Krishnamurthy8c250a92021-07-02 12:16:42 -07001271 &vm->partition.rxtx.tx_phandle));
Olivier Deprez62d99e32020-01-09 15:58:07 +01001272
Raghu Krishnamurthy8c250a92021-07-02 12:16:42 -07001273 vm->partition.rxtx.available = true;
Olivier Deprez62d99e32020-01-09 15:58:07 +01001274 }
1275
Kathleen Capellaf71dee42023-08-08 16:24:14 -04001276 TRY(read_uint16(&root, "messaging-method",
1277 (uint16_t *)&vm->partition.messaging_method));
Raghu Krishnamurthy8c250a92021-07-02 12:16:42 -07001278 dlog_verbose(" Messaging method %u\n", vm->partition.messaging_method);
Olivier Deprez62d99e32020-01-09 15:58:07 +01001279
Madhukar Pappireddy84154052022-06-21 18:30:25 -05001280 TRY(read_bool(&root, "managed-exit", &managed_exit_field_present));
1281
1282 TRY(read_optional_uint8(
1283 &root, "ns-interrupts-action", NS_ACTION_SIGNALED,
1284 (uint8_t *)&vm->partition.ns_interrupts_action));
1285
1286 /*
1287 * An SP manifest can specify one of the fields listed below:
1288 * `managed-exit`: Introduced in FF-A v1.0 spec.
1289 * `ns-interrupts-action`: Introduced in FF-A v1.1 EAC0 spec.
1290 * If both are missing from the manifest, the default response is
1291 * NS_ACTION_SIGNALED.
1292 */
1293 if (managed_exit_field_present) {
1294 vm->partition.ns_interrupts_action = NS_ACTION_ME;
1295 }
1296
1297 if (vm->partition.ns_interrupts_action != NS_ACTION_QUEUED &&
1298 vm->partition.ns_interrupts_action != NS_ACTION_ME &&
1299 vm->partition.ns_interrupts_action != NS_ACTION_SIGNALED) {
Madhukar Pappireddy5c04a382022-12-28 11:29:26 -06001300 return MANIFEST_ERROR_ILLEGAL_NS_INT_ACTION;
Madhukar Pappireddy84154052022-06-21 18:30:25 -05001301 }
1302
1303 dlog_verbose(
Olivier Deprezb7aa0c22023-06-29 10:46:26 +02001304 " NS Interrupts %s\n",
Madhukar Pappireddy84154052022-06-21 18:30:25 -05001305 (vm->partition.ns_interrupts_action == NS_ACTION_QUEUED)
1306 ? "Queued"
1307 : (vm->partition.ns_interrupts_action == NS_ACTION_SIGNALED)
1308 ? "Signaled"
1309 : "Managed exit");
1310
1311 if (vm->partition.ns_interrupts_action == NS_ACTION_ME) {
1312 /* Managed exit only supported by S_EL1 partitions. */
1313 if (vm->partition.run_time_el != S_EL1) {
1314 dlog_error(
1315 "Managed exit cannot be supported by this "
1316 "partition\n");
Madhukar Pappireddy5c04a382022-12-28 11:29:26 -06001317 return MANIFEST_ERROR_ILLEGAL_NS_INT_ACTION;
Madhukar Pappireddy84154052022-06-21 18:30:25 -05001318 }
Madhukar Pappireddy046dad02022-06-21 18:43:33 -05001319
1320 TRY(read_bool(&root, "managed-exit-virq",
1321 &vm->partition.me_signal_virq));
1322 if (vm->partition.me_signal_virq) {
1323 dlog_verbose(" Managed Exit signaled through vIRQ\n");
1324 }
J-Alvesa4730db2021-11-02 10:31:01 +00001325 }
1326
1327 TRY(read_bool(&root, "notification-support",
1328 &vm->partition.notification_support));
1329 if (vm->partition.notification_support) {
1330 dlog_verbose(" Notifications Receipt Supported\n");
1331 }
Maksims Svecovs9ddf86a2021-05-06 17:17:21 +01001332
Madhukar Pappireddy5c04a382022-12-28 11:29:26 -06001333 TRY(read_optional_uint8(
1334 &root, "other-s-interrupts-action", OTHER_S_INT_ACTION_SIGNALED,
1335 (uint8_t *)&vm->partition.other_s_interrupts_action));
1336
1337 if (vm->partition.other_s_interrupts_action ==
1338 OTHER_S_INT_ACTION_QUEUED) {
1339 if (vm->partition.ns_interrupts_action != NS_ACTION_QUEUED) {
1340 dlog_error(
1341 "Choice of the fields 'ns-interrupts-action' "
1342 "and 'other-s-interrupts-action' not "
1343 "compatible\n");
1344 return MANIFEST_ERROR_NOT_COMPATIBLE;
1345 }
1346 } else if (vm->partition.other_s_interrupts_action >
1347 OTHER_S_INT_ACTION_SIGNALED) {
1348 dlog_error(
1349 "Illegal value specified for the field"
1350 " 'other-s-interrupts-action': %u\n",
1351 vm->partition.other_s_interrupts_action);
1352 return MANIFEST_ERROR_ILLEGAL_OTHER_S_INT_ACTION;
1353 }
1354
J-Alves35315782022-01-25 17:58:32 +00001355 /* Parse boot info node. */
1356 if (boot_info_node != NULL) {
1357 ffa_node = root;
1358 vm->partition.boot_info =
1359 fdt_find_child(&ffa_node, &boot_info_node_name);
1360 if (vm->partition.boot_info) {
1361 *boot_info_node = ffa_node;
1362 }
1363 } else {
1364 vm->partition.boot_info = false;
1365 }
1366
Olivier Depreza15f2352022-09-26 09:17:24 +02001367 TRY(read_optional_uint32(
1368 &root, "power-management-messages",
1369 MANIFEST_POWER_MANAGEMENT_CPU_OFF_SUPPORTED |
1370 MANIFEST_POWER_MANAGEMENT_CPU_ON_SUPPORTED,
1371 &vm->partition.power_management));
1372 vm->partition.power_management &= MANIFEST_POWER_MANAGEMENT_ALL_MASK;
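	/*
	 * Partitions with a single execution context and EL0/S-EL0
	 * partitions do not receive power management messages, so clear the
	 * whole bitmap for them.
	 */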
1373 if (vm->partition.execution_ctx_count == 1 ||
Daniel Boulby874d5432023-04-27 12:40:24 +01001374 vm->partition.run_time_el == S_EL0 ||
1375 vm->partition.run_time_el == EL0) {
Olivier Depreza15f2352022-09-26 09:17:24 +02001376 vm->partition.power_management =
1377 MANIFEST_POWER_MANAGEMENT_NONE_MASK;
1378 }
1379
1380 dlog_verbose(" Power management messages %#x\n",
1381 vm->partition.power_management);
1382
Manish Pandey6542f5c2020-04-27 14:37:46 +01001383 /* Parse memory-regions */
1384 ffa_node = root;
1385 if (fdt_find_child(&ffa_node, &mem_region_node_name)) {
Raghu Krishnamurthy8c250a92021-07-02 12:16:42 -07001386 TRY(parse_ffa_memory_region_node(
Karl Meakinf6d49402023-04-04 18:14:26 +01001387 &ffa_node, vm->partition.load_addr,
1388 vm->partition.mem_regions,
J-Alves77b6f4f2023-03-15 11:34:49 +00001389 &vm->partition.mem_region_count, &vm->partition.rxtx,
1390 boot_params));
Manish Pandey6542f5c2020-04-27 14:37:46 +01001391 }
Manish Pandey2145c212020-05-01 16:04:22 +01001392 dlog_verbose(" Total %u memory regions found\n",
Raghu Krishnamurthy8c250a92021-07-02 12:16:42 -07001393 vm->partition.mem_region_count);
Manish Pandey6542f5c2020-04-27 14:37:46 +01001394
Manish Pandeye68e7932020-04-23 15:29:28 +01001395 /* Parse Device-regions */
1396 ffa_node = root;
1397 if (fdt_find_child(&ffa_node, &dev_region_node_name)) {
Raghu Krishnamurthy8c250a92021-07-02 12:16:42 -07001398 TRY(parse_ffa_device_region_node(
1399 &ffa_node, vm->partition.dev_regions,
Madhukar Pappireddye032af52023-10-11 14:52:58 -05001400 &vm->partition.dev_region_count,
1401 &vm->partition.dma_device_count));
Manish Pandeye68e7932020-04-23 15:29:28 +01001402 }
Manish Pandey2145c212020-05-01 16:04:22 +01001403 dlog_verbose(" Total %u device regions found\n",
Raghu Krishnamurthy8c250a92021-07-02 12:16:42 -07001404 vm->partition.dev_region_count);
Manish Pandeye68e7932020-04-23 15:29:28 +01001405
Madhukar Pappireddye032af52023-10-11 14:52:58 -05001406 if (!map_dma_device_id_to_stream_ids(vm)) {
1407 return MANIFEST_ERROR_NOT_COMPATIBLE;
1408 }
1409
J-Alves4eb7b542022-03-02 15:21:52 +00001410 return sanity_check_ffa_manifest(vm);
Olivier Deprez62d99e32020-01-09 15:58:07 +01001411}
1412
Olivier Deprez62d99e32020-01-09 15:58:07 +01001413static enum manifest_return_code parse_ffa_partition_package(
1414 struct mm_stage1_locked stage1_locked, struct fdt_node *node,
J-Alves19e20cf2023-08-02 12:48:55 +01001415 struct manifest_vm *vm, ffa_id_t vm_id,
J-Alves77b6f4f2023-03-15 11:34:49 +00001416 const struct boot_params *boot_params, struct mpool *ppool)
Olivier Deprez62d99e32020-01-09 15:58:07 +01001417{
1418 enum manifest_return_code ret = MANIFEST_ERROR_NOT_COMPATIBLE;
J-Alves2f86c1e2022-02-23 18:44:19 +00001419 uintpaddr_t load_address;
1420 struct sp_pkg_header header;
Olivier Deprez62d99e32020-01-09 15:58:07 +01001421 struct fdt sp_fdt;
J-Alves2f86c1e2022-02-23 18:44:19 +00001422 vaddr_t pkg_start;
1423 vaddr_t manifest_address;
J-Alves35315782022-01-25 17:58:32 +00001424 struct fdt_node boot_info_node;
Olivier Deprez62d99e32020-01-09 15:58:07 +01001425
1426 /*
1427	 * This must have been hinted as being an FF-A partition;
1428	 * return failure straight away if this is not the case.
1429 */
1430 if (!vm->is_ffa_partition) {
J-Alves2f86c1e2022-02-23 18:44:19 +00001431 return ret;
1432 }
1433
1434 TRY(read_uint64(node, "load_address", &load_address));
1435 if (!is_aligned(load_address, PAGE_SIZE)) {
Olivier Deprez62d99e32020-01-09 15:58:07 +01001436 return MANIFEST_ERROR_NOT_COMPATIBLE;
1437 }
1438
J-Alves2f86c1e2022-02-23 18:44:19 +00001439 assert(load_address != 0U);
1440
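	/*
	 * Map the partition package found at load_address and read its
	 * header (sp_pkg_init()); the partition manifest is then parsed
	 * from header.pm_offset / header.pm_size within the mapped package.
	 */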
1441 if (!sp_pkg_init(stage1_locked, pa_init(load_address), &header,
1442 ppool)) {
1443 return ret;
Olivier Deprez62d99e32020-01-09 15:58:07 +01001444 }
1445
J-Alves2f86c1e2022-02-23 18:44:19 +00001446 pkg_start = va_init(load_address);
Olivier Deprez62d99e32020-01-09 15:58:07 +01001447
J-Alves2f86c1e2022-02-23 18:44:19 +00001448 if (vm_id != HF_PRIMARY_VM_ID &&
1449 sp_pkg_get_mem_size(&header) >= vm->secondary.mem_size) {
J-Alves4369bd92020-08-07 16:35:36 +01001450 dlog_error("Invalid package header or DT size.\n");
J-Alves2f86c1e2022-02-23 18:44:19 +00001451 goto out;
Olivier Deprez62d99e32020-01-09 15:58:07 +01001452 }
1453
J-Alves2f86c1e2022-02-23 18:44:19 +00001454 manifest_address = va_add(va_init(load_address), header.pm_offset);
1455 if (!fdt_init_from_ptr(&sp_fdt, ptr_from_va(manifest_address),
1456 header.pm_size)) {
Kathleen Capella422b10b2023-06-30 18:28:27 -04001457 dlog_error("manifest.c: FDT failed validation.\n");
J-Alves2f86c1e2022-02-23 18:44:19 +00001458 goto out;
Olivier Deprez62d99e32020-01-09 15:58:07 +01001459 }
1460
J-Alves77b6f4f2023-03-15 11:34:49 +00001461 ret = parse_ffa_manifest(&sp_fdt, vm, &boot_info_node, boot_params);
Olivier Deprez62d99e32020-01-09 15:58:07 +01001462 if (ret != MANIFEST_SUCCESS) {
Olivier Deprez3cbdf862023-06-07 15:36:32 +02001463 dlog_error("Error parsing partition manifest.\n");
J-Alves2f86c1e2022-02-23 18:44:19 +00001464 goto out;
Olivier Deprez62d99e32020-01-09 15:58:07 +01001465 }
1466
J-Alves2f86c1e2022-02-23 18:44:19 +00001467 if (vm->partition.load_addr != load_address) {
J-Alvesa26ea212021-03-22 14:33:47 +00001468 dlog_warning(
1469			"Partition's load address in its manifest differs"
1470			" from the one specified in its package.\n");
J-Alves2f86c1e2022-02-23 18:44:19 +00001471 vm->partition.load_addr = load_address;
J-Alvesa26ea212021-03-22 14:33:47 +00001472 }
1473
J-Alves889a1d72022-05-13 11:38:27 +01001474 if (vm->partition.gp_register_num != DEFAULT_BOOT_GP_REGISTER) {
1475 if (header.version == SP_PKG_HEADER_VERSION_2 &&
1476 vm->partition.boot_info &&
1477 !ffa_boot_info_node(&boot_info_node, pkg_start, &header)) {
1478 dlog_error("Failed to process boot information.\n");
1479 }
J-Alves35315782022-01-25 17:58:32 +00001480 }
J-Alves2f86c1e2022-02-23 18:44:19 +00001481out:
1482 sp_pkg_deinit(stage1_locked, pkg_start, &header, ppool);
Olivier Deprez62d99e32020-01-09 15:58:07 +01001483 return ret;
1484}
1485
David Brazdil7a462ec2019-08-15 12:27:47 +01001486/**
1487 * Parse manifest from FDT.
1488 */
Olivier Deprez62d99e32020-01-09 15:58:07 +01001489enum manifest_return_code manifest_init(struct mm_stage1_locked stage1_locked,
Olivier Deprez93644652022-09-09 11:01:12 +02001490 struct manifest **manifest_ret,
Olivier Deprez62d99e32020-01-09 15:58:07 +01001491 struct memiter *manifest_fdt,
J-Alves77b6f4f2023-03-15 11:34:49 +00001492 struct boot_params *boot_params,
Olivier Deprez62d99e32020-01-09 15:58:07 +01001493 struct mpool *ppool)
David Brazdil7a462ec2019-08-15 12:27:47 +01001494{
Olivier Deprez93644652022-09-09 11:01:12 +02001495 struct manifest *manifest;
David Brazdilb856be62020-03-25 10:14:55 +00001496 struct string vm_name;
1497 struct fdt fdt;
David Brazdil7a462ec2019-08-15 12:27:47 +01001498 struct fdt_node hyp_node;
1499 size_t i = 0;
1500 bool found_primary_vm = false;
1501
J-Alvescd438fa2023-04-26 10:13:12 +01001502 if (boot_params->mem_ranges_count == 0 &&
1503 boot_params->ns_mem_ranges_count == 0) {
1504 return MANIFEST_ERROR_MEMORY_MISSING;
1505 }
1506
J-Alves77b6f4f2023-03-15 11:34:49 +00001507 dump_memory_ranges(boot_params->mem_ranges,
1508 boot_params->mem_ranges_count, false);
1509 dump_memory_ranges(boot_params->ns_mem_ranges,
1510 boot_params->ns_mem_ranges_count, true);
1511
Olivier Deprez93644652022-09-09 11:01:12 +02001512 /* Allocate space in the ppool for the manifest data. */
1513 if (!manifest_data_init(ppool)) {
1514 panic("Unable to allocate manifest data.\n");
Daniel Boulby801f8ef2022-06-27 14:21:01 +01001515 }
1516
Olivier Deprez93644652022-09-09 11:01:12 +02001517 manifest = &manifest_data->manifest;
1518 *manifest_ret = manifest;
1519
David Brazdilb856be62020-03-25 10:14:55 +00001520 if (!fdt_init_from_memiter(&fdt, manifest_fdt)) {
1521 return MANIFEST_ERROR_FILE_SIZE; /* TODO */
David Brazdila2358d42020-01-27 18:51:38 +00001522 }
1523
David Brazdil7a462ec2019-08-15 12:27:47 +01001524 /* Find hypervisor node. */
David Brazdilb856be62020-03-25 10:14:55 +00001525 if (!fdt_find_node(&fdt, "/hypervisor", &hyp_node)) {
David Brazdil7a462ec2019-08-15 12:27:47 +01001526 return MANIFEST_ERROR_NO_HYPERVISOR_FDT_NODE;
1527 }
1528
David Brazdil74e9c3b2019-08-28 11:09:08 +01001529 /* Check "compatible" property. */
David Brazdilf4925382020-03-25 13:33:51 +00001530 if (!fdt_is_compatible(&hyp_node, "hafnium,hafnium")) {
David Brazdil74e9c3b2019-08-28 11:09:08 +01001531 return MANIFEST_ERROR_NOT_COMPATIBLE;
1532 }
1533
Olivier Deprez622ab8d2021-08-02 12:15:45 +02001534 TRY(read_bool(&hyp_node, "ffa_tee_enabled",
1535 &manifest->ffa_tee_enabled));
Andrew Walbran41a49d82020-01-10 17:46:38 +00001536
David Brazdil7a462ec2019-08-15 12:27:47 +01001537 /* Iterate over reserved VM IDs and check no such nodes exist. */
Olivier Deprez2a8ee342020-08-03 15:10:44 +02001538 for (i = HF_VM_ID_BASE; i < HF_VM_ID_OFFSET; i++) {
J-Alves19e20cf2023-08-02 12:48:55 +01001539 ffa_id_t vm_id = (ffa_id_t)i - HF_VM_ID_BASE;
David Brazdil7a462ec2019-08-15 12:27:47 +01001540 struct fdt_node vm_node = hyp_node;
David Brazdil7a462ec2019-08-15 12:27:47 +01001541
David Brazdilb856be62020-03-25 10:14:55 +00001542 generate_vm_node_name(&vm_name, vm_id);
1543 if (fdt_find_child(&vm_node, &vm_name)) {
David Brazdil7a462ec2019-08-15 12:27:47 +01001544 return MANIFEST_ERROR_RESERVED_VM_ID;
1545 }
1546 }
1547
1548 /* Iterate over VM nodes until we find one that does not exist. */
1549 for (i = 0; i <= MAX_VMS; ++i) {
J-Alves19e20cf2023-08-02 12:48:55 +01001550 ffa_id_t vm_id = HF_VM_ID_OFFSET + i;
David Brazdil7a462ec2019-08-15 12:27:47 +01001551 struct fdt_node vm_node = hyp_node;
David Brazdil7a462ec2019-08-15 12:27:47 +01001552
Olivier Deprez2a8ee342020-08-03 15:10:44 +02001553 generate_vm_node_name(&vm_name, vm_id - HF_VM_ID_BASE);
David Brazdilb856be62020-03-25 10:14:55 +00001554 if (!fdt_find_child(&vm_node, &vm_name)) {
David Brazdil7a462ec2019-08-15 12:27:47 +01001555 break;
1556 }
1557
1558 if (i == MAX_VMS) {
1559 return MANIFEST_ERROR_TOO_MANY_VMS;
1560 }
1561
1562 if (vm_id == HF_PRIMARY_VM_ID) {
1563 CHECK(found_primary_vm == false); /* sanity check */
1564 found_primary_vm = true;
1565 }
1566
David Brazdil0251b942019-09-10 15:59:50 +01001567 manifest->vm_count = i + 1;
Olivier Deprez62d99e32020-01-09 15:58:07 +01001568
1569 TRY(parse_vm_common(&vm_node, &manifest->vm[i], vm_id));
1570
Raghu Krishnamurthyb49549e2021-07-02 08:27:38 -07001571 CHECK(!manifest->vm[i].is_hyp_loaded ||
1572 manifest->vm[i].is_ffa_partition);
1573
1574 if (manifest->vm[i].is_ffa_partition &&
1575 !manifest->vm[i].is_hyp_loaded) {
Olivier Deprez62d99e32020-01-09 15:58:07 +01001576 TRY(parse_ffa_partition_package(stage1_locked, &vm_node,
1577 &manifest->vm[i], vm_id,
J-Alves77b6f4f2023-03-15 11:34:49 +00001578 boot_params, ppool));
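			/*
			 * Partition memory size in pages, i.e. mem_size
			 * rounded up to the next page boundary.
			 */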
J-Alves596049f2023-03-15 11:40:24 +00001579 size_t page_count =
1580 align_up(manifest->vm[i].secondary.mem_size,
1581 PAGE_SIZE) /
1582 PAGE_SIZE;
1583
1584 if (vm_id == HF_PRIMARY_VM_ID) {
1585 continue;
1586 }
1587
1588 TRY(check_partition_memory_is_valid(
1589 manifest->vm[i].partition.load_addr, page_count,
1590 0, boot_params));
1591
1592 /*
1593 * Check if memory from load-address until (load-address
1594			 * + memory size) has been used by another partition.
1595 */
1596 TRY(check_and_record_memory_used(
1597 manifest->vm[i].partition.load_addr,
1598 page_count));
Olivier Deprez62d99e32020-01-09 15:58:07 +01001599 } else {
1600 TRY(parse_vm(&vm_node, &manifest->vm[i], vm_id));
1601 }
David Brazdil7a462ec2019-08-15 12:27:47 +01001602 }
1603
Olivier Deprezfb05f3c2020-11-10 17:48:04 +01001604 if (!found_primary_vm && vm_id_is_current_world(HF_PRIMARY_VM_ID)) {
David Brazdil7a462ec2019-08-15 12:27:47 +01001605 return MANIFEST_ERROR_NO_PRIMARY_VM;
1606 }
1607
1608 return MANIFEST_SUCCESS;
1609}
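
/*
 * Typical usage (a minimal sketch; the actual call site lives in the boot
 * flow, not in this file, and the variable names below are illustrative):
 *
 *	struct manifest *manifest;
 *	enum manifest_return_code ret;
 *
 *	ret = manifest_init(stage1_locked, &manifest, &manifest_it,
 *			    &params, ppool);
 *	if (ret != MANIFEST_SUCCESS) {
 *		dlog_error("Could not parse manifest: %s.\n",
 *			   manifest_strerror(ret));
 *		panic("Manifest parsing failed.\n");
 *	}
 */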
1610
Olivier Deprez93644652022-09-09 11:01:12 +02001611/**
1612 * Free manifest data resources, called once manifest parsing has
1613 * completed and VMs are loaded.
1614 */
Daniel Boulby801f8ef2022-06-27 14:21:01 +01001615void manifest_deinit(struct mpool *ppool)
1616{
Olivier Deprez93644652022-09-09 11:01:12 +02001617 manifest_data_deinit(ppool);
Daniel Boulby801f8ef2022-06-27 14:21:01 +01001618}
1619
David Brazdil7a462ec2019-08-15 12:27:47 +01001620const char *manifest_strerror(enum manifest_return_code ret_code)
1621{
1622 switch (ret_code) {
1623 case MANIFEST_SUCCESS:
1624 return "Success";
David Brazdila2358d42020-01-27 18:51:38 +00001625 case MANIFEST_ERROR_FILE_SIZE:
1626 return "Total size in header does not match file size";
Olivier Deprez62d99e32020-01-09 15:58:07 +01001627 case MANIFEST_ERROR_MALFORMED_DTB:
1628 return "Malformed device tree blob";
David Brazdila2358d42020-01-27 18:51:38 +00001629 case MANIFEST_ERROR_NO_ROOT_NODE:
1630 return "Could not find root node in manifest";
David Brazdil7a462ec2019-08-15 12:27:47 +01001631 case MANIFEST_ERROR_NO_HYPERVISOR_FDT_NODE:
1632 return "Could not find \"hypervisor\" node in manifest";
David Brazdil74e9c3b2019-08-28 11:09:08 +01001633 case MANIFEST_ERROR_NOT_COMPATIBLE:
1634 return "Hypervisor manifest entry not compatible with Hafnium";
David Brazdil7a462ec2019-08-15 12:27:47 +01001635 case MANIFEST_ERROR_RESERVED_VM_ID:
1636 return "Manifest defines a VM with a reserved ID";
1637 case MANIFEST_ERROR_NO_PRIMARY_VM:
1638 return "Manifest does not contain a primary VM entry";
1639 case MANIFEST_ERROR_TOO_MANY_VMS:
1640 return "Manifest specifies more VMs than Hafnium has "
1641 "statically allocated space for";
1642 case MANIFEST_ERROR_PROPERTY_NOT_FOUND:
1643 return "Property not found";
1644 case MANIFEST_ERROR_MALFORMED_STRING:
1645 return "Malformed string property";
David Brazdil0dbb41f2019-09-09 18:03:35 +01001646 case MANIFEST_ERROR_STRING_TOO_LONG:
1647 return "String too long";
David Brazdil7a462ec2019-08-15 12:27:47 +01001648 case MANIFEST_ERROR_MALFORMED_INTEGER:
1649 return "Malformed integer property";
1650 case MANIFEST_ERROR_INTEGER_OVERFLOW:
1651 return "Integer overflow";
Andrew Scullae9962e2019-10-03 16:51:16 +01001652 case MANIFEST_ERROR_MALFORMED_INTEGER_LIST:
1653 return "Malformed integer list property";
Andrew Scullb2c3a242019-11-04 13:52:36 +00001654 case MANIFEST_ERROR_MALFORMED_BOOLEAN:
1655 return "Malformed boolean property";
Manish Pandey6542f5c2020-04-27 14:37:46 +01001656 case MANIFEST_ERROR_MEMORY_REGION_NODE_EMPTY:
1657 return "Memory-region node should have at least one entry";
Manish Pandeye68e7932020-04-23 15:29:28 +01001658 case MANIFEST_ERROR_DEVICE_REGION_NODE_EMPTY:
1659 return "Device-region node should have at least one entry";
Manish Pandeyf06c9072020-09-29 15:41:58 +01001660 case MANIFEST_ERROR_RXTX_SIZE_MISMATCH:
1661 return "RX and TX buffers should be of same size";
Varun Wadekar4afbfd72022-10-13 14:30:18 +01001662 case MANIFEST_ERROR_MEM_REGION_EMPTY:
1663 return "Memory region should have at least one page";
Karl Meakinf6d49402023-04-04 18:14:26 +01001664 case MANIFEST_ERROR_BASE_ADDRESS_AND_RELATIVE_ADDRESS:
1665 return "Base and relative addresses are mutually exclusive";
Daniel Boulbya7e9e182022-06-27 14:21:01 +01001666 case MANIFEST_ERROR_MEM_REGION_OVERLAP:
1667 return "Memory region overlaps with one already allocated";
Daniel Boulbyc1a613d2022-10-18 11:26:17 +01001668 case MANIFEST_ERROR_MEM_REGION_UNALIGNED:
1669 return "Memory region is not aligned to a page boundary";
Raghu Krishnamurthy384693c2021-10-11 13:56:24 -07001670 case MANIFEST_ERROR_INVALID_MEM_PERM:
1671 return "Memory permission should be RO, RW or RX";
J-Alves35315782022-01-25 17:58:32 +00001672 case MANIFEST_ERROR_ARGUMENTS_LIST_EMPTY:
1673 return "Arguments-list node should have at least one argument";
Daniel Boulby801f8ef2022-06-27 14:21:01 +01001674 case MANIFEST_ERROR_INTERRUPT_ID_REPEATED:
1675 return "Interrupt ID already assigned to another endpoint";
Madhukar Pappireddy5c04a382022-12-28 11:29:26 -06001676 case MANIFEST_ERROR_ILLEGAL_NS_INT_ACTION:
Madhukar Pappireddy84154052022-06-21 18:30:25 -05001677		return "Illegal value specified for the field: Action in "
1678 "response to NS Interrupt";
Raghu Krishnamurthy98da1ca2022-10-04 08:59:01 -07001679 case MANIFEST_ERROR_INTERRUPT_ID_NOT_IN_LIST:
1680 return "Interrupt ID is not in the list of interrupts";
Madhukar Pappireddy5c04a382022-12-28 11:29:26 -06001681 case MANIFEST_ERROR_ILLEGAL_OTHER_S_INT_ACTION:
1682 return "Illegal value specified for the field: Action in "
1683 "response to Other-S Interrupt";
J-Alves77b6f4f2023-03-15 11:34:49 +00001684 case MANIFEST_ERROR_MEMORY_MISSING:
1685 return "Memory nodes must be defined in the SPMC manifest "
Olivier Deprez3cbdf862023-06-07 15:36:32 +02001686 "('memory' and 'ns-memory')";
J-Alves77b6f4f2023-03-15 11:34:49 +00001687 case MANIFEST_ERROR_PARTITION_ADDRESS_OVERLAP:
1688 return "Partition's memory [load address: load address + "
1689		       "memory size[ overlaps with other allocated "
Olivier Deprez3cbdf862023-06-07 15:36:32 +02001690 "regions";
J-Alves77b6f4f2023-03-15 11:34:49 +00001691 case MANIFEST_ERROR_MEM_REGION_INVALID:
Olivier Deprez3cbdf862023-06-07 15:36:32 +02001692 return "Invalid memory region range";
Kathleen Capella4a2a6e72023-04-21 14:43:26 -04001693 case MANIFEST_ERROR_INVALID_BOOT_ORDER:
1694 return "Boot order should be a unique value less than "
1695		       "the default largest value";
Kathleen Capella422b10b2023-06-30 18:28:27 -04001696 case MANIFEST_ERROR_UUID_ALL_ZEROS:
1697 return "UUID should not be NIL";
Madhukar Pappireddy3c2b7912023-10-11 14:47:27 -05001698 case MANIFEST_ERROR_MISSING_SMMU_ID:
1699 return "SMMU ID must be specified for the given Stream IDs";
1700 case MANIFEST_ERROR_MISMATCH_DMA_ACCESS_PERMISSIONS:
1701 return "DMA device access permissions must match memory region "
1702 "attributes";
1703 case MANIFEST_ERROR_STREAM_IDS_OVERFLOW:
1704 return "DMA device stream ID count exceeds predefined limit";
1705 case MANIFEST_ERROR_DMA_ACCESS_PERMISSIONS_OVERFLOW:
1706 return "DMA access permissions count exceeds predefined limit";
Madhukar Pappireddy8a364472023-10-11 15:34:07 -05001707 case MANIFEST_ERROR_DMA_DEVICE_OVERFLOW:
1708 return "Number of device regions with DMA peripheral exceeds "
1709 "limit.";
David Brazdil7a462ec2019-08-15 12:27:47 +01001710 }
1711
1712 panic("Unexpected manifest return code.");
1713}