blob: 628b13792bcaaf1297af954bf1c33ee4f4805566 [file] [log] [blame]
David Brazdil7a462ec2019-08-15 12:27:47 +01001/*
2 * Copyright 2019 The Hafnium Authors.
3 *
Andrew Walbrane959ec12020-06-17 15:01:09 +01004 * Use of this source code is governed by a BSD-style
5 * license that can be found in the LICENSE file or at
6 * https://opensource.org/licenses/BSD-3-Clause.
David Brazdil7a462ec2019-08-15 12:27:47 +01007 */
8
9#include "hf/manifest.h"
10
J-Alvesd8a1d362023-03-08 11:15:28 +000011#include <stddef.h>
J-Alves77b6f4f2023-03-15 11:34:49 +000012#include <stdint.h>
J-Alvesd8a1d362023-03-08 11:15:28 +000013
J-Alves35315782022-01-25 17:58:32 +000014#include "hf/arch/types.h"
J-Alves77b6f4f2023-03-15 11:34:49 +000015#include "hf/arch/vmid_base.h"
J-Alves35315782022-01-25 17:58:32 +000016
David Brazdil7a462ec2019-08-15 12:27:47 +010017#include "hf/addr.h"
Daniel Boulbya2f8c662021-11-26 17:52:53 +000018#include "hf/assert.h"
J-Alves35315782022-01-25 17:58:32 +000019#include "hf/boot_info.h"
J-Alves77b6f4f2023-03-15 11:34:49 +000020#include "hf/boot_params.h"
David Brazdil7a462ec2019-08-15 12:27:47 +010021#include "hf/check.h"
Andrew Scullae9962e2019-10-03 16:51:16 +010022#include "hf/dlog.h"
J-Alves77b6f4f2023-03-15 11:34:49 +000023#include "hf/fdt.h"
24#include "hf/mm.h"
25#include "hf/mpool.h"
J-Alves2f86c1e2022-02-23 18:44:19 +000026#include "hf/sp_pkg.h"
David Brazdil7a462ec2019-08-15 12:27:47 +010027#include "hf/static_assert.h"
28#include "hf/std.h"
29
/*
 * Evaluates `expr` and, if the resulting manifest_return_code is anything
 * other than MANIFEST_SUCCESS, returns that code from the enclosing
 * function. Standard do/while(0) wrapper so it behaves as one statement.
 */
#define TRY(expr)                                            \
	do {                                                 \
		enum manifest_return_code ret_code = (expr); \
		if (ret_code != MANIFEST_SUCCESS) {          \
			return ret_code;                     \
		}                                            \
	} while (0)
37
/* Highest assignable VM ID given the configured offset and VM count. */
#define VM_ID_MAX (HF_VM_ID_OFFSET + MAX_VMS - 1)
/* Decimal digits needed to print VM_ID_MAX (checked by static_assert below). */
#define VM_ID_MAX_DIGITS (5)
#define VM_NAME_EXTRA_CHARS (3) /* "vm" + number + '\0' */
#define VM_NAME_MAX_SIZE (VM_ID_MAX_DIGITS + VM_NAME_EXTRA_CHARS)
static_assert(VM_NAME_MAX_SIZE <= STRING_MAX_SIZE,
	      "VM name does not fit into a struct string.");
static_assert(VM_ID_MAX <= 99999, "Insufficient VM_NAME_BUF_SIZE");
static_assert((HF_OTHER_WORLD_ID > VM_ID_MAX) ||
		      (HF_OTHER_WORLD_ID < HF_VM_ID_BASE),
	      "TrustZone VM ID clashes with normal VM range.");
David Brazdil7a462ec2019-08-15 12:27:47 +010048
/* Bitmap to track boot order values in use. */
#define BOOT_ORDER_ENTRY_BITS (sizeof(uint64_t) * 8)
/* Number of 64-bit words needed for one bit per possible boot order value. */
#define BOOT_ORDER_MAP_ENTRIES                                 \
	((DEFAULT_BOOT_ORDER + (BOOT_ORDER_ENTRY_BITS - 1)) / \
	 BOOT_ORDER_ENTRY_BITS)
54
/**
 * A struct to keep track of the partitions properties during early boot
 * manifest parsing:
 * - Interrupts ID.
 * - Physical memory ranges.
 */
struct manifest_data {
	/* The fully parsed manifest, handed back to the caller. */
	struct manifest manifest;
	/* Bitmap of interrupt IDs claimed by any partition parsed so far. */
	struct interrupt_bitmap intids;
	/*
	 * Allocate enough for the maximum amount of memory regions defined via
	 * the partitions manifest, and regions for each partition
	 * address-space.
	 */
	struct mem_range mem_regions[PARTITION_MAX_MEMORY_REGIONS * MAX_VMS +
				     PARTITION_MAX_DEVICE_REGIONS * MAX_VMS +
				     MAX_VMS];
	/* One bit per boot order value already taken (see check_boot_order). */
	uint64_t boot_order_values[BOOT_ORDER_MAP_ENTRIES];
};
Olivier Deprez93644652022-09-09 11:01:12 +020074
/**
 * Calculate the number of entries in the ppool that are required to
 * store the manifest_data struct.
 */
static const size_t manifest_data_ppool_entries =
	(align_up(sizeof(struct manifest_data), MM_PPOOL_ENTRY_SIZE) /
	 MM_PPOOL_ENTRY_SIZE);

/* Singleton parsing state, allocated from the ppool by manifest_data_init. */
static struct manifest_data *manifest_data;
/* Index used to track the number of memory regions allocated. */
static size_t allocated_mem_regions_index = 0;
Daniel Boulby801f8ef2022-06-27 14:21:01 +010086
Kathleen Capella4a2a6e72023-04-21 14:43:26 -040087static bool check_boot_order(uint16_t boot_order)
88{
89 uint16_t i;
90 uint64_t boot_order_mask;
91
92 if (boot_order == DEFAULT_BOOT_ORDER) {
93 return true;
94 }
95 if (boot_order > DEFAULT_BOOT_ORDER) {
96 dlog_error("Boot order should not exceed %x",
97 DEFAULT_BOOT_ORDER);
98 return false;
99 }
100
101 i = boot_order / BOOT_ORDER_ENTRY_BITS;
102 boot_order_mask = 1 << (boot_order % BOOT_ORDER_ENTRY_BITS);
103
104 if ((boot_order_mask & manifest_data->boot_order_values[i]) != 0U) {
105 dlog_error("Boot order must be a unique value.");
106 return false;
107 }
108
109 manifest_data->boot_order_values[i] |= boot_order_mask;
110
111 return true;
112}
113
Daniel Boulby801f8ef2022-06-27 14:21:01 +0100114/**
Olivier Deprez93644652022-09-09 11:01:12 +0200115 * Allocates and clear memory for the manifest data in the given memory pool.
Daniel Boulby801f8ef2022-06-27 14:21:01 +0100116 * Returns true if the memory is successfully allocated.
117 */
Olivier Deprez93644652022-09-09 11:01:12 +0200118static bool manifest_data_init(struct mpool *ppool)
Daniel Boulby801f8ef2022-06-27 14:21:01 +0100119{
Olivier Deprez93644652022-09-09 11:01:12 +0200120 manifest_data = (struct manifest_data *)mpool_alloc_contiguous(
121 ppool, manifest_data_ppool_entries, 1);
J-Alves5c0ae6f2023-06-14 15:20:21 +0100122
123 assert(manifest_data != NULL);
124
Olivier Deprez93644652022-09-09 11:01:12 +0200125 memset_s(manifest_data, sizeof(struct manifest_data), 0,
126 sizeof(struct manifest_data));
Daniel Boulbya7e9e182022-06-27 14:21:01 +0100127
Olivier Deprez93644652022-09-09 11:01:12 +0200128 return manifest_data != NULL;
Daniel Boulby801f8ef2022-06-27 14:21:01 +0100129}
130
/**
 * Frees the memory used for the manifest data in the given memory pool.
 */
static void manifest_data_deinit(struct mpool *ppool)
{
	/**
	 * Clear and return the memory used for the manifest_data struct to the
	 * memory pool.
	 */
	memset_s(manifest_data, sizeof(struct manifest_data), 0,
		 sizeof(struct manifest_data));
	mpool_add_chunk(ppool, manifest_data, manifest_data_ppool_entries);

	/*
	 * NOTE(review): manifest_data still points at the returned chunk;
	 * manifest_data_init() must run again before any reuse.
	 */

	/**
	 * Reset the index used for tracking the number of memory regions
	 * allocated.
	 */
	allocated_mem_regions_index = 0;
}
150
J-Alves19e20cf2023-08-02 12:48:55 +0100151static inline size_t count_digits(ffa_id_t vm_id)
David Brazdilb856be62020-03-25 10:14:55 +0000152{
153 size_t digits = 0;
154
155 do {
156 digits++;
157 vm_id /= 10;
158 } while (vm_id);
159 return digits;
160}
161
/**
 * Generates a string with the two letters "vm" followed by an integer.
 * Assumes `str` provides at least VM_NAME_MAX_SIZE bytes of storage.
 */
static void generate_vm_node_name(struct string *str, ffa_id_t vm_id)
{
	static const char *digits = "0123456789";
	size_t vm_id_digits = count_digits(vm_id);
	char *base = str->data;
	/* Position one past the last character; the name is written backwards. */
	char *ptr = base + (VM_NAME_EXTRA_CHARS + vm_id_digits);

	assert(vm_id_digits <= VM_ID_MAX_DIGITS);
	*(--ptr) = '\0';
	/* Emit decimal digits, least significant first. */
	do {
		*(--ptr) = digits[vm_id % 10];
		vm_id /= 10;
	} while (vm_id);
	*(--ptr) = 'm';
	*(--ptr) = 'v';
	/* Exactly VM_NAME_EXTRA_CHARS + vm_id_digits bytes were written. */
	assert(ptr == base);
}
183
Andrew Scullae9962e2019-10-03 16:51:16 +0100184/**
Andrew Scullb2c3a242019-11-04 13:52:36 +0000185 * Read a boolean property: true if present; false if not. If present, the value
186 * of the property must be empty else it is considered malformed.
Andrew Scullae9962e2019-10-03 16:51:16 +0100187 */
Andrew Scullb2c3a242019-11-04 13:52:36 +0000188static enum manifest_return_code read_bool(const struct fdt_node *node,
189 const char *property, bool *out)
Andrew Scullae9962e2019-10-03 16:51:16 +0100190{
David Brazdilb856be62020-03-25 10:14:55 +0000191 struct memiter data;
192 bool present = fdt_read_property(node, property, &data);
Andrew Scullae9962e2019-10-03 16:51:16 +0100193
David Brazdilb856be62020-03-25 10:14:55 +0000194 if (present && memiter_size(&data) != 0) {
Andrew Scullb2c3a242019-11-04 13:52:36 +0000195 return MANIFEST_ERROR_MALFORMED_BOOLEAN;
196 }
197
198 *out = present;
199 return MANIFEST_SUCCESS;
Andrew Scullae9962e2019-10-03 16:51:16 +0100200}
201
/**
 * Reads a string property into `out`, mapping string_init failures onto
 * manifest error codes. Returns MANIFEST_ERROR_PROPERTY_NOT_FOUND when the
 * property is absent.
 */
static enum manifest_return_code read_string(const struct fdt_node *node,
					     const char *property,
					     struct string *out)
{
	struct memiter data;

	if (!fdt_read_property(node, property, &data)) {
		return MANIFEST_ERROR_PROPERTY_NOT_FOUND;
	}

	switch (string_init(out, &data)) {
	case STRING_SUCCESS:
		return MANIFEST_SUCCESS;
	case STRING_ERROR_INVALID_INPUT:
		return MANIFEST_ERROR_MALFORMED_STRING;
	case STRING_ERROR_TOO_LONG:
		return MANIFEST_ERROR_STRING_TOO_LONG;
	}
	/*
	 * NOTE(review): no default case — this relies on string_init
	 * returning only the enumerators above; falling off the end of a
	 * non-void function would be undefined behaviour.
	 */
}
221
222static enum manifest_return_code read_optional_string(
David Brazdil136f2942019-09-23 14:11:03 +0100223 const struct fdt_node *node, const char *property, struct string *out)
Andrew Scull72b43c02019-09-18 13:53:45 +0100224{
David Brazdil136f2942019-09-23 14:11:03 +0100225 enum manifest_return_code ret;
Andrew Scull72b43c02019-09-18 13:53:45 +0100226
David Brazdil136f2942019-09-23 14:11:03 +0100227 ret = read_string(node, property, out);
228 if (ret == MANIFEST_ERROR_PROPERTY_NOT_FOUND) {
229 string_init_empty(out);
230 ret = MANIFEST_SUCCESS;
Andrew Scull72b43c02019-09-18 13:53:45 +0100231 }
David Brazdil136f2942019-09-23 14:11:03 +0100232 return ret;
Andrew Scull72b43c02019-09-18 13:53:45 +0100233}
234
/**
 * Reads a 64-bit unsigned integer property from `node` into `out`.
 *
 * Returns MANIFEST_ERROR_PROPERTY_NOT_FOUND when the property is absent,
 * and MANIFEST_ERROR_MALFORMED_INTEGER when its size is not one
 * fdt_parse_number accepts.
 */
static enum manifest_return_code read_uint64(const struct fdt_node *node,
					     const char *property,
					     uint64_t *out)
{
	struct memiter data;

	if (!fdt_read_property(node, property, &data)) {
		return MANIFEST_ERROR_PROPERTY_NOT_FOUND;
	}

	if (!fdt_parse_number(&data, memiter_size(&data), out)) {
		return MANIFEST_ERROR_MALFORMED_INTEGER;
	}

	return MANIFEST_SUCCESS;
}
251
David Brazdil080ee312020-02-25 15:30:30 -0800252static enum manifest_return_code read_optional_uint64(
253 const struct fdt_node *node, const char *property,
254 uint64_t default_value, uint64_t *out)
255{
256 enum manifest_return_code ret;
257
258 ret = read_uint64(node, property, out);
259 if (ret == MANIFEST_ERROR_PROPERTY_NOT_FOUND) {
260 *out = default_value;
261 return MANIFEST_SUCCESS;
262 }
263 return ret;
264}
265
Olivier Deprez62d99e32020-01-09 15:58:07 +0100266static enum manifest_return_code read_uint32(const struct fdt_node *node,
267 const char *property,
268 uint32_t *out)
269{
270 uint64_t value;
271
272 TRY(read_uint64(node, property, &value));
273
274 if (value > UINT32_MAX) {
275 return MANIFEST_ERROR_INTEGER_OVERFLOW;
276 }
277
278 *out = (uint32_t)value;
279 return MANIFEST_SUCCESS;
280}
281
Manish Pandeye68e7932020-04-23 15:29:28 +0100282static enum manifest_return_code read_optional_uint32(
283 const struct fdt_node *node, const char *property,
284 uint32_t default_value, uint32_t *out)
285{
286 enum manifest_return_code ret;
287
288 ret = read_uint32(node, property, out);
289 if (ret == MANIFEST_ERROR_PROPERTY_NOT_FOUND) {
290 *out = default_value;
291 return MANIFEST_SUCCESS;
292 }
293 return ret;
294}
295
David Brazdil7a462ec2019-08-15 12:27:47 +0100296static enum manifest_return_code read_uint16(const struct fdt_node *node,
297 const char *property,
298 uint16_t *out)
299{
300 uint64_t value;
301
302 TRY(read_uint64(node, property, &value));
David Brazdil7a462ec2019-08-15 12:27:47 +0100303 if (value > UINT16_MAX) {
304 return MANIFEST_ERROR_INTEGER_OVERFLOW;
305 }
306
307 *out = (uint16_t)value;
308 return MANIFEST_SUCCESS;
309}
310
J-Alvesb37fd082020-10-22 12:29:21 +0100311static enum manifest_return_code read_optional_uint16(
312 const struct fdt_node *node, const char *property,
313 uint16_t default_value, uint16_t *out)
314{
315 enum manifest_return_code ret;
316
317 ret = read_uint16(node, property, out);
318 if (ret == MANIFEST_ERROR_PROPERTY_NOT_FOUND) {
319 *out = default_value;
320 return MANIFEST_SUCCESS;
321 }
322
Kathleen Capella4a2a6e72023-04-21 14:43:26 -0400323 return ret;
J-Alvesb37fd082020-10-22 12:29:21 +0100324}
325
Olivier Deprez62d99e32020-01-09 15:58:07 +0100326static enum manifest_return_code read_uint8(const struct fdt_node *node,
327 const char *property, uint8_t *out)
328{
329 uint64_t value;
330
331 TRY(read_uint64(node, property, &value));
332
333 if (value > UINT8_MAX) {
334 return MANIFEST_ERROR_INTEGER_OVERFLOW;
335 }
336
337 *out = (uint8_t)value;
338 return MANIFEST_SUCCESS;
339}
340
J-Alves4369bd92020-08-07 16:35:36 +0100341static enum manifest_return_code read_optional_uint8(
342 const struct fdt_node *node, const char *property,
343 uint8_t default_value, uint8_t *out)
344{
345 enum manifest_return_code ret;
346
347 ret = read_uint8(node, property, out);
348 if (ret == MANIFEST_ERROR_PROPERTY_NOT_FOUND) {
349 *out = default_value;
350 return MANIFEST_SUCCESS;
351 }
352
353 return MANIFEST_SUCCESS;
354}
355
/* Iterator over a devicetree property holding a list of 32-bit cells. */
struct uint32list_iter {
	/* Remaining, unconsumed bytes of the property value. */
	struct memiter mem_it;
};
359
/**
 * Initialises `out` to iterate the cells of a uint32-list property.
 *
 * The property size must be a multiple of sizeof(uint32_t), otherwise
 * MANIFEST_ERROR_MALFORMED_INTEGER_LIST is returned. When the property is
 * missing, the iterator is left valid but empty and
 * MANIFEST_ERROR_PROPERTY_NOT_FOUND is returned.
 */
static enum manifest_return_code read_uint32list(const struct fdt_node *node,
						 const char *property,
						 struct uint32list_iter *out)
{
	struct memiter data;

	if (!fdt_read_property(node, property, &data)) {
		/* Empty iterator so callers can still safely probe it. */
		memiter_init(&out->mem_it, NULL, 0);
		return MANIFEST_ERROR_PROPERTY_NOT_FOUND;
	}

	if ((memiter_size(&data) % sizeof(uint32_t)) != 0) {
		return MANIFEST_ERROR_MALFORMED_INTEGER_LIST;
	}

	out->mem_it = data;
	return MANIFEST_SUCCESS;
}
378
J-Alves4369bd92020-08-07 16:35:36 +0100379static enum manifest_return_code read_optional_uint32list(
380 const struct fdt_node *node, const char *property,
381 struct uint32list_iter *out)
382{
383 enum manifest_return_code ret = read_uint32list(node, property, out);
384
385 if (ret == MANIFEST_ERROR_PROPERTY_NOT_FOUND) {
386 return MANIFEST_SUCCESS;
387 }
388 return ret;
389}
390
Andrew Scullae9962e2019-10-03 16:51:16 +0100391static bool uint32list_has_next(const struct uint32list_iter *list)
392{
393 return memiter_size(&list->mem_it) > 0;
394}
395
/**
 * Consumes and returns the next 32-bit value from the iterator.
 * Callers must check uint32list_has_next() first; an exhausted iterator
 * trips the CHECK below.
 */
static enum manifest_return_code uint32list_get_next(
	struct uint32list_iter *list, uint32_t *out)
{
	uint64_t num;

	CHECK(uint32list_has_next(list));
	if (!fdt_parse_number(&list->mem_it, sizeof(uint32_t), &num)) {
		return MANIFEST_ERROR_MALFORMED_INTEGER;
	}

	*out = (uint32_t)num;
	return MANIFEST_SUCCESS;
}
409
/**
 * Parses the manifest properties shared by all VM kinds: partition flags,
 * debug name, the SMC whitelist, and — for non-primary VMs — memory size,
 * vCPU count and optional FDT filename.
 */
static enum manifest_return_code parse_vm_common(const struct fdt_node *node,
						 struct manifest_vm *vm,
						 ffa_id_t vm_id)
{
	struct uint32list_iter smcs;
	size_t idx;

	TRY(read_bool(node, "is_ffa_partition", &vm->is_ffa_partition));

	TRY(read_bool(node, "hyp_loaded", &vm->is_hyp_loaded));

	TRY(read_string(node, "debug_name", &vm->debug_name));

	TRY(read_optional_uint32list(node, "smc_whitelist", &smcs));
	while (uint32list_has_next(&smcs) &&
	       vm->smc_whitelist.smc_count < MAX_SMCS) {
		idx = vm->smc_whitelist.smc_count++;
		TRY(uint32list_get_next(&smcs, &vm->smc_whitelist.smcs[idx]));
	}

	/* Entries beyond MAX_SMCS are dropped with only a warning. */
	if (uint32list_has_next(&smcs)) {
		dlog_warning("%s SMC whitelist too long.\n",
			     vm->debug_name.data);
	}

	TRY(read_bool(node, "smc_whitelist_permissive",
		      &vm->smc_whitelist.permissive));

	/* Only secondary VMs carry sizing and vCPU information. */
	if (vm_id != HF_PRIMARY_VM_ID) {
		TRY(read_uint64(node, "mem_size", &vm->secondary.mem_size));
		TRY(read_uint16(node, "vcpu_count", &vm->secondary.vcpu_count));
		TRY(read_optional_string(node, "fdt_filename",
					 &vm->secondary.fdt_filename));
	}

	return MANIFEST_SUCCESS;
}
447
/**
 * Parses the hypervisor-manifest node of a (non-FF-A) VM: kernel filename,
 * primary-VM-only ramdisk/boot-address properties, and the exception level
 * the VM runs at (defaulting to EL1 when "exception-level" is absent).
 */
static enum manifest_return_code parse_vm(struct fdt_node *node,
					  struct manifest_vm *vm,
					  ffa_id_t vm_id)
{
	TRY(read_optional_string(node, "kernel_filename",
				 &vm->kernel_filename));

	if (vm_id == HF_PRIMARY_VM_ID) {
		TRY(read_optional_string(node, "ramdisk_filename",
					 &vm->primary.ramdisk_filename));
		TRY(read_optional_uint64(node, "boot_address",
					 MANIFEST_INVALID_ADDRESS,
					 &vm->primary.boot_address));
	}
	TRY(read_optional_uint8(node, "exception-level", (uint8_t)EL1,
				(uint8_t *)&vm->partition.run_time_el));

	return MANIFEST_SUCCESS;
}
467
/**
 * Returns true when the region [base_address, base_address +
 * page_count * PAGE_SIZE - 1] has either of its end points inside one of
 * the given (inclusive) ranges.
 *
 * NOTE(review): a region that strictly encloses a range (starts before it
 * and ends after it) is NOT detected by this end-point test. The overlap
 * tracking in check_and_record_memory_used relies on this function —
 * confirm that the fully-enclosing case cannot arise in practice.
 */
static bool is_memory_region_within_ranges(uintptr_t base_address,
					   uint32_t page_count,
					   const struct mem_range *ranges,
					   const size_t ranges_size)
{
	/* Inclusive address of the region's last byte. */
	uintptr_t region_end =
		base_address + ((uintptr_t)page_count * PAGE_SIZE - 1);

	for (size_t i = 0; i < ranges_size; i++) {
		uintptr_t base = (uintptr_t)pa_addr(ranges[i].begin);
		uintptr_t end = (uintptr_t)pa_addr(ranges[i].end);

		if ((base_address >= base && base_address <= end) ||
		    (region_end >= base && region_end <= end)) {
			return true;
		}
	}

	return false;
}
488
/**
 * Logs the given physical memory ranges at verbose level. `ns` selects the
 * "NS" (non-secure) or "S" (secure) tag in the output. Does nothing unless
 * the build's LOG_LEVEL includes verbose messages.
 */
void dump_memory_ranges(const struct mem_range *ranges,
			const size_t ranges_size, bool ns)
{
	if (LOG_LEVEL < LOG_LEVEL_VERBOSE) {
		return;
	}

	dlog("%s Memory ranges:\n", ns ? "NS" : "S");

	for (size_t i = 0; i < ranges_size; i++) {
		uintptr_t begin = pa_addr(ranges[i].begin);
		uintptr_t end = pa_addr(ranges[i].end);
		/* Range ends are inclusive; round up to whole pages. */
		size_t page_count =
			align_up(pa_difference(ranges[i].begin, ranges[i].end),
				 PAGE_SIZE) /
			PAGE_SIZE;

		dlog(" [%lx - %lx (%zu pages)]\n", begin, end, page_count);
	}
}
509
510/**
511 * Check the partition's assigned memory is contained in the memory ranges
512 * configured for the SWd, in the SPMC's manifest.
513 */
514static enum manifest_return_code check_partition_memory_is_valid(
515 uintptr_t base_address, uint32_t page_count, uint32_t attributes,
516 const struct boot_params *params)
517{
518 bool is_secure_region =
519 (attributes & MANIFEST_REGION_ATTR_SECURITY) == 0U;
Daniel Boulby4339edc2024-02-21 14:59:00 +0000520 bool is_device_region =
521 (attributes & MANIFEST_REGION_ATTR_MEMORY_TYPE_DEVICE) != 0U;
522 const struct mem_range *ranges_from_manifest;
523 size_t ranges_count;
524 bool within_ranges;
525 enum manifest_return_code error_return;
526
527 if (!is_device_region) {
528 ranges_from_manifest = is_secure_region ? params->mem_ranges
529 : params->ns_mem_ranges;
530 ranges_count = is_secure_region ? params->mem_ranges_count
531 : params->ns_mem_ranges_count;
532 error_return = MANIFEST_ERROR_MEM_REGION_INVALID;
533 } else {
534 ranges_from_manifest = is_secure_region
535 ? params->device_mem_ranges
536 : params->ns_device_mem_ranges;
537 ranges_count = is_secure_region
538 ? params->device_mem_ranges_count
539 : params->ns_device_mem_ranges_count;
540 error_return = MANIFEST_ERROR_DEVICE_MEM_REGION_INVALID;
541 }
542
543 within_ranges = is_memory_region_within_ranges(
J-Alves77b6f4f2023-03-15 11:34:49 +0000544 base_address, page_count, ranges_from_manifest, ranges_count);
545
Daniel Boulby4339edc2024-02-21 14:59:00 +0000546 return within_ranges ? MANIFEST_SUCCESS : error_return;
J-Alves77b6f4f2023-03-15 11:34:49 +0000547}
548
/*
 * Keep track of the memory allocated by partitions. This includes memory region
 * nodes and device region nodes defined in their respective partition
 * manifests, as well address space defined from their load address.
 */
static enum manifest_return_code check_and_record_memory_used(
	uintptr_t base_address, uint32_t page_count)
{
	bool overlap_of_regions;

	/* Zero-sized regions are rejected outright. */
	if (page_count == 0U) {
		dlog_error(
			"Empty memory region defined with base address: "
			"%#lx.\n",
			base_address);
		return MANIFEST_ERROR_MEM_REGION_EMPTY;
	}

	if (!is_aligned(base_address, PAGE_SIZE)) {
		dlog_error("base_address (%#lx) is not aligned to page size.\n",
			   base_address);
		return MANIFEST_ERROR_MEM_REGION_UNALIGNED;
	}

	/*
	 * Compare against every region recorded so far.
	 * NOTE(review): relies on the end-point test in
	 * is_memory_region_within_ranges, which does not flag a new region
	 * that fully encloses a previously recorded one — confirm that case
	 * cannot occur.
	 */
	overlap_of_regions = is_memory_region_within_ranges(
		base_address, page_count, manifest_data->mem_regions,
		allocated_mem_regions_index);

	if (!overlap_of_regions) {
		paddr_t begin = pa_init(base_address);

		/* Record with an inclusive end address. */
		manifest_data->mem_regions[allocated_mem_regions_index].begin =
			begin;
		manifest_data->mem_regions[allocated_mem_regions_index].end =
			pa_add(begin, page_count * PAGE_SIZE - 1);
		allocated_mem_regions_index++;

		return MANIFEST_SUCCESS;
	}

	return MANIFEST_ERROR_MEM_REGION_OVERLAP;
}
591
/**
 * Parses the FF-A partition manifest's memory-regions node into
 * `mem_regions`, validating each region's address against the boot
 * parameters, recording it for overlap detection, and reading the optional
 * SMMU/stream-ID (DMA) properties. When `rxtx->available` is set, regions
 * whose phandle matches the RX/TX phandles are bound as the RX/TX buffers.
 *
 * On success `*count` holds the number of regions parsed (capped at
 * PARTITION_MAX_MEMORY_REGIONS).
 *
 * NOTE(review): if rxtx->available is true but no child region matched the
 * RX/TX phandles, the final size check dereferences rxtx->rx_buffer /
 * rxtx->tx_buffer, which this function never verified — confirm callers
 * guarantee both buffers are described in this node.
 */
static enum manifest_return_code parse_ffa_memory_region_node(
	struct fdt_node *mem_node, uintptr_t load_address,
	struct memory_region *mem_regions, uint16_t *count, struct rx_tx *rxtx,
	const struct boot_params *boot_params)
{
	uint32_t phandle;
	uint16_t i = 0;
	uint32_t j = 0;
	uintptr_t relative_address;
	struct uint32list_iter list;

	dlog_verbose(" Partition memory regions\n");

	if (!fdt_is_compatible(mem_node, "arm,ffa-manifest-memory-regions")) {
		return MANIFEST_ERROR_NOT_COMPATIBLE;
	}

	if (!fdt_first_child(mem_node)) {
		return MANIFEST_ERROR_MEMORY_REGION_NODE_EMPTY;
	}

	do {
		dlog_verbose(" Memory Region[%u]\n", i);

		TRY(read_optional_string(mem_node, "description",
					 &mem_regions[i].name));
		dlog_verbose(" Name: %s\n",
			     string_data(&mem_regions[i].name));

		/*
		 * A region is placed either by an absolute "base-address" or
		 * by a "relative-address" offset from the load address;
		 * exactly one of the two must be present.
		 */
		TRY(read_optional_uint64(mem_node, "base-address",
					 MANIFEST_INVALID_ADDRESS,
					 &mem_regions[i].base_address));
		dlog_verbose(" Base address: %#lx\n",
			     mem_regions[i].base_address);

		TRY(read_optional_uint64(mem_node, "relative-address",
					 MANIFEST_INVALID_ADDRESS,
					 &relative_address));
		if (relative_address != MANIFEST_INVALID_ADDRESS) {
			dlog_verbose(" Relative address: %#lx\n",
				     relative_address);
		}

		if (mem_regions[i].base_address == MANIFEST_INVALID_ADDRESS &&
		    relative_address == MANIFEST_INVALID_ADDRESS) {
			return MANIFEST_ERROR_PROPERTY_NOT_FOUND;
		}

		if (mem_regions[i].base_address != MANIFEST_INVALID_ADDRESS &&
		    relative_address != MANIFEST_INVALID_ADDRESS) {
			return MANIFEST_ERROR_BASE_ADDRESS_AND_RELATIVE_ADDRESS;
		}

		/* Guard load_address + relative_address against wrap-around. */
		if (relative_address != MANIFEST_INVALID_ADDRESS &&
		    relative_address > UINT64_MAX - load_address) {
			return MANIFEST_ERROR_INTEGER_OVERFLOW;
		}

		if (relative_address != MANIFEST_INVALID_ADDRESS) {
			mem_regions[i].base_address =
				load_address + relative_address;
		}

		TRY(read_uint32(mem_node, "pages-count",
				&mem_regions[i].page_count));
		dlog_verbose(" Pages_count: %u\n",
			     mem_regions[i].page_count);

		TRY(read_uint32(mem_node, "attributes",
				&mem_regions[i].attributes));

		/*
		 * Check RWX permission attributes.
		 * Security attribute is checked at load phase.
		 */
		uint32_t permissions = mem_regions[i].attributes &
				       (MANIFEST_REGION_ATTR_READ |
					MANIFEST_REGION_ATTR_WRITE |
					MANIFEST_REGION_ATTR_EXEC);
		/* Only R, RW and RX are acceptable (no W-only, no WX). */
		if (permissions != MANIFEST_REGION_ATTR_READ &&
		    permissions != (MANIFEST_REGION_ATTR_READ |
				    MANIFEST_REGION_ATTR_WRITE) &&
		    permissions != (MANIFEST_REGION_ATTR_READ |
				    MANIFEST_REGION_ATTR_EXEC)) {
			return MANIFEST_ERROR_INVALID_MEM_PERM;
		}

		/* Filter memory region attributes. */
		mem_regions[i].attributes &= MANIFEST_REGION_ALL_ATTR_MASK;

		dlog_verbose(" Attributes: %#x\n",
			     mem_regions[i].attributes);

		TRY(check_partition_memory_is_valid(
			mem_regions[i].base_address, mem_regions[i].page_count,
			mem_regions[i].attributes, boot_params));

		TRY(check_and_record_memory_used(mem_regions[i].base_address,
						 mem_regions[i].page_count));

		TRY(read_optional_uint32(mem_node, "smmu-id",
					 MANIFEST_INVALID_ID,
					 &mem_regions[i].dma_prop.smmu_id));
		if (mem_regions[i].dma_prop.smmu_id != MANIFEST_INVALID_ID) {
			dlog_verbose(" smmu-id: %u\n",
				     mem_regions[i].dma_prop.smmu_id);
		}

		TRY(read_optional_uint32list(mem_node, "stream-ids", &list));
		dlog_verbose(" Stream IDs assigned:\n");

		j = 0;
		while (uint32list_has_next(&list)) {
			if (j == PARTITION_MAX_STREAMS_PER_DEVICE) {
				return MANIFEST_ERROR_STREAM_IDS_OVERFLOW;
			}

			TRY(uint32list_get_next(
				&list, &mem_regions[i].dma_prop.stream_ids[j]));
			dlog_verbose(" %u\n",
				     mem_regions[i].dma_prop.stream_ids[j]);
			j++;
		}
		if (j == 0) {
			dlog_verbose(" None\n");
		} else if (mem_regions[i].dma_prop.smmu_id ==
			   MANIFEST_INVALID_ID) {
			/*
			 * SMMU ID must be specified if the partition specifies
			 * Stream IDs for any device upstream of SMMU.
			 */
			return MANIFEST_ERROR_MISSING_SMMU_ID;
		}

		mem_regions[i].dma_prop.stream_count = j;

		TRY(read_optional_uint32list(
			mem_node, "stream-ids-access-permissions", &list));
		dlog_verbose(" Access permissions of Stream IDs:\n");

		j = 0;
		while (uint32list_has_next(&list)) {
			uint32_t permissions;

			if (j == PARTITION_MAX_STREAMS_PER_DEVICE) {
				return MANIFEST_ERROR_DMA_ACCESS_PERMISSIONS_OVERFLOW;
			}

			TRY(uint32list_get_next(&list, &permissions));
			dlog_verbose(" %u\n", permissions);

			/* The first entry sets the expected permissions. */
			if (j == 0) {
				mem_regions[i].dma_prop.dma_access_permissions =
					permissions;
			}

			/*
			 * All stream ids belonging to a dma device must specify
			 * the same access permissions.
			 */
			if (permissions !=
			    mem_regions[i].dma_prop.dma_access_permissions) {
				return MANIFEST_ERROR_MISMATCH_DMA_ACCESS_PERMISSIONS;
			}

			j++;
		}

		if (j == 0) {
			dlog_verbose(" None\n");
		} else if (j != mem_regions[i].dma_prop.stream_count) {
			/* One permission entry is required per stream ID. */
			return MANIFEST_ERROR_MISMATCH_DMA_ACCESS_PERMISSIONS;
		}

		if (j > 0) {
			/* Filter the dma access permissions. */
			mem_regions[i].dma_prop.dma_access_permissions &=
				MANIFEST_REGION_ALL_ATTR_MASK;
		}

		if (rxtx->available) {
			TRY(read_optional_uint32(
				mem_node, "phandle",
				(uint32_t)MANIFEST_INVALID_ADDRESS, &phandle));
			if (phandle == rxtx->rx_phandle) {
				dlog_verbose(" Assigned as RX buffer\n");
				rxtx->rx_buffer = &mem_regions[i];
			} else if (phandle == rxtx->tx_phandle) {
				dlog_verbose(" Assigned as TX buffer\n");
				rxtx->tx_buffer = &mem_regions[i];
			}
		}

		i++;
	} while (fdt_next_sibling(mem_node) &&
		 (i < PARTITION_MAX_MEMORY_REGIONS));

	/* RX and TX buffers must be the same size. */
	if (rxtx->available &&
	    (rxtx->rx_buffer->page_count != rxtx->tx_buffer->page_count)) {
		return MANIFEST_ERROR_RXTX_SIZE_MISMATCH;
	}

	*count = i;

	return MANIFEST_SUCCESS;
}
798
Raghu Krishnamurthy98da1ca2022-10-04 08:59:01 -0700799static struct interrupt_info *device_region_get_interrupt_info(
800 struct device_region *dev_regions, uint32_t intid)
801{
802 for (uint32_t i = 0; i < ARRAY_SIZE(dev_regions->interrupts); i++) {
803 if (dev_regions->interrupts[i].id == intid) {
804 return &(dev_regions->interrupts[i]);
805 }
806 }
807 return NULL;
808}
809
/**
 * Parses the "device-regions" node of an FF-A partition manifest into
 * dev_regions (at most PARTITION_MAX_DEVICE_REGIONS children are consumed).
 *
 * For each child node this reads: description, base-address, pages-count,
 * attributes (RWX permissions; the memory type is forced to device), the
 * "interrupts" list of <id, attributes> pairs with optional MPIDR routing via
 * "interrupts-target", the optional smmu-id, the "stream-ids" list and the
 * exclusive-access flag.
 *
 * Outputs:
 *  - *count: number of device regions parsed.
 *  - *dma_device_count: number of DMA devices (regions declaring stream ids)
 *    allocated so far.
 *
 * Returns MANIFEST_SUCCESS, or a specific manifest error code on the first
 * malformed or out-of-range property encountered.
 */
static enum manifest_return_code parse_ffa_device_region_node(
	struct fdt_node *dev_node, struct device_region *dev_regions,
	uint16_t *count, uint8_t *dma_device_count,
	const struct boot_params *boot_params)
{
	struct uint32list_iter list;
	uint16_t i = 0;
	uint32_t j = 0;
	/* Seed with intids already reserved, to catch duplicates. */
	struct interrupt_bitmap allocated_intids = manifest_data->intids;
	/*
	 * NOTE(review): static, so the DMA device id counter keeps
	 * incrementing across calls — ids appear to be unique across all
	 * partitions rather than per-partition. Confirm this is intended,
	 * since *dma_device_count is reset per call but assigned from this
	 * cumulative counter below.
	 */
	static uint8_t dma_device_id = 0;

	dlog_verbose(" Partition Device Regions\n");

	if (!fdt_is_compatible(dev_node, "arm,ffa-manifest-device-regions")) {
		return MANIFEST_ERROR_NOT_COMPATIBLE;
	}

	if (!fdt_first_child(dev_node)) {
		return MANIFEST_ERROR_DEVICE_REGION_NODE_EMPTY;
	}

	*dma_device_count = 0;

	do {
		dlog_verbose(" Device Region[%u]\n", i);

		TRY(read_optional_string(dev_node, "description",
					 &dev_regions[i].name));
		dlog_verbose(" Name: %s\n",
			     string_data(&dev_regions[i].name));

		TRY(read_uint64(dev_node, "base-address",
				&dev_regions[i].base_address));
		dlog_verbose(" Base address: %#lx\n",
			     dev_regions[i].base_address);

		TRY(read_uint32(dev_node, "pages-count",
				&dev_regions[i].page_count));
		dlog_verbose(" Pages_count: %u\n",
			     dev_regions[i].page_count);

		/*
		 * Record the range as claimed (presumably also rejects
		 * overlaps with earlier regions — see helper).
		 */
		TRY(check_and_record_memory_used(dev_regions[i].base_address,
						 dev_regions[i].page_count));

		TRY(read_uint32(dev_node, "attributes",
				&dev_regions[i].attributes));
		/* Set the memory type attribute to device. */
		dev_regions[i].attributes =
			dev_regions[i].attributes |
			MANIFEST_REGION_ATTR_MEMORY_TYPE_DEVICE;

		/*
		 * Check RWX permission attributes.
		 * Security attribute is checked at load phase.
		 */
		uint32_t permissions = dev_regions[i].attributes &
				       (MANIFEST_REGION_ATTR_READ |
					MANIFEST_REGION_ATTR_WRITE |
					MANIFEST_REGION_ATTR_EXEC);

		/* Device regions may only be RO or RW, never executable. */
		if (permissions != MANIFEST_REGION_ATTR_READ &&
		    permissions != (MANIFEST_REGION_ATTR_READ |
				    MANIFEST_REGION_ATTR_WRITE)) {
			return MANIFEST_ERROR_INVALID_MEM_PERM;
		}

		/* Filter device region attributes. */
		dev_regions[i].attributes = dev_regions[i].attributes &
					    MANIFEST_REGION_ALL_ATTR_MASK;

		dlog_verbose(" Attributes: %#x\n",
			     dev_regions[i].attributes);

		TRY(check_partition_memory_is_valid(
			dev_regions[i].base_address, dev_regions[i].page_count,
			dev_regions[i].attributes, boot_params));

		/* "interrupts" is a flat list of <id, attributes> pairs. */
		TRY(read_optional_uint32list(dev_node, "interrupts", &list));
		dlog_verbose(" Interrupt List:\n");
		j = 0;
		while (uint32list_has_next(&list) &&
		       j < PARTITION_MAX_INTERRUPTS_PER_DEVICE) {
			uint32_t intid;

			TRY(uint32list_get_next(
				&list, &dev_regions[i].interrupts[j].id));
			intid = dev_regions[i].interrupts[j].id;

			dlog_verbose(" ID = %u\n", intid);

			/* A physical interrupt id may be claimed only once. */
			if (interrupt_bitmap_get_value(&allocated_intids,
						       intid) == 1U) {
				return MANIFEST_ERROR_INTERRUPT_ID_REPEATED;
			}

			interrupt_bitmap_set_value(&allocated_intids, intid);

			/* An id without its attributes word is malformed. */
			if (uint32list_has_next(&list)) {
				TRY(uint32list_get_next(&list,
							&dev_regions[i]
								 .interrupts[j]
								 .attributes));
			} else {
				return MANIFEST_ERROR_MALFORMED_INTEGER_LIST;
			}

			/*
			 * Routing defaults to invalid; "interrupts-target"
			 * below may fill it in.
			 */
			dev_regions[i].interrupts[j].mpidr_valid = false;
			dev_regions[i].interrupts[j].mpidr = 0;

			dlog_verbose(" attributes = %u\n",
				     dev_regions[i].interrupts[j].attributes);
			j++;
		}

		dev_regions[i].interrupt_count = j;
		if (j == 0) {
			dlog_verbose(" Empty\n");
		} else {
			/*
			 * "interrupts-target" is a flat list of
			 * <id, mpidr-upper32, mpidr-lower32> triples routing
			 * an already-declared interrupt to a specific PE.
			 */
			TRY(read_optional_uint32list(
				dev_node, "interrupts-target", &list));
			dlog_verbose(" Interrupt Target List:\n");

			while (uint32list_has_next(&list)) {
				uint32_t intid;
				uint64_t mpidr = 0;
				uint32_t mpidr_lower = 0;
				uint32_t mpidr_upper = 0;
				struct interrupt_info *info = NULL;

				TRY(uint32list_get_next(&list, &intid));

				dlog_verbose(" ID = %u\n", intid);

				/* The target must reference a declared id. */
				if (interrupt_bitmap_get_value(
					    &allocated_intids, intid) != 1U) {
					return MANIFEST_ERROR_INTERRUPT_ID_NOT_IN_LIST;
				}

				TRY(uint32list_get_next(&list, &mpidr_upper));
				TRY(uint32list_get_next(&list, &mpidr_lower));
				mpidr = mpidr_upper;
				mpidr <<= 32;
				mpidr |= mpidr_lower;

				info = device_region_get_interrupt_info(
					&dev_regions[i], intid);
				/*
				 * We should find info since
				 * interrupt_bitmap_get_value already ensures
				 * that we saw the interrupt and allocated ids
				 * for it.
				 */
				assert(info != NULL);
				info->mpidr = mpidr;
				info->mpidr_valid = true;
				dlog_verbose(" MPIDR = %#lx\n", mpidr);
			}
		}

		TRY(read_optional_uint32(dev_node, "smmu-id",
					 MANIFEST_INVALID_ID,
					 &dev_regions[i].smmu_id));
		if (dev_regions[i].smmu_id != MANIFEST_INVALID_ID) {
			dlog_verbose(" smmu-id: %u\n",
				     dev_regions[i].smmu_id);
		}

		TRY(read_optional_uint32list(dev_node, "stream-ids", &list));
		dlog_verbose(" Stream IDs assigned:\n");

		j = 0;
		while (uint32list_has_next(&list)) {
			if (j == PARTITION_MAX_STREAMS_PER_DEVICE) {
				return MANIFEST_ERROR_STREAM_IDS_OVERFLOW;
			}

			TRY(uint32list_get_next(&list,
						&dev_regions[i].stream_ids[j]));
			dlog_verbose(" %u\n",
				     dev_regions[i].stream_ids[j]);
			j++;
		}

		if (j == 0) {
			dlog_verbose(" None\n");
		} else if (dev_regions[i].smmu_id != MANIFEST_INVALID_ID) {
			/* Allocate the next DMA device id to this region. */
			dev_regions[i].dma_device_id = dma_device_id++;
			*dma_device_count = dma_device_id;

			if (*dma_device_count > PARTITION_MAX_DMA_DEVICES) {
				return MANIFEST_ERROR_DMA_DEVICE_OVERFLOW;
			}

			dlog_verbose(" dma peripheral device id: %u\n",
				     dev_regions[i].dma_device_id);
		} else {
			/*
			 * SMMU ID must be specified if the partition specifies
			 * Stream IDs for any device upstream of SMMU.
			 */
			return MANIFEST_ERROR_MISSING_SMMU_ID;
		}

		dev_regions[i].stream_count = j;

		TRY(read_bool(dev_node, "exclusive-access",
			      &dev_regions[i].exclusive_access));
		dlog_verbose(" Exclusive_access: %u\n",
			     dev_regions[i].exclusive_access);

		i++;
	} while (fdt_next_sibling(dev_node) &&
		 (i < PARTITION_MAX_DEVICE_REGIONS));

	*count = i;

	return MANIFEST_SUCCESS;
}
1028
J-Alvesabebe432022-05-31 14:40:50 +01001029static enum manifest_return_code sanity_check_ffa_manifest(
1030 struct manifest_vm *vm)
1031{
1032 uint16_t ffa_version_major;
1033 uint16_t ffa_version_minor;
1034 enum manifest_return_code ret_code = MANIFEST_SUCCESS;
1035 const char *error_string = "specified in manifest is unsupported";
1036 uint32_t k = 0;
Kathleen Capellaf71dee42023-08-08 16:24:14 -04001037 bool using_req2 = (vm->partition.messaging_method &
1038 (FFA_PARTITION_DIRECT_REQ2_RECV |
1039 FFA_PARTITION_DIRECT_REQ2_SEND)) != 0;
J-Alvesabebe432022-05-31 14:40:50 +01001040
1041 /* ensure that the SPM version is compatible */
1042 ffa_version_major = (vm->partition.ffa_version & 0xffff0000) >>
1043 FFA_VERSION_MAJOR_OFFSET;
1044 ffa_version_minor = vm->partition.ffa_version & 0xffff;
1045
1046 if (ffa_version_major != FFA_VERSION_MAJOR ||
1047 ffa_version_minor > FFA_VERSION_MINOR) {
1048 dlog_error("FF-A partition manifest version %s: %u.%u\n",
1049 error_string, ffa_version_major, ffa_version_minor);
1050 ret_code = MANIFEST_ERROR_NOT_COMPATIBLE;
1051 }
1052
1053 if (vm->partition.xlat_granule != PAGE_4KB) {
1054 dlog_error("Translation granule %s: %u\n", error_string,
1055 vm->partition.xlat_granule);
1056 ret_code = MANIFEST_ERROR_NOT_COMPATIBLE;
1057 }
1058
1059 if (vm->partition.execution_state != AARCH64) {
1060 dlog_error("Execution state %s: %u\n", error_string,
1061 vm->partition.execution_state);
1062 ret_code = MANIFEST_ERROR_NOT_COMPATIBLE;
1063 }
1064
1065 if (vm->partition.run_time_el != EL1 &&
1066 vm->partition.run_time_el != S_EL1 &&
Daniel Boulby874d5432023-04-27 12:40:24 +01001067 vm->partition.run_time_el != S_EL0 &&
1068 vm->partition.run_time_el != EL0) {
J-Alvesabebe432022-05-31 14:40:50 +01001069 dlog_error("Exception level %s: %d\n", error_string,
1070 vm->partition.run_time_el);
1071 ret_code = MANIFEST_ERROR_NOT_COMPATIBLE;
1072 }
1073
Kathleen Capellaf71dee42023-08-08 16:24:14 -04001074 if (vm->partition.ffa_version < MAKE_FFA_VERSION(1, 2) && using_req2) {
1075 dlog_error("Messaging method %s: %x\n", error_string,
1076 vm->partition.messaging_method);
1077 ret_code = MANIFEST_ERROR_NOT_COMPATIBLE;
1078 }
1079
J-Alvesabebe432022-05-31 14:40:50 +01001080 if ((vm->partition.messaging_method &
1081 ~(FFA_PARTITION_DIRECT_REQ_RECV | FFA_PARTITION_DIRECT_REQ_SEND |
Kathleen Capellaf71dee42023-08-08 16:24:14 -04001082 FFA_PARTITION_INDIRECT_MSG | FFA_PARTITION_DIRECT_REQ2_RECV |
1083 FFA_PARTITION_DIRECT_REQ2_SEND)) != 0U) {
J-Alvesabebe432022-05-31 14:40:50 +01001084 dlog_error("Messaging method %s: %x\n", error_string,
1085 vm->partition.messaging_method);
1086 ret_code = MANIFEST_ERROR_NOT_COMPATIBLE;
1087 }
1088
Daniel Boulby874d5432023-04-27 12:40:24 +01001089 if ((vm->partition.run_time_el == S_EL0 ||
1090 vm->partition.run_time_el == EL0) &&
J-Alvesabebe432022-05-31 14:40:50 +01001091 vm->partition.execution_ctx_count != 1) {
1092 dlog_error(
1093 "Exception level and execution context count %s: %d "
1094 "%d\n",
1095 error_string, vm->partition.run_time_el,
1096 vm->partition.execution_ctx_count);
1097 ret_code = MANIFEST_ERROR_NOT_COMPATIBLE;
1098 }
1099
Raghu Krishnamurthy641dcd82022-07-19 23:21:20 -07001100 for (uint16_t i = 0; i < vm->partition.dev_region_count; i++) {
J-Alvesabebe432022-05-31 14:40:50 +01001101 struct device_region dev_region;
1102
1103 dev_region = vm->partition.dev_regions[i];
1104
Raghu Krishnamurthy641dcd82022-07-19 23:21:20 -07001105 if (dev_region.interrupt_count >
1106 PARTITION_MAX_INTERRUPTS_PER_DEVICE) {
J-Alvesabebe432022-05-31 14:40:50 +01001107 dlog_error(
1108 "Interrupt count for device region exceeds "
1109 "limit.\n");
1110 ret_code = MANIFEST_ERROR_NOT_COMPATIBLE;
1111 continue;
1112 }
1113
1114 for (uint8_t j = 0; j < dev_region.interrupt_count; j++) {
1115 k++;
1116 if (k > VM_MANIFEST_MAX_INTERRUPTS) {
1117 dlog_error(
1118 "Interrupt count for VM exceeds "
1119 "limit.\n");
1120 ret_code = MANIFEST_ERROR_NOT_COMPATIBLE;
1121 continue;
1122 }
1123 }
1124 }
1125
1126 /* GP register is restricted to one of x0 - x3. */
Karl Meakin824b63d2024-06-03 19:04:53 +01001127 if (vm->partition.gp_register_num != DEFAULT_BOOT_GP_REGISTER &&
J-Alvesabebe432022-05-31 14:40:50 +01001128 vm->partition.gp_register_num > 3) {
1129 dlog_error("GP register number %s: %u\n", error_string,
1130 vm->partition.gp_register_num);
1131 ret_code = MANIFEST_ERROR_NOT_COMPATIBLE;
1132 }
1133
1134 return ret_code;
1135}
1136
Madhukar Pappireddye032af52023-10-11 14:52:58 -05001137/**
1138 * Find the device id allocated to the device region node corresponding to the
1139 * specified stream id.
1140 */
1141static bool find_dma_device_id_from_dev_region_nodes(
1142 const struct manifest_vm *manifest_vm, uint32_t sid, uint8_t *device_id)
1143{
1144 for (uint16_t i = 0; i < manifest_vm->partition.dev_region_count; i++) {
1145 struct device_region dev_region =
1146 manifest_vm->partition.dev_regions[i];
1147
1148 for (uint8_t j = 0; j < dev_region.stream_count; j++) {
1149 if (sid == dev_region.stream_ids[j]) {
1150 *device_id = dev_region.dma_device_id;
1151 return true;
1152 }
1153 }
1154 }
1155 return false;
1156}
1157
1158/**
1159 * Identify the device id of a DMA device node corresponding to a stream id
1160 * specified in the memory region node.
1161 */
1162static bool map_dma_device_id_to_stream_ids(struct manifest_vm *vm)
1163{
1164 for (uint16_t i = 0; i < vm->partition.mem_region_count; i++) {
1165 struct memory_region mem_region = vm->partition.mem_regions[i];
1166
1167 for (uint8_t j = 0; j < mem_region.dma_prop.stream_count; j++) {
1168 uint32_t sid = mem_region.dma_prop.stream_ids[j];
1169 uint8_t device_id = 0;
1170
1171 /*
1172 * Every stream id must have been declared in the
1173 * device node as well.
1174 */
1175 if (!find_dma_device_id_from_dev_region_nodes(
1176 vm, sid, &device_id)) {
1177 dlog_verbose(
Karl Meakine8937d92024-03-19 16:04:25 +00001178 "Stream ID %d not found in any device "
Madhukar Pappireddye032af52023-10-11 14:52:58 -05001179 "region node of partition manifest\n",
1180 sid);
1181 return false;
1182 }
1183
1184 mem_region.dma_prop.dma_device_id = device_id;
1185 }
1186 }
1187
1188 return true;
1189}
1190
/**
 * Parses an FF-A partition manifest (a flattened device tree) into *vm.
 *
 * Reads the root-level properties (uuid list, ffa-version, execution context
 * count, exception level, execution state, load address, entrypoint offset,
 * boot GP register, boot order, translation granule, messaging method,
 * interrupt actions and power management messages), then the optional
 * rx_tx-info, boot-info, memory-regions and device-regions child nodes, and
 * finally cross-checks the result with sanity_check_ffa_manifest().
 *
 * If boot_info_node is non-NULL and the manifest contains a "boot-info"
 * node, *boot_info_node is set to that node and vm->partition.boot_info is
 * set to true.
 *
 * Returns MANIFEST_SUCCESS, or a specific manifest error code for the first
 * missing/invalid property encountered.
 */
enum manifest_return_code parse_ffa_manifest(
	struct fdt *fdt, struct manifest_vm *vm,
	struct fdt_node *boot_info_node, const struct boot_params *boot_params)
{
	unsigned int i = 0;
	unsigned int j = 0;
	struct uint32list_iter uuid;
	uint32_t uuid_word;
	struct fdt_node root;
	struct fdt_node ffa_node;
	struct string rxtx_node_name = STRING_INIT("rx_tx-info");
	struct string mem_region_node_name = STRING_INIT("memory-regions");
	struct string dev_region_node_name = STRING_INIT("device-regions");
	struct string boot_info_node_name = STRING_INIT("boot-info");
	bool managed_exit_field_present = false;

	if (!fdt_find_node(fdt, "/", &root)) {
		return MANIFEST_ERROR_NO_ROOT_NODE;
	}

	/* Check "compatible" property. */
	if (!fdt_is_compatible(&root, "arm,ffa-manifest-1.0")) {
		return MANIFEST_ERROR_NOT_COMPATIBLE;
	}

	/*
	 * "uuid" is a flat list of 32-bit words; each group of 4 words forms
	 * one UUID, up to PARTITION_MAX_UUIDS. i counts words within a UUID,
	 * j counts UUIDs.
	 */
	TRY(read_uint32list(&root, "uuid", &uuid));

	while (uint32list_has_next(&uuid) && j < PARTITION_MAX_UUIDS) {
		while (uint32list_has_next(&uuid) && i < 4) {
			TRY(uint32list_get_next(&uuid, &uuid_word));
			vm->partition.uuids[j].uuid[i] = uuid_word;
			i++;
		}

		if (ffa_uuid_is_null(&vm->partition.uuids[j])) {
			return MANIFEST_ERROR_UUID_ALL_ZEROS;
		}
		dlog_verbose(" UUID %#x-%x-%x-%x\n",
			     vm->partition.uuids[j].uuid[0],
			     vm->partition.uuids[j].uuid[1],
			     vm->partition.uuids[j].uuid[2],
			     vm->partition.uuids[j].uuid[3]);
		j++;
		i = 0;
	}

	vm->partition.uuid_count = j;
	dlog_verbose(" Number of UUIDs %u\n", vm->partition.uuid_count);

	TRY(read_uint32(&root, "ffa-version", &vm->partition.ffa_version));
	dlog_verbose(" Expected FF-A version %u.%u\n",
		     vm->partition.ffa_version >> 16,
		     vm->partition.ffa_version & 0xffff);

	TRY(read_uint16(&root, "execution-ctx-count",
			&vm->partition.execution_ctx_count));
	dlog_verbose(" Number of execution context %u\n",
		     vm->partition.execution_ctx_count);

	TRY(read_uint8(&root, "exception-level",
		       (uint8_t *)&vm->partition.run_time_el));
	dlog_verbose(" Run-time EL %u\n", vm->partition.run_time_el);

	TRY(read_uint8(&root, "execution-state",
		       (uint8_t *)&vm->partition.execution_state));
	dlog_verbose(" Execution state %u\n", vm->partition.execution_state);

	TRY(read_optional_uint64(&root, "load-address", 0,
				 &vm->partition.load_addr));
	dlog_verbose(" Load address %#lx\n", vm->partition.load_addr);

	TRY(read_optional_uint64(&root, "entrypoint-offset", 0,
				 &vm->partition.ep_offset));
	dlog_verbose(" Entry point offset %#zx\n", vm->partition.ep_offset);

	TRY(read_optional_uint32(&root, "gp-register-num",
				 DEFAULT_BOOT_GP_REGISTER,
				 &vm->partition.gp_register_num));

	if (vm->partition.gp_register_num != DEFAULT_BOOT_GP_REGISTER) {
		dlog_verbose(" Boot GP register: x%u\n",
			     vm->partition.gp_register_num);
	}

	TRY(read_optional_uint16(&root, "boot-order", DEFAULT_BOOT_ORDER,
				 &vm->partition.boot_order));
	if (vm->partition.boot_order != DEFAULT_BOOT_ORDER) {
		dlog_verbose(" Boot order %u\n", vm->partition.boot_order);
	}

	if (!check_boot_order(vm->partition.boot_order)) {
		return MANIFEST_ERROR_INVALID_BOOT_ORDER;
	}

	TRY(read_optional_uint8(&root, "xlat-granule", 0,
				(uint8_t *)&vm->partition.xlat_granule));
	dlog_verbose(" Translation granule %u\n", vm->partition.xlat_granule);

	/* Optional RX/TX buffer description node. */
	ffa_node = root;
	if (fdt_find_child(&ffa_node, &rxtx_node_name)) {
		if (!fdt_is_compatible(&ffa_node,
				       "arm,ffa-manifest-rx_tx-buffer")) {
			return MANIFEST_ERROR_NOT_COMPATIBLE;
		}

		/*
		 * Read only phandles for now, it will be used to update buffers
		 * while parsing memory regions.
		 */
		TRY(read_uint32(&ffa_node, "rx-buffer",
				&vm->partition.rxtx.rx_phandle));

		TRY(read_uint32(&ffa_node, "tx-buffer",
				&vm->partition.rxtx.tx_phandle));

		vm->partition.rxtx.available = true;
	}

	TRY(read_uint16(&root, "messaging-method",
			(uint16_t *)&vm->partition.messaging_method));
	dlog_verbose(" Messaging method %u\n", vm->partition.messaging_method);

	TRY(read_bool(&root, "managed-exit", &managed_exit_field_present));

	TRY(read_optional_uint8(
		&root, "ns-interrupts-action", NS_ACTION_SIGNALED,
		(uint8_t *)&vm->partition.ns_interrupts_action));

	/*
	 * An SP manifest can specify one of the fields listed below:
	 * `managed-exit`: Introduced in FF-A v1.0 spec.
	 * `ns-interrupts-action`: Introduced in FF-A v1.1 EAC0 spec.
	 * If both are missing from the manifest, the default response is
	 * NS_ACTION_SIGNALED.
	 */
	if (managed_exit_field_present) {
		vm->partition.ns_interrupts_action = NS_ACTION_ME;
	}

	if (vm->partition.ns_interrupts_action != NS_ACTION_QUEUED &&
	    vm->partition.ns_interrupts_action != NS_ACTION_ME &&
	    vm->partition.ns_interrupts_action != NS_ACTION_SIGNALED) {
		return MANIFEST_ERROR_ILLEGAL_NS_INT_ACTION;
	}

	dlog_verbose(
		" NS Interrupts %s\n",
		(vm->partition.ns_interrupts_action == NS_ACTION_QUEUED)
			? "Queued"
		: (vm->partition.ns_interrupts_action == NS_ACTION_SIGNALED)
			? "Signaled"
			: "Managed exit");

	if (vm->partition.ns_interrupts_action == NS_ACTION_ME) {
		/* Managed exit only supported by S_EL1 partitions. */
		if (vm->partition.run_time_el != S_EL1) {
			dlog_error(
				"Managed exit cannot be supported by this "
				"partition\n");
			return MANIFEST_ERROR_ILLEGAL_NS_INT_ACTION;
		}

		TRY(read_bool(&root, "managed-exit-virq",
			      &vm->partition.me_signal_virq));
		if (vm->partition.me_signal_virq) {
			dlog_verbose(" Managed Exit signaled through vIRQ\n");
		}
	}

	TRY(read_bool(&root, "notification-support",
		      &vm->partition.notification_support));
	if (vm->partition.notification_support) {
		dlog_verbose(" Notifications Receipt Supported\n");
	}

	TRY(read_optional_uint8(
		&root, "other-s-interrupts-action", OTHER_S_INT_ACTION_SIGNALED,
		(uint8_t *)&vm->partition.other_s_interrupts_action));

	/*
	 * Queueing other secure interrupts is only coherent when NS
	 * interrupts are queued as well.
	 */
	if (vm->partition.other_s_interrupts_action ==
	    OTHER_S_INT_ACTION_QUEUED) {
		if (vm->partition.ns_interrupts_action != NS_ACTION_QUEUED) {
			dlog_error(
				"Choice of the fields 'ns-interrupts-action' "
				"and 'other-s-interrupts-action' not "
				"compatible\n");
			return MANIFEST_ERROR_NOT_COMPATIBLE;
		}
	} else if (vm->partition.other_s_interrupts_action >
		   OTHER_S_INT_ACTION_SIGNALED) {
		dlog_error(
			"Illegal value specified for the field"
			" 'other-s-interrupts-action': %u\n",
			vm->partition.other_s_interrupts_action);
		return MANIFEST_ERROR_ILLEGAL_OTHER_S_INT_ACTION;
	}

	/* Parse boot info node. */
	if (boot_info_node != NULL) {
		ffa_node = root;
		vm->partition.boot_info =
			fdt_find_child(&ffa_node, &boot_info_node_name);
		if (vm->partition.boot_info) {
			*boot_info_node = ffa_node;
		}
	} else {
		vm->partition.boot_info = false;
	}

	/*
	 * Power management messages default to CPU_OFF | CPU_ON supported;
	 * single-vCPU and EL0/S-EL0 partitions get none.
	 */
	TRY(read_optional_uint32(
		&root, "power-management-messages",
		MANIFEST_POWER_MANAGEMENT_CPU_OFF_SUPPORTED |
			MANIFEST_POWER_MANAGEMENT_CPU_ON_SUPPORTED,
		&vm->partition.power_management));
	vm->partition.power_management &= MANIFEST_POWER_MANAGEMENT_ALL_MASK;
	if (vm->partition.execution_ctx_count == 1 ||
	    vm->partition.run_time_el == S_EL0 ||
	    vm->partition.run_time_el == EL0) {
		vm->partition.power_management =
			MANIFEST_POWER_MANAGEMENT_NONE_MASK;
	}

	dlog_verbose(" Power management messages %#x\n",
		     vm->partition.power_management);

	/* Parse memory-regions */
	ffa_node = root;
	if (fdt_find_child(&ffa_node, &mem_region_node_name)) {
		TRY(parse_ffa_memory_region_node(
			&ffa_node, vm->partition.load_addr,
			vm->partition.mem_regions,
			&vm->partition.mem_region_count, &vm->partition.rxtx,
			boot_params));
	}
	dlog_verbose(" Total %u memory regions found\n",
		     vm->partition.mem_region_count);

	/* Parse Device-regions */
	ffa_node = root;
	if (fdt_find_child(&ffa_node, &dev_region_node_name)) {
		TRY(parse_ffa_device_region_node(
			&ffa_node, vm->partition.dev_regions,
			&vm->partition.dev_region_count,
			&vm->partition.dma_device_count, boot_params));
	}
	dlog_verbose(" Total %u device regions found\n",
		     vm->partition.dev_region_count);

	/* Resolve DMA device ids for stream ids used by memory regions. */
	if (!map_dma_device_id_to_stream_ids(vm)) {
		return MANIFEST_ERROR_NOT_COMPATIBLE;
	}

	return sanity_check_ffa_manifest(vm);
}
1445
/**
 * Locates and parses an FF-A partition's own manifest out of its Secure
 * Partition package (SP package) in memory.
 *
 * The package's load address is read from the "load_address" property of
 * the hypervisor/SPMC manifest node `node`; the package is then mapped via
 * sp_pkg_init(), its embedded partition manifest FDT is validated and parsed
 * into `vm`, and optionally an FF-A boot information blob is written into
 * the package (v2 package headers only).
 *
 * Returns MANIFEST_SUCCESS on success; MANIFEST_ERROR_NOT_COMPATIBLE (or the
 * code propagated from the nested parse) on failure. The package mapping is
 * always released via sp_pkg_deinit() once sp_pkg_init() has succeeded.
 */
static enum manifest_return_code parse_ffa_partition_package(
	struct mm_stage1_locked stage1_locked, struct fdt_node *node,
	struct manifest_vm *vm, ffa_id_t vm_id,
	const struct boot_params *boot_params, struct mpool *ppool)
{
	enum manifest_return_code ret = MANIFEST_ERROR_NOT_COMPATIBLE;
	uintpaddr_t load_address;
	struct sp_pkg_header header;
	struct fdt sp_fdt;
	vaddr_t pkg_start;
	vaddr_t manifest_address;
	struct fdt_node boot_info_node;

	/*
	 * This must have been hinted as being an FF-A partition,
	 * return straight with failure if this is not the case.
	 */
	if (!vm->is_ffa_partition) {
		return ret;
	}

	/* The package must be placed on a page boundary. */
	TRY(read_uint64(node, "load_address", &load_address));
	if (!is_aligned(load_address, PAGE_SIZE)) {
		return MANIFEST_ERROR_NOT_COMPATIBLE;
	}

	assert(load_address != 0U);

	/* Map the package and read its header; nothing to clean up if
	 * this fails, so return directly rather than jumping to `out`. */
	if (!sp_pkg_init(stage1_locked, pa_init(load_address), &header,
			 ppool)) {
		return ret;
	}

	pkg_start = va_init(load_address);

	/*
	 * For secondary VMs/SPs the package (header + DT) must fit within
	 * the memory size declared for the partition.
	 */
	if (vm_id != HF_PRIMARY_VM_ID &&
	    sp_pkg_get_mem_size(&header) >= vm->secondary.mem_size) {
		dlog_error("Invalid package header or DT size.\n");
		goto out;
	}

	/* The partition manifest FDT lives at pm_offset within the package. */
	manifest_address = va_add(va_init(load_address), header.pm_offset);
	if (!fdt_init_from_ptr(&sp_fdt, ptr_from_va(manifest_address),
			       header.pm_size)) {
		dlog_error("manifest.c: FDT failed validation.\n");
		goto out;
	}

	ret = parse_ffa_manifest(&sp_fdt, vm, &boot_info_node, boot_params);
	if (ret != MANIFEST_SUCCESS) {
		dlog_error("Error parsing partition manifest.\n");
		goto out;
	}

	/*
	 * The package's placement wins over the load address stated in the
	 * partition's own manifest; warn and override on mismatch.
	 */
	if (vm->partition.load_addr != load_address) {
		dlog_warning(
			"Partition's load address at its manifest differs"
			" from specified in partition's package.\n");
		vm->partition.load_addr = load_address;
	}

	/*
	 * If the partition asked to receive boot info through a GP register,
	 * write the boot information descriptor into the package (only
	 * supported for version-2 package headers). NOTE(review): a failure
	 * here is only logged — `ret` remains MANIFEST_SUCCESS; confirm this
	 * best-effort behaviour is intended.
	 */
	if (vm->partition.gp_register_num != DEFAULT_BOOT_GP_REGISTER) {
		if (header.version == SP_PKG_HEADER_VERSION_2 &&
		    vm->partition.boot_info &&
		    !ffa_boot_info_node(&boot_info_node, pkg_start, &header)) {
			dlog_error("Failed to process boot information.\n");
		}
	}
out:
	/* Unmap the package mapping established by sp_pkg_init(). */
	sp_pkg_deinit(stage1_locked, pkg_start, &header, ppool);
	return ret;
}
1518
/**
 * Parse manifest from FDT.
 *
 * Allocates the global manifest data (released later via manifest_deinit()),
 * validates the hypervisor node of `manifest_fdt`, then iterates over the
 * "vmN" child nodes, parsing each VM's common properties and — for FF-A
 * partitions not loaded by the hypervisor — the partition's own package and
 * manifest, while recording the memory each partition occupies so overlaps
 * are detected.
 *
 * On success *manifest_ret points at the parsed manifest. Returns
 * MANIFEST_SUCCESS or a specific manifest_return_code on failure.
 */
enum manifest_return_code manifest_init(struct mm_stage1_locked stage1_locked,
					struct manifest **manifest_ret,
					struct memiter *manifest_fdt,
					struct boot_params *boot_params,
					struct mpool *ppool)
{
	struct manifest *manifest;
	struct string vm_name;
	struct fdt fdt;
	struct fdt_node hyp_node;
	size_t i = 0;
	bool found_primary_vm = false;

	/* At least one memory range ('memory' or 'ns-memory') is required. */
	if (boot_params->mem_ranges_count == 0 &&
	    boot_params->ns_mem_ranges_count == 0) {
		return MANIFEST_ERROR_MEMORY_MISSING;
	}

	dump_memory_ranges(boot_params->mem_ranges,
			   boot_params->mem_ranges_count, false);
	dump_memory_ranges(boot_params->ns_mem_ranges,
			   boot_params->ns_mem_ranges_count, true);

	/* Allocate space in the ppool for the manifest data. */
	if (!manifest_data_init(ppool)) {
		panic("Unable to allocate manifest data.\n");
	}

	manifest = &manifest_data->manifest;
	*manifest_ret = manifest;

	if (!fdt_init_from_memiter(&fdt, manifest_fdt)) {
		return MANIFEST_ERROR_FILE_SIZE; /* TODO */
	}

	/* Find hypervisor node. */
	if (!fdt_find_node(&fdt, "/hypervisor", &hyp_node)) {
		return MANIFEST_ERROR_NO_HYPERVISOR_FDT_NODE;
	}

	/* Check "compatible" property. */
	if (!fdt_is_compatible(&hyp_node, "hafnium,hafnium")) {
		return MANIFEST_ERROR_NOT_COMPATIBLE;
	}

	TRY(read_bool(&hyp_node, "ffa_tee_enabled",
		      &manifest->ffa_tee_enabled));

	/* Iterate over reserved VM IDs and check no such nodes exist. */
	for (i = HF_VM_ID_BASE; i < HF_VM_ID_OFFSET; i++) {
		ffa_id_t vm_id = (ffa_id_t)i - HF_VM_ID_BASE;
		struct fdt_node vm_node = hyp_node;

		generate_vm_node_name(&vm_name, vm_id);
		if (fdt_find_child(&vm_node, &vm_name)) {
			return MANIFEST_ERROR_RESERVED_VM_ID;
		}
	}

	/* Iterate over VM nodes until we find one that does not exist. */
	for (i = 0; i <= MAX_VMS; ++i) {
		ffa_id_t vm_id = HF_VM_ID_OFFSET + i;
		struct fdt_node vm_node = hyp_node;

		generate_vm_node_name(&vm_name, vm_id - HF_VM_ID_BASE);
		if (!fdt_find_child(&vm_node, &vm_name)) {
			/* First gap in the numbering ends the scan. */
			break;
		}

		/*
		 * A node exists at index MAX_VMS, i.e. more VMs than there
		 * is statically allocated space for.
		 */
		if (i == MAX_VMS) {
			return MANIFEST_ERROR_TOO_MANY_VMS;
		}

		if (vm_id == HF_PRIMARY_VM_ID) {
			CHECK(found_primary_vm == false); /* sanity check */
			found_primary_vm = true;
		}

		manifest->vm_count = i + 1;

		TRY(parse_vm_common(&vm_node, &manifest->vm[i], vm_id));

		/* A hypervisor-loaded VM must also be an FF-A partition. */
		CHECK(!manifest->vm[i].is_hyp_loaded ||
		      manifest->vm[i].is_ffa_partition);

		if (manifest->vm[i].is_ffa_partition &&
		    !manifest->vm[i].is_hyp_loaded) {
			TRY(parse_ffa_partition_package(stage1_locked, &vm_node,
							&manifest->vm[i], vm_id,
							boot_params, ppool));
			/* Partition footprint in whole pages, rounded up. */
			size_t page_count =
				align_up(manifest->vm[i].secondary.mem_size,
					 PAGE_SIZE) /
				PAGE_SIZE;

			/* Memory checks below apply to secondary VMs/SPs. */
			if (vm_id == HF_PRIMARY_VM_ID) {
				continue;
			}

			TRY(check_partition_memory_is_valid(
				manifest->vm[i].partition.load_addr, page_count,
				0, boot_params));

			/*
			 * Check if memory from load-address until (load-address
			 * + memory size) has been used by other partition.
			 */
			TRY(check_and_record_memory_used(
				manifest->vm[i].partition.load_addr,
				page_count));
		} else {
			TRY(parse_vm(&vm_node, &manifest->vm[i], vm_id));
		}
	}

	/* A primary VM is only mandatory in the world that hosts it. */
	if (!found_primary_vm && vm_id_is_current_world(HF_PRIMARY_VM_ID)) {
		return MANIFEST_ERROR_NO_PRIMARY_VM;
	}

	return MANIFEST_SUCCESS;
}
1643
/**
 * Free manifest data resources, called once manifest parsing has
 * completed and VMs are loaded.
 *
 * Releases the storage allocated by manifest_data_init() during
 * manifest_init() back into `ppool`; any manifest pointer previously
 * returned via manifest_init() must not be used afterwards.
 */
void manifest_deinit(struct mpool *ppool)
{
	manifest_data_deinit(ppool);
}
1652
David Brazdil7a462ec2019-08-15 12:27:47 +01001653const char *manifest_strerror(enum manifest_return_code ret_code)
1654{
1655 switch (ret_code) {
1656 case MANIFEST_SUCCESS:
1657 return "Success";
David Brazdila2358d42020-01-27 18:51:38 +00001658 case MANIFEST_ERROR_FILE_SIZE:
1659 return "Total size in header does not match file size";
Olivier Deprez62d99e32020-01-09 15:58:07 +01001660 case MANIFEST_ERROR_MALFORMED_DTB:
1661 return "Malformed device tree blob";
David Brazdila2358d42020-01-27 18:51:38 +00001662 case MANIFEST_ERROR_NO_ROOT_NODE:
1663 return "Could not find root node in manifest";
David Brazdil7a462ec2019-08-15 12:27:47 +01001664 case MANIFEST_ERROR_NO_HYPERVISOR_FDT_NODE:
1665 return "Could not find \"hypervisor\" node in manifest";
David Brazdil74e9c3b2019-08-28 11:09:08 +01001666 case MANIFEST_ERROR_NOT_COMPATIBLE:
1667 return "Hypervisor manifest entry not compatible with Hafnium";
David Brazdil7a462ec2019-08-15 12:27:47 +01001668 case MANIFEST_ERROR_RESERVED_VM_ID:
1669 return "Manifest defines a VM with a reserved ID";
1670 case MANIFEST_ERROR_NO_PRIMARY_VM:
1671 return "Manifest does not contain a primary VM entry";
1672 case MANIFEST_ERROR_TOO_MANY_VMS:
1673 return "Manifest specifies more VMs than Hafnium has "
1674 "statically allocated space for";
1675 case MANIFEST_ERROR_PROPERTY_NOT_FOUND:
1676 return "Property not found";
1677 case MANIFEST_ERROR_MALFORMED_STRING:
1678 return "Malformed string property";
David Brazdil0dbb41f2019-09-09 18:03:35 +01001679 case MANIFEST_ERROR_STRING_TOO_LONG:
1680 return "String too long";
David Brazdil7a462ec2019-08-15 12:27:47 +01001681 case MANIFEST_ERROR_MALFORMED_INTEGER:
1682 return "Malformed integer property";
1683 case MANIFEST_ERROR_INTEGER_OVERFLOW:
1684 return "Integer overflow";
Andrew Scullae9962e2019-10-03 16:51:16 +01001685 case MANIFEST_ERROR_MALFORMED_INTEGER_LIST:
1686 return "Malformed integer list property";
Andrew Scullb2c3a242019-11-04 13:52:36 +00001687 case MANIFEST_ERROR_MALFORMED_BOOLEAN:
1688 return "Malformed boolean property";
Manish Pandey6542f5c2020-04-27 14:37:46 +01001689 case MANIFEST_ERROR_MEMORY_REGION_NODE_EMPTY:
1690 return "Memory-region node should have at least one entry";
Manish Pandeye68e7932020-04-23 15:29:28 +01001691 case MANIFEST_ERROR_DEVICE_REGION_NODE_EMPTY:
1692 return "Device-region node should have at least one entry";
Manish Pandeyf06c9072020-09-29 15:41:58 +01001693 case MANIFEST_ERROR_RXTX_SIZE_MISMATCH:
1694 return "RX and TX buffers should be of same size";
Varun Wadekar4afbfd72022-10-13 14:30:18 +01001695 case MANIFEST_ERROR_MEM_REGION_EMPTY:
1696 return "Memory region should have at least one page";
Karl Meakinf6d49402023-04-04 18:14:26 +01001697 case MANIFEST_ERROR_BASE_ADDRESS_AND_RELATIVE_ADDRESS:
1698 return "Base and relative addresses are mutually exclusive";
Daniel Boulbya7e9e182022-06-27 14:21:01 +01001699 case MANIFEST_ERROR_MEM_REGION_OVERLAP:
1700 return "Memory region overlaps with one already allocated";
Daniel Boulbyc1a613d2022-10-18 11:26:17 +01001701 case MANIFEST_ERROR_MEM_REGION_UNALIGNED:
1702 return "Memory region is not aligned to a page boundary";
Raghu Krishnamurthy384693c2021-10-11 13:56:24 -07001703 case MANIFEST_ERROR_INVALID_MEM_PERM:
1704 return "Memory permission should be RO, RW or RX";
J-Alves35315782022-01-25 17:58:32 +00001705 case MANIFEST_ERROR_ARGUMENTS_LIST_EMPTY:
1706 return "Arguments-list node should have at least one argument";
Daniel Boulby801f8ef2022-06-27 14:21:01 +01001707 case MANIFEST_ERROR_INTERRUPT_ID_REPEATED:
1708 return "Interrupt ID already assigned to another endpoint";
Madhukar Pappireddy5c04a382022-12-28 11:29:26 -06001709 case MANIFEST_ERROR_ILLEGAL_NS_INT_ACTION:
Madhukar Pappireddy84154052022-06-21 18:30:25 -05001710 return "Illegal value specidied for the field: Action in "
1711 "response to NS Interrupt";
Raghu Krishnamurthy98da1ca2022-10-04 08:59:01 -07001712 case MANIFEST_ERROR_INTERRUPT_ID_NOT_IN_LIST:
1713 return "Interrupt ID is not in the list of interrupts";
Madhukar Pappireddy5c04a382022-12-28 11:29:26 -06001714 case MANIFEST_ERROR_ILLEGAL_OTHER_S_INT_ACTION:
1715 return "Illegal value specified for the field: Action in "
1716 "response to Other-S Interrupt";
J-Alves77b6f4f2023-03-15 11:34:49 +00001717 case MANIFEST_ERROR_MEMORY_MISSING:
1718 return "Memory nodes must be defined in the SPMC manifest "
Olivier Deprez3cbdf862023-06-07 15:36:32 +02001719 "('memory' and 'ns-memory')";
J-Alves77b6f4f2023-03-15 11:34:49 +00001720 case MANIFEST_ERROR_PARTITION_ADDRESS_OVERLAP:
1721 return "Partition's memory [load address: load address + "
1722 "memory size[ overlap with other allocated "
Olivier Deprez3cbdf862023-06-07 15:36:32 +02001723 "regions";
J-Alves77b6f4f2023-03-15 11:34:49 +00001724 case MANIFEST_ERROR_MEM_REGION_INVALID:
Olivier Deprez3cbdf862023-06-07 15:36:32 +02001725 return "Invalid memory region range";
Daniel Boulby4339edc2024-02-21 14:59:00 +00001726 case MANIFEST_ERROR_DEVICE_MEM_REGION_INVALID:
1727 return "Invalid device memory region range";
Kathleen Capella4a2a6e72023-04-21 14:43:26 -04001728 case MANIFEST_ERROR_INVALID_BOOT_ORDER:
1729 return "Boot order should be a unique value less than "
1730 "default largest value";
Kathleen Capella422b10b2023-06-30 18:28:27 -04001731 case MANIFEST_ERROR_UUID_ALL_ZEROS:
1732 return "UUID should not be NIL";
Madhukar Pappireddy3c2b7912023-10-11 14:47:27 -05001733 case MANIFEST_ERROR_MISSING_SMMU_ID:
1734 return "SMMU ID must be specified for the given Stream IDs";
1735 case MANIFEST_ERROR_MISMATCH_DMA_ACCESS_PERMISSIONS:
1736 return "DMA device access permissions must match memory region "
1737 "attributes";
1738 case MANIFEST_ERROR_STREAM_IDS_OVERFLOW:
1739 return "DMA device stream ID count exceeds predefined limit";
1740 case MANIFEST_ERROR_DMA_ACCESS_PERMISSIONS_OVERFLOW:
1741 return "DMA access permissions count exceeds predefined limit";
Madhukar Pappireddy8a364472023-10-11 15:34:07 -05001742 case MANIFEST_ERROR_DMA_DEVICE_OVERFLOW:
1743 return "Number of device regions with DMA peripheral exceeds "
1744 "limit.";
David Brazdil7a462ec2019-08-15 12:27:47 +01001745 }
1746
1747 panic("Unexpected manifest return code.");
1748}