/*
 * Copyright 2019 The Hafnium Authors.
 *
 * Use of this source code is governed by a BSD-style
 * license that can be found in the LICENSE file or at
 * https://opensource.org/licenses/BSD-3-Clause.
 */

#include "hf/manifest.h"

#include <stddef.h>

#include "hf/arch/types.h"

#include "hf/addr.h"
#include "hf/assert.h"
#include "hf/boot_info.h"
#include "hf/check.h"
#include "hf/dlog.h"
#include "hf/sp_pkg.h"
#include "hf/static_assert.h"
#include "hf/std.h"

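/*
 * Evaluates an expression returning an enum manifest_return_code and, if it
 * is not MANIFEST_SUCCESS, propagates the error code to the caller.
 * Example: TRY(read_uint32(node, "pages-count", &mem_regions[i].page_count));
 */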
#define TRY(expr)                                                   \
	do {                                                        \
		enum manifest_return_code ret_code = (expr);        \
		if (ret_code != MANIFEST_SUCCESS) {                 \
			return ret_code;                            \
		}                                                   \
	} while (0)

#define VM_ID_MAX (HF_VM_ID_OFFSET + MAX_VMS - 1)
#define VM_ID_MAX_DIGITS (5)
#define VM_NAME_EXTRA_CHARS (3) /* "vm" + number + '\0' */
#define VM_NAME_MAX_SIZE (VM_ID_MAX_DIGITS + VM_NAME_EXTRA_CHARS)
static_assert(VM_NAME_MAX_SIZE <= STRING_MAX_SIZE,
	      "VM name does not fit into a struct string.");
static_assert(VM_ID_MAX <= 99999, "Insufficient VM_NAME_BUF_SIZE");
static_assert((HF_OTHER_WORLD_ID > VM_ID_MAX) ||
		      (HF_OTHER_WORLD_ID < HF_VM_ID_BASE),
	      "TrustZone VM ID clashes with normal VM range.");

/**
 * A struct to keep track of a VM's properties during early boot
 * manifest parsing.
 */
struct manifest_data {
	struct manifest manifest;
	struct interrupt_bitmap intids;
	struct {
		uintptr_t base;
		uintptr_t limit;
	} mem_regions[PARTITION_MAX_MEMORY_REGIONS * MAX_VMS];
};

/**
 * Calculate the number of entries in the ppool that are required to
 * store the manifest_data struct.
 */
static size_t manifest_data_ppool_entries =
	(align_up(sizeof(struct manifest_data), MM_PPOOL_ENTRY_SIZE) /
	 MM_PPOOL_ENTRY_SIZE);

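/*
 * File-scope state used while the manifest is parsed: the manifest itself
 * plus the interrupt IDs and memory region ranges recorded for validation.
 */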
static struct manifest_data *manifest_data;
/* Index used to track the number of memory regions allocated. */
static size_t allocated_mem_regions_index = 0;

/**
 * Allocates and clears memory for the manifest data in the given memory pool.
 * Returns true if the memory is successfully allocated.
 */
static bool manifest_data_init(struct mpool *ppool)
{
	manifest_data = (struct manifest_data *)mpool_alloc_contiguous(
		ppool, manifest_data_ppool_entries, 1);
	if (manifest_data == NULL) {
		return false;
	}

	memset_s(manifest_data, sizeof(struct manifest_data), 0,
		 sizeof(struct manifest_data));

	return true;
}

/**
 * Frees the memory used for the manifest data in the given memory pool.
 */
static void manifest_data_deinit(struct mpool *ppool)
{
	/**
	 * Clear and return the memory used for the manifest_data struct to the
	 * memory pool.
	 */
	memset_s(manifest_data, sizeof(struct manifest_data), 0,
		 sizeof(struct manifest_data));
	mpool_add_chunk(ppool, manifest_data, manifest_data_ppool_entries);

	/**
	 * Reset the index used for tracking the number of memory regions
	 * allocated.
	 */
	allocated_mem_regions_index = 0;
}

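/** Returns the number of decimal digits needed to represent `vm_id`. */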
static inline size_t count_digits(ffa_vm_id_t vm_id)
{
	size_t digits = 0;

	do {
		digits++;
		vm_id /= 10;
	} while (vm_id);
	return digits;
}

/**
 * Generates a string with the two letters "vm" followed by an integer.
 * Assumes `str` can hold at least VM_NAME_MAX_SIZE characters.
 */
static void generate_vm_node_name(struct string *str, ffa_vm_id_t vm_id)
{
	static const char *digits = "0123456789";
	size_t vm_id_digits = count_digits(vm_id);
	char *base = str->data;
	char *ptr = base + (VM_NAME_EXTRA_CHARS + vm_id_digits);

	assert(vm_id_digits <= VM_ID_MAX_DIGITS);
	*(--ptr) = '\0';
	do {
		*(--ptr) = digits[vm_id % 10];
		vm_id /= 10;
	} while (vm_id);
	*(--ptr) = 'm';
	*(--ptr) = 'v';
	assert(ptr == base);
}

/**
 * Read a boolean property: true if present; false if not. If present, the
 * value of the property must be empty else it is considered malformed.
 */
static enum manifest_return_code read_bool(const struct fdt_node *node,
					   const char *property, bool *out)
{
	struct memiter data;
	bool present = fdt_read_property(node, property, &data);

	if (present && memiter_size(&data) != 0) {
		return MANIFEST_ERROR_MALFORMED_BOOLEAN;
	}

	*out = present;
	return MANIFEST_SUCCESS;
}

static enum manifest_return_code read_string(const struct fdt_node *node,
					     const char *property,
					     struct string *out)
{
	struct memiter data;

	if (!fdt_read_property(node, property, &data)) {
		return MANIFEST_ERROR_PROPERTY_NOT_FOUND;
	}

	switch (string_init(out, &data)) {
	case STRING_SUCCESS:
		return MANIFEST_SUCCESS;
	case STRING_ERROR_INVALID_INPUT:
		return MANIFEST_ERROR_MALFORMED_STRING;
	case STRING_ERROR_TOO_LONG:
		return MANIFEST_ERROR_STRING_TOO_LONG;
	}
}

static enum manifest_return_code read_optional_string(
	const struct fdt_node *node, const char *property, struct string *out)
{
	enum manifest_return_code ret;

	ret = read_string(node, property, out);
	if (ret == MANIFEST_ERROR_PROPERTY_NOT_FOUND) {
		string_init_empty(out);
		ret = MANIFEST_SUCCESS;
	}
	return ret;
}

static enum manifest_return_code read_uint64(const struct fdt_node *node,
					     const char *property,
					     uint64_t *out)
{
	struct memiter data;

	if (!fdt_read_property(node, property, &data)) {
		return MANIFEST_ERROR_PROPERTY_NOT_FOUND;
	}

	if (!fdt_parse_number(&data, memiter_size(&data), out)) {
		return MANIFEST_ERROR_MALFORMED_INTEGER;
	}

	return MANIFEST_SUCCESS;
}

static enum manifest_return_code read_optional_uint64(
	const struct fdt_node *node, const char *property,
	uint64_t default_value, uint64_t *out)
{
	enum manifest_return_code ret;

	ret = read_uint64(node, property, out);
	if (ret == MANIFEST_ERROR_PROPERTY_NOT_FOUND) {
		*out = default_value;
		return MANIFEST_SUCCESS;
	}
	return ret;
}

static enum manifest_return_code read_uint32(const struct fdt_node *node,
					     const char *property,
					     uint32_t *out)
{
	uint64_t value;

	TRY(read_uint64(node, property, &value));

	if (value > UINT32_MAX) {
		return MANIFEST_ERROR_INTEGER_OVERFLOW;
	}

	*out = (uint32_t)value;
	return MANIFEST_SUCCESS;
}

static enum manifest_return_code read_optional_uint32(
	const struct fdt_node *node, const char *property,
	uint32_t default_value, uint32_t *out)
{
	enum manifest_return_code ret;

	ret = read_uint32(node, property, out);
	if (ret == MANIFEST_ERROR_PROPERTY_NOT_FOUND) {
		*out = default_value;
		return MANIFEST_SUCCESS;
	}
	return ret;
}

static enum manifest_return_code read_uint16(const struct fdt_node *node,
					     const char *property,
					     uint16_t *out)
{
	uint64_t value;

	TRY(read_uint64(node, property, &value));

	if (value > UINT16_MAX) {
		return MANIFEST_ERROR_INTEGER_OVERFLOW;
	}

	*out = (uint16_t)value;
	return MANIFEST_SUCCESS;
}

static enum manifest_return_code read_optional_uint16(
	const struct fdt_node *node, const char *property,
	uint16_t default_value, uint16_t *out)
{
	enum manifest_return_code ret;

	ret = read_uint16(node, property, out);
	if (ret == MANIFEST_ERROR_PROPERTY_NOT_FOUND) {
		*out = default_value;
		return MANIFEST_SUCCESS;
	}

	return ret;
}

static enum manifest_return_code read_uint8(const struct fdt_node *node,
					    const char *property, uint8_t *out)
{
	uint64_t value;

	TRY(read_uint64(node, property, &value));

	if (value > UINT8_MAX) {
		return MANIFEST_ERROR_INTEGER_OVERFLOW;
	}

	*out = (uint8_t)value;
	return MANIFEST_SUCCESS;
}

static enum manifest_return_code read_optional_uint8(
	const struct fdt_node *node, const char *property,
	uint8_t default_value, uint8_t *out)
{
	enum manifest_return_code ret;

	ret = read_uint8(node, property, out);
	if (ret == MANIFEST_ERROR_PROPERTY_NOT_FOUND) {
		*out = default_value;
		return MANIFEST_SUCCESS;
	}

	return ret;
}

struct uint32list_iter {
	struct memiter mem_it;
};

static enum manifest_return_code read_uint32list(const struct fdt_node *node,
						 const char *property,
						 struct uint32list_iter *out)
{
	struct memiter data;

	if (!fdt_read_property(node, property, &data)) {
		memiter_init(&out->mem_it, NULL, 0);
		return MANIFEST_ERROR_PROPERTY_NOT_FOUND;
	}

	if ((memiter_size(&data) % sizeof(uint32_t)) != 0) {
		return MANIFEST_ERROR_MALFORMED_INTEGER_LIST;
	}

	out->mem_it = data;
	return MANIFEST_SUCCESS;
}

static enum manifest_return_code read_optional_uint32list(
	const struct fdt_node *node, const char *property,
	struct uint32list_iter *out)
{
	enum manifest_return_code ret = read_uint32list(node, property, out);

	if (ret == MANIFEST_ERROR_PROPERTY_NOT_FOUND) {
		return MANIFEST_SUCCESS;
	}
	return ret;
}

static bool uint32list_has_next(const struct uint32list_iter *list)
{
	return memiter_size(&list->mem_it) > 0;
}

static enum manifest_return_code uint32list_get_next(
	struct uint32list_iter *list, uint32_t *out)
{
	uint64_t num;

	CHECK(uint32list_has_next(list));
	if (!fdt_parse_number(&list->mem_it, sizeof(uint32_t), &num)) {
		return MANIFEST_ERROR_MALFORMED_INTEGER;
	}

	*out = (uint32_t)num;
	return MANIFEST_SUCCESS;
}

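/**
 * Parses the properties shared by all VM nodes: "is_ffa_partition",
 * "hyp_loaded", "debug_name", the SMC whitelist and, for secondary VMs,
 * the memory size, vCPU count and FDT filename.
 */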
static enum manifest_return_code parse_vm_common(const struct fdt_node *node,
						 struct manifest_vm *vm,
						 ffa_vm_id_t vm_id)
{
	struct uint32list_iter smcs;
	size_t idx;

	TRY(read_bool(node, "is_ffa_partition", &vm->is_ffa_partition));

	TRY(read_bool(node, "hyp_loaded", &vm->is_hyp_loaded));

	TRY(read_string(node, "debug_name", &vm->debug_name));

	TRY(read_optional_uint32list(node, "smc_whitelist", &smcs));
	while (uint32list_has_next(&smcs) &&
	       vm->smc_whitelist.smc_count < MAX_SMCS) {
		idx = vm->smc_whitelist.smc_count++;
		TRY(uint32list_get_next(&smcs, &vm->smc_whitelist.smcs[idx]));
	}

	if (uint32list_has_next(&smcs)) {
		dlog_warning("%s SMC whitelist too long.\n", vm->debug_name);
	}

	TRY(read_bool(node, "smc_whitelist_permissive",
		      &vm->smc_whitelist.permissive));

	if (vm_id != HF_PRIMARY_VM_ID) {
		TRY(read_uint64(node, "mem_size", &vm->secondary.mem_size));
		TRY(read_uint16(node, "vcpu_count", &vm->secondary.vcpu_count));
		TRY(read_optional_string(node, "fdt_filename",
					 &vm->secondary.fdt_filename));
	}

	return MANIFEST_SUCCESS;
}

static enum manifest_return_code parse_vm(struct fdt_node *node,
					  struct manifest_vm *vm,
					  ffa_vm_id_t vm_id)
{
	TRY(read_optional_string(node, "kernel_filename",
				 &vm->kernel_filename));

	if (vm_id == HF_PRIMARY_VM_ID) {
		TRY(read_optional_string(node, "ramdisk_filename",
					 &vm->primary.ramdisk_filename));
		TRY(read_optional_uint64(node, "boot_address",
					 MANIFEST_INVALID_ADDRESS,
					 &vm->primary.boot_address));
	}
	TRY(read_optional_uint8(node, "exception-level", (uint8_t)EL1,
				(uint8_t *)&vm->partition.run_time_el));

	return MANIFEST_SUCCESS;
}

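/**
 * Checks that a memory region from the manifest is non-empty, page aligned
 * and does not overlap any region recorded so far, then records its range.
 */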
static enum manifest_return_code check_and_record_mem_regions(
	uintptr_t base_address, uint32_t page_count)
{
	uintptr_t limit =
		base_address + ((uintptr_t)page_count * PAGE_SIZE) - 1U;

	if (page_count == 0U) {
		dlog_error(
			"Empty memory region defined with base address: %#x.\n",
			base_address);
		return MANIFEST_ERROR_MEM_REGION_EMPTY;
	}

	if (!is_aligned(base_address, PAGE_SIZE)) {
		dlog_error("base_address (%#x) is not aligned to page size.\n",
			   base_address);
		return MANIFEST_ERROR_MEM_REGION_UNALIGNED;
	}

	for (size_t i = 0; i < allocated_mem_regions_index; i++) {
		uintptr_t mem_region_base = manifest_data->mem_regions[i].base;
		uintptr_t mem_region_limit =
			manifest_data->mem_regions[i].limit;

		/* Two ranges overlap iff each starts before the other ends. */
		if (base_address <= mem_region_limit &&
		    limit >= mem_region_base) {
			dlog_error(
				"Overlapping memory regions\n"
				"New Region %#x - %#x\n"
				"Overlapping region %#x - %#x\n",
				base_address, limit, mem_region_base,
				mem_region_limit);
			return MANIFEST_ERROR_MEM_REGION_OVERLAP;
		}
	}

	manifest_data->mem_regions[allocated_mem_regions_index].base =
		base_address;
	manifest_data->mem_regions[allocated_mem_regions_index].limit = limit;
	allocated_mem_regions_index++;

	return MANIFEST_SUCCESS;
}

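/**
 * Parses the "memory-regions" node of an FF-A partition manifest and, when
 * a region's phandle matches the partition's RX or TX phandle, binds that
 * region to the corresponding RX/TX buffer.
 */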
static enum manifest_return_code parse_ffa_memory_region_node(
	struct fdt_node *mem_node, struct memory_region *mem_regions,
	uint16_t *count, struct rx_tx *rxtx)
{
	uint32_t phandle;
	uint16_t i = 0;

	dlog_verbose(" Partition memory regions\n");

	if (!fdt_is_compatible(mem_node, "arm,ffa-manifest-memory-regions")) {
		return MANIFEST_ERROR_NOT_COMPATIBLE;
	}

	if (!fdt_first_child(mem_node)) {
		return MANIFEST_ERROR_MEMORY_REGION_NODE_EMPTY;
	}

	do {
		dlog_verbose(" Memory Region[%u]\n", i);

		TRY(read_optional_string(mem_node, "description",
					 &mem_regions[i].name));
		dlog_verbose(" Name: %s\n",
			     string_data(&mem_regions[i].name));

		TRY(read_uint64(mem_node, "base-address",
				&mem_regions[i].base_address));
		dlog_verbose(" Base address: %#x\n",
			     mem_regions[i].base_address);

		TRY(read_uint32(mem_node, "pages-count",
				&mem_regions[i].page_count));
		dlog_verbose(" Pages_count: %u\n",
			     mem_regions[i].page_count);

		TRY(check_and_record_mem_regions(mem_regions[i].base_address,
						 mem_regions[i].page_count));

		TRY(read_uint32(mem_node, "attributes",
				&mem_regions[i].attributes));

		/*
		 * Check RWX permission attributes.
		 * Security attribute is checked at load phase.
		 */
		uint32_t permissions = mem_regions[i].attributes &
				       (MANIFEST_REGION_ATTR_READ |
					MANIFEST_REGION_ATTR_WRITE |
					MANIFEST_REGION_ATTR_EXEC);
		if (permissions != MANIFEST_REGION_ATTR_READ &&
		    permissions != (MANIFEST_REGION_ATTR_READ |
				    MANIFEST_REGION_ATTR_WRITE) &&
		    permissions != (MANIFEST_REGION_ATTR_READ |
				    MANIFEST_REGION_ATTR_EXEC)) {
			return MANIFEST_ERROR_INVALID_MEM_PERM;
		}

		/* Filter memory region attributes. */
		mem_regions[i].attributes &= MANIFEST_REGION_ALL_ATTR_MASK;

		dlog_verbose(" Attributes: %#x\n",
			     mem_regions[i].attributes);

		if (rxtx->available) {
			TRY(read_optional_uint32(
				mem_node, "phandle",
				(uint32_t)MANIFEST_INVALID_ADDRESS, &phandle));
			if (phandle == rxtx->rx_phandle) {
				dlog_verbose(" Assigned as RX buffer\n");
				rxtx->rx_buffer = &mem_regions[i];
			} else if (phandle == rxtx->tx_phandle) {
				dlog_verbose(" Assigned as TX buffer\n");
				rxtx->tx_buffer = &mem_regions[i];
			}
		}

		i++;
	} while (fdt_next_sibling(mem_node) &&
		 (i < PARTITION_MAX_MEMORY_REGIONS));

	if (rxtx->available &&
	    (rxtx->rx_buffer->page_count != rxtx->tx_buffer->page_count)) {
		return MANIFEST_ERROR_RXTX_SIZE_MISMATCH;
	}

	*count = i;

	return MANIFEST_SUCCESS;
}

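/**
 * Returns the interrupt_info entry of the given device region matching
 * `intid`, or NULL if the region does not declare that interrupt ID.
 */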
static struct interrupt_info *device_region_get_interrupt_info(
	struct device_region *dev_regions, uint32_t intid)
{
	for (uint32_t i = 0; i < ARRAY_SIZE(dev_regions->interrupts); i++) {
		if (dev_regions->interrupts[i].id == intid) {
			return &(dev_regions->interrupts[i]);
		}
	}
	return NULL;
}

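/**
 * Parses the "device-regions" node of an FF-A partition manifest, including
 * each region's interrupts, interrupt targets, SMMU and stream IDs,
 * attributes and exclusive-access flag.
 */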
static enum manifest_return_code parse_ffa_device_region_node(
	struct fdt_node *dev_node, struct device_region *dev_regions,
	uint16_t *count)
{
	struct uint32list_iter list;
	uint16_t i = 0;
	uint32_t j = 0;
	struct interrupt_bitmap allocated_intids = manifest_data->intids;

	dlog_verbose(" Partition Device Regions\n");

	if (!fdt_is_compatible(dev_node, "arm,ffa-manifest-device-regions")) {
		return MANIFEST_ERROR_NOT_COMPATIBLE;
	}

	if (!fdt_first_child(dev_node)) {
		return MANIFEST_ERROR_DEVICE_REGION_NODE_EMPTY;
	}

	do {
		dlog_verbose(" Device Region[%u]\n", i);

		TRY(read_optional_string(dev_node, "description",
					 &dev_regions[i].name));
		dlog_verbose(" Name: %s\n",
			     string_data(&dev_regions[i].name));

		TRY(read_uint64(dev_node, "base-address",
				&dev_regions[i].base_address));
		dlog_verbose(" Base address: %#x\n",
			     dev_regions[i].base_address);

		TRY(read_uint32(dev_node, "pages-count",
				&dev_regions[i].page_count));
		dlog_verbose(" Pages_count: %u\n",
			     dev_regions[i].page_count);

		TRY(read_uint32(dev_node, "attributes",
				&dev_regions[i].attributes));

		/*
		 * Check RWX permission attributes.
		 * Security attribute is checked at load phase.
		 */
		uint32_t permissions = dev_regions[i].attributes &
				       (MANIFEST_REGION_ATTR_READ |
					MANIFEST_REGION_ATTR_WRITE |
					MANIFEST_REGION_ATTR_EXEC);

		if (permissions != MANIFEST_REGION_ATTR_READ &&
		    permissions != (MANIFEST_REGION_ATTR_READ |
				    MANIFEST_REGION_ATTR_WRITE)) {
			return MANIFEST_ERROR_INVALID_MEM_PERM;
		}

		/* Filter device region attributes. */
		dev_regions[i].attributes = dev_regions[i].attributes &
					    MANIFEST_REGION_ALL_ATTR_MASK;

		dlog_verbose(" Attributes: %#x\n",
			     dev_regions[i].attributes);

		TRY(read_optional_uint32list(dev_node, "interrupts", &list));
		dlog_verbose(" Interrupt List:\n");
		j = 0;
		while (uint32list_has_next(&list) &&
		       j < PARTITION_MAX_INTERRUPTS_PER_DEVICE) {
			uint32_t intid;

			TRY(uint32list_get_next(
				&list, &dev_regions[i].interrupts[j].id));
			intid = dev_regions[i].interrupts[j].id;

			dlog_verbose(" ID = %u\n", intid);

			if (interrupt_bitmap_get_value(&allocated_intids,
						       intid) == 1U) {
				return MANIFEST_ERROR_INTERRUPT_ID_REPEATED;
			}

			interrupt_bitmap_set_value(&allocated_intids, intid);

			if (uint32list_has_next(&list)) {
				TRY(uint32list_get_next(&list,
							&dev_regions[i]
								 .interrupts[j]
								 .attributes));
			} else {
				return MANIFEST_ERROR_MALFORMED_INTEGER_LIST;
			}

			dev_regions[i].interrupts[j].mpidr_valid = false;
			dev_regions[i].interrupts[j].mpidr = 0;

			dlog_verbose(" attributes = %u\n",
				     dev_regions[i].interrupts[j].attributes);
			j++;
		}

		dev_regions[i].interrupt_count = j;
		if (j == 0) {
			dlog_verbose(" Empty\n");
		} else {
			TRY(read_optional_uint32list(
				dev_node, "interrupts-target", &list));
			dlog_verbose(" Interrupt Target List:\n");

			while (uint32list_has_next(&list)) {
				uint32_t intid;
				uint64_t mpidr = 0;
				uint32_t mpidr_lower = 0;
				uint32_t mpidr_upper = 0;
				struct interrupt_info *info = NULL;

				TRY(uint32list_get_next(&list, &intid));

				dlog_verbose(" ID = %u\n", intid);

				if (interrupt_bitmap_get_value(
					    &allocated_intids, intid) != 1U) {
					return MANIFEST_ERROR_INTERRUPT_ID_NOT_IN_LIST;
				}

				TRY(uint32list_get_next(&list, &mpidr_upper));
				TRY(uint32list_get_next(&list, &mpidr_lower));
				mpidr = mpidr_upper;
				mpidr <<= 32;
				mpidr |= mpidr_lower;

				info = device_region_get_interrupt_info(
					&dev_regions[i], intid);
				/*
				 * We should find info since
				 * interrupt_bitmap_get_value already ensures
				 * that we saw the interrupt and allocated ids
				 * for it.
				 */
				assert(info != NULL);
				info->mpidr = mpidr;
				info->mpidr_valid = true;
				dlog_verbose(" MPIDR = %#x\n", mpidr);
			}
		}

		TRY(read_optional_uint32(dev_node, "smmu-id",
					 MANIFEST_INVALID_ID,
					 &dev_regions[i].smmu_id));
		if (dev_regions[i].smmu_id != MANIFEST_INVALID_ID) {
			dlog_verbose(" smmu-id: %u\n",
				     dev_regions[i].smmu_id);
		}

		TRY(read_optional_uint32list(dev_node, "stream-ids", &list));
		dlog_verbose(" Stream IDs assigned:\n");

		j = 0;
		while (uint32list_has_next(&list) &&
		       j < PARTITION_MAX_STREAMS_PER_DEVICE) {
			TRY(uint32list_get_next(&list,
						&dev_regions[i].stream_ids[j]));
			dlog_verbose(" %u\n",
				     dev_regions[i].stream_ids[j]);
			j++;
		}
		if (j == 0) {
			dlog_verbose(" None\n");
		}
		dev_regions[i].stream_count = j;

		TRY(read_bool(dev_node, "exclusive-access",
			      &dev_regions[i].exclusive_access));
		dlog_verbose(" Exclusive_access: %u\n",
			     dev_regions[i].exclusive_access);

		i++;
	} while (fdt_next_sibling(dev_node) &&
		 (i < PARTITION_MAX_DEVICE_REGIONS));

	*count = i;

	return MANIFEST_SUCCESS;
}

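/**
 * Sanity checks the parsed FF-A partition manifest against what this build
 * supports: FF-A version, translation granule, execution state, exception
 * level, messaging methods, interrupt limits and the boot GP register.
 */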
static enum manifest_return_code sanity_check_ffa_manifest(
	struct manifest_vm *vm)
{
	uint16_t ffa_version_major;
	uint16_t ffa_version_minor;
	enum manifest_return_code ret_code = MANIFEST_SUCCESS;
	const char *error_string = "specified in manifest is unsupported";
	uint32_t k = 0;

	/* ensure that the SPM version is compatible */
	ffa_version_major = (vm->partition.ffa_version & 0xffff0000) >>
			    FFA_VERSION_MAJOR_OFFSET;
	ffa_version_minor = vm->partition.ffa_version & 0xffff;

	if (ffa_version_major != FFA_VERSION_MAJOR ||
	    ffa_version_minor > FFA_VERSION_MINOR) {
		dlog_error("FF-A partition manifest version %s: %u.%u\n",
			   error_string, ffa_version_major, ffa_version_minor);
		ret_code = MANIFEST_ERROR_NOT_COMPATIBLE;
	}

	if (vm->partition.xlat_granule != PAGE_4KB) {
		dlog_error("Translation granule %s: %u\n", error_string,
			   vm->partition.xlat_granule);
		ret_code = MANIFEST_ERROR_NOT_COMPATIBLE;
	}

	if (vm->partition.execution_state != AARCH64) {
		dlog_error("Execution state %s: %u\n", error_string,
			   vm->partition.execution_state);
		ret_code = MANIFEST_ERROR_NOT_COMPATIBLE;
	}

	if (vm->partition.run_time_el != EL1 &&
	    vm->partition.run_time_el != S_EL1 &&
	    vm->partition.run_time_el != S_EL0) {
		dlog_error("Exception level %s: %d\n", error_string,
			   vm->partition.run_time_el);
		ret_code = MANIFEST_ERROR_NOT_COMPATIBLE;
	}

	if ((vm->partition.messaging_method &
	     ~(FFA_PARTITION_DIRECT_REQ_RECV | FFA_PARTITION_DIRECT_REQ_SEND |
	       FFA_PARTITION_INDIRECT_MSG)) != 0U) {
		dlog_error("Messaging method %s: %x\n", error_string,
			   vm->partition.messaging_method);
		ret_code = MANIFEST_ERROR_NOT_COMPATIBLE;
	}

	if (vm->partition.run_time_el == S_EL0 &&
	    vm->partition.execution_ctx_count != 1) {
		dlog_error(
			"Exception level and execution context count %s: %d "
			"%d\n",
			error_string, vm->partition.run_time_el,
			vm->partition.execution_ctx_count);
		ret_code = MANIFEST_ERROR_NOT_COMPATIBLE;
	}

	for (uint16_t i = 0; i < vm->partition.dev_region_count; i++) {
		struct device_region dev_region;

		dev_region = vm->partition.dev_regions[i];

		if (dev_region.interrupt_count >
		    PARTITION_MAX_INTERRUPTS_PER_DEVICE) {
			dlog_error(
				"Interrupt count for device region exceeds "
				"limit.\n");
			ret_code = MANIFEST_ERROR_NOT_COMPATIBLE;
			continue;
		}

		for (uint8_t j = 0; j < dev_region.interrupt_count; j++) {
			k++;
			if (k > VM_MANIFEST_MAX_INTERRUPTS) {
				dlog_error(
					"Interrupt count for VM exceeds "
					"limit.\n");
				ret_code = MANIFEST_ERROR_NOT_COMPATIBLE;
				continue;
			}
		}
	}

	/* GP register is restricted to one of x0 - x3. */
	if (vm->partition.gp_register_num != -1 &&
	    vm->partition.gp_register_num > 3) {
		dlog_error("GP register number %s: %u\n", error_string,
			   vm->partition.gp_register_num);
		ret_code = MANIFEST_ERROR_NOT_COMPATIBLE;
	}

	return ret_code;
}

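/**
 * Parses an FF-A partition manifest from `fdt` into `vm`. When the manifest
 * contains a boot-info node and `boot_info_node` is not NULL, the node is
 * returned through `boot_info_node`.
 */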
enum manifest_return_code parse_ffa_manifest(struct fdt *fdt,
					     struct manifest_vm *vm,
					     struct fdt_node *boot_info_node)
{
	unsigned int i = 0;
	struct uint32list_iter uuid;
	uint32_t uuid_word;
	struct fdt_node root;
	struct fdt_node ffa_node;
	struct string rxtx_node_name = STRING_INIT("rx_tx-info");
	struct string mem_region_node_name = STRING_INIT("memory-regions");
	struct string dev_region_node_name = STRING_INIT("device-regions");
	struct string boot_info_node_name = STRING_INIT("boot-info");
	bool managed_exit_field_present = false;

	if (!fdt_find_node(fdt, "/", &root)) {
		return MANIFEST_ERROR_NO_ROOT_NODE;
	}

	/* Check "compatible" property. */
	if (!fdt_is_compatible(&root, "arm,ffa-manifest-1.0")) {
		return MANIFEST_ERROR_NOT_COMPATIBLE;
	}

	TRY(read_uint32(&root, "ffa-version", &vm->partition.ffa_version));
	dlog_verbose(" Expected FF-A version %u.%u\n",
		     vm->partition.ffa_version >> 16,
		     vm->partition.ffa_version & 0xffff);

	TRY(read_uint32list(&root, "uuid", &uuid));

	while (uint32list_has_next(&uuid) && i < 4) {
		TRY(uint32list_get_next(&uuid, &uuid_word));
		vm->partition.uuid.uuid[i] = uuid_word;
		i++;
	}
	dlog_verbose(" UUID %#x-%x-%x-%x\n", vm->partition.uuid.uuid[0],
		     vm->partition.uuid.uuid[1], vm->partition.uuid.uuid[2],
		     vm->partition.uuid.uuid[3]);

	TRY(read_uint16(&root, "execution-ctx-count",
			&vm->partition.execution_ctx_count));
	dlog_verbose(" Number of execution context %u\n",
		     vm->partition.execution_ctx_count);

	TRY(read_uint8(&root, "exception-level",
		       (uint8_t *)&vm->partition.run_time_el));
	dlog_verbose(" Run-time EL %u\n", vm->partition.run_time_el);

	TRY(read_uint8(&root, "execution-state",
		       (uint8_t *)&vm->partition.execution_state));
	dlog_verbose(" Execution state %u\n", vm->partition.execution_state);

	TRY(read_optional_uint64(&root, "load-address", 0,
				 &vm->partition.load_addr));
	dlog_verbose(" Load address %#x\n", vm->partition.load_addr);

	TRY(read_optional_uint64(&root, "entrypoint-offset", 0,
				 &vm->partition.ep_offset));
	dlog_verbose(" Entry point offset %#x\n", vm->partition.ep_offset);

	TRY(read_optional_uint32(&root, "gp-register-num",
				 DEFAULT_BOOT_GP_REGISTER,
				 &vm->partition.gp_register_num));
	dlog_verbose(" Boot GP register: %#x\n",
		     vm->partition.gp_register_num);

	TRY(read_optional_uint16(&root, "boot-order", DEFAULT_BOOT_ORDER,
				 &vm->partition.boot_order));
	dlog_verbose(" Boot order %#u\n", vm->partition.boot_order);

	TRY(read_optional_uint8(&root, "xlat-granule", 0,
				(uint8_t *)&vm->partition.xlat_granule));
	dlog_verbose(" Translation granule %u\n", vm->partition.xlat_granule);

	ffa_node = root;
	if (fdt_find_child(&ffa_node, &rxtx_node_name)) {
		if (!fdt_is_compatible(&ffa_node,
				       "arm,ffa-manifest-rx_tx-buffer")) {
			return MANIFEST_ERROR_NOT_COMPATIBLE;
		}

		/*
		 * Read only phandles for now, it will be used to update buffers
		 * while parsing memory regions.
		 */
		TRY(read_uint32(&ffa_node, "rx-buffer",
				&vm->partition.rxtx.rx_phandle));

		TRY(read_uint32(&ffa_node, "tx-buffer",
				&vm->partition.rxtx.tx_phandle));

		vm->partition.rxtx.available = true;
	}

	TRY(read_uint8(&root, "messaging-method",
		       (uint8_t *)&vm->partition.messaging_method));
	dlog_verbose(" Messaging method %u\n", vm->partition.messaging_method);

	TRY(read_bool(&root, "managed-exit", &managed_exit_field_present));

	TRY(read_optional_uint8(
		&root, "ns-interrupts-action", NS_ACTION_SIGNALED,
		(uint8_t *)&vm->partition.ns_interrupts_action));

	/*
	 * An SP manifest can specify one of the fields listed below:
	 * `managed-exit`: Introduced in FF-A v1.0 spec.
	 * `ns-interrupts-action`: Introduced in FF-A v1.1 EAC0 spec.
	 * If both are missing from the manifest, the default response is
	 * NS_ACTION_SIGNALED.
	 */
	if (managed_exit_field_present) {
		vm->partition.ns_interrupts_action = NS_ACTION_ME;
	}

	if (vm->partition.ns_interrupts_action != NS_ACTION_QUEUED &&
	    vm->partition.ns_interrupts_action != NS_ACTION_ME &&
	    vm->partition.ns_interrupts_action != NS_ACTION_SIGNALED) {
		return MANIFEST_ERROR_ILLEGAL_NS_INT_ACTION;
	}

	dlog_verbose(
		"NS Interrupts %s\n",
		(vm->partition.ns_interrupts_action == NS_ACTION_QUEUED)
			? "Queued"
		: (vm->partition.ns_interrupts_action == NS_ACTION_SIGNALED)
			? "Signaled"
			: "Managed exit");

	if (vm->partition.ns_interrupts_action == NS_ACTION_ME) {
		/* Managed exit only supported by S_EL1 partitions. */
		if (vm->partition.run_time_el != S_EL1) {
			dlog_error(
				"Managed exit cannot be supported by this "
				"partition\n");
			return MANIFEST_ERROR_ILLEGAL_NS_INT_ACTION;
		}

		TRY(read_bool(&root, "managed-exit-virq",
			      &vm->partition.me_signal_virq));
		if (vm->partition.me_signal_virq) {
			dlog_verbose(" Managed Exit signaled through vIRQ\n");
		}
	}

	TRY(read_bool(&root, "notification-support",
		      &vm->partition.notification_support));
	if (vm->partition.notification_support) {
		dlog_verbose(" Notifications Receipt Supported\n");
	}

	TRY(read_optional_uint8(
		&root, "other-s-interrupts-action", OTHER_S_INT_ACTION_SIGNALED,
		(uint8_t *)&vm->partition.other_s_interrupts_action));

	if (vm->partition.other_s_interrupts_action ==
	    OTHER_S_INT_ACTION_QUEUED) {
		if (vm->partition.ns_interrupts_action != NS_ACTION_QUEUED) {
			dlog_error(
				"Choice of the fields 'ns-interrupts-action' "
				"and 'other-s-interrupts-action' not "
				"compatible\n");
			return MANIFEST_ERROR_NOT_COMPATIBLE;
		}
	} else if (vm->partition.other_s_interrupts_action >
		   OTHER_S_INT_ACTION_SIGNALED) {
		dlog_error(
			"Illegal value specified for the field"
			" 'other-s-interrupts-action': %u\n",
			vm->partition.other_s_interrupts_action);
		return MANIFEST_ERROR_ILLEGAL_OTHER_S_INT_ACTION;
	}

	/* Parse boot info node. */
	if (boot_info_node != NULL) {
		ffa_node = root;
		vm->partition.boot_info =
			fdt_find_child(&ffa_node, &boot_info_node_name);
		if (vm->partition.boot_info) {
			*boot_info_node = ffa_node;
		}
	} else {
		vm->partition.boot_info = false;
	}

	TRY(read_optional_uint32(
		&root, "power-management-messages",
		MANIFEST_POWER_MANAGEMENT_CPU_OFF_SUPPORTED |
			MANIFEST_POWER_MANAGEMENT_CPU_ON_SUPPORTED,
		&vm->partition.power_management));
	vm->partition.power_management &= MANIFEST_POWER_MANAGEMENT_ALL_MASK;
	if (vm->partition.execution_ctx_count == 1 ||
	    vm->partition.run_time_el == S_EL0) {
		vm->partition.power_management =
			MANIFEST_POWER_MANAGEMENT_NONE_MASK;
	}

	dlog_verbose(" Power management messages %#x\n",
		     vm->partition.power_management);

	/* Parse memory-regions */
	ffa_node = root;
	if (fdt_find_child(&ffa_node, &mem_region_node_name)) {
		TRY(parse_ffa_memory_region_node(
			&ffa_node, vm->partition.mem_regions,
			&vm->partition.mem_region_count, &vm->partition.rxtx));
	}
	dlog_verbose(" Total %u memory regions found\n",
		     vm->partition.mem_region_count);

	/* Parse Device-regions */
	ffa_node = root;
	if (fdt_find_child(&ffa_node, &dev_region_node_name)) {
		TRY(parse_ffa_device_region_node(
			&ffa_node, vm->partition.dev_regions,
			&vm->partition.dev_region_count));
	}
	dlog_verbose(" Total %u device regions found\n",
		     vm->partition.dev_region_count);

	return sanity_check_ffa_manifest(vm);
}

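/**
 * Maps the secure partition package located at the node's "load_address",
 * parses the partition manifest embedded in it and, if requested by the
 * manifest, prepares the FF-A boot information for the partition.
 */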
static enum manifest_return_code parse_ffa_partition_package(
	struct mm_stage1_locked stage1_locked, struct fdt_node *node,
	struct manifest_vm *vm, ffa_vm_id_t vm_id, struct mpool *ppool)
{
	enum manifest_return_code ret = MANIFEST_ERROR_NOT_COMPATIBLE;
	uintpaddr_t load_address;
	struct sp_pkg_header header;
	struct fdt sp_fdt;
	vaddr_t pkg_start;
	vaddr_t manifest_address;
	struct fdt_node boot_info_node;

	/*
	 * This must have been hinted as being an FF-A partition,
	 * return straight with failure if this is not the case.
	 */
	if (!vm->is_ffa_partition) {
		return ret;
	}

	TRY(read_uint64(node, "load_address", &load_address));
	if (!is_aligned(load_address, PAGE_SIZE)) {
		return MANIFEST_ERROR_NOT_COMPATIBLE;
	}

	assert(load_address != 0U);

	if (!sp_pkg_init(stage1_locked, pa_init(load_address), &header,
			 ppool)) {
		return ret;
	}

	pkg_start = va_init(load_address);

	if (vm_id != HF_PRIMARY_VM_ID &&
	    sp_pkg_get_mem_size(&header) >= vm->secondary.mem_size) {
		dlog_error("Invalid package header or DT size.\n");
		goto out;
	}

	manifest_address = va_add(va_init(load_address), header.pm_offset);
	if (!fdt_init_from_ptr(&sp_fdt, ptr_from_va(manifest_address),
			       header.pm_size)) {
		dlog_error("FDT failed validation.\n");
		goto out;
	}

	ret = parse_ffa_manifest(&sp_fdt, vm, &boot_info_node);
	if (ret != MANIFEST_SUCCESS) {
		dlog_error("Error parsing partition manifest: %s.\n",
			   manifest_strerror(ret));
		goto out;
	}

	if (vm->partition.load_addr != load_address) {
		dlog_warning(
			"Partition's load address at its manifest differs"
			" from specified in partition's package.\n");
		vm->partition.load_addr = load_address;
	}

	if (vm->partition.gp_register_num != DEFAULT_BOOT_GP_REGISTER) {
		if (header.version == SP_PKG_HEADER_VERSION_2 &&
		    vm->partition.boot_info &&
		    !ffa_boot_info_node(&boot_info_node, pkg_start, &header)) {
			dlog_error("Failed to process boot information.\n");
		}
	}

out:
	sp_pkg_deinit(stage1_locked, pkg_start, &header, ppool);
	return ret;
}

/**
 * Parse manifest from FDT.
 */
enum manifest_return_code manifest_init(struct mm_stage1_locked stage1_locked,
					struct manifest **manifest_ret,
					struct memiter *manifest_fdt,
					struct mpool *ppool)
{
	struct manifest *manifest;
	struct string vm_name;
	struct fdt fdt;
	struct fdt_node hyp_node;
	size_t i = 0;
	bool found_primary_vm = false;

	/* Allocate space in the ppool for the manifest data. */
	if (!manifest_data_init(ppool)) {
		panic("Unable to allocate manifest data.\n");
	}

	manifest = &manifest_data->manifest;
	*manifest_ret = manifest;

	if (!fdt_init_from_memiter(&fdt, manifest_fdt)) {
		return MANIFEST_ERROR_FILE_SIZE; /* TODO */
	}

	/* Find hypervisor node. */
	if (!fdt_find_node(&fdt, "/hypervisor", &hyp_node)) {
		return MANIFEST_ERROR_NO_HYPERVISOR_FDT_NODE;
	}

	/* Check "compatible" property. */
	if (!fdt_is_compatible(&hyp_node, "hafnium,hafnium")) {
		return MANIFEST_ERROR_NOT_COMPATIBLE;
	}

	TRY(read_bool(&hyp_node, "ffa_tee_enabled",
		      &manifest->ffa_tee_enabled));

	/* Iterate over reserved VM IDs and check no such nodes exist. */
	for (i = HF_VM_ID_BASE; i < HF_VM_ID_OFFSET; i++) {
		ffa_vm_id_t vm_id = (ffa_vm_id_t)i - HF_VM_ID_BASE;
		struct fdt_node vm_node = hyp_node;

		generate_vm_node_name(&vm_name, vm_id);
		if (fdt_find_child(&vm_node, &vm_name)) {
			return MANIFEST_ERROR_RESERVED_VM_ID;
		}
	}

	/* Iterate over VM nodes until we find one that does not exist. */
	for (i = 0; i <= MAX_VMS; ++i) {
		ffa_vm_id_t vm_id = HF_VM_ID_OFFSET + i;
		struct fdt_node vm_node = hyp_node;

		generate_vm_node_name(&vm_name, vm_id - HF_VM_ID_BASE);
		if (!fdt_find_child(&vm_node, &vm_name)) {
			break;
		}

		if (i == MAX_VMS) {
			return MANIFEST_ERROR_TOO_MANY_VMS;
		}

		if (vm_id == HF_PRIMARY_VM_ID) {
			CHECK(found_primary_vm == false); /* sanity check */
			found_primary_vm = true;
		}

		manifest->vm_count = i + 1;

		TRY(parse_vm_common(&vm_node, &manifest->vm[i], vm_id));

		CHECK(!manifest->vm[i].is_hyp_loaded ||
		      manifest->vm[i].is_ffa_partition);

		if (manifest->vm[i].is_ffa_partition &&
		    !manifest->vm[i].is_hyp_loaded) {
			TRY(parse_ffa_partition_package(stage1_locked, &vm_node,
							&manifest->vm[i], vm_id,
							ppool));
		} else {
			TRY(parse_vm(&vm_node, &manifest->vm[i], vm_id));
		}
	}

	if (!found_primary_vm && vm_id_is_current_world(HF_PRIMARY_VM_ID)) {
		return MANIFEST_ERROR_NO_PRIMARY_VM;
	}

	return MANIFEST_SUCCESS;
}

/**
 * Free manifest data resources, called once manifest parsing has
 * completed and VMs are loaded.
 */
void manifest_deinit(struct mpool *ppool)
{
	manifest_data_deinit(ppool);
}

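/** Translates a manifest return code into a human-readable error string. */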
const char *manifest_strerror(enum manifest_return_code ret_code)
{
	switch (ret_code) {
	case MANIFEST_SUCCESS:
		return "Success";
	case MANIFEST_ERROR_FILE_SIZE:
		return "Total size in header does not match file size";
	case MANIFEST_ERROR_MALFORMED_DTB:
		return "Malformed device tree blob";
	case MANIFEST_ERROR_NO_ROOT_NODE:
		return "Could not find root node in manifest";
	case MANIFEST_ERROR_NO_HYPERVISOR_FDT_NODE:
		return "Could not find \"hypervisor\" node in manifest";
	case MANIFEST_ERROR_NOT_COMPATIBLE:
		return "Hypervisor manifest entry not compatible with Hafnium";
	case MANIFEST_ERROR_RESERVED_VM_ID:
		return "Manifest defines a VM with a reserved ID";
	case MANIFEST_ERROR_NO_PRIMARY_VM:
		return "Manifest does not contain a primary VM entry";
	case MANIFEST_ERROR_TOO_MANY_VMS:
		return "Manifest specifies more VMs than Hafnium has "
		       "statically allocated space for";
	case MANIFEST_ERROR_PROPERTY_NOT_FOUND:
		return "Property not found";
	case MANIFEST_ERROR_MALFORMED_STRING:
		return "Malformed string property";
	case MANIFEST_ERROR_STRING_TOO_LONG:
		return "String too long";
	case MANIFEST_ERROR_MALFORMED_INTEGER:
		return "Malformed integer property";
	case MANIFEST_ERROR_INTEGER_OVERFLOW:
		return "Integer overflow";
	case MANIFEST_ERROR_MALFORMED_INTEGER_LIST:
		return "Malformed integer list property";
	case MANIFEST_ERROR_MALFORMED_BOOLEAN:
		return "Malformed boolean property";
	case MANIFEST_ERROR_MEMORY_REGION_NODE_EMPTY:
		return "Memory-region node should have at least one entry";
	case MANIFEST_ERROR_DEVICE_REGION_NODE_EMPTY:
		return "Device-region node should have at least one entry";
	case MANIFEST_ERROR_RXTX_SIZE_MISMATCH:
		return "RX and TX buffers should be of same size";
	case MANIFEST_ERROR_MEM_REGION_EMPTY:
		return "Memory region should have at least one page";
	case MANIFEST_ERROR_MEM_REGION_OVERLAP:
		return "Memory region overlaps with one already allocated";
	case MANIFEST_ERROR_MEM_REGION_UNALIGNED:
		return "Memory region is not aligned to a page boundary";
	case MANIFEST_ERROR_INVALID_MEM_PERM:
		return "Memory permission should be RO, RW or RX";
	case MANIFEST_ERROR_ARGUMENTS_LIST_EMPTY:
		return "Arguments-list node should have at least one argument";
	case MANIFEST_ERROR_INTERRUPT_ID_REPEATED:
		return "Interrupt ID already assigned to another endpoint";
	case MANIFEST_ERROR_ILLEGAL_NS_INT_ACTION:
		return "Illegal value specified for the field: Action in "
		       "response to NS Interrupt";
	case MANIFEST_ERROR_INTERRUPT_ID_NOT_IN_LIST:
		return "Interrupt ID is not in the list of interrupts";
	case MANIFEST_ERROR_ILLEGAL_OTHER_S_INT_ACTION:
		return "Illegal value specified for the field: Action in "
		       "response to Other-S Interrupt";
	}

	panic("Unexpected manifest return code.");
}