blob: 91bdae7a42e48c4796adb6630757ad7c88a73800 [file] [log] [blame]
David Brazdil7a462ec2019-08-15 12:27:47 +01001/*
2 * Copyright 2019 The Hafnium Authors.
3 *
Andrew Walbrane959ec12020-06-17 15:01:09 +01004 * Use of this source code is governed by a BSD-style
5 * license that can be found in the LICENSE file or at
6 * https://opensource.org/licenses/BSD-3-Clause.
David Brazdil7a462ec2019-08-15 12:27:47 +01007 */
8
9#include "hf/manifest.h"
10
J-Alvesd8a1d362023-03-08 11:15:28 +000011#include <stddef.h>
J-Alves77b6f4f2023-03-15 11:34:49 +000012#include <stdint.h>
J-Alvesd8a1d362023-03-08 11:15:28 +000013
J-Alves35315782022-01-25 17:58:32 +000014#include "hf/arch/types.h"
J-Alves77b6f4f2023-03-15 11:34:49 +000015#include "hf/arch/vmid_base.h"
J-Alves35315782022-01-25 17:58:32 +000016
David Brazdil7a462ec2019-08-15 12:27:47 +010017#include "hf/addr.h"
Daniel Boulbya2f8c662021-11-26 17:52:53 +000018#include "hf/assert.h"
J-Alves35315782022-01-25 17:58:32 +000019#include "hf/boot_info.h"
J-Alves77b6f4f2023-03-15 11:34:49 +000020#include "hf/boot_params.h"
David Brazdil7a462ec2019-08-15 12:27:47 +010021#include "hf/check.h"
Andrew Scullae9962e2019-10-03 16:51:16 +010022#include "hf/dlog.h"
J-Alves77b6f4f2023-03-15 11:34:49 +000023#include "hf/fdt.h"
Karl Meakin0e617d92024-04-05 12:55:22 +010024#include "hf/ffa.h"
Daniel Boulby3f295b12023-12-15 17:38:08 +000025#include "hf/layout.h"
J-Alves77b6f4f2023-03-15 11:34:49 +000026#include "hf/mm.h"
27#include "hf/mpool.h"
J-Alves2f86c1e2022-02-23 18:44:19 +000028#include "hf/sp_pkg.h"
David Brazdil7a462ec2019-08-15 12:27:47 +010029#include "hf/static_assert.h"
30#include "hf/std.h"
31
32#define TRY(expr) \
33 do { \
34 enum manifest_return_code ret_code = (expr); \
35 if (ret_code != MANIFEST_SUCCESS) { \
36 return ret_code; \
37 } \
38 } while (0)
39
David Brazdilb856be62020-03-25 10:14:55 +000040#define VM_ID_MAX (HF_VM_ID_OFFSET + MAX_VMS - 1)
41#define VM_ID_MAX_DIGITS (5)
42#define VM_NAME_EXTRA_CHARS (3) /* "vm" + number + '\0' */
43#define VM_NAME_MAX_SIZE (VM_ID_MAX_DIGITS + VM_NAME_EXTRA_CHARS)
44static_assert(VM_NAME_MAX_SIZE <= STRING_MAX_SIZE,
45 "VM name does not fit into a struct string.");
46static_assert(VM_ID_MAX <= 99999, "Insufficient VM_NAME_BUF_SIZE");
Olivier Deprez2a8ee342020-08-03 15:10:44 +020047static_assert((HF_OTHER_WORLD_ID > VM_ID_MAX) ||
48 (HF_OTHER_WORLD_ID < HF_VM_ID_BASE),
Andrew Walbran9daa57e2019-09-27 13:33:20 +010049 "TrustZone VM ID clashes with normal VM range.");
David Brazdil7a462ec2019-08-15 12:27:47 +010050
Kathleen Capella4a2a6e72023-04-21 14:43:26 -040051/* Bitmap to track boot order values in use. */
52#define BOOT_ORDER_ENTRY_BITS (sizeof(uint64_t) * 8)
53#define BOOT_ORDER_MAP_ENTRIES \
54 ((DEFAULT_BOOT_ORDER + (BOOT_ORDER_ENTRY_BITS - 1)) / \
55 BOOT_ORDER_ENTRY_BITS)
56
Daniel Boulby801f8ef2022-06-27 14:21:01 +010057/**
J-Alves596049f2023-03-15 11:40:24 +000058 * A struct to keep track of the partitions properties during early boot
59 * manifest parsing:
60 * - Interrupts ID.
61 * - Physical memory ranges.
Daniel Boulby801f8ef2022-06-27 14:21:01 +010062 */
struct manifest_data {
	/* The parsed manifest returned to the caller. */
	struct manifest manifest;
	/* Interrupt IDs tracked during early boot manifest parsing. */
	struct interrupt_bitmap intids;
	/*
	 * Allocate enough for the maximum amount of memory regions defined via
	 * the partitions manifest, and regions for each partition
	 * address-space.
	 */
	struct mem_range mem_regions[PARTITION_MAX_MEMORY_REGIONS * MAX_VMS +
				     PARTITION_MAX_DEVICE_REGIONS * MAX_VMS +
				     MAX_VMS];
	/* Number of entries of `mem_regions` currently in use. */
	size_t mem_regions_index;
	/* Bitmap of boot order values already assigned, one bit per value. */
	uint64_t boot_order_values[BOOT_ORDER_MAP_ENTRIES];
};
Olivier Deprez93644652022-09-09 11:01:12 +020077
Daniel Boulby801f8ef2022-06-27 14:21:01 +010078/**
Daniel Boulbya7e9e182022-06-27 14:21:01 +010079 * Calculate the number of entries in the ppool that are required to
Olivier Deprez93644652022-09-09 11:01:12 +020080 * store the manifest_data struct.
Daniel Boulby801f8ef2022-06-27 14:21:01 +010081 */
J-Alves596049f2023-03-15 11:40:24 +000082static const size_t manifest_data_ppool_entries =
Olivier Deprez93644652022-09-09 11:01:12 +020083 (align_up(sizeof(struct manifest_data), MM_PPOOL_ENTRY_SIZE) /
Daniel Boulbya7e9e182022-06-27 14:21:01 +010084 MM_PPOOL_ENTRY_SIZE);
85
Olivier Deprez93644652022-09-09 11:01:12 +020086static struct manifest_data *manifest_data;
Daniel Boulby801f8ef2022-06-27 14:21:01 +010087
Kathleen Capella4a2a6e72023-04-21 14:43:26 -040088static bool check_boot_order(uint16_t boot_order)
89{
90 uint16_t i;
91 uint64_t boot_order_mask;
92
93 if (boot_order == DEFAULT_BOOT_ORDER) {
94 return true;
95 }
96 if (boot_order > DEFAULT_BOOT_ORDER) {
97 dlog_error("Boot order should not exceed %x",
98 DEFAULT_BOOT_ORDER);
99 return false;
100 }
101
102 i = boot_order / BOOT_ORDER_ENTRY_BITS;
103 boot_order_mask = 1 << (boot_order % BOOT_ORDER_ENTRY_BITS);
104
105 if ((boot_order_mask & manifest_data->boot_order_values[i]) != 0U) {
106 dlog_error("Boot order must be a unique value.");
107 return false;
108 }
109
110 manifest_data->boot_order_values[i] |= boot_order_mask;
111
112 return true;
113}
114
Daniel Boulby801f8ef2022-06-27 14:21:01 +0100115/**
Olivier Deprez93644652022-09-09 11:01:12 +0200116 * Allocates and clear memory for the manifest data in the given memory pool.
Daniel Boulby801f8ef2022-06-27 14:21:01 +0100117 * Returns true if the memory is successfully allocated.
118 */
Olivier Deprez93644652022-09-09 11:01:12 +0200119static bool manifest_data_init(struct mpool *ppool)
Daniel Boulby801f8ef2022-06-27 14:21:01 +0100120{
Olivier Deprez93644652022-09-09 11:01:12 +0200121 manifest_data = (struct manifest_data *)mpool_alloc_contiguous(
122 ppool, manifest_data_ppool_entries, 1);
J-Alves5c0ae6f2023-06-14 15:20:21 +0100123
124 assert(manifest_data != NULL);
125
Olivier Deprez93644652022-09-09 11:01:12 +0200126 memset_s(manifest_data, sizeof(struct manifest_data), 0,
127 sizeof(struct manifest_data));
Daniel Boulbya7e9e182022-06-27 14:21:01 +0100128
Olivier Deprez93644652022-09-09 11:01:12 +0200129 return manifest_data != NULL;
Daniel Boulby801f8ef2022-06-27 14:21:01 +0100130}
131
132/**
Olivier Deprez93644652022-09-09 11:01:12 +0200133 * Frees the memory used for the manifest data in the given memory pool.
Daniel Boulby801f8ef2022-06-27 14:21:01 +0100134 */
Olivier Deprez93644652022-09-09 11:01:12 +0200135static void manifest_data_deinit(struct mpool *ppool)
Daniel Boulby801f8ef2022-06-27 14:21:01 +0100136{
Daniel Boulbya7e9e182022-06-27 14:21:01 +0100137 /**
Olivier Deprez93644652022-09-09 11:01:12 +0200138 * Clear and return the memory used for the manifest_data struct to the
139 * memory pool.
Daniel Boulbya7e9e182022-06-27 14:21:01 +0100140 */
Olivier Deprez93644652022-09-09 11:01:12 +0200141 memset_s(manifest_data, sizeof(struct manifest_data), 0,
142 sizeof(struct manifest_data));
143 mpool_add_chunk(ppool, manifest_data, manifest_data_ppool_entries);
Daniel Boulby801f8ef2022-06-27 14:21:01 +0100144}
145
J-Alves19e20cf2023-08-02 12:48:55 +0100146static inline size_t count_digits(ffa_id_t vm_id)
David Brazdilb856be62020-03-25 10:14:55 +0000147{
148 size_t digits = 0;
149
150 do {
151 digits++;
152 vm_id /= 10;
153 } while (vm_id);
154 return digits;
155}
156
David Brazdil7a462ec2019-08-15 12:27:47 +0100157/**
158 * Generates a string with the two letters "vm" followed by an integer.
159 * Assumes `buf` is of size VM_NAME_BUF_SIZE.
160 */
/**
 * Generates a string with the two letters "vm" followed by an integer.
 * Assumes `buf` is of size VM_NAME_BUF_SIZE.
 */
static void generate_vm_node_name(struct string *str, ffa_id_t vm_id)
{
	static const char *digits = "0123456789";
	size_t vm_id_digits = count_digits(vm_id);
	char *base = str->data;
	/* Point one past the last character: "vm" + digits + NUL. */
	char *ptr = base + (VM_NAME_EXTRA_CHARS + vm_id_digits);

	assert(vm_id_digits <= VM_ID_MAX_DIGITS);
	/* Build the string backwards, starting with the terminator. */
	*(--ptr) = '\0';
	do {
		/* Emit least-significant digit first. */
		*(--ptr) = digits[vm_id % 10];
		vm_id /= 10;
	} while (vm_id);
	*(--ptr) = 'm';
	*(--ptr) = 'v';
	/* All characters written exactly fill the buffer from `base`. */
	assert(ptr == base);
}
178
Andrew Scullae9962e2019-10-03 16:51:16 +0100179/**
Andrew Scullb2c3a242019-11-04 13:52:36 +0000180 * Read a boolean property: true if present; false if not. If present, the value
181 * of the property must be empty else it is considered malformed.
Andrew Scullae9962e2019-10-03 16:51:16 +0100182 */
Andrew Scullb2c3a242019-11-04 13:52:36 +0000183static enum manifest_return_code read_bool(const struct fdt_node *node,
184 const char *property, bool *out)
Andrew Scullae9962e2019-10-03 16:51:16 +0100185{
David Brazdilb856be62020-03-25 10:14:55 +0000186 struct memiter data;
187 bool present = fdt_read_property(node, property, &data);
Andrew Scullae9962e2019-10-03 16:51:16 +0100188
David Brazdilb856be62020-03-25 10:14:55 +0000189 if (present && memiter_size(&data) != 0) {
Andrew Scullb2c3a242019-11-04 13:52:36 +0000190 return MANIFEST_ERROR_MALFORMED_BOOLEAN;
191 }
192
193 *out = present;
194 return MANIFEST_SUCCESS;
Andrew Scullae9962e2019-10-03 16:51:16 +0100195}
196
Andrew Scull72b43c02019-09-18 13:53:45 +0100197static enum manifest_return_code read_string(const struct fdt_node *node,
David Brazdil136f2942019-09-23 14:11:03 +0100198 const char *property,
199 struct string *out)
Andrew Scull72b43c02019-09-18 13:53:45 +0100200{
David Brazdilb856be62020-03-25 10:14:55 +0000201 struct memiter data;
Andrew Scull72b43c02019-09-18 13:53:45 +0100202
David Brazdilb856be62020-03-25 10:14:55 +0000203 if (!fdt_read_property(node, property, &data)) {
Andrew Scull72b43c02019-09-18 13:53:45 +0100204 return MANIFEST_ERROR_PROPERTY_NOT_FOUND;
205 }
206
David Brazdilb856be62020-03-25 10:14:55 +0000207 switch (string_init(out, &data)) {
David Brazdil136f2942019-09-23 14:11:03 +0100208 case STRING_SUCCESS:
209 return MANIFEST_SUCCESS;
210 case STRING_ERROR_INVALID_INPUT:
211 return MANIFEST_ERROR_MALFORMED_STRING;
212 case STRING_ERROR_TOO_LONG:
213 return MANIFEST_ERROR_STRING_TOO_LONG;
214 }
Andrew Scull72b43c02019-09-18 13:53:45 +0100215}
216
217static enum manifest_return_code read_optional_string(
David Brazdil136f2942019-09-23 14:11:03 +0100218 const struct fdt_node *node, const char *property, struct string *out)
Andrew Scull72b43c02019-09-18 13:53:45 +0100219{
David Brazdil136f2942019-09-23 14:11:03 +0100220 enum manifest_return_code ret;
Andrew Scull72b43c02019-09-18 13:53:45 +0100221
David Brazdil136f2942019-09-23 14:11:03 +0100222 ret = read_string(node, property, out);
223 if (ret == MANIFEST_ERROR_PROPERTY_NOT_FOUND) {
224 string_init_empty(out);
225 ret = MANIFEST_SUCCESS;
Andrew Scull72b43c02019-09-18 13:53:45 +0100226 }
David Brazdil136f2942019-09-23 14:11:03 +0100227 return ret;
Andrew Scull72b43c02019-09-18 13:53:45 +0100228}
229
David Brazdil7a462ec2019-08-15 12:27:47 +0100230static enum manifest_return_code read_uint64(const struct fdt_node *node,
231 const char *property,
232 uint64_t *out)
233{
David Brazdilb856be62020-03-25 10:14:55 +0000234 struct memiter data;
David Brazdil7a462ec2019-08-15 12:27:47 +0100235
David Brazdilb856be62020-03-25 10:14:55 +0000236 if (!fdt_read_property(node, property, &data)) {
David Brazdil7a462ec2019-08-15 12:27:47 +0100237 return MANIFEST_ERROR_PROPERTY_NOT_FOUND;
238 }
239
David Brazdilb856be62020-03-25 10:14:55 +0000240 if (!fdt_parse_number(&data, memiter_size(&data), out)) {
David Brazdil7a462ec2019-08-15 12:27:47 +0100241 return MANIFEST_ERROR_MALFORMED_INTEGER;
242 }
243
244 return MANIFEST_SUCCESS;
245}
246
David Brazdil080ee312020-02-25 15:30:30 -0800247static enum manifest_return_code read_optional_uint64(
248 const struct fdt_node *node, const char *property,
249 uint64_t default_value, uint64_t *out)
250{
251 enum manifest_return_code ret;
252
253 ret = read_uint64(node, property, out);
254 if (ret == MANIFEST_ERROR_PROPERTY_NOT_FOUND) {
255 *out = default_value;
256 return MANIFEST_SUCCESS;
257 }
258 return ret;
259}
260
Olivier Deprez62d99e32020-01-09 15:58:07 +0100261static enum manifest_return_code read_uint32(const struct fdt_node *node,
262 const char *property,
263 uint32_t *out)
264{
265 uint64_t value;
266
267 TRY(read_uint64(node, property, &value));
268
269 if (value > UINT32_MAX) {
270 return MANIFEST_ERROR_INTEGER_OVERFLOW;
271 }
272
273 *out = (uint32_t)value;
274 return MANIFEST_SUCCESS;
275}
276
Manish Pandeye68e7932020-04-23 15:29:28 +0100277static enum manifest_return_code read_optional_uint32(
278 const struct fdt_node *node, const char *property,
279 uint32_t default_value, uint32_t *out)
280{
281 enum manifest_return_code ret;
282
283 ret = read_uint32(node, property, out);
284 if (ret == MANIFEST_ERROR_PROPERTY_NOT_FOUND) {
285 *out = default_value;
286 return MANIFEST_SUCCESS;
287 }
288 return ret;
289}
290
David Brazdil7a462ec2019-08-15 12:27:47 +0100291static enum manifest_return_code read_uint16(const struct fdt_node *node,
292 const char *property,
293 uint16_t *out)
294{
295 uint64_t value;
296
297 TRY(read_uint64(node, property, &value));
David Brazdil7a462ec2019-08-15 12:27:47 +0100298 if (value > UINT16_MAX) {
299 return MANIFEST_ERROR_INTEGER_OVERFLOW;
300 }
301
302 *out = (uint16_t)value;
303 return MANIFEST_SUCCESS;
304}
305
J-Alvesb37fd082020-10-22 12:29:21 +0100306static enum manifest_return_code read_optional_uint16(
307 const struct fdt_node *node, const char *property,
308 uint16_t default_value, uint16_t *out)
309{
310 enum manifest_return_code ret;
311
312 ret = read_uint16(node, property, out);
313 if (ret == MANIFEST_ERROR_PROPERTY_NOT_FOUND) {
314 *out = default_value;
315 return MANIFEST_SUCCESS;
316 }
317
Kathleen Capella4a2a6e72023-04-21 14:43:26 -0400318 return ret;
J-Alvesb37fd082020-10-22 12:29:21 +0100319}
320
Olivier Deprez62d99e32020-01-09 15:58:07 +0100321static enum manifest_return_code read_uint8(const struct fdt_node *node,
322 const char *property, uint8_t *out)
323{
324 uint64_t value;
325
326 TRY(read_uint64(node, property, &value));
327
328 if (value > UINT8_MAX) {
329 return MANIFEST_ERROR_INTEGER_OVERFLOW;
330 }
331
332 *out = (uint8_t)value;
333 return MANIFEST_SUCCESS;
334}
335
J-Alves4369bd92020-08-07 16:35:36 +0100336static enum manifest_return_code read_optional_uint8(
337 const struct fdt_node *node, const char *property,
338 uint8_t default_value, uint8_t *out)
339{
340 enum manifest_return_code ret;
341
342 ret = read_uint8(node, property, out);
343 if (ret == MANIFEST_ERROR_PROPERTY_NOT_FOUND) {
344 *out = default_value;
345 return MANIFEST_SUCCESS;
346 }
347
348 return MANIFEST_SUCCESS;
349}
350
Andrew Scullae9962e2019-10-03 16:51:16 +0100351struct uint32list_iter {
352 struct memiter mem_it;
353};
354
J-Alves4369bd92020-08-07 16:35:36 +0100355static enum manifest_return_code read_uint32list(const struct fdt_node *node,
356 const char *property,
357 struct uint32list_iter *out)
Andrew Scullae9962e2019-10-03 16:51:16 +0100358{
David Brazdilb856be62020-03-25 10:14:55 +0000359 struct memiter data;
Andrew Scullae9962e2019-10-03 16:51:16 +0100360
David Brazdilb856be62020-03-25 10:14:55 +0000361 if (!fdt_read_property(node, property, &data)) {
Andrew Scullae9962e2019-10-03 16:51:16 +0100362 memiter_init(&out->mem_it, NULL, 0);
J-Alves4369bd92020-08-07 16:35:36 +0100363 return MANIFEST_ERROR_PROPERTY_NOT_FOUND;
Andrew Scullae9962e2019-10-03 16:51:16 +0100364 }
365
David Brazdilb856be62020-03-25 10:14:55 +0000366 if ((memiter_size(&data) % sizeof(uint32_t)) != 0) {
Andrew Scullae9962e2019-10-03 16:51:16 +0100367 return MANIFEST_ERROR_MALFORMED_INTEGER_LIST;
368 }
369
David Brazdilb856be62020-03-25 10:14:55 +0000370 out->mem_it = data;
Andrew Scullae9962e2019-10-03 16:51:16 +0100371 return MANIFEST_SUCCESS;
372}
373
J-Alves4369bd92020-08-07 16:35:36 +0100374static enum manifest_return_code read_optional_uint32list(
375 const struct fdt_node *node, const char *property,
376 struct uint32list_iter *out)
377{
378 enum manifest_return_code ret = read_uint32list(node, property, out);
379
380 if (ret == MANIFEST_ERROR_PROPERTY_NOT_FOUND) {
381 return MANIFEST_SUCCESS;
382 }
383 return ret;
384}
385
Andrew Scullae9962e2019-10-03 16:51:16 +0100386static bool uint32list_has_next(const struct uint32list_iter *list)
387{
388 return memiter_size(&list->mem_it) > 0;
389}
390
David Brazdil5ea99462020-03-25 13:01:47 +0000391static enum manifest_return_code uint32list_get_next(
392 struct uint32list_iter *list, uint32_t *out)
Andrew Scullae9962e2019-10-03 16:51:16 +0100393{
Andrew Scullae9962e2019-10-03 16:51:16 +0100394 uint64_t num;
395
396 CHECK(uint32list_has_next(list));
David Brazdilb856be62020-03-25 10:14:55 +0000397 if (!fdt_parse_number(&list->mem_it, sizeof(uint32_t), &num)) {
Andrew Scullae9962e2019-10-03 16:51:16 +0100398 return MANIFEST_ERROR_MALFORMED_INTEGER;
399 }
400
David Brazdil5ea99462020-03-25 13:01:47 +0000401 *out = (uint32_t)num;
402 return MANIFEST_SUCCESS;
Andrew Scullae9962e2019-10-03 16:51:16 +0100403}
404
Karl Meakin60532972024-08-02 16:11:21 +0100405/**
406 * Parse a UUID from `uuid` into `out`.
407 * Returns `MANIFEST_SUCCESS` if parsing succeeded.
408 */
409static enum manifest_return_code parse_uuid(struct uint32list_iter *uuid,
410 struct ffa_uuid *out)
411{
412 for (size_t i = 0; i < 4 && uint32list_has_next(uuid); i++) {
413 TRY(uint32list_get_next(uuid, &out->uuid[i]));
414 }
415
416 return MANIFEST_SUCCESS;
417}
418
419/**
420 * Parse a list of UUIDs from `uuid` into `out`.
421 * Writes the number of UUIDs parsed to `len`.
422 * Returns `MANIFEST_SUCCESS` if parsing succeeded.
423 * Returns `MANIFEST_ERROR_UUID_ALL_ZEROS` if any of the UUIDs are all zeros.
424 * Returns `MANIFEEST_ERROR_TOO_MANY_UUIDS` if there are more than
425 * `PARTITION_MAX_UUIDS`
426 */
427static enum manifest_return_code parse_uuid_list(struct uint32list_iter *uuid,
428 struct ffa_uuid *out,
429 uint16_t *len)
430{
431 uint16_t j;
432
Karl Meakin45abeeb2024-08-02 16:55:44 +0100433 for (j = 0; uint32list_has_next(uuid); j++) {
Karl Meakin60532972024-08-02 16:11:21 +0100434 TRY(parse_uuid(uuid, &out[j]));
435
436 if (ffa_uuid_is_null(&out[j])) {
437 return MANIFEST_ERROR_UUID_ALL_ZEROS;
438 }
439 dlog_verbose(" UUID %#x-%x-%x-%x\n", out[j].uuid[0],
440 out[j].uuid[1], out[j].uuid[2], out[j].uuid[3]);
Karl Meakin45abeeb2024-08-02 16:55:44 +0100441
442 if (j >= PARTITION_MAX_UUIDS) {
443 return MANIFEST_ERROR_TOO_MANY_UUIDS;
444 }
Karl Meakin60532972024-08-02 16:11:21 +0100445 }
446
447 *len = j;
448 return MANIFEST_SUCCESS;
449}
450
/**
 * Parses the properties common to all VM nodes: FF-A/hypervisor-loaded flags,
 * debug name, SMC whitelist and, for non-primary VMs, memory size, vCPU count
 * and optional FDT filename.
 */
static enum manifest_return_code parse_vm_common(const struct fdt_node *node,
						 struct manifest_vm *vm,
						 ffa_id_t vm_id)
{
	struct uint32list_iter smcs;
	size_t idx;

	TRY(read_bool(node, "is_ffa_partition", &vm->is_ffa_partition));

	TRY(read_bool(node, "hyp_loaded", &vm->is_hyp_loaded));

	TRY(read_string(node, "debug_name", &vm->debug_name));

	/* Copy at most MAX_SMCS whitelist entries; extras are dropped below. */
	TRY(read_optional_uint32list(node, "smc_whitelist", &smcs));
	while (uint32list_has_next(&smcs) &&
	       vm->smc_whitelist.smc_count < MAX_SMCS) {
		idx = vm->smc_whitelist.smc_count++;
		TRY(uint32list_get_next(&smcs, &vm->smc_whitelist.smcs[idx]));
	}

	/* Remaining entries did not fit: warn but do not fail. */
	if (uint32list_has_next(&smcs)) {
		dlog_warning("%s SMC whitelist too long.\n",
			     vm->debug_name.data);
	}

	TRY(read_bool(node, "smc_whitelist_permissive",
		      &vm->smc_whitelist.permissive));

	/* Only secondary VMs take mem_size/vcpu_count from the manifest. */
	if (vm_id != HF_PRIMARY_VM_ID) {
		TRY(read_uint64(node, "mem_size", &vm->secondary.mem_size));
		TRY(read_uint16(node, "vcpu_count", &vm->secondary.vcpu_count));
		TRY(read_optional_string(node, "fdt_filename",
					 &vm->secondary.fdt_filename));
	}

	return MANIFEST_SUCCESS;
}
488
/**
 * Parses the non-FF-A ("legacy") VM node properties: kernel/ramdisk
 * filenames, the primary VM's boot address and the exception level the
 * partition runs at.
 */
static enum manifest_return_code parse_vm(struct fdt_node *node,
					  struct manifest_vm *vm,
					  ffa_id_t vm_id)
{
	TRY(read_optional_string(node, "kernel_filename",
				 &vm->kernel_filename));

	/* Ramdisk and boot address only apply to the primary VM. */
	if (vm_id == HF_PRIMARY_VM_ID) {
		TRY(read_optional_string(node, "ramdisk_filename",
					 &vm->primary.ramdisk_filename));
		TRY(read_optional_uint64(node, "boot_address",
					 MANIFEST_INVALID_ADDRESS,
					 &vm->primary.boot_address));
	}
	/* Defaults to EL1 when the manifest does not specify a level. */
	TRY(read_optional_uint8(node, "exception-level", (uint8_t)EL1,
				(uint8_t *)&vm->partition.run_time_el));

	return MANIFEST_SUCCESS;
}
508
/**
 * Returns true if either endpoint of the region
 * [base_address, base_address + page_count * PAGE_SIZE - 1] falls inside one
 * of `ranges` (each an inclusive [begin, end] pair).
 *
 * NOTE(review): a queried region that strictly encloses an entire range
 * (starts before it and ends after it) is NOT reported as intersecting,
 * because neither queried endpoint lies inside that range — confirm the
 * overlap-detection caller (check_and_record_memory_used) never needs that
 * case. Also assumes base_address + page_count * PAGE_SIZE does not wrap
 * around uintptr_t — TODO confirm upstream validation guarantees this.
 */
static bool is_memory_region_within_ranges(uintptr_t base_address,
					   uint32_t page_count,
					   const struct mem_range *ranges,
					   const size_t ranges_size)
{
	/* Inclusive end address of the queried region. */
	uintptr_t region_end =
		base_address + ((uintptr_t)page_count * PAGE_SIZE - 1);

	for (size_t i = 0; i < ranges_size; i++) {
		uintptr_t base = (uintptr_t)pa_addr(ranges[i].begin);
		uintptr_t end = (uintptr_t)pa_addr(ranges[i].end);

		/* Hit if either endpoint lands within this range. */
		if ((base_address >= base && base_address <= end) ||
		    (region_end >= base && region_end <= end)) {
			return true;
		}
	}

	return false;
}
529
530void dump_memory_ranges(const struct mem_range *ranges,
531 const size_t ranges_size, bool ns)
532{
533 if (LOG_LEVEL < LOG_LEVEL_VERBOSE) {
534 return;
535 }
536
537 dlog("%s Memory ranges:\n", ns ? "NS" : "S");
538
539 for (size_t i = 0; i < ranges_size; i++) {
540 uintptr_t begin = pa_addr(ranges[i].begin);
541 uintptr_t end = pa_addr(ranges[i].end);
542 size_t page_count =
543 align_up(pa_difference(ranges[i].begin, ranges[i].end),
544 PAGE_SIZE) /
545 PAGE_SIZE;
546
Karl Meakine8937d92024-03-19 16:04:25 +0000547 dlog(" [%lx - %lx (%zu pages)]\n", begin, end, page_count);
J-Alves77b6f4f2023-03-15 11:34:49 +0000548 }
549}
550
551/**
552 * Check the partition's assigned memory is contained in the memory ranges
553 * configured for the SWd, in the SPMC's manifest.
554 */
555static enum manifest_return_code check_partition_memory_is_valid(
556 uintptr_t base_address, uint32_t page_count, uint32_t attributes,
Olivier Deprez058ddee2024-08-27 09:22:11 +0200557 const struct boot_params *params, bool is_device_region)
J-Alves77b6f4f2023-03-15 11:34:49 +0000558{
559 bool is_secure_region =
560 (attributes & MANIFEST_REGION_ATTR_SECURITY) == 0U;
Daniel Boulby4339edc2024-02-21 14:59:00 +0000561 const struct mem_range *ranges_from_manifest;
562 size_t ranges_count;
563 bool within_ranges;
564 enum manifest_return_code error_return;
565
566 if (!is_device_region) {
567 ranges_from_manifest = is_secure_region ? params->mem_ranges
568 : params->ns_mem_ranges;
569 ranges_count = is_secure_region ? params->mem_ranges_count
570 : params->ns_mem_ranges_count;
571 error_return = MANIFEST_ERROR_MEM_REGION_INVALID;
572 } else {
573 ranges_from_manifest = is_secure_region
574 ? params->device_mem_ranges
575 : params->ns_device_mem_ranges;
576 ranges_count = is_secure_region
577 ? params->device_mem_ranges_count
578 : params->ns_device_mem_ranges_count;
579 error_return = MANIFEST_ERROR_DEVICE_MEM_REGION_INVALID;
580 }
581
582 within_ranges = is_memory_region_within_ranges(
J-Alves77b6f4f2023-03-15 11:34:49 +0000583 base_address, page_count, ranges_from_manifest, ranges_count);
584
Daniel Boulby4339edc2024-02-21 14:59:00 +0000585 return within_ranges ? MANIFEST_SUCCESS : error_return;
J-Alves77b6f4f2023-03-15 11:34:49 +0000586}
587
588/*
589 * Keep track of the memory allocated by partitions. This includes memory region
Daniel Boulby4d165722023-11-21 13:46:42 +0000590 * nodes and device region nodes defined in their respective partition
591 * manifests, as well address space defined from their load address.
J-Alves77b6f4f2023-03-15 11:34:49 +0000592 */
static enum manifest_return_code check_and_record_memory_used(
	uintptr_t base_address, uint32_t page_count,
	struct mem_range *mem_ranges, size_t *mem_regions_index)
{
	bool overlap_of_regions;

	/* A region must span at least one page. */
	if (page_count == 0U) {
		dlog_error(
			"Empty memory region defined with base address: "
			"%#lx.\n",
			base_address);
		return MANIFEST_ERROR_MEM_REGION_EMPTY;
	}

	/* Regions must start on a page boundary. */
	if (!is_aligned(base_address, PAGE_SIZE)) {
		dlog_error("base_address (%#lx) is not aligned to page size.\n",
			   base_address);
		return MANIFEST_ERROR_MEM_REGION_UNALIGNED;
	}

	/* Reuse the range-intersection helper against the regions recorded
	 * so far (the first *mem_regions_index entries of mem_ranges). */
	overlap_of_regions = is_memory_region_within_ranges(
		base_address, page_count, mem_ranges, *mem_regions_index);

	if (!overlap_of_regions) {
		paddr_t begin = pa_init(base_address);

		/* Record the new region as an inclusive [begin, end] range. */
		mem_ranges[*mem_regions_index].begin = begin;
		mem_ranges[*mem_regions_index].end =
			pa_add(begin, page_count * PAGE_SIZE - 1);
		(*mem_regions_index)++;

		return MANIFEST_SUCCESS;
	}

	return MANIFEST_ERROR_MEM_REGION_OVERLAP;
}
629
Madhukar Pappireddy718afa92024-06-20 16:48:49 -0500630static enum manifest_return_code parse_common_fields_mem_dev_region_node(
631 struct fdt_node *ffa_node, struct dma_device_properties *dma_prop)
632{
633 uint32_t j = 0;
634 struct uint32list_iter list;
635
636 TRY(read_optional_uint32(ffa_node, "smmu-id", MANIFEST_INVALID_ID,
637 &dma_prop->smmu_id));
638 if (dma_prop->smmu_id != MANIFEST_INVALID_ID) {
639 dlog_verbose(" smmu-id: %u\n", dma_prop->smmu_id);
640 }
641
642 TRY(read_optional_uint32list(ffa_node, "stream-ids", &list));
643 dlog_verbose(" Stream IDs assigned:\n");
644
645 j = 0;
646 while (uint32list_has_next(&list)) {
647 if (j == PARTITION_MAX_STREAMS_PER_DEVICE) {
648 return MANIFEST_ERROR_STREAM_IDS_OVERFLOW;
649 }
650
651 TRY(uint32list_get_next(&list, &dma_prop->stream_ids[j]));
652 dlog_verbose(" %u\n", dma_prop->stream_ids[j]);
653 j++;
654 }
655 if (j == 0) {
656 dlog_verbose(" None\n");
657 } else if (dma_prop->smmu_id == MANIFEST_INVALID_ID) {
658 /*
659 * SMMU ID must be specified if the partition specifies
660 * Stream IDs for any device upstream of SMMU.
661 */
662 return MANIFEST_ERROR_MISSING_SMMU_ID;
663 }
664 dma_prop->stream_count = j;
665
666 return MANIFEST_SUCCESS;
667}
668
Karl Meakin68368292024-11-18 12:02:12 +0000669/**
670 * Parse and validate a memory regions's base address.
671 *
672 * The base address can be specified either as an absolute address (with
673 * `base-address`) or as an offset from `load_address` (with
674 * `load-address-relative-offset`).
675
676 * Returns an error if:
677 * - Neither `base-address` or `load-address-relative-offset` are specified.
678 * - Both `base-address` and `load-address-relative-offset` are specified.
679 * - The effective address (`load-address-relative-offset` + `load_address`)
680 * would overflow.
681 */
682static enum manifest_return_code parse_base_address(
683 struct fdt_node *mem_node, uintptr_t load_address,
684 struct memory_region *mem_region)
685{
686 uintptr_t relative_offset;
687 uintptr_t absolute_address;
688
689 bool is_relative;
690 bool is_absolute;
691
692 TRY(read_optional_uint64(mem_node, "base-address",
693 MANIFEST_INVALID_ADDRESS, &absolute_address));
694
695 TRY(read_optional_uint64(mem_node, "load-address-relative-offset",
696 MANIFEST_INVALID_ADDRESS, &relative_offset));
697
698 is_absolute = (absolute_address != MANIFEST_INVALID_ADDRESS);
699 is_relative = (relative_offset != MANIFEST_INVALID_ADDRESS);
700
701 if (!is_absolute && !is_relative) {
702 return MANIFEST_ERROR_PROPERTY_NOT_FOUND;
703 }
704
705 if (is_absolute && is_relative) {
706 return MANIFEST_ERROR_BASE_ADDRESS_AND_RELATIVE_ADDRESS;
707 }
708
709 if (is_relative && relative_offset > UINT64_MAX - load_address) {
710 return MANIFEST_ERROR_INTEGER_OVERFLOW;
711 }
712
713 mem_region->base_address =
714 is_absolute ? absolute_address : load_address + relative_offset;
715
716 return MANIFEST_SUCCESS;
717}
718
/**
 * Parses the "memory-regions" node of an FF-A partition manifest.
 *
 * For each child node this reads the region's name, base address (resolved
 * against `load_address` by parse_base_address), page count and attributes,
 * validates that the RWX permissions are one of the allowed combinations,
 * checks the region against `boot_params`, records the range in the
 * file-global `manifest_data` usage tracker, and parses the optional DMA
 * stream-id access-permission list. When an RX/TX buffer pair was declared
 * in the manifest, a region whose phandle matches is bound to the RX or TX
 * buffer pointer.
 *
 * On success writes the number of parsed regions to `count` and returns
 * MANIFEST_SUCCESS; otherwise returns the first error encountered.
 */
static enum manifest_return_code parse_ffa_memory_region_node(
	struct fdt_node *mem_node, uintptr_t load_address,
	struct memory_region *mem_regions, uint16_t *count, struct rx_tx *rxtx,
	const struct boot_params *boot_params)
{
	uint32_t phandle;
	uint16_t i = 0; /* Index of the memory region being parsed. */
	uint32_t j = 0; /* Index into the stream-id permission list. */
	struct uint32list_iter list;

	dlog_verbose(" Partition memory regions\n");

	if (!fdt_is_compatible(mem_node, "arm,ffa-manifest-memory-regions")) {
		return MANIFEST_ERROR_NOT_COMPATIBLE;
	}

	/* The node must have at least one child region. */
	if (!fdt_first_child(mem_node)) {
		return MANIFEST_ERROR_MEMORY_REGION_NODE_EMPTY;
	}

	do {
		dlog_verbose(" Memory Region[%u]\n", i);

		TRY(read_optional_string(mem_node, "description",
					 &mem_regions[i].name));
		dlog_verbose(" Name: %s\n",
			     string_data(&mem_regions[i].name));

		TRY(parse_base_address(mem_node, load_address,
				       &mem_regions[i]));

		TRY(read_uint32(mem_node, "pages-count",
				&mem_regions[i].page_count));
		dlog_verbose(" Pages_count: %u\n",
			     mem_regions[i].page_count);

		TRY(read_uint32(mem_node, "attributes",
				&mem_regions[i].attributes));

		/*
		 * Check RWX permission attributes.
		 * Security attribute is checked at load phase.
		 */
		uint32_t permissions = mem_regions[i].attributes &
				       (MANIFEST_REGION_ATTR_READ |
					MANIFEST_REGION_ATTR_WRITE |
					MANIFEST_REGION_ATTR_EXEC);
		/* Only R, RW or RX are acceptable (never W-only or WX). */
		if (permissions != MANIFEST_REGION_ATTR_READ &&
		    permissions != (MANIFEST_REGION_ATTR_READ |
				    MANIFEST_REGION_ATTR_WRITE) &&
		    permissions != (MANIFEST_REGION_ATTR_READ |
				    MANIFEST_REGION_ATTR_EXEC)) {
			return MANIFEST_ERROR_INVALID_MEM_PERM;
		}

		/* Filter memory region attributes. */
		mem_regions[i].attributes &= MANIFEST_REGION_ALL_ATTR_MASK;

		dlog_verbose(" Attributes: %#x\n",
			     mem_regions[i].attributes);

		TRY(check_partition_memory_is_valid(
			mem_regions[i].base_address, mem_regions[i].page_count,
			mem_regions[i].attributes, boot_params, false));

		/*
		 * Record the range in the global tracker so overlapping
		 * claims by other regions/partitions are rejected.
		 */
		TRY(check_and_record_memory_used(
			mem_regions[i].base_address, mem_regions[i].page_count,
			manifest_data->mem_regions,
			&manifest_data->mem_regions_index));

		TRY(parse_common_fields_mem_dev_region_node(
			mem_node, &mem_regions[i].dma_prop));

		TRY(read_optional_uint32list(
			mem_node, "stream-ids-access-permissions", &list));
		dlog_verbose(" Access permissions of Stream IDs:\n");

		j = 0;
		while (uint32list_has_next(&list)) {
			uint32_t permissions;

			if (j == PARTITION_MAX_STREAMS_PER_DEVICE) {
				return MANIFEST_ERROR_DMA_ACCESS_PERMISSIONS_OVERFLOW;
			}

			TRY(uint32list_get_next(&list, &permissions));
			dlog_verbose(" %u\n", permissions);

			/* First entry sets the expected permissions value. */
			if (j == 0) {
				mem_regions[i].dma_access_permissions =
					permissions;
			}

			/*
			 * All stream ids belonging to a dma device must specify
			 * the same access permissions.
			 */
			if (permissions !=
			    mem_regions[i].dma_access_permissions) {
				return MANIFEST_ERROR_MISMATCH_DMA_ACCESS_PERMISSIONS;
			}

			j++;
		}

		if (j == 0) {
			dlog_verbose(" None\n");
		} else if (j != mem_regions[i].dma_prop.stream_count) {
			/* One permissions entry is required per stream id. */
			return MANIFEST_ERROR_MISMATCH_DMA_ACCESS_PERMISSIONS;
		}

		if (j > 0) {
			/* Filter the dma access permissions. */
			mem_regions[i].dma_access_permissions &=
				MANIFEST_REGION_ALL_ATTR_MASK;
		}

		/*
		 * Bind this region to the RX or TX buffer if its phandle
		 * matches the one recorded from the rx_tx-info node.
		 */
		if (rxtx->available) {
			TRY(read_optional_uint32(
				mem_node, "phandle",
				(uint32_t)MANIFEST_INVALID_ADDRESS, &phandle));
			if (phandle == rxtx->rx_phandle) {
				dlog_verbose(" Assigned as RX buffer\n");
				rxtx->rx_buffer = &mem_regions[i];
			} else if (phandle == rxtx->tx_phandle) {
				dlog_verbose(" Assigned as TX buffer\n");
				rxtx->tx_buffer = &mem_regions[i];
			}
		}

		i++;
	} while (fdt_next_sibling(mem_node) &&
		 (i < PARTITION_MAX_MEMORY_REGIONS));

	/* RX and TX buffers must be the same size. */
	if (rxtx->available &&
	    (rxtx->rx_buffer->page_count != rxtx->tx_buffer->page_count)) {
		return MANIFEST_ERROR_RXTX_SIZE_MISMATCH;
	}

	*count = i;

	return MANIFEST_SUCCESS;
}
862
Raghu Krishnamurthy98da1ca2022-10-04 08:59:01 -0700863static struct interrupt_info *device_region_get_interrupt_info(
864 struct device_region *dev_regions, uint32_t intid)
865{
866 for (uint32_t i = 0; i < ARRAY_SIZE(dev_regions->interrupts); i++) {
867 if (dev_regions->interrupts[i].id == intid) {
868 return &(dev_regions->interrupts[i]);
869 }
870 }
871 return NULL;
872}
873
/**
 * Parses the "device-regions" node of an FF-A partition manifest.
 *
 * For each child node this reads the region's name, base address, page
 * count and attributes, validates the permissions (R or RW only; execute
 * is never allowed for device memory), records the range in the global
 * `manifest_data` tracker, parses the interleaved interrupt id/attribute
 * list (rejecting ids already claimed, tracked via a local copy of the
 * global interrupt bitmap), parses the optional "interrupts-target" MPIDR
 * routing list, and assigns a sequential DMA device id to regions behind
 * an SMMU.
 *
 * On success writes the number of regions to `count` and the number of DMA
 * devices to `dma_device_count`, and returns MANIFEST_SUCCESS.
 */
static enum manifest_return_code parse_ffa_device_region_node(
	struct fdt_node *dev_node, struct device_region *dev_regions,
	uint16_t *count, uint8_t *dma_device_count,
	const struct boot_params *boot_params)
{
	struct uint32list_iter list;
	uint16_t i = 0; /* Index of the device region being parsed. */
	uint32_t j = 0; /* Index into the per-region interrupt list. */
	/* Local copy: ids claimed here must not collide with earlier ones. */
	struct interrupt_bitmap allocated_intids = manifest_data->intids;
	uint8_t dma_device_id = 0;

	dlog_verbose(" Partition Device Regions\n");

	if (!fdt_is_compatible(dev_node, "arm,ffa-manifest-device-regions")) {
		return MANIFEST_ERROR_NOT_COMPATIBLE;
	}

	/* The node must have at least one child region. */
	if (!fdt_first_child(dev_node)) {
		return MANIFEST_ERROR_DEVICE_REGION_NODE_EMPTY;
	}

	*dma_device_count = 0;

	do {
		dlog_verbose(" Device Region[%u]\n", i);

		TRY(read_optional_string(dev_node, "description",
					 &dev_regions[i].name));
		dlog_verbose(" Name: %s\n",
			     string_data(&dev_regions[i].name));

		TRY(read_uint64(dev_node, "base-address",
				&dev_regions[i].base_address));
		dlog_verbose(" Base address: %#lx\n",
			     dev_regions[i].base_address);

		TRY(read_uint32(dev_node, "pages-count",
				&dev_regions[i].page_count));
		dlog_verbose(" Pages_count: %u\n",
			     dev_regions[i].page_count);

		/* Reject overlap with any previously recorded range. */
		TRY(check_and_record_memory_used(
			dev_regions[i].base_address, dev_regions[i].page_count,
			manifest_data->mem_regions,
			&manifest_data->mem_regions_index));

		TRY(read_uint32(dev_node, "attributes",
				&dev_regions[i].attributes));

		/*
		 * Check RWX permission attributes.
		 * Security attribute is checked at load phase.
		 */
		uint32_t permissions = dev_regions[i].attributes &
				       (MANIFEST_REGION_ATTR_READ |
					MANIFEST_REGION_ATTR_WRITE |
					MANIFEST_REGION_ATTR_EXEC);

		/* Device memory may only be R or RW, never executable. */
		if (permissions != MANIFEST_REGION_ATTR_READ &&
		    permissions != (MANIFEST_REGION_ATTR_READ |
				    MANIFEST_REGION_ATTR_WRITE)) {
			return MANIFEST_ERROR_INVALID_MEM_PERM;
		}

		/* Filter device region attributes. */
		dev_regions[i].attributes = dev_regions[i].attributes &
					    MANIFEST_REGION_ALL_ATTR_MASK;

		dlog_verbose(" Attributes: %#x\n",
			     dev_regions[i].attributes);

		TRY(check_partition_memory_is_valid(
			dev_regions[i].base_address, dev_regions[i].page_count,
			dev_regions[i].attributes, boot_params, true));

		/*
		 * "interrupts" is a flat list of <id, attributes> pairs;
		 * a trailing id without attributes is malformed.
		 */
		TRY(read_optional_uint32list(dev_node, "interrupts", &list));
		dlog_verbose(" Interrupt List:\n");
		j = 0;
		while (uint32list_has_next(&list) &&
		       j < PARTITION_MAX_INTERRUPTS_PER_DEVICE) {
			uint32_t intid;

			TRY(uint32list_get_next(
				&list, &dev_regions[i].interrupts[j].id));
			intid = dev_regions[i].interrupts[j].id;

			dlog_verbose(" ID = %u\n", intid);

			if (interrupt_bitmap_get_value(&allocated_intids,
						       intid) == 1U) {
				return MANIFEST_ERROR_INTERRUPT_ID_REPEATED;
			}

			interrupt_bitmap_set_value(&allocated_intids, intid);

			if (uint32list_has_next(&list)) {
				TRY(uint32list_get_next(&list,
							&dev_regions[i]
								 .interrupts[j]
								 .attributes));
			} else {
				return MANIFEST_ERROR_MALFORMED_INTEGER_LIST;
			}

			/* No routing target until "interrupts-target" says so. */
			dev_regions[i].interrupts[j].mpidr_valid = false;
			dev_regions[i].interrupts[j].mpidr = 0;

			dlog_verbose(" attributes = %u\n",
				     dev_regions[i].interrupts[j].attributes);
			j++;
		}

		dev_regions[i].interrupt_count = j;
		if (j == 0) {
			dlog_verbose(" Empty\n");
		} else {
			/*
			 * "interrupts-target" is a flat list of
			 * <id, mpidr-high, mpidr-low> triples routing an
			 * already-declared interrupt to a specific PE.
			 */
			TRY(read_optional_uint32list(
				dev_node, "interrupts-target", &list));
			dlog_verbose(" Interrupt Target List:\n");

			while (uint32list_has_next(&list)) {
				uint32_t intid;
				uint64_t mpidr = 0;
				uint32_t mpidr_lower = 0;
				uint32_t mpidr_upper = 0;
				struct interrupt_info *info = NULL;

				TRY(uint32list_get_next(&list, &intid));

				dlog_verbose(" ID = %u\n", intid);

				if (interrupt_bitmap_get_value(
					    &allocated_intids, intid) != 1U) {
					return MANIFEST_ERROR_INTERRUPT_ID_NOT_IN_LIST;
				}

				TRY(uint32list_get_next(&list, &mpidr_upper));
				TRY(uint32list_get_next(&list, &mpidr_lower));
				mpidr = mpidr_upper;
				mpidr <<= 32;
				mpidr |= mpidr_lower;

				info = device_region_get_interrupt_info(
					&dev_regions[i], intid);
				/*
				 * We should find info since
				 * interrupt_bitmap_get_value already ensures
				 * that we saw the interrupt and allocated ids
				 * for it.
				 */
				assert(info != NULL);
				info->mpidr = mpidr;
				info->mpidr_valid = true;
				dlog_verbose(" MPIDR = %#lx\n", mpidr);
			}
		}

		TRY(parse_common_fields_mem_dev_region_node(
			dev_node, &dev_regions[i].dma_prop));

		/* Regions behind an SMMU get sequential DMA device ids. */
		if (dev_regions[i].dma_prop.smmu_id != MANIFEST_INVALID_ID) {
			dev_regions[i].dma_prop.dma_device_id = dma_device_id++;
			*dma_device_count = dma_device_id;

			if (*dma_device_count > PARTITION_MAX_DMA_DEVICES) {
				return MANIFEST_ERROR_DMA_DEVICE_OVERFLOW;
			}

			dlog_verbose(" dma peripheral device id: %u\n",
				     dev_regions[i].dma_prop.dma_device_id);
		}

		TRY(read_bool(dev_node, "exclusive-access",
			      &dev_regions[i].exclusive_access));
		dlog_verbose(" Exclusive_access: %u\n",
			     dev_regions[i].exclusive_access);

		i++;
	} while (fdt_next_sibling(dev_node) &&
		 (i < PARTITION_MAX_DEVICE_REGIONS));

	*count = i;

	return MANIFEST_SUCCESS;
}
1059
J-Alvesabebe432022-05-31 14:40:50 +01001060static enum manifest_return_code sanity_check_ffa_manifest(
1061 struct manifest_vm *vm)
1062{
Karl Meakin0e617d92024-04-05 12:55:22 +01001063 enum ffa_version ffa_version;
J-Alvesabebe432022-05-31 14:40:50 +01001064 enum manifest_return_code ret_code = MANIFEST_SUCCESS;
1065 const char *error_string = "specified in manifest is unsupported";
1066 uint32_t k = 0;
Kathleen Capellaf71dee42023-08-08 16:24:14 -04001067 bool using_req2 = (vm->partition.messaging_method &
1068 (FFA_PARTITION_DIRECT_REQ2_RECV |
1069 FFA_PARTITION_DIRECT_REQ2_SEND)) != 0;
J-Alvesabebe432022-05-31 14:40:50 +01001070
1071 /* ensure that the SPM version is compatible */
Karl Meakin0e617d92024-04-05 12:55:22 +01001072 ffa_version = vm->partition.ffa_version;
1073 if (!ffa_versions_are_compatible(ffa_version, FFA_VERSION_COMPILED)) {
J-Alvesabebe432022-05-31 14:40:50 +01001074 dlog_error("FF-A partition manifest version %s: %u.%u\n",
Karl Meakin0e617d92024-04-05 12:55:22 +01001075 error_string, ffa_version_get_major(ffa_version),
1076 ffa_version_get_minor(ffa_version));
J-Alvesabebe432022-05-31 14:40:50 +01001077 ret_code = MANIFEST_ERROR_NOT_COMPATIBLE;
1078 }
1079
1080 if (vm->partition.xlat_granule != PAGE_4KB) {
1081 dlog_error("Translation granule %s: %u\n", error_string,
1082 vm->partition.xlat_granule);
1083 ret_code = MANIFEST_ERROR_NOT_COMPATIBLE;
1084 }
1085
1086 if (vm->partition.execution_state != AARCH64) {
1087 dlog_error("Execution state %s: %u\n", error_string,
1088 vm->partition.execution_state);
1089 ret_code = MANIFEST_ERROR_NOT_COMPATIBLE;
1090 }
1091
1092 if (vm->partition.run_time_el != EL1 &&
1093 vm->partition.run_time_el != S_EL1 &&
Daniel Boulby874d5432023-04-27 12:40:24 +01001094 vm->partition.run_time_el != S_EL0 &&
1095 vm->partition.run_time_el != EL0) {
J-Alvesabebe432022-05-31 14:40:50 +01001096 dlog_error("Exception level %s: %d\n", error_string,
1097 vm->partition.run_time_el);
1098 ret_code = MANIFEST_ERROR_NOT_COMPATIBLE;
1099 }
1100
Karl Meakin0e617d92024-04-05 12:55:22 +01001101 if (vm->partition.ffa_version < FFA_VERSION_1_2 && using_req2) {
Kathleen Capellaf71dee42023-08-08 16:24:14 -04001102 dlog_error("Messaging method %s: %x\n", error_string,
1103 vm->partition.messaging_method);
1104 ret_code = MANIFEST_ERROR_NOT_COMPATIBLE;
1105 }
1106
J-Alvesabebe432022-05-31 14:40:50 +01001107 if ((vm->partition.messaging_method &
1108 ~(FFA_PARTITION_DIRECT_REQ_RECV | FFA_PARTITION_DIRECT_REQ_SEND |
Kathleen Capellaf71dee42023-08-08 16:24:14 -04001109 FFA_PARTITION_INDIRECT_MSG | FFA_PARTITION_DIRECT_REQ2_RECV |
1110 FFA_PARTITION_DIRECT_REQ2_SEND)) != 0U) {
J-Alvesabebe432022-05-31 14:40:50 +01001111 dlog_error("Messaging method %s: %x\n", error_string,
1112 vm->partition.messaging_method);
1113 ret_code = MANIFEST_ERROR_NOT_COMPATIBLE;
1114 }
1115
Daniel Boulby874d5432023-04-27 12:40:24 +01001116 if ((vm->partition.run_time_el == S_EL0 ||
1117 vm->partition.run_time_el == EL0) &&
J-Alvesabebe432022-05-31 14:40:50 +01001118 vm->partition.execution_ctx_count != 1) {
1119 dlog_error(
1120 "Exception level and execution context count %s: %d "
1121 "%d\n",
1122 error_string, vm->partition.run_time_el,
1123 vm->partition.execution_ctx_count);
1124 ret_code = MANIFEST_ERROR_NOT_COMPATIBLE;
1125 }
1126
Raghu Krishnamurthy641dcd82022-07-19 23:21:20 -07001127 for (uint16_t i = 0; i < vm->partition.dev_region_count; i++) {
J-Alvesabebe432022-05-31 14:40:50 +01001128 struct device_region dev_region;
1129
1130 dev_region = vm->partition.dev_regions[i];
1131
Raghu Krishnamurthy641dcd82022-07-19 23:21:20 -07001132 if (dev_region.interrupt_count >
1133 PARTITION_MAX_INTERRUPTS_PER_DEVICE) {
J-Alvesabebe432022-05-31 14:40:50 +01001134 dlog_error(
1135 "Interrupt count for device region exceeds "
1136 "limit.\n");
1137 ret_code = MANIFEST_ERROR_NOT_COMPATIBLE;
1138 continue;
1139 }
1140
1141 for (uint8_t j = 0; j < dev_region.interrupt_count; j++) {
1142 k++;
1143 if (k > VM_MANIFEST_MAX_INTERRUPTS) {
1144 dlog_error(
1145 "Interrupt count for VM exceeds "
1146 "limit.\n");
1147 ret_code = MANIFEST_ERROR_NOT_COMPATIBLE;
1148 continue;
1149 }
1150 }
1151 }
1152
1153 /* GP register is restricted to one of x0 - x3. */
Karl Meakin824b63d2024-06-03 19:04:53 +01001154 if (vm->partition.gp_register_num != DEFAULT_BOOT_GP_REGISTER &&
J-Alvesabebe432022-05-31 14:40:50 +01001155 vm->partition.gp_register_num > 3) {
1156 dlog_error("GP register number %s: %u\n", error_string,
1157 vm->partition.gp_register_num);
1158 ret_code = MANIFEST_ERROR_NOT_COMPATIBLE;
1159 }
1160
1161 return ret_code;
1162}
1163
Madhukar Pappireddye032af52023-10-11 14:52:58 -05001164/**
1165 * Find the device id allocated to the device region node corresponding to the
1166 * specified stream id.
1167 */
1168static bool find_dma_device_id_from_dev_region_nodes(
1169 const struct manifest_vm *manifest_vm, uint32_t sid, uint8_t *device_id)
1170{
1171 for (uint16_t i = 0; i < manifest_vm->partition.dev_region_count; i++) {
1172 struct device_region dev_region =
1173 manifest_vm->partition.dev_regions[i];
1174
Madhukar Pappireddy9c764b32024-06-20 14:36:55 -05001175 for (uint8_t j = 0; j < dev_region.dma_prop.stream_count; j++) {
1176 if (sid == dev_region.dma_prop.stream_ids[j]) {
1177 *device_id = dev_region.dma_prop.dma_device_id;
Madhukar Pappireddye032af52023-10-11 14:52:58 -05001178 return true;
1179 }
1180 }
1181 }
1182 return false;
1183}
1184
1185/**
1186 * Identify the device id of a DMA device node corresponding to a stream id
1187 * specified in the memory region node.
1188 */
1189static bool map_dma_device_id_to_stream_ids(struct manifest_vm *vm)
1190{
1191 for (uint16_t i = 0; i < vm->partition.mem_region_count; i++) {
1192 struct memory_region mem_region = vm->partition.mem_regions[i];
1193
1194 for (uint8_t j = 0; j < mem_region.dma_prop.stream_count; j++) {
1195 uint32_t sid = mem_region.dma_prop.stream_ids[j];
1196 uint8_t device_id = 0;
1197
1198 /*
1199 * Every stream id must have been declared in the
1200 * device node as well.
1201 */
1202 if (!find_dma_device_id_from_dev_region_nodes(
1203 vm, sid, &device_id)) {
1204 dlog_verbose(
Karl Meakine8937d92024-03-19 16:04:25 +00001205 "Stream ID %d not found in any device "
Madhukar Pappireddye032af52023-10-11 14:52:58 -05001206 "region node of partition manifest\n",
1207 sid);
1208 return false;
1209 }
1210
1211 mem_region.dma_prop.dma_device_id = device_id;
1212 }
1213 }
1214
1215 return true;
1216}
1217
/**
 * Parses a complete FF-A partition manifest (a flattened device tree) into
 * `vm->partition`.
 *
 * Reads the mandatory root properties (uuid, ffa-version,
 * execution-ctx-count, exception-level, execution-state, messaging-method)
 * and the optional ones (load-address, entrypoint-offset, gp-register-num,
 * boot-order, xlat-granule, managed-exit/ns-interrupts-action,
 * notification-support, other-s-interrupts-action,
 * vm-availability-messages, power-management-messages), then the optional
 * rx_tx-info, boot-info, memory-regions and device-regions child nodes.
 *
 * When `boot_info_node` is non-NULL and a "boot-info" node exists, that
 * node is returned through it. Finishes by mapping DMA device ids onto
 * memory-region stream ids and running sanity_check_ffa_manifest().
 */
enum manifest_return_code parse_ffa_manifest(
	struct fdt *fdt, struct manifest_vm *vm,
	struct fdt_node *boot_info_node, const struct boot_params *boot_params)
{
	struct uint32list_iter uuid;
	uintpaddr_t load_address;
	struct fdt_node root;
	struct fdt_node ffa_node;
	struct string rxtx_node_name = STRING_INIT("rx_tx-info");
	struct string mem_region_node_name = STRING_INIT("memory-regions");
	struct string dev_region_node_name = STRING_INIT("device-regions");
	struct string boot_info_node_name = STRING_INIT("boot-info");
	bool managed_exit_field_present = false;

	if (!fdt_find_node(fdt, "/", &root)) {
		return MANIFEST_ERROR_NO_ROOT_NODE;
	}

	/* Check "compatible" property. */
	if (!fdt_is_compatible(&root, "arm,ffa-manifest-1.0")) {
		return MANIFEST_ERROR_NOT_COMPATIBLE;
	}

	TRY(read_uint32list(&root, "uuid", &uuid));

	TRY(parse_uuid_list(&uuid, vm->partition.uuids,
			    &vm->partition.uuid_count));
	dlog_verbose(" Number of UUIDs %u\n", vm->partition.uuid_count);

	TRY(read_uint32(&root, "ffa-version", &vm->partition.ffa_version));
	dlog_verbose(" Expected FF-A version %u.%u\n",
		     ffa_version_get_major(vm->partition.ffa_version),
		     ffa_version_get_minor(vm->partition.ffa_version));

	TRY(read_uint16(&root, "execution-ctx-count",
			&vm->partition.execution_ctx_count));
	dlog_verbose(" Number of execution context %u\n",
		     vm->partition.execution_ctx_count);

	TRY(read_uint8(&root, "exception-level",
		       (uint8_t *)&vm->partition.run_time_el));
	dlog_verbose(" Run-time EL %u\n", vm->partition.run_time_el);

	TRY(read_uint8(&root, "execution-state",
		       (uint8_t *)&vm->partition.execution_state));
	dlog_verbose(" Execution state %u\n", vm->partition.execution_state);

	/*
	 * The load address in the manifest is informational here; the
	 * authoritative value comes from the partition package and a
	 * mismatch is only warned about.
	 */
	TRY(read_optional_uint64(&root, "load-address", 0, &load_address));
	if (vm->partition.load_addr != load_address) {
		dlog_warning(
			"Partition's load address at its manifest differs"
			" from specified in partition's package.\n");
	}
	dlog_verbose(" Load address %#lx\n", vm->partition.load_addr);

	TRY(read_optional_uint64(&root, "entrypoint-offset", 0,
				 &vm->partition.ep_offset));
	dlog_verbose(" Entry point offset %#zx\n", vm->partition.ep_offset);

	TRY(read_optional_uint32(&root, "gp-register-num",
				 DEFAULT_BOOT_GP_REGISTER,
				 &vm->partition.gp_register_num));

	if (vm->partition.gp_register_num != DEFAULT_BOOT_GP_REGISTER) {
		dlog_verbose(" Boot GP register: x%u\n",
			     vm->partition.gp_register_num);
	}

	TRY(read_optional_uint16(&root, "boot-order", DEFAULT_BOOT_ORDER,
				 &vm->partition.boot_order));
	if (vm->partition.boot_order != DEFAULT_BOOT_ORDER) {
		dlog_verbose(" Boot order %u\n", vm->partition.boot_order);
	}

	if (!check_boot_order(vm->partition.boot_order)) {
		return MANIFEST_ERROR_INVALID_BOOT_ORDER;
	}

	TRY(read_optional_uint8(&root, "xlat-granule", 0,
				(uint8_t *)&vm->partition.xlat_granule));
	dlog_verbose(" Translation granule %u\n", vm->partition.xlat_granule);

	ffa_node = root;
	if (fdt_find_child(&ffa_node, &rxtx_node_name)) {
		if (!fdt_is_compatible(&ffa_node,
				       "arm,ffa-manifest-rx_tx-buffer")) {
			return MANIFEST_ERROR_NOT_COMPATIBLE;
		}

		/*
		 * Read only phandles for now, it will be used to update buffers
		 * while parsing memory regions.
		 */
		TRY(read_uint32(&ffa_node, "rx-buffer",
				&vm->partition.rxtx.rx_phandle));

		TRY(read_uint32(&ffa_node, "tx-buffer",
				&vm->partition.rxtx.tx_phandle));

		vm->partition.rxtx.available = true;
	}

	TRY(read_uint16(&root, "messaging-method",
			(uint16_t *)&vm->partition.messaging_method));
	dlog_verbose(" Messaging method %u\n", vm->partition.messaging_method);

	TRY(read_bool(&root, "managed-exit", &managed_exit_field_present));

	TRY(read_optional_uint8(
		&root, "ns-interrupts-action", NS_ACTION_SIGNALED,
		(uint8_t *)&vm->partition.ns_interrupts_action));

	/*
	 * An SP manifest can specify one of the fields listed below:
	 * `managed-exit`: Introduced in FF-A v1.0 spec.
	 * `ns-interrupts-action`: Introduced in FF-A v1.1 EAC0 spec.
	 * If both are missing from the manifest, the default response is
	 * NS_ACTION_SIGNALED.
	 */
	if (managed_exit_field_present) {
		vm->partition.ns_interrupts_action = NS_ACTION_ME;
	}

	if (vm->partition.ns_interrupts_action != NS_ACTION_QUEUED &&
	    vm->partition.ns_interrupts_action != NS_ACTION_ME &&
	    vm->partition.ns_interrupts_action != NS_ACTION_SIGNALED) {
		return MANIFEST_ERROR_ILLEGAL_NS_INT_ACTION;
	}

	dlog_verbose(
		" NS Interrupts %s\n",
		(vm->partition.ns_interrupts_action == NS_ACTION_QUEUED)
			? "Queued"
		: (vm->partition.ns_interrupts_action == NS_ACTION_SIGNALED)
			? "Signaled"
			: "Managed exit");

	if (vm->partition.ns_interrupts_action == NS_ACTION_ME) {
		/* Managed exit only supported by S_EL1 partitions. */
		if (vm->partition.run_time_el != S_EL1) {
			dlog_error(
				"Managed exit cannot be supported by this "
				"partition\n");
			return MANIFEST_ERROR_ILLEGAL_NS_INT_ACTION;
		}

		TRY(read_bool(&root, "managed-exit-virq",
			      &vm->partition.me_signal_virq));
		if (vm->partition.me_signal_virq) {
			dlog_verbose(" Managed Exit signaled through vIRQ\n");
		}
	}

	TRY(read_bool(&root, "notification-support",
		      &vm->partition.notification_support));
	if (vm->partition.notification_support) {
		dlog_verbose(" Notifications Receipt Supported\n");
	}

	TRY(read_optional_uint8(
		&root, "other-s-interrupts-action", OTHER_S_INT_ACTION_SIGNALED,
		(uint8_t *)&vm->partition.other_s_interrupts_action));

	/*
	 * Queueing other-world secure interrupts is only consistent with
	 * also queueing NS interrupts.
	 */
	if (vm->partition.other_s_interrupts_action ==
	    OTHER_S_INT_ACTION_QUEUED) {
		if (vm->partition.ns_interrupts_action != NS_ACTION_QUEUED) {
			dlog_error(
				"Choice of the fields 'ns-interrupts-action' "
				"and 'other-s-interrupts-action' not "
				"compatible\n");
			return MANIFEST_ERROR_NOT_COMPATIBLE;
		}
	} else if (vm->partition.other_s_interrupts_action >
		   OTHER_S_INT_ACTION_SIGNALED) {
		dlog_error(
			"Illegal value specified for the field "
			"'other-s-interrupts-action': %u\n",
			vm->partition.other_s_interrupts_action);
		return MANIFEST_ERROR_ILLEGAL_OTHER_S_INT_ACTION;
	}

	/* Parse boot info node. */
	if (boot_info_node != NULL) {
		ffa_node = root;
		vm->partition.boot_info =
			fdt_find_child(&ffa_node, &boot_info_node_name);
		if (vm->partition.boot_info) {
			*boot_info_node = ffa_node;
		}
	} else {
		vm->partition.boot_info = false;
	}

	TRY(read_optional_uint32(
		&root, "vm-availability-messages", 0,
		(uint32_t *)&vm->partition.vm_availability_messages));
	dlog_verbose("vm-availability-messages=%#x\n",
		     *(uint32_t *)&vm->partition.vm_availability_messages);

	/* Reserved bits must be zero. */
	if (vm->partition.vm_availability_messages.mbz != 0) {
		return MANIFEST_ERROR_VM_AVAILABILITY_MESSAGE_INVALID;
	}

	/*
	 * Power management messages: default to CPU_OFF+CPU_ON support,
	 * but single-context and (S-)EL0 partitions get none.
	 */
	TRY(read_optional_uint32(
		&root, "power-management-messages",
		MANIFEST_POWER_MANAGEMENT_CPU_OFF_SUPPORTED |
			MANIFEST_POWER_MANAGEMENT_CPU_ON_SUPPORTED,
		&vm->partition.power_management));
	vm->partition.power_management &= MANIFEST_POWER_MANAGEMENT_ALL_MASK;
	if (vm->partition.execution_ctx_count == 1 ||
	    vm->partition.run_time_el == S_EL0 ||
	    vm->partition.run_time_el == EL0) {
		vm->partition.power_management =
			MANIFEST_POWER_MANAGEMENT_NONE_MASK;
	}

	dlog_verbose(" Power management messages %#x\n",
		     vm->partition.power_management);

	/* Parse memory-regions */
	ffa_node = root;
	if (fdt_find_child(&ffa_node, &mem_region_node_name)) {
		TRY(parse_ffa_memory_region_node(
			&ffa_node, vm->partition.load_addr,
			vm->partition.mem_regions,
			&vm->partition.mem_region_count, &vm->partition.rxtx,
			boot_params));
	}
	dlog_verbose(" Total %u memory regions found\n",
		     vm->partition.mem_region_count);

	/* Parse Device-regions */
	ffa_node = root;
	if (fdt_find_child(&ffa_node, &dev_region_node_name)) {
		TRY(parse_ffa_device_region_node(
			&ffa_node, vm->partition.dev_regions,
			&vm->partition.dev_region_count,
			&vm->partition.dma_device_count, boot_params));
	}
	dlog_verbose(" Total %u device regions found\n",
		     vm->partition.dev_region_count);

	if (!map_dma_device_id_to_stream_ids(vm)) {
		return MANIFEST_ERROR_NOT_COMPATIBLE;
	}

	return sanity_check_ffa_manifest(vm);
}
1466
Olivier Deprez62d99e32020-01-09 15:58:07 +01001467static enum manifest_return_code parse_ffa_partition_package(
1468 struct mm_stage1_locked stage1_locked, struct fdt_node *node,
J-Alves19e20cf2023-08-02 12:48:55 +01001469 struct manifest_vm *vm, ffa_id_t vm_id,
J-Alves77b6f4f2023-03-15 11:34:49 +00001470 const struct boot_params *boot_params, struct mpool *ppool)
Olivier Deprez62d99e32020-01-09 15:58:07 +01001471{
1472 enum manifest_return_code ret = MANIFEST_ERROR_NOT_COMPATIBLE;
J-Alves2f86c1e2022-02-23 18:44:19 +00001473 uintpaddr_t load_address;
1474 struct sp_pkg_header header;
Olivier Deprez62d99e32020-01-09 15:58:07 +01001475 struct fdt sp_fdt;
J-Alves2f86c1e2022-02-23 18:44:19 +00001476 vaddr_t pkg_start;
1477 vaddr_t manifest_address;
J-Alves35315782022-01-25 17:58:32 +00001478 struct fdt_node boot_info_node;
Olivier Deprez62d99e32020-01-09 15:58:07 +01001479
1480 /*
1481 * This must have been hinted as being an FF-A partition,
1482 * return straight with failure if this is not the case.
1483 */
1484 if (!vm->is_ffa_partition) {
J-Alves2f86c1e2022-02-23 18:44:19 +00001485 return ret;
1486 }
1487
1488 TRY(read_uint64(node, "load_address", &load_address));
1489 if (!is_aligned(load_address, PAGE_SIZE)) {
Olivier Deprez62d99e32020-01-09 15:58:07 +01001490 return MANIFEST_ERROR_NOT_COMPATIBLE;
1491 }
1492
J-Alves2f86c1e2022-02-23 18:44:19 +00001493 assert(load_address != 0U);
1494
1495 if (!sp_pkg_init(stage1_locked, pa_init(load_address), &header,
1496 ppool)) {
1497 return ret;
Olivier Deprez62d99e32020-01-09 15:58:07 +01001498 }
1499
J-Alves2f86c1e2022-02-23 18:44:19 +00001500 pkg_start = va_init(load_address);
Olivier Deprez62d99e32020-01-09 15:58:07 +01001501
J-Alves2f86c1e2022-02-23 18:44:19 +00001502 if (vm_id != HF_PRIMARY_VM_ID &&
1503 sp_pkg_get_mem_size(&header) >= vm->secondary.mem_size) {
J-Alves4369bd92020-08-07 16:35:36 +01001504 dlog_error("Invalid package header or DT size.\n");
J-Alves2f86c1e2022-02-23 18:44:19 +00001505 goto out;
Olivier Deprez62d99e32020-01-09 15:58:07 +01001506 }
1507
J-Alves2f86c1e2022-02-23 18:44:19 +00001508 manifest_address = va_add(va_init(load_address), header.pm_offset);
1509 if (!fdt_init_from_ptr(&sp_fdt, ptr_from_va(manifest_address),
1510 header.pm_size)) {
Kathleen Capella422b10b2023-06-30 18:28:27 -04001511 dlog_error("manifest.c: FDT failed validation.\n");
J-Alves2f86c1e2022-02-23 18:44:19 +00001512 goto out;
Olivier Deprez62d99e32020-01-09 15:58:07 +01001513 }
1514
Davidson K5d5f2792024-08-19 19:09:12 +05301515 vm->partition.load_addr = load_address;
1516
J-Alves77b6f4f2023-03-15 11:34:49 +00001517 ret = parse_ffa_manifest(&sp_fdt, vm, &boot_info_node, boot_params);
Olivier Deprez62d99e32020-01-09 15:58:07 +01001518 if (ret != MANIFEST_SUCCESS) {
Olivier Deprez3cbdf862023-06-07 15:36:32 +02001519 dlog_error("Error parsing partition manifest.\n");
J-Alves2f86c1e2022-02-23 18:44:19 +00001520 goto out;
Olivier Deprez62d99e32020-01-09 15:58:07 +01001521 }
1522
J-Alves889a1d72022-05-13 11:38:27 +01001523 if (vm->partition.gp_register_num != DEFAULT_BOOT_GP_REGISTER) {
1524 if (header.version == SP_PKG_HEADER_VERSION_2 &&
1525 vm->partition.boot_info &&
1526 !ffa_boot_info_node(&boot_info_node, pkg_start, &header)) {
1527 dlog_error("Failed to process boot information.\n");
1528 }
J-Alves35315782022-01-25 17:58:32 +00001529 }
J-Alves2f86c1e2022-02-23 18:44:19 +00001530out:
1531 sp_pkg_deinit(stage1_locked, pkg_start, &header, ppool);
Olivier Deprez62d99e32020-01-09 15:58:07 +01001532 return ret;
1533}
1534
David Brazdil7a462ec2019-08-15 12:27:47 +01001535/**
1536 * Parse manifest from FDT.
1537 */
Olivier Deprez62d99e32020-01-09 15:58:07 +01001538enum manifest_return_code manifest_init(struct mm_stage1_locked stage1_locked,
Olivier Deprez93644652022-09-09 11:01:12 +02001539 struct manifest **manifest_ret,
Olivier Deprez62d99e32020-01-09 15:58:07 +01001540 struct memiter *manifest_fdt,
J-Alves77b6f4f2023-03-15 11:34:49 +00001541 struct boot_params *boot_params,
Olivier Deprez62d99e32020-01-09 15:58:07 +01001542 struct mpool *ppool)
David Brazdil7a462ec2019-08-15 12:27:47 +01001543{
Olivier Deprez93644652022-09-09 11:01:12 +02001544 struct manifest *manifest;
David Brazdilb856be62020-03-25 10:14:55 +00001545 struct string vm_name;
1546 struct fdt fdt;
David Brazdil7a462ec2019-08-15 12:27:47 +01001547 struct fdt_node hyp_node;
1548 size_t i = 0;
1549 bool found_primary_vm = false;
Daniel Boulby3f295b12023-12-15 17:38:08 +00001550 const size_t spmc_size =
1551 align_up(pa_difference(layout_text_begin(), layout_image_end()),
1552 PAGE_SIZE);
1553 const size_t spmc_page_count = spmc_size / PAGE_SIZE;
David Brazdil7a462ec2019-08-15 12:27:47 +01001554
J-Alvescd438fa2023-04-26 10:13:12 +01001555 if (boot_params->mem_ranges_count == 0 &&
1556 boot_params->ns_mem_ranges_count == 0) {
1557 return MANIFEST_ERROR_MEMORY_MISSING;
1558 }
1559
J-Alves77b6f4f2023-03-15 11:34:49 +00001560 dump_memory_ranges(boot_params->mem_ranges,
1561 boot_params->mem_ranges_count, false);
1562 dump_memory_ranges(boot_params->ns_mem_ranges,
1563 boot_params->ns_mem_ranges_count, true);
1564
Olivier Deprez93644652022-09-09 11:01:12 +02001565 /* Allocate space in the ppool for the manifest data. */
1566 if (!manifest_data_init(ppool)) {
1567 panic("Unable to allocate manifest data.\n");
Daniel Boulby801f8ef2022-06-27 14:21:01 +01001568 }
1569
Daniel Boulby3f295b12023-12-15 17:38:08 +00001570 /*
1571 * Add SPMC load address range to memory ranges to track to ensure
1572 * no partitions overlap with this memory.
1573 * The system integrator should have prevented this by defining the
1574 * secure memory region ranges so as not to overlap the SPMC load
1575 * address range. Therefore, this code is intended to catch any
1576 * potential misconfigurations there.
1577 */
1578 if (is_aligned(pa_addr(layout_text_begin()), PAGE_SIZE) &&
1579 spmc_page_count != 0) {
1580 TRY(check_and_record_memory_used(
1581 pa_addr(layout_text_begin()), spmc_page_count,
1582 manifest_data->mem_regions,
1583 &manifest_data->mem_regions_index));
1584 }
1585
Olivier Deprez93644652022-09-09 11:01:12 +02001586 manifest = &manifest_data->manifest;
1587 *manifest_ret = manifest;
1588
David Brazdilb856be62020-03-25 10:14:55 +00001589 if (!fdt_init_from_memiter(&fdt, manifest_fdt)) {
1590 return MANIFEST_ERROR_FILE_SIZE; /* TODO */
David Brazdila2358d42020-01-27 18:51:38 +00001591 }
1592
David Brazdil7a462ec2019-08-15 12:27:47 +01001593 /* Find hypervisor node. */
David Brazdilb856be62020-03-25 10:14:55 +00001594 if (!fdt_find_node(&fdt, "/hypervisor", &hyp_node)) {
David Brazdil7a462ec2019-08-15 12:27:47 +01001595 return MANIFEST_ERROR_NO_HYPERVISOR_FDT_NODE;
1596 }
1597
David Brazdil74e9c3b2019-08-28 11:09:08 +01001598 /* Check "compatible" property. */
David Brazdilf4925382020-03-25 13:33:51 +00001599 if (!fdt_is_compatible(&hyp_node, "hafnium,hafnium")) {
David Brazdil74e9c3b2019-08-28 11:09:08 +01001600 return MANIFEST_ERROR_NOT_COMPATIBLE;
1601 }
1602
Olivier Deprez622ab8d2021-08-02 12:15:45 +02001603 TRY(read_bool(&hyp_node, "ffa_tee_enabled",
1604 &manifest->ffa_tee_enabled));
Andrew Walbran41a49d82020-01-10 17:46:38 +00001605
David Brazdil7a462ec2019-08-15 12:27:47 +01001606 /* Iterate over reserved VM IDs and check no such nodes exist. */
Olivier Deprez2a8ee342020-08-03 15:10:44 +02001607 for (i = HF_VM_ID_BASE; i < HF_VM_ID_OFFSET; i++) {
J-Alves19e20cf2023-08-02 12:48:55 +01001608 ffa_id_t vm_id = (ffa_id_t)i - HF_VM_ID_BASE;
David Brazdil7a462ec2019-08-15 12:27:47 +01001609 struct fdt_node vm_node = hyp_node;
David Brazdil7a462ec2019-08-15 12:27:47 +01001610
David Brazdilb856be62020-03-25 10:14:55 +00001611 generate_vm_node_name(&vm_name, vm_id);
1612 if (fdt_find_child(&vm_node, &vm_name)) {
David Brazdil7a462ec2019-08-15 12:27:47 +01001613 return MANIFEST_ERROR_RESERVED_VM_ID;
1614 }
1615 }
1616
1617 /* Iterate over VM nodes until we find one that does not exist. */
1618 for (i = 0; i <= MAX_VMS; ++i) {
J-Alves19e20cf2023-08-02 12:48:55 +01001619 ffa_id_t vm_id = HF_VM_ID_OFFSET + i;
David Brazdil7a462ec2019-08-15 12:27:47 +01001620 struct fdt_node vm_node = hyp_node;
David Brazdil7a462ec2019-08-15 12:27:47 +01001621
Olivier Deprez2a8ee342020-08-03 15:10:44 +02001622 generate_vm_node_name(&vm_name, vm_id - HF_VM_ID_BASE);
David Brazdilb856be62020-03-25 10:14:55 +00001623 if (!fdt_find_child(&vm_node, &vm_name)) {
David Brazdil7a462ec2019-08-15 12:27:47 +01001624 break;
1625 }
1626
1627 if (i == MAX_VMS) {
1628 return MANIFEST_ERROR_TOO_MANY_VMS;
1629 }
1630
1631 if (vm_id == HF_PRIMARY_VM_ID) {
1632 CHECK(found_primary_vm == false); /* sanity check */
1633 found_primary_vm = true;
1634 }
1635
David Brazdil0251b942019-09-10 15:59:50 +01001636 manifest->vm_count = i + 1;
Olivier Deprez62d99e32020-01-09 15:58:07 +01001637
1638 TRY(parse_vm_common(&vm_node, &manifest->vm[i], vm_id));
1639
Raghu Krishnamurthyb49549e2021-07-02 08:27:38 -07001640 CHECK(!manifest->vm[i].is_hyp_loaded ||
1641 manifest->vm[i].is_ffa_partition);
1642
1643 if (manifest->vm[i].is_ffa_partition &&
1644 !manifest->vm[i].is_hyp_loaded) {
Olivier Deprez62d99e32020-01-09 15:58:07 +01001645 TRY(parse_ffa_partition_package(stage1_locked, &vm_node,
1646 &manifest->vm[i], vm_id,
J-Alves77b6f4f2023-03-15 11:34:49 +00001647 boot_params, ppool));
J-Alves596049f2023-03-15 11:40:24 +00001648 size_t page_count =
1649 align_up(manifest->vm[i].secondary.mem_size,
1650 PAGE_SIZE) /
1651 PAGE_SIZE;
1652
1653 if (vm_id == HF_PRIMARY_VM_ID) {
1654 continue;
1655 }
1656
1657 TRY(check_partition_memory_is_valid(
1658 manifest->vm[i].partition.load_addr, page_count,
Olivier Deprez058ddee2024-08-27 09:22:11 +02001659 0, boot_params, false));
J-Alves596049f2023-03-15 11:40:24 +00001660
1661 /*
1662 * Check if memory from load-address until (load-address
1663 * + memory size) has been used by other partition.
1664 */
1665 TRY(check_and_record_memory_used(
Daniel Boulby3f295b12023-12-15 17:38:08 +00001666 manifest->vm[i].partition.load_addr, page_count,
1667 manifest_data->mem_regions,
1668 &manifest_data->mem_regions_index));
Olivier Deprez62d99e32020-01-09 15:58:07 +01001669 } else {
1670 TRY(parse_vm(&vm_node, &manifest->vm[i], vm_id));
1671 }
David Brazdil7a462ec2019-08-15 12:27:47 +01001672 }
1673
Olivier Deprezfb05f3c2020-11-10 17:48:04 +01001674 if (!found_primary_vm && vm_id_is_current_world(HF_PRIMARY_VM_ID)) {
David Brazdil7a462ec2019-08-15 12:27:47 +01001675 return MANIFEST_ERROR_NO_PRIMARY_VM;
1676 }
1677
1678 return MANIFEST_SUCCESS;
1679}
1680
Olivier Deprez93644652022-09-09 11:01:12 +02001681/**
1682 * Free manifest data resources, called once manifest parsing has
1683 * completed and VMs are loaded.
1684 */
Daniel Boulby801f8ef2022-06-27 14:21:01 +01001685void manifest_deinit(struct mpool *ppool)
1686{
Olivier Deprez93644652022-09-09 11:01:12 +02001687 manifest_data_deinit(ppool);
Daniel Boulby801f8ef2022-06-27 14:21:01 +01001688}
1689
David Brazdil7a462ec2019-08-15 12:27:47 +01001690const char *manifest_strerror(enum manifest_return_code ret_code)
1691{
1692 switch (ret_code) {
1693 case MANIFEST_SUCCESS:
1694 return "Success";
David Brazdila2358d42020-01-27 18:51:38 +00001695 case MANIFEST_ERROR_FILE_SIZE:
1696 return "Total size in header does not match file size";
Olivier Deprez62d99e32020-01-09 15:58:07 +01001697 case MANIFEST_ERROR_MALFORMED_DTB:
1698 return "Malformed device tree blob";
David Brazdila2358d42020-01-27 18:51:38 +00001699 case MANIFEST_ERROR_NO_ROOT_NODE:
1700 return "Could not find root node in manifest";
David Brazdil7a462ec2019-08-15 12:27:47 +01001701 case MANIFEST_ERROR_NO_HYPERVISOR_FDT_NODE:
1702 return "Could not find \"hypervisor\" node in manifest";
David Brazdil74e9c3b2019-08-28 11:09:08 +01001703 case MANIFEST_ERROR_NOT_COMPATIBLE:
1704 return "Hypervisor manifest entry not compatible with Hafnium";
David Brazdil7a462ec2019-08-15 12:27:47 +01001705 case MANIFEST_ERROR_RESERVED_VM_ID:
1706 return "Manifest defines a VM with a reserved ID";
1707 case MANIFEST_ERROR_NO_PRIMARY_VM:
1708 return "Manifest does not contain a primary VM entry";
1709 case MANIFEST_ERROR_TOO_MANY_VMS:
1710 return "Manifest specifies more VMs than Hafnium has "
1711 "statically allocated space for";
1712 case MANIFEST_ERROR_PROPERTY_NOT_FOUND:
1713 return "Property not found";
1714 case MANIFEST_ERROR_MALFORMED_STRING:
1715 return "Malformed string property";
David Brazdil0dbb41f2019-09-09 18:03:35 +01001716 case MANIFEST_ERROR_STRING_TOO_LONG:
1717 return "String too long";
David Brazdil7a462ec2019-08-15 12:27:47 +01001718 case MANIFEST_ERROR_MALFORMED_INTEGER:
1719 return "Malformed integer property";
1720 case MANIFEST_ERROR_INTEGER_OVERFLOW:
1721 return "Integer overflow";
Andrew Scullae9962e2019-10-03 16:51:16 +01001722 case MANIFEST_ERROR_MALFORMED_INTEGER_LIST:
1723 return "Malformed integer list property";
Andrew Scullb2c3a242019-11-04 13:52:36 +00001724 case MANIFEST_ERROR_MALFORMED_BOOLEAN:
1725 return "Malformed boolean property";
Manish Pandey6542f5c2020-04-27 14:37:46 +01001726 case MANIFEST_ERROR_MEMORY_REGION_NODE_EMPTY:
1727 return "Memory-region node should have at least one entry";
Manish Pandeye68e7932020-04-23 15:29:28 +01001728 case MANIFEST_ERROR_DEVICE_REGION_NODE_EMPTY:
1729 return "Device-region node should have at least one entry";
Manish Pandeyf06c9072020-09-29 15:41:58 +01001730 case MANIFEST_ERROR_RXTX_SIZE_MISMATCH:
1731 return "RX and TX buffers should be of same size";
Varun Wadekar4afbfd72022-10-13 14:30:18 +01001732 case MANIFEST_ERROR_MEM_REGION_EMPTY:
1733 return "Memory region should have at least one page";
Karl Meakinf6d49402023-04-04 18:14:26 +01001734 case MANIFEST_ERROR_BASE_ADDRESS_AND_RELATIVE_ADDRESS:
1735 return "Base and relative addresses are mutually exclusive";
Daniel Boulbya7e9e182022-06-27 14:21:01 +01001736 case MANIFEST_ERROR_MEM_REGION_OVERLAP:
1737 return "Memory region overlaps with one already allocated";
Daniel Boulbyc1a613d2022-10-18 11:26:17 +01001738 case MANIFEST_ERROR_MEM_REGION_UNALIGNED:
1739 return "Memory region is not aligned to a page boundary";
Raghu Krishnamurthy384693c2021-10-11 13:56:24 -07001740 case MANIFEST_ERROR_INVALID_MEM_PERM:
1741 return "Memory permission should be RO, RW or RX";
J-Alves35315782022-01-25 17:58:32 +00001742 case MANIFEST_ERROR_ARGUMENTS_LIST_EMPTY:
1743 return "Arguments-list node should have at least one argument";
Daniel Boulby801f8ef2022-06-27 14:21:01 +01001744 case MANIFEST_ERROR_INTERRUPT_ID_REPEATED:
1745 return "Interrupt ID already assigned to another endpoint";
Madhukar Pappireddy5c04a382022-12-28 11:29:26 -06001746 case MANIFEST_ERROR_ILLEGAL_NS_INT_ACTION:
Madhukar Pappireddy84154052022-06-21 18:30:25 -05001747 return "Illegal value specidied for the field: Action in "
1748 "response to NS Interrupt";
Raghu Krishnamurthy98da1ca2022-10-04 08:59:01 -07001749 case MANIFEST_ERROR_INTERRUPT_ID_NOT_IN_LIST:
1750 return "Interrupt ID is not in the list of interrupts";
Madhukar Pappireddy5c04a382022-12-28 11:29:26 -06001751 case MANIFEST_ERROR_ILLEGAL_OTHER_S_INT_ACTION:
1752 return "Illegal value specified for the field: Action in "
1753 "response to Other-S Interrupt";
J-Alves77b6f4f2023-03-15 11:34:49 +00001754 case MANIFEST_ERROR_MEMORY_MISSING:
1755 return "Memory nodes must be defined in the SPMC manifest "
Olivier Deprez3cbdf862023-06-07 15:36:32 +02001756 "('memory' and 'ns-memory')";
J-Alves77b6f4f2023-03-15 11:34:49 +00001757 case MANIFEST_ERROR_PARTITION_ADDRESS_OVERLAP:
1758 return "Partition's memory [load address: load address + "
1759 "memory size[ overlap with other allocated "
Olivier Deprez3cbdf862023-06-07 15:36:32 +02001760 "regions";
J-Alves77b6f4f2023-03-15 11:34:49 +00001761 case MANIFEST_ERROR_MEM_REGION_INVALID:
Olivier Deprez3cbdf862023-06-07 15:36:32 +02001762 return "Invalid memory region range";
Daniel Boulby4339edc2024-02-21 14:59:00 +00001763 case MANIFEST_ERROR_DEVICE_MEM_REGION_INVALID:
1764 return "Invalid device memory region range";
Kathleen Capella4a2a6e72023-04-21 14:43:26 -04001765 case MANIFEST_ERROR_INVALID_BOOT_ORDER:
1766 return "Boot order should be a unique value less than "
1767 "default largest value";
Kathleen Capella422b10b2023-06-30 18:28:27 -04001768 case MANIFEST_ERROR_UUID_ALL_ZEROS:
1769 return "UUID should not be NIL";
Karl Meakin45abeeb2024-08-02 16:55:44 +01001770 case MANIFEST_ERROR_TOO_MANY_UUIDS:
1771 return "Manifest specifies more UUIDs than Hafnium has "
1772 "statically allocated space for";
Madhukar Pappireddy3c2b7912023-10-11 14:47:27 -05001773 case MANIFEST_ERROR_MISSING_SMMU_ID:
1774 return "SMMU ID must be specified for the given Stream IDs";
1775 case MANIFEST_ERROR_MISMATCH_DMA_ACCESS_PERMISSIONS:
1776 return "DMA device access permissions must match memory region "
1777 "attributes";
1778 case MANIFEST_ERROR_STREAM_IDS_OVERFLOW:
1779 return "DMA device stream ID count exceeds predefined limit";
1780 case MANIFEST_ERROR_DMA_ACCESS_PERMISSIONS_OVERFLOW:
1781 return "DMA access permissions count exceeds predefined limit";
Madhukar Pappireddy8a364472023-10-11 15:34:07 -05001782 case MANIFEST_ERROR_DMA_DEVICE_OVERFLOW:
1783 return "Number of device regions with DMA peripheral exceeds "
1784 "limit.";
Karl Meakin18694022024-08-02 13:59:25 +01001785 case MANIFEST_ERROR_VM_AVAILABILITY_MESSAGE_INVALID:
1786 return "VM availability messages invalid (bits [31:2] must be "
1787 "zero)";
David Brazdil7a462ec2019-08-15 12:27:47 +01001788 }
1789
1790 panic("Unexpected manifest return code.");
1791}