blob: f9d17034df732400dd8b4edc1a597842c941d3dc [file] [log] [blame]
David Brazdil7a462ec2019-08-15 12:27:47 +01001/*
2 * Copyright 2019 The Hafnium Authors.
3 *
Andrew Walbrane959ec12020-06-17 15:01:09 +01004 * Use of this source code is governed by a BSD-style
5 * license that can be found in the LICENSE file or at
6 * https://opensource.org/licenses/BSD-3-Clause.
David Brazdil7a462ec2019-08-15 12:27:47 +01007 */
8
9#include "hf/manifest.h"
10
J-Alvesd8a1d362023-03-08 11:15:28 +000011#include <stddef.h>
J-Alves77b6f4f2023-03-15 11:34:49 +000012#include <stdint.h>
J-Alvesd8a1d362023-03-08 11:15:28 +000013
J-Alves35315782022-01-25 17:58:32 +000014#include "hf/arch/types.h"
J-Alves77b6f4f2023-03-15 11:34:49 +000015#include "hf/arch/vmid_base.h"
J-Alves35315782022-01-25 17:58:32 +000016
David Brazdil7a462ec2019-08-15 12:27:47 +010017#include "hf/addr.h"
Daniel Boulbya2f8c662021-11-26 17:52:53 +000018#include "hf/assert.h"
J-Alves35315782022-01-25 17:58:32 +000019#include "hf/boot_info.h"
J-Alves77b6f4f2023-03-15 11:34:49 +000020#include "hf/boot_params.h"
David Brazdil7a462ec2019-08-15 12:27:47 +010021#include "hf/check.h"
Andrew Scullae9962e2019-10-03 16:51:16 +010022#include "hf/dlog.h"
J-Alves77b6f4f2023-03-15 11:34:49 +000023#include "hf/fdt.h"
Karl Meakin0e617d92024-04-05 12:55:22 +010024#include "hf/ffa.h"
Karl Meakin275aa292024-11-18 14:56:09 +000025#include "hf/ffa_partition_manifest.h"
Daniel Boulby3f295b12023-12-15 17:38:08 +000026#include "hf/layout.h"
J-Alvesd853c372025-01-22 18:18:21 +000027#include "hf/mem_range.h"
J-Alves77b6f4f2023-03-15 11:34:49 +000028#include "hf/mm.h"
29#include "hf/mpool.h"
J-Alves6e0abc42024-12-30 16:51:16 +000030#include "hf/partition_pkg.h"
David Brazdil7a462ec2019-08-15 12:27:47 +010031#include "hf/static_assert.h"
32#include "hf/std.h"
33
/*
 * Evaluate `expr` (which must yield an `enum manifest_return_code`) and
 * early-return it from the enclosing function on any non-success code.
 */
#define TRY(expr)                                            \
	do {                                                 \
		enum manifest_return_code ret_code = (expr); \
		if (ret_code != MANIFEST_SUCCESS) {          \
			return ret_code;                     \
		}                                            \
	} while (0)
41
David Brazdilb856be62020-03-25 10:14:55 +000042#define VM_ID_MAX (HF_VM_ID_OFFSET + MAX_VMS - 1)
43#define VM_ID_MAX_DIGITS (5)
44#define VM_NAME_EXTRA_CHARS (3) /* "vm" + number + '\0' */
45#define VM_NAME_MAX_SIZE (VM_ID_MAX_DIGITS + VM_NAME_EXTRA_CHARS)
46static_assert(VM_NAME_MAX_SIZE <= STRING_MAX_SIZE,
47 "VM name does not fit into a struct string.");
48static_assert(VM_ID_MAX <= 99999, "Insufficient VM_NAME_BUF_SIZE");
Olivier Deprez2a8ee342020-08-03 15:10:44 +020049static_assert((HF_OTHER_WORLD_ID > VM_ID_MAX) ||
50 (HF_OTHER_WORLD_ID < HF_VM_ID_BASE),
Andrew Walbran9daa57e2019-09-27 13:33:20 +010051 "TrustZone VM ID clashes with normal VM range.");
David Brazdil7a462ec2019-08-15 12:27:47 +010052
Kathleen Capella4a2a6e72023-04-21 14:43:26 -040053/* Bitmap to track boot order values in use. */
54#define BOOT_ORDER_ENTRY_BITS (sizeof(uint64_t) * 8)
55#define BOOT_ORDER_MAP_ENTRIES \
56 ((DEFAULT_BOOT_ORDER + (BOOT_ORDER_ENTRY_BITS - 1)) / \
57 BOOT_ORDER_ENTRY_BITS)
58
Daniel Boulby801f8ef2022-06-27 14:21:01 +010059/**
J-Alves596049f2023-03-15 11:40:24 +000060 * A struct to keep track of the partitions properties during early boot
61 * manifest parsing:
62 * - Interrupts ID.
63 * - Physical memory ranges.
Daniel Boulby801f8ef2022-06-27 14:21:01 +010064 */
Olivier Deprez93644652022-09-09 11:01:12 +020065struct manifest_data {
66 struct manifest manifest;
Daniel Boulby4ca50f02022-07-29 18:29:34 +010067 struct interrupt_bitmap intids;
J-Alves596049f2023-03-15 11:40:24 +000068 /*
69 * Allocate enough for the maximum amount of memory regions defined via
70 * the partitions manifest, and regions for each partition
71 * address-space.
72 */
Daniel Boulby4d165722023-11-21 13:46:42 +000073 struct mem_range mem_regions[PARTITION_MAX_MEMORY_REGIONS * MAX_VMS +
74 PARTITION_MAX_DEVICE_REGIONS * MAX_VMS +
75 MAX_VMS];
Daniel Boulby3f295b12023-12-15 17:38:08 +000076 size_t mem_regions_index;
Kathleen Capella4a2a6e72023-04-21 14:43:26 -040077 uint64_t boot_order_values[BOOT_ORDER_MAP_ENTRIES];
Daniel Boulby801f8ef2022-06-27 14:21:01 +010078};
Olivier Deprez93644652022-09-09 11:01:12 +020079
Daniel Boulby801f8ef2022-06-27 14:21:01 +010080/**
Daniel Boulbya7e9e182022-06-27 14:21:01 +010081 * Calculate the number of entries in the ppool that are required to
Olivier Deprez93644652022-09-09 11:01:12 +020082 * store the manifest_data struct.
Daniel Boulby801f8ef2022-06-27 14:21:01 +010083 */
J-Alves596049f2023-03-15 11:40:24 +000084static const size_t manifest_data_ppool_entries =
Olivier Deprez93644652022-09-09 11:01:12 +020085 (align_up(sizeof(struct manifest_data), MM_PPOOL_ENTRY_SIZE) /
Daniel Boulbya7e9e182022-06-27 14:21:01 +010086 MM_PPOOL_ENTRY_SIZE);
87
Olivier Deprez93644652022-09-09 11:01:12 +020088static struct manifest_data *manifest_data;
Daniel Boulby801f8ef2022-06-27 14:21:01 +010089
Kathleen Capella4a2a6e72023-04-21 14:43:26 -040090static bool check_boot_order(uint16_t boot_order)
91{
92 uint16_t i;
93 uint64_t boot_order_mask;
94
95 if (boot_order == DEFAULT_BOOT_ORDER) {
96 return true;
97 }
98 if (boot_order > DEFAULT_BOOT_ORDER) {
99 dlog_error("Boot order should not exceed %x",
100 DEFAULT_BOOT_ORDER);
101 return false;
102 }
103
104 i = boot_order / BOOT_ORDER_ENTRY_BITS;
J-Alvesf1d9fdd2025-03-20 19:47:45 +0000105 boot_order_mask = UINT64_C(1) << (boot_order % BOOT_ORDER_ENTRY_BITS);
Kathleen Capella4a2a6e72023-04-21 14:43:26 -0400106
107 if ((boot_order_mask & manifest_data->boot_order_values[i]) != 0U) {
108 dlog_error("Boot order must be a unique value.");
109 return false;
110 }
111
112 manifest_data->boot_order_values[i] |= boot_order_mask;
113
114 return true;
115}
116
Daniel Boulby801f8ef2022-06-27 14:21:01 +0100117/**
Olivier Deprez93644652022-09-09 11:01:12 +0200118 * Allocates and clear memory for the manifest data in the given memory pool.
Daniel Boulby801f8ef2022-06-27 14:21:01 +0100119 * Returns true if the memory is successfully allocated.
120 */
Olivier Deprez93644652022-09-09 11:01:12 +0200121static bool manifest_data_init(struct mpool *ppool)
Daniel Boulby801f8ef2022-06-27 14:21:01 +0100122{
Olivier Deprez93644652022-09-09 11:01:12 +0200123 manifest_data = (struct manifest_data *)mpool_alloc_contiguous(
124 ppool, manifest_data_ppool_entries, 1);
J-Alves5c0ae6f2023-06-14 15:20:21 +0100125
126 assert(manifest_data != NULL);
127
Olivier Deprez93644652022-09-09 11:01:12 +0200128 memset_s(manifest_data, sizeof(struct manifest_data), 0,
129 sizeof(struct manifest_data));
Daniel Boulbya7e9e182022-06-27 14:21:01 +0100130
Olivier Deprez93644652022-09-09 11:01:12 +0200131 return manifest_data != NULL;
Daniel Boulby801f8ef2022-06-27 14:21:01 +0100132}
133
134/**
Olivier Deprez93644652022-09-09 11:01:12 +0200135 * Frees the memory used for the manifest data in the given memory pool.
Daniel Boulby801f8ef2022-06-27 14:21:01 +0100136 */
Olivier Deprez93644652022-09-09 11:01:12 +0200137static void manifest_data_deinit(struct mpool *ppool)
Daniel Boulby801f8ef2022-06-27 14:21:01 +0100138{
Daniel Boulbya7e9e182022-06-27 14:21:01 +0100139 /**
Olivier Deprez93644652022-09-09 11:01:12 +0200140 * Clear and return the memory used for the manifest_data struct to the
141 * memory pool.
Daniel Boulbya7e9e182022-06-27 14:21:01 +0100142 */
Olivier Deprez93644652022-09-09 11:01:12 +0200143 memset_s(manifest_data, sizeof(struct manifest_data), 0,
144 sizeof(struct manifest_data));
145 mpool_add_chunk(ppool, manifest_data, manifest_data_ppool_entries);
Daniel Boulby801f8ef2022-06-27 14:21:01 +0100146}
147
J-Alves19e20cf2023-08-02 12:48:55 +0100148static inline size_t count_digits(ffa_id_t vm_id)
David Brazdilb856be62020-03-25 10:14:55 +0000149{
150 size_t digits = 0;
151
152 do {
153 digits++;
154 vm_id /= 10;
155 } while (vm_id);
156 return digits;
157}
158
David Brazdil7a462ec2019-08-15 12:27:47 +0100159/**
160 * Generates a string with the two letters "vm" followed by an integer.
161 * Assumes `buf` is of size VM_NAME_BUF_SIZE.
162 */
J-Alves19e20cf2023-08-02 12:48:55 +0100163static void generate_vm_node_name(struct string *str, ffa_id_t vm_id)
David Brazdil7a462ec2019-08-15 12:27:47 +0100164{
165 static const char *digits = "0123456789";
David Brazdilb856be62020-03-25 10:14:55 +0000166 size_t vm_id_digits = count_digits(vm_id);
167 char *base = str->data;
168 char *ptr = base + (VM_NAME_EXTRA_CHARS + vm_id_digits);
David Brazdil7a462ec2019-08-15 12:27:47 +0100169
Daniel Boulbya2f8c662021-11-26 17:52:53 +0000170 assert(vm_id_digits <= VM_ID_MAX_DIGITS);
David Brazdil7a462ec2019-08-15 12:27:47 +0100171 *(--ptr) = '\0';
172 do {
173 *(--ptr) = digits[vm_id % 10];
174 vm_id /= 10;
175 } while (vm_id);
176 *(--ptr) = 'm';
177 *(--ptr) = 'v';
Daniel Boulbya2f8c662021-11-26 17:52:53 +0000178 assert(ptr == base);
David Brazdil7a462ec2019-08-15 12:27:47 +0100179}
180
Andrew Scullae9962e2019-10-03 16:51:16 +0100181/**
Andrew Scullb2c3a242019-11-04 13:52:36 +0000182 * Read a boolean property: true if present; false if not. If present, the value
183 * of the property must be empty else it is considered malformed.
Andrew Scullae9962e2019-10-03 16:51:16 +0100184 */
Andrew Scullb2c3a242019-11-04 13:52:36 +0000185static enum manifest_return_code read_bool(const struct fdt_node *node,
186 const char *property, bool *out)
Andrew Scullae9962e2019-10-03 16:51:16 +0100187{
David Brazdilb856be62020-03-25 10:14:55 +0000188 struct memiter data;
189 bool present = fdt_read_property(node, property, &data);
Andrew Scullae9962e2019-10-03 16:51:16 +0100190
David Brazdilb856be62020-03-25 10:14:55 +0000191 if (present && memiter_size(&data) != 0) {
Andrew Scullb2c3a242019-11-04 13:52:36 +0000192 return MANIFEST_ERROR_MALFORMED_BOOLEAN;
193 }
194
195 *out = present;
196 return MANIFEST_SUCCESS;
Andrew Scullae9962e2019-10-03 16:51:16 +0100197}
198
Andrew Scull72b43c02019-09-18 13:53:45 +0100199static enum manifest_return_code read_string(const struct fdt_node *node,
David Brazdil136f2942019-09-23 14:11:03 +0100200 const char *property,
201 struct string *out)
Andrew Scull72b43c02019-09-18 13:53:45 +0100202{
David Brazdilb856be62020-03-25 10:14:55 +0000203 struct memiter data;
Andrew Scull72b43c02019-09-18 13:53:45 +0100204
David Brazdilb856be62020-03-25 10:14:55 +0000205 if (!fdt_read_property(node, property, &data)) {
Andrew Scull72b43c02019-09-18 13:53:45 +0100206 return MANIFEST_ERROR_PROPERTY_NOT_FOUND;
207 }
208
David Brazdilb856be62020-03-25 10:14:55 +0000209 switch (string_init(out, &data)) {
David Brazdil136f2942019-09-23 14:11:03 +0100210 case STRING_SUCCESS:
211 return MANIFEST_SUCCESS;
212 case STRING_ERROR_INVALID_INPUT:
213 return MANIFEST_ERROR_MALFORMED_STRING;
214 case STRING_ERROR_TOO_LONG:
215 return MANIFEST_ERROR_STRING_TOO_LONG;
216 }
Andrew Scull72b43c02019-09-18 13:53:45 +0100217}
218
219static enum manifest_return_code read_optional_string(
David Brazdil136f2942019-09-23 14:11:03 +0100220 const struct fdt_node *node, const char *property, struct string *out)
Andrew Scull72b43c02019-09-18 13:53:45 +0100221{
David Brazdil136f2942019-09-23 14:11:03 +0100222 enum manifest_return_code ret;
Andrew Scull72b43c02019-09-18 13:53:45 +0100223
David Brazdil136f2942019-09-23 14:11:03 +0100224 ret = read_string(node, property, out);
225 if (ret == MANIFEST_ERROR_PROPERTY_NOT_FOUND) {
226 string_init_empty(out);
227 ret = MANIFEST_SUCCESS;
Andrew Scull72b43c02019-09-18 13:53:45 +0100228 }
David Brazdil136f2942019-09-23 14:11:03 +0100229 return ret;
Andrew Scull72b43c02019-09-18 13:53:45 +0100230}
231
David Brazdil7a462ec2019-08-15 12:27:47 +0100232static enum manifest_return_code read_uint64(const struct fdt_node *node,
233 const char *property,
234 uint64_t *out)
235{
David Brazdilb856be62020-03-25 10:14:55 +0000236 struct memiter data;
David Brazdil7a462ec2019-08-15 12:27:47 +0100237
David Brazdilb856be62020-03-25 10:14:55 +0000238 if (!fdt_read_property(node, property, &data)) {
David Brazdil7a462ec2019-08-15 12:27:47 +0100239 return MANIFEST_ERROR_PROPERTY_NOT_FOUND;
240 }
241
David Brazdilb856be62020-03-25 10:14:55 +0000242 if (!fdt_parse_number(&data, memiter_size(&data), out)) {
David Brazdil7a462ec2019-08-15 12:27:47 +0100243 return MANIFEST_ERROR_MALFORMED_INTEGER;
244 }
245
246 return MANIFEST_SUCCESS;
247}
248
David Brazdil080ee312020-02-25 15:30:30 -0800249static enum manifest_return_code read_optional_uint64(
250 const struct fdt_node *node, const char *property,
251 uint64_t default_value, uint64_t *out)
252{
253 enum manifest_return_code ret;
254
255 ret = read_uint64(node, property, out);
256 if (ret == MANIFEST_ERROR_PROPERTY_NOT_FOUND) {
257 *out = default_value;
258 return MANIFEST_SUCCESS;
259 }
260 return ret;
261}
262
Olivier Deprez62d99e32020-01-09 15:58:07 +0100263static enum manifest_return_code read_uint32(const struct fdt_node *node,
264 const char *property,
265 uint32_t *out)
266{
267 uint64_t value;
268
269 TRY(read_uint64(node, property, &value));
270
271 if (value > UINT32_MAX) {
272 return MANIFEST_ERROR_INTEGER_OVERFLOW;
273 }
274
275 *out = (uint32_t)value;
276 return MANIFEST_SUCCESS;
277}
278
Manish Pandeye68e7932020-04-23 15:29:28 +0100279static enum manifest_return_code read_optional_uint32(
280 const struct fdt_node *node, const char *property,
281 uint32_t default_value, uint32_t *out)
282{
283 enum manifest_return_code ret;
284
285 ret = read_uint32(node, property, out);
286 if (ret == MANIFEST_ERROR_PROPERTY_NOT_FOUND) {
287 *out = default_value;
288 return MANIFEST_SUCCESS;
289 }
290 return ret;
291}
292
David Brazdil7a462ec2019-08-15 12:27:47 +0100293static enum manifest_return_code read_uint16(const struct fdt_node *node,
294 const char *property,
295 uint16_t *out)
296{
297 uint64_t value;
298
299 TRY(read_uint64(node, property, &value));
David Brazdil7a462ec2019-08-15 12:27:47 +0100300 if (value > UINT16_MAX) {
301 return MANIFEST_ERROR_INTEGER_OVERFLOW;
302 }
303
304 *out = (uint16_t)value;
305 return MANIFEST_SUCCESS;
306}
307
J-Alvesb37fd082020-10-22 12:29:21 +0100308static enum manifest_return_code read_optional_uint16(
309 const struct fdt_node *node, const char *property,
310 uint16_t default_value, uint16_t *out)
311{
312 enum manifest_return_code ret;
313
314 ret = read_uint16(node, property, out);
315 if (ret == MANIFEST_ERROR_PROPERTY_NOT_FOUND) {
316 *out = default_value;
317 return MANIFEST_SUCCESS;
318 }
319
Kathleen Capella4a2a6e72023-04-21 14:43:26 -0400320 return ret;
J-Alvesb37fd082020-10-22 12:29:21 +0100321}
322
Olivier Deprez62d99e32020-01-09 15:58:07 +0100323static enum manifest_return_code read_uint8(const struct fdt_node *node,
324 const char *property, uint8_t *out)
325{
326 uint64_t value;
327
328 TRY(read_uint64(node, property, &value));
329
330 if (value > UINT8_MAX) {
331 return MANIFEST_ERROR_INTEGER_OVERFLOW;
332 }
333
334 *out = (uint8_t)value;
335 return MANIFEST_SUCCESS;
336}
337
J-Alves4369bd92020-08-07 16:35:36 +0100338static enum manifest_return_code read_optional_uint8(
339 const struct fdt_node *node, const char *property,
340 uint8_t default_value, uint8_t *out)
341{
342 enum manifest_return_code ret;
343
344 ret = read_uint8(node, property, out);
345 if (ret == MANIFEST_ERROR_PROPERTY_NOT_FOUND) {
346 *out = default_value;
347 return MANIFEST_SUCCESS;
348 }
349
350 return MANIFEST_SUCCESS;
351}
352
/* Iterator over a devicetree cell list interpreted as uint32 values. */
struct uint32list_iter {
	struct memiter mem_it;
};
356
J-Alves4369bd92020-08-07 16:35:36 +0100357static enum manifest_return_code read_uint32list(const struct fdt_node *node,
358 const char *property,
359 struct uint32list_iter *out)
Andrew Scullae9962e2019-10-03 16:51:16 +0100360{
David Brazdilb856be62020-03-25 10:14:55 +0000361 struct memiter data;
Andrew Scullae9962e2019-10-03 16:51:16 +0100362
David Brazdilb856be62020-03-25 10:14:55 +0000363 if (!fdt_read_property(node, property, &data)) {
Andrew Scullae9962e2019-10-03 16:51:16 +0100364 memiter_init(&out->mem_it, NULL, 0);
J-Alves4369bd92020-08-07 16:35:36 +0100365 return MANIFEST_ERROR_PROPERTY_NOT_FOUND;
Andrew Scullae9962e2019-10-03 16:51:16 +0100366 }
367
David Brazdilb856be62020-03-25 10:14:55 +0000368 if ((memiter_size(&data) % sizeof(uint32_t)) != 0) {
Andrew Scullae9962e2019-10-03 16:51:16 +0100369 return MANIFEST_ERROR_MALFORMED_INTEGER_LIST;
370 }
371
David Brazdilb856be62020-03-25 10:14:55 +0000372 out->mem_it = data;
Andrew Scullae9962e2019-10-03 16:51:16 +0100373 return MANIFEST_SUCCESS;
374}
375
J-Alves4369bd92020-08-07 16:35:36 +0100376static enum manifest_return_code read_optional_uint32list(
377 const struct fdt_node *node, const char *property,
378 struct uint32list_iter *out)
379{
380 enum manifest_return_code ret = read_uint32list(node, property, out);
381
382 if (ret == MANIFEST_ERROR_PROPERTY_NOT_FOUND) {
383 return MANIFEST_SUCCESS;
384 }
385 return ret;
386}
387
Andrew Scullae9962e2019-10-03 16:51:16 +0100388static bool uint32list_has_next(const struct uint32list_iter *list)
389{
390 return memiter_size(&list->mem_it) > 0;
391}
392
David Brazdil5ea99462020-03-25 13:01:47 +0000393static enum manifest_return_code uint32list_get_next(
394 struct uint32list_iter *list, uint32_t *out)
Andrew Scullae9962e2019-10-03 16:51:16 +0100395{
Andrew Scullae9962e2019-10-03 16:51:16 +0100396 uint64_t num;
397
398 CHECK(uint32list_has_next(list));
David Brazdilb856be62020-03-25 10:14:55 +0000399 if (!fdt_parse_number(&list->mem_it, sizeof(uint32_t), &num)) {
Andrew Scullae9962e2019-10-03 16:51:16 +0100400 return MANIFEST_ERROR_MALFORMED_INTEGER;
401 }
402
David Brazdil5ea99462020-03-25 13:01:47 +0000403 *out = (uint32_t)num;
404 return MANIFEST_SUCCESS;
Andrew Scullae9962e2019-10-03 16:51:16 +0100405}
406
Karl Meakin60532972024-08-02 16:11:21 +0100407/**
408 * Parse a UUID from `uuid` into `out`.
409 * Returns `MANIFEST_SUCCESS` if parsing succeeded.
410 */
411static enum manifest_return_code parse_uuid(struct uint32list_iter *uuid,
412 struct ffa_uuid *out)
413{
414 for (size_t i = 0; i < 4 && uint32list_has_next(uuid); i++) {
415 TRY(uint32list_get_next(uuid, &out->uuid[i]));
416 }
417
418 return MANIFEST_SUCCESS;
419}
420
421/**
422 * Parse a list of UUIDs from `uuid` into `out`.
423 * Writes the number of UUIDs parsed to `len`.
424 * Returns `MANIFEST_SUCCESS` if parsing succeeded.
425 * Returns `MANIFEST_ERROR_UUID_ALL_ZEROS` if any of the UUIDs are all zeros.
426 * Returns `MANIFEEST_ERROR_TOO_MANY_UUIDS` if there are more than
427 * `PARTITION_MAX_UUIDS`
428 */
429static enum manifest_return_code parse_uuid_list(struct uint32list_iter *uuid,
430 struct ffa_uuid *out,
431 uint16_t *len)
432{
433 uint16_t j;
434
Karl Meakin45abeeb2024-08-02 16:55:44 +0100435 for (j = 0; uint32list_has_next(uuid); j++) {
Karl Meakin60532972024-08-02 16:11:21 +0100436 TRY(parse_uuid(uuid, &out[j]));
437
438 if (ffa_uuid_is_null(&out[j])) {
439 return MANIFEST_ERROR_UUID_ALL_ZEROS;
440 }
441 dlog_verbose(" UUID %#x-%x-%x-%x\n", out[j].uuid[0],
442 out[j].uuid[1], out[j].uuid[2], out[j].uuid[3]);
Karl Meakin45abeeb2024-08-02 16:55:44 +0100443
444 if (j >= PARTITION_MAX_UUIDS) {
445 return MANIFEST_ERROR_TOO_MANY_UUIDS;
446 }
Karl Meakin60532972024-08-02 16:11:21 +0100447 }
448
449 *len = j;
450 return MANIFEST_SUCCESS;
451}
452
Olivier Deprez62d99e32020-01-09 15:58:07 +0100453static enum manifest_return_code parse_vm_common(const struct fdt_node *node,
454 struct manifest_vm *vm,
J-Alves19e20cf2023-08-02 12:48:55 +0100455 ffa_id_t vm_id)
David Brazdil7a462ec2019-08-15 12:27:47 +0100456{
Andrew Scullae9962e2019-10-03 16:51:16 +0100457 struct uint32list_iter smcs;
David Brazdil5ea99462020-03-25 13:01:47 +0000458 size_t idx;
Andrew Scullae9962e2019-10-03 16:51:16 +0100459
Olivier Deprez62d99e32020-01-09 15:58:07 +0100460 TRY(read_bool(node, "is_ffa_partition", &vm->is_ffa_partition));
461
Raghu Krishnamurthyb49549e2021-07-02 08:27:38 -0700462 TRY(read_bool(node, "hyp_loaded", &vm->is_hyp_loaded));
463
David Brazdil136f2942019-09-23 14:11:03 +0100464 TRY(read_string(node, "debug_name", &vm->debug_name));
Andrew Scullae9962e2019-10-03 16:51:16 +0100465
466 TRY(read_optional_uint32list(node, "smc_whitelist", &smcs));
467 while (uint32list_has_next(&smcs) &&
468 vm->smc_whitelist.smc_count < MAX_SMCS) {
David Brazdil5ea99462020-03-25 13:01:47 +0000469 idx = vm->smc_whitelist.smc_count++;
470 TRY(uint32list_get_next(&smcs, &vm->smc_whitelist.smcs[idx]));
Andrew Scullae9962e2019-10-03 16:51:16 +0100471 }
472
473 if (uint32list_has_next(&smcs)) {
Karl Meakine8937d92024-03-19 16:04:25 +0000474 dlog_warning("%s SMC whitelist too long.\n",
475 vm->debug_name.data);
Andrew Scullae9962e2019-10-03 16:51:16 +0100476 }
477
Andrew Scullb2c3a242019-11-04 13:52:36 +0000478 TRY(read_bool(node, "smc_whitelist_permissive",
479 &vm->smc_whitelist.permissive));
Andrew Scullae9962e2019-10-03 16:51:16 +0100480
Olivier Deprez62d99e32020-01-09 15:58:07 +0100481 if (vm_id != HF_PRIMARY_VM_ID) {
482 TRY(read_uint64(node, "mem_size", &vm->secondary.mem_size));
483 TRY(read_uint16(node, "vcpu_count", &vm->secondary.vcpu_count));
Fuad Tabba50469e02020-06-30 15:14:28 +0100484 TRY(read_optional_string(node, "fdt_filename",
485 &vm->secondary.fdt_filename));
Olivier Deprez62d99e32020-01-09 15:58:07 +0100486 }
487
488 return MANIFEST_SUCCESS;
489}
490
491static enum manifest_return_code parse_vm(struct fdt_node *node,
492 struct manifest_vm *vm,
J-Alves19e20cf2023-08-02 12:48:55 +0100493 ffa_id_t vm_id)
Olivier Deprez62d99e32020-01-09 15:58:07 +0100494{
495 TRY(read_optional_string(node, "kernel_filename",
496 &vm->kernel_filename));
497
David Brazdile6f83222019-09-23 14:47:37 +0100498 if (vm_id == HF_PRIMARY_VM_ID) {
499 TRY(read_optional_string(node, "ramdisk_filename",
500 &vm->primary.ramdisk_filename));
David Brazdil080ee312020-02-25 15:30:30 -0800501 TRY(read_optional_uint64(node, "boot_address",
502 MANIFEST_INVALID_ADDRESS,
503 &vm->primary.boot_address));
David Brazdil7a462ec2019-08-15 12:27:47 +0100504 }
Raghu Krishnamurthy988a5e72021-02-27 21:46:06 -0800505 TRY(read_optional_uint8(node, "exception-level", (uint8_t)EL1,
Raghu Krishnamurthy8c250a92021-07-02 12:16:42 -0700506 (uint8_t *)&vm->partition.run_time_el));
Olivier Deprez62d99e32020-01-09 15:58:07 +0100507
David Brazdil7a462ec2019-08-15 12:27:47 +0100508 return MANIFEST_SUCCESS;
509}
510
Karl Meakinab0add02024-11-18 15:26:37 +0000511/**
512 * Return true if the region described by `region_start` and `page_count`
513 * overlaps with any of `ranges`.
514 */
515static bool is_memory_region_within_ranges(uintptr_t region_start,
J-Alves77b6f4f2023-03-15 11:34:49 +0000516 uint32_t page_count,
Karl Meakinab0add02024-11-18 15:26:37 +0000517 const struct mem_range ranges[],
518 size_t ranges_size)
J-Alves77b6f4f2023-03-15 11:34:49 +0000519{
J-Alvesd853c372025-01-22 18:18:21 +0000520 struct mem_range region = make_mem_range(region_start, page_count);
J-Alves77b6f4f2023-03-15 11:34:49 +0000521
522 for (size_t i = 0; i < ranges_size; i++) {
J-Alvesd853c372025-01-22 18:18:21 +0000523 if (mem_range_overlaps(ranges[i], region)) {
J-Alves77b6f4f2023-03-15 11:34:49 +0000524 return true;
525 }
526 }
527
528 return false;
529}
530
531void dump_memory_ranges(const struct mem_range *ranges,
532 const size_t ranges_size, bool ns)
533{
534 if (LOG_LEVEL < LOG_LEVEL_VERBOSE) {
535 return;
536 }
537
538 dlog("%s Memory ranges:\n", ns ? "NS" : "S");
539
540 for (size_t i = 0; i < ranges_size; i++) {
541 uintptr_t begin = pa_addr(ranges[i].begin);
542 uintptr_t end = pa_addr(ranges[i].end);
543 size_t page_count =
544 align_up(pa_difference(ranges[i].begin, ranges[i].end),
545 PAGE_SIZE) /
546 PAGE_SIZE;
547
Karl Meakine8937d92024-03-19 16:04:25 +0000548 dlog(" [%lx - %lx (%zu pages)]\n", begin, end, page_count);
J-Alves77b6f4f2023-03-15 11:34:49 +0000549 }
550}
551
552/**
553 * Check the partition's assigned memory is contained in the memory ranges
554 * configured for the SWd, in the SPMC's manifest.
555 */
556static enum manifest_return_code check_partition_memory_is_valid(
557 uintptr_t base_address, uint32_t page_count, uint32_t attributes,
Olivier Deprez058ddee2024-08-27 09:22:11 +0200558 const struct boot_params *params, bool is_device_region)
J-Alves77b6f4f2023-03-15 11:34:49 +0000559{
560 bool is_secure_region =
561 (attributes & MANIFEST_REGION_ATTR_SECURITY) == 0U;
Daniel Boulby4339edc2024-02-21 14:59:00 +0000562 const struct mem_range *ranges_from_manifest;
563 size_t ranges_count;
564 bool within_ranges;
565 enum manifest_return_code error_return;
566
567 if (!is_device_region) {
568 ranges_from_manifest = is_secure_region ? params->mem_ranges
569 : params->ns_mem_ranges;
570 ranges_count = is_secure_region ? params->mem_ranges_count
571 : params->ns_mem_ranges_count;
572 error_return = MANIFEST_ERROR_MEM_REGION_INVALID;
573 } else {
574 ranges_from_manifest = is_secure_region
575 ? params->device_mem_ranges
576 : params->ns_device_mem_ranges;
577 ranges_count = is_secure_region
578 ? params->device_mem_ranges_count
579 : params->ns_device_mem_ranges_count;
580 error_return = MANIFEST_ERROR_DEVICE_MEM_REGION_INVALID;
581 }
582
583 within_ranges = is_memory_region_within_ranges(
J-Alves77b6f4f2023-03-15 11:34:49 +0000584 base_address, page_count, ranges_from_manifest, ranges_count);
585
Daniel Boulby4339edc2024-02-21 14:59:00 +0000586 return within_ranges ? MANIFEST_SUCCESS : error_return;
J-Alves77b6f4f2023-03-15 11:34:49 +0000587}
588
589/*
590 * Keep track of the memory allocated by partitions. This includes memory region
Daniel Boulby4d165722023-11-21 13:46:42 +0000591 * nodes and device region nodes defined in their respective partition
592 * manifests, as well address space defined from their load address.
J-Alves77b6f4f2023-03-15 11:34:49 +0000593 */
594static enum manifest_return_code check_and_record_memory_used(
Daniel Boulby3f295b12023-12-15 17:38:08 +0000595 uintptr_t base_address, uint32_t page_count,
596 struct mem_range *mem_ranges, size_t *mem_regions_index)
Daniel Boulbya7e9e182022-06-27 14:21:01 +0100597{
Karl Meakin6291eb22024-11-18 12:43:47 +0000598 paddr_t begin;
Daniel Boulbya7e9e182022-06-27 14:21:01 +0100599
Daniel Boulbyc1a613d2022-10-18 11:26:17 +0100600 if (!is_aligned(base_address, PAGE_SIZE)) {
Karl Meakine8937d92024-03-19 16:04:25 +0000601 dlog_error("base_address (%#lx) is not aligned to page size.\n",
Daniel Boulbyc1a613d2022-10-18 11:26:17 +0100602 base_address);
603 return MANIFEST_ERROR_MEM_REGION_UNALIGNED;
604 }
605
Karl Meakin6291eb22024-11-18 12:43:47 +0000606 if (is_memory_region_within_ranges(base_address, page_count, mem_ranges,
607 *mem_regions_index)) {
608 return MANIFEST_ERROR_MEM_REGION_OVERLAP;
Daniel Boulbya7e9e182022-06-27 14:21:01 +0100609 }
610
Karl Meakin6291eb22024-11-18 12:43:47 +0000611 begin = pa_init(base_address);
612
613 mem_ranges[*mem_regions_index].begin = begin;
614 mem_ranges[*mem_regions_index].end =
615 pa_add(begin, page_count * PAGE_SIZE - 1);
616 (*mem_regions_index)++;
617
618 return MANIFEST_SUCCESS;
Daniel Boulbya7e9e182022-06-27 14:21:01 +0100619}
620
/**
 * Parses the DMA-related properties shared by memory-region and
 * device-region manifest nodes ("smmu-id" and "stream-ids") into `dma_prop`.
 * Returns an error if more than PARTITION_MAX_STREAMS_PER_DEVICE stream IDs
 * are listed, or if stream IDs are given without an SMMU ID.
 */
static enum manifest_return_code parse_common_fields_mem_dev_region_node(
	struct fdt_node *ffa_node, struct dma_device_properties *dma_prop)
{
	uint32_t j = 0;
	struct uint32list_iter list;

	/* The SMMU ID is optional; MANIFEST_INVALID_ID means "not set". */
	TRY(read_optional_uint32(ffa_node, "smmu-id", MANIFEST_INVALID_ID,
				 &dma_prop->smmu_id));
	if (dma_prop->smmu_id != MANIFEST_INVALID_ID) {
		dlog_verbose(" smmu-id: %u\n", dma_prop->smmu_id);
	}

	TRY(read_optional_uint32list(ffa_node, "stream-ids", &list));
	dlog_verbose(" Stream IDs assigned:\n");

	j = 0;
	while (uint32list_has_next(&list)) {
		/* Bound the number of stream IDs before storing another. */
		if (j == PARTITION_MAX_STREAMS_PER_DEVICE) {
			return MANIFEST_ERROR_STREAM_IDS_OVERFLOW;
		}

		TRY(uint32list_get_next(&list, &dma_prop->stream_ids[j]));
		dlog_verbose(" %u\n", dma_prop->stream_ids[j]);
		j++;
	}
	if (j == 0) {
		dlog_verbose(" None\n");
	} else if (dma_prop->smmu_id == MANIFEST_INVALID_ID) {
		/*
		 * SMMU ID must be specified if the partition specifies
		 * Stream IDs for any device upstream of SMMU.
		 */
		return MANIFEST_ERROR_MISSING_SMMU_ID;
	}
	dma_prop->stream_count = j;

	return MANIFEST_SUCCESS;
}
659
Karl Meakin68368292024-11-18 12:02:12 +0000660/**
661 * Parse and validate a memory regions's base address.
662 *
663 * The base address can be specified either as an absolute address (with
664 * `base-address`) or as an offset from `load_address` (with
665 * `load-address-relative-offset`).
666
667 * Returns an error if:
668 * - Neither `base-address` or `load-address-relative-offset` are specified.
669 * - Both `base-address` and `load-address-relative-offset` are specified.
670 * - The effective address (`load-address-relative-offset` + `load_address`)
671 * would overflow.
672 */
673static enum manifest_return_code parse_base_address(
674 struct fdt_node *mem_node, uintptr_t load_address,
675 struct memory_region *mem_region)
676{
677 uintptr_t relative_offset;
678 uintptr_t absolute_address;
679
680 bool is_relative;
681 bool is_absolute;
682
683 TRY(read_optional_uint64(mem_node, "base-address",
684 MANIFEST_INVALID_ADDRESS, &absolute_address));
685
686 TRY(read_optional_uint64(mem_node, "load-address-relative-offset",
687 MANIFEST_INVALID_ADDRESS, &relative_offset));
688
689 is_absolute = (absolute_address != MANIFEST_INVALID_ADDRESS);
690 is_relative = (relative_offset != MANIFEST_INVALID_ADDRESS);
691
692 if (!is_absolute && !is_relative) {
693 return MANIFEST_ERROR_PROPERTY_NOT_FOUND;
694 }
695
696 if (is_absolute && is_relative) {
697 return MANIFEST_ERROR_BASE_ADDRESS_AND_RELATIVE_ADDRESS;
698 }
699
700 if (is_relative && relative_offset > UINT64_MAX - load_address) {
701 return MANIFEST_ERROR_INTEGER_OVERFLOW;
702 }
703
704 mem_region->base_address =
705 is_absolute ? absolute_address : load_address + relative_offset;
Karl Meakin6291eb22024-11-18 12:43:47 +0000706 mem_region->is_relative = is_relative;
Karl Meakin68368292024-11-18 12:02:12 +0000707
708 return MANIFEST_SUCCESS;
709}
710
Karl Meakin275aa292024-11-18 14:56:09 +0000711/**
712 * Parse and validate a memory region/device region's attributes.
713 * Returns an error if:
714 * - Memory region attributes are not `R` or `RW` or `RX`.
715 * - Device region attributes are not `R` or `RW`.
716 * NOTE: Security attribute is not checked by this function, it is checked in
717 * the load phase.
718 */
719static enum manifest_return_code parse_ffa_region_attributes(
720 struct fdt_node *node, uint32_t *out_attributes, bool is_device)
721{
722 uint32_t attributes;
723
724 TRY(read_uint32(node, "attributes", out_attributes));
725
726 attributes = *out_attributes &
727 (MANIFEST_REGION_ATTR_READ | MANIFEST_REGION_ATTR_WRITE |
728 MANIFEST_REGION_ATTR_EXEC);
729
730 if (is_device) {
731 switch (attributes) {
732 case MANIFEST_REGION_ATTR_READ:
733 case MANIFEST_REGION_ATTR_READ | MANIFEST_REGION_ATTR_WRITE:
734 break;
735 default:
736 return MANIFEST_ERROR_INVALID_MEM_PERM;
737 }
738 } else {
739 switch (attributes) {
740 case MANIFEST_REGION_ATTR_READ:
741 case MANIFEST_REGION_ATTR_READ | MANIFEST_REGION_ATTR_WRITE:
742 case MANIFEST_REGION_ATTR_READ | MANIFEST_REGION_ATTR_EXEC:
743 break;
744 default:
745 return MANIFEST_ERROR_INVALID_MEM_PERM;
746 }
747 }
748
749 /* Filter region attributes. */
750 *out_attributes &= MANIFEST_REGION_ALL_ATTR_MASK;
751
752 return MANIFEST_SUCCESS;
753}
754
Karl Meakina2e7ae72024-11-18 15:15:53 +0000755static enum manifest_return_code parse_page_count(struct fdt_node *node,
756 uint32_t *page_count)
757{
758 TRY(read_uint32(node, "pages-count", page_count));
759
760 if (*page_count == 0) {
761 return MANIFEST_ERROR_MEM_REGION_EMPTY;
762 }
763
764 return MANIFEST_SUCCESS;
765}
766
Manish Pandey6542f5c2020-04-27 14:37:46 +0100767static enum manifest_return_code parse_ffa_memory_region_node(
Karl Meakinf6d49402023-04-04 18:14:26 +0100768 struct fdt_node *mem_node, uintptr_t load_address,
769 struct memory_region *mem_regions, uint16_t *count, struct rx_tx *rxtx,
J-Alves77b6f4f2023-03-15 11:34:49 +0000770 const struct boot_params *boot_params)
Manish Pandey6542f5c2020-04-27 14:37:46 +0100771{
Manish Pandeyfa1f2912020-05-05 12:57:01 +0100772 uint32_t phandle;
Raghu Krishnamurthy641dcd82022-07-19 23:21:20 -0700773 uint16_t i = 0;
Madhukar Pappireddy3c2b7912023-10-11 14:47:27 -0500774 uint32_t j = 0;
Madhukar Pappireddy3c2b7912023-10-11 14:47:27 -0500775 struct uint32list_iter list;
Manish Pandey6542f5c2020-04-27 14:37:46 +0100776
777 dlog_verbose(" Partition memory regions\n");
778
779 if (!fdt_is_compatible(mem_node, "arm,ffa-manifest-memory-regions")) {
780 return MANIFEST_ERROR_NOT_COMPATIBLE;
781 }
782
783 if (!fdt_first_child(mem_node)) {
784 return MANIFEST_ERROR_MEMORY_REGION_NODE_EMPTY;
785 }
786
787 do {
788 dlog_verbose(" Memory Region[%u]\n", i);
789
790 TRY(read_optional_string(mem_node, "description",
Karl Meakinfb761eb2024-11-20 15:59:56 +0000791 &mem_regions[i].description));
792 dlog_verbose(" Description: %s\n",
793 string_data(&mem_regions[i].description));
Manish Pandey6542f5c2020-04-27 14:37:46 +0100794
Karl Meakin68368292024-11-18 12:02:12 +0000795 TRY(parse_base_address(mem_node, load_address,
796 &mem_regions[i]));
Karl Meakinf6d49402023-04-04 18:14:26 +0100797
Karl Meakina2e7ae72024-11-18 15:15:53 +0000798 TRY(parse_page_count(mem_node, &mem_regions[i].page_count));
Olivier Deprezb7aa0c22023-06-29 10:46:26 +0200799 dlog_verbose(" Pages_count: %u\n",
Manish Pandey6542f5c2020-04-27 14:37:46 +0100800 mem_regions[i].page_count);
801
Karl Meakin275aa292024-11-18 14:56:09 +0000802 TRY(parse_ffa_region_attributes(
803 mem_node, &mem_regions[i].attributes, false));
Olivier Deprezb7aa0c22023-06-29 10:46:26 +0200804 dlog_verbose(" Attributes: %#x\n",
Manish Pandey6542f5c2020-04-27 14:37:46 +0100805 mem_regions[i].attributes);
Manish Pandeyfa1f2912020-05-05 12:57:01 +0100806
J-Alves77b6f4f2023-03-15 11:34:49 +0000807 TRY(check_partition_memory_is_valid(
808 mem_regions[i].base_address, mem_regions[i].page_count,
Olivier Deprez058ddee2024-08-27 09:22:11 +0200809 mem_regions[i].attributes, boot_params, false));
J-Alves77b6f4f2023-03-15 11:34:49 +0000810
Karl Meakin6291eb22024-11-18 12:43:47 +0000811 /*
812 * Memory regions are not allowed to overlap with
813 * `load_address`, unless the memory region is relative.
814 */
815 if (!mem_regions[i].is_relative) {
816 struct mem_range range =
817 make_mem_range(mem_regions[i].base_address,
818 mem_regions[i].page_count);
819
J-Alvesd853c372025-01-22 18:18:21 +0000820 if (mem_range_contains_address(range, load_address)) {
Karl Meakin6291eb22024-11-18 12:43:47 +0000821 return MANIFEST_ERROR_MEM_REGION_OVERLAP;
822 }
823 }
824
Daniel Boulby3f295b12023-12-15 17:38:08 +0000825 TRY(check_and_record_memory_used(
826 mem_regions[i].base_address, mem_regions[i].page_count,
827 manifest_data->mem_regions,
828 &manifest_data->mem_regions_index));
J-Alves77b6f4f2023-03-15 11:34:49 +0000829
Madhukar Pappireddy718afa92024-06-20 16:48:49 -0500830 TRY(parse_common_fields_mem_dev_region_node(
831 mem_node, &mem_regions[i].dma_prop));
Madhukar Pappireddy3c2b7912023-10-11 14:47:27 -0500832
833 TRY(read_optional_uint32list(
834 mem_node, "stream-ids-access-permissions", &list));
835 dlog_verbose(" Access permissions of Stream IDs:\n");
836
837 j = 0;
838 while (uint32list_has_next(&list)) {
839 uint32_t permissions;
840
841 if (j == PARTITION_MAX_STREAMS_PER_DEVICE) {
842 return MANIFEST_ERROR_DMA_ACCESS_PERMISSIONS_OVERFLOW;
843 }
844
845 TRY(uint32list_get_next(&list, &permissions));
846 dlog_verbose(" %u\n", permissions);
847
848 if (j == 0) {
Madhukar Pappireddy9c764b32024-06-20 14:36:55 -0500849 mem_regions[i].dma_access_permissions =
Madhukar Pappireddy3c2b7912023-10-11 14:47:27 -0500850 permissions;
851 }
852
853 /*
854 * All stream ids belonging to a dma device must specify
855 * the same access permissions.
856 */
857 if (permissions !=
Madhukar Pappireddy9c764b32024-06-20 14:36:55 -0500858 mem_regions[i].dma_access_permissions) {
Madhukar Pappireddy3c2b7912023-10-11 14:47:27 -0500859 return MANIFEST_ERROR_MISMATCH_DMA_ACCESS_PERMISSIONS;
860 }
861
862 j++;
863 }
864
865 if (j == 0) {
866 dlog_verbose(" None\n");
867 } else if (j != mem_regions[i].dma_prop.stream_count) {
868 return MANIFEST_ERROR_MISMATCH_DMA_ACCESS_PERMISSIONS;
869 }
870
871 if (j > 0) {
872 /* Filter the dma access permissions. */
Madhukar Pappireddy9c764b32024-06-20 14:36:55 -0500873 mem_regions[i].dma_access_permissions &=
Madhukar Pappireddy3c2b7912023-10-11 14:47:27 -0500874 MANIFEST_REGION_ALL_ATTR_MASK;
875 }
876
Manish Pandeya70a4192020-10-07 22:05:04 +0100877 if (rxtx->available) {
878 TRY(read_optional_uint32(
879 mem_node, "phandle",
880 (uint32_t)MANIFEST_INVALID_ADDRESS, &phandle));
881 if (phandle == rxtx->rx_phandle) {
882 dlog_verbose(" Assigned as RX buffer\n");
883 rxtx->rx_buffer = &mem_regions[i];
884 } else if (phandle == rxtx->tx_phandle) {
885 dlog_verbose(" Assigned as TX buffer\n");
886 rxtx->tx_buffer = &mem_regions[i];
887 }
Manish Pandeyfa1f2912020-05-05 12:57:01 +0100888 }
889
Manish Pandey6542f5c2020-04-27 14:37:46 +0100890 i++;
Raghu Krishnamurthy641dcd82022-07-19 23:21:20 -0700891 } while (fdt_next_sibling(mem_node) &&
892 (i < PARTITION_MAX_MEMORY_REGIONS));
Manish Pandey6542f5c2020-04-27 14:37:46 +0100893
Manish Pandeya70a4192020-10-07 22:05:04 +0100894 if (rxtx->available &&
895 (rxtx->rx_buffer->page_count != rxtx->tx_buffer->page_count)) {
Manish Pandeyf06c9072020-09-29 15:41:58 +0100896 return MANIFEST_ERROR_RXTX_SIZE_MISMATCH;
897 }
898
Manish Pandey2145c212020-05-01 16:04:22 +0100899 *count = i;
Manish Pandey6542f5c2020-04-27 14:37:46 +0100900
901 return MANIFEST_SUCCESS;
902}
903
Raghu Krishnamurthy98da1ca2022-10-04 08:59:01 -0700904static struct interrupt_info *device_region_get_interrupt_info(
905 struct device_region *dev_regions, uint32_t intid)
906{
907 for (uint32_t i = 0; i < ARRAY_SIZE(dev_regions->interrupts); i++) {
908 if (dev_regions->interrupts[i].id == intid) {
909 return &(dev_regions->interrupts[i]);
910 }
911 }
912 return NULL;
913}
914
Manish Pandeye68e7932020-04-23 15:29:28 +0100915static enum manifest_return_code parse_ffa_device_region_node(
Manish Pandey2145c212020-05-01 16:04:22 +0100916 struct fdt_node *dev_node, struct device_region *dev_regions,
Daniel Boulby4339edc2024-02-21 14:59:00 +0000917 uint16_t *count, uint8_t *dma_device_count,
918 const struct boot_params *boot_params)
Manish Pandeye68e7932020-04-23 15:29:28 +0100919{
920 struct uint32list_iter list;
Raghu Krishnamurthy641dcd82022-07-19 23:21:20 -0700921 uint16_t i = 0;
Madhukar Pappireddy5fc8be12021-08-03 11:42:53 -0500922 uint32_t j = 0;
Olivier Deprez93644652022-09-09 11:01:12 +0200923 struct interrupt_bitmap allocated_intids = manifest_data->intids;
Madhukar Pappireddy718afa92024-06-20 16:48:49 -0500924 uint8_t dma_device_id = 0;
Manish Pandeye68e7932020-04-23 15:29:28 +0100925
926 dlog_verbose(" Partition Device Regions\n");
927
928 if (!fdt_is_compatible(dev_node, "arm,ffa-manifest-device-regions")) {
929 return MANIFEST_ERROR_NOT_COMPATIBLE;
930 }
931
932 if (!fdt_first_child(dev_node)) {
933 return MANIFEST_ERROR_DEVICE_REGION_NODE_EMPTY;
934 }
935
Madhukar Pappireddye032af52023-10-11 14:52:58 -0500936 *dma_device_count = 0;
937
Manish Pandeye68e7932020-04-23 15:29:28 +0100938 do {
939 dlog_verbose(" Device Region[%u]\n", i);
940
941 TRY(read_optional_string(dev_node, "description",
942 &dev_regions[i].name));
943 dlog_verbose(" Name: %s\n",
944 string_data(&dev_regions[i].name));
945
946 TRY(read_uint64(dev_node, "base-address",
947 &dev_regions[i].base_address));
Karl Meakine8937d92024-03-19 16:04:25 +0000948 dlog_verbose(" Base address: %#lx\n",
Manish Pandeye68e7932020-04-23 15:29:28 +0100949 dev_regions[i].base_address);
950
Karl Meakina2e7ae72024-11-18 15:15:53 +0000951 TRY(parse_page_count(dev_node, &dev_regions[i].page_count));
Olivier Deprezb7aa0c22023-06-29 10:46:26 +0200952 dlog_verbose(" Pages_count: %u\n",
Manish Pandeye68e7932020-04-23 15:29:28 +0100953 dev_regions[i].page_count);
954
Daniel Boulby3f295b12023-12-15 17:38:08 +0000955 TRY(check_and_record_memory_used(
956 dev_regions[i].base_address, dev_regions[i].page_count,
957 manifest_data->mem_regions,
958 &manifest_data->mem_regions_index));
Daniel Boulby4d165722023-11-21 13:46:42 +0000959
Karl Meakin275aa292024-11-18 14:56:09 +0000960 TRY(parse_ffa_region_attributes(
961 dev_node, &dev_regions[i].attributes, true));
Olivier Deprezb7aa0c22023-06-29 10:46:26 +0200962 dlog_verbose(" Attributes: %#x\n",
Manish Pandeye68e7932020-04-23 15:29:28 +0100963 dev_regions[i].attributes);
964
Daniel Boulby4339edc2024-02-21 14:59:00 +0000965 TRY(check_partition_memory_is_valid(
966 dev_regions[i].base_address, dev_regions[i].page_count,
Olivier Deprez058ddee2024-08-27 09:22:11 +0200967 dev_regions[i].attributes, boot_params, true));
Daniel Boulby4339edc2024-02-21 14:59:00 +0000968
Manish Pandeye68e7932020-04-23 15:29:28 +0100969 TRY(read_optional_uint32list(dev_node, "interrupts", &list));
970 dlog_verbose(" Interrupt List:\n");
971 j = 0;
972 while (uint32list_has_next(&list) &&
Raghu Krishnamurthy641dcd82022-07-19 23:21:20 -0700973 j < PARTITION_MAX_INTERRUPTS_PER_DEVICE) {
Daniel Boulby801f8ef2022-06-27 14:21:01 +0100974 uint32_t intid;
Daniel Boulby801f8ef2022-06-27 14:21:01 +0100975
Manish Pandeye68e7932020-04-23 15:29:28 +0100976 TRY(uint32list_get_next(
977 &list, &dev_regions[i].interrupts[j].id));
Daniel Boulby801f8ef2022-06-27 14:21:01 +0100978 intid = dev_regions[i].interrupts[j].id;
Daniel Boulby801f8ef2022-06-27 14:21:01 +0100979
980 dlog_verbose(" ID = %u\n", intid);
981
Daniel Boulby4ca50f02022-07-29 18:29:34 +0100982 if (interrupt_bitmap_get_value(&allocated_intids,
983 intid) == 1U) {
Daniel Boulby801f8ef2022-06-27 14:21:01 +0100984 return MANIFEST_ERROR_INTERRUPT_ID_REPEATED;
985 }
986
Daniel Boulby4ca50f02022-07-29 18:29:34 +0100987 interrupt_bitmap_set_value(&allocated_intids, intid);
Daniel Boulby801f8ef2022-06-27 14:21:01 +0100988
Manish Pandeye68e7932020-04-23 15:29:28 +0100989 if (uint32list_has_next(&list)) {
990 TRY(uint32list_get_next(&list,
991 &dev_regions[i]
992 .interrupts[j]
993 .attributes));
994 } else {
995 return MANIFEST_ERROR_MALFORMED_INTEGER_LIST;
996 }
997
Raghu Krishnamurthy98da1ca2022-10-04 08:59:01 -0700998 dev_regions[i].interrupts[j].mpidr_valid = false;
999 dev_regions[i].interrupts[j].mpidr = 0;
1000
Daniel Boulby801f8ef2022-06-27 14:21:01 +01001001 dlog_verbose(" attributes = %u\n",
Manish Pandeye68e7932020-04-23 15:29:28 +01001002 dev_regions[i].interrupts[j].attributes);
1003 j++;
1004 }
Madhukar Pappireddy5fc8be12021-08-03 11:42:53 -05001005
1006 dev_regions[i].interrupt_count = j;
Manish Pandeye68e7932020-04-23 15:29:28 +01001007 if (j == 0) {
1008 dlog_verbose(" Empty\n");
Raghu Krishnamurthy98da1ca2022-10-04 08:59:01 -07001009 } else {
1010 TRY(read_optional_uint32list(
1011 dev_node, "interrupts-target", &list));
1012 dlog_verbose(" Interrupt Target List:\n");
1013
1014 while (uint32list_has_next(&list)) {
1015 uint32_t intid;
1016 uint64_t mpidr = 0;
1017 uint32_t mpidr_lower = 0;
1018 uint32_t mpidr_upper = 0;
1019 struct interrupt_info *info = NULL;
1020
1021 TRY(uint32list_get_next(&list, &intid));
1022
1023 dlog_verbose(" ID = %u\n", intid);
1024
1025 if (interrupt_bitmap_get_value(
1026 &allocated_intids, intid) != 1U) {
1027 return MANIFEST_ERROR_INTERRUPT_ID_NOT_IN_LIST;
1028 }
1029
1030 TRY(uint32list_get_next(&list, &mpidr_upper));
1031 TRY(uint32list_get_next(&list, &mpidr_lower));
1032 mpidr = mpidr_upper;
1033 mpidr <<= 32;
1034 mpidr |= mpidr_lower;
1035
1036 info = device_region_get_interrupt_info(
1037 &dev_regions[i], intid);
1038 /*
1039 * We should find info since
1040 * interrupt_bitmap_get_value already ensures
1041 * that we saw the interrupt and allocated ids
1042 * for it.
1043 */
1044 assert(info != NULL);
1045 info->mpidr = mpidr;
1046 info->mpidr_valid = true;
Karl Meakine8937d92024-03-19 16:04:25 +00001047 dlog_verbose(" MPIDR = %#lx\n", mpidr);
Raghu Krishnamurthy98da1ca2022-10-04 08:59:01 -07001048 }
Manish Pandeye68e7932020-04-23 15:29:28 +01001049 }
1050
Madhukar Pappireddy718afa92024-06-20 16:48:49 -05001051 TRY(parse_common_fields_mem_dev_region_node(
1052 dev_node, &dev_regions[i].dma_prop));
1053
Madhukar Pappireddy9c764b32024-06-20 14:36:55 -05001054 if (dev_regions[i].dma_prop.smmu_id != MANIFEST_INVALID_ID) {
Madhukar Pappireddy9c764b32024-06-20 14:36:55 -05001055 dev_regions[i].dma_prop.dma_device_id = dma_device_id++;
Madhukar Pappireddye032af52023-10-11 14:52:58 -05001056 *dma_device_count = dma_device_id;
1057
Madhukar Pappireddy8a364472023-10-11 15:34:07 -05001058 if (*dma_device_count > PARTITION_MAX_DMA_DEVICES) {
1059 return MANIFEST_ERROR_DMA_DEVICE_OVERFLOW;
1060 }
1061
Madhukar Pappireddye032af52023-10-11 14:52:58 -05001062 dlog_verbose(" dma peripheral device id: %u\n",
Madhukar Pappireddy9c764b32024-06-20 14:36:55 -05001063 dev_regions[i].dma_prop.dma_device_id);
Manish Pandeye68e7932020-04-23 15:29:28 +01001064 }
Madhukar Pappireddye032af52023-10-11 14:52:58 -05001065
Manish Pandeye68e7932020-04-23 15:29:28 +01001066 TRY(read_bool(dev_node, "exclusive-access",
1067 &dev_regions[i].exclusive_access));
J-Alves4369bd92020-08-07 16:35:36 +01001068 dlog_verbose(" Exclusive_access: %u\n",
Manish Pandeye68e7932020-04-23 15:29:28 +01001069 dev_regions[i].exclusive_access);
1070
1071 i++;
Raghu Krishnamurthy641dcd82022-07-19 23:21:20 -07001072 } while (fdt_next_sibling(dev_node) &&
1073 (i < PARTITION_MAX_DEVICE_REGIONS));
Manish Pandeye68e7932020-04-23 15:29:28 +01001074
Manish Pandey2145c212020-05-01 16:04:22 +01001075 *count = i;
Manish Pandeye68e7932020-04-23 15:29:28 +01001076
1077 return MANIFEST_SUCCESS;
1078}
1079
J-Alvesabebe432022-05-31 14:40:50 +01001080static enum manifest_return_code sanity_check_ffa_manifest(
1081 struct manifest_vm *vm)
1082{
Karl Meakin0e617d92024-04-05 12:55:22 +01001083 enum ffa_version ffa_version;
J-Alvesabebe432022-05-31 14:40:50 +01001084 enum manifest_return_code ret_code = MANIFEST_SUCCESS;
1085 const char *error_string = "specified in manifest is unsupported";
1086 uint32_t k = 0;
Kathleen Capellaf71dee42023-08-08 16:24:14 -04001087 bool using_req2 = (vm->partition.messaging_method &
1088 (FFA_PARTITION_DIRECT_REQ2_RECV |
1089 FFA_PARTITION_DIRECT_REQ2_SEND)) != 0;
J-Alvesabebe432022-05-31 14:40:50 +01001090
1091 /* ensure that the SPM version is compatible */
Karl Meakin0e617d92024-04-05 12:55:22 +01001092 ffa_version = vm->partition.ffa_version;
1093 if (!ffa_versions_are_compatible(ffa_version, FFA_VERSION_COMPILED)) {
Karl Meakin5be8dfa2025-03-27 14:58:43 +00001094 dlog_error(
1095 "FF-A partition manifest version v%u.%u is not "
1096 "compatible with compiled version v%u.%u\n",
1097 ffa_version_get_major(ffa_version),
1098 ffa_version_get_minor(ffa_version),
1099 ffa_version_get_major(FFA_VERSION_COMPILED),
1100 ffa_version_get_minor(FFA_VERSION_COMPILED));
J-Alvesabebe432022-05-31 14:40:50 +01001101 ret_code = MANIFEST_ERROR_NOT_COMPATIBLE;
1102 }
1103
1104 if (vm->partition.xlat_granule != PAGE_4KB) {
1105 dlog_error("Translation granule %s: %u\n", error_string,
1106 vm->partition.xlat_granule);
1107 ret_code = MANIFEST_ERROR_NOT_COMPATIBLE;
1108 }
1109
1110 if (vm->partition.execution_state != AARCH64) {
1111 dlog_error("Execution state %s: %u\n", error_string,
1112 vm->partition.execution_state);
1113 ret_code = MANIFEST_ERROR_NOT_COMPATIBLE;
1114 }
1115
1116 if (vm->partition.run_time_el != EL1 &&
1117 vm->partition.run_time_el != S_EL1 &&
Daniel Boulby874d5432023-04-27 12:40:24 +01001118 vm->partition.run_time_el != S_EL0 &&
1119 vm->partition.run_time_el != EL0) {
J-Alvesabebe432022-05-31 14:40:50 +01001120 dlog_error("Exception level %s: %d\n", error_string,
1121 vm->partition.run_time_el);
1122 ret_code = MANIFEST_ERROR_NOT_COMPATIBLE;
1123 }
1124
Karl Meakin0e617d92024-04-05 12:55:22 +01001125 if (vm->partition.ffa_version < FFA_VERSION_1_2 && using_req2) {
Kathleen Capellaf71dee42023-08-08 16:24:14 -04001126 dlog_error("Messaging method %s: %x\n", error_string,
1127 vm->partition.messaging_method);
1128 ret_code = MANIFEST_ERROR_NOT_COMPATIBLE;
1129 }
1130
J-Alvesabebe432022-05-31 14:40:50 +01001131 if ((vm->partition.messaging_method &
1132 ~(FFA_PARTITION_DIRECT_REQ_RECV | FFA_PARTITION_DIRECT_REQ_SEND |
Kathleen Capellaf71dee42023-08-08 16:24:14 -04001133 FFA_PARTITION_INDIRECT_MSG | FFA_PARTITION_DIRECT_REQ2_RECV |
1134 FFA_PARTITION_DIRECT_REQ2_SEND)) != 0U) {
J-Alvesabebe432022-05-31 14:40:50 +01001135 dlog_error("Messaging method %s: %x\n", error_string,
1136 vm->partition.messaging_method);
1137 ret_code = MANIFEST_ERROR_NOT_COMPATIBLE;
1138 }
1139
Daniel Boulby874d5432023-04-27 12:40:24 +01001140 if ((vm->partition.run_time_el == S_EL0 ||
1141 vm->partition.run_time_el == EL0) &&
J-Alvesabebe432022-05-31 14:40:50 +01001142 vm->partition.execution_ctx_count != 1) {
1143 dlog_error(
1144 "Exception level and execution context count %s: %d "
1145 "%d\n",
1146 error_string, vm->partition.run_time_el,
1147 vm->partition.execution_ctx_count);
1148 ret_code = MANIFEST_ERROR_NOT_COMPATIBLE;
1149 }
1150
Raghu Krishnamurthy641dcd82022-07-19 23:21:20 -07001151 for (uint16_t i = 0; i < vm->partition.dev_region_count; i++) {
J-Alvesabebe432022-05-31 14:40:50 +01001152 struct device_region dev_region;
1153
1154 dev_region = vm->partition.dev_regions[i];
1155
Raghu Krishnamurthy641dcd82022-07-19 23:21:20 -07001156 if (dev_region.interrupt_count >
1157 PARTITION_MAX_INTERRUPTS_PER_DEVICE) {
J-Alvesabebe432022-05-31 14:40:50 +01001158 dlog_error(
1159 "Interrupt count for device region exceeds "
1160 "limit.\n");
1161 ret_code = MANIFEST_ERROR_NOT_COMPATIBLE;
1162 continue;
1163 }
1164
1165 for (uint8_t j = 0; j < dev_region.interrupt_count; j++) {
1166 k++;
1167 if (k > VM_MANIFEST_MAX_INTERRUPTS) {
1168 dlog_error(
1169 "Interrupt count for VM exceeds "
1170 "limit.\n");
1171 ret_code = MANIFEST_ERROR_NOT_COMPATIBLE;
1172 continue;
1173 }
1174 }
1175 }
1176
1177 /* GP register is restricted to one of x0 - x3. */
Karl Meakin824b63d2024-06-03 19:04:53 +01001178 if (vm->partition.gp_register_num != DEFAULT_BOOT_GP_REGISTER &&
J-Alvesabebe432022-05-31 14:40:50 +01001179 vm->partition.gp_register_num > 3) {
1180 dlog_error("GP register number %s: %u\n", error_string,
1181 vm->partition.gp_register_num);
1182 ret_code = MANIFEST_ERROR_NOT_COMPATIBLE;
1183 }
1184
J-Alvesbb2703a2025-02-10 12:11:56 +00001185 if (vm->partition.run_time_el == S_EL0 &&
1186 (vm->partition.sri_policy.intr_while_waiting ||
1187 vm->partition.sri_policy.intr_pending_entry_wait)) {
1188 ret_code = MANIFEST_ERROR_SRI_POLICY_NOT_SUPPORTED;
1189 }
1190
J-Alvesabebe432022-05-31 14:40:50 +01001191 return ret_code;
1192}
1193
Madhukar Pappireddye032af52023-10-11 14:52:58 -05001194/**
1195 * Find the device id allocated to the device region node corresponding to the
1196 * specified stream id.
1197 */
1198static bool find_dma_device_id_from_dev_region_nodes(
1199 const struct manifest_vm *manifest_vm, uint32_t sid, uint8_t *device_id)
1200{
1201 for (uint16_t i = 0; i < manifest_vm->partition.dev_region_count; i++) {
1202 struct device_region dev_region =
1203 manifest_vm->partition.dev_regions[i];
1204
Madhukar Pappireddy9c764b32024-06-20 14:36:55 -05001205 for (uint8_t j = 0; j < dev_region.dma_prop.stream_count; j++) {
1206 if (sid == dev_region.dma_prop.stream_ids[j]) {
1207 *device_id = dev_region.dma_prop.dma_device_id;
Madhukar Pappireddye032af52023-10-11 14:52:58 -05001208 return true;
1209 }
1210 }
1211 }
1212 return false;
1213}
1214
1215/**
1216 * Identify the device id of a DMA device node corresponding to a stream id
1217 * specified in the memory region node.
1218 */
1219static bool map_dma_device_id_to_stream_ids(struct manifest_vm *vm)
1220{
1221 for (uint16_t i = 0; i < vm->partition.mem_region_count; i++) {
1222 struct memory_region mem_region = vm->partition.mem_regions[i];
1223
1224 for (uint8_t j = 0; j < mem_region.dma_prop.stream_count; j++) {
1225 uint32_t sid = mem_region.dma_prop.stream_ids[j];
1226 uint8_t device_id = 0;
1227
1228 /*
1229 * Every stream id must have been declared in the
1230 * device node as well.
1231 */
1232 if (!find_dma_device_id_from_dev_region_nodes(
1233 vm, sid, &device_id)) {
1234 dlog_verbose(
Karl Meakine8937d92024-03-19 16:04:25 +00001235 "Stream ID %d not found in any device "
Madhukar Pappireddye032af52023-10-11 14:52:58 -05001236 "region node of partition manifest\n",
1237 sid);
1238 return false;
1239 }
1240
1241 mem_region.dma_prop.dma_device_id = device_id;
1242 }
1243 }
1244
1245 return true;
1246}
1247
J-Alves77b6f4f2023-03-15 11:34:49 +00001248enum manifest_return_code parse_ffa_manifest(
1249 struct fdt *fdt, struct manifest_vm *vm,
1250 struct fdt_node *boot_info_node, const struct boot_params *boot_params)
Olivier Deprez62d99e32020-01-09 15:58:07 +01001251{
Olivier Deprez62d99e32020-01-09 15:58:07 +01001252 struct uint32list_iter uuid;
Davidson K5d5f2792024-08-19 19:09:12 +05301253 uintpaddr_t load_address;
Olivier Deprez62d99e32020-01-09 15:58:07 +01001254 struct fdt_node root;
1255 struct fdt_node ffa_node;
1256 struct string rxtx_node_name = STRING_INIT("rx_tx-info");
Manish Pandey6542f5c2020-04-27 14:37:46 +01001257 struct string mem_region_node_name = STRING_INIT("memory-regions");
Manish Pandeye68e7932020-04-23 15:29:28 +01001258 struct string dev_region_node_name = STRING_INIT("device-regions");
J-Alves35315782022-01-25 17:58:32 +00001259 struct string boot_info_node_name = STRING_INIT("boot-info");
Madhukar Pappireddy84154052022-06-21 18:30:25 -05001260 bool managed_exit_field_present = false;
Olivier Deprez62d99e32020-01-09 15:58:07 +01001261
1262 if (!fdt_find_node(fdt, "/", &root)) {
1263 return MANIFEST_ERROR_NO_ROOT_NODE;
1264 }
1265
1266 /* Check "compatible" property. */
1267 if (!fdt_is_compatible(&root, "arm,ffa-manifest-1.0")) {
1268 return MANIFEST_ERROR_NOT_COMPATIBLE;
1269 }
1270
J-Alves4369bd92020-08-07 16:35:36 +01001271 TRY(read_uint32list(&root, "uuid", &uuid));
Olivier Deprez62d99e32020-01-09 15:58:07 +01001272
Karl Meakin60532972024-08-02 16:11:21 +01001273 TRY(parse_uuid_list(&uuid, vm->partition.uuids,
1274 &vm->partition.uuid_count));
Kathleen Capella422b10b2023-06-30 18:28:27 -04001275 dlog_verbose(" Number of UUIDs %u\n", vm->partition.uuid_count);
Olivier Deprez62d99e32020-01-09 15:58:07 +01001276
Olivier Deprezb7aa0c22023-06-29 10:46:26 +02001277 TRY(read_uint32(&root, "ffa-version", &vm->partition.ffa_version));
1278 dlog_verbose(" Expected FF-A version %u.%u\n",
Karl Meakin0e617d92024-04-05 12:55:22 +01001279 ffa_version_get_major(vm->partition.ffa_version),
1280 ffa_version_get_minor(vm->partition.ffa_version));
Olivier Deprezb7aa0c22023-06-29 10:46:26 +02001281
Olivier Deprez62d99e32020-01-09 15:58:07 +01001282 TRY(read_uint16(&root, "execution-ctx-count",
Raghu Krishnamurthy8c250a92021-07-02 12:16:42 -07001283 &vm->partition.execution_ctx_count));
J-Alves4369bd92020-08-07 16:35:36 +01001284 dlog_verbose(" Number of execution context %u\n",
Raghu Krishnamurthy8c250a92021-07-02 12:16:42 -07001285 vm->partition.execution_ctx_count);
Olivier Deprez62d99e32020-01-09 15:58:07 +01001286
1287 TRY(read_uint8(&root, "exception-level",
Raghu Krishnamurthy8c250a92021-07-02 12:16:42 -07001288 (uint8_t *)&vm->partition.run_time_el));
1289 dlog_verbose(" Run-time EL %u\n", vm->partition.run_time_el);
Olivier Deprez62d99e32020-01-09 15:58:07 +01001290
1291 TRY(read_uint8(&root, "execution-state",
Raghu Krishnamurthy8c250a92021-07-02 12:16:42 -07001292 (uint8_t *)&vm->partition.execution_state));
1293 dlog_verbose(" Execution state %u\n", vm->partition.execution_state);
Olivier Deprez62d99e32020-01-09 15:58:07 +01001294
Davidson K5d5f2792024-08-19 19:09:12 +05301295 TRY(read_optional_uint64(&root, "load-address", 0, &load_address));
1296 if (vm->partition.load_addr != load_address) {
1297 dlog_warning(
Karl Meakin6291eb22024-11-18 12:43:47 +00001298 "Partition's `load_address` (%#lx) in its manifest "
1299 "differs from `load-address` (%#lx) in its package\n",
1300 vm->partition.load_addr, load_address);
Davidson K5d5f2792024-08-19 19:09:12 +05301301 }
Karl Meakine8937d92024-03-19 16:04:25 +00001302 dlog_verbose(" Load address %#lx\n", vm->partition.load_addr);
Olivier Deprez62d99e32020-01-09 15:58:07 +01001303
J-Alves4369bd92020-08-07 16:35:36 +01001304 TRY(read_optional_uint64(&root, "entrypoint-offset", 0,
Raghu Krishnamurthy8c250a92021-07-02 12:16:42 -07001305 &vm->partition.ep_offset));
Karl Meakine8937d92024-03-19 16:04:25 +00001306 dlog_verbose(" Entry point offset %#zx\n", vm->partition.ep_offset);
Olivier Deprez62d99e32020-01-09 15:58:07 +01001307
J-Alves35315782022-01-25 17:58:32 +00001308 TRY(read_optional_uint32(&root, "gp-register-num",
1309 DEFAULT_BOOT_GP_REGISTER,
1310 &vm->partition.gp_register_num));
Olivier Deprezb7aa0c22023-06-29 10:46:26 +02001311
1312 if (vm->partition.gp_register_num != DEFAULT_BOOT_GP_REGISTER) {
1313 dlog_verbose(" Boot GP register: x%u\n",
1314 vm->partition.gp_register_num);
1315 }
J-Alves35315782022-01-25 17:58:32 +00001316
J-Alvesb37fd082020-10-22 12:29:21 +01001317 TRY(read_optional_uint16(&root, "boot-order", DEFAULT_BOOT_ORDER,
Raghu Krishnamurthy8c250a92021-07-02 12:16:42 -07001318 &vm->partition.boot_order));
Kathleen Capella4a2a6e72023-04-21 14:43:26 -04001319 if (vm->partition.boot_order != DEFAULT_BOOT_ORDER) {
Karl Meakine8937d92024-03-19 16:04:25 +00001320 dlog_verbose(" Boot order %u\n", vm->partition.boot_order);
Kathleen Capella4a2a6e72023-04-21 14:43:26 -04001321 }
1322
1323 if (!check_boot_order(vm->partition.boot_order)) {
1324 return MANIFEST_ERROR_INVALID_BOOT_ORDER;
1325 }
J-Alvesb37fd082020-10-22 12:29:21 +01001326
J-Alves4369bd92020-08-07 16:35:36 +01001327 TRY(read_optional_uint8(&root, "xlat-granule", 0,
Raghu Krishnamurthy8c250a92021-07-02 12:16:42 -07001328 (uint8_t *)&vm->partition.xlat_granule));
1329 dlog_verbose(" Translation granule %u\n", vm->partition.xlat_granule);
Olivier Deprez62d99e32020-01-09 15:58:07 +01001330
1331 ffa_node = root;
1332 if (fdt_find_child(&ffa_node, &rxtx_node_name)) {
1333 if (!fdt_is_compatible(&ffa_node,
1334 "arm,ffa-manifest-rx_tx-buffer")) {
1335 return MANIFEST_ERROR_NOT_COMPATIBLE;
1336 }
1337
Manish Pandeyfa1f2912020-05-05 12:57:01 +01001338 /*
1339 * Read only phandles for now, it will be used to update buffers
1340 * while parsing memory regions.
1341 */
1342 TRY(read_uint32(&ffa_node, "rx-buffer",
Raghu Krishnamurthy8c250a92021-07-02 12:16:42 -07001343 &vm->partition.rxtx.rx_phandle));
Olivier Deprez62d99e32020-01-09 15:58:07 +01001344
Manish Pandeyfa1f2912020-05-05 12:57:01 +01001345 TRY(read_uint32(&ffa_node, "tx-buffer",
Raghu Krishnamurthy8c250a92021-07-02 12:16:42 -07001346 &vm->partition.rxtx.tx_phandle));
Olivier Deprez62d99e32020-01-09 15:58:07 +01001347
Raghu Krishnamurthy8c250a92021-07-02 12:16:42 -07001348 vm->partition.rxtx.available = true;
Olivier Deprez62d99e32020-01-09 15:58:07 +01001349 }
1350
Kathleen Capellaf71dee42023-08-08 16:24:14 -04001351 TRY(read_uint16(&root, "messaging-method",
1352 (uint16_t *)&vm->partition.messaging_method));
Raghu Krishnamurthy8c250a92021-07-02 12:16:42 -07001353 dlog_verbose(" Messaging method %u\n", vm->partition.messaging_method);
Olivier Deprez62d99e32020-01-09 15:58:07 +01001354
Madhukar Pappireddy84154052022-06-21 18:30:25 -05001355 TRY(read_bool(&root, "managed-exit", &managed_exit_field_present));
1356
1357 TRY(read_optional_uint8(
1358 &root, "ns-interrupts-action", NS_ACTION_SIGNALED,
1359 (uint8_t *)&vm->partition.ns_interrupts_action));
1360
1361 /*
1362 * An SP manifest can specify one of the fields listed below:
1363 * `managed-exit`: Introduced in FF-A v1.0 spec.
1364 * `ns-interrupts-action`: Introduced in FF-A v1.1 EAC0 spec.
1365 * If both are missing from the manifest, the default response is
1366 * NS_ACTION_SIGNALED.
1367 */
1368 if (managed_exit_field_present) {
1369 vm->partition.ns_interrupts_action = NS_ACTION_ME;
1370 }
Madhukar Pappireddy84154052022-06-21 18:30:25 -05001371 if (vm->partition.ns_interrupts_action != NS_ACTION_QUEUED &&
1372 vm->partition.ns_interrupts_action != NS_ACTION_ME &&
1373 vm->partition.ns_interrupts_action != NS_ACTION_SIGNALED) {
Madhukar Pappireddy5c04a382022-12-28 11:29:26 -06001374 return MANIFEST_ERROR_ILLEGAL_NS_INT_ACTION;
Madhukar Pappireddy84154052022-06-21 18:30:25 -05001375 }
1376
1377 dlog_verbose(
Olivier Deprezb7aa0c22023-06-29 10:46:26 +02001378 " NS Interrupts %s\n",
Madhukar Pappireddy84154052022-06-21 18:30:25 -05001379 (vm->partition.ns_interrupts_action == NS_ACTION_QUEUED)
1380 ? "Queued"
1381 : (vm->partition.ns_interrupts_action == NS_ACTION_SIGNALED)
1382 ? "Signaled"
1383 : "Managed exit");
1384
1385 if (vm->partition.ns_interrupts_action == NS_ACTION_ME) {
1386 /* Managed exit only supported by S_EL1 partitions. */
1387 if (vm->partition.run_time_el != S_EL1) {
1388 dlog_error(
1389 "Managed exit cannot be supported by this "
1390 "partition\n");
Madhukar Pappireddy5c04a382022-12-28 11:29:26 -06001391 return MANIFEST_ERROR_ILLEGAL_NS_INT_ACTION;
Madhukar Pappireddy84154052022-06-21 18:30:25 -05001392 }
Madhukar Pappireddy046dad02022-06-21 18:43:33 -05001393
1394 TRY(read_bool(&root, "managed-exit-virq",
1395 &vm->partition.me_signal_virq));
1396 if (vm->partition.me_signal_virq) {
1397 dlog_verbose(" Managed Exit signaled through vIRQ\n");
1398 }
J-Alvesa4730db2021-11-02 10:31:01 +00001399 }
1400
J-Alvesbb2703a2025-02-10 12:11:56 +00001401 TRY(read_optional_uint8(&root, "sri-interrupts-policy", 0,
1402 (uint8_t *)&vm->partition.sri_policy));
1403
1404 if (vm->partition.sri_policy.mbz != 0U) {
1405 return MANIFEST_ERROR_ILLEGAL_SRI_POLICY;
1406 }
1407
1408 dlog_verbose(" SRI Trigger Policy.\n");
1409 if (!vm->partition.sri_policy.intr_while_waiting &&
1410 !vm->partition.sri_policy.intr_pending_entry_wait) {
1411 dlog_verbose(" Not trigged in interrupt handling.\n");
1412 } else {
1413 if (vm->partition.sri_policy.intr_while_waiting) {
1414 dlog_verbose(" On interrupts while waiting.\n");
1415 }
1416 if (vm->partition.sri_policy.intr_pending_entry_wait) {
1417 dlog_verbose(
1418 " On entry to wait while interrupts "
1419 "pending.\n");
1420 }
1421 }
1422
J-Alvesa4730db2021-11-02 10:31:01 +00001423 TRY(read_bool(&root, "notification-support",
1424 &vm->partition.notification_support));
1425 if (vm->partition.notification_support) {
1426 dlog_verbose(" Notifications Receipt Supported\n");
1427 }
Maksims Svecovs9ddf86a2021-05-06 17:17:21 +01001428
Madhukar Pappireddy5c04a382022-12-28 11:29:26 -06001429 TRY(read_optional_uint8(
1430 &root, "other-s-interrupts-action", OTHER_S_INT_ACTION_SIGNALED,
1431 (uint8_t *)&vm->partition.other_s_interrupts_action));
1432
1433 if (vm->partition.other_s_interrupts_action ==
1434 OTHER_S_INT_ACTION_QUEUED) {
1435 if (vm->partition.ns_interrupts_action != NS_ACTION_QUEUED) {
1436 dlog_error(
1437 "Choice of the fields 'ns-interrupts-action' "
1438 "and 'other-s-interrupts-action' not "
1439 "compatible\n");
1440 return MANIFEST_ERROR_NOT_COMPATIBLE;
1441 }
1442 } else if (vm->partition.other_s_interrupts_action >
1443 OTHER_S_INT_ACTION_SIGNALED) {
1444 dlog_error(
J-Alves0a824e92024-04-26 16:20:12 +01001445 "Illegal value specified for the field "
1446 "'other-s-interrupts-action': %u\n",
Madhukar Pappireddy5c04a382022-12-28 11:29:26 -06001447 vm->partition.other_s_interrupts_action);
1448 return MANIFEST_ERROR_ILLEGAL_OTHER_S_INT_ACTION;
1449 }
1450
J-Alves35315782022-01-25 17:58:32 +00001451 /* Parse boot info node. */
1452 if (boot_info_node != NULL) {
1453 ffa_node = root;
1454 vm->partition.boot_info =
1455 fdt_find_child(&ffa_node, &boot_info_node_name);
1456 if (vm->partition.boot_info) {
1457 *boot_info_node = ffa_node;
1458 }
1459 } else {
1460 vm->partition.boot_info = false;
1461 }
1462
Olivier Depreza15f2352022-09-26 09:17:24 +02001463 TRY(read_optional_uint32(
Karl Meakin18694022024-08-02 13:59:25 +01001464 &root, "vm-availability-messages", 0,
1465 (uint32_t *)&vm->partition.vm_availability_messages));
1466 dlog_verbose("vm-availability-messages=%#x\n",
1467 *(uint32_t *)&vm->partition.vm_availability_messages);
1468
1469 if (vm->partition.vm_availability_messages.mbz != 0) {
1470 return MANIFEST_ERROR_VM_AVAILABILITY_MESSAGE_INVALID;
1471 }
1472
Madhukar Pappireddy958c8412024-11-25 09:54:17 -06001473 TRY(read_optional_uint32(&root, "power-management-messages",
1474 MANIFEST_POWER_MANAGEMENT_CPU_OFF_SUPPORTED,
1475 &vm->partition.power_management));
Olivier Depreza15f2352022-09-26 09:17:24 +02001476 vm->partition.power_management &= MANIFEST_POWER_MANAGEMENT_ALL_MASK;
1477 if (vm->partition.execution_ctx_count == 1 ||
Daniel Boulby874d5432023-04-27 12:40:24 +01001478 vm->partition.run_time_el == S_EL0 ||
1479 vm->partition.run_time_el == EL0) {
Olivier Depreza15f2352022-09-26 09:17:24 +02001480 vm->partition.power_management =
1481 MANIFEST_POWER_MANAGEMENT_NONE_MASK;
1482 }
1483
1484 dlog_verbose(" Power management messages %#x\n",
1485 vm->partition.power_management);
1486
Manish Pandey6542f5c2020-04-27 14:37:46 +01001487 /* Parse memory-regions */
1488 ffa_node = root;
1489 if (fdt_find_child(&ffa_node, &mem_region_node_name)) {
Raghu Krishnamurthy8c250a92021-07-02 12:16:42 -07001490 TRY(parse_ffa_memory_region_node(
Karl Meakinf6d49402023-04-04 18:14:26 +01001491 &ffa_node, vm->partition.load_addr,
1492 vm->partition.mem_regions,
J-Alves77b6f4f2023-03-15 11:34:49 +00001493 &vm->partition.mem_region_count, &vm->partition.rxtx,
1494 boot_params));
Manish Pandey6542f5c2020-04-27 14:37:46 +01001495 }
Manish Pandey2145c212020-05-01 16:04:22 +01001496 dlog_verbose(" Total %u memory regions found\n",
Raghu Krishnamurthy8c250a92021-07-02 12:16:42 -07001497 vm->partition.mem_region_count);
Manish Pandey6542f5c2020-04-27 14:37:46 +01001498
Manish Pandeye68e7932020-04-23 15:29:28 +01001499 /* Parse Device-regions */
1500 ffa_node = root;
1501 if (fdt_find_child(&ffa_node, &dev_region_node_name)) {
Raghu Krishnamurthy8c250a92021-07-02 12:16:42 -07001502 TRY(parse_ffa_device_region_node(
1503 &ffa_node, vm->partition.dev_regions,
Madhukar Pappireddye032af52023-10-11 14:52:58 -05001504 &vm->partition.dev_region_count,
Daniel Boulby4339edc2024-02-21 14:59:00 +00001505 &vm->partition.dma_device_count, boot_params));
Manish Pandeye68e7932020-04-23 15:29:28 +01001506 }
Manish Pandey2145c212020-05-01 16:04:22 +01001507 dlog_verbose(" Total %u device regions found\n",
Raghu Krishnamurthy8c250a92021-07-02 12:16:42 -07001508 vm->partition.dev_region_count);
Manish Pandeye68e7932020-04-23 15:29:28 +01001509
Madhukar Pappireddye032af52023-10-11 14:52:58 -05001510 if (!map_dma_device_id_to_stream_ids(vm)) {
1511 return MANIFEST_ERROR_NOT_COMPATIBLE;
1512 }
1513
J-Alves4eb7b542022-03-02 15:21:52 +00001514 return sanity_check_ffa_manifest(vm);
Olivier Deprez62d99e32020-01-09 15:58:07 +01001515}
1516
Olivier Deprez62d99e32020-01-09 15:58:07 +01001517static enum manifest_return_code parse_ffa_partition_package(
1518 struct mm_stage1_locked stage1_locked, struct fdt_node *node,
J-Alves19e20cf2023-08-02 12:48:55 +01001519 struct manifest_vm *vm, ffa_id_t vm_id,
J-Alves77b6f4f2023-03-15 11:34:49 +00001520 const struct boot_params *boot_params, struct mpool *ppool)
Olivier Deprez62d99e32020-01-09 15:58:07 +01001521{
1522 enum manifest_return_code ret = MANIFEST_ERROR_NOT_COMPATIBLE;
J-Alves2f86c1e2022-02-23 18:44:19 +00001523 uintpaddr_t load_address;
J-Alves6e0abc42024-12-30 16:51:16 +00001524 struct partition_pkg pkg;
Olivier Deprez62d99e32020-01-09 15:58:07 +01001525 struct fdt sp_fdt;
J-Alves6e0abc42024-12-30 16:51:16 +00001526 void *pm_ptr;
1527 size_t pm_size;
J-Alves35315782022-01-25 17:58:32 +00001528 struct fdt_node boot_info_node;
J-Alves6e0abc42024-12-30 16:51:16 +00001529 size_t total_mem_size;
Olivier Deprez62d99e32020-01-09 15:58:07 +01001530
1531 /*
1532 * This must have been hinted as being an FF-A partition,
1533 * return straight with failure if this is not the case.
1534 */
1535 if (!vm->is_ffa_partition) {
J-Alves2f86c1e2022-02-23 18:44:19 +00001536 return ret;
1537 }
1538
1539 TRY(read_uint64(node, "load_address", &load_address));
1540 if (!is_aligned(load_address, PAGE_SIZE)) {
Olivier Deprez62d99e32020-01-09 15:58:07 +01001541 return MANIFEST_ERROR_NOT_COMPATIBLE;
1542 }
1543
J-Alves2f86c1e2022-02-23 18:44:19 +00001544 assert(load_address != 0U);
1545
J-Alves6e0abc42024-12-30 16:51:16 +00001546 if (!partition_pkg_init(stage1_locked, pa_init(load_address), &pkg,
1547 ppool)) {
J-Alves2f86c1e2022-02-23 18:44:19 +00001548 return ret;
Olivier Deprez62d99e32020-01-09 15:58:07 +01001549 }
1550
J-Alves6e0abc42024-12-30 16:51:16 +00001551 total_mem_size = pa_difference(pkg.total.begin, pkg.total.end);
Olivier Deprez62d99e32020-01-09 15:58:07 +01001552
J-Alves2f86c1e2022-02-23 18:44:19 +00001553 if (vm_id != HF_PRIMARY_VM_ID &&
J-Alves6e0abc42024-12-30 16:51:16 +00001554 total_mem_size > (size_t)vm->secondary.mem_size) {
1555 dlog_error("Partition pkg size %zx bigger than expected: %x\n",
1556 total_mem_size, (uint32_t)vm->secondary.mem_size);
J-Alves2f86c1e2022-02-23 18:44:19 +00001557 goto out;
Olivier Deprez62d99e32020-01-09 15:58:07 +01001558 }
1559
J-Alves6e0abc42024-12-30 16:51:16 +00001560 pm_ptr = ptr_from_va(va_from_pa(pkg.pm.begin));
1561
1562 pm_size = pa_difference(pkg.pm.begin, pkg.pm.end);
1563 if (!fdt_init_from_ptr(&sp_fdt, pm_ptr, pm_size)) {
1564 dlog_error("%s: FDT failed validation.\n", __func__);
J-Alves2f86c1e2022-02-23 18:44:19 +00001565 goto out;
Olivier Deprez62d99e32020-01-09 15:58:07 +01001566 }
1567
Davidson K5d5f2792024-08-19 19:09:12 +05301568 vm->partition.load_addr = load_address;
1569
J-Alves77b6f4f2023-03-15 11:34:49 +00001570 ret = parse_ffa_manifest(&sp_fdt, vm, &boot_info_node, boot_params);
Olivier Deprez62d99e32020-01-09 15:58:07 +01001571 if (ret != MANIFEST_SUCCESS) {
Olivier Deprez3cbdf862023-06-07 15:36:32 +02001572 dlog_error("Error parsing partition manifest.\n");
J-Alves2f86c1e2022-02-23 18:44:19 +00001573 goto out;
Olivier Deprez62d99e32020-01-09 15:58:07 +01001574 }
1575
J-Alves6e0abc42024-12-30 16:51:16 +00001576 /* Partition subscribed to boot information. */
1577 if (vm->partition.gp_register_num != DEFAULT_BOOT_GP_REGISTER &&
1578 vm->partition.boot_info) {
1579 /* Its package should have available space for it. */
1580 if (pa_addr(pkg.boot_info.begin) == 0U) {
1581 dlog_warning(
1582 "Partition Package %s doesn't have boot info "
1583 "space.\n",
1584 vm->debug_name.data);
1585 } else {
Kathleen Capella8393ea92025-01-09 17:22:21 -05001586 if (!ffa_boot_info_node(&boot_info_node, &pkg,
1587 vm->partition.ffa_version)) {
J-Alves6e0abc42024-12-30 16:51:16 +00001588 dlog_error(
1589 "Failed to process boot "
1590 "information.\n");
1591 }
J-Alves889a1d72022-05-13 11:38:27 +01001592 }
J-Alves35315782022-01-25 17:58:32 +00001593 }
J-Alves6e0abc42024-12-30 16:51:16 +00001594
J-Alves2f86c1e2022-02-23 18:44:19 +00001595out:
J-Alves6e0abc42024-12-30 16:51:16 +00001596 partition_pkg_deinit(stage1_locked, &pkg, ppool);
1597
Olivier Deprez62d99e32020-01-09 15:58:07 +01001598 return ret;
1599}
1600
David Brazdil7a462ec2019-08-15 12:27:47 +01001601/**
1602 * Parse manifest from FDT.
1603 */
Olivier Deprez62d99e32020-01-09 15:58:07 +01001604enum manifest_return_code manifest_init(struct mm_stage1_locked stage1_locked,
Olivier Deprez93644652022-09-09 11:01:12 +02001605 struct manifest **manifest_ret,
Olivier Deprez62d99e32020-01-09 15:58:07 +01001606 struct memiter *manifest_fdt,
J-Alves77b6f4f2023-03-15 11:34:49 +00001607 struct boot_params *boot_params,
Olivier Deprez62d99e32020-01-09 15:58:07 +01001608 struct mpool *ppool)
David Brazdil7a462ec2019-08-15 12:27:47 +01001609{
Olivier Deprez93644652022-09-09 11:01:12 +02001610 struct manifest *manifest;
David Brazdilb856be62020-03-25 10:14:55 +00001611 struct string vm_name;
1612 struct fdt fdt;
David Brazdil7a462ec2019-08-15 12:27:47 +01001613 struct fdt_node hyp_node;
1614 size_t i = 0;
1615 bool found_primary_vm = false;
Daniel Boulby3f295b12023-12-15 17:38:08 +00001616 const size_t spmc_size =
1617 align_up(pa_difference(layout_text_begin(), layout_image_end()),
1618 PAGE_SIZE);
1619 const size_t spmc_page_count = spmc_size / PAGE_SIZE;
David Brazdil7a462ec2019-08-15 12:27:47 +01001620
J-Alvescd438fa2023-04-26 10:13:12 +01001621 if (boot_params->mem_ranges_count == 0 &&
1622 boot_params->ns_mem_ranges_count == 0) {
1623 return MANIFEST_ERROR_MEMORY_MISSING;
1624 }
1625
J-Alves77b6f4f2023-03-15 11:34:49 +00001626 dump_memory_ranges(boot_params->mem_ranges,
1627 boot_params->mem_ranges_count, false);
1628 dump_memory_ranges(boot_params->ns_mem_ranges,
1629 boot_params->ns_mem_ranges_count, true);
1630
Olivier Deprez93644652022-09-09 11:01:12 +02001631 /* Allocate space in the ppool for the manifest data. */
1632 if (!manifest_data_init(ppool)) {
1633 panic("Unable to allocate manifest data.\n");
Daniel Boulby801f8ef2022-06-27 14:21:01 +01001634 }
1635
Daniel Boulby3f295b12023-12-15 17:38:08 +00001636 /*
1637 * Add SPMC load address range to memory ranges to track to ensure
1638 * no partitions overlap with this memory.
1639 * The system integrator should have prevented this by defining the
1640 * secure memory region ranges so as not to overlap the SPMC load
1641 * address range. Therefore, this code is intended to catch any
1642 * potential misconfigurations there.
1643 */
1644 if (is_aligned(pa_addr(layout_text_begin()), PAGE_SIZE) &&
1645 spmc_page_count != 0) {
1646 TRY(check_and_record_memory_used(
1647 pa_addr(layout_text_begin()), spmc_page_count,
1648 manifest_data->mem_regions,
1649 &manifest_data->mem_regions_index));
1650 }
1651
Olivier Deprez93644652022-09-09 11:01:12 +02001652 manifest = &manifest_data->manifest;
1653 *manifest_ret = manifest;
1654
David Brazdilb856be62020-03-25 10:14:55 +00001655 if (!fdt_init_from_memiter(&fdt, manifest_fdt)) {
1656 return MANIFEST_ERROR_FILE_SIZE; /* TODO */
David Brazdila2358d42020-01-27 18:51:38 +00001657 }
1658
David Brazdil7a462ec2019-08-15 12:27:47 +01001659 /* Find hypervisor node. */
David Brazdilb856be62020-03-25 10:14:55 +00001660 if (!fdt_find_node(&fdt, "/hypervisor", &hyp_node)) {
David Brazdil7a462ec2019-08-15 12:27:47 +01001661 return MANIFEST_ERROR_NO_HYPERVISOR_FDT_NODE;
1662 }
1663
David Brazdil74e9c3b2019-08-28 11:09:08 +01001664 /* Check "compatible" property. */
David Brazdilf4925382020-03-25 13:33:51 +00001665 if (!fdt_is_compatible(&hyp_node, "hafnium,hafnium")) {
David Brazdil74e9c3b2019-08-28 11:09:08 +01001666 return MANIFEST_ERROR_NOT_COMPATIBLE;
1667 }
1668
Olivier Deprez622ab8d2021-08-02 12:15:45 +02001669 TRY(read_bool(&hyp_node, "ffa_tee_enabled",
1670 &manifest->ffa_tee_enabled));
Andrew Walbran41a49d82020-01-10 17:46:38 +00001671
David Brazdil7a462ec2019-08-15 12:27:47 +01001672 /* Iterate over reserved VM IDs and check no such nodes exist. */
Olivier Deprez2a8ee342020-08-03 15:10:44 +02001673 for (i = HF_VM_ID_BASE; i < HF_VM_ID_OFFSET; i++) {
J-Alves19e20cf2023-08-02 12:48:55 +01001674 ffa_id_t vm_id = (ffa_id_t)i - HF_VM_ID_BASE;
David Brazdil7a462ec2019-08-15 12:27:47 +01001675 struct fdt_node vm_node = hyp_node;
David Brazdil7a462ec2019-08-15 12:27:47 +01001676
David Brazdilb856be62020-03-25 10:14:55 +00001677 generate_vm_node_name(&vm_name, vm_id);
1678 if (fdt_find_child(&vm_node, &vm_name)) {
David Brazdil7a462ec2019-08-15 12:27:47 +01001679 return MANIFEST_ERROR_RESERVED_VM_ID;
1680 }
1681 }
1682
1683 /* Iterate over VM nodes until we find one that does not exist. */
1684 for (i = 0; i <= MAX_VMS; ++i) {
J-Alves19e20cf2023-08-02 12:48:55 +01001685 ffa_id_t vm_id = HF_VM_ID_OFFSET + i;
David Brazdil7a462ec2019-08-15 12:27:47 +01001686 struct fdt_node vm_node = hyp_node;
David Brazdil7a462ec2019-08-15 12:27:47 +01001687
Olivier Deprez2a8ee342020-08-03 15:10:44 +02001688 generate_vm_node_name(&vm_name, vm_id - HF_VM_ID_BASE);
David Brazdilb856be62020-03-25 10:14:55 +00001689 if (!fdt_find_child(&vm_node, &vm_name)) {
David Brazdil7a462ec2019-08-15 12:27:47 +01001690 break;
1691 }
1692
1693 if (i == MAX_VMS) {
1694 return MANIFEST_ERROR_TOO_MANY_VMS;
1695 }
1696
1697 if (vm_id == HF_PRIMARY_VM_ID) {
1698 CHECK(found_primary_vm == false); /* sanity check */
1699 found_primary_vm = true;
1700 }
1701
David Brazdil0251b942019-09-10 15:59:50 +01001702 manifest->vm_count = i + 1;
Olivier Deprez62d99e32020-01-09 15:58:07 +01001703
1704 TRY(parse_vm_common(&vm_node, &manifest->vm[i], vm_id));
1705
Raghu Krishnamurthyb49549e2021-07-02 08:27:38 -07001706 CHECK(!manifest->vm[i].is_hyp_loaded ||
1707 manifest->vm[i].is_ffa_partition);
1708
1709 if (manifest->vm[i].is_ffa_partition &&
1710 !manifest->vm[i].is_hyp_loaded) {
Olivier Deprez62d99e32020-01-09 15:58:07 +01001711 TRY(parse_ffa_partition_package(stage1_locked, &vm_node,
1712 &manifest->vm[i], vm_id,
J-Alves77b6f4f2023-03-15 11:34:49 +00001713 boot_params, ppool));
J-Alves596049f2023-03-15 11:40:24 +00001714 size_t page_count =
1715 align_up(manifest->vm[i].secondary.mem_size,
1716 PAGE_SIZE) /
1717 PAGE_SIZE;
1718
1719 if (vm_id == HF_PRIMARY_VM_ID) {
1720 continue;
1721 }
1722
1723 TRY(check_partition_memory_is_valid(
1724 manifest->vm[i].partition.load_addr, page_count,
Olivier Deprez058ddee2024-08-27 09:22:11 +02001725 0, boot_params, false));
J-Alves596049f2023-03-15 11:40:24 +00001726
1727 /*
1728 * Check if memory from load-address until (load-address
1729 * + memory size) has been used by other partition.
1730 */
1731 TRY(check_and_record_memory_used(
Daniel Boulby3f295b12023-12-15 17:38:08 +00001732 manifest->vm[i].partition.load_addr, page_count,
1733 manifest_data->mem_regions,
1734 &manifest_data->mem_regions_index));
Olivier Deprez62d99e32020-01-09 15:58:07 +01001735 } else {
1736 TRY(parse_vm(&vm_node, &manifest->vm[i], vm_id));
1737 }
David Brazdil7a462ec2019-08-15 12:27:47 +01001738 }
1739
Olivier Deprezfb05f3c2020-11-10 17:48:04 +01001740 if (!found_primary_vm && vm_id_is_current_world(HF_PRIMARY_VM_ID)) {
David Brazdil7a462ec2019-08-15 12:27:47 +01001741 return MANIFEST_ERROR_NO_PRIMARY_VM;
1742 }
1743
1744 return MANIFEST_SUCCESS;
1745}
1746
Olivier Deprez93644652022-09-09 11:01:12 +02001747/**
1748 * Free manifest data resources, called once manifest parsing has
1749 * completed and VMs are loaded.
1750 */
Daniel Boulby801f8ef2022-06-27 14:21:01 +01001751void manifest_deinit(struct mpool *ppool)
1752{
Olivier Deprez93644652022-09-09 11:01:12 +02001753 manifest_data_deinit(ppool);
Daniel Boulby801f8ef2022-06-27 14:21:01 +01001754}
1755
David Brazdil7a462ec2019-08-15 12:27:47 +01001756const char *manifest_strerror(enum manifest_return_code ret_code)
1757{
1758 switch (ret_code) {
1759 case MANIFEST_SUCCESS:
1760 return "Success";
David Brazdila2358d42020-01-27 18:51:38 +00001761 case MANIFEST_ERROR_FILE_SIZE:
1762 return "Total size in header does not match file size";
1763 case MANIFEST_ERROR_NO_ROOT_NODE:
1764 return "Could not find root node in manifest";
David Brazdil7a462ec2019-08-15 12:27:47 +01001765 case MANIFEST_ERROR_NO_HYPERVISOR_FDT_NODE:
1766 return "Could not find \"hypervisor\" node in manifest";
David Brazdil74e9c3b2019-08-28 11:09:08 +01001767 case MANIFEST_ERROR_NOT_COMPATIBLE:
1768 return "Hypervisor manifest entry not compatible with Hafnium";
David Brazdil7a462ec2019-08-15 12:27:47 +01001769 case MANIFEST_ERROR_RESERVED_VM_ID:
1770 return "Manifest defines a VM with a reserved ID";
1771 case MANIFEST_ERROR_NO_PRIMARY_VM:
1772 return "Manifest does not contain a primary VM entry";
1773 case MANIFEST_ERROR_TOO_MANY_VMS:
1774 return "Manifest specifies more VMs than Hafnium has "
1775 "statically allocated space for";
1776 case MANIFEST_ERROR_PROPERTY_NOT_FOUND:
1777 return "Property not found";
1778 case MANIFEST_ERROR_MALFORMED_STRING:
1779 return "Malformed string property";
David Brazdil0dbb41f2019-09-09 18:03:35 +01001780 case MANIFEST_ERROR_STRING_TOO_LONG:
1781 return "String too long";
David Brazdil7a462ec2019-08-15 12:27:47 +01001782 case MANIFEST_ERROR_MALFORMED_INTEGER:
1783 return "Malformed integer property";
1784 case MANIFEST_ERROR_INTEGER_OVERFLOW:
1785 return "Integer overflow";
Andrew Scullae9962e2019-10-03 16:51:16 +01001786 case MANIFEST_ERROR_MALFORMED_INTEGER_LIST:
1787 return "Malformed integer list property";
Andrew Scullb2c3a242019-11-04 13:52:36 +00001788 case MANIFEST_ERROR_MALFORMED_BOOLEAN:
1789 return "Malformed boolean property";
Manish Pandey6542f5c2020-04-27 14:37:46 +01001790 case MANIFEST_ERROR_MEMORY_REGION_NODE_EMPTY:
1791 return "Memory-region node should have at least one entry";
Manish Pandeye68e7932020-04-23 15:29:28 +01001792 case MANIFEST_ERROR_DEVICE_REGION_NODE_EMPTY:
1793 return "Device-region node should have at least one entry";
Manish Pandeyf06c9072020-09-29 15:41:58 +01001794 case MANIFEST_ERROR_RXTX_SIZE_MISMATCH:
1795 return "RX and TX buffers should be of same size";
Varun Wadekar4afbfd72022-10-13 14:30:18 +01001796 case MANIFEST_ERROR_MEM_REGION_EMPTY:
1797 return "Memory region should have at least one page";
Karl Meakinf6d49402023-04-04 18:14:26 +01001798 case MANIFEST_ERROR_BASE_ADDRESS_AND_RELATIVE_ADDRESS:
1799 return "Base and relative addresses are mutually exclusive";
Daniel Boulbya7e9e182022-06-27 14:21:01 +01001800 case MANIFEST_ERROR_MEM_REGION_OVERLAP:
1801 return "Memory region overlaps with one already allocated";
Daniel Boulbyc1a613d2022-10-18 11:26:17 +01001802 case MANIFEST_ERROR_MEM_REGION_UNALIGNED:
1803 return "Memory region is not aligned to a page boundary";
Raghu Krishnamurthy384693c2021-10-11 13:56:24 -07001804 case MANIFEST_ERROR_INVALID_MEM_PERM:
1805 return "Memory permission should be RO, RW or RX";
Daniel Boulby801f8ef2022-06-27 14:21:01 +01001806 case MANIFEST_ERROR_INTERRUPT_ID_REPEATED:
1807 return "Interrupt ID already assigned to another endpoint";
Madhukar Pappireddy5c04a382022-12-28 11:29:26 -06001808 case MANIFEST_ERROR_ILLEGAL_NS_INT_ACTION:
J-Alvesbb2703a2025-02-10 12:11:56 +00001809 return "Illegal value specified for the field: Action in "
Madhukar Pappireddy84154052022-06-21 18:30:25 -05001810 "response to NS Interrupt";
J-Alvesbb2703a2025-02-10 12:11:56 +00001811 case MANIFEST_ERROR_ILLEGAL_SRI_POLICY:
1812 return "Illegal value specified for the field: SRI Policy";
1813 case MANIFEST_ERROR_SRI_POLICY_NOT_SUPPORTED:
1814 return "S-EL0 Partitions do not support the SRI trigger policy";
Raghu Krishnamurthy98da1ca2022-10-04 08:59:01 -07001815 case MANIFEST_ERROR_INTERRUPT_ID_NOT_IN_LIST:
1816 return "Interrupt ID is not in the list of interrupts";
Madhukar Pappireddy5c04a382022-12-28 11:29:26 -06001817 case MANIFEST_ERROR_ILLEGAL_OTHER_S_INT_ACTION:
1818 return "Illegal value specified for the field: Action in "
1819 "response to Other-S Interrupt";
J-Alves77b6f4f2023-03-15 11:34:49 +00001820 case MANIFEST_ERROR_MEMORY_MISSING:
1821 return "Memory nodes must be defined in the SPMC manifest "
Olivier Deprez3cbdf862023-06-07 15:36:32 +02001822 "('memory' and 'ns-memory')";
J-Alves77b6f4f2023-03-15 11:34:49 +00001823 case MANIFEST_ERROR_MEM_REGION_INVALID:
Olivier Deprez3cbdf862023-06-07 15:36:32 +02001824 return "Invalid memory region range";
Daniel Boulby4339edc2024-02-21 14:59:00 +00001825 case MANIFEST_ERROR_DEVICE_MEM_REGION_INVALID:
1826 return "Invalid device memory region range";
Kathleen Capella4a2a6e72023-04-21 14:43:26 -04001827 case MANIFEST_ERROR_INVALID_BOOT_ORDER:
1828 return "Boot order should be a unique value less than "
1829 "default largest value";
Kathleen Capella422b10b2023-06-30 18:28:27 -04001830 case MANIFEST_ERROR_UUID_ALL_ZEROS:
1831 return "UUID should not be NIL";
Karl Meakin45abeeb2024-08-02 16:55:44 +01001832 case MANIFEST_ERROR_TOO_MANY_UUIDS:
1833 return "Manifest specifies more UUIDs than Hafnium has "
1834 "statically allocated space for";
Madhukar Pappireddy3c2b7912023-10-11 14:47:27 -05001835 case MANIFEST_ERROR_MISSING_SMMU_ID:
1836 return "SMMU ID must be specified for the given Stream IDs";
1837 case MANIFEST_ERROR_MISMATCH_DMA_ACCESS_PERMISSIONS:
1838 return "DMA device access permissions must match memory region "
1839 "attributes";
1840 case MANIFEST_ERROR_STREAM_IDS_OVERFLOW:
1841 return "DMA device stream ID count exceeds predefined limit";
1842 case MANIFEST_ERROR_DMA_ACCESS_PERMISSIONS_OVERFLOW:
1843 return "DMA access permissions count exceeds predefined limit";
Madhukar Pappireddy8a364472023-10-11 15:34:07 -05001844 case MANIFEST_ERROR_DMA_DEVICE_OVERFLOW:
1845 return "Number of device regions with DMA peripheral exceeds "
1846 "limit.";
Karl Meakin18694022024-08-02 13:59:25 +01001847 case MANIFEST_ERROR_VM_AVAILABILITY_MESSAGE_INVALID:
1848 return "VM availability messages invalid (bits [31:2] must be "
1849 "zero)";
David Brazdil7a462ec2019-08-15 12:27:47 +01001850 }
1851
1852 panic("Unexpected manifest return code.");
1853}