blob: 6f40ec2caba5dd30f4446dee4468020d9b07eb37 [file] [log] [blame]
David Brazdil7a462ec2019-08-15 12:27:47 +01001/*
2 * Copyright 2019 The Hafnium Authors.
3 *
Andrew Walbrane959ec12020-06-17 15:01:09 +01004 * Use of this source code is governed by a BSD-style
5 * license that can be found in the LICENSE file or at
6 * https://opensource.org/licenses/BSD-3-Clause.
David Brazdil7a462ec2019-08-15 12:27:47 +01007 */
8
9#include "hf/manifest.h"
10
J-Alvesd8a1d362023-03-08 11:15:28 +000011#include <stddef.h>
J-Alves77b6f4f2023-03-15 11:34:49 +000012#include <stdint.h>
J-Alvesd8a1d362023-03-08 11:15:28 +000013
J-Alves35315782022-01-25 17:58:32 +000014#include "hf/arch/types.h"
J-Alves77b6f4f2023-03-15 11:34:49 +000015#include "hf/arch/vmid_base.h"
J-Alves35315782022-01-25 17:58:32 +000016
David Brazdil7a462ec2019-08-15 12:27:47 +010017#include "hf/addr.h"
Daniel Boulbya2f8c662021-11-26 17:52:53 +000018#include "hf/assert.h"
J-Alves35315782022-01-25 17:58:32 +000019#include "hf/boot_info.h"
J-Alves77b6f4f2023-03-15 11:34:49 +000020#include "hf/boot_params.h"
David Brazdil7a462ec2019-08-15 12:27:47 +010021#include "hf/check.h"
Andrew Scullae9962e2019-10-03 16:51:16 +010022#include "hf/dlog.h"
J-Alves77b6f4f2023-03-15 11:34:49 +000023#include "hf/fdt.h"
Karl Meakin0e617d92024-04-05 12:55:22 +010024#include "hf/ffa.h"
Karl Meakin275aa292024-11-18 14:56:09 +000025#include "hf/ffa_partition_manifest.h"
Daniel Boulby3f295b12023-12-15 17:38:08 +000026#include "hf/layout.h"
J-Alves77b6f4f2023-03-15 11:34:49 +000027#include "hf/mm.h"
28#include "hf/mpool.h"
J-Alves2f86c1e2022-02-23 18:44:19 +000029#include "hf/sp_pkg.h"
David Brazdil7a462ec2019-08-15 12:27:47 +010030#include "hf/static_assert.h"
31#include "hf/std.h"
32
/**
 * Evaluates `expr` (which must yield an `enum manifest_return_code`) and
 * returns early from the *enclosing* function when it is anything other
 * than MANIFEST_SUCCESS. `do { } while (0)` makes it statement-safe.
 */
#define TRY(expr)                                            \
	do {                                                 \
		enum manifest_return_code ret_code = (expr); \
		if (ret_code != MANIFEST_SUCCESS) {          \
			return ret_code;                     \
		}                                            \
	} while (0)
40
David Brazdilb856be62020-03-25 10:14:55 +000041#define VM_ID_MAX (HF_VM_ID_OFFSET + MAX_VMS - 1)
42#define VM_ID_MAX_DIGITS (5)
43#define VM_NAME_EXTRA_CHARS (3) /* "vm" + number + '\0' */
44#define VM_NAME_MAX_SIZE (VM_ID_MAX_DIGITS + VM_NAME_EXTRA_CHARS)
45static_assert(VM_NAME_MAX_SIZE <= STRING_MAX_SIZE,
46 "VM name does not fit into a struct string.");
47static_assert(VM_ID_MAX <= 99999, "Insufficient VM_NAME_BUF_SIZE");
Olivier Deprez2a8ee342020-08-03 15:10:44 +020048static_assert((HF_OTHER_WORLD_ID > VM_ID_MAX) ||
49 (HF_OTHER_WORLD_ID < HF_VM_ID_BASE),
Andrew Walbran9daa57e2019-09-27 13:33:20 +010050 "TrustZone VM ID clashes with normal VM range.");
David Brazdil7a462ec2019-08-15 12:27:47 +010051
Kathleen Capella4a2a6e72023-04-21 14:43:26 -040052/* Bitmap to track boot order values in use. */
53#define BOOT_ORDER_ENTRY_BITS (sizeof(uint64_t) * 8)
54#define BOOT_ORDER_MAP_ENTRIES \
55 ((DEFAULT_BOOT_ORDER + (BOOT_ORDER_ENTRY_BITS - 1)) / \
56 BOOT_ORDER_ENTRY_BITS)
57
/**
 * A struct to keep track of the partitions properties during early boot
 * manifest parsing:
 * - Interrupts ID.
 * - Physical memory ranges.
 */
struct manifest_data {
	/* Parsed manifest contents. */
	struct manifest manifest;
	/* Bitmap of interrupt IDs already claimed by a partition. */
	struct interrupt_bitmap intids;
	/*
	 * Allocate enough for the maximum amount of memory regions defined via
	 * the partitions manifest, and regions for each partition
	 * address-space.
	 */
	struct mem_range mem_regions[PARTITION_MAX_MEMORY_REGIONS * MAX_VMS +
				     PARTITION_MAX_DEVICE_REGIONS * MAX_VMS +
				     MAX_VMS];
	/* Number of entries of `mem_regions` currently in use. */
	size_t mem_regions_index;
	/* Bitmap tracking boot-order values already in use (see above). */
	uint64_t boot_order_values[BOOT_ORDER_MAP_ENTRIES];
};
Olivier Deprez93644652022-09-09 11:01:12 +020078
/**
 * Calculate the number of entries in the ppool that are required to
 * store the manifest_data struct (size rounded up to whole pool entries).
 */
static const size_t manifest_data_ppool_entries =
	(align_up(sizeof(struct manifest_data), MM_PPOOL_ENTRY_SIZE) /
	 MM_PPOOL_ENTRY_SIZE);

/* Singleton; allocated by manifest_data_init, freed by manifest_data_deinit. */
static struct manifest_data *manifest_data;
Daniel Boulby801f8ef2022-06-27 14:21:01 +010088
Kathleen Capella4a2a6e72023-04-21 14:43:26 -040089static bool check_boot_order(uint16_t boot_order)
90{
91 uint16_t i;
92 uint64_t boot_order_mask;
93
94 if (boot_order == DEFAULT_BOOT_ORDER) {
95 return true;
96 }
97 if (boot_order > DEFAULT_BOOT_ORDER) {
98 dlog_error("Boot order should not exceed %x",
99 DEFAULT_BOOT_ORDER);
100 return false;
101 }
102
103 i = boot_order / BOOT_ORDER_ENTRY_BITS;
104 boot_order_mask = 1 << (boot_order % BOOT_ORDER_ENTRY_BITS);
105
106 if ((boot_order_mask & manifest_data->boot_order_values[i]) != 0U) {
107 dlog_error("Boot order must be a unique value.");
108 return false;
109 }
110
111 manifest_data->boot_order_values[i] |= boot_order_mask;
112
113 return true;
114}
115
Daniel Boulby801f8ef2022-06-27 14:21:01 +0100116/**
Olivier Deprez93644652022-09-09 11:01:12 +0200117 * Allocates and clear memory for the manifest data in the given memory pool.
Daniel Boulby801f8ef2022-06-27 14:21:01 +0100118 * Returns true if the memory is successfully allocated.
119 */
Olivier Deprez93644652022-09-09 11:01:12 +0200120static bool manifest_data_init(struct mpool *ppool)
Daniel Boulby801f8ef2022-06-27 14:21:01 +0100121{
Olivier Deprez93644652022-09-09 11:01:12 +0200122 manifest_data = (struct manifest_data *)mpool_alloc_contiguous(
123 ppool, manifest_data_ppool_entries, 1);
J-Alves5c0ae6f2023-06-14 15:20:21 +0100124
125 assert(manifest_data != NULL);
126
Olivier Deprez93644652022-09-09 11:01:12 +0200127 memset_s(manifest_data, sizeof(struct manifest_data), 0,
128 sizeof(struct manifest_data));
Daniel Boulbya7e9e182022-06-27 14:21:01 +0100129
Olivier Deprez93644652022-09-09 11:01:12 +0200130 return manifest_data != NULL;
Daniel Boulby801f8ef2022-06-27 14:21:01 +0100131}
132
133/**
Olivier Deprez93644652022-09-09 11:01:12 +0200134 * Frees the memory used for the manifest data in the given memory pool.
Daniel Boulby801f8ef2022-06-27 14:21:01 +0100135 */
Olivier Deprez93644652022-09-09 11:01:12 +0200136static void manifest_data_deinit(struct mpool *ppool)
Daniel Boulby801f8ef2022-06-27 14:21:01 +0100137{
Daniel Boulbya7e9e182022-06-27 14:21:01 +0100138 /**
Olivier Deprez93644652022-09-09 11:01:12 +0200139 * Clear and return the memory used for the manifest_data struct to the
140 * memory pool.
Daniel Boulbya7e9e182022-06-27 14:21:01 +0100141 */
Olivier Deprez93644652022-09-09 11:01:12 +0200142 memset_s(manifest_data, sizeof(struct manifest_data), 0,
143 sizeof(struct manifest_data));
144 mpool_add_chunk(ppool, manifest_data, manifest_data_ppool_entries);
Daniel Boulby801f8ef2022-06-27 14:21:01 +0100145}
146
J-Alves19e20cf2023-08-02 12:48:55 +0100147static inline size_t count_digits(ffa_id_t vm_id)
David Brazdilb856be62020-03-25 10:14:55 +0000148{
149 size_t digits = 0;
150
151 do {
152 digits++;
153 vm_id /= 10;
154 } while (vm_id);
155 return digits;
156}
157
/**
 * Generates a string with the two letters "vm" followed by an integer,
 * e.g. "vm1". Assumes `str` can hold at least VM_NAME_MAX_SIZE bytes.
 */
static void generate_vm_node_name(struct string *str, ffa_id_t vm_id)
{
	static const char *digits = "0123456789";
	size_t vm_id_digits = count_digits(vm_id);
	char *base = str->data;
	/* Point one past the final character and fill backwards. */
	char *ptr = base + (VM_NAME_EXTRA_CHARS + vm_id_digits);

	assert(vm_id_digits <= VM_ID_MAX_DIGITS);
	*(--ptr) = '\0';
	/* Emit decimal digits least-significant first. */
	do {
		*(--ptr) = digits[vm_id % 10];
		vm_id /= 10;
	} while (vm_id);
	*(--ptr) = 'm';
	*(--ptr) = 'v';
	/* If this fails, the digit count and the writes disagreed. */
	assert(ptr == base);
}
179
Andrew Scullae9962e2019-10-03 16:51:16 +0100180/**
Andrew Scullb2c3a242019-11-04 13:52:36 +0000181 * Read a boolean property: true if present; false if not. If present, the value
182 * of the property must be empty else it is considered malformed.
Andrew Scullae9962e2019-10-03 16:51:16 +0100183 */
Andrew Scullb2c3a242019-11-04 13:52:36 +0000184static enum manifest_return_code read_bool(const struct fdt_node *node,
185 const char *property, bool *out)
Andrew Scullae9962e2019-10-03 16:51:16 +0100186{
David Brazdilb856be62020-03-25 10:14:55 +0000187 struct memiter data;
188 bool present = fdt_read_property(node, property, &data);
Andrew Scullae9962e2019-10-03 16:51:16 +0100189
David Brazdilb856be62020-03-25 10:14:55 +0000190 if (present && memiter_size(&data) != 0) {
Andrew Scullb2c3a242019-11-04 13:52:36 +0000191 return MANIFEST_ERROR_MALFORMED_BOOLEAN;
192 }
193
194 *out = present;
195 return MANIFEST_SUCCESS;
Andrew Scullae9962e2019-10-03 16:51:16 +0100196}
197
/**
 * Reads a string property of `node` into `out`.
 *
 * Returns:
 *  - MANIFEST_ERROR_PROPERTY_NOT_FOUND if the property is absent;
 *  - MANIFEST_ERROR_MALFORMED_STRING if string_init rejects the raw bytes;
 *  - MANIFEST_ERROR_STRING_TOO_LONG if the value does not fit in `out`.
 *
 * The switch is deliberately default-less so the compiler flags any new
 * string_return_code value that is not handled here.
 */
static enum manifest_return_code read_string(const struct fdt_node *node,
					     const char *property,
					     struct string *out)
{
	struct memiter data;

	if (!fdt_read_property(node, property, &data)) {
		return MANIFEST_ERROR_PROPERTY_NOT_FOUND;
	}

	switch (string_init(out, &data)) {
	case STRING_SUCCESS:
		return MANIFEST_SUCCESS;
	case STRING_ERROR_INVALID_INPUT:
		return MANIFEST_ERROR_MALFORMED_STRING;
	case STRING_ERROR_TOO_LONG:
		return MANIFEST_ERROR_STRING_TOO_LONG;
	}
}
217
218static enum manifest_return_code read_optional_string(
David Brazdil136f2942019-09-23 14:11:03 +0100219 const struct fdt_node *node, const char *property, struct string *out)
Andrew Scull72b43c02019-09-18 13:53:45 +0100220{
David Brazdil136f2942019-09-23 14:11:03 +0100221 enum manifest_return_code ret;
Andrew Scull72b43c02019-09-18 13:53:45 +0100222
David Brazdil136f2942019-09-23 14:11:03 +0100223 ret = read_string(node, property, out);
224 if (ret == MANIFEST_ERROR_PROPERTY_NOT_FOUND) {
225 string_init_empty(out);
226 ret = MANIFEST_SUCCESS;
Andrew Scull72b43c02019-09-18 13:53:45 +0100227 }
David Brazdil136f2942019-09-23 14:11:03 +0100228 return ret;
Andrew Scull72b43c02019-09-18 13:53:45 +0100229}
230
/**
 * Reads a 64-bit unsigned integer property of `node` into `out`.
 *
 * Returns MANIFEST_ERROR_PROPERTY_NOT_FOUND if the property is absent, or
 * MANIFEST_ERROR_MALFORMED_INTEGER if fdt_parse_number cannot decode the
 * property's bytes as a number of that size.
 */
static enum manifest_return_code read_uint64(const struct fdt_node *node,
					     const char *property,
					     uint64_t *out)
{
	struct memiter data;

	if (!fdt_read_property(node, property, &data)) {
		return MANIFEST_ERROR_PROPERTY_NOT_FOUND;
	}

	if (!fdt_parse_number(&data, memiter_size(&data), out)) {
		return MANIFEST_ERROR_MALFORMED_INTEGER;
	}

	return MANIFEST_SUCCESS;
}
247
David Brazdil080ee312020-02-25 15:30:30 -0800248static enum manifest_return_code read_optional_uint64(
249 const struct fdt_node *node, const char *property,
250 uint64_t default_value, uint64_t *out)
251{
252 enum manifest_return_code ret;
253
254 ret = read_uint64(node, property, out);
255 if (ret == MANIFEST_ERROR_PROPERTY_NOT_FOUND) {
256 *out = default_value;
257 return MANIFEST_SUCCESS;
258 }
259 return ret;
260}
261
Olivier Deprez62d99e32020-01-09 15:58:07 +0100262static enum manifest_return_code read_uint32(const struct fdt_node *node,
263 const char *property,
264 uint32_t *out)
265{
266 uint64_t value;
267
268 TRY(read_uint64(node, property, &value));
269
270 if (value > UINT32_MAX) {
271 return MANIFEST_ERROR_INTEGER_OVERFLOW;
272 }
273
274 *out = (uint32_t)value;
275 return MANIFEST_SUCCESS;
276}
277
Manish Pandeye68e7932020-04-23 15:29:28 +0100278static enum manifest_return_code read_optional_uint32(
279 const struct fdt_node *node, const char *property,
280 uint32_t default_value, uint32_t *out)
281{
282 enum manifest_return_code ret;
283
284 ret = read_uint32(node, property, out);
285 if (ret == MANIFEST_ERROR_PROPERTY_NOT_FOUND) {
286 *out = default_value;
287 return MANIFEST_SUCCESS;
288 }
289 return ret;
290}
291
David Brazdil7a462ec2019-08-15 12:27:47 +0100292static enum manifest_return_code read_uint16(const struct fdt_node *node,
293 const char *property,
294 uint16_t *out)
295{
296 uint64_t value;
297
298 TRY(read_uint64(node, property, &value));
David Brazdil7a462ec2019-08-15 12:27:47 +0100299 if (value > UINT16_MAX) {
300 return MANIFEST_ERROR_INTEGER_OVERFLOW;
301 }
302
303 *out = (uint16_t)value;
304 return MANIFEST_SUCCESS;
305}
306
J-Alvesb37fd082020-10-22 12:29:21 +0100307static enum manifest_return_code read_optional_uint16(
308 const struct fdt_node *node, const char *property,
309 uint16_t default_value, uint16_t *out)
310{
311 enum manifest_return_code ret;
312
313 ret = read_uint16(node, property, out);
314 if (ret == MANIFEST_ERROR_PROPERTY_NOT_FOUND) {
315 *out = default_value;
316 return MANIFEST_SUCCESS;
317 }
318
Kathleen Capella4a2a6e72023-04-21 14:43:26 -0400319 return ret;
J-Alvesb37fd082020-10-22 12:29:21 +0100320}
321
Olivier Deprez62d99e32020-01-09 15:58:07 +0100322static enum manifest_return_code read_uint8(const struct fdt_node *node,
323 const char *property, uint8_t *out)
324{
325 uint64_t value;
326
327 TRY(read_uint64(node, property, &value));
328
329 if (value > UINT8_MAX) {
330 return MANIFEST_ERROR_INTEGER_OVERFLOW;
331 }
332
333 *out = (uint8_t)value;
334 return MANIFEST_SUCCESS;
335}
336
J-Alves4369bd92020-08-07 16:35:36 +0100337static enum manifest_return_code read_optional_uint8(
338 const struct fdt_node *node, const char *property,
339 uint8_t default_value, uint8_t *out)
340{
341 enum manifest_return_code ret;
342
343 ret = read_uint8(node, property, out);
344 if (ret == MANIFEST_ERROR_PROPERTY_NOT_FOUND) {
345 *out = default_value;
346 return MANIFEST_SUCCESS;
347 }
348
349 return MANIFEST_SUCCESS;
350}
351
/* Iterator over a devicetree property holding a list of 32-bit cells. */
struct uint32list_iter {
	/* Remaining, not-yet-consumed bytes of the property value. */
	struct memiter mem_it;
};
355
/**
 * Initialises `out` to iterate over the uint32 list property of `node`.
 *
 * Returns MANIFEST_ERROR_PROPERTY_NOT_FOUND (with `out` left empty) if the
 * property is absent, or MANIFEST_ERROR_MALFORMED_INTEGER_LIST if the
 * property size is not a multiple of 4 bytes.
 */
static enum manifest_return_code read_uint32list(const struct fdt_node *node,
						 const char *property,
						 struct uint32list_iter *out)
{
	struct memiter data;

	if (!fdt_read_property(node, property, &data)) {
		/* Leave the iterator valid-but-empty on failure. */
		memiter_init(&out->mem_it, NULL, 0);
		return MANIFEST_ERROR_PROPERTY_NOT_FOUND;
	}

	if ((memiter_size(&data) % sizeof(uint32_t)) != 0) {
		return MANIFEST_ERROR_MALFORMED_INTEGER_LIST;
	}

	out->mem_it = data;
	return MANIFEST_SUCCESS;
}
374
J-Alves4369bd92020-08-07 16:35:36 +0100375static enum manifest_return_code read_optional_uint32list(
376 const struct fdt_node *node, const char *property,
377 struct uint32list_iter *out)
378{
379 enum manifest_return_code ret = read_uint32list(node, property, out);
380
381 if (ret == MANIFEST_ERROR_PROPERTY_NOT_FOUND) {
382 return MANIFEST_SUCCESS;
383 }
384 return ret;
385}
386
Andrew Scullae9962e2019-10-03 16:51:16 +0100387static bool uint32list_has_next(const struct uint32list_iter *list)
388{
389 return memiter_size(&list->mem_it) > 0;
390}
391
/**
 * Pops the next 32-bit value off `list` into `out`, advancing the iterator.
 *
 * The caller must have verified uint32list_has_next(list); this is CHECK'd.
 * Returns MANIFEST_ERROR_MALFORMED_INTEGER if the next cell cannot be
 * parsed as a number.
 */
static enum manifest_return_code uint32list_get_next(
	struct uint32list_iter *list, uint32_t *out)
{
	uint64_t num;

	CHECK(uint32list_has_next(list));
	if (!fdt_parse_number(&list->mem_it, sizeof(uint32_t), &num)) {
		return MANIFEST_ERROR_MALFORMED_INTEGER;
	}

	/* Safe: fdt_parse_number was asked for exactly 4 bytes. */
	*out = (uint32_t)num;
	return MANIFEST_SUCCESS;
}
405
Karl Meakin60532972024-08-02 16:11:21 +0100406/**
407 * Parse a UUID from `uuid` into `out`.
408 * Returns `MANIFEST_SUCCESS` if parsing succeeded.
409 */
410static enum manifest_return_code parse_uuid(struct uint32list_iter *uuid,
411 struct ffa_uuid *out)
412{
413 for (size_t i = 0; i < 4 && uint32list_has_next(uuid); i++) {
414 TRY(uint32list_get_next(uuid, &out->uuid[i]));
415 }
416
417 return MANIFEST_SUCCESS;
418}
419
420/**
421 * Parse a list of UUIDs from `uuid` into `out`.
422 * Writes the number of UUIDs parsed to `len`.
423 * Returns `MANIFEST_SUCCESS` if parsing succeeded.
424 * Returns `MANIFEST_ERROR_UUID_ALL_ZEROS` if any of the UUIDs are all zeros.
425 * Returns `MANIFEEST_ERROR_TOO_MANY_UUIDS` if there are more than
426 * `PARTITION_MAX_UUIDS`
427 */
428static enum manifest_return_code parse_uuid_list(struct uint32list_iter *uuid,
429 struct ffa_uuid *out,
430 uint16_t *len)
431{
432 uint16_t j;
433
Karl Meakin45abeeb2024-08-02 16:55:44 +0100434 for (j = 0; uint32list_has_next(uuid); j++) {
Karl Meakin60532972024-08-02 16:11:21 +0100435 TRY(parse_uuid(uuid, &out[j]));
436
437 if (ffa_uuid_is_null(&out[j])) {
438 return MANIFEST_ERROR_UUID_ALL_ZEROS;
439 }
440 dlog_verbose(" UUID %#x-%x-%x-%x\n", out[j].uuid[0],
441 out[j].uuid[1], out[j].uuid[2], out[j].uuid[3]);
Karl Meakin45abeeb2024-08-02 16:55:44 +0100442
443 if (j >= PARTITION_MAX_UUIDS) {
444 return MANIFEST_ERROR_TOO_MANY_UUIDS;
445 }
Karl Meakin60532972024-08-02 16:11:21 +0100446 }
447
448 *len = j;
449 return MANIFEST_SUCCESS;
450}
451
/**
 * Parses the manifest properties shared by all VM node flavours: FF-A
 * partition flag, hypervisor-loaded flag, debug name, SMC whitelist, and —
 * for secondary VMs only — memory size, vCPU count and FDT filename.
 */
static enum manifest_return_code parse_vm_common(const struct fdt_node *node,
						 struct manifest_vm *vm,
						 ffa_id_t vm_id)
{
	struct uint32list_iter smcs;
	size_t idx;

	TRY(read_bool(node, "is_ffa_partition", &vm->is_ffa_partition));

	TRY(read_bool(node, "hyp_loaded", &vm->is_hyp_loaded));

	TRY(read_string(node, "debug_name", &vm->debug_name));

	/* Copy whitelisted SMC IDs, up to the MAX_SMCS capacity. */
	TRY(read_optional_uint32list(node, "smc_whitelist", &smcs));
	while (uint32list_has_next(&smcs) &&
	       vm->smc_whitelist.smc_count < MAX_SMCS) {
		idx = vm->smc_whitelist.smc_count++;
		TRY(uint32list_get_next(&smcs, &vm->smc_whitelist.smcs[idx]));
	}

	/* Excess entries are dropped with a warning, not an error. */
	if (uint32list_has_next(&smcs)) {
		dlog_warning("%s SMC whitelist too long.\n",
			     vm->debug_name.data);
	}

	TRY(read_bool(node, "smc_whitelist_permissive",
		      &vm->smc_whitelist.permissive));

	/* Only secondary VMs carry explicit size/vCPU/FDT properties. */
	if (vm_id != HF_PRIMARY_VM_ID) {
		TRY(read_uint64(node, "mem_size", &vm->secondary.mem_size));
		TRY(read_uint16(node, "vcpu_count", &vm->secondary.vcpu_count));
		TRY(read_optional_string(node, "fdt_filename",
					 &vm->secondary.fdt_filename));
	}

	return MANIFEST_SUCCESS;
}
489
/**
 * Parses the properties specific to a (non-FF-A) VM node: kernel filename,
 * primary-VM ramdisk/boot address, and the exception level to run at.
 */
static enum manifest_return_code parse_vm(struct fdt_node *node,
					  struct manifest_vm *vm,
					  ffa_id_t vm_id)
{
	TRY(read_optional_string(node, "kernel_filename",
				 &vm->kernel_filename));

	/* Ramdisk and boot address only make sense for the primary VM. */
	if (vm_id == HF_PRIMARY_VM_ID) {
		TRY(read_optional_string(node, "ramdisk_filename",
					 &vm->primary.ramdisk_filename));
		TRY(read_optional_uint64(node, "boot_address",
					 MANIFEST_INVALID_ADDRESS,
					 &vm->primary.boot_address));
	}
	/* Defaults to EL1 when the property is absent. */
	TRY(read_optional_uint8(node, "exception-level", (uint8_t)EL1,
				(uint8_t *)&vm->partition.run_time_el));

	return MANIFEST_SUCCESS;
}
509
Karl Meakinab0add02024-11-18 15:26:37 +0000510static bool mem_range_contains(struct mem_range range, uintptr_t address)
511{
512 return pa_addr(range.begin) <= address && address <= pa_addr(range.end);
513}
514
/**
 * Return true if the region described by `region_start` and `page_count`
 * overlaps with any of `ranges`.
 *
 * NOTE(review): only the region's two endpoints are tested against each
 * range, so a range that lies strictly *inside* the region (both of its
 * bounds between region_start and region_end) is not detected — confirm
 * whether callers relying on this for overlap detection can hit that case.
 */
static bool is_memory_region_within_ranges(uintptr_t region_start,
					   uint32_t page_count,
					   const struct mem_range ranges[],
					   size_t ranges_size)
{
	/* Inclusive end; cast avoids 32-bit overflow in the multiply. */
	uintptr_t region_end =
		region_start + ((uintptr_t)page_count * PAGE_SIZE - 1);

	for (size_t i = 0; i < ranges_size; i++) {
		if (mem_range_contains(ranges[i], region_start) ||
		    mem_range_contains(ranges[i], region_end)) {
			return true;
		}
	}

	return false;
}
536
537void dump_memory_ranges(const struct mem_range *ranges,
538 const size_t ranges_size, bool ns)
539{
540 if (LOG_LEVEL < LOG_LEVEL_VERBOSE) {
541 return;
542 }
543
544 dlog("%s Memory ranges:\n", ns ? "NS" : "S");
545
546 for (size_t i = 0; i < ranges_size; i++) {
547 uintptr_t begin = pa_addr(ranges[i].begin);
548 uintptr_t end = pa_addr(ranges[i].end);
549 size_t page_count =
550 align_up(pa_difference(ranges[i].begin, ranges[i].end),
551 PAGE_SIZE) /
552 PAGE_SIZE;
553
Karl Meakine8937d92024-03-19 16:04:25 +0000554 dlog(" [%lx - %lx (%zu pages)]\n", begin, end, page_count);
J-Alves77b6f4f2023-03-15 11:34:49 +0000555 }
556}
557
558/**
559 * Check the partition's assigned memory is contained in the memory ranges
560 * configured for the SWd, in the SPMC's manifest.
561 */
562static enum manifest_return_code check_partition_memory_is_valid(
563 uintptr_t base_address, uint32_t page_count, uint32_t attributes,
Olivier Deprez058ddee2024-08-27 09:22:11 +0200564 const struct boot_params *params, bool is_device_region)
J-Alves77b6f4f2023-03-15 11:34:49 +0000565{
566 bool is_secure_region =
567 (attributes & MANIFEST_REGION_ATTR_SECURITY) == 0U;
Daniel Boulby4339edc2024-02-21 14:59:00 +0000568 const struct mem_range *ranges_from_manifest;
569 size_t ranges_count;
570 bool within_ranges;
571 enum manifest_return_code error_return;
572
573 if (!is_device_region) {
574 ranges_from_manifest = is_secure_region ? params->mem_ranges
575 : params->ns_mem_ranges;
576 ranges_count = is_secure_region ? params->mem_ranges_count
577 : params->ns_mem_ranges_count;
578 error_return = MANIFEST_ERROR_MEM_REGION_INVALID;
579 } else {
580 ranges_from_manifest = is_secure_region
581 ? params->device_mem_ranges
582 : params->ns_device_mem_ranges;
583 ranges_count = is_secure_region
584 ? params->device_mem_ranges_count
585 : params->ns_device_mem_ranges_count;
586 error_return = MANIFEST_ERROR_DEVICE_MEM_REGION_INVALID;
587 }
588
589 within_ranges = is_memory_region_within_ranges(
J-Alves77b6f4f2023-03-15 11:34:49 +0000590 base_address, page_count, ranges_from_manifest, ranges_count);
591
Daniel Boulby4339edc2024-02-21 14:59:00 +0000592 return within_ranges ? MANIFEST_SUCCESS : error_return;
J-Alves77b6f4f2023-03-15 11:34:49 +0000593}
594
595/*
596 * Keep track of the memory allocated by partitions. This includes memory region
Daniel Boulby4d165722023-11-21 13:46:42 +0000597 * nodes and device region nodes defined in their respective partition
598 * manifests, as well address space defined from their load address.
J-Alves77b6f4f2023-03-15 11:34:49 +0000599 */
600static enum manifest_return_code check_and_record_memory_used(
Daniel Boulby3f295b12023-12-15 17:38:08 +0000601 uintptr_t base_address, uint32_t page_count,
602 struct mem_range *mem_ranges, size_t *mem_regions_index)
Daniel Boulbya7e9e182022-06-27 14:21:01 +0100603{
Karl Meakin6291eb22024-11-18 12:43:47 +0000604 paddr_t begin;
Daniel Boulbya7e9e182022-06-27 14:21:01 +0100605
Daniel Boulbyc1a613d2022-10-18 11:26:17 +0100606 if (!is_aligned(base_address, PAGE_SIZE)) {
Karl Meakine8937d92024-03-19 16:04:25 +0000607 dlog_error("base_address (%#lx) is not aligned to page size.\n",
Daniel Boulbyc1a613d2022-10-18 11:26:17 +0100608 base_address);
609 return MANIFEST_ERROR_MEM_REGION_UNALIGNED;
610 }
611
Karl Meakin6291eb22024-11-18 12:43:47 +0000612 if (is_memory_region_within_ranges(base_address, page_count, mem_ranges,
613 *mem_regions_index)) {
614 return MANIFEST_ERROR_MEM_REGION_OVERLAP;
Daniel Boulbya7e9e182022-06-27 14:21:01 +0100615 }
616
Karl Meakin6291eb22024-11-18 12:43:47 +0000617 begin = pa_init(base_address);
618
619 mem_ranges[*mem_regions_index].begin = begin;
620 mem_ranges[*mem_regions_index].end =
621 pa_add(begin, page_count * PAGE_SIZE - 1);
622 (*mem_regions_index)++;
623
624 return MANIFEST_SUCCESS;
Daniel Boulbya7e9e182022-06-27 14:21:01 +0100625}
626
/**
 * Parses the DMA-related properties shared by memory-region and
 * device-region nodes: the optional "smmu-id" and the list of
 * "stream-ids" routed through that SMMU.
 *
 * Returns MANIFEST_ERROR_STREAM_IDS_OVERFLOW if more than
 * PARTITION_MAX_STREAMS_PER_DEVICE stream IDs are listed, or
 * MANIFEST_ERROR_MISSING_SMMU_ID if stream IDs are given without an
 * SMMU ID.
 */
static enum manifest_return_code parse_common_fields_mem_dev_region_node(
	struct fdt_node *ffa_node, struct dma_device_properties *dma_prop)
{
	uint32_t j = 0;
	struct uint32list_iter list;

	TRY(read_optional_uint32(ffa_node, "smmu-id", MANIFEST_INVALID_ID,
				 &dma_prop->smmu_id));
	if (dma_prop->smmu_id != MANIFEST_INVALID_ID) {
		dlog_verbose("  smmu-id:  %u\n", dma_prop->smmu_id);
	}

	TRY(read_optional_uint32list(ffa_node, "stream-ids", &list));
	dlog_verbose("  Stream IDs assigned:\n");

	j = 0;
	while (uint32list_has_next(&list)) {
		/* Check capacity before consuming the next ID. */
		if (j == PARTITION_MAX_STREAMS_PER_DEVICE) {
			return MANIFEST_ERROR_STREAM_IDS_OVERFLOW;
		}

		TRY(uint32list_get_next(&list, &dma_prop->stream_ids[j]));
		dlog_verbose("    %u\n", dma_prop->stream_ids[j]);
		j++;
	}
	if (j == 0) {
		dlog_verbose("    None\n");
	} else if (dma_prop->smmu_id == MANIFEST_INVALID_ID) {
		/*
		 * SMMU ID must be specified if the partition specifies
		 * Stream IDs for any device upstream of SMMU.
		 */
		return MANIFEST_ERROR_MISSING_SMMU_ID;
	}
	dma_prop->stream_count = j;

	return MANIFEST_SUCCESS;
}
665
Karl Meakin68368292024-11-18 12:02:12 +0000666/**
667 * Parse and validate a memory regions's base address.
668 *
669 * The base address can be specified either as an absolute address (with
670 * `base-address`) or as an offset from `load_address` (with
671 * `load-address-relative-offset`).
672
673 * Returns an error if:
674 * - Neither `base-address` or `load-address-relative-offset` are specified.
675 * - Both `base-address` and `load-address-relative-offset` are specified.
676 * - The effective address (`load-address-relative-offset` + `load_address`)
677 * would overflow.
678 */
679static enum manifest_return_code parse_base_address(
680 struct fdt_node *mem_node, uintptr_t load_address,
681 struct memory_region *mem_region)
682{
683 uintptr_t relative_offset;
684 uintptr_t absolute_address;
685
686 bool is_relative;
687 bool is_absolute;
688
689 TRY(read_optional_uint64(mem_node, "base-address",
690 MANIFEST_INVALID_ADDRESS, &absolute_address));
691
692 TRY(read_optional_uint64(mem_node, "load-address-relative-offset",
693 MANIFEST_INVALID_ADDRESS, &relative_offset));
694
695 is_absolute = (absolute_address != MANIFEST_INVALID_ADDRESS);
696 is_relative = (relative_offset != MANIFEST_INVALID_ADDRESS);
697
698 if (!is_absolute && !is_relative) {
699 return MANIFEST_ERROR_PROPERTY_NOT_FOUND;
700 }
701
702 if (is_absolute && is_relative) {
703 return MANIFEST_ERROR_BASE_ADDRESS_AND_RELATIVE_ADDRESS;
704 }
705
706 if (is_relative && relative_offset > UINT64_MAX - load_address) {
707 return MANIFEST_ERROR_INTEGER_OVERFLOW;
708 }
709
710 mem_region->base_address =
711 is_absolute ? absolute_address : load_address + relative_offset;
Karl Meakin6291eb22024-11-18 12:43:47 +0000712 mem_region->is_relative = is_relative;
Karl Meakin68368292024-11-18 12:02:12 +0000713
714 return MANIFEST_SUCCESS;
715}
716
Karl Meakin275aa292024-11-18 14:56:09 +0000717/**
718 * Parse and validate a memory region/device region's attributes.
719 * Returns an error if:
720 * - Memory region attributes are not `R` or `RW` or `RX`.
721 * - Device region attributes are not `R` or `RW`.
722 * NOTE: Security attribute is not checked by this function, it is checked in
723 * the load phase.
724 */
725static enum manifest_return_code parse_ffa_region_attributes(
726 struct fdt_node *node, uint32_t *out_attributes, bool is_device)
727{
728 uint32_t attributes;
729
730 TRY(read_uint32(node, "attributes", out_attributes));
731
732 attributes = *out_attributes &
733 (MANIFEST_REGION_ATTR_READ | MANIFEST_REGION_ATTR_WRITE |
734 MANIFEST_REGION_ATTR_EXEC);
735
736 if (is_device) {
737 switch (attributes) {
738 case MANIFEST_REGION_ATTR_READ:
739 case MANIFEST_REGION_ATTR_READ | MANIFEST_REGION_ATTR_WRITE:
740 break;
741 default:
742 return MANIFEST_ERROR_INVALID_MEM_PERM;
743 }
744 } else {
745 switch (attributes) {
746 case MANIFEST_REGION_ATTR_READ:
747 case MANIFEST_REGION_ATTR_READ | MANIFEST_REGION_ATTR_WRITE:
748 case MANIFEST_REGION_ATTR_READ | MANIFEST_REGION_ATTR_EXEC:
749 break;
750 default:
751 return MANIFEST_ERROR_INVALID_MEM_PERM;
752 }
753 }
754
755 /* Filter region attributes. */
756 *out_attributes &= MANIFEST_REGION_ALL_ATTR_MASK;
757
758 return MANIFEST_SUCCESS;
759}
760
Karl Meakina2e7ae72024-11-18 15:15:53 +0000761static enum manifest_return_code parse_page_count(struct fdt_node *node,
762 uint32_t *page_count)
763{
764 TRY(read_uint32(node, "pages-count", page_count));
765
766 if (*page_count == 0) {
767 return MANIFEST_ERROR_MEM_REGION_EMPTY;
768 }
769
770 return MANIFEST_SUCCESS;
771}
772
Manish Pandey6542f5c2020-04-27 14:37:46 +0100773static enum manifest_return_code parse_ffa_memory_region_node(
Karl Meakinf6d49402023-04-04 18:14:26 +0100774 struct fdt_node *mem_node, uintptr_t load_address,
775 struct memory_region *mem_regions, uint16_t *count, struct rx_tx *rxtx,
J-Alves77b6f4f2023-03-15 11:34:49 +0000776 const struct boot_params *boot_params)
Manish Pandey6542f5c2020-04-27 14:37:46 +0100777{
Manish Pandeyfa1f2912020-05-05 12:57:01 +0100778 uint32_t phandle;
Raghu Krishnamurthy641dcd82022-07-19 23:21:20 -0700779 uint16_t i = 0;
Madhukar Pappireddy3c2b7912023-10-11 14:47:27 -0500780 uint32_t j = 0;
Madhukar Pappireddy3c2b7912023-10-11 14:47:27 -0500781 struct uint32list_iter list;
Manish Pandey6542f5c2020-04-27 14:37:46 +0100782
783 dlog_verbose(" Partition memory regions\n");
784
785 if (!fdt_is_compatible(mem_node, "arm,ffa-manifest-memory-regions")) {
786 return MANIFEST_ERROR_NOT_COMPATIBLE;
787 }
788
789 if (!fdt_first_child(mem_node)) {
790 return MANIFEST_ERROR_MEMORY_REGION_NODE_EMPTY;
791 }
792
793 do {
794 dlog_verbose(" Memory Region[%u]\n", i);
795
796 TRY(read_optional_string(mem_node, "description",
797 &mem_regions[i].name));
798 dlog_verbose(" Name: %s\n",
799 string_data(&mem_regions[i].name));
800
Karl Meakin68368292024-11-18 12:02:12 +0000801 TRY(parse_base_address(mem_node, load_address,
802 &mem_regions[i]));
Karl Meakinf6d49402023-04-04 18:14:26 +0100803
Karl Meakina2e7ae72024-11-18 15:15:53 +0000804 TRY(parse_page_count(mem_node, &mem_regions[i].page_count));
Olivier Deprezb7aa0c22023-06-29 10:46:26 +0200805 dlog_verbose(" Pages_count: %u\n",
Manish Pandey6542f5c2020-04-27 14:37:46 +0100806 mem_regions[i].page_count);
807
Karl Meakin275aa292024-11-18 14:56:09 +0000808 TRY(parse_ffa_region_attributes(
809 mem_node, &mem_regions[i].attributes, false));
Olivier Deprezb7aa0c22023-06-29 10:46:26 +0200810 dlog_verbose(" Attributes: %#x\n",
Manish Pandey6542f5c2020-04-27 14:37:46 +0100811 mem_regions[i].attributes);
Manish Pandeyfa1f2912020-05-05 12:57:01 +0100812
J-Alves77b6f4f2023-03-15 11:34:49 +0000813 TRY(check_partition_memory_is_valid(
814 mem_regions[i].base_address, mem_regions[i].page_count,
Olivier Deprez058ddee2024-08-27 09:22:11 +0200815 mem_regions[i].attributes, boot_params, false));
J-Alves77b6f4f2023-03-15 11:34:49 +0000816
Karl Meakin6291eb22024-11-18 12:43:47 +0000817 /*
818 * Memory regions are not allowed to overlap with
819 * `load_address`, unless the memory region is relative.
820 */
821 if (!mem_regions[i].is_relative) {
822 struct mem_range range =
823 make_mem_range(mem_regions[i].base_address,
824 mem_regions[i].page_count);
825
826 if (mem_range_contains(range, load_address)) {
827 return MANIFEST_ERROR_MEM_REGION_OVERLAP;
828 }
829 }
830
Daniel Boulby3f295b12023-12-15 17:38:08 +0000831 TRY(check_and_record_memory_used(
832 mem_regions[i].base_address, mem_regions[i].page_count,
833 manifest_data->mem_regions,
834 &manifest_data->mem_regions_index));
J-Alves77b6f4f2023-03-15 11:34:49 +0000835
Madhukar Pappireddy718afa92024-06-20 16:48:49 -0500836 TRY(parse_common_fields_mem_dev_region_node(
837 mem_node, &mem_regions[i].dma_prop));
Madhukar Pappireddy3c2b7912023-10-11 14:47:27 -0500838
839 TRY(read_optional_uint32list(
840 mem_node, "stream-ids-access-permissions", &list));
841 dlog_verbose(" Access permissions of Stream IDs:\n");
842
843 j = 0;
844 while (uint32list_has_next(&list)) {
845 uint32_t permissions;
846
847 if (j == PARTITION_MAX_STREAMS_PER_DEVICE) {
848 return MANIFEST_ERROR_DMA_ACCESS_PERMISSIONS_OVERFLOW;
849 }
850
851 TRY(uint32list_get_next(&list, &permissions));
852 dlog_verbose(" %u\n", permissions);
853
854 if (j == 0) {
Madhukar Pappireddy9c764b32024-06-20 14:36:55 -0500855 mem_regions[i].dma_access_permissions =
Madhukar Pappireddy3c2b7912023-10-11 14:47:27 -0500856 permissions;
857 }
858
859 /*
860 * All stream ids belonging to a dma device must specify
861 * the same access permissions.
862 */
863 if (permissions !=
Madhukar Pappireddy9c764b32024-06-20 14:36:55 -0500864 mem_regions[i].dma_access_permissions) {
Madhukar Pappireddy3c2b7912023-10-11 14:47:27 -0500865 return MANIFEST_ERROR_MISMATCH_DMA_ACCESS_PERMISSIONS;
866 }
867
868 j++;
869 }
870
871 if (j == 0) {
872 dlog_verbose(" None\n");
873 } else if (j != mem_regions[i].dma_prop.stream_count) {
874 return MANIFEST_ERROR_MISMATCH_DMA_ACCESS_PERMISSIONS;
875 }
876
877 if (j > 0) {
878 /* Filter the dma access permissions. */
Madhukar Pappireddy9c764b32024-06-20 14:36:55 -0500879 mem_regions[i].dma_access_permissions &=
Madhukar Pappireddy3c2b7912023-10-11 14:47:27 -0500880 MANIFEST_REGION_ALL_ATTR_MASK;
881 }
882
Manish Pandeya70a4192020-10-07 22:05:04 +0100883 if (rxtx->available) {
884 TRY(read_optional_uint32(
885 mem_node, "phandle",
886 (uint32_t)MANIFEST_INVALID_ADDRESS, &phandle));
887 if (phandle == rxtx->rx_phandle) {
888 dlog_verbose(" Assigned as RX buffer\n");
889 rxtx->rx_buffer = &mem_regions[i];
890 } else if (phandle == rxtx->tx_phandle) {
891 dlog_verbose(" Assigned as TX buffer\n");
892 rxtx->tx_buffer = &mem_regions[i];
893 }
Manish Pandeyfa1f2912020-05-05 12:57:01 +0100894 }
895
Manish Pandey6542f5c2020-04-27 14:37:46 +0100896 i++;
Raghu Krishnamurthy641dcd82022-07-19 23:21:20 -0700897 } while (fdt_next_sibling(mem_node) &&
898 (i < PARTITION_MAX_MEMORY_REGIONS));
Manish Pandey6542f5c2020-04-27 14:37:46 +0100899
Manish Pandeya70a4192020-10-07 22:05:04 +0100900 if (rxtx->available &&
901 (rxtx->rx_buffer->page_count != rxtx->tx_buffer->page_count)) {
Manish Pandeyf06c9072020-09-29 15:41:58 +0100902 return MANIFEST_ERROR_RXTX_SIZE_MISMATCH;
903 }
904
Manish Pandey2145c212020-05-01 16:04:22 +0100905 *count = i;
Manish Pandey6542f5c2020-04-27 14:37:46 +0100906
907 return MANIFEST_SUCCESS;
908}
909
Raghu Krishnamurthy98da1ca2022-10-04 08:59:01 -0700910static struct interrupt_info *device_region_get_interrupt_info(
911 struct device_region *dev_regions, uint32_t intid)
912{
913 for (uint32_t i = 0; i < ARRAY_SIZE(dev_regions->interrupts); i++) {
914 if (dev_regions->interrupts[i].id == intid) {
915 return &(dev_regions->interrupts[i]);
916 }
917 }
918 return NULL;
919}
920
Manish Pandeye68e7932020-04-23 15:29:28 +0100921static enum manifest_return_code parse_ffa_device_region_node(
Manish Pandey2145c212020-05-01 16:04:22 +0100922 struct fdt_node *dev_node, struct device_region *dev_regions,
Daniel Boulby4339edc2024-02-21 14:59:00 +0000923 uint16_t *count, uint8_t *dma_device_count,
924 const struct boot_params *boot_params)
Manish Pandeye68e7932020-04-23 15:29:28 +0100925{
926 struct uint32list_iter list;
Raghu Krishnamurthy641dcd82022-07-19 23:21:20 -0700927 uint16_t i = 0;
Madhukar Pappireddy5fc8be12021-08-03 11:42:53 -0500928 uint32_t j = 0;
Olivier Deprez93644652022-09-09 11:01:12 +0200929 struct interrupt_bitmap allocated_intids = manifest_data->intids;
Madhukar Pappireddy718afa92024-06-20 16:48:49 -0500930 uint8_t dma_device_id = 0;
Manish Pandeye68e7932020-04-23 15:29:28 +0100931
932 dlog_verbose(" Partition Device Regions\n");
933
934 if (!fdt_is_compatible(dev_node, "arm,ffa-manifest-device-regions")) {
935 return MANIFEST_ERROR_NOT_COMPATIBLE;
936 }
937
938 if (!fdt_first_child(dev_node)) {
939 return MANIFEST_ERROR_DEVICE_REGION_NODE_EMPTY;
940 }
941
Madhukar Pappireddye032af52023-10-11 14:52:58 -0500942 *dma_device_count = 0;
943
Manish Pandeye68e7932020-04-23 15:29:28 +0100944 do {
945 dlog_verbose(" Device Region[%u]\n", i);
946
947 TRY(read_optional_string(dev_node, "description",
948 &dev_regions[i].name));
949 dlog_verbose(" Name: %s\n",
950 string_data(&dev_regions[i].name));
951
952 TRY(read_uint64(dev_node, "base-address",
953 &dev_regions[i].base_address));
Karl Meakine8937d92024-03-19 16:04:25 +0000954 dlog_verbose(" Base address: %#lx\n",
Manish Pandeye68e7932020-04-23 15:29:28 +0100955 dev_regions[i].base_address);
956
Karl Meakina2e7ae72024-11-18 15:15:53 +0000957 TRY(parse_page_count(dev_node, &dev_regions[i].page_count));
Olivier Deprezb7aa0c22023-06-29 10:46:26 +0200958 dlog_verbose(" Pages_count: %u\n",
Manish Pandeye68e7932020-04-23 15:29:28 +0100959 dev_regions[i].page_count);
960
Daniel Boulby3f295b12023-12-15 17:38:08 +0000961 TRY(check_and_record_memory_used(
962 dev_regions[i].base_address, dev_regions[i].page_count,
963 manifest_data->mem_regions,
964 &manifest_data->mem_regions_index));
Daniel Boulby4d165722023-11-21 13:46:42 +0000965
Karl Meakin275aa292024-11-18 14:56:09 +0000966 TRY(parse_ffa_region_attributes(
967 dev_node, &dev_regions[i].attributes, true));
Olivier Deprezb7aa0c22023-06-29 10:46:26 +0200968 dlog_verbose(" Attributes: %#x\n",
Manish Pandeye68e7932020-04-23 15:29:28 +0100969 dev_regions[i].attributes);
970
Daniel Boulby4339edc2024-02-21 14:59:00 +0000971 TRY(check_partition_memory_is_valid(
972 dev_regions[i].base_address, dev_regions[i].page_count,
Olivier Deprez058ddee2024-08-27 09:22:11 +0200973 dev_regions[i].attributes, boot_params, true));
Daniel Boulby4339edc2024-02-21 14:59:00 +0000974
Manish Pandeye68e7932020-04-23 15:29:28 +0100975 TRY(read_optional_uint32list(dev_node, "interrupts", &list));
976 dlog_verbose(" Interrupt List:\n");
977 j = 0;
978 while (uint32list_has_next(&list) &&
Raghu Krishnamurthy641dcd82022-07-19 23:21:20 -0700979 j < PARTITION_MAX_INTERRUPTS_PER_DEVICE) {
Daniel Boulby801f8ef2022-06-27 14:21:01 +0100980 uint32_t intid;
Daniel Boulby801f8ef2022-06-27 14:21:01 +0100981
Manish Pandeye68e7932020-04-23 15:29:28 +0100982 TRY(uint32list_get_next(
983 &list, &dev_regions[i].interrupts[j].id));
Daniel Boulby801f8ef2022-06-27 14:21:01 +0100984 intid = dev_regions[i].interrupts[j].id;
Daniel Boulby801f8ef2022-06-27 14:21:01 +0100985
986 dlog_verbose(" ID = %u\n", intid);
987
Daniel Boulby4ca50f02022-07-29 18:29:34 +0100988 if (interrupt_bitmap_get_value(&allocated_intids,
989 intid) == 1U) {
Daniel Boulby801f8ef2022-06-27 14:21:01 +0100990 return MANIFEST_ERROR_INTERRUPT_ID_REPEATED;
991 }
992
Daniel Boulby4ca50f02022-07-29 18:29:34 +0100993 interrupt_bitmap_set_value(&allocated_intids, intid);
Daniel Boulby801f8ef2022-06-27 14:21:01 +0100994
Manish Pandeye68e7932020-04-23 15:29:28 +0100995 if (uint32list_has_next(&list)) {
996 TRY(uint32list_get_next(&list,
997 &dev_regions[i]
998 .interrupts[j]
999 .attributes));
1000 } else {
1001 return MANIFEST_ERROR_MALFORMED_INTEGER_LIST;
1002 }
1003
Raghu Krishnamurthy98da1ca2022-10-04 08:59:01 -07001004 dev_regions[i].interrupts[j].mpidr_valid = false;
1005 dev_regions[i].interrupts[j].mpidr = 0;
1006
Daniel Boulby801f8ef2022-06-27 14:21:01 +01001007 dlog_verbose(" attributes = %u\n",
Manish Pandeye68e7932020-04-23 15:29:28 +01001008 dev_regions[i].interrupts[j].attributes);
1009 j++;
1010 }
Madhukar Pappireddy5fc8be12021-08-03 11:42:53 -05001011
1012 dev_regions[i].interrupt_count = j;
Manish Pandeye68e7932020-04-23 15:29:28 +01001013 if (j == 0) {
1014 dlog_verbose(" Empty\n");
Raghu Krishnamurthy98da1ca2022-10-04 08:59:01 -07001015 } else {
1016 TRY(read_optional_uint32list(
1017 dev_node, "interrupts-target", &list));
1018 dlog_verbose(" Interrupt Target List:\n");
1019
1020 while (uint32list_has_next(&list)) {
1021 uint32_t intid;
1022 uint64_t mpidr = 0;
1023 uint32_t mpidr_lower = 0;
1024 uint32_t mpidr_upper = 0;
1025 struct interrupt_info *info = NULL;
1026
1027 TRY(uint32list_get_next(&list, &intid));
1028
1029 dlog_verbose(" ID = %u\n", intid);
1030
1031 if (interrupt_bitmap_get_value(
1032 &allocated_intids, intid) != 1U) {
1033 return MANIFEST_ERROR_INTERRUPT_ID_NOT_IN_LIST;
1034 }
1035
1036 TRY(uint32list_get_next(&list, &mpidr_upper));
1037 TRY(uint32list_get_next(&list, &mpidr_lower));
1038 mpidr = mpidr_upper;
1039 mpidr <<= 32;
1040 mpidr |= mpidr_lower;
1041
1042 info = device_region_get_interrupt_info(
1043 &dev_regions[i], intid);
1044 /*
1045 * We should find info since
1046 * interrupt_bitmap_get_value already ensures
1047 * that we saw the interrupt and allocated ids
1048 * for it.
1049 */
1050 assert(info != NULL);
1051 info->mpidr = mpidr;
1052 info->mpidr_valid = true;
Karl Meakine8937d92024-03-19 16:04:25 +00001053 dlog_verbose(" MPIDR = %#lx\n", mpidr);
Raghu Krishnamurthy98da1ca2022-10-04 08:59:01 -07001054 }
Manish Pandeye68e7932020-04-23 15:29:28 +01001055 }
1056
Madhukar Pappireddy718afa92024-06-20 16:48:49 -05001057 TRY(parse_common_fields_mem_dev_region_node(
1058 dev_node, &dev_regions[i].dma_prop));
1059
Madhukar Pappireddy9c764b32024-06-20 14:36:55 -05001060 if (dev_regions[i].dma_prop.smmu_id != MANIFEST_INVALID_ID) {
Madhukar Pappireddy9c764b32024-06-20 14:36:55 -05001061 dev_regions[i].dma_prop.dma_device_id = dma_device_id++;
Madhukar Pappireddye032af52023-10-11 14:52:58 -05001062 *dma_device_count = dma_device_id;
1063
Madhukar Pappireddy8a364472023-10-11 15:34:07 -05001064 if (*dma_device_count > PARTITION_MAX_DMA_DEVICES) {
1065 return MANIFEST_ERROR_DMA_DEVICE_OVERFLOW;
1066 }
1067
Madhukar Pappireddye032af52023-10-11 14:52:58 -05001068 dlog_verbose(" dma peripheral device id: %u\n",
Madhukar Pappireddy9c764b32024-06-20 14:36:55 -05001069 dev_regions[i].dma_prop.dma_device_id);
Manish Pandeye68e7932020-04-23 15:29:28 +01001070 }
Madhukar Pappireddye032af52023-10-11 14:52:58 -05001071
Manish Pandeye68e7932020-04-23 15:29:28 +01001072 TRY(read_bool(dev_node, "exclusive-access",
1073 &dev_regions[i].exclusive_access));
J-Alves4369bd92020-08-07 16:35:36 +01001074 dlog_verbose(" Exclusive_access: %u\n",
Manish Pandeye68e7932020-04-23 15:29:28 +01001075 dev_regions[i].exclusive_access);
1076
1077 i++;
Raghu Krishnamurthy641dcd82022-07-19 23:21:20 -07001078 } while (fdt_next_sibling(dev_node) &&
1079 (i < PARTITION_MAX_DEVICE_REGIONS));
Manish Pandeye68e7932020-04-23 15:29:28 +01001080
Manish Pandey2145c212020-05-01 16:04:22 +01001081 *count = i;
Manish Pandeye68e7932020-04-23 15:29:28 +01001082
1083 return MANIFEST_SUCCESS;
1084}
1085
J-Alvesabebe432022-05-31 14:40:50 +01001086static enum manifest_return_code sanity_check_ffa_manifest(
1087 struct manifest_vm *vm)
1088{
Karl Meakin0e617d92024-04-05 12:55:22 +01001089 enum ffa_version ffa_version;
J-Alvesabebe432022-05-31 14:40:50 +01001090 enum manifest_return_code ret_code = MANIFEST_SUCCESS;
1091 const char *error_string = "specified in manifest is unsupported";
1092 uint32_t k = 0;
Kathleen Capellaf71dee42023-08-08 16:24:14 -04001093 bool using_req2 = (vm->partition.messaging_method &
1094 (FFA_PARTITION_DIRECT_REQ2_RECV |
1095 FFA_PARTITION_DIRECT_REQ2_SEND)) != 0;
J-Alvesabebe432022-05-31 14:40:50 +01001096
1097 /* ensure that the SPM version is compatible */
Karl Meakin0e617d92024-04-05 12:55:22 +01001098 ffa_version = vm->partition.ffa_version;
1099 if (!ffa_versions_are_compatible(ffa_version, FFA_VERSION_COMPILED)) {
J-Alvesabebe432022-05-31 14:40:50 +01001100 dlog_error("FF-A partition manifest version %s: %u.%u\n",
Karl Meakin0e617d92024-04-05 12:55:22 +01001101 error_string, ffa_version_get_major(ffa_version),
1102 ffa_version_get_minor(ffa_version));
J-Alvesabebe432022-05-31 14:40:50 +01001103 ret_code = MANIFEST_ERROR_NOT_COMPATIBLE;
1104 }
1105
1106 if (vm->partition.xlat_granule != PAGE_4KB) {
1107 dlog_error("Translation granule %s: %u\n", error_string,
1108 vm->partition.xlat_granule);
1109 ret_code = MANIFEST_ERROR_NOT_COMPATIBLE;
1110 }
1111
1112 if (vm->partition.execution_state != AARCH64) {
1113 dlog_error("Execution state %s: %u\n", error_string,
1114 vm->partition.execution_state);
1115 ret_code = MANIFEST_ERROR_NOT_COMPATIBLE;
1116 }
1117
1118 if (vm->partition.run_time_el != EL1 &&
1119 vm->partition.run_time_el != S_EL1 &&
Daniel Boulby874d5432023-04-27 12:40:24 +01001120 vm->partition.run_time_el != S_EL0 &&
1121 vm->partition.run_time_el != EL0) {
J-Alvesabebe432022-05-31 14:40:50 +01001122 dlog_error("Exception level %s: %d\n", error_string,
1123 vm->partition.run_time_el);
1124 ret_code = MANIFEST_ERROR_NOT_COMPATIBLE;
1125 }
1126
Karl Meakin0e617d92024-04-05 12:55:22 +01001127 if (vm->partition.ffa_version < FFA_VERSION_1_2 && using_req2) {
Kathleen Capellaf71dee42023-08-08 16:24:14 -04001128 dlog_error("Messaging method %s: %x\n", error_string,
1129 vm->partition.messaging_method);
1130 ret_code = MANIFEST_ERROR_NOT_COMPATIBLE;
1131 }
1132
J-Alvesabebe432022-05-31 14:40:50 +01001133 if ((vm->partition.messaging_method &
1134 ~(FFA_PARTITION_DIRECT_REQ_RECV | FFA_PARTITION_DIRECT_REQ_SEND |
Kathleen Capellaf71dee42023-08-08 16:24:14 -04001135 FFA_PARTITION_INDIRECT_MSG | FFA_PARTITION_DIRECT_REQ2_RECV |
1136 FFA_PARTITION_DIRECT_REQ2_SEND)) != 0U) {
J-Alvesabebe432022-05-31 14:40:50 +01001137 dlog_error("Messaging method %s: %x\n", error_string,
1138 vm->partition.messaging_method);
1139 ret_code = MANIFEST_ERROR_NOT_COMPATIBLE;
1140 }
1141
Daniel Boulby874d5432023-04-27 12:40:24 +01001142 if ((vm->partition.run_time_el == S_EL0 ||
1143 vm->partition.run_time_el == EL0) &&
J-Alvesabebe432022-05-31 14:40:50 +01001144 vm->partition.execution_ctx_count != 1) {
1145 dlog_error(
1146 "Exception level and execution context count %s: %d "
1147 "%d\n",
1148 error_string, vm->partition.run_time_el,
1149 vm->partition.execution_ctx_count);
1150 ret_code = MANIFEST_ERROR_NOT_COMPATIBLE;
1151 }
1152
Raghu Krishnamurthy641dcd82022-07-19 23:21:20 -07001153 for (uint16_t i = 0; i < vm->partition.dev_region_count; i++) {
J-Alvesabebe432022-05-31 14:40:50 +01001154 struct device_region dev_region;
1155
1156 dev_region = vm->partition.dev_regions[i];
1157
Raghu Krishnamurthy641dcd82022-07-19 23:21:20 -07001158 if (dev_region.interrupt_count >
1159 PARTITION_MAX_INTERRUPTS_PER_DEVICE) {
J-Alvesabebe432022-05-31 14:40:50 +01001160 dlog_error(
1161 "Interrupt count for device region exceeds "
1162 "limit.\n");
1163 ret_code = MANIFEST_ERROR_NOT_COMPATIBLE;
1164 continue;
1165 }
1166
1167 for (uint8_t j = 0; j < dev_region.interrupt_count; j++) {
1168 k++;
1169 if (k > VM_MANIFEST_MAX_INTERRUPTS) {
1170 dlog_error(
1171 "Interrupt count for VM exceeds "
1172 "limit.\n");
1173 ret_code = MANIFEST_ERROR_NOT_COMPATIBLE;
1174 continue;
1175 }
1176 }
1177 }
1178
1179 /* GP register is restricted to one of x0 - x3. */
Karl Meakin824b63d2024-06-03 19:04:53 +01001180 if (vm->partition.gp_register_num != DEFAULT_BOOT_GP_REGISTER &&
J-Alvesabebe432022-05-31 14:40:50 +01001181 vm->partition.gp_register_num > 3) {
1182 dlog_error("GP register number %s: %u\n", error_string,
1183 vm->partition.gp_register_num);
1184 ret_code = MANIFEST_ERROR_NOT_COMPATIBLE;
1185 }
1186
1187 return ret_code;
1188}
1189
Madhukar Pappireddye032af52023-10-11 14:52:58 -05001190/**
1191 * Find the device id allocated to the device region node corresponding to the
1192 * specified stream id.
1193 */
1194static bool find_dma_device_id_from_dev_region_nodes(
1195 const struct manifest_vm *manifest_vm, uint32_t sid, uint8_t *device_id)
1196{
1197 for (uint16_t i = 0; i < manifest_vm->partition.dev_region_count; i++) {
1198 struct device_region dev_region =
1199 manifest_vm->partition.dev_regions[i];
1200
Madhukar Pappireddy9c764b32024-06-20 14:36:55 -05001201 for (uint8_t j = 0; j < dev_region.dma_prop.stream_count; j++) {
1202 if (sid == dev_region.dma_prop.stream_ids[j]) {
1203 *device_id = dev_region.dma_prop.dma_device_id;
Madhukar Pappireddye032af52023-10-11 14:52:58 -05001204 return true;
1205 }
1206 }
1207 }
1208 return false;
1209}
1210
1211/**
1212 * Identify the device id of a DMA device node corresponding to a stream id
1213 * specified in the memory region node.
1214 */
1215static bool map_dma_device_id_to_stream_ids(struct manifest_vm *vm)
1216{
1217 for (uint16_t i = 0; i < vm->partition.mem_region_count; i++) {
1218 struct memory_region mem_region = vm->partition.mem_regions[i];
1219
1220 for (uint8_t j = 0; j < mem_region.dma_prop.stream_count; j++) {
1221 uint32_t sid = mem_region.dma_prop.stream_ids[j];
1222 uint8_t device_id = 0;
1223
1224 /*
1225 * Every stream id must have been declared in the
1226 * device node as well.
1227 */
1228 if (!find_dma_device_id_from_dev_region_nodes(
1229 vm, sid, &device_id)) {
1230 dlog_verbose(
Karl Meakine8937d92024-03-19 16:04:25 +00001231 "Stream ID %d not found in any device "
Madhukar Pappireddye032af52023-10-11 14:52:58 -05001232 "region node of partition manifest\n",
1233 sid);
1234 return false;
1235 }
1236
1237 mem_region.dma_prop.dma_device_id = device_id;
1238 }
1239 }
1240
1241 return true;
1242}
1243
J-Alves77b6f4f2023-03-15 11:34:49 +00001244enum manifest_return_code parse_ffa_manifest(
1245 struct fdt *fdt, struct manifest_vm *vm,
1246 struct fdt_node *boot_info_node, const struct boot_params *boot_params)
Olivier Deprez62d99e32020-01-09 15:58:07 +01001247{
Olivier Deprez62d99e32020-01-09 15:58:07 +01001248 struct uint32list_iter uuid;
Davidson K5d5f2792024-08-19 19:09:12 +05301249 uintpaddr_t load_address;
Olivier Deprez62d99e32020-01-09 15:58:07 +01001250 struct fdt_node root;
1251 struct fdt_node ffa_node;
1252 struct string rxtx_node_name = STRING_INIT("rx_tx-info");
Manish Pandey6542f5c2020-04-27 14:37:46 +01001253 struct string mem_region_node_name = STRING_INIT("memory-regions");
Manish Pandeye68e7932020-04-23 15:29:28 +01001254 struct string dev_region_node_name = STRING_INIT("device-regions");
J-Alves35315782022-01-25 17:58:32 +00001255 struct string boot_info_node_name = STRING_INIT("boot-info");
Madhukar Pappireddy84154052022-06-21 18:30:25 -05001256 bool managed_exit_field_present = false;
Olivier Deprez62d99e32020-01-09 15:58:07 +01001257
1258 if (!fdt_find_node(fdt, "/", &root)) {
1259 return MANIFEST_ERROR_NO_ROOT_NODE;
1260 }
1261
1262 /* Check "compatible" property. */
1263 if (!fdt_is_compatible(&root, "arm,ffa-manifest-1.0")) {
1264 return MANIFEST_ERROR_NOT_COMPATIBLE;
1265 }
1266
J-Alves4369bd92020-08-07 16:35:36 +01001267 TRY(read_uint32list(&root, "uuid", &uuid));
Olivier Deprez62d99e32020-01-09 15:58:07 +01001268
Karl Meakin60532972024-08-02 16:11:21 +01001269 TRY(parse_uuid_list(&uuid, vm->partition.uuids,
1270 &vm->partition.uuid_count));
Kathleen Capella422b10b2023-06-30 18:28:27 -04001271 dlog_verbose(" Number of UUIDs %u\n", vm->partition.uuid_count);
Olivier Deprez62d99e32020-01-09 15:58:07 +01001272
Olivier Deprezb7aa0c22023-06-29 10:46:26 +02001273 TRY(read_uint32(&root, "ffa-version", &vm->partition.ffa_version));
1274 dlog_verbose(" Expected FF-A version %u.%u\n",
Karl Meakin0e617d92024-04-05 12:55:22 +01001275 ffa_version_get_major(vm->partition.ffa_version),
1276 ffa_version_get_minor(vm->partition.ffa_version));
Olivier Deprezb7aa0c22023-06-29 10:46:26 +02001277
Olivier Deprez62d99e32020-01-09 15:58:07 +01001278 TRY(read_uint16(&root, "execution-ctx-count",
Raghu Krishnamurthy8c250a92021-07-02 12:16:42 -07001279 &vm->partition.execution_ctx_count));
J-Alves4369bd92020-08-07 16:35:36 +01001280 dlog_verbose(" Number of execution context %u\n",
Raghu Krishnamurthy8c250a92021-07-02 12:16:42 -07001281 vm->partition.execution_ctx_count);
Olivier Deprez62d99e32020-01-09 15:58:07 +01001282
1283 TRY(read_uint8(&root, "exception-level",
Raghu Krishnamurthy8c250a92021-07-02 12:16:42 -07001284 (uint8_t *)&vm->partition.run_time_el));
1285 dlog_verbose(" Run-time EL %u\n", vm->partition.run_time_el);
Olivier Deprez62d99e32020-01-09 15:58:07 +01001286
1287 TRY(read_uint8(&root, "execution-state",
Raghu Krishnamurthy8c250a92021-07-02 12:16:42 -07001288 (uint8_t *)&vm->partition.execution_state));
1289 dlog_verbose(" Execution state %u\n", vm->partition.execution_state);
Olivier Deprez62d99e32020-01-09 15:58:07 +01001290
Davidson K5d5f2792024-08-19 19:09:12 +05301291 TRY(read_optional_uint64(&root, "load-address", 0, &load_address));
1292 if (vm->partition.load_addr != load_address) {
1293 dlog_warning(
Karl Meakin6291eb22024-11-18 12:43:47 +00001294 "Partition's `load_address` (%#lx) in its manifest "
1295 "differs from `load-address` (%#lx) in its package\n",
1296 vm->partition.load_addr, load_address);
Davidson K5d5f2792024-08-19 19:09:12 +05301297 }
Karl Meakine8937d92024-03-19 16:04:25 +00001298 dlog_verbose(" Load address %#lx\n", vm->partition.load_addr);
Olivier Deprez62d99e32020-01-09 15:58:07 +01001299
J-Alves4369bd92020-08-07 16:35:36 +01001300 TRY(read_optional_uint64(&root, "entrypoint-offset", 0,
Raghu Krishnamurthy8c250a92021-07-02 12:16:42 -07001301 &vm->partition.ep_offset));
Karl Meakine8937d92024-03-19 16:04:25 +00001302 dlog_verbose(" Entry point offset %#zx\n", vm->partition.ep_offset);
Olivier Deprez62d99e32020-01-09 15:58:07 +01001303
J-Alves35315782022-01-25 17:58:32 +00001304 TRY(read_optional_uint32(&root, "gp-register-num",
1305 DEFAULT_BOOT_GP_REGISTER,
1306 &vm->partition.gp_register_num));
Olivier Deprezb7aa0c22023-06-29 10:46:26 +02001307
1308 if (vm->partition.gp_register_num != DEFAULT_BOOT_GP_REGISTER) {
1309 dlog_verbose(" Boot GP register: x%u\n",
1310 vm->partition.gp_register_num);
1311 }
J-Alves35315782022-01-25 17:58:32 +00001312
J-Alvesb37fd082020-10-22 12:29:21 +01001313 TRY(read_optional_uint16(&root, "boot-order", DEFAULT_BOOT_ORDER,
Raghu Krishnamurthy8c250a92021-07-02 12:16:42 -07001314 &vm->partition.boot_order));
Kathleen Capella4a2a6e72023-04-21 14:43:26 -04001315 if (vm->partition.boot_order != DEFAULT_BOOT_ORDER) {
Karl Meakine8937d92024-03-19 16:04:25 +00001316 dlog_verbose(" Boot order %u\n", vm->partition.boot_order);
Kathleen Capella4a2a6e72023-04-21 14:43:26 -04001317 }
1318
1319 if (!check_boot_order(vm->partition.boot_order)) {
1320 return MANIFEST_ERROR_INVALID_BOOT_ORDER;
1321 }
J-Alvesb37fd082020-10-22 12:29:21 +01001322
J-Alves4369bd92020-08-07 16:35:36 +01001323 TRY(read_optional_uint8(&root, "xlat-granule", 0,
Raghu Krishnamurthy8c250a92021-07-02 12:16:42 -07001324 (uint8_t *)&vm->partition.xlat_granule));
1325 dlog_verbose(" Translation granule %u\n", vm->partition.xlat_granule);
Olivier Deprez62d99e32020-01-09 15:58:07 +01001326
1327 ffa_node = root;
1328 if (fdt_find_child(&ffa_node, &rxtx_node_name)) {
1329 if (!fdt_is_compatible(&ffa_node,
1330 "arm,ffa-manifest-rx_tx-buffer")) {
1331 return MANIFEST_ERROR_NOT_COMPATIBLE;
1332 }
1333
Manish Pandeyfa1f2912020-05-05 12:57:01 +01001334 /*
1335 * Read only phandles for now, it will be used to update buffers
1336 * while parsing memory regions.
1337 */
1338 TRY(read_uint32(&ffa_node, "rx-buffer",
Raghu Krishnamurthy8c250a92021-07-02 12:16:42 -07001339 &vm->partition.rxtx.rx_phandle));
Olivier Deprez62d99e32020-01-09 15:58:07 +01001340
Manish Pandeyfa1f2912020-05-05 12:57:01 +01001341 TRY(read_uint32(&ffa_node, "tx-buffer",
Raghu Krishnamurthy8c250a92021-07-02 12:16:42 -07001342 &vm->partition.rxtx.tx_phandle));
Olivier Deprez62d99e32020-01-09 15:58:07 +01001343
Raghu Krishnamurthy8c250a92021-07-02 12:16:42 -07001344 vm->partition.rxtx.available = true;
Olivier Deprez62d99e32020-01-09 15:58:07 +01001345 }
1346
Kathleen Capellaf71dee42023-08-08 16:24:14 -04001347 TRY(read_uint16(&root, "messaging-method",
1348 (uint16_t *)&vm->partition.messaging_method));
Raghu Krishnamurthy8c250a92021-07-02 12:16:42 -07001349 dlog_verbose(" Messaging method %u\n", vm->partition.messaging_method);
Olivier Deprez62d99e32020-01-09 15:58:07 +01001350
Madhukar Pappireddy84154052022-06-21 18:30:25 -05001351 TRY(read_bool(&root, "managed-exit", &managed_exit_field_present));
1352
1353 TRY(read_optional_uint8(
1354 &root, "ns-interrupts-action", NS_ACTION_SIGNALED,
1355 (uint8_t *)&vm->partition.ns_interrupts_action));
1356
1357 /*
1358 * An SP manifest can specify one of the fields listed below:
1359 * `managed-exit`: Introduced in FF-A v1.0 spec.
1360 * `ns-interrupts-action`: Introduced in FF-A v1.1 EAC0 spec.
1361 * If both are missing from the manifest, the default response is
1362 * NS_ACTION_SIGNALED.
1363 */
1364 if (managed_exit_field_present) {
1365 vm->partition.ns_interrupts_action = NS_ACTION_ME;
1366 }
1367
1368 if (vm->partition.ns_interrupts_action != NS_ACTION_QUEUED &&
1369 vm->partition.ns_interrupts_action != NS_ACTION_ME &&
1370 vm->partition.ns_interrupts_action != NS_ACTION_SIGNALED) {
Madhukar Pappireddy5c04a382022-12-28 11:29:26 -06001371 return MANIFEST_ERROR_ILLEGAL_NS_INT_ACTION;
Madhukar Pappireddy84154052022-06-21 18:30:25 -05001372 }
1373
1374 dlog_verbose(
Olivier Deprezb7aa0c22023-06-29 10:46:26 +02001375 " NS Interrupts %s\n",
Madhukar Pappireddy84154052022-06-21 18:30:25 -05001376 (vm->partition.ns_interrupts_action == NS_ACTION_QUEUED)
1377 ? "Queued"
1378 : (vm->partition.ns_interrupts_action == NS_ACTION_SIGNALED)
1379 ? "Signaled"
1380 : "Managed exit");
1381
1382 if (vm->partition.ns_interrupts_action == NS_ACTION_ME) {
1383 /* Managed exit only supported by S_EL1 partitions. */
1384 if (vm->partition.run_time_el != S_EL1) {
1385 dlog_error(
1386 "Managed exit cannot be supported by this "
1387 "partition\n");
Madhukar Pappireddy5c04a382022-12-28 11:29:26 -06001388 return MANIFEST_ERROR_ILLEGAL_NS_INT_ACTION;
Madhukar Pappireddy84154052022-06-21 18:30:25 -05001389 }
Madhukar Pappireddy046dad02022-06-21 18:43:33 -05001390
1391 TRY(read_bool(&root, "managed-exit-virq",
1392 &vm->partition.me_signal_virq));
1393 if (vm->partition.me_signal_virq) {
1394 dlog_verbose(" Managed Exit signaled through vIRQ\n");
1395 }
J-Alvesa4730db2021-11-02 10:31:01 +00001396 }
1397
1398 TRY(read_bool(&root, "notification-support",
1399 &vm->partition.notification_support));
1400 if (vm->partition.notification_support) {
1401 dlog_verbose(" Notifications Receipt Supported\n");
1402 }
Maksims Svecovs9ddf86a2021-05-06 17:17:21 +01001403
Madhukar Pappireddy5c04a382022-12-28 11:29:26 -06001404 TRY(read_optional_uint8(
1405 &root, "other-s-interrupts-action", OTHER_S_INT_ACTION_SIGNALED,
1406 (uint8_t *)&vm->partition.other_s_interrupts_action));
1407
1408 if (vm->partition.other_s_interrupts_action ==
1409 OTHER_S_INT_ACTION_QUEUED) {
1410 if (vm->partition.ns_interrupts_action != NS_ACTION_QUEUED) {
1411 dlog_error(
1412 "Choice of the fields 'ns-interrupts-action' "
1413 "and 'other-s-interrupts-action' not "
1414 "compatible\n");
1415 return MANIFEST_ERROR_NOT_COMPATIBLE;
1416 }
1417 } else if (vm->partition.other_s_interrupts_action >
1418 OTHER_S_INT_ACTION_SIGNALED) {
1419 dlog_error(
J-Alves0a824e92024-04-26 16:20:12 +01001420 "Illegal value specified for the field "
1421 "'other-s-interrupts-action': %u\n",
Madhukar Pappireddy5c04a382022-12-28 11:29:26 -06001422 vm->partition.other_s_interrupts_action);
1423 return MANIFEST_ERROR_ILLEGAL_OTHER_S_INT_ACTION;
1424 }
1425
J-Alves35315782022-01-25 17:58:32 +00001426 /* Parse boot info node. */
1427 if (boot_info_node != NULL) {
1428 ffa_node = root;
1429 vm->partition.boot_info =
1430 fdt_find_child(&ffa_node, &boot_info_node_name);
1431 if (vm->partition.boot_info) {
1432 *boot_info_node = ffa_node;
1433 }
1434 } else {
1435 vm->partition.boot_info = false;
1436 }
1437
Olivier Depreza15f2352022-09-26 09:17:24 +02001438 TRY(read_optional_uint32(
Karl Meakin18694022024-08-02 13:59:25 +01001439 &root, "vm-availability-messages", 0,
1440 (uint32_t *)&vm->partition.vm_availability_messages));
1441 dlog_verbose("vm-availability-messages=%#x\n",
1442 *(uint32_t *)&vm->partition.vm_availability_messages);
1443
1444 if (vm->partition.vm_availability_messages.mbz != 0) {
1445 return MANIFEST_ERROR_VM_AVAILABILITY_MESSAGE_INVALID;
1446 }
1447
1448 TRY(read_optional_uint32(
Olivier Depreza15f2352022-09-26 09:17:24 +02001449 &root, "power-management-messages",
1450 MANIFEST_POWER_MANAGEMENT_CPU_OFF_SUPPORTED |
1451 MANIFEST_POWER_MANAGEMENT_CPU_ON_SUPPORTED,
1452 &vm->partition.power_management));
1453 vm->partition.power_management &= MANIFEST_POWER_MANAGEMENT_ALL_MASK;
1454 if (vm->partition.execution_ctx_count == 1 ||
Daniel Boulby874d5432023-04-27 12:40:24 +01001455 vm->partition.run_time_el == S_EL0 ||
1456 vm->partition.run_time_el == EL0) {
Olivier Depreza15f2352022-09-26 09:17:24 +02001457 vm->partition.power_management =
1458 MANIFEST_POWER_MANAGEMENT_NONE_MASK;
1459 }
1460
1461 dlog_verbose(" Power management messages %#x\n",
1462 vm->partition.power_management);
1463
Manish Pandey6542f5c2020-04-27 14:37:46 +01001464 /* Parse memory-regions */
1465 ffa_node = root;
1466 if (fdt_find_child(&ffa_node, &mem_region_node_name)) {
Raghu Krishnamurthy8c250a92021-07-02 12:16:42 -07001467 TRY(parse_ffa_memory_region_node(
Karl Meakinf6d49402023-04-04 18:14:26 +01001468 &ffa_node, vm->partition.load_addr,
1469 vm->partition.mem_regions,
J-Alves77b6f4f2023-03-15 11:34:49 +00001470 &vm->partition.mem_region_count, &vm->partition.rxtx,
1471 boot_params));
Manish Pandey6542f5c2020-04-27 14:37:46 +01001472 }
Manish Pandey2145c212020-05-01 16:04:22 +01001473 dlog_verbose(" Total %u memory regions found\n",
Raghu Krishnamurthy8c250a92021-07-02 12:16:42 -07001474 vm->partition.mem_region_count);
Manish Pandey6542f5c2020-04-27 14:37:46 +01001475
Manish Pandeye68e7932020-04-23 15:29:28 +01001476 /* Parse Device-regions */
1477 ffa_node = root;
1478 if (fdt_find_child(&ffa_node, &dev_region_node_name)) {
Raghu Krishnamurthy8c250a92021-07-02 12:16:42 -07001479 TRY(parse_ffa_device_region_node(
1480 &ffa_node, vm->partition.dev_regions,
Madhukar Pappireddye032af52023-10-11 14:52:58 -05001481 &vm->partition.dev_region_count,
Daniel Boulby4339edc2024-02-21 14:59:00 +00001482 &vm->partition.dma_device_count, boot_params));
Manish Pandeye68e7932020-04-23 15:29:28 +01001483 }
Manish Pandey2145c212020-05-01 16:04:22 +01001484 dlog_verbose(" Total %u device regions found\n",
Raghu Krishnamurthy8c250a92021-07-02 12:16:42 -07001485 vm->partition.dev_region_count);
Manish Pandeye68e7932020-04-23 15:29:28 +01001486
Madhukar Pappireddye032af52023-10-11 14:52:58 -05001487 if (!map_dma_device_id_to_stream_ids(vm)) {
1488 return MANIFEST_ERROR_NOT_COMPATIBLE;
1489 }
1490
J-Alves4eb7b542022-03-02 15:21:52 +00001491 return sanity_check_ffa_manifest(vm);
Olivier Deprez62d99e32020-01-09 15:58:07 +01001492}
1493
/**
 * Locates and parses the FF-A partition package (SP package) backing the
 * VM described by `node`.
 *
 * Reads the "load_address" property from the VM's node in the SPMC
 * manifest, maps the package header at that address via sp_pkg_init(),
 * validates the embedded partition manifest FDT and parses it with
 * parse_ffa_manifest(). If the partition requested boot information
 * through a GP register, the boot information node is additionally
 * processed via ffa_boot_info_node().
 *
 * Returns MANIFEST_SUCCESS on success; MANIFEST_ERROR_NOT_COMPATIBLE or
 * the error code of the failing sub-step otherwise. Once sp_pkg_init()
 * has succeeded, the package mapping is always released through
 * sp_pkg_deinit() on every exit path (the `out` label).
 */
static enum manifest_return_code parse_ffa_partition_package(
	struct mm_stage1_locked stage1_locked, struct fdt_node *node,
	struct manifest_vm *vm, ffa_id_t vm_id,
	const struct boot_params *boot_params, struct mpool *ppool)
{
	enum manifest_return_code ret = MANIFEST_ERROR_NOT_COMPATIBLE;
	uintpaddr_t load_address;
	struct sp_pkg_header header;
	struct fdt sp_fdt;
	vaddr_t pkg_start;
	vaddr_t manifest_address;
	struct fdt_node boot_info_node;

	/*
	 * This must have been hinted as being an FF-A partition,
	 * return straight with failure if this is not the case.
	 */
	if (!vm->is_ffa_partition) {
		return ret;
	}

	/* The package must be loaded at a page-aligned, non-zero address. */
	TRY(read_uint64(node, "load_address", &load_address));
	if (!is_aligned(load_address, PAGE_SIZE)) {
		return MANIFEST_ERROR_NOT_COMPATIBLE;
	}

	assert(load_address != 0U);

	if (!sp_pkg_init(stage1_locked, pa_init(load_address), &header,
			 ppool)) {
		return ret;
	}

	/* Set before any `goto out` so sp_pkg_deinit gets a valid start. */
	pkg_start = va_init(load_address);

	/*
	 * For secondary VMs/SPs, the package contents must fit within the
	 * memory size declared in the manifest.
	 */
	if (vm_id != HF_PRIMARY_VM_ID &&
	    sp_pkg_get_mem_size(&header) >= vm->secondary.mem_size) {
		dlog_error("Invalid package header or DT size.\n");
		goto out;
	}

	/* The partition manifest FDT lives at pm_offset within the package. */
	manifest_address = va_add(va_init(load_address), header.pm_offset);
	if (!fdt_init_from_ptr(&sp_fdt, ptr_from_va(manifest_address),
			       header.pm_size)) {
		dlog_error("manifest.c: FDT failed validation.\n");
		goto out;
	}

	vm->partition.load_addr = load_address;

	ret = parse_ffa_manifest(&sp_fdt, vm, &boot_info_node, boot_params);
	if (ret != MANIFEST_SUCCESS) {
		dlog_error("Error parsing partition manifest.\n");
		goto out;
	}

	/*
	 * If the partition requested boot information via a GP register,
	 * process the boot information node (v2 packages only). A failure
	 * here is logged but deliberately does not fail the whole parse
	 * (`ret` stays MANIFEST_SUCCESS) — NOTE(review): confirm this
	 * best-effort behaviour is intentional.
	 */
	if (vm->partition.gp_register_num != DEFAULT_BOOT_GP_REGISTER) {
		if (header.version == SP_PKG_HEADER_VERSION_2 &&
		    vm->partition.boot_info &&
		    !ffa_boot_info_node(&boot_info_node, pkg_start, &header)) {
			dlog_error("Failed to process boot information.\n");
		}
	}
out:
	sp_pkg_deinit(stage1_locked, pkg_start, &header, ppool);
	return ret;
}
1561
/**
 * Parse the hypervisor/SPMC manifest from FDT.
 *
 * Validates the boot parameters' memory ranges, records the SPMC's own
 * load range so partitions cannot overlap it, then walks the
 * "/hypervisor" node's VM children: each VM is parsed either as an FF-A
 * partition package (parse_ffa_partition_package) or as a legacy VM node
 * (parse_vm). On success `*manifest_ret` points at the parsed manifest
 * held in the manifest data allocation.
 *
 * Returns MANIFEST_SUCCESS, or the first error encountered. Panics if
 * the manifest data allocation itself fails.
 */
enum manifest_return_code manifest_init(struct mm_stage1_locked stage1_locked,
					struct manifest **manifest_ret,
					struct memiter *manifest_fdt,
					struct boot_params *boot_params,
					struct mpool *ppool)
{
	struct manifest *manifest;
	struct string vm_name;
	struct fdt fdt;
	struct fdt_node hyp_node;
	size_t i = 0;
	bool found_primary_vm = false;
	/* Size of the SPMC image itself, rounded up to whole pages. */
	const size_t spmc_size =
		align_up(pa_difference(layout_text_begin(), layout_image_end()),
			 PAGE_SIZE);
	const size_t spmc_page_count = spmc_size / PAGE_SIZE;

	/* At least one memory range (secure or non-secure) is required. */
	if (boot_params->mem_ranges_count == 0 &&
	    boot_params->ns_mem_ranges_count == 0) {
		return MANIFEST_ERROR_MEMORY_MISSING;
	}

	dump_memory_ranges(boot_params->mem_ranges,
			   boot_params->mem_ranges_count, false);
	dump_memory_ranges(boot_params->ns_mem_ranges,
			   boot_params->ns_mem_ranges_count, true);

	/* Allocate space in the ppool for the manifest data. */
	if (!manifest_data_init(ppool)) {
		panic("Unable to allocate manifest data.\n");
	}

	/*
	 * Add SPMC load address range to memory ranges to track to ensure
	 * no partitions overlap with this memory.
	 * The system integrator should have prevented this by defining the
	 * secure memory region ranges so as not to overlap the SPMC load
	 * address range. Therefore, this code is intended to catch any
	 * potential misconfigurations there.
	 */
	if (is_aligned(pa_addr(layout_text_begin()), PAGE_SIZE) &&
	    spmc_page_count != 0) {
		TRY(check_and_record_memory_used(
			pa_addr(layout_text_begin()), spmc_page_count,
			manifest_data->mem_regions,
			&manifest_data->mem_regions_index));
	}

	manifest = &manifest_data->manifest;
	*manifest_ret = manifest;

	if (!fdt_init_from_memiter(&fdt, manifest_fdt)) {
		return MANIFEST_ERROR_FILE_SIZE; /* TODO */
	}

	/* Find hypervisor node. */
	if (!fdt_find_node(&fdt, "/hypervisor", &hyp_node)) {
		return MANIFEST_ERROR_NO_HYPERVISOR_FDT_NODE;
	}

	/* Check "compatible" property. */
	if (!fdt_is_compatible(&hyp_node, "hafnium,hafnium")) {
		return MANIFEST_ERROR_NOT_COMPATIBLE;
	}

	TRY(read_bool(&hyp_node, "ffa_tee_enabled",
		      &manifest->ffa_tee_enabled));

	/* Iterate over reserved VM IDs and check no such nodes exist. */
	for (i = HF_VM_ID_BASE; i < HF_VM_ID_OFFSET; i++) {
		ffa_id_t vm_id = (ffa_id_t)i - HF_VM_ID_BASE;
		struct fdt_node vm_node = hyp_node;

		generate_vm_node_name(&vm_name, vm_id);
		if (fdt_find_child(&vm_node, &vm_name)) {
			return MANIFEST_ERROR_RESERVED_VM_ID;
		}
	}

	/* Iterate over VM nodes until we find one that does not exist. */
	for (i = 0; i <= MAX_VMS; ++i) {
		ffa_id_t vm_id = HF_VM_ID_OFFSET + i;
		struct fdt_node vm_node = hyp_node;

		generate_vm_node_name(&vm_name, vm_id - HF_VM_ID_BASE);
		if (!fdt_find_child(&vm_node, &vm_name)) {
			break;
		}

		/* A node exists beyond the statically allocated VM slots. */
		if (i == MAX_VMS) {
			return MANIFEST_ERROR_TOO_MANY_VMS;
		}

		if (vm_id == HF_PRIMARY_VM_ID) {
			CHECK(found_primary_vm == false); /* sanity check */
			found_primary_vm = true;
		}

		manifest->vm_count = i + 1;

		TRY(parse_vm_common(&vm_node, &manifest->vm[i], vm_id));

		/* A hypervisor-loaded VM must also be an FF-A partition. */
		CHECK(!manifest->vm[i].is_hyp_loaded ||
		      manifest->vm[i].is_ffa_partition);

		if (manifest->vm[i].is_ffa_partition &&
		    !manifest->vm[i].is_hyp_loaded) {
			TRY(parse_ffa_partition_package(stage1_locked, &vm_node,
							&manifest->vm[i], vm_id,
							boot_params, ppool));
			size_t page_count =
				align_up(manifest->vm[i].secondary.mem_size,
					 PAGE_SIZE) /
				PAGE_SIZE;

			/*
			 * The overlap/validity checks below are skipped for
			 * the primary VM.
			 */
			if (vm_id == HF_PRIMARY_VM_ID) {
				continue;
			}

			TRY(check_partition_memory_is_valid(
				manifest->vm[i].partition.load_addr, page_count,
				0, boot_params, false));

			/*
			 * Check if memory from load-address until (load-address
			 * + memory size) has been used by other partition.
			 */
			TRY(check_and_record_memory_used(
				manifest->vm[i].partition.load_addr, page_count,
				manifest_data->mem_regions,
				&manifest_data->mem_regions_index));
		} else {
			TRY(parse_vm(&vm_node, &manifest->vm[i], vm_id));
		}
	}

	/* The primary VM is only mandatory when it lives in this world. */
	if (!found_primary_vm && vm_id_is_current_world(HF_PRIMARY_VM_ID)) {
		return MANIFEST_ERROR_NO_PRIMARY_VM;
	}

	return MANIFEST_SUCCESS;
}
1707
/**
 * Free manifest data resources, called once manifest parsing has
 * completed and VMs are loaded.
 */
void manifest_deinit(struct mpool *ppool)
{
	/*
	 * Releases the manifest data storage obtained by
	 * manifest_data_init() back into the given pool.
	 */
	manifest_data_deinit(ppool);
}
1716
David Brazdil7a462ec2019-08-15 12:27:47 +01001717const char *manifest_strerror(enum manifest_return_code ret_code)
1718{
1719 switch (ret_code) {
1720 case MANIFEST_SUCCESS:
1721 return "Success";
David Brazdila2358d42020-01-27 18:51:38 +00001722 case MANIFEST_ERROR_FILE_SIZE:
1723 return "Total size in header does not match file size";
Olivier Deprez62d99e32020-01-09 15:58:07 +01001724 case MANIFEST_ERROR_MALFORMED_DTB:
1725 return "Malformed device tree blob";
David Brazdila2358d42020-01-27 18:51:38 +00001726 case MANIFEST_ERROR_NO_ROOT_NODE:
1727 return "Could not find root node in manifest";
David Brazdil7a462ec2019-08-15 12:27:47 +01001728 case MANIFEST_ERROR_NO_HYPERVISOR_FDT_NODE:
1729 return "Could not find \"hypervisor\" node in manifest";
David Brazdil74e9c3b2019-08-28 11:09:08 +01001730 case MANIFEST_ERROR_NOT_COMPATIBLE:
1731 return "Hypervisor manifest entry not compatible with Hafnium";
David Brazdil7a462ec2019-08-15 12:27:47 +01001732 case MANIFEST_ERROR_RESERVED_VM_ID:
1733 return "Manifest defines a VM with a reserved ID";
1734 case MANIFEST_ERROR_NO_PRIMARY_VM:
1735 return "Manifest does not contain a primary VM entry";
1736 case MANIFEST_ERROR_TOO_MANY_VMS:
1737 return "Manifest specifies more VMs than Hafnium has "
1738 "statically allocated space for";
1739 case MANIFEST_ERROR_PROPERTY_NOT_FOUND:
1740 return "Property not found";
1741 case MANIFEST_ERROR_MALFORMED_STRING:
1742 return "Malformed string property";
David Brazdil0dbb41f2019-09-09 18:03:35 +01001743 case MANIFEST_ERROR_STRING_TOO_LONG:
1744 return "String too long";
David Brazdil7a462ec2019-08-15 12:27:47 +01001745 case MANIFEST_ERROR_MALFORMED_INTEGER:
1746 return "Malformed integer property";
1747 case MANIFEST_ERROR_INTEGER_OVERFLOW:
1748 return "Integer overflow";
Andrew Scullae9962e2019-10-03 16:51:16 +01001749 case MANIFEST_ERROR_MALFORMED_INTEGER_LIST:
1750 return "Malformed integer list property";
Andrew Scullb2c3a242019-11-04 13:52:36 +00001751 case MANIFEST_ERROR_MALFORMED_BOOLEAN:
1752 return "Malformed boolean property";
Manish Pandey6542f5c2020-04-27 14:37:46 +01001753 case MANIFEST_ERROR_MEMORY_REGION_NODE_EMPTY:
1754 return "Memory-region node should have at least one entry";
Manish Pandeye68e7932020-04-23 15:29:28 +01001755 case MANIFEST_ERROR_DEVICE_REGION_NODE_EMPTY:
1756 return "Device-region node should have at least one entry";
Manish Pandeyf06c9072020-09-29 15:41:58 +01001757 case MANIFEST_ERROR_RXTX_SIZE_MISMATCH:
1758 return "RX and TX buffers should be of same size";
Varun Wadekar4afbfd72022-10-13 14:30:18 +01001759 case MANIFEST_ERROR_MEM_REGION_EMPTY:
1760 return "Memory region should have at least one page";
Karl Meakinf6d49402023-04-04 18:14:26 +01001761 case MANIFEST_ERROR_BASE_ADDRESS_AND_RELATIVE_ADDRESS:
1762 return "Base and relative addresses are mutually exclusive";
Daniel Boulbya7e9e182022-06-27 14:21:01 +01001763 case MANIFEST_ERROR_MEM_REGION_OVERLAP:
1764 return "Memory region overlaps with one already allocated";
Daniel Boulbyc1a613d2022-10-18 11:26:17 +01001765 case MANIFEST_ERROR_MEM_REGION_UNALIGNED:
1766 return "Memory region is not aligned to a page boundary";
Raghu Krishnamurthy384693c2021-10-11 13:56:24 -07001767 case MANIFEST_ERROR_INVALID_MEM_PERM:
1768 return "Memory permission should be RO, RW or RX";
J-Alves35315782022-01-25 17:58:32 +00001769 case MANIFEST_ERROR_ARGUMENTS_LIST_EMPTY:
1770 return "Arguments-list node should have at least one argument";
Daniel Boulby801f8ef2022-06-27 14:21:01 +01001771 case MANIFEST_ERROR_INTERRUPT_ID_REPEATED:
1772 return "Interrupt ID already assigned to another endpoint";
Madhukar Pappireddy5c04a382022-12-28 11:29:26 -06001773 case MANIFEST_ERROR_ILLEGAL_NS_INT_ACTION:
Madhukar Pappireddy84154052022-06-21 18:30:25 -05001774 return "Illegal value specidied for the field: Action in "
1775 "response to NS Interrupt";
Raghu Krishnamurthy98da1ca2022-10-04 08:59:01 -07001776 case MANIFEST_ERROR_INTERRUPT_ID_NOT_IN_LIST:
1777 return "Interrupt ID is not in the list of interrupts";
Madhukar Pappireddy5c04a382022-12-28 11:29:26 -06001778 case MANIFEST_ERROR_ILLEGAL_OTHER_S_INT_ACTION:
1779 return "Illegal value specified for the field: Action in "
1780 "response to Other-S Interrupt";
J-Alves77b6f4f2023-03-15 11:34:49 +00001781 case MANIFEST_ERROR_MEMORY_MISSING:
1782 return "Memory nodes must be defined in the SPMC manifest "
Olivier Deprez3cbdf862023-06-07 15:36:32 +02001783 "('memory' and 'ns-memory')";
J-Alves77b6f4f2023-03-15 11:34:49 +00001784 case MANIFEST_ERROR_PARTITION_ADDRESS_OVERLAP:
1785 return "Partition's memory [load address: load address + "
1786 "memory size[ overlap with other allocated "
Olivier Deprez3cbdf862023-06-07 15:36:32 +02001787 "regions";
J-Alves77b6f4f2023-03-15 11:34:49 +00001788 case MANIFEST_ERROR_MEM_REGION_INVALID:
Olivier Deprez3cbdf862023-06-07 15:36:32 +02001789 return "Invalid memory region range";
Daniel Boulby4339edc2024-02-21 14:59:00 +00001790 case MANIFEST_ERROR_DEVICE_MEM_REGION_INVALID:
1791 return "Invalid device memory region range";
Kathleen Capella4a2a6e72023-04-21 14:43:26 -04001792 case MANIFEST_ERROR_INVALID_BOOT_ORDER:
1793 return "Boot order should be a unique value less than "
1794 "default largest value";
Kathleen Capella422b10b2023-06-30 18:28:27 -04001795 case MANIFEST_ERROR_UUID_ALL_ZEROS:
1796 return "UUID should not be NIL";
Karl Meakin45abeeb2024-08-02 16:55:44 +01001797 case MANIFEST_ERROR_TOO_MANY_UUIDS:
1798 return "Manifest specifies more UUIDs than Hafnium has "
1799 "statically allocated space for";
Madhukar Pappireddy3c2b7912023-10-11 14:47:27 -05001800 case MANIFEST_ERROR_MISSING_SMMU_ID:
1801 return "SMMU ID must be specified for the given Stream IDs";
1802 case MANIFEST_ERROR_MISMATCH_DMA_ACCESS_PERMISSIONS:
1803 return "DMA device access permissions must match memory region "
1804 "attributes";
1805 case MANIFEST_ERROR_STREAM_IDS_OVERFLOW:
1806 return "DMA device stream ID count exceeds predefined limit";
1807 case MANIFEST_ERROR_DMA_ACCESS_PERMISSIONS_OVERFLOW:
1808 return "DMA access permissions count exceeds predefined limit";
Madhukar Pappireddy8a364472023-10-11 15:34:07 -05001809 case MANIFEST_ERROR_DMA_DEVICE_OVERFLOW:
1810 return "Number of device regions with DMA peripheral exceeds "
1811 "limit.";
Karl Meakin18694022024-08-02 13:59:25 +01001812 case MANIFEST_ERROR_VM_AVAILABILITY_MESSAGE_INVALID:
1813 return "VM availability messages invalid (bits [31:2] must be "
1814 "zero)";
David Brazdil7a462ec2019-08-15 12:27:47 +01001815 }
1816
1817 panic("Unexpected manifest return code.");
1818}