/*
 * Copyright 2019 The Hafnium Authors.
 *
 * Use of this source code is governed by a BSD-style
 * license that can be found in the LICENSE file or at
 * https://opensource.org/licenses/BSD-3-Clause.
 */

#include "hf/manifest.h"

#include <stddef.h>
#include <stdint.h>

#include "hf/arch/types.h"
#include "hf/arch/vmid_base.h"

#include "hf/addr.h"
#include "hf/assert.h"
#include "hf/boot_info.h"
#include "hf/boot_params.h"
#include "hf/check.h"
#include "hf/dlog.h"
#include "hf/fdt.h"
#include "hf/mm.h"
#include "hf/mpool.h"
#include "hf/sp_pkg.h"
#include "hf/static_assert.h"
#include "hf/std.h"

#define TRY(expr) \
        do { \
                enum manifest_return_code ret_code = (expr); \
                if (ret_code != MANIFEST_SUCCESS) { \
                        return ret_code; \
                } \
        } while (0)
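/*
 * Illustrative use of TRY() (editorial example, not part of the original
 * file): any parser helper returning enum manifest_return_code can be
 * chained so that the first failure propagates to the caller, e.g.
 *
 *	static enum manifest_return_code parse_example(
 *		const struct fdt_node *node, uint64_t *size)
 *	{
 *		TRY(read_uint64(node, "mem_size", size));
 *		return MANIFEST_SUCCESS;
 *	}
 *
 * The do/while (0) wrapper keeps the macro usable as a single statement.
 */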

#define VM_ID_MAX (HF_VM_ID_OFFSET + MAX_VMS - 1)
#define VM_ID_MAX_DIGITS (5)
#define VM_NAME_EXTRA_CHARS (3) /* "vm" + number + '\0' */
#define VM_NAME_MAX_SIZE (VM_ID_MAX_DIGITS + VM_NAME_EXTRA_CHARS)
static_assert(VM_NAME_MAX_SIZE <= STRING_MAX_SIZE,
              "VM name does not fit into a struct string.");
static_assert(VM_ID_MAX <= 99999, "Insufficient VM_NAME_BUF_SIZE");
static_assert((HF_OTHER_WORLD_ID > VM_ID_MAX) ||
                      (HF_OTHER_WORLD_ID < HF_VM_ID_BASE),
              "TrustZone VM ID clashes with normal VM range.");

/* Bitmap to track boot order values in use. */
#define BOOT_ORDER_ENTRY_BITS (sizeof(uint64_t) * 8)
#define BOOT_ORDER_MAP_ENTRIES \
        ((DEFAULT_BOOT_ORDER + (BOOT_ORDER_ENTRY_BITS - 1)) / \
         BOOT_ORDER_ENTRY_BITS)
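/*
 * Worked example (editorial note): with 64 bits per map entry, a boot order
 * value of 130 maps to entry 130 / 64 = 2 and bit 130 % 64 = 2 within that
 * entry. DEFAULT_BOOT_ORDER itself is never recorded in the bitmap; it marks
 * "no boot order specified".
 */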

/**
 * A struct to keep track of the partitions' properties during early boot
 * manifest parsing:
 * - Interrupt IDs.
 * - Physical memory ranges.
 */
struct manifest_data {
        struct manifest manifest;
        struct interrupt_bitmap intids;
        /*
         * Allocate enough for the maximum amount of memory regions defined
         * via the partitions' manifests, and regions for each partition
         * address-space.
         */
        struct mem_range
                mem_regions[PARTITION_MAX_MEMORY_REGIONS * MAX_VMS + MAX_VMS];
        uint64_t boot_order_values[BOOT_ORDER_MAP_ENTRIES];
};

/**
 * Calculate the number of entries in the ppool that are required to
 * store the manifest_data struct.
 */
static const size_t manifest_data_ppool_entries =
        (align_up(sizeof(struct manifest_data), MM_PPOOL_ENTRY_SIZE) /
         MM_PPOOL_ENTRY_SIZE);
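/*
 * Editorial example of the arithmetic above (the concrete sizes depend on the
 * build configuration): if sizeof(struct manifest_data) were 9000 bytes and
 * MM_PPOOL_ENTRY_SIZE were 4096, align_up() would round up to 12288 bytes and
 * the expression would yield 3 pool entries.
 */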

static struct manifest_data *manifest_data;
/* Index used to track the number of memory regions allocated. */
static size_t allocated_mem_regions_index = 0;

static bool check_boot_order(uint16_t boot_order)
{
        uint16_t i;
        uint64_t boot_order_mask;

        if (boot_order == DEFAULT_BOOT_ORDER) {
                return true;
        }
        if (boot_order > DEFAULT_BOOT_ORDER) {
                dlog_error("Boot order should not exceed %x\n",
                           DEFAULT_BOOT_ORDER);
                return false;
        }

        i = boot_order / BOOT_ORDER_ENTRY_BITS;
        boot_order_mask = UINT64_C(1) << (boot_order % BOOT_ORDER_ENTRY_BITS);

        if ((boot_order_mask & manifest_data->boot_order_values[i]) != 0U) {
                dlog_error("Boot order must be a unique value.\n");
                return false;
        }

        manifest_data->boot_order_values[i] |= boot_order_mask;

        return true;
}

/**
 * Allocates and clears memory for the manifest data in the given memory pool.
 * Returns true if the memory is successfully allocated.
 */
static bool manifest_data_init(struct mpool *ppool)
{
        manifest_data = (struct manifest_data *)mpool_alloc_contiguous(
                ppool, manifest_data_ppool_entries, 1);

        assert(manifest_data != NULL);

        memset_s(manifest_data, sizeof(struct manifest_data), 0,
                 sizeof(struct manifest_data));

        return manifest_data != NULL;
}

/**
 * Frees the memory used for the manifest data in the given memory pool.
 */
static void manifest_data_deinit(struct mpool *ppool)
{
        /*
         * Clear and return the memory used for the manifest_data struct to
         * the memory pool.
         */
        memset_s(manifest_data, sizeof(struct manifest_data), 0,
                 sizeof(struct manifest_data));
        mpool_add_chunk(ppool, manifest_data, manifest_data_ppool_entries);

        /*
         * Reset the index used for tracking the number of memory regions
         * allocated.
         */
        allocated_mem_regions_index = 0;
}

static inline size_t count_digits(ffa_id_t vm_id)
{
        size_t digits = 0;

        do {
                digits++;
                vm_id /= 10;
        } while (vm_id);
        return digits;
}

/**
 * Generates a string with the two letters "vm" followed by an integer.
 * Assumes `str` provides at least VM_NAME_MAX_SIZE bytes of storage.
 */
static void generate_vm_node_name(struct string *str, ffa_id_t vm_id)
{
        static const char *digits = "0123456789";
        size_t vm_id_digits = count_digits(vm_id);
        char *base = str->data;
        char *ptr = base + (VM_NAME_EXTRA_CHARS + vm_id_digits);

        assert(vm_id_digits <= VM_ID_MAX_DIGITS);
        *(--ptr) = '\0';
        do {
                *(--ptr) = digits[vm_id % 10];
                vm_id /= 10;
        } while (vm_id);
        *(--ptr) = 'm';
        *(--ptr) = 'v';
        assert(ptr == base);
}
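/*
 * Editorial example: for vm_id 5 this produces "vm5" and for vm_id 42 it
 * produces "vm42"; the digits are written back to front, starting from the
 * terminating NUL.
 */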

/**
 * Read a boolean property: true if present; false if not. If present, the
 * value of the property must be empty, otherwise it is considered malformed.
 */
static enum manifest_return_code read_bool(const struct fdt_node *node,
                                           const char *property, bool *out)
{
        struct memiter data;
        bool present = fdt_read_property(node, property, &data);

        if (present && memiter_size(&data) != 0) {
                return MANIFEST_ERROR_MALFORMED_BOOLEAN;
        }

        *out = present;
        return MANIFEST_SUCCESS;
}
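/*
 * Editorial example of the boolean convention in a manifest node
 * (illustrative fragment, not taken from a real manifest):
 *
 *	vm1 {
 *		is_ffa_partition;	// read_bool() -> true
 *	};
 *
 * A present-but-non-empty property such as `is_ffa_partition = <1>;` would
 * instead be rejected as MANIFEST_ERROR_MALFORMED_BOOLEAN.
 */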

static enum manifest_return_code read_string(const struct fdt_node *node,
                                             const char *property,
                                             struct string *out)
{
        struct memiter data;

        if (!fdt_read_property(node, property, &data)) {
                return MANIFEST_ERROR_PROPERTY_NOT_FOUND;
        }

        switch (string_init(out, &data)) {
        case STRING_SUCCESS:
                return MANIFEST_SUCCESS;
        case STRING_ERROR_INVALID_INPUT:
                return MANIFEST_ERROR_MALFORMED_STRING;
        case STRING_ERROR_TOO_LONG:
                return MANIFEST_ERROR_STRING_TOO_LONG;
        }
}

static enum manifest_return_code read_optional_string(
        const struct fdt_node *node, const char *property, struct string *out)
{
        enum manifest_return_code ret;

        ret = read_string(node, property, out);
        if (ret == MANIFEST_ERROR_PROPERTY_NOT_FOUND) {
                string_init_empty(out);
                ret = MANIFEST_SUCCESS;
        }
        return ret;
}

static enum manifest_return_code read_uint64(const struct fdt_node *node,
                                             const char *property,
                                             uint64_t *out)
{
        struct memiter data;

        if (!fdt_read_property(node, property, &data)) {
                return MANIFEST_ERROR_PROPERTY_NOT_FOUND;
        }

        if (!fdt_parse_number(&data, memiter_size(&data), out)) {
                return MANIFEST_ERROR_MALFORMED_INTEGER;
        }

        return MANIFEST_SUCCESS;
}

static enum manifest_return_code read_optional_uint64(
        const struct fdt_node *node, const char *property,
        uint64_t default_value, uint64_t *out)
{
        enum manifest_return_code ret;

        ret = read_uint64(node, property, out);
        if (ret == MANIFEST_ERROR_PROPERTY_NOT_FOUND) {
                *out = default_value;
                return MANIFEST_SUCCESS;
        }
        return ret;
}

static enum manifest_return_code read_uint32(const struct fdt_node *node,
                                             const char *property,
                                             uint32_t *out)
{
        uint64_t value;

        TRY(read_uint64(node, property, &value));

        if (value > UINT32_MAX) {
                return MANIFEST_ERROR_INTEGER_OVERFLOW;
        }

        *out = (uint32_t)value;
        return MANIFEST_SUCCESS;
}

static enum manifest_return_code read_optional_uint32(
        const struct fdt_node *node, const char *property,
        uint32_t default_value, uint32_t *out)
{
        enum manifest_return_code ret;

        ret = read_uint32(node, property, out);
        if (ret == MANIFEST_ERROR_PROPERTY_NOT_FOUND) {
                *out = default_value;
                return MANIFEST_SUCCESS;
        }
        return ret;
}

static enum manifest_return_code read_uint16(const struct fdt_node *node,
                                             const char *property,
                                             uint16_t *out)
{
        uint64_t value;

        TRY(read_uint64(node, property, &value));
        if (value > UINT16_MAX) {
                return MANIFEST_ERROR_INTEGER_OVERFLOW;
        }

        *out = (uint16_t)value;
        return MANIFEST_SUCCESS;
}

static enum manifest_return_code read_optional_uint16(
        const struct fdt_node *node, const char *property,
        uint16_t default_value, uint16_t *out)
{
        enum manifest_return_code ret;

        ret = read_uint16(node, property, out);
        if (ret == MANIFEST_ERROR_PROPERTY_NOT_FOUND) {
                *out = default_value;
                return MANIFEST_SUCCESS;
        }

        return ret;
}

static enum manifest_return_code read_uint8(const struct fdt_node *node,
                                            const char *property, uint8_t *out)
{
        uint64_t value;

        TRY(read_uint64(node, property, &value));

        if (value > UINT8_MAX) {
                return MANIFEST_ERROR_INTEGER_OVERFLOW;
        }

        *out = (uint8_t)value;
        return MANIFEST_SUCCESS;
}

static enum manifest_return_code read_optional_uint8(
        const struct fdt_node *node, const char *property,
        uint8_t default_value, uint8_t *out)
{
        enum manifest_return_code ret;

        ret = read_uint8(node, property, out);
        if (ret == MANIFEST_ERROR_PROPERTY_NOT_FOUND) {
                *out = default_value;
                return MANIFEST_SUCCESS;
        }

        return ret;
}

struct uint32list_iter {
        struct memiter mem_it;
};

static enum manifest_return_code read_uint32list(const struct fdt_node *node,
                                                 const char *property,
                                                 struct uint32list_iter *out)
{
        struct memiter data;

        if (!fdt_read_property(node, property, &data)) {
                memiter_init(&out->mem_it, NULL, 0);
                return MANIFEST_ERROR_PROPERTY_NOT_FOUND;
        }

        if ((memiter_size(&data) % sizeof(uint32_t)) != 0) {
                return MANIFEST_ERROR_MALFORMED_INTEGER_LIST;
        }

        out->mem_it = data;
        return MANIFEST_SUCCESS;
}

static enum manifest_return_code read_optional_uint32list(
        const struct fdt_node *node, const char *property,
        struct uint32list_iter *out)
{
        enum manifest_return_code ret = read_uint32list(node, property, out);

        if (ret == MANIFEST_ERROR_PROPERTY_NOT_FOUND) {
                return MANIFEST_SUCCESS;
        }
        return ret;
}

static bool uint32list_has_next(const struct uint32list_iter *list)
{
        return memiter_size(&list->mem_it) > 0;
}

static enum manifest_return_code uint32list_get_next(
        struct uint32list_iter *list, uint32_t *out)
{
        uint64_t num;

        CHECK(uint32list_has_next(list));
        if (!fdt_parse_number(&list->mem_it, sizeof(uint32_t), &num)) {
                return MANIFEST_ERROR_MALFORMED_INTEGER;
        }

        *out = (uint32_t)num;
        return MANIFEST_SUCCESS;
}
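/*
 * Editorial sketch of the iteration pattern used by the callers below
 * (assumes a node carrying a cell list; "some-cells" is a placeholder
 * property name, not one defined by the FF-A manifest bindings):
 *
 *	struct uint32list_iter it;
 *	uint32_t word;
 *
 *	TRY(read_optional_uint32list(node, "some-cells", &it));
 *	while (uint32list_has_next(&it)) {
 *		TRY(uint32list_get_next(&it, &word));
 *		// consume `word`
 *	}
 */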

static enum manifest_return_code parse_vm_common(const struct fdt_node *node,
                                                 struct manifest_vm *vm,
                                                 ffa_id_t vm_id)
{
        struct uint32list_iter smcs;
        size_t idx;

        TRY(read_bool(node, "is_ffa_partition", &vm->is_ffa_partition));

        TRY(read_bool(node, "hyp_loaded", &vm->is_hyp_loaded));

        TRY(read_string(node, "debug_name", &vm->debug_name));

        TRY(read_optional_uint32list(node, "smc_whitelist", &smcs));
        while (uint32list_has_next(&smcs) &&
               vm->smc_whitelist.smc_count < MAX_SMCS) {
                idx = vm->smc_whitelist.smc_count++;
                TRY(uint32list_get_next(&smcs, &vm->smc_whitelist.smcs[idx]));
        }

        if (uint32list_has_next(&smcs)) {
                dlog_warning("%s SMC whitelist too long.\n", vm->debug_name);
        }

        TRY(read_bool(node, "smc_whitelist_permissive",
                      &vm->smc_whitelist.permissive));

        if (vm_id != HF_PRIMARY_VM_ID) {
                TRY(read_uint64(node, "mem_size", &vm->secondary.mem_size));
                TRY(read_uint16(node, "vcpu_count", &vm->secondary.vcpu_count));
                TRY(read_optional_string(node, "fdt_filename",
                                         &vm->secondary.fdt_filename));
        }

        return MANIFEST_SUCCESS;
}

static enum manifest_return_code parse_vm(struct fdt_node *node,
                                          struct manifest_vm *vm,
                                          ffa_id_t vm_id)
{
        TRY(read_optional_string(node, "kernel_filename",
                                 &vm->kernel_filename));

        if (vm_id == HF_PRIMARY_VM_ID) {
                TRY(read_optional_string(node, "ramdisk_filename",
                                         &vm->primary.ramdisk_filename));
                TRY(read_optional_uint64(node, "boot_address",
                                         MANIFEST_INVALID_ADDRESS,
                                         &vm->primary.boot_address));
        }
        TRY(read_optional_uint8(node, "exception-level", (uint8_t)EL1,
                                (uint8_t *)&vm->partition.run_time_el));

        return MANIFEST_SUCCESS;
}

static bool is_memory_region_within_ranges(uintptr_t base_address,
                                           uint32_t page_count,
                                           const struct mem_range *ranges,
                                           const size_t ranges_size)
{
        uintptr_t region_end =
                base_address + ((uintptr_t)page_count * PAGE_SIZE - 1);

        for (size_t i = 0; i < ranges_size; i++) {
                uintptr_t base = (uintptr_t)pa_addr(ranges[i].begin);
                uintptr_t end = (uintptr_t)pa_addr(ranges[i].end);

                if ((base_address >= base && base_address <= end) ||
                    (region_end >= base && region_end <= end)) {
                        return true;
                }
        }

        return false;
}

void dump_memory_ranges(const struct mem_range *ranges,
                        const size_t ranges_size, bool ns)
{
        if (LOG_LEVEL < LOG_LEVEL_VERBOSE) {
                return;
        }

        dlog("%s Memory ranges:\n", ns ? "NS" : "S");

        for (size_t i = 0; i < ranges_size; i++) {
                uintptr_t begin = pa_addr(ranges[i].begin);
                uintptr_t end = pa_addr(ranges[i].end);
                size_t page_count =
                        align_up(pa_difference(ranges[i].begin, ranges[i].end),
                                 PAGE_SIZE) /
                        PAGE_SIZE;

                dlog(" [%x - %x (%u pages)]\n", begin, end, page_count);
        }
}

/**
 * Check that the partition's assigned memory is contained in the memory
 * ranges configured for the SWd in the SPMC's manifest.
 */
static enum manifest_return_code check_partition_memory_is_valid(
        uintptr_t base_address, uint32_t page_count, uint32_t attributes,
        const struct boot_params *params)
{
        bool is_secure_region =
                (attributes & MANIFEST_REGION_ATTR_SECURITY) == 0U;
        const struct mem_range *ranges_from_manifest =
                is_secure_region ? params->mem_ranges : params->ns_mem_ranges;
        size_t ranges_count = is_secure_region ? params->mem_ranges_count
                                               : params->ns_mem_ranges_count;
        bool within_ranges = is_memory_region_within_ranges(
                base_address, page_count, ranges_from_manifest, ranges_count);

        return within_ranges ? MANIFEST_SUCCESS
                             : MANIFEST_ERROR_MEM_REGION_INVALID;
}

/*
 * Keep track of the memory allocated by partitions. This includes memory
 * region nodes defined in their respective partition manifests, as well as
 * the address space defined by their load address.
 */
static enum manifest_return_code check_and_record_memory_used(
        uintptr_t base_address, uint32_t page_count)
{
        bool overlap_of_regions;

        if (page_count == 0U) {
                dlog_error(
                        "Empty memory region defined with base address: %#x.\n",
                        base_address);
                return MANIFEST_ERROR_MEM_REGION_EMPTY;
        }

        if (!is_aligned(base_address, PAGE_SIZE)) {
                dlog_error("base_address (%#x) is not aligned to page size.\n",
                           base_address);
                return MANIFEST_ERROR_MEM_REGION_UNALIGNED;
        }

        overlap_of_regions = is_memory_region_within_ranges(
                base_address, page_count, manifest_data->mem_regions,
                allocated_mem_regions_index);

        if (!overlap_of_regions) {
                paddr_t begin = pa_init(base_address);

                manifest_data->mem_regions[allocated_mem_regions_index].begin =
                        begin;
                manifest_data->mem_regions[allocated_mem_regions_index].end =
                        pa_add(begin, page_count * PAGE_SIZE - 1);
                allocated_mem_regions_index++;

                return MANIFEST_SUCCESS;
        }

        return MANIFEST_ERROR_MEM_REGION_OVERLAP;
}
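/*
 * Editorial example: with 4 KiB pages, recording a region at 0x8000000 with
 * page_count 2 reserves [0x8000000, 0x8001fff]; a later region at 0x8001000
 * would then be rejected with MANIFEST_ERROR_MEM_REGION_OVERLAP because its
 * first page falls inside the recorded range.
 */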

static enum manifest_return_code parse_ffa_memory_region_node(
        struct fdt_node *mem_node, uintptr_t load_address,
        struct memory_region *mem_regions, uint16_t *count, struct rx_tx *rxtx,
        const struct boot_params *boot_params)
{
        uint32_t phandle;
        uint16_t i = 0;
        uintptr_t relative_address;

        dlog_verbose(" Partition memory regions\n");

        if (!fdt_is_compatible(mem_node, "arm,ffa-manifest-memory-regions")) {
                return MANIFEST_ERROR_NOT_COMPATIBLE;
        }

        if (!fdt_first_child(mem_node)) {
                return MANIFEST_ERROR_MEMORY_REGION_NODE_EMPTY;
        }

        do {
                dlog_verbose(" Memory Region[%u]\n", i);

                TRY(read_optional_string(mem_node, "description",
                                         &mem_regions[i].name));
                dlog_verbose(" Name: %s\n",
                             string_data(&mem_regions[i].name));

                TRY(read_optional_uint64(mem_node, "base-address",
                                         MANIFEST_INVALID_ADDRESS,
                                         &mem_regions[i].base_address));
                dlog_verbose(" Base address: %#x\n",
                             mem_regions[i].base_address);

                TRY(read_optional_uint64(mem_node, "relative-address",
                                         MANIFEST_INVALID_ADDRESS,
                                         &relative_address));
                if (relative_address != MANIFEST_INVALID_ADDRESS) {
                        dlog_verbose(" Relative address: %#x\n",
                                     relative_address);
                }

                if (mem_regions[i].base_address == MANIFEST_INVALID_ADDRESS &&
                    relative_address == MANIFEST_INVALID_ADDRESS) {
                        return MANIFEST_ERROR_PROPERTY_NOT_FOUND;
                }

                if (mem_regions[i].base_address != MANIFEST_INVALID_ADDRESS &&
                    relative_address != MANIFEST_INVALID_ADDRESS) {
                        return MANIFEST_ERROR_BASE_ADDRESS_AND_RELATIVE_ADDRESS;
                }

                if (relative_address != MANIFEST_INVALID_ADDRESS &&
                    relative_address > UINT64_MAX - load_address) {
                        return MANIFEST_ERROR_INTEGER_OVERFLOW;
                }

                if (relative_address != MANIFEST_INVALID_ADDRESS) {
                        mem_regions[i].base_address =
                                load_address + relative_address;
                }

                TRY(read_uint32(mem_node, "pages-count",
                                &mem_regions[i].page_count));
                dlog_verbose(" Pages_count: %u\n",
                             mem_regions[i].page_count);

                TRY(read_uint32(mem_node, "attributes",
                                &mem_regions[i].attributes));

                /*
                 * Check RWX permission attributes.
                 * Security attribute is checked at load phase.
                 */
                uint32_t permissions = mem_regions[i].attributes &
                                       (MANIFEST_REGION_ATTR_READ |
                                        MANIFEST_REGION_ATTR_WRITE |
                                        MANIFEST_REGION_ATTR_EXEC);
                if (permissions != MANIFEST_REGION_ATTR_READ &&
                    permissions != (MANIFEST_REGION_ATTR_READ |
                                    MANIFEST_REGION_ATTR_WRITE) &&
                    permissions != (MANIFEST_REGION_ATTR_READ |
                                    MANIFEST_REGION_ATTR_EXEC)) {
                        return MANIFEST_ERROR_INVALID_MEM_PERM;
                }

                /* Filter memory region attributes. */
                mem_regions[i].attributes &= MANIFEST_REGION_ALL_ATTR_MASK;

                dlog_verbose(" Attributes: %#x\n",
                             mem_regions[i].attributes);

                TRY(check_partition_memory_is_valid(
                        mem_regions[i].base_address, mem_regions[i].page_count,
                        mem_regions[i].attributes, boot_params));

                TRY(check_and_record_memory_used(mem_regions[i].base_address,
                                                 mem_regions[i].page_count));

                if (rxtx->available) {
                        TRY(read_optional_uint32(
                                mem_node, "phandle",
                                (uint32_t)MANIFEST_INVALID_ADDRESS, &phandle));
                        if (phandle == rxtx->rx_phandle) {
                                dlog_verbose(" Assigned as RX buffer\n");
                                rxtx->rx_buffer = &mem_regions[i];
                        } else if (phandle == rxtx->tx_phandle) {
                                dlog_verbose(" Assigned as TX buffer\n");
                                rxtx->tx_buffer = &mem_regions[i];
                        }
                }

                i++;
        } while (fdt_next_sibling(mem_node) &&
                 (i < PARTITION_MAX_MEMORY_REGIONS));

        if (rxtx->available &&
            (rxtx->rx_buffer->page_count != rxtx->tx_buffer->page_count)) {
                return MANIFEST_ERROR_RXTX_SIZE_MISMATCH;
        }

        *count = i;

        return MANIFEST_SUCCESS;
}
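/*
 * Editorial note: an illustrative (not normative) memory-regions node of the
 * shape this parser accepts; all property values are placeholders.
 *
 *	memory-regions {
 *		compatible = "arm,ffa-manifest-memory-regions";
 *		rx-buffer {
 *			description = "rx-buffer";
 *			base-address = <0x00000000 0x7300000>;
 *			pages-count = <1>;
 *			attributes = <0x1>;	// placeholder
 *		};
 *	};
 */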

static struct interrupt_info *device_region_get_interrupt_info(
        struct device_region *dev_regions, uint32_t intid)
{
        for (uint32_t i = 0; i < ARRAY_SIZE(dev_regions->interrupts); i++) {
                if (dev_regions->interrupts[i].id == intid) {
                        return &(dev_regions->interrupts[i]);
                }
        }
        return NULL;
}

static enum manifest_return_code parse_ffa_device_region_node(
        struct fdt_node *dev_node, struct device_region *dev_regions,
        uint16_t *count)
{
        struct uint32list_iter list;
        uint16_t i = 0;
        uint32_t j = 0;
        struct interrupt_bitmap allocated_intids = manifest_data->intids;

        dlog_verbose(" Partition Device Regions\n");

        if (!fdt_is_compatible(dev_node, "arm,ffa-manifest-device-regions")) {
                return MANIFEST_ERROR_NOT_COMPATIBLE;
        }

        if (!fdt_first_child(dev_node)) {
                return MANIFEST_ERROR_DEVICE_REGION_NODE_EMPTY;
        }

        do {
                dlog_verbose(" Device Region[%u]\n", i);

                TRY(read_optional_string(dev_node, "description",
                                         &dev_regions[i].name));
                dlog_verbose(" Name: %s\n",
                             string_data(&dev_regions[i].name));

                TRY(read_uint64(dev_node, "base-address",
                                &dev_regions[i].base_address));
                dlog_verbose(" Base address: %#x\n",
                             dev_regions[i].base_address);

                TRY(read_uint32(dev_node, "pages-count",
                                &dev_regions[i].page_count));
                dlog_verbose(" Pages_count: %u\n",
                             dev_regions[i].page_count);

                TRY(read_uint32(dev_node, "attributes",
                                &dev_regions[i].attributes));

                /*
                 * Check RWX permission attributes.
                 * Security attribute is checked at load phase.
                 */
                uint32_t permissions = dev_regions[i].attributes &
                                       (MANIFEST_REGION_ATTR_READ |
                                        MANIFEST_REGION_ATTR_WRITE |
                                        MANIFEST_REGION_ATTR_EXEC);

                if (permissions != MANIFEST_REGION_ATTR_READ &&
                    permissions != (MANIFEST_REGION_ATTR_READ |
                                    MANIFEST_REGION_ATTR_WRITE)) {
                        return MANIFEST_ERROR_INVALID_MEM_PERM;
                }

                /* Filter device region attributes. */
                dev_regions[i].attributes &= MANIFEST_REGION_ALL_ATTR_MASK;

                dlog_verbose(" Attributes: %#x\n",
                             dev_regions[i].attributes);

                TRY(read_optional_uint32list(dev_node, "interrupts", &list));
                dlog_verbose(" Interrupt List:\n");
                j = 0;
                while (uint32list_has_next(&list) &&
                       j < PARTITION_MAX_INTERRUPTS_PER_DEVICE) {
                        uint32_t intid;

                        TRY(uint32list_get_next(
                                &list, &dev_regions[i].interrupts[j].id));
                        intid = dev_regions[i].interrupts[j].id;

                        dlog_verbose(" ID = %u\n", intid);

                        if (interrupt_bitmap_get_value(&allocated_intids,
                                                       intid) == 1U) {
                                return MANIFEST_ERROR_INTERRUPT_ID_REPEATED;
                        }

                        interrupt_bitmap_set_value(&allocated_intids, intid);

                        if (uint32list_has_next(&list)) {
                                TRY(uint32list_get_next(&list,
                                                        &dev_regions[i]
                                                        .interrupts[j]
                                                        .attributes));
                        } else {
                                return MANIFEST_ERROR_MALFORMED_INTEGER_LIST;
                        }

                        dev_regions[i].interrupts[j].mpidr_valid = false;
                        dev_regions[i].interrupts[j].mpidr = 0;

                        dlog_verbose(" attributes = %u\n",
                                     dev_regions[i].interrupts[j].attributes);
                        j++;
                }

                dev_regions[i].interrupt_count = j;
                if (j == 0) {
                        dlog_verbose(" Empty\n");
                } else {
                        TRY(read_optional_uint32list(
                                dev_node, "interrupts-target", &list));
                        dlog_verbose(" Interrupt Target List:\n");

                        while (uint32list_has_next(&list)) {
                                uint32_t intid;
                                uint64_t mpidr = 0;
                                uint32_t mpidr_lower = 0;
                                uint32_t mpidr_upper = 0;
                                struct interrupt_info *info = NULL;

                                TRY(uint32list_get_next(&list, &intid));

                                dlog_verbose(" ID = %u\n", intid);

                                if (interrupt_bitmap_get_value(
                                            &allocated_intids, intid) != 1U) {
                                        return MANIFEST_ERROR_INTERRUPT_ID_NOT_IN_LIST;
                                }

                                TRY(uint32list_get_next(&list, &mpidr_upper));
                                TRY(uint32list_get_next(&list, &mpidr_lower));
                                mpidr = mpidr_upper;
                                mpidr <<= 32;
                                mpidr |= mpidr_lower;

                                info = device_region_get_interrupt_info(
                                        &dev_regions[i], intid);
                                /*
                                 * We should find info since
                                 * interrupt_bitmap_get_value already ensures
                                 * that we saw the interrupt and allocated ids
                                 * for it.
                                 */
                                assert(info != NULL);
                                info->mpidr = mpidr;
                                info->mpidr_valid = true;
                                dlog_verbose(" MPIDR = %#x\n", mpidr);
                        }
                }

                TRY(read_optional_uint32(dev_node, "smmu-id",
                                         MANIFEST_INVALID_ID,
                                         &dev_regions[i].smmu_id));
                if (dev_regions[i].smmu_id != MANIFEST_INVALID_ID) {
                        dlog_verbose(" smmu-id: %u\n",
                                     dev_regions[i].smmu_id);
                }

                TRY(read_optional_uint32list(dev_node, "stream-ids", &list));
                dlog_verbose(" Stream IDs assigned:\n");

                j = 0;
                while (uint32list_has_next(&list) &&
                       j < PARTITION_MAX_STREAMS_PER_DEVICE) {
                        TRY(uint32list_get_next(&list,
                                                &dev_regions[i].stream_ids[j]));
                        dlog_verbose(" %u\n",
                                     dev_regions[i].stream_ids[j]);
                        j++;
                }
                if (j == 0) {
                        dlog_verbose(" None\n");
                }
                dev_regions[i].stream_count = j;

                TRY(read_bool(dev_node, "exclusive-access",
                              &dev_regions[i].exclusive_access));
                dlog_verbose(" Exclusive_access: %u\n",
                             dev_regions[i].exclusive_access);

                i++;
        } while (fdt_next_sibling(dev_node) &&
                 (i < PARTITION_MAX_DEVICE_REGIONS));

        *count = i;

        return MANIFEST_SUCCESS;
}
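/*
 * Editorial note: an illustrative (not normative) device-regions node of the
 * shape this parser accepts; all values are placeholders.
 *
 *	device-regions {
 *		compatible = "arm,ffa-manifest-device-regions";
 *		uart {
 *			description = "test-uart";
 *			base-address = <0x00000000 0x1c0a0000>;
 *			pages-count = <1>;
 *			attributes = <0x3>;		// placeholder
 *			interrupts = <0x19 0x900>;	// <id attributes> pairs
 *			exclusive-access;
 *		};
 *	};
 */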

static enum manifest_return_code sanity_check_ffa_manifest(
        struct manifest_vm *vm)
{
        uint16_t ffa_version_major;
        uint16_t ffa_version_minor;
        enum manifest_return_code ret_code = MANIFEST_SUCCESS;
        const char *error_string = "specified in manifest is unsupported";
        uint32_t k = 0;
        bool using_req2 = (vm->partition.messaging_method &
                           (FFA_PARTITION_DIRECT_REQ2_RECV |
                            FFA_PARTITION_DIRECT_REQ2_SEND)) != 0;

        /* ensure that the SPM version is compatible */
        ffa_version_major = (vm->partition.ffa_version & 0xffff0000) >>
                            FFA_VERSION_MAJOR_OFFSET;
        ffa_version_minor = vm->partition.ffa_version & 0xffff;

        if (ffa_version_major != FFA_VERSION_MAJOR ||
            ffa_version_minor > FFA_VERSION_MINOR) {
                dlog_error("FF-A partition manifest version %s: %u.%u\n",
                           error_string, ffa_version_major, ffa_version_minor);
                ret_code = MANIFEST_ERROR_NOT_COMPATIBLE;
        }

        if (vm->partition.xlat_granule != PAGE_4KB) {
                dlog_error("Translation granule %s: %u\n", error_string,
                           vm->partition.xlat_granule);
                ret_code = MANIFEST_ERROR_NOT_COMPATIBLE;
        }

        if (vm->partition.execution_state != AARCH64) {
                dlog_error("Execution state %s: %u\n", error_string,
                           vm->partition.execution_state);
                ret_code = MANIFEST_ERROR_NOT_COMPATIBLE;
        }

        if (vm->partition.run_time_el != EL1 &&
            vm->partition.run_time_el != S_EL1 &&
            vm->partition.run_time_el != S_EL0 &&
            vm->partition.run_time_el != EL0) {
                dlog_error("Exception level %s: %d\n", error_string,
                           vm->partition.run_time_el);
                ret_code = MANIFEST_ERROR_NOT_COMPATIBLE;
        }

        if (vm->partition.ffa_version < MAKE_FFA_VERSION(1, 2) && using_req2) {
                dlog_error("Messaging method %s: %x\n", error_string,
                           vm->partition.messaging_method);
                ret_code = MANIFEST_ERROR_NOT_COMPATIBLE;
        }

        if ((vm->partition.messaging_method &
             ~(FFA_PARTITION_DIRECT_REQ_RECV | FFA_PARTITION_DIRECT_REQ_SEND |
               FFA_PARTITION_INDIRECT_MSG | FFA_PARTITION_DIRECT_REQ2_RECV |
               FFA_PARTITION_DIRECT_REQ2_SEND)) != 0U) {
                dlog_error("Messaging method %s: %x\n", error_string,
                           vm->partition.messaging_method);
                ret_code = MANIFEST_ERROR_NOT_COMPATIBLE;
        }

        if ((vm->partition.run_time_el == S_EL0 ||
             vm->partition.run_time_el == EL0) &&
            vm->partition.execution_ctx_count != 1) {
                dlog_error(
                        "Exception level and execution context count %s: %d "
                        "%d\n",
                        error_string, vm->partition.run_time_el,
                        vm->partition.execution_ctx_count);
                ret_code = MANIFEST_ERROR_NOT_COMPATIBLE;
        }

        for (uint16_t i = 0; i < vm->partition.dev_region_count; i++) {
                struct device_region dev_region;

                dev_region = vm->partition.dev_regions[i];

                if (dev_region.interrupt_count >
                    PARTITION_MAX_INTERRUPTS_PER_DEVICE) {
                        dlog_error(
                                "Interrupt count for device region exceeds "
                                "limit.\n");
                        ret_code = MANIFEST_ERROR_NOT_COMPATIBLE;
                        continue;
                }

                for (uint8_t j = 0; j < dev_region.interrupt_count; j++) {
                        k++;
                        if (k > VM_MANIFEST_MAX_INTERRUPTS) {
                                dlog_error(
                                        "Interrupt count for VM exceeds "
                                        "limit.\n");
                                ret_code = MANIFEST_ERROR_NOT_COMPATIBLE;
                                continue;
                        }
                }
        }

        /* GP register is restricted to one of x0 - x3. */
        if (vm->partition.gp_register_num != -1 &&
            vm->partition.gp_register_num > 3) {
                dlog_error("GP register number %s: %u\n", error_string,
                           vm->partition.gp_register_num);
                ret_code = MANIFEST_ERROR_NOT_COMPATIBLE;
        }

        return ret_code;
}

enum manifest_return_code parse_ffa_manifest(
        struct fdt *fdt, struct manifest_vm *vm,
        struct fdt_node *boot_info_node, const struct boot_params *boot_params)
{
        unsigned int i = 0;
        struct uint32list_iter uuid;
        uint32_t uuid_word;
        struct fdt_node root;
        struct fdt_node ffa_node;
        struct string rxtx_node_name = STRING_INIT("rx_tx-info");
        struct string mem_region_node_name = STRING_INIT("memory-regions");
        struct string dev_region_node_name = STRING_INIT("device-regions");
        struct string boot_info_node_name = STRING_INIT("boot-info");
        bool managed_exit_field_present = false;

        if (!fdt_find_node(fdt, "/", &root)) {
                return MANIFEST_ERROR_NO_ROOT_NODE;
        }

        /* Check "compatible" property. */
        if (!fdt_is_compatible(&root, "arm,ffa-manifest-1.0")) {
                return MANIFEST_ERROR_NOT_COMPATIBLE;
        }

        TRY(read_uint32list(&root, "uuid", &uuid));

        while (uint32list_has_next(&uuid) && i < 4) {
                TRY(uint32list_get_next(&uuid, &uuid_word));
                vm->partition.uuid.uuid[i] = uuid_word;
                i++;
        }
        dlog_verbose("UUID %#x-%x-%x-%x\n", vm->partition.uuid.uuid[0],
                     vm->partition.uuid.uuid[1], vm->partition.uuid.uuid[2],
                     vm->partition.uuid.uuid[3]);

        TRY(read_uint32(&root, "ffa-version", &vm->partition.ffa_version));
        dlog_verbose(" Expected FF-A version %u.%u\n",
                     vm->partition.ffa_version >> 16,
                     vm->partition.ffa_version & 0xffff);

        TRY(read_uint16(&root, "execution-ctx-count",
                        &vm->partition.execution_ctx_count));
        dlog_verbose(" Number of execution context %u\n",
                     vm->partition.execution_ctx_count);

        TRY(read_uint8(&root, "exception-level",
                       (uint8_t *)&vm->partition.run_time_el));
        dlog_verbose(" Run-time EL %u\n", vm->partition.run_time_el);

        TRY(read_uint8(&root, "execution-state",
                       (uint8_t *)&vm->partition.execution_state));
        dlog_verbose(" Execution state %u\n", vm->partition.execution_state);

        TRY(read_optional_uint64(&root, "load-address", 0,
                                 &vm->partition.load_addr));
        dlog_verbose(" Load address %#x\n", vm->partition.load_addr);

        TRY(read_optional_uint64(&root, "entrypoint-offset", 0,
                                 &vm->partition.ep_offset));
        dlog_verbose(" Entry point offset %#x\n", vm->partition.ep_offset);

        TRY(read_optional_uint32(&root, "gp-register-num",
                                 DEFAULT_BOOT_GP_REGISTER,
                                 &vm->partition.gp_register_num));

        if (vm->partition.gp_register_num != DEFAULT_BOOT_GP_REGISTER) {
                dlog_verbose(" Boot GP register: x%u\n",
                             vm->partition.gp_register_num);
        }

        TRY(read_optional_uint16(&root, "boot-order", DEFAULT_BOOT_ORDER,
                                 &vm->partition.boot_order));
        if (vm->partition.boot_order != DEFAULT_BOOT_ORDER) {
                dlog_verbose(" Boot order %#u\n", vm->partition.boot_order);
        }

        if (!check_boot_order(vm->partition.boot_order)) {
                return MANIFEST_ERROR_INVALID_BOOT_ORDER;
        }

        TRY(read_optional_uint8(&root, "xlat-granule", 0,
                                (uint8_t *)&vm->partition.xlat_granule));
        dlog_verbose(" Translation granule %u\n", vm->partition.xlat_granule);

        ffa_node = root;
        if (fdt_find_child(&ffa_node, &rxtx_node_name)) {
                if (!fdt_is_compatible(&ffa_node,
                                       "arm,ffa-manifest-rx_tx-buffer")) {
                        return MANIFEST_ERROR_NOT_COMPATIBLE;
                }

                /*
                 * Read only the phandles for now; they will be used to update
                 * the buffers while parsing the memory regions.
                 */
                TRY(read_uint32(&ffa_node, "rx-buffer",
                                &vm->partition.rxtx.rx_phandle));

                TRY(read_uint32(&ffa_node, "tx-buffer",
                                &vm->partition.rxtx.tx_phandle));

                vm->partition.rxtx.available = true;
        }

        TRY(read_uint16(&root, "messaging-method",
                        (uint16_t *)&vm->partition.messaging_method));
        dlog_verbose(" Messaging method %u\n", vm->partition.messaging_method);

        TRY(read_bool(&root, "managed-exit", &managed_exit_field_present));

        TRY(read_optional_uint8(
                &root, "ns-interrupts-action", NS_ACTION_SIGNALED,
                (uint8_t *)&vm->partition.ns_interrupts_action));

        /*
         * An SP manifest can specify one of the fields listed below:
         * `managed-exit`: Introduced in FF-A v1.0 spec.
         * `ns-interrupts-action`: Introduced in FF-A v1.1 EAC0 spec.
         * If both are missing from the manifest, the default response is
         * NS_ACTION_SIGNALED.
         */
        if (managed_exit_field_present) {
                vm->partition.ns_interrupts_action = NS_ACTION_ME;
        }

        if (vm->partition.ns_interrupts_action != NS_ACTION_QUEUED &&
            vm->partition.ns_interrupts_action != NS_ACTION_ME &&
            vm->partition.ns_interrupts_action != NS_ACTION_SIGNALED) {
                return MANIFEST_ERROR_ILLEGAL_NS_INT_ACTION;
        }

        dlog_verbose(
                " NS Interrupts %s\n",
                (vm->partition.ns_interrupts_action == NS_ACTION_QUEUED)
                        ? "Queued"
                : (vm->partition.ns_interrupts_action == NS_ACTION_SIGNALED)
                        ? "Signaled"
                        : "Managed exit");

        if (vm->partition.ns_interrupts_action == NS_ACTION_ME) {
                /* Managed exit only supported by S_EL1 partitions. */
                if (vm->partition.run_time_el != S_EL1) {
                        dlog_error(
                                "Managed exit cannot be supported by this "
                                "partition\n");
                        return MANIFEST_ERROR_ILLEGAL_NS_INT_ACTION;
                }

                TRY(read_bool(&root, "managed-exit-virq",
                              &vm->partition.me_signal_virq));
                if (vm->partition.me_signal_virq) {
                        dlog_verbose(" Managed Exit signaled through vIRQ\n");
                }
        }

        TRY(read_bool(&root, "notification-support",
                      &vm->partition.notification_support));
        if (vm->partition.notification_support) {
                dlog_verbose(" Notifications Receipt Supported\n");
        }

        TRY(read_optional_uint8(
                &root, "other-s-interrupts-action", OTHER_S_INT_ACTION_SIGNALED,
                (uint8_t *)&vm->partition.other_s_interrupts_action));

        if (vm->partition.other_s_interrupts_action ==
            OTHER_S_INT_ACTION_QUEUED) {
                if (vm->partition.ns_interrupts_action != NS_ACTION_QUEUED) {
                        dlog_error(
                                "Choice of the fields 'ns-interrupts-action' "
                                "and 'other-s-interrupts-action' not "
                                "compatible\n");
                        return MANIFEST_ERROR_NOT_COMPATIBLE;
                }
        } else if (vm->partition.other_s_interrupts_action >
                   OTHER_S_INT_ACTION_SIGNALED) {
                dlog_error(
                        "Illegal value specified for the field"
                        " 'other-s-interrupts-action': %u\n",
                        vm->partition.other_s_interrupts_action);
                return MANIFEST_ERROR_ILLEGAL_OTHER_S_INT_ACTION;
        }

        /* Parse boot info node. */
        if (boot_info_node != NULL) {
                ffa_node = root;
                vm->partition.boot_info =
                        fdt_find_child(&ffa_node, &boot_info_node_name);
                if (vm->partition.boot_info) {
                        *boot_info_node = ffa_node;
                }
        } else {
                vm->partition.boot_info = false;
        }

        TRY(read_optional_uint32(
                &root, "power-management-messages",
                MANIFEST_POWER_MANAGEMENT_CPU_OFF_SUPPORTED |
                        MANIFEST_POWER_MANAGEMENT_CPU_ON_SUPPORTED,
                &vm->partition.power_management));
        vm->partition.power_management &= MANIFEST_POWER_MANAGEMENT_ALL_MASK;
        if (vm->partition.execution_ctx_count == 1 ||
            vm->partition.run_time_el == S_EL0 ||
            vm->partition.run_time_el == EL0) {
                vm->partition.power_management =
                        MANIFEST_POWER_MANAGEMENT_NONE_MASK;
        }

        dlog_verbose(" Power management messages %#x\n",
                     vm->partition.power_management);

        /* Parse memory-regions */
        ffa_node = root;
        if (fdt_find_child(&ffa_node, &mem_region_node_name)) {
                TRY(parse_ffa_memory_region_node(
                        &ffa_node, vm->partition.load_addr,
                        vm->partition.mem_regions,
                        &vm->partition.mem_region_count, &vm->partition.rxtx,
                        boot_params));
        }
        dlog_verbose(" Total %u memory regions found\n",
                     vm->partition.mem_region_count);

        /* Parse Device-regions */
        ffa_node = root;
        if (fdt_find_child(&ffa_node, &dev_region_node_name)) {
                TRY(parse_ffa_device_region_node(
                        &ffa_node, vm->partition.dev_regions,
                        &vm->partition.dev_region_count));
        }
        dlog_verbose(" Total %u device regions found\n",
                     vm->partition.dev_region_count);

        return sanity_check_ffa_manifest(vm);
}
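/*
 * Editorial note: a minimal, illustrative partition manifest of the shape
 * parse_ffa_manifest() accepts (field values are placeholders, not a
 * reference configuration):
 *
 *	/dts-v1/;
 *	/ {
 *		compatible = "arm,ffa-manifest-1.0";
 *		uuid = <0xdeadbeef 0x0 0x0 0x0>;
 *		ffa-version = <0x00010002>;
 *		execution-ctx-count = <1>;
 *		exception-level = <2>;		// placeholder encoding
 *		execution-state = <0>;		// placeholder encoding
 *		load-address = <0x0 0x6480000>;
 *		entrypoint-offset = <0x2000>;
 *		messaging-method = <0x3>;	// placeholder
 *	};
 */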

static enum manifest_return_code parse_ffa_partition_package(
        struct mm_stage1_locked stage1_locked, struct fdt_node *node,
        struct manifest_vm *vm, ffa_id_t vm_id,
        const struct boot_params *boot_params, struct mpool *ppool)
{
        enum manifest_return_code ret = MANIFEST_ERROR_NOT_COMPATIBLE;
        uintpaddr_t load_address;
        struct sp_pkg_header header;
        struct fdt sp_fdt;
        vaddr_t pkg_start;
        vaddr_t manifest_address;
        struct fdt_node boot_info_node;

        /*
         * This must have been hinted as being an FF-A partition;
         * return failure straight away if this is not the case.
         */
        if (!vm->is_ffa_partition) {
                return ret;
        }

        TRY(read_uint64(node, "load_address", &load_address));
        if (!is_aligned(load_address, PAGE_SIZE)) {
                return MANIFEST_ERROR_NOT_COMPATIBLE;
        }

        assert(load_address != 0U);

        if (!sp_pkg_init(stage1_locked, pa_init(load_address), &header,
                         ppool)) {
                return ret;
        }

        pkg_start = va_init(load_address);

        if (vm_id != HF_PRIMARY_VM_ID &&
            sp_pkg_get_mem_size(&header) >= vm->secondary.mem_size) {
                dlog_error("Invalid package header or DT size.\n");
                goto out;
        }

        manifest_address = va_add(va_init(load_address), header.pm_offset);
        if (!fdt_init_from_ptr(&sp_fdt, ptr_from_va(manifest_address),
                               header.pm_size)) {
                dlog_error("FDT failed validation.\n");
                goto out;
        }

        ret = parse_ffa_manifest(&sp_fdt, vm, &boot_info_node, boot_params);
        if (ret != MANIFEST_SUCCESS) {
                dlog_error("Error parsing partition manifest.\n");
                goto out;
        }

        if (vm->partition.load_addr != load_address) {
                dlog_warning(
                        "Partition's load address in its manifest differs "
                        "from the one specified in the partition's "
                        "package.\n");
                vm->partition.load_addr = load_address;
        }

        if (vm->partition.gp_register_num != DEFAULT_BOOT_GP_REGISTER) {
                if (header.version == SP_PKG_HEADER_VERSION_2 &&
                    vm->partition.boot_info &&
                    !ffa_boot_info_node(&boot_info_node, pkg_start, &header)) {
                        dlog_error("Failed to process boot information.\n");
                }
        }
out:
        sp_pkg_deinit(stage1_locked, pkg_start, &header, ppool);
        return ret;
}
1306
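/*
 * Illustrative sketch only (not taken from the upstream tree): the package
 * layout that parse_ffa_partition_package() above consumes. The package is
 * located via the "load_address" property of the partition's node in the
 * SPMC manifest, e.g.
 *
 *	vm1 {
 *		...
 *		load_address = <0x7000000>;
 *	};
 *
 * (the address value is a made-up example). The package itself is expected
 * to start with a header, with the partition manifest FDT found at
 * header.pm_offset and spanning header.pm_size bytes:
 *
 *	load_address ----> +----------------------------+
 *	                   | struct sp_pkg_header       |
 *	 + pm_offset ----> +----------------------------+
 *	                   | partition manifest (FDT)   |  pm_size bytes
 *	                   +----------------------------+
 *	                   | rest of the package        |
 *	                   +----------------------------+
 */
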
/**
 * Parse manifest from FDT.
 */
enum manifest_return_code manifest_init(struct mm_stage1_locked stage1_locked,
					struct manifest **manifest_ret,
					struct memiter *manifest_fdt,
					struct boot_params *boot_params,
					struct mpool *ppool)
{
	struct manifest *manifest;
	struct string vm_name;
	struct fdt fdt;
	struct fdt_node hyp_node;
	size_t i = 0;
	bool found_primary_vm = false;

	if (boot_params->mem_ranges_count == 0 &&
	    boot_params->ns_mem_ranges_count == 0) {
		return MANIFEST_ERROR_MEMORY_MISSING;
	}

	dump_memory_ranges(boot_params->mem_ranges,
			   boot_params->mem_ranges_count, false);
	dump_memory_ranges(boot_params->ns_mem_ranges,
			   boot_params->ns_mem_ranges_count, true);

	/* Allocate space in the ppool for the manifest data. */
	if (!manifest_data_init(ppool)) {
		panic("Unable to allocate manifest data.\n");
	}

	manifest = &manifest_data->manifest;
	*manifest_ret = manifest;

	if (!fdt_init_from_memiter(&fdt, manifest_fdt)) {
		return MANIFEST_ERROR_FILE_SIZE; /* TODO */
	}

	/* Find hypervisor node. */
	if (!fdt_find_node(&fdt, "/hypervisor", &hyp_node)) {
		return MANIFEST_ERROR_NO_HYPERVISOR_FDT_NODE;
	}

	/* Check "compatible" property. */
	if (!fdt_is_compatible(&hyp_node, "hafnium,hafnium")) {
		return MANIFEST_ERROR_NOT_COMPATIBLE;
	}

	TRY(read_bool(&hyp_node, "ffa_tee_enabled",
		      &manifest->ffa_tee_enabled));

	/* Iterate over reserved VM IDs and check no such nodes exist. */
	for (i = HF_VM_ID_BASE; i < HF_VM_ID_OFFSET; i++) {
		ffa_id_t vm_id = (ffa_id_t)i - HF_VM_ID_BASE;
		struct fdt_node vm_node = hyp_node;

		generate_vm_node_name(&vm_name, vm_id);
		if (fdt_find_child(&vm_node, &vm_name)) {
			return MANIFEST_ERROR_RESERVED_VM_ID;
		}
	}

	/* Iterate over VM nodes until we find one that does not exist. */
	for (i = 0; i <= MAX_VMS; ++i) {
		ffa_id_t vm_id = HF_VM_ID_OFFSET + i;
		struct fdt_node vm_node = hyp_node;

		generate_vm_node_name(&vm_name, vm_id - HF_VM_ID_BASE);
		if (!fdt_find_child(&vm_node, &vm_name)) {
			break;
		}

		if (i == MAX_VMS) {
			return MANIFEST_ERROR_TOO_MANY_VMS;
		}

		if (vm_id == HF_PRIMARY_VM_ID) {
			CHECK(found_primary_vm == false); /* sanity check */
			found_primary_vm = true;
		}

		manifest->vm_count = i + 1;

		TRY(parse_vm_common(&vm_node, &manifest->vm[i], vm_id));

		CHECK(!manifest->vm[i].is_hyp_loaded ||
		      manifest->vm[i].is_ffa_partition);

		if (manifest->vm[i].is_ffa_partition &&
		    !manifest->vm[i].is_hyp_loaded) {
			TRY(parse_ffa_partition_package(stage1_locked, &vm_node,
							&manifest->vm[i], vm_id,
							boot_params, ppool));
			size_t page_count =
				align_up(manifest->vm[i].secondary.mem_size,
					 PAGE_SIZE) /
				PAGE_SIZE;

			if (vm_id == HF_PRIMARY_VM_ID) {
				continue;
			}

			TRY(check_partition_memory_is_valid(
				manifest->vm[i].partition.load_addr, page_count,
				0, boot_params));

			/*
			 * Check that the memory from the load address up to
			 * (load address + memory size) has not already been
			 * used by another partition.
			 */
			TRY(check_and_record_memory_used(
				manifest->vm[i].partition.load_addr,
				page_count));
		} else {
			TRY(parse_vm(&vm_node, &manifest->vm[i], vm_id));
		}
	}

	if (!found_primary_vm && vm_id_is_current_world(HF_PRIMARY_VM_ID)) {
		return MANIFEST_ERROR_NO_PRIMARY_VM;
	}

	return MANIFEST_SUCCESS;
}

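/*
 * For reference, a minimal sketch of the manifest shape that manifest_init()
 * above walks. Only the "/hypervisor" node, the "hafnium,hafnium" compatible
 * string, the "ffa_tee_enabled" property, the vm<N> child-node naming
 * produced by generate_vm_node_name() and the "load_address" property are
 * taken from the code; the remaining property names and all values are
 * illustrative assumptions:
 *
 *	hypervisor {
 *		compatible = "hafnium,hafnium";
 *		ffa_tee_enabled;
 *
 *		vm1 {
 *			is_ffa_partition;
 *			load_address = <0x7000000>;
 *		};
 *	};
 */
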
/**
 * Free manifest data resources, called once manifest parsing has
 * completed and VMs are loaded.
 */
void manifest_deinit(struct mpool *ppool)
{
	manifest_data_deinit(ppool);
}

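/*
 * Sketch of the expected call pattern (the actual call sites live in the
 * loader code and are not reproduced here; the variable names below are
 * placeholders):
 *
 *	struct manifest *manifest;
 *
 *	if (manifest_init(stage1_locked, &manifest, &manifest_it,
 *			  &boot_params, &ppool) != MANIFEST_SUCCESS) {
 *		panic("Could not parse manifest.");
 *	}
 *	// ...load the VMs described by manifest->vm[0 .. vm_count)...
 *	manifest_deinit(&ppool);
 */
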
const char *manifest_strerror(enum manifest_return_code ret_code)
{
	switch (ret_code) {
	case MANIFEST_SUCCESS:
		return "Success";
	case MANIFEST_ERROR_FILE_SIZE:
		return "Total size in header does not match file size";
	case MANIFEST_ERROR_MALFORMED_DTB:
		return "Malformed device tree blob";
	case MANIFEST_ERROR_NO_ROOT_NODE:
		return "Could not find root node in manifest";
	case MANIFEST_ERROR_NO_HYPERVISOR_FDT_NODE:
		return "Could not find \"hypervisor\" node in manifest";
	case MANIFEST_ERROR_NOT_COMPATIBLE:
		return "Hypervisor manifest entry not compatible with Hafnium";
	case MANIFEST_ERROR_RESERVED_VM_ID:
		return "Manifest defines a VM with a reserved ID";
	case MANIFEST_ERROR_NO_PRIMARY_VM:
		return "Manifest does not contain a primary VM entry";
	case MANIFEST_ERROR_TOO_MANY_VMS:
		return "Manifest specifies more VMs than Hafnium has "
		       "statically allocated space for";
	case MANIFEST_ERROR_PROPERTY_NOT_FOUND:
		return "Property not found";
	case MANIFEST_ERROR_MALFORMED_STRING:
		return "Malformed string property";
	case MANIFEST_ERROR_STRING_TOO_LONG:
		return "String too long";
	case MANIFEST_ERROR_MALFORMED_INTEGER:
		return "Malformed integer property";
	case MANIFEST_ERROR_INTEGER_OVERFLOW:
		return "Integer overflow";
	case MANIFEST_ERROR_MALFORMED_INTEGER_LIST:
		return "Malformed integer list property";
	case MANIFEST_ERROR_MALFORMED_BOOLEAN:
		return "Malformed boolean property";
	case MANIFEST_ERROR_MEMORY_REGION_NODE_EMPTY:
		return "Memory-region node should have at least one entry";
	case MANIFEST_ERROR_DEVICE_REGION_NODE_EMPTY:
		return "Device-region node should have at least one entry";
	case MANIFEST_ERROR_RXTX_SIZE_MISMATCH:
		return "RX and TX buffers should be of same size";
	case MANIFEST_ERROR_MEM_REGION_EMPTY:
		return "Memory region should have at least one page";
	case MANIFEST_ERROR_BASE_ADDRESS_AND_RELATIVE_ADDRESS:
		return "Base and relative addresses are mutually exclusive";
	case MANIFEST_ERROR_MEM_REGION_OVERLAP:
		return "Memory region overlaps with one already allocated";
	case MANIFEST_ERROR_MEM_REGION_UNALIGNED:
		return "Memory region is not aligned to a page boundary";
	case MANIFEST_ERROR_INVALID_MEM_PERM:
		return "Memory permission should be RO, RW or RX";
	case MANIFEST_ERROR_ARGUMENTS_LIST_EMPTY:
		return "Arguments-list node should have at least one argument";
	case MANIFEST_ERROR_INTERRUPT_ID_REPEATED:
		return "Interrupt ID already assigned to another endpoint";
	case MANIFEST_ERROR_ILLEGAL_NS_INT_ACTION:
		return "Illegal value specified for the field: Action in "
		       "response to NS Interrupt";
	case MANIFEST_ERROR_INTERRUPT_ID_NOT_IN_LIST:
		return "Interrupt ID is not in the list of interrupts";
	case MANIFEST_ERROR_ILLEGAL_OTHER_S_INT_ACTION:
		return "Illegal value specified for the field: Action in "
		       "response to Other-S Interrupt";
	case MANIFEST_ERROR_MEMORY_MISSING:
		return "Memory nodes must be defined in the SPMC manifest "
		       "('memory' and 'ns-memory')";
	case MANIFEST_ERROR_PARTITION_ADDRESS_OVERLAP:
		return "Partition's memory [load address: load address + "
		       "memory size[ overlaps with other allocated "
		       "regions";
	case MANIFEST_ERROR_MEM_REGION_INVALID:
		return "Invalid memory region range";
	case MANIFEST_ERROR_INVALID_BOOT_ORDER:
		return "Boot order should be a unique value smaller than the "
		       "default largest value";
	}

	panic("Unexpected manifest return code.");
}