/*
 * Copyright 2019 The Hafnium Authors.
 *
 * Use of this source code is governed by a BSD-style
 * license that can be found in the LICENSE file or at
 * https://opensource.org/licenses/BSD-3-Clause.
 */

#include "hf/manifest.h"

#include "hf/arch/types.h"

#include "hf/addr.h"
#include "hf/assert.h"
#include "hf/boot_info.h"
#include "hf/check.h"
#include "hf/dlog.h"
#include "hf/sp_pkg.h"
#include "hf/static_assert.h"
#include "hf/std.h"

#define TRY(expr)                                                    \
	do {                                                         \
		enum manifest_return_code ret_code = (expr);         \
		if (ret_code != MANIFEST_SUCCESS) {                  \
			return ret_code;                             \
		}                                                    \
	} while (0)
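
/*
 * Example use (property name illustrative): TRY(read_uint32(node,
 * "pages-count", &n)) returns early from the enclosing function with the
 * parse error code if the property is missing or malformed, and falls
 * through on success.
 */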

#define VM_ID_MAX (HF_VM_ID_OFFSET + MAX_VMS - 1)
#define VM_ID_MAX_DIGITS (5)
#define VM_NAME_EXTRA_CHARS (3) /* "vm" + number + '\0' */
#define VM_NAME_MAX_SIZE (VM_ID_MAX_DIGITS + VM_NAME_EXTRA_CHARS)
static_assert(VM_NAME_MAX_SIZE <= STRING_MAX_SIZE,
	      "VM name does not fit into a struct string.");
static_assert(VM_ID_MAX <= 99999, "Insufficient VM_ID_MAX_DIGITS");
static_assert((HF_OTHER_WORLD_ID > VM_ID_MAX) ||
		      (HF_OTHER_WORLD_ID < HF_VM_ID_BASE),
	      "TrustZone VM ID clashes with normal VM range.");

/**
 * A struct to keep track of fields that are allocated by partitions
 * in the manifest.
 */
struct allocated_fields {
	struct interrupt_bitmap intids;
	struct {
		uintptr_t base;
		uintptr_t limit;
	} mem_regions[PARTITION_MAX_MEMORY_REGIONS * MAX_VMS];
};
/**
 * Calculate the number of entries in the ppool that are required to
 * store the allocated_fields struct.
 */
static size_t allocated_fields_ppool_entries =
	(align_up(sizeof(struct allocated_fields), MM_PPOOL_ENTRY_SIZE) /
	 MM_PPOOL_ENTRY_SIZE);

static struct allocated_fields *allocated_fields;
/* Index used to track the number of memory regions allocated. */
static size_t allocated_mem_regions_index = 0;

/**
 * Allocates memory for the allocated fields struct in the given memory
 * pool.
 * Returns true if the memory is successfully allocated.
 */
static bool manifest_allocated_fields_init(struct mpool *ppool)
{
	allocated_fields = (struct allocated_fields *)mpool_alloc_contiguous(
		ppool, allocated_fields_ppool_entries, 1);

	return allocated_fields != NULL;
}

/**
 * Frees the memory used for the allocated_fields struct, returning it to the
 * given memory pool.
 */
static void manifest_allocated_fields_deinit(struct mpool *ppool)
{
	/*
	 * Return the memory used for the allocated_fields struct to the
	 * memory pool.
	 */
	mpool_add_chunk(ppool, allocated_fields,
			allocated_fields_ppool_entries);
	/*
	 * Reset the index used for tracking the number of memory regions
	 * allocated.
	 */
	allocated_mem_regions_index = 0;
}

static inline size_t count_digits(ffa_vm_id_t vm_id)
{
	size_t digits = 0;

	do {
		digits++;
		vm_id /= 10;
	} while (vm_id);
	return digits;
}

/**
 * Generates a string with the two letters "vm" followed by an integer.
 * Assumes `str->data` is at least VM_NAME_MAX_SIZE bytes long.
 */
static void generate_vm_node_name(struct string *str, ffa_vm_id_t vm_id)
{
	static const char *digits = "0123456789";
	size_t vm_id_digits = count_digits(vm_id);
	char *base = str->data;
	char *ptr = base + (VM_NAME_EXTRA_CHARS + vm_id_digits);

	assert(vm_id_digits <= VM_ID_MAX_DIGITS);
	*(--ptr) = '\0';
	do {
		*(--ptr) = digits[vm_id % 10];
		vm_id /= 10;
	} while (vm_id);
	*(--ptr) = 'm';
	*(--ptr) = 'v';
	assert(ptr == base);
}

/**
 * Read a boolean property: true if present; false if not. If present, the
 * value of the property must be empty, otherwise it is considered malformed.
 */
static enum manifest_return_code read_bool(const struct fdt_node *node,
					   const char *property, bool *out)
{
	struct memiter data;
	bool present = fdt_read_property(node, property, &data);

	if (present && memiter_size(&data) != 0) {
		return MANIFEST_ERROR_MALFORMED_BOOLEAN;
	}

	*out = present;
	return MANIFEST_SUCCESS;
}
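
/*
 * For example, an empty "managed-exit;" property in a partition manifest is
 * read as true by read_bool(), while omitting the property reads as false.
 */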

static enum manifest_return_code read_string(const struct fdt_node *node,
					     const char *property,
					     struct string *out)
{
	struct memiter data;

	if (!fdt_read_property(node, property, &data)) {
		return MANIFEST_ERROR_PROPERTY_NOT_FOUND;
	}

	switch (string_init(out, &data)) {
	case STRING_SUCCESS:
		return MANIFEST_SUCCESS;
	case STRING_ERROR_INVALID_INPUT:
		return MANIFEST_ERROR_MALFORMED_STRING;
	case STRING_ERROR_TOO_LONG:
		return MANIFEST_ERROR_STRING_TOO_LONG;
	}
}

static enum manifest_return_code read_optional_string(
	const struct fdt_node *node, const char *property, struct string *out)
{
	enum manifest_return_code ret;

	ret = read_string(node, property, out);
	if (ret == MANIFEST_ERROR_PROPERTY_NOT_FOUND) {
		string_init_empty(out);
		ret = MANIFEST_SUCCESS;
	}
	return ret;
}

static enum manifest_return_code read_uint64(const struct fdt_node *node,
					     const char *property,
					     uint64_t *out)
{
	struct memiter data;

	if (!fdt_read_property(node, property, &data)) {
		return MANIFEST_ERROR_PROPERTY_NOT_FOUND;
	}

	if (!fdt_parse_number(&data, memiter_size(&data), out)) {
		return MANIFEST_ERROR_MALFORMED_INTEGER;
	}

	return MANIFEST_SUCCESS;
}

static enum manifest_return_code read_optional_uint64(
	const struct fdt_node *node, const char *property,
	uint64_t default_value, uint64_t *out)
{
	enum manifest_return_code ret;

	ret = read_uint64(node, property, out);
	if (ret == MANIFEST_ERROR_PROPERTY_NOT_FOUND) {
		*out = default_value;
		return MANIFEST_SUCCESS;
	}
	return ret;
}

static enum manifest_return_code read_uint32(const struct fdt_node *node,
					     const char *property,
					     uint32_t *out)
{
	uint64_t value;

	TRY(read_uint64(node, property, &value));

	if (value > UINT32_MAX) {
		return MANIFEST_ERROR_INTEGER_OVERFLOW;
	}

	*out = (uint32_t)value;
	return MANIFEST_SUCCESS;
}

static enum manifest_return_code read_optional_uint32(
	const struct fdt_node *node, const char *property,
	uint32_t default_value, uint32_t *out)
{
	enum manifest_return_code ret;

	ret = read_uint32(node, property, out);
	if (ret == MANIFEST_ERROR_PROPERTY_NOT_FOUND) {
		*out = default_value;
		return MANIFEST_SUCCESS;
	}
	return ret;
}

static enum manifest_return_code read_uint16(const struct fdt_node *node,
					     const char *property,
					     uint16_t *out)
{
	uint64_t value;

	TRY(read_uint64(node, property, &value));

	if (value > UINT16_MAX) {
		return MANIFEST_ERROR_INTEGER_OVERFLOW;
	}

	*out = (uint16_t)value;
	return MANIFEST_SUCCESS;
}

static enum manifest_return_code read_optional_uint16(
	const struct fdt_node *node, const char *property,
	uint16_t default_value, uint16_t *out)
{
	enum manifest_return_code ret;

	ret = read_uint16(node, property, out);
	if (ret == MANIFEST_ERROR_PROPERTY_NOT_FOUND) {
		*out = default_value;
		return MANIFEST_SUCCESS;
	}

	return ret;
}

static enum manifest_return_code read_uint8(const struct fdt_node *node,
					    const char *property, uint8_t *out)
{
	uint64_t value;

	TRY(read_uint64(node, property, &value));

	if (value > UINT8_MAX) {
		return MANIFEST_ERROR_INTEGER_OVERFLOW;
	}

	*out = (uint8_t)value;
	return MANIFEST_SUCCESS;
}

static enum manifest_return_code read_optional_uint8(
	const struct fdt_node *node, const char *property,
	uint8_t default_value, uint8_t *out)
{
	enum manifest_return_code ret;

	ret = read_uint8(node, property, out);
	if (ret == MANIFEST_ERROR_PROPERTY_NOT_FOUND) {
		*out = default_value;
		return MANIFEST_SUCCESS;
	}

	return ret;
}

struct uint32list_iter {
	struct memiter mem_it;
};

static enum manifest_return_code read_uint32list(const struct fdt_node *node,
						 const char *property,
						 struct uint32list_iter *out)
{
	struct memiter data;

	if (!fdt_read_property(node, property, &data)) {
		memiter_init(&out->mem_it, NULL, 0);
		return MANIFEST_ERROR_PROPERTY_NOT_FOUND;
	}

	if ((memiter_size(&data) % sizeof(uint32_t)) != 0) {
		return MANIFEST_ERROR_MALFORMED_INTEGER_LIST;
	}

	out->mem_it = data;
	return MANIFEST_SUCCESS;
}

static enum manifest_return_code read_optional_uint32list(
	const struct fdt_node *node, const char *property,
	struct uint32list_iter *out)
{
	enum manifest_return_code ret = read_uint32list(node, property, out);

	if (ret == MANIFEST_ERROR_PROPERTY_NOT_FOUND) {
		return MANIFEST_SUCCESS;
	}
	return ret;
}

static bool uint32list_has_next(const struct uint32list_iter *list)
{
	return memiter_size(&list->mem_it) > 0;
}

static enum manifest_return_code uint32list_get_next(
	struct uint32list_iter *list, uint32_t *out)
{
	uint64_t num;

	CHECK(uint32list_has_next(list));
	if (!fdt_parse_number(&list->mem_it, sizeof(uint32_t), &num)) {
		return MANIFEST_ERROR_MALFORMED_INTEGER;
	}

	*out = (uint32_t)num;
	return MANIFEST_SUCCESS;
}

static enum manifest_return_code parse_vm_common(const struct fdt_node *node,
						 struct manifest_vm *vm,
						 ffa_vm_id_t vm_id)
{
	struct uint32list_iter smcs;
	size_t idx;

	TRY(read_bool(node, "is_ffa_partition", &vm->is_ffa_partition));

	TRY(read_bool(node, "hyp_loaded", &vm->is_hyp_loaded));

	TRY(read_string(node, "debug_name", &vm->debug_name));

	TRY(read_optional_uint32list(node, "smc_whitelist", &smcs));
	while (uint32list_has_next(&smcs) &&
	       vm->smc_whitelist.smc_count < MAX_SMCS) {
		idx = vm->smc_whitelist.smc_count++;
		TRY(uint32list_get_next(&smcs, &vm->smc_whitelist.smcs[idx]));
	}

	if (uint32list_has_next(&smcs)) {
		dlog_warning("%s SMC whitelist too long.\n", vm->debug_name);
	}

	TRY(read_bool(node, "smc_whitelist_permissive",
		      &vm->smc_whitelist.permissive));

	if (vm_id != HF_PRIMARY_VM_ID) {
		TRY(read_uint64(node, "mem_size", &vm->secondary.mem_size));
		TRY(read_uint16(node, "vcpu_count", &vm->secondary.vcpu_count));
		TRY(read_optional_string(node, "fdt_filename",
					 &vm->secondary.fdt_filename));
	}

	return MANIFEST_SUCCESS;
}

static enum manifest_return_code parse_vm(struct fdt_node *node,
					  struct manifest_vm *vm,
					  ffa_vm_id_t vm_id)
{
	TRY(read_optional_string(node, "kernel_filename",
				 &vm->kernel_filename));

	if (vm_id == HF_PRIMARY_VM_ID) {
		TRY(read_optional_string(node, "ramdisk_filename",
					 &vm->primary.ramdisk_filename));
		TRY(read_optional_uint64(node, "boot_address",
					 MANIFEST_INVALID_ADDRESS,
					 &vm->primary.boot_address));
	}
	TRY(read_optional_uint8(node, "exception-level", (uint8_t)EL1,
				(uint8_t *)&vm->partition.run_time_el));

	return MANIFEST_SUCCESS;
}

static bool check_and_record_mem_regions(uintptr_t base_address,
					 uint32_t page_count)
{
	uintptr_t limit = base_address + page_count * PAGE_SIZE;

	for (size_t i = 0; i < allocated_mem_regions_index; i++) {
		uintptr_t mem_region_base =
			allocated_fields->mem_regions[i].base;
		uintptr_t mem_region_limit =
			allocated_fields->mem_regions[i].limit;

		if ((base_address >= mem_region_base &&
		     base_address < mem_region_limit) ||
		    (limit < mem_region_limit && limit >= mem_region_base)) {
			dlog_error(
				"Overlapping memory regions\n"
				"New Region %#x - %#x\n"
				"Overlapping region %#x - %#x\n",
				base_address, limit, mem_region_base,
				mem_region_limit);
			return false;
		}
	}

	allocated_fields->mem_regions[allocated_mem_regions_index].base =
		base_address;
	allocated_fields->mem_regions[allocated_mem_regions_index].limit =
		limit;
	allocated_mem_regions_index++;

	return true;
}
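
/*
 * Worked example (assuming 4KiB pages): a region at base 0x7000000 with 16
 * pages covers [0x7000000, 0x7010000), so a later region whose base address
 * falls inside that range, e.g. 0x700f000, is rejected as overlapping by
 * check_and_record_mem_regions().
 */
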
static enum manifest_return_code parse_ffa_memory_region_node(
	struct fdt_node *mem_node, struct memory_region *mem_regions,
	uint16_t *count, struct rx_tx *rxtx)
{
	uint32_t phandle;
	uint16_t i = 0;

	dlog_verbose("  Partition memory regions\n");

	if (!fdt_is_compatible(mem_node, "arm,ffa-manifest-memory-regions")) {
		return MANIFEST_ERROR_NOT_COMPATIBLE;
	}

	if (!fdt_first_child(mem_node)) {
		return MANIFEST_ERROR_MEMORY_REGION_NODE_EMPTY;
	}

	do {
		dlog_verbose("    Memory Region[%u]\n", i);

		TRY(read_optional_string(mem_node, "description",
					 &mem_regions[i].name));
		dlog_verbose("      Name: %s\n",
			     string_data(&mem_regions[i].name));

		TRY(read_optional_uint64(mem_node, "base-address",
					 MANIFEST_INVALID_ADDRESS,
					 &mem_regions[i].base_address));
		dlog_verbose("      Base address: %#x\n",
			     mem_regions[i].base_address);

		TRY(read_uint32(mem_node, "pages-count",
				&mem_regions[i].page_count));
		dlog_verbose("      Pages_count: %u\n",
			     mem_regions[i].page_count);

		if (!check_and_record_mem_regions(mem_regions[i].base_address,
						  mem_regions[i].page_count)) {
			return MANIFEST_ERROR_MEM_REGION_OVERLAP;
		}

		TRY(read_uint32(mem_node, "attributes",
				&mem_regions[i].attributes));

		/*
		 * Check RWX permission attributes.
		 * Security attribute is checked at load phase.
		 */
		uint32_t permissions = mem_regions[i].attributes &
				       (MANIFEST_REGION_ATTR_READ |
					MANIFEST_REGION_ATTR_WRITE |
					MANIFEST_REGION_ATTR_EXEC);
		if (permissions != MANIFEST_REGION_ATTR_READ &&
		    permissions != (MANIFEST_REGION_ATTR_READ |
				    MANIFEST_REGION_ATTR_WRITE) &&
		    permissions != (MANIFEST_REGION_ATTR_READ |
				    MANIFEST_REGION_ATTR_EXEC)) {
			return MANIFEST_ERROR_INVALID_MEM_PERM;
		}

		/* Filter memory region attributes. */
		mem_regions[i].attributes &= MANIFEST_REGION_ALL_ATTR_MASK;

		dlog_verbose("      Attributes: %#x\n",
			     mem_regions[i].attributes);

		if (rxtx->available) {
			TRY(read_optional_uint32(
				mem_node, "phandle",
				(uint32_t)MANIFEST_INVALID_ADDRESS, &phandle));
			if (phandle == rxtx->rx_phandle) {
				dlog_verbose("      Assigned as RX buffer\n");
				rxtx->rx_buffer = &mem_regions[i];
			} else if (phandle == rxtx->tx_phandle) {
				dlog_verbose("      Assigned as TX buffer\n");
				rxtx->tx_buffer = &mem_regions[i];
			}
		}

		i++;
	} while (fdt_next_sibling(mem_node) &&
		 (i < PARTITION_MAX_MEMORY_REGIONS));

	if (rxtx->available &&
	    (rxtx->rx_buffer->page_count != rxtx->tx_buffer->page_count)) {
		return MANIFEST_ERROR_RXTX_SIZE_MISMATCH;
	}

	*count = i;

	return MANIFEST_SUCCESS;
}
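
/*
 * Illustrative memory-regions node, limited to the properties parsed above;
 * node and property values are examples only, not taken from a real manifest:
 *
 *	memory-regions {
 *		compatible = "arm,ffa-manifest-memory-regions";
 *
 *		example-heap {
 *			description = "heap";
 *			base-address = <0x0 0x7100000>;
 *			pages-count = <4>;
 *			attributes = <0x3>;
 *		};
 *	};
 */
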
static enum manifest_return_code parse_ffa_device_region_node(
	struct fdt_node *dev_node, struct device_region *dev_regions,
	uint16_t *count)
{
	struct uint32list_iter list;
	uint16_t i = 0;
	uint32_t j = 0;
	struct interrupt_bitmap allocated_intids = allocated_fields->intids;

	dlog_verbose("  Partition Device Regions\n");

	if (!fdt_is_compatible(dev_node, "arm,ffa-manifest-device-regions")) {
		return MANIFEST_ERROR_NOT_COMPATIBLE;
	}

	if (!fdt_first_child(dev_node)) {
		return MANIFEST_ERROR_DEVICE_REGION_NODE_EMPTY;
	}

	do {
		dlog_verbose("    Device Region[%u]\n", i);

		TRY(read_optional_string(dev_node, "description",
					 &dev_regions[i].name));
		dlog_verbose("      Name: %s\n",
			     string_data(&dev_regions[i].name));

		TRY(read_uint64(dev_node, "base-address",
				&dev_regions[i].base_address));
		dlog_verbose("      Base address: %#x\n",
			     dev_regions[i].base_address);

		TRY(read_uint32(dev_node, "pages-count",
				&dev_regions[i].page_count));
		dlog_verbose("      Pages_count: %u\n",
			     dev_regions[i].page_count);

		TRY(read_uint32(dev_node, "attributes",
				&dev_regions[i].attributes));

		/*
		 * Check RWX permission attributes.
		 * Security attribute is checked at load phase.
		 */
		uint32_t permissions = dev_regions[i].attributes &
				       (MANIFEST_REGION_ATTR_READ |
					MANIFEST_REGION_ATTR_WRITE |
					MANIFEST_REGION_ATTR_EXEC);

		if (permissions != MANIFEST_REGION_ATTR_READ &&
		    permissions != (MANIFEST_REGION_ATTR_READ |
				    MANIFEST_REGION_ATTR_WRITE)) {
			return MANIFEST_ERROR_INVALID_MEM_PERM;
		}

		/* Filter device region attributes. */
		dev_regions[i].attributes = dev_regions[i].attributes &
					    MANIFEST_REGION_ALL_ATTR_MASK;

		dlog_verbose("      Attributes: %#x\n",
			     dev_regions[i].attributes);

		TRY(read_optional_uint32list(dev_node, "interrupts", &list));
		dlog_verbose("      Interrupt List:\n");
		j = 0;
		while (uint32list_has_next(&list) &&
		       j < PARTITION_MAX_INTERRUPTS_PER_DEVICE) {
			uint32_t intid;

			TRY(uint32list_get_next(
				&list, &dev_regions[i].interrupts[j].id));
			intid = dev_regions[i].interrupts[j].id;

			dlog_verbose("        ID = %u\n", intid);

			if (interrupt_bitmap_get_value(&allocated_intids,
						       intid) == 1U) {
				return MANIFEST_ERROR_INTERRUPT_ID_REPEATED;
			}

			interrupt_bitmap_set_value(&allocated_intids, intid);

			if (uint32list_has_next(&list)) {
				TRY(uint32list_get_next(&list,
							&dev_regions[i]
								 .interrupts[j]
								 .attributes));
			} else {
				return MANIFEST_ERROR_MALFORMED_INTEGER_LIST;
			}

			dlog_verbose("        attributes = %u\n",
				     dev_regions[i].interrupts[j].attributes);
			j++;
		}

		dev_regions[i].interrupt_count = j;
		if (j == 0) {
			dlog_verbose("        Empty\n");
		}

		TRY(read_optional_uint32(dev_node, "smmu-id",
					 MANIFEST_INVALID_ID,
					 &dev_regions[i].smmu_id));
		if (dev_regions[i].smmu_id != MANIFEST_INVALID_ID) {
			dlog_verbose("      smmu-id: %u\n",
				     dev_regions[i].smmu_id);
		}

		TRY(read_optional_uint32list(dev_node, "stream-ids", &list));
		dlog_verbose("      Stream IDs assigned:\n");

		j = 0;
		while (uint32list_has_next(&list) &&
		       j < PARTITION_MAX_STREAMS_PER_DEVICE) {
			TRY(uint32list_get_next(&list,
						&dev_regions[i].stream_ids[j]));
			dlog_verbose("        %u\n",
				     dev_regions[i].stream_ids[j]);
			j++;
		}
		if (j == 0) {
			dlog_verbose("        None\n");
		}
		dev_regions[i].stream_count = j;

		TRY(read_bool(dev_node, "exclusive-access",
			      &dev_regions[i].exclusive_access));
		dlog_verbose("      Exclusive_access: %u\n",
			     dev_regions[i].exclusive_access);

		i++;
	} while (fdt_next_sibling(dev_node) &&
		 (i < PARTITION_MAX_DEVICE_REGIONS));

	*count = i;

	return MANIFEST_SUCCESS;
}
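
/*
 * Illustrative device-regions node, limited to the properties parsed above;
 * node and property values are examples only, not taken from a real manifest:
 *
 *	device-regions {
 *		compatible = "arm,ffa-manifest-device-regions";
 *
 *		example-uart {
 *			description = "uart";
 *			base-address = <0x0 0x1c0a0000>;
 *			pages-count = <1>;
 *			attributes = <0x3>;
 *			interrupts = <0x25 0x901>;
 *			exclusive-access;
 *		};
 *	};
 */
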
static enum manifest_return_code sanity_check_ffa_manifest(
	struct manifest_vm *vm)
{
	uint16_t ffa_version_major;
	uint16_t ffa_version_minor;
	enum manifest_return_code ret_code = MANIFEST_SUCCESS;
	const char *error_string = "specified in manifest is unsupported";
	uint32_t k = 0;

	/* Ensure the specified FF-A version is compatible with the SPM's. */
	ffa_version_major = (vm->partition.ffa_version & 0xffff0000) >>
			    FFA_VERSION_MAJOR_OFFSET;
	ffa_version_minor = vm->partition.ffa_version & 0xffff;

	if (ffa_version_major != FFA_VERSION_MAJOR ||
	    ffa_version_minor > FFA_VERSION_MINOR) {
		dlog_error("FF-A partition manifest version %s: %u.%u\n",
			   error_string, ffa_version_major, ffa_version_minor);
		ret_code = MANIFEST_ERROR_NOT_COMPATIBLE;
	}

	if (vm->partition.xlat_granule != PAGE_4KB) {
		dlog_error("Translation granule %s: %u\n", error_string,
			   vm->partition.xlat_granule);
		ret_code = MANIFEST_ERROR_NOT_COMPATIBLE;
	}

	if (vm->partition.execution_state != AARCH64) {
		dlog_error("Execution state %s: %u\n", error_string,
			   vm->partition.execution_state);
		ret_code = MANIFEST_ERROR_NOT_COMPATIBLE;
	}

	if (vm->partition.run_time_el != EL1 &&
	    vm->partition.run_time_el != S_EL1 &&
	    vm->partition.run_time_el != S_EL0) {
		dlog_error("Exception level %s: %d\n", error_string,
			   vm->partition.run_time_el);
		ret_code = MANIFEST_ERROR_NOT_COMPATIBLE;
	}

	if ((vm->partition.messaging_method &
	     ~(FFA_PARTITION_DIRECT_REQ_RECV | FFA_PARTITION_DIRECT_REQ_SEND |
	       FFA_PARTITION_INDIRECT_MSG)) != 0U) {
		dlog_error("Messaging method %s: %x\n", error_string,
			   vm->partition.messaging_method);
		ret_code = MANIFEST_ERROR_NOT_COMPATIBLE;
	}

	if (vm->partition.run_time_el == S_EL0 &&
	    vm->partition.execution_ctx_count != 1) {
		dlog_error(
			"Exception level and execution context count %s: %d "
			"%d\n",
			error_string, vm->partition.run_time_el,
			vm->partition.execution_ctx_count);
		ret_code = MANIFEST_ERROR_NOT_COMPATIBLE;
	}

	for (uint16_t i = 0; i < vm->partition.dev_region_count; i++) {
		struct device_region dev_region;

		dev_region = vm->partition.dev_regions[i];

		if (dev_region.interrupt_count >
		    PARTITION_MAX_INTERRUPTS_PER_DEVICE) {
			dlog_error(
				"Interrupt count for device region exceeds "
				"limit.\n");
			ret_code = MANIFEST_ERROR_NOT_COMPATIBLE;
			continue;
		}

		for (uint8_t j = 0; j < dev_region.interrupt_count; j++) {
			k++;
			if (k > VM_MANIFEST_MAX_INTERRUPTS) {
				dlog_error(
					"Interrupt count for VM exceeds "
					"limit.\n");
				ret_code = MANIFEST_ERROR_NOT_COMPATIBLE;
				continue;
			}
		}
	}

	/* GP register is restricted to one of x0 - x3. */
	if (vm->partition.gp_register_num != -1 &&
	    vm->partition.gp_register_num > 3) {
		dlog_error("GP register number %s: %u\n", error_string,
			   vm->partition.gp_register_num);
		ret_code = MANIFEST_ERROR_NOT_COMPATIBLE;
	}

	return ret_code;
}
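
/*
 * Note: the "ffa-version" property read by parse_ffa_manifest() below packs
 * the major version in bits [31:16] and the minor version in bits [15:0], so
 * e.g. a value of 0x10001 declares FF-A v1.1.
 */
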
enum manifest_return_code parse_ffa_manifest(struct fdt *fdt,
					     struct manifest_vm *vm,
					     struct fdt_node *boot_info_node)
{
	unsigned int i = 0;
	struct uint32list_iter uuid;
	uint32_t uuid_word;
	struct fdt_node root;
	struct fdt_node ffa_node;
	struct string rxtx_node_name = STRING_INIT("rx_tx-info");
	struct string mem_region_node_name = STRING_INIT("memory-regions");
	struct string dev_region_node_name = STRING_INIT("device-regions");
	struct string boot_info_node_name = STRING_INIT("boot-info");

	if (!fdt_find_node(fdt, "/", &root)) {
		return MANIFEST_ERROR_NO_ROOT_NODE;
	}

	/* Check "compatible" property. */
	if (!fdt_is_compatible(&root, "arm,ffa-manifest-1.0")) {
		return MANIFEST_ERROR_NOT_COMPATIBLE;
	}

	TRY(read_uint32(&root, "ffa-version", &vm->partition.ffa_version));
	dlog_verbose("  Expected FF-A version %u.%u\n",
		     vm->partition.ffa_version >> 16,
		     vm->partition.ffa_version & 0xffff);

	TRY(read_uint32list(&root, "uuid", &uuid));

	while (uint32list_has_next(&uuid) && i < 4) {
		TRY(uint32list_get_next(&uuid, &uuid_word));
		vm->partition.uuid.uuid[i] = uuid_word;
		i++;
	}
	dlog_verbose("  UUID %#x-%x-%x-%x\n", vm->partition.uuid.uuid[0],
		     vm->partition.uuid.uuid[1], vm->partition.uuid.uuid[2],
		     vm->partition.uuid.uuid[3]);

	TRY(read_uint16(&root, "execution-ctx-count",
			&vm->partition.execution_ctx_count));
	dlog_verbose("  Number of execution context %u\n",
		     vm->partition.execution_ctx_count);

	TRY(read_uint8(&root, "exception-level",
		       (uint8_t *)&vm->partition.run_time_el));
	dlog_verbose("  Run-time EL %u\n", vm->partition.run_time_el);

	TRY(read_uint8(&root, "execution-state",
		       (uint8_t *)&vm->partition.execution_state));
	dlog_verbose("  Execution state %u\n", vm->partition.execution_state);

	TRY(read_optional_uint64(&root, "load-address", 0,
				 &vm->partition.load_addr));
	dlog_verbose("  Load address %#x\n", vm->partition.load_addr);

	TRY(read_optional_uint64(&root, "entrypoint-offset", 0,
				 &vm->partition.ep_offset));
	dlog_verbose("  Entry point offset %#x\n", vm->partition.ep_offset);

	TRY(read_optional_uint32(&root, "gp-register-num",
				 DEFAULT_BOOT_GP_REGISTER,
				 &vm->partition.gp_register_num));
	dlog_verbose("  Boot GP register: %#x\n",
		     vm->partition.gp_register_num);

	TRY(read_optional_uint16(&root, "boot-order", DEFAULT_BOOT_ORDER,
				 &vm->partition.boot_order));
	dlog_verbose("  Boot order %u\n", vm->partition.boot_order);

	TRY(read_optional_uint8(&root, "xlat-granule", 0,
				(uint8_t *)&vm->partition.xlat_granule));
	dlog_verbose("  Translation granule %u\n", vm->partition.xlat_granule);

	ffa_node = root;
	if (fdt_find_child(&ffa_node, &rxtx_node_name)) {
		if (!fdt_is_compatible(&ffa_node,
				       "arm,ffa-manifest-rx_tx-buffer")) {
			return MANIFEST_ERROR_NOT_COMPATIBLE;
		}

		/*
		 * Read only the phandles for now; they are used to assign the
		 * buffers while parsing the memory regions.
		 */
		TRY(read_uint32(&ffa_node, "rx-buffer",
				&vm->partition.rxtx.rx_phandle));

		TRY(read_uint32(&ffa_node, "tx-buffer",
				&vm->partition.rxtx.tx_phandle));

		vm->partition.rxtx.available = true;
	}

	TRY(read_uint8(&root, "messaging-method",
		       (uint8_t *)&vm->partition.messaging_method));
	dlog_verbose("  Messaging method %u\n", vm->partition.messaging_method);

	TRY(read_bool(&root, "managed-exit", &vm->partition.managed_exit));
	if (vm->partition.managed_exit) {
		dlog_verbose("  Managed Exit Supported\n");
	}

	TRY(read_bool(&root, "notification-support",
		      &vm->partition.notification_support));
	if (vm->partition.notification_support) {
		dlog_verbose("  Notifications Receipt Supported\n");
	}

	/* Parse boot info node. */
	if (boot_info_node != NULL) {
		ffa_node = root;
		vm->partition.boot_info =
			fdt_find_child(&ffa_node, &boot_info_node_name);
		if (vm->partition.boot_info) {
			*boot_info_node = ffa_node;
		}
	} else {
		vm->partition.boot_info = false;
	}

	/* Parse memory-regions */
	ffa_node = root;
	if (fdt_find_child(&ffa_node, &mem_region_node_name)) {
		TRY(parse_ffa_memory_region_node(
			&ffa_node, vm->partition.mem_regions,
			&vm->partition.mem_region_count, &vm->partition.rxtx));
	}
	dlog_verbose("  Total %u memory regions found\n",
		     vm->partition.mem_region_count);

	/* Parse Device-regions */
	ffa_node = root;
	if (fdt_find_child(&ffa_node, &dev_region_node_name)) {
		TRY(parse_ffa_device_region_node(
			&ffa_node, vm->partition.dev_regions,
			&vm->partition.dev_region_count));
	}
	dlog_verbose("  Total %u device regions found\n",
		     vm->partition.dev_region_count);

	return sanity_check_ffa_manifest(vm);
}
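
/*
 * Sketch of an FF-A partition manifest root as consumed by
 * parse_ffa_manifest() above, showing only a subset of the parsed properties;
 * all values are examples:
 *
 *	/ {
 *		compatible = "arm,ffa-manifest-1.0";
 *		ffa-version = <0x10001>;
 *		uuid = <0xdeadbeef 0x0 0x0 0x1>;
 *		execution-ctx-count = <1>;
 *		exception-level = <2>;
 *		execution-state = <0>;
 *		load-address = <0x0 0x7000000>;
 *		entrypoint-offset = <0x0 0x2000>;
 *		messaging-method = <3>;
 *	};
 */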

static enum manifest_return_code parse_ffa_partition_package(
	struct mm_stage1_locked stage1_locked, struct fdt_node *node,
	struct manifest_vm *vm, ffa_vm_id_t vm_id, struct mpool *ppool)
{
	enum manifest_return_code ret = MANIFEST_ERROR_NOT_COMPATIBLE;
	uintpaddr_t load_address;
	struct sp_pkg_header header;
	struct fdt sp_fdt;
	vaddr_t pkg_start;
	vaddr_t manifest_address;
	struct fdt_node boot_info_node;

	/*
	 * This must have been hinted as being an FF-A partition;
	 * return failure straight away if that is not the case.
	 */
	if (!vm->is_ffa_partition) {
		return ret;
	}

	TRY(read_uint64(node, "load_address", &load_address));
	if (!is_aligned(load_address, PAGE_SIZE)) {
		return MANIFEST_ERROR_NOT_COMPATIBLE;
	}

	assert(load_address != 0U);

	if (!sp_pkg_init(stage1_locked, pa_init(load_address), &header,
			 ppool)) {
		return ret;
	}

	pkg_start = va_init(load_address);

	if (vm_id != HF_PRIMARY_VM_ID &&
	    sp_pkg_get_mem_size(&header) >= vm->secondary.mem_size) {
		dlog_error("Invalid package header or DT size.\n");
		goto out;
	}

	manifest_address = va_add(va_init(load_address), header.pm_offset);
	if (!fdt_init_from_ptr(&sp_fdt, ptr_from_va(manifest_address),
			       header.pm_size)) {
		dlog_error("FDT failed validation.\n");
		goto out;
	}

	ret = parse_ffa_manifest(&sp_fdt, vm, &boot_info_node);
	if (ret != MANIFEST_SUCCESS) {
		dlog_error("Error parsing partition manifest: %s.\n",
			   manifest_strerror(ret));
		goto out;
	}

	if (vm->partition.load_addr != load_address) {
		dlog_warning(
			"Partition's load address in its manifest differs"
			" from the one specified in the partition's package.\n");
		vm->partition.load_addr = load_address;
	}

	if (vm->partition.gp_register_num != DEFAULT_BOOT_GP_REGISTER) {
		if (header.version == SP_PKG_HEADER_VERSION_2 &&
		    vm->partition.boot_info &&
		    !ffa_boot_info_node(&boot_info_node, pkg_start, &header)) {
			dlog_error("Failed to process boot information.\n");
		}
	}

out:
	sp_pkg_deinit(stage1_locked, pkg_start, &header, ppool);
	return ret;
}

/**
 * Parse manifest from FDT.
 */
enum manifest_return_code manifest_init(struct mm_stage1_locked stage1_locked,
					struct manifest *manifest,
					struct memiter *manifest_fdt,
					struct mpool *ppool)
{
	struct string vm_name;
	struct fdt fdt;
	struct fdt_node hyp_node;
	size_t i = 0;
	bool found_primary_vm = false;

	memset_s(manifest, sizeof(*manifest), 0, sizeof(*manifest));

	/* Allocate space in the ppool for tracking the allocated fields. */
	if (!manifest_allocated_fields_init(ppool)) {
		panic("Unable to allocate space for the allocated fields "
		      "struct.\n");
	}

	if (!fdt_init_from_memiter(&fdt, manifest_fdt)) {
		return MANIFEST_ERROR_FILE_SIZE; /* TODO */
	}

	/* Find hypervisor node. */
	if (!fdt_find_node(&fdt, "/hypervisor", &hyp_node)) {
		return MANIFEST_ERROR_NO_HYPERVISOR_FDT_NODE;
	}

	/* Check "compatible" property. */
	if (!fdt_is_compatible(&hyp_node, "hafnium,hafnium")) {
		return MANIFEST_ERROR_NOT_COMPATIBLE;
	}

	TRY(read_bool(&hyp_node, "ffa_tee_enabled",
		      &manifest->ffa_tee_enabled));

	/* Iterate over reserved VM IDs and check no such nodes exist. */
	for (i = HF_VM_ID_BASE; i < HF_VM_ID_OFFSET; i++) {
		ffa_vm_id_t vm_id = (ffa_vm_id_t)i - HF_VM_ID_BASE;
		struct fdt_node vm_node = hyp_node;

		generate_vm_node_name(&vm_name, vm_id);
		if (fdt_find_child(&vm_node, &vm_name)) {
			return MANIFEST_ERROR_RESERVED_VM_ID;
		}
	}

	/* Iterate over VM nodes until we find one that does not exist. */
	for (i = 0; i <= MAX_VMS; ++i) {
		ffa_vm_id_t vm_id = HF_VM_ID_OFFSET + i;
		struct fdt_node vm_node = hyp_node;

		generate_vm_node_name(&vm_name, vm_id - HF_VM_ID_BASE);
		if (!fdt_find_child(&vm_node, &vm_name)) {
			break;
		}

		if (i == MAX_VMS) {
			return MANIFEST_ERROR_TOO_MANY_VMS;
		}

		if (vm_id == HF_PRIMARY_VM_ID) {
			CHECK(found_primary_vm == false); /* sanity check */
			found_primary_vm = true;
		}

		manifest->vm_count = i + 1;

		TRY(parse_vm_common(&vm_node, &manifest->vm[i], vm_id));

		CHECK(!manifest->vm[i].is_hyp_loaded ||
		      manifest->vm[i].is_ffa_partition);

		if (manifest->vm[i].is_ffa_partition &&
		    !manifest->vm[i].is_hyp_loaded) {
			TRY(parse_ffa_partition_package(stage1_locked, &vm_node,
							&manifest->vm[i], vm_id,
							ppool));
		} else {
			TRY(parse_vm(&vm_node, &manifest->vm[i], vm_id));
		}
	}

	if (!found_primary_vm && vm_id_is_current_world(HF_PRIMARY_VM_ID)) {
		return MANIFEST_ERROR_NO_PRIMARY_VM;
	}

	return MANIFEST_SUCCESS;
}
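
/*
 * Illustrative "/hypervisor" node as consumed by manifest_init() above;
 * names and values are examples only:
 *
 *	hypervisor {
 *		compatible = "hafnium,hafnium";
 *
 *		vm1 {
 *			debug_name = "primary";
 *			kernel_filename = "vmlinuz";
 *			ramdisk_filename = "initrd.img";
 *		};
 *
 *		vm2 {
 *			debug_name = "secondary";
 *			kernel_filename = "secondary_kernel";
 *			vcpu_count = <1>;
 *			mem_size = <0x100000>;
 *		};
 *	};
 */
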
/* Free resources used when parsing the manifest. */
void manifest_deinit(struct mpool *ppool)
{
	manifest_allocated_fields_deinit(ppool);
}

const char *manifest_strerror(enum manifest_return_code ret_code)
{
	switch (ret_code) {
	case MANIFEST_SUCCESS:
		return "Success";
	case MANIFEST_ERROR_FILE_SIZE:
		return "Total size in header does not match file size";
	case MANIFEST_ERROR_MALFORMED_DTB:
		return "Malformed device tree blob";
	case MANIFEST_ERROR_NO_ROOT_NODE:
		return "Could not find root node in manifest";
	case MANIFEST_ERROR_NO_HYPERVISOR_FDT_NODE:
		return "Could not find \"hypervisor\" node in manifest";
	case MANIFEST_ERROR_NOT_COMPATIBLE:
		return "Hypervisor manifest entry not compatible with Hafnium";
	case MANIFEST_ERROR_RESERVED_VM_ID:
		return "Manifest defines a VM with a reserved ID";
	case MANIFEST_ERROR_NO_PRIMARY_VM:
		return "Manifest does not contain a primary VM entry";
	case MANIFEST_ERROR_TOO_MANY_VMS:
		return "Manifest specifies more VMs than Hafnium has "
		       "statically allocated space for";
	case MANIFEST_ERROR_PROPERTY_NOT_FOUND:
		return "Property not found";
	case MANIFEST_ERROR_MALFORMED_STRING:
		return "Malformed string property";
	case MANIFEST_ERROR_STRING_TOO_LONG:
		return "String too long";
	case MANIFEST_ERROR_MALFORMED_INTEGER:
		return "Malformed integer property";
	case MANIFEST_ERROR_INTEGER_OVERFLOW:
		return "Integer overflow";
	case MANIFEST_ERROR_MALFORMED_INTEGER_LIST:
		return "Malformed integer list property";
	case MANIFEST_ERROR_MALFORMED_BOOLEAN:
		return "Malformed boolean property";
	case MANIFEST_ERROR_MEMORY_REGION_NODE_EMPTY:
		return "Memory-region node should have at least one entry";
	case MANIFEST_ERROR_DEVICE_REGION_NODE_EMPTY:
		return "Device-region node should have at least one entry";
	case MANIFEST_ERROR_RXTX_SIZE_MISMATCH:
		return "RX and TX buffers should be of same size";
	case MANIFEST_ERROR_MEM_REGION_OVERLAP:
		return "Memory region overlaps with one already allocated";
	case MANIFEST_ERROR_INVALID_MEM_PERM:
		return "Memory permission should be RO, RW or RX";
	case MANIFEST_ERROR_ARGUMENTS_LIST_EMPTY:
		return "Arguments-list node should have at least one argument";
	case MANIFEST_ERROR_INTERRUPT_ID_REPEATED:
		return "Interrupt ID already assigned to another endpoint";
	}

	panic("Unexpected manifest return code.");
}