/*
 * Copyright 2019 The Hafnium Authors.
 *
 * Use of this source code is governed by a BSD-style
 * license that can be found in the LICENSE file or at
 * https://opensource.org/licenses/BSD-3-Clause.
 */

#include "hf/manifest.h"

#include "hf/addr.h"
#include "hf/check.h"
#include "hf/dlog.h"
#include "hf/fdt.h"
#include "hf/static_assert.h"
#include "hf/std.h"

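/*
 * Evaluates `expr` and, on anything other than MANIFEST_SUCCESS, returns the
 * resulting error code from the enclosing function.
 */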
#define TRY(expr)                                            \
	do {                                                 \
		enum manifest_return_code ret_code = (expr); \
		if (ret_code != MANIFEST_SUCCESS) {          \
			return ret_code;                     \
		}                                            \
	} while (0)

#define VM_ID_MAX (HF_VM_ID_OFFSET + MAX_VMS - 1)
#define VM_ID_MAX_DIGITS (5)
#define VM_NAME_EXTRA_CHARS (3) /* "vm" + number + '\0' */
#define VM_NAME_MAX_SIZE (VM_ID_MAX_DIGITS + VM_NAME_EXTRA_CHARS)
static_assert(VM_NAME_MAX_SIZE <= STRING_MAX_SIZE,
	      "VM name does not fit into a struct string.");
static_assert(VM_ID_MAX <= 99999, "Insufficient VM_ID_MAX_DIGITS");
static_assert((HF_OTHER_WORLD_ID > VM_ID_MAX) ||
		      (HF_OTHER_WORLD_ID < HF_VM_ID_BASE),
	      "TrustZone VM ID clashes with normal VM range.");

static inline size_t count_digits(ffa_vm_id_t vm_id)
{
	size_t digits = 0;

	do {
		digits++;
		vm_id /= 10;
	} while (vm_id);
	return digits;
}

/**
 * Generates a string with the two letters "vm" followed by an integer.
 * Assumes `str` can hold at least VM_NAME_MAX_SIZE characters.
 */
static void generate_vm_node_name(struct string *str, ffa_vm_id_t vm_id)
{
	static const char *digits = "0123456789";
	size_t vm_id_digits = count_digits(vm_id);
	char *base = str->data;
	char *ptr = base + (VM_NAME_EXTRA_CHARS + vm_id_digits);

	CHECK(vm_id_digits <= VM_ID_MAX_DIGITS);
	*(--ptr) = '\0';
	do {
		*(--ptr) = digits[vm_id % 10];
		vm_id /= 10;
	} while (vm_id);
	*(--ptr) = 'm';
	*(--ptr) = 'v';
	CHECK(ptr == base);
}

/**
 * Reads a boolean property: true if present, false if not. If present, the
 * property's value must be empty, otherwise it is considered malformed.
 */
static enum manifest_return_code read_bool(const struct fdt_node *node,
					   const char *property, bool *out)
{
	struct memiter data;
	bool present = fdt_read_property(node, property, &data);

	if (present && memiter_size(&data) != 0) {
		return MANIFEST_ERROR_MALFORMED_BOOLEAN;
	}

	*out = present;
	return MANIFEST_SUCCESS;
}

static enum manifest_return_code read_string(const struct fdt_node *node,
					     const char *property,
					     struct string *out)
{
	struct memiter data;

	if (!fdt_read_property(node, property, &data)) {
		return MANIFEST_ERROR_PROPERTY_NOT_FOUND;
	}

	switch (string_init(out, &data)) {
	case STRING_SUCCESS:
		return MANIFEST_SUCCESS;
	case STRING_ERROR_INVALID_INPUT:
		return MANIFEST_ERROR_MALFORMED_STRING;
	case STRING_ERROR_TOO_LONG:
		return MANIFEST_ERROR_STRING_TOO_LONG;
	}
}

static enum manifest_return_code read_optional_string(
	const struct fdt_node *node, const char *property, struct string *out)
{
	enum manifest_return_code ret;

	ret = read_string(node, property, out);
	if (ret == MANIFEST_ERROR_PROPERTY_NOT_FOUND) {
		string_init_empty(out);
		ret = MANIFEST_SUCCESS;
	}
	return ret;
}

static enum manifest_return_code read_uint64(const struct fdt_node *node,
					     const char *property,
					     uint64_t *out)
{
	struct memiter data;

	if (!fdt_read_property(node, property, &data)) {
		return MANIFEST_ERROR_PROPERTY_NOT_FOUND;
	}

	if (!fdt_parse_number(&data, memiter_size(&data), out)) {
		return MANIFEST_ERROR_MALFORMED_INTEGER;
	}

	return MANIFEST_SUCCESS;
}

static enum manifest_return_code read_optional_uint64(
	const struct fdt_node *node, const char *property,
	uint64_t default_value, uint64_t *out)
{
	enum manifest_return_code ret;

	ret = read_uint64(node, property, out);
	if (ret == MANIFEST_ERROR_PROPERTY_NOT_FOUND) {
		*out = default_value;
		return MANIFEST_SUCCESS;
	}
	return ret;
}

static enum manifest_return_code read_uint32(const struct fdt_node *node,
					     const char *property,
					     uint32_t *out)
{
	uint64_t value;

	TRY(read_uint64(node, property, &value));

	if (value > UINT32_MAX) {
		return MANIFEST_ERROR_INTEGER_OVERFLOW;
	}

	*out = (uint32_t)value;
	return MANIFEST_SUCCESS;
}

static enum manifest_return_code read_optional_uint32(
	const struct fdt_node *node, const char *property,
	uint32_t default_value, uint32_t *out)
{
	enum manifest_return_code ret;

	ret = read_uint32(node, property, out);
	if (ret == MANIFEST_ERROR_PROPERTY_NOT_FOUND) {
		*out = default_value;
		return MANIFEST_SUCCESS;
	}
	return ret;
}

static enum manifest_return_code read_uint16(const struct fdt_node *node,
					     const char *property,
					     uint16_t *out)
{
	uint64_t value;

	TRY(read_uint64(node, property, &value));

	if (value > UINT16_MAX) {
		return MANIFEST_ERROR_INTEGER_OVERFLOW;
	}

	*out = (uint16_t)value;
	return MANIFEST_SUCCESS;
}

static enum manifest_return_code read_optional_uint16(
	const struct fdt_node *node, const char *property,
	uint16_t default_value, uint16_t *out)
{
	enum manifest_return_code ret;

	ret = read_uint16(node, property, out);
	if (ret == MANIFEST_ERROR_PROPERTY_NOT_FOUND) {
		*out = default_value;
		return MANIFEST_SUCCESS;
	}

	return ret;
}

static enum manifest_return_code read_uint8(const struct fdt_node *node,
					    const char *property, uint8_t *out)
{
	uint64_t value;

	TRY(read_uint64(node, property, &value));

	if (value > UINT8_MAX) {
		return MANIFEST_ERROR_INTEGER_OVERFLOW;
	}

	*out = (uint8_t)value;
	return MANIFEST_SUCCESS;
}

static enum manifest_return_code read_optional_uint8(
	const struct fdt_node *node, const char *property,
	uint8_t default_value, uint8_t *out)
{
	enum manifest_return_code ret;

	ret = read_uint8(node, property, out);
	if (ret == MANIFEST_ERROR_PROPERTY_NOT_FOUND) {
		*out = default_value;
		return MANIFEST_SUCCESS;
	}

	return ret;
}

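/* Iterator over a devicetree property holding a list of uint32 cells. */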
struct uint32list_iter {
	struct memiter mem_it;
};

static enum manifest_return_code read_uint32list(const struct fdt_node *node,
						 const char *property,
						 struct uint32list_iter *out)
{
	struct memiter data;

	if (!fdt_read_property(node, property, &data)) {
		memiter_init(&out->mem_it, NULL, 0);
		return MANIFEST_ERROR_PROPERTY_NOT_FOUND;
	}

	if ((memiter_size(&data) % sizeof(uint32_t)) != 0) {
		return MANIFEST_ERROR_MALFORMED_INTEGER_LIST;
	}

	out->mem_it = data;
	return MANIFEST_SUCCESS;
}

static enum manifest_return_code read_optional_uint32list(
	const struct fdt_node *node, const char *property,
	struct uint32list_iter *out)
{
	enum manifest_return_code ret = read_uint32list(node, property, out);

	if (ret == MANIFEST_ERROR_PROPERTY_NOT_FOUND) {
		return MANIFEST_SUCCESS;
	}
	return ret;
}

static bool uint32list_has_next(const struct uint32list_iter *list)
{
	return memiter_size(&list->mem_it) > 0;
}

static enum manifest_return_code uint32list_get_next(
	struct uint32list_iter *list, uint32_t *out)
{
	uint64_t num;

	CHECK(uint32list_has_next(list));
	if (!fdt_parse_number(&list->mem_it, sizeof(uint32_t), &num)) {
		return MANIFEST_ERROR_MALFORMED_INTEGER;
	}

	*out = (uint32_t)num;
	return MANIFEST_SUCCESS;
}

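/**
 * Parses the properties common to all VM nodes of the hypervisor manifest.
 *
 * An illustrative node (all property values are hypothetical) might look like:
 *
 *	vm2 {
 *		debug_name = "secondary VM";
 *		vcpu_count = <1>;
 *		mem_size = <0x100000>;
 *		smc_whitelist = <0x84000000>;
 *	};
 */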
static enum manifest_return_code parse_vm_common(const struct fdt_node *node,
						 struct manifest_vm *vm,
						 ffa_vm_id_t vm_id)
{
	struct uint32list_iter smcs;
	size_t idx;

	TRY(read_bool(node, "is_ffa_partition", &vm->is_ffa_partition));

	TRY(read_string(node, "debug_name", &vm->debug_name));

	TRY(read_optional_uint32list(node, "smc_whitelist", &smcs));
	while (uint32list_has_next(&smcs) &&
	       vm->smc_whitelist.smc_count < MAX_SMCS) {
		idx = vm->smc_whitelist.smc_count++;
		TRY(uint32list_get_next(&smcs, &vm->smc_whitelist.smcs[idx]));
	}

	if (uint32list_has_next(&smcs)) {
		dlog_warning("%s SMC whitelist too long.\n",
			     string_data(&vm->debug_name));
	}

	TRY(read_bool(node, "smc_whitelist_permissive",
		      &vm->smc_whitelist.permissive));

	if (vm_id != HF_PRIMARY_VM_ID) {
		TRY(read_uint64(node, "mem_size", &vm->secondary.mem_size));
		TRY(read_uint16(node, "vcpu_count", &vm->secondary.vcpu_count));
		TRY(read_optional_string(node, "fdt_filename",
					 &vm->secondary.fdt_filename));
	}

	return MANIFEST_SUCCESS;
}

static enum manifest_return_code parse_vm(struct fdt_node *node,
					  struct manifest_vm *vm,
					  ffa_vm_id_t vm_id)
{
	TRY(read_optional_string(node, "kernel_filename",
				 &vm->kernel_filename));

	if (vm_id == HF_PRIMARY_VM_ID) {
		TRY(read_optional_string(node, "ramdisk_filename",
					 &vm->primary.ramdisk_filename));
		TRY(read_optional_uint64(node, "boot_address",
					 MANIFEST_INVALID_ADDRESS,
					 &vm->primary.boot_address));
	}
	TRY(read_optional_uint8(node, "exception-level", (uint8_t)EL1,
				(uint8_t *)&vm->partition.run_time_el));

	return MANIFEST_SUCCESS;
}

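/**
 * Parses the "memory-regions" node of an FF-A partition manifest into
 * `mem_regions`. If the partition declared RX/TX buffers, the regions whose
 * phandles match are recorded in `rxtx`.
 */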
static enum manifest_return_code parse_ffa_memory_region_node(
	struct fdt_node *mem_node, struct memory_region *mem_regions,
	uint8_t *count, struct rx_tx *rxtx)
{
	uint32_t phandle;
	uint8_t i = 0;

	dlog_verbose(" Partition memory regions\n");

	if (!fdt_is_compatible(mem_node, "arm,ffa-manifest-memory-regions")) {
		return MANIFEST_ERROR_NOT_COMPATIBLE;
	}

	if (!fdt_first_child(mem_node)) {
		return MANIFEST_ERROR_MEMORY_REGION_NODE_EMPTY;
	}

	do {
		dlog_verbose(" Memory Region[%u]\n", i);

		TRY(read_optional_string(mem_node, "description",
					 &mem_regions[i].name));
		dlog_verbose(" Name: %s\n",
			     string_data(&mem_regions[i].name));

		TRY(read_optional_uint64(mem_node, "base-address",
					 MANIFEST_INVALID_ADDRESS,
					 &mem_regions[i].base_address));
		dlog_verbose(" Base address: %#x\n",
			     mem_regions[i].base_address);

		TRY(read_uint32(mem_node, "pages-count",
				&mem_regions[i].page_count));
		dlog_verbose(" Pages_count: %u\n",
			     mem_regions[i].page_count);

		TRY(read_uint32(mem_node, "attributes",
				&mem_regions[i].attributes));
		mem_regions[i].attributes &= MM_PERM_MASK;

		if (mem_regions[i].attributes != (MM_MODE_R) &&
		    mem_regions[i].attributes != (MM_MODE_R | MM_MODE_W) &&
		    mem_regions[i].attributes != (MM_MODE_R | MM_MODE_X)) {
			return MANIFEST_ERROR_INVALID_MEM_PERM;
		}

		dlog_verbose(" Attributes: %u\n",
			     mem_regions[i].attributes);

		if (rxtx->available) {
			TRY(read_optional_uint32(
				mem_node, "phandle",
				(uint32_t)MANIFEST_INVALID_ADDRESS, &phandle));
			if (phandle == rxtx->rx_phandle) {
				dlog_verbose(" Assigned as RX buffer\n");
				rxtx->rx_buffer = &mem_regions[i];
			} else if (phandle == rxtx->tx_phandle) {
				dlog_verbose(" Assigned as TX buffer\n");
				rxtx->tx_buffer = &mem_regions[i];
			}
		}

		i++;
	} while (fdt_next_sibling(mem_node) && (i < SP_MAX_MEMORY_REGIONS));

	if (rxtx->available &&
	    (rxtx->rx_buffer->page_count != rxtx->tx_buffer->page_count)) {
		return MANIFEST_ERROR_RXTX_SIZE_MISMATCH;
	}

	*count = i;

	return MANIFEST_SUCCESS;
}

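/**
 * Parses the "device-regions" node of an FF-A partition manifest into
 * `dev_regions`, including each device's interrupt list, SMMU stream IDs
 * and access attributes.
 */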
static enum manifest_return_code parse_ffa_device_region_node(
	struct fdt_node *dev_node, struct device_region *dev_regions,
	uint8_t *count)
{
	struct uint32list_iter list;
	uint8_t i = 0;
	uint32_t j = 0;

	dlog_verbose(" Partition Device Regions\n");

	if (!fdt_is_compatible(dev_node, "arm,ffa-manifest-device-regions")) {
		return MANIFEST_ERROR_NOT_COMPATIBLE;
	}

	if (!fdt_first_child(dev_node)) {
		return MANIFEST_ERROR_DEVICE_REGION_NODE_EMPTY;
	}

	do {
		dlog_verbose(" Device Region[%u]\n", i);

		TRY(read_optional_string(dev_node, "description",
					 &dev_regions[i].name));
		dlog_verbose(" Name: %s\n",
			     string_data(&dev_regions[i].name));

		TRY(read_uint64(dev_node, "base-address",
				&dev_regions[i].base_address));
		dlog_verbose(" Base address: %#x\n",
			     dev_regions[i].base_address);

		TRY(read_uint32(dev_node, "pages-count",
				&dev_regions[i].page_count));
		dlog_verbose(" Pages_count: %u\n",
			     dev_regions[i].page_count);

		TRY(read_uint32(dev_node, "attributes",
				&dev_regions[i].attributes));
		dev_regions[i].attributes =
			(dev_regions[i].attributes & MM_PERM_MASK) | MM_MODE_D;

		if (dev_regions[i].attributes != (MM_MODE_R | MM_MODE_D) &&
		    dev_regions[i].attributes !=
			    (MM_MODE_R | MM_MODE_W | MM_MODE_D)) {
			return MANIFEST_ERROR_INVALID_MEM_PERM;
		}

		dlog_verbose(" Attributes: %u\n",
			     dev_regions[i].attributes);

		TRY(read_optional_uint32list(dev_node, "interrupts", &list));
		dlog_verbose(" Interrupt List:\n");
		j = 0;
		while (uint32list_has_next(&list) &&
		       j < SP_MAX_INTERRUPTS_PER_DEVICE) {
			TRY(uint32list_get_next(
				&list, &dev_regions[i].interrupts[j].id));
			if (uint32list_has_next(&list)) {
				TRY(uint32list_get_next(&list,
							&dev_regions[i]
								 .interrupts[j]
								 .attributes));
			} else {
				return MANIFEST_ERROR_MALFORMED_INTEGER_LIST;
			}

			dlog_verbose(" ID = %u, attributes = %u\n",
				     dev_regions[i].interrupts[j].id,
				     dev_regions[i].interrupts[j].attributes);
			j++;
		}

		dev_regions[i].interrupt_count = j;
		if (j == 0) {
			dlog_verbose(" Empty\n");
		}

		TRY(read_optional_uint32(dev_node, "smmu-id",
					 MANIFEST_INVALID_ID,
					 &dev_regions[i].smmu_id));
		if (dev_regions[i].smmu_id != MANIFEST_INVALID_ID) {
			dlog_verbose(" smmu-id: %u\n",
				     dev_regions[i].smmu_id);
		}

		TRY(read_optional_uint32list(dev_node, "stream-ids", &list));
		dlog_verbose(" Stream IDs assigned:\n");

		j = 0;
		while (uint32list_has_next(&list) &&
		       j < SP_MAX_STREAMS_PER_DEVICE) {
			TRY(uint32list_get_next(&list,
						&dev_regions[i].stream_ids[j]));
			dlog_verbose(" %u\n",
				     dev_regions[i].stream_ids[j]);
			j++;
		}
		if (j == 0) {
			dlog_verbose(" None\n");
		}
		dev_regions[i].stream_count = j;

		TRY(read_bool(dev_node, "exclusive-access",
			      &dev_regions[i].exclusive_access));
		dlog_verbose(" Exclusive_access: %u\n",
			     dev_regions[i].exclusive_access);

		i++;
	} while (fdt_next_sibling(dev_node) && (i < SP_MAX_DEVICE_REGIONS));

	*count = i;

	return MANIFEST_SUCCESS;
}

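/**
 * Parses a partition's own FF-A manifest (the DTB packaged alongside the
 * partition image) into `vm->partition`.
 *
 * A minimal illustrative manifest (all property values here are hypothetical
 * placeholders) could look like:
 *
 *	/ {
 *		compatible = "arm,ffa-manifest-1.0";
 *		ffa-version = <0x10000>;
 *		uuid = <0x1 0x2 0x3 0x4>;
 *		execution-ctx-count = <1>;
 *		exception-level = <2>;
 *		execution-state = <0>;
 *		entrypoint-offset = <0x2000>;
 *		messaging-method = <3>;
 *	};
 */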
static enum manifest_return_code parse_ffa_manifest(struct fdt *fdt,
						    struct manifest_vm *vm)
{
	unsigned int i = 0;
	struct uint32list_iter uuid;
	uint32_t uuid_word;
	struct fdt_node root;
	struct fdt_node ffa_node;
	struct string rxtx_node_name = STRING_INIT("rx_tx-info");
	struct string mem_region_node_name = STRING_INIT("memory-regions");
	struct string dev_region_node_name = STRING_INIT("device-regions");

	if (!fdt_find_node(fdt, "/", &root)) {
		return MANIFEST_ERROR_NO_ROOT_NODE;
	}

	/* Check "compatible" property. */
	if (!fdt_is_compatible(&root, "arm,ffa-manifest-1.0")) {
		return MANIFEST_ERROR_NOT_COMPATIBLE;
	}

	TRY(read_uint32(&root, "ffa-version", &vm->partition.ffa_version));
	dlog_verbose(" Expected FF-A version %u.%u\n",
		     vm->partition.ffa_version >> 16,
		     vm->partition.ffa_version & 0xffff);

	TRY(read_uint32list(&root, "uuid", &uuid));

	while (uint32list_has_next(&uuid) && i < 4) {
		TRY(uint32list_get_next(&uuid, &uuid_word));
		vm->partition.uuid.uuid[i] = uuid_word;
		i++;
	}
	dlog_verbose(" UUID %#x-%x-%x-%x\n", vm->partition.uuid.uuid[0],
		     vm->partition.uuid.uuid[1], vm->partition.uuid.uuid[2],
		     vm->partition.uuid.uuid[3]);

	TRY(read_uint16(&root, "execution-ctx-count",
			&vm->partition.execution_ctx_count));
	dlog_verbose(" Number of execution contexts %u\n",
		     vm->partition.execution_ctx_count);

	TRY(read_uint8(&root, "exception-level",
		       (uint8_t *)&vm->partition.run_time_el));
	dlog_verbose(" Run-time EL %u\n", vm->partition.run_time_el);

	TRY(read_uint8(&root, "execution-state",
		       (uint8_t *)&vm->partition.execution_state));
	dlog_verbose(" Execution state %u\n", vm->partition.execution_state);

	TRY(read_optional_uint64(&root, "load-address", 0,
				 &vm->partition.load_addr));
	dlog_verbose(" Load address %#x\n", vm->partition.load_addr);

	TRY(read_optional_uint64(&root, "entrypoint-offset", 0,
				 &vm->partition.ep_offset));
	dlog_verbose(" Entry point offset %#x\n", vm->partition.ep_offset);

	TRY(read_optional_uint16(&root, "boot-order", DEFAULT_BOOT_ORDER,
				 &vm->partition.boot_order));
	dlog_verbose(" Boot order %u\n", vm->partition.boot_order);

	TRY(read_optional_uint8(&root, "xlat-granule", 0,
				(uint8_t *)&vm->partition.xlat_granule));
	dlog_verbose(" Translation granule %u\n", vm->partition.xlat_granule);

	ffa_node = root;
	if (fdt_find_child(&ffa_node, &rxtx_node_name)) {
		if (!fdt_is_compatible(&ffa_node,
				       "arm,ffa-manifest-rx_tx-buffer")) {
			return MANIFEST_ERROR_NOT_COMPATIBLE;
		}

		/*
		 * Read only the phandles for now; they are used to assign the
		 * RX/TX buffers while parsing the memory regions.
		 */
		TRY(read_uint32(&ffa_node, "rx-buffer",
				&vm->partition.rxtx.rx_phandle));

		TRY(read_uint32(&ffa_node, "tx-buffer",
				&vm->partition.rxtx.tx_phandle));

		vm->partition.rxtx.available = true;
	}

	TRY(read_uint8(&root, "messaging-method",
		       (uint8_t *)&vm->partition.messaging_method));
	dlog_verbose(" Messaging method %u\n", vm->partition.messaging_method);

	TRY(read_bool(&root, "managed-exit", &vm->partition.managed_exit));

	/* Parse the memory-regions node. */
	ffa_node = root;
	if (fdt_find_child(&ffa_node, &mem_region_node_name)) {
		TRY(parse_ffa_memory_region_node(
			&ffa_node, vm->partition.mem_regions,
			&vm->partition.mem_region_count, &vm->partition.rxtx));
	}
	dlog_verbose(" Total %u memory regions found\n",
		     vm->partition.mem_region_count);

	/* Parse the device-regions node. */
	ffa_node = root;
	if (fdt_find_child(&ffa_node, &dev_region_node_name)) {
		TRY(parse_ffa_device_region_node(
			&ffa_node, vm->partition.dev_regions,
			&vm->partition.dev_region_count));
	}
	dlog_verbose(" Total %u device regions found\n",
		     vm->partition.dev_region_count);

	return MANIFEST_SUCCESS;
}

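/**
 * Checks that the values parsed from a partition's FF-A manifest are
 * supported by this build: a compatible FF-A version, 4KB translation
 * granule, AArch64 execution state, a supported exception level and
 * messaging method, and interrupt counts within the static limits.
 */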
static enum manifest_return_code sanity_check_ffa_manifest(
	struct manifest_vm *vm)
{
	uint16_t ffa_version_major;
	uint16_t ffa_version_minor;
	enum manifest_return_code ret_code = MANIFEST_SUCCESS;
	const char *error_string = "specified in manifest is unsupported";
	uint32_t k = 0;

	/* Ensure that the partition's FF-A version is compatible. */
	ffa_version_major = (vm->partition.ffa_version & 0xffff0000) >>
			    FFA_VERSION_MAJOR_OFFSET;
	ffa_version_minor = vm->partition.ffa_version & 0xffff;

	if (ffa_version_major != FFA_VERSION_MAJOR ||
	    ffa_version_minor > FFA_VERSION_MINOR) {
		dlog_error("FF-A partition manifest version %s: %u.%u\n",
			   error_string, ffa_version_major, ffa_version_minor);
		ret_code = MANIFEST_ERROR_NOT_COMPATIBLE;
	}

	if (vm->partition.xlat_granule != PAGE_4KB) {
		dlog_error("Translation granule %s: %u\n", error_string,
			   vm->partition.xlat_granule);
		ret_code = MANIFEST_ERROR_NOT_COMPATIBLE;
	}

	if (vm->partition.execution_state != AARCH64) {
		dlog_error("Execution state %s: %u\n", error_string,
			   vm->partition.execution_state);
		ret_code = MANIFEST_ERROR_NOT_COMPATIBLE;
	}

	if (vm->partition.run_time_el != EL1 &&
	    vm->partition.run_time_el != S_EL1 &&
	    vm->partition.run_time_el != S_EL0) {
		dlog_error("Exception level %s: %d\n", error_string,
			   vm->partition.run_time_el);
		ret_code = MANIFEST_ERROR_NOT_COMPATIBLE;
	}

	if ((vm->partition.messaging_method &
	     ~(FFA_PARTITION_DIRECT_REQ_RECV | FFA_PARTITION_DIRECT_REQ_SEND |
	       FFA_PARTITION_INDIRECT_MSG)) != 0U) {
		dlog_error("Messaging method %s: %x\n", error_string,
			   vm->partition.messaging_method);
		ret_code = MANIFEST_ERROR_NOT_COMPATIBLE;
	}

	if (vm->partition.run_time_el == S_EL0 &&
	    vm->partition.execution_ctx_count != 1) {
		dlog_error(
			"Exception level and execution context count %s: %d "
			"%d\n",
			error_string, vm->partition.run_time_el,
			vm->partition.execution_ctx_count);
		ret_code = MANIFEST_ERROR_NOT_COMPATIBLE;
	}

	for (uint8_t i = 0; i < vm->partition.dev_region_count; i++) {
		struct device_region dev_region;

		dev_region = vm->partition.dev_regions[i];

		if (dev_region.interrupt_count > SP_MAX_INTERRUPTS_PER_DEVICE) {
			dlog_error(
				"Interrupt count for device region exceeds "
				"limit.\n");
			ret_code = MANIFEST_ERROR_NOT_COMPATIBLE;
			continue;
		}

		for (uint8_t j = 0; j < dev_region.interrupt_count; j++) {
			k++;
			if (k > VM_MANIFEST_MAX_INTERRUPTS) {
				dlog_error(
					"Interrupt count for VM exceeds "
					"limit.\n");
				ret_code = MANIFEST_ERROR_NOT_COMPATIBLE;
				continue;
			}
		}
	}

	return ret_code;
}

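/**
 * Locates the partition package in memory (via the "load_address" property of
 * the hypervisor manifest node), maps and validates its header, and then
 * parses and sanity-checks the embedded FF-A manifest DTB that immediately
 * follows the header.
 */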
static enum manifest_return_code parse_ffa_partition_package(
	struct mm_stage1_locked stage1_locked, struct fdt_node *node,
	struct manifest_vm *vm, ffa_vm_id_t vm_id, struct mpool *ppool)
{
	enum manifest_return_code ret = MANIFEST_ERROR_NOT_COMPATIBLE;
	uintpaddr_t sp_pkg_addr;
	paddr_t sp_pkg_start;
	paddr_t sp_pkg_end;
	struct sp_pkg_header *sp_pkg;
	size_t sp_header_dtb_size;
	paddr_t sp_dtb_addr;
	struct fdt sp_fdt;

	/*
	 * This must have been hinted as being an FF-A partition; return
	 * failure straight away if that is not the case.
	 */
	if (!vm->is_ffa_partition) {
		return MANIFEST_ERROR_NOT_COMPATIBLE;
	}

	TRY(read_uint64(node, "load_address", &sp_pkg_addr));
	if (!is_aligned(sp_pkg_addr, PAGE_SIZE)) {
		return MANIFEST_ERROR_NOT_COMPATIBLE;
	}

	/* Map the top of the package as a single page to extract the header. */
	sp_pkg_start = pa_init(sp_pkg_addr);
	sp_pkg_end = pa_add(sp_pkg_start, PAGE_SIZE);
	sp_pkg = mm_identity_map(stage1_locked, sp_pkg_start,
				 pa_add(sp_pkg_start, PAGE_SIZE), MM_MODE_R,
				 ppool);
	CHECK(sp_pkg != NULL);

	dlog_verbose("Package load address %#x\n", sp_pkg_addr);

	if (sp_pkg->magic != SP_PKG_HEADER_MAGIC) {
		dlog_error("Invalid package magic.\n");
		goto exit_unmap;
	}

	if (sp_pkg->version != SP_PKG_HEADER_VERSION) {
		dlog_error("Invalid package version.\n");
		goto exit_unmap;
	}

	/* Expect the DTB to immediately follow the header. */
	if (sp_pkg->pm_offset != sizeof(struct sp_pkg_header)) {
		dlog_error("Invalid package manifest offset.\n");
		goto exit_unmap;
	}

	sp_header_dtb_size = align_up(
		sp_pkg->pm_size + sizeof(struct sp_pkg_header), PAGE_SIZE);
	if ((vm_id != HF_PRIMARY_VM_ID) &&
	    (sp_header_dtb_size >= vm->secondary.mem_size)) {
		dlog_error("Invalid package header or DT size.\n");
		goto exit_unmap;
	}

	if (sp_header_dtb_size > PAGE_SIZE) {
		/* Map remainder of header + DTB */
		sp_pkg_end = pa_add(sp_pkg_start, sp_header_dtb_size);

		sp_pkg = mm_identity_map(stage1_locked, sp_pkg_start,
					 sp_pkg_end, MM_MODE_R, ppool);
		CHECK(sp_pkg != NULL);
	}

	sp_dtb_addr = pa_add(sp_pkg_start, sp_pkg->pm_offset);

	/* The address comes from pa_addr, so allow the cast. */
	// NOLINTNEXTLINE(performance-no-int-to-ptr)
	if (!fdt_init_from_ptr(&sp_fdt, (void *)pa_addr(sp_dtb_addr),
			       sp_pkg->pm_size)) {
		dlog_error("FDT failed validation.\n");
		goto exit_unmap;
	}

	ret = parse_ffa_manifest(&sp_fdt, vm);
	if (ret != MANIFEST_SUCCESS) {
		goto exit_unmap;
	}

	if (vm->partition.load_addr != sp_pkg_addr) {
		dlog_warning(
			"Partition's load address in its manifest differs"
			" from the one specified in the partition's "
			"package.\n");
		vm->partition.load_addr = sp_pkg_addr;
	}

	ret = sanity_check_ffa_manifest(vm);

exit_unmap:
	CHECK(mm_unmap(stage1_locked, sp_pkg_start, sp_pkg_end, ppool));

	return ret;
}

/**
 * Parses the hypervisor manifest from the given FDT and, for FF-A partitions,
 * the partition packages it references.
 */
enum manifest_return_code manifest_init(struct mm_stage1_locked stage1_locked,
					struct manifest *manifest,
					struct memiter *manifest_fdt,
					struct mpool *ppool)
{
	struct string vm_name;
	struct fdt fdt;
	struct fdt_node hyp_node;
	size_t i = 0;
	bool found_primary_vm = false;

	memset_s(manifest, sizeof(*manifest), 0, sizeof(*manifest));

	if (!fdt_init_from_memiter(&fdt, manifest_fdt)) {
		return MANIFEST_ERROR_FILE_SIZE; /* TODO */
	}

	/* Find hypervisor node. */
	if (!fdt_find_node(&fdt, "/hypervisor", &hyp_node)) {
		return MANIFEST_ERROR_NO_HYPERVISOR_FDT_NODE;
	}

	/* Check "compatible" property. */
	if (!fdt_is_compatible(&hyp_node, "hafnium,hafnium")) {
		return MANIFEST_ERROR_NOT_COMPATIBLE;
	}

	TRY(read_bool(&hyp_node, "ffa_tee_enabled",
		      &manifest->ffa_tee_enabled));

	/* Iterate over reserved VM IDs and check no such nodes exist. */
	for (i = HF_VM_ID_BASE; i < HF_VM_ID_OFFSET; i++) {
		ffa_vm_id_t vm_id = (ffa_vm_id_t)i - HF_VM_ID_BASE;
		struct fdt_node vm_node = hyp_node;

		generate_vm_node_name(&vm_name, vm_id);
		if (fdt_find_child(&vm_node, &vm_name)) {
			return MANIFEST_ERROR_RESERVED_VM_ID;
		}
	}

	/* Iterate over VM nodes until we find one that does not exist. */
	for (i = 0; i <= MAX_VMS; ++i) {
		ffa_vm_id_t vm_id = HF_VM_ID_OFFSET + i;
		struct fdt_node vm_node = hyp_node;

		generate_vm_node_name(&vm_name, vm_id - HF_VM_ID_BASE);
		if (!fdt_find_child(&vm_node, &vm_name)) {
			break;
		}

		if (i == MAX_VMS) {
			return MANIFEST_ERROR_TOO_MANY_VMS;
		}

		if (vm_id == HF_PRIMARY_VM_ID) {
			CHECK(found_primary_vm == false); /* sanity check */
			found_primary_vm = true;
		}

		manifest->vm_count = i + 1;

		TRY(parse_vm_common(&vm_node, &manifest->vm[i], vm_id));

		if (manifest->vm[i].is_ffa_partition) {
			TRY(parse_ffa_partition_package(stage1_locked, &vm_node,
							&manifest->vm[i], vm_id,
							ppool));
		} else {
			TRY(parse_vm(&vm_node, &manifest->vm[i], vm_id));
		}
	}

	if (!found_primary_vm && vm_id_is_current_world(HF_PRIMARY_VM_ID)) {
		return MANIFEST_ERROR_NO_PRIMARY_VM;
	}

	return MANIFEST_SUCCESS;
}

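/**
 * Returns a human-readable description of the given manifest return code.
 */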
const char *manifest_strerror(enum manifest_return_code ret_code)
{
	switch (ret_code) {
	case MANIFEST_SUCCESS:
		return "Success";
	case MANIFEST_ERROR_FILE_SIZE:
		return "Total size in header does not match file size";
	case MANIFEST_ERROR_MALFORMED_DTB:
		return "Malformed device tree blob";
	case MANIFEST_ERROR_NO_ROOT_NODE:
		return "Could not find root node in manifest";
	case MANIFEST_ERROR_NO_HYPERVISOR_FDT_NODE:
		return "Could not find \"hypervisor\" node in manifest";
	case MANIFEST_ERROR_NOT_COMPATIBLE:
		return "Hypervisor manifest entry not compatible with Hafnium";
	case MANIFEST_ERROR_RESERVED_VM_ID:
		return "Manifest defines a VM with a reserved ID";
	case MANIFEST_ERROR_NO_PRIMARY_VM:
		return "Manifest does not contain a primary VM entry";
	case MANIFEST_ERROR_TOO_MANY_VMS:
		return "Manifest specifies more VMs than Hafnium has "
		       "statically allocated space for";
	case MANIFEST_ERROR_PROPERTY_NOT_FOUND:
		return "Property not found";
	case MANIFEST_ERROR_MALFORMED_STRING:
		return "Malformed string property";
	case MANIFEST_ERROR_STRING_TOO_LONG:
		return "String too long";
	case MANIFEST_ERROR_MALFORMED_INTEGER:
		return "Malformed integer property";
	case MANIFEST_ERROR_INTEGER_OVERFLOW:
		return "Integer overflow";
	case MANIFEST_ERROR_MALFORMED_INTEGER_LIST:
		return "Malformed integer list property";
	case MANIFEST_ERROR_MALFORMED_BOOLEAN:
		return "Malformed boolean property";
	case MANIFEST_ERROR_MEMORY_REGION_NODE_EMPTY:
		return "Memory-region node should have at least one entry";
	case MANIFEST_ERROR_DEVICE_REGION_NODE_EMPTY:
		return "Device-region node should have at least one entry";
	case MANIFEST_ERROR_RXTX_SIZE_MISMATCH:
		return "RX and TX buffers should be of same size";
	case MANIFEST_ERROR_INVALID_MEM_PERM:
		return "Memory permission should be RO, RW or RX";
	}

	panic("Unexpected manifest return code.");
}