/*
 * Copyright 2019 The Hafnium Authors.
 *
 * Use of this source code is governed by a BSD-style
 * license that can be found in the LICENSE file or at
 * https://opensource.org/licenses/BSD-3-Clause.
 */

#include "hf/manifest.h"

#include "hf/addr.h"
#include "hf/check.h"
#include "hf/dlog.h"
#include "hf/fdt.h"
#include "hf/static_assert.h"
#include "hf/std.h"

#define TRY(expr)                                            \
	do {                                                 \
		enum manifest_return_code ret_code = (expr); \
		if (ret_code != MANIFEST_SUCCESS) {          \
			return ret_code;                     \
		}                                            \
	} while (0)
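
/*
 * Illustrative use of TRY() (a sketch mirroring the parsers below): a call
 * such as
 *
 *	TRY(read_uint64(node, "mem_size", &vm->secondary.mem_size));
 *
 * expands to a do/while block that returns the helper's error code to the
 * caller if it is anything other than MANIFEST_SUCCESS.
 */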

#define VM_ID_MAX (HF_VM_ID_OFFSET + MAX_VMS - 1)
#define VM_ID_MAX_DIGITS (5)
#define VM_NAME_EXTRA_CHARS (3) /* "vm" prefix + '\0' terminator */
#define VM_NAME_MAX_SIZE (VM_ID_MAX_DIGITS + VM_NAME_EXTRA_CHARS)
static_assert(VM_NAME_MAX_SIZE <= STRING_MAX_SIZE,
	      "VM name does not fit into a struct string.");
static_assert(VM_ID_MAX <= 99999, "Insufficient VM_ID_MAX_DIGITS");
static_assert((HF_OTHER_WORLD_ID > VM_ID_MAX) ||
		      (HF_OTHER_WORLD_ID < HF_VM_ID_BASE),
	      "TrustZone VM ID clashes with normal VM range.");

static inline size_t count_digits(ffa_vm_id_t vm_id)
{
	size_t digits = 0;

	do {
		digits++;
		vm_id /= 10;
	} while (vm_id);
	return digits;
}

/**
 * Generates a string with the two letters "vm" followed by an integer,
 * e.g. vm_id 5 produces the name "vm5". Assumes `str` can hold at least
 * VM_NAME_MAX_SIZE characters.
 */
static void generate_vm_node_name(struct string *str, ffa_vm_id_t vm_id)
{
	static const char *digits = "0123456789";
	size_t vm_id_digits = count_digits(vm_id);
	char *base = str->data;
	char *ptr = base + (VM_NAME_EXTRA_CHARS + vm_id_digits);

	CHECK(vm_id_digits <= VM_ID_MAX_DIGITS);
	*(--ptr) = '\0';
	do {
		*(--ptr) = digits[vm_id % 10];
		vm_id /= 10;
	} while (vm_id);
	*(--ptr) = 'm';
	*(--ptr) = 'v';
	CHECK(ptr == base);
}

/**
 * Reads a boolean property: true if present, false if not. If present, the
 * property must have an empty value; anything else is considered malformed.
 */
static enum manifest_return_code read_bool(const struct fdt_node *node,
					   const char *property, bool *out)
{
	struct memiter data;
	bool present = fdt_read_property(node, property, &data);

	if (present && memiter_size(&data) != 0) {
		return MANIFEST_ERROR_MALFORMED_BOOLEAN;
	}

	*out = present;
	return MANIFEST_SUCCESS;
}
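
/*
 * For illustration: in the manifest device tree an empty property such as
 *
 *	smc_whitelist_permissive;
 *
 * reads as true, while its absence reads as false. A property carrying a
 * value is rejected as MANIFEST_ERROR_MALFORMED_BOOLEAN.
 */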

static enum manifest_return_code read_string(const struct fdt_node *node,
					     const char *property,
					     struct string *out)
{
	struct memiter data;

	if (!fdt_read_property(node, property, &data)) {
		return MANIFEST_ERROR_PROPERTY_NOT_FOUND;
	}

	switch (string_init(out, &data)) {
	case STRING_SUCCESS:
		return MANIFEST_SUCCESS;
	case STRING_ERROR_INVALID_INPUT:
		return MANIFEST_ERROR_MALFORMED_STRING;
	case STRING_ERROR_TOO_LONG:
		return MANIFEST_ERROR_STRING_TOO_LONG;
	}
}

static enum manifest_return_code read_optional_string(
	const struct fdt_node *node, const char *property, struct string *out)
{
	enum manifest_return_code ret;

	ret = read_string(node, property, out);
	if (ret == MANIFEST_ERROR_PROPERTY_NOT_FOUND) {
		string_init_empty(out);
		ret = MANIFEST_SUCCESS;
	}
	return ret;
}

static enum manifest_return_code read_uint64(const struct fdt_node *node,
					     const char *property,
					     uint64_t *out)
{
	struct memiter data;

	if (!fdt_read_property(node, property, &data)) {
		return MANIFEST_ERROR_PROPERTY_NOT_FOUND;
	}

	if (!fdt_parse_number(&data, memiter_size(&data), out)) {
		return MANIFEST_ERROR_MALFORMED_INTEGER;
	}

	return MANIFEST_SUCCESS;
}

static enum manifest_return_code read_optional_uint64(
	const struct fdt_node *node, const char *property,
	uint64_t default_value, uint64_t *out)
{
	enum manifest_return_code ret;

	ret = read_uint64(node, property, out);
	if (ret == MANIFEST_ERROR_PROPERTY_NOT_FOUND) {
		*out = default_value;
		return MANIFEST_SUCCESS;
	}
	return ret;
}

static enum manifest_return_code read_uint32(const struct fdt_node *node,
					     const char *property,
					     uint32_t *out)
{
	uint64_t value;

	TRY(read_uint64(node, property, &value));

	if (value > UINT32_MAX) {
		return MANIFEST_ERROR_INTEGER_OVERFLOW;
	}

	*out = (uint32_t)value;
	return MANIFEST_SUCCESS;
}

static enum manifest_return_code read_optional_uint32(
	const struct fdt_node *node, const char *property,
	uint32_t default_value, uint32_t *out)
{
	enum manifest_return_code ret;

	ret = read_uint32(node, property, out);
	if (ret == MANIFEST_ERROR_PROPERTY_NOT_FOUND) {
		*out = default_value;
		return MANIFEST_SUCCESS;
	}
	return ret;
}

static enum manifest_return_code read_uint16(const struct fdt_node *node,
					     const char *property,
					     uint16_t *out)
{
	uint64_t value;

	TRY(read_uint64(node, property, &value));

	if (value > UINT16_MAX) {
		return MANIFEST_ERROR_INTEGER_OVERFLOW;
	}

	*out = (uint16_t)value;
	return MANIFEST_SUCCESS;
}

static enum manifest_return_code read_optional_uint16(
	const struct fdt_node *node, const char *property,
	uint16_t default_value, uint16_t *out)
{
	enum manifest_return_code ret;

	ret = read_uint16(node, property, out);
	if (ret == MANIFEST_ERROR_PROPERTY_NOT_FOUND) {
		*out = default_value;
		return MANIFEST_SUCCESS;
	}

	/* Propagate malformed/overflow errors like the other helpers. */
	return ret;
}

static enum manifest_return_code read_uint8(const struct fdt_node *node,
					    const char *property, uint8_t *out)
{
	uint64_t value;

	TRY(read_uint64(node, property, &value));

	if (value > UINT8_MAX) {
		return MANIFEST_ERROR_INTEGER_OVERFLOW;
	}

	*out = (uint8_t)value;
	return MANIFEST_SUCCESS;
}

struct uint32list_iter {
	struct memiter mem_it;
};

static enum manifest_return_code read_optional_uint32list(
	const struct fdt_node *node, const char *property,
	struct uint32list_iter *out)
{
	struct memiter data;

	if (!fdt_read_property(node, property, &data)) {
		memiter_init(&out->mem_it, NULL, 0);
		return MANIFEST_SUCCESS;
	}

	if ((memiter_size(&data) % sizeof(uint32_t)) != 0) {
		return MANIFEST_ERROR_MALFORMED_INTEGER_LIST;
	}

	out->mem_it = data;
	return MANIFEST_SUCCESS;
}

static bool uint32list_has_next(const struct uint32list_iter *list)
{
	return memiter_size(&list->mem_it) > 0;
}

static enum manifest_return_code uint32list_get_next(
	struct uint32list_iter *list, uint32_t *out)
{
	uint64_t num;

	CHECK(uint32list_has_next(list));
	if (!fdt_parse_number(&list->mem_it, sizeof(uint32_t), &num)) {
		return MANIFEST_ERROR_MALFORMED_INTEGER;
	}

	*out = (uint32_t)num;
	return MANIFEST_SUCCESS;
}

static enum manifest_return_code parse_vm_common(const struct fdt_node *node,
						 struct manifest_vm *vm,
						 ffa_vm_id_t vm_id)
{
	struct uint32list_iter smcs;
	size_t idx;

	TRY(read_bool(node, "is_ffa_partition", &vm->is_ffa_partition));

	TRY(read_string(node, "debug_name", &vm->debug_name));

	TRY(read_optional_uint32list(node, "smc_whitelist", &smcs));
	while (uint32list_has_next(&smcs) &&
	       vm->smc_whitelist.smc_count < MAX_SMCS) {
		idx = vm->smc_whitelist.smc_count++;
		TRY(uint32list_get_next(&smcs, &vm->smc_whitelist.smcs[idx]));
	}

	if (uint32list_has_next(&smcs)) {
		dlog_warning("%s SMC whitelist too long.\n", vm->debug_name);
	}

	TRY(read_bool(node, "smc_whitelist_permissive",
		      &vm->smc_whitelist.permissive));

	if (vm_id != HF_PRIMARY_VM_ID) {
		TRY(read_uint64(node, "mem_size", &vm->secondary.mem_size));
		TRY(read_uint16(node, "vcpu_count", &vm->secondary.vcpu_count));
		TRY(read_optional_string(node, "fdt_filename",
					 &vm->secondary.fdt_filename));
	}

	return MANIFEST_SUCCESS;
}

static enum manifest_return_code parse_vm(struct fdt_node *node,
					  struct manifest_vm *vm,
					  ffa_vm_id_t vm_id)
{
	TRY(read_optional_string(node, "kernel_filename",
				 &vm->kernel_filename));

	if (vm_id == HF_PRIMARY_VM_ID) {
		TRY(read_optional_string(node, "ramdisk_filename",
					 &vm->primary.ramdisk_filename));
		TRY(read_optional_uint64(node, "boot_address",
					 MANIFEST_INVALID_ADDRESS,
					 &vm->primary.boot_address));
	}

	return MANIFEST_SUCCESS;
}
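
/*
 * Illustrative hypervisor manifest fragment (a sketch based on the
 * properties parsed by parse_vm_common() and parse_vm(); names of the VM
 * nodes follow generate_vm_node_name() and all values are examples only):
 *
 *	hypervisor {
 *		compatible = "hafnium,hafnium";
 *		vm1 {
 *			debug_name = "primary";
 *			kernel_filename = "vmlinuz";
 *			ramdisk_filename = "initrd.img";
 *		};
 *		vm2 {
 *			debug_name = "secondary";
 *			kernel_filename = "kernel0";
 *			vcpu_count = <2>;
 *			mem_size = <0x100000>;
 *		};
 *	};
 */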

static enum manifest_return_code parse_ffa_memory_region_node(
	struct fdt_node *mem_node, struct memory_region *mem_regions,
	uint8_t *count, struct rx_tx *rxtx)
{
	uint32_t phandle;
	uint8_t i = 0;

	dlog_verbose(" Partition memory regions\n");

	if (!fdt_is_compatible(mem_node, "arm,ffa-manifest-memory-regions")) {
		return MANIFEST_ERROR_NOT_COMPATIBLE;
	}

	if (!fdt_first_child(mem_node)) {
		return MANIFEST_ERROR_MEMORY_REGION_NODE_EMPTY;
	}

	do {
		dlog_verbose(" Memory Region[%u]\n", i);

		TRY(read_optional_string(mem_node, "description",
					 &mem_regions[i].name));
		dlog_verbose(" Name: %s\n",
			     string_data(&mem_regions[i].name));

		TRY(read_optional_uint64(mem_node, "base-address",
					 MANIFEST_INVALID_ADDRESS,
					 &mem_regions[i].base_address));
		dlog_verbose(" Base address: %#x\n",
			     mem_regions[i].base_address);

		TRY(read_uint32(mem_node, "pages-count",
				&mem_regions[i].page_count));
		dlog_verbose(" Pages_count: %u\n",
			     mem_regions[i].page_count);

		TRY(read_uint32(mem_node, "attributes",
				&mem_regions[i].attributes));
		mem_regions[i].attributes &= MM_PERM_MASK;
		dlog_verbose(" Attributes: %u\n",
			     mem_regions[i].attributes);

		if (rxtx->available) {
			TRY(read_optional_uint32(
				mem_node, "phandle",
				(uint32_t)MANIFEST_INVALID_ADDRESS, &phandle));
			if (phandle == rxtx->rx_phandle) {
				dlog_verbose(" Assigned as RX buffer\n");
				rxtx->rx_buffer = &mem_regions[i];
			} else if (phandle == rxtx->tx_phandle) {
				dlog_verbose(" Assigned as TX buffer\n");
				rxtx->tx_buffer = &mem_regions[i];
			}
		}

		i++;
	} while (fdt_next_sibling(mem_node) && (i < SP_MAX_MEMORY_REGIONS));

	if (rxtx->available &&
	    (rxtx->rx_buffer->page_count != rxtx->tx_buffer->page_count)) {
		return MANIFEST_ERROR_RXTX_SIZE_MISMATCH;
	}

	*count = i;

	return MANIFEST_SUCCESS;
}
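
/*
 * Illustrative memory-regions node (a sketch based on the properties read
 * above; node and region names and all values are examples only):
 *
 *	memory-regions {
 *		compatible = "arm,ffa-manifest-memory-regions";
 *		rx-buffer: rx {
 *			description = "rx-buffer";
 *			base-address = <0x0 0x7300000>;
 *			pages-count = <1>;
 *			attributes = <0x1>;
 *		};
 *	};
 *
 * The optional "phandle" of a region is matched against the rx-buffer and
 * tx-buffer phandles read from the rx_tx-info node.
 */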

static enum manifest_return_code parse_ffa_device_region_node(
	struct fdt_node *dev_node, struct device_region *dev_regions,
	uint8_t *count)
{
	struct uint32list_iter list;
	uint8_t i = 0;
	uint8_t j = 0;

	dlog_verbose(" Partition Device Regions\n");

	if (!fdt_is_compatible(dev_node, "arm,ffa-manifest-device-regions")) {
		return MANIFEST_ERROR_NOT_COMPATIBLE;
	}

	if (!fdt_first_child(dev_node)) {
		return MANIFEST_ERROR_DEVICE_REGION_NODE_EMPTY;
	}

	do {
		dlog_verbose(" Device Region[%u]\n", i);

		TRY(read_optional_string(dev_node, "description",
					 &dev_regions[i].name));
		dlog_verbose(" Name: %s\n",
			     string_data(&dev_regions[i].name));

		TRY(read_uint64(dev_node, "base-address",
				&dev_regions[i].base_address));
		dlog_verbose(" Base address: %#x\n",
			     dev_regions[i].base_address);

		TRY(read_uint32(dev_node, "pages-count",
				&dev_regions[i].page_count));
		dlog_verbose(" Pages_count: %u\n",
			     dev_regions[i].page_count);

		TRY(read_uint32(dev_node, "attributes",
				&dev_regions[i].attributes));
		dev_regions[i].attributes =
			(dev_regions[i].attributes & MM_PERM_MASK) | MM_MODE_D;
		dlog_verbose(" Attributes: %u\n",
			     dev_regions[i].attributes);

		TRY(read_optional_uint32list(dev_node, "interrupts", &list));
		dlog_verbose(" Interrupt List:\n");
		j = 0;
		while (uint32list_has_next(&list) &&
		       j < SP_MAX_INTERRUPTS_PER_DEVICE) {
			TRY(uint32list_get_next(
				&list, &dev_regions[i].interrupts[j].id));
			if (uint32list_has_next(&list)) {
				TRY(uint32list_get_next(
					&list,
					&dev_regions[i].interrupts[j]
						 .attributes));
			} else {
				return MANIFEST_ERROR_MALFORMED_INTEGER_LIST;
			}

			dlog_verbose(" ID = %u, attributes = %u\n",
				     dev_regions[i].interrupts[j].id,
				     dev_regions[i].interrupts[j].attributes);
			j++;
		}
		if (j == 0) {
			dlog_verbose(" Empty\n");
		}

		TRY(read_optional_uint32(dev_node, "smmu-id",
					 (uint32_t)MANIFEST_INVALID_ADDRESS,
					 &dev_regions[i].smmu_id));
		dlog_verbose(" smmu-id: %u\n", dev_regions[i].smmu_id);

		TRY(read_optional_uint32list(dev_node, "stream-ids", &list));
		dlog_verbose(" Stream IDs assigned:\n");

		j = 0;
		while (uint32list_has_next(&list) &&
		       j < SP_MAX_STREAMS_PER_DEVICE) {
			TRY(uint32list_get_next(&list,
						&dev_regions[i].stream_ids[j]));
			dlog_verbose(" %u\n",
				     dev_regions[i].stream_ids[j]);
			j++;
		}
		if (j == 0) {
			dlog_verbose(" None\n");
		}

		TRY(read_bool(dev_node, "exclusive-access",
			      &dev_regions[i].exclusive_access));
		dlog_verbose(" Exclusive_access: %d\n",
			     dev_regions[i].exclusive_access);

		i++;
	} while (fdt_next_sibling(dev_node) && (i < SP_MAX_DEVICE_REGIONS));

	*count = i;

	return MANIFEST_SUCCESS;
}
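
/*
 * Illustrative device-regions node (a sketch based on the properties read
 * above; names and values are examples only). "interrupts" is a flat list of
 * <id attributes> pairs, which is why the parser rejects an odd number of
 * cells:
 *
 *	device-regions {
 *		compatible = "arm,ffa-manifest-device-regions";
 *		uart1 {
 *			description = "uart";
 *			base-address = <0x0 0x1c0a0000>;
 *			pages-count = <1>;
 *			attributes = <0x3>;
 *			interrupts = <38 0x900>;
 *			exclusive-access;
 *		};
 *	};
 */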

static enum manifest_return_code parse_ffa_manifest(struct fdt *fdt,
						    struct manifest_vm *vm)
{
	unsigned int i = 0;
	struct uint32list_iter uuid;
	uint32_t uuid_word;
	struct fdt_node root;
	struct fdt_node ffa_node;
	struct string rxtx_node_name = STRING_INIT("rx_tx-info");
	struct string mem_region_node_name = STRING_INIT("memory-regions");
	struct string dev_region_node_name = STRING_INIT("device-regions");

	if (!fdt_find_node(fdt, "/", &root)) {
		return MANIFEST_ERROR_NO_ROOT_NODE;
	}

	/* Check "compatible" property. */
	if (!fdt_is_compatible(&root, "arm,ffa-manifest-1.0")) {
		return MANIFEST_ERROR_NOT_COMPATIBLE;
	}

	TRY(read_uint32(&root, "ffa-version", &vm->sp.ffa_version));
	dlog_verbose(" SP expected FF-A version %d.%d\n",
		     vm->sp.ffa_version >> 16, vm->sp.ffa_version & 0xffff);

	TRY(read_optional_uint32list(&root, "uuid", &uuid));

	while (uint32list_has_next(&uuid) && i < 4) {
		TRY(uint32list_get_next(&uuid, &uuid_word));
		vm->sp.uuid.uuid[i] = uuid_word;
		i++;
	}
	dlog_verbose(" SP UUID %#x-%x-%x-%x\n", vm->sp.uuid.uuid[0],
		     vm->sp.uuid.uuid[1], vm->sp.uuid.uuid[2],
		     vm->sp.uuid.uuid[3]);

	TRY(read_uint16(&root, "execution-ctx-count",
			&vm->sp.execution_ctx_count));
	dlog_verbose(" SP number of execution contexts %d\n",
		     vm->sp.execution_ctx_count);

	TRY(read_uint8(&root, "exception-level",
		       (uint8_t *)&vm->sp.run_time_el));
	dlog_verbose(" SP run-time EL %d\n", vm->sp.run_time_el);

	TRY(read_uint8(&root, "execution-state",
		       (uint8_t *)&vm->sp.execution_state));
	dlog_verbose(" SP execution state %d\n", vm->sp.execution_state);

	TRY(read_uint64(&root, "load-address", &vm->sp.load_addr));
	dlog_verbose(" SP load address %#x\n", vm->sp.load_addr);

	TRY(read_uint64(&root, "entrypoint-offset", &vm->sp.ep_offset));
	dlog_verbose(" SP entry point offset %#x\n", vm->sp.ep_offset);

	TRY(read_optional_uint16(&root, "boot-order", DEFAULT_BOOT_ORDER,
				 &vm->sp.boot_order));
	dlog_verbose(" SP boot order %u\n", vm->sp.boot_order);

	TRY(read_uint8(&root, "xlat-granule", (uint8_t *)&vm->sp.xlat_granule));
	dlog_verbose(" SP translation granule %d\n", vm->sp.xlat_granule);

	ffa_node = root;
	if (fdt_find_child(&ffa_node, &rxtx_node_name)) {
		if (!fdt_is_compatible(&ffa_node,
				       "arm,ffa-manifest-rx_tx-buffer")) {
			return MANIFEST_ERROR_NOT_COMPATIBLE;
		}

		/*
		 * Only read the phandles for now; they are used to resolve
		 * the RX/TX buffers while parsing the memory regions.
		 */
		TRY(read_uint32(&ffa_node, "rx-buffer",
				&vm->sp.rxtx.rx_phandle));

		TRY(read_uint32(&ffa_node, "tx-buffer",
				&vm->sp.rxtx.tx_phandle));

		vm->sp.rxtx.available = true;
	}

	TRY(read_uint8(&root, "messaging-method",
		       (uint8_t *)&vm->sp.messaging_method));
	dlog_verbose(" SP messaging method %d\n", vm->sp.messaging_method);

	/* Parse memory-regions. */
	ffa_node = root;
	if (fdt_find_child(&ffa_node, &mem_region_node_name)) {
		TRY(parse_ffa_memory_region_node(&ffa_node, vm->sp.mem_regions,
						 &vm->sp.mem_region_count,
						 &vm->sp.rxtx));
	}
	dlog_verbose(" Total %u memory regions found\n",
		     vm->sp.mem_region_count);

	/* Parse device-regions. */
	ffa_node = root;
	if (fdt_find_child(&ffa_node, &dev_region_node_name)) {
		TRY(parse_ffa_device_region_node(&ffa_node, vm->sp.dev_regions,
						 &vm->sp.dev_region_count));
	}
	dlog_verbose(" Total %u device regions found\n",
		     vm->sp.dev_region_count);

	return MANIFEST_SUCCESS;
}
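
/*
 * Illustrative FF-A partition manifest root (a sketch assembled from the
 * properties parsed above; all values are examples only and the encodings of
 * exception-level, execution-state, xlat-granule and messaging-method follow
 * the FF-A manifest binding):
 *
 *	/ {
 *		compatible = "arm,ffa-manifest-1.0";
 *		ffa-version = <0x10000>;	// (major << 16) | minor = 1.0
 *		uuid = <0xdeadbeef 0x0 0x0 0x0>;
 *		execution-ctx-count = <1>;
 *		exception-level = <2>;
 *		execution-state = <0>;
 *		load-address = <0x6280000>;
 *		entrypoint-offset = <0x1000>;
 *		xlat-granule = <0>;
 *		boot-order = <0>;
 *		messaging-method = <0>;
 *	};
 */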

static enum manifest_return_code sanity_check_ffa_manifest(
	struct manifest_vm *vm)
{
	uint16_t ffa_version_major;
	uint16_t ffa_version_minor;
	enum manifest_return_code ret_code = MANIFEST_SUCCESS;
	const char *error_string = "specified in manifest is unsupported";

	/* Ensure that the SPM version is compatible. */
	ffa_version_major =
		(vm->sp.ffa_version & 0xffff0000) >> FFA_VERSION_MAJOR_OFFSET;
	ffa_version_minor = vm->sp.ffa_version & 0xffff;

	if (ffa_version_major != FFA_VERSION_MAJOR ||
	    ffa_version_minor > FFA_VERSION_MINOR) {
		dlog_error("FF-A partition manifest version %s: %d.%d\n",
			   error_string, ffa_version_major, ffa_version_minor);
		ret_code = MANIFEST_ERROR_NOT_COMPATIBLE;
	}

	if (vm->sp.xlat_granule != PAGE_4KB) {
		dlog_error("Translation granule %s: %d\n", error_string,
			   vm->sp.xlat_granule);
		ret_code = MANIFEST_ERROR_NOT_COMPATIBLE;
	}

	if (vm->sp.execution_state != AARCH64) {
		dlog_error("Execution state %s: %d\n", error_string,
			   vm->sp.execution_state);
		ret_code = MANIFEST_ERROR_NOT_COMPATIBLE;
	}

	if (vm->sp.run_time_el != EL1 && vm->sp.run_time_el != S_EL1) {
		dlog_error("Exception level %s: %d\n", error_string,
			   vm->sp.run_time_el);
		ret_code = MANIFEST_ERROR_NOT_COMPATIBLE;
	}

	if (vm->sp.messaging_method != INDIRECT_MESSAGING &&
	    vm->sp.messaging_method != DIRECT_MESSAGING) {
		dlog_error("Messaging method %s: %x\n", error_string,
			   vm->sp.messaging_method);
		ret_code = MANIFEST_ERROR_NOT_COMPATIBLE;
	}

	return ret_code;
}

static enum manifest_return_code parse_ffa_partition_package(
	struct mm_stage1_locked stage1_locked, struct fdt_node *node,
	struct manifest_vm *vm, ffa_vm_id_t vm_id, struct mpool *ppool)
{
	enum manifest_return_code ret = MANIFEST_ERROR_NOT_COMPATIBLE;
	uintpaddr_t sp_pkg_addr;
	paddr_t sp_pkg_start;
	paddr_t sp_pkg_end;
	struct sp_pkg_header *sp_pkg;
	size_t sp_header_dtb_size;
	paddr_t sp_dtb_addr;
	struct fdt sp_fdt;

	/*
	 * The VM node must have been flagged as an FF-A partition; return
	 * failure straight away if that is not the case.
	 */
	if (!vm->is_ffa_partition) {
		return MANIFEST_ERROR_NOT_COMPATIBLE;
	}

	TRY(read_uint64(node, "load_address", &sp_pkg_addr));
	if (!is_aligned(sp_pkg_addr, PAGE_SIZE)) {
		return MANIFEST_ERROR_NOT_COMPATIBLE;
	}

	/* Map the first page of the SP package to extract the header. */
	sp_pkg_start = pa_init(sp_pkg_addr);
	sp_pkg_end = pa_add(sp_pkg_start, PAGE_SIZE);
	sp_pkg = mm_identity_map(stage1_locked, sp_pkg_start,
				 pa_add(sp_pkg_start, PAGE_SIZE), MM_MODE_R,
				 ppool);
	CHECK(sp_pkg != NULL);

	dlog_verbose("SP package load address %#x\n", sp_pkg_addr);

	if (sp_pkg->magic != SP_PKG_HEADER_MAGIC) {
		dlog_error("Invalid SP package magic.\n");
		goto exit_unmap;
	}

	if (sp_pkg->version != SP_PKG_HEADER_VERSION) {
		dlog_error("Invalid SP package version.\n");
		goto exit_unmap;
	}

	/* Expect the SP DTB to immediately follow the header. */
	if (sp_pkg->pm_offset != sizeof(struct sp_pkg_header)) {
		dlog_error("Invalid SP package manifest offset.\n");
		goto exit_unmap;
	}

	sp_header_dtb_size = align_up(
		sp_pkg->pm_size + sizeof(struct sp_pkg_header), PAGE_SIZE);
	if ((vm_id != HF_PRIMARY_VM_ID) &&
	    (sp_header_dtb_size >= vm->secondary.mem_size)) {
		dlog_error("Invalid SP package header or DT size.\n");
		goto exit_unmap;
	}

	if (sp_header_dtb_size > PAGE_SIZE) {
		/* Map the remainder of the header and DTB. */
		sp_pkg_end = pa_add(sp_pkg_start, sp_header_dtb_size);

		sp_pkg = mm_identity_map(stage1_locked, sp_pkg_start,
					 sp_pkg_end, MM_MODE_R, ppool);
		CHECK(sp_pkg != NULL);
	}

	sp_dtb_addr = pa_add(sp_pkg_start, sp_pkg->pm_offset);
	if (!fdt_init_from_ptr(&sp_fdt, (void *)sp_dtb_addr.pa,
			       sp_pkg->pm_size)) {
		dlog_error("FDT failed validation.\n");
		goto exit_unmap;
	}

	ret = parse_ffa_manifest(&sp_fdt, vm);
	if (ret != MANIFEST_SUCCESS) {
		goto exit_unmap;
	}

	ret = sanity_check_ffa_manifest(vm);

exit_unmap:
	CHECK(mm_unmap(stage1_locked, sp_pkg_start, sp_pkg_end, ppool));

	return ret;
}
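
/*
 * For reference, the layout this function expects at "load_address" (a
 * sketch inferred from the checks above, not a full definition of
 * struct sp_pkg_header):
 *
 *	+------------------------+  <- load_address (page aligned)
 *	| struct sp_pkg_header   |  magic, version, pm_offset, pm_size, ...
 *	+------------------------+  <- pm_offset == sizeof(struct sp_pkg_header)
 *	| partition manifest DTB |  pm_size bytes, parsed by parse_ffa_manifest()
 *	+------------------------+
 *	| partition image ...    |
 *	+------------------------+
 */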

/**
 * Parse manifest from FDT.
 */
enum manifest_return_code manifest_init(struct mm_stage1_locked stage1_locked,
					struct manifest *manifest,
					struct memiter *manifest_fdt,
					struct mpool *ppool)
{
	struct string vm_name;
	struct fdt fdt;
	struct fdt_node hyp_node;
	size_t i = 0;
	bool found_primary_vm = false;

	memset_s(manifest, sizeof(*manifest), 0, sizeof(*manifest));

	if (!fdt_init_from_memiter(&fdt, manifest_fdt)) {
		return MANIFEST_ERROR_FILE_SIZE; /* TODO */
	}

	/* Find hypervisor node. */
	if (!fdt_find_node(&fdt, "/hypervisor", &hyp_node)) {
		return MANIFEST_ERROR_NO_HYPERVISOR_FDT_NODE;
	}

	/* Check "compatible" property. */
	if (!fdt_is_compatible(&hyp_node, "hafnium,hafnium")) {
		return MANIFEST_ERROR_NOT_COMPATIBLE;
	}

	TRY(read_bool(&hyp_node, "ffa_tee", &manifest->ffa_tee_enabled));

	/* Iterate over reserved VM IDs and check no such nodes exist. */
	for (i = HF_VM_ID_BASE; i < HF_VM_ID_OFFSET; i++) {
		ffa_vm_id_t vm_id = (ffa_vm_id_t)i - HF_VM_ID_BASE;
		struct fdt_node vm_node = hyp_node;

		generate_vm_node_name(&vm_name, vm_id);
		if (fdt_find_child(&vm_node, &vm_name)) {
			return MANIFEST_ERROR_RESERVED_VM_ID;
		}
	}

	/* Iterate over VM nodes until we find one that does not exist. */
	for (i = 0; i <= MAX_VMS; ++i) {
		ffa_vm_id_t vm_id = HF_VM_ID_OFFSET + i;
		struct fdt_node vm_node = hyp_node;

		generate_vm_node_name(&vm_name, vm_id - HF_VM_ID_BASE);
		if (!fdt_find_child(&vm_node, &vm_name)) {
			break;
		}

		if (i == MAX_VMS) {
			return MANIFEST_ERROR_TOO_MANY_VMS;
		}

		if (vm_id == HF_PRIMARY_VM_ID) {
			CHECK(found_primary_vm == false); /* Sanity check. */
			found_primary_vm = true;
		}

		manifest->vm_count = i + 1;

		TRY(parse_vm_common(&vm_node, &manifest->vm[i], vm_id));

		if (manifest->vm[i].is_ffa_partition) {
			TRY(parse_ffa_partition_package(stage1_locked, &vm_node,
							&manifest->vm[i], vm_id,
							ppool));
		} else {
			TRY(parse_vm(&vm_node, &manifest->vm[i], vm_id));
		}
	}

	if (!found_primary_vm && vm_id_is_current_world(HF_PRIMARY_VM_ID)) {
		return MANIFEST_ERROR_NO_PRIMARY_VM;
	}

	return MANIFEST_SUCCESS;
}

const char *manifest_strerror(enum manifest_return_code ret_code)
{
	switch (ret_code) {
	case MANIFEST_SUCCESS:
		return "Success";
	case MANIFEST_ERROR_FILE_SIZE:
		return "Total size in header does not match file size";
	case MANIFEST_ERROR_MALFORMED_DTB:
		return "Malformed device tree blob";
	case MANIFEST_ERROR_NO_ROOT_NODE:
		return "Could not find root node in manifest";
	case MANIFEST_ERROR_NO_HYPERVISOR_FDT_NODE:
		return "Could not find \"hypervisor\" node in manifest";
	case MANIFEST_ERROR_NOT_COMPATIBLE:
		return "Hypervisor manifest entry not compatible with Hafnium";
	case MANIFEST_ERROR_RESERVED_VM_ID:
		return "Manifest defines a VM with a reserved ID";
	case MANIFEST_ERROR_NO_PRIMARY_VM:
		return "Manifest does not contain a primary VM entry";
	case MANIFEST_ERROR_TOO_MANY_VMS:
		return "Manifest specifies more VMs than Hafnium has "
		       "statically allocated space for";
	case MANIFEST_ERROR_PROPERTY_NOT_FOUND:
		return "Property not found";
	case MANIFEST_ERROR_MALFORMED_STRING:
		return "Malformed string property";
	case MANIFEST_ERROR_STRING_TOO_LONG:
		return "String too long";
	case MANIFEST_ERROR_MALFORMED_INTEGER:
		return "Malformed integer property";
	case MANIFEST_ERROR_INTEGER_OVERFLOW:
		return "Integer overflow";
	case MANIFEST_ERROR_MALFORMED_INTEGER_LIST:
		return "Malformed integer list property";
	case MANIFEST_ERROR_MALFORMED_BOOLEAN:
		return "Malformed boolean property";
	case MANIFEST_ERROR_MEMORY_REGION_NODE_EMPTY:
		return "Memory-region node should have at least one entry";
	case MANIFEST_ERROR_DEVICE_REGION_NODE_EMPTY:
		return "Device-region node should have at least one entry";
	case MANIFEST_ERROR_RXTX_SIZE_MISMATCH:
		return "RX and TX buffers should be of same size";
	}

	panic("Unexpected manifest return code.");
}