/*
 * Copyright 2019 The Hafnium Authors.
 *
 * Use of this source code is governed by a BSD-style
 * license that can be found in the LICENSE file or at
 * https://opensource.org/licenses/BSD-3-Clause.
 */

#include "hf/manifest.h"

#include "hf/arch/types.h"

#include "hf/addr.h"
#include "hf/assert.h"
#include "hf/boot_info.h"
#include "hf/check.h"
#include "hf/dlog.h"
#include "hf/sp_pkg.h"
#include "hf/static_assert.h"
#include "hf/std.h"

#define TRY(expr)                                            \
	do {                                                 \
		enum manifest_return_code ret_code = (expr); \
		if (ret_code != MANIFEST_SUCCESS) {          \
			return ret_code;                     \
		}                                            \
	} while (0)
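
/*
 * Usage sketch (illustrative only): TRY() makes the enclosing parser return
 * the first non-success code, so property reads can be chained without
 * explicit error handling at every step, e.g.:
 *
 *   TRY(read_uint32(node, "pages-count", &page_count));
 *   // Only reached when the read returned MANIFEST_SUCCESS.
 */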

#define VM_ID_MAX (HF_VM_ID_OFFSET + MAX_VMS - 1)
#define VM_ID_MAX_DIGITS (5)
#define VM_NAME_EXTRA_CHARS (3) /* "vm" + number + '\0' */
#define VM_NAME_MAX_SIZE (VM_ID_MAX_DIGITS + VM_NAME_EXTRA_CHARS)
static_assert(VM_NAME_MAX_SIZE <= STRING_MAX_SIZE,
	      "VM name does not fit into a struct string.");
static_assert(VM_ID_MAX <= 99999, "Insufficient VM_ID_MAX_DIGITS");
static_assert((HF_OTHER_WORLD_ID > VM_ID_MAX) ||
	      (HF_OTHER_WORLD_ID < HF_VM_ID_BASE),
	      "TrustZone VM ID clashes with normal VM range.");

/**
 * A struct to keep track of a VM's properties during early boot
 * manifest parsing.
 */
struct manifest_data {
	struct manifest manifest;
	struct interrupt_bitmap intids;
	struct {
		uintptr_t base;
		uintptr_t limit;
	} mem_regions[PARTITION_MAX_MEMORY_REGIONS * MAX_VMS];
};

/**
 * Calculate the number of entries in the ppool that are required to
 * store the manifest_data struct.
 */
static size_t manifest_data_ppool_entries =
	(align_up(sizeof(struct manifest_data), MM_PPOOL_ENTRY_SIZE) /
	 MM_PPOOL_ENTRY_SIZE);

static struct manifest_data *manifest_data;
/* Index used to track the number of memory regions allocated. */
static size_t allocated_mem_regions_index = 0;

/**
 * Allocates and clears memory for the manifest data in the given memory pool.
 * Returns true if the memory is successfully allocated.
 */
static bool manifest_data_init(struct mpool *ppool)
{
	manifest_data = (struct manifest_data *)mpool_alloc_contiguous(
		ppool, manifest_data_ppool_entries, 1);
	if (manifest_data == NULL) {
		return false;
	}

	memset_s(manifest_data, sizeof(struct manifest_data), 0,
		 sizeof(struct manifest_data));

	return true;
}

/**
 * Frees the memory used for the manifest data in the given memory pool.
 */
static void manifest_data_deinit(struct mpool *ppool)
{
	/**
	 * Clear and return the memory used for the manifest_data struct to the
	 * memory pool.
	 */
	memset_s(manifest_data, sizeof(struct manifest_data), 0,
		 sizeof(struct manifest_data));
	mpool_add_chunk(ppool, manifest_data, manifest_data_ppool_entries);

	/**
	 * Reset the index used for tracking the number of memory regions
	 * allocated.
	 */
	allocated_mem_regions_index = 0;
}

static inline size_t count_digits(ffa_vm_id_t vm_id)
{
	size_t digits = 0;

	do {
		digits++;
		vm_id /= 10;
	} while (vm_id);
	return digits;
}

/**
 * Generates a string with the two letters "vm" followed by an integer.
 * Assumes `str->data` holds at least VM_NAME_MAX_SIZE bytes.
 */
static void generate_vm_node_name(struct string *str, ffa_vm_id_t vm_id)
{
	static const char *digits = "0123456789";
	size_t vm_id_digits = count_digits(vm_id);
	char *base = str->data;
	char *ptr = base + (VM_NAME_EXTRA_CHARS + vm_id_digits);

	assert(vm_id_digits <= VM_ID_MAX_DIGITS);
	*(--ptr) = '\0';
	do {
		*(--ptr) = digits[vm_id % 10];
		vm_id /= 10;
	} while (vm_id);
	*(--ptr) = 'm';
	*(--ptr) = 'v';
	assert(ptr == base);
}
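
/*
 * For example, generate_vm_node_name(&name, 1) produces "vm1" and
 * generate_vm_node_name(&name, 42) produces "vm42", matching the "vmN"
 * child nodes looked up under /hypervisor in manifest_init() below.
 */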

/**
 * Reads a boolean property: true if present, false if not. If present, the
 * property's value must be empty, otherwise the property is considered
 * malformed.
 */
static enum manifest_return_code read_bool(const struct fdt_node *node,
					   const char *property, bool *out)
{
	struct memiter data;
	bool present = fdt_read_property(node, property, &data);

	if (present && memiter_size(&data) != 0) {
		return MANIFEST_ERROR_MALFORMED_BOOLEAN;
	}

	*out = present;
	return MANIFEST_SUCCESS;
}
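
/*
 * Illustrative manifest snippet (values are placeholders, not from a real
 * manifest): a boolean property is simply present with an empty value, e.g.
 *
 *   vm1 {
 *           is_ffa_partition;
 *   };
 *
 * A non-empty value such as `is_ffa_partition = <1>;` is rejected with
 * MANIFEST_ERROR_MALFORMED_BOOLEAN.
 */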

static enum manifest_return_code read_string(const struct fdt_node *node,
					     const char *property,
					     struct string *out)
{
	struct memiter data;

	if (!fdt_read_property(node, property, &data)) {
		return MANIFEST_ERROR_PROPERTY_NOT_FOUND;
	}

	switch (string_init(out, &data)) {
	case STRING_SUCCESS:
		return MANIFEST_SUCCESS;
	case STRING_ERROR_INVALID_INPUT:
		return MANIFEST_ERROR_MALFORMED_STRING;
	case STRING_ERROR_TOO_LONG:
		return MANIFEST_ERROR_STRING_TOO_LONG;
	}
}

static enum manifest_return_code read_optional_string(
	const struct fdt_node *node, const char *property, struct string *out)
{
	enum manifest_return_code ret;

	ret = read_string(node, property, out);
	if (ret == MANIFEST_ERROR_PROPERTY_NOT_FOUND) {
		string_init_empty(out);
		ret = MANIFEST_SUCCESS;
	}
	return ret;
}

static enum manifest_return_code read_uint64(const struct fdt_node *node,
					     const char *property,
					     uint64_t *out)
{
	struct memiter data;

	if (!fdt_read_property(node, property, &data)) {
		return MANIFEST_ERROR_PROPERTY_NOT_FOUND;
	}

	if (!fdt_parse_number(&data, memiter_size(&data), out)) {
		return MANIFEST_ERROR_MALFORMED_INTEGER;
	}

	return MANIFEST_SUCCESS;
}

static enum manifest_return_code read_optional_uint64(
	const struct fdt_node *node, const char *property,
	uint64_t default_value, uint64_t *out)
{
	enum manifest_return_code ret;

	ret = read_uint64(node, property, out);
	if (ret == MANIFEST_ERROR_PROPERTY_NOT_FOUND) {
		*out = default_value;
		return MANIFEST_SUCCESS;
	}
	return ret;
}

static enum manifest_return_code read_uint32(const struct fdt_node *node,
					     const char *property,
					     uint32_t *out)
{
	uint64_t value;

	TRY(read_uint64(node, property, &value));

	if (value > UINT32_MAX) {
		return MANIFEST_ERROR_INTEGER_OVERFLOW;
	}

	*out = (uint32_t)value;
	return MANIFEST_SUCCESS;
}

static enum manifest_return_code read_optional_uint32(
	const struct fdt_node *node, const char *property,
	uint32_t default_value, uint32_t *out)
{
	enum manifest_return_code ret;

	ret = read_uint32(node, property, out);
	if (ret == MANIFEST_ERROR_PROPERTY_NOT_FOUND) {
		*out = default_value;
		return MANIFEST_SUCCESS;
	}
	return ret;
}

static enum manifest_return_code read_uint16(const struct fdt_node *node,
					     const char *property,
					     uint16_t *out)
{
	uint64_t value;

	TRY(read_uint64(node, property, &value));

	if (value > UINT16_MAX) {
		return MANIFEST_ERROR_INTEGER_OVERFLOW;
	}

	*out = (uint16_t)value;
	return MANIFEST_SUCCESS;
}

static enum manifest_return_code read_optional_uint16(
	const struct fdt_node *node, const char *property,
	uint16_t default_value, uint16_t *out)
{
	enum manifest_return_code ret;

	ret = read_uint16(node, property, out);
	if (ret == MANIFEST_ERROR_PROPERTY_NOT_FOUND) {
		*out = default_value;
		return MANIFEST_SUCCESS;
	}

	return ret;
}

static enum manifest_return_code read_uint8(const struct fdt_node *node,
					    const char *property, uint8_t *out)
{
	uint64_t value;

	TRY(read_uint64(node, property, &value));

	if (value > UINT8_MAX) {
		return MANIFEST_ERROR_INTEGER_OVERFLOW;
	}

	*out = (uint8_t)value;
	return MANIFEST_SUCCESS;
}

static enum manifest_return_code read_optional_uint8(
	const struct fdt_node *node, const char *property,
	uint8_t default_value, uint8_t *out)
{
	enum manifest_return_code ret;

	ret = read_uint8(node, property, out);
	if (ret == MANIFEST_ERROR_PROPERTY_NOT_FOUND) {
		*out = default_value;
		return MANIFEST_SUCCESS;
	}

	return ret;
}

struct uint32list_iter {
	struct memiter mem_it;
};

static enum manifest_return_code read_uint32list(const struct fdt_node *node,
						 const char *property,
						 struct uint32list_iter *out)
{
	struct memiter data;

	if (!fdt_read_property(node, property, &data)) {
		memiter_init(&out->mem_it, NULL, 0);
		return MANIFEST_ERROR_PROPERTY_NOT_FOUND;
	}

	if ((memiter_size(&data) % sizeof(uint32_t)) != 0) {
		return MANIFEST_ERROR_MALFORMED_INTEGER_LIST;
	}

	out->mem_it = data;
	return MANIFEST_SUCCESS;
}

static enum manifest_return_code read_optional_uint32list(
	const struct fdt_node *node, const char *property,
	struct uint32list_iter *out)
{
	enum manifest_return_code ret = read_uint32list(node, property, out);

	if (ret == MANIFEST_ERROR_PROPERTY_NOT_FOUND) {
		return MANIFEST_SUCCESS;
	}
	return ret;
}

static bool uint32list_has_next(const struct uint32list_iter *list)
{
	return memiter_size(&list->mem_it) > 0;
}

static enum manifest_return_code uint32list_get_next(
	struct uint32list_iter *list, uint32_t *out)
{
	uint64_t num;

	CHECK(uint32list_has_next(list));
	if (!fdt_parse_number(&list->mem_it, sizeof(uint32_t), &num)) {
		return MANIFEST_ERROR_MALFORMED_INTEGER;
	}

	*out = (uint32_t)num;
	return MANIFEST_SUCCESS;
}
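
/*
 * Illustrative example: a cell list such as `smc_whitelist = <0x84000000
 * 0x84000001>;` is consumed one 32-bit value at a time with
 * uint32list_has_next()/uint32list_get_next(), as parse_vm_common() does
 * below. The SMC function IDs shown here are placeholders.
 */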

static enum manifest_return_code parse_vm_common(const struct fdt_node *node,
						 struct manifest_vm *vm,
						 ffa_vm_id_t vm_id)
{
	struct uint32list_iter smcs;
	size_t idx;

	TRY(read_bool(node, "is_ffa_partition", &vm->is_ffa_partition));

	TRY(read_bool(node, "hyp_loaded", &vm->is_hyp_loaded));

	TRY(read_string(node, "debug_name", &vm->debug_name));

	TRY(read_optional_uint32list(node, "smc_whitelist", &smcs));
	while (uint32list_has_next(&smcs) &&
	       vm->smc_whitelist.smc_count < MAX_SMCS) {
		idx = vm->smc_whitelist.smc_count++;
		TRY(uint32list_get_next(&smcs, &vm->smc_whitelist.smcs[idx]));
	}

	if (uint32list_has_next(&smcs)) {
		dlog_warning("%s SMC whitelist too long.\n", vm->debug_name);
	}

	TRY(read_bool(node, "smc_whitelist_permissive",
		      &vm->smc_whitelist.permissive));

	if (vm_id != HF_PRIMARY_VM_ID) {
		TRY(read_uint64(node, "mem_size", &vm->secondary.mem_size));
		TRY(read_uint16(node, "vcpu_count", &vm->secondary.vcpu_count));
		TRY(read_optional_string(node, "fdt_filename",
					 &vm->secondary.fdt_filename));
	}

	return MANIFEST_SUCCESS;
}

static enum manifest_return_code parse_vm(struct fdt_node *node,
					  struct manifest_vm *vm,
					  ffa_vm_id_t vm_id)
{
	TRY(read_optional_string(node, "kernel_filename",
				 &vm->kernel_filename));

	if (vm_id == HF_PRIMARY_VM_ID) {
		TRY(read_optional_string(node, "ramdisk_filename",
					 &vm->primary.ramdisk_filename));
		TRY(read_optional_uint64(node, "boot_address",
					 MANIFEST_INVALID_ADDRESS,
					 &vm->primary.boot_address));
	}
	TRY(read_optional_uint8(node, "exception-level", (uint8_t)EL1,
				(uint8_t *)&vm->partition.run_time_el));

	return MANIFEST_SUCCESS;
}

static bool check_and_record_mem_regions(uintptr_t base_address,
					 uint32_t page_count)
{
	uintptr_t limit = base_address + page_count * PAGE_SIZE;

	for (size_t i = 0; i < allocated_mem_regions_index; i++) {
		uintptr_t mem_region_base = manifest_data->mem_regions[i].base;
		uintptr_t mem_region_limit =
			manifest_data->mem_regions[i].limit;

		if ((base_address >= mem_region_base &&
		     base_address < mem_region_limit) ||
		    (limit < mem_region_limit && limit >= mem_region_base)) {
			dlog_error(
				"Overlapping memory regions\n"
				"New Region %#x - %#x\n"
				"Overlapping region %#x - %#x\n",
				base_address, limit, mem_region_base,
				mem_region_limit);
			return false;
		}
	}

	manifest_data->mem_regions[allocated_mem_regions_index].base =
		base_address;
	manifest_data->mem_regions[allocated_mem_regions_index].limit = limit;
	allocated_mem_regions_index++;

	return true;
}

static enum manifest_return_code parse_ffa_memory_region_node(
	struct fdt_node *mem_node, struct memory_region *mem_regions,
	uint16_t *count, struct rx_tx *rxtx)
{
	uint32_t phandle;
	uint16_t i = 0;

	dlog_verbose(" Partition memory regions\n");

	if (!fdt_is_compatible(mem_node, "arm,ffa-manifest-memory-regions")) {
		return MANIFEST_ERROR_NOT_COMPATIBLE;
	}

	if (!fdt_first_child(mem_node)) {
		return MANIFEST_ERROR_MEMORY_REGION_NODE_EMPTY;
	}

	do {
		dlog_verbose(" Memory Region[%u]\n", i);

		TRY(read_optional_string(mem_node, "description",
					 &mem_regions[i].name));
		dlog_verbose(" Name: %s\n",
			     string_data(&mem_regions[i].name));

		TRY(read_optional_uint64(mem_node, "base-address",
					 MANIFEST_INVALID_ADDRESS,
					 &mem_regions[i].base_address));
		dlog_verbose(" Base address: %#x\n",
			     mem_regions[i].base_address);

		TRY(read_uint32(mem_node, "pages-count",
				&mem_regions[i].page_count));
		dlog_verbose(" Pages_count: %u\n",
			     mem_regions[i].page_count);

		if (!check_and_record_mem_regions(mem_regions[i].base_address,
						  mem_regions[i].page_count)) {
			return MANIFEST_ERROR_MEM_REGION_OVERLAP;
		}

		TRY(read_uint32(mem_node, "attributes",
				&mem_regions[i].attributes));

		/*
		 * Check RWX permission attributes.
		 * Security attribute is checked at load phase.
		 */
		uint32_t permissions = mem_regions[i].attributes &
				       (MANIFEST_REGION_ATTR_READ |
					MANIFEST_REGION_ATTR_WRITE |
					MANIFEST_REGION_ATTR_EXEC);
		if (permissions != MANIFEST_REGION_ATTR_READ &&
		    permissions != (MANIFEST_REGION_ATTR_READ |
				    MANIFEST_REGION_ATTR_WRITE) &&
		    permissions != (MANIFEST_REGION_ATTR_READ |
				    MANIFEST_REGION_ATTR_EXEC)) {
			return MANIFEST_ERROR_INVALID_MEM_PERM;
		}

		/* Filter memory region attributes. */
		mem_regions[i].attributes &= MANIFEST_REGION_ALL_ATTR_MASK;

		dlog_verbose(" Attributes: %#x\n",
			     mem_regions[i].attributes);

		if (rxtx->available) {
			TRY(read_optional_uint32(
				mem_node, "phandle",
				(uint32_t)MANIFEST_INVALID_ADDRESS, &phandle));
			if (phandle == rxtx->rx_phandle) {
				dlog_verbose(" Assigned as RX buffer\n");
				rxtx->rx_buffer = &mem_regions[i];
			} else if (phandle == rxtx->tx_phandle) {
				dlog_verbose(" Assigned as TX buffer\n");
				rxtx->tx_buffer = &mem_regions[i];
			}
		}

		i++;
	} while (fdt_next_sibling(mem_node) &&
		 (i < PARTITION_MAX_MEMORY_REGIONS));

	if (rxtx->available &&
	    (rxtx->rx_buffer->page_count != rxtx->tx_buffer->page_count)) {
		return MANIFEST_ERROR_RXTX_SIZE_MISMATCH;
	}

	*count = i;

	return MANIFEST_SUCCESS;
}
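
/*
 * Illustrative memory-regions node (values are placeholders, not from a real
 * manifest; the attribute encoding follows the MANIFEST_REGION_ATTR_* bits):
 *
 *   memory-regions {
 *           compatible = "arm,ffa-manifest-memory-regions";
 *           rx-buffer {
 *                   description = "rx-buffer";
 *                   pages-count = <1>;
 *                   attributes = <0x3>;   // e.g. read/write (illustrative)
 *                   phandle = <0x2>;
 *           };
 *   };
 */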

static enum manifest_return_code parse_ffa_device_region_node(
	struct fdt_node *dev_node, struct device_region *dev_regions,
	uint16_t *count)
{
	struct uint32list_iter list;
	uint16_t i = 0;
	uint32_t j = 0;
	struct interrupt_bitmap allocated_intids = manifest_data->intids;

	dlog_verbose(" Partition Device Regions\n");

	if (!fdt_is_compatible(dev_node, "arm,ffa-manifest-device-regions")) {
		return MANIFEST_ERROR_NOT_COMPATIBLE;
	}

	if (!fdt_first_child(dev_node)) {
		return MANIFEST_ERROR_DEVICE_REGION_NODE_EMPTY;
	}

	do {
		dlog_verbose(" Device Region[%u]\n", i);

		TRY(read_optional_string(dev_node, "description",
					 &dev_regions[i].name));
		dlog_verbose(" Name: %s\n",
			     string_data(&dev_regions[i].name));

		TRY(read_uint64(dev_node, "base-address",
				&dev_regions[i].base_address));
		dlog_verbose(" Base address: %#x\n",
			     dev_regions[i].base_address);

		TRY(read_uint32(dev_node, "pages-count",
				&dev_regions[i].page_count));
		dlog_verbose(" Pages_count: %u\n",
			     dev_regions[i].page_count);

		TRY(read_uint32(dev_node, "attributes",
				&dev_regions[i].attributes));

		/*
		 * Check RWX permission attributes.
		 * Security attribute is checked at load phase.
		 */
		uint32_t permissions = dev_regions[i].attributes &
				       (MANIFEST_REGION_ATTR_READ |
					MANIFEST_REGION_ATTR_WRITE |
					MANIFEST_REGION_ATTR_EXEC);

		if (permissions != MANIFEST_REGION_ATTR_READ &&
		    permissions != (MANIFEST_REGION_ATTR_READ |
				    MANIFEST_REGION_ATTR_WRITE)) {
			return MANIFEST_ERROR_INVALID_MEM_PERM;
		}

		/* Filter device region attributes. */
		dev_regions[i].attributes = dev_regions[i].attributes &
					    MANIFEST_REGION_ALL_ATTR_MASK;

		dlog_verbose(" Attributes: %#x\n",
			     dev_regions[i].attributes);

		TRY(read_optional_uint32list(dev_node, "interrupts", &list));
		dlog_verbose(" Interrupt List:\n");
		j = 0;
		while (uint32list_has_next(&list) &&
		       j < PARTITION_MAX_INTERRUPTS_PER_DEVICE) {
			uint32_t intid;

			TRY(uint32list_get_next(
				&list, &dev_regions[i].interrupts[j].id));
			intid = dev_regions[i].interrupts[j].id;

			dlog_verbose(" ID = %u\n", intid);

			if (interrupt_bitmap_get_value(&allocated_intids,
						       intid) == 1U) {
				return MANIFEST_ERROR_INTERRUPT_ID_REPEATED;
			}

			interrupt_bitmap_set_value(&allocated_intids, intid);

			if (uint32list_has_next(&list)) {
				TRY(uint32list_get_next(&list,
							&dev_regions[i]
								 .interrupts[j]
								 .attributes));
			} else {
				return MANIFEST_ERROR_MALFORMED_INTEGER_LIST;
			}

			dlog_verbose(" attributes = %u\n",
				     dev_regions[i].interrupts[j].attributes);
			j++;
		}

		dev_regions[i].interrupt_count = j;
		if (j == 0) {
			dlog_verbose(" Empty\n");
		}

		TRY(read_optional_uint32(dev_node, "smmu-id",
					 MANIFEST_INVALID_ID,
					 &dev_regions[i].smmu_id));
		if (dev_regions[i].smmu_id != MANIFEST_INVALID_ID) {
			dlog_verbose(" smmu-id: %u\n",
				     dev_regions[i].smmu_id);
		}

		TRY(read_optional_uint32list(dev_node, "stream-ids", &list));
		dlog_verbose(" Stream IDs assigned:\n");

		j = 0;
		while (uint32list_has_next(&list) &&
		       j < PARTITION_MAX_STREAMS_PER_DEVICE) {
			TRY(uint32list_get_next(&list,
						&dev_regions[i].stream_ids[j]));
			dlog_verbose(" %u\n",
				     dev_regions[i].stream_ids[j]);
			j++;
		}
		if (j == 0) {
			dlog_verbose(" None\n");
		}
		dev_regions[i].stream_count = j;

		TRY(read_bool(dev_node, "exclusive-access",
			      &dev_regions[i].exclusive_access));
		dlog_verbose(" Exclusive_access: %u\n",
			     dev_regions[i].exclusive_access);

		i++;
	} while (fdt_next_sibling(dev_node) &&
		 (i < PARTITION_MAX_DEVICE_REGIONS));

	*count = i;

	return MANIFEST_SUCCESS;
}
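
/*
 * Illustrative device-regions node (addresses, interrupt IDs and attribute
 * values are placeholders, not from a real platform):
 *
 *   device-regions {
 *           compatible = "arm,ffa-manifest-device-regions";
 *           uart {
 *                   description = "uart";
 *                   base-address = <0x0 0x1c090000>;
 *                   pages-count = <1>;
 *                   attributes = <0x3>;          // e.g. read/write
 *                   interrupts = <37 0x900>;     // one <id attributes> pair
 *                   exclusive-access;
 *           };
 *   };
 *
 * Each "interrupts" entry is an <id, attributes> pair of cells, which is why
 * an odd-length list is rejected with MANIFEST_ERROR_MALFORMED_INTEGER_LIST.
 */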

static enum manifest_return_code sanity_check_ffa_manifest(
	struct manifest_vm *vm)
{
	uint16_t ffa_version_major;
	uint16_t ffa_version_minor;
	enum manifest_return_code ret_code = MANIFEST_SUCCESS;
	const char *error_string = "specified in manifest is unsupported";
	uint32_t k = 0;

	/* Ensure that the SPM version is compatible. */
	ffa_version_major = (vm->partition.ffa_version & 0xffff0000) >>
			    FFA_VERSION_MAJOR_OFFSET;
	ffa_version_minor = vm->partition.ffa_version & 0xffff;

	if (ffa_version_major != FFA_VERSION_MAJOR ||
	    ffa_version_minor > FFA_VERSION_MINOR) {
		dlog_error("FF-A partition manifest version %s: %u.%u\n",
			   error_string, ffa_version_major, ffa_version_minor);
		ret_code = MANIFEST_ERROR_NOT_COMPATIBLE;
	}

	if (vm->partition.xlat_granule != PAGE_4KB) {
		dlog_error("Translation granule %s: %u\n", error_string,
			   vm->partition.xlat_granule);
		ret_code = MANIFEST_ERROR_NOT_COMPATIBLE;
	}

	if (vm->partition.execution_state != AARCH64) {
		dlog_error("Execution state %s: %u\n", error_string,
			   vm->partition.execution_state);
		ret_code = MANIFEST_ERROR_NOT_COMPATIBLE;
	}

	if (vm->partition.run_time_el != EL1 &&
	    vm->partition.run_time_el != S_EL1 &&
	    vm->partition.run_time_el != S_EL0) {
		dlog_error("Exception level %s: %d\n", error_string,
			   vm->partition.run_time_el);
		ret_code = MANIFEST_ERROR_NOT_COMPATIBLE;
	}

	if ((vm->partition.messaging_method &
	     ~(FFA_PARTITION_DIRECT_REQ_RECV | FFA_PARTITION_DIRECT_REQ_SEND |
	       FFA_PARTITION_INDIRECT_MSG)) != 0U) {
		dlog_error("Messaging method %s: %x\n", error_string,
			   vm->partition.messaging_method);
		ret_code = MANIFEST_ERROR_NOT_COMPATIBLE;
	}

	if (vm->partition.run_time_el == S_EL0 &&
	    vm->partition.execution_ctx_count != 1) {
		dlog_error(
			"Exception level and execution context count %s: %d "
			"%d\n",
			error_string, vm->partition.run_time_el,
			vm->partition.execution_ctx_count);
		ret_code = MANIFEST_ERROR_NOT_COMPATIBLE;
	}

	for (uint16_t i = 0; i < vm->partition.dev_region_count; i++) {
		struct device_region dev_region;

		dev_region = vm->partition.dev_regions[i];

		if (dev_region.interrupt_count >
		    PARTITION_MAX_INTERRUPTS_PER_DEVICE) {
			dlog_error(
				"Interrupt count for device region exceeds "
				"limit.\n");
			ret_code = MANIFEST_ERROR_NOT_COMPATIBLE;
			continue;
		}

		for (uint8_t j = 0; j < dev_region.interrupt_count; j++) {
			k++;
			if (k > VM_MANIFEST_MAX_INTERRUPTS) {
				dlog_error(
					"Interrupt count for VM exceeds "
					"limit.\n");
				ret_code = MANIFEST_ERROR_NOT_COMPATIBLE;
				continue;
			}
		}
	}

	/* GP register is restricted to one of x0 - x3. */
	if (vm->partition.gp_register_num != -1 &&
	    vm->partition.gp_register_num > 3) {
		dlog_error("GP register number %s: %u\n", error_string,
			   vm->partition.gp_register_num);
		ret_code = MANIFEST_ERROR_NOT_COMPATIBLE;
	}

	return ret_code;
}

enum manifest_return_code parse_ffa_manifest(struct fdt *fdt,
					     struct manifest_vm *vm,
					     struct fdt_node *boot_info_node)
{
	unsigned int i = 0;
	struct uint32list_iter uuid;
	uint32_t uuid_word;
	struct fdt_node root;
	struct fdt_node ffa_node;
	struct string rxtx_node_name = STRING_INIT("rx_tx-info");
	struct string mem_region_node_name = STRING_INIT("memory-regions");
	struct string dev_region_node_name = STRING_INIT("device-regions");
	struct string boot_info_node_name = STRING_INIT("boot-info");
	bool managed_exit_field_present = false;

	if (!fdt_find_node(fdt, "/", &root)) {
		return MANIFEST_ERROR_NO_ROOT_NODE;
	}

	/* Check "compatible" property. */
	if (!fdt_is_compatible(&root, "arm,ffa-manifest-1.0")) {
		return MANIFEST_ERROR_NOT_COMPATIBLE;
	}

	TRY(read_uint32(&root, "ffa-version", &vm->partition.ffa_version));
	dlog_verbose(" Expected FF-A version %u.%u\n",
		     vm->partition.ffa_version >> 16,
		     vm->partition.ffa_version & 0xffff);

	TRY(read_uint32list(&root, "uuid", &uuid));

	while (uint32list_has_next(&uuid) && i < 4) {
		TRY(uint32list_get_next(&uuid, &uuid_word));
		vm->partition.uuid.uuid[i] = uuid_word;
		i++;
	}
	dlog_verbose(" UUID %#x-%x-%x-%x\n", vm->partition.uuid.uuid[0],
		     vm->partition.uuid.uuid[1], vm->partition.uuid.uuid[2],
		     vm->partition.uuid.uuid[3]);

	TRY(read_uint16(&root, "execution-ctx-count",
			&vm->partition.execution_ctx_count));
	dlog_verbose(" Number of execution context %u\n",
		     vm->partition.execution_ctx_count);

	TRY(read_uint8(&root, "exception-level",
		       (uint8_t *)&vm->partition.run_time_el));
	dlog_verbose(" Run-time EL %u\n", vm->partition.run_time_el);

	TRY(read_uint8(&root, "execution-state",
		       (uint8_t *)&vm->partition.execution_state));
	dlog_verbose(" Execution state %u\n", vm->partition.execution_state);

	TRY(read_optional_uint64(&root, "load-address", 0,
				 &vm->partition.load_addr));
	dlog_verbose(" Load address %#x\n", vm->partition.load_addr);

	TRY(read_optional_uint64(&root, "entrypoint-offset", 0,
				 &vm->partition.ep_offset));
	dlog_verbose(" Entry point offset %#x\n", vm->partition.ep_offset);

	TRY(read_optional_uint32(&root, "gp-register-num",
				 DEFAULT_BOOT_GP_REGISTER,
				 &vm->partition.gp_register_num));
	dlog_verbose(" Boot GP register: %#x\n",
		     vm->partition.gp_register_num);

	TRY(read_optional_uint16(&root, "boot-order", DEFAULT_BOOT_ORDER,
				 &vm->partition.boot_order));
	dlog_verbose(" Boot order %u\n", vm->partition.boot_order);

	TRY(read_optional_uint8(&root, "xlat-granule", 0,
				(uint8_t *)&vm->partition.xlat_granule));
	dlog_verbose(" Translation granule %u\n", vm->partition.xlat_granule);

	ffa_node = root;
	if (fdt_find_child(&ffa_node, &rxtx_node_name)) {
		if (!fdt_is_compatible(&ffa_node,
				       "arm,ffa-manifest-rx_tx-buffer")) {
			return MANIFEST_ERROR_NOT_COMPATIBLE;
		}

		/*
		 * Read only phandles for now; they will be used to update the
		 * buffers while parsing memory regions.
		 */
		TRY(read_uint32(&ffa_node, "rx-buffer",
				&vm->partition.rxtx.rx_phandle));

		TRY(read_uint32(&ffa_node, "tx-buffer",
				&vm->partition.rxtx.tx_phandle));

		vm->partition.rxtx.available = true;
	}

	TRY(read_uint8(&root, "messaging-method",
		       (uint8_t *)&vm->partition.messaging_method));
	dlog_verbose(" Messaging method %u\n", vm->partition.messaging_method);

	TRY(read_bool(&root, "managed-exit", &managed_exit_field_present));

	TRY(read_optional_uint8(
		&root, "ns-interrupts-action", NS_ACTION_SIGNALED,
		(uint8_t *)&vm->partition.ns_interrupts_action));

	/*
	 * An SP manifest can specify one of the fields listed below:
	 * `managed-exit`: Introduced in FF-A v1.0 spec.
	 * `ns-interrupts-action`: Introduced in FF-A v1.1 EAC0 spec.
	 * If both are missing from the manifest, the default response is
	 * NS_ACTION_SIGNALED.
	 */
	if (managed_exit_field_present) {
		vm->partition.ns_interrupts_action = NS_ACTION_ME;
	}

	if (vm->partition.ns_interrupts_action != NS_ACTION_QUEUED &&
	    vm->partition.ns_interrupts_action != NS_ACTION_ME &&
	    vm->partition.ns_interrupts_action != NS_ACTION_SIGNALED) {
		return MANIFEST_ILLEGAL_NS_ACTION;
	}

	dlog_verbose(
		"NS Interrupts %s\n",
		(vm->partition.ns_interrupts_action == NS_ACTION_QUEUED)
			? "Queued"
		: (vm->partition.ns_interrupts_action == NS_ACTION_SIGNALED)
			? "Signaled"
			: "Managed exit");

	if (vm->partition.ns_interrupts_action == NS_ACTION_ME) {
		/* Managed exit only supported by S_EL1 partitions. */
		if (vm->partition.run_time_el != S_EL1) {
			dlog_error(
				"Managed exit cannot be supported by this "
				"partition\n");
			return MANIFEST_ILLEGAL_NS_ACTION;
		}

		TRY(read_bool(&root, "managed-exit-virq",
			      &vm->partition.me_signal_virq));
		if (vm->partition.me_signal_virq) {
			dlog_verbose(" Managed Exit signaled through vIRQ\n");
		}
	}

	TRY(read_bool(&root, "notification-support",
		      &vm->partition.notification_support));
	if (vm->partition.notification_support) {
		dlog_verbose(" Notifications Receipt Supported\n");
	}

	/* Parse boot info node. */
	if (boot_info_node != NULL) {
		ffa_node = root;
		vm->partition.boot_info =
			fdt_find_child(&ffa_node, &boot_info_node_name);
		if (vm->partition.boot_info) {
			*boot_info_node = ffa_node;
		}
	} else {
		vm->partition.boot_info = false;
	}

	/* Parse memory-regions */
	ffa_node = root;
	if (fdt_find_child(&ffa_node, &mem_region_node_name)) {
		TRY(parse_ffa_memory_region_node(
			&ffa_node, vm->partition.mem_regions,
			&vm->partition.mem_region_count, &vm->partition.rxtx));
	}
	dlog_verbose(" Total %u memory regions found\n",
		     vm->partition.mem_region_count);

	/* Parse Device-regions */
	ffa_node = root;
	if (fdt_find_child(&ffa_node, &dev_region_node_name)) {
		TRY(parse_ffa_device_region_node(
			&ffa_node, vm->partition.dev_regions,
			&vm->partition.dev_region_count));
	}
	dlog_verbose(" Total %u device regions found\n",
		     vm->partition.dev_region_count);

	return sanity_check_ffa_manifest(vm);
}
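
/*
 * Illustrative skeleton of an FF-A partition manifest accepted by
 * parse_ffa_manifest(); all values are placeholders. exception-level,
 * execution-state and messaging-method are also mandatory, with numeric
 * encodings defined by the corresponding enums/flags and therefore not
 * spelled out here.
 *
 *   / {
 *           compatible = "arm,ffa-manifest-1.0";
 *           ffa-version = <0x10001>;    // major in upper 16 bits, minor below
 *           uuid = <0x01234567 0x89abcdef 0x01234567 0x89abcdef>;
 *           execution-ctx-count = <1>;
 *           load-address = <0x6480000>;
 *           entrypoint-offset = <0x2000>;
 *
 *           memory-regions { ... };     // see parse_ffa_memory_region_node()
 *           device-regions { ... };     // see parse_ffa_device_region_node()
 *   };
 */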

static enum manifest_return_code parse_ffa_partition_package(
	struct mm_stage1_locked stage1_locked, struct fdt_node *node,
	struct manifest_vm *vm, ffa_vm_id_t vm_id, struct mpool *ppool)
{
	enum manifest_return_code ret = MANIFEST_ERROR_NOT_COMPATIBLE;
	uintpaddr_t load_address;
	struct sp_pkg_header header;
	struct fdt sp_fdt;
	vaddr_t pkg_start;
	vaddr_t manifest_address;
	struct fdt_node boot_info_node;

	/*
	 * This must have been hinted as being an FF-A partition,
	 * return straight with failure if this is not the case.
	 */
	if (!vm->is_ffa_partition) {
		return ret;
	}

	TRY(read_uint64(node, "load_address", &load_address));
	if (!is_aligned(load_address, PAGE_SIZE)) {
		return MANIFEST_ERROR_NOT_COMPATIBLE;
	}

	assert(load_address != 0U);

	if (!sp_pkg_init(stage1_locked, pa_init(load_address), &header,
			 ppool)) {
		return ret;
	}

	pkg_start = va_init(load_address);

	if (vm_id != HF_PRIMARY_VM_ID &&
	    sp_pkg_get_mem_size(&header) >= vm->secondary.mem_size) {
		dlog_error("Invalid package header or DT size.\n");
		goto out;
	}

	manifest_address = va_add(va_init(load_address), header.pm_offset);
	if (!fdt_init_from_ptr(&sp_fdt, ptr_from_va(manifest_address),
			       header.pm_size)) {
		dlog_error("FDT failed validation.\n");
		goto out;
	}

	ret = parse_ffa_manifest(&sp_fdt, vm, &boot_info_node);
	if (ret != MANIFEST_SUCCESS) {
		dlog_error("Error parsing partition manifest: %s.\n",
			   manifest_strerror(ret));
		goto out;
	}

	if (vm->partition.load_addr != load_address) {
		dlog_warning(
			"Partition's load address in its manifest differs"
			" from the one specified in its package.\n");
		vm->partition.load_addr = load_address;
	}

	if (vm->partition.gp_register_num != DEFAULT_BOOT_GP_REGISTER) {
		if (header.version == SP_PKG_HEADER_VERSION_2 &&
		    vm->partition.boot_info &&
		    !ffa_boot_info_node(&boot_info_node, pkg_start, &header)) {
			dlog_error("Failed to process boot information.\n");
		}
	}

out:
	sp_pkg_deinit(stage1_locked, pkg_start, &header, ppool);
	return ret;
}

/**
 * Parse manifest from FDT.
 */
enum manifest_return_code manifest_init(struct mm_stage1_locked stage1_locked,
					struct manifest **manifest_ret,
					struct memiter *manifest_fdt,
					struct mpool *ppool)
{
	struct manifest *manifest;
	struct string vm_name;
	struct fdt fdt;
	struct fdt_node hyp_node;
	size_t i = 0;
	bool found_primary_vm = false;

	/* Allocate space in the ppool for the manifest data. */
	if (!manifest_data_init(ppool)) {
		panic("Unable to allocate manifest data.\n");
	}

	manifest = &manifest_data->manifest;
	*manifest_ret = manifest;

	if (!fdt_init_from_memiter(&fdt, manifest_fdt)) {
		return MANIFEST_ERROR_FILE_SIZE; /* TODO */
	}

	/* Find hypervisor node. */
	if (!fdt_find_node(&fdt, "/hypervisor", &hyp_node)) {
		return MANIFEST_ERROR_NO_HYPERVISOR_FDT_NODE;
	}

	/* Check "compatible" property. */
	if (!fdt_is_compatible(&hyp_node, "hafnium,hafnium")) {
		return MANIFEST_ERROR_NOT_COMPATIBLE;
	}

	TRY(read_bool(&hyp_node, "ffa_tee_enabled",
		      &manifest->ffa_tee_enabled));

	/* Iterate over reserved VM IDs and check no such nodes exist. */
	for (i = HF_VM_ID_BASE; i < HF_VM_ID_OFFSET; i++) {
		ffa_vm_id_t vm_id = (ffa_vm_id_t)i - HF_VM_ID_BASE;
		struct fdt_node vm_node = hyp_node;

		generate_vm_node_name(&vm_name, vm_id);
		if (fdt_find_child(&vm_node, &vm_name)) {
			return MANIFEST_ERROR_RESERVED_VM_ID;
		}
	}

	/* Iterate over VM nodes until we find one that does not exist. */
	for (i = 0; i <= MAX_VMS; ++i) {
		ffa_vm_id_t vm_id = HF_VM_ID_OFFSET + i;
		struct fdt_node vm_node = hyp_node;

		generate_vm_node_name(&vm_name, vm_id - HF_VM_ID_BASE);
		if (!fdt_find_child(&vm_node, &vm_name)) {
			break;
		}

		if (i == MAX_VMS) {
			return MANIFEST_ERROR_TOO_MANY_VMS;
		}

		if (vm_id == HF_PRIMARY_VM_ID) {
			CHECK(found_primary_vm == false); /* sanity check */
			found_primary_vm = true;
		}

		manifest->vm_count = i + 1;

		TRY(parse_vm_common(&vm_node, &manifest->vm[i], vm_id));

		CHECK(!manifest->vm[i].is_hyp_loaded ||
		      manifest->vm[i].is_ffa_partition);

		if (manifest->vm[i].is_ffa_partition &&
		    !manifest->vm[i].is_hyp_loaded) {
			TRY(parse_ffa_partition_package(stage1_locked, &vm_node,
							&manifest->vm[i], vm_id,
							ppool));
		} else {
			TRY(parse_vm(&vm_node, &manifest->vm[i], vm_id));
		}
	}

	if (!found_primary_vm && vm_id_is_current_world(HF_PRIMARY_VM_ID)) {
		return MANIFEST_ERROR_NO_PRIMARY_VM;
	}

	return MANIFEST_SUCCESS;
}
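
/*
 * Illustrative /hypervisor node consumed by manifest_init() (filenames and
 * sizes are placeholders):
 *
 *   hypervisor {
 *           compatible = "hafnium,hafnium";
 *           vm1 {
 *                   debug_name = "primary";
 *                   kernel_filename = "vmlinuz";
 *                   ramdisk_filename = "initrd.img";
 *           };
 *           vm2 {
 *                   debug_name = "secondary";
 *                   vcpu_count = <1>;
 *                   mem_size = <0x100000>;
 *           };
 *   };
 */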

/**
 * Free manifest data resources, called once manifest parsing has
 * completed and VMs are loaded.
 */
void manifest_deinit(struct mpool *ppool)
{
	manifest_data_deinit(ppool);
}

const char *manifest_strerror(enum manifest_return_code ret_code)
{
	switch (ret_code) {
	case MANIFEST_SUCCESS:
		return "Success";
	case MANIFEST_ERROR_FILE_SIZE:
		return "Total size in header does not match file size";
	case MANIFEST_ERROR_MALFORMED_DTB:
		return "Malformed device tree blob";
	case MANIFEST_ERROR_NO_ROOT_NODE:
		return "Could not find root node in manifest";
	case MANIFEST_ERROR_NO_HYPERVISOR_FDT_NODE:
		return "Could not find \"hypervisor\" node in manifest";
	case MANIFEST_ERROR_NOT_COMPATIBLE:
		return "Hypervisor manifest entry not compatible with Hafnium";
	case MANIFEST_ERROR_RESERVED_VM_ID:
		return "Manifest defines a VM with a reserved ID";
	case MANIFEST_ERROR_NO_PRIMARY_VM:
		return "Manifest does not contain a primary VM entry";
	case MANIFEST_ERROR_TOO_MANY_VMS:
		return "Manifest specifies more VMs than Hafnium has "
		       "statically allocated space for";
	case MANIFEST_ERROR_PROPERTY_NOT_FOUND:
		return "Property not found";
	case MANIFEST_ERROR_MALFORMED_STRING:
		return "Malformed string property";
	case MANIFEST_ERROR_STRING_TOO_LONG:
		return "String too long";
	case MANIFEST_ERROR_MALFORMED_INTEGER:
		return "Malformed integer property";
	case MANIFEST_ERROR_INTEGER_OVERFLOW:
		return "Integer overflow";
	case MANIFEST_ERROR_MALFORMED_INTEGER_LIST:
		return "Malformed integer list property";
	case MANIFEST_ERROR_MALFORMED_BOOLEAN:
		return "Malformed boolean property";
	case MANIFEST_ERROR_MEMORY_REGION_NODE_EMPTY:
		return "Memory-region node should have at least one entry";
	case MANIFEST_ERROR_DEVICE_REGION_NODE_EMPTY:
		return "Device-region node should have at least one entry";
	case MANIFEST_ERROR_RXTX_SIZE_MISMATCH:
		return "RX and TX buffers should be of same size";
	case MANIFEST_ERROR_MEM_REGION_OVERLAP:
		return "Memory region overlaps with one already allocated";
	case MANIFEST_ERROR_INVALID_MEM_PERM:
		return "Memory permission should be RO, RW or RX";
	case MANIFEST_ERROR_ARGUMENTS_LIST_EMPTY:
		return "Arguments-list node should have at least one argument";
	case MANIFEST_ERROR_INTERRUPT_ID_REPEATED:
		return "Interrupt ID already assigned to another endpoint";
	case MANIFEST_ILLEGAL_NS_ACTION:
		return "Illegal value specified for the field: Action in "
		       "response to NS Interrupt";
	}

	panic("Unexpected manifest return code.");
}