/*
 * Copyright 2018 The Hafnium Authors.
 *
 * Use of this source code is governed by a BSD-style
 * license that can be found in the LICENSE file or at
 * https://opensource.org/licenses/BSD-3-Clause.
 */

#include "hf/load.h"

#include <stdalign.h>
#include <stdbool.h>
#include <stdint.h>

#include "hf/arch/vm.h"

#include "hf/api.h"
#include "hf/boot_params.h"
#include "hf/check.h"
#include "hf/dlog.h"
#include "hf/fdt_patch.h"
#include "hf/layout.h"
#include "hf/memiter.h"
#include "hf/mm.h"
#include "hf/plat/console.h"
#include "hf/plat/iommu.h"
#include "hf/static_assert.h"
#include "hf/std.h"
#include "hf/vm.h"

#include "vmapi/hf/call.h"

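/*
 * Statically allocated, page-aligned send and receive buffers for the dummy
 * VM that represents TrustZone (the TEE); they are hooked up to its mailbox
 * in load_vms() below.
 */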
alignas(PAGE_SIZE) static uint8_t tee_send_buffer[HF_MAILBOX_SIZE];
alignas(PAGE_SIZE) static uint8_t tee_recv_buffer[HF_MAILBOX_SIZE];

/**
 * Copies data to an unmapped location by mapping it for write, copying the
 * data, then unmapping it.
 *
 * The data is written so that it is available to all cores with the cache
 * disabled. When execution is switched to the partitions, caching is
 * initially disabled, so the data must be visible without the cache.
 */
static bool copy_to_unmapped(struct mm_stage1_locked stage1_locked, paddr_t to,
			     struct memiter *from_it, struct mpool *ppool)
{
	const void *from = memiter_base(from_it);
	size_t size = memiter_size(from_it);
	paddr_t to_end = pa_add(to, size);
	void *ptr;

	ptr = mm_identity_map(stage1_locked, to, to_end, MM_MODE_W, ppool);
	if (!ptr) {
		return false;
	}

	memcpy_s(ptr, size, from, size);
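	/*
	 * Flush the data cache over the copied range so the new image reaches
	 * memory and is visible to cores that start with caches disabled.
	 */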
	arch_mm_flush_dcache(ptr, size);

	CHECK(mm_unmap(stage1_locked, to, to_end, ppool));

	return true;
}

/**
 * Loads the secondary VM's kernel.
 * Stores the kernel size in kernel_size (if kernel_size is not NULL).
 * Returns false if it cannot load the kernel.
 */
static bool load_kernel(struct mm_stage1_locked stage1_locked, paddr_t begin,
			paddr_t end, const struct manifest_vm *manifest_vm,
			const struct memiter *cpio, struct mpool *ppool,
			size_t *kernel_size)
{
	struct memiter kernel;
	size_t size;

	if (!cpio_get_file(cpio, &manifest_vm->kernel_filename, &kernel)) {
		dlog_error("Could not find kernel file \"%s\".\n",
			   string_data(&manifest_vm->kernel_filename));
		return false;
	}

	size = memiter_size(&kernel);
	if (pa_difference(begin, end) < size) {
		dlog_error("Kernel is larger than available memory.\n");
		return false;
	}

	if (!copy_to_unmapped(stage1_locked, begin, &kernel, ppool)) {
		dlog_error("Unable to copy kernel.\n");
		return false;
	}

	if (kernel_size) {
		*kernel_size = size;
	}

	return true;
}

/**
 * Performs VM loading activities that are common between the primary and
 * secondaries.
 */
static bool load_common(const struct manifest_vm *manifest_vm, struct vm *vm)
{
	vm->smc_whitelist = manifest_vm->smc_whitelist;

	/* Initialize architecture-specific features. */
	arch_vm_features_set(vm);

	return true;
}

/**
 * Loads the primary VM.
 */
static bool load_primary(struct mm_stage1_locked stage1_locked,
			 const struct manifest_vm *manifest_vm,
			 const struct memiter *cpio,
			 const struct boot_params *params, struct mpool *ppool)
{
	paddr_t primary_begin;
	ipaddr_t primary_entry;
	struct vm *vm;
	struct vm_locked vm_locked;
	struct vcpu_locked vcpu_locked;
	size_t i;
	bool ret;

	if (manifest_vm->is_ffa_partition) {
		primary_begin = pa_init(manifest_vm->sp.load_addr);
		primary_entry = ipa_add(ipa_from_pa(primary_begin),
					manifest_vm->sp.ep_offset);
	} else {
		primary_begin =
			(manifest_vm->primary.boot_address ==
			 MANIFEST_INVALID_ADDRESS)
				? layout_primary_begin()
				: pa_init(manifest_vm->primary.boot_address);
		primary_entry = ipa_from_pa(primary_begin);
	}

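	/*
	 * The size of the primary's image is not known from the manifest, so
	 * bound the kernel copy only by RSIZE_MAX rather than by a real end
	 * of memory.
	 */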
	paddr_t primary_end = pa_add(primary_begin, RSIZE_MAX);

	/*
	 * Load the kernel if a filename is specified in the VM manifest.
	 * For an FF-A partition, kernel_filename is left empty, indicating
	 * that the partition package was already loaded before Hafnium
	 * booted.
	 */
	if (!string_is_empty(&manifest_vm->kernel_filename)) {
		if (!load_kernel(stage1_locked, primary_begin, primary_end,
				 manifest_vm, cpio, ppool, NULL)) {
			dlog_error("Unable to load primary kernel.\n");
			return false;
		}
	}

	if (!vm_init_next(MAX_CPUS, ppool, &vm)) {
		dlog_error("Unable to initialise primary VM.\n");
		return false;
	}

	if (vm->id != HF_PRIMARY_VM_ID) {
		dlog_error("Primary VM was not given correct ID.\n");
		return false;
	}

	vm_locked = vm_lock(vm);

	if (!load_common(manifest_vm, vm)) {
		ret = false;
		goto out;
	}

	if (params->device_mem_ranges_count == 0) {
		/*
		 * Map 1 TB of address space as device memory so that, most
		 * likely, all devices are available to the primary VM.
		 *
		 * TODO: remove this once all targets provide valid ranges.
		 */
		dlog_warning(
			"Device memory not provided, defaulting to 1 TB.\n");

		if (!vm_identity_map(
			    vm_locked, pa_init(0),
			    pa_init(UINT64_C(1024) * 1024 * 1024 * 1024),
			    MM_MODE_R | MM_MODE_W | MM_MODE_D, ppool, NULL)) {
			dlog_error(
				"Unable to initialise address space for "
				"primary VM.\n");
			ret = false;
			goto out;
		}
	}

	/* Map normal memory as such to permit caching, execution, etc. */
	for (i = 0; i < params->mem_ranges_count; ++i) {
		if (!vm_identity_map(vm_locked, params->mem_ranges[i].begin,
				     params->mem_ranges[i].end,
				     MM_MODE_R | MM_MODE_W | MM_MODE_X, ppool,
				     NULL)) {
			dlog_error(
				"Unable to initialise memory for primary "
				"VM.\n");
			ret = false;
			goto out;
		}
	}

	/* Map device memory as such to prevent execution, speculation etc. */
	for (i = 0; i < params->device_mem_ranges_count; ++i) {
		if (!vm_identity_map(
			    vm_locked, params->device_mem_ranges[i].begin,
			    params->device_mem_ranges[i].end,
			    MM_MODE_R | MM_MODE_W | MM_MODE_D, ppool, NULL)) {
			dlog("Unable to initialise device memory for primary "
			     "VM.\n");
			ret = false;
			goto out;
		}
	}

	if (!vm_unmap_hypervisor(vm_locked, ppool)) {
		dlog_error("Unable to unmap hypervisor from primary VM.\n");
		ret = false;
		goto out;
	}

	if (!plat_iommu_unmap_iommus(vm_locked, ppool)) {
		dlog_error("Unable to unmap IOMMUs from primary VM.\n");
		ret = false;
		goto out;
	}

	dlog_info("Loaded primary VM with %u vCPUs, entry at %#x.\n",
		  vm->vcpu_count, pa_addr(primary_begin));

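	/*
	 * Turn on vCPU 0 so that the boot CPU enters the primary VM at its
	 * entry point, with params->kernel_arg (typically the address of the
	 * FDT on AArch64) in the first argument register.
	 */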
	vcpu_locked = vcpu_lock(vm_get_vcpu(vm, 0));
	vcpu_on(vcpu_locked, primary_entry, params->kernel_arg);
	vcpu_unlock(&vcpu_locked);
	ret = true;

out:
	vm_unlock(&vm_locked);

	return ret;
}

/**
 * Loads the secondary VM's FDT.
 * Stores the total allocated size for the FDT in fdt_allocated_size (if
 * fdt_allocated_size is not NULL). The allocated size includes additional space
 * for potential patching.
 */
static bool load_secondary_fdt(struct mm_stage1_locked stage1_locked,
			       paddr_t end, size_t fdt_max_size,
			       const struct manifest_vm *manifest_vm,
			       const struct memiter *cpio, struct mpool *ppool,
			       paddr_t *fdt_addr, size_t *fdt_allocated_size)
{
	struct memiter fdt;
	size_t allocated_size;

	CHECK(!string_is_empty(&manifest_vm->secondary.fdt_filename));

	if (!cpio_get_file(cpio, &manifest_vm->secondary.fdt_filename, &fdt)) {
		dlog_error("Cannot open the secondary VM's FDT.\n");
		return false;
	}

	/*
	 * Ensure the FDT has one additional page at the end for patching,
	 * and align it to the page boundary.
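	 *
	 * For example, with 4 KiB pages, a 5000-byte FDT is allocated
	 * align_up(5000, 4096) + 4096 = 12288 bytes.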
	 */
	allocated_size = align_up(memiter_size(&fdt), PAGE_SIZE) + PAGE_SIZE;

	if (allocated_size > fdt_max_size) {
		dlog_error(
			"FDT allocated space (%u) is more than the specified "
			"maximum to use (%u).\n",
			allocated_size, fdt_max_size);
		return false;
	}

	/* Load the FDT to the end of the VM's allocated memory space. */
	*fdt_addr = pa_sub(end, allocated_size);

	dlog_info("Loading secondary FDT of allocated size %u at 0x%x.\n",
		  allocated_size, pa_addr(*fdt_addr));

	if (!copy_to_unmapped(stage1_locked, *fdt_addr, &fdt, ppool)) {
		dlog_error("Unable to copy FDT.\n");
		return false;
	}

	if (fdt_allocated_size) {
		*fdt_allocated_size = allocated_size;
	}

	return true;
}

/**
 * Loads a secondary VM.
 */
static bool load_secondary(struct mm_stage1_locked stage1_locked,
			   paddr_t mem_begin, paddr_t mem_end,
			   const struct manifest_vm *manifest_vm,
			   const struct memiter *cpio, struct mpool *ppool)
{
	struct vm *vm;
	struct vm_locked vm_locked;
	struct vcpu *vcpu;
	ipaddr_t secondary_entry;
	bool ret;
	paddr_t fdt_addr;
	bool has_fdt;
	size_t kernel_size = 0;
	const size_t mem_size = pa_difference(mem_begin, mem_end);

	/*
	 * Load the kernel if a filename is specified in the VM manifest.
	 * For an FF-A partition, kernel_filename is left empty, indicating
	 * that the partition package was already loaded before Hafnium
	 * booted.
	 */
	if (!string_is_empty(&manifest_vm->kernel_filename)) {
		if (!load_kernel(stage1_locked, mem_begin, mem_end, manifest_vm,
				 cpio, ppool, &kernel_size)) {
			dlog_error("Unable to load kernel.\n");
			return false;
		}
	}

	has_fdt = !string_is_empty(&manifest_vm->secondary.fdt_filename);
	if (has_fdt) {
		/*
		 * Ensure that the FDT neither overwrites the kernel nor
		 * overlaps the kernel's last page, so that the FDT starts at
		 * a page boundary.
		 */
		const size_t fdt_max_size =
			mem_size - align_up(kernel_size, PAGE_SIZE);

		size_t fdt_allocated_size;

		if (!load_secondary_fdt(stage1_locked, mem_end, fdt_max_size,
					manifest_vm, cpio, ppool, &fdt_addr,
					&fdt_allocated_size)) {
			dlog_error("Unable to load FDT.\n");
			return false;
		}

		if (!fdt_patch_mem(stage1_locked, fdt_addr, fdt_allocated_size,
				   mem_begin, mem_end, ppool)) {
			dlog_error("Unable to patch FDT.\n");
			return false;
		}
	}

	if (!vm_init_next(manifest_vm->secondary.vcpu_count, ppool, &vm)) {
		dlog_error("Unable to initialise VM.\n");
		return false;
	}

	if (!load_common(manifest_vm, vm)) {
		return false;
	}

	vm_locked = vm_lock(vm);

	/* Grant the VM access to the memory. */
	if (!vm_identity_map(vm_locked, mem_begin, mem_end,
			     MM_MODE_R | MM_MODE_W | MM_MODE_X, ppool,
			     &secondary_entry)) {
		dlog_error("Unable to initialise memory.\n");
		ret = false;
		goto out;
	}

	dlog_info("Loaded with %u vCPUs, entry at %#x.\n",
		  manifest_vm->secondary.vcpu_count, pa_addr(mem_begin));

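	/*
	 * FF-A partitions enter at their load address plus the entry-point
	 * offset given in the partition manifest.
	 */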
	if (manifest_vm->is_ffa_partition) {
		secondary_entry =
			ipa_add(secondary_entry, manifest_vm->sp.ep_offset);
	}

	vcpu = vm_get_vcpu(vm, 0);

	if (has_fdt) {
		vcpu_secondary_reset_and_start(vcpu, secondary_entry,
					       pa_addr(fdt_addr));
	} else {
		/*
		 * Without an FDT, secondary VMs expect the memory size to be
		 * passed in register x0, which is what
		 * vcpu_secondary_reset_and_start does in this case.
		 */
		vcpu_secondary_reset_and_start(vcpu, secondary_entry, mem_size);
	}

	ret = true;

out:
	vm_unlock(&vm_locked);

	return ret;
}

/**
 * Try to find a memory range of the given size within the given ranges, and
 * remove it from them. Return true on success, or false if no large enough
 * contiguous range is found.
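 *
 * For example, carving 0x4000 bytes out of the range [0x1000, 0x9000)
 * returns [0x5000, 0x9000) and shrinks the stored range to
 * [0x1000, 0x5000).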
 */
static bool carve_out_mem_range(struct mem_range *mem_ranges,
				size_t mem_ranges_count, uint64_t size_to_find,
				paddr_t *found_begin, paddr_t *found_end)
{
	size_t i;

	/*
	 * TODO(b/116191358): Consider being cleverer about how we pack VMs
	 * together, with a non-greedy algorithm.
	 */
	for (i = 0; i < mem_ranges_count; ++i) {
		if (size_to_find <=
		    pa_difference(mem_ranges[i].begin, mem_ranges[i].end)) {
			/*
			 * This range is big enough, take some of it from the
			 * end and reduce its size accordingly.
			 */
			*found_end = mem_ranges[i].end;
			*found_begin = pa_init(pa_addr(mem_ranges[i].end) -
					       size_to_find);
			mem_ranges[i].end = *found_begin;
			return true;
		}
	}
	return false;
}

/**
 * Given arrays of memory ranges before and after memory was removed for
 * secondary VMs, add the difference to the reserved ranges of the given update.
 * Return true on success, or false if there would be more than MAX_MEM_RANGES
 * reserved ranges after adding the new ones.
 * `before` and `after` must be arrays of exactly `mem_ranges_count` elements.
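 *
 * For example, if before[i] = [0x1000, 0x9000) and after[i] =
 * [0x1000, 0x5000), then [0x5000, 0x9000) is added as a reserved range.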
 */
static bool update_reserved_ranges(struct boot_params_update *update,
				   const struct mem_range *before,
				   const struct mem_range *after,
				   size_t mem_ranges_count)
{
	size_t i;

	for (i = 0; i < mem_ranges_count; ++i) {
		if (pa_addr(after[i].begin) > pa_addr(before[i].begin)) {
			if (update->reserved_ranges_count >= MAX_MEM_RANGES) {
				dlog_error(
					"Too many reserved ranges after "
					"loading secondary VMs.\n");
				return false;
			}
			update->reserved_ranges[update->reserved_ranges_count]
				.begin = before[i].begin;
			update->reserved_ranges[update->reserved_ranges_count]
				.end = after[i].begin;
			update->reserved_ranges_count++;
		}
		if (pa_addr(after[i].end) < pa_addr(before[i].end)) {
			if (update->reserved_ranges_count >= MAX_MEM_RANGES) {
				dlog_error(
					"Too many reserved ranges after "
					"loading secondary VMs.\n");
				return false;
			}
			update->reserved_ranges[update->reserved_ranges_count]
				.begin = after[i].end;
			update->reserved_ranges[update->reserved_ranges_count]
				.end = before[i].end;
			update->reserved_ranges_count++;
		}
	}

	return true;
}

/**
 * Loads all VMs from the manifest.
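 *
 * Expected to be called once during boot, after the manifest has been parsed
 * and with the hypervisor's stage-1 page table locked.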
 */
bool load_vms(struct mm_stage1_locked stage1_locked,
	      const struct manifest *manifest, const struct memiter *cpio,
	      const struct boot_params *params,
	      struct boot_params_update *update, struct mpool *ppool)
{
	struct vm *primary;
	struct vm *tee;
	struct mem_range mem_ranges_available[MAX_MEM_RANGES];
	struct vm_locked primary_vm_locked;
	size_t i;
	bool success = true;

	if (!load_primary(stage1_locked, &manifest->vm[HF_PRIMARY_VM_INDEX],
			  cpio, params, ppool)) {
		dlog_error("Unable to load primary VM.\n");
		return false;
	}

	/*
	 * Initialise the dummy VM which represents TrustZone, and set up its
	 * RX/TX buffers.
	 */
	tee = vm_init(HF_TEE_VM_ID, 0, ppool);
	CHECK(tee != NULL);
	tee->mailbox.send = &tee_send_buffer;
	tee->mailbox.recv = &tee_recv_buffer;

	static_assert(
		sizeof(mem_ranges_available) == sizeof(params->mem_ranges),
		"mem_range arrays must be the same size for memcpy.");
	static_assert(sizeof(mem_ranges_available) < 500,
		      "This will use too much stack, either make "
		      "MAX_MEM_RANGES smaller or change this.");
	memcpy_s(mem_ranges_available, sizeof(mem_ranges_available),
		 params->mem_ranges, sizeof(params->mem_ranges));

	/* Round the last addresses down to the page size. */
	for (i = 0; i < params->mem_ranges_count; ++i) {
		mem_ranges_available[i].end = pa_init(align_down(
			pa_addr(mem_ranges_available[i].end), PAGE_SIZE));
	}

	primary = vm_find(HF_PRIMARY_VM_ID);
	primary_vm_locked = vm_lock(primary);

	for (i = 0; i < manifest->vm_count; ++i) {
		const struct manifest_vm *manifest_vm = &manifest->vm[i];
		ffa_vm_id_t vm_id = HF_VM_ID_OFFSET + i;
		uint64_t mem_size;
		paddr_t secondary_mem_begin;
		paddr_t secondary_mem_end;

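		/* The primary VM was already loaded by load_primary(); skip it. */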
		if (vm_id == HF_PRIMARY_VM_ID) {
			continue;
		}

		dlog_info("Loading VM%d: %s.\n", (int)vm_id,
			  manifest_vm->debug_name);

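		/*
		 * Round the memory size requested in the manifest up to a
		 * whole number of pages.
		 */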
		mem_size = align_up(manifest_vm->secondary.mem_size, PAGE_SIZE);

		if (manifest_vm->is_ffa_partition) {
			secondary_mem_begin =
				pa_init(manifest_vm->sp.load_addr);
			secondary_mem_end =
				pa_init(manifest_vm->sp.load_addr + mem_size);
		} else if (!carve_out_mem_range(mem_ranges_available,
						params->mem_ranges_count,
						mem_size, &secondary_mem_begin,
						&secondary_mem_end)) {
			dlog_error("Not enough memory (%u bytes).\n", mem_size);
			continue;
		}

		if (!load_secondary(stage1_locked, secondary_mem_begin,
				    secondary_mem_end, manifest_vm, cpio,
				    ppool)) {
			dlog_error("Unable to load VM.\n");
			continue;
		}

		/* Deny the primary VM access to this memory. */
		if (!vm_unmap(primary_vm_locked, secondary_mem_begin,
			      secondary_mem_end, ppool)) {
			dlog_error(
				"Unable to unmap secondary VM from primary "
				"VM.\n");
			success = false;
			break;
		}
	}

	vm_unlock(&primary_vm_locked);

	if (!success) {
		return false;
	}

	/*
	 * Add newly reserved areas to update params by looking at the
	 * difference between the available ranges from the original params and
	 * the updated mem_ranges_available. We assume that the number and order
	 * of available ranges is the same, i.e. the loop above does not
	 * remove any ranges, it only makes them smaller.
	 */
	return update_reserved_ranges(update, params->mem_ranges,
				      mem_ranges_available,
				      params->mem_ranges_count);
}