blob: ea65f156e5b24464581e3a18069e7487e801f374 [file] [log] [blame]
Andrew Scull18834872018-10-12 11:48:09 +01001/*
Andrew Walbran692b3252019-03-07 15:51:31 +00002 * Copyright 2018 The Hafnium Authors.
Andrew Scull18834872018-10-12 11:48:09 +01003 *
Andrew Walbrane959ec12020-06-17 15:01:09 +01004 * Use of this source code is governed by a BSD-style
5 * license that can be found in the LICENSE file or at
6 * https://opensource.org/licenses/BSD-3-Clause.
Andrew Scull18834872018-10-12 11:48:09 +01007 */
8
Andrew Scull18c78fc2018-08-20 12:57:41 +01009#include "hf/cpu.h"
Wedson Almeida Filho987c0ff2018-06-20 16:34:38 +010010
Andrew Scull04502e42018-09-03 14:54:52 +010011#include <stdalign.h>
12
Max Shvetsov9c0ebe42020-08-27 12:37:57 +010013#include "hf/arch/cache.h"
14
Andrew Scull18c78fc2018-08-20 12:57:41 +010015#include "hf/api.h"
Andrew Scull877ae4b2019-07-02 12:52:33 +010016#include "hf/check.h"
Andrew Scull18c78fc2018-08-20 12:57:41 +010017#include "hf/dlog.h"
Wedson Almeida Filho987c0ff2018-06-20 16:34:38 +010018
Andrew Scull19503262018-09-20 14:48:39 +010019#include "vmapi/hf/call.h"
20
Maksims Svecovs134b8f92022-03-04 15:14:09 +000021#include "system/sys/cdefs.h"
22
/* Each CPU gets exactly one page of stack. */
#define STACK_SIZE PAGE_SIZE

/**
 * The stacks to be used by the CPUs.
 *
 * Align to page boundaries to ensure that cache lines are not shared between a
 * CPU's stack and data that can be accessed from other CPUs. If this did
 * happen, there may be coherency problems when the stack is being used before
 * caching is enabled.
 */
alignas(PAGE_SIZE) static char callstacks[MAX_CPUS][STACK_SIZE] __section(
	.stacks);

/* NOLINTNEXTLINE(misc-redundant-expression) */
static_assert((STACK_SIZE % PAGE_SIZE) == 0, "Keep each stack page aligned.");
static_assert((PAGE_SIZE % STACK_ALIGN) == 0,
	      "Page alignment is too weak for the stack.");
Wedson Almeida Filho3fcbcff2018-07-10 23:53:39 +010040
/**
 * Internal buffer used to store FF-A messages from a VM Tx. Its usage prevents
 * TOCTOU issues while Hafnium performs actions on information that would
 * otherwise be re-writable by the VM.
 *
 * Each buffer is owned by a single CPU. The buffer can only be used for
 * ffa_msg_send. The information stored in the buffer is only valid while the
 * ffa_msg_send request is being performed.
 */
alignas(PAGE_SIZE) static uint8_t cpu_message_buffer[MAX_CPUS][PAGE_SIZE];
Jose Marinho20713fa2019-08-07 15:42:07 +010051
Mahesh Bireddy8ca57862020-01-07 13:43:21 +053052uint8_t *cpu_get_buffer(struct cpu *c)
Jose Marinho20713fa2019-08-07 15:42:07 +010053{
Mahesh Bireddy8ca57862020-01-07 13:43:21 +053054 size_t cpu_indx = cpu_index(c);
Jose Marinho20713fa2019-08-07 15:42:07 +010055
Mahesh Bireddy8ca57862020-01-07 13:43:21 +053056 CHECK(cpu_indx < MAX_CPUS);
57
58 return cpu_message_buffer[cpu_indx];
Jose Marinho20713fa2019-08-07 15:42:07 +010059}
60
Mahesh Bireddy8ca57862020-01-07 13:43:21 +053061uint32_t cpu_get_buffer_size(struct cpu *c)
Jose Marinho20713fa2019-08-07 15:42:07 +010062{
Mahesh Bireddy8ca57862020-01-07 13:43:21 +053063 size_t cpu_indx = cpu_index(c);
Jose Marinho20713fa2019-08-07 15:42:07 +010064
Mahesh Bireddy8ca57862020-01-07 13:43:21 +053065 CHECK(cpu_indx < MAX_CPUS);
66
67 return sizeof(cpu_message_buffer[cpu_indx]);
Jose Marinho20713fa2019-08-07 15:42:07 +010068}
69
/* State of all supported CPUs. The stack of the first one is initialized. */
struct cpu cpus[MAX_CPUS] = {
	{
		/* The boot CPU is on from the start. */
		.is_on = 1,
		/* Stacks grow down, so bottom is one past the last byte. */
		.stack_bottom = &callstacks[0][STACK_SIZE],
	},
};

/* Number of CPUs in use; overwritten by cpu_module_init() from the config. */
uint32_t cpu_count = 1;
Wedson Almeida Filho3fcbcff2018-07-10 23:53:39 +010079
Andrew Walbran4d3fa282019-06-26 13:31:15 +010080void cpu_module_init(const cpu_id_t *cpu_ids, size_t count)
Andrew Scullbb3ab6c2018-11-26 20:38:49 +000081{
82 uint32_t i;
83 uint32_t j;
Andrew Walbran4d3fa282019-06-26 13:31:15 +010084 cpu_id_t boot_cpu_id = cpus[0].id;
Andrew Scullbb3ab6c2018-11-26 20:38:49 +000085 bool found_boot_cpu = false;
86
87 cpu_count = count;
88
89 /*
90 * Initialize CPUs with the IDs from the configuration passed in. The
91 * CPUs after the boot CPU are initialized in reverse order. The boot
92 * CPU is initialized when it is found or in place of the last CPU if it
93 * is not found.
94 */
95 j = cpu_count;
96 for (i = 0; i < cpu_count; ++i) {
97 struct cpu *c;
Andrew Walbran4d3fa282019-06-26 13:31:15 +010098 cpu_id_t id = cpu_ids[i];
Andrew Scullbb3ab6c2018-11-26 20:38:49 +000099
100 if (found_boot_cpu || id != boot_cpu_id) {
Andrew Scull48973982019-08-16 17:40:28 +0100101 --j;
102 c = &cpus[j];
103 c->stack_bottom = &callstacks[j][STACK_SIZE];
Andrew Scullbb3ab6c2018-11-26 20:38:49 +0000104 } else {
105 found_boot_cpu = true;
106 c = &cpus[0];
Andrew Scull48973982019-08-16 17:40:28 +0100107 CHECK(c->stack_bottom == &callstacks[0][STACK_SIZE]);
Andrew Scullbb3ab6c2018-11-26 20:38:49 +0000108 }
Wedson Almeida Filho81568c42019-01-04 13:33:02 +0000109
Andrew Scull48973982019-08-16 17:40:28 +0100110 sl_init(&c->lock);
Andrew Scullbb3ab6c2018-11-26 20:38:49 +0000111 c->id = id;
Wedson Almeida Filho3fcbcff2018-07-10 23:53:39 +0100112 }
Andrew Scullbb3ab6c2018-11-26 20:38:49 +0000113
114 if (!found_boot_cpu) {
115 /* Boot CPU was initialized but with wrong ID. */
Andrew Walbran17eebf92020-02-05 16:35:49 +0000116 dlog_warning("Boot CPU's ID not found in config.\n");
Andrew Scullbb3ab6c2018-11-26 20:38:49 +0000117 cpus[0].id = boot_cpu_id;
118 }
Max Shvetsov9c0ebe42020-08-27 12:37:57 +0100119
120 /*
121 * Clean the cache for the cpus array such that secondary cores
122 * hitting the entry point can read the cpus array consistently
123 * with MMU off (hence data cache off).
124 */
Olivier Depreza2846172021-03-23 18:45:41 +0100125 arch_cache_data_clean_range(va_from_ptr(cpus), sizeof(cpus));
Max Shvetsov9c0ebe42020-08-27 12:37:57 +0100126
Olivier Depreza2846172021-03-23 18:45:41 +0100127 arch_cache_data_clean_range(va_from_ptr(&cpu_count), sizeof(cpu_count));
Wedson Almeida Filho3fcbcff2018-07-10 23:53:39 +0100128}
129
130size_t cpu_index(struct cpu *c)
131{
Wedson Almeida Filho03e767a2018-07-30 15:32:03 +0100132 return c - cpus;
Wedson Almeida Filho3fcbcff2018-07-10 23:53:39 +0100133}
134
Olivier Deprez7d5e5532020-09-22 15:06:58 +0200135/*
136 * Return cpu with the given index.
137 */
138struct cpu *cpu_find_index(size_t index)
139{
140 return (index < MAX_CPUS) ? &cpus[index] : NULL;
141}
142
Wedson Almeida Filho87009642018-07-02 10:20:07 +0100143/**
144 * Turns CPU on and returns the previous state.
145 */
Andrew Scull37402872018-10-24 14:23:06 +0100146bool cpu_on(struct cpu *c, ipaddr_t entry, uintreg_t arg)
Wedson Almeida Filho987c0ff2018-06-20 16:34:38 +0100147{
Wedson Almeida Filho87009642018-07-02 10:20:07 +0100148 bool prev;
149
Wedson Almeida Filho987c0ff2018-06-20 16:34:38 +0100150 sl_lock(&c->lock);
Wedson Almeida Filho87009642018-07-02 10:20:07 +0100151 prev = c->is_on;
152 c->is_on = true;
153 sl_unlock(&c->lock);
154
155 if (!prev) {
Olivier Deprezb9adff42021-02-01 12:14:05 +0100156 /* This returns the first booted VM (e.g. primary in the NWd) */
157 struct vm *vm = vm_get_first_boot();
Andrew Walbrane1310df2019-04-29 17:28:28 +0100158 struct vcpu *vcpu = vm_get_vcpu(vm, cpu_index(c));
Andrew Walbranb58f8992019-04-15 12:29:31 +0100159 struct vcpu_locked vcpu_locked;
Wedson Almeida Filho81568c42019-01-04 13:33:02 +0000160
Andrew Walbranb58f8992019-04-15 12:29:31 +0100161 vcpu_locked = vcpu_lock(vcpu);
162 vcpu_on(vcpu_locked, entry, arg);
163 vcpu_unlock(&vcpu_locked);
Wedson Almeida Filho987c0ff2018-06-20 16:34:38 +0100164 }
Wedson Almeida Filho87009642018-07-02 10:20:07 +0100165
166 return prev;
Wedson Almeida Filho987c0ff2018-06-20 16:34:38 +0100167}
168
/**
 * Prepares the CPU for turning itself off.
 *
 * Only marks the CPU as off under its lock; the actual power-down is
 * performed elsewhere (presumably via the platform power interface —
 * not visible from this file).
 */
void cpu_off(struct cpu *c)
{
	sl_lock(&c->lock);
	c->is_on = false;
	sl_unlock(&c->lock);
}
Wedson Almeida Filho987c0ff2018-06-20 16:34:38 +0100178
Wedson Almeida Filho03e767a2018-07-30 15:32:03 +0100179/**
Fuad Tabbab0ef2a42019-12-19 11:19:25 +0000180 * Searches for a CPU based on its ID.
Wedson Almeida Filho03e767a2018-07-30 15:32:03 +0100181 */
Andrew Walbran4d3fa282019-06-26 13:31:15 +0100182struct cpu *cpu_find(cpu_id_t id)
Wedson Almeida Filho03e767a2018-07-30 15:32:03 +0100183{
184 size_t i;
185
Andrew Scullbb3ab6c2018-11-26 20:38:49 +0000186 for (i = 0; i < cpu_count; i++) {
Wedson Almeida Filho03e767a2018-07-30 15:32:03 +0100187 if (cpus[i].id == id) {
Andrew Scullf3d45592018-09-20 14:30:22 +0100188 return &cpus[i];
Wedson Almeida Filho03e767a2018-07-30 15:32:03 +0100189 }
190 }
191
192 return NULL;
Wedson Almeida Filho987c0ff2018-06-20 16:34:38 +0100193}