blob: bc26801959f94f1e52171214c7fcddb29cdcf7cf [file] [log] [blame]
Andrew Scull18834872018-10-12 11:48:09 +01001/*
Andrew Walbran692b3252019-03-07 15:51:31 +00002 * Copyright 2018 The Hafnium Authors.
Andrew Scull18834872018-10-12 11:48:09 +01003 *
Andrew Walbrane959ec12020-06-17 15:01:09 +01004 * Use of this source code is governed by a BSD-style
5 * license that can be found in the LICENSE file or at
6 * https://opensource.org/licenses/BSD-3-Clause.
Andrew Scull18834872018-10-12 11:48:09 +01007 */
8
Andrew Scull18c78fc2018-08-20 12:57:41 +01009#include "hf/cpu.h"
Wedson Almeida Filho987c0ff2018-06-20 16:34:38 +010010
Andrew Scull04502e42018-09-03 14:54:52 +010011#include <stdalign.h>
12
Max Shvetsov9c0ebe42020-08-27 12:37:57 +010013#include "hf/arch/cache.h"
14
Andrew Scull18c78fc2018-08-20 12:57:41 +010015#include "hf/api.h"
Andrew Scull877ae4b2019-07-02 12:52:33 +010016#include "hf/check.h"
Andrew Scull18c78fc2018-08-20 12:57:41 +010017#include "hf/dlog.h"
Wedson Almeida Filho987c0ff2018-06-20 16:34:38 +010018
Andrew Scull19503262018-09-20 14:48:39 +010019#include "vmapi/hf/call.h"
20
/* Each CPU's call stack occupies exactly one page. */
#define STACK_SIZE PAGE_SIZE

/**
 * The stacks to be used by the CPUs.
 *
 * Align to page boundaries to ensure that cache lines are not shared between a
 * CPU's stack and data that can be accessed from other CPUs. If this did
 * happen, there may be coherency problems when the stack is being used before
 * caching is enabled.
 */
alignas(PAGE_SIZE) static char callstacks[MAX_CPUS][STACK_SIZE];

/* NOLINTNEXTLINE(misc-redundant-expression) */
static_assert((STACK_SIZE % PAGE_SIZE) == 0, "Keep each stack page aligned.");
static_assert((PAGE_SIZE % STACK_ALIGN) == 0,
	      "Page alignment is too weak for the stack.");
Wedson Almeida Filho3fcbcff2018-07-10 23:53:39 +010037
/**
 * Internal buffer used to store FF-A messages from a VM Tx. Its usage prevents
 * TOCTOU issues while Hafnium performs actions on information that would
 * otherwise be re-writable by the VM.
 *
 * Each buffer is owned by a single CPU. The buffer can only be used for
 * ffa_msg_send. The information stored in the buffer is only valid while the
 * ffa_msg_send request is being performed.
 */
alignas(PAGE_SIZE) static uint8_t cpu_message_buffer[MAX_CPUS][PAGE_SIZE];
Jose Marinho20713fa2019-08-07 15:42:07 +010048
Mahesh Bireddy8ca57862020-01-07 13:43:21 +053049uint8_t *cpu_get_buffer(struct cpu *c)
Jose Marinho20713fa2019-08-07 15:42:07 +010050{
Mahesh Bireddy8ca57862020-01-07 13:43:21 +053051 size_t cpu_indx = cpu_index(c);
Jose Marinho20713fa2019-08-07 15:42:07 +010052
Mahesh Bireddy8ca57862020-01-07 13:43:21 +053053 CHECK(cpu_indx < MAX_CPUS);
54
55 return cpu_message_buffer[cpu_indx];
Jose Marinho20713fa2019-08-07 15:42:07 +010056}
57
Mahesh Bireddy8ca57862020-01-07 13:43:21 +053058uint32_t cpu_get_buffer_size(struct cpu *c)
Jose Marinho20713fa2019-08-07 15:42:07 +010059{
Mahesh Bireddy8ca57862020-01-07 13:43:21 +053060 size_t cpu_indx = cpu_index(c);
Jose Marinho20713fa2019-08-07 15:42:07 +010061
Mahesh Bireddy8ca57862020-01-07 13:43:21 +053062 CHECK(cpu_indx < MAX_CPUS);
63
64 return sizeof(cpu_message_buffer[cpu_indx]);
Jose Marinho20713fa2019-08-07 15:42:07 +010065}
66
/* State of all supported CPUs. The stack of the first one is initialized. */
struct cpu cpus[MAX_CPUS] = {
	{
		.is_on = 1,
		.stack_bottom = &callstacks[0][STACK_SIZE],
	},
};

/* Number of CPUs in use; overwritten by cpu_module_init from the config. */
uint32_t cpu_count = 1;
Wedson Almeida Filho3fcbcff2018-07-10 23:53:39 +010076
/**
 * Populates the global cpus array with the CPU IDs from the configuration.
 *
 * The boot CPU (cpus[0]) keeps slot 0; all other CPUs are assigned slots from
 * the end of the array backwards. After initialization the cpus array and
 * cpu_count are cleaned from the data cache so that secondary cores can read
 * them before enabling their own caches/MMU.
 */
void cpu_module_init(const cpu_id_t *cpu_ids, size_t count)
{
	uint32_t i;
	uint32_t j;
	cpu_id_t boot_cpu_id = cpus[0].id;
	bool found_boot_cpu = false;

	cpu_count = count;

	/*
	 * Initialize CPUs with the IDs from the configuration passed in. The
	 * CPUs after the boot CPU are initialized in reverse order. The boot
	 * CPU is initialized when it is found or in place of the last CPU if it
	 * is not found.
	 */
	j = cpu_count;
	for (i = 0; i < cpu_count; ++i) {
		struct cpu *c;
		cpu_id_t id = cpu_ids[i];

		if (found_boot_cpu || id != boot_cpu_id) {
			/* Non-boot CPUs fill slots from the top down. */
			--j;
			c = &cpus[j];
			c->stack_bottom = &callstacks[j][STACK_SIZE];
		} else {
			found_boot_cpu = true;
			c = &cpus[0];
			/* Slot 0's stack was set statically; sanity-check it. */
			CHECK(c->stack_bottom == &callstacks[0][STACK_SIZE]);
		}

		sl_init(&c->lock);
		c->id = id;
	}

	if (!found_boot_cpu) {
		/* Boot CPU was initialized but with wrong ID. */
		dlog_warning("Boot CPU's ID not found in config.\n");
		cpus[0].id = boot_cpu_id;
	}

	/*
	 * Clean the cache for the cpus array such that secondary cores
	 * hitting the entry point can read the cpus array consistently
	 * with MMU off (hence data cache off).
	 */
	arch_cache_data_clean_range(va_from_ptr(cpus), sizeof(cpus));

	arch_cache_data_clean_range(va_from_ptr(&cpu_count), sizeof(cpu_count));
}
126
127size_t cpu_index(struct cpu *c)
128{
Wedson Almeida Filho03e767a2018-07-30 15:32:03 +0100129 return c - cpus;
Wedson Almeida Filho3fcbcff2018-07-10 23:53:39 +0100130}
131
Olivier Deprez7d5e5532020-09-22 15:06:58 +0200132/*
133 * Return cpu with the given index.
134 */
135struct cpu *cpu_find_index(size_t index)
136{
137 return (index < MAX_CPUS) ? &cpus[index] : NULL;
138}
139
Wedson Almeida Filho87009642018-07-02 10:20:07 +0100140/**
141 * Turns CPU on and returns the previous state.
142 */
Andrew Scull37402872018-10-24 14:23:06 +0100143bool cpu_on(struct cpu *c, ipaddr_t entry, uintreg_t arg)
Wedson Almeida Filho987c0ff2018-06-20 16:34:38 +0100144{
Wedson Almeida Filho87009642018-07-02 10:20:07 +0100145 bool prev;
146
Wedson Almeida Filho987c0ff2018-06-20 16:34:38 +0100147 sl_lock(&c->lock);
Wedson Almeida Filho87009642018-07-02 10:20:07 +0100148 prev = c->is_on;
149 c->is_on = true;
150 sl_unlock(&c->lock);
151
152 if (!prev) {
Olivier Deprezb9adff42021-02-01 12:14:05 +0100153 /* This returns the first booted VM (e.g. primary in the NWd) */
154 struct vm *vm = vm_get_first_boot();
Andrew Walbrane1310df2019-04-29 17:28:28 +0100155 struct vcpu *vcpu = vm_get_vcpu(vm, cpu_index(c));
Andrew Walbranb58f8992019-04-15 12:29:31 +0100156 struct vcpu_locked vcpu_locked;
Wedson Almeida Filho81568c42019-01-04 13:33:02 +0000157
Andrew Walbranb58f8992019-04-15 12:29:31 +0100158 vcpu_locked = vcpu_lock(vcpu);
159 vcpu_on(vcpu_locked, entry, arg);
160 vcpu_unlock(&vcpu_locked);
Wedson Almeida Filho987c0ff2018-06-20 16:34:38 +0100161 }
Wedson Almeida Filho87009642018-07-02 10:20:07 +0100162
163 return prev;
Wedson Almeida Filho987c0ff2018-06-20 16:34:38 +0100164}
165
/**
 * Prepares the CPU for turning itself off.
 *
 * Only clears the is_on flag under the CPU's lock; the actual power-down is
 * performed elsewhere (e.g. by the platform power controller).
 */
void cpu_off(struct cpu *c)
{
	sl_lock(&c->lock);
	c->is_on = false;
	sl_unlock(&c->lock);
}
Wedson Almeida Filho987c0ff2018-06-20 16:34:38 +0100175
Wedson Almeida Filho03e767a2018-07-30 15:32:03 +0100176/**
Fuad Tabbab0ef2a42019-12-19 11:19:25 +0000177 * Searches for a CPU based on its ID.
Wedson Almeida Filho03e767a2018-07-30 15:32:03 +0100178 */
Andrew Walbran4d3fa282019-06-26 13:31:15 +0100179struct cpu *cpu_find(cpu_id_t id)
Wedson Almeida Filho03e767a2018-07-30 15:32:03 +0100180{
181 size_t i;
182
Andrew Scullbb3ab6c2018-11-26 20:38:49 +0000183 for (i = 0; i < cpu_count; i++) {
Wedson Almeida Filho03e767a2018-07-30 15:32:03 +0100184 if (cpus[i].id == id) {
Andrew Scullf3d45592018-09-20 14:30:22 +0100185 return &cpus[i];
Wedson Almeida Filho03e767a2018-07-30 15:32:03 +0100186 }
187 }
188
189 return NULL;
Wedson Almeida Filho987c0ff2018-06-20 16:34:38 +0100190}