// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright 2018 The Hafnium Authors.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include "uapi/hf/socket.h"
#include <clocksource/arm_arch_timer.h>
#include <hf/call.h>
#include <hf/ffa.h>
#include <hf/transport.h>
#include <hf/vm_ids.h>
#include <linux/atomic.h>
#include <linux/cpuhotplug.h>
#include <linux/hrtimer.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/net.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/sched/task.h>
#include <linux/slab.h>
#include <net/sock.h>

#define HYPERVISOR_TIMER_NAME "el2_timer"

#define CONFIG_HAFNIUM_MAX_VMS 16
#define CONFIG_HAFNIUM_MAX_VCPUS 32

#define HF_VM_ID_BASE 0
#define PRIMARY_VM_ID HF_VM_ID_OFFSET
#define FIRST_SECONDARY_VM_ID (HF_VM_ID_OFFSET + 1)

struct hf_vcpu {
	struct hf_vm *vm;
	ffa_vcpu_index_t vcpu_index;
	struct task_struct *task;
	atomic_t abort_sleep;
	atomic_t waiting_for_message;
	struct hrtimer timer;
};

struct hf_vm {
	ffa_id_t id;
	ffa_vcpu_count_t vcpu_count;
	struct hf_vcpu *vcpu;
};

struct hf_sock {
	/* This needs to be the first field. */
	struct sock sk;

	/*
	 * The following fields are immutable after the socket transitions to
	 * SS_CONNECTED state.
	 */
	uint64_t local_port;
	uint64_t remote_port;
	struct hf_vm *peer_vm;
};

static struct proto hf_sock_proto = {
	.name = "hafnium",
	.owner = THIS_MODULE,
	.obj_size = sizeof(struct hf_sock),
};

static struct hf_vm hf_vms[] = {{
	.id = 2,
	.vcpu_count = 1,
}};
const ffa_vm_count_t hf_vm_count = ARRAY_SIZE(hf_vms);

static struct page *hf_send_page;
static struct page *hf_recv_page;
static atomic64_t hf_next_port = ATOMIC64_INIT(0);
static DEFINE_SPINLOCK(hf_send_lock);
static DEFINE_HASHTABLE(hf_local_port_hash, 7);
static DEFINE_SPINLOCK(hf_local_port_hash_lock);
static int hf_irq;
static enum cpuhp_state hf_cpuhp_state;
static ffa_id_t current_vm_id;

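/*
 * Connected sockets are indexed in hf_local_port_hash by their local port
 * (see hf_sock_connect()) and looked up under rcu_read_lock() on message
 * receipt (see hf_handle_message()). Writers take hf_local_port_hash_lock;
 * readers rely on RCU, which is why hf_sock_release() waits for a grace
 * period before dropping its reference.
 */
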
/**
 * Retrieves a VM from its ID, returning NULL if the VM doesn't exist.
 */
static struct hf_vm *hf_vm_from_id(ffa_id_t vm_id)
{
	if (vm_id < FIRST_SECONDARY_VM_ID ||
	    vm_id >= FIRST_SECONDARY_VM_ID + hf_vm_count)
		return NULL;

	return &hf_vms[vm_id - FIRST_SECONDARY_VM_ID];
}

/**
 * Wakes up the kernel thread responsible for running the given vcpu.
 *
 * Returns 0 if the thread was already running, 1 otherwise.
 */
static int hf_vcpu_wake_up(struct hf_vcpu *vcpu)
{
	/* Set a flag indicating that the thread should not go to sleep. */
	atomic_set(&vcpu->abort_sleep, 1);

	/* Set the thread to running state. */
	return wake_up_process(vcpu->task);
}

/**
 * Puts the current thread to sleep. The current thread must be responsible for
 * running the given vcpu.
 *
 * Going to sleep will fail if hf_vcpu_wake_up() or kthread_stop() was called on
 * this vcpu/thread since the last time it [re]started running.
 */
static void hf_vcpu_sleep(struct hf_vcpu *vcpu)
{
	int abort;

	set_current_state(TASK_INTERRUPTIBLE);

	/* Check the sleep-abort flag after making the thread interruptible. */
	abort = atomic_read(&vcpu->abort_sleep);
	if (!abort && !kthread_should_stop())
		schedule();

	/* Set state back to running on the way out. */
	set_current_state(TASK_RUNNING);
}

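/*
 * Note on the abort_sleep handshake above: setting TASK_INTERRUPTIBLE
 * *before* reading abort_sleep closes the race with hf_vcpu_wake_up(). If
 * the waker sets the flag and calls wake_up_process() between those two
 * steps, either the flag read sees 1 and schedule() is skipped, or
 * wake_up_process() has already moved the task back to TASK_RUNNING so
 * schedule() returns promptly. A sketch of the benign interleaving:
 *
 *   vcpu thread                        waker
 *   set_current_state(INTERRUPTIBLE)
 *                                      atomic_set(abort_sleep, 1)
 *                                      wake_up_process(task)
 *   atomic_read(abort_sleep) == 1
 *   -> skip schedule()
 */
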
/**
 * Wakes up the thread associated with the vcpu that owns the given timer. This
 * is called when the timer the thread is waiting on expires.
 */
static enum hrtimer_restart hf_vcpu_timer_expired(struct hrtimer *timer)
{
	struct hf_vcpu *vcpu = container_of(timer, struct hf_vcpu, timer);

	/* TODO: Inject interrupt. */
	hf_vcpu_wake_up(vcpu);
	return HRTIMER_NORESTART;
}

/**
 * This function is called when Hafnium requests that the primary VM wake up a
 * vCPU that belongs to a secondary VM.
 *
 * It wakes up the thread if it's sleeping, or kicks it if it's already running.
 */
static void hf_handle_wake_up_request(ffa_id_t vm_id, ffa_vcpu_index_t vcpu)
{
	struct hf_vm *vm = hf_vm_from_id(vm_id);

	if (!vm) {
		pr_warn("Request to wake up non-existent VM id: %u\n", vm_id);
		return;
	}

	if (vcpu >= vm->vcpu_count) {
		pr_warn("Request to wake up non-existent vCPU: %u.%u\n", vm_id,
			vcpu);
		return;
	}

	if (hf_vcpu_wake_up(&vm->vcpu[vcpu]) == 0) {
		/*
		 * The task was already running (presumably on a different
		 * physical CPU); interrupt it. This gives Hafnium a chance to
		 * inject any new interrupts.
		 */
		kick_process(vm->vcpu[vcpu].task);
	}
}

/**
 * Injects an interrupt into a vCPU of the VM and ensures the vCPU will run to
 * handle the interrupt.
 */
static void hf_interrupt_vm(ffa_id_t vm_id, uint64_t int_id)
{
	struct hf_vm *vm = hf_vm_from_id(vm_id);
	ffa_vcpu_index_t vcpu;
	int64_t ret;

	if (!vm) {
		pr_warn("Request to interrupt non-existent VM id: %u\n",
			vm_id);
		return;
	}

	/*
	 * TODO: For now we're picking the first vcpu to interrupt, but
	 * we want to be smarter.
	 */
	vcpu = 0;
	ret = hf_interrupt_inject(vm_id, vcpu, int_id);

	if (ret == -1) {
		pr_warn("Failed to inject interrupt %llu to vCPU %u of VM %u\n",
			int_id, vcpu, vm_id);
		return;
	}

	if (ret != 1) {
		/* We don't need to wake up the vcpu. */
		return;
	}

	hf_handle_wake_up_request(vm_id, vcpu);
}

/**
 * Notify all waiters on the given VM.
 */
static void hf_notify_waiters(ffa_id_t vm_id)
{
	int64_t ret;

	/*
	 * Keep the return value in a 64-bit type for the -1 sentinel check:
	 * if it were truncated to ffa_id_t first, integer promotion would
	 * make the comparison against -1 never true and the loop would spin.
	 */
	while ((ret = hf_mailbox_waiter_get(vm_id)) != -1) {
		ffa_id_t waiter_vm_id = (ffa_id_t)ret;

		if (waiter_vm_id == PRIMARY_VM_ID) {
			/*
			 * TODO: Use this information when implementing per-vm
			 * queues.
			 */
		} else {
			hf_interrupt_vm(waiter_vm_id,
					HF_MAILBOX_WRITABLE_INTID);
		}
	}
}

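/*
 * The waiter list drained above is maintained by Hafnium: a VM that finds a
 * peer's mailbox busy can ask to be notified when it becomes writable.
 * Primary-VM waiters are currently ignored (see the TODO above); secondary
 * VMs get HF_MAILBOX_WRITABLE_INTID injected so they can retry their send.
 */
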
/**
 * Delivers a message to a VM.
 */
static void hf_deliver_message(ffa_id_t vm_id)
{
	struct hf_vm *vm = hf_vm_from_id(vm_id);
	ffa_vcpu_index_t i;

	if (!vm) {
		pr_warn("Tried to deliver message to non-existent VM id: %u\n",
			vm_id);
		return;
	}

	/* Try to wake a vCPU that is waiting for a message. */
	for (i = 0; i < vm->vcpu_count; i++) {
		if (atomic_read(&vm->vcpu[i].waiting_for_message)) {
			hf_handle_wake_up_request(vm->id,
						  vm->vcpu[i].vcpu_index);
			return;
		}
	}

	/* None were waiting for a message so interrupt one. */
	hf_interrupt_vm(vm->id, HF_MAILBOX_READABLE_INTID);
}

/**
 * Handles a message delivered to this VM by validating that it's well-formed
 * and then queueing it for delivery to the appropriate socket.
 */
static void hf_handle_message(struct hf_vm *sender, size_t len,
			      const void *message)
{
	struct hf_sock *hsock;
	const struct hf_msg_hdr *hdr = (const struct hf_msg_hdr *)message;
	struct sk_buff *skb;
	int err;

	/* Ignore messages that are too small to hold a header. */
	if (len < sizeof(struct hf_msg_hdr)) {
		pr_err("Message of length %zu is too small to hold a header\n",
		       len);
		ffa_rx_release();
		return;
	}

	len -= sizeof(struct hf_msg_hdr);

	/* Go through the colliding sockets. */
	rcu_read_lock();
	hash_for_each_possible_rcu(hf_local_port_hash, hsock, sk.sk_node,
				   hdr->dst_port) {
		if (hsock->peer_vm == sender &&
		    hsock->remote_port == hdr->src_port) {
			sock_hold(&hsock->sk);
			break;
		}
	}
	rcu_read_unlock();

	/* Nothing to do if we couldn't find the target. */
	if (!hsock) {
		ffa_rx_release();
		return;
	}

	/*
	 * TODO: From this point on, there are two failure paths: when we
	 * create the skb below, and when we enqueue it to the socket. What
	 * should we do if they fail? Ideally we would have some form of flow
	 * control to prevent message loss, but how to do it efficiently?
	 *
	 * One option is to have a pre-allocated message that indicates to the
	 * sender that a message was dropped. This way we guarantee that the
	 * sender will be aware of loss and should back-off.
	 */
	/* Create the skb. */
	skb = alloc_skb(len, GFP_KERNEL);
	if (!skb)
		goto exit;

	memcpy(skb_put(skb, len), hdr + 1, len);

	/*
	 * Add the skb to the receive queue of the target socket. On success it
	 * calls sk->sk_data_ready, which is currently set to sock_def_readable,
	 * which wakes up any waiters.
	 */
	err = sock_queue_rcv_skb(&hsock->sk, skb);
	if (err)
		kfree_skb(skb);

exit:
	sock_put(&hsock->sk);

	if (ffa_rx_release().func == FFA_RX_RELEASE_32)
		hf_notify_waiters(PRIMARY_VM_ID);
}

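/*
 * Wire format handled above, for reference: each mailbox message carries a
 * struct hf_msg_hdr (from hf/transport.h) followed directly by the payload,
 * which is why the payload copy starts at hdr + 1. A minimal sketch,
 * assuming the header holds only the two port fields used here:
 *
 *   struct hf_msg_hdr {
 *           uint64_t src_port;
 *           uint64_t dst_port;
 *   };
 *
 *   [ hf_msg_hdr | payload (len - sizeof(struct hf_msg_hdr) bytes) ]
 */
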
/**
 * This is the main loop of each vcpu.
 */
static int hf_vcpu_thread(void *data)
{
	struct hf_vcpu *vcpu = data;
	struct ffa_value ret;

	hrtimer_init(&vcpu->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	vcpu->timer.function = &hf_vcpu_timer_expired;

	while (!kthread_should_stop()) {
		ffa_vcpu_index_t i;

		/*
		 * We're about to run the vcpu, so we can reset the abort-sleep
		 * flag.
		 */
		atomic_set(&vcpu->abort_sleep, 0);

		/* Call into Hafnium to run vcpu. */
		ret = ffa_run(vcpu->vm->id, vcpu->vcpu_index);

		switch (ret.func) {
		/* Preempted, or wants to wake up another vCPU. */
		case FFA_INTERRUPT_32: {
			ffa_id_t vm_id = ffa_vm_id(ret);
			ffa_vcpu_index_t vcpu_index = ffa_vcpu_index(ret);

			if (vm_id >= FIRST_SECONDARY_VM_ID &&
			    vm_id != vcpu->vm->id) {
				/* Wake up another vCPU. */
				hf_handle_wake_up_request(vm_id, vcpu_index);
			}
			if (need_resched())
				schedule();
			break;
		}

		/* Yield. */
		case FFA_YIELD_32:
			if (!kthread_should_stop())
				schedule();
			break;

		/* WFI. */
		case HF_FFA_RUN_WAIT_FOR_INTERRUPT:
			if (ret.arg2 != FFA_SLEEP_INDEFINITE) {
				hrtimer_start(&vcpu->timer, ret.arg2,
					      HRTIMER_MODE_REL);
			}
			hf_vcpu_sleep(vcpu);
			hrtimer_cancel(&vcpu->timer);
			break;

		/* Waiting for a message. */
		case FFA_MSG_WAIT_32:
			atomic_set(&vcpu->waiting_for_message, 1);
			if (ret.arg2 != FFA_SLEEP_INDEFINITE) {
				hrtimer_start(&vcpu->timer, ret.arg2,
					      HRTIMER_MODE_REL);
			}
			hf_vcpu_sleep(vcpu);
			hrtimer_cancel(&vcpu->timer);
			atomic_set(&vcpu->waiting_for_message, 0);
			break;

		/* Response available. */
		case FFA_MSG_SEND_32:
			if (ffa_receiver(ret) == PRIMARY_VM_ID) {
				hf_handle_message(vcpu->vm,
						  ffa_msg_send_size(ret),
						  page_address(hf_recv_page));
			} else {
				hf_deliver_message(ffa_receiver(ret));
			}
			break;

		/* Notify all waiters. */
		case FFA_RX_RELEASE_32:
			hf_notify_waiters(vcpu->vm->id);
			break;

		case FFA_ERROR_32:
			pr_warn("FF-A error %lld running VM %u vCPU %u\n",
				(long long)ret.arg2, vcpu->vm->id,
				vcpu->vcpu_index);
			switch (ret.arg2) {
			/* Abort was triggered. */
			case FFA_ABORTED:
				for (i = 0; i < vcpu->vm->vcpu_count; i++) {
					if (i == vcpu->vcpu_index)
						continue;
					hf_handle_wake_up_request(vcpu->vm->id,
								  i);
				}
				hf_vcpu_sleep(vcpu);
				break;
			default:
				/* Treat as a yield and try again later. */
				if (!kthread_should_stop())
					schedule();
				break;
			}
			break;
		}
	}

	return 0;
}

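/*
 * Summary of the ffa_run() dispatch above: FFA_INTERRUPT_32 means the vCPU
 * was preempted (or asked for another vCPU to be woken), FFA_YIELD_32
 * yields the physical CPU, HF_FFA_RUN_WAIT_FOR_INTERRUPT and
 * FFA_MSG_WAIT_32 block the thread (with an optional hrtimer-backed
 * timeout carried in arg2), FFA_MSG_SEND_32 delivers a mailbox message,
 * FFA_RX_RELEASE_32 notifies mailbox waiters, and FFA_ERROR_32 reports a
 * failed or aborted run.
 */
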
/**
 * Converts a pointer to a struct sock into a pointer to a struct hf_sock. It
 * relies on the fact that the first field of hf_sock is a sock.
 */
static struct hf_sock *hsock_from_sk(struct sock *sk)
{
	return (struct hf_sock *)sk;
}

/**
 * This is called when the last reference to the outer socket is released. For
 * example, if it's a user-space socket, when the last file descriptor pointing
 * to this socket is closed.
 *
 * It begins cleaning up resources, though some can only be cleaned up after all
 * references to the underlying socket are released, which is handled by
 * hf_sock_destruct().
 */
static int hf_sock_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct hf_sock *hsock = hsock_from_sk(sk);
	unsigned long flags;

	if (!sk)
		return 0;

	/* Shutdown for both send and receive. */
	lock_sock(sk);
	sk->sk_shutdown |= RCV_SHUTDOWN | SEND_SHUTDOWN;
	sk->sk_state_change(sk);
	release_sock(sk);

	/* Remove from the hash table, so lookups from now on won't find it. */
	spin_lock_irqsave(&hf_local_port_hash_lock, flags);
	hash_del_rcu(&hsock->sk.sk_node);
	spin_unlock_irqrestore(&hf_local_port_hash_lock, flags);

	/*
	 * TODO: When we implement a tx queue, we need to clear it here so that
	 * sk_wmem_alloc will not prevent sk from being freed (sk_free).
	 */

	/*
	 * Wait for in-flight lookups to finish. We need to do this here because
	 * in-flight lookups rely on the reference to the socket we're about to
	 * release.
	 */
	synchronize_rcu();
	sock_put(sk);
	sock->sk = NULL;

	return 0;
}

/**
 * This is called when there are no more references to the socket. It frees all
 * resources that haven't been freed during release.
 */
static void hf_sock_destruct(struct sock *sk)
{
	/*
	 * Clear the receive queue now that the handler cannot add any more
	 * skbs to it.
	 */
	skb_queue_purge(&sk->sk_receive_queue);
}

/**
 * Connects the Hafnium socket to the provided VM and port. After the socket is
 * connected, it can be used to exchange datagrams with the specified peer.
 */
static int hf_sock_connect(struct socket *sock, struct sockaddr *saddr, int len,
			   int connect_flags)
{
	struct sock *sk = sock->sk;
	struct hf_sock *hsock = hsock_from_sk(sk);
	struct hf_vm *vm;
	struct hf_sockaddr *addr;
	int err;
	unsigned long flags;

	/* Basic address validation. */
	if (len < sizeof(struct hf_sockaddr) || saddr->sa_family != AF_HF)
		return -EINVAL;

	addr = (struct hf_sockaddr *)saddr;
	vm = hf_vm_from_id(addr->vm_id);
	if (!vm)
		return -ENETUNREACH;

	/*
	 * TODO: Once we implement access control in Hafnium, check that the
	 * caller is allowed to contact the specified VM. Return -ECONNREFUSED
	 * if access is denied.
	 */

	/* Take lock to make sure state doesn't change as we connect. */
	lock_sock(sk);

	/* Only unconnected sockets are allowed to become connected. */
	if (sock->state != SS_UNCONNECTED) {
		err = -EISCONN;
		goto exit;
	}

	hsock->local_port = atomic64_inc_return(&hf_next_port);
	hsock->remote_port = addr->port;
	hsock->peer_vm = vm;

	sock->state = SS_CONNECTED;

	/* Add socket to hash table now that it's fully initialised. */
	spin_lock_irqsave(&hf_local_port_hash_lock, flags);
	hash_add_rcu(hf_local_port_hash, &sk->sk_node, hsock->local_port);
	spin_unlock_irqrestore(&hf_local_port_hash_lock, flags);

	err = 0;
exit:
	release_sock(sk);
	return err;
}

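/*
 * Illustrative user-space usage of the connect path above (a hedged
 * sketch: the PF_HF/AF_HF constants and the exact hf_sockaddr layout come
 * from uapi/hf/socket.h, the field named "family" is an assumption, and
 * the vm_id/port values are placeholders):
 *
 *   int fd = socket(PF_HF, SOCK_DGRAM, 0);
 *   struct hf_sockaddr addr = {
 *           .family = AF_HF,
 *           .vm_id = 2,       // a secondary VM id
 *           .port = 1,        // remote port on that VM
 *   };
 *
 *   if (connect(fd, (struct sockaddr *)&addr, sizeof(addr)) == 0) {
 *           send(fd, buf, buf_len, 0);      // ends up in hf_sock_sendmsg()
 *           recv(fd, out, out_len, 0);      // ends up in hf_sock_recvmsg()
 *   }
 */
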
/**
 * Sends the given skb to the appropriate VM by calling Hafnium. It will also
 * trigger the wake up of a recipient VM.
 *
 * Takes ownership of the skb on success.
 */
static int hf_send_skb(struct sk_buff *skb)
{
	unsigned long flags;
	struct ffa_value ret;
	struct hf_sock *hsock = hsock_from_sk(skb->sk);
	struct hf_vm *vm = hsock->peer_vm;
	void *message = page_address(hf_send_page);

	/*
	 * Call Hafnium under the send lock so that we serialize the use of the
	 * global send buffer.
	 */
	spin_lock_irqsave(&hf_send_lock, flags);
	memcpy(message, skb->data, skb->len);

	ret = ffa_msg_send(current_vm_id, vm->id, skb->len, 0);
	spin_unlock_irqrestore(&hf_send_lock, flags);

	if (ret.func == FFA_ERROR_32) {
		switch (ret.arg2) {
		case FFA_INVALID_PARAMETERS:
			return -ENXIO;
		case FFA_NOT_SUPPORTED:
			return -EIO;
		case FFA_DENIED:
		case FFA_BUSY:
		default:
			return -EAGAIN;
		}
	}

	/* Ensure the VM will run to pick up the message. */
	hf_deliver_message(vm->id);

	kfree_skb(skb);

	return 0;
}

/**
 * Determines if the given socket is in the connected state. It acquires and
 * releases the socket lock.
 */
static bool hf_sock_is_connected(struct socket *sock)
{
	bool ret;

	lock_sock(sock->sk);
	ret = sock->state == SS_CONNECTED;
	release_sock(sock->sk);

	return ret;
}

/**
 * Sends a message to the VM & port the socket is connected to. All variants
 * of write/send/sendto/sendmsg eventually call this function.
 */
static int hf_sock_sendmsg(struct socket *sock, struct msghdr *m, size_t len)
{
	struct sock *sk = sock->sk;
	struct sk_buff *skb;
	int err;
	struct hf_msg_hdr *hdr;
	struct hf_sock *hsock = hsock_from_sk(sk);
	size_t payload_max_len = HF_MAILBOX_SIZE - sizeof(struct hf_msg_hdr);

	/* Check length. */
	if (len > payload_max_len)
		return -EMSGSIZE;

	/* We don't allow the destination address to be specified. */
	if (m->msg_namelen > 0)
		return -EISCONN;

	/* We don't support out of band messages. */
	if (m->msg_flags & MSG_OOB)
		return -EOPNOTSUPP;

	/*
	 * Ensure that the socket is connected. We don't need to hold the socket
	 * lock (acquired and released by hf_sock_is_connected) for the
	 * remainder of the function because the fields we care about are
	 * immutable once the state is SS_CONNECTED.
	 */
	if (!hf_sock_is_connected(sock))
		return -ENOTCONN;

	/*
	 * Allocate an skb for this write. If there isn't enough room in the
	 * socket's send buffer (sk_wmem_alloc >= sk_sndbuf), this will block
	 * (if it's a blocking call). On success, it increments sk_wmem_alloc
	 * and sets up the skb such that sk_wmem_alloc gets decremented when
	 * the skb is freed (sock_wfree gets called).
	 */
	skb = sock_alloc_send_skb(sk, len + sizeof(struct hf_msg_hdr),
				  m->msg_flags & MSG_DONTWAIT, &err);
	if (!skb)
		return err;

	/* Reserve room for the header and initialise it. */
	skb_reserve(skb, sizeof(struct hf_msg_hdr));
	hdr = skb_push(skb, sizeof(struct hf_msg_hdr));
	hdr->src_port = hsock->local_port;
	hdr->dst_port = hsock->remote_port;

	/* Allocate area for the contents, then copy into skb. */
	if (!copy_from_iter_full(skb_put(skb, len), len, &m->msg_iter)) {
		err = -EFAULT;
		goto err_cleanup;
	}

	/*
	 * TODO: We currently do this inline, but when we have support for
	 * readiness notification from Hafnium, we must add this to a per-VM tx
	 * queue that can make progress when the VM becomes writable. This will
	 * fix send buffering and poll readiness notification.
	 */
	err = hf_send_skb(skb);
	if (err)
		goto err_cleanup;

	return 0;

err_cleanup:
	kfree_skb(skb);
	return err;
}

/**
 * Receives a message originated from the VM & port the socket is connected to.
 * All variants of read/recv/recvfrom/recvmsg eventually call this function.
 */
static int hf_sock_recvmsg(struct socket *sock, struct msghdr *m, size_t len,
			   int flags)
{
	struct sock *sk = sock->sk;
	struct sk_buff *skb;
	int err;
	size_t copy_len;

	if (!hf_sock_is_connected(sock))
		return -ENOTCONN;

	/* Grab the next skb from the receive queue. */
	skb = skb_recv_datagram(sk, flags, flags & MSG_DONTWAIT, &err);
	if (!skb)
		return err;

	/* Make sure we don't copy more than what fits in the output buffer. */
	copy_len = skb->len;
	if (copy_len > len) {
		copy_len = len;
		m->msg_flags |= MSG_TRUNC;
	}

	/* Make sure we don't overflow the return value type. */
	if (copy_len > INT_MAX) {
		copy_len = INT_MAX;
		m->msg_flags |= MSG_TRUNC;
	}

	/* Copy skb to output iterator, then free it. */
	err = skb_copy_datagram_msg(skb, 0, m, copy_len);
	skb_free_datagram(sk, skb);
	if (err)
		return err;

	return copy_len;
}

/**
 * This function is called when a Hafnium socket is created. It initialises all
 * state such that the caller will be able to connect the socket and then send
 * and receive messages through it.
 */
static int hf_sock_create(struct net *net, struct socket *sock, int protocol,
			  int kern)
{
	static const struct proto_ops ops = {
		.family = PF_HF,
		.owner = THIS_MODULE,
		.release = hf_sock_release,
		.bind = sock_no_bind,
		.connect = hf_sock_connect,
		.socketpair = sock_no_socketpair,
		.accept = sock_no_accept,
		.ioctl = sock_no_ioctl,
		.listen = sock_no_listen,
		.shutdown = sock_no_shutdown,
		.sendmsg = hf_sock_sendmsg,
		.recvmsg = hf_sock_recvmsg,
		.mmap = sock_no_mmap,
		.sendpage = sock_no_sendpage,
		.poll = datagram_poll,
	};
	struct sock *sk;

	if (sock->type != SOCK_DGRAM)
		return -ESOCKTNOSUPPORT;

	if (protocol != 0)
		return -EPROTONOSUPPORT;

	/*
	 * For now we only allow callers with sys admin capability to create
	 * Hafnium sockets.
	 */
	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	/* Allocate and initialise socket. */
	sk = sk_alloc(net, PF_HF, GFP_KERNEL, &hf_sock_proto, kern);
	if (!sk)
		return -ENOMEM;

	sock_init_data(sock, sk);

	sk->sk_destruct = hf_sock_destruct;
	sock->ops = &ops;
	sock->state = SS_UNCONNECTED;

	return 0;
}

/**
 * Frees all resources, including threads, associated with the Hafnium driver.
 */
static void hf_free_resources(void)
{
	uint16_t i;
	ffa_vcpu_index_t j;

	/*
	 * First stop all worker threads. We need to do this before freeing
	 * resources because workers may reference each other, so it is only
	 * safe to free resources after they have all stopped.
	 */
	for (i = 0; i < hf_vm_count; i++) {
		struct hf_vm *vm = &hf_vms[i];

		for (j = 0; j < vm->vcpu_count; j++)
			kthread_stop(vm->vcpu[j].task);
	}

	/* Free resources. */
	for (i = 0; i < hf_vm_count; i++) {
		struct hf_vm *vm = &hf_vms[i];

		for (j = 0; j < vm->vcpu_count; j++)
			put_task_struct(vm->vcpu[j].task);
		kfree(vm->vcpu);
	}

	ffa_rx_release();
	ffa_rxtx_unmap();
	if (hf_send_page) {
		__free_page(hf_send_page);
		hf_send_page = NULL;
	}
	if (hf_recv_page) {
		__free_page(hf_recv_page);
		hf_recv_page = NULL;
	}
}

/**
 * Handles the hypervisor timer interrupt.
 */
static irqreturn_t hf_nop_irq_handler(int irq, void *dev)
{
	/*
	 * No need to do anything, the interrupt only exists to return to the
	 * primary vCPU so that the virtual timer will be restored and fire as
	 * normal.
	 */
	return IRQ_HANDLED;
}

/**
 * Enables the hypervisor timer interrupt on a CPU, when it starts or after the
 * driver is first loaded.
 */
static int hf_starting_cpu(unsigned int cpu)
{
	if (hf_irq != 0) {
		/* Enable the interrupt, and set it to be edge-triggered. */
		enable_percpu_irq(hf_irq, IRQ_TYPE_EDGE_RISING);
	}

	return 0;
}

/**
 * Disables the hypervisor timer interrupt on a CPU when it is powered down.
 */
static int hf_dying_cpu(unsigned int cpu)
{
	if (hf_irq != 0) {
		/* Disable the interrupt while the CPU is asleep. */
		disable_percpu_irq(hf_irq);
	}

	return 0;
}

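/*
 * The two callbacks above are wired up via cpuhp_setup_state() in
 * hf_int_driver_probe() using CPUHP_AP_ONLINE_DYN, which allocates a
 * dynamic hotplug state; the returned value is kept in hf_cpuhp_state so
 * that hf_int_driver_remove() can tear it down with cpuhp_remove_state().
 * Per-CPU IRQs must be enabled on each CPU individually, hence the per-CPU
 * enable/disable hooks rather than a single global request.
 */
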
/**
 * Registers for the hypervisor timer interrupt.
 */
static int hf_int_driver_probe(struct platform_device *pdev)
{
	int irq;
	int ret;

	/*
	 * Register a handler for the hypervisor timer IRQ, as it is needed for
	 * Hafnium to emulate the virtual timer for Linux while a secondary vCPU
	 * is running.
	 */
	irq = platform_get_irq(pdev, ARCH_TIMER_HYP_PPI);
	if (irq < 0) {
		pr_err("Error getting hypervisor timer IRQ: %d\n", irq);
		return irq;
	}
	hf_irq = irq;

	ret = request_percpu_irq(irq, hf_nop_irq_handler, HYPERVISOR_TIMER_NAME,
				 pdev);
	if (ret != 0) {
		pr_err("Error registering hypervisor timer IRQ %d: %d\n", irq,
		       ret);
		return ret;
	}
	pr_info("Hafnium registered for IRQ %d\n", irq);
	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN,
				"hafnium/hypervisor_timer:starting",
				hf_starting_cpu, hf_dying_cpu);
	if (ret < 0) {
		pr_err("Error enabling timer on all CPUs: %d\n", ret);
		free_percpu_irq(irq, pdev);
		return ret;
	}
	hf_cpuhp_state = ret;

	return 0;
}

/**
 * Unregisters for the hypervisor timer interrupt.
 */
static int hf_int_driver_remove(struct platform_device *pdev)
{
	/*
	 * This will cause hf_dying_cpu to be called on each CPU, which will
	 * disable the IRQs.
	 */
	cpuhp_remove_state(hf_cpuhp_state);
	free_percpu_irq(hf_irq, pdev);

	return 0;
}

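/*
 * Bind to the architected timer's device tree node: it is the node that
 * carries the hypervisor timer PPI fetched via ARCH_TIMER_HYP_PPI in
 * hf_int_driver_probe() above.
 */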
static const struct of_device_id hf_int_driver_id[] = {
	{.compatible = "arm,armv7-timer"},
	{.compatible = "arm,armv8-timer"},
	{}};

static struct platform_driver hf_int_driver = {
	.driver = {
		.name = HYPERVISOR_TIMER_NAME,
		.owner = THIS_MODULE,
		.of_match_table = of_match_ptr(hf_int_driver_id),
	},
	.probe = hf_int_driver_probe,
	.remove = hf_int_driver_remove,
};

/**
 * Print the error code of the given FF-A value if it is an error, or the
 * function ID otherwise.
 */
static void print_ffa_error(struct ffa_value ffa_ret)
{
	if (ffa_ret.func == FFA_ERROR_32)
		pr_err("FF-A error code %lld\n", (long long)ffa_ret.arg2);
	else
		pr_err("Unexpected FF-A function %#llx\n",
		       (unsigned long long)ffa_ret.func);
}

/**
 * Initializes the Hafnium driver by creating a thread for each vCPU of each
 * virtual machine.
 */
static int __init hf_init(void)
{
	static const struct net_proto_family proto_family = {
		.family = PF_HF,
		.create = hf_sock_create,
		.owner = THIS_MODULE,
	};
	int64_t ret;
	struct ffa_value ffa_ret;
	ffa_id_t i;
	ffa_vcpu_index_t j;
	uint32_t total_vcpu_count;

	/* Allocate a page for send and receive buffers. */
	hf_send_page = alloc_page(GFP_KERNEL);
	if (!hf_send_page) {
		pr_err("Unable to allocate send buffer\n");
		return -ENOMEM;
	}

	hf_recv_page = alloc_page(GFP_KERNEL);
	if (!hf_recv_page) {
		__free_page(hf_send_page);
		hf_send_page = NULL;
		pr_err("Unable to allocate receive buffer\n");
		return -ENOMEM;
	}

	/* Map the RX/TX buffers into the hypervisor. */
	ffa_ret = ffa_rxtx_map(page_to_phys(hf_send_page),
			       page_to_phys(hf_recv_page));
	if (ffa_ret.func != FFA_SUCCESS_32) {
		pr_err("Unable to configure VM mailbox.\n");
		print_ffa_error(ffa_ret);
		ret = -EIO;
		goto fail_with_cleanup;
	}

	/* Confirm the maximum number of VMs looks sane. */
	BUILD_BUG_ON(CONFIG_HAFNIUM_MAX_VMS < 1);
	BUILD_BUG_ON(CONFIG_HAFNIUM_MAX_VMS > U16_MAX);

	/* Validate the number of VMs. There must at least be the primary. */
	if (hf_vm_count > CONFIG_HAFNIUM_MAX_VMS - 1) {
		pr_err("Number of VMs is out of range: %u\n", hf_vm_count);
		ret = -EDQUOT;
		goto fail_with_cleanup;
	}

	/* Cache the VM id for later usage. */
	current_vm_id = hf_vm_get_id();

	/* Initialize each VM. */
	total_vcpu_count = 0;
	for (i = 0; i < hf_vm_count; i++) {
		struct hf_vm *vm = &hf_vms[i];
		ffa_vcpu_count_t vcpu_count;

		/* Read the vCPU count from the static VM table. */
		vcpu_count = vm->vcpu_count;

		pr_info("%s: VM id %u, vCPU count %u\n", __func__, vm->id,
			vcpu_count);

		/* Avoid overflowing the vcpu count. */
		if (vcpu_count > (U32_MAX - total_vcpu_count)) {
			pr_err("Too many vcpus: %u\n", total_vcpu_count);
			ret = -EDQUOT;
			goto fail_with_cleanup;
		}

		/* Confirm the maximum number of VCPUs looks sane. */
		BUILD_BUG_ON(CONFIG_HAFNIUM_MAX_VCPUS < 1);
		BUILD_BUG_ON(CONFIG_HAFNIUM_MAX_VCPUS > U16_MAX);

		/* Enforce the limit on vcpus. */
		total_vcpu_count += vcpu_count;
		if (total_vcpu_count > CONFIG_HAFNIUM_MAX_VCPUS) {
			pr_err("Too many vcpus: %u\n", total_vcpu_count);
			ret = -EDQUOT;
			goto fail_with_cleanup;
		}

		vm->vcpu_count = vcpu_count;
		vm->vcpu = kmalloc_array(vm->vcpu_count, sizeof(struct hf_vcpu),
					 GFP_KERNEL);
		if (!vm->vcpu) {
			ret = -ENOMEM;
			goto fail_with_cleanup;
		}

		/* Create a kernel thread for each vcpu. */
		for (j = 0; j < vcpu_count; j++) {
			struct hf_vcpu *vcpu = &vm->vcpu[j];

			vcpu->task =
				kthread_create(hf_vcpu_thread, vcpu,
					       "vcpu_thread_%u_%u", vm->id, j);
			if (IS_ERR(vcpu->task)) {
				pr_err("Error creating task (vm=%u,vcpu=%u): %ld\n",
				       vm->id, j, PTR_ERR(vcpu->task));
				vm->vcpu_count = j;
				ret = PTR_ERR(vcpu->task);
				goto fail_with_cleanup;
			}

			get_task_struct(vcpu->task);
			vcpu->vm = vm;
			vcpu->vcpu_index = j;
			atomic_set(&vcpu->abort_sleep, 0);
			atomic_set(&vcpu->waiting_for_message, 0);
		}
	}

	/* Register protocol and socket family. */
	ret = proto_register(&hf_sock_proto, 0);
	if (ret) {
		pr_err("Unable to register protocol: %lld\n", ret);
		goto fail_with_cleanup;
	}

	ret = sock_register(&proto_family);
	if (ret) {
		pr_err("Unable to register Hafnium's socket family: %lld\n",
		       ret);
		goto fail_unregister_proto;
	}

	/*
	 * Register as a driver for the timer device, so we can register a
	 * handler for the hypervisor timer IRQ.
	 */
	ret = platform_driver_register(&hf_int_driver);
	if (ret != 0) {
		pr_err("Error registering timer driver %lld\n", ret);
		goto fail_unregister_socket;
	}

	/*
	 * Start running threads now that all is initialized.
	 *
	 * Any failures from this point on must also unregister the driver with
	 * platform_driver_unregister().
	 */
	for (i = 0; i < hf_vm_count; i++) {
		struct hf_vm *vm = &hf_vms[i];

		for (j = 0; j < vm->vcpu_count; j++)
			wake_up_process(vm->vcpu[j].task);
	}

	/* Dump vm/vcpu count info. */
	pr_info("Hafnium successfully loaded with %u VMs:\n", hf_vm_count);
	for (i = 0; i < hf_vm_count; i++) {
		struct hf_vm *vm = &hf_vms[i];

		pr_info("\tVM %u: %u vCPUS\n", vm->id, vm->vcpu_count);
	}

	return 0;

fail_unregister_socket:
	sock_unregister(PF_HF);
fail_unregister_proto:
	proto_unregister(&hf_sock_proto);
fail_with_cleanup:
	hf_free_resources();
	return ret;
}

/**
 * Frees up all resources used by the Hafnium driver in preparation for
 * unloading it.
 */
static void __exit hf_exit(void)
{
	pr_info("Preparing to unload Hafnium\n");
	sock_unregister(PF_HF);
	proto_unregister(&hf_sock_proto);
	hf_free_resources();
	platform_driver_unregister(&hf_int_driver);
	pr_info("Hafnium ready to unload\n");
}

MODULE_LICENSE("GPL v2");

module_init(hf_init);
module_exit(hf_exit);