// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright 2018 The Hafnium Authors.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <clocksource/arm_arch_timer.h>
#include <linux/atomic.h>
#include <linux/cpuhotplug.h>
#include <linux/hrtimer.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/net.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/sched/task.h>
#include <linux/slab.h>
#include <net/sock.h>

#include <hf/call.h>
#include <hf/ffa.h>
#include <hf/transport.h>
#include <hf/vm_ids.h>

#include "uapi/hf/socket.h"

#define HYPERVISOR_TIMER_NAME "el2_timer"

#define CONFIG_HAFNIUM_MAX_VMS 16
#define CONFIG_HAFNIUM_MAX_VCPUS 32

#define HF_VM_ID_BASE 0
#define PRIMARY_VM_ID HF_VM_ID_OFFSET
#define FIRST_SECONDARY_VM_ID (HF_VM_ID_OFFSET + 1)
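
/*
 * Note on VM IDs: HF_VM_ID_OFFSET comes from <hf/vm_ids.h>. The primary VM
 * sits at the offset itself, and the secondary tracked at index n of hf_vms
 * has ID FIRST_SECONDARY_VM_ID + n (see hf_vm_from_id() below).
 */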

struct hf_vcpu {
	struct hf_vm *vm;
	ffa_vcpu_index_t vcpu_index;
	struct task_struct *task;
	atomic_t abort_sleep;
	atomic_t waiting_for_message;
	struct hrtimer timer;
};

struct hf_vm {
	ffa_vm_id_t id;
	ffa_vcpu_count_t vcpu_count;
	struct hf_vcpu *vcpu;
};

struct hf_sock {
	/* This needs to be the first field. */
	struct sock sk;

	/*
	 * The following fields are immutable after the socket transitions to
	 * SS_CONNECTED state.
	 */
	uint64_t local_port;
	uint64_t remote_port;
	struct hf_vm *peer_vm;
};

static struct proto hf_sock_proto = {
	.name = "hafnium",
	.owner = THIS_MODULE,
	.obj_size = sizeof(struct hf_sock),
};

static struct hf_vm *hf_vms;
static ffa_vm_count_t hf_vm_count;
static struct page *hf_send_page;
static struct page *hf_recv_page;
static atomic64_t hf_next_port = ATOMIC64_INIT(0);
static DEFINE_SPINLOCK(hf_send_lock);
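/*
 * Hash table of connected sockets, keyed by local port: 7 bits of hash give
 * 128 buckets. Writers take hf_local_port_hash_lock; lookups on the
 * message-receive path are RCU-protected (see hf_handle_message()).
 */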
static DEFINE_HASHTABLE(hf_local_port_hash, 7);
static DEFINE_SPINLOCK(hf_local_port_hash_lock);
static int hf_irq;
static enum cpuhp_state hf_cpuhp_state;
static ffa_vm_id_t current_vm_id;

/**
 * Retrieves a VM from its ID, returning NULL if the VM doesn't exist.
 */
static struct hf_vm *hf_vm_from_id(ffa_vm_id_t vm_id)
{
	if (vm_id < FIRST_SECONDARY_VM_ID ||
	    vm_id >= FIRST_SECONDARY_VM_ID + hf_vm_count)
		return NULL;

	return &hf_vms[vm_id - FIRST_SECONDARY_VM_ID];
}

/**
 * Wakes up the kernel thread responsible for running the given vcpu.
 *
 * Returns 0 if the thread was already running, 1 otherwise.
 */
static int hf_vcpu_wake_up(struct hf_vcpu *vcpu)
{
	/* Set a flag indicating that the thread should not go to sleep. */
	atomic_set(&vcpu->abort_sleep, 1);

	/* Set the thread to running state. */
	return wake_up_process(vcpu->task);
}

/**
 * Puts the current thread to sleep. The current thread must be responsible for
 * running the given vcpu.
 *
 * Going to sleep will fail if hf_vcpu_wake_up() or kthread_stop() was called on
 * this vcpu/thread since the last time it [re]started running.
 */
static void hf_vcpu_sleep(struct hf_vcpu *vcpu)
{
	int abort;

	set_current_state(TASK_INTERRUPTIBLE);

	/* Check the sleep-abort flag after making thread interruptible. */
	abort = atomic_read(&vcpu->abort_sleep);
	if (!abort && !kthread_should_stop())
		schedule();

	/* Set state back to running on the way out. */
	set_current_state(TASK_RUNNING);
}

/**
 * Wakes up the thread associated with the vcpu that owns the given timer. This
 * is called when the timer the thread is waiting on expires.
 */
static enum hrtimer_restart hf_vcpu_timer_expired(struct hrtimer *timer)
{
	struct hf_vcpu *vcpu = container_of(timer, struct hf_vcpu, timer);
	/* TODO: Inject interrupt. */
	hf_vcpu_wake_up(vcpu);
	return HRTIMER_NORESTART;
}

/**
 * This function is called when Hafnium requests that the primary VM wake up a
 * vCPU that belongs to a secondary VM.
 *
 * It wakes up the thread if it's sleeping, or kicks it if it's already running.
 */
static void hf_handle_wake_up_request(ffa_vm_id_t vm_id,
				      ffa_vcpu_index_t vcpu)
{
	struct hf_vm *vm = hf_vm_from_id(vm_id);

	if (!vm) {
		pr_warn("Request to wake up non-existent VM id: %u\n", vm_id);
		return;
	}

	if (vcpu >= vm->vcpu_count) {
		pr_warn("Request to wake up non-existent vCPU: %u.%u\n",
			vm_id, vcpu);
		return;
	}

	if (hf_vcpu_wake_up(&vm->vcpu[vcpu]) == 0) {
		/*
		 * The task was already running (presumably on a different
		 * physical CPU); interrupt it. This gives Hafnium a chance to
		 * inject any new interrupts.
		 */
		kick_process(vm->vcpu[vcpu].task);
	}
}

/**
 * Injects an interrupt into a vCPU of the VM and ensures the vCPU will run to
 * handle the interrupt.
 */
static void hf_interrupt_vm(ffa_vm_id_t vm_id, uint64_t int_id)
{
	struct hf_vm *vm = hf_vm_from_id(vm_id);
	ffa_vcpu_index_t vcpu;
	int64_t ret;

	if (!vm) {
		pr_warn("Request to interrupt non-existent VM id: %u\n", vm_id);
		return;
	}

	/*
	 * TODO: For now we're picking the first vcpu to interrupt, but
	 * we want to be smarter.
	 */
	vcpu = 0;
	ret = hf_interrupt_inject(vm_id, vcpu, int_id);

	if (ret == -1) {
		pr_warn("Failed to inject interrupt %llu to vCPU %u of VM %u\n",
			int_id, vcpu, vm_id);
		return;
	}

	if (ret != 1) {
		/* We don't need to wake up the vcpu. */
		return;
	}

	hf_handle_wake_up_request(vm_id, vcpu);
}

/**
 * Notifies all waiters on the given VM.
 */
static void hf_notify_waiters(ffa_vm_id_t vm_id)
{
	ffa_vm_id_t waiter_vm_id;

	while ((waiter_vm_id = hf_mailbox_waiter_get(vm_id)) != -1) {
		if (waiter_vm_id == PRIMARY_VM_ID) {
			/*
			 * TODO: Use this information when implementing per-vm
			 * queues.
			 */
		} else {
			hf_interrupt_vm(waiter_vm_id,
					HF_MAILBOX_WRITABLE_INTID);
		}
	}
}

/**
 * Delivers a message to a VM.
 */
static void hf_deliver_message(ffa_vm_id_t vm_id)
{
	struct hf_vm *vm = hf_vm_from_id(vm_id);
	ffa_vcpu_index_t i;

	if (!vm) {
		pr_warn("Tried to deliver message to non-existent VM id: %u\n",
			vm_id);
		return;
	}

	/* Try to wake a vCPU that is waiting for a message. */
	for (i = 0; i < vm->vcpu_count; i++) {
		if (atomic_read(&vm->vcpu[i].waiting_for_message)) {
			hf_handle_wake_up_request(vm->id,
						  vm->vcpu[i].vcpu_index);
			return;
		}
	}

	/* None were waiting for a message so interrupt one. */
	hf_interrupt_vm(vm->id, HF_MAILBOX_READABLE_INTID);
}

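/*
 * Transport wire format, as implied by the uses of struct hf_msg_hdr below
 * (the header itself is defined in uapi/hf/socket.h): a fixed header carrying
 * the source and destination ports, followed directly by the payload, so
 * `hdr + 1` points at the payload.
 *
 *	+---------------------------+------------------------------+
 *	| hf_msg_hdr                | payload                      |
 *	| (src_port, dst_port)      | (len - sizeof(hf_msg_hdr))   |
 *	+---------------------------+------------------------------+
 */
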
270/**
Andrew Sculldf6478f2019-02-19 17:52:08 +0000271 * Handles a message delivered to this VM by validating that it's well-formed
272 * and then queueing it for delivery to the appropriate socket.
273 */
Andrew Walbranb331fa92019-10-03 16:48:07 +0100274static void hf_handle_message(struct hf_vm *sender, size_t len,
Andrew Walbrancafe0172019-10-07 14:14:05 +0100275 const void *message)
Andrew Sculldf6478f2019-02-19 17:52:08 +0000276{
277 struct hf_sock *hsock;
	const struct hf_msg_hdr *hdr = (const struct hf_msg_hdr *)message;
	struct sk_buff *skb;
	int err;

	/* Ignore messages that are too small to hold a header. */
	if (len < sizeof(struct hf_msg_hdr)) {
		pr_err("Message received without header of length %zu\n", len);
		ffa_rx_release();
		return;
	}

	len -= sizeof(struct hf_msg_hdr);

	/* Go through the colliding sockets. */
	rcu_read_lock();
	hash_for_each_possible_rcu(hf_local_port_hash, hsock, sk.sk_node,
				   hdr->dst_port) {
		if (hsock->peer_vm == sender &&
		    hsock->remote_port == hdr->src_port) {
			sock_hold(&hsock->sk);
			break;
		}
	}
	rcu_read_unlock();

	/* Nothing to do if we couldn't find the target. */
	if (!hsock) {
		ffa_rx_release();
		return;
	}

	/*
	 * TODO: From this point on, there are two failure paths: when we
	 * create the skb below, and when we enqueue it to the socket. What
	 * should we do if they fail? Ideally we would have some form of flow
	 * control to prevent message loss, but how to do it efficiently?
	 *
	 * One option is to have a pre-allocated message that indicates to the
	 * sender that a message was dropped. This way we guarantee that the
	 * sender will be aware of loss and should back-off.
	 */
	/* Create the skb. */
	skb = alloc_skb(len, GFP_KERNEL);
	if (!skb)
		goto exit;

	memcpy(skb_put(skb, len), hdr + 1, len);

	/*
	 * Add the skb to the receive queue of the target socket. On success it
	 * calls sk->sk_data_ready, which is currently set to sock_def_readable,
	 * which wakes up any waiters.
	 */
	err = sock_queue_rcv_skb(&hsock->sk, skb);
	if (err)
		kfree_skb(skb);

exit:
	sock_put(&hsock->sk);

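	/*
	 * Hand the RX buffer back to Hafnium; if the release call comes back
	 * with the FFA_RX_RELEASE function ID, other VMs were blocked waiting
	 * for the mailbox to become writable, so notify them.
	 */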
	if (ffa_rx_release().func == FFA_RX_RELEASE_32)
		hf_notify_waiters(PRIMARY_VM_ID);
}

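/*
 * One kernel thread is created per secondary vCPU (see hf_init() below).
 * Each thread loops on ffa_run() and interprets the FF-A value it returns to
 * decide whether to yield, sleep, or wake other vCPUs, so Hafnium's
 * scheduling requests are carried out by the Linux scheduler.
 */
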
/**
 * This is the main loop of each vcpu.
 */
static int hf_vcpu_thread(void *data)
{
	struct hf_vcpu *vcpu = data;
	struct ffa_value ret;

	hrtimer_init(&vcpu->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	vcpu->timer.function = &hf_vcpu_timer_expired;

	while (!kthread_should_stop()) {
		ffa_vcpu_index_t i;

		/*
		 * We're about to run the vcpu, so we can reset the abort-sleep
		 * flag.
		 */
		atomic_set(&vcpu->abort_sleep, 0);

		/* Call into Hafnium to run vcpu. */
		ret = ffa_run(vcpu->vm->id, vcpu->vcpu_index);

		switch (ret.func) {
		/* Preempted. */
		case FFA_INTERRUPT_32:
			if (need_resched())
				schedule();
			break;

		/* Yield. */
		case FFA_YIELD_32:
			if (!kthread_should_stop())
				schedule();
			break;

		/* WFI. */
		case HF_FFA_RUN_WAIT_FOR_INTERRUPT:
			if (ret.arg2 != FFA_SLEEP_INDEFINITE) {
				hrtimer_start(&vcpu->timer, ret.arg2,
					      HRTIMER_MODE_REL);
			}
			hf_vcpu_sleep(vcpu);
			hrtimer_cancel(&vcpu->timer);
			break;

		/* Waiting for a message. */
		case FFA_MSG_WAIT_32:
			atomic_set(&vcpu->waiting_for_message, 1);
			if (ret.arg2 != FFA_SLEEP_INDEFINITE) {
				hrtimer_start(&vcpu->timer, ret.arg2,
					      HRTIMER_MODE_REL);
			}
			hf_vcpu_sleep(vcpu);
			hrtimer_cancel(&vcpu->timer);
			atomic_set(&vcpu->waiting_for_message, 0);
			break;

		/* Wake up another vcpu. */
		case HF_FFA_RUN_WAKE_UP:
			hf_handle_wake_up_request(ffa_vm_id(ret),
						  ffa_vcpu_index(ret));
			break;

		/* Response available. */
		case FFA_MSG_SEND_32:
			if (ffa_receiver(ret) == PRIMARY_VM_ID) {
				hf_handle_message(vcpu->vm,
						  ffa_msg_send_size(ret),
						  page_address(hf_recv_page));
			} else {
				hf_deliver_message(ffa_receiver(ret));
			}
			break;

		/* Notify all waiters. */
		case FFA_RX_RELEASE_32:
			hf_notify_waiters(vcpu->vm->id);
			break;

		case FFA_ERROR_32:
			pr_warn("FF-A error %lld running VM %u vCPU %u\n",
				ret.arg2, vcpu->vm->id, vcpu->vcpu_index);
			switch (ret.arg2) {
			/* Abort was triggered. */
			case FFA_ABORTED:
				for (i = 0; i < vcpu->vm->vcpu_count; i++) {
					if (i == vcpu->vcpu_index)
						continue;
					hf_handle_wake_up_request(vcpu->vm->id,
								  i);
				}
				hf_vcpu_sleep(vcpu);
				break;
			default:
				/* Treat as a yield and try again later. */
				if (!kthread_should_stop())
					schedule();
				break;
			}
			break;
		}
	}

	return 0;
}

/**
 * Converts a pointer to a struct sock into a pointer to a struct hf_sock. It
 * relies on the fact that the first field of hf_sock is a sock.
 */
static struct hf_sock *hsock_from_sk(struct sock *sk)
{
	return (struct hf_sock *)sk;
}
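
/*
 * A compile-time check such as BUILD_BUG_ON(offsetof(struct hf_sock, sk) != 0)
 * would enforce the first-field assumption above; the same layout is relied
 * on by the hash table, which links sockets via sk.sk_node.
 */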

/**
 * This is called when the last reference to the outer socket is released. For
 * example, if it's a user-space socket, when the last file descriptor pointing
 * to this socket is closed.
 *
 * It begins cleaning up resources, though some can only be cleaned up after all
 * references to the underlying socket are released, which is handled by
 * hf_sock_destruct().
 */
static int hf_sock_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct hf_sock *hsock = hsock_from_sk(sk);
	unsigned long flags;

	if (!sk)
		return 0;

	/* Shutdown for both send and receive. */
	lock_sock(sk);
	sk->sk_shutdown |= RCV_SHUTDOWN | SEND_SHUTDOWN;
	sk->sk_state_change(sk);
	release_sock(sk);

	/* Remove from the hash table, so lookups from now on won't find it. */
	spin_lock_irqsave(&hf_local_port_hash_lock, flags);
	hash_del_rcu(&hsock->sk.sk_node);
	spin_unlock_irqrestore(&hf_local_port_hash_lock, flags);

	/*
	 * TODO: When we implement a tx queue, we need to clear it here so that
	 * sk_wmem_alloc will not prevent sk from being freed (sk_free).
	 */

	/*
	 * Wait for in-flight lookups to finish. We need to do this here because
	 * in-flight lookups rely on the reference to the socket we're about to
	 * release.
	 */
	synchronize_rcu();
	sock_put(sk);
	sock->sk = NULL;

	return 0;
}

/**
 * This is called when there are no more references to the socket. It frees all
 * resources that haven't been freed during release.
 */
static void hf_sock_destruct(struct sock *sk)
{
	/*
	 * Clear the receive queue now that the handler cannot add any more
	 * skbs to it.
	 */
	skb_queue_purge(&sk->sk_receive_queue);
}

/**
 * Connects the Hafnium socket to the provided VM and port. After the socket is
 * connected, it can be used to exchange datagrams with the specified peer.
 */
static int hf_sock_connect(struct socket *sock, struct sockaddr *saddr, int len,
			   int connect_flags)
{
	struct sock *sk = sock->sk;
	struct hf_sock *hsock = hsock_from_sk(sk);
	struct hf_vm *vm;
	struct hf_sockaddr *addr;
	int err;
	unsigned long flags;

	/* Basic address validation. */
	if (len < sizeof(struct hf_sockaddr) || saddr->sa_family != AF_HF)
		return -EINVAL;

	addr = (struct hf_sockaddr *)saddr;
	vm = hf_vm_from_id(addr->vm_id);
	if (!vm)
		return -ENETUNREACH;

	/*
	 * TODO: Once we implement access control in Hafnium, check that the
	 * caller is allowed to contact the specified VM. Return -ECONNREFUSED
	 * if access is denied.
	 */

	/* Take lock to make sure state doesn't change as we connect. */
	lock_sock(sk);

	/* Only unconnected sockets are allowed to become connected. */
	if (sock->state != SS_UNCONNECTED) {
		err = -EISCONN;
		goto exit;
	}

	hsock->local_port = atomic64_inc_return(&hf_next_port);
	hsock->remote_port = addr->port;
	hsock->peer_vm = vm;

	sock->state = SS_CONNECTED;

	/* Add socket to hash table now that it's fully initialised. */
	spin_lock_irqsave(&hf_local_port_hash_lock, flags);
	hash_add_rcu(hf_local_port_hash, &sk->sk_node, hsock->local_port);
	spin_unlock_irqrestore(&hf_local_port_hash_lock, flags);

	err = 0;
exit:
	release_sock(sk);
	return err;
}
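
/*
 * Design note: local ports are drawn from the global 64-bit hf_next_port
 * counter and are effectively never reused, so a connected socket's
 * (local_port, remote_port, peer_vm) tuple stays unique for the lifetime of
 * the module.
 */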

/**
 * Sends the given skb to the appropriate VM by calling Hafnium. It will also
 * trigger the wake up of a recipient VM.
 *
 * Takes ownership of the skb on success.
 */
static int hf_send_skb(struct sk_buff *skb)
{
	unsigned long flags;
	struct ffa_value ret;
	struct hf_sock *hsock = hsock_from_sk(skb->sk);
	struct hf_vm *vm = hsock->peer_vm;
	void *message = page_address(hf_send_page);

	/*
	 * Call Hafnium under the send lock so that we serialize the use of the
	 * global send buffer.
	 */
	spin_lock_irqsave(&hf_send_lock, flags);
	memcpy(message, skb->data, skb->len);

	ret = ffa_msg_send(current_vm_id, vm->id, skb->len, 0);
	spin_unlock_irqrestore(&hf_send_lock, flags);

	if (ret.func == FFA_ERROR_32) {
		switch (ret.arg2) {
		case FFA_INVALID_PARAMETERS:
			return -ENXIO;
		case FFA_NOT_SUPPORTED:
			return -EIO;
		case FFA_DENIED:
		case FFA_BUSY:
		default:
			return -EAGAIN;
		}
	}

	/* Ensure the VM will run to pick up the message. */
	hf_deliver_message(vm->id);

	kfree_skb(skb);

	return 0;
}
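
/*
 * Design note: hf_send_page is the single TX page shared with the hypervisor
 * for this VM, so hf_send_lock serializes all senders, and each message is
 * copied twice: from userspace into the skb, then from the skb into the TX
 * page under the lock.
 */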

/**
 * Determines if the given socket is in the connected state. It acquires and
 * releases the socket lock.
 */
static bool hf_sock_is_connected(struct socket *sock)
{
	bool ret;

	lock_sock(sock->sk);
	ret = sock->state == SS_CONNECTED;
	release_sock(sock->sk);

	return ret;
}

/**
 * Sends a message to the VM & port the socket is connected to. All variants
 * of write/send/sendto/sendmsg eventually call this function.
 */
static int hf_sock_sendmsg(struct socket *sock, struct msghdr *m, size_t len)
{
	struct sock *sk = sock->sk;
	struct sk_buff *skb;
	int err;
	struct hf_msg_hdr *hdr;
	struct hf_sock *hsock = hsock_from_sk(sk);
	size_t payload_max_len = HF_MAILBOX_SIZE - sizeof(struct hf_msg_hdr);

	/* Check length. */
	if (len > payload_max_len)
		return -EMSGSIZE;

	/* We don't allow the destination address to be specified. */
	if (m->msg_namelen > 0)
		return -EISCONN;

	/* We don't support out of band messages. */
	if (m->msg_flags & MSG_OOB)
		return -EOPNOTSUPP;

	/*
	 * Ensure that the socket is connected. We don't need to hold the socket
	 * lock (acquired and released by hf_sock_is_connected) for the
	 * remainder of the function because the fields we care about are
	 * immutable once the state is SS_CONNECTED.
	 */
	if (!hf_sock_is_connected(sock))
		return -ENOTCONN;

	/*
	 * Allocate an skb for this write. If there isn't enough room in the
	 * socket's send buffer (sk_wmem_alloc >= sk_sndbuf), this will block
	 * (if it's a blocking call). On success, it increments sk_wmem_alloc
	 * and sets up the skb such that sk_wmem_alloc gets decremented when
	 * the skb is freed (sock_wfree gets called).
	 */
	skb = sock_alloc_send_skb(sk, len + sizeof(struct hf_msg_hdr),
				  m->msg_flags & MSG_DONTWAIT, &err);
	if (!skb)
		return err;

	/* Reserve room for the header and initialise it. */
	skb_reserve(skb, sizeof(struct hf_msg_hdr));
	hdr = skb_push(skb, sizeof(struct hf_msg_hdr));
	hdr->src_port = hsock->local_port;
	hdr->dst_port = hsock->remote_port;

	/* Allocate area for the contents, then copy into skb. */
	if (!copy_from_iter_full(skb_put(skb, len), len, &m->msg_iter)) {
		err = -EFAULT;
		goto err_cleanup;
	}

	/*
	 * TODO: We currently do this inline, but when we have support for
	 * readiness notification from Hafnium, we must add this to a per-VM tx
	 * queue that can make progress when the VM becomes writable. This will
	 * fix send buffering and poll readiness notification.
	 */
	err = hf_send_skb(skb);
	if (err)
		goto err_cleanup;

	return 0;

err_cleanup:
	kfree_skb(skb);
	return err;
}

/**
 * Receives a message originated from the VM & port the socket is connected to.
 * All variants of read/recv/recvfrom/recvmsg eventually call this function.
 */
static int hf_sock_recvmsg(struct socket *sock, struct msghdr *m, size_t len,
			   int flags)
{
	struct sock *sk = sock->sk;
	struct sk_buff *skb;
	int err;
	size_t copy_len;

	if (!hf_sock_is_connected(sock))
		return -ENOTCONN;

	/* Grab the next skb from the receive queue. */
	skb = skb_recv_datagram(sk, flags, flags & MSG_DONTWAIT, &err);
	if (!skb)
		return err;

	/* Make sure we don't copy more than what fits in the output buffer. */
	copy_len = skb->len;
	if (copy_len > len) {
		copy_len = len;
		m->msg_flags |= MSG_TRUNC;
	}

	/* Make sure we don't overflow the return value type. */
	if (copy_len > INT_MAX) {
		copy_len = INT_MAX;
		m->msg_flags |= MSG_TRUNC;
	}

	/* Copy skb to output iterator, then free it. */
	err = skb_copy_datagram_msg(skb, 0, m, copy_len);
	skb_free_datagram(sk, skb);
	if (err)
		return err;

	return copy_len;
}

/**
 * This function is called when a Hafnium socket is created. It initialises all
 * state such that the caller will be able to connect the socket and then send
 * and receive messages through it.
 */
static int hf_sock_create(struct net *net, struct socket *sock, int protocol,
			  int kern)
{
	static const struct proto_ops ops = {
		.family = PF_HF,
		.owner = THIS_MODULE,
		.release = hf_sock_release,
		.bind = sock_no_bind,
		.connect = hf_sock_connect,
		.socketpair = sock_no_socketpair,
		.accept = sock_no_accept,
		.ioctl = sock_no_ioctl,
		.listen = sock_no_listen,
		.shutdown = sock_no_shutdown,
		.setsockopt = sock_no_setsockopt,
		.getsockopt = sock_no_getsockopt,
		.sendmsg = hf_sock_sendmsg,
		.recvmsg = hf_sock_recvmsg,
		.mmap = sock_no_mmap,
		.sendpage = sock_no_sendpage,
		.poll = datagram_poll,
	};
	struct sock *sk;

	if (sock->type != SOCK_DGRAM)
		return -ESOCKTNOSUPPORT;

	if (protocol != 0)
		return -EPROTONOSUPPORT;

	/*
	 * For now we only allow callers with sys admin capability to create
	 * Hafnium sockets.
	 */
	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	/* Allocate and initialise socket. */
	sk = sk_alloc(net, PF_HF, GFP_KERNEL, &hf_sock_proto, kern);
	if (!sk)
		return -ENOMEM;

	sock_init_data(sock, sk);

	sk->sk_destruct = hf_sock_destruct;
	sock->ops = &ops;
	sock->state = SS_UNCONNECTED;

	return 0;
}
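
/*
 * Sketch of expected userspace usage, assuming the AF_HF/PF_HF constants and
 * struct hf_sockaddr from uapi/hf/socket.h (the address field names here are
 * illustrative, but vm_id and port are referenced by hf_sock_connect above):
 *
 *	int fd = socket(PF_HF, SOCK_DGRAM, 0);
 *	struct hf_sockaddr addr = {
 *		.family = AF_HF,
 *		.vm_id = FIRST_SECONDARY_VM_ID,	// first secondary VM
 *		.port = 10,			// port the peer listens on
 *	};
 *
 *	if (connect(fd, (struct sockaddr *)&addr, sizeof(addr)) == 0) {
 *		send(fd, buf, len, 0);		// datagram to (vm_id, port)
 *		recv(fd, buf, sizeof(buf), 0);	// reply from the same peer
 *	}
 */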

/**
 * Frees all resources, including threads, associated with the Hafnium driver.
 */
static void hf_free_resources(void)
{
	uint16_t i;
	ffa_vcpu_index_t j;

	/*
	 * First stop all worker threads. We need to do this before freeing
	 * resources because workers may reference each other, so it is only
	 * safe to free resources after they have all stopped.
	 */
	for (i = 0; i < hf_vm_count; i++) {
		struct hf_vm *vm = &hf_vms[i];

		for (j = 0; j < vm->vcpu_count; j++)
			kthread_stop(vm->vcpu[j].task);
	}

	/* Free resources. */
	for (i = 0; i < hf_vm_count; i++) {
		struct hf_vm *vm = &hf_vms[i];

		for (j = 0; j < vm->vcpu_count; j++)
			put_task_struct(vm->vcpu[j].task);
		kfree(vm->vcpu);
	}

	kfree(hf_vms);

	ffa_rx_release();
	if (hf_send_page) {
		__free_page(hf_send_page);
		hf_send_page = NULL;
	}
	if (hf_recv_page) {
		__free_page(hf_recv_page);
		hf_recv_page = NULL;
	}
}

/**
 * Handles the hypervisor timer interrupt.
 */
static irqreturn_t hf_nop_irq_handler(int irq, void *dev)
{
	/*
	 * No need to do anything, the interrupt only exists to return to the
	 * primary vCPU so that the virtual timer will be restored and fire as
	 * normal.
	 */
	return IRQ_HANDLED;
}

/**
 * Enables the hypervisor timer interrupt on a CPU, when it starts or after the
 * driver is first loaded.
 */
static int hf_starting_cpu(unsigned int cpu)
{
	if (hf_irq != 0) {
		/* Enable the interrupt, and set it to be edge-triggered. */
		enable_percpu_irq(hf_irq, IRQ_TYPE_EDGE_RISING);
	}

	return 0;
}

/**
 * Disables the hypervisor timer interrupt on a CPU when it is powered down.
 */
static int hf_dying_cpu(unsigned int cpu)
{
	if (hf_irq != 0) {
		/* Disable the interrupt while the CPU is asleep. */
		disable_percpu_irq(hf_irq);
	}

	return 0;
}
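
/*
 * These callbacks are wired up via cpuhp_setup_state() in
 * hf_int_driver_probe() below; that call also runs hf_starting_cpu() on every
 * CPU already online, so no separate enable pass is needed at probe time.
 */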

/**
 * Registers for the hypervisor timer interrupt.
 */
static int hf_int_driver_probe(struct platform_device *pdev)
{
	int irq;
	int ret;

	/*
	 * Register a handler for the hypervisor timer IRQ, as it is needed for
	 * Hafnium to emulate the virtual timer for Linux while a secondary vCPU
	 * is running.
	 */
	irq = platform_get_irq(pdev, ARCH_TIMER_HYP_PPI);
	if (irq < 0) {
		pr_err("Error getting hypervisor timer IRQ: %d\n", irq);
		return irq;
	}
	hf_irq = irq;

	ret = request_percpu_irq(irq, hf_nop_irq_handler, HYPERVISOR_TIMER_NAME,
				 pdev);
	if (ret != 0) {
		pr_err("Error registering hypervisor timer IRQ %d: %d\n",
		       irq, ret);
		return ret;
	}
	pr_info("Hafnium registered for IRQ %d\n", irq);
	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN,
				"hafnium/hypervisor_timer:starting",
				hf_starting_cpu, hf_dying_cpu);
	if (ret < 0) {
		pr_err("Error enabling timer on all CPUs: %d\n", ret);
		free_percpu_irq(irq, pdev);
		return ret;
	}
	hf_cpuhp_state = ret;

	return 0;
}

/**
 * Unregisters for the hypervisor timer interrupt.
 */
static int hf_int_driver_remove(struct platform_device *pdev)
{
	/*
	 * This will cause hf_dying_cpu to be called on each CPU, which will
	 * disable the IRQs.
	 */
	cpuhp_remove_state(hf_cpuhp_state);
	free_percpu_irq(hf_irq, pdev);

	return 0;
}

static const struct of_device_id hf_int_driver_id[] = {
	{.compatible = "arm,armv7-timer"},
	{.compatible = "arm,armv8-timer"},
	{}
};

static struct platform_driver hf_int_driver = {
	.driver = {
		.name = HYPERVISOR_TIMER_NAME,
		.owner = THIS_MODULE,
		.of_match_table = of_match_ptr(hf_int_driver_id),
	},
	.probe = hf_int_driver_probe,
	.remove = hf_int_driver_remove,
};

/**
 * Prints the error code of the given FF-A value if it is an error, or the
 * function ID otherwise.
 */
static void print_ffa_error(struct ffa_value ffa_ret)
{
	if (ffa_ret.func == FFA_ERROR_32)
		pr_err("FF-A error code %lld\n", ffa_ret.arg2);
	else
		pr_err("Unexpected FF-A function %#llx\n", ffa_ret.func);
}

/**
 * Initializes the Hafnium driver by creating a thread for each vCPU of each
 * virtual machine.
 */
static int __init hf_init(void)
{
	static const struct net_proto_family proto_family = {
		.family = PF_HF,
		.create = hf_sock_create,
		.owner = THIS_MODULE,
	};
	int64_t ret;
	struct ffa_value ffa_ret;
	ffa_vm_id_t i;
	ffa_vcpu_index_t j;
	struct ffa_uuid null_uuid;
	ffa_vm_count_t secondary_vm_count;
	const struct ffa_partition_info *partition_info;
	uint32_t total_vcpu_count;

	/* Allocate a page for send and receive buffers. */
	hf_send_page = alloc_page(GFP_KERNEL);
	if (!hf_send_page) {
		pr_err("Unable to allocate send buffer\n");
		return -ENOMEM;
	}

	hf_recv_page = alloc_page(GFP_KERNEL);
	if (!hf_recv_page) {
		__free_page(hf_send_page);
		hf_send_page = NULL;
		pr_err("Unable to allocate receive buffer\n");
		return -ENOMEM;
	}

	/*
	 * Configure both addresses. Once configured, we cannot free these pages
	 * because the hypervisor will use them, even if the module is
	 * unloaded.
	 */
	ffa_ret = ffa_rxtx_map(page_to_phys(hf_send_page),
			       page_to_phys(hf_recv_page));
	if (ffa_ret.func != FFA_SUCCESS_32) {
		pr_err("Unable to configure VM mailbox.\n");
		print_ffa_error(ffa_ret);
		ret = -EIO;
		goto fail_with_cleanup;
	}

	/* Get information about secondary VMs. */
	ffa_uuid_init(0, 0, 0, 0, &null_uuid);
	ffa_ret = ffa_partition_info_get(&null_uuid);
	if (ffa_ret.func != FFA_SUCCESS_32) {
		pr_err("Unable to get VM information.\n");
		print_ffa_error(ffa_ret);
		ret = -EIO;
		goto fail_with_cleanup;
	}
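
	/*
	 * The partition table is returned in the RX buffer. The count in arg2
	 * includes the primary VM, so the number of secondaries is one less
	 * and their entries start at index 1.
	 */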
	secondary_vm_count = ffa_ret.arg2 - 1;
	partition_info = page_address(hf_recv_page);

	/* Confirm the maximum number of VMs looks sane. */
	BUILD_BUG_ON(CONFIG_HAFNIUM_MAX_VMS < 1);
	BUILD_BUG_ON(CONFIG_HAFNIUM_MAX_VMS > U16_MAX);

	/* Validate the number of VMs. There must at least be the primary. */
	if (secondary_vm_count > CONFIG_HAFNIUM_MAX_VMS - 1) {
		pr_err("Number of VMs is out of range: %u\n",
		       secondary_vm_count);
		ret = -EDQUOT;
		goto fail_with_cleanup;
	}

	/* Only track the secondary VMs. */
	hf_vms = kmalloc_array(secondary_vm_count, sizeof(struct hf_vm),
			       GFP_KERNEL);
	if (!hf_vms) {
		ret = -ENOMEM;
		goto fail_with_cleanup;
	}

	/* Cache the VM id for later usage. */
	current_vm_id = hf_vm_get_id();

	/* Initialize each VM. */
	total_vcpu_count = 0;
	for (i = 0; i < secondary_vm_count; i++) {
		struct hf_vm *vm = &hf_vms[i];
		ffa_vcpu_count_t vcpu_count;

		/* Adjust the index as only the secondaries are tracked. */
		vm->id = partition_info[i + 1].vm_id;
		vcpu_count = partition_info[i + 1].vcpu_count;

		/* Avoid overflowing the vcpu count. */
		if (vcpu_count > (U32_MAX - total_vcpu_count)) {
			pr_err("Too many vcpus: %u\n", total_vcpu_count);
			ret = -EDQUOT;
			goto fail_with_cleanup;
		}

		/* Confirm the maximum number of VCPUs looks sane. */
		BUILD_BUG_ON(CONFIG_HAFNIUM_MAX_VCPUS < 1);
		BUILD_BUG_ON(CONFIG_HAFNIUM_MAX_VCPUS > U16_MAX);

		/* Enforce the limit on vcpus. */
		total_vcpu_count += vcpu_count;
		if (total_vcpu_count > CONFIG_HAFNIUM_MAX_VCPUS) {
			pr_err("Too many vcpus: %u\n", total_vcpu_count);
			ret = -EDQUOT;
			goto fail_with_cleanup;
		}

		vm->vcpu_count = vcpu_count;
		vm->vcpu = kmalloc_array(vm->vcpu_count, sizeof(struct hf_vcpu),
					 GFP_KERNEL);
		if (!vm->vcpu) {
			ret = -ENOMEM;
			goto fail_with_cleanup;
		}

		/* Update the number of initialized VMs. */
		hf_vm_count = i + 1;

		/* Create a kernel thread for each vcpu. */
		for (j = 0; j < vm->vcpu_count; j++) {
			struct hf_vcpu *vcpu = &vm->vcpu[j];

			vcpu->task =
				kthread_create(hf_vcpu_thread, vcpu,
					       "vcpu_thread_%u_%u", vm->id, j);
			if (IS_ERR(vcpu->task)) {
				pr_err("Error creating task (vm=%u,vcpu=%u): %ld\n",
				       vm->id, j, PTR_ERR(vcpu->task));
				vm->vcpu_count = j;
				ret = PTR_ERR(vcpu->task);
				goto fail_with_cleanup;
			}

			get_task_struct(vcpu->task);
			vcpu->vm = vm;
			vcpu->vcpu_index = j;
			atomic_set(&vcpu->abort_sleep, 0);
			atomic_set(&vcpu->waiting_for_message, 0);
		}
	}

	ffa_ret = ffa_rx_release();
	if (ffa_ret.func != FFA_SUCCESS_32) {
		pr_err("Unable to release RX buffer.\n");
		print_ffa_error(ffa_ret);
		ret = -EIO;
		goto fail_with_cleanup;
	}

	/* Register protocol and socket family. */
	ret = proto_register(&hf_sock_proto, 0);
	if (ret) {
		pr_err("Unable to register protocol: %lld\n", ret);
		goto fail_with_cleanup;
	}

	ret = sock_register(&proto_family);
	if (ret) {
		pr_err("Unable to register Hafnium's socket family: %lld\n",
		       ret);
		goto fail_unregister_proto;
	}

	/*
	 * Register as a driver for the timer device, so we can register a
	 * handler for the hypervisor timer IRQ.
	 */
	ret = platform_driver_register(&hf_int_driver);
	if (ret != 0) {
		pr_err("Error registering timer driver %lld\n", ret);
		goto fail_unregister_socket;
	}

	/*
	 * Start running threads now that all is initialized.
	 *
	 * Any failures from this point on must also unregister the driver with
	 * platform_driver_unregister().
	 */
	for (i = 0; i < hf_vm_count; i++) {
		struct hf_vm *vm = &hf_vms[i];

		for (j = 0; j < vm->vcpu_count; j++)
			wake_up_process(vm->vcpu[j].task);
	}

	/* Dump vm/vcpu count info. */
	pr_info("Hafnium successfully loaded with %u VMs:\n", hf_vm_count);
	for (i = 0; i < hf_vm_count; i++) {
		struct hf_vm *vm = &hf_vms[i];

		pr_info("\tVM %u: %u vCPUS\n", vm->id, vm->vcpu_count);
	}

	return 0;

fail_unregister_socket:
	sock_unregister(PF_HF);
fail_unregister_proto:
	proto_unregister(&hf_sock_proto);
fail_with_cleanup:
	hf_free_resources();
	return ret;
}

/**
 * Frees up all resources used by the Hafnium driver in preparation for
 * unloading it.
 */
static void __exit hf_exit(void)
{
	pr_info("Preparing to unload Hafnium\n");
	sock_unregister(PF_HF);
	proto_unregister(&hf_sock_proto);
	hf_free_resources();
	platform_driver_unregister(&hf_int_driver);
	pr_info("Hafnium ready to unload\n");
}

MODULE_LICENSE("GPL v2");

module_init(hf_init);
module_exit(hf_exit);