// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright 2018 The Hafnium Authors.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <clocksource/arm_arch_timer.h>
#include <linux/atomic.h>
#include <linux/cpuhotplug.h>
#include <linux/hrtimer.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/net.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/sched/task.h>
#include <linux/slab.h>
#include <net/sock.h>

#include <hf/call.h>
#include <hf/ffa.h>
#include <hf/transport.h>

#include "uapi/hf/socket.h"

#define HYPERVISOR_TIMER_NAME "el2_timer"

#define CONFIG_HAFNIUM_MAX_VMS 16
#define CONFIG_HAFNIUM_MAX_VCPUS 32

#define HF_VM_ID_BASE 0
#define PRIMARY_VM_ID HF_VM_ID_OFFSET
#define FIRST_SECONDARY_VM_ID (HF_VM_ID_OFFSET + 1)
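
/*
 * The driver assumes VM IDs are allocated contiguously from HF_VM_ID_OFFSET:
 * the primary VM comes first and the secondary VMs follow it. Only the
 * secondaries are tracked in hf_vms[], so a VM ID is converted to an index
 * into that array by subtracting FIRST_SECONDARY_VM_ID (see hf_vm_from_id()).
 */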

struct hf_vcpu {
	struct hf_vm *vm;
	ffa_vcpu_index_t vcpu_index;
	struct task_struct *task;
	atomic_t abort_sleep;
	atomic_t waiting_for_message;
	struct hrtimer timer;
};

struct hf_vm {
	ffa_vm_id_t id;
	ffa_vcpu_count_t vcpu_count;
	struct hf_vcpu *vcpu;
};

struct hf_sock {
	/* This needs to be the first field. */
	struct sock sk;

	/*
	 * The following fields are immutable after the socket transitions to
	 * SS_CONNECTED state.
	 */
	uint64_t local_port;
	uint64_t remote_port;
	struct hf_vm *peer_vm;
};

static struct proto hf_sock_proto = {
	.name = "hafnium",
	.owner = THIS_MODULE,
	.obj_size = sizeof(struct hf_sock),
};

static struct hf_vm *hf_vms;
static ffa_vm_count_t hf_vm_count;
static struct page *hf_send_page;
static struct page *hf_recv_page;
static atomic64_t hf_next_port = ATOMIC64_INIT(0);
static DEFINE_SPINLOCK(hf_send_lock);
static DEFINE_HASHTABLE(hf_local_port_hash, 7);
static DEFINE_SPINLOCK(hf_local_port_hash_lock);
static int hf_irq;
static enum cpuhp_state hf_cpuhp_state;
static ffa_vm_id_t current_vm_id;

/**
 * Retrieves a VM from its ID, returning NULL if the VM doesn't exist.
 */
static struct hf_vm *hf_vm_from_id(ffa_vm_id_t vm_id)
{
	if (vm_id < FIRST_SECONDARY_VM_ID ||
	    vm_id >= FIRST_SECONDARY_VM_ID + hf_vm_count)
		return NULL;

	return &hf_vms[vm_id - FIRST_SECONDARY_VM_ID];
}

/**
 * Wakes up the kernel thread responsible for running the given vcpu.
 *
 * Returns 0 if the thread was already running, 1 otherwise.
 */
static int hf_vcpu_wake_up(struct hf_vcpu *vcpu)
{
	/* Set a flag indicating that the thread should not go to sleep. */
	atomic_set(&vcpu->abort_sleep, 1);

	/* Set the thread to running state. */
	return wake_up_process(vcpu->task);
}

/**
 * Puts the current thread to sleep. The current thread must be responsible for
 * running the given vcpu.
 *
 * Going to sleep will fail if hf_vcpu_wake_up() or kthread_stop() was called on
 * this vcpu/thread since the last time it [re]started running.
 */
static void hf_vcpu_sleep(struct hf_vcpu *vcpu)
{
	int abort;

	set_current_state(TASK_INTERRUPTIBLE);

	/* Check the sleep-abort flag after making thread interruptible. */
	abort = atomic_read(&vcpu->abort_sleep);
	if (!abort && !kthread_should_stop())
		schedule();

	/* Set state back to running on the way out. */
	set_current_state(TASK_RUNNING);
}

/**
 * Wakes up the thread associated with the vcpu that owns the given timer. This
 * is called when the timer the thread is waiting on expires.
 */
static enum hrtimer_restart hf_vcpu_timer_expired(struct hrtimer *timer)
{
	struct hf_vcpu *vcpu = container_of(timer, struct hf_vcpu, timer);
	/* TODO: Inject interrupt. */
	hf_vcpu_wake_up(vcpu);
	return HRTIMER_NORESTART;
}

/**
 * This function is called when Hafnium requests that the primary VM wake up a
 * vCPU that belongs to a secondary VM.
 *
 * It wakes up the thread if it's sleeping, or kicks it if it's already running.
 */
static void hf_handle_wake_up_request(ffa_vm_id_t vm_id,
				      ffa_vcpu_index_t vcpu)
{
	struct hf_vm *vm = hf_vm_from_id(vm_id);

	if (!vm) {
		pr_warn("Request to wake up non-existent VM id: %u\n", vm_id);
		return;
	}

	if (vcpu >= vm->vcpu_count) {
		pr_warn("Request to wake up non-existent vCPU: %u.%u\n",
			vm_id, vcpu);
		return;
	}

	if (hf_vcpu_wake_up(&vm->vcpu[vcpu]) == 0) {
		/*
		 * The task was already running (presumably on a different
		 * physical CPU); interrupt it. This gives Hafnium a chance to
		 * inject any new interrupts.
		 */
		kick_process(vm->vcpu[vcpu].task);
	}
}

/**
 * Injects an interrupt into a vCPU of the VM and ensures the vCPU will run to
 * handle the interrupt.
 */
static void hf_interrupt_vm(ffa_vm_id_t vm_id, uint64_t int_id)
{
	struct hf_vm *vm = hf_vm_from_id(vm_id);
	ffa_vcpu_index_t vcpu;
	int64_t ret;

	if (!vm) {
197 pr_warn("Request to wake up non-existent VM id: %u\n", vm_id);
		return;
	}

	/*
	 * TODO: For now we're picking the first vcpu to interrupt, but
	 * we want to be smarter.
	 */
	vcpu = 0;
	ret = hf_interrupt_inject(vm_id, vcpu, int_id);

	if (ret == -1) {
		pr_warn("Failed to inject interrupt %llu into vCPU %u of VM %u\n",
			int_id, vcpu, vm_id);
		return;
	}

	if (ret != 1) {
		/* We don't need to wake up the vcpu. */
		return;
	}

	hf_handle_wake_up_request(vm_id, vcpu);
}

/**
 * Notify all waiters on the given VM.
 */
static void hf_notify_waiters(ffa_vm_id_t vm_id)
{
	ffa_vm_id_t waiter_vm_id;

	while ((waiter_vm_id = hf_mailbox_waiter_get(vm_id)) != -1) {
		if (waiter_vm_id == PRIMARY_VM_ID) {
			/*
			 * TODO: Use this information when implementing per-vm
			 * queues.
			 */
		} else {
			hf_interrupt_vm(waiter_vm_id,
					HF_MAILBOX_WRITABLE_INTID);
		}
	}
}
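
/*
 * In other words: when a VM frees its RX buffer, Hafnium reports (via
 * hf_mailbox_waiter_get()) each VM that was blocked waiting for that mailbox
 * to become writable. Secondary waiters get HF_MAILBOX_WRITABLE_INTID injected
 * so they can retry their send; waiters that are the primary VM itself are not
 * yet tracked (see the TODO above).
 */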

/**
 * Delivers a message to a VM.
 */
static void hf_deliver_message(ffa_vm_id_t vm_id)
{
	struct hf_vm *vm = hf_vm_from_id(vm_id);
	ffa_vcpu_index_t i;

	if (!vm) {
		pr_warn("Tried to deliver message to non-existent VM id: %u\n",
			vm_id);
		return;
	}

	/* Try to wake a vCPU that is waiting for a message. */
	for (i = 0; i < vm->vcpu_count; i++) {
		if (atomic_read(&vm->vcpu[i].waiting_for_message)) {
			hf_handle_wake_up_request(vm->id,
						  vm->vcpu[i].vcpu_index);
			return;
		}
	}

	/* None were waiting for a message so interrupt one. */
	hf_interrupt_vm(vm->id, HF_MAILBOX_READABLE_INTID);
}

/**
 * Handles a message delivered to this VM by validating that it's well-formed
 * and then queueing it for delivery to the appropriate socket.
 */
static void hf_handle_message(struct hf_vm *sender, size_t len,
			      const void *message)
{
	struct hf_sock *hsock;
	const struct hf_msg_hdr *hdr = (struct hf_msg_hdr *)message;
	struct sk_buff *skb;
	int err;

	/* Ignore messages that are too small to hold a header. */
	if (len < sizeof(struct hf_msg_hdr)) {
		pr_err("Message of length %zu is too short to hold a header\n",
		       len);
		ffa_rx_release();
		return;
	}

	len -= sizeof(struct hf_msg_hdr);

	/* Go through the colliding sockets. */
	rcu_read_lock();
	hash_for_each_possible_rcu(hf_local_port_hash, hsock, sk.sk_node,
				   hdr->dst_port) {
		if (hsock->peer_vm == sender &&
		    hsock->remote_port == hdr->src_port) {
			sock_hold(&hsock->sk);
			break;
		}
	}
	rcu_read_unlock();

	/* Nothing to do if we couldn't find the target. */
	if (!hsock) {
		ffa_rx_release();
		return;
	}

	/*
	 * TODO: From this point on, there are two failure paths: when we
	 * create the skb below, and when we enqueue it to the socket. What
	 * should we do if they fail? Ideally we would have some form of flow
	 * control to prevent message loss, but how to do it efficiently?
	 *
	 * One option is to have a pre-allocated message that indicates to the
	 * sender that a message was dropped. This way we guarantee that the
	 * sender will be aware of loss and should back-off.
	 */
	/* Create the skb. */
	skb = alloc_skb(len, GFP_KERNEL);
	if (!skb)
		goto exit;

	memcpy(skb_put(skb, len), hdr + 1, len);

	/*
	 * Add the skb to the receive queue of the target socket. On success it
	 * calls sk->sk_data_ready, which is currently set to sock_def_readable,
	 * which wakes up any waiters.
	 */
	err = sock_queue_rcv_skb(&hsock->sk, skb);
	if (err)
		kfree_skb(skb);

exit:
	sock_put(&hsock->sk);

	if (ffa_rx_release().func == FFA_RX_RELEASE_32)
		hf_notify_waiters(PRIMARY_VM_ID);
}

/**
 * This is the main loop of each vcpu.
 */
static int hf_vcpu_thread(void *data)
{
	struct hf_vcpu *vcpu = data;
	struct ffa_value ret;

	hrtimer_init(&vcpu->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	vcpu->timer.function = &hf_vcpu_timer_expired;

	while (!kthread_should_stop()) {
		ffa_vcpu_index_t i;

		/*
		 * We're about to run the vcpu, so we can reset the abort-sleep
		 * flag.
		 */
		atomic_set(&vcpu->abort_sleep, 0);

		/* Call into Hafnium to run vcpu. */
		ret = ffa_run(vcpu->vm->id, vcpu->vcpu_index);

		switch (ret.func) {
		/* Preempted. */
		case FFA_INTERRUPT_32:
			if (need_resched())
				schedule();
			break;

		/* Yield. */
		case FFA_YIELD_32:
			if (!kthread_should_stop())
				schedule();
			break;

		/* WFI. */
		case HF_FFA_RUN_WAIT_FOR_INTERRUPT:
			if (ret.arg2 != FFA_SLEEP_INDEFINITE) {
				hrtimer_start(&vcpu->timer, ret.arg2,
					      HRTIMER_MODE_REL);
			}
			hf_vcpu_sleep(vcpu);
			hrtimer_cancel(&vcpu->timer);
			break;

		/* Waiting for a message. */
		case FFA_MSG_WAIT_32:
			atomic_set(&vcpu->waiting_for_message, 1);
			if (ret.arg2 != FFA_SLEEP_INDEFINITE) {
				hrtimer_start(&vcpu->timer, ret.arg2,
					      HRTIMER_MODE_REL);
			}
			hf_vcpu_sleep(vcpu);
			hrtimer_cancel(&vcpu->timer);
			atomic_set(&vcpu->waiting_for_message, 0);
			break;

		/* Wake up another vcpu. */
		case HF_FFA_RUN_WAKE_UP:
			hf_handle_wake_up_request(ffa_vm_id(ret),
						  ffa_vcpu_index(ret));
			break;

		/* Response available. */
		case FFA_MSG_SEND_32:
			if (ffa_msg_send_receiver(ret) == PRIMARY_VM_ID) {
				hf_handle_message(vcpu->vm,
						  ffa_msg_send_size(ret),
						  page_address(hf_recv_page));
			} else {
				hf_deliver_message(ffa_msg_send_receiver(ret));
			}
			break;

		/* Notify all waiters. */
		case FFA_RX_RELEASE_32:
			hf_notify_waiters(vcpu->vm->id);
			break;

		case FFA_ERROR_32:
			pr_warn("FF-A error %d running VM %d vCPU %d\n",
				(int)ret.arg2, vcpu->vm->id, vcpu->vcpu_index);
			switch (ret.arg2) {
			/* Abort was triggered. */
			case FFA_ABORTED:
				for (i = 0; i < vcpu->vm->vcpu_count; i++) {
					if (i == vcpu->vcpu_index)
						continue;
					hf_handle_wake_up_request(vcpu->vm->id,
								  i);
				}
				hf_vcpu_sleep(vcpu);
				break;
			default:
				/* Treat as a yield and try again later. */
				if (!kthread_should_stop())
					schedule();
				break;
			}
			break;
		}
	}

	return 0;
}
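
/*
 * To summarise the loop above: each secondary vCPU is backed by one kernel
 * thread, and every ffa_run() exit is translated into a Linux scheduling
 * decision: sleep (optionally bounded by the hrtimer) for WFI and message
 * waits, schedule() for preemption and yield, a wake-up or message delivery
 * for the targeted VM, and, on FFA_ABORTED, waking the VM's remaining vCPUs
 * before putting this one to sleep.
 */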

/**
 * Converts a pointer to a struct sock into a pointer to a struct hf_sock. It
 * relies on the fact that the first field of hf_sock is a sock.
 */
static struct hf_sock *hsock_from_sk(struct sock *sk)
{
	return (struct hf_sock *)sk;
}

/**
 * This is called when the last reference to the outer socket is released. For
 * example, if it's a user-space socket, when the last file descriptor pointing
 * to this socket is closed.
 *
 * It begins cleaning up resources, though some can only be cleaned up after all
 * references to the underlying socket are released, which is handled by
 * hf_sock_destruct().
 */
static int hf_sock_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct hf_sock *hsock = hsock_from_sk(sk);
	unsigned long flags;

	if (!sk)
		return 0;

	/* Shutdown for both send and receive. */
	lock_sock(sk);
	sk->sk_shutdown |= RCV_SHUTDOWN | SEND_SHUTDOWN;
	sk->sk_state_change(sk);
	release_sock(sk);

	/* Remove from the hash table, so lookups from now on won't find it. */
	spin_lock_irqsave(&hf_local_port_hash_lock, flags);
	hash_del_rcu(&hsock->sk.sk_node);
	spin_unlock_irqrestore(&hf_local_port_hash_lock, flags);

	/*
	 * TODO: When we implement a tx queue, we need to clear it here so that
	 * sk_wmem_alloc will not prevent sk from being freed (sk_free).
	 */

	/*
	 * Wait for in-flight lookups to finish. We need to do this here because
	 * in-flight lookups rely on the reference to the socket we're about to
	 * release.
	 */
	synchronize_rcu();
	sock_put(sk);
	sock->sk = NULL;

	return 0;
}

/**
 * This is called when there are no more references to the socket. It frees all
 * resources that haven't been freed during release.
 */
static void hf_sock_destruct(struct sock *sk)
{
	/*
	 * Clear the receive queue now that the handler cannot add any more
	 * skbs to it.
	 */
	skb_queue_purge(&sk->sk_receive_queue);
}

/**
 * Connects the Hafnium socket to the provided VM and port. After the socket is
 * connected, it can be used to exchange datagrams with the specified peer.
 */
static int hf_sock_connect(struct socket *sock, struct sockaddr *saddr, int len,
			   int connect_flags)
{
	struct sock *sk = sock->sk;
	struct hf_sock *hsock = hsock_from_sk(sk);
	struct hf_vm *vm;
	struct hf_sockaddr *addr;
	int err;
	unsigned long flags;

	/* Basic address validation. */
	if (len < sizeof(struct hf_sockaddr) || saddr->sa_family != AF_HF)
		return -EINVAL;

	addr = (struct hf_sockaddr *)saddr;
	vm = hf_vm_from_id(addr->vm_id);
	if (!vm)
		return -ENETUNREACH;

	/*
	 * TODO: Once we implement access control in Hafnium, check that the
	 * caller is allowed to contact the specified VM. Return -ECONNREFUSED
	 * if access is denied.
	 */

	/* Take lock to make sure state doesn't change as we connect. */
	lock_sock(sk);

	/* Only unconnected sockets are allowed to become connected. */
	if (sock->state != SS_UNCONNECTED) {
		err = -EISCONN;
		goto exit;
	}

	hsock->local_port = atomic64_inc_return(&hf_next_port);
	hsock->remote_port = addr->port;
	hsock->peer_vm = vm;

	sock->state = SS_CONNECTED;

	/* Add socket to hash table now that it's fully initialised. */
	spin_lock_irqsave(&hf_local_port_hash_lock, flags);
	hash_add_rcu(hf_local_port_hash, &sk->sk_node, hsock->local_port);
	spin_unlock_irqrestore(&hf_local_port_hash_lock, flags);

	err = 0;
exit:
	release_sock(sk);
	return err;
}

/**
 * Sends the given skb to the appropriate VM by calling Hafnium. It will also
 * trigger the wake up of a recipient VM.
 *
 * Takes ownership of the skb on success.
 */
static int hf_send_skb(struct sk_buff *skb)
{
	unsigned long flags;
	struct ffa_value ret;
	struct hf_sock *hsock = hsock_from_sk(skb->sk);
	struct hf_vm *vm = hsock->peer_vm;
	void *message = page_address(hf_send_page);

	/*
	 * Call Hafnium under the send lock so that we serialize the use of the
	 * global send buffer.
	 */
	spin_lock_irqsave(&hf_send_lock, flags);
	memcpy(message, skb->data, skb->len);

	ret = ffa_msg_send(current_vm_id, vm->id, skb->len, 0);
	spin_unlock_irqrestore(&hf_send_lock, flags);

	if (ret.func == FFA_ERROR_32) {
		switch (ret.arg2) {
		case FFA_INVALID_PARAMETERS:
			return -ENXIO;
		case FFA_NOT_SUPPORTED:
			return -EIO;
		case FFA_DENIED:
		case FFA_BUSY:
		default:
			return -EAGAIN;
		}
	}

	/* Ensure the VM will run to pick up the message. */
	hf_deliver_message(vm->id);

	kfree_skb(skb);

	return 0;
}
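
/*
 * Note on framing, summarising the code above rather than defining a separate
 * ABI: hf_sock_sendmsg() below pushes a struct hf_msg_hdr (src_port/dst_port)
 * in front of the user data, and hf_send_skb() copies the whole skb into the
 * shared send page before invoking ffa_msg_send(). The mailbox therefore
 * carries:
 *
 *	| struct hf_msg_hdr | payload of up to HF_MAILBOX_SIZE - header |
 *
 * hf_handle_message() parses the same layout on the receive side.
 */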

/**
 * Determines if the given socket is in the connected state. It acquires and
 * releases the socket lock.
 */
static bool hf_sock_is_connected(struct socket *sock)
{
	bool ret;

	lock_sock(sock->sk);
	ret = sock->state == SS_CONNECTED;
	release_sock(sock->sk);

	return ret;
}

/**
 * Sends a message to the VM & port the socket is connected to. All variants
 * of write/send/sendto/sendmsg eventually call this function.
 */
static int hf_sock_sendmsg(struct socket *sock, struct msghdr *m, size_t len)
{
	struct sock *sk = sock->sk;
	struct sk_buff *skb;
	int err;
	struct hf_msg_hdr *hdr;
	struct hf_sock *hsock = hsock_from_sk(sk);
	size_t payload_max_len = HF_MAILBOX_SIZE - sizeof(struct hf_msg_hdr);

	/* Check length. */
	if (len > payload_max_len)
		return -EMSGSIZE;

	/* We don't allow the destination address to be specified. */
	if (m->msg_namelen > 0)
		return -EISCONN;

	/* We don't support out of band messages. */
	if (m->msg_flags & MSG_OOB)
		return -EOPNOTSUPP;

	/*
	 * Ensure that the socket is connected. We don't need to hold the socket
	 * lock (acquired and released by hf_sock_is_connected) for the
	 * remainder of the function because the fields we care about are
	 * immutable once the state is SS_CONNECTED.
	 */
	if (!hf_sock_is_connected(sock))
		return -ENOTCONN;

	/*
	 * Allocate an skb for this write. If there isn't enough room in the
	 * socket's send buffer (sk_wmem_alloc >= sk_sndbuf), this will block
	 * (if it's a blocking call). On success, it increments sk_wmem_alloc
	 * and sets up the skb such that sk_wmem_alloc gets decremented when
	 * the skb is freed (sock_wfree gets called).
	 */
	skb = sock_alloc_send_skb(sk, len + sizeof(struct hf_msg_hdr),
				  m->msg_flags & MSG_DONTWAIT, &err);
	if (!skb)
		return err;

	/* Reserve room for the header and initialise it. */
	skb_reserve(skb, sizeof(struct hf_msg_hdr));
	hdr = skb_push(skb, sizeof(struct hf_msg_hdr));
	hdr->src_port = hsock->local_port;
	hdr->dst_port = hsock->remote_port;

	/* Allocate area for the contents, then copy into skb. */
	if (!copy_from_iter_full(skb_put(skb, len), len, &m->msg_iter)) {
		err = -EFAULT;
		goto err_cleanup;
	}

	/*
	 * TODO: We currently do this inline, but when we have support for
	 * readiness notification from Hafnium, we must add this to a per-VM tx
	 * queue that can make progress when the VM becomes writable. This will
	 * fix send buffering and poll readiness notification.
	 */
	err = hf_send_skb(skb);
	if (err)
		goto err_cleanup;

	return 0;

err_cleanup:
	kfree_skb(skb);
	return err;
}

/**
 * Receives a message originated from the VM & port the socket is connected to.
 * All variants of read/recv/recvfrom/recvmsg eventually call this function.
 */
static int hf_sock_recvmsg(struct socket *sock, struct msghdr *m, size_t len,
			   int flags)
{
	struct sock *sk = sock->sk;
	struct sk_buff *skb;
	int err;
	size_t copy_len;

	if (!hf_sock_is_connected(sock))
		return -ENOTCONN;

	/* Grab the next skb from the receive queue. */
	skb = skb_recv_datagram(sk, flags, flags & MSG_DONTWAIT, &err);
	if (!skb)
		return err;

	/* Make sure we don't copy more than what fits in the output buffer. */
	copy_len = skb->len;
	if (copy_len > len) {
		copy_len = len;
		m->msg_flags |= MSG_TRUNC;
	}

	/* Make sure we don't overflow the return value type. */
	if (copy_len > INT_MAX) {
		copy_len = INT_MAX;
		m->msg_flags |= MSG_TRUNC;
	}

	/* Copy skb to output iterator, then free it. */
	err = skb_copy_datagram_msg(skb, 0, m, copy_len);
	skb_free_datagram(sk, skb);
	if (err)
		return err;

	return copy_len;
}

/**
 * This function is called when a Hafnium socket is created. It initialises all
 * state such that the caller will be able to connect the socket and then send
 * and receive messages through it.
 */
static int hf_sock_create(struct net *net, struct socket *sock, int protocol,
			  int kern)
{
	static const struct proto_ops ops = {
		.family = PF_HF,
		.owner = THIS_MODULE,
		.release = hf_sock_release,
		.bind = sock_no_bind,
		.connect = hf_sock_connect,
		.socketpair = sock_no_socketpair,
		.accept = sock_no_accept,
		.ioctl = sock_no_ioctl,
		.listen = sock_no_listen,
		.shutdown = sock_no_shutdown,
		.setsockopt = sock_no_setsockopt,
		.getsockopt = sock_no_getsockopt,
		.sendmsg = hf_sock_sendmsg,
		.recvmsg = hf_sock_recvmsg,
		.mmap = sock_no_mmap,
		.sendpage = sock_no_sendpage,
		.poll = datagram_poll,
	};
	struct sock *sk;

	if (sock->type != SOCK_DGRAM)
		return -ESOCKTNOSUPPORT;

	if (protocol != 0)
		return -EPROTONOSUPPORT;

	/*
	 * For now we only allow callers with sys admin capability to create
	 * Hafnium sockets.
	 */
	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	/* Allocate and initialise socket. */
	sk = sk_alloc(net, PF_HF, GFP_KERNEL, &hf_sock_proto, kern);
	if (!sk)
		return -ENOMEM;

	sock_init_data(sock, sk);

	sk->sk_destruct = hf_sock_destruct;
	sock->ops = &ops;
	sock->state = SS_UNCONNECTED;

	return 0;
}
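
/*
 * For reference, a minimal user-space client of this address family would look
 * roughly as follows. This is only a sketch: the exact layout of struct
 * hf_sockaddr is defined in uapi/hf/socket.h, and the VM ID and port are
 * placeholders that must match whatever the peer VM expects.
 *
 *	int fd = socket(AF_HF, SOCK_DGRAM, 0);	(requires CAP_SYS_ADMIN)
 *	struct hf_sockaddr addr = { 0 };
 *	... set the address family to AF_HF, addr.vm_id to the target
 *	    secondary VM and addr.port to the agreed port number ...
 *	connect(fd, (struct sockaddr *)&addr, sizeof(addr));
 *	send(fd, buf, len, 0);	(len is limited to the mailbox payload size)
 *	recv(fd, rbuf, sizeof(rbuf), 0);
 */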

/**
 * Frees all resources, including threads, associated with the Hafnium driver.
 */
static void hf_free_resources(void)
{
	uint16_t i;
	ffa_vcpu_index_t j;

	/*
	 * First stop all worker threads. We need to do this before freeing
	 * resources because workers may reference each other, so it is only
	 * safe to free resources after they have all stopped.
	 */
	for (i = 0; i < hf_vm_count; i++) {
		struct hf_vm *vm = &hf_vms[i];

		for (j = 0; j < vm->vcpu_count; j++)
			kthread_stop(vm->vcpu[j].task);
	}

	/* Free resources. */
	for (i = 0; i < hf_vm_count; i++) {
		struct hf_vm *vm = &hf_vms[i];

		for (j = 0; j < vm->vcpu_count; j++)
			put_task_struct(vm->vcpu[j].task);
		kfree(vm->vcpu);
	}

	kfree(hf_vms);

	ffa_rx_release();
	if (hf_send_page) {
		__free_page(hf_send_page);
		hf_send_page = NULL;
	}
	if (hf_recv_page) {
		__free_page(hf_recv_page);
		hf_recv_page = NULL;
	}
}

/**
 * Handles the hypervisor timer interrupt.
 */
static irqreturn_t hf_nop_irq_handler(int irq, void *dev)
{
	/*
	 * No need to do anything, the interrupt only exists to return to the
	 * primary vCPU so that the virtual timer will be restored and fire as
	 * normal.
	 */
	return IRQ_HANDLED;
}

/**
 * Enables the hypervisor timer interrupt on a CPU, when it starts or after the
 * driver is first loaded.
 */
static int hf_starting_cpu(unsigned int cpu)
{
	if (hf_irq != 0) {
		/* Enable the interrupt, and set it to be edge-triggered. */
		enable_percpu_irq(hf_irq, IRQ_TYPE_EDGE_RISING);
	}

	return 0;
}

/**
 * Disables the hypervisor timer interrupt on a CPU when it is powered down.
 */
static int hf_dying_cpu(unsigned int cpu)
{
	if (hf_irq != 0) {
		/* Disable the interrupt while the CPU is asleep. */
		disable_percpu_irq(hf_irq);
	}

	return 0;
}

/**
 * Registers for the hypervisor timer interrupt.
 */
static int hf_int_driver_probe(struct platform_device *pdev)
{
	int irq;
	int ret;

	/*
	 * Register a handler for the hypervisor timer IRQ, as it is needed for
	 * Hafnium to emulate the virtual timer for Linux while a secondary vCPU
	 * is running.
	 */
	irq = platform_get_irq(pdev, ARCH_TIMER_HYP_PPI);
	if (irq < 0) {
		pr_err("Error getting hypervisor timer IRQ: %d\n", irq);
		return irq;
	}
	hf_irq = irq;

	ret = request_percpu_irq(irq, hf_nop_irq_handler, HYPERVISOR_TIMER_NAME,
				 pdev);
	if (ret != 0) {
		pr_err("Error registering hypervisor timer IRQ %d: %d\n",
		       irq, ret);
		return ret;
	}
	pr_info("Hafnium registered for IRQ %d\n", irq);
	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN,
				"hafnium/hypervisor_timer:starting",
				hf_starting_cpu, hf_dying_cpu);
	if (ret < 0) {
		pr_err("Error enabling timer on all CPUs: %d\n", ret);
		free_percpu_irq(irq, pdev);
		return ret;
	}
	hf_cpuhp_state = ret;

	return 0;
}

/**
 * Unregisters for the hypervisor timer interrupt.
 */
static int hf_int_driver_remove(struct platform_device *pdev)
{
	/*
	 * This will cause hf_dying_cpu to be called on each CPU, which will
	 * disable the IRQs.
	 */
	cpuhp_remove_state(hf_cpuhp_state);
	free_percpu_irq(hf_irq, pdev);

	return 0;
}

static const struct of_device_id hf_int_driver_id[] = {
	{.compatible = "arm,armv7-timer"},
	{.compatible = "arm,armv8-timer"},
	{}
};

static struct platform_driver hf_int_driver = {
	.driver = {
		.name = HYPERVISOR_TIMER_NAME,
		.owner = THIS_MODULE,
		.of_match_table = of_match_ptr(hf_int_driver_id),
	},
	.probe = hf_int_driver_probe,
	.remove = hf_int_driver_remove,
};

/**
 * Print the error code of the given FF-A value if it is an error, or the
 * function ID otherwise.
 */
static void print_ffa_error(struct ffa_value ffa_ret)
{
	if (ffa_ret.func == FFA_ERROR_32)
		pr_err("FF-A error code %d\n", (int)ffa_ret.arg2);
	else
		pr_err("Unexpected FF-A function %#x\n", ffa_ret.func);
}

/**
 * Initializes the Hafnium driver by creating a thread for each vCPU of each
 * virtual machine.
 */
static int __init hf_init(void)
{
	static const struct net_proto_family proto_family = {
		.family = PF_HF,
		.create = hf_sock_create,
		.owner = THIS_MODULE,
	};
	int64_t ret;
	struct ffa_value ffa_ret;
	ffa_vm_id_t i;
	ffa_vcpu_index_t j;
	struct ffa_uuid null_uuid;
	ffa_vm_count_t secondary_vm_count;
	const struct ffa_partition_info *partition_info;
	uint32_t total_vcpu_count;

	/* Allocate a page for send and receive buffers. */
	hf_send_page = alloc_page(GFP_KERNEL);
	if (!hf_send_page) {
		pr_err("Unable to allocate send buffer\n");
		return -ENOMEM;
	}

	hf_recv_page = alloc_page(GFP_KERNEL);
	if (!hf_recv_page) {
		__free_page(hf_send_page);
		hf_send_page = NULL;
		pr_err("Unable to allocate receive buffer\n");
		return -ENOMEM;
	}

	/*
	 * Configure both addresses. Once configured, we cannot free these pages
	 * because the hypervisor will use them, even if the module is
	 * unloaded.
	 */
	ffa_ret = ffa_rxtx_map(page_to_phys(hf_send_page),
			       page_to_phys(hf_recv_page));
	if (ffa_ret.func != FFA_SUCCESS_32) {
		pr_err("Unable to configure VM mailbox.\n");
		print_ffa_error(ffa_ret);
		ret = -EIO;
		goto fail_with_cleanup;
	}

	/* Get information about secondary VMs. */
	ffa_uuid_init(0, 0, 0, 0, &null_uuid);
	ffa_ret = ffa_partition_info_get(&null_uuid);
	if (ffa_ret.func != FFA_SUCCESS_32) {
		pr_err("Unable to get VM information.\n");
		print_ffa_error(ffa_ret);
		ret = -EIO;
		goto fail_with_cleanup;
	}
	secondary_vm_count = ffa_ret.arg2 - 1;
	partition_info = page_address(hf_recv_page);

	/* Confirm the maximum number of VMs looks sane. */
	BUILD_BUG_ON(CONFIG_HAFNIUM_MAX_VMS < 1);
	BUILD_BUG_ON(CONFIG_HAFNIUM_MAX_VMS > U16_MAX);

	/* Validate the number of VMs. There must at least be the primary. */
	if (secondary_vm_count > CONFIG_HAFNIUM_MAX_VMS - 1) {
		pr_err("Number of VMs is out of range: %d\n",
		       secondary_vm_count);
		ret = -EDQUOT;
		goto fail_with_cleanup;
	}

	/* Only track the secondary VMs. */
	hf_vms = kmalloc_array(secondary_vm_count, sizeof(struct hf_vm),
			       GFP_KERNEL);
	if (!hf_vms) {
		ret = -ENOMEM;
		goto fail_with_cleanup;
	}

	/* Cache the VM id for later usage. */
	current_vm_id = hf_vm_get_id();

	/* Initialize each VM. */
	total_vcpu_count = 0;
	for (i = 0; i < secondary_vm_count; i++) {
		struct hf_vm *vm = &hf_vms[i];
		ffa_vcpu_count_t vcpu_count;

		/* Adjust the index as only the secondaries are tracked. */
		vm->id = partition_info[i + 1].vm_id;
		vcpu_count = partition_info[i + 1].vcpu_count;

		/* Avoid overflowing the vcpu count. */
		if (vcpu_count > (U32_MAX - total_vcpu_count)) {
			pr_err("Too many vcpus: %u\n", total_vcpu_count);
			ret = -EDQUOT;
			goto fail_with_cleanup;
		}

		/* Confirm the maximum number of VCPUs looks sane. */
		BUILD_BUG_ON(CONFIG_HAFNIUM_MAX_VCPUS < 1);
		BUILD_BUG_ON(CONFIG_HAFNIUM_MAX_VCPUS > U16_MAX);

		/* Enforce the limit on vcpus. */
		total_vcpu_count += vcpu_count;
		if (total_vcpu_count > CONFIG_HAFNIUM_MAX_VCPUS) {
			pr_err("Too many vcpus: %u\n", total_vcpu_count);
			ret = -EDQUOT;
			goto fail_with_cleanup;
		}

		vm->vcpu_count = vcpu_count;
		vm->vcpu = kmalloc_array(vm->vcpu_count, sizeof(struct hf_vcpu),
					 GFP_KERNEL);
		if (!vm->vcpu) {
			ret = -ENOMEM;
			goto fail_with_cleanup;
		}

		/* Update the number of initialized VMs. */
		hf_vm_count = i + 1;

		/* Create a kernel thread for each vcpu. */
		for (j = 0; j < vm->vcpu_count; j++) {
			struct hf_vcpu *vcpu = &vm->vcpu[j];

			vcpu->task =
				kthread_create(hf_vcpu_thread, vcpu,
					       "vcpu_thread_%u_%u", vm->id, j);
			if (IS_ERR(vcpu->task)) {
				pr_err("Error creating task (vm=%u,vcpu=%u): %ld\n",
				       vm->id, j, PTR_ERR(vcpu->task));
				vm->vcpu_count = j;
				ret = PTR_ERR(vcpu->task);
				goto fail_with_cleanup;
			}

			get_task_struct(vcpu->task);
			vcpu->vm = vm;
			vcpu->vcpu_index = j;
			atomic_set(&vcpu->abort_sleep, 0);
			atomic_set(&vcpu->waiting_for_message, 0);
		}
	}

	ffa_ret = ffa_rx_release();
	if (ffa_ret.func != FFA_SUCCESS_32) {
		pr_err("Unable to release RX buffer.\n");
		print_ffa_error(ffa_ret);
		ret = -EIO;
		goto fail_with_cleanup;
	}

	/* Register protocol and socket family. */
	ret = proto_register(&hf_sock_proto, 0);
	if (ret) {
		pr_err("Unable to register protocol: %lld\n", ret);
		goto fail_with_cleanup;
	}

	ret = sock_register(&proto_family);
	if (ret) {
		pr_err("Unable to register Hafnium's socket family: %lld\n",
		       ret);
		goto fail_unregister_proto;
	}

	/*
	 * Register as a driver for the timer device, so we can register a
	 * handler for the hypervisor timer IRQ.
	 */
	ret = platform_driver_register(&hf_int_driver);
	if (ret != 0) {
		pr_err("Error registering timer driver %lld\n", ret);
		goto fail_unregister_socket;
	}

	/*
	 * Start running threads now that all is initialized.
	 *
	 * Any failures from this point on must also unregister the driver with
	 * platform_driver_unregister().
	 */
	for (i = 0; i < hf_vm_count; i++) {
		struct hf_vm *vm = &hf_vms[i];

		for (j = 0; j < vm->vcpu_count; j++)
			wake_up_process(vm->vcpu[j].task);
	}

	/* Dump vm/vcpu count info. */
	pr_info("Hafnium successfully loaded with %u VMs:\n", hf_vm_count);
	for (i = 0; i < hf_vm_count; i++) {
		struct hf_vm *vm = &hf_vms[i];

		pr_info("\tVM %u: %u vCPUS\n", vm->id, vm->vcpu_count);
	}

	return 0;

fail_unregister_socket:
	sock_unregister(PF_HF);
fail_unregister_proto:
	proto_unregister(&hf_sock_proto);
fail_with_cleanup:
	hf_free_resources();
	return ret;
}

/**
 * Frees up all resources used by the Hafnium driver in preparation for
 * unloading it.
 */
static void __exit hf_exit(void)
{
	pr_info("Preparing to unload Hafnium\n");
	sock_unregister(PF_HF);
	proto_unregister(&hf_sock_proto);
	hf_free_resources();
	platform_driver_unregister(&hf_int_driver);
	pr_info("Hafnium ready to unload\n");
}

MODULE_LICENSE("GPL v2");

module_init(hf_init);
module_exit(hf_exit);