// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright 2018 The Hafnium Authors.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <clocksource/arm_arch_timer.h>
#include <linux/atomic.h>
#include <linux/cpuhotplug.h>
#include <linux/hrtimer.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/net.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/sched/task.h>
#include <linux/slab.h>
#include <net/sock.h>

#include <hf/call.h>
#include <hf/ffa.h>
#include <hf/transport.h>

#include "uapi/hf/socket.h"

#define HYPERVISOR_TIMER_NAME "el2_timer"

#define CONFIG_HAFNIUM_MAX_VMS 16
#define CONFIG_HAFNIUM_MAX_VCPUS 32

#define FIRST_SECONDARY_VM_ID (HF_VM_ID_OFFSET + 1)

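/*
 * VM ID layout assumed by this driver: the primary VM (the Linux this module
 * runs in) comes first after HF_VM_ID_OFFSET and secondary VMs follow it, so
 * the first secondary is HF_VM_ID_OFFSET + 1. The hf_vms array below tracks
 * only the secondaries: index i corresponds to VM ID
 * i + FIRST_SECONDARY_VM_ID (see hf_vm_from_id()).
 */
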
struct hf_vcpu {
	struct hf_vm *vm;
	ffa_vcpu_index_t vcpu_index;
	/* Kernel thread that runs this vCPU (see hf_vcpu_thread). */
	struct task_struct *task;
	/* Set by hf_vcpu_wake_up() so a concurrent hf_vcpu_sleep() bails out. */
	atomic_t abort_sleep;
	/* Set while the vCPU is blocked waiting for a message. */
	atomic_t waiting_for_message;
	struct hrtimer timer;
};

struct hf_vm {
	ffa_vm_id_t id;
	ffa_vcpu_count_t vcpu_count;
	struct hf_vcpu *vcpu;
};

struct hf_sock {
	/* This needs to be the first field. */
	struct sock sk;

	/*
	 * The following fields are immutable after the socket transitions to
	 * SS_CONNECTED state.
	 */
	uint64_t local_port;
	uint64_t remote_port;
	struct hf_vm *peer_vm;
};

static struct proto hf_sock_proto = {
	.name = "hafnium",
	.owner = THIS_MODULE,
	.obj_size = sizeof(struct hf_sock),
};

static struct hf_vm *hf_vms;
static ffa_vm_count_t hf_vm_count;
static struct page *hf_send_page;
static struct page *hf_recv_page;
static atomic64_t hf_next_port = ATOMIC64_INIT(0);
static DEFINE_SPINLOCK(hf_send_lock);
static DEFINE_HASHTABLE(hf_local_port_hash, 7);
static DEFINE_SPINLOCK(hf_local_port_hash_lock);
static int hf_irq;
static enum cpuhp_state hf_cpuhp_state;
static ffa_vm_id_t current_vm_id;

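/*
 * A note on the globals above (a reading of this file, not a spec): Hafnium
 * gives the primary VM a single TX/RX mailbox pair of HF_MAILBOX_SIZE bytes
 * (hf_send_page/hf_recv_page, mapped in hf_init() via ffa_rxtx_map()).
 * Because there is one global send buffer, all outbound messages are
 * serialized under hf_send_lock, and hf_local_port_hash maps a local port to
 * its connected socket for inbound delivery.
 */
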
/**
 * Retrieves a VM from its ID, returning NULL if the VM doesn't exist.
 */
static struct hf_vm *hf_vm_from_id(ffa_vm_id_t vm_id)
{
	if (vm_id < FIRST_SECONDARY_VM_ID ||
	    vm_id >= FIRST_SECONDARY_VM_ID + hf_vm_count)
		return NULL;

	return &hf_vms[vm_id - FIRST_SECONDARY_VM_ID];
}

/**
 * Wakes up the kernel thread responsible for running the given vcpu.
 *
 * Returns 0 if the thread was already running, 1 otherwise.
 */
static int hf_vcpu_wake_up(struct hf_vcpu *vcpu)
{
	/* Set a flag indicating that the thread should not go to sleep. */
	atomic_set(&vcpu->abort_sleep, 1);

	/* Set the thread to running state. */
	return wake_up_process(vcpu->task);
}

/**
 * Puts the current thread to sleep. The current thread must be responsible for
 * running the given vcpu.
 *
 * Going to sleep will fail if hf_vcpu_wake_up() or kthread_stop() was called on
 * this vcpu/thread since the last time it [re]started running.
 */
static void hf_vcpu_sleep(struct hf_vcpu *vcpu)
{
	int abort;

	set_current_state(TASK_INTERRUPTIBLE);

	/*
	 * Check the sleep-abort flag only after marking the thread
	 * interruptible. This pairs with hf_vcpu_wake_up(), which sets the
	 * flag and then calls wake_up_process(): we either see the flag here
	 * or are woken out of schedule(), so no wake-up is lost.
	 */
	abort = atomic_read(&vcpu->abort_sleep);
	if (!abort && !kthread_should_stop())
		schedule();

	/* Set state back to running on the way out. */
	set_current_state(TASK_RUNNING);
}

/**
 * Wakes up the thread associated with the vcpu that owns the given timer. This
 * is called when the timer the thread is waiting on expires.
 */
static enum hrtimer_restart hf_vcpu_timer_expired(struct hrtimer *timer)
{
	struct hf_vcpu *vcpu = container_of(timer, struct hf_vcpu, timer);
	/* TODO: Inject interrupt. */
	hf_vcpu_wake_up(vcpu);
	return HRTIMER_NORESTART;
}

/**
 * This function is called when Hafnium requests that the primary VM wake up a
 * vCPU that belongs to a secondary VM.
 *
 * It wakes up the thread if it's sleeping, or kicks it if it's already running.
 */
static void hf_handle_wake_up_request(ffa_vm_id_t vm_id,
				      ffa_vcpu_index_t vcpu)
{
	struct hf_vm *vm = hf_vm_from_id(vm_id);

	if (!vm) {
		pr_warn("Request to wake up non-existent VM id: %u\n", vm_id);
		return;
	}

	if (vcpu >= vm->vcpu_count) {
		pr_warn("Request to wake up non-existent vCPU: %u.%u\n",
			vm_id, vcpu);
		return;
	}

	if (hf_vcpu_wake_up(&vm->vcpu[vcpu]) == 0) {
		/*
		 * The task was already running (presumably on a different
		 * physical CPU); interrupt it. This gives Hafnium a chance to
		 * inject any new interrupts.
		 */
		kick_process(vm->vcpu[vcpu].task);
	}
}

/**
 * Injects an interrupt into a vCPU of the VM and ensures the vCPU will run to
 * handle the interrupt.
 */
static void hf_interrupt_vm(ffa_vm_id_t vm_id, uint64_t int_id)
{
	struct hf_vm *vm = hf_vm_from_id(vm_id);
	ffa_vcpu_index_t vcpu;
	int64_t ret;

	if (!vm) {
		pr_warn("Request to interrupt non-existent VM id: %u\n",
			vm_id);
		return;
	}

	/*
	 * TODO: For now we're picking the first vcpu to interrupt, but
	 * we want to be smarter.
	 */
	vcpu = 0;
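	/*
	 * hf_interrupt_inject() is assumed (from the checks below) to return
	 * -1 on failure, 1 if the target vCPU must be woken to handle the
	 * interrupt, and another value if it is already due to run.
	 */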
	ret = hf_interrupt_inject(vm_id, vcpu, int_id);

	if (ret == -1) {
		pr_warn("Failed to inject interrupt %llu to vCPU %u of VM %u\n",
			int_id, vcpu, vm_id);
		return;
	}

	if (ret != 1) {
		/* We don't need to wake up the vcpu. */
		return;
	}

	hf_handle_wake_up_request(vm_id, vcpu);
}

/**
 * Notify all waiters on the given VM.
 */
static void hf_notify_waiters(ffa_vm_id_t vm_id)
{
	ffa_vm_id_t waiter_vm_id;

	while ((waiter_vm_id = hf_mailbox_waiter_get(vm_id)) != -1) {
		if (waiter_vm_id == HF_PRIMARY_VM_ID) {
			/*
			 * TODO: Use this information when implementing per-vm
			 * queues.
			 */
		} else {
			hf_interrupt_vm(waiter_vm_id,
					HF_MAILBOX_WRITABLE_INTID);
		}
	}
}

/**
 * Delivers a message to a VM.
 */
static void hf_deliver_message(ffa_vm_id_t vm_id)
{
	struct hf_vm *vm = hf_vm_from_id(vm_id);
	ffa_vcpu_index_t i;

	if (!vm) {
		pr_warn("Tried to deliver message to non-existent VM id: %u\n",
			vm_id);
		return;
	}

	/* Try to wake a vCPU that is waiting for a message. */
	for (i = 0; i < vm->vcpu_count; i++) {
		if (atomic_read(&vm->vcpu[i].waiting_for_message)) {
			hf_handle_wake_up_request(vm->id,
						  vm->vcpu[i].vcpu_index);
			return;
		}
	}

	/* None were waiting for a message so interrupt one. */
	hf_interrupt_vm(vm->id, HF_MAILBOX_READABLE_INTID);
}

/**
 * Handles a message delivered to this VM by validating that it's well-formed
 * and then queueing it for delivery to the appropriate socket.
 */
static void hf_handle_message(struct hf_vm *sender, size_t len,
			      const void *message)
{
	struct hf_sock *hsock;
	const struct hf_msg_hdr *hdr = (const struct hf_msg_hdr *)message;
	struct sk_buff *skb;
	int err;

	/* Ignore messages that are too small to hold a header. */
	if (len < sizeof(struct hf_msg_hdr)) {
		pr_err("Message received is too short to hold a header: %zu bytes\n",
		       len);
		ffa_rx_release();
		return;
	}

	len -= sizeof(struct hf_msg_hdr);

	/* Go through the colliding sockets. */
	rcu_read_lock();
	hash_for_each_possible_rcu(hf_local_port_hash, hsock, sk.sk_node,
				   hdr->dst_port) {
		if (hsock->peer_vm == sender &&
		    hsock->remote_port == hdr->src_port) {
			sock_hold(&hsock->sk);
			break;
		}
	}
	rcu_read_unlock();

	/* Nothing to do if we couldn't find the target. */
	if (!hsock) {
		ffa_rx_release();
		return;
	}

	/*
	 * TODO: From this point on, there are two failure paths: when we
	 * create the skb below, and when we enqueue it to the socket. What
	 * should we do if they fail? Ideally we would have some form of flow
	 * control to prevent message loss, but how to do it efficiently?
	 *
	 * One option is to have a pre-allocated message that indicates to the
	 * sender that a message was dropped. This way we guarantee that the
	 * sender will be aware of loss and should back-off.
	 */
	/* Create the skb. */
	skb = alloc_skb(len, GFP_KERNEL);
	if (!skb)
		goto exit;

	memcpy(skb_put(skb, len), hdr + 1, len);

	/*
	 * Add the skb to the receive queue of the target socket. On success it
	 * calls sk->sk_data_ready, which is currently set to sock_def_readable,
	 * which wakes up any waiters.
	 */
	err = sock_queue_rcv_skb(&hsock->sk, skb);
	if (err)
		kfree_skb(skb);

exit:
	sock_put(&hsock->sk);

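	/*
	 * Hand the RX buffer back to Hafnium. The check below assumes that
	 * ffa_rx_release() returns FFA_RX_RELEASE_32 when other VMs were
	 * blocked waiting for this mailbox to become writable again, in which
	 * case they are notified (a reading inferred from how the result
	 * gates hf_notify_waiters() here).
	 */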
	if (ffa_rx_release().func == FFA_RX_RELEASE_32)
		hf_notify_waiters(HF_PRIMARY_VM_ID);
}

/**
 * This is the main loop of each vcpu.
 */
static int hf_vcpu_thread(void *data)
{
	struct hf_vcpu *vcpu = data;
	struct ffa_value ret;

	hrtimer_init(&vcpu->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	vcpu->timer.function = &hf_vcpu_timer_expired;

	while (!kthread_should_stop()) {
		ffa_vcpu_index_t i;

		/*
		 * We're about to run the vcpu, so we can reset the abort-sleep
		 * flag.
		 */
		atomic_set(&vcpu->abort_sleep, 0);

		/* Call into Hafnium to run vcpu. */
		ret = ffa_run(vcpu->vm->id, vcpu->vcpu_index);

		switch (ret.func) {
		/* Preempted. */
		case FFA_INTERRUPT_32:
			if (need_resched())
				schedule();
			break;

		/* Yield. */
		case FFA_YIELD_32:
			if (!kthread_should_stop())
				schedule();
			break;

		/* WFI. */
		case HF_FFA_RUN_WAIT_FOR_INTERRUPT:
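			/*
			 * Here and in FFA_MSG_WAIT_32 below, ret.arg2 is
			 * treated as the requested sleep duration: either
			 * FFA_SLEEP_INDEFINITE or a relative timeout passed
			 * to hrtimer_start() as nanoseconds (ktime_t).
			 */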
			if (ret.arg2 != FFA_SLEEP_INDEFINITE) {
				hrtimer_start(&vcpu->timer, ret.arg2,
					      HRTIMER_MODE_REL);
			}
			hf_vcpu_sleep(vcpu);
			hrtimer_cancel(&vcpu->timer);
			break;

		/* Waiting for a message. */
		case FFA_MSG_WAIT_32:
			atomic_set(&vcpu->waiting_for_message, 1);
			if (ret.arg2 != FFA_SLEEP_INDEFINITE) {
				hrtimer_start(&vcpu->timer, ret.arg2,
					      HRTIMER_MODE_REL);
			}
			hf_vcpu_sleep(vcpu);
			hrtimer_cancel(&vcpu->timer);
			atomic_set(&vcpu->waiting_for_message, 0);
			break;

		/* Wake up another vcpu. */
		case HF_FFA_RUN_WAKE_UP:
			hf_handle_wake_up_request(ffa_vm_id(ret),
						  ffa_vcpu_index(ret));
			break;

		/* Response available. */
		case FFA_MSG_SEND_32:
			if (ffa_msg_send_receiver(ret) == HF_PRIMARY_VM_ID) {
				hf_handle_message(vcpu->vm,
						  ffa_msg_send_size(ret),
						  page_address(hf_recv_page));
			} else {
				hf_deliver_message(ffa_msg_send_receiver(ret));
			}
			break;

		/* Notify all waiters. */
		case FFA_RX_RELEASE_32:
			hf_notify_waiters(vcpu->vm->id);
			break;

		case FFA_ERROR_32:
			pr_warn("FF-A error %llu running VM %u vCPU %u\n",
				ret.arg2, vcpu->vm->id, vcpu->vcpu_index);
			switch (ret.arg2) {
			/* Abort was triggered. */
			case FFA_ABORTED:
				for (i = 0; i < vcpu->vm->vcpu_count; i++) {
					if (i == vcpu->vcpu_index)
						continue;
					hf_handle_wake_up_request(vcpu->vm->id,
								  i);
				}
				hf_vcpu_sleep(vcpu);
				break;
			default:
				/* Treat as a yield and try again later. */
				if (!kthread_should_stop())
					schedule();
				break;
			}
			break;
		}
	}

	return 0;
}

/**
 * Converts a pointer to a struct sock into a pointer to a struct hf_sock. It
 * relies on the fact that the first field of hf_sock is a sock.
 */
static struct hf_sock *hsock_from_sk(struct sock *sk)
{
	return (struct hf_sock *)sk;
}

/**
 * This is called when the last reference to the outer socket is released. For
 * example, if it's a user-space socket, when the last file descriptor pointing
 * to this socket is closed.
 *
 * It begins cleaning up resources, though some can only be cleaned up after all
 * references to the underlying socket are released, which is handled by
 * hf_sock_destruct().
 */
static int hf_sock_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct hf_sock *hsock = hsock_from_sk(sk);
	unsigned long flags;

	if (!sk)
		return 0;

	/* Shutdown for both send and receive. */
	lock_sock(sk);
	sk->sk_shutdown |= RCV_SHUTDOWN | SEND_SHUTDOWN;
	sk->sk_state_change(sk);
	release_sock(sk);

	/* Remove from the hash table, so lookups from now on won't find it. */
	spin_lock_irqsave(&hf_local_port_hash_lock, flags);
	hash_del_rcu(&hsock->sk.sk_node);
	spin_unlock_irqrestore(&hf_local_port_hash_lock, flags);

	/*
	 * TODO: When we implement a tx queue, we need to clear it here so that
	 * sk_wmem_alloc will not prevent sk from being freed (sk_free).
	 */

	/*
	 * Wait for in-flight lookups to finish. We need to do this here because
	 * in-flight lookups rely on the reference to the socket we're about to
	 * release.
	 */
	synchronize_rcu();
	sock_put(sk);
	sock->sk = NULL;

	return 0;
}

/**
 * This is called when there are no more references to the socket. It frees all
 * resources that haven't been freed during release.
 */
static void hf_sock_destruct(struct sock *sk)
{
	/*
	 * Clear the receive queue now that the handler cannot add any more
	 * skbs to it.
	 */
	skb_queue_purge(&sk->sk_receive_queue);
}

/**
 * Connects the Hafnium socket to the provided VM and port. After the socket is
 * connected, it can be used to exchange datagrams with the specified peer.
 */
static int hf_sock_connect(struct socket *sock, struct sockaddr *saddr, int len,
			   int connect_flags)
{
	struct sock *sk = sock->sk;
	struct hf_sock *hsock = hsock_from_sk(sk);
	struct hf_vm *vm;
	struct hf_sockaddr *addr;
	int err;
	unsigned long flags;

	/* Basic address validation. */
	if (len < sizeof(struct hf_sockaddr) || saddr->sa_family != AF_HF)
		return -EINVAL;

	addr = (struct hf_sockaddr *)saddr;
	vm = hf_vm_from_id(addr->vm_id);
	if (!vm)
		return -ENETUNREACH;

	/*
	 * TODO: Once we implement access control in Hafnium, check that the
	 * caller is allowed to contact the specified VM. Return -ECONNREFUSED
	 * if access is denied.
	 */

	/* Take lock to make sure state doesn't change as we connect. */
	lock_sock(sk);

	/* Only unconnected sockets are allowed to become connected. */
	if (sock->state != SS_UNCONNECTED) {
		err = -EISCONN;
		goto exit;
	}

	hsock->local_port = atomic64_inc_return(&hf_next_port);
	hsock->remote_port = addr->port;
	hsock->peer_vm = vm;

	sock->state = SS_CONNECTED;

	/* Add socket to hash table now that it's fully initialised. */
	spin_lock_irqsave(&hf_local_port_hash_lock, flags);
	hash_add_rcu(hf_local_port_hash, &sk->sk_node, hsock->local_port);
	spin_unlock_irqrestore(&hf_local_port_hash_lock, flags);

	err = 0;
exit:
	release_sock(sk);
	return err;
}

/**
 * Sends the given skb to the appropriate VM by calling Hafnium. It will also
 * trigger the wake up of a recipient VM.
 *
 * Takes ownership of the skb on success.
 */
static int hf_send_skb(struct sk_buff *skb)
{
	unsigned long flags;
	struct ffa_value ret;
	struct hf_sock *hsock = hsock_from_sk(skb->sk);
	struct hf_vm *vm = hsock->peer_vm;
	void *message = page_address(hf_send_page);

	/*
	 * Call Hafnium under the send lock so that we serialize the use of the
	 * global send buffer.
	 */
	spin_lock_irqsave(&hf_send_lock, flags);
	memcpy(message, skb->data, skb->len);

	ret = ffa_msg_send(current_vm_id, vm->id, skb->len, 0);
	spin_unlock_irqrestore(&hf_send_lock, flags);

	if (ret.func == FFA_ERROR_32) {
		switch (ret.arg2) {
		case FFA_INVALID_PARAMETERS:
			return -ENXIO;
		case FFA_NOT_SUPPORTED:
			return -EIO;
		case FFA_DENIED:
		case FFA_BUSY:
		default:
			return -EAGAIN;
		}
	}

	/* Ensure the VM will run to pick up the message. */
	hf_deliver_message(vm->id);

	kfree_skb(skb);

	return 0;
}

/**
 * Determines if the given socket is in the connected state. It acquires and
 * releases the socket lock.
 */
static bool hf_sock_is_connected(struct socket *sock)
{
	bool ret;

	lock_sock(sock->sk);
	ret = sock->state == SS_CONNECTED;
	release_sock(sock->sk);

	return ret;
}

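/*
 * Wire format used by hf_sock_sendmsg() and hf_handle_message(): each
 * datagram sent through Hafnium is a struct hf_msg_hdr, carrying the source
 * and destination ports (64-bit values, matching the hf_sock port fields),
 * followed immediately by the payload. The largest payload is therefore
 * HF_MAILBOX_SIZE - sizeof(struct hf_msg_hdr) bytes.
 */
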
/**
 * Sends a message to the VM & port the socket is connected to. All variants
 * of write/send/sendto/sendmsg eventually call this function.
 */
static int hf_sock_sendmsg(struct socket *sock, struct msghdr *m, size_t len)
{
	struct sock *sk = sock->sk;
	struct sk_buff *skb;
	int err;
	struct hf_msg_hdr *hdr;
	struct hf_sock *hsock = hsock_from_sk(sk);
	size_t payload_max_len = HF_MAILBOX_SIZE - sizeof(struct hf_msg_hdr);

	/* Check length. */
	if (len > payload_max_len)
		return -EMSGSIZE;

	/* We don't allow the destination address to be specified. */
	if (m->msg_namelen > 0)
		return -EISCONN;

	/* We don't support out of band messages. */
	if (m->msg_flags & MSG_OOB)
		return -EOPNOTSUPP;

	/*
	 * Ensure that the socket is connected. We don't need to hold the socket
	 * lock (acquired and released by hf_sock_is_connected) for the
	 * remainder of the function because the fields we care about are
	 * immutable once the state is SS_CONNECTED.
	 */
	if (!hf_sock_is_connected(sock))
		return -ENOTCONN;

	/*
	 * Allocate an skb for this write. If there isn't enough room in the
	 * socket's send buffer (sk_wmem_alloc >= sk_sndbuf), this will block
	 * (if it's a blocking call). On success, it increments sk_wmem_alloc
	 * and sets up the skb such that sk_wmem_alloc gets decremented when
	 * the skb is freed (sock_wfree gets called).
	 */
	skb = sock_alloc_send_skb(sk, len + sizeof(struct hf_msg_hdr),
				  m->msg_flags & MSG_DONTWAIT, &err);
	if (!skb)
		return err;

	/* Reserve room for the header and initialise it. */
	skb_reserve(skb, sizeof(struct hf_msg_hdr));
	hdr = skb_push(skb, sizeof(struct hf_msg_hdr));
	hdr->src_port = hsock->local_port;
	hdr->dst_port = hsock->remote_port;

	/* Allocate area for the contents, then copy into skb. */
	if (!copy_from_iter_full(skb_put(skb, len), len, &m->msg_iter)) {
		err = -EFAULT;
		goto err_cleanup;
	}

	/*
	 * TODO: We currently do this inline, but when we have support for
	 * readiness notification from Hafnium, we must add this to a per-VM tx
	 * queue that can make progress when the VM becomes writable. This will
	 * fix send buffering and poll readiness notification.
	 */
	err = hf_send_skb(skb);
	if (err)
		goto err_cleanup;

	return 0;

err_cleanup:
	kfree_skb(skb);
	return err;
}

/**
 * Receives a message originated from the VM & port the socket is connected to.
 * All variants of read/recv/recvfrom/recvmsg eventually call this function.
 */
static int hf_sock_recvmsg(struct socket *sock, struct msghdr *m, size_t len,
			   int flags)
{
	struct sock *sk = sock->sk;
	struct sk_buff *skb;
	int err;
	size_t copy_len;

	if (!hf_sock_is_connected(sock))
		return -ENOTCONN;

	/* Grab the next skb from the receive queue. */
	skb = skb_recv_datagram(sk, flags, flags & MSG_DONTWAIT, &err);
	if (!skb)
		return err;

	/* Make sure we don't copy more than what fits in the output buffer. */
	copy_len = skb->len;
	if (copy_len > len) {
		copy_len = len;
		m->msg_flags |= MSG_TRUNC;
	}

	/* Make sure we don't overflow the return value type. */
	if (copy_len > INT_MAX) {
		copy_len = INT_MAX;
		m->msg_flags |= MSG_TRUNC;
	}

	/* Copy skb to output iterator, then free it. */
	err = skb_copy_datagram_msg(skb, 0, m, copy_len);
	skb_free_datagram(sk, skb);
	if (err)
		return err;

	return copy_len;
}

/**
 * This function is called when a Hafnium socket is created. It initialises all
 * state such that the caller will be able to connect the socket and then send
 * and receive messages through it.
 */
static int hf_sock_create(struct net *net, struct socket *sock, int protocol,
			  int kern)
{
	static const struct proto_ops ops = {
		.family = PF_HF,
		.owner = THIS_MODULE,
		.release = hf_sock_release,
		.bind = sock_no_bind,
		.connect = hf_sock_connect,
		.socketpair = sock_no_socketpair,
		.accept = sock_no_accept,
		.ioctl = sock_no_ioctl,
		.listen = sock_no_listen,
		.shutdown = sock_no_shutdown,
		.setsockopt = sock_no_setsockopt,
		.getsockopt = sock_no_getsockopt,
		.sendmsg = hf_sock_sendmsg,
		.recvmsg = hf_sock_recvmsg,
		.mmap = sock_no_mmap,
		.sendpage = sock_no_sendpage,
		.poll = datagram_poll,
	};
	struct sock *sk;

	if (sock->type != SOCK_DGRAM)
		return -ESOCKTNOSUPPORT;

	if (protocol != 0)
		return -EPROTONOSUPPORT;

	/*
	 * For now we only allow callers with sys admin capability to create
	 * Hafnium sockets.
	 */
	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	/* Allocate and initialise socket. */
	sk = sk_alloc(net, PF_HF, GFP_KERNEL, &hf_sock_proto, kern);
	if (!sk)
		return -ENOMEM;

	sock_init_data(sock, sk);

	sk->sk_destruct = hf_sock_destruct;
	sock->ops = &ops;
	sock->state = SS_UNCONNECTED;

	return 0;
}

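/*
 * Userspace usage sketch (illustrative only, not part of this module; the
 * hf_sockaddr field names are assumed from uapi/hf/socket.h as used in
 * hf_sock_connect() above, and the target VM ID and port are made-up
 * examples):
 *
 *	int fd = socket(AF_HF, SOCK_DGRAM, 0);	// requires CAP_SYS_ADMIN
 *	struct hf_sockaddr addr = {
 *		.family = AF_HF,
 *		.vm_id = FIRST_SECONDARY_VM_ID,	// e.g. the first secondary
 *		.port = 10,			// port the peer listens on
 *	};
 *	connect(fd, (struct sockaddr *)&addr, sizeof(addr));
 *	send(fd, buf, len, 0);			// len <= max payload
 *	recv(fd, buf, sizeof(buf), 0);
 */
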
/**
 * Frees all resources, including threads, associated with the Hafnium driver.
 */
static void hf_free_resources(void)
{
	uint16_t i;
	ffa_vcpu_index_t j;

	/*
	 * First stop all worker threads. We need to do this before freeing
	 * resources because workers may reference each other, so it is only
	 * safe to free resources after they have all stopped.
	 */
	for (i = 0; i < hf_vm_count; i++) {
		struct hf_vm *vm = &hf_vms[i];

		for (j = 0; j < vm->vcpu_count; j++)
			kthread_stop(vm->vcpu[j].task);
	}

	/* Free resources. */
	for (i = 0; i < hf_vm_count; i++) {
		struct hf_vm *vm = &hf_vms[i];

		for (j = 0; j < vm->vcpu_count; j++)
			put_task_struct(vm->vcpu[j].task);
		kfree(vm->vcpu);
	}

	kfree(hf_vms);

	ffa_rx_release();
	if (hf_send_page) {
		__free_page(hf_send_page);
		hf_send_page = NULL;
	}
	if (hf_recv_page) {
		__free_page(hf_recv_page);
		hf_recv_page = NULL;
	}
}

/**
 * Handles the hypervisor timer interrupt.
 */
static irqreturn_t hf_nop_irq_handler(int irq, void *dev)
{
	/*
	 * No need to do anything, the interrupt only exists to return to the
	 * primary vCPU so that the virtual timer will be restored and fire as
	 * normal.
	 */
	return IRQ_HANDLED;
}

/**
 * Enables the hypervisor timer interrupt on a CPU, when it starts or after the
 * driver is first loaded.
 */
static int hf_starting_cpu(unsigned int cpu)
{
	if (hf_irq != 0) {
		/* Enable the interrupt, and set it to be edge-triggered. */
		enable_percpu_irq(hf_irq, IRQ_TYPE_EDGE_RISING);
	}

	return 0;
}

/**
 * Disables the hypervisor timer interrupt on a CPU when it is powered down.
 */
static int hf_dying_cpu(unsigned int cpu)
{
	if (hf_irq != 0) {
		/* Disable the interrupt while the CPU is asleep. */
		disable_percpu_irq(hf_irq);
	}

	return 0;
}

/**
 * Registers for the hypervisor timer interrupt.
 */
static int hf_int_driver_probe(struct platform_device *pdev)
{
	int irq;
	int ret;

	/*
	 * Register a handler for the hypervisor timer IRQ, as it is needed for
	 * Hafnium to emulate the virtual timer for Linux while a secondary vCPU
	 * is running.
	 */
	irq = platform_get_irq(pdev, ARCH_TIMER_HYP_PPI);
	if (irq < 0) {
		pr_err("Error getting hypervisor timer IRQ: %d\n", irq);
		return irq;
	}
	hf_irq = irq;

	ret = request_percpu_irq(irq, hf_nop_irq_handler, HYPERVISOR_TIMER_NAME,
				 pdev);
	if (ret != 0) {
		pr_err("Error registering hypervisor timer IRQ %d: %d\n",
		       irq, ret);
		return ret;
	}
	pr_info("Hafnium registered for IRQ %d\n", irq);
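	/*
	 * With CPUHP_AP_ONLINE_DYN, cpuhp_setup_state() allocates a dynamic
	 * hotplug state, invokes hf_starting_cpu() on every CPU that is
	 * already online, and returns the allocated state number (>= 0),
	 * which is kept in hf_cpuhp_state for cpuhp_remove_state() later.
	 */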
	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN,
				"hafnium/hypervisor_timer:starting",
				hf_starting_cpu, hf_dying_cpu);
	if (ret < 0) {
		pr_err("Error enabling timer on all CPUs: %d\n", ret);
		free_percpu_irq(irq, pdev);
		return ret;
	}
	hf_cpuhp_state = ret;

	return 0;
}

/**
 * Unregisters for the hypervisor timer interrupt.
 */
static int hf_int_driver_remove(struct platform_device *pdev)
{
	/*
	 * This will cause hf_dying_cpu to be called on each CPU, which will
	 * disable the IRQs.
	 */
	cpuhp_remove_state(hf_cpuhp_state);
	free_percpu_irq(hf_irq, pdev);

	return 0;
}

static const struct of_device_id hf_int_driver_id[] = {
	{.compatible = "arm,armv7-timer"},
	{.compatible = "arm,armv8-timer"},
	{}
};

static struct platform_driver hf_int_driver = {
	.driver = {
		.name = HYPERVISOR_TIMER_NAME,
		.owner = THIS_MODULE,
		.of_match_table = of_match_ptr(hf_int_driver_id),
	},
	.probe = hf_int_driver_probe,
	.remove = hf_int_driver_remove,
};

/**
 * Initializes the Hafnium driver by creating a thread for each vCPU of each
 * virtual machine.
 */
static int __init hf_init(void)
{
	static const struct net_proto_family proto_family = {
		.family = PF_HF,
		.create = hf_sock_create,
		.owner = THIS_MODULE,
	};
	int64_t ret;
	struct ffa_value ffa_ret;
	ffa_vm_id_t i;
	ffa_vcpu_index_t j;
	ffa_vm_count_t secondary_vm_count;
	uint32_t total_vcpu_count;

	/* Allocate a page for send and receive buffers. */
	hf_send_page = alloc_page(GFP_KERNEL);
	if (!hf_send_page) {
		pr_err("Unable to allocate send buffer\n");
		return -ENOMEM;
	}

	hf_recv_page = alloc_page(GFP_KERNEL);
	if (!hf_recv_page) {
		__free_page(hf_send_page);
		hf_send_page = NULL;
		pr_err("Unable to allocate receive buffer\n");
		return -ENOMEM;
	}

	/*
	 * Configure both addresses. Once configured, we cannot free these pages
	 * because the hypervisor will use them, even if the module is
	 * unloaded.
	 */
	ffa_ret = ffa_rxtx_map(page_to_phys(hf_send_page),
			       page_to_phys(hf_recv_page));
	if (ffa_ret.func != FFA_SUCCESS_32) {
		pr_err("Unable to configure VM mailbox.\n");
		if (ffa_ret.func == FFA_ERROR_32)
			pr_err("FF-A error code %llu\n", ffa_ret.arg2);
		else
			pr_err("Unexpected FF-A function %#x\n", ffa_ret.func);
		ret = -EIO;
		goto fail_with_cleanup;
	}

	/* Get the number of secondary VMs. */
	secondary_vm_count = hf_vm_get_count() - 1;

	/* Confirm the maximum number of VMs looks sane. */
	BUILD_BUG_ON(CONFIG_HAFNIUM_MAX_VMS < 1);
	BUILD_BUG_ON(CONFIG_HAFNIUM_MAX_VMS > U16_MAX);

	/* Validate the number of VMs. There must at least be the primary. */
	if (secondary_vm_count > CONFIG_HAFNIUM_MAX_VMS - 1) {
		pr_err("Number of VMs is out of range: %d\n",
		       secondary_vm_count);
		ret = -EDQUOT;
		goto fail_with_cleanup;
	}

	/* Only track the secondary VMs. */
	hf_vms = kmalloc_array(secondary_vm_count, sizeof(struct hf_vm),
			       GFP_KERNEL);
	if (!hf_vms) {
		ret = -ENOMEM;
		goto fail_with_cleanup;
	}

	/* Cache the VM id for later usage. */
	current_vm_id = hf_vm_get_id();

	/* Initialize each VM. */
	total_vcpu_count = 0;
	for (i = 0; i < secondary_vm_count; i++) {
		struct hf_vm *vm = &hf_vms[i];
		ffa_vcpu_count_t vcpu_count;

		/* Adjust the ID as only the secondaries are tracked. */
		vm->id = i + FIRST_SECONDARY_VM_ID;

		vcpu_count = hf_vcpu_get_count(vm->id);
		if (vcpu_count < 0) {
			pr_err("HF_VCPU_GET_COUNT failed for vm=%u: %d\n",
			       vm->id, vcpu_count);
			ret = -EIO;
			goto fail_with_cleanup;
		}

		/* Avoid overflowing the vcpu count. */
		if (vcpu_count > (U32_MAX - total_vcpu_count)) {
			pr_err("Too many vcpus: %u\n", total_vcpu_count);
			ret = -EDQUOT;
			goto fail_with_cleanup;
		}

		/* Confirm the maximum number of VCPUs looks sane. */
		BUILD_BUG_ON(CONFIG_HAFNIUM_MAX_VCPUS < 1);
		BUILD_BUG_ON(CONFIG_HAFNIUM_MAX_VCPUS > U16_MAX);

		/* Enforce the limit on vcpus. */
		total_vcpu_count += vcpu_count;
		if (total_vcpu_count > CONFIG_HAFNIUM_MAX_VCPUS) {
			pr_err("Too many vcpus: %u\n", total_vcpu_count);
			ret = -EDQUOT;
			goto fail_with_cleanup;
		}

		vm->vcpu_count = vcpu_count;
		vm->vcpu = kmalloc_array(vm->vcpu_count, sizeof(struct hf_vcpu),
					 GFP_KERNEL);
		if (!vm->vcpu) {
			ret = -ENOMEM;
			goto fail_with_cleanup;
		}

		/* Update the number of initialized VMs. */
		hf_vm_count = i + 1;

		/* Create a kernel thread for each vcpu. */
		for (j = 0; j < vm->vcpu_count; j++) {
			struct hf_vcpu *vcpu = &vm->vcpu[j];

			vcpu->task =
				kthread_create(hf_vcpu_thread, vcpu,
					       "vcpu_thread_%u_%u", vm->id, j);
			if (IS_ERR(vcpu->task)) {
				pr_err("Error creating task (vm=%u,vcpu=%u): %ld\n",
				       vm->id, j, PTR_ERR(vcpu->task));
				vm->vcpu_count = j;
				ret = PTR_ERR(vcpu->task);
				goto fail_with_cleanup;
			}

			get_task_struct(vcpu->task);
			vcpu->vm = vm;
			vcpu->vcpu_index = j;
			atomic_set(&vcpu->abort_sleep, 0);
			atomic_set(&vcpu->waiting_for_message, 0);
		}
	}

	/* Register protocol and socket family. */
	ret = proto_register(&hf_sock_proto, 0);
	if (ret) {
		pr_err("Unable to register protocol: %lld\n", ret);
		goto fail_with_cleanup;
	}

	ret = sock_register(&proto_family);
	if (ret) {
		pr_err("Unable to register Hafnium's socket family: %lld\n",
		       ret);
		goto fail_unregister_proto;
	}

	/*
	 * Register as a driver for the timer device, so we can register a
	 * handler for the hypervisor timer IRQ.
	 */
	ret = platform_driver_register(&hf_int_driver);
	if (ret != 0) {
		pr_err("Error registering timer driver %lld\n", ret);
		goto fail_unregister_socket;
	}

	/*
	 * Start running threads now that all is initialized.
	 *
	 * Any failures from this point on must also unregister the driver with
	 * platform_driver_unregister().
	 */
	for (i = 0; i < hf_vm_count; i++) {
		struct hf_vm *vm = &hf_vms[i];

		for (j = 0; j < vm->vcpu_count; j++)
			wake_up_process(vm->vcpu[j].task);
	}

	/* Dump vm/vcpu count info. */
	pr_info("Hafnium successfully loaded with %u VMs:\n", hf_vm_count);
	for (i = 0; i < hf_vm_count; i++) {
		struct hf_vm *vm = &hf_vms[i];

		pr_info("\tVM %u: %u vCPUS\n", vm->id, vm->vcpu_count);
	}

	return 0;

fail_unregister_socket:
	sock_unregister(PF_HF);
fail_unregister_proto:
	proto_unregister(&hf_sock_proto);
fail_with_cleanup:
	hf_free_resources();
	return ret;
}

/**
 * Frees up all resources used by the Hafnium driver in preparation for
 * unloading it.
 */
static void __exit hf_exit(void)
{
	pr_info("Preparing to unload Hafnium\n");
	sock_unregister(PF_HF);
	proto_unregister(&hf_sock_proto);
	hf_free_resources();
	platform_driver_unregister(&hf_int_driver);
	pr_info("Hafnium ready to unload\n");
}

MODULE_LICENSE("GPL v2");

module_init(hf_init);
module_exit(hf_exit);