// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright 2018 Google LLC
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
 * USA.
 */

#include <linux/atomic.h>
#include <linux/hrtimer.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/net.h>
#include <linux/sched/task.h>
#include <linux/slab.h>
#include <net/sock.h>

#include <hf/call.h>

/* TODO: Reusing AF_ECONET for now as it's otherwise unused. */
#define AF_HF AF_ECONET
#define PF_HF AF_HF

#define CONFIG_HAFNIUM_MAX_VMS 16
#define CONFIG_HAFNIUM_MAX_VCPUS 32

struct hf_vcpu {
	struct hf_vm *vm;
	uint32_t vcpu_index;
	struct task_struct *task;
	atomic_t abort_sleep;
	struct hrtimer timer;
};

struct hf_vm {
	uint32_t id;
	uint32_t vcpu_count;
	struct hf_vcpu *vcpu;
};

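/*
 * Header prepended to every datagram exchanged through the mailbox; the
 * payload bytes immediately follow it (see hf_handle_message() and
 * hf_sock_sendmsg()).
 */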
struct hf_msg_hdr {
	uint64_t src_port;
	uint64_t dst_port;
};

struct hf_sock {
	/* This needs to be the first field. */
	struct sock sk;

	/*
	 * The following fields are immutable after the socket transitions to
	 * SS_CONNECTED state.
	 */
	uint64_t local_port;
	uint64_t remote_port;
	struct hf_vm *peer_vm;
};

struct sockaddr_hf {
	sa_family_t family;
	uint32_t vm_id;
	uint64_t port;
};
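
/*
 * Illustrative user-space sketch (not part of this driver): assuming AF_HF
 * and struct sockaddr_hf are mirrored in a user-visible header, and using
 * placeholder vm_id/port values, a client with CAP_SYS_ADMIN could do:
 *
 *	int fd = socket(PF_HF, SOCK_DGRAM, 0);
 *	struct sockaddr_hf addr = {
 *		.family = AF_HF,
 *		.vm_id = 1,
 *		.port = 10,
 *	};
 *	connect(fd, (struct sockaddr *)&addr, sizeof(addr));
 *	send(fd, buf, buf_len, 0);
 *	recv(fd, buf, sizeof(buf), 0);
 */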

static struct proto hf_sock_proto = {
	.name = "hafnium",
	.owner = THIS_MODULE,
	.obj_size = sizeof(struct hf_sock),
};

static struct hf_vm *hf_vms;
static uint32_t hf_vm_count;
static struct page *hf_send_page;
static struct page *hf_recv_page;
static atomic64_t hf_next_port = ATOMIC64_INIT(0);
static DEFINE_SPINLOCK(hf_send_lock);
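/*
 * Sockets are hashed by local port alone, so a lookup walks the 128-bucket
 * table and must also match on peer VM and remote port; see
 * hf_handle_message().
 */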
static DEFINE_HASHTABLE(hf_local_port_hash, 7);
static DEFINE_SPINLOCK(hf_local_port_hash_lock);

/**
 * Wakes up the kernel thread responsible for running the given vcpu.
 *
 * Returns 0 if the thread was already running, 1 otherwise.
 */
static int hf_vcpu_wake_up(struct hf_vcpu *vcpu)
{
	/* Set a flag indicating that the thread should not go to sleep. */
	atomic_set(&vcpu->abort_sleep, 1);

	/* Set the thread to running state. */
	return wake_up_process(vcpu->task);
}

/**
 * Puts the current thread to sleep. The current thread must be responsible for
 * running the given vcpu.
 *
 * Going to sleep will fail if hf_vcpu_wake_up() or kthread_stop() was called on
 * this vcpu/thread since the last time it [re]started running.
 */
static void hf_vcpu_sleep(struct hf_vcpu *vcpu)
{
	int abort;

	set_current_state(TASK_INTERRUPTIBLE);

	/* Check the sleep-abort flag after making thread interruptible. */
	abort = atomic_read(&vcpu->abort_sleep);
	if (!abort && !kthread_should_stop())
		schedule();

	/* Set state back to running on the way out. */
	set_current_state(TASK_RUNNING);
}
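
/*
 * Note: hf_vcpu_wake_up() and hf_vcpu_sleep() together follow the usual
 * kernel sleep/wake-up pattern: the sleeper becomes TASK_INTERRUPTIBLE
 * before re-reading abort_sleep, so a wake-up that sets the flag between
 * the check and schedule() is not lost, as wake_up_process() has already
 * moved the task back to TASK_RUNNING.
 */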

/**
 * Wakes up the thread associated with the vcpu that owns the given timer. This
 * is called when the timer the thread is waiting on expires.
 */
static enum hrtimer_restart hf_vcpu_timer_expired(struct hrtimer *timer)
{
	struct hf_vcpu *vcpu = container_of(timer, struct hf_vcpu, timer);
	/* TODO: Inject interrupt. */
	hf_vcpu_wake_up(vcpu);
	return HRTIMER_NORESTART;
}

/**
 * Handles a message delivered to this VM by validating that it's well-formed
 * and then queueing it for delivery to the appropriate socket.
 */
static void hf_handle_message(struct hf_vm *sender, const void *ptr, size_t len)
{
	struct hf_sock *hsock;
	const struct hf_msg_hdr *hdr = ptr;
	struct sk_buff *skb;
	int err;

	/* Ignore messages that are too small to hold a header. */
	if (len < sizeof(struct hf_msg_hdr))
		return;

	len -= sizeof(struct hf_msg_hdr);

	/* Go through the colliding sockets. */
	rcu_read_lock();
	hash_for_each_possible_rcu(hf_local_port_hash, hsock, sk.sk_node,
				   hdr->dst_port) {
		if (hsock->peer_vm == sender &&
		    hsock->remote_port == hdr->src_port) {
			sock_hold(&hsock->sk);
			break;
		}
	}
	rcu_read_unlock();

	/* Nothing to do if we couldn't find the target. */
	if (!hsock)
		return;

	/*
	 * TODO: From this point on, there are two failure paths: when we
	 * create the skb below, and when we enqueue it to the socket. What
	 * should we do if they fail? Ideally we would have some form of flow
	 * control to prevent message loss, but how to do it efficiently?
	 *
	 * One option is to have a pre-allocated message that indicates to the
	 * sender that a message was dropped. This way we guarantee that the
	 * sender will be aware of loss and should back-off.
	 */
	/* Create the skb. */
	skb = alloc_skb(len, GFP_KERNEL);
	if (!skb)
		goto exit;

	memcpy(skb_put(skb, len), hdr + 1, len);

	/*
	 * Add the skb to the receive queue of the target socket. On success it
	 * calls sk->sk_data_ready, which is currently set to sock_def_readable,
	 * which wakes up any waiters.
	 */
	err = sock_queue_rcv_skb(&hsock->sk, skb);
	if (err)
		kfree_skb(skb);

exit:
	sock_put(&hsock->sk);
}

/**
 * This function is called when Hafnium requests that the primary VM wake up a
 * vCPU that belongs to a secondary VM.
 *
 * It wakes up the thread if it's sleeping, or kicks it if it's already running.
 *
 * If vCPU is HF_INVALID_VCPU, it injects an interrupt into a vCPU belonging to
 * the specified VM.
 */
static void hf_handle_wake_up_request(uint32_t vm_id, uint16_t vcpu,
				      uint64_t int_id)
{
	struct hf_vm *vm;

	if (vm_id < 1 || vm_id > hf_vm_count) {
		pr_warn("Request to wake up non-existent VM id: %u\n", vm_id);
		return;
	}

	vm = &hf_vms[vm_id - 1];
	if (vcpu >= vm->vcpu_count) {
		int64_t ret;

		if (vcpu != HF_INVALID_VCPU) {
			pr_warn("Request to wake up non-existent vCPU: %u.%u\n",
				vm_id, vcpu);
			return;
		}

		/*
		 * TODO: For now we're picking the first vcpu to interrupt, but
		 * we want to be smarter.
		 */
		vcpu = 0;
		ret = hf_interrupt_inject(vm_id, vcpu, int_id);
		if (ret != 1) {
			/* We don't need to wake up the vcpu. */
			return;
		}
	}

	if (hf_vcpu_wake_up(&vm->vcpu[vcpu]) == 0) {
		/*
		 * The task was already running (presumably on a different
		 * physical CPU); interrupt it. This gives Hafnium a chance to
		 * inject any new interrupts.
		 */
		kick_process(vm->vcpu[vcpu].task);
	}
}

/**
 * Notify all waiters on the given VM.
 */
static void hf_notify_waiters(uint32_t vm_id)
{
	int64_t ret;

	while ((ret = hf_mailbox_waiter_get(vm_id)) != -1) {
		if (ret == HF_PRIMARY_VM_ID) {
			/*
			 * TODO: Use this information when implementing per-vm
			 * queues.
			 */
		} else {
			hf_handle_wake_up_request(ret, HF_INVALID_VCPU,
						  HF_MAILBOX_WRITABLE_INTID);
		}
	}
}

/**
 * This is the main loop of each vcpu.
 */
static int hf_vcpu_thread(void *data)
{
	struct hf_vcpu *vcpu = data;
	struct hf_vcpu_run_return ret;

	hrtimer_init(&vcpu->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	vcpu->timer.function = &hf_vcpu_timer_expired;

	while (!kthread_should_stop()) {
		/*
		 * We're about to run the vcpu, so we can reset the abort-sleep
		 * flag.
		 */
		atomic_set(&vcpu->abort_sleep, 0);

		/* Call into Hafnium to run vcpu. */
		ret = hf_vcpu_run(vcpu->vm->id, vcpu->vcpu_index);

		switch (ret.code) {
		/* Preempted. */
		case HF_VCPU_RUN_PREEMPTED:
			if (need_resched())
				schedule();
			break;

		/* Yield. */
		case HF_VCPU_RUN_YIELD:
			if (!kthread_should_stop())
				schedule();
			break;

		/* WFI. */
		case HF_VCPU_RUN_WAIT_FOR_INTERRUPT:
			hf_vcpu_sleep(vcpu);
			break;

		/* Wake up another vcpu. */
		case HF_VCPU_RUN_WAKE_UP:
			hf_handle_wake_up_request(ret.wake_up.vm_id,
						  ret.wake_up.vcpu,
						  HF_MAILBOX_READABLE_INTID);
			break;

		/* Response available. */
		case HF_VCPU_RUN_MESSAGE:
			hf_handle_message(vcpu->vm, page_address(hf_recv_page),
					  ret.message.size);
			if (hf_mailbox_clear() == 1)
				hf_notify_waiters(HF_PRIMARY_VM_ID);
			break;

		case HF_VCPU_RUN_SLEEP:
			hrtimer_start(&vcpu->timer, ret.sleep.ns,
				      HRTIMER_MODE_REL);
			hf_vcpu_sleep(vcpu);
			hrtimer_cancel(&vcpu->timer);
			break;

		/* Notify all waiters. */
		case HF_VCPU_RUN_NOTIFY_WAITERS:
			hf_notify_waiters(vcpu->vm->id);
			break;
		}
	}

	return 0;
}

/**
 * Converts a pointer to a struct sock into a pointer to a struct hf_sock. It
 * relies on the fact that the first field of hf_sock is a sock.
 */
static struct hf_sock *hsock_from_sk(struct sock *sk)
{
	return (struct hf_sock *)sk;
}

/**
 * This is called when the last reference to the outer socket is released. For
 * example, if it's a user-space socket, when the last file descriptor pointing
 * to this socket is closed.
 *
 * It begins cleaning up resources, though some can only be cleaned up after all
 * references to the underlying socket are released, which is handled by
 * hf_sock_destruct().
 */
static int hf_sock_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct hf_sock *hsock = hsock_from_sk(sk);
	unsigned long flags;

	if (!sk)
		return 0;

	/* Shutdown for both send and receive. */
	lock_sock(sk);
	sk->sk_shutdown |= RCV_SHUTDOWN | SEND_SHUTDOWN;
	sk->sk_state_change(sk);
	release_sock(sk);

	/* Remove from the hash table, so lookups from now on won't find it. */
	spin_lock_irqsave(&hf_local_port_hash_lock, flags);
	hash_del_rcu(&hsock->sk.sk_node);
	spin_unlock_irqrestore(&hf_local_port_hash_lock, flags);

	/*
	 * TODO: When we implement a tx queue, we need to clear it here so that
	 * sk_wmem_alloc will not prevent sk from being freed (sk_free).
	 */

	/*
	 * Wait for in-flight lookups to finish. We need to do this here because
	 * in-flight lookups rely on the reference to the socket we're about to
	 * release.
	 */
	synchronize_rcu();
	sock_put(sk);
	sock->sk = NULL;

	return 0;
}

/**
 * This is called when there are no more references to the socket. It frees all
 * resources that haven't been freed during release.
 */
static void hf_sock_destruct(struct sock *sk)
{
	/*
	 * Clear the receive queue now that the handler cannot add any more
	 * skbs to it.
	 */
	skb_queue_purge(&sk->sk_receive_queue);
}

/**
 * Connects the Hafnium socket to the provided VM and port. After the socket is
 * connected, it can be used to exchange datagrams with the specified peer.
 */
static int hf_sock_connect(struct socket *sock, struct sockaddr *saddr, int len,
			   int connect_flags)
{
	struct sock *sk = sock->sk;
	struct hf_sock *hsock = hsock_from_sk(sk);
	struct hf_vm *vm;
	struct sockaddr_hf *addr;
	int err;
	unsigned long flags;

	/* Basic address validation. */
	if (len < sizeof(struct sockaddr_hf) || saddr->sa_family != AF_HF)
		return -EINVAL;

	addr = (struct sockaddr_hf *)saddr;
	if (addr->vm_id < 1 || addr->vm_id > hf_vm_count)
		return -ENETUNREACH;

	vm = &hf_vms[addr->vm_id - 1];

	/*
	 * TODO: Once we implement access control in Hafnium, check that the
	 * caller is allowed to contact the specified VM. Return -ECONNREFUSED
	 * if access is denied.
	 */

	/* Take lock to make sure state doesn't change as we connect. */
	lock_sock(sk);

	/* Only unconnected sockets are allowed to become connected. */
	if (sock->state != SS_UNCONNECTED) {
		err = -EISCONN;
		goto exit;
	}

	hsock->local_port = atomic64_inc_return(&hf_next_port);
	hsock->remote_port = addr->port;
	hsock->peer_vm = vm;

	sock->state = SS_CONNECTED;

	/* Add socket to hash table now that it's fully initialised. */
	spin_lock_irqsave(&hf_local_port_hash_lock, flags);
	hash_add_rcu(hf_local_port_hash, &sk->sk_node, hsock->local_port);
	spin_unlock_irqrestore(&hf_local_port_hash_lock, flags);

	err = 0;
exit:
	release_sock(sk);
	return err;
}

/**
 * Sends the given skb to the appropriate VM by calling Hafnium. It will also
 * trigger the wake up of a recipient VM.
 *
 * Takes ownership of the skb on success.
 */
static int hf_send_skb(struct sk_buff *skb)
{
	unsigned long flags;
	int64_t ret;
	struct hf_sock *hsock = hsock_from_sk(skb->sk);
	struct hf_vm *vm = hsock->peer_vm;

	/*
	 * Call Hafnium under the send lock so that we serialize the use of the
	 * global send buffer.
	 */
	spin_lock_irqsave(&hf_send_lock, flags);
	memcpy(page_address(hf_send_page), skb->data, skb->len);
	ret = hf_mailbox_send(vm->id, skb->len, false);
	spin_unlock_irqrestore(&hf_send_lock, flags);

	if (ret < 0)
		return -EAGAIN;

	/* Wake some vcpu up to handle the new message. */
	hf_handle_wake_up_request(vm->id, ret, HF_MAILBOX_READABLE_INTID);

	kfree_skb(skb);

	return 0;
}

/**
 * Determines if the given socket is in the connected state. It acquires and
 * releases the socket lock.
 */
static bool hf_sock_is_connected(struct socket *sock)
{
	bool ret;

	lock_sock(sock->sk);
	ret = sock->state == SS_CONNECTED;
	release_sock(sock->sk);

	return ret;
}

/**
 * Sends a message to the VM & port the socket is connected to. All variants
 * of write/send/sendto/sendmsg eventually call this function.
 */
static int hf_sock_sendmsg(struct socket *sock, struct msghdr *m, size_t len)
{
	struct sock *sk = sock->sk;
	struct sk_buff *skb;
	int err;
	struct hf_msg_hdr *hdr;
	struct hf_sock *hsock = hsock_from_sk(sk);

	/* Check length. */
	if (len > HF_MAILBOX_SIZE - sizeof(struct hf_msg_hdr))
		return -EMSGSIZE;

	/* We don't allow the destination address to be specified. */
	if (m->msg_namelen > 0)
		return -EISCONN;

	/* We don't support out of band messages. */
	if (m->msg_flags & MSG_OOB)
		return -EOPNOTSUPP;

	/*
	 * Ensure that the socket is connected. We don't need to hold the socket
	 * lock (acquired and released by hf_sock_is_connected) for the
	 * remainder of the function because the fields we care about are
	 * immutable once the state is SS_CONNECTED.
	 */
	if (!hf_sock_is_connected(sock))
		return -ENOTCONN;

	/*
	 * Allocate an skb for this write. If there isn't enough room in the
	 * socket's send buffer (sk_wmem_alloc >= sk_sndbuf), this will block
	 * (if it's a blocking call). On success, it increments sk_wmem_alloc
	 * and sets up the skb such that sk_wmem_alloc gets decremented when
	 * the skb is freed (sock_wfree gets called).
	 */
	skb = sock_alloc_send_skb(sk, len + sizeof(struct hf_msg_hdr),
				  m->msg_flags & MSG_DONTWAIT, &err);
	if (!skb)
		return err;

	/* Reserve room for the header and initialise it. */
	skb_reserve(skb, sizeof(struct hf_msg_hdr));
	hdr = skb_push(skb, sizeof(struct hf_msg_hdr));
	hdr->src_port = hsock->local_port;
	hdr->dst_port = hsock->remote_port;

	/* Allocate area for the contents, then copy into skb. */
	if (!copy_from_iter_full(skb_put(skb, len), len, &m->msg_iter)) {
		err = -EFAULT;
		goto err_cleanup;
	}

	/*
	 * TODO: We currently do this inline, but when we have support for
	 * readiness notification from Hafnium, we must add this to a per-VM tx
	 * queue that can make progress when the VM becomes writable. This will
	 * fix send buffering and poll readiness notification.
	 */
	err = hf_send_skb(skb);
	if (err)
		goto err_cleanup;

	return 0;

err_cleanup:
	kfree_skb(skb);
	return err;
}

/**
 * Receives a message originating from the VM & port the socket is connected
 * to. All variants of read/recv/recvfrom/recvmsg eventually call this
 * function.
 */
static int hf_sock_recvmsg(struct socket *sock, struct msghdr *m, size_t len,
			   int flags)
{
	struct sock *sk = sock->sk;
	struct sk_buff *skb;
	int err;
	size_t copy_len;

	if (!hf_sock_is_connected(sock))
		return -ENOTCONN;

	/* Grab the next skb from the receive queue. */
	skb = skb_recv_datagram(sk, flags, flags & MSG_DONTWAIT, &err);
	if (!skb)
		return err;

	/* Make sure we don't copy more than what fits in the output buffer. */
	copy_len = skb->len;
	if (copy_len > len) {
		copy_len = len;
		m->msg_flags |= MSG_TRUNC;
	}

	/* Make sure we don't overflow the return value type. */
	if (copy_len > INT_MAX) {
		copy_len = INT_MAX;
		m->msg_flags |= MSG_TRUNC;
	}

	/* Copy skb to output iterator, then free it. */
	err = skb_copy_datagram_msg(skb, 0, m, copy_len);
	skb_free_datagram(sk, skb);
	if (err)
		return err;

	return copy_len;
}

/**
 * This function is called when a Hafnium socket is created. It initialises all
 * state such that the caller will be able to connect the socket and then send
 * and receive messages through it.
 */
static int hf_sock_create(struct net *net, struct socket *sock, int protocol,
			  int kern)
{
	static const struct proto_ops ops = {
		.family = PF_HF,
		.owner = THIS_MODULE,
		.release = hf_sock_release,
		.bind = sock_no_bind,
		.connect = hf_sock_connect,
		.socketpair = sock_no_socketpair,
		.accept = sock_no_accept,
		.ioctl = sock_no_ioctl,
		.listen = sock_no_listen,
		.shutdown = sock_no_shutdown,
		.setsockopt = sock_no_setsockopt,
		.getsockopt = sock_no_getsockopt,
		.sendmsg = hf_sock_sendmsg,
		.recvmsg = hf_sock_recvmsg,
		.mmap = sock_no_mmap,
		.sendpage = sock_no_sendpage,
		.poll = datagram_poll,
	};
	struct sock *sk;

	if (sock->type != SOCK_DGRAM)
		return -ESOCKTNOSUPPORT;

	if (protocol != 0)
		return -EPROTONOSUPPORT;

	/*
	 * For now we only allow callers with sys admin capability to create
	 * Hafnium sockets.
	 */
	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	/* Allocate and initialise socket. */
	sk = sk_alloc(net, PF_HF, GFP_KERNEL, &hf_sock_proto, kern);
	if (!sk)
		return -ENOMEM;

	sock_init_data(sock, sk);

	sk->sk_destruct = hf_sock_destruct;
	sock->ops = &ops;
	sock->state = SS_UNCONNECTED;

	return 0;
}

/**
 * Frees all resources, including threads, associated with the Hafnium driver.
 */
static void hf_free_resources(void)
{
	uint32_t i, j;

	/*
	 * First stop all worker threads. We need to do this before freeing
	 * resources because workers may reference each other, so it is only
	 * safe to free resources after they have all stopped.
	 */
	for (i = 0; i < hf_vm_count; i++) {
		struct hf_vm *vm = &hf_vms[i];

		for (j = 0; j < vm->vcpu_count; j++)
			kthread_stop(vm->vcpu[j].task);
	}

	/* Free resources. */
	for (i = 0; i < hf_vm_count; i++) {
		struct hf_vm *vm = &hf_vms[i];

		for (j = 0; j < vm->vcpu_count; j++)
			put_task_struct(vm->vcpu[j].task);
		kfree(vm->vcpu);
	}

	kfree(hf_vms);
}

/**
 * Initializes the Hafnium driver by creating a thread for each vCPU of each
 * virtual machine.
 */
static int __init hf_init(void)
{
	static const struct net_proto_family proto_family = {
		.family = PF_HF,
		.create = hf_sock_create,
		.owner = THIS_MODULE,
	};
	int64_t ret;
	uint32_t i, j;
	uint32_t total_vm_count;
	uint32_t total_vcpu_count;

	/* Allocate a page for send and receive buffers. */
	hf_send_page = alloc_page(GFP_KERNEL);
	if (!hf_send_page) {
		pr_err("Unable to allocate send buffer\n");
		return -ENOMEM;
	}

	hf_recv_page = alloc_page(GFP_KERNEL);
	if (!hf_recv_page) {
		__free_page(hf_send_page);
		pr_err("Unable to allocate receive buffer\n");
		return -ENOMEM;
	}

	/*
	 * Configure both addresses. Once configured, we cannot free these pages
	 * because the hypervisor will use them, even if the module is
	 * unloaded.
	 */
	ret = hf_vm_configure(page_to_phys(hf_send_page),
			      page_to_phys(hf_recv_page));
	if (ret) {
		__free_page(hf_send_page);
		__free_page(hf_recv_page);
		/*
		 * TODO: We may want to grab this information from hypervisor
		 * and go from there.
		 */
		pr_err("Unable to configure VM\n");
		return -EIO;
	}

	/* Get the number of VMs. */
	ret = hf_vm_get_count();
	if (ret < 0) {
		pr_err("Unable to retrieve number of VMs: %lld\n", ret);
		return -EIO;
	}

	/* Confirm the maximum number of VMs looks sane. */
	BUILD_BUG_ON(CONFIG_HAFNIUM_MAX_VMS < 1);
	BUILD_BUG_ON(CONFIG_HAFNIUM_MAX_VMS > U16_MAX);

	/* Validate the number of VMs. There must at least be the primary. */
	if (ret < 1 || ret > CONFIG_HAFNIUM_MAX_VMS) {
		pr_err("Number of VMs is out of range: %lld\n", ret);
		return -EDQUOT;
	}

	/* Only track the secondary VMs. */
	total_vm_count = ret - 1;
	hf_vms =
		kmalloc_array(total_vm_count, sizeof(struct hf_vm), GFP_KERNEL);
	if (!hf_vms)
		return -ENOMEM;

	/* Initialize each VM. */
	total_vcpu_count = 0;
	for (i = 0; i < total_vm_count; i++) {
		struct hf_vm *vm = &hf_vms[i];

		/* Adjust the ID as only the secondaries are tracked. */
		vm->id = i + 1;

		ret = hf_vcpu_get_count(vm->id);
		if (ret < 0) {
			pr_err("HF_VCPU_GET_COUNT failed for vm=%u: %lld\n",
			       vm->id, ret);
			ret = -EIO;
			goto fail_with_cleanup;
		}

		/* Avoid overflowing the vcpu count. */
		if (ret > (U32_MAX - total_vcpu_count)) {
			pr_err("Too many vcpus: %u\n", total_vcpu_count);
			ret = -EDQUOT;
			goto fail_with_cleanup;
		}

		/* Confirm the maximum number of VCPUs looks sane. */
		BUILD_BUG_ON(CONFIG_HAFNIUM_MAX_VCPUS < 1);
		BUILD_BUG_ON(CONFIG_HAFNIUM_MAX_VCPUS > U16_MAX);

		/* Enforce the limit on vcpus. */
		total_vcpu_count += ret;
		if (total_vcpu_count > CONFIG_HAFNIUM_MAX_VCPUS) {
			pr_err("Too many vcpus: %u\n", total_vcpu_count);
			ret = -EDQUOT;
			goto fail_with_cleanup;
		}

		vm->vcpu_count = ret;
		vm->vcpu = kmalloc_array(vm->vcpu_count, sizeof(struct hf_vcpu),
					 GFP_KERNEL);
		if (!vm->vcpu) {
			ret = -ENOMEM;
			goto fail_with_cleanup;
		}

		/* Update the number of initialized VMs. */
		hf_vm_count = i + 1;

		/* Create a kernel thread for each vcpu. */
		for (j = 0; j < vm->vcpu_count; j++) {
			struct hf_vcpu *vcpu = &vm->vcpu[j];

			vcpu->task =
				kthread_create(hf_vcpu_thread, vcpu,
					       "vcpu_thread_%u_%u", vm->id, j);
			if (IS_ERR(vcpu->task)) {
				pr_err("Error creating task (vm=%u,vcpu=%u): %ld\n",
				       vm->id, j, PTR_ERR(vcpu->task));
				vm->vcpu_count = j;
				ret = PTR_ERR(vcpu->task);
				goto fail_with_cleanup;
			}

			get_task_struct(vcpu->task);
			vcpu->vm = vm;
			vcpu->vcpu_index = j;
			atomic_set(&vcpu->abort_sleep, 0);
		}
	}

	/* Register protocol and socket family. */
	ret = proto_register(&hf_sock_proto, 0);
	if (ret) {
		pr_err("Unable to register protocol: %lld\n", ret);
		goto fail_with_cleanup;
	}

	ret = sock_register(&proto_family);
	if (ret) {
		pr_err("Unable to register Hafnium's socket family: %lld\n",
		       ret);
		goto fail_unregister_proto;
	}

	/*
	 * Start running threads now that all is initialized.
	 *
	 * Any failures from this point on must also unregister the socket
	 * family with a call to sock_unregister().
	 */
	for (i = 0; i < hf_vm_count; i++) {
		struct hf_vm *vm = &hf_vms[i];

		for (j = 0; j < vm->vcpu_count; j++)
			wake_up_process(vm->vcpu[j].task);
	}

	/* Dump vm/vcpu count info. */
	pr_info("Hafnium successfully loaded with %u VMs:\n", hf_vm_count);
	for (i = 0; i < hf_vm_count; i++) {
		struct hf_vm *vm = &hf_vms[i];

		pr_info("\tVM %u: %u vCPUS\n", vm->id, vm->vcpu_count);
	}

	return 0;

fail_unregister_proto:
	proto_unregister(&hf_sock_proto);
fail_with_cleanup:
	hf_free_resources();
	return ret;
}

/**
 * Frees up all resources used by the Hafnium driver in preparation for
 * unloading it.
 */
static void __exit hf_exit(void)
{
	pr_info("Preparing to unload Hafnium\n");
	sock_unregister(PF_HF);
	proto_unregister(&hf_sock_proto);
	hf_free_resources();
	pr_info("Hafnium ready to unload\n");
}

MODULE_LICENSE("GPL v2");

module_init(hf_init);
module_exit(hf_exit);