blob: e5141271ea8a8ee4f2bf237ee00d6b3e78013c52 [file] [log] [blame]
Andrew Scull01778112019-01-14 15:37:53 +00001// SPDX-License-Identifier: GPL-2.0-only
Andrew Walbran13c3a0b2018-11-30 11:51:53 +00002/*
3 * Copyright 2018 Google LLC
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License
7 * version 2 as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
Andrew Walbran13c3a0b2018-11-30 11:51:53 +000013 */
14
Wedson Almeida Filho2f62b422018-06-19 06:44:32 +010015#include <linux/hrtimer.h>
Wedson Almeida Filho1ee35652018-12-24 01:36:48 +000016#include <linux/atomic.h>
Wedson Almeida Filho2f62b422018-06-19 06:44:32 +010017#include <linux/init.h>
18#include <linux/kernel.h>
19#include <linux/kthread.h>
Wedson Almeida Filhof9e11922018-08-12 15:54:31 +010020#include <linux/mm.h>
Wedson Almeida Filho2f62b422018-06-19 06:44:32 +010021#include <linux/module.h>
22#include <linux/sched/task.h>
23#include <linux/slab.h>
Wedson Almeida Filho1ee35652018-12-24 01:36:48 +000024#include <linux/net.h>
25#include <net/sock.h>
Wedson Almeida Filho2f62b422018-06-19 06:44:32 +010026
Andrew Scull55704232018-08-10 17:19:54 +010027#include <hf/call.h>
28
Wedson Almeida Filho1ee35652018-12-24 01:36:48 +000029/* TODO: Reusing AF_ECONET for now as it's otherwise unused. */
30#define AF_HF AF_ECONET
31#define PF_HF AF_HF
32
Andrew Scull82257c42018-10-01 10:37:48 +010033#define CONFIG_HAFNIUM_MAX_VMS 16
34#define CONFIG_HAFNIUM_MAX_VCPUS 32
35
/**
 * Driver-side state for one vCPU of a secondary VM.
 */
struct hf_vcpu {
	/* The VM this vCPU belongs to. */
	struct hf_vm *vm;
	/* Index of this vCPU within its VM. */
	uint32_t vcpu_index;
	/* Kernel thread that runs this vCPU (see hf_vcpu_thread). */
	struct task_struct *task;
	/*
	 * Set by hf_vcpu_wake_up() to tell the thread not to go to sleep;
	 * cleared by the thread each time it is about to run the vCPU.
	 */
	atomic_t abort_sleep;
	/* Timer used to bound HF_VCPU_RUN_SLEEP requests. */
	struct hrtimer timer;
};
43
/**
 * Driver-side state for one secondary VM.
 */
struct hf_vm {
	/* VM id as assigned by the hypervisor (secondaries start at 1). */
	uint32_t id;
	/* Number of entries in the vcpu array. */
	uint32_t vcpu_count;
	/* Array of vcpu_count vCPU states. */
	struct hf_vcpu *vcpu;
};
49
/**
 * Header prepended to every datagram exchanged over a Hafnium socket;
 * identifies the source and destination ports of the message.
 */
struct hf_msg_hdr {
	uint64_t src_port;
	uint64_t dst_port;
};
54
/**
 * Per-socket state for an AF_HF socket.
 */
struct hf_sock {
	/*
	 * This needs to be the first field so that a struct sock pointer can
	 * be converted to a struct hf_sock pointer (see hsock_from_sk()).
	 */
	struct sock sk;

	/*
	 * The following fields are immutable after the socket transitions to
	 * SS_CONNECTED state.
	 */
	uint64_t local_port;
	uint64_t remote_port;
	struct hf_vm *peer_vm;
};
67
/**
 * Address format for AF_HF sockets: a destination VM and a port on it.
 */
struct sockaddr_hf {
	/* Must be AF_HF. */
	sa_family_t family;
	/* Id of the peer VM; valid ids are 1..hf_vm_count. */
	uint32_t vm_id;
	/* Port on the peer VM to exchange datagrams with. */
	uint64_t port;
};
73
/* Protocol definition registered with the kernel socket layer in hf_init(). */
static struct proto hf_sock_proto = {
	.name = "hafnium",
	.owner = THIS_MODULE,
	.obj_size = sizeof(struct hf_sock),
};
79
/* Array of hf_vm_count secondary VMs, indexed by (vm id - 1). */
static struct hf_vm *hf_vms;
/* Number of entries of hf_vms that have been initialised. */
static uint32_t hf_vm_count;
/* Page shared with the hypervisor as the primary VM's send buffer. */
static struct page *hf_send_page;
/* Page shared with the hypervisor as the primary VM's receive buffer. */
static struct page *hf_recv_page;
/* Source of locally-unique port numbers assigned to new sockets. */
static atomic64_t hf_next_port = ATOMIC64_INIT(0);
/* Serialises use of the single global send buffer (hf_send_page). */
static DEFINE_SPINLOCK(hf_send_lock);
/* Sockets hashed by local port; readers use RCU (see hf_handle_message). */
static DEFINE_HASHTABLE(hf_local_port_hash, 7);
/* Protects writers of hf_local_port_hash; readers rely on RCU instead. */
static DEFINE_SPINLOCK(hf_local_port_hash_lock);
Wedson Almeida Filho2f62b422018-06-19 06:44:32 +010088
/**
 * Wakes up the kernel thread responsible for running the given vcpu.
 *
 * Returns 0 if the thread was already running, 1 otherwise.
 */
static int hf_vcpu_wake_up(struct hf_vcpu *vcpu)
{
	/*
	 * Set a flag indicating that the thread should not go to sleep. This
	 * must happen before the wake-up so that hf_vcpu_sleep(), which checks
	 * the flag after marking itself interruptible, cannot miss it.
	 */
	atomic_set(&vcpu->abort_sleep, 1);

	/* Set the thread to running state. */
	return wake_up_process(vcpu->task);
}
102
103/**
104 * Puts the current thread to sleep. The current thread must be responsible for
105 * running the given vcpu.
106 *
107 * Going to sleep will fail if hf_vcpu_wake_up() or kthread_stop() was called on
108 * this vcpu/thread since the last time it [re]started running.
109 */
110static void hf_vcpu_sleep(struct hf_vcpu *vcpu)
111{
112 int abort;
113
114 set_current_state(TASK_INTERRUPTIBLE);
115
116 /* Check the sleep-abort flag after making thread interruptible. */
117 abort = atomic_read(&vcpu->abort_sleep);
118 if (!abort && !kthread_should_stop())
119 schedule();
120
121 /* Set state back to running on the way out. */
122 set_current_state(TASK_RUNNING);
123}
124
125/**
Wedson Almeida Filho2f62b422018-06-19 06:44:32 +0100126 * Wakes up the thread associated with the vcpu that owns the given timer. This
127 * is called when the timer the thread is waiting on expires.
128 */
129static enum hrtimer_restart hf_vcpu_timer_expired(struct hrtimer *timer)
130{
131 struct hf_vcpu *vcpu = container_of(timer, struct hf_vcpu, timer);
Wedson Almeida Filho7fe62332018-12-15 03:09:57 +0000132 /* TODO: Inject interrupt. */
133 hf_vcpu_wake_up(vcpu);
Wedson Almeida Filho2f62b422018-06-19 06:44:32 +0100134 return HRTIMER_NORESTART;
135}
136
/**
 * Handles a message delivered to this VM by validating that it's well-formed
 * and then queueing it for delivery to the appropriate socket.
 *
 * @sender: the secondary VM the message came from.
 * @ptr:    start of the message (header followed by payload).
 * @len:    total message length, including the header.
 */
static void hf_handle_message(struct hf_vm *sender, const void *ptr, size_t len)
{
	struct hf_sock *hsock;
	const struct hf_msg_hdr *hdr = ptr;
	struct sk_buff *skb;
	int err;

	/* Ignore messages that are too small to hold a header. */
	if (len < sizeof(struct hf_msg_hdr))
		return;

	/* From here on, len is the payload length only. */
	len -= sizeof(struct hf_msg_hdr);

	/*
	 * Go through the colliding sockets. A match requires the same peer VM
	 * and remote port; on a match, take a reference before leaving the RCU
	 * read section. If the loop terminates naturally, the iterator leaves
	 * hsock as NULL.
	 */
	rcu_read_lock();
	hash_for_each_possible_rcu(hf_local_port_hash, hsock, sk.sk_node,
				   hdr->dst_port) {
		if (hsock->peer_vm == sender &&
		    hsock->remote_port == hdr->src_port) {
			sock_hold(&hsock->sk);
			break;
		}
	}
	rcu_read_unlock();

	/* Nothing to do if we couldn't find the target. */
	if (!hsock)
		return;

	/*
	 * TODO: From this point on, there are two failure paths: when we
	 * create the skb below, and when we enqueue it to the socket. What
	 * should we do if they fail? Ideally we would have some form of flow
	 * control to prevent message loss, but how to do it efficiently?
	 *
	 * One option is to have a pre-allocated message that indicates to the
	 * sender that a message was dropped. This way we guarantee that the
	 * sender will be aware of loss and should back-off.
	 */
	/* Create the skb. */
	skb = alloc_skb(len, GFP_KERNEL);
	if (!skb)
		goto exit;

	/* hdr + 1 points just past the header, i.e. at the payload. */
	memcpy(skb_put(skb, len), hdr + 1, len);

	/*
	 * Add the skb to the receive queue of the target socket. On success it
	 * calls sk->sk_data_ready, which is currently set to sock_def_readable,
	 * which wakes up any waiters.
	 */
	err = sock_queue_rcv_skb(&hsock->sk, skb);
	if (err)
		kfree_skb(skb);

exit:
	sock_put(&hsock->sk);
}
199
/**
 * This function is called when Hafnium requests that the primary VM wake up a
 * vCPU that belongs to a secondary VM.
 *
 * It wakes up the thread if it's sleeping, or kicks it if it's already running.
 *
 * If vCPU is HF_INVALID_VCPU, it injects an interrupt into a vCPU belonging to
 * the specified VM.
 *
 * @vm_id:  id of the target VM; must be in 1..hf_vm_count.
 * @vcpu:   index of the vCPU to wake, or HF_INVALID_VCPU.
 * @int_id: interrupt to inject when no specific vCPU was requested.
 */
static void hf_handle_wake_up_request(uint32_t vm_id, uint16_t vcpu,
				      uint64_t int_id)
{
	struct hf_vm *vm;

	/* Only secondary VMs (ids 1..hf_vm_count) are tracked in hf_vms. */
	if (vm_id < 1 || vm_id > hf_vm_count) {
		pr_warn("Request to wake up non-existent VM id: %u\n", vm_id);
		return;
	}

	vm = &hf_vms[vm_id - 1];
	if (vcpu >= vm->vcpu_count) {
		int64_t ret;

		/* An out-of-range index other than the sentinel is an error. */
		if (vcpu != HF_INVALID_VCPU) {
			pr_warn("Request to wake up non-existent vCPU: %u.%u\n",
				vm_id, vcpu);
			return;
		}

		/*
		 * TODO: For now we're picking the first vcpu to interrupt, but
		 * we want to be smarter.
		 */
		vcpu = 0;
		ret = hf_interrupt_inject(vm_id, vcpu, int_id);
		if (ret != 1) {
			/* We don't need to wake up the vcpu. */
			return;
		}
	}

	if (hf_vcpu_wake_up(&vm->vcpu[vcpu]) == 0) {
		/*
		 * The task was already running (presumably on a different
		 * physical CPU); interrupt it. This gives Hafnium a chance to
		 * inject any new interrupts.
		 */
		kick_process(vm->vcpu[vcpu].task);
	}
}
250
251/**
Wedson Almeida Filhocd9fef92019-01-11 21:24:08 +0000252 * Notify all waiters on the given VM.
253 */
254static void hf_notify_waiters(uint32_t vm_id)
255{
256 int64_t ret;
257
258 while ((ret = hf_mailbox_waiter_get(vm_id)) != -1) {
259 if (ret == HF_PRIMARY_VM_ID) {
260 /*
261 * TODO: Use this information when implementing per-vm
262 * queues.
263 */
264 } else {
265 hf_handle_wake_up_request(ret, HF_INVALID_VCPU,
266 HF_MAILBOX_WRITABLE_INTID);
267 }
268 }
269}
270
/**
 * This is the main loop of each vcpu.
 *
 * Repeatedly calls into Hafnium to run the vCPU and dispatches on the reason
 * execution was returned to the primary, until kthread_stop() is called.
 * Always returns 0 (the kthread exit code is unused).
 */
static int hf_vcpu_thread(void *data)
{
	struct hf_vcpu *vcpu = data;
	struct hf_vcpu_run_return ret;

	hrtimer_init(&vcpu->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	vcpu->timer.function = &hf_vcpu_timer_expired;

	while (!kthread_should_stop()) {
		/*
		 * We're about to run the vcpu, so we can reset the abort-sleep
		 * flag.
		 */
		atomic_set(&vcpu->abort_sleep, 0);

		/* Call into Hafnium to run vcpu. */
		ret = hf_vcpu_run(vcpu->vm->id, vcpu->vcpu_index);

		switch (ret.code) {
		/* Preempted. */
		case HF_VCPU_RUN_PREEMPTED:
			if (need_resched())
				schedule();
			break;

		/* Yield. */
		case HF_VCPU_RUN_YIELD:
			if (!kthread_should_stop())
				schedule();
			break;

		/* WFI: sleep until woken (interrupt injection, or stop). */
		case HF_VCPU_RUN_WAIT_FOR_INTERRUPT:
			hf_vcpu_sleep(vcpu);
			break;

		/* Wake up another vcpu. */
		case HF_VCPU_RUN_WAKE_UP:
			hf_handle_wake_up_request(ret.wake_up.vm_id,
						  ret.wake_up.vcpu,
						  HF_MAILBOX_READBLE_INTID);
			break;

		/* Response available: deliver it, then free the mailbox. */
		case HF_VCPU_RUN_MESSAGE:
			hf_handle_message(vcpu->vm, page_address(hf_recv_page),
					  ret.message.size);
			if (hf_mailbox_clear() == 1)
				hf_notify_waiters(HF_PRIMARY_VM_ID);
			break;

		/* Sleep for the requested duration, or until woken early. */
		case HF_VCPU_RUN_SLEEP:
			hrtimer_start(&vcpu->timer, ret.sleep.ns,
				      HRTIMER_MODE_REL);
			hf_vcpu_sleep(vcpu);
			hrtimer_cancel(&vcpu->timer);
			break;

		/* Notify all waiters. */
		case HF_VCPU_RUN_NOTIFY_WAITERS:
			hf_notify_waiters(vcpu->vm->id);
			break;
		}
	}

	return 0;
}
341
342/**
Wedson Almeida Filho1ee35652018-12-24 01:36:48 +0000343 * Converts a pointer to a struct sock into a pointer to a struct hf_sock. It
344 * relies on the fact that the first field of hf_sock is a sock.
345 */
346static struct hf_sock *hsock_from_sk(struct sock *sk)
347{
348 return (struct hf_sock *)sk;
349}
350
/**
 * This is called when the last reference to the outer socket is released. For
 * example, if it's a user-space socket, when the last file descriptor pointing
 * to this socket is closed.
 *
 * It begins cleaning up resources, though some can only be cleaned up after all
 * references to the underlying socket are released, which is handled by
 * hf_sock_destruct().
 */
static int hf_sock_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct hf_sock *hsock = hsock_from_sk(sk);
	unsigned long flags;

	/* Nothing to release if the socket was never bound to a sock. */
	if (!sk)
		return 0;

	/* Shutdown for both send and receive. */
	lock_sock(sk);
	sk->sk_shutdown |= RCV_SHUTDOWN | SEND_SHUTDOWN;
	sk->sk_state_change(sk);
	release_sock(sk);

	/* Remove from the hash table, so lookups from now on won't find it. */
	spin_lock_irqsave(&hf_local_port_hash_lock, flags);
	hash_del_rcu(&hsock->sk.sk_node);
	spin_unlock_irqrestore(&hf_local_port_hash_lock, flags);

	/*
	 * TODO: When we implement a tx queue, we need to clear it here so that
	 * sk_wmem_alloc will not prevent sk from being freed (sk_free).
	 */

	/*
	 * Wait for in-flight lookups to finish. We need to do this here because
	 * in-flight lookups rely on the reference to the socket we're about to
	 * release.
	 */
	synchronize_rcu();
	sock_put(sk);
	sock->sk = NULL;

	return 0;
}
396
/**
 * This is called when there are no more references to the socket. It frees all
 * resources that haven't been freed during release.
 */
static void hf_sock_destruct(struct sock *sk)
{
	/*
	 * Clear the receive queue now that the handler cannot add any more
	 * skbs to it.
	 */
	skb_queue_purge(&sk->sk_receive_queue);
}
409
410/**
411 * Connects the Hafnium socket to the provided VM and port. After the socket is
412 * connected, it can be used to exchange datagrams with the specified peer.
413 */
Andrew Scull01778112019-01-14 15:37:53 +0000414static int hf_sock_connect(struct socket *sock, struct sockaddr *saddr, int len,
415 int connect_flags)
Wedson Almeida Filho1ee35652018-12-24 01:36:48 +0000416{
417 struct sock *sk = sock->sk;
418 struct hf_sock *hsock = hsock_from_sk(sk);
419 struct hf_vm *vm;
420 struct sockaddr_hf *addr;
421 int err;
422 unsigned long flags;
423
424 /* Basic address validation. */
425 if (len < sizeof(struct sockaddr_hf) || saddr->sa_family != AF_HF)
426 return -EINVAL;
427
428 addr = (struct sockaddr_hf *)saddr;
429 if (addr->vm_id > hf_vm_count)
430 return -ENETUNREACH;
431
432 vm = &hf_vms[addr->vm_id - 1];
433
434 /*
435 * TODO: Once we implement access control in Hafnium, check that the
436 * caller is allowed to contact the specified VM. Return -ECONNREFUSED
437 * if access is denied.
438 */
439
440 /* Take lock to make sure state doesn't change as we connect. */
441 lock_sock(sk);
442
443 /* Only unconnected sockets are allowed to become connected. */
444 if (sock->state != SS_UNCONNECTED) {
445 err = -EISCONN;
446 goto exit;
447 }
448
449 hsock->local_port = atomic64_inc_return(&hf_next_port);
450 hsock->remote_port = addr->port;
451 hsock->peer_vm = vm;
452
453 sock->state = SS_CONNECTED;
454
455 /* Add socket to hash table now that it's fully initialised. */
456 spin_lock_irqsave(&hf_local_port_hash_lock, flags);
457 hash_add_rcu(hf_local_port_hash, &sk->sk_node, hsock->local_port);
458 spin_unlock_irqrestore(&hf_local_port_hash_lock, flags);
459
460 err = 0;
461exit:
462 release_sock(sk);
463 return err;
464}
465
/**
 * Sends the given skb to the appropriate VM by calling Hafnium. It will also
 * trigger the wake up of a recipient VM.
 *
 * Takes ownership of the skb on success (it is freed here); on failure the
 * caller still owns it. Returns 0 on success, -EAGAIN otherwise.
 */
static int hf_send_skb(struct sk_buff *skb)
{
	unsigned long flags;
	int64_t ret;
	struct hf_sock *hsock = hsock_from_sk(skb->sk);
	struct hf_vm *vm = hsock->peer_vm;

	/*
	 * Call Hafnium under the send lock so that we serialize the use of the
	 * global send buffer.
	 */
	spin_lock_irqsave(&hf_send_lock, flags);
	memcpy(page_address(hf_send_page), skb->data, skb->len);
	ret = hf_mailbox_send(vm->id, skb->len, false);
	spin_unlock_irqrestore(&hf_send_lock, flags);

	if (ret < 0)
		return -EAGAIN;

	/*
	 * Wake some vcpu up to handle the new message. NOTE(review): a
	 * non-negative return from hf_mailbox_send is used here as the vCPU
	 * index to wake — confirm against the hypervisor ABI.
	 */
	hf_handle_wake_up_request(vm->id, ret, HF_MAILBOX_READBLE_INTID);

	kfree_skb(skb);

	return 0;
}
498
499/**
500 * Determines if the given socket is in the connected state. It acquires and
501 * releases the socket lock.
502 */
503static bool hf_sock_is_connected(struct socket *sock)
504{
505 bool ret;
506
507 lock_sock(sock->sk);
508 ret = sock->state == SS_CONNECTED;
509 release_sock(sock->sk);
510
511 return ret;
512}
513
/**
 * Sends a message to the VM &amp; port the socket is connected to. All variants
 * of write/send/sendto/sendmsg eventually call this function.
 *
 * Returns 0 on success or a negative errno (-EMSGSIZE, -EISCONN, -EOPNOTSUPP,
 * -ENOTCONN, -EFAULT, or an allocation/send failure).
 */
static int hf_sock_sendmsg(struct socket *sock, struct msghdr *m, size_t len)
{
	struct sock *sk = sock->sk;
	struct sk_buff *skb;
	int err;
	struct hf_msg_hdr *hdr;
	struct hf_sock *hsock = hsock_from_sk(sk);

	/* Check length: header plus payload must fit in the mailbox. */
	if (len > HF_MAILBOX_SIZE - sizeof(struct hf_msg_hdr))
		return -EMSGSIZE;

	/* We don't allow the destination address to be specified. */
	if (m->msg_namelen > 0)
		return -EISCONN;

	/* We don't support out of band messages. */
	if (m->msg_flags & MSG_OOB)
		return -EOPNOTSUPP;

	/*
	 * Ensure that the socket is connected. We don't need to hold the socket
	 * lock (acquired and released by hf_sock_is_connected) for the
	 * remainder of the function because the fields we care about are
	 * immutable once the state is SS_CONNECTED.
	 */
	if (!hf_sock_is_connected(sock))
		return -ENOTCONN;

	/*
	 * Allocate an skb for this write. If there isn't enough room in the
	 * socket's send buffer (sk_wmem_alloc >= sk_sndbuf), this will block
	 * (if it's a blocking call). On success, it increments sk_wmem_alloc
	 * and sets up the skb such that sk_wmem_alloc gets decremented when
	 * the skb is freed (sock_wfree gets called).
	 */
	skb = sock_alloc_send_skb(sk, len + sizeof(struct hf_msg_hdr),
				  m->msg_flags & MSG_DONTWAIT, &err);
	if (!skb)
		return err;

	/* Reserve room for the header and initialise it. */
	skb_reserve(skb, sizeof(struct hf_msg_hdr));
	hdr = skb_push(skb, sizeof(struct hf_msg_hdr));
	hdr->src_port = hsock->local_port;
	hdr->dst_port = hsock->remote_port;

	/* Allocate area for the contents, then copy into skb. */
	if (!copy_from_iter_full(skb_put(skb, len), len, &m->msg_iter)) {
		err = -EFAULT;
		goto err_cleanup;
	}

	/*
	 * TODO: We currently do this inline, but when we have support for
	 * readiness notification from Hafnium, we must add this to a per-VM tx
	 * queue that can make progress when the VM becomes writable. This will
	 * fix send buffering and poll readiness notification.
	 */
	err = hf_send_skb(skb);
	if (err)
		goto err_cleanup;

	return 0;

err_cleanup:
	kfree_skb(skb);
	return err;
}
587
/**
 * Receives a message originated from the VM &amp; port the socket is connected to.
 * All variants of read/recv/recvfrom/recvmsg eventually call this function.
 *
 * Returns the number of bytes copied (MSG_TRUNC is set in msg_flags when the
 * datagram was larger than the buffer) or a negative errno.
 */
static int hf_sock_recvmsg(struct socket *sock, struct msghdr *m, size_t len,
			   int flags)
{
	struct sock *sk = sock->sk;
	struct sk_buff *skb;
	int err;
	size_t copy_len;

	if (!hf_sock_is_connected(sock))
		return -ENOTCONN;

	/* Grab the next skb from the receive queue. */
	skb = skb_recv_datagram(sk, flags, flags & MSG_DONTWAIT, &err);
	if (!skb)
		return err;

	/* Make sure we don't copy more than what fits in the output buffer. */
	copy_len = skb->len;
	if (copy_len > len) {
		copy_len = len;
		m->msg_flags |= MSG_TRUNC;
	}

	/* Make sure we don't overflow the return value type. */
	if (copy_len > INT_MAX) {
		copy_len = INT_MAX;
		m->msg_flags |= MSG_TRUNC;
	}

	/* Copy skb to output iterator, then free it. */
	err = skb_copy_datagram_msg(skb, 0, m, copy_len);
	skb_free_datagram(sk, skb);
	if (err)
		return err;

	return copy_len;
}
629
/**
 * This function is called when a Hafnium socket is created. It initialises all
 * state such that the caller will be able to connect the socket and then send
 * and receive messages through it.
 */
static int hf_sock_create(struct net *net, struct socket *sock, int protocol,
			  int kern)
{
	/*
	 * Operations table shared by all Hafnium sockets; everything except
	 * release/connect/sendmsg/recvmsg/poll is explicitly unsupported.
	 */
	static const struct proto_ops ops = {
		.family = PF_HF,
		.owner = THIS_MODULE,
		.release = hf_sock_release,
		.bind = sock_no_bind,
		.connect = hf_sock_connect,
		.socketpair = sock_no_socketpair,
		.accept = sock_no_accept,
		.ioctl = sock_no_ioctl,
		.listen = sock_no_listen,
		.shutdown = sock_no_shutdown,
		.setsockopt = sock_no_setsockopt,
		.getsockopt = sock_no_getsockopt,
		.sendmsg = hf_sock_sendmsg,
		.recvmsg = hf_sock_recvmsg,
		.mmap = sock_no_mmap,
		.sendpage = sock_no_sendpage,
		.poll = datagram_poll,
	};
	struct sock *sk;

	/* Only connectionless datagram sockets are supported. */
	if (sock->type != SOCK_DGRAM)
		return -ESOCKTNOSUPPORT;

	if (protocol != 0)
		return -EPROTONOSUPPORT;

	/*
	 * For now we only allow callers with sys admin capability to create
	 * Hafnium sockets.
	 */
	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	/* Allocate and initialise socket. */
	sk = sk_alloc(net, PF_HF, GFP_KERNEL, &hf_sock_proto, kern);
	if (!sk)
		return -ENOMEM;

	sock_init_data(sock, sk);

	sk->sk_destruct = hf_sock_destruct;
	sock->ops = &ops;
	sock->state = SS_UNCONNECTED;

	return 0;
}
685
686/**
Andrew Scullbb7ae412018-09-28 21:07:15 +0100687 * Frees all resources, including threads, associated with the Hafnium driver.
Wedson Almeida Filho2f62b422018-06-19 06:44:32 +0100688 */
Andrew Scull82257c42018-10-01 10:37:48 +0100689static void hf_free_resources(void)
Wedson Almeida Filho2f62b422018-06-19 06:44:32 +0100690{
Andrew Scullbb7ae412018-09-28 21:07:15 +0100691 uint32_t i, j;
Wedson Almeida Filho2f62b422018-06-19 06:44:32 +0100692
693 /*
694 * First stop all worker threads. We need to do this before freeing
695 * resources because workers may reference each other, so it is only
696 * safe to free resources after they have all stopped.
697 */
Andrew Scull82257c42018-10-01 10:37:48 +0100698 for (i = 0; i < hf_vm_count; i++) {
Andrew Scullb3a61b52018-09-17 14:30:34 +0100699 struct hf_vm *vm = &hf_vms[i];
Wedson Almeida Filho1ee35652018-12-24 01:36:48 +0000700
Wedson Almeida Filho2f62b422018-06-19 06:44:32 +0100701 for (j = 0; j < vm->vcpu_count; j++)
702 kthread_stop(vm->vcpu[j].task);
703 }
704
705 /* Free resources. */
Andrew Scull82257c42018-10-01 10:37:48 +0100706 for (i = 0; i < hf_vm_count; i++) {
Andrew Scullb3a61b52018-09-17 14:30:34 +0100707 struct hf_vm *vm = &hf_vms[i];
Wedson Almeida Filho1ee35652018-12-24 01:36:48 +0000708
Wedson Almeida Filho2f62b422018-06-19 06:44:32 +0100709 for (j = 0; j < vm->vcpu_count; j++)
710 put_task_struct(vm->vcpu[j].task);
711 kfree(vm->vcpu);
712 }
713
714 kfree(hf_vms);
715}
716
/**
 * Initializes the Hafnium driver by creating a thread for each vCPU of each
 * virtual machine.
 *
 * Order matters: the shared mailbox pages are configured first, then per-VM
 * state and vCPU threads are created, the socket protocol/family registered,
 * and only then are the threads allowed to run. Each failure path unwinds
 * exactly what was set up before it.
 */
static int __init hf_init(void)
{
	static const struct net_proto_family proto_family = {
		.family = PF_HF,
		.create = hf_sock_create,
		.owner = THIS_MODULE,
	};
	int64_t ret;
	uint32_t i, j;
	uint32_t total_vm_count;
	uint32_t total_vcpu_count;

	/* Allocate a page for send and receive buffers. */
	hf_send_page = alloc_page(GFP_KERNEL);
	if (!hf_send_page) {
		pr_err("Unable to allocate send buffer\n");
		return -ENOMEM;
	}

	hf_recv_page = alloc_page(GFP_KERNEL);
	if (!hf_recv_page) {
		__free_page(hf_send_page);
		pr_err("Unable to allocate receive buffer\n");
		return -ENOMEM;
	}

	/*
	 * Configure both addresses. Once configured, we cannot free these pages
	 * because the hypervisor will use them, even if the module is
	 * unloaded.
	 */
	ret = hf_vm_configure(page_to_phys(hf_send_page),
			      page_to_phys(hf_recv_page));
	if (ret) {
		__free_page(hf_send_page);
		__free_page(hf_recv_page);
		/*
		 * TODO: We may want to grab this information from hypervisor
		 * and go from there.
		 */
		pr_err("Unable to configure VM\n");
		return -EIO;
	}

	/* Get the number of VMs. */
	ret = hf_vm_get_count();
	if (ret < 0) {
		pr_err("Unable to retrieve number of VMs: %lld\n", ret);
		return -EIO;
	}

	/* Confirm the maximum number of VMs looks sane. */
	BUILD_BUG_ON(CONFIG_HAFNIUM_MAX_VMS < 1);
	BUILD_BUG_ON(CONFIG_HAFNIUM_MAX_VMS > U16_MAX);

	/* Validate the number of VMs. There must at least be the primary. */
	if (ret < 1 || ret > CONFIG_HAFNIUM_MAX_VMS) {
		pr_err("Number of VMs is out of range: %lld\n", ret);
		return -EDQUOT;
	}

	/* Only track the secondary VMs. */
	total_vm_count = ret - 1;
	hf_vms =
		kmalloc_array(total_vm_count, sizeof(struct hf_vm), GFP_KERNEL);
	if (!hf_vms)
		return -ENOMEM;

	/* Initialize each VM. */
	total_vcpu_count = 0;
	for (i = 0; i < total_vm_count; i++) {
		struct hf_vm *vm = &hf_vms[i];

		/* Adjust the ID as only the secondaries are tracked. */
		vm->id = i + 1;

		ret = hf_vcpu_get_count(vm->id);
		if (ret < 0) {
			pr_err("HF_VCPU_GET_COUNT failed for vm=%u: %lld",
			       vm->id, ret);
			ret = -EIO;
			goto fail_with_cleanup;
		}

		/* Avoid overflowing the vcpu count. */
		if (ret > (U32_MAX - total_vcpu_count)) {
			pr_err("Too many vcpus: %u\n", total_vcpu_count);
			ret = -EDQUOT;
			goto fail_with_cleanup;
		}

		/* Confirm the maximum number of VCPUs looks sane. */
		BUILD_BUG_ON(CONFIG_HAFNIUM_MAX_VCPUS < 1);
		BUILD_BUG_ON(CONFIG_HAFNIUM_MAX_VCPUS > U16_MAX);

		/* Enforce the limit on vcpus. */
		total_vcpu_count += ret;
		if (total_vcpu_count > CONFIG_HAFNIUM_MAX_VCPUS) {
			pr_err("Too many vcpus: %u\n", total_vcpu_count);
			ret = -EDQUOT;
			goto fail_with_cleanup;
		}

		vm->vcpu_count = ret;
		vm->vcpu = kmalloc_array(vm->vcpu_count, sizeof(struct hf_vcpu),
					 GFP_KERNEL);
		if (!vm->vcpu) {
			ret = -ENOMEM;
			goto fail_with_cleanup;
		}

		/*
		 * Update the number of initialized VMs so that cleanup only
		 * touches fully-constructed entries.
		 */
		hf_vm_count = i + 1;

		/* Create a kernel thread for each vcpu. */
		for (j = 0; j < vm->vcpu_count; j++) {
			struct hf_vcpu *vcpu = &vm->vcpu[j];

			vcpu->task =
				kthread_create(hf_vcpu_thread, vcpu,
					       "vcpu_thread_%u_%u", vm->id, j);
			if (IS_ERR(vcpu->task)) {
				pr_err("Error creating task (vm=%u,vcpu=%u): %ld\n",
				       vm->id, j, PTR_ERR(vcpu->task));
				/* Only j threads exist; trim before cleanup. */
				vm->vcpu_count = j;
				ret = PTR_ERR(vcpu->task);
				goto fail_with_cleanup;
			}

			get_task_struct(vcpu->task);
			vcpu->vm = vm;
			vcpu->vcpu_index = j;
			atomic_set(&vcpu->abort_sleep, 0);
		}
	}

	/* Register protocol and socket family. */
	ret = proto_register(&hf_sock_proto, 0);
	if (ret) {
		pr_err("Unable to register protocol: %lld\n", ret);
		goto fail_with_cleanup;
	}

	ret = sock_register(&proto_family);
	if (ret) {
		pr_err("Unable to register Hafnium's socket family: %lld\n",
		       ret);
		goto fail_unregister_proto;
	}

	/*
	 * Start running threads now that all is initialized.
	 *
	 * Any failures from this point on must also unregister the socket
	 * family with a call to sock_unregister().
	 */
	for (i = 0; i < hf_vm_count; i++) {
		struct hf_vm *vm = &hf_vms[i];

		for (j = 0; j < vm->vcpu_count; j++)
			wake_up_process(vm->vcpu[j].task);
	}

	/* Dump vm/vcpu count info. */
	pr_info("Hafnium successfully loaded with %u VMs:\n", hf_vm_count);
	for (i = 0; i < hf_vm_count; i++) {
		struct hf_vm *vm = &hf_vms[i];

		pr_info("\tVM %u: %u vCPUS\n", vm->id, vm->vcpu_count);
	}

	return 0;

fail_unregister_proto:
	proto_unregister(&hf_sock_proto);
fail_with_cleanup:
	hf_free_resources();
	return ret;
}
900
/**
 * Frees up all resources used by the Hafnium driver in preparation for
 * unloading it.
 */
static void __exit hf_exit(void)
{
	pr_info("Preparing to unload Hafnium\n");
	/* Stop new sockets from appearing before tearing down VM state. */
	sock_unregister(PF_HF);
	proto_unregister(&hf_sock_proto);
	/* Stops all vCPU threads and frees per-VM allocations. */
	hf_free_resources();
	pr_info("Hafnium ready to unload\n");
}
913
Wedson Almeida Filho1ee35652018-12-24 01:36:48 +0000914MODULE_LICENSE("GPL v2");
Wedson Almeida Filho2f62b422018-06-19 06:44:32 +0100915
916module_init(hf_init);
917module_exit(hf_exit);