// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright 2018 Google LLC
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/hrtimer.h>
#include <linux/atomic.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/sched/task.h>
#include <linux/slab.h>
#include <linux/net.h>
#include <net/sock.h>

#include <hf/call.h>

/* TODO: Reusing AF_ECONET for now as it's otherwise unused. */
#define AF_HF AF_ECONET
#define PF_HF AF_HF

#define CONFIG_HAFNIUM_MAX_VMS   16
#define CONFIG_HAFNIUM_MAX_VCPUS 32

#define FIRST_SECONDARY_VM_ID 1

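/**
 * State of a vCPU of a secondary VM. Each vCPU is driven by a dedicated
 * kernel thread running hf_vcpu_thread().
 */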
struct hf_vcpu {
	struct hf_vm *vm;
	uint32_t vcpu_index;
	struct task_struct *task;
	atomic_t abort_sleep;
	struct hrtimer timer;
};

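/**
 * State of a secondary VM. The primary VM (the one this driver runs in) is
 * not tracked here; see FIRST_SECONDARY_VM_ID and hf_vm_from_id().
 */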
struct hf_vm {
	uint32_t id;
	uint32_t vcpu_count;
	struct hf_vcpu *vcpu;
};

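/**
 * Header prepended to every datagram sent over a Hafnium socket. The payload
 * follows immediately after the header, so header plus payload must fit in
 * the mailbox (see hf_sock_sendmsg() and hf_handle_message()).
 */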
struct hf_msg_hdr {
	uint64_t src_port;
	uint64_t dst_port;
};

struct hf_sock {
	/* This needs to be the first field. */
	struct sock sk;

	/*
	 * The following fields are immutable after the socket transitions to
	 * SS_CONNECTED state.
	 */
	uint64_t local_port;
	uint64_t remote_port;
	struct hf_vm *peer_vm;
};

struct sockaddr_hf {
	sa_family_t family;
	uint32_t vm_id;
	uint64_t port;
};
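
/*
 * Illustrative user-space usage of the address family above. This is only a
 * sketch: it assumes AF_HF is visible to user space with the AF_ECONET value
 * defined earlier, that the caller has CAP_SYS_ADMIN (see hf_sock_create()),
 * and the VM ID and port numbers are hypothetical.
 *
 *	int fd = socket(AF_HF, SOCK_DGRAM, 0);
 *	struct sockaddr_hf addr = {
 *		.family = AF_HF, .vm_id = 1, .port = 10,
 *	};
 *	connect(fd, (struct sockaddr *)&addr, sizeof(addr));
 *	send(fd, buf, buf_len, 0);
 *	recv(fd, buf, sizeof(buf), 0);
 */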

static struct proto hf_sock_proto = {
	.name = "hafnium",
	.owner = THIS_MODULE,
	.obj_size = sizeof(struct hf_sock),
};

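/*
 * Global driver state. The shared send page is written only while holding
 * hf_send_lock (see hf_send_skb()). Connected sockets are hashed by local
 * port: updates to hf_local_port_hash take hf_local_port_hash_lock, while
 * lookups on the receive path use RCU (see hf_handle_message()).
 */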
static struct hf_vm *hf_vms;
static uint32_t hf_vm_count;
static struct page *hf_send_page;
static struct page *hf_recv_page;
static atomic64_t hf_next_port = ATOMIC64_INIT(0);
static DEFINE_SPINLOCK(hf_send_lock);
static DEFINE_HASHTABLE(hf_local_port_hash, 7);
static DEFINE_SPINLOCK(hf_local_port_hash_lock);

/**
 * Retrieves a VM from its ID, returning NULL if the VM doesn't exist.
 */
static struct hf_vm *hf_vm_from_id(uint32_t vm_id)
{
	if (vm_id < FIRST_SECONDARY_VM_ID ||
	    vm_id >= FIRST_SECONDARY_VM_ID + hf_vm_count)
		return NULL;

	return &hf_vms[vm_id - FIRST_SECONDARY_VM_ID];
}

/**
 * Wakes up the kernel thread responsible for running the given vcpu.
 *
 * Returns 0 if the thread was already running, 1 otherwise.
 */
static int hf_vcpu_wake_up(struct hf_vcpu *vcpu)
{
	/* Set a flag indicating that the thread should not go to sleep. */
	atomic_set(&vcpu->abort_sleep, 1);

	/* Set the thread to running state. */
	return wake_up_process(vcpu->task);
}

/**
 * Puts the current thread to sleep. The current thread must be responsible for
 * running the given vcpu.
 *
 * Going to sleep will fail if hf_vcpu_wake_up() or kthread_stop() was called on
 * this vcpu/thread since the last time it [re]started running.
 */
static void hf_vcpu_sleep(struct hf_vcpu *vcpu)
{
	int abort;

	set_current_state(TASK_INTERRUPTIBLE);

	/* Check the sleep-abort flag after making thread interruptible. */
	abort = atomic_read(&vcpu->abort_sleep);
	if (!abort && !kthread_should_stop())
		schedule();

	/* Set state back to running on the way out. */
	set_current_state(TASK_RUNNING);
}

/**
 * Wakes up the thread associated with the vcpu that owns the given timer. This
 * is called when the timer the thread is waiting on expires.
 */
static enum hrtimer_restart hf_vcpu_timer_expired(struct hrtimer *timer)
{
	struct hf_vcpu *vcpu = container_of(timer, struct hf_vcpu, timer);
	/* TODO: Inject interrupt. */
	hf_vcpu_wake_up(vcpu);
	return HRTIMER_NORESTART;
}

/**
 * Handles a message delivered to this VM by validating that it's well-formed
 * and then queueing it for delivery to the appropriate socket.
 */
static void hf_handle_message(struct hf_vm *sender, const void *ptr, size_t len)
{
	struct hf_sock *hsock;
	const struct hf_msg_hdr *hdr = ptr;
	struct sk_buff *skb;
	int err;

	/* Ignore messages that are too small to hold a header. */
	if (len < sizeof(struct hf_msg_hdr))
		return;

	len -= sizeof(struct hf_msg_hdr);

	/* Go through the colliding sockets. */
	rcu_read_lock();
	hash_for_each_possible_rcu(hf_local_port_hash, hsock, sk.sk_node,
				   hdr->dst_port) {
		if (hsock->peer_vm == sender &&
		    hsock->remote_port == hdr->src_port) {
			sock_hold(&hsock->sk);
			break;
		}
	}
	rcu_read_unlock();

	/* Nothing to do if we couldn't find the target. */
	if (!hsock)
		return;

	/*
	 * TODO: From this point on, there are two failure paths: when we
	 * create the skb below, and when we enqueue it to the socket. What
	 * should we do if they fail? Ideally we would have some form of flow
	 * control to prevent message loss, but how to do it efficiently?
	 *
	 * One option is to have a pre-allocated message that indicates to the
	 * sender that a message was dropped. This way we guarantee that the
	 * sender will be aware of loss and should back-off.
	 */
	/* Create the skb. */
	skb = alloc_skb(len, GFP_KERNEL);
	if (!skb)
		goto exit;

	memcpy(skb_put(skb, len), hdr + 1, len);

	/*
	 * Add the skb to the receive queue of the target socket. On success it
	 * calls sk->sk_data_ready, which is currently set to sock_def_readable,
	 * which wakes up any waiters.
	 */
	err = sock_queue_rcv_skb(&hsock->sk, skb);
	if (err)
		kfree_skb(skb);

exit:
	sock_put(&hsock->sk);
}

/**
 * This function is called when Hafnium requests that the primary VM wake up a
 * vCPU that belongs to a secondary VM.
 *
 * It wakes up the thread if it's sleeping, or kicks it if it's already running.
 *
 * If vCPU is HF_INVALID_VCPU, it injects an interrupt into a vCPU belonging to
 * the specified VM.
 */
static void hf_handle_wake_up_request(uint32_t vm_id, uint16_t vcpu,
				      uint64_t int_id)
{
	struct hf_vm *vm = hf_vm_from_id(vm_id);

	if (!vm) {
		pr_warn("Request to wake up non-existent VM id: %u\n", vm_id);
		return;
	}

	if (vcpu >= vm->vcpu_count) {
		int64_t ret;

		if (vcpu != HF_INVALID_VCPU) {
			pr_warn("Request to wake up non-existent vCPU: %u.%u\n",
				vm_id, vcpu);
			return;
		}

		/*
		 * TODO: For now we're picking the first vcpu to interrupt, but
		 * we want to be smarter.
		 */
		vcpu = 0;
		ret = hf_interrupt_inject(vm_id, vcpu, int_id);
		if (ret != 1) {
			/* We don't need to wake up the vcpu. */
			return;
		}
	}

	if (hf_vcpu_wake_up(&vm->vcpu[vcpu]) == 0) {
		/*
		 * The task was already running (presumably on a different
		 * physical CPU); interrupt it. This gives Hafnium a chance to
		 * inject any new interrupts.
		 */
		kick_process(vm->vcpu[vcpu].task);
	}
}

/**
 * Notifies all waiters on the given VM.
 */
static void hf_notify_waiters(uint32_t vm_id)
{
	int64_t ret;

	while ((ret = hf_mailbox_waiter_get(vm_id)) != -1) {
		if (ret == HF_PRIMARY_VM_ID) {
			/*
			 * TODO: Use this information when implementing per-vm
			 * queues.
			 */
		} else {
			hf_handle_wake_up_request(ret, HF_INVALID_VCPU,
						  HF_MAILBOX_WRITABLE_INTID);
		}
	}
}

/**
 * This is the main loop of each vcpu.
 */
static int hf_vcpu_thread(void *data)
{
	struct hf_vcpu *vcpu = data;
	struct hf_vcpu_run_return ret;

	hrtimer_init(&vcpu->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	vcpu->timer.function = &hf_vcpu_timer_expired;

	while (!kthread_should_stop()) {
		/*
		 * We're about to run the vcpu, so we can reset the abort-sleep
		 * flag.
		 */
		atomic_set(&vcpu->abort_sleep, 0);

		/* Call into Hafnium to run vcpu. */
		ret = hf_vcpu_run(vcpu->vm->id, vcpu->vcpu_index);

		switch (ret.code) {
		/* Preempted. */
		case HF_VCPU_RUN_PREEMPTED:
			if (need_resched())
				schedule();
			break;

		/* Yield. */
		case HF_VCPU_RUN_YIELD:
			if (!kthread_should_stop())
				schedule();
			break;

		/* WFI. */
		case HF_VCPU_RUN_WAIT_FOR_INTERRUPT:
			hf_vcpu_sleep(vcpu);
			break;

		/* Wake up another vcpu. */
		case HF_VCPU_RUN_WAKE_UP:
			hf_handle_wake_up_request(ret.wake_up.vm_id,
						  ret.wake_up.vcpu,
						  HF_MAILBOX_READABLE_INTID);
			break;

		/* Response available. */
		case HF_VCPU_RUN_MESSAGE:
			hf_handle_message(vcpu->vm, page_address(hf_recv_page),
					  ret.message.size);
			if (hf_mailbox_clear() == 1)
				hf_notify_waiters(HF_PRIMARY_VM_ID);
			break;

		case HF_VCPU_RUN_SLEEP:
			hrtimer_start(&vcpu->timer, ret.sleep.ns,
				      HRTIMER_MODE_REL);
			hf_vcpu_sleep(vcpu);
			hrtimer_cancel(&vcpu->timer);
			break;

		/* Notify all waiters. */
		case HF_VCPU_RUN_NOTIFY_WAITERS:
			hf_notify_waiters(vcpu->vm->id);
			break;
		}
	}

	return 0;
}

/**
 * Converts a pointer to a struct sock into a pointer to a struct hf_sock. It
 * relies on the fact that the first field of hf_sock is a sock.
 */
static struct hf_sock *hsock_from_sk(struct sock *sk)
{
	return (struct hf_sock *)sk;
}

/**
 * This is called when the last reference to the outer socket is released. For
 * example, if it's a user-space socket, when the last file descriptor pointing
 * to this socket is closed.
 *
 * It begins cleaning up resources, though some can only be cleaned up after all
 * references to the underlying socket are released, which is handled by
 * hf_sock_destruct().
 */
static int hf_sock_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct hf_sock *hsock = hsock_from_sk(sk);
	unsigned long flags;

	if (!sk)
		return 0;

	/* Shutdown for both send and receive. */
	lock_sock(sk);
	sk->sk_shutdown |= RCV_SHUTDOWN | SEND_SHUTDOWN;
	sk->sk_state_change(sk);
	release_sock(sk);

	/* Remove from the hash table, so lookups from now on won't find it. */
	spin_lock_irqsave(&hf_local_port_hash_lock, flags);
	hash_del_rcu(&hsock->sk.sk_node);
	spin_unlock_irqrestore(&hf_local_port_hash_lock, flags);

	/*
	 * TODO: When we implement a tx queue, we need to clear it here so that
	 * sk_wmem_alloc will not prevent sk from being freed (sk_free).
	 */

	/*
	 * Wait for in-flight lookups to finish. We need to do this here because
	 * in-flight lookups rely on the reference to the socket we're about to
	 * release.
	 */
	synchronize_rcu();
	sock_put(sk);
	sock->sk = NULL;

	return 0;
}

/**
 * This is called when there are no more references to the socket. It frees all
 * resources that haven't been freed during release.
 */
static void hf_sock_destruct(struct sock *sk)
{
	/*
	 * Clear the receive queue now that the handler cannot add any more
	 * skbs to it.
	 */
	skb_queue_purge(&sk->sk_receive_queue);
}

/**
 * Connects the Hafnium socket to the provided VM and port. After the socket is
 * connected, it can be used to exchange datagrams with the specified peer.
 */
static int hf_sock_connect(struct socket *sock, struct sockaddr *saddr, int len,
			   int connect_flags)
{
	struct sock *sk = sock->sk;
	struct hf_sock *hsock = hsock_from_sk(sk);
	struct hf_vm *vm;
	struct sockaddr_hf *addr;
	int err;
	unsigned long flags;

	/* Basic address validation. */
	if (len < sizeof(struct sockaddr_hf) || saddr->sa_family != AF_HF)
		return -EINVAL;

	addr = (struct sockaddr_hf *)saddr;
	vm = hf_vm_from_id(addr->vm_id);
	if (!vm)
		return -ENETUNREACH;

	/*
	 * TODO: Once we implement access control in Hafnium, check that the
	 * caller is allowed to contact the specified VM. Return -ECONNREFUSED
	 * if access is denied.
	 */

	/* Take lock to make sure state doesn't change as we connect. */
	lock_sock(sk);

	/* Only unconnected sockets are allowed to become connected. */
	if (sock->state != SS_UNCONNECTED) {
		err = -EISCONN;
		goto exit;
	}

	hsock->local_port = atomic64_inc_return(&hf_next_port);
	hsock->remote_port = addr->port;
	hsock->peer_vm = vm;

	sock->state = SS_CONNECTED;

	/* Add socket to hash table now that it's fully initialised. */
	spin_lock_irqsave(&hf_local_port_hash_lock, flags);
	hash_add_rcu(hf_local_port_hash, &sk->sk_node, hsock->local_port);
	spin_unlock_irqrestore(&hf_local_port_hash_lock, flags);

	err = 0;
exit:
	release_sock(sk);
	return err;
}

/**
 * Sends the given skb to the appropriate VM by calling Hafnium. It will also
 * trigger the wake up of a recipient VM.
 *
 * Takes ownership of the skb on success.
 */
static int hf_send_skb(struct sk_buff *skb)
{
	unsigned long flags;
	int64_t ret;
	struct hf_sock *hsock = hsock_from_sk(skb->sk);
	struct hf_vm *vm = hsock->peer_vm;

	/*
	 * Call Hafnium under the send lock so that we serialize the use of the
	 * global send buffer.
	 */
	spin_lock_irqsave(&hf_send_lock, flags);
	memcpy(page_address(hf_send_page), skb->data, skb->len);
	ret = hf_mailbox_send(vm->id, skb->len, false);
	spin_unlock_irqrestore(&hf_send_lock, flags);

	if (ret < 0)
		return -EAGAIN;

	/* Wake some vcpu up to handle the new message. */
	hf_handle_wake_up_request(vm->id, ret, HF_MAILBOX_READABLE_INTID);

	kfree_skb(skb);

	return 0;
}

/**
 * Determines if the given socket is in the connected state. It acquires and
 * releases the socket lock.
 */
static bool hf_sock_is_connected(struct socket *sock)
{
	bool ret;

	lock_sock(sock->sk);
	ret = sock->state == SS_CONNECTED;
	release_sock(sock->sk);

	return ret;
}

/**
 * Sends a message to the VM & port the socket is connected to. All variants
 * of write/send/sendto/sendmsg eventually call this function.
 */
static int hf_sock_sendmsg(struct socket *sock, struct msghdr *m, size_t len)
{
	struct sock *sk = sock->sk;
	struct sk_buff *skb;
	int err;
	struct hf_msg_hdr *hdr;
	struct hf_sock *hsock = hsock_from_sk(sk);

	/* Check length. */
	if (len > HF_MAILBOX_SIZE - sizeof(struct hf_msg_hdr))
		return -EMSGSIZE;

	/* We don't allow the destination address to be specified. */
	if (m->msg_namelen > 0)
		return -EISCONN;

	/* We don't support out of band messages. */
	if (m->msg_flags & MSG_OOB)
		return -EOPNOTSUPP;

	/*
	 * Ensure that the socket is connected. We don't need to hold the socket
	 * lock (acquired and released by hf_sock_is_connected) for the
	 * remainder of the function because the fields we care about are
	 * immutable once the state is SS_CONNECTED.
	 */
	if (!hf_sock_is_connected(sock))
		return -ENOTCONN;

	/*
	 * Allocate an skb for this write. If there isn't enough room in the
	 * socket's send buffer (sk_wmem_alloc >= sk_sndbuf), this will block
	 * (if it's a blocking call). On success, it increments sk_wmem_alloc
	 * and sets up the skb such that sk_wmem_alloc gets decremented when
	 * the skb is freed (sock_wfree gets called).
	 */
	skb = sock_alloc_send_skb(sk, len + sizeof(struct hf_msg_hdr),
				  m->msg_flags & MSG_DONTWAIT, &err);
	if (!skb)
		return err;

	/* Reserve room for the header and initialise it. */
	skb_reserve(skb, sizeof(struct hf_msg_hdr));
	hdr = skb_push(skb, sizeof(struct hf_msg_hdr));
	hdr->src_port = hsock->local_port;
	hdr->dst_port = hsock->remote_port;

	/* Allocate area for the contents, then copy into skb. */
	if (!copy_from_iter_full(skb_put(skb, len), len, &m->msg_iter)) {
		err = -EFAULT;
		goto err_cleanup;
	}

	/*
	 * TODO: We currently do this inline, but when we have support for
	 * readiness notification from Hafnium, we must add this to a per-VM tx
	 * queue that can make progress when the VM becomes writable. This will
	 * fix send buffering and poll readiness notification.
	 */
	err = hf_send_skb(skb);
	if (err)
		goto err_cleanup;

	return 0;

err_cleanup:
	kfree_skb(skb);
	return err;
}

/**
 * Receives a message originated from the VM & port the socket is connected to.
 * All variants of read/recv/recvfrom/recvmsg eventually call this function.
 */
static int hf_sock_recvmsg(struct socket *sock, struct msghdr *m, size_t len,
			   int flags)
{
	struct sock *sk = sock->sk;
	struct sk_buff *skb;
	int err;
	size_t copy_len;

	if (!hf_sock_is_connected(sock))
		return -ENOTCONN;

	/* Grab the next skb from the receive queue. */
	skb = skb_recv_datagram(sk, flags, flags & MSG_DONTWAIT, &err);
	if (!skb)
		return err;

	/* Make sure we don't copy more than what fits in the output buffer. */
	copy_len = skb->len;
	if (copy_len > len) {
		copy_len = len;
		m->msg_flags |= MSG_TRUNC;
	}

	/* Make sure we don't overflow the return value type. */
	if (copy_len > INT_MAX) {
		copy_len = INT_MAX;
		m->msg_flags |= MSG_TRUNC;
	}

	/* Copy skb to output iterator, then free it. */
	err = skb_copy_datagram_msg(skb, 0, m, copy_len);
	skb_free_datagram(sk, skb);
	if (err)
		return err;

	return copy_len;
}

/**
 * This function is called when a Hafnium socket is created. It initialises all
 * state such that the caller will be able to connect the socket and then send
 * and receive messages through it.
 */
static int hf_sock_create(struct net *net, struct socket *sock, int protocol,
			  int kern)
{
	static const struct proto_ops ops = {
		.family = PF_HF,
		.owner = THIS_MODULE,
		.release = hf_sock_release,
		.bind = sock_no_bind,
		.connect = hf_sock_connect,
		.socketpair = sock_no_socketpair,
		.accept = sock_no_accept,
		.ioctl = sock_no_ioctl,
		.listen = sock_no_listen,
		.shutdown = sock_no_shutdown,
		.setsockopt = sock_no_setsockopt,
		.getsockopt = sock_no_getsockopt,
		.sendmsg = hf_sock_sendmsg,
		.recvmsg = hf_sock_recvmsg,
		.mmap = sock_no_mmap,
		.sendpage = sock_no_sendpage,
		.poll = datagram_poll,
	};
	struct sock *sk;

	if (sock->type != SOCK_DGRAM)
		return -ESOCKTNOSUPPORT;

	if (protocol != 0)
		return -EPROTONOSUPPORT;

	/*
	 * For now we only allow callers with sys admin capability to create
	 * Hafnium sockets.
	 */
	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	/* Allocate and initialise socket. */
	sk = sk_alloc(net, PF_HF, GFP_KERNEL, &hf_sock_proto, kern);
	if (!sk)
		return -ENOMEM;

	sock_init_data(sock, sk);

	sk->sk_destruct = hf_sock_destruct;
	sock->ops = &ops;
	sock->state = SS_UNCONNECTED;

	return 0;
}

/**
 * Frees all resources, including threads, associated with the Hafnium driver.
 */
static void hf_free_resources(void)
{
	uint32_t i, j;

	/*
	 * First stop all worker threads. We need to do this before freeing
	 * resources because workers may reference each other, so it is only
	 * safe to free resources after they have all stopped.
	 */
	for (i = 0; i < hf_vm_count; i++) {
		struct hf_vm *vm = &hf_vms[i];

		for (j = 0; j < vm->vcpu_count; j++)
			kthread_stop(vm->vcpu[j].task);
	}

	/* Free resources. */
	for (i = 0; i < hf_vm_count; i++) {
		struct hf_vm *vm = &hf_vms[i];

		for (j = 0; j < vm->vcpu_count; j++)
			put_task_struct(vm->vcpu[j].task);
		kfree(vm->vcpu);
	}

	kfree(hf_vms);
}

/**
 * Initializes the Hafnium driver by creating a thread for each vCPU of each
 * virtual machine.
 */
static int __init hf_init(void)
{
	static const struct net_proto_family proto_family = {
		.family = PF_HF,
		.create = hf_sock_create,
		.owner = THIS_MODULE,
	};
	int64_t ret;
	uint32_t i, j;
	uint32_t total_vm_count;
	uint32_t total_vcpu_count;

	/* Allocate a page for send and receive buffers. */
	hf_send_page = alloc_page(GFP_KERNEL);
	if (!hf_send_page) {
		pr_err("Unable to allocate send buffer\n");
		return -ENOMEM;
	}

	hf_recv_page = alloc_page(GFP_KERNEL);
	if (!hf_recv_page) {
		__free_page(hf_send_page);
		pr_err("Unable to allocate receive buffer\n");
		return -ENOMEM;
	}

	/*
	 * Configure both addresses. Once configured, we cannot free these pages
	 * because the hypervisor will use them, even if the module is
	 * unloaded.
	 */
	ret = hf_vm_configure(page_to_phys(hf_send_page),
			      page_to_phys(hf_recv_page));
	if (ret) {
		__free_page(hf_send_page);
		__free_page(hf_recv_page);
		/*
		 * TODO: We may want to grab this information from hypervisor
		 * and go from there.
		 */
		pr_err("Unable to configure VM\n");
		return -EIO;
	}

	/* Get the number of VMs. */
	ret = hf_vm_get_count();
	if (ret < 0) {
		pr_err("Unable to retrieve number of VMs: %lld\n", ret);
		return -EIO;
	}

	/* Confirm the maximum number of VMs looks sane. */
	BUILD_BUG_ON(CONFIG_HAFNIUM_MAX_VMS < 1);
	BUILD_BUG_ON(CONFIG_HAFNIUM_MAX_VMS > U16_MAX);

	/* Validate the number of VMs. There must at least be the primary. */
	if (ret < 1 || ret > CONFIG_HAFNIUM_MAX_VMS) {
		pr_err("Number of VMs is out of range: %lld\n", ret);
		return -EDQUOT;
	}

	/* Only track the secondary VMs. */
	total_vm_count = ret - 1;
	hf_vms =
		kmalloc_array(total_vm_count, sizeof(struct hf_vm), GFP_KERNEL);
	if (!hf_vms)
		return -ENOMEM;

	/* Initialize each VM. */
	total_vcpu_count = 0;
	for (i = 0; i < total_vm_count; i++) {
		struct hf_vm *vm = &hf_vms[i];

		/* Adjust the ID as only the secondaries are tracked. */
		vm->id = i + FIRST_SECONDARY_VM_ID;

		ret = hf_vcpu_get_count(vm->id);
		if (ret < 0) {
			pr_err("HF_VCPU_GET_COUNT failed for vm=%u: %lld\n",
			       vm->id, ret);
			ret = -EIO;
			goto fail_with_cleanup;
		}

		/* Avoid overflowing the vcpu count. */
		if (ret > (U32_MAX - total_vcpu_count)) {
			pr_err("Too many vcpus: %u\n", total_vcpu_count);
			ret = -EDQUOT;
			goto fail_with_cleanup;
		}

		/* Confirm the maximum number of VCPUs looks sane. */
		BUILD_BUG_ON(CONFIG_HAFNIUM_MAX_VCPUS < 1);
		BUILD_BUG_ON(CONFIG_HAFNIUM_MAX_VCPUS > U16_MAX);

		/* Enforce the limit on vcpus. */
		total_vcpu_count += ret;
		if (total_vcpu_count > CONFIG_HAFNIUM_MAX_VCPUS) {
			pr_err("Too many vcpus: %u\n", total_vcpu_count);
			ret = -EDQUOT;
			goto fail_with_cleanup;
		}

		vm->vcpu_count = ret;
		vm->vcpu = kmalloc_array(vm->vcpu_count, sizeof(struct hf_vcpu),
					 GFP_KERNEL);
		if (!vm->vcpu) {
			ret = -ENOMEM;
			goto fail_with_cleanup;
		}

		/* Update the number of initialized VMs. */
		hf_vm_count = i + 1;

		/* Create a kernel thread for each vcpu. */
		for (j = 0; j < vm->vcpu_count; j++) {
			struct hf_vcpu *vcpu = &vm->vcpu[j];

			vcpu->task =
				kthread_create(hf_vcpu_thread, vcpu,
					       "vcpu_thread_%u_%u", vm->id, j);
			if (IS_ERR(vcpu->task)) {
				pr_err("Error creating task (vm=%u,vcpu=%u): %ld\n",
				       vm->id, j, PTR_ERR(vcpu->task));
				vm->vcpu_count = j;
				ret = PTR_ERR(vcpu->task);
				goto fail_with_cleanup;
			}

			get_task_struct(vcpu->task);
			vcpu->vm = vm;
			vcpu->vcpu_index = j;
			atomic_set(&vcpu->abort_sleep, 0);
		}
	}

	/* Register protocol and socket family. */
	ret = proto_register(&hf_sock_proto, 0);
	if (ret) {
		pr_err("Unable to register protocol: %lld\n", ret);
		goto fail_with_cleanup;
	}

	ret = sock_register(&proto_family);
	if (ret) {
		pr_err("Unable to register Hafnium's socket family: %lld\n",
		       ret);
		goto fail_unregister_proto;
	}

	/*
	 * Start running threads now that all is initialized.
	 *
	 * Any failures from this point on must also unregister the socket
	 * family with a call to sock_unregister().
	 */
	for (i = 0; i < hf_vm_count; i++) {
		struct hf_vm *vm = &hf_vms[i];

		for (j = 0; j < vm->vcpu_count; j++)
			wake_up_process(vm->vcpu[j].task);
	}

	/* Dump vm/vcpu count info. */
	pr_info("Hafnium successfully loaded with %u VMs:\n", hf_vm_count);
	for (i = 0; i < hf_vm_count; i++) {
		struct hf_vm *vm = &hf_vms[i];

		pr_info("\tVM %u: %u vCPUS\n", vm->id, vm->vcpu_count);
	}

	return 0;

fail_unregister_proto:
	proto_unregister(&hf_sock_proto);
fail_with_cleanup:
	hf_free_resources();
	return ret;
}

/**
 * Frees up all resources used by the Hafnium driver in preparation for
 * unloading it.
 */
static void __exit hf_exit(void)
{
	pr_info("Preparing to unload Hafnium\n");
	sock_unregister(PF_HF);
	proto_unregister(&hf_sock_proto);
	hf_free_resources();
	pr_info("Hafnium ready to unload\n");
}

MODULE_LICENSE("GPL v2");

module_init(hf_init);
module_exit(hf_exit);