// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright 2018 Google LLC
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/hrtimer.h>
#include <linux/atomic.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/sched/task.h>
#include <linux/slab.h>
#include <linux/net.h>
#include <net/sock.h>

#include <hf/call.h>

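/*
 * Overview: the driver runs each vCPU of each secondary VM on its own kernel
 * thread via hf_vcpu_run(), and exposes a datagram socket family (PF_HF)
 * whose messages travel to and from secondary VMs through a pair of pages
 * shared with the hypervisor.
 */
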
/* TODO: Reusing AF_ECONET for now as it's otherwise unused. */
#define AF_HF AF_ECONET
#define PF_HF AF_HF

#define CONFIG_HAFNIUM_MAX_VMS 16
#define CONFIG_HAFNIUM_MAX_VCPUS 32

#define FIRST_SECONDARY_VM_ID 1

struct hf_vcpu {
	struct hf_vm *vm;
	uint32_t vcpu_index;
	struct task_struct *task;
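	/*
	 * Set by hf_vcpu_wake_up() and checked by hf_vcpu_sleep() so that a
	 * wake-up arriving while the thread is preparing to sleep isn't lost.
	 */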
	atomic_t abort_sleep;
	struct hrtimer timer;
};

struct hf_vm {
	uint32_t id;
	uint32_t vcpu_count;
	struct hf_vcpu *vcpu;
};

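/*
 * On the wire, each datagram is a struct hf_msg_hdr immediately followed by
 * the payload; the whole message must fit in the shared mailbox page (see
 * hf_sock_sendmsg() and hf_handle_message()).
 */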
struct hf_msg_hdr {
	uint64_t src_port;
	uint64_t dst_port;
};

struct hf_sock {
	/* This needs to be the first field. */
	struct sock sk;

	/*
	 * The following fields are immutable after the socket transitions to
	 * SS_CONNECTED state.
	 */
	uint64_t local_port;
	uint64_t remote_port;
	struct hf_vm *peer_vm;
};

struct sockaddr_hf {
	sa_family_t family;
	uint32_t vm_id;
	uint64_t port;
};
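
/*
 * A hypothetical user-space usage sketch (not part of this module; the
 * values are illustrative only). A caller with CAP_SYS_ADMIN could reach
 * port 10 of secondary VM 1 with:
 *
 *	int fd = socket(PF_HF, SOCK_DGRAM, 0);
 *	struct sockaddr_hf addr = {
 *		.family = AF_HF,
 *		.vm_id = 1,
 *		.port = 10,
 *	};
 *
 *	connect(fd, (struct sockaddr *)&addr, sizeof(addr));
 *	send(fd, buf, len, 0);
 *	recv(fd, buf, sizeof(buf), 0);
 *
 * The local port is assigned automatically at connect time, and each send is
 * limited to HF_MAILBOX_SIZE - sizeof(struct hf_msg_hdr) bytes of payload.
 */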

static struct proto hf_sock_proto = {
	.name = "hafnium",
	.owner = THIS_MODULE,
	.obj_size = sizeof(struct hf_sock),
};

static struct hf_vm *hf_vms;
static uint32_t hf_vm_count;
static struct page *hf_send_page;
static struct page *hf_recv_page;
static atomic64_t hf_next_port = ATOMIC64_INIT(0);
static DEFINE_SPINLOCK(hf_send_lock);
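/* Maps local ports to connected sockets; 7 bits gives 128 hash buckets. */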
static DEFINE_HASHTABLE(hf_local_port_hash, 7);
static DEFINE_SPINLOCK(hf_local_port_hash_lock);

/**
 * Retrieves a VM from its ID, returning NULL if the VM doesn't exist.
 */
static struct hf_vm *hf_vm_from_id(uint32_t vm_id)
{
	if (vm_id < FIRST_SECONDARY_VM_ID ||
	    vm_id >= FIRST_SECONDARY_VM_ID + hf_vm_count)
		return NULL;

	return &hf_vms[vm_id - FIRST_SECONDARY_VM_ID];
}

/**
 * Wakes up the kernel thread responsible for running the given vcpu.
 *
 * Returns 0 if the thread was already running, 1 otherwise.
 */
static int hf_vcpu_wake_up(struct hf_vcpu *vcpu)
{
	/* Set a flag indicating that the thread should not go to sleep. */
	atomic_set(&vcpu->abort_sleep, 1);

	/* Set the thread to running state. */
	return wake_up_process(vcpu->task);
}

/**
 * Puts the current thread to sleep. The current thread must be responsible for
 * running the given vcpu.
 *
 * Going to sleep will fail if hf_vcpu_wake_up() or kthread_stop() was called on
 * this vcpu/thread since the last time it [re]started running.
 */
static void hf_vcpu_sleep(struct hf_vcpu *vcpu)
{
	int abort;

	set_current_state(TASK_INTERRUPTIBLE);

	/* Check the sleep-abort flag after making thread interruptible. */
	abort = atomic_read(&vcpu->abort_sleep);
	if (!abort && !kthread_should_stop())
		schedule();

	/* Set state back to running on the way out. */
	set_current_state(TASK_RUNNING);
}

/**
 * Wakes up the thread associated with the vcpu that owns the given timer. This
 * is called when the timer the thread is waiting on expires.
 */
static enum hrtimer_restart hf_vcpu_timer_expired(struct hrtimer *timer)
{
	struct hf_vcpu *vcpu = container_of(timer, struct hf_vcpu, timer);
	/* TODO: Inject interrupt. */
	hf_vcpu_wake_up(vcpu);
	return HRTIMER_NORESTART;
}

/**
 * Handles a message delivered to this VM by validating that it's well-formed
 * and then queueing it for delivery to the appropriate socket.
 */
static void hf_handle_message(struct hf_vm *sender, const void *ptr, size_t len)
{
	struct hf_sock *hsock;
	const struct hf_msg_hdr *hdr = ptr;
	struct sk_buff *skb;
	int err;

	/* Ignore messages that are too small to hold a header. */
	if (len < sizeof(struct hf_msg_hdr))
		return;

	len -= sizeof(struct hf_msg_hdr);

	/* Go through the colliding sockets. */
	rcu_read_lock();
	hash_for_each_possible_rcu(hf_local_port_hash, hsock, sk.sk_node,
				   hdr->dst_port) {
		if (hsock->peer_vm == sender &&
		    hsock->remote_port == hdr->src_port) {
			sock_hold(&hsock->sk);
			break;
		}
	}
	rcu_read_unlock();

	/* Nothing to do if we couldn't find the target. */
	if (!hsock)
		return;

	/*
	 * TODO: From this point on, there are two failure paths: when we
	 * create the skb below, and when we enqueue it to the socket. What
	 * should we do if they fail? Ideally we would have some form of flow
	 * control to prevent message loss, but how to do it efficiently?
	 *
	 * One option is to have a pre-allocated message that indicates to the
	 * sender that a message was dropped. This way we guarantee that the
	 * sender will be aware of loss and should back off.
	 */
	/* Create the skb. */
	skb = alloc_skb(len, GFP_KERNEL);
	if (!skb)
		goto exit;

	memcpy(skb_put(skb, len), hdr + 1, len);

	/*
	 * Add the skb to the receive queue of the target socket. On success it
	 * calls sk->sk_data_ready, which is currently set to sock_def_readable,
	 * which wakes up any waiters.
	 */
	err = sock_queue_rcv_skb(&hsock->sk, skb);
	if (err)
		kfree_skb(skb);

exit:
	sock_put(&hsock->sk);
}

/**
 * This function is called when Hafnium requests that the primary VM wake up a
 * vCPU that belongs to a secondary VM.
 *
 * It wakes up the thread if it's sleeping, or kicks it if it's already running.
 *
 * If the vcpu argument is HF_INVALID_VCPU, it injects an interrupt into a vCPU
 * belonging to the specified VM.
 */
static void hf_handle_wake_up_request(uint32_t vm_id, uint16_t vcpu,
				      uint64_t int_id)
{
	struct hf_vm *vm = hf_vm_from_id(vm_id);

	if (!vm) {
		pr_warn("Request to wake up non-existent VM id: %u\n", vm_id);
		return;
	}

	if (vcpu >= vm->vcpu_count) {
		int64_t ret;

		if (vcpu != HF_INVALID_VCPU) {
			pr_warn("Request to wake up non-existent vCPU: %u.%u\n",
				vm_id, vcpu);
			return;
		}

		/*
		 * TODO: For now we're picking the first vcpu to interrupt, but
		 * we want to be smarter.
		 */
		vcpu = 0;
		ret = hf_interrupt_inject(vm_id, vcpu, int_id);
		if (ret != 1) {
			/* We don't need to wake up the vcpu. */
			return;
		}
	}

	if (hf_vcpu_wake_up(&vm->vcpu[vcpu]) == 0) {
		/*
		 * The task was already running (presumably on a different
		 * physical CPU); interrupt it. This gives Hafnium a chance to
		 * inject any new interrupts.
		 */
		kick_process(vm->vcpu[vcpu].task);
	}
}

/**
 * Notifies all waiters on the given VM.
 */
static void hf_notify_waiters(uint32_t vm_id)
{
	int64_t ret;

	while ((ret = hf_mailbox_waiter_get(vm_id)) != -1) {
		if (ret == HF_PRIMARY_VM_ID) {
			/*
			 * TODO: Use this information when implementing per-vm
			 * queues.
			 */
		} else {
			hf_handle_wake_up_request(ret, HF_INVALID_VCPU,
						  HF_MAILBOX_WRITABLE_INTID);
		}
	}
}

/**
 * This is the main loop of each vcpu.
 */
static int hf_vcpu_thread(void *data)
{
	struct hf_vcpu *vcpu = data;
	struct hf_vcpu_run_return ret;

	hrtimer_init(&vcpu->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	vcpu->timer.function = &hf_vcpu_timer_expired;

	while (!kthread_should_stop()) {
		uint32_t i;

		/*
		 * We're about to run the vcpu, so we can reset the abort-sleep
		 * flag.
		 */
		atomic_set(&vcpu->abort_sleep, 0);

		/* Call into Hafnium to run vcpu. */
		ret = hf_vcpu_run(vcpu->vm->id, vcpu->vcpu_index);

		switch (ret.code) {
		/* Preempted. */
		case HF_VCPU_RUN_PREEMPTED:
			if (need_resched())
				schedule();
			break;

		/* Yield. */
		case HF_VCPU_RUN_YIELD:
			if (!kthread_should_stop())
				schedule();
			break;

		/* WFI. */
		case HF_VCPU_RUN_WAIT_FOR_INTERRUPT:
			hf_vcpu_sleep(vcpu);
			break;

		/* Wake up another vcpu. */
		case HF_VCPU_RUN_WAKE_UP:
			hf_handle_wake_up_request(ret.wake_up.vm_id,
						  ret.wake_up.vcpu,
						  HF_MAILBOX_READABLE_INTID);
			break;

		/* Response available. */
		case HF_VCPU_RUN_MESSAGE:
			hf_handle_message(vcpu->vm, page_address(hf_recv_page),
					  ret.message.size);
			if (hf_mailbox_clear() == 1)
				hf_notify_waiters(HF_PRIMARY_VM_ID);
			break;

		case HF_VCPU_RUN_SLEEP:
			hrtimer_start(&vcpu->timer, ret.sleep.ns,
				      HRTIMER_MODE_REL);
			hf_vcpu_sleep(vcpu);
			hrtimer_cancel(&vcpu->timer);
			break;

		/* Notify all waiters. */
		case HF_VCPU_RUN_NOTIFY_WAITERS:
			hf_notify_waiters(vcpu->vm->id);
			break;

		case HF_VCPU_RUN_ABORTED:
			for (i = 0; i < vcpu->vm->vcpu_count; i++) {
				if (i == vcpu->vcpu_index)
					continue;
				hf_handle_wake_up_request(vcpu->vm->id, i, 0);
			}
			hf_vcpu_sleep(vcpu);
			break;
		}
	}

	return 0;
}

/**
 * Converts a pointer to a struct sock into a pointer to a struct hf_sock. It
 * relies on the fact that the first field of hf_sock is a sock.
 */
static struct hf_sock *hsock_from_sk(struct sock *sk)
{
	return (struct hf_sock *)sk;
}

/**
 * This is called when the last reference to the outer socket is released. For
 * example, if it's a user-space socket, when the last file descriptor pointing
 * to this socket is closed.
 *
 * It begins cleaning up resources, though some can only be cleaned up after all
 * references to the underlying socket are released, which is handled by
 * hf_sock_destruct().
 */
static int hf_sock_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct hf_sock *hsock = hsock_from_sk(sk);
	unsigned long flags;

	if (!sk)
		return 0;

	/* Shutdown for both send and receive. */
	lock_sock(sk);
	sk->sk_shutdown |= RCV_SHUTDOWN | SEND_SHUTDOWN;
	sk->sk_state_change(sk);
	release_sock(sk);

	/* Remove from the hash table, so lookups from now on won't find it. */
	spin_lock_irqsave(&hf_local_port_hash_lock, flags);
	hash_del_rcu(&hsock->sk.sk_node);
	spin_unlock_irqrestore(&hf_local_port_hash_lock, flags);

	/*
	 * TODO: When we implement a tx queue, we need to clear it here so that
	 * sk_wmem_alloc will not prevent sk from being freed (sk_free).
	 */

	/*
	 * Wait for in-flight lookups to finish. We need to do this here because
	 * in-flight lookups rely on the reference to the socket we're about to
	 * release.
	 */
	synchronize_rcu();
	sock_put(sk);
	sock->sk = NULL;

	return 0;
}

/**
 * This is called when there are no more references to the socket. It frees all
 * resources that haven't been freed during release.
 */
static void hf_sock_destruct(struct sock *sk)
{
	/*
	 * Clear the receive queue now that the handler cannot add any more
	 * skbs to it.
	 */
	skb_queue_purge(&sk->sk_receive_queue);
}

/**
 * Connects the Hafnium socket to the provided VM and port. After the socket is
 * connected, it can be used to exchange datagrams with the specified peer.
 */
static int hf_sock_connect(struct socket *sock, struct sockaddr *saddr, int len,
			   int connect_flags)
{
	struct sock *sk = sock->sk;
	struct hf_sock *hsock = hsock_from_sk(sk);
	struct hf_vm *vm;
	struct sockaddr_hf *addr;
	int err;
	unsigned long flags;

	/* Basic address validation. */
	if (len < sizeof(struct sockaddr_hf) || saddr->sa_family != AF_HF)
		return -EINVAL;

	addr = (struct sockaddr_hf *)saddr;
	vm = hf_vm_from_id(addr->vm_id);
	if (!vm)
		return -ENETUNREACH;

	/*
	 * TODO: Once we implement access control in Hafnium, check that the
	 * caller is allowed to contact the specified VM. Return -ECONNREFUSED
	 * if access is denied.
	 */

	/* Take lock to make sure state doesn't change as we connect. */
	lock_sock(sk);

	/* Only unconnected sockets are allowed to become connected. */
	if (sock->state != SS_UNCONNECTED) {
		err = -EISCONN;
		goto exit;
	}

	hsock->local_port = atomic64_inc_return(&hf_next_port);
	hsock->remote_port = addr->port;
	hsock->peer_vm = vm;

	sock->state = SS_CONNECTED;

	/* Add socket to hash table now that it's fully initialised. */
	spin_lock_irqsave(&hf_local_port_hash_lock, flags);
	hash_add_rcu(hf_local_port_hash, &sk->sk_node, hsock->local_port);
	spin_unlock_irqrestore(&hf_local_port_hash_lock, flags);

	err = 0;
exit:
	release_sock(sk);
	return err;
}

/**
 * Sends the given skb to the appropriate VM by calling Hafnium. It will also
 * trigger the wake up of a recipient VM.
 *
 * Takes ownership of the skb on success.
 */
static int hf_send_skb(struct sk_buff *skb)
{
	unsigned long flags;
	int64_t ret;
	struct hf_sock *hsock = hsock_from_sk(skb->sk);
	struct hf_vm *vm = hsock->peer_vm;

	/*
	 * Call Hafnium under the send lock so that we serialize the use of the
	 * global send buffer.
	 */
	spin_lock_irqsave(&hf_send_lock, flags);
	memcpy(page_address(hf_send_page), skb->data, skb->len);
	ret = hf_mailbox_send(vm->id, skb->len, false);
	spin_unlock_irqrestore(&hf_send_lock, flags);

	if (ret < 0)
		return -EAGAIN;

	/* Wake some vcpu up to handle the new message. */
	hf_handle_wake_up_request(vm->id, ret, HF_MAILBOX_READABLE_INTID);

	kfree_skb(skb);

	return 0;
}

/**
 * Determines if the given socket is in the connected state. It acquires and
 * releases the socket lock.
 */
static bool hf_sock_is_connected(struct socket *sock)
{
	bool ret;

	lock_sock(sock->sk);
	ret = sock->state == SS_CONNECTED;
	release_sock(sock->sk);

	return ret;
}

/**
 * Sends a message to the VM & port the socket is connected to. All variants
 * of write/send/sendto/sendmsg eventually call this function.
 */
static int hf_sock_sendmsg(struct socket *sock, struct msghdr *m, size_t len)
{
	struct sock *sk = sock->sk;
	struct sk_buff *skb;
	int err;
	struct hf_msg_hdr *hdr;
	struct hf_sock *hsock = hsock_from_sk(sk);

	/* Check length. */
	if (len > HF_MAILBOX_SIZE - sizeof(struct hf_msg_hdr))
		return -EMSGSIZE;

	/* We don't allow the destination address to be specified. */
	if (m->msg_namelen > 0)
		return -EISCONN;

	/* We don't support out of band messages. */
	if (m->msg_flags & MSG_OOB)
		return -EOPNOTSUPP;

	/*
	 * Ensure that the socket is connected. We don't need to hold the socket
	 * lock (acquired and released by hf_sock_is_connected) for the
	 * remainder of the function because the fields we care about are
	 * immutable once the state is SS_CONNECTED.
	 */
	if (!hf_sock_is_connected(sock))
		return -ENOTCONN;

	/*
	 * Allocate an skb for this write. If there isn't enough room in the
	 * socket's send buffer (sk_wmem_alloc >= sk_sndbuf), this will block
	 * (if it's a blocking call). On success, it increments sk_wmem_alloc
	 * and sets up the skb such that sk_wmem_alloc gets decremented when
	 * the skb is freed (sock_wfree gets called).
	 */
	skb = sock_alloc_send_skb(sk, len + sizeof(struct hf_msg_hdr),
				  m->msg_flags & MSG_DONTWAIT, &err);
	if (!skb)
		return err;

	/* Reserve room for the header and initialise it. */
	skb_reserve(skb, sizeof(struct hf_msg_hdr));
	hdr = skb_push(skb, sizeof(struct hf_msg_hdr));
	hdr->src_port = hsock->local_port;
	hdr->dst_port = hsock->remote_port;

	/* Allocate area for the contents, then copy into skb. */
	if (!copy_from_iter_full(skb_put(skb, len), len, &m->msg_iter)) {
		err = -EFAULT;
		goto err_cleanup;
	}

	/*
	 * TODO: We currently do this inline, but when we have support for
	 * readiness notification from Hafnium, we must add this to a per-VM tx
	 * queue that can make progress when the VM becomes writable. This will
	 * fix send buffering and poll readiness notification.
	 */
	err = hf_send_skb(skb);
	if (err)
		goto err_cleanup;

	return 0;

err_cleanup:
	kfree_skb(skb);
	return err;
}

/**
 * Receives a message originated from the VM & port the socket is connected to.
 * All variants of read/recv/recvfrom/recvmsg eventually call this function.
 */
static int hf_sock_recvmsg(struct socket *sock, struct msghdr *m, size_t len,
			   int flags)
{
	struct sock *sk = sock->sk;
	struct sk_buff *skb;
	int err;
	size_t copy_len;

	if (!hf_sock_is_connected(sock))
		return -ENOTCONN;

	/* Grab the next skb from the receive queue. */
	skb = skb_recv_datagram(sk, flags, flags & MSG_DONTWAIT, &err);
	if (!skb)
		return err;

	/* Make sure we don't copy more than what fits in the output buffer. */
	copy_len = skb->len;
	if (copy_len > len) {
		copy_len = len;
		m->msg_flags |= MSG_TRUNC;
	}

	/* Make sure we don't overflow the return value type. */
	if (copy_len > INT_MAX) {
		copy_len = INT_MAX;
		m->msg_flags |= MSG_TRUNC;
	}

	/* Copy skb to output iterator, then free it. */
	err = skb_copy_datagram_msg(skb, 0, m, copy_len);
	skb_free_datagram(sk, skb);
	if (err)
		return err;

	return copy_len;
}

/**
 * This function is called when a Hafnium socket is created. It initialises all
 * state such that the caller will be able to connect the socket and then send
 * and receive messages through it.
 */
static int hf_sock_create(struct net *net, struct socket *sock, int protocol,
			  int kern)
{
	static const struct proto_ops ops = {
		.family = PF_HF,
		.owner = THIS_MODULE,
		.release = hf_sock_release,
		.bind = sock_no_bind,
		.connect = hf_sock_connect,
		.socketpair = sock_no_socketpair,
		.accept = sock_no_accept,
		.ioctl = sock_no_ioctl,
		.listen = sock_no_listen,
		.shutdown = sock_no_shutdown,
		.setsockopt = sock_no_setsockopt,
		.getsockopt = sock_no_getsockopt,
		.sendmsg = hf_sock_sendmsg,
		.recvmsg = hf_sock_recvmsg,
		.mmap = sock_no_mmap,
		.sendpage = sock_no_sendpage,
		.poll = datagram_poll,
	};
	struct sock *sk;

	if (sock->type != SOCK_DGRAM)
		return -ESOCKTNOSUPPORT;

	if (protocol != 0)
		return -EPROTONOSUPPORT;

	/*
	 * For now we only allow callers with sys admin capability to create
	 * Hafnium sockets.
	 */
	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	/* Allocate and initialise socket. */
	sk = sk_alloc(net, PF_HF, GFP_KERNEL, &hf_sock_proto, kern);
	if (!sk)
		return -ENOMEM;

	sock_init_data(sock, sk);

	sk->sk_destruct = hf_sock_destruct;
	sock->ops = &ops;
	sock->state = SS_UNCONNECTED;

	return 0;
}

/**
 * Frees all resources, including threads, associated with the Hafnium driver.
 */
static void hf_free_resources(void)
{
	uint32_t i, j;

	/*
	 * First stop all worker threads. We need to do this before freeing
	 * resources because workers may reference each other, so it is only
	 * safe to free resources after they have all stopped.
	 */
	for (i = 0; i < hf_vm_count; i++) {
		struct hf_vm *vm = &hf_vms[i];

		for (j = 0; j < vm->vcpu_count; j++)
			kthread_stop(vm->vcpu[j].task);
	}

	/* Free resources. */
	for (i = 0; i < hf_vm_count; i++) {
		struct hf_vm *vm = &hf_vms[i];

		for (j = 0; j < vm->vcpu_count; j++)
			put_task_struct(vm->vcpu[j].task);
		kfree(vm->vcpu);
	}

	kfree(hf_vms);
}

/**
 * Initializes the Hafnium driver by creating a thread for each vCPU of each
 * virtual machine.
 */
static int __init hf_init(void)
{
	static const struct net_proto_family proto_family = {
		.family = PF_HF,
		.create = hf_sock_create,
		.owner = THIS_MODULE,
	};
	int64_t ret;
	uint32_t i, j;
	uint32_t total_vm_count;
	uint32_t total_vcpu_count;

	/* Allocate a page each for the send and receive buffers. */
	hf_send_page = alloc_page(GFP_KERNEL);
	if (!hf_send_page) {
		pr_err("Unable to allocate send buffer\n");
		return -ENOMEM;
	}

	hf_recv_page = alloc_page(GFP_KERNEL);
	if (!hf_recv_page) {
		__free_page(hf_send_page);
		pr_err("Unable to allocate receive buffer\n");
		return -ENOMEM;
	}

	/*
	 * Configure both addresses. Once configured, we cannot free these pages
	 * because the hypervisor will use them, even if the module is
	 * unloaded.
	 */
	ret = hf_vm_configure(page_to_phys(hf_send_page),
			      page_to_phys(hf_recv_page));
	if (ret) {
		__free_page(hf_send_page);
		__free_page(hf_recv_page);
		/*
		 * TODO: We may want to grab this information from the
		 * hypervisor and go from there.
		 */
		pr_err("Unable to configure VM\n");
		return -EIO;
	}

	/* Get the number of VMs. */
	ret = hf_vm_get_count();
	if (ret < 0) {
		pr_err("Unable to retrieve number of VMs: %lld\n", ret);
		return -EIO;
	}

	/* Confirm the maximum number of VMs looks sane. */
	BUILD_BUG_ON(CONFIG_HAFNIUM_MAX_VMS < 1);
	BUILD_BUG_ON(CONFIG_HAFNIUM_MAX_VMS > U16_MAX);

	/* Validate the number of VMs. There must be at least the primary. */
	if (ret < 1 || ret > CONFIG_HAFNIUM_MAX_VMS) {
		pr_err("Number of VMs is out of range: %lld\n", ret);
		return -EDQUOT;
	}

	/* Only track the secondary VMs. */
	total_vm_count = ret - 1;
	hf_vms =
		kmalloc_array(total_vm_count, sizeof(struct hf_vm), GFP_KERNEL);
	if (!hf_vms)
		return -ENOMEM;

	/* Initialize each VM. */
	total_vcpu_count = 0;
	for (i = 0; i < total_vm_count; i++) {
		struct hf_vm *vm = &hf_vms[i];

		/* Adjust the ID as only the secondaries are tracked. */
		vm->id = i + FIRST_SECONDARY_VM_ID;

		ret = hf_vcpu_get_count(vm->id);
		if (ret < 0) {
			pr_err("HF_VCPU_GET_COUNT failed for vm=%u: %lld\n",
			       vm->id, ret);
			ret = -EIO;
			goto fail_with_cleanup;
		}

		/* Avoid overflowing the vcpu count. */
		if (ret > (U32_MAX - total_vcpu_count)) {
			pr_err("Too many vcpus: %u\n", total_vcpu_count);
			ret = -EDQUOT;
			goto fail_with_cleanup;
		}

		/* Confirm the maximum number of VCPUs looks sane. */
		BUILD_BUG_ON(CONFIG_HAFNIUM_MAX_VCPUS < 1);
		BUILD_BUG_ON(CONFIG_HAFNIUM_MAX_VCPUS > U16_MAX);

		/* Enforce the limit on vcpus. */
		total_vcpu_count += ret;
		if (total_vcpu_count > CONFIG_HAFNIUM_MAX_VCPUS) {
			pr_err("Too many vcpus: %u\n", total_vcpu_count);
			ret = -EDQUOT;
			goto fail_with_cleanup;
		}

		vm->vcpu_count = ret;
		vm->vcpu = kmalloc_array(vm->vcpu_count, sizeof(struct hf_vcpu),
					 GFP_KERNEL);
		if (!vm->vcpu) {
			ret = -ENOMEM;
			goto fail_with_cleanup;
		}

		/* Update the number of initialized VMs. */
		hf_vm_count = i + 1;

		/* Create a kernel thread for each vcpu. */
		for (j = 0; j < vm->vcpu_count; j++) {
			struct hf_vcpu *vcpu = &vm->vcpu[j];

			vcpu->task =
				kthread_create(hf_vcpu_thread, vcpu,
					       "vcpu_thread_%u_%u", vm->id, j);
			if (IS_ERR(vcpu->task)) {
				pr_err("Error creating task (vm=%u,vcpu=%u): %ld\n",
				       vm->id, j, PTR_ERR(vcpu->task));
				vm->vcpu_count = j;
				ret = PTR_ERR(vcpu->task);
				goto fail_with_cleanup;
			}

			get_task_struct(vcpu->task);
			vcpu->vm = vm;
			vcpu->vcpu_index = j;
			atomic_set(&vcpu->abort_sleep, 0);
		}
	}

	/* Register protocol and socket family. */
	ret = proto_register(&hf_sock_proto, 0);
	if (ret) {
		pr_err("Unable to register protocol: %lld\n", ret);
		goto fail_with_cleanup;
	}

	ret = sock_register(&proto_family);
	if (ret) {
		pr_err("Unable to register Hafnium's socket family: %lld\n",
		       ret);
		goto fail_unregister_proto;
	}

	/*
	 * Start running threads now that all is initialized.
	 *
	 * Any failures from this point on must also unregister the socket
	 * family with a call to sock_unregister().
	 */
	for (i = 0; i < hf_vm_count; i++) {
		struct hf_vm *vm = &hf_vms[i];

		for (j = 0; j < vm->vcpu_count; j++)
			wake_up_process(vm->vcpu[j].task);
	}

	/* Dump vm/vcpu count info. */
	pr_info("Hafnium successfully loaded with %u VMs:\n", hf_vm_count);
	for (i = 0; i < hf_vm_count; i++) {
		struct hf_vm *vm = &hf_vms[i];

		pr_info("\tVM %u: %u vCPUs\n", vm->id, vm->vcpu_count);
	}

	return 0;

fail_unregister_proto:
	proto_unregister(&hf_sock_proto);
fail_with_cleanup:
	hf_free_resources();
	return ret;
}

/**
 * Frees up all resources used by the Hafnium driver in preparation for
 * unloading it.
 */
static void __exit hf_exit(void)
{
	pr_info("Preparing to unload Hafnium\n");
	sock_unregister(PF_HF);
	proto_unregister(&hf_sock_proto);
	hf_free_resources();
	pr_info("Hafnium ready to unload\n");
}

MODULE_LICENSE("GPL v2");

module_init(hf_init);
module_exit(hf_exit);