// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright 2018 The Hafnium Authors.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <clocksource/arm_arch_timer.h>
#include <linux/atomic.h>
#include <linux/cpuhotplug.h>
#include <linux/hrtimer.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/net.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/sched/task.h>
#include <linux/slab.h>
#include <net/sock.h>

#include <hf/call.h>

/* TODO: Reusing AF_ECONET for now as it's otherwise unused. */
#define AF_HF AF_ECONET
#define PF_HF AF_HF

#define HYPERVISOR_TIMER_NAME "el2_timer"

#define CONFIG_HAFNIUM_MAX_VMS 16
#define CONFIG_HAFNIUM_MAX_VCPUS 32

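/*
 * The primary VM (HF_PRIMARY_VM_ID, defined in hf/call.h) is not tracked by
 * this driver; only secondary VMs are, and their IDs start at
 * FIRST_SECONDARY_VM_ID. See hf_vm_from_id() below.
 */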
#define FIRST_SECONDARY_VM_ID 1

struct hf_vcpu {
	struct hf_vm *vm;
	uint32_t vcpu_index;
	struct task_struct *task;
	/* Set by hf_vcpu_wake_up() so the next hf_vcpu_sleep() won't block. */
	atomic_t abort_sleep;
	/* Fires to wake the vcpu thread when a requested sleep expires. */
	struct hrtimer timer;
};

struct hf_vm {
	uint32_t id;
	uint32_t vcpu_count;
	struct hf_vcpu *vcpu;
};

struct hf_msg_hdr {
	uint64_t src_port;
	uint64_t dst_port;
};
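
/*
 * Messages travel through the shared mailbox page as a hf_msg_hdr immediately
 * followed by the payload:
 *
 *	+----------+----------+---------------------------------------+
 *	| src_port | dst_port | payload                               |
 *	+----------+----------+---------------------------------------+
 *
 * so a payload is at most HF_MAILBOX_SIZE - sizeof(struct hf_msg_hdr) bytes;
 * see hf_sock_sendmsg() and hf_handle_message().
 */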

struct hf_sock {
	/* This needs to be the first field. */
	struct sock sk;

	/*
	 * The following fields are immutable after the socket transitions to
	 * SS_CONNECTED state.
	 */
	uint64_t local_port;
	uint64_t remote_port;
	struct hf_vm *peer_vm;
};

struct sockaddr_hf {
	sa_family_t family;
	uint32_t vm_id;
	uint64_t port;
};
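
/*
 * Addressing: family must be AF_HF and vm_id must name a secondary VM
 * (hf_sock_connect() returns -ENETUNREACH otherwise). Ports are arbitrary
 * 64-bit endpoint identifiers chosen by the peers; a socket's local port is
 * assigned from a global counter when it connects.
 */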

static struct proto hf_sock_proto = {
	.name = "hafnium",
	.owner = THIS_MODULE,
	.obj_size = sizeof(struct hf_sock),
};

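/*
 * Global driver state. hf_send_lock serializes use of the single send buffer
 * (hf_send_page) shared with the hypervisor. hf_local_port_hash maps local
 * ports to connected sockets; writers take hf_local_port_hash_lock while
 * readers rely on RCU. hf_irq and hf_cpuhp_state are recorded by the timer
 * platform driver so they can be torn down on removal.
 */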
static struct hf_vm *hf_vms;
static uint32_t hf_vm_count;
static struct page *hf_send_page;
static struct page *hf_recv_page;
static atomic64_t hf_next_port = ATOMIC64_INIT(0);
static DEFINE_SPINLOCK(hf_send_lock);
static DEFINE_HASHTABLE(hf_local_port_hash, 7);
static DEFINE_SPINLOCK(hf_local_port_hash_lock);
static int hf_irq;
static enum cpuhp_state hf_cpuhp_state;

/**
 * Retrieves a VM from its ID, returning NULL if the VM doesn't exist.
 */
static struct hf_vm *hf_vm_from_id(uint32_t vm_id)
{
	if (vm_id < FIRST_SECONDARY_VM_ID ||
	    vm_id >= FIRST_SECONDARY_VM_ID + hf_vm_count)
		return NULL;

	return &hf_vms[vm_id - FIRST_SECONDARY_VM_ID];
}

/**
 * Wakes up the kernel thread responsible for running the given vcpu.
 *
 * Returns 0 if the thread was already running, 1 otherwise.
 */
static int hf_vcpu_wake_up(struct hf_vcpu *vcpu)
{
	/* Set a flag indicating that the thread should not go to sleep. */
	atomic_set(&vcpu->abort_sleep, 1);

	/* Set the thread to running state. */
	return wake_up_process(vcpu->task);
}

/**
 * Puts the current thread to sleep. The current thread must be responsible for
 * running the given vcpu.
 *
 * Going to sleep will fail if hf_vcpu_wake_up() or kthread_stop() was called on
 * this vcpu/thread since the last time it [re]started running.
 */
static void hf_vcpu_sleep(struct hf_vcpu *vcpu)
{
	int abort;

	set_current_state(TASK_INTERRUPTIBLE);

	/*
	 * Check the sleep-abort flag after making the thread interruptible.
	 * This ordering pairs with the atomic_set()/wake_up_process() pair in
	 * hf_vcpu_wake_up() and ensures a concurrent wake-up is never lost.
	 */
	abort = atomic_read(&vcpu->abort_sleep);
	if (!abort && !kthread_should_stop())
		schedule();

	/* Set state back to running on the way out. */
	set_current_state(TASK_RUNNING);
}

/**
 * Wakes up the thread associated with the vcpu that owns the given timer. This
 * is called when the timer the thread is waiting on expires.
 */
static enum hrtimer_restart hf_vcpu_timer_expired(struct hrtimer *timer)
{
	struct hf_vcpu *vcpu = container_of(timer, struct hf_vcpu, timer);
	/* TODO: Inject interrupt. */
	hf_vcpu_wake_up(vcpu);
	return HRTIMER_NORESTART;
}

/**
 * This function is called when Hafnium requests that the primary VM wake up a
 * vCPU that belongs to a secondary VM.
 *
 * It wakes up the thread if it's sleeping, or kicks it if it's already running.
 *
 * If vcpu is HF_INVALID_VCPU, it injects an interrupt into a vCPU belonging to
 * the specified VM.
 */
static void hf_handle_wake_up_request(uint32_t vm_id, uint16_t vcpu,
				      uint64_t int_id)
{
	struct hf_vm *vm = hf_vm_from_id(vm_id);

	if (!vm) {
		pr_warn("Request to wake up non-existent VM id: %u\n", vm_id);
		return;
	}

	if (vcpu >= vm->vcpu_count) {
		int64_t ret;

		if (vcpu != HF_INVALID_VCPU) {
			pr_warn("Request to wake up non-existent vCPU: %u.%u\n",
				vm_id, vcpu);
			return;
		}

		/*
		 * TODO: For now we're picking the first vcpu to interrupt, but
		 * we want to be smarter.
		 */
		vcpu = 0;
		ret = hf_interrupt_inject(vm_id, vcpu, int_id);
		if (ret != 1) {
			/* We don't need to wake up the vcpu. */
			return;
		}
	}

	if (hf_vcpu_wake_up(&vm->vcpu[vcpu]) == 0) {
		/*
		 * The task was already running (presumably on a different
		 * physical CPU); interrupt it. This gives Hafnium a chance to
		 * inject any new interrupts.
		 */
		kick_process(vm->vcpu[vcpu].task);
	}
}

/**
 * Notifies all waiters on the given VM.
 */
static void hf_notify_waiters(uint32_t vm_id)
{
	int64_t ret;

	while ((ret = hf_mailbox_waiter_get(vm_id)) != -1) {
		if (ret == HF_PRIMARY_VM_ID) {
			/*
			 * TODO: Use this information when implementing per-vm
			 * queues.
			 */
		} else {
			hf_handle_wake_up_request(ret, HF_INVALID_VCPU,
						  HF_MAILBOX_WRITABLE_INTID);
		}
	}
}

/**
 * Handles a message delivered to this VM by validating that it's well-formed
 * and then queueing it for delivery to the appropriate socket.
 */
static void hf_handle_message(struct hf_vm *sender, const void *ptr, size_t len)
{
	struct hf_sock *hsock;
	const struct hf_msg_hdr *hdr = ptr;
	struct sk_buff *skb;
	int err;

	/* Ignore messages that are too small to hold a header. */
	if (len < sizeof(struct hf_msg_hdr))
		return;

	len -= sizeof(struct hf_msg_hdr);

	/* Go through the colliding sockets. */
	rcu_read_lock();
	hash_for_each_possible_rcu(hf_local_port_hash, hsock, sk.sk_node,
				   hdr->dst_port) {
		if (hsock->peer_vm == sender &&
		    hsock->remote_port == hdr->src_port) {
			sock_hold(&hsock->sk);
			break;
		}
	}
	rcu_read_unlock();

	/* Nothing to do if we couldn't find the target. */
	if (!hsock)
		return;

	/*
	 * TODO: From this point on, there are two failure paths: when we
	 * create the skb below, and when we enqueue it to the socket. What
	 * should we do if they fail? Ideally we would have some form of flow
	 * control to prevent message loss, but how to do it efficiently?
	 *
	 * One option is to have a pre-allocated message that indicates to the
	 * sender that a message was dropped. This way we guarantee that the
	 * sender will be aware of loss and should back-off.
	 */
	/* Create the skb. */
	skb = alloc_skb(len, GFP_KERNEL);
	if (!skb)
		goto exit;

	/* The payload starts immediately after the header. */
	memcpy(skb_put(skb, len), hdr + 1, len);

	/*
	 * Add the skb to the receive queue of the target socket. On success it
	 * calls sk->sk_data_ready, which is currently set to sock_def_readable,
	 * which wakes up any waiters.
	 */
	err = sock_queue_rcv_skb(&hsock->sk, skb);
	if (err)
		kfree_skb(skb);

exit:
	sock_put(&hsock->sk);
}

/**
 * This is the main loop of each vcpu. Each secondary vCPU is backed by one
 * such kernel thread; Hafnium only runs a secondary vCPU while its thread is
 * inside hf_vcpu_run(), so Linux's scheduler effectively schedules the
 * secondary VMs.
 */
static int hf_vcpu_thread(void *data)
{
	struct hf_vcpu *vcpu = data;
	struct hf_vcpu_run_return ret;

	hrtimer_init(&vcpu->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	vcpu->timer.function = &hf_vcpu_timer_expired;

	while (!kthread_should_stop()) {
		uint32_t i;

		/*
		 * We're about to run the vcpu, so we can reset the abort-sleep
		 * flag.
		 */
		atomic_set(&vcpu->abort_sleep, 0);

		/* Call into Hafnium to run vcpu. */
		ret = hf_vcpu_run(vcpu->vm->id, vcpu->vcpu_index);

		switch (ret.code) {
		/* Preempted. */
		case HF_VCPU_RUN_PREEMPTED:
			if (need_resched())
				schedule();
			break;

		/* Yield. */
		case HF_VCPU_RUN_YIELD:
			if (!kthread_should_stop())
				schedule();
			break;

		/* WFI. */
		case HF_VCPU_RUN_WAIT_FOR_INTERRUPT:
			hf_vcpu_sleep(vcpu);
			break;

		/* Wake up another vcpu. */
		case HF_VCPU_RUN_WAKE_UP:
			hf_handle_wake_up_request(ret.wake_up.vm_id,
						  ret.wake_up.vcpu,
						  HF_MAILBOX_READABLE_INTID);
			break;

		/* Response available. */
		case HF_VCPU_RUN_MESSAGE:
			hf_handle_message(vcpu->vm, page_address(hf_recv_page),
					  ret.message.size);
			if (hf_mailbox_clear() == 1)
				hf_notify_waiters(HF_PRIMARY_VM_ID);
			break;

		/* Sleep until the timer fires or the vcpu is woken early. */
		case HF_VCPU_RUN_SLEEP:
			hrtimer_start(&vcpu->timer, ret.sleep.ns,
				      HRTIMER_MODE_REL);
			hf_vcpu_sleep(vcpu);
			hrtimer_cancel(&vcpu->timer);
			break;

		/* Notify all waiters. */
		case HF_VCPU_RUN_NOTIFY_WAITERS:
			hf_notify_waiters(vcpu->vm->id);
			break;

		/* Aborted: wake the VM's other vcpus, then sleep. */
		case HF_VCPU_RUN_ABORTED:
			for (i = 0; i < vcpu->vm->vcpu_count; i++) {
				if (i == vcpu->vcpu_index)
					continue;
				hf_handle_wake_up_request(vcpu->vm->id, i, 0);
			}
			hf_vcpu_sleep(vcpu);
			break;
		}
	}

	return 0;
}

/**
 * Converts a pointer to a struct sock into a pointer to a struct hf_sock. It
 * relies on the fact that the first field of hf_sock is a sock.
 */
static struct hf_sock *hsock_from_sk(struct sock *sk)
{
	return (struct hf_sock *)sk;
}

/**
 * This is called when the last reference to the outer socket is released. For
 * example, if it's a user-space socket, when the last file descriptor pointing
 * to this socket is closed.
 *
 * It begins cleaning up resources, though some can only be cleaned up after all
 * references to the underlying socket are released, which is handled by
 * hf_sock_destruct().
 */
static int hf_sock_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct hf_sock *hsock = hsock_from_sk(sk);
	unsigned long flags;

	if (!sk)
		return 0;

	/* Shutdown for both send and receive. */
	lock_sock(sk);
	sk->sk_shutdown |= RCV_SHUTDOWN | SEND_SHUTDOWN;
	sk->sk_state_change(sk);
	release_sock(sk);

	/* Remove from the hash table, so lookups from now on won't find it. */
	spin_lock_irqsave(&hf_local_port_hash_lock, flags);
	hash_del_rcu(&hsock->sk.sk_node);
	spin_unlock_irqrestore(&hf_local_port_hash_lock, flags);

	/*
	 * TODO: When we implement a tx queue, we need to clear it here so that
	 * sk_wmem_alloc will not prevent sk from being freed (sk_free).
	 */

	/*
	 * Wait for in-flight lookups to finish. We need to do this here because
	 * in-flight lookups rely on the reference to the socket we're about to
	 * release.
	 */
	synchronize_rcu();
	sock_put(sk);
	sock->sk = NULL;

	return 0;
}

/**
 * This is called when there are no more references to the socket. It frees all
 * resources that haven't been freed during release.
 */
static void hf_sock_destruct(struct sock *sk)
{
	/*
	 * Clear the receive queue now that the handler cannot add any more
	 * skbs to it.
	 */
	skb_queue_purge(&sk->sk_receive_queue);
}

/**
 * Connects the Hafnium socket to the provided VM and port. After the socket is
 * connected, it can be used to exchange datagrams with the specified peer.
 */
static int hf_sock_connect(struct socket *sock, struct sockaddr *saddr, int len,
			   int connect_flags)
{
	struct sock *sk = sock->sk;
	struct hf_sock *hsock = hsock_from_sk(sk);
	struct hf_vm *vm;
	struct sockaddr_hf *addr;
	int err;
	unsigned long flags;

	/* Basic address validation. */
	if (len < sizeof(struct sockaddr_hf) || saddr->sa_family != AF_HF)
		return -EINVAL;

	addr = (struct sockaddr_hf *)saddr;
	vm = hf_vm_from_id(addr->vm_id);
	if (!vm)
		return -ENETUNREACH;

	/*
	 * TODO: Once we implement access control in Hafnium, check that the
	 * caller is allowed to contact the specified VM. Return -ECONNREFUSED
	 * if access is denied.
	 */

	/* Take lock to make sure state doesn't change as we connect. */
	lock_sock(sk);

	/* Only unconnected sockets are allowed to become connected. */
	if (sock->state != SS_UNCONNECTED) {
		err = -EISCONN;
		goto exit;
	}

	hsock->local_port = atomic64_inc_return(&hf_next_port);
	hsock->remote_port = addr->port;
	hsock->peer_vm = vm;

	sock->state = SS_CONNECTED;

	/* Add socket to hash table now that it's fully initialised. */
	spin_lock_irqsave(&hf_local_port_hash_lock, flags);
	hash_add_rcu(hf_local_port_hash, &sk->sk_node, hsock->local_port);
	spin_unlock_irqrestore(&hf_local_port_hash_lock, flags);

	err = 0;
exit:
	release_sock(sk);
	return err;
}

/**
 * Sends the given skb to the appropriate VM by calling Hafnium. It will also
 * trigger the wake up of a recipient VM.
 *
 * Takes ownership of the skb on success.
 */
static int hf_send_skb(struct sk_buff *skb)
{
	unsigned long flags;
	int64_t ret;
	struct hf_sock *hsock = hsock_from_sk(skb->sk);
	struct hf_vm *vm = hsock->peer_vm;

	/*
	 * Call Hafnium under the send lock so that we serialize the use of the
	 * global send buffer.
	 */
	spin_lock_irqsave(&hf_send_lock, flags);
	memcpy(page_address(hf_send_page), skb->data, skb->len);
	ret = hf_mailbox_send(vm->id, skb->len, false);
	spin_unlock_irqrestore(&hf_send_lock, flags);

	/* Report a failed send as -EAGAIN so that callers may retry. */
	if (ret < 0)
		return -EAGAIN;

	/* Wake some vcpu up to handle the new message. */
	hf_handle_wake_up_request(vm->id, ret, HF_MAILBOX_READABLE_INTID);

	kfree_skb(skb);

	return 0;
}

/**
 * Determines if the given socket is in the connected state. It acquires and
 * releases the socket lock.
 */
static bool hf_sock_is_connected(struct socket *sock)
{
	bool ret;

	lock_sock(sock->sk);
	ret = sock->state == SS_CONNECTED;
	release_sock(sock->sk);

	return ret;
}

/**
 * Sends a message to the VM & port the socket is connected to. All variants
 * of write/send/sendto/sendmsg eventually call this function.
 */
static int hf_sock_sendmsg(struct socket *sock, struct msghdr *m, size_t len)
{
	struct sock *sk = sock->sk;
	struct sk_buff *skb;
	int err;
	struct hf_msg_hdr *hdr;
	struct hf_sock *hsock = hsock_from_sk(sk);

	/* Check length. */
	if (len > HF_MAILBOX_SIZE - sizeof(struct hf_msg_hdr))
		return -EMSGSIZE;

	/* We don't allow the destination address to be specified. */
	if (m->msg_namelen > 0)
		return -EISCONN;

	/* We don't support out of band messages. */
	if (m->msg_flags & MSG_OOB)
		return -EOPNOTSUPP;

	/*
	 * Ensure that the socket is connected. We don't need to hold the socket
	 * lock (acquired and released by hf_sock_is_connected) for the
	 * remainder of the function because the fields we care about are
	 * immutable once the state is SS_CONNECTED.
	 */
	if (!hf_sock_is_connected(sock))
		return -ENOTCONN;

	/*
	 * Allocate an skb for this write. If there isn't enough room in the
	 * socket's send buffer (sk_wmem_alloc >= sk_sndbuf), this will block
	 * (if it's a blocking call). On success, it increments sk_wmem_alloc
	 * and sets up the skb such that sk_wmem_alloc gets decremented when
	 * the skb is freed (sock_wfree gets called).
	 */
	skb = sock_alloc_send_skb(sk, len + sizeof(struct hf_msg_hdr),
				  m->msg_flags & MSG_DONTWAIT, &err);
	if (!skb)
		return err;

	/* Reserve room for the header and initialise it. */
	skb_reserve(skb, sizeof(struct hf_msg_hdr));
	hdr = skb_push(skb, sizeof(struct hf_msg_hdr));
	hdr->src_port = hsock->local_port;
	hdr->dst_port = hsock->remote_port;

	/* Allocate area for the contents, then copy into skb. */
	if (!copy_from_iter_full(skb_put(skb, len), len, &m->msg_iter)) {
		err = -EFAULT;
		goto err_cleanup;
	}

	/*
	 * TODO: We currently do this inline, but when we have support for
	 * readiness notification from Hafnium, we must add this to a per-VM tx
	 * queue that can make progress when the VM becomes writable. This will
	 * fix send buffering and poll readiness notification.
	 */
	err = hf_send_skb(skb);
	if (err)
		goto err_cleanup;

	return 0;

err_cleanup:
	kfree_skb(skb);
	return err;
}

/**
 * Receives a message originating from the VM & port the socket is connected
 * to. All variants of read/recv/recvfrom/recvmsg eventually call this
 * function.
 */
static int hf_sock_recvmsg(struct socket *sock, struct msghdr *m, size_t len,
			   int flags)
{
	struct sock *sk = sock->sk;
	struct sk_buff *skb;
	int err;
	size_t copy_len;

	if (!hf_sock_is_connected(sock))
		return -ENOTCONN;

	/* Grab the next skb from the receive queue. */
	skb = skb_recv_datagram(sk, flags, flags & MSG_DONTWAIT, &err);
	if (!skb)
		return err;

	/* Make sure we don't copy more than what fits in the output buffer. */
	copy_len = skb->len;
	if (copy_len > len) {
		copy_len = len;
		m->msg_flags |= MSG_TRUNC;
	}

	/* Make sure we don't overflow the return value type. */
	if (copy_len > INT_MAX) {
		copy_len = INT_MAX;
		m->msg_flags |= MSG_TRUNC;
	}

	/* Copy skb to output iterator, then free it. */
	err = skb_copy_datagram_msg(skb, 0, m, copy_len);
	skb_free_datagram(sk, skb);
	if (err)
		return err;

	return copy_len;
}

/**
 * This function is called when a Hafnium socket is created. It initialises all
 * state such that the caller will be able to connect the socket and then send
 * and receive messages through it.
 */
static int hf_sock_create(struct net *net, struct socket *sock, int protocol,
			  int kern)
{
	static const struct proto_ops ops = {
		.family = PF_HF,
		.owner = THIS_MODULE,
		.release = hf_sock_release,
		.bind = sock_no_bind,
		.connect = hf_sock_connect,
		.socketpair = sock_no_socketpair,
		.accept = sock_no_accept,
		.ioctl = sock_no_ioctl,
		.listen = sock_no_listen,
		.shutdown = sock_no_shutdown,
		.setsockopt = sock_no_setsockopt,
		.getsockopt = sock_no_getsockopt,
		.sendmsg = hf_sock_sendmsg,
		.recvmsg = hf_sock_recvmsg,
		.mmap = sock_no_mmap,
		.sendpage = sock_no_sendpage,
		.poll = datagram_poll,
	};
	struct sock *sk;

	if (sock->type != SOCK_DGRAM)
		return -ESOCKTNOSUPPORT;

	if (protocol != 0)
		return -EPROTONOSUPPORT;

	/*
	 * For now we only allow callers with sys admin capability to create
	 * Hafnium sockets.
	 */
	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	/* Allocate and initialise socket. */
	sk = sk_alloc(net, PF_HF, GFP_KERNEL, &hf_sock_proto, kern);
	if (!sk)
		return -ENOMEM;

	sock_init_data(sock, sk);

	sk->sk_destruct = hf_sock_destruct;
	sock->ops = &ops;
	sock->state = SS_UNCONNECTED;

	return 0;
}
718
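/*
 * Userspace usage sketch (requires CAP_SYS_ADMIN; the vm_id and port values
 * are illustrative only, and userspace must mirror the AF_HF/PF_HF constants
 * and struct sockaddr_hf definitions above):
 *
 *	struct sockaddr_hf addr = {
 *		.family = AF_HF, .vm_id = 1, .port = 10,
 *	};
 *	int fd = socket(PF_HF, SOCK_DGRAM, 0);
 *
 *	connect(fd, (struct sockaddr *)&addr, sizeof(addr));
 *	send(fd, buf, buf_len, 0);
 *	recv(fd, buf, sizeof(buf), 0);
 */
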
/**
 * Frees all resources, including threads, associated with the Hafnium driver.
 */
static void hf_free_resources(void)
{
	uint32_t i, j;

	/*
	 * First stop all worker threads. We need to do this before freeing
	 * resources because workers may reference each other, so it is only
	 * safe to free resources after they have all stopped.
	 */
	for (i = 0; i < hf_vm_count; i++) {
		struct hf_vm *vm = &hf_vms[i];

		for (j = 0; j < vm->vcpu_count; j++)
			kthread_stop(vm->vcpu[j].task);
	}

	/* Free resources. */
	for (i = 0; i < hf_vm_count; i++) {
		struct hf_vm *vm = &hf_vms[i];

		for (j = 0; j < vm->vcpu_count; j++)
			put_task_struct(vm->vcpu[j].task);
		kfree(vm->vcpu);
	}

	kfree(hf_vms);
}

/**
 * Handles the hypervisor timer interrupt.
 */
static irqreturn_t hf_nop_irq_handler(int irq, void *dev)
{
	/*
	 * No need to do anything, the interrupt only exists to return to the
	 * primary vCPU so that the virtual timer will be restored and fire as
	 * normal.
	 */
	return IRQ_HANDLED;
}

/**
 * Enables the hypervisor timer interrupt on a CPU, when it starts or after the
 * driver is first loaded.
 */
static int hf_starting_cpu(unsigned int cpu)
{
	if (hf_irq != 0) {
		/* Enable the interrupt, and set it to be edge-triggered. */
		enable_percpu_irq(hf_irq, IRQ_TYPE_EDGE_RISING);
	}

	return 0;
}

/**
 * Disables the hypervisor timer interrupt on a CPU when it is powered down.
 */
static int hf_dying_cpu(unsigned int cpu)
{
	if (hf_irq != 0) {
		/* Disable the interrupt while the CPU is asleep. */
		disable_percpu_irq(hf_irq);
	}

	return 0;
}

/**
 * Registers for the hypervisor timer interrupt.
 */
static int hf_int_driver_probe(struct platform_device *pdev)
{
	int irq;
	int ret;

	/*
	 * Register a handler for the hypervisor timer IRQ, as it is needed for
	 * Hafnium to emulate the virtual timer for Linux while a secondary vCPU
	 * is running.
	 */
	irq = platform_get_irq(pdev, ARCH_TIMER_HYP_PPI);
	if (irq < 0) {
		pr_err("Error getting hypervisor timer IRQ: %d\n", irq);
		return irq;
	}
	hf_irq = irq;

	ret = request_percpu_irq(irq, hf_nop_irq_handler, HYPERVISOR_TIMER_NAME,
				 pdev);
	if (ret != 0) {
		pr_err("Error registering hypervisor timer IRQ %d: %d\n",
		       irq, ret);
		return ret;
	}
	pr_info("Hafnium registered for IRQ %d\n", irq);
	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN,
				"hafnium/hypervisor_timer:starting",
				hf_starting_cpu, hf_dying_cpu);
	if (ret < 0) {
		pr_err("Error enabling timer on all CPUs: %d\n", ret);
		free_percpu_irq(irq, pdev);
		return ret;
	}
	hf_cpuhp_state = ret;

	return 0;
}

/**
 * Unregisters for the hypervisor timer interrupt.
 */
static int hf_int_driver_remove(struct platform_device *pdev)
{
	/*
	 * This will cause hf_dying_cpu to be called on each CPU, which will
	 * disable the IRQs.
	 */
	cpuhp_remove_state(hf_cpuhp_state);
	free_percpu_irq(hf_irq, pdev);

	return 0;
}
845
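/*
 * Match the architected timer's device-tree node; probing it lets the driver
 * look up the EL2 (hypervisor) timer PPI via
 * platform_get_irq(pdev, ARCH_TIMER_HYP_PPI) above.
 */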
static const struct of_device_id hf_int_driver_id[] = {
	{.compatible = "arm,armv7-timer"},
	{.compatible = "arm,armv8-timer"},
	{}
};

static struct platform_driver hf_int_driver = {
	.driver = {
		.name = HYPERVISOR_TIMER_NAME,
		.owner = THIS_MODULE,
		.of_match_table = of_match_ptr(hf_int_driver_id),
	},
	.probe = hf_int_driver_probe,
	.remove = hf_int_driver_remove,
};

/**
 * Initializes the Hafnium driver by creating a thread for each vCPU of each
 * virtual machine.
 */
static int __init hf_init(void)
{
	static const struct net_proto_family proto_family = {
		.family = PF_HF,
		.create = hf_sock_create,
		.owner = THIS_MODULE,
	};
	int64_t ret;
	uint32_t i, j;
	uint32_t total_vm_count;
	uint32_t total_vcpu_count;

	/* Allocate a page for send and receive buffers. */
	hf_send_page = alloc_page(GFP_KERNEL);
	if (!hf_send_page) {
		pr_err("Unable to allocate send buffer\n");
		return -ENOMEM;
	}

	hf_recv_page = alloc_page(GFP_KERNEL);
	if (!hf_recv_page) {
		__free_page(hf_send_page);
		pr_err("Unable to allocate receive buffer\n");
		return -ENOMEM;
	}

	/*
	 * Configure both addresses. Once configured, we cannot free these pages
	 * because the hypervisor will use them, even if the module is
	 * unloaded.
	 */
	ret = hf_vm_configure(page_to_phys(hf_send_page),
			      page_to_phys(hf_recv_page));
	if (ret) {
		__free_page(hf_send_page);
		__free_page(hf_recv_page);
		/*
		 * TODO: We may want to grab this information from hypervisor
		 * and go from there.
		 */
		pr_err("Unable to configure VM\n");
		return -EIO;
	}

	/* Get the number of VMs. */
	ret = hf_vm_get_count();
	if (ret < 0) {
		pr_err("Unable to retrieve number of VMs: %lld\n", ret);
		return -EIO;
	}

	/* Confirm the maximum number of VMs looks sane. */
	BUILD_BUG_ON(CONFIG_HAFNIUM_MAX_VMS < 1);
	BUILD_BUG_ON(CONFIG_HAFNIUM_MAX_VMS > U16_MAX);

	/* Validate the number of VMs. There must be at least the primary. */
	if (ret < 1 || ret > CONFIG_HAFNIUM_MAX_VMS) {
		pr_err("Number of VMs is out of range: %lld\n", ret);
		return -EDQUOT;
	}

	/* Only track the secondary VMs. */
	total_vm_count = ret - 1;
	hf_vms =
		kmalloc_array(total_vm_count, sizeof(struct hf_vm), GFP_KERNEL);
	if (!hf_vms)
		return -ENOMEM;

	/* Initialize each VM. */
	total_vcpu_count = 0;
	for (i = 0; i < total_vm_count; i++) {
		struct hf_vm *vm = &hf_vms[i];

		/* Adjust the ID as only the secondaries are tracked. */
		vm->id = i + FIRST_SECONDARY_VM_ID;

		ret = hf_vcpu_get_count(vm->id);
		if (ret < 0) {
			pr_err("HF_VCPU_GET_COUNT failed for vm=%u: %lld\n",
			       vm->id, ret);
			ret = -EIO;
			goto fail_with_cleanup;
		}

		/* Avoid overflowing the vcpu count. */
		if (ret > (U32_MAX - total_vcpu_count)) {
			pr_err("Too many vcpus: %u\n", total_vcpu_count);
			ret = -EDQUOT;
			goto fail_with_cleanup;
		}

		/* Confirm the maximum number of VCPUs looks sane. */
		BUILD_BUG_ON(CONFIG_HAFNIUM_MAX_VCPUS < 1);
		BUILD_BUG_ON(CONFIG_HAFNIUM_MAX_VCPUS > U16_MAX);

		/* Enforce the limit on vcpus. */
		total_vcpu_count += ret;
		if (total_vcpu_count > CONFIG_HAFNIUM_MAX_VCPUS) {
			pr_err("Too many vcpus: %u\n", total_vcpu_count);
			ret = -EDQUOT;
			goto fail_with_cleanup;
		}

		vm->vcpu_count = ret;
		vm->vcpu = kmalloc_array(vm->vcpu_count, sizeof(struct hf_vcpu),
					 GFP_KERNEL);
		if (!vm->vcpu) {
			ret = -ENOMEM;
			goto fail_with_cleanup;
		}

		/* Update the number of initialized VMs. */
		hf_vm_count = i + 1;

		/* Create a kernel thread for each vcpu. */
		for (j = 0; j < vm->vcpu_count; j++) {
			struct hf_vcpu *vcpu = &vm->vcpu[j];

			vcpu->task =
				kthread_create(hf_vcpu_thread, vcpu,
					       "vcpu_thread_%u_%u", vm->id, j);
			if (IS_ERR(vcpu->task)) {
				pr_err("Error creating task (vm=%u,vcpu=%u): %ld\n",
				       vm->id, j, PTR_ERR(vcpu->task));
				vm->vcpu_count = j;
				ret = PTR_ERR(vcpu->task);
				goto fail_with_cleanup;
			}

			get_task_struct(vcpu->task);
			vcpu->vm = vm;
			vcpu->vcpu_index = j;
			atomic_set(&vcpu->abort_sleep, 0);
		}
	}

	/* Register protocol and socket family. */
	ret = proto_register(&hf_sock_proto, 0);
	if (ret) {
		pr_err("Unable to register protocol: %lld\n", ret);
		goto fail_with_cleanup;
	}

	ret = sock_register(&proto_family);
	if (ret) {
		pr_err("Unable to register Hafnium's socket family: %lld\n",
		       ret);
		goto fail_unregister_proto;
	}

	/*
	 * Register as a driver for the timer device, so we can register a
	 * handler for the hypervisor timer IRQ.
	 */
	ret = platform_driver_register(&hf_int_driver);
	if (ret != 0) {
		pr_err("Error registering timer driver: %lld\n", ret);
		goto fail_unregister_socket;
	}

	/*
	 * Start running threads now that all is initialized.
	 *
	 * Any failures from this point on must also unregister the driver with
	 * platform_driver_unregister().
	 */
	for (i = 0; i < hf_vm_count; i++) {
		struct hf_vm *vm = &hf_vms[i];

		for (j = 0; j < vm->vcpu_count; j++)
			wake_up_process(vm->vcpu[j].task);
	}

	/* Dump vm/vcpu count info. */
	pr_info("Hafnium successfully loaded with %u VMs:\n", hf_vm_count);
	for (i = 0; i < hf_vm_count; i++) {
		struct hf_vm *vm = &hf_vms[i];

		pr_info("\tVM %u: %u vCPUS\n", vm->id, vm->vcpu_count);
	}

	return 0;

fail_unregister_socket:
	sock_unregister(PF_HF);
fail_unregister_proto:
	proto_unregister(&hf_sock_proto);
fail_with_cleanup:
	hf_free_resources();
	return ret;
}

/**
 * Frees up all resources used by the Hafnium driver in preparation for
 * unloading it.
 */
static void __exit hf_exit(void)
{
	pr_info("Preparing to unload Hafnium\n");
	sock_unregister(PF_HF);
	proto_unregister(&hf_sock_proto);
	hf_free_resources();
	platform_driver_unregister(&hf_int_driver);
	pr_info("Hafnium ready to unload\n");
}

MODULE_LICENSE("GPL v2");

module_init(hf_init);
module_exit(hf_exit);