// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright 2018 Google LLC
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <clocksource/arm_arch_timer.h>
#include <linux/atomic.h>
#include <linux/cpuhotplug.h>
#include <linux/hrtimer.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/net.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/sched/task.h>
#include <linux/slab.h>
#include <net/sock.h>

#include <hf/call.h>

/* TODO: Reusing AF_ECONET for now as it's otherwise unused. */
#define AF_HF AF_ECONET
#define PF_HF AF_HF

#define HYPERVISOR_TIMER_NAME "el2_timer"

#define CONFIG_HAFNIUM_MAX_VMS 16
#define CONFIG_HAFNIUM_MAX_VCPUS 32

#define FIRST_SECONDARY_VM_ID 1

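/*
 * Per-vCPU state: the owning VM, the vCPU's index within it, the kernel
 * thread that runs it, a flag used to cancel a pending sleep, and the timer
 * that bounds sleeps requested by Hafnium.
 */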
struct hf_vcpu {
	struct hf_vm *vm;
	uint32_t vcpu_index;
	struct task_struct *task;
	atomic_t abort_sleep;
	struct hrtimer timer;
};

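/* Per-VM state: the ID assigned by Hafnium and the VM's vCPUs. */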
struct hf_vm {
	uint32_t id;
	uint32_t vcpu_count;
	struct hf_vcpu *vcpu;
};

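/*
 * Header prepended to every datagram carried over a Hafnium socket: the
 * 64-bit source and destination port numbers used to match the message to a
 * connected socket.
 */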
struct hf_msg_hdr {
	uint64_t src_port;
	uint64_t dst_port;
};

struct hf_sock {
	/* This needs to be the first field. */
	struct sock sk;

	/*
	 * The following fields are immutable after the socket transitions to
	 * SS_CONNECTED state.
	 */
	uint64_t local_port;
	uint64_t remote_port;
	struct hf_vm *peer_vm;
};

struct sockaddr_hf {
	sa_family_t family;
	uint32_t vm_id;
	uint64_t port;
};

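/*
 * Illustrative user-space usage of this address format (a sketch only, not
 * part of the driver; the VM ID and port are arbitrary example values):
 *
 *	int fd = socket(PF_HF, SOCK_DGRAM, 0);
 *	struct sockaddr_hf addr = {
 *		.family = AF_HF,
 *		.vm_id = 1,
 *		.port = 10,
 *	};
 *	connect(fd, (struct sockaddr *)&addr, sizeof(addr));
 *	send(fd, buf, sizeof(buf), 0);
 */
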
static struct proto hf_sock_proto = {
	.name = "hafnium",
	.owner = THIS_MODULE,
	.obj_size = sizeof(struct hf_sock),
};

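/*
 * Global state: the secondary VMs tracked by the driver, the pages shared
 * with the hypervisor for sending and receiving messages, the counter used
 * to allocate local port numbers, a hash table mapping local ports to
 * sockets, and the hypervisor timer IRQ number.
 */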
static struct hf_vm *hf_vms;
static uint32_t hf_vm_count;
static struct page *hf_send_page;
static struct page *hf_recv_page;
static atomic64_t hf_next_port = ATOMIC64_INIT(0);
static DEFINE_SPINLOCK(hf_send_lock);
static DEFINE_HASHTABLE(hf_local_port_hash, 7);
static DEFINE_SPINLOCK(hf_local_port_hash_lock);
static int hf_irq;

/**
 * Retrieves a VM from its ID, returning NULL if the VM doesn't exist.
 */
static struct hf_vm *hf_vm_from_id(uint32_t vm_id)
{
	if (vm_id < FIRST_SECONDARY_VM_ID ||
	    vm_id >= FIRST_SECONDARY_VM_ID + hf_vm_count)
		return NULL;

	return &hf_vms[vm_id - FIRST_SECONDARY_VM_ID];
}

/**
 * Wakes up the kernel thread responsible for running the given vcpu.
 *
 * Returns 0 if the thread was already running, 1 otherwise.
 */
static int hf_vcpu_wake_up(struct hf_vcpu *vcpu)
{
	/* Set a flag indicating that the thread should not go to sleep. */
	atomic_set(&vcpu->abort_sleep, 1);

	/* Set the thread to running state. */
	return wake_up_process(vcpu->task);
}

/**
 * Puts the current thread to sleep. The current thread must be responsible for
 * running the given vcpu.
 *
 * Going to sleep will fail if hf_vcpu_wake_up() or kthread_stop() was called on
 * this vcpu/thread since the last time it [re]started running.
 */
static void hf_vcpu_sleep(struct hf_vcpu *vcpu)
{
	int abort;

	set_current_state(TASK_INTERRUPTIBLE);

	/* Check the sleep-abort flag after making thread interruptible. */
	abort = atomic_read(&vcpu->abort_sleep);
	if (!abort && !kthread_should_stop())
		schedule();

	/* Set state back to running on the way out. */
	set_current_state(TASK_RUNNING);
}

/**
 * Wakes up the thread associated with the vcpu that owns the given timer. This
 * is called when the timer the thread is waiting on expires.
 */
static enum hrtimer_restart hf_vcpu_timer_expired(struct hrtimer *timer)
{
	struct hf_vcpu *vcpu = container_of(timer, struct hf_vcpu, timer);

	/* TODO: Inject interrupt. */
	hf_vcpu_wake_up(vcpu);
	return HRTIMER_NORESTART;
}

/**
 * Handles a message delivered to this VM by validating that it's well-formed
 * and then queueing it for delivery to the appropriate socket.
 */
static void hf_handle_message(struct hf_vm *sender, const void *ptr, size_t len)
{
	struct hf_sock *hsock;
	const struct hf_msg_hdr *hdr = ptr;
	struct sk_buff *skb;
	int err;

	/* Ignore messages that are too small to hold a header. */
	if (len < sizeof(struct hf_msg_hdr))
		return;

	len -= sizeof(struct hf_msg_hdr);

	/* Go through the colliding sockets. */
	rcu_read_lock();
	hash_for_each_possible_rcu(hf_local_port_hash, hsock, sk.sk_node,
				   hdr->dst_port) {
		if (hsock->peer_vm == sender &&
		    hsock->remote_port == hdr->src_port) {
			sock_hold(&hsock->sk);
			break;
		}
	}
	rcu_read_unlock();

	/* Nothing to do if we couldn't find the target. */
	if (!hsock)
		return;

	/*
	 * TODO: From this point on, there are two failure paths: when we
	 * create the skb below, and when we enqueue it to the socket. What
	 * should we do if they fail? Ideally we would have some form of flow
	 * control to prevent message loss, but how to do it efficiently?
	 *
	 * One option is to have a pre-allocated message that indicates to the
	 * sender that a message was dropped. This way we guarantee that the
	 * sender will be aware of loss and should back-off.
	 */
	/* Create the skb. */
	skb = alloc_skb(len, GFP_KERNEL);
	if (!skb)
		goto exit;

	memcpy(skb_put(skb, len), hdr + 1, len);

	/*
	 * Add the skb to the receive queue of the target socket. On success it
	 * calls sk->sk_data_ready, which is currently set to sock_def_readable,
	 * which wakes up any waiters.
	 */
	err = sock_queue_rcv_skb(&hsock->sk, skb);
	if (err)
		kfree_skb(skb);

exit:
	sock_put(&hsock->sk);
}

/**
 * This function is called when Hafnium requests that the primary VM wake up a
 * vCPU that belongs to a secondary VM.
 *
 * It wakes up the thread if it's sleeping, or kicks it if it's already running.
 *
 * If vCPU is HF_INVALID_VCPU, it injects an interrupt into a vCPU belonging to
 * the specified VM.
 */
static void hf_handle_wake_up_request(uint32_t vm_id, uint16_t vcpu,
				      uint64_t int_id)
{
	struct hf_vm *vm = hf_vm_from_id(vm_id);

	if (!vm) {
		pr_warn("Request to wake up non-existent VM id: %u\n", vm_id);
		return;
	}

	if (vcpu >= vm->vcpu_count) {
		int64_t ret;

		if (vcpu != HF_INVALID_VCPU) {
			pr_warn("Request to wake up non-existent vCPU: %u.%u\n",
				vm_id, vcpu);
			return;
		}

		/*
		 * TODO: For now we're picking the first vcpu to interrupt, but
		 * we want to be smarter.
		 */
		vcpu = 0;
		ret = hf_interrupt_inject(vm_id, vcpu, int_id);
		if (ret != 1) {
			/* We don't need to wake up the vcpu. */
			return;
		}
	}

	if (hf_vcpu_wake_up(&vm->vcpu[vcpu]) == 0) {
		/*
		 * The task was already running (presumably on a different
		 * physical CPU); interrupt it. This gives Hafnium a chance to
		 * inject any new interrupts.
		 */
		kick_process(vm->vcpu[vcpu].task);
	}
}

/**
 * Notifies all waiters on the given VM.
 */
static void hf_notify_waiters(uint32_t vm_id)
{
	int64_t ret;

	while ((ret = hf_mailbox_waiter_get(vm_id)) != -1) {
		if (ret == HF_PRIMARY_VM_ID) {
			/*
			 * TODO: Use this information when implementing per-vm
			 * queues.
			 */
		} else {
			hf_handle_wake_up_request(ret, HF_INVALID_VCPU,
						  HF_MAILBOX_WRITABLE_INTID);
		}
	}
}

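/*
 * Each secondary vCPU is driven by a dedicated kernel thread running the
 * loop below, so the Linux scheduler ultimately decides when secondary VMs
 * get physical CPU time.
 */
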
/**
 * This is the main loop of each vcpu.
 */
static int hf_vcpu_thread(void *data)
{
	struct hf_vcpu *vcpu = data;
	struct hf_vcpu_run_return ret;

	hrtimer_init(&vcpu->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	vcpu->timer.function = &hf_vcpu_timer_expired;

	while (!kthread_should_stop()) {
		uint32_t i;

		/*
		 * We're about to run the vcpu, so we can reset the abort-sleep
		 * flag.
		 */
		atomic_set(&vcpu->abort_sleep, 0);

		/* Call into Hafnium to run vcpu. */
		ret = hf_vcpu_run(vcpu->vm->id, vcpu->vcpu_index);

		switch (ret.code) {
		/* Preempted. */
		case HF_VCPU_RUN_PREEMPTED:
			if (need_resched())
				schedule();
			break;

		/* Yield. */
		case HF_VCPU_RUN_YIELD:
			if (!kthread_should_stop())
				schedule();
			break;

		/* WFI. */
		case HF_VCPU_RUN_WAIT_FOR_INTERRUPT:
			hf_vcpu_sleep(vcpu);
			break;

		/* Wake up another vcpu. */
		case HF_VCPU_RUN_WAKE_UP:
			hf_handle_wake_up_request(ret.wake_up.vm_id,
						  ret.wake_up.vcpu,
						  HF_MAILBOX_READABLE_INTID);
			break;

		/* Response available. */
		case HF_VCPU_RUN_MESSAGE:
			hf_handle_message(vcpu->vm, page_address(hf_recv_page),
					  ret.message.size);
			if (hf_mailbox_clear() == 1)
				hf_notify_waiters(HF_PRIMARY_VM_ID);
			break;

		case HF_VCPU_RUN_SLEEP:
			hrtimer_start(&vcpu->timer, ret.sleep.ns,
				      HRTIMER_MODE_REL);
			hf_vcpu_sleep(vcpu);
			hrtimer_cancel(&vcpu->timer);
			break;

		/* Notify all waiters. */
		case HF_VCPU_RUN_NOTIFY_WAITERS:
			hf_notify_waiters(vcpu->vm->id);
			break;

		case HF_VCPU_RUN_ABORTED:
			for (i = 0; i < vcpu->vm->vcpu_count; i++) {
				if (i == vcpu->vcpu_index)
					continue;
				hf_handle_wake_up_request(vcpu->vm->id, i, 0);
			}
			hf_vcpu_sleep(vcpu);
			break;
		}
	}

	return 0;
}

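/*
 * The remainder of the file implements a minimal datagram socket family
 * (PF_HF) layered on top of the Hafnium mailbox, with one local port per
 * connected socket.
 */
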
/**
 * Converts a pointer to a struct sock into a pointer to a struct hf_sock. It
 * relies on the fact that the first field of hf_sock is a sock.
 */
static struct hf_sock *hsock_from_sk(struct sock *sk)
{
	return (struct hf_sock *)sk;
}

/**
 * This is called when the last reference to the outer socket is released. For
 * example, if it's a user-space socket, when the last file descriptor pointing
 * to this socket is closed.
 *
 * It begins cleaning up resources, though some can only be cleaned up after all
 * references to the underlying socket are released, which is handled by
 * hf_sock_destruct().
 */
static int hf_sock_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct hf_sock *hsock = hsock_from_sk(sk);
	unsigned long flags;

	if (!sk)
		return 0;

	/* Shutdown for both send and receive. */
	lock_sock(sk);
	sk->sk_shutdown |= RCV_SHUTDOWN | SEND_SHUTDOWN;
	sk->sk_state_change(sk);
	release_sock(sk);

	/* Remove from the hash table, so lookups from now on won't find it. */
	spin_lock_irqsave(&hf_local_port_hash_lock, flags);
	hash_del_rcu(&hsock->sk.sk_node);
	spin_unlock_irqrestore(&hf_local_port_hash_lock, flags);

	/*
	 * TODO: When we implement a tx queue, we need to clear it here so that
	 * sk_wmem_alloc will not prevent sk from being freed (sk_free).
	 */

	/*
	 * Wait for in-flight lookups to finish. We need to do this here because
	 * in-flight lookups rely on the reference to the socket we're about to
	 * release.
	 */
	synchronize_rcu();
	sock_put(sk);
	sock->sk = NULL;

	return 0;
}

/**
 * This is called when there are no more references to the socket. It frees all
 * resources that haven't been freed during release.
 */
static void hf_sock_destruct(struct sock *sk)
{
	/*
	 * Clear the receive queue now that the handler cannot add any more
	 * skbs to it.
	 */
	skb_queue_purge(&sk->sk_receive_queue);
}

/**
 * Connects the Hafnium socket to the provided VM and port. After the socket is
 * connected, it can be used to exchange datagrams with the specified peer.
 */
static int hf_sock_connect(struct socket *sock, struct sockaddr *saddr, int len,
			   int connect_flags)
{
	struct sock *sk = sock->sk;
	struct hf_sock *hsock = hsock_from_sk(sk);
	struct hf_vm *vm;
	struct sockaddr_hf *addr;
	int err;
	unsigned long flags;

	/* Basic address validation. */
	if (len < sizeof(struct sockaddr_hf) || saddr->sa_family != AF_HF)
		return -EINVAL;

	addr = (struct sockaddr_hf *)saddr;
	vm = hf_vm_from_id(addr->vm_id);
	if (!vm)
		return -ENETUNREACH;

	/*
	 * TODO: Once we implement access control in Hafnium, check that the
	 * caller is allowed to contact the specified VM. Return -ECONNREFUSED
	 * if access is denied.
	 */

	/* Take lock to make sure state doesn't change as we connect. */
	lock_sock(sk);

	/* Only unconnected sockets are allowed to become connected. */
	if (sock->state != SS_UNCONNECTED) {
		err = -EISCONN;
		goto exit;
	}

	hsock->local_port = atomic64_inc_return(&hf_next_port);
	hsock->remote_port = addr->port;
	hsock->peer_vm = vm;

	sock->state = SS_CONNECTED;

	/* Add socket to hash table now that it's fully initialised. */
	spin_lock_irqsave(&hf_local_port_hash_lock, flags);
	hash_add_rcu(hf_local_port_hash, &sk->sk_node, hsock->local_port);
	spin_unlock_irqrestore(&hf_local_port_hash_lock, flags);

	err = 0;
exit:
	release_sock(sk);
	return err;
}

/**
 * Sends the given skb to the appropriate VM by calling Hafnium. It will also
 * trigger the wake up of a recipient VM.
 *
 * Takes ownership of the skb on success.
 */
static int hf_send_skb(struct sk_buff *skb)
{
	unsigned long flags;
	int64_t ret;
	struct hf_sock *hsock = hsock_from_sk(skb->sk);
	struct hf_vm *vm = hsock->peer_vm;

	/*
	 * Call Hafnium under the send lock so that we serialize the use of the
	 * global send buffer.
	 */
	spin_lock_irqsave(&hf_send_lock, flags);
	memcpy(page_address(hf_send_page), skb->data, skb->len);
	ret = hf_mailbox_send(vm->id, skb->len, false);
	spin_unlock_irqrestore(&hf_send_lock, flags);

	if (ret < 0)
		return -EAGAIN;

	/* Wake some vcpu up to handle the new message. */
	hf_handle_wake_up_request(vm->id, ret, HF_MAILBOX_READABLE_INTID);

	kfree_skb(skb);

	return 0;
}

/**
 * Determines if the given socket is in the connected state. It acquires and
 * releases the socket lock.
 */
static bool hf_sock_is_connected(struct socket *sock)
{
	bool ret;

	lock_sock(sock->sk);
	ret = sock->state == SS_CONNECTED;
	release_sock(sock->sk);

	return ret;
}

/**
 * Sends a message to the VM & port the socket is connected to. All variants
 * of write/send/sendto/sendmsg eventually call this function.
 */
static int hf_sock_sendmsg(struct socket *sock, struct msghdr *m, size_t len)
{
	struct sock *sk = sock->sk;
	struct sk_buff *skb;
	int err;
	struct hf_msg_hdr *hdr;
	struct hf_sock *hsock = hsock_from_sk(sk);

	/* Check length. */
	if (len > HF_MAILBOX_SIZE - sizeof(struct hf_msg_hdr))
		return -EMSGSIZE;

	/* We don't allow the destination address to be specified. */
	if (m->msg_namelen > 0)
		return -EISCONN;

	/* We don't support out of band messages. */
	if (m->msg_flags & MSG_OOB)
		return -EOPNOTSUPP;

	/*
	 * Ensure that the socket is connected. We don't need to hold the socket
	 * lock (acquired and released by hf_sock_is_connected) for the
	 * remainder of the function because the fields we care about are
	 * immutable once the state is SS_CONNECTED.
	 */
	if (!hf_sock_is_connected(sock))
		return -ENOTCONN;

	/*
	 * Allocate an skb for this write. If there isn't enough room in the
	 * socket's send buffer (sk_wmem_alloc >= sk_sndbuf), this will block
	 * (if it's a blocking call). On success, it increments sk_wmem_alloc
	 * and sets up the skb such that sk_wmem_alloc gets decremented when
	 * the skb is freed (sock_wfree gets called).
	 */
	skb = sock_alloc_send_skb(sk, len + sizeof(struct hf_msg_hdr),
				  m->msg_flags & MSG_DONTWAIT, &err);
	if (!skb)
		return err;

	/* Reserve room for the header and initialise it. */
	skb_reserve(skb, sizeof(struct hf_msg_hdr));
	hdr = skb_push(skb, sizeof(struct hf_msg_hdr));
	hdr->src_port = hsock->local_port;
	hdr->dst_port = hsock->remote_port;

	/* Allocate area for the contents, then copy into skb. */
	if (!copy_from_iter_full(skb_put(skb, len), len, &m->msg_iter)) {
		err = -EFAULT;
		goto err_cleanup;
	}

	/*
	 * TODO: We currently do this inline, but when we have support for
	 * readiness notification from Hafnium, we must add this to a per-VM tx
	 * queue that can make progress when the VM becomes writable. This will
	 * fix send buffering and poll readiness notification.
	 */
	err = hf_send_skb(skb);
	if (err)
		goto err_cleanup;

	return 0;

err_cleanup:
	kfree_skb(skb);
	return err;
}

/**
 * Receives a message originated from the VM & port the socket is connected to.
 * All variants of read/recv/recvfrom/recvmsg eventually call this function.
 */
static int hf_sock_recvmsg(struct socket *sock, struct msghdr *m, size_t len,
			   int flags)
{
	struct sock *sk = sock->sk;
	struct sk_buff *skb;
	int err;
	size_t copy_len;

	if (!hf_sock_is_connected(sock))
		return -ENOTCONN;

	/* Grab the next skb from the receive queue. */
	skb = skb_recv_datagram(sk, flags, flags & MSG_DONTWAIT, &err);
	if (!skb)
		return err;

	/* Make sure we don't copy more than what fits in the output buffer. */
	copy_len = skb->len;
	if (copy_len > len) {
		copy_len = len;
		m->msg_flags |= MSG_TRUNC;
	}

	/* Make sure we don't overflow the return value type. */
	if (copy_len > INT_MAX) {
		copy_len = INT_MAX;
		m->msg_flags |= MSG_TRUNC;
	}

	/* Copy skb to output iterator, then free it. */
	err = skb_copy_datagram_msg(skb, 0, m, copy_len);
	skb_free_datagram(sk, skb);
	if (err)
		return err;

	return copy_len;
}

/**
 * This function is called when a Hafnium socket is created. It initialises all
 * state such that the caller will be able to connect the socket and then send
 * and receive messages through it.
 */
static int hf_sock_create(struct net *net, struct socket *sock, int protocol,
			  int kern)
{
	static const struct proto_ops ops = {
		.family = PF_HF,
		.owner = THIS_MODULE,
		.release = hf_sock_release,
		.bind = sock_no_bind,
		.connect = hf_sock_connect,
		.socketpair = sock_no_socketpair,
		.accept = sock_no_accept,
		.ioctl = sock_no_ioctl,
		.listen = sock_no_listen,
		.shutdown = sock_no_shutdown,
		.setsockopt = sock_no_setsockopt,
		.getsockopt = sock_no_getsockopt,
		.sendmsg = hf_sock_sendmsg,
		.recvmsg = hf_sock_recvmsg,
		.mmap = sock_no_mmap,
		.sendpage = sock_no_sendpage,
		.poll = datagram_poll,
	};
	struct sock *sk;

	if (sock->type != SOCK_DGRAM)
		return -ESOCKTNOSUPPORT;

	if (protocol != 0)
		return -EPROTONOSUPPORT;

	/*
	 * For now we only allow callers with sys admin capability to create
	 * Hafnium sockets.
	 */
	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	/* Allocate and initialise socket. */
	sk = sk_alloc(net, PF_HF, GFP_KERNEL, &hf_sock_proto, kern);
	if (!sk)
		return -ENOMEM;

	sock_init_data(sock, sk);

	sk->sk_destruct = hf_sock_destruct;
	sock->ops = &ops;
	sock->state = SS_UNCONNECTED;

	return 0;
}

/**
 * Frees all resources, including threads, associated with the Hafnium driver.
 */
static void hf_free_resources(void)
{
	uint32_t i, j;

	/*
	 * First stop all worker threads. We need to do this before freeing
	 * resources because workers may reference each other, so it is only
	 * safe to free resources after they have all stopped.
	 */
	for (i = 0; i < hf_vm_count; i++) {
		struct hf_vm *vm = &hf_vms[i];

		for (j = 0; j < vm->vcpu_count; j++)
			kthread_stop(vm->vcpu[j].task);
	}

	/* Free resources. */
	for (i = 0; i < hf_vm_count; i++) {
		struct hf_vm *vm = &hf_vms[i];

		for (j = 0; j < vm->vcpu_count; j++)
			put_task_struct(vm->vcpu[j].task);
		kfree(vm->vcpu);
	}

	kfree(hf_vms);
}

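/*
 * The functions below manage the hypervisor timer IRQ. Hafnium needs a
 * handler registered for it so that the primary VM's virtual timer can be
 * emulated while a secondary vCPU is running, and CPU hotplug callbacks keep
 * the per-CPU interrupt enabled only while each CPU is online.
 */
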
/**
 * Handles the hypervisor timer interrupt.
 */
static irqreturn_t hf_nop_irq_handler(int irq, void *dev)
{
	/*
	 * No need to do anything, the interrupt only exists to return to the
	 * primary vCPU so that the virtual timer will be restored and fire as
	 * normal.
	 */
	return IRQ_HANDLED;
}

/**
 * Enables the hypervisor timer interrupt on a CPU, when it starts or after the
 * driver is first loaded.
 */
static int hf_starting_cpu(unsigned int cpu)
{
	if (hf_irq != 0) {
		/* Enable the interrupt, and set it to be edge-triggered. */
		enable_percpu_irq(hf_irq, IRQ_TYPE_EDGE_RISING);
	}

	return 0;
}

/**
 * Disables the hypervisor timer interrupt on a CPU when it is powered down.
 */
static int hf_dying_cpu(unsigned int cpu)
{
	if (hf_irq != 0) {
		/* Disable the interrupt while the CPU is asleep. */
		disable_percpu_irq(hf_irq);
	}

	return 0;
}

/**
 * Registers for the hypervisor timer interrupt.
 */
static int hf_int_driver_probe(struct platform_device *pdev)
{
	int irq;
	int ret;

	/*
	 * Register a handler for the hypervisor timer IRQ, as it is needed for
	 * Hafnium to emulate the virtual timer for Linux while a secondary vCPU
	 * is running.
	 */
	irq = platform_get_irq(pdev, ARCH_TIMER_HYP_PPI);
	if (irq < 0) {
		pr_err("Error getting hypervisor timer IRQ: %d\n", irq);
		return irq;
	}
	hf_irq = irq;

	ret = request_percpu_irq(irq, hf_nop_irq_handler, HYPERVISOR_TIMER_NAME,
				 pdev);
	if (ret != 0) {
		pr_err("Error registering hypervisor timer IRQ %d: %d\n",
		       irq, ret);
		return ret;
	}
	pr_info("Hafnium registered for IRQ %d\n", irq);
	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN,
				"hafnium/hypervisor_timer:starting",
				hf_starting_cpu, hf_dying_cpu);
	if (ret < 0) {
		pr_err("Error enabling timer on all CPUs: %d\n", ret);
		return ret;
	}

	return 0;
}

/**
 * Unregisters for the hypervisor timer interrupt.
 */
static int hf_int_driver_remove(struct platform_device *pdev)
{
	int irq;

	irq = platform_get_irq(pdev, ARCH_TIMER_HYP_PPI);
	if (irq < 0) {
		pr_err("Error getting hypervisor timer IRQ: %d\n", irq);
		return irq;
	}

	disable_percpu_irq(irq);
	free_percpu_irq(irq, pdev);

	return 0;
}

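/*
 * Match the architected timer nodes in the device tree so that this driver
 * is probed against the timer device, whose hypervisor (EL2) PPI the probe
 * routine above retrieves.
 */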
static const struct of_device_id hf_int_driver_id[] = {
	{.compatible = "arm,armv7-timer"},
	{.compatible = "arm,armv8-timer"},
	{}
};

static struct platform_driver hf_int_driver = {
	.driver = {
		.name = HYPERVISOR_TIMER_NAME,
		.owner = THIS_MODULE,
		.of_match_table = of_match_ptr(hf_int_driver_id),
	},
	.probe = hf_int_driver_probe,
	.remove = hf_int_driver_remove,
};

/**
 * Initializes the Hafnium driver by creating a thread for each vCPU of each
 * virtual machine.
 */
static int __init hf_init(void)
{
	static const struct net_proto_family proto_family = {
		.family = PF_HF,
		.create = hf_sock_create,
		.owner = THIS_MODULE,
	};
	int64_t ret;
	uint32_t i, j;
	uint32_t total_vm_count;
	uint32_t total_vcpu_count;

	/* Allocate a page for send and receive buffers. */
	hf_send_page = alloc_page(GFP_KERNEL);
	if (!hf_send_page) {
		pr_err("Unable to allocate send buffer\n");
		return -ENOMEM;
	}

	hf_recv_page = alloc_page(GFP_KERNEL);
	if (!hf_recv_page) {
		__free_page(hf_send_page);
		pr_err("Unable to allocate receive buffer\n");
		return -ENOMEM;
	}

	/*
	 * Configure both addresses. Once configured, we cannot free these pages
	 * because the hypervisor will use them, even if the module is
	 * unloaded.
	 */
	ret = hf_vm_configure(page_to_phys(hf_send_page),
			      page_to_phys(hf_recv_page));
	if (ret) {
		__free_page(hf_send_page);
		__free_page(hf_recv_page);
		/*
		 * TODO: We may want to grab this information from hypervisor
		 * and go from there.
		 */
		pr_err("Unable to configure VM\n");
		return -EIO;
	}

	/* Get the number of VMs. */
	ret = hf_vm_get_count();
	if (ret < 0) {
		pr_err("Unable to retrieve number of VMs: %lld\n", ret);
		return -EIO;
	}

	/* Confirm the maximum number of VMs looks sane. */
	BUILD_BUG_ON(CONFIG_HAFNIUM_MAX_VMS < 1);
	BUILD_BUG_ON(CONFIG_HAFNIUM_MAX_VMS > U16_MAX);

	/* Validate the number of VMs. There must be at least the primary. */
	if (ret < 1 || ret > CONFIG_HAFNIUM_MAX_VMS) {
		pr_err("Number of VMs is out of range: %lld\n", ret);
		return -EDQUOT;
	}

	/* Only track the secondary VMs. */
	total_vm_count = ret - 1;
	hf_vms =
		kmalloc_array(total_vm_count, sizeof(struct hf_vm), GFP_KERNEL);
	if (!hf_vms)
		return -ENOMEM;

	/* Initialize each VM. */
	total_vcpu_count = 0;
	for (i = 0; i < total_vm_count; i++) {
		struct hf_vm *vm = &hf_vms[i];

		/* Adjust the ID as only the secondaries are tracked. */
		vm->id = i + FIRST_SECONDARY_VM_ID;

		ret = hf_vcpu_get_count(vm->id);
		if (ret < 0) {
			pr_err("HF_VCPU_GET_COUNT failed for vm=%u: %lld\n",
			       vm->id, ret);
			ret = -EIO;
			goto fail_with_cleanup;
		}

		/* Avoid overflowing the vcpu count. */
		if (ret > (U32_MAX - total_vcpu_count)) {
			pr_err("Too many vcpus: %u\n", total_vcpu_count);
			ret = -EDQUOT;
			goto fail_with_cleanup;
		}

		/* Confirm the maximum number of VCPUs looks sane. */
		BUILD_BUG_ON(CONFIG_HAFNIUM_MAX_VCPUS < 1);
		BUILD_BUG_ON(CONFIG_HAFNIUM_MAX_VCPUS > U16_MAX);

		/* Enforce the limit on vcpus. */
		total_vcpu_count += ret;
		if (total_vcpu_count > CONFIG_HAFNIUM_MAX_VCPUS) {
			pr_err("Too many vcpus: %u\n", total_vcpu_count);
			ret = -EDQUOT;
			goto fail_with_cleanup;
		}

		vm->vcpu_count = ret;
		vm->vcpu = kmalloc_array(vm->vcpu_count, sizeof(struct hf_vcpu),
					 GFP_KERNEL);
		if (!vm->vcpu) {
			ret = -ENOMEM;
			goto fail_with_cleanup;
		}

		/* Update the number of initialized VMs. */
		hf_vm_count = i + 1;

		/* Create a kernel thread for each vcpu. */
		for (j = 0; j < vm->vcpu_count; j++) {
			struct hf_vcpu *vcpu = &vm->vcpu[j];

			vcpu->task =
				kthread_create(hf_vcpu_thread, vcpu,
					       "vcpu_thread_%u_%u", vm->id, j);
			if (IS_ERR(vcpu->task)) {
				pr_err("Error creating task (vm=%u,vcpu=%u): %ld\n",
				       vm->id, j, PTR_ERR(vcpu->task));
				vm->vcpu_count = j;
				ret = PTR_ERR(vcpu->task);
				goto fail_with_cleanup;
			}

			get_task_struct(vcpu->task);
			vcpu->vm = vm;
			vcpu->vcpu_index = j;
			atomic_set(&vcpu->abort_sleep, 0);
		}
	}

	/* Register protocol and socket family. */
	ret = proto_register(&hf_sock_proto, 0);
	if (ret) {
		pr_err("Unable to register protocol: %lld\n", ret);
		goto fail_with_cleanup;
	}

	ret = sock_register(&proto_family);
	if (ret) {
		pr_err("Unable to register Hafnium's socket family: %lld\n",
		       ret);
		goto fail_unregister_proto;
	}

	/*
	 * Register as a driver for the timer device, so we can register a
	 * handler for the hypervisor timer IRQ.
	 */
	ret = platform_driver_register(&hf_int_driver);
	if (ret != 0) {
		pr_err("Error registering timer driver %lld\n", ret);
		goto fail_unregister_socket;
	}

	/*
	 * Start running threads now that all is initialized.
	 *
	 * Any failures from this point on must also unregister the driver with
	 * platform_driver_unregister().
	 */
	for (i = 0; i < hf_vm_count; i++) {
		struct hf_vm *vm = &hf_vms[i];

		for (j = 0; j < vm->vcpu_count; j++)
			wake_up_process(vm->vcpu[j].task);
	}

	/* Dump vm/vcpu count info. */
	pr_info("Hafnium successfully loaded with %u VMs:\n", hf_vm_count);
	for (i = 0; i < hf_vm_count; i++) {
		struct hf_vm *vm = &hf_vms[i];

		pr_info("\tVM %u: %u vCPUS\n", vm->id, vm->vcpu_count);
	}

	return 0;

fail_unregister_socket:
	sock_unregister(PF_HF);
fail_unregister_proto:
	proto_unregister(&hf_sock_proto);
fail_with_cleanup:
	hf_free_resources();
	return ret;
}

/**
 * Frees up all resources used by the Hafnium driver in preparation for
 * unloading it.
 */
static void __exit hf_exit(void)
{
	pr_info("Preparing to unload Hafnium\n");
	sock_unregister(PF_HF);
	proto_unregister(&hf_sock_proto);
	platform_driver_unregister(&hf_int_driver);
	hf_free_resources();
	pr_info("Hafnium ready to unload\n");
}

MODULE_LICENSE("GPL v2");

module_init(hf_init);
module_exit(hf_exit);