blob: e4fe01fed00dd3dbda408e3e27edb77ec15cd0a4 [file] [log] [blame]
Andrew Walbran13c3a0b2018-11-30 11:51:53 +00001/*
2 * Copyright 2018 Google LLC
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * version 2 as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public License
14 * along with this program; if not, write to the Free Software
15 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
16 */
17
Wedson Almeida Filho2f62b422018-06-19 06:44:32 +010018#include <linux/hrtimer.h>
19#include <linux/init.h>
20#include <linux/kernel.h>
21#include <linux/kthread.h>
Wedson Almeida Filhof9e11922018-08-12 15:54:31 +010022#include <linux/mm.h>
Wedson Almeida Filho2f62b422018-06-19 06:44:32 +010023#include <linux/module.h>
24#include <linux/sched/task.h>
25#include <linux/slab.h>
26
Andrew Scull55704232018-08-10 17:19:54 +010027#include <hf/call.h>
28
/*
 * Compile-time limits on how many VMs and vCPUs this driver will track.
 * Values reported by the hypervisor are validated against these in hf_init().
 */
#define CONFIG_HAFNIUM_MAX_VMS 16
#define CONFIG_HAFNIUM_MAX_VCPUS 32
31
/**
 * State for one virtual CPU of a secondary VM, driven by a dedicated kernel
 * thread.
 */
struct hf_vcpu {
	spinlock_t lock;	  /* Protects pending_irq. */
	struct hf_vm *vm;	  /* VM this vcpu belongs to. */
	uint32_t vcpu_index;	  /* Index of this vcpu within its VM. */
	struct task_struct *task; /* Kernel thread running hf_vcpu_thread(). */
	struct hrtimer timer;	  /* Wakes the thread when a sleep expires. */
	bool pending_irq;	  /* An interrupt has been requested for it. */
};
40
/**
 * State for one secondary VM: its hypervisor-assigned ID and the array of
 * vcpus that belong to it.
 */
struct hf_vm {
	uint32_t id;		/* Hypervisor VM ID (secondaries start at 1). */
	uint32_t vcpu_count;	/* Number of entries in vcpu. */
	struct hf_vcpu *vcpu;	/* Array of vcpu_count vcpus, kmalloc'd. */
};
46
/* Array of hf_vm_count secondary VMs, allocated in hf_init(). */
static struct hf_vm *hf_vms;
/* Number of entries in hf_vms that are fully initialized. */
static uint32_t hf_vm_count;
/* Pages shared with the hypervisor as send/receive mailboxes. */
static struct page *hf_send_page = NULL;
static struct page *hf_recv_page = NULL;
Wedson Almeida Filho2f62b422018-06-19 06:44:32 +010051
Wedson Almeida Filho2f62b422018-06-19 06:44:32 +010052/**
53 * Wakes up the thread associated with the vcpu that owns the given timer. This
54 * is called when the timer the thread is waiting on expires.
55 */
56static enum hrtimer_restart hf_vcpu_timer_expired(struct hrtimer *timer)
57{
58 struct hf_vcpu *vcpu = container_of(timer, struct hf_vcpu, timer);
59 wake_up_process(vcpu->task);
60 return HRTIMER_NORESTART;
61}
62
63/**
64 * This is the main loop of each vcpu.
65 */
66static int hf_vcpu_thread(void *data)
67{
68 struct hf_vcpu *vcpu = data;
Andrew Sculldc8cab52018-10-10 18:29:39 +010069 struct hf_vcpu_run_return ret;
Wedson Almeida Filho2f62b422018-06-19 06:44:32 +010070
71 hrtimer_init(&vcpu->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
72 vcpu->timer.function = &hf_vcpu_timer_expired;
73
74 while (!kthread_should_stop()) {
75 unsigned long flags;
76 size_t irqs;
77
78 set_current_state(TASK_RUNNING);
79
80 /* Determine if we must interrupt the vcpu. */
81 spin_lock_irqsave(&vcpu->lock, flags);
82 irqs = vcpu->pending_irq ? 1 : 0;
83 vcpu->pending_irq = false;
84 spin_unlock_irqrestore(&vcpu->lock, flags);
85
Andrew Scullbb7ae412018-09-28 21:07:15 +010086 /* Call into Hafnium to run vcpu. */
Andrew Scullb722f952018-09-27 15:39:10 +010087 ret = hf_vcpu_run(vcpu->vm->id, vcpu->vcpu_index);
Wedson Almeida Filho2f62b422018-06-19 06:44:32 +010088
Andrew Sculldc8cab52018-10-10 18:29:39 +010089 switch (ret.code) {
Andrew Scullb3a61b52018-09-17 14:30:34 +010090 /* Yield (forcibly or voluntarily). */
91 case HF_VCPU_RUN_YIELD:
Wedson Almeida Filho2f62b422018-06-19 06:44:32 +010092 break;
93
Andrew Scullb3a61b52018-09-17 14:30:34 +010094 /* WFI. */
95 case HF_VCPU_RUN_WAIT_FOR_INTERRUPT:
Wedson Almeida Filho2f62b422018-06-19 06:44:32 +010096 set_current_state(TASK_INTERRUPTIBLE);
97 if (kthread_should_stop())
98 break;
99 schedule();
100 break;
101
Andrew Scullb3a61b52018-09-17 14:30:34 +0100102 /* Wake up another vcpu. */
103 case HF_VCPU_RUN_WAKE_UP:
Wedson Almeida Filho2f62b422018-06-19 06:44:32 +0100104 {
Andrew Scull0973a2e2018-10-05 11:11:24 +0100105 struct hf_vm *vm;
Andrew Sculldc8cab52018-10-10 18:29:39 +0100106 if (ret.wake_up.vm_id > hf_vm_count)
Andrew Scull0973a2e2018-10-05 11:11:24 +0100107 break;
Andrew Sculldc8cab52018-10-10 18:29:39 +0100108 vm = &hf_vms[ret.wake_up.vm_id - 1];
109 if (ret.wake_up.vcpu < vm->vcpu_count) {
110 wake_up_process(vm->vcpu[ret.wake_up.vcpu].task);
111 } else if (ret.wake_up.vcpu == HF_INVALID_VCPU) {
Andrew Scull0973a2e2018-10-05 11:11:24 +0100112 /* TODO: pick one to interrupt. */
113 pr_warning("No vcpu to wake.");
114 }
Wedson Almeida Filho2f62b422018-06-19 06:44:32 +0100115 }
116 break;
Wedson Almeida Filhof9e11922018-08-12 15:54:31 +0100117
Andrew Scullb3a61b52018-09-17 14:30:34 +0100118 /* Response available. */
Andrew Scull0973a2e2018-10-05 11:11:24 +0100119 case HF_VCPU_RUN_MESSAGE:
Wedson Almeida Filhof9e11922018-08-12 15:54:31 +0100120 {
Andrew Sculldc8cab52018-10-10 18:29:39 +0100121 uint32_t i;
Wedson Almeida Filhof9e11922018-08-12 15:54:31 +0100122 const char *buf = page_address(hf_recv_page);
Andrew Scull0973a2e2018-10-05 11:11:24 +0100123 pr_info("Received response from vm %u (%u bytes): ",
Andrew Sculldc8cab52018-10-10 18:29:39 +0100124 vcpu->vm->id, ret.message.size);
125 for (i = 0; i < ret.message.size; i++)
Wedson Almeida Filhof9e11922018-08-12 15:54:31 +0100126 printk(KERN_CONT "%c", buf[i]);
127 printk(KERN_CONT "\n");
Andrew Scull0973a2e2018-10-05 11:11:24 +0100128 hf_mailbox_clear();
Wedson Almeida Filhof9e11922018-08-12 15:54:31 +0100129 }
130 break;
Andrew Sculldc8cab52018-10-10 18:29:39 +0100131
132 case HF_VCPU_RUN_SLEEP:
133 set_current_state(TASK_INTERRUPTIBLE);
134 if (kthread_should_stop())
135 break;
136 hrtimer_start(&vcpu->timer, ret.sleep.ns, HRTIMER_MODE_REL);
137 schedule();
138 hrtimer_cancel(&vcpu->timer);
139 break;
Wedson Almeida Filho2f62b422018-06-19 06:44:32 +0100140 }
141 }
142
143 set_current_state(TASK_RUNNING);
144
145 return 0;
146}
147
148/**
Andrew Scullbb7ae412018-09-28 21:07:15 +0100149 * Frees all resources, including threads, associated with the Hafnium driver.
Wedson Almeida Filho2f62b422018-06-19 06:44:32 +0100150 */
Andrew Scull82257c42018-10-01 10:37:48 +0100151static void hf_free_resources(void)
Wedson Almeida Filho2f62b422018-06-19 06:44:32 +0100152{
Andrew Scullbb7ae412018-09-28 21:07:15 +0100153 uint32_t i, j;
Wedson Almeida Filho2f62b422018-06-19 06:44:32 +0100154
155 /*
156 * First stop all worker threads. We need to do this before freeing
157 * resources because workers may reference each other, so it is only
158 * safe to free resources after they have all stopped.
159 */
Andrew Scull82257c42018-10-01 10:37:48 +0100160 for (i = 0; i < hf_vm_count; i++) {
Andrew Scullb3a61b52018-09-17 14:30:34 +0100161 struct hf_vm *vm = &hf_vms[i];
Wedson Almeida Filho2f62b422018-06-19 06:44:32 +0100162 for (j = 0; j < vm->vcpu_count; j++)
163 kthread_stop(vm->vcpu[j].task);
164 }
165
166 /* Free resources. */
Andrew Scull82257c42018-10-01 10:37:48 +0100167 for (i = 0; i < hf_vm_count; i++) {
Andrew Scullb3a61b52018-09-17 14:30:34 +0100168 struct hf_vm *vm = &hf_vms[i];
Wedson Almeida Filho2f62b422018-06-19 06:44:32 +0100169 for (j = 0; j < vm->vcpu_count; j++)
170 put_task_struct(vm->vcpu[j].task);
171 kfree(vm->vcpu);
172 }
173
174 kfree(hf_vms);
175}
176
177static ssize_t hf_interrupt_store(struct kobject *kobj,
178 struct kobj_attribute *attr, const char *buf,
179 size_t count)
180{
181 struct hf_vcpu *vcpu;
182 unsigned long flags;
183 struct task_struct *task;
184
185 /* TODO: Parse input to determine which vcpu to interrupt. */
Wedson Almeida Filho2f62b422018-06-19 06:44:32 +0100186 /* TODO: Check bounds. */
187
Andrew Scullb3a61b52018-09-17 14:30:34 +0100188 vcpu = &hf_vms[0].vcpu[0];
Wedson Almeida Filho2f62b422018-06-19 06:44:32 +0100189
190 spin_lock_irqsave(&vcpu->lock, flags);
191 vcpu->pending_irq = true;
192 /* TODO: Do we need to increment the task's ref count here? */
193 task = vcpu->task;
194 spin_unlock_irqrestore(&vcpu->lock, flags);
195
196 /* Wake up the task. If it's already running, kick it out. */
197 /* TODO: There's a race here: the kick may happen right before we go
198 * to the hypervisor. */
199 if (wake_up_process(task) == 0)
200 kick_process(task);
201
202 return count;
203}
204
Wedson Almeida Filhof9e11922018-08-12 15:54:31 +0100205static ssize_t hf_send_store(struct kobject *kobj, struct kobj_attribute *attr,
206 const char *buf, size_t count)
207{
Andrew Scullbb7ae412018-09-28 21:07:15 +0100208 int64_t ret;
Wedson Almeida Filhof9e11922018-08-12 15:54:31 +0100209 struct hf_vm *vm;
210
Andrew Scull0973a2e2018-10-05 11:11:24 +0100211 count = min_t(size_t, count, HF_MAILBOX_SIZE);
Wedson Almeida Filhof9e11922018-08-12 15:54:31 +0100212
213 /* Copy data to send buffer. */
214 memcpy(page_address(hf_send_page), buf, count);
Andrew Scullb722f952018-09-27 15:39:10 +0100215
216 vm = &hf_vms[0];
Andrew Scull0973a2e2018-10-05 11:11:24 +0100217 ret = hf_mailbox_send(vm->id, count);
Wedson Almeida Filhof9e11922018-08-12 15:54:31 +0100218 if (ret < 0)
219 return -EAGAIN;
220
Andrew Scull0973a2e2018-10-05 11:11:24 +0100221 if (ret == HF_INVALID_VCPU) {
222 /*
223 * TODO: We need to interrupt some vcpu because none are waiting
224 * for data.
225 */
226 pr_warning("No vcpu to receive message.");
227 return -ENOSYS;
228 }
229
230 if (ret >= vm->vcpu_count)
Wedson Almeida Filhof9e11922018-08-12 15:54:31 +0100231 return -EINVAL;
232
Andrew Scull0973a2e2018-10-05 11:11:24 +0100233 /* Wake up the vcpu that is going to process the data. */
234 /* TODO: There's a race where thread may get wake up before it
235 * goes to sleep. Fix this. */
236 wake_up_process(vm->vcpu[ret].task);
Wedson Almeida Filhof9e11922018-08-12 15:54:31 +0100237
238 return count;
239}
240
/* Sysfs object (/sys/kernel/hafnium) and its write-only control files. */
static struct kobject *hf_sysfs_obj = NULL;
static struct kobj_attribute interrupt_attr =
	__ATTR(interrupt, 0200, NULL, hf_interrupt_store);
static struct kobj_attribute send_attr =
	__ATTR(send, 0200, NULL, hf_send_store);
Wedson Almeida Filho2f62b422018-06-19 06:44:32 +0100246
247/**
Andrew Scullbb7ae412018-09-28 21:07:15 +0100248 * Initializes the Hafnium driver's sysfs interface.
249 */
250static void __init hf_init_sysfs(void)
251{
252 int ret;
253
254 /* Create the sysfs interface to interrupt vcpus. */
255 hf_sysfs_obj = kobject_create_and_add("hafnium", kernel_kobj);
256 if (!hf_sysfs_obj) {
257 pr_err("Unable to create sysfs object");
258 } else {
259 ret = sysfs_create_file(hf_sysfs_obj, &interrupt_attr.attr);
260 if (ret)
261 pr_err("Unable to create 'interrupt' sysfs file");
262
263 ret = sysfs_create_file(hf_sysfs_obj, &send_attr.attr);
264 if (ret)
265 pr_err("Unable to create 'send' sysfs file");
266 }
267}
268
/**
 * Initializes the Hafnium driver by creating a thread for each vCPU of each
 * virtual machine.
 *
 * Order of operations: allocate and register the mailbox pages, query and
 * validate the VM count, then for each secondary VM query its vcpu count,
 * allocate its vcpu array and create (but do not start) one kernel thread
 * per vcpu. Threads are only woken once every VM is fully initialized.
 * On any mid-way failure, hf_free_resources() tears down exactly the VMs
 * counted so far in hf_vm_count.
 */
static int __init hf_init(void)
{
	int64_t ret;
	uint32_t i, j;
	uint32_t total_vm_count;
	uint32_t total_vcpu_count;

	/* Allocate a page for send and receive buffers. */
	hf_send_page = alloc_page(GFP_KERNEL);
	if (!hf_send_page) {
		pr_err("Unable to allocate send buffer\n");
		return -ENOMEM;
	}

	hf_recv_page = alloc_page(GFP_KERNEL);
	if (!hf_recv_page) {
		__free_page(hf_send_page);
		pr_err("Unable to allocate receive buffer\n");
		return -ENOMEM;
	}

	/*
	 * Configure both addresses. Once configured, we cannot free these pages
	 * because the hypervisor will use them, even if the module is
	 * unloaded.
	 */
	ret = hf_vm_configure(page_to_phys(hf_send_page),
			      page_to_phys(hf_recv_page));
	if (ret) {
		__free_page(hf_send_page);
		__free_page(hf_recv_page);
		/* TODO: We may want to grab this information from hypervisor
		 * and go from there. */
		pr_err("Unable to configure VM\n");
		return -EIO;
	}

	/* Get the number of VMs. */
	ret = hf_vm_get_count();
	if (ret < 0) {
		pr_err("Unable to retrieve number of VMs: %lld\n", ret);
		return -EIO;
	}

	/* Confirm the maximum number of VMs looks sane. */
	BUILD_BUG_ON(CONFIG_HAFNIUM_MAX_VMS < 1);
	BUILD_BUG_ON(CONFIG_HAFNIUM_MAX_VMS > U16_MAX);

	/* Validate the number of VMs. There must at least be the primary. */
	if (ret < 1 || ret > CONFIG_HAFNIUM_MAX_VMS) {
		pr_err("Number of VMs is out of range: %lld\n", ret);
		return -EDQUOT;
	}

	/* Only track the secondary VMs. */
	total_vm_count = ret - 1;
	/* NOTE(review): kmalloc(0) when only the primary exists; the result
	 * is never dereferenced because all loops below are bounded by 0. */
	hf_vms = kmalloc(sizeof(struct hf_vm) * total_vm_count, GFP_KERNEL);
	if (!hf_vms)
		return -ENOMEM;

	/* Initialize each VM. */
	total_vcpu_count = 0;
	for (i = 0; i < total_vm_count; i++) {
		struct hf_vm *vm = &hf_vms[i];

		/* Adjust the ID as only the secondaries are tracked. */
		vm->id = i + 1;

		ret = hf_vcpu_get_count(vm->id);
		if (ret < 0) {
			pr_err("HF_VCPU_GET_COUNT failed for vm=%u: %lld", vm->id,
				ret);
			ret = -EIO;
			goto fail_with_cleanup;
		}

		/* Avoid overflowing the vcpu count. */
		if (ret > (U32_MAX - total_vcpu_count)) {
			pr_err("Too many vcpus: %u\n", total_vcpu_count);
			ret = -EDQUOT;
			goto fail_with_cleanup;
		}

		/* Confirm the maximum number of VCPUs looks sane. */
		BUILD_BUG_ON(CONFIG_HAFNIUM_MAX_VCPUS < 1);
		BUILD_BUG_ON(CONFIG_HAFNIUM_MAX_VCPUS > U16_MAX);

		/* Enforce the limit on vcpus. */
		total_vcpu_count += ret;
		if (total_vcpu_count > CONFIG_HAFNIUM_MAX_VCPUS) {
			pr_err("Too many vcpus: %u\n", total_vcpu_count);
			ret = -EDQUOT;
			goto fail_with_cleanup;
		}

		vm->vcpu_count = ret;
		vm->vcpu = kmalloc(sizeof(struct hf_vcpu) * vm->vcpu_count,
				   GFP_KERNEL);
		if (!vm->vcpu) {
			pr_err("No memory for %u vcpus for vm %u",
				vm->vcpu_count, vm->id);
			ret = -ENOMEM;
			goto fail_with_cleanup;
		}

		/*
		 * Update the number of initialized VMs, so cleanup on failure
		 * only touches VMs whose vcpu array exists.
		 */
		hf_vm_count = i + 1;

		/* Create a kernel thread for each vcpu. */
		for (j = 0; j < vm->vcpu_count; j++) {
			struct hf_vcpu *vcpu = &vm->vcpu[j];
			/* Threads start stopped; woken together below. */
			vcpu->task = kthread_create(hf_vcpu_thread, vcpu,
						    "vcpu_thread_%u_%u",
						    vm->id, j);
			if (IS_ERR(vcpu->task)) {
				pr_err("Error creating task (vm=%u,vcpu=%u)"
					": %ld\n", vm->id, j, PTR_ERR(vcpu->task));
				/* Trim count so cleanup skips this vcpu. */
				vm->vcpu_count = j;
				ret = PTR_ERR(vcpu->task);
				goto fail_with_cleanup;
			}

			get_task_struct(vcpu->task);
			spin_lock_init(&vcpu->lock);
			vcpu->vm = vm;
			vcpu->vcpu_index = j;
			vcpu->pending_irq = false;
		}
	}

	/* Start running threads now that all is initialized. */
	for (i = 0; i < hf_vm_count; i++) {
		struct hf_vm *vm = &hf_vms[i];
		for (j = 0; j < vm->vcpu_count; j++)
			wake_up_process(vm->vcpu[j].task);
	}

	/* Dump vm/vcpu count info. */
	pr_info("Hafnium successfully loaded with %u VMs:\n", hf_vm_count);
	for (i = 0; i < hf_vm_count; i++) {
		struct hf_vm *vm = &hf_vms[i];
		pr_info("\tVM %u: %u vCPUS\n", vm->id, vm->vcpu_count);
	}

	hf_init_sysfs();

	return 0;

fail_with_cleanup:
	hf_free_resources();
	return ret;
}
425
/**
 * Frees up all resources used by the Hafnium driver in preparation for
 * unloading it.
 */
static void __exit hf_exit(void)
{
	/* Remove the sysfs interface first so no new requests come in. */
	if (hf_sysfs_obj)
		kobject_put(hf_sysfs_obj);

	pr_info("Preparing to unload Hafnium\n");
	/* Stops all vcpu threads and frees per-vm/per-vcpu memory. */
	hf_free_resources();
	pr_info("Hafnium ready to unload\n");
}
439
MODULE_LICENSE("GPL");

/* Register the module's entry and exit points. */
module_init(hf_init);
module_exit(hf_exit);