blob: a1f1e38e188ef57e8768a810964198884bb8c557 [file] [log] [blame]
Andrew Walbran13c3a0b2018-11-30 11:51:53 +00001/*
2 * Copyright 2018 Google LLC
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * version 2 as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public License
14 * along with this program; if not, write to the Free Software
15 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
16 */
17
Wedson Almeida Filho2f62b422018-06-19 06:44:32 +010018#include <linux/hrtimer.h>
19#include <linux/init.h>
20#include <linux/kernel.h>
21#include <linux/kthread.h>
Wedson Almeida Filhof9e11922018-08-12 15:54:31 +010022#include <linux/mm.h>
Wedson Almeida Filho2f62b422018-06-19 06:44:32 +010023#include <linux/module.h>
24#include <linux/sched/task.h>
25#include <linux/slab.h>
26
Andrew Scull55704232018-08-10 17:19:54 +010027#include <hf/call.h>
28
/*
 * Static limits on tracked VMs/vCPUs; used only for sanity-checking the
 * counts reported by the hypervisor before allocating per-VM state.
 */
#define CONFIG_HAFNIUM_MAX_VMS 16
#define CONFIG_HAFNIUM_MAX_VCPUS 32
31
/* Per-vCPU state: one kernel thread drives each virtual CPU. */
struct hf_vcpu {
	spinlock_t lock;
	struct hf_vm *vm;	/* Owning VM (back-pointer). */
	uint32_t vcpu_index;	/* Index of this vcpu within the VM. */
	struct task_struct *task;	/* Kernel thread running this vcpu. */
	/* Non-zero when a wake-up has been requested; see hf_vcpu_sleep(). */
	atomic_t abort_sleep;
	struct hrtimer timer;	/* Used to honour HF_VCPU_RUN_SLEEP requests. */
};
40
/* Per-VM state for a secondary VM tracked by this driver. */
struct hf_vm {
	uint32_t id;		/* Hypervisor VM id (secondaries start at 1). */
	uint32_t vcpu_count;	/* Number of entries in the vcpu array. */
	struct hf_vcpu *vcpu;	/* Array of vcpu_count vcpus, kmalloc'd. */
};
46
/* Array of hf_vm_count secondary VMs; hf_vms[i] has id i + 1. */
static struct hf_vm *hf_vms;
static uint32_t hf_vm_count;
/*
 * Pages registered with the hypervisor as send/recv mailboxes. Never freed
 * after configuration, since the hypervisor keeps using them (see hf_init()).
 */
static struct page *hf_send_page = NULL;
static struct page *hf_recv_page = NULL;
Wedson Almeida Filho2f62b422018-06-19 06:44:32 +010051
Wedson Almeida Filho2f62b422018-06-19 06:44:32 +010052/**
Wedson Almeida Filho7fe62332018-12-15 03:09:57 +000053 * Wakes up the kernel thread responsible for running the given vcpu.
54 *
55 * Returns 0 if the thread was already running, 1 otherwise.
56 */
57static int hf_vcpu_wake_up(struct hf_vcpu *vcpu)
58{
59 /* Set a flag indicating that the thread should not go to sleep. */
60 atomic_set(&vcpu->abort_sleep, 1);
61
62 /* Set the thread to running state. */
63 return wake_up_process(vcpu->task);
64}
65
/**
 * Puts the current thread to sleep. The current thread must be responsible for
 * running the given vcpu.
 *
 * Going to sleep will fail if hf_vcpu_wake_up() or kthread_stop() was called on
 * this vcpu/thread since the last time it [re]started running.
 */
static void hf_vcpu_sleep(struct hf_vcpu *vcpu)
{
	int abort;

	/*
	 * Become interruptible *before* reading abort_sleep. Combined with
	 * hf_vcpu_wake_up() setting the flag before wake_up_process(), this
	 * ordering ensures a wake-up request can never be lost: either we see
	 * the flag and skip schedule(), or the waker sees us sleeping and
	 * wakes us.
	 */
	set_current_state(TASK_INTERRUPTIBLE);

	/* Check the sleep-abort flag after making thread interruptible. */
	abort = atomic_read(&vcpu->abort_sleep);
	if (!abort && !kthread_should_stop())
		schedule();

	/* Set state back to running on the way out. */
	set_current_state(TASK_RUNNING);
}
87
88/**
Wedson Almeida Filho2f62b422018-06-19 06:44:32 +010089 * Wakes up the thread associated with the vcpu that owns the given timer. This
90 * is called when the timer the thread is waiting on expires.
91 */
92static enum hrtimer_restart hf_vcpu_timer_expired(struct hrtimer *timer)
93{
94 struct hf_vcpu *vcpu = container_of(timer, struct hf_vcpu, timer);
Wedson Almeida Filho7fe62332018-12-15 03:09:57 +000095 /* TODO: Inject interrupt. */
96 hf_vcpu_wake_up(vcpu);
Wedson Almeida Filho2f62b422018-06-19 06:44:32 +010097 return HRTIMER_NORESTART;
98}
99
100/**
101 * This is the main loop of each vcpu.
102 */
103static int hf_vcpu_thread(void *data)
104{
105 struct hf_vcpu *vcpu = data;
Andrew Sculldc8cab52018-10-10 18:29:39 +0100106 struct hf_vcpu_run_return ret;
Wedson Almeida Filho2f62b422018-06-19 06:44:32 +0100107
108 hrtimer_init(&vcpu->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
109 vcpu->timer.function = &hf_vcpu_timer_expired;
110
111 while (!kthread_should_stop()) {
Wedson Almeida Filho7fe62332018-12-15 03:09:57 +0000112 /*
113 * We're about to run the vcpu, so we can reset the abort-sleep
114 * flag.
115 */
116 atomic_set(&vcpu->abort_sleep, 0);
Wedson Almeida Filho2f62b422018-06-19 06:44:32 +0100117
Andrew Scullbb7ae412018-09-28 21:07:15 +0100118 /* Call into Hafnium to run vcpu. */
Andrew Scullb722f952018-09-27 15:39:10 +0100119 ret = hf_vcpu_run(vcpu->vm->id, vcpu->vcpu_index);
Wedson Almeida Filho2f62b422018-06-19 06:44:32 +0100120
Andrew Sculldc8cab52018-10-10 18:29:39 +0100121 switch (ret.code) {
Andrew Scullb3a61b52018-09-17 14:30:34 +0100122 /* Yield (forcibly or voluntarily). */
123 case HF_VCPU_RUN_YIELD:
Wedson Almeida Filho2f62b422018-06-19 06:44:32 +0100124 break;
125
Andrew Scullb3a61b52018-09-17 14:30:34 +0100126 /* WFI. */
127 case HF_VCPU_RUN_WAIT_FOR_INTERRUPT:
Wedson Almeida Filho7fe62332018-12-15 03:09:57 +0000128 hf_vcpu_sleep(vcpu);
Wedson Almeida Filho2f62b422018-06-19 06:44:32 +0100129 break;
130
Andrew Scullb3a61b52018-09-17 14:30:34 +0100131 /* Wake up another vcpu. */
132 case HF_VCPU_RUN_WAKE_UP:
Wedson Almeida Filho2f62b422018-06-19 06:44:32 +0100133 {
Andrew Scull0973a2e2018-10-05 11:11:24 +0100134 struct hf_vm *vm;
Andrew Sculldc8cab52018-10-10 18:29:39 +0100135 if (ret.wake_up.vm_id > hf_vm_count)
Andrew Scull0973a2e2018-10-05 11:11:24 +0100136 break;
Andrew Sculldc8cab52018-10-10 18:29:39 +0100137 vm = &hf_vms[ret.wake_up.vm_id - 1];
138 if (ret.wake_up.vcpu < vm->vcpu_count) {
Wedson Almeida Filho7fe62332018-12-15 03:09:57 +0000139 hf_vcpu_wake_up(&vm->vcpu[ret.wake_up.vcpu]);
Andrew Sculldc8cab52018-10-10 18:29:39 +0100140 } else if (ret.wake_up.vcpu == HF_INVALID_VCPU) {
Andrew Scull0973a2e2018-10-05 11:11:24 +0100141 /* TODO: pick one to interrupt. */
142 pr_warning("No vcpu to wake.");
143 }
Wedson Almeida Filho2f62b422018-06-19 06:44:32 +0100144 }
145 break;
Wedson Almeida Filhof9e11922018-08-12 15:54:31 +0100146
Andrew Scullb3a61b52018-09-17 14:30:34 +0100147 /* Response available. */
Andrew Scull0973a2e2018-10-05 11:11:24 +0100148 case HF_VCPU_RUN_MESSAGE:
Wedson Almeida Filhof9e11922018-08-12 15:54:31 +0100149 {
Andrew Sculldc8cab52018-10-10 18:29:39 +0100150 uint32_t i;
Wedson Almeida Filhof9e11922018-08-12 15:54:31 +0100151 const char *buf = page_address(hf_recv_page);
Andrew Scull0973a2e2018-10-05 11:11:24 +0100152 pr_info("Received response from vm %u (%u bytes): ",
Andrew Sculldc8cab52018-10-10 18:29:39 +0100153 vcpu->vm->id, ret.message.size);
154 for (i = 0; i < ret.message.size; i++)
Wedson Almeida Filhof9e11922018-08-12 15:54:31 +0100155 printk(KERN_CONT "%c", buf[i]);
156 printk(KERN_CONT "\n");
Andrew Scull0973a2e2018-10-05 11:11:24 +0100157 hf_mailbox_clear();
Wedson Almeida Filhof9e11922018-08-12 15:54:31 +0100158 }
159 break;
Andrew Sculldc8cab52018-10-10 18:29:39 +0100160
161 case HF_VCPU_RUN_SLEEP:
Andrew Sculldc8cab52018-10-10 18:29:39 +0100162 hrtimer_start(&vcpu->timer, ret.sleep.ns, HRTIMER_MODE_REL);
Wedson Almeida Filho7fe62332018-12-15 03:09:57 +0000163 hf_vcpu_sleep(vcpu);
Andrew Sculldc8cab52018-10-10 18:29:39 +0100164 hrtimer_cancel(&vcpu->timer);
165 break;
Wedson Almeida Filho2f62b422018-06-19 06:44:32 +0100166 }
167 }
168
Wedson Almeida Filho2f62b422018-06-19 06:44:32 +0100169 return 0;
170}
171
172/**
Andrew Scullbb7ae412018-09-28 21:07:15 +0100173 * Frees all resources, including threads, associated with the Hafnium driver.
Wedson Almeida Filho2f62b422018-06-19 06:44:32 +0100174 */
Andrew Scull82257c42018-10-01 10:37:48 +0100175static void hf_free_resources(void)
Wedson Almeida Filho2f62b422018-06-19 06:44:32 +0100176{
Andrew Scullbb7ae412018-09-28 21:07:15 +0100177 uint32_t i, j;
Wedson Almeida Filho2f62b422018-06-19 06:44:32 +0100178
179 /*
180 * First stop all worker threads. We need to do this before freeing
181 * resources because workers may reference each other, so it is only
182 * safe to free resources after they have all stopped.
183 */
Andrew Scull82257c42018-10-01 10:37:48 +0100184 for (i = 0; i < hf_vm_count; i++) {
Andrew Scullb3a61b52018-09-17 14:30:34 +0100185 struct hf_vm *vm = &hf_vms[i];
Wedson Almeida Filho2f62b422018-06-19 06:44:32 +0100186 for (j = 0; j < vm->vcpu_count; j++)
187 kthread_stop(vm->vcpu[j].task);
188 }
189
190 /* Free resources. */
Andrew Scull82257c42018-10-01 10:37:48 +0100191 for (i = 0; i < hf_vm_count; i++) {
Andrew Scullb3a61b52018-09-17 14:30:34 +0100192 struct hf_vm *vm = &hf_vms[i];
Wedson Almeida Filho2f62b422018-06-19 06:44:32 +0100193 for (j = 0; j < vm->vcpu_count; j++)
194 put_task_struct(vm->vcpu[j].task);
195 kfree(vm->vcpu);
196 }
197
198 kfree(hf_vms);
199}
200
Wedson Almeida Filhof9e11922018-08-12 15:54:31 +0100201static ssize_t hf_send_store(struct kobject *kobj, struct kobj_attribute *attr,
202 const char *buf, size_t count)
203{
Andrew Scullbb7ae412018-09-28 21:07:15 +0100204 int64_t ret;
Wedson Almeida Filhof9e11922018-08-12 15:54:31 +0100205 struct hf_vm *vm;
206
Andrew Scull0973a2e2018-10-05 11:11:24 +0100207 count = min_t(size_t, count, HF_MAILBOX_SIZE);
Wedson Almeida Filhof9e11922018-08-12 15:54:31 +0100208
209 /* Copy data to send buffer. */
210 memcpy(page_address(hf_send_page), buf, count);
Andrew Scullb722f952018-09-27 15:39:10 +0100211
212 vm = &hf_vms[0];
Andrew Scull0973a2e2018-10-05 11:11:24 +0100213 ret = hf_mailbox_send(vm->id, count);
Wedson Almeida Filhof9e11922018-08-12 15:54:31 +0100214 if (ret < 0)
215 return -EAGAIN;
216
Andrew Scull0973a2e2018-10-05 11:11:24 +0100217 if (ret == HF_INVALID_VCPU) {
218 /*
219 * TODO: We need to interrupt some vcpu because none are waiting
220 * for data.
221 */
222 pr_warning("No vcpu to receive message.");
223 return -ENOSYS;
224 }
225
226 if (ret >= vm->vcpu_count)
Wedson Almeida Filhof9e11922018-08-12 15:54:31 +0100227 return -EINVAL;
228
Andrew Scull0973a2e2018-10-05 11:11:24 +0100229 /* Wake up the vcpu that is going to process the data. */
Wedson Almeida Filho7fe62332018-12-15 03:09:57 +0000230 hf_vcpu_wake_up(&vm->vcpu[ret]);
Wedson Almeida Filhof9e11922018-08-12 15:54:31 +0100231
232 return count;
233}
234
/* /sys/kernel/hafnium kobject and its write-only "send" attribute (0200). */
static struct kobject *hf_sysfs_obj = NULL;
static struct kobj_attribute send_attr =
	__ATTR(send, 0200, NULL, hf_send_store);
Wedson Almeida Filho2f62b422018-06-19 06:44:32 +0100238
239/**
Andrew Scullbb7ae412018-09-28 21:07:15 +0100240 * Initializes the Hafnium driver's sysfs interface.
241 */
242static void __init hf_init_sysfs(void)
243{
244 int ret;
245
246 /* Create the sysfs interface to interrupt vcpus. */
247 hf_sysfs_obj = kobject_create_and_add("hafnium", kernel_kobj);
248 if (!hf_sysfs_obj) {
249 pr_err("Unable to create sysfs object");
250 } else {
Andrew Scullbb7ae412018-09-28 21:07:15 +0100251 ret = sysfs_create_file(hf_sysfs_obj, &send_attr.attr);
252 if (ret)
253 pr_err("Unable to create 'send' sysfs file");
254 }
255}
256
/**
 * Initializes the Hafnium driver by creating a thread for each vCPU of each
 * virtual machine.
 *
 * On any failure after VM state starts being allocated, jumps to
 * fail_with_cleanup, which relies on hf_vm_count having been advanced only
 * past fully-initialized VMs (and on vcpu_count being truncated on partial
 * thread creation) so hf_free_resources() never touches uninitialized state.
 */
static int __init hf_init(void)
{
	int64_t ret;
	uint32_t i, j;
	uint32_t total_vm_count;
	uint32_t total_vcpu_count;

	/* Allocate a page for send and receive buffers. */
	hf_send_page = alloc_page(GFP_KERNEL);
	if (!hf_send_page) {
		pr_err("Unable to allocate send buffer\n");
		return -ENOMEM;
	}

	hf_recv_page = alloc_page(GFP_KERNEL);
	if (!hf_recv_page) {
		__free_page(hf_send_page);
		pr_err("Unable to allocate receive buffer\n");
		return -ENOMEM;
	}

	/*
	 * Configure both addresses. Once configured, we cannot free these pages
	 * because the hypervisor will use them, even if the module is
	 * unloaded.
	 */
	ret = hf_vm_configure(page_to_phys(hf_send_page),
			      page_to_phys(hf_recv_page));
	if (ret) {
		__free_page(hf_send_page);
		__free_page(hf_recv_page);
		/* TODO: We may want to grab this information from hypervisor
		 * and go from there. */
		pr_err("Unable to configure VM\n");
		return -EIO;
	}

	/* Get the number of VMs. */
	ret = hf_vm_get_count();
	if (ret < 0) {
		pr_err("Unable to retrieve number of VMs: %lld\n", ret);
		return -EIO;
	}

	/* Confirm the maximum number of VMs looks sane. */
	BUILD_BUG_ON(CONFIG_HAFNIUM_MAX_VMS < 1);
	BUILD_BUG_ON(CONFIG_HAFNIUM_MAX_VMS > U16_MAX);

	/* Validate the number of VMs. There must at least be the primary. */
	if (ret < 1 || ret > CONFIG_HAFNIUM_MAX_VMS) {
		pr_err("Number of VMs is out of range: %lld\n", ret);
		return -EDQUOT;
	}

	/* Only track the secondary VMs. */
	total_vm_count = ret - 1;
	hf_vms = kmalloc(sizeof(struct hf_vm) * total_vm_count, GFP_KERNEL);
	if (!hf_vms)
		return -ENOMEM;

	/* Initialize each VM. */
	total_vcpu_count = 0;
	for (i = 0; i < total_vm_count; i++) {
		struct hf_vm *vm = &hf_vms[i];

		/* Adjust the ID as only the secondaries are tracked. */
		vm->id = i + 1;

		ret = hf_vcpu_get_count(vm->id);
		if (ret < 0) {
			pr_err("HF_VCPU_GET_COUNT failed for vm=%u: %lld", vm->id,
			       ret);
			ret = -EIO;
			goto fail_with_cleanup;
		}

		/* Avoid overflowing the vcpu count. */
		if (ret > (U32_MAX - total_vcpu_count)) {
			pr_err("Too many vcpus: %u\n", total_vcpu_count);
			ret = -EDQUOT;
			goto fail_with_cleanup;
		}

		/* Confirm the maximum number of VCPUs looks sane. */
		BUILD_BUG_ON(CONFIG_HAFNIUM_MAX_VCPUS < 1);
		BUILD_BUG_ON(CONFIG_HAFNIUM_MAX_VCPUS > U16_MAX);

		/* Enforce the limit on vcpus. */
		total_vcpu_count += ret;
		if (total_vcpu_count > CONFIG_HAFNIUM_MAX_VCPUS) {
			pr_err("Too many vcpus: %u\n", total_vcpu_count);
			ret = -EDQUOT;
			goto fail_with_cleanup;
		}

		vm->vcpu_count = ret;
		vm->vcpu = kmalloc(sizeof(struct hf_vcpu) * vm->vcpu_count,
				   GFP_KERNEL);
		if (!vm->vcpu) {
			pr_err("No memory for %u vcpus for vm %u",
			       vm->vcpu_count, vm->id);
			ret = -ENOMEM;
			goto fail_with_cleanup;
		}

		/*
		 * Update the number of initialized VMs so that cleanup only
		 * walks VMs whose vcpu array exists.
		 */
		hf_vm_count = i + 1;

		/* Create a kernel thread for each vcpu. */
		for (j = 0; j < vm->vcpu_count; j++) {
			struct hf_vcpu *vcpu = &vm->vcpu[j];
			vcpu->task = kthread_create(hf_vcpu_thread, vcpu,
						    "vcpu_thread_%u_%u",
						    vm->id, j);
			if (IS_ERR(vcpu->task)) {
				pr_err("Error creating task (vm=%u,vcpu=%u)"
				       ": %ld\n", vm->id, j, PTR_ERR(vcpu->task));
				/*
				 * Truncate so cleanup only stops/puts the
				 * threads actually created.
				 */
				vm->vcpu_count = j;
				ret = PTR_ERR(vcpu->task);
				goto fail_with_cleanup;
			}

			/* Hold a ref so the task outlives kthread_stop(). */
			get_task_struct(vcpu->task);
			spin_lock_init(&vcpu->lock);
			vcpu->vm = vm;
			vcpu->vcpu_index = j;
			atomic_set(&vcpu->abort_sleep, 0);
		}
	}

	/* Start running threads now that all is initialized. */
	for (i = 0; i < hf_vm_count; i++) {
		struct hf_vm *vm = &hf_vms[i];
		for (j = 0; j < vm->vcpu_count; j++)
			wake_up_process(vm->vcpu[j].task);
	}

	/* Dump vm/vcpu count info. */
	pr_info("Hafnium successfully loaded with %u VMs:\n", hf_vm_count);
	for (i = 0; i < hf_vm_count; i++) {
		struct hf_vm *vm = &hf_vms[i];
		pr_info("\tVM %u: %u vCPUS\n", vm->id, vm->vcpu_count);
	}

	hf_init_sysfs();

	return 0;

fail_with_cleanup:
	hf_free_resources();
	return ret;
}
413
/**
 * Frees up all resources used by the Hafnium driver in preparation for
 * unloading it.
 *
 * The sysfs entry is removed first so userspace can no longer trigger
 * hf_send_store() while the vcpu threads are being stopped and freed.
 */
static void __exit hf_exit(void)
{
	if (hf_sysfs_obj)
		kobject_put(hf_sysfs_obj);

	pr_info("Preparing to unload Hafnium\n");
	hf_free_resources();
	pr_info("Hafnium ready to unload\n");
}
427
/* Module boilerplate: GPL license and entry/exit hooks. */
MODULE_LICENSE("GPL");

module_init(hf_init);
module_exit(hf_exit);