/*
 * Copyright 2018 Google LLC
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 */

#include <linux/hrtimer.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/sched/task.h>
#include <linux/slab.h>

#include <hf/call.h>

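/*
 * Driver-imposed limits on how many VMs and vCPUs will be managed; the counts
 * reported by the hypervisor are validated against these at init time.
 */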
#define CONFIG_HAFNIUM_MAX_VMS 16
#define CONFIG_HAFNIUM_MAX_VCPUS 32

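/**
 * struct hf_vcpu - per-vCPU state tracked by the driver.
 * @lock: intended to guard this vCPU's state.
 * @vm: the VM this vCPU belongs to.
 * @vcpu_index: index of this vCPU within its VM, as passed to hypercalls.
 * @task: kernel thread that runs this vCPU.
 * @abort_sleep: non-zero when the vCPU thread should not go to sleep.
 * @timer: wakes the thread when a hypervisor-requested timed sleep expires.
 */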
struct hf_vcpu {
	spinlock_t lock;
	struct hf_vm *vm;
	uint32_t vcpu_index;
	struct task_struct *task;
	atomic_t abort_sleep;
	struct hrtimer timer;
};

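/**
 * struct hf_vm - per-VM state tracked by the driver.
 * @id: VM ID used in hypercalls; secondary VMs are numbered from 1.
 * @vcpu_count: number of vCPUs belonging to this VM.
 * @vcpu: array of @vcpu_count vCPUs.
 */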
struct hf_vm {
	uint32_t id;
	uint32_t vcpu_count;
	struct hf_vcpu *vcpu;
};

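/*
 * Module-wide state: the secondary VMs being managed and the pages used as
 * send/receive mailboxes shared with the hypervisor.
 */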
static struct hf_vm *hf_vms;
static uint32_t hf_vm_count;
static struct page *hf_send_page = NULL;
static struct page *hf_recv_page = NULL;

/**
 * Wakes up the kernel thread responsible for running the given vcpu.
 *
 * Returns 0 if the thread was already running, 1 otherwise.
 */
static int hf_vcpu_wake_up(struct hf_vcpu *vcpu)
{
	/* Set a flag indicating that the thread should not go to sleep. */
	atomic_set(&vcpu->abort_sleep, 1);

	/* Set the thread to running state. */
	return wake_up_process(vcpu->task);
}

/**
 * Puts the current thread to sleep. The current thread must be responsible for
 * running the given vcpu.
 *
 * Going to sleep will fail if hf_vcpu_wake_up() or kthread_stop() was called on
 * this vcpu/thread since the last time it [re]started running.
 */
static void hf_vcpu_sleep(struct hf_vcpu *vcpu)
{
	int abort;

	set_current_state(TASK_INTERRUPTIBLE);

	/* Check the sleep-abort flag after making thread interruptible. */
	abort = atomic_read(&vcpu->abort_sleep);
	if (!abort && !kthread_should_stop())
		schedule();

	/* Set state back to running on the way out. */
	set_current_state(TASK_RUNNING);
}

/**
 * Wakes up the thread associated with the vcpu that owns the given timer. This
 * is called when the timer the thread is waiting on expires.
 */
static enum hrtimer_restart hf_vcpu_timer_expired(struct hrtimer *timer)
{
	struct hf_vcpu *vcpu = container_of(timer, struct hf_vcpu, timer);
	/* TODO: Inject interrupt. */
	hf_vcpu_wake_up(vcpu);
	return HRTIMER_NORESTART;
}

/**
 * This is the main loop of each vcpu.
 */
static int hf_vcpu_thread(void *data)
{
	struct hf_vcpu *vcpu = data;
	struct hf_vcpu_run_return ret;

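	/*
	 * Set up the timer used to wake this thread from hypervisor-requested
	 * timed sleeps (see HF_VCPU_RUN_SLEEP below).
	 */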
	hrtimer_init(&vcpu->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	vcpu->timer.function = &hf_vcpu_timer_expired;

	while (!kthread_should_stop()) {
		/*
		 * We're about to run the vcpu, so we can reset the abort-sleep
		 * flag.
		 */
		atomic_set(&vcpu->abort_sleep, 0);

		/* Call into Hafnium to run vcpu. */
		ret = hf_vcpu_run(vcpu->vm->id, vcpu->vcpu_index);

		switch (ret.code) {
		/* Yield (forcibly or voluntarily). */
		case HF_VCPU_RUN_YIELD:
			break;

		/* WFI. */
		case HF_VCPU_RUN_WAIT_FOR_INTERRUPT:
			hf_vcpu_sleep(vcpu);
			break;

		/* Wake up another vcpu. */
		case HF_VCPU_RUN_WAKE_UP:
		{
			struct hf_vm *vm;
			if (ret.wake_up.vm_id < 1 ||
			    ret.wake_up.vm_id > hf_vm_count)
				break;
			vm = &hf_vms[ret.wake_up.vm_id - 1];
			if (ret.wake_up.vcpu < vm->vcpu_count) {
				if (hf_vcpu_wake_up(&vm->vcpu[ret.wake_up.vcpu]) == 0) {
					/*
					 * The task was already running (presumably on a
					 * different physical CPU); interrupt it. This gives
					 * Hafnium a chance to inject any new interrupts.
					 */
					kick_process(vm->vcpu[ret.wake_up.vcpu].task);
				}
			} else if (ret.wake_up.vcpu == HF_INVALID_VCPU) {
				/* TODO: pick one to interrupt. */
				pr_warning("No vcpu to wake.");
			}
		}
		break;

		/* Response available. */
		case HF_VCPU_RUN_MESSAGE:
		{
			uint32_t i;
			const char *buf = page_address(hf_recv_page);
			pr_info("Received response from vm %u (%u bytes): ",
				vcpu->vm->id, ret.message.size);
			for (i = 0; i < ret.message.size; i++)
				printk(KERN_CONT "%c", buf[i]);
			printk(KERN_CONT "\n");
			hf_mailbox_clear();
		}
		break;

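		/* Sleep for the given duration, or until woken up earlier. */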
		case HF_VCPU_RUN_SLEEP:
			hrtimer_start(&vcpu->timer, ret.sleep.ns, HRTIMER_MODE_REL);
			hf_vcpu_sleep(vcpu);
			hrtimer_cancel(&vcpu->timer);
			break;
		}
	}

	return 0;
}

/**
 * Frees all resources, including threads, associated with the Hafnium driver.
 */
static void hf_free_resources(void)
{
	uint32_t i, j;

	/*
	 * First stop all worker threads. We need to do this before freeing
	 * resources because workers may reference each other, so it is only
	 * safe to free resources after they have all stopped.
	 */
	for (i = 0; i < hf_vm_count; i++) {
		struct hf_vm *vm = &hf_vms[i];
		for (j = 0; j < vm->vcpu_count; j++)
			kthread_stop(vm->vcpu[j].task);
	}

	/* Free resources. */
	for (i = 0; i < hf_vm_count; i++) {
		struct hf_vm *vm = &hf_vms[i];
		for (j = 0; j < vm->vcpu_count; j++)
			put_task_struct(vm->vcpu[j].task);
		kfree(vm->vcpu);
	}

	kfree(hf_vms);
}

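/**
 * Handles a write to the 'send' sysfs file, e.g.:
 *
 *   echo -n "hello" > /sys/kernel/hafnium/send
 *
 * The data is copied (truncated to HF_MAILBOX_SIZE) into the shared send
 * buffer and passed to the hypervisor for delivery to the first secondary VM;
 * the vcpu selected to receive the message is then woken up.
 */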
static ssize_t hf_send_store(struct kobject *kobj, struct kobj_attribute *attr,
			     const char *buf, size_t count)
{
	int64_t ret;
	struct hf_vm *vm;

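	/* Truncate the message to the mailbox size. */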
	count = min_t(size_t, count, HF_MAILBOX_SIZE);

	/* Copy data to send buffer. */
	memcpy(page_address(hf_send_page), buf, count);

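	/* Messages are currently always addressed to the first secondary VM. */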
	vm = &hf_vms[0];
	ret = hf_mailbox_send(vm->id, count);
	if (ret < 0)
		return -EAGAIN;

	if (ret == HF_INVALID_VCPU) {
		/*
		 * TODO: We need to interrupt some vcpu because none are waiting
		 * for data.
		 */
		pr_warning("No vcpu to receive message.");
		return -ENOSYS;
	}

	if (ret >= vm->vcpu_count)
		return -EINVAL;

	/* Wake up the vcpu that is going to process the data. */
	hf_vcpu_wake_up(&vm->vcpu[ret]);

	return count;
}

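/* Sysfs object and write-only 'send' attribute, exposed under /sys/kernel. */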
static struct kobject *hf_sysfs_obj = NULL;
static struct kobj_attribute send_attr =
	__ATTR(send, 0200, NULL, hf_send_store);

/**
 * Initializes the Hafnium driver's sysfs interface.
 */
static void __init hf_init_sysfs(void)
{
	int ret;

	/* Create the sysfs interface to interrupt vcpus. */
	hf_sysfs_obj = kobject_create_and_add("hafnium", kernel_kobj);
	if (!hf_sysfs_obj) {
		pr_err("Unable to create sysfs object");
	} else {
		ret = sysfs_create_file(hf_sysfs_obj, &send_attr.attr);
		if (ret)
			pr_err("Unable to create 'send' sysfs file");
	}
}

/**
 * Initializes the Hafnium driver by creating a thread for each vCPU of each
 * virtual machine.
 */
static int __init hf_init(void)
{
	int64_t ret;
	uint32_t i, j;
	uint32_t total_vm_count;
	uint32_t total_vcpu_count;

	/* Allocate a page for send and receive buffers. */
	hf_send_page = alloc_page(GFP_KERNEL);
	if (!hf_send_page) {
		pr_err("Unable to allocate send buffer\n");
		return -ENOMEM;
	}

	hf_recv_page = alloc_page(GFP_KERNEL);
	if (!hf_recv_page) {
		__free_page(hf_send_page);
		pr_err("Unable to allocate receive buffer\n");
		return -ENOMEM;
	}

	/*
	 * Configure both addresses. Once configured, we cannot free these pages
	 * because the hypervisor will use them, even if the module is
	 * unloaded.
	 */
	ret = hf_vm_configure(page_to_phys(hf_send_page),
			      page_to_phys(hf_recv_page));
	if (ret) {
		__free_page(hf_send_page);
		__free_page(hf_recv_page);
		/* TODO: We may want to grab this information from hypervisor
		 * and go from there. */
		pr_err("Unable to configure VM\n");
		return -EIO;
	}

	/* Get the number of VMs. */
	ret = hf_vm_get_count();
	if (ret < 0) {
		pr_err("Unable to retrieve number of VMs: %lld\n", ret);
		return -EIO;
	}

	/* Confirm the maximum number of VMs looks sane. */
	BUILD_BUG_ON(CONFIG_HAFNIUM_MAX_VMS < 1);
	BUILD_BUG_ON(CONFIG_HAFNIUM_MAX_VMS > U16_MAX);

	/* Validate the number of VMs. There must be at least the primary. */
	if (ret < 1 || ret > CONFIG_HAFNIUM_MAX_VMS) {
		pr_err("Number of VMs is out of range: %lld\n", ret);
		return -EDQUOT;
	}

	/* Only track the secondary VMs. */
	total_vm_count = ret - 1;
	hf_vms = kmalloc(sizeof(struct hf_vm) * total_vm_count, GFP_KERNEL);
	if (!hf_vms)
		return -ENOMEM;

	/* Initialize each VM. */
	total_vcpu_count = 0;
	for (i = 0; i < total_vm_count; i++) {
		struct hf_vm *vm = &hf_vms[i];

		/* Adjust the ID as only the secondaries are tracked. */
		vm->id = i + 1;

		ret = hf_vcpu_get_count(vm->id);
		if (ret < 0) {
			pr_err("HF_VCPU_GET_COUNT failed for vm=%u: %lld", vm->id,
			       ret);
			ret = -EIO;
			goto fail_with_cleanup;
		}

		/* Avoid overflowing the vcpu count. */
		if (ret > (U32_MAX - total_vcpu_count)) {
			pr_err("Too many vcpus: %u\n", total_vcpu_count);
			ret = -EDQUOT;
			goto fail_with_cleanup;
		}

		/* Confirm the maximum number of VCPUs looks sane. */
		BUILD_BUG_ON(CONFIG_HAFNIUM_MAX_VCPUS < 1);
		BUILD_BUG_ON(CONFIG_HAFNIUM_MAX_VCPUS > U16_MAX);

		/* Enforce the limit on vcpus. */
		total_vcpu_count += ret;
		if (total_vcpu_count > CONFIG_HAFNIUM_MAX_VCPUS) {
			pr_err("Too many vcpus: %u\n", total_vcpu_count);
			ret = -EDQUOT;
			goto fail_with_cleanup;
		}

		vm->vcpu_count = ret;
		vm->vcpu = kmalloc(sizeof(struct hf_vcpu) * vm->vcpu_count,
				   GFP_KERNEL);
		if (!vm->vcpu) {
			pr_err("No memory for %u vcpus for vm %u",
			       vm->vcpu_count, vm->id);
			ret = -ENOMEM;
			goto fail_with_cleanup;
		}

		/* Update the number of initialized VMs. */
		hf_vm_count = i + 1;

		/* Create a kernel thread for each vcpu. */
		for (j = 0; j < vm->vcpu_count; j++) {
			struct hf_vcpu *vcpu = &vm->vcpu[j];
			vcpu->task = kthread_create(hf_vcpu_thread, vcpu,
						    "vcpu_thread_%u_%u",
						    vm->id, j);
			if (IS_ERR(vcpu->task)) {
				pr_err("Error creating task (vm=%u,vcpu=%u)"
				       ": %ld\n", vm->id, j, PTR_ERR(vcpu->task));
				vm->vcpu_count = j;
				ret = PTR_ERR(vcpu->task);
				goto fail_with_cleanup;
			}

			get_task_struct(vcpu->task);
			spin_lock_init(&vcpu->lock);
			vcpu->vm = vm;
			vcpu->vcpu_index = j;
			atomic_set(&vcpu->abort_sleep, 0);
		}
	}

	/* Start running threads now that all is initialized. */
	for (i = 0; i < hf_vm_count; i++) {
		struct hf_vm *vm = &hf_vms[i];
		for (j = 0; j < vm->vcpu_count; j++)
			wake_up_process(vm->vcpu[j].task);
	}

	/* Dump vm/vcpu count info. */
	pr_info("Hafnium successfully loaded with %u VMs:\n", hf_vm_count);
	for (i = 0; i < hf_vm_count; i++) {
		struct hf_vm *vm = &hf_vms[i];
		pr_info("\tVM %u: %u vCPUS\n", vm->id, vm->vcpu_count);
	}

	hf_init_sysfs();

	return 0;

fail_with_cleanup:
	hf_free_resources();
	return ret;
}

/**
 * Frees up all resources used by the Hafnium driver in preparation for
 * unloading it.
 */
static void __exit hf_exit(void)
{
	if (hf_sysfs_obj)
		kobject_put(hf_sysfs_obj);

	pr_info("Preparing to unload Hafnium\n");
	hf_free_resources();
	pr_info("Hafnium ready to unload\n");
}

MODULE_LICENSE("GPL");

module_init(hf_init);
module_exit(hf_exit);