// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *
 * Copyright (C) IBM Corporation, 2011
 *
 * Authors: Sukadev Bhattiprolu <sukadev@linux.vnet.ibm.com>
 *          Anton Blanchard <anton@au.ibm.com>
 */
#include <linux/uaccess.h>
#include <linux/hardirq.h>
#include <asm/switch_to.h>
#include <asm/asm-prototypes.h>

int enter_vmx_usercopy(void)
{
	if (in_interrupt())
		return 0;

	preempt_disable();
	/*
	 * We need to disable page faults as they can call schedule and
	 * thus make us lose the VMX context. So on page faults, we just
	 * fail which will cause a fallback to the normal non-vmx copy.
	 */
	pagefault_disable();

	enable_kernel_altivec();

	return 1;
}

/*
 * This function must return 0 because we tail call optimise when calling
 * from __copy_tofrom_user_power7 which returns 0 on success.
 */
int exit_vmx_usercopy(void)
{
	disable_kernel_altivec();
	pagefault_enable();
	preempt_enable();
	return 0;
}
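
/*
 * Illustrative sketch, not part of the original kernel file: how a
 * caller is expected to pair these helpers. The real fast path,
 * __copy_tofrom_user_power7, is written in assembly; this hypothetical
 * C analogue only shows the two properties documented above: a zero
 * return from enter_vmx_usercopy() forces a fallback to the ordinary
 * non-VMX copy, and the success path may tail-call exit_vmx_usercopy()
 * because its return value (0) doubles as the copy routine's
 * "0 bytes left uncopied" success value.
 */
#if 0	/* illustrative only: vmx_copy_loop() is a made-up helper */
static unsigned long example_copy_tofrom_user(void __user *to,
					      const void __user *from,
					      unsigned long n)
{
	if (!enter_vmx_usercopy())
		/* in interrupt context: fall back to the non-VMX copy */
		return __copy_tofrom_user(to, from, n);

	vmx_copy_loop(to, from, n);	/* hypothetical VMX copy loop */

	/*
	 * Tail call: exit_vmx_usercopy() always returns 0, exactly the
	 * success value the caller expects, so the compiler can turn
	 * this into a plain jump.
	 */
	return exit_vmx_usercopy();
}
#endif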

int enter_vmx_ops(void)
{
	if (in_interrupt())
		return 0;

	preempt_disable();

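	/*
	 * Unlike enter_vmx_usercopy(), there is no pagefault_disable()
	 * here: these helpers bracket operations on kernel memory
	 * (memcpy and friends), which are not expected to fault.
	 */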
	enable_kernel_altivec();

	return 1;
}

/*
 * All calls to this function will be optimised into tail calls. We are
 * passed a pointer to the destination which we return as required by a
 * memcpy implementation.
 */
void *exit_vmx_ops(void *dest)
{
	disable_kernel_altivec();
	preempt_enable();
	return dest;
}
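
/*
 * Illustrative sketch, not part of the original kernel file: the
 * memcpy-style pairing described above. The real VMX memcpy is
 * assembly; this hypothetical C analogue shows why exit_vmx_ops()
 * takes and returns dest: the copy routine can end in a tail call and
 * still honour memcpy's contract of returning the destination pointer.
 */
#if 0	/* illustrative only: vmx_memcpy_loop() is a made-up helper */
static void *example_memcpy(void *dest, const void *src, size_t n)
{
	if (!enter_vmx_ops())
		return memcpy(dest, src, n);	/* scalar fallback */

	vmx_memcpy_loop(dest, src, n);		/* hypothetical VMX loop */

	return exit_vmx_ops(dest);		/* tail call: returns dest */
}
#endif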