// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *
 * Copyright (C) IBM Corporation, 2011
 *
 * Authors: Sukadev Bhattiprolu <sukadev@linux.vnet.ibm.com>
 *          Anton Blanchard <anton@au.ibm.com>
 */
#include <linux/uaccess.h>
#include <linux/hardirq.h>
#include <asm/switch_to.h>
#include <asm/asm-prototypes.h>

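/*
 * Prepare to use VMX (Altivec) registers in a user-space copy routine.
 * Returns 1 if VMX may be used, or 0 if the caller must fall back to
 * the non-VMX copy path (e.g. when running in interrupt context).
 */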
int enter_vmx_usercopy(void)
{
	if (in_interrupt())
		return 0;

	preempt_disable();
	/*
	 * We need to disable page faults as they can call schedule and
	 * thus make us lose the VMX context. So on page faults, we just
	 * fail which will cause a fallback to the normal non-vmx copy.
	 */
	pagefault_disable();

	enable_kernel_altivec();

	return 1;
}

/*
 * This function must return 0 because we tail call optimise when calling
 * from __copy_tofrom_user_power7 which returns 0 on success.
 */
int exit_vmx_usercopy(void)
{
	disable_kernel_altivec();
	pagefault_enable();
	preempt_enable();
	return 0;
}

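/*
 * Prepare to use VMX (Altivec) registers in a kernel memory operation
 * (e.g. memcpy). Returns 1 if VMX may be used, or 0 if the caller must
 * fall back to the non-VMX implementation.
 */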
int enter_vmx_ops(void)
{
	if (in_interrupt())
		return 0;

	preempt_disable();

	enable_kernel_altivec();

	return 1;
}

/*
 * All calls to this function will be optimised into tail calls. We are
 * passed a pointer to the destination which we return as required by a
 * memcpy implementation.
 */
void *exit_vmx_ops(void *dest)
{
	disable_kernel_altivec();
	preempt_enable();
	return dest;
}
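
/*
 * Illustrative sketch only (not part of this file): how a caller is
 * expected to bracket a VMX-accelerated user copy with the helpers
 * above. The real caller, __copy_tofrom_user_power7, is written in
 * assembly; the C analogue below is hypothetical, as is the
 * non_vmx_fallback_copy() helper it falls back to.
 */
#if 0
static unsigned long example_vmx_usercopy(void *to, const void *from,
					  unsigned long n)
{
	if (!enter_vmx_usercopy())
		/* VMX unavailable (e.g. interrupt context): plain copy. */
		return non_vmx_fallback_copy(to, from, n);

	/* ... Altivec register copy loop would run here ... */

	/* Returns 0 so a tail call preserves the "success" return value. */
	return exit_vmx_usercopy();
}
#endif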