xref: /OK3568_Linux_fs/kernel/arch/x86/lib/usercopy.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
/*
 * User address space access functions.
 *
 *  For licensing details see kernel-base/COPYING
 */

#include <linux/uaccess.h>
#include <linux/export.h>

#include <asm/tlbflush.h>

/*
 * We rely on the nested NMI work to allow atomic faults from the NMI path; the
 * nested NMI paths are careful to preserve CR2.
 */
unsigned long
copy_from_user_nmi(void *to, const void __user *from, unsigned long n)
{
	unsigned long ret;

	if (__range_not_ok(from, n, TASK_SIZE))
		return n;

	if (!nmi_uaccess_okay())
		return n;

	/*
	 * Even though this function is typically called from NMI/IRQ context,
	 * disable pagefaults so that its behaviour is consistent even when
	 * called from other contexts.
	 */
	pagefault_disable();
	ret = __copy_from_user_inatomic(to, from, n);
	pagefault_enable();

	return ret;
}
EXPORT_SYMBOL_GPL(copy_from_user_nmi);
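
/*
 * Illustrative usage (a minimal sketch, not part of the original file):
 * a helper in the style of the perf callchain/unwind code that reads a
 * single word from a user address while running in NMI context. The
 * helper name read_user_word_nmi() is hypothetical; only
 * copy_from_user_nmi() above comes from this file, and -EFAULT assumes
 * <linux/errno.h> is reachable through the existing includes. Note that
 * copy_from_user_nmi() returns the number of bytes that were NOT copied,
 * so 0 means the whole word was read successfully.
 */
static inline int __maybe_unused
read_user_word_nmi(const unsigned long __user *uaddr, unsigned long *val)
{
	unsigned long not_copied;

	/*
	 * Fails fast (returning sizeof(*val)) if uaddr lies outside the
	 * user address range or the current mm cannot be touched from NMI.
	 */
	not_copied = copy_from_user_nmi(val, uaddr, sizeof(*val));

	return not_copied ? -EFAULT : 0;
}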