// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/arch/arm/lib/uaccess_with_memcpy.c
 *
 *  Written by: Lennert Buytenhek and Nicolas Pitre
 *  Copyright (C) 2009 Marvell Semiconductor
 */

#include <linux/kernel.h>
#include <linux/ctype.h>
#include <linux/uaccess.h>
#include <linux/rwsem.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/hardirq.h> /* for in_atomic() */
#include <linux/gfp.h>
#include <linux/highmem.h>
#include <linux/hugetlb.h>
#include <asm/current.h>
#include <asm/page.h>

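/*
 * Walk the page tables for a user address and decide whether a write
 * can proceed without faulting.  Returns 1 with the page table locked
 * when the destination is present, young, writable and dirty: *ptep
 * and *ptlp are set for a normal page, while for a huge/THP mapping
 * *ptep is left NULL and *ptlp points at the mm's page_table_lock.
 * Returns 0, with nothing locked, in every other case.
 */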
static int
pin_page_for_write(const void __user *_addr, pte_t **ptep, spinlock_t **ptlp)
{
	unsigned long addr = (unsigned long)_addr;
	pgd_t *pgd;
	p4d_t *p4d;
	pmd_t *pmd;
	pte_t *pte;
	pud_t *pud;
	spinlock_t *ptl;

	pgd = pgd_offset(current->mm, addr);
	if (unlikely(pgd_none(*pgd) || pgd_bad(*pgd)))
		return 0;

	p4d = p4d_offset(pgd, addr);
	if (unlikely(p4d_none(*p4d) || p4d_bad(*p4d)))
		return 0;

	pud = pud_offset(p4d, addr);
	if (unlikely(pud_none(*pud) || pud_bad(*pud)))
		return 0;

	pmd = pmd_offset(pud, addr);
	if (unlikely(pmd_none(*pmd)))
		return 0;

	/*
	 * A pmd can be bad if it refers to a HugeTLB or THP page.
	 *
	 * Both THP and HugeTLB pages have the same pmd layout
	 * and should not be manipulated by the pte functions.
	 *
	 * Lock the page table for the destination and check
	 * to see that it's still huge and whether or not we will
	 * need to fault on write.
	 */
	if (unlikely(pmd_thp_or_huge(*pmd))) {
		ptl = &current->mm->page_table_lock;
		spin_lock(ptl);
		if (unlikely(!pmd_thp_or_huge(*pmd)
			|| pmd_hugewillfault(*pmd))) {
			spin_unlock(ptl);
			return 0;
		}

		*ptep = NULL;
		*ptlp = ptl;
		return 1;
	}

	if (unlikely(pmd_bad(*pmd)))
		return 0;

	pte = pte_offset_map_lock(current->mm, pmd, addr, &ptl);
	if (unlikely(!pte_present(*pte) || !pte_young(*pte) ||
	    !pte_write(*pte) || !pte_dirty(*pte))) {
		pte_unmap_unlock(pte, ptl);
		return 0;
	}

	*ptep = pte;
	*ptlp = ptl;

	return 1;
}

static unsigned long noinline
__copy_to_user_memcpy(void __user *to, const void *from, unsigned long n)
{
	unsigned long ua_flags;
	int atomic;

	if (uaccess_kernel()) {
		memcpy((void *)to, from, n);
		return 0;
	}

	/* the mmap semaphore is taken only if not in an atomic context */
	atomic = faulthandler_disabled();

	if (!atomic)
		mmap_read_lock(current->mm);
	while (n) {
		pte_t *pte;
		spinlock_t *ptl;
		int tocopy;

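		/*
		 * If the page cannot be pinned yet (absent, or not
		 * writable and dirty), fault it in by writing one zero
		 * byte with __put_user(), dropping the mmap lock around
		 * the access and bailing out on a genuine fault, then
		 * try pinning again.
		 */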
		while (!pin_page_for_write(to, &pte, &ptl)) {
			if (!atomic)
				mmap_read_unlock(current->mm);
			if (__put_user(0, (char __user *)to))
				goto out;
			if (!atomic)
				mmap_read_lock(current->mm);
		}

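		/*
		 * Copy at most up to the end of the current page:
		 * e.g. with 4 KiB pages and a destination at offset
		 * 0xff4 within its page, tocopy is 12 bytes.
		 */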
		tocopy = (~(unsigned long)to & ~PAGE_MASK) + 1;
		if (tocopy > n)
			tocopy = n;

		ua_flags = uaccess_save_and_enable();
		memcpy((void *)to, from, tocopy);
		uaccess_restore(ua_flags);
		to += tocopy;
		from += tocopy;
		n -= tocopy;

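		/*
		 * A NULL pte means pin_page_for_write() took the
		 * huge/THP path and holds the mm's page_table_lock.
		 */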
		if (pte)
			pte_unmap_unlock(pte, ptl);
		else
			spin_unlock(ptl);
	}
	if (!atomic)
		mmap_read_unlock(current->mm);

out:
	return n;
}

unsigned long
arm_copy_to_user(void __user *to, const void *from, unsigned long n)
{
	/*
	 * This test is stubbed out of the main function above to keep
	 * the overhead for small copies low by avoiding a large
	 * register dump on the stack just to reload them right away.
	 * With frame pointer disabled, tail call optimization kicks in
	 * as well making this test almost invisible.
	 */
	if (n < 64) {
		unsigned long ua_flags = uaccess_save_and_enable();
		n = __copy_to_user_std(to, from, n);
		uaccess_restore(ua_flags);
	} else {
		n = __copy_to_user_memcpy(uaccess_mask_range_ptr(to, n),
					  from, n);
	}
	return n;
}

static unsigned long noinline
__clear_user_memset(void __user *addr, unsigned long n)
{
	unsigned long ua_flags;

	if (uaccess_kernel()) {
		memset((void *)addr, 0, n);
		return 0;
	}

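	/*
	 * Unlike __copy_to_user_memcpy() above there is no atomic-context
	 * special case here, so the mmap lock is taken unconditionally.
	 */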
	mmap_read_lock(current->mm);
	while (n) {
		pte_t *pte;
		spinlock_t *ptl;
		int tocopy;

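		/*
		 * Same fault-in-via-__put_user() dance as in
		 * __copy_to_user_memcpy() above.
		 */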
		while (!pin_page_for_write(addr, &pte, &ptl)) {
			mmap_read_unlock(current->mm);
			if (__put_user(0, (char __user *)addr))
				goto out;
			mmap_read_lock(current->mm);
		}

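		/* As above: clear at most up to the end of the current page. */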
		tocopy = (~(unsigned long)addr & ~PAGE_MASK) + 1;
		if (tocopy > n)
			tocopy = n;

		ua_flags = uaccess_save_and_enable();
		memset((void *)addr, 0, tocopy);
		uaccess_restore(ua_flags);
		addr += tocopy;
		n -= tocopy;

		if (pte)
			pte_unmap_unlock(pte, ptl);
		else
			spin_unlock(ptl);
	}
	mmap_read_unlock(current->mm);

out:
	return n;
}

unsigned long arm_clear_user(void __user *addr, unsigned long n)
{
	/* See the rationale for this in arm_copy_to_user() above. */
	if (n < 64) {
		unsigned long ua_flags = uaccess_save_and_enable();
		n = __clear_user_std(addr, n);
		uaccess_restore(ua_flags);
	} else {
		n = __clear_user_memset(addr, n);
	}
	return n;
}

#if 0

/*
 * This code is disabled by default, but kept around in case the chosen
 * thresholds need to be revalidated.  A runtime-determined variable
 * threshold would imply some overhead (small, but still), and so far
 * measurements on the targets concerned didn't show a worthwhile
 * variation.
 *
 * Note that a fairly precise sched_clock() implementation is needed
 * for the results to make any sense.
 */

#include <linux/vmalloc.h>

static int __init test_size_threshold(void)
{
	struct page *src_page, *dst_page;
	void *user_ptr, *kernel_ptr;
	unsigned long long t0, t1, t2;
	int size, ret;

	ret = -ENOMEM;
	src_page = alloc_page(GFP_KERNEL);
	if (!src_page)
		goto no_src;
	dst_page = alloc_page(GFP_KERNEL);
	if (!dst_page)
		goto no_dst;
	kernel_ptr = page_address(src_page);
	user_ptr = vmap(&dst_page, 1, VM_IOREMAP, __pgprot(__P010));
	if (!user_ptr)
		goto no_vmap;

	/* warm up the src page dcache */
	ret = __copy_to_user_memcpy(user_ptr, kernel_ptr, PAGE_SIZE);

	for (size = PAGE_SIZE; size >= 4; size /= 2) {
		t0 = sched_clock();
		ret |= __copy_to_user_memcpy(user_ptr, kernel_ptr, size);
		t1 = sched_clock();
		ret |= __copy_to_user_std(user_ptr, kernel_ptr, size);
		t2 = sched_clock();
		printk("copy_to_user: %d %llu %llu\n", size, t1 - t0, t2 - t1);
	}

	for (size = PAGE_SIZE; size >= 4; size /= 2) {
		t0 = sched_clock();
		ret |= __clear_user_memset(user_ptr, size);
		t1 = sched_clock();
		ret |= __clear_user_std(user_ptr, size);
		t2 = sched_clock();
		printk("clear_user: %d %llu %llu\n", size, t1 - t0, t2 - t1);
	}

	if (ret)
		ret = -EFAULT;

	vunmap(user_ptr);
no_vmap:
	put_page(dst_page);
no_dst:
	put_page(src_page);
no_src:
	return ret;
}

subsys_initcall(test_size_threshold);

#endif