xref: /OK3568_Linux_fs/kernel/arch/nds32/mm/fault.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2005-2017 Andes Technology Corporation

#include <linux/extable.h>
#include <linux/module.h>
#include <linux/signal.h>
#include <linux/ptrace.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/hardirq.h>
#include <linux/uaccess.h>
#include <linux/perf_event.h>

#include <asm/tlbflush.h>

extern void die(const char *str, struct pt_regs *regs, long err);

/*
 * This is useful to dump out the page tables associated with
 * 'addr' in mm 'mm'.
 */
void show_pte(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd;
	if (!mm)
		mm = &init_mm;

	pr_alert("pgd = %p\n", mm->pgd);
	pgd = pgd_offset(mm, addr);
	pr_alert("[%08lx] *pgd=%08lx", addr, pgd_val(*pgd));

	do {
		p4d_t *p4d;
		pud_t *pud;
		pmd_t *pmd;

		if (pgd_none(*pgd))
			break;

		if (pgd_bad(*pgd)) {
			pr_alert("(bad)");
			break;
		}

		p4d = p4d_offset(pgd, addr);
		pud = pud_offset(p4d, addr);
		pmd = pmd_offset(pud, addr);
#if PTRS_PER_PMD != 1
		pr_alert(", *pmd=%08lx", pmd_val(*pmd));
#endif

		if (pmd_none(*pmd))
			break;

		if (pmd_bad(*pmd)) {
			pr_alert("(bad)");
			break;
		}

		if (IS_ENABLED(CONFIG_HIGHMEM))
		{
			pte_t *pte;
			/* We must not map this if we have highmem enabled */
			pte = pte_offset_map(pmd, addr);
			pr_alert(", *pte=%08lx", pte_val(*pte));
			pte_unmap(pte);
		}
	} while (0);

	pr_alert("\n");
}

void do_page_fault(unsigned long entry, unsigned long addr,
		   unsigned int error_code, struct pt_regs *regs)
{
	struct task_struct *tsk;
	struct mm_struct *mm;
	struct vm_area_struct *vma;
	int si_code;
	vm_fault_t fault;
	unsigned int mask = VM_ACCESS_FLAGS;
	unsigned int flags = FAULT_FLAG_DEFAULT;

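	/*
	 * Keep only the instruction-fetch (ITYPE_mskINST) and exception-type
	 * (ITYPE_mskETYPE) bits of the error code; those are all that is
	 * needed to classify the fault below.
	 */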
	error_code = error_code & (ITYPE_mskINST | ITYPE_mskETYPE);
	tsk = current;
	mm = tsk->mm;
	si_code = SEGV_MAPERR;
	/*
	 * We fault-in kernel-space virtual memory on-demand. The
	 * 'reference' page table is init_mm.pgd.
	 *
	 * NOTE! We MUST NOT take any locks for this case. We may
	 * be in an interrupt or a critical region, and should
	 * only copy the information from the master page table,
	 * nothing more.
	 */
	if (addr >= TASK_SIZE) {
		if (user_mode(regs))
			goto bad_area_nosemaphore;

		if (addr >= TASK_SIZE && addr < VMALLOC_END
		    && (entry == ENTRY_PTE_NOT_PRESENT))
			goto vmalloc_fault;
		else
			goto no_context;
	}

	/* Send a signal to the task for handling the unaligned access. */
	if (entry == ENTRY_GENERAL_EXCPETION
	    && error_code == ETYPE_ALIGNMENT_CHECK) {
		if (user_mode(regs))
			goto bad_area_nosemaphore;
		else
			goto no_context;
	}

	/*
	 * If we're in an interrupt or have no user
	 * context, we must not take the fault..
	 */
	if (unlikely(faulthandler_disabled() || !mm))
		goto no_context;

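	/* Account this fault in the software page-fault perf counter. */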
	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, addr);

	/*
	 * As per x86, we may deadlock here. However, since the kernel only
	 * validly references user space from well defined areas of the code,
	 * we can bug out early if this is from code which shouldn't.
	 */
	if (unlikely(!mmap_read_trylock(mm))) {
		if (!user_mode(regs) &&
		    !search_exception_tables(instruction_pointer(regs)))
			goto no_context;
retry:
		mmap_read_lock(mm);
	} else {
		/*
		 * The above mmap_read_trylock() might have succeeded in which
		 * case, we'll have missed the might_sleep() from
		 * mmap_read_lock().
		 */
		might_sleep();
		if (IS_ENABLED(CONFIG_DEBUG_VM)) {
			if (!user_mode(regs) &&
			    !search_exception_tables(instruction_pointer(regs)))
				goto no_context;
		}
	}

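	/*
	 * Find the VMA covering the faulting address; if the address sits
	 * just below a VM_GROWSDOWN (stack) VMA, try to grow the stack
	 * downwards to cover it before giving up.
	 */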
	vma = find_vma(mm, addr);

	if (unlikely(!vma))
		goto bad_area;

	if (vma->vm_start <= addr)
		goto good_area;

	if (unlikely(!(vma->vm_flags & VM_GROWSDOWN)))
		goto bad_area;

	if (unlikely(expand_stack(vma, addr)))
		goto bad_area;

	/*
	 * Ok, we have a good vm_area for this memory access, so
	 * we can handle it..
	 */

good_area:
	si_code = SEGV_ACCERR;

	/* first do some preliminary protection checks */
	if (entry == ENTRY_PTE_NOT_PRESENT) {
		if (error_code & ITYPE_mskINST)
			mask = VM_EXEC;
		else {
			mask = VM_READ | VM_WRITE;
		}
	} else if (entry == ENTRY_TLB_MISC) {
		switch (error_code & ITYPE_mskETYPE) {
		case RD_PROT:
			mask = VM_READ;
			break;
		case WRT_PROT:
			mask = VM_WRITE;
			flags |= FAULT_FLAG_WRITE;
			break;
		case NOEXEC:
			mask = VM_EXEC;
			break;
		case PAGE_MODIFY:
			mask = VM_WRITE;
			flags |= FAULT_FLAG_WRITE;
			break;
		case ACC_BIT:
			BUG();
		default:
			break;
		}

	}
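	/* Reject the access if the VMA does not grant the required permission. */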
	if (!(vma->vm_flags & mask))
		goto bad_area;

	/*
	 * If for any reason at all we couldn't handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault.
	 */

	fault = handle_mm_fault(vma, addr, flags, regs);

	/*
	 * If we need to retry but a fatal signal is pending, handle the
	 * signal first. We do not need to release the mmap_lock because it
	 * would already be released in __lock_page_or_retry in mm/filemap.c.
	 */
	if (fault_signal_pending(fault, regs)) {
		if (!user_mode(regs))
			goto no_context;
		return;
	}

	if (unlikely(fault & VM_FAULT_ERROR)) {
		if (fault & VM_FAULT_OOM)
			goto out_of_memory;
		else if (fault & VM_FAULT_SIGBUS)
			goto do_sigbus;
		else
			goto bad_area;
	}

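	/*
	 * If the core MM asked for a retry, it already dropped the mmap
	 * lock; mark this attempt as tried and loop back to take the lock
	 * again.
	 */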
	if (flags & FAULT_FLAG_ALLOW_RETRY) {
		if (fault & VM_FAULT_RETRY) {
			flags |= FAULT_FLAG_TRIED;

			/* No need to mmap_read_unlock(mm) as we would
			 * have already released it in __lock_page_or_retry
			 * in mm/filemap.c.
			 */
			goto retry;
		}
	}

	mmap_read_unlock(mm);
	return;

	/*
	 * Something tried to access memory that isn't in our memory map..
	 * Fix it, but check if it's kernel or user first..
	 */
bad_area:
	mmap_read_unlock(mm);

bad_area_nosemaphore:

	/* User mode accesses just cause a SIGSEGV */

	if (user_mode(regs)) {
		tsk->thread.address = addr;
		tsk->thread.error_code = error_code;
		tsk->thread.trap_no = entry;
		force_sig_fault(SIGSEGV, si_code, (void __user *)addr);
		return;
	}

no_context:

	/* Are we prepared to handle this kernel fault?
	 *
	 * (The kernel has valid exception-points in the source
	 *  when it accesses user-memory. When it fails in one
	 *  of those points, we find it in a table and do a jump
	 *  to some fixup code that loads an appropriate error
	 *  code)
	 */

	{
		const struct exception_table_entry *entry;

		if ((entry =
		     search_exception_tables(instruction_pointer(regs))) !=
		    NULL) {
			/* Adjust the instruction pointer in the stackframe */
			instruction_pointer(regs) = entry->fixup;
			return;
		}
	}

	/*
	 * Oops. The kernel tried to access some bad page. We'll have to
	 * terminate things with extreme prejudice.
	 */

	bust_spinlocks(1);
	pr_alert("Unable to handle kernel %s at virtual address %08lx\n",
		 (addr < PAGE_SIZE) ? "NULL pointer dereference" :
		 "paging request", addr);

	show_pte(mm, addr);
	die("Oops", regs, error_code);
	bust_spinlocks(0);
	do_exit(SIGKILL);

	return;

	/*
	 * We ran out of memory, or some other thing happened to us that made
	 * us unable to handle the page fault gracefully.
	 */

out_of_memory:
	mmap_read_unlock(mm);
	if (!user_mode(regs))
		goto no_context;
	pagefault_out_of_memory();
	return;

do_sigbus:
	mmap_read_unlock(mm);

	/* Kernel mode? Handle exceptions or die */
	if (!user_mode(regs))
		goto no_context;

	/*
	 * Send a sigbus
	 */
	tsk->thread.address = addr;
	tsk->thread.error_code = error_code;
	tsk->thread.trap_no = entry;
	force_sig_fault(SIGBUS, BUS_ADRERR, (void __user *)addr);

	return;

vmalloc_fault:
	{
		/*
		 * Synchronize this task's top level page-table
		 * with the 'reference' page table.
		 *
		 * Use current_pgd instead of tsk->active_mm->pgd
		 * since the latter might be unavailable if this
		 * code is executed from an unfortunately timed irq
		 * (like inside schedule() between switch_mm and
		 *  switch_to...).
		 */

		unsigned int index = pgd_index(addr);
		pgd_t *pgd, *pgd_k;
		p4d_t *p4d, *p4d_k;
		pud_t *pud, *pud_k;
		pmd_t *pmd, *pmd_k;
		pte_t *pte_k;

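		/*
		 * NDS32_SR_L1_PPTB holds the physical base of the current
		 * top-level page table; convert it to a kernel virtual
		 * pointer and index the entry that maps 'addr', alongside
		 * the matching entry in the reference table (init_mm.pgd).
		 */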
		pgd = (pgd_t *) __va(__nds32__mfsr(NDS32_SR_L1_PPTB)) + index;
		pgd_k = init_mm.pgd + index;

		if (!pgd_present(*pgd_k))
			goto no_context;

		p4d = p4d_offset(pgd, addr);
		p4d_k = p4d_offset(pgd_k, addr);
		if (!p4d_present(*p4d_k))
			goto no_context;

		pud = pud_offset(p4d, addr);
		pud_k = pud_offset(p4d_k, addr);
		if (!pud_present(*pud_k))
			goto no_context;

		pmd = pmd_offset(pud, addr);
		pmd_k = pmd_offset(pud_k, addr);
		if (!pmd_present(*pmd_k))
			goto no_context;

		if (!pmd_present(*pmd))
			set_pmd(pmd, *pmd_k);
		else
			BUG_ON(pmd_page(*pmd) != pmd_page(*pmd_k));

		/*
		 * Since the vmalloc area is global, we don't
		 * need to copy individual PTE's, it is enough to
		 * copy the pgd pointer into the pte page of the
		 * root task. If that is there, we'll find our pte if
		 * it exists.
		 */

		/* Make sure the actual PTE exists as well to
		 * catch kernel vmalloc-area accesses to non-mapped
		 * address. If we don't do this, this will just
		 * silently loop forever.
		 */

		pte_k = pte_offset_kernel(pmd_k, addr);
		if (!pte_present(*pte_k))
			goto no_context;

		return;
	}
}