/*
 * TLB miss handler for SH with an MMU.
 *
 * Copyright (C) 1999  Niibe Yutaka
 * Copyright (C) 2003 - 2012  Paul Mundt
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/kprobes.h>
#include <linux/kdebug.h>
#include <asm/mmu_context.h>
#include <asm/thread_info.h>

/*
 * Called with interrupts disabled.
 */
asmlinkage int __kprobes
handle_tlbmiss(struct pt_regs *regs, unsigned long error_code,
	       unsigned long address)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	pte_t entry;

	/*
	 * We don't take page faults for P1, P2, and parts of P4; these
	 * are always mapped, whether it be due to legacy behaviour in
	 * 29-bit mode, or due to PMB configuration in 32-bit mode.
	 */
	if (address >= P3SEG && address < P3_ADDR_MAX) {
		pgd = pgd_offset_k(address);
	} else {
		if (unlikely(address >= TASK_SIZE || !current->mm))
			return 1;

		pgd = pgd_offset(current->mm, address);
	}

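	/*
	 * Walk the page tables in software: pgd -> p4d -> pud -> pmd -> pte.
	 * If any level is missing we can't resolve the miss here, so return
	 * non-zero and let the generic page fault path deal with it.
	 */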
	p4d = p4d_offset(pgd, address);
	if (p4d_none_or_clear_bad(p4d))
		return 1;
	pud = pud_offset(p4d, address);
	if (pud_none_or_clear_bad(pud))
		return 1;
	pmd = pmd_offset(pud, address);
	if (pmd_none_or_clear_bad(pmd))
		return 1;
	pte = pte_offset_kernel(pmd, address);
	entry = *pte;
	if (unlikely(pte_none(entry) || pte_not_present(entry)))
		return 1;
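	/*
	 * A non-zero error_code indicates a write access.  Writes to a
	 * non-writable PTE are left to the generic fault path (e.g. for
	 * copy-on-write handling); otherwise mark the entry dirty on a
	 * write, and always mark it young.
	 */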
	if (unlikely(error_code && !pte_write(entry)))
		return 1;

	if (error_code)
		entry = pte_mkdirty(entry);
	entry = pte_mkyoung(entry);

	set_pte(pte, entry);

#if defined(CONFIG_CPU_SH4) && !defined(CONFIG_SMP)
	/*
	 * SH-4 does not set MMUCR.RC to the corresponding TLB entry in
	 * the case of an initial page write exception, so we need to
	 * flush it in order to avoid potential TLB entry duplication.
	 */
	if (error_code == FAULT_CODE_INITIAL)
		local_flush_tlb_one(get_asid(), address & PAGE_MASK);
#endif

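	/*
	 * Record the fault code for the higher-level fault handling code
	 * and load the updated entry into the TLB; no vma is needed (or
	 * available) here, so NULL is passed.
	 */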
	set_thread_fault_code(error_code);
	update_mmu_cache(NULL, address, pte);

	return 0;
}