/*
 * TLB flushing operations for SH with an MMU.
 *
 *  Copyright (C) 1999  Niibe Yutaka
 *  Copyright (C) 2003  Paul Mundt
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/mm.h>
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>

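/*
 * Flush the TLB entry for a single user page. If the VMA's mm is not
 * the current mm, its ASID is temporarily installed so the entry can be
 * addressed, and the previous ASID is restored afterwards.
 */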
void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
	unsigned int cpu = smp_processor_id();

	if (vma->vm_mm && cpu_context(cpu, vma->vm_mm) != NO_CONTEXT) {
		unsigned long flags;
		unsigned long asid;
		unsigned long saved_asid = MMU_NO_ASID;

		asid = cpu_asid(cpu, vma->vm_mm);
		page &= PAGE_MASK;

		local_irq_save(flags);
		if (vma->vm_mm != current->mm) {
			saved_asid = get_asid();
			set_asid(asid);
		}
		local_flush_tlb_one(asid, page);
		if (saved_asid != MMU_NO_ASID)
			set_asid(saved_asid);
		local_irq_restore(flags);
	}
}

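/*
 * Flush the TLB entries covering a user address range. If the range
 * spans more than a quarter of the TLB, the mm's context is simply
 * discarded (and reactivated for the current mm), which is cheaper
 * than flushing each page individually.
 */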
void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
			   unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned int cpu = smp_processor_id();

	if (cpu_context(cpu, mm) != NO_CONTEXT) {
		unsigned long flags;
		int size;

		local_irq_save(flags);
		size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
		if (size > (MMU_NTLB_ENTRIES/4)) { /* Too many TLB entries to flush */
			cpu_context(cpu, mm) = NO_CONTEXT;
			if (mm == current->mm)
				activate_context(mm, cpu);
		} else {
			unsigned long asid;
			unsigned long saved_asid = MMU_NO_ASID;

			asid = cpu_asid(cpu, mm);
			start &= PAGE_MASK;
			end += (PAGE_SIZE - 1);
			end &= PAGE_MASK;
			if (mm != current->mm) {
				saved_asid = get_asid();
				set_asid(asid);
			}
			while (start < end) {
				local_flush_tlb_one(asid, start);
				start += PAGE_SIZE;
			}
			if (saved_asid != MMU_NO_ASID)
				set_asid(saved_asid);
		}
		local_irq_restore(flags);
	}
}

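/*
 * Flush the TLB entries for a kernel address range, using init_mm's ASID.
 * Large ranges (more than a quarter of the TLB) fall back to a full flush.
 */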
void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	unsigned int cpu = smp_processor_id();
	unsigned long flags;
	int size;

	local_irq_save(flags);
	size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
	if (size > (MMU_NTLB_ENTRIES/4)) { /* Too many TLB entries to flush */
		local_flush_tlb_all();
	} else {
		unsigned long asid;
		unsigned long saved_asid = get_asid();

		asid = cpu_asid(cpu, &init_mm);
		start &= PAGE_MASK;
		end += (PAGE_SIZE - 1);
		end &= PAGE_MASK;
		set_asid(asid);
		while (start < end) {
			local_flush_tlb_one(asid, start);
			start += PAGE_SIZE;
		}
		set_asid(saved_asid);
	}
	local_irq_restore(flags);
}

void local_flush_tlb_mm(struct mm_struct *mm)
{
	unsigned int cpu = smp_processor_id();

	/*
	 * Invalidate all TLB entries of this process by getting a new
	 * MMU context, rather than invalidating each entry individually.
	 */
	if (cpu_context(cpu, mm) != NO_CONTEXT) {
		unsigned long flags;

		local_irq_save(flags);
		cpu_context(cpu, mm) = NO_CONTEXT;
		if (mm == current->mm)
			activate_context(mm, cpu);
		local_irq_restore(flags);
	}
}

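/*
 * Nuke the entire contents of the UTLB/ITLB by setting the TI
 * (TLB invalidate) bit in MMUCR.
 */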
void __flush_tlb_global(void)
{
	unsigned long flags;

	local_irq_save(flags);

	/*
	 * This is the most destructive of the TLB flushing options,
	 * and will tear down all of the UTLB/ITLB mappings, including
	 * wired entries.
	 */
	__raw_writel(__raw_readl(MMUCR) | MMUCR_TI, MMUCR);

	local_irq_restore(flags);
}