/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ALPHA_TLBFLUSH_H
#define _ALPHA_TLBFLUSH_H

#include <linux/mm.h>
#include <linux/sched.h>
#include <asm/compiler.h>

#ifndef __EXTERN_INLINE
#define __EXTERN_INLINE extern inline
#define __MMU_EXTERN_INLINE
#endif
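
/* GNU89 extern-inline idiom: every includer sees inlinable definitions
   of the helpers below, while (presumably) exactly one translation unit
   defines __EXTERN_INLINE itself before including this header, e.g.

	#define __EXTERN_INLINE inline
	#include <asm/tlbflush.h>

   so that out-of-line copies are emitted there for non-inlined calls to
   link against.  __MMU_EXTERN_INLINE records that this header supplied
   the default, so the matching #undefs near the end of the file only
   fire in that case.  */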

extern void __load_new_mm_context(struct mm_struct *);


/* Use a few helper functions to hide the ugly broken ASN
   numbers on early Alphas (ev4 and ev45).  */

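/* EV4 has no usable ASNs, so retiring the current mm's translations
   means loading a fresh context and then dropping every per-process TB
   entry with tbiap() ("translation buffer invalidate all process").
   EV5 ASNs work, so __load_new_mm_context() alone is enough: it takes
   a new ASN, and stale entries tagged with the old one can never hit.  */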
__EXTERN_INLINE void
ev4_flush_tlb_current(struct mm_struct *mm)
{
	__load_new_mm_context(mm);
	tbiap();
}

__EXTERN_INLINE void
ev5_flush_tlb_current(struct mm_struct *mm)
{
	__load_new_mm_context(mm);
}

/* Flush just one page in the current TLB set.  We need to be very
   careful about the icache here, there is no way to invalidate a
   specific icache page.  */

__EXTERN_INLINE void
ev4_flush_tlb_current_page(struct mm_struct *mm,
			   struct vm_area_struct *vma,
			   unsigned long addr)
{
	int tbi_flag = 2;
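	/* tbi type 2 invalidates only the DTB entry for addr; type 3
	   hits the ITB entry as well.  Executable mappings also get a
	   whole new context, since EV4 cannot invalidate a single
	   icache page.  */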
	if (vma->vm_flags & VM_EXEC) {
		__load_new_mm_context(mm);
		tbi_flag = 3;
	}
	tbi(tbi_flag, addr);
}

__EXTERN_INLINE void
ev5_flush_tlb_current_page(struct mm_struct *mm,
			   struct vm_area_struct *vma,
			   unsigned long addr)
{
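	/* With working ASNs, taking a new context is enough to retire
	   stale entries for executable pages; for everything else a
	   single DTB invalidate does the job.  */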
	if (vma->vm_flags & VM_EXEC)
		__load_new_mm_context(mm);
	else
		tbi(2, addr);
}

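/* Generic kernels have to run on any Alpha, so flush_tlb_current and
   flush_tlb_current_page dispatch through the machine vector
   (alpha_mv) at run time; CPU-specific kernels bind the ev4 or ev5
   variant directly at compile time.  */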
#ifdef CONFIG_ALPHA_GENERIC
# define flush_tlb_current		alpha_mv.mv_flush_tlb_current
# define flush_tlb_current_page		alpha_mv.mv_flush_tlb_current_page
#else
# ifdef CONFIG_ALPHA_EV4
#  define flush_tlb_current		ev4_flush_tlb_current
#  define flush_tlb_current_page	ev4_flush_tlb_current_page
# else
#  define flush_tlb_current		ev5_flush_tlb_current
#  define flush_tlb_current_page	ev5_flush_tlb_current_page
# endif
#endif

#ifdef __MMU_EXTERN_INLINE
#undef __EXTERN_INLINE
#undef __MMU_EXTERN_INLINE
#endif

/* Flush current user mapping.  */
static inline void
flush_tlb(void)
{
	flush_tlb_current(current->active_mm);
}

/* Flush someone else's user mapping.  */
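/* Zeroing the per-CPU context forces a new ASN to be allocated the
   next time that mm is activated on this CPU, which retires all of
   its stale TB entries without touching the hardware here.  */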
static inline void
flush_tlb_other(struct mm_struct *mm)
{
	unsigned long *mmc = &mm->context[smp_processor_id()];
	/* Check it's not zero first to avoid cacheline ping pong
	   when possible.  */
	if (*mmc) *mmc = 0;
}

#ifndef CONFIG_SMP
/* Flush everything (kernel mapping may also have changed
   due to vmalloc/vfree).  */
static inline void flush_tlb_all(void)
{
	tbia();
}

/* Flush a specified user mapping.  */
static inline void
flush_tlb_mm(struct mm_struct *mm)
{
	if (mm == current->active_mm)
		flush_tlb_current(mm);
	else
		flush_tlb_other(mm);
}

/* Page-granular tlb flush.  */
static inline void
flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
{
	struct mm_struct *mm = vma->vm_mm;

	if (mm == current->active_mm)
		flush_tlb_current_page(mm, vma, addr);
	else
		flush_tlb_other(mm);
}

/* Flush a specified range of user mapping.  On the Alpha we flush
   the whole user tlb.  */
static inline void
flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
		unsigned long end)
{
	flush_tlb_mm(vma->vm_mm);
}

#else /* CONFIG_SMP */

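/* On SMP the flushes must reach every CPU, so these are out-of-line
   functions (presumably in arch/alpha/kernel/smp.c) that combine the
   local operation with cross-CPU calls.  */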
extern void flush_tlb_all(void);
extern void flush_tlb_mm(struct mm_struct *);
extern void flush_tlb_page(struct vm_area_struct *, unsigned long);
extern void flush_tlb_range(struct vm_area_struct *, unsigned long,
			    unsigned long);

#endif /* CONFIG_SMP */

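/* Kernel translations carry the ASM (address space match) bit and so
   survive tbiap() and ASN changes; a full tbia() via flush_tlb_all()
   is the only way to drop them.  */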
static inline void flush_tlb_kernel_range(unsigned long start,
					unsigned long end)
{
	flush_tlb_all();
}

#endif /* _ALPHA_TLBFLUSH_H */