/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _S390_TLBFLUSH_H
#define _S390_TLBFLUSH_H

#include <linux/mm.h>
#include <linux/sched.h>
#include <asm/processor.h>

/*
 * Flush all TLB entries on the local CPU.
 */
static inline void __tlb_flush_local(void)
{
	asm volatile("ptlb" : : : "memory");
}

/*
 * Flush TLB entries for a specific ASCE on all CPUs.
 */
static inline void __tlb_flush_idte(unsigned long asce)
{
	unsigned long opt;

	opt = IDTE_PTOA;
	if (MACHINE_HAS_TLB_GUEST)
		opt |= IDTE_GUEST_ASCE;
	/* Global TLB flush for the mm (IDTE, opcode 0xb98e) */
	asm volatile(
		"	.insn	rrf,0xb98e0000,0,%0,%1,0"
		: : "a" (opt), "a" (asce) : "cc");
}

/*
 * Flush all TLB entries on all CPUs.
 */
static inline void __tlb_flush_global(void)
{
	unsigned int dummy = 0;

	/* CSP (compare and swap and purge) flushes the TLB on all CPUs */
	csp(&dummy, 0, 0);
}

/*
 * Flush TLB entries for a specific mm on all CPUs (in case gmap is used
 * this implies multiple ASCEs!).
 */
static inline void __tlb_flush_mm(struct mm_struct *mm)
{
	unsigned long gmap_asce;

	/*
	 * If the machine has IDTE we prefer to do a per-mm flush
	 * on all CPUs instead of a local flush, even if the mm
	 * only ran on the local CPU.
	 */
	preempt_disable();
	atomic_inc(&mm->context.flush_count);
	/* Reset TLB flush mask */
	cpumask_copy(mm_cpumask(mm), &mm->context.cpu_attach_mask);
	barrier();
	gmap_asce = READ_ONCE(mm->context.gmap_asce);
	if (MACHINE_HAS_IDTE && gmap_asce != -1UL) {
		if (gmap_asce)
			__tlb_flush_idte(gmap_asce);
		__tlb_flush_idte(mm->context.asce);
	} else {
		/* Global TLB flush */
		__tlb_flush_global();
	}
	atomic_dec(&mm->context.flush_count);
	preempt_enable();
}

static inline void __tlb_flush_kernel(void)
{
	if (MACHINE_HAS_IDTE)
		__tlb_flush_idte(init_mm.context.asce);
	else
		__tlb_flush_global();
}

static inline void __tlb_flush_mm_lazy(struct mm_struct *mm)
{
	spin_lock(&mm->context.lock);
	if (mm->context.flush_mm) {
		mm->context.flush_mm = 0;
		__tlb_flush_mm(mm);
	}
	spin_unlock(&mm->context.lock);
}

/*
 * TLB flushing:
 *  flush_tlb() - flushes the current mm struct TLBs
 *  flush_tlb_all() - flushes all processes' TLBs
 *  flush_tlb_mm(mm) - flushes the specified mm context TLBs
 *  flush_tlb_page(vma, vmaddr) - flushes one page
 *  flush_tlb_range(vma, start, end) - flushes a range of pages
 *  flush_tlb_kernel_range(start, end) - flushes a range of kernel pages
 */
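
/*
 * Illustrative sketch (not part of this header): on s390 only the mm,
 * range and kernel variants below do real work; flush_tlb(),
 * flush_tlb_all() and flush_tlb_page() are intentionally empty. A
 * hypothetical caller that has just unmapped pages from an mm would
 * finish with a single deferred flush:
 *
 *	flush_tlb_mm(mm);
 */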

/*
 * flush_tlb_mm goes together with ptep_set_wrprotect for the
 * copy_page_range operation and flush_tlb_range is related to
 * ptep_get_and_clear for change_protection. ptep_set_wrprotect and
 * ptep_get_and_clear do not flush the TLBs directly if the mm has
 * only one user. At the end of the update the flush_tlb_mm and
 * flush_tlb_range functions need to do the flush.
 */
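
/*
 * Sketch of the deferred-flush protocol described above (hypothetical
 * caller; assumes the generic ptep_set_wrprotect(mm, addr, ptep)
 * signature, not code from this file):
 *
 *	for each source PTE in the range:
 *		ptep_set_wrprotect(src_mm, addr, ptep);	// no flush yet
 *	flush_tlb_mm(src_mm);	// one batched flush at the end
 *
 * Batching this way avoids a TLB flush per PTE update when the mm has
 * only one user.
 */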
#define flush_tlb()				do { } while (0)
#define flush_tlb_all()				do { } while (0)
#define flush_tlb_page(vma, addr)		do { } while (0)

static inline void flush_tlb_mm(struct mm_struct *mm)
{
	__tlb_flush_mm_lazy(mm);
}

static inline void flush_tlb_range(struct vm_area_struct *vma,
				   unsigned long start, unsigned long end)
{
	__tlb_flush_mm_lazy(vma->vm_mm);
}

static inline void flush_tlb_kernel_range(unsigned long start,
					  unsigned long end)
{
	__tlb_flush_kernel();
}
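
/*
 * Usage sketch (hypothetical caller, not from this file): code that
 * has changed kernel page tables for [start, end), e.g. vmalloc-style
 * mapping code, would follow up with
 *
 *	flush_tlb_kernel_range(start, end);
 *
 * Note that on s390 this flushes the whole kernel ASCE (or all TLBs
 * without IDTE) rather than just the given range; start and end are
 * ignored.
 */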

#endif /* _S390_TLBFLUSH_H */