/* SPDX-License-Identifier: GPL-2.0-or-later */
/* include/asm-generic/tlb.h
 *
 *	Generic TLB shootdown code
 *
 * Copyright 2001 Red Hat, Inc.
 * Based on code from mm/memory.c Copyright Linus Torvalds and others.
 *
 * Copyright 2011 Red Hat, Inc., Peter Zijlstra
 */
#ifndef _ASM_GENERIC__TLB_H
#define _ASM_GENERIC__TLB_H

#include <linux/mmu_notifier.h>
#include <linux/swap.h>
#include <linux/hugetlb_inline.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>

/*
 * Blindly accessing user memory from NMI context can be dangerous
 * if we're in the middle of switching the current user task or switching
 * the loaded mm.
 */
#ifndef nmi_uaccess_okay
# define nmi_uaccess_okay() true
#endif

#ifdef CONFIG_MMU

/*
 * Generic MMU-gather implementation.
 *
 * The mmu_gather data structure is used by the mm code to implement the
 * correct and efficient ordering of freeing pages and TLB invalidations.
 *
 * This correct ordering is:
 *
 *  1) unhook page
 *  2) TLB invalidate page
 *  3) free page
 *
 * That is, we must never free a page before we have ensured there are no live
 * translations left to it. Otherwise it might be possible to observe (or
 * worse, change) the page content after it has been reused.
 *
 * The mmu_gather API consists of:
 *
 *  - tlb_gather_mmu() / tlb_finish_mmu(); start and finish a mmu_gather
 *
 *    Finish in particular will issue a (final) TLB invalidate and free
 *    all (remaining) queued pages.
 *
 *  - tlb_start_vma() / tlb_end_vma(); marks the start / end of a VMA
 *
 *    Defaults to flushing at tlb_end_vma() to reset the range; helps when
 *    there are large holes between the VMAs.
 *
 *  - tlb_remove_table()
 *
 *    tlb_remove_table() is the basic primitive to free page-table directories
 *    (__p*_free_tlb()).  In its most primitive form it is an alias for
 *    tlb_remove_page() below, for when page directories are pages and have no
 *    additional constraints.
 *
 *    See also MMU_GATHER_TABLE_FREE and MMU_GATHER_RCU_TABLE_FREE.
 *
 *  - tlb_remove_page() / __tlb_remove_page()
 *  - tlb_remove_page_size() / __tlb_remove_page_size()
 *
 *    __tlb_remove_page_size() is the basic primitive that queues a page for
 *    freeing. __tlb_remove_page() assumes PAGE_SIZE. Both return a boolean
 *    indicating whether the queue is (now) full and a call to
 *    tlb_flush_mmu() is required.
 *
 *    tlb_remove_page() and tlb_remove_page_size() imply the call to
 *    tlb_flush_mmu() when required and have no return value.
 *
 *  - tlb_change_page_size()
 *
 *    call before __tlb_remove_page*() to set the current page-size; implies a
 *    possible tlb_flush_mmu() call.
 *
 *  - tlb_flush_mmu() / tlb_flush_mmu_tlbonly()
 *
 *    tlb_flush_mmu_tlbonly() - does the TLB invalidate (and resets
 *                              related state, like the range)
 *
 *    tlb_flush_mmu() - in addition to the above TLB invalidate, also frees
 *			whatever pages are still batched.
 *
 *  - mmu_gather::fullmm
 *
 *    A flag set by tlb_gather_mmu() to indicate we're going to free
 *    the entire mm; this allows a number of optimizations.
 *
 *    - We can ignore tlb_{start,end}_vma(); because we don't
 *      care about ranges. Everything will be shot down.
 *
 *    - (RISC) architectures that use ASIDs can cycle to a new ASID
 *      and delay the invalidation until ASID space runs out.
 *
 *  - mmu_gather::need_flush_all
 *
 *    A flag that can be set by the arch code if it wants to force
 *    flush the entire TLB irrespective of the range. For instance
 *    x86-PAE needs this when changing top-level entries.
 *
 * And allows the architecture to provide and implement tlb_flush():
 *
 * tlb_flush() may, in addition to the above mentioned mmu_gather fields, make
 * use of:
 *
 *  - mmu_gather::start / mmu_gather::end
 *
 *    which provides the range that needs to be flushed to cover the pages to
 *    be freed.
 *
 *  - mmu_gather::freed_tables
 *
 *    set when we freed page table pages
 *
 *  - tlb_get_unmap_shift() / tlb_get_unmap_size()
 *
 *    returns the smallest TLB entry size unmapped in this range.
 *
 * If an architecture does not provide tlb_flush() a default implementation
 * based on flush_tlb_range() will be used, unless MMU_GATHER_NO_RANGE is
 * specified, in which case we'll default to flush_tlb_mm().
 *
 * Additionally there are a few opt-in features:
 *
 *  MMU_GATHER_PAGE_SIZE
 *
 *  This ensures we call tlb_flush() every time tlb_change_page_size() actually
 *  changes the size and provides mmu_gather::page_size to tlb_flush().
 *
 *  This might be useful if your architecture has size-specific TLB
 *  invalidation instructions.
 *
 *  MMU_GATHER_TABLE_FREE
 *
 *  This provides tlb_remove_table(), to be used instead of tlb_remove_page()
 *  for page directories (__p*_free_tlb()).
 *
 *  Useful if your architecture has non-page page directories.
 *
 *  When used, an architecture is expected to provide __tlb_remove_table()
 *  which does the actual freeing of these pages.
 *
 *  MMU_GATHER_RCU_TABLE_FREE
 *
 *  Like MMU_GATHER_TABLE_FREE, and adds semi-RCU semantics to the free (see
 *  comment below).
 *
 *  Useful if your architecture doesn't use IPIs for remote TLB invalidates
 *  and therefore doesn't naturally serialize with software page-table walkers.
 *
 *  MMU_GATHER_NO_RANGE
 *
 *  Use this if your architecture lacks an efficient flush_tlb_range().
 *
 *  MMU_GATHER_NO_GATHER
 *
 *  If this option is set, the mmu_gather will no longer track individual
 *  pages for delayed freeing. A platform that enables the option needs to
 *  provide its own implementation of the __tlb_remove_page_size() function
 *  to free pages.
 *
 *  This is useful if your architecture already flushes TLB entries in the
 *  various ptep_get_and_clear() functions.
 */
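
/*
 * Illustrative only (not declared by this header): a typical unmap path,
 * sketched assuming a tlb_gather_mmu()/tlb_finish_mmu() that take the mm and
 * the address range -- the exact signatures vary across kernel versions.
 * Each cleared PTE grows the flush range via tlb_remove_tlb_entry(), each
 * page is queued with tlb_remove_page() (which may flush if the batch fills),
 * and tlb_end_vma()/tlb_finish_mmu() issue the remaining invalidates before
 * the queued pages are finally freed:
 *
 *	struct mmu_gather tlb;
 *
 *	tlb_gather_mmu(&tlb, mm, start, end);
 *	tlb_start_vma(&tlb, vma);
 *	for (addr = start; addr < end; addr += PAGE_SIZE) {
 *		pte = ...;	(walk the page tables)
 *		ptent = ptep_get_and_clear(mm, addr, pte);
 *		tlb_remove_tlb_entry(&tlb, pte, addr);
 *		tlb_remove_page(&tlb, pte_page(ptent));
 *	}
 *	tlb_end_vma(&tlb, vma);
 *	tlb_finish_mmu(&tlb, start, end);
 */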

#ifdef CONFIG_MMU_GATHER_TABLE_FREE

struct mmu_table_batch {
#ifdef CONFIG_MMU_GATHER_RCU_TABLE_FREE
	struct rcu_head		rcu;
#endif
	unsigned int		nr;
	void			*tables[0];
};

#define MAX_TABLE_BATCH		\
	((PAGE_SIZE - sizeof(struct mmu_table_batch)) / sizeof(void *))
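
/*
 * For illustration (numbers are indicative, not guaranteed): on a 64-bit
 * machine with 4 KiB pages and the rcu_head included, the header is roughly
 * 24 bytes, so one batch page holds on the order of (4096 - 24) / 8 = 509
 * table pointers before the batch is full and must be flushed.
 */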

extern void tlb_remove_table(struct mmu_gather *tlb, void *table);

#else /* !CONFIG_MMU_GATHER_TABLE_FREE */

/*
 * Without MMU_GATHER_TABLE_FREE the architecture is assumed to have page based
 * page directories and we can use the normal page batching to free them.
 */
#define tlb_remove_table(tlb, page) tlb_remove_page((tlb), (page))

#endif /* CONFIG_MMU_GATHER_TABLE_FREE */

#ifdef CONFIG_MMU_GATHER_RCU_TABLE_FREE
/*
 * This allows an architecture that does not use the linux page-tables for
 * hardware to skip the TLBI when freeing page tables.
 */
#ifndef tlb_needs_table_invalidate
#define tlb_needs_table_invalidate() (true)
#endif

void tlb_remove_table_sync_one(void);

#else

#ifdef tlb_needs_table_invalidate
#error tlb_needs_table_invalidate() requires MMU_GATHER_RCU_TABLE_FREE
#endif

static inline void tlb_remove_table_sync_one(void) { }

#endif /* CONFIG_MMU_GATHER_RCU_TABLE_FREE */


#ifndef CONFIG_MMU_GATHER_NO_GATHER
/*
 * If we can't allocate a page to make a big batch of page pointers
 * to work on, then just handle a few from the on-stack structure.
 */
#define MMU_GATHER_BUNDLE	8

struct mmu_gather_batch {
	struct mmu_gather_batch	*next;
	unsigned int		nr;
	unsigned int		max;
	struct page		*pages[0];
};

#define MAX_GATHER_BATCH	\
	((PAGE_SIZE - sizeof(struct mmu_gather_batch)) / sizeof(void *))

/*
 * Limit the maximum number of mmu_gather batches to reduce a risk of soft
 * lockups for non-preemptible kernels on huge machines when a lot of memory
 * is zapped during unmapping.
 * 10K pages freed at once should be safe even without a preemption point.
 */
#define MAX_GATHER_BATCH_COUNT	(10000UL/MAX_GATHER_BATCH)
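
/*
 * Worked example (indicative): on a 64-bit machine with 4 KiB pages the
 * struct header is 16 bytes, so MAX_GATHER_BATCH is (4096 - 16) / 8 = 510
 * page pointers per batch and MAX_GATHER_BATCH_COUNT is 10000 / 510 = 19
 * batches; roughly 19 * 510 ~= 9700 pages can be freed in one go before
 * tlb_flush_mmu() provides a natural break.
 */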

extern bool __tlb_remove_page_size(struct mmu_gather *tlb, struct page *page,
				   int page_size);
#endif

/*
 * struct mmu_gather is an opaque type used by the mm code for passing around
 * any data needed by arch specific code for tlb_remove_page.
 */
struct mmu_gather {
	struct mm_struct	*mm;

#ifdef CONFIG_MMU_GATHER_TABLE_FREE
	struct mmu_table_batch	*batch;
#endif

	unsigned long		start;
	unsigned long		end;
	/*
	 * we are in the middle of an operation to clear
	 * a full mm and can make some optimizations
	 */
	unsigned int		fullmm : 1;

	/*
	 * we have performed an operation which
	 * requires a complete flush of the tlb
	 */
	unsigned int		need_flush_all : 1;

	/*
	 * we have removed page directories
	 */
	unsigned int		freed_tables : 1;

	/*
	 * at which levels have we cleared entries?
	 */
	unsigned int		cleared_ptes : 1;
	unsigned int		cleared_pmds : 1;
	unsigned int		cleared_puds : 1;
	unsigned int		cleared_p4ds : 1;

	/*
	 * tracks VM_EXEC | VM_HUGETLB in tlb_start_vma
	 */
	unsigned int		vma_exec : 1;
	unsigned int		vma_huge : 1;

	unsigned int		batch_count;

#ifndef CONFIG_MMU_GATHER_NO_GATHER
	struct mmu_gather_batch *active;
	struct mmu_gather_batch	local;
	struct page		*__pages[MMU_GATHER_BUNDLE];

#ifdef CONFIG_MMU_GATHER_PAGE_SIZE
	unsigned int		page_size;
#endif
#endif
};

void tlb_flush_mmu(struct mmu_gather *tlb);

static inline void __tlb_adjust_range(struct mmu_gather *tlb,
				      unsigned long address,
				      unsigned int range_size)
{
	tlb->start = min(tlb->start, address);
	tlb->end = max(tlb->end, address + range_size);
}

static inline void __tlb_reset_range(struct mmu_gather *tlb)
{
	if (tlb->fullmm) {
		tlb->start = tlb->end = ~0;
	} else {
		tlb->start = TASK_SIZE;
		tlb->end = 0;
	}
	tlb->freed_tables = 0;
	tlb->cleared_ptes = 0;
	tlb->cleared_pmds = 0;
	tlb->cleared_puds = 0;
	tlb->cleared_p4ds = 0;
	/*
	 * Do not reset mmu_gather::vma_* fields here, we do not
	 * call into tlb_start_vma() again to set them if there is an
	 * intermediate flush.
	 */
}

#ifdef CONFIG_MMU_GATHER_NO_RANGE

#if defined(tlb_flush) || defined(tlb_start_vma) || defined(tlb_end_vma)
#error MMU_GATHER_NO_RANGE relies on default tlb_flush(), tlb_start_vma() and tlb_end_vma()
#endif

/*
 * When an architecture does not have efficient means of range flushing TLBs
 * there is no point in doing intermediate flushes on tlb_end_vma() to keep the
 * range small. We equally don't have to worry about page granularity or other
 * things.
 *
 * All we need to do is issue a full flush for any !0 range.
 */
static inline void tlb_flush(struct mmu_gather *tlb)
{
	if (tlb->end)
		flush_tlb_mm(tlb->mm);
}

static inline void
tlb_update_vma_flags(struct mmu_gather *tlb, struct vm_area_struct *vma) { }

#define tlb_end_vma tlb_end_vma
static inline void tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vma) { }

#else /* CONFIG_MMU_GATHER_NO_RANGE */

#ifndef tlb_flush

#if defined(tlb_start_vma) || defined(tlb_end_vma)
#error Default tlb_flush() relies on default tlb_start_vma() and tlb_end_vma()
#endif

/*
 * When an architecture does not provide its own tlb_flush() implementation
 * but does have a reasonably efficient flush_tlb_range() implementation,
 * use that.
 */
static inline void tlb_flush(struct mmu_gather *tlb)
{
	if (tlb->fullmm || tlb->need_flush_all) {
		flush_tlb_mm(tlb->mm);
	} else if (tlb->end) {
		struct vm_area_struct vma = {
			.vm_mm = tlb->mm,
			.vm_flags = (tlb->vma_exec ? VM_EXEC    : 0) |
				    (tlb->vma_huge ? VM_HUGETLB : 0),
		};

		flush_tlb_range(&vma, tlb->start, tlb->end);
	}
}

static inline void
tlb_update_vma_flags(struct mmu_gather *tlb, struct vm_area_struct *vma)
{
	/*
	 * flush_tlb_range() implementations that look at VM_HUGETLB (tile,
	 * mips-4k) flush only large pages.
	 *
	 * flush_tlb_range() implementations that flush I-TLB also flush D-TLB
	 * (tile, xtensa, arm), so it's ok to just add VM_EXEC to an existing
	 * range.
	 *
	 * We rely on tlb_end_vma() to issue a flush, such that when we reset
	 * these values the batch is empty.
	 */
	tlb->vma_huge = is_vm_hugetlb_page(vma);
	tlb->vma_exec = !!(vma->vm_flags & VM_EXEC);
}

#else

static inline void
tlb_update_vma_flags(struct mmu_gather *tlb, struct vm_area_struct *vma) { }

#endif

#endif /* CONFIG_MMU_GATHER_NO_RANGE */

static inline void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
{
	/*
	 * Anything calling __tlb_adjust_range() also sets at least one of
	 * these bits.
	 */
	if (!(tlb->freed_tables || tlb->cleared_ptes || tlb->cleared_pmds ||
	      tlb->cleared_puds || tlb->cleared_p4ds))
		return;

	tlb_flush(tlb);
	mmu_notifier_invalidate_range(tlb->mm, tlb->start, tlb->end);
	__tlb_reset_range(tlb);
}

static inline void tlb_remove_page_size(struct mmu_gather *tlb,
					struct page *page, int page_size)
{
	if (__tlb_remove_page_size(tlb, page, page_size))
		tlb_flush_mmu(tlb);
}

static inline bool __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
{
	return __tlb_remove_page_size(tlb, page, PAGE_SIZE);
}
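
/*
 * Illustrative (the in-tree zap loops are more involved): callers that use
 * the __ variants must honour the return value and flush when the batch
 * fills up:
 *
 *	if (__tlb_remove_page(tlb, page))
 *		tlb_flush_mmu(tlb);
 */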

/* tlb_remove_page
 *	Similar to __tlb_remove_page but will call tlb_flush_mmu() itself when
 *	required.
 */
static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
{
	return tlb_remove_page_size(tlb, page, PAGE_SIZE);
}

static inline void tlb_change_page_size(struct mmu_gather *tlb,
					unsigned int page_size)
{
#ifdef CONFIG_MMU_GATHER_PAGE_SIZE
	if (tlb->page_size && tlb->page_size != page_size) {
		if (!tlb->fullmm && !tlb->need_flush_all)
			tlb_flush_mmu(tlb);
	}

	tlb->page_size = page_size;
#endif
}

static inline unsigned long tlb_get_unmap_shift(struct mmu_gather *tlb)
{
	if (tlb->cleared_ptes)
		return PAGE_SHIFT;
	if (tlb->cleared_pmds)
		return PMD_SHIFT;
	if (tlb->cleared_puds)
		return PUD_SHIFT;
	if (tlb->cleared_p4ds)
		return P4D_SHIFT;

	return PAGE_SHIFT;
}

static inline unsigned long tlb_get_unmap_size(struct mmu_gather *tlb)
{
	return 1UL << tlb_get_unmap_shift(tlb);
}
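
/*
 * Sketch (hypothetical, loosely modelled on arm64): an architecture with
 * per-granule invalidation instructions can use the unmap size as the stride
 * when walking the range. __flush_tlb_range_stride() is a made-up name for
 * such an arch primitive, not something this header provides:
 *
 *	static inline void tlb_flush(struct mmu_gather *tlb)
 *	{
 *		unsigned long stride = tlb_get_unmap_size(tlb);
 *
 *		if (tlb->fullmm || tlb->need_flush_all)
 *			flush_tlb_mm(tlb->mm);
 *		else if (tlb->end)
 *			__flush_tlb_range_stride(tlb->mm, tlb->start,
 *						 tlb->end, stride);
 *	}
 */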

/*
 * In the case of tlb vma handling, we can optimise these away in the
 * case where we're doing a full MM flush.  When we're doing a munmap,
 * the vmas are adjusted to only cover the region to be torn down.
 */
#ifndef tlb_start_vma
static inline void tlb_start_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
{
	if (tlb->fullmm)
		return;

	tlb_update_vma_flags(tlb, vma);
	flush_cache_range(vma, vma->vm_start, vma->vm_end);
}
#endif

#ifndef tlb_end_vma
static inline void tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
{
	if (tlb->fullmm)
		return;

	/*
	 * Do a TLB flush and reset the range at VMA boundaries; this avoids
	 * the ranges growing with the unused space between consecutive VMAs,
	 * but also the mmu_gather::vma_* flags from tlb_start_vma() rely on
	 * this.
	 */
	tlb_flush_mmu_tlbonly(tlb);
}
#endif

/*
 * tlb_flush_{pte|pmd|pud|p4d}_range() adjust tlb->start and tlb->end
 * and set the corresponding cleared_* bit.
 */
static inline void tlb_flush_pte_range(struct mmu_gather *tlb,
				     unsigned long address, unsigned long size)
{
	__tlb_adjust_range(tlb, address, size);
	tlb->cleared_ptes = 1;
}

static inline void tlb_flush_pmd_range(struct mmu_gather *tlb,
				     unsigned long address, unsigned long size)
{
	__tlb_adjust_range(tlb, address, size);
	tlb->cleared_pmds = 1;
}

static inline void tlb_flush_pud_range(struct mmu_gather *tlb,
				     unsigned long address, unsigned long size)
{
	__tlb_adjust_range(tlb, address, size);
	tlb->cleared_puds = 1;
}

static inline void tlb_flush_p4d_range(struct mmu_gather *tlb,
				     unsigned long address, unsigned long size)
{
	__tlb_adjust_range(tlb, address, size);
	tlb->cleared_p4ds = 1;
}

#ifndef __tlb_remove_tlb_entry
#define __tlb_remove_tlb_entry(tlb, ptep, address) do { } while (0)
#endif

/**
 * tlb_remove_tlb_entry - remember a pte unmapping for later tlb invalidation.
 *
 * Record the fact that PTEs were really unmapped by updating the range,
 * so we can later optimise away the tlb invalidate.  This helps when
 * userspace is unmapping already-unmapped pages, which happens quite a lot.
 */
#define tlb_remove_tlb_entry(tlb, ptep, address)		\
	do {							\
		tlb_flush_pte_range(tlb, address, PAGE_SIZE);	\
		__tlb_remove_tlb_entry(tlb, ptep, address);	\
	} while (0)

#define tlb_remove_huge_tlb_entry(h, tlb, ptep, address)	\
	do {							\
		unsigned long _sz = huge_page_size(h);		\
		if (_sz >= P4D_SIZE)				\
			tlb_flush_p4d_range(tlb, address, _sz);	\
		else if (_sz >= PUD_SIZE)			\
			tlb_flush_pud_range(tlb, address, _sz);	\
		else if (_sz >= PMD_SIZE)			\
			tlb_flush_pmd_range(tlb, address, _sz);	\
		else						\
			tlb_flush_pte_range(tlb, address, _sz);	\
		__tlb_remove_tlb_entry(tlb, ptep, address);	\
	} while (0)

/**
 * tlb_remove_pmd_tlb_entry - remember a pmd mapping for later tlb invalidation
 * This is a nop so far, because only x86 needs it.
 */
#ifndef __tlb_remove_pmd_tlb_entry
#define __tlb_remove_pmd_tlb_entry(tlb, pmdp, address) do {} while (0)
#endif

#define tlb_remove_pmd_tlb_entry(tlb, pmdp, address)			\
	do {								\
		tlb_flush_pmd_range(tlb, address, HPAGE_PMD_SIZE);	\
		__tlb_remove_pmd_tlb_entry(tlb, pmdp, address);		\
	} while (0)
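
/*
 * Illustrative use, loosely modelled on the THP zap path (details vary by
 * kernel version): announce the page size, clear the huge PMD (e.g. via
 * pmdp_huge_get_and_clear_full()), record the entry, then queue the page:
 *
 *	tlb_change_page_size(tlb, HPAGE_PMD_SIZE);
 *	... clear the PMD ...
 *	tlb_remove_pmd_tlb_entry(tlb, pmd, addr);
 *	tlb_remove_page_size(tlb, page, HPAGE_PMD_SIZE);
 */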

/**
 * tlb_remove_pud_tlb_entry - remember a pud mapping for later tlb
 * invalidation. This is a nop so far, because only x86 needs it.
 */
#ifndef __tlb_remove_pud_tlb_entry
#define __tlb_remove_pud_tlb_entry(tlb, pudp, address) do {} while (0)
#endif

#define tlb_remove_pud_tlb_entry(tlb, pudp, address)			\
	do {								\
		tlb_flush_pud_range(tlb, address, HPAGE_PUD_SIZE);	\
		__tlb_remove_pud_tlb_entry(tlb, pudp, address);		\
	} while (0)

/*
 * For things like page table caches (i.e. caching addresses "inside" the
 * page tables, like x86 does), for legacy reasons, flushing an
 * individual page had better flush the page table caches behind it. This
 * is definitely how x86 works, for example. And if you have an
 * architected non-legacy page table cache (which I'm not aware of
 * anybody actually doing), you're going to have some architecturally
 * explicit flushing for that, likely *separate* from a regular TLB entry
 * flush, and thus you'd need more than just some range expansion.
 *
 * So if we ever find an architecture
 * that would want something that odd, I think it is up to that
 * architecture to do its own odd thing, not cause pain for others
 * http://lkml.kernel.org/r/CA+55aFzBggoXtNXQeng5d_mRoDnaMBE5Y+URs+PHR67nUpMtaw@mail.gmail.com
 *
 * For now, w.r.t. the page table cache, mark the range_size as PAGE_SIZE.
 */

#ifndef pte_free_tlb
#define pte_free_tlb(tlb, ptep, address)			\
	do {							\
		tlb_flush_pmd_range(tlb, address, PAGE_SIZE);	\
		tlb->freed_tables = 1;				\
		__pte_free_tlb(tlb, ptep, address);		\
	} while (0)
#endif
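
/*
 * A minimal sketch of what an architecture-side __pte_free_tlb() tends to
 * look like (hypothetical; real versions live in each arch's asm/pgalloc.h
 * and may take extra teardown or accounting steps):
 *
 *	#define __pte_free_tlb(tlb, pte, address)		\
 *	do {							\
 *		pgtable_pte_page_dtor(pte);			\
 *		tlb_remove_table((tlb), (pte));			\
 *	} while (0)
 */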

#ifndef pmd_free_tlb
#define pmd_free_tlb(tlb, pmdp, address)			\
	do {							\
		tlb_flush_pud_range(tlb, address, PAGE_SIZE);	\
		tlb->freed_tables = 1;				\
		__pmd_free_tlb(tlb, pmdp, address);		\
	} while (0)
#endif

#ifndef pud_free_tlb
#define pud_free_tlb(tlb, pudp, address)			\
	do {							\
		tlb_flush_p4d_range(tlb, address, PAGE_SIZE);	\
		tlb->freed_tables = 1;				\
		__pud_free_tlb(tlb, pudp, address);		\
	} while (0)
#endif

#ifndef p4d_free_tlb
#define p4d_free_tlb(tlb, p4dp, address)			\
	do {							\
		__tlb_adjust_range(tlb, address, PAGE_SIZE);	\
		tlb->freed_tables = 1;				\
		__p4d_free_tlb(tlb, p4dp, address);		\
	} while (0)
#endif

#endif /* CONFIG_MMU */

#endif /* _ASM_GENERIC__TLB_H */
666