/* OK3568_Linux_fs/kernel/mm/mmu_gather.c (revision 4882a59341e53eb6f0b4789bf948001014eff981) */
#include <linux/gfp.h>
#include <linux/highmem.h>
#include <linux/kernel.h>
#include <linux/mmdebug.h>
#include <linux/mm_types.h>
#include <linux/pagemap.h>
#include <linux/rcupdate.h>
#include <linux/smp.h>
#include <linux/swap.h>

#include <asm/pgalloc.h>
#include <asm/tlb.h>

#ifndef CONFIG_MMU_GATHER_NO_GATHER

/*
 * Advance tlb->active to the next batch in the chain, allocating a new
 * batch page if none is available. Allocation is GFP_NOWAIT since we may
 * be deep in the teardown path; on failure, or once MAX_GATHER_BATCH_COUNT
 * batches exist, return false so the caller flushes to make progress.
 */
static bool tlb_next_batch(struct mmu_gather *tlb)
{
	struct mmu_gather_batch *batch;

	batch = tlb->active;
	if (batch->next) {
		tlb->active = batch->next;
		return true;
	}

	if (tlb->batch_count == MAX_GATHER_BATCH_COUNT)
		return false;

	batch = (void *)__get_free_pages(GFP_NOWAIT | __GFP_NOWARN, 0);
	if (!batch)
		return false;

	tlb->batch_count++;
	batch->next = NULL;
	batch->nr   = 0;
	batch->max  = MAX_GATHER_BATCH;

	tlb->active->next = batch;
	tlb->active = batch;

	return true;
}

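/*
 * Illustrative sketch (not in the original source): the gather keeps an
 * inline first batch and grows a singly linked chain on demand:
 *
 *	tlb->local  ->  batch #2  ->  batch #3  ->  NULL
 *	(backed by                    ^
 *	 tlb->__pages[])              tlb->active
 *
 * Each dynamically allocated batch is a single page holding up to
 * MAX_GATHER_BATCH page pointers.
 */
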
/*
 * Release all gathered pages (and their swap cache entries) and reset
 * every batch in the chain to empty; the batch pages themselves are kept
 * for reuse and only freed by tlb_batch_list_free().
 */
static void tlb_batch_pages_flush(struct mmu_gather *tlb)
{
	struct mmu_gather_batch *batch;

	for (batch = &tlb->local; batch && batch->nr; batch = batch->next) {
		free_pages_and_swap_cache(batch->pages, batch->nr);
		batch->nr = 0;
	}
	tlb->active = &tlb->local;
}

/*
 * Free the dynamically allocated batch pages; the inline tlb->local
 * batch is part of the mmu_gather itself and stays.
 */
static void tlb_batch_list_free(struct mmu_gather *tlb)
{
	struct mmu_gather_batch *batch, *next;

	for (batch = tlb->local.next; batch; batch = next) {
		next = batch->next;
		free_pages((unsigned long)batch, 0);
	}
	tlb->local.next = NULL;
}

/*
 * Queue @page for deferred freeing. Returns true when the active batch
 * is full and no further batch could be allocated; the caller must then
 * flush via tlb_flush_mmu() before queueing more pages.
 */
bool __tlb_remove_page_size(struct mmu_gather *tlb, struct page *page, int page_size)
{
	struct mmu_gather_batch *batch;

	VM_BUG_ON(!tlb->end);

#ifdef CONFIG_MMU_GATHER_PAGE_SIZE
	VM_WARN_ON(tlb->page_size != page_size);
#endif

	batch = tlb->active;
	/*
	 * Add the page and check if we are full. If so
	 * force a flush.
	 */
	batch->pages[batch->nr++] = page;
	if (batch->nr == batch->max) {
		if (!tlb_next_batch(tlb))
			return true;
		batch = tlb->active;
	}
	VM_BUG_ON_PAGE(batch->nr > batch->max, page);

	return false;
}

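/*
 * Illustrative caller loop (a sketch, not part of this file): the unmap
 * path queues each unmapped page and flushes when the gather reports it
 * is full, roughly:
 *
 *	if (__tlb_remove_page_size(tlb, page, PAGE_SIZE))
 *		tlb_flush_mmu(tlb);
 *
 * which is what tlb_remove_page_size() in <asm-generic/tlb.h> does.
 */
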
#endif /* MMU_GATHER_NO_GATHER */

#ifdef CONFIG_MMU_GATHER_TABLE_FREE

/*
 * Hand each queued table to the arch's __tlb_remove_table(), then free
 * the batch page itself.
 */
static void __tlb_remove_table_free(struct mmu_table_batch *batch)
{
	int i;

	for (i = 0; i < batch->nr; i++)
		__tlb_remove_table(batch->tables[i]);

	free_page((unsigned long)batch);
}

#ifdef CONFIG_MMU_GATHER_RCU_TABLE_FREE

/*
 * Semi RCU freeing of the page directories.
 *
 * This is needed by some architectures to implement software pagetable walkers.
 *
 * gup_fast() and other software pagetable walkers do a lockless page-table
 * walk and therefore need some synchronization with the freeing of the page
 * directories. The chosen means to accomplish that is by disabling IRQs over
 * the walk.
 *
 * Architectures that use IPIs to flush TLBs will then automagically DTRT,
 * since we unlink the page, flush TLBs, free the page. Since the disabling of
 * IRQs delays the completion of the TLB flush we can never observe an already
 * freed page.
 *
 * Architectures that do not have this (PPC) need to delay the freeing by some
 * other means, and this is that means.
 *
 * What we do is batch the freed directory pages (tables) and RCU free them.
 * We use the sched RCU variant, as that guarantees that IRQ/preempt disabling
 * holds off grace periods.
 *
 * However, in order to batch these pages we need to allocate storage, and this
 * allocation is deep inside the MM code and can thus easily fail on memory
 * pressure. To guarantee progress we fall back to single table freeing; see
 * the implementation of tlb_remove_table_one().
 */

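/*
 * Illustrative sketch (not in the original source) of the walker side of
 * this contract: a gup_fast()-style lockless walk disables IRQs so that
 * a page table it is traversing cannot be fully freed underneath it:
 *
 *	local_irq_save(flags);
 *	// walk pgd/p4d/pud/pmd/pte here; any table observed cannot be
 *	// freed until IRQs are re-enabled, because the freeing side
 *	// either waits for the flush IPI or for an RCU-sched grace period
 *	local_irq_restore(flags);
 */
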
static void tlb_remove_table_smp_sync(void *arg)
{
	/* Simply deliver the interrupt */
}

void tlb_remove_table_sync_one(void)
{
	/*
	 * This isn't an RCU grace period and hence the page-tables cannot be
	 * assumed to be actually RCU-freed.
	 *
	 * It is however sufficient for software page-table walkers that rely
	 * on IRQ disabling.
	 */
	smp_call_function(tlb_remove_table_smp_sync, NULL, 1);
}

static void tlb_remove_table_rcu(struct rcu_head *head)
{
	__tlb_remove_table_free(container_of(head, struct mmu_table_batch, rcu));
}

static void tlb_remove_table_free(struct mmu_table_batch *batch)
{
	call_rcu(&batch->rcu, tlb_remove_table_rcu);
}

#else /* !CONFIG_MMU_GATHER_RCU_TABLE_FREE */

static void tlb_remove_table_free(struct mmu_table_batch *batch)
{
	__tlb_remove_table_free(batch);
}

#endif /* CONFIG_MMU_GATHER_RCU_TABLE_FREE */

/*
 * If we want tlb_remove_table() to imply TLB invalidates.
 */
static inline void tlb_table_invalidate(struct mmu_gather *tlb)
{
	if (tlb_needs_table_invalidate()) {
		/*
		 * Invalidate page-table caches used by hardware walkers. Then
		 * we still need to RCU-sched wait while freeing the pages
		 * because software walkers can still be in-flight.
		 */
		tlb_flush_mmu_tlbonly(tlb);
	}
}

static void tlb_remove_table_one(void *table)
{
	tlb_remove_table_sync_one();
	__tlb_remove_table(table);
}

static void tlb_table_flush(struct mmu_gather *tlb)
{
	struct mmu_table_batch **batch = &tlb->batch;

	if (*batch) {
		tlb_table_invalidate(tlb);
		tlb_remove_table_free(*batch);
		*batch = NULL;
	}
}

/*
 * Queue a page-table page for deferred freeing. If no batch storage can
 * be allocated (GFP_NOWAIT), fall back to freeing the single table
 * synchronously after an explicit sync against concurrent walkers.
 */
void tlb_remove_table(struct mmu_gather *tlb, void *table)
{
	struct mmu_table_batch **batch = &tlb->batch;

	if (*batch == NULL) {
		*batch = (struct mmu_table_batch *)__get_free_page(GFP_NOWAIT | __GFP_NOWARN);
		if (*batch == NULL) {
			tlb_table_invalidate(tlb);
			tlb_remove_table_one(table);
			return;
		}
		(*batch)->nr = 0;
	}

	(*batch)->tables[(*batch)->nr++] = table;
	if ((*batch)->nr == MAX_TABLE_BATCH)
		tlb_table_flush(tlb);
}

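/*
 * Illustrative sketch (not from this file; exact macro names vary by
 * architecture and kernel version): an architecture that frees page
 * tables through this path typically wires its pXd_free_tlb() hooks to
 * tlb_remove_table(), roughly:
 *
 *	#define __pte_free_tlb(tlb, pte, addr)		\
 *	do {						\
 *		pgtable_pte_page_dtor(pte);		\
 *		tlb_remove_table((tlb), (pte));		\
 *	} while (0)
 *
 * and implements __tlb_remove_table() to do the final free once it is
 * safe against concurrent lockless walkers.
 */
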
static inline void tlb_table_init(struct mmu_gather *tlb)
{
	tlb->batch = NULL;
}

#else /* !CONFIG_MMU_GATHER_TABLE_FREE */

static inline void tlb_table_flush(struct mmu_gather *tlb) { }
static inline void tlb_table_init(struct mmu_gather *tlb) { }

#endif /* CONFIG_MMU_GATHER_TABLE_FREE */

static void tlb_flush_mmu_free(struct mmu_gather *tlb)
{
	tlb_table_flush(tlb);
#ifndef CONFIG_MMU_GATHER_NO_GATHER
	tlb_batch_pages_flush(tlb);
#endif
}

/*
 * Flush the TLB for the gathered range first, then free the queued pages
 * and tables; this ordering ensures no CPU still holds a stale
 * translation to a page we are about to free.
 */
void tlb_flush_mmu(struct mmu_gather *tlb)
{
	tlb_flush_mmu_tlbonly(tlb);
	tlb_flush_mmu_free(tlb);
}

/**
 * tlb_gather_mmu - initialize an mmu_gather structure for page-table tear-down
 * @tlb: the mmu_gather structure to initialize
 * @mm: the mm_struct of the target address space
 * @start: start of the region that will be removed from the page-table
 * @end: end of the region that will be removed from the page-table
 *
 * Called to initialize an (on-stack) mmu_gather structure for page-table
 * tear-down from @mm. The @start and @end are set to 0 and -1
 * respectively when @mm is without users and we're going to destroy
 * the full address space (exit/execve).
 */
void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
			unsigned long start, unsigned long end)
{
	tlb->mm = mm;

	/* Is it from 0 to ~0? */
	tlb->fullmm     = !(start | (end+1));

#ifndef CONFIG_MMU_GATHER_NO_GATHER
	tlb->need_flush_all = 0;
	tlb->local.next = NULL;
	tlb->local.nr   = 0;
	tlb->local.max  = ARRAY_SIZE(tlb->__pages);
	tlb->active     = &tlb->local;
	tlb->batch_count = 0;
#endif

	tlb_table_init(tlb);
#ifdef CONFIG_MMU_GATHER_PAGE_SIZE
	tlb->page_size = 0;
#endif

	__tlb_reset_range(tlb);
	inc_tlb_flush_pending(tlb->mm);
}

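/*
 * Illustrative usage (a sketch mirroring callers such as unmap_region()
 * in mm/mmap.c, with details elided):
 *
 *	struct mmu_gather tlb;
 *
 *	tlb_gather_mmu(&tlb, mm, start, end);
 *	unmap_vmas(&tlb, vma, start, end);
 *	free_pgtables(&tlb, vma, floor, ceiling);
 *	tlb_finish_mmu(&tlb, start, end);
 */
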
/**
 * tlb_finish_mmu - finish an mmu_gather structure
 * @tlb: the mmu_gather structure to finish
 * @start: start of the region that will be removed from the page-table
 * @end: end of the region that will be removed from the page-table
 *
 * Called at the end of the shootdown operation to free up any resources that
 * were required.
 */
void tlb_finish_mmu(struct mmu_gather *tlb,
		unsigned long start, unsigned long end)
{
	/*
	 * If parallel threads are doing PTE changes on the same range
	 * under a non-exclusive lock (e.g., mmap_lock read-side) but defer
	 * the TLB flush by batching, one thread may end up seeing
	 * inconsistent PTEs and be left with stale TLB entries. So flush
	 * the TLB forcefully if we detect parallel PTE batching threads.
	 *
	 * However, some syscalls, e.g. munmap(), may free page tables; this
	 * needs to force-flush everything in the given range. Otherwise we
	 * may end up with stale TLB entries on architectures, e.g. aarch64,
	 * that can flush at a specific page-table level.
	 */
	if (mm_tlb_flush_nested(tlb->mm)) {
		/*
		 * aarch64 yields better performance with fullmm by
		 * avoiding multiple CPUs spamming TLBI messages at the
		 * same time.
		 *
		 * On x86 non-fullmm doesn't yield a significant difference
		 * against fullmm.
		 */
		tlb->fullmm = 1;
		__tlb_reset_range(tlb);
		tlb->freed_tables = 1;
	}

	tlb_flush_mmu(tlb);

#ifndef CONFIG_MMU_GATHER_NO_GATHER
	tlb_batch_list_free(tlb);
#endif
	dec_tlb_flush_pending(tlb->mm);
}