/* xref: /OK3568_Linux_fs/kernel/mm/khugepaged.c (revision 4882a59341e53eb6f0b4789bf948001014eff981) */
// SPDX-License-Identifier: GPL-2.0
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/sched/coredump.h>
#include <linux/mmu_notifier.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/mm_inline.h>
#include <linux/kthread.h>
#include <linux/khugepaged.h>
#include <linux/freezer.h>
#include <linux/mman.h>
#include <linux/hashtable.h>
#include <linux/userfaultfd_k.h>
#include <linux/page_idle.h>
#include <linux/swapops.h>
#include <linux/shmem_fs.h>

#include <asm/tlb.h>
#include <asm/pgalloc.h>
#include "internal.h"

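/* Outcome of a scan/collapse attempt, reported via the huge_memory tracepoints created below. */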
enum scan_result {
	SCAN_FAIL,
	SCAN_SUCCEED,
	SCAN_PMD_NULL,
	SCAN_EXCEED_NONE_PTE,
	SCAN_EXCEED_SWAP_PTE,
	SCAN_EXCEED_SHARED_PTE,
	SCAN_PTE_NON_PRESENT,
	SCAN_PTE_UFFD_WP,
	SCAN_PAGE_RO,
	SCAN_LACK_REFERENCED_PAGE,
	SCAN_PAGE_NULL,
	SCAN_SCAN_ABORT,
	SCAN_PAGE_COUNT,
	SCAN_PAGE_LRU,
	SCAN_PAGE_LOCK,
	SCAN_PAGE_ANON,
	SCAN_PAGE_COMPOUND,
	SCAN_ANY_PROCESS,
	SCAN_VMA_NULL,
	SCAN_VMA_CHECK,
	SCAN_ADDRESS_RANGE,
	SCAN_SWAP_CACHE_PAGE,
	SCAN_DEL_PAGE_LRU,
	SCAN_ALLOC_HUGE_PAGE_FAIL,
	SCAN_CGROUP_CHARGE_FAIL,
	SCAN_TRUNCATED,
	SCAN_PAGE_HAS_PRIVATE,
};

#define CREATE_TRACE_POINTS
#include <trace/events/huge_memory.h>

static struct task_struct *khugepaged_thread __read_mostly;
static DEFINE_MUTEX(khugepaged_mutex);

/* default: scan 8*512 ptes (or vmas) every 10 seconds */
static unsigned int khugepaged_pages_to_scan __read_mostly;
static unsigned int khugepaged_pages_collapsed;
static unsigned int khugepaged_full_scans;
static unsigned int khugepaged_scan_sleep_millisecs __read_mostly = 10000;
/* during fragmentation poll the hugepage allocator once every minute */
static unsigned int khugepaged_alloc_sleep_millisecs __read_mostly = 60000;
static unsigned long khugepaged_sleep_expire;
static DEFINE_SPINLOCK(khugepaged_mm_lock);
static DECLARE_WAIT_QUEUE_HEAD(khugepaged_wait);
/*
 * By default, collapse a hugepage if there is at least one pte mapped,
 * just as would have happened had the vma been large enough at
 * page-fault time.
 */
static unsigned int khugepaged_max_ptes_none __read_mostly;
static unsigned int khugepaged_max_ptes_swap __read_mostly;
static unsigned int khugepaged_max_ptes_shared __read_mostly;

#define MM_SLOTS_HASH_BITS 10
static __read_mostly DEFINE_HASHTABLE(mm_slots_hash, MM_SLOTS_HASH_BITS);

static struct kmem_cache *mm_slot_cache __read_mostly;

#define MAX_PTE_MAPPED_THP 8

/**
 * struct mm_slot - hash lookup from mm to mm_slot
 * @hash: hash collision list
 * @mm_node: khugepaged scan list headed in khugepaged_scan.mm_head
 * @mm: the mm that this information is valid for
 * @nr_pte_mapped_thp: number of pte-mapped THPs recorded in @pte_mapped_thp
 * @pte_mapped_thp: addresses of pte-mapped THPs waiting for pmd collapse
 */
struct mm_slot {
	struct hlist_node hash;
	struct list_head mm_node;
	struct mm_struct *mm;

	/* pte-mapped THP in this mm */
	int nr_pte_mapped_thp;
	unsigned long pte_mapped_thp[MAX_PTE_MAPPED_THP];
};

/**
 * struct khugepaged_scan - cursor for scanning
 * @mm_head: the head of the mm list to scan
 * @mm_slot: the current mm_slot we are scanning
 * @address: the next address inside that to be scanned
 *
 * There is only the one khugepaged_scan instance of this cursor structure.
 */
struct khugepaged_scan {
	struct list_head mm_head;
	struct mm_slot *mm_slot;
	unsigned long address;
};

static struct khugepaged_scan khugepaged_scan = {
	.mm_head = LIST_HEAD_INIT(khugepaged_scan.mm_head),
};

#ifdef CONFIG_SYSFS
static ssize_t scan_sleep_millisecs_show(struct kobject *kobj,
					 struct kobj_attribute *attr,
					 char *buf)
{
	return sprintf(buf, "%u\n", khugepaged_scan_sleep_millisecs);
}

static ssize_t scan_sleep_millisecs_store(struct kobject *kobj,
					  struct kobj_attribute *attr,
					  const char *buf, size_t count)
{
	unsigned long msecs;
	int err;

	err = kstrtoul(buf, 10, &msecs);
	if (err || msecs > UINT_MAX)
		return -EINVAL;

	khugepaged_scan_sleep_millisecs = msecs;
	khugepaged_sleep_expire = 0;
	wake_up_interruptible(&khugepaged_wait);

	return count;
}
static struct kobj_attribute scan_sleep_millisecs_attr =
	__ATTR(scan_sleep_millisecs, 0644, scan_sleep_millisecs_show,
	       scan_sleep_millisecs_store);

static ssize_t alloc_sleep_millisecs_show(struct kobject *kobj,
					  struct kobj_attribute *attr,
					  char *buf)
{
	return sprintf(buf, "%u\n", khugepaged_alloc_sleep_millisecs);
}

static ssize_t alloc_sleep_millisecs_store(struct kobject *kobj,
					   struct kobj_attribute *attr,
					   const char *buf, size_t count)
{
	unsigned long msecs;
	int err;

	err = kstrtoul(buf, 10, &msecs);
	if (err || msecs > UINT_MAX)
		return -EINVAL;

	khugepaged_alloc_sleep_millisecs = msecs;
	khugepaged_sleep_expire = 0;
	wake_up_interruptible(&khugepaged_wait);

	return count;
}
static struct kobj_attribute alloc_sleep_millisecs_attr =
	__ATTR(alloc_sleep_millisecs, 0644, alloc_sleep_millisecs_show,
	       alloc_sleep_millisecs_store);

static ssize_t pages_to_scan_show(struct kobject *kobj,
				  struct kobj_attribute *attr,
				  char *buf)
{
	return sprintf(buf, "%u\n", khugepaged_pages_to_scan);
}
static ssize_t pages_to_scan_store(struct kobject *kobj,
				   struct kobj_attribute *attr,
				   const char *buf, size_t count)
{
	int err;
	unsigned long pages;

	err = kstrtoul(buf, 10, &pages);
	if (err || !pages || pages > UINT_MAX)
		return -EINVAL;

	khugepaged_pages_to_scan = pages;

	return count;
}
static struct kobj_attribute pages_to_scan_attr =
	__ATTR(pages_to_scan, 0644, pages_to_scan_show,
	       pages_to_scan_store);

static ssize_t pages_collapsed_show(struct kobject *kobj,
				    struct kobj_attribute *attr,
				    char *buf)
{
	return sprintf(buf, "%u\n", khugepaged_pages_collapsed);
}
static struct kobj_attribute pages_collapsed_attr =
	__ATTR_RO(pages_collapsed);

static ssize_t full_scans_show(struct kobject *kobj,
			       struct kobj_attribute *attr,
			       char *buf)
{
	return sprintf(buf, "%u\n", khugepaged_full_scans);
}
static struct kobj_attribute full_scans_attr =
	__ATTR_RO(full_scans);

static ssize_t khugepaged_defrag_show(struct kobject *kobj,
				      struct kobj_attribute *attr, char *buf)
{
	return single_hugepage_flag_show(kobj, attr, buf,
				TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG);
}
static ssize_t khugepaged_defrag_store(struct kobject *kobj,
				       struct kobj_attribute *attr,
				       const char *buf, size_t count)
{
	return single_hugepage_flag_store(kobj, attr, buf, count,
				 TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG);
}
static struct kobj_attribute khugepaged_defrag_attr =
	__ATTR(defrag, 0644, khugepaged_defrag_show,
	       khugepaged_defrag_store);

/*
 * max_ptes_none controls whether khugepaged may collapse hugepages over
 * unmapped ptes, which can increase the memory footprint of the vmas
 * involved. When max_ptes_none is 0, khugepaged never reduces the free
 * memory available in the system as it runs. Increasing max_ptes_none
 * allows khugepaged to consume more free memory during its scan.
 */
static ssize_t khugepaged_max_ptes_none_show(struct kobject *kobj,
					     struct kobj_attribute *attr,
					     char *buf)
{
	return sprintf(buf, "%u\n", khugepaged_max_ptes_none);
}
static ssize_t khugepaged_max_ptes_none_store(struct kobject *kobj,
					      struct kobj_attribute *attr,
					      const char *buf, size_t count)
{
	int err;
	unsigned long max_ptes_none;

	err = kstrtoul(buf, 10, &max_ptes_none);
	if (err || max_ptes_none > HPAGE_PMD_NR-1)
		return -EINVAL;

	khugepaged_max_ptes_none = max_ptes_none;

	return count;
}
static struct kobj_attribute khugepaged_max_ptes_none_attr =
	__ATTR(max_ptes_none, 0644, khugepaged_max_ptes_none_show,
	       khugepaged_max_ptes_none_store);

static ssize_t khugepaged_max_ptes_swap_show(struct kobject *kobj,
					     struct kobj_attribute *attr,
					     char *buf)
{
	return sprintf(buf, "%u\n", khugepaged_max_ptes_swap);
}

static ssize_t khugepaged_max_ptes_swap_store(struct kobject *kobj,
					      struct kobj_attribute *attr,
					      const char *buf, size_t count)
{
	int err;
	unsigned long max_ptes_swap;

	err = kstrtoul(buf, 10, &max_ptes_swap);
	if (err || max_ptes_swap > HPAGE_PMD_NR-1)
		return -EINVAL;

	khugepaged_max_ptes_swap = max_ptes_swap;

	return count;
}

static struct kobj_attribute khugepaged_max_ptes_swap_attr =
	__ATTR(max_ptes_swap, 0644, khugepaged_max_ptes_swap_show,
	       khugepaged_max_ptes_swap_store);

static ssize_t khugepaged_max_ptes_shared_show(struct kobject *kobj,
					       struct kobj_attribute *attr,
					       char *buf)
{
	return sprintf(buf, "%u\n", khugepaged_max_ptes_shared);
}

static ssize_t khugepaged_max_ptes_shared_store(struct kobject *kobj,
						struct kobj_attribute *attr,
						const char *buf, size_t count)
{
	int err;
	unsigned long max_ptes_shared;

	err = kstrtoul(buf, 10, &max_ptes_shared);
	if (err || max_ptes_shared > HPAGE_PMD_NR-1)
		return -EINVAL;

	khugepaged_max_ptes_shared = max_ptes_shared;

	return count;
}

static struct kobj_attribute khugepaged_max_ptes_shared_attr =
	__ATTR(max_ptes_shared, 0644, khugepaged_max_ptes_shared_show,
	       khugepaged_max_ptes_shared_store);

static struct attribute *khugepaged_attr[] = {
	&khugepaged_defrag_attr.attr,
	&khugepaged_max_ptes_none_attr.attr,
	&khugepaged_max_ptes_swap_attr.attr,
	&khugepaged_max_ptes_shared_attr.attr,
	&pages_to_scan_attr.attr,
	&pages_collapsed_attr.attr,
	&full_scans_attr.attr,
	&scan_sleep_millisecs_attr.attr,
	&alloc_sleep_millisecs_attr.attr,
	NULL,
};

struct attribute_group khugepaged_attr_group = {
	.attrs = khugepaged_attr,
	.name = "khugepaged",
};
#endif /* CONFIG_SYSFS */
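
/*
 * The attribute group above appears under
 * /sys/kernel/mm/transparent_hugepage/khugepaged/ (see
 * Documentation/admin-guide/mm/transhuge.rst). A minimal usage sketch
 * from a shell:
 *
 *   echo 4096 > /sys/kernel/mm/transparent_hugepage/khugepaged/pages_to_scan
 *   cat /sys/kernel/mm/transparent_hugepage/khugepaged/pages_collapsed
 */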

int hugepage_madvise(struct vm_area_struct *vma,
		     unsigned long *vm_flags, int advice)
{
	switch (advice) {
	case MADV_HUGEPAGE:
#ifdef CONFIG_S390
		/*
		 * qemu blindly sets MADV_HUGEPAGE on all allocations, but s390
		 * can't handle this properly after s390_enable_sie, so we simply
		 * ignore the madvise to prevent qemu from causing a SIGSEGV.
		 */
		if (mm_has_pgste(vma->vm_mm))
			return 0;
#endif
		*vm_flags &= ~VM_NOHUGEPAGE;
		*vm_flags |= VM_HUGEPAGE;
		/*
		 * If the vma becomes suitable for khugepaged to scan,
		 * register it here without waiting for a page fault that
		 * may not happen any time soon.
		 */
		if (!(*vm_flags & VM_NO_KHUGEPAGED) &&
				khugepaged_enter_vma_merge(vma, *vm_flags))
			return -ENOMEM;
		break;
	case MADV_NOHUGEPAGE:
		*vm_flags &= ~VM_HUGEPAGE;
		*vm_flags |= VM_NOHUGEPAGE;
		/*
		 * Setting VM_NOHUGEPAGE will prevent khugepaged from scanning
		 * this vma even if we leave the mm registered in khugepaged if
		 * it got registered before VM_NOHUGEPAGE was set.
		 */
		break;
	}

	return 0;
}
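
/*
 * Userspace reaches hugepage_madvise() via madvise(2). A minimal sketch
 * (the buffer size and alignment here are illustrative, not required):
 *
 *   void *buf = aligned_alloc(2UL << 20, 8UL << 20);
 *   madvise(buf, 8UL << 20, MADV_HUGEPAGE);
 */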

int __init khugepaged_init(void)
{
	mm_slot_cache = kmem_cache_create("khugepaged_mm_slot",
					  sizeof(struct mm_slot),
					  __alignof__(struct mm_slot), 0, NULL);
	if (!mm_slot_cache)
		return -ENOMEM;

	khugepaged_pages_to_scan = HPAGE_PMD_NR * 8;
	khugepaged_max_ptes_none = HPAGE_PMD_NR - 1;
	khugepaged_max_ptes_swap = HPAGE_PMD_NR / 8;
	khugepaged_max_ptes_shared = HPAGE_PMD_NR / 2;

	return 0;
}

void __init khugepaged_destroy(void)
{
	kmem_cache_destroy(mm_slot_cache);
}

static inline struct mm_slot *alloc_mm_slot(void)
{
	if (!mm_slot_cache)	/* initialization failed */
		return NULL;
	return kmem_cache_zalloc(mm_slot_cache, GFP_KERNEL);
}

static inline void free_mm_slot(struct mm_slot *mm_slot)
{
	kmem_cache_free(mm_slot_cache, mm_slot);
}

static struct mm_slot *get_mm_slot(struct mm_struct *mm)
{
	struct mm_slot *mm_slot;

	hash_for_each_possible(mm_slots_hash, mm_slot, hash, (unsigned long)mm)
		if (mm == mm_slot->mm)
			return mm_slot;

	return NULL;
}

static void insert_to_mm_slots_hash(struct mm_struct *mm,
				    struct mm_slot *mm_slot)
{
	mm_slot->mm = mm;
	hash_add(mm_slots_hash, &mm_slot->hash, (long)mm);
}

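/*
 * mm_users == 0 means the mm is being torn down: no further page faults
 * can happen, so khugepaged must stop touching this mm.
 */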
static inline int khugepaged_test_exit(struct mm_struct *mm)
{
	return atomic_read(&mm->mm_users) == 0;
}

static bool hugepage_vma_check(struct vm_area_struct *vma,
			       unsigned long vm_flags)
{
	if (!transhuge_vma_enabled(vma, vm_flags))
		return false;

	if (vma->vm_file && !IS_ALIGNED((vma->vm_start >> PAGE_SHIFT) -
				vma->vm_pgoff, HPAGE_PMD_NR))
		return false;

	/* Enabled via shmem mount options or sysfs settings. */
	if (shmem_file(vma->vm_file))
		return shmem_huge_enabled(vma);

	/* THP settings require madvise. */
	if (!(vm_flags & VM_HUGEPAGE) && !khugepaged_always())
		return false;

	/* Only regular files are valid */
	if (IS_ENABLED(CONFIG_READ_ONLY_THP_FOR_FS) && vma->vm_file &&
	    !inode_is_open_for_write(vma->vm_file->f_inode) &&
	    (vm_flags & VM_EXEC)) {
		struct inode *inode = vma->vm_file->f_inode;

		return S_ISREG(inode->i_mode);
	}

	if (!vma->anon_vma || vma->vm_ops)
		return false;
	if (vma_is_temporary_stack(vma))
		return false;
	return !(vm_flags & VM_NO_KHUGEPAGED);
}

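/*
 * Enroll an mm for scanning. The MMF_VM_HUGEPAGE bit makes enrolment
 * idempotent when several vmas of the same mm qualify.
 */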
int __khugepaged_enter(struct mm_struct *mm)
{
	struct mm_slot *mm_slot;
	int wakeup;

	mm_slot = alloc_mm_slot();
	if (!mm_slot)
		return -ENOMEM;

	/* __khugepaged_exit() must not run from under us */
	VM_BUG_ON_MM(atomic_read(&mm->mm_users) == 0, mm);
	if (unlikely(test_and_set_bit(MMF_VM_HUGEPAGE, &mm->flags))) {
		free_mm_slot(mm_slot);
		return 0;
	}

	spin_lock(&khugepaged_mm_lock);
	insert_to_mm_slots_hash(mm, mm_slot);
	/*
	 * Insert just behind the scanning cursor, to let the area settle
	 * down a little.
	 */
	wakeup = list_empty(&khugepaged_scan.mm_head);
	list_add_tail(&mm_slot->mm_node, &khugepaged_scan.mm_head);
	spin_unlock(&khugepaged_mm_lock);

	mmgrab(mm);
	if (wakeup)
		wake_up_interruptible(&khugepaged_wait);

	return 0;
}

int khugepaged_enter_vma_merge(struct vm_area_struct *vma,
			       unsigned long vm_flags)
{
	unsigned long hstart, hend;

	/*
	 * For non-shmem files, khugepaged only supports read-only files.
	 * khugepaged does not yet work on special mappings. And
	 * file-private shmem THP is not supported.
	 */
	if (!hugepage_vma_check(vma, vm_flags))
		return 0;

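	/*
	 * Round the vma inward to huge-page boundaries; e.g. with 2MB huge
	 * pages, vm_start == 0x201000 gives hstart == 0x400000. Register
	 * only if at least one fully aligned PMD-sized range fits.
	 */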
	hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
	hend = vma->vm_end & HPAGE_PMD_MASK;
	if (hstart < hend)
		return khugepaged_enter(vma, vm_flags);
	return 0;
}

void __khugepaged_exit(struct mm_struct *mm)
{
	struct mm_slot *mm_slot;
	int free = 0;

	spin_lock(&khugepaged_mm_lock);
	mm_slot = get_mm_slot(mm);
	if (mm_slot && khugepaged_scan.mm_slot != mm_slot) {
		hash_del(&mm_slot->hash);
		list_del(&mm_slot->mm_node);
		free = 1;
	}
	spin_unlock(&khugepaged_mm_lock);

	if (free) {
		clear_bit(MMF_VM_HUGEPAGE, &mm->flags);
		free_mm_slot(mm_slot);
		mmdrop(mm);
	} else if (mm_slot) {
		/*
		 * This is required to serialize against
		 * khugepaged_test_exit() (which is guaranteed to run
		 * under mmap_lock read mode). Block here (after we
		 * return, all pagetables will be destroyed) until
		 * khugepaged has finished working on the pagetables
		 * under the mmap_lock.
		 */
		mmap_write_lock(mm);
		mmap_write_unlock(mm);
	}
}

static void release_pte_page(struct page *page)
{
	mod_node_page_state(page_pgdat(page),
			NR_ISOLATED_ANON + page_is_file_lru(page),
			-compound_nr(page));
	unlock_page(page);
	putback_lru_page(page);
}

static void release_pte_pages(pte_t *pte, pte_t *_pte,
		struct list_head *compound_pagelist)
{
	struct page *page, *tmp;

	while (--_pte >= pte) {
		pte_t pteval = *_pte;

		page = pte_page(pteval);
		if (!pte_none(pteval) && !is_zero_pfn(pte_pfn(pteval)) &&
				!PageCompound(page))
			release_pte_page(page);
	}

	list_for_each_entry_safe(page, tmp, compound_pagelist, lru) {
		list_del(&page->lru);
		release_pte_page(page);
	}
}

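/*
 * Each pte mapping (and the swap cache, if any) holds page references;
 * a refcount above total_mapcount() plus the swap-cache references
 * indicates an external pin such as GUP, so collapse must back off.
 */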
static bool is_refcount_suitable(struct page *page)
{
	int expected_refcount;

	expected_refcount = total_mapcount(page);
	if (PageSwapCache(page))
		expected_refcount += compound_nr(page);

	return page_count(page) == expected_refcount;
}

static int __collapse_huge_page_isolate(struct vm_area_struct *vma,
					unsigned long address,
					pte_t *pte,
					struct list_head *compound_pagelist)
{
	struct page *page = NULL;
	pte_t *_pte;
	int none_or_zero = 0, shared = 0, result = 0, referenced = 0;
	bool writable = false;

	for (_pte = pte; _pte < pte+HPAGE_PMD_NR;
	     _pte++, address += PAGE_SIZE) {
		pte_t pteval = *_pte;
		if (pte_none(pteval) || (pte_present(pteval) &&
				is_zero_pfn(pte_pfn(pteval)))) {
			if (!userfaultfd_armed(vma) &&
			    ++none_or_zero <= khugepaged_max_ptes_none) {
				continue;
			} else {
				result = SCAN_EXCEED_NONE_PTE;
				goto out;
			}
		}
		if (!pte_present(pteval)) {
			result = SCAN_PTE_NON_PRESENT;
			goto out;
		}
		page = vm_normal_page(vma, address, pteval);
		if (unlikely(!page)) {
			result = SCAN_PAGE_NULL;
			goto out;
		}

		VM_BUG_ON_PAGE(!PageAnon(page), page);

		if (page_mapcount(page) > 1 &&
				++shared > khugepaged_max_ptes_shared) {
			result = SCAN_EXCEED_SHARED_PTE;
			goto out;
		}

		if (PageCompound(page)) {
			struct page *p;
			page = compound_head(page);

			/*
			 * Check if we have dealt with the compound page
			 * already
			 */
			list_for_each_entry(p, compound_pagelist, lru) {
				if (page == p)
					goto next;
			}
		}

		/*
		 * We can do it before isolate_lru_page because the
		 * page can't be freed from under us. NOTE: PG_lock
		 * is needed to serialize against split_huge_page
		 * when invoked from the VM.
		 */
		if (!trylock_page(page)) {
			result = SCAN_PAGE_LOCK;
			goto out;
		}

		/*
		 * Check if the page has any GUP (or other external) pins.
		 *
		 * The page table that maps the page has been already unlinked
		 * from the page table tree and this process cannot get
		 * an additional pin on the page.
		 *
		 * New pins can come later if the page is shared across fork,
		 * but not from this process. The other process cannot write to
		 * the page, only trigger CoW.
		 */
		if (!is_refcount_suitable(page)) {
			unlock_page(page);
			result = SCAN_PAGE_COUNT;
			goto out;
		}
		if (!pte_write(pteval) && PageSwapCache(page) &&
				!reuse_swap_page(page, NULL)) {
			/*
			 * Page is in the swap cache and cannot be re-used.
			 * It cannot be collapsed into a THP.
			 */
			unlock_page(page);
			result = SCAN_SWAP_CACHE_PAGE;
			goto out;
		}

		/*
		 * Isolate the page to avoid collapsing a hugepage
		 * currently in use by the VM.
		 */
		if (isolate_lru_page(page)) {
			unlock_page(page);
			result = SCAN_DEL_PAGE_LRU;
			goto out;
		}
		mod_node_page_state(page_pgdat(page),
				NR_ISOLATED_ANON + page_is_file_lru(page),
				compound_nr(page));
		VM_BUG_ON_PAGE(!PageLocked(page), page);
		VM_BUG_ON_PAGE(PageLRU(page), page);

		if (PageCompound(page))
			list_add_tail(&page->lru, compound_pagelist);
next:
		/* There should be enough young ptes to collapse the page */
		if (pte_young(pteval) ||
		    page_is_young(page) || PageReferenced(page) ||
		    mmu_notifier_test_young(vma->vm_mm, address))
			referenced++;

		if (pte_write(pteval))
			writable = true;
	}

	if (unlikely(!writable)) {
		result = SCAN_PAGE_RO;
	} else if (unlikely(!referenced)) {
		result = SCAN_LACK_REFERENCED_PAGE;
	} else {
		result = SCAN_SUCCEED;
		trace_mm_collapse_huge_page_isolate(page, none_or_zero,
						    referenced, writable, result);
		return 1;
	}
out:
	release_pte_pages(pte, _pte, compound_pagelist);
	trace_mm_collapse_huge_page_isolate(page, none_or_zero,
					    referenced, writable, result);
	return 0;
}

static void __collapse_huge_page_copy(pte_t *pte, struct page *page,
				      struct vm_area_struct *vma,
				      unsigned long address,
				      spinlock_t *ptl,
				      struct list_head *compound_pagelist)
{
	struct page *src_page, *tmp;
	pte_t *_pte;
	for (_pte = pte; _pte < pte + HPAGE_PMD_NR;
				_pte++, page++, address += PAGE_SIZE) {
		pte_t pteval = *_pte;

		if (pte_none(pteval) || is_zero_pfn(pte_pfn(pteval))) {
			clear_user_highpage(page, address);
			add_mm_counter(vma->vm_mm, MM_ANONPAGES, 1);
			if (is_zero_pfn(pte_pfn(pteval))) {
				/*
				 * ptl mostly unnecessary.
				 */
				spin_lock(ptl);
				/*
				 * paravirt calls inside pte_clear here are
				 * superfluous.
				 */
				pte_clear(vma->vm_mm, address, _pte);
				spin_unlock(ptl);
			}
		} else {
			src_page = pte_page(pteval);
			copy_user_highpage(page, src_page, address, vma);
			if (!PageCompound(src_page))
				release_pte_page(src_page);
			/*
			 * ptl mostly unnecessary, but preempt has to
			 * be disabled to update the per-cpu stats
			 * inside page_remove_rmap().
			 */
			spin_lock(ptl);
			/*
			 * paravirt calls inside pte_clear here are
			 * superfluous.
			 */
			pte_clear(vma->vm_mm, address, _pte);
			page_remove_rmap(src_page, false);
			spin_unlock(ptl);
			free_page_and_swap_cache(src_page);
		}
	}

	list_for_each_entry_safe(src_page, tmp, compound_pagelist, lru) {
		list_del(&src_page->lru);
		release_pte_page(src_page);
	}
}

static void khugepaged_alloc_sleep(void)
{
	DEFINE_WAIT(wait);

	add_wait_queue(&khugepaged_wait, &wait);
	freezable_schedule_timeout_interruptible(
		msecs_to_jiffies(khugepaged_alloc_sleep_millisecs));
	remove_wait_queue(&khugepaged_wait, &wait);
}

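/* Per-scan histogram: how many candidate pages live on each NUMA node. */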
static int khugepaged_node_load[MAX_NUMNODES];

static bool khugepaged_scan_abort(int nid)
{
	int i;

	/*
	 * If node_reclaim_mode is disabled, then no extra effort is made to
	 * allocate memory locally.
	 */
	if (!node_reclaim_mode)
		return false;

	/* If there is a count for this node already, it must be acceptable */
	if (khugepaged_node_load[nid])
		return false;

	for (i = 0; i < MAX_NUMNODES; i++) {
		if (!khugepaged_node_load[i])
			continue;
		if (node_distance(nid, i) > node_reclaim_distance)
			return true;
	}
	return false;
}

/* Defrag for khugepaged will enter direct reclaim/compaction if necessary */
static inline gfp_t alloc_hugepage_khugepaged_gfpmask(void)
{
	return khugepaged_defrag() ? GFP_TRANSHUGE : GFP_TRANSHUGE_LIGHT;
}

#ifdef CONFIG_NUMA
static int khugepaged_find_target_node(void)
{
	static int last_khugepaged_target_node = NUMA_NO_NODE;
	int nid, target_node = 0, max_value = 0;

	/* find first node with max normal pages hit */
	for (nid = 0; nid < MAX_NUMNODES; nid++)
		if (khugepaged_node_load[nid] > max_value) {
			max_value = khugepaged_node_load[nid];
			target_node = nid;
		}

	/* do some balancing if several nodes have the same hit count */
	if (target_node <= last_khugepaged_target_node)
		for (nid = last_khugepaged_target_node + 1; nid < MAX_NUMNODES;
				nid++)
			if (max_value == khugepaged_node_load[nid]) {
				target_node = nid;
				break;
			}

	last_khugepaged_target_node = target_node;
	return target_node;
}

static bool khugepaged_prealloc_page(struct page **hpage, bool *wait)
{
	if (IS_ERR(*hpage)) {
		if (!*wait)
			return false;

		*wait = false;
		*hpage = NULL;
		khugepaged_alloc_sleep();
	} else if (*hpage) {
		put_page(*hpage);
		*hpage = NULL;
	}

	return true;
}

static struct page *
khugepaged_alloc_page(struct page **hpage, gfp_t gfp, int node)
{
	VM_BUG_ON_PAGE(*hpage, *hpage);

	*hpage = __alloc_pages_node(node, gfp, HPAGE_PMD_ORDER);
	if (unlikely(!*hpage)) {
		count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
		*hpage = ERR_PTR(-ENOMEM);
		return NULL;
	}

	prep_transhuge_page(*hpage);
	count_vm_event(THP_COLLAPSE_ALLOC);
	return *hpage;
}
#else
static int khugepaged_find_target_node(void)
{
	return 0;
}

static inline struct page *alloc_khugepaged_hugepage(void)
{
	struct page *page;

	page = alloc_pages(alloc_hugepage_khugepaged_gfpmask(),
			   HPAGE_PMD_ORDER);
	if (page)
		prep_transhuge_page(page);
	return page;
}

static struct page *khugepaged_alloc_hugepage(bool *wait)
{
	struct page *hpage;

	do {
		hpage = alloc_khugepaged_hugepage();
		if (!hpage) {
			count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
			if (!*wait)
				return NULL;

			*wait = false;
			khugepaged_alloc_sleep();
		} else
			count_vm_event(THP_COLLAPSE_ALLOC);
	} while (unlikely(!hpage) && likely(khugepaged_enabled()));

	return hpage;
}

static bool khugepaged_prealloc_page(struct page **hpage, bool *wait)
{
	/*
	 * If the hpage allocated earlier was briefly exposed in page cache
	 * before collapse_file() failed, it is possible that racing lookups
	 * have not yet completed, and would then be unpleasantly surprised by
	 * finding the hpage reused for the same mapping at a different offset.
	 * Just release the previous allocation if there is any danger of that.
	 */
	if (*hpage && page_count(*hpage) > 1) {
		put_page(*hpage);
		*hpage = NULL;
	}

	if (!*hpage)
		*hpage = khugepaged_alloc_hugepage(wait);

	if (unlikely(!*hpage))
		return false;

	return true;
}

static struct page *
khugepaged_alloc_page(struct page **hpage, gfp_t gfp, int node)
{
	VM_BUG_ON(!*hpage);

	return *hpage;
}
#endif

/*
 * If the mmap_lock was temporarily dropped, revalidate the vma
 * after re-taking it.
 * Return 0 on success; otherwise return a non-zero scan_result code.
 */

static int hugepage_vma_revalidate(struct mm_struct *mm, unsigned long address,
		struct vm_area_struct **vmap)
{
	struct vm_area_struct *vma;
	unsigned long hstart, hend;

	if (unlikely(khugepaged_test_exit(mm)))
		return SCAN_ANY_PROCESS;

	*vmap = vma = find_vma(mm, address);
	if (!vma)
		return SCAN_VMA_NULL;

	hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
	hend = vma->vm_end & HPAGE_PMD_MASK;
	if (address < hstart || address + HPAGE_PMD_SIZE > hend)
		return SCAN_ADDRESS_RANGE;
	if (!hugepage_vma_check(vma, vma->vm_flags))
		return SCAN_VMA_CHECK;
	/* Anon VMA expected */
	if (!vma->anon_vma || vma->vm_ops)
		return SCAN_VMA_CHECK;
	return 0;
}

/*
 * Bring missing pages in from swap, to complete THP collapse.
 * Only done if khugepaged_scan_pmd believes it is worthwhile.
 *
 * Called and returns without pte mapped or spinlocks held,
 * but with mmap_lock held to protect against vma changes.
 */

static bool __collapse_huge_page_swapin(struct mm_struct *mm,
					struct vm_area_struct *vma,
					unsigned long haddr, pmd_t *pmd,
					int referenced)
{
	int swapped_in = 0;
	vm_fault_t ret = 0;
	unsigned long address, end = haddr + (HPAGE_PMD_NR * PAGE_SIZE);

	for (address = haddr; address < end; address += PAGE_SIZE) {
		struct vm_fault vmf = {
			.vma = vma,
			.address = address,
			.pgoff = linear_page_index(vma, haddr),
			.flags = FAULT_FLAG_ALLOW_RETRY,
			.pmd = pmd,
			.vma_flags = vma->vm_flags,
			.vma_page_prot = vma->vm_page_prot,
		};

		vmf.pte = pte_offset_map(pmd, address);
		vmf.orig_pte = *vmf.pte;
		if (!is_swap_pte(vmf.orig_pte)) {
			pte_unmap(vmf.pte);
			continue;
		}
		swapped_in++;
		ret = do_swap_page(&vmf);

		/* do_swap_page returns VM_FAULT_RETRY with released mmap_lock */
		if (ret & VM_FAULT_RETRY) {
			mmap_read_lock(mm);
			if (hugepage_vma_revalidate(mm, haddr, &vma)) {
				/* vma is no longer available, don't continue to swapin */
				trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 0);
				return false;
			}
			/* check if the pmd is still valid */
			if (mm_find_pmd(mm, haddr) != pmd) {
				trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 0);
				return false;
			}
		}
		if (ret & VM_FAULT_ERROR) {
			trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 0);
			return false;
		}
	}

	/* Drain LRU add pagevec to remove extra pin on the swapped in pages */
	if (swapped_in)
		lru_add_drain();

	trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 1);
	return true;
}

collapse_huge_page(struct mm_struct * mm,unsigned long address,struct page ** hpage,int node,int referenced,int unmapped)1059*4882a593Smuzhiyun static void collapse_huge_page(struct mm_struct *mm,
1060*4882a593Smuzhiyun 				   unsigned long address,
1061*4882a593Smuzhiyun 				   struct page **hpage,
1062*4882a593Smuzhiyun 				   int node, int referenced, int unmapped)
1063*4882a593Smuzhiyun {
1064*4882a593Smuzhiyun 	LIST_HEAD(compound_pagelist);
1065*4882a593Smuzhiyun 	pmd_t *pmd, _pmd;
1066*4882a593Smuzhiyun 	pte_t *pte;
1067*4882a593Smuzhiyun 	pgtable_t pgtable;
1068*4882a593Smuzhiyun 	struct page *new_page;
1069*4882a593Smuzhiyun 	spinlock_t *pmd_ptl, *pte_ptl;
1070*4882a593Smuzhiyun 	int isolated = 0, result = 0;
1071*4882a593Smuzhiyun 	struct vm_area_struct *vma;
1072*4882a593Smuzhiyun 	struct mmu_notifier_range range;
1073*4882a593Smuzhiyun 	gfp_t gfp;
1074*4882a593Smuzhiyun 
1075*4882a593Smuzhiyun 	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
1076*4882a593Smuzhiyun 
1077*4882a593Smuzhiyun 	/* Only allocate from the target node */
1078*4882a593Smuzhiyun 	gfp = alloc_hugepage_khugepaged_gfpmask() | __GFP_THISNODE;
1079*4882a593Smuzhiyun 
1080*4882a593Smuzhiyun 	/*
1081*4882a593Smuzhiyun 	 * Before allocating the hugepage, release the mmap_lock read lock.
1082*4882a593Smuzhiyun 	 * The allocation can take potentially a long time if it involves
1083*4882a593Smuzhiyun 	 * sync compaction, and we do not need to hold the mmap_lock during
1084*4882a593Smuzhiyun 	 * that. We will recheck the vma after taking it again in write mode.
1085*4882a593Smuzhiyun 	 */
1086*4882a593Smuzhiyun 	mmap_read_unlock(mm);
1087*4882a593Smuzhiyun 	new_page = khugepaged_alloc_page(hpage, gfp, node);
1088*4882a593Smuzhiyun 	if (!new_page) {
1089*4882a593Smuzhiyun 		result = SCAN_ALLOC_HUGE_PAGE_FAIL;
1090*4882a593Smuzhiyun 		goto out_nolock;
1091*4882a593Smuzhiyun 	}
1092*4882a593Smuzhiyun 
1093*4882a593Smuzhiyun 	if (unlikely(mem_cgroup_charge(new_page, mm, gfp))) {
1094*4882a593Smuzhiyun 		result = SCAN_CGROUP_CHARGE_FAIL;
1095*4882a593Smuzhiyun 		goto out_nolock;
1096*4882a593Smuzhiyun 	}
1097*4882a593Smuzhiyun 	count_memcg_page_event(new_page, THP_COLLAPSE_ALLOC);
1098*4882a593Smuzhiyun 
1099*4882a593Smuzhiyun 	mmap_read_lock(mm);
1100*4882a593Smuzhiyun 	result = hugepage_vma_revalidate(mm, address, &vma);
1101*4882a593Smuzhiyun 	if (result) {
1102*4882a593Smuzhiyun 		mmap_read_unlock(mm);
1103*4882a593Smuzhiyun 		goto out_nolock;
1104*4882a593Smuzhiyun 	}
1105*4882a593Smuzhiyun 
1106*4882a593Smuzhiyun 	pmd = mm_find_pmd(mm, address);
1107*4882a593Smuzhiyun 	if (!pmd) {
1108*4882a593Smuzhiyun 		result = SCAN_PMD_NULL;
1109*4882a593Smuzhiyun 		mmap_read_unlock(mm);
1110*4882a593Smuzhiyun 		goto out_nolock;
1111*4882a593Smuzhiyun 	}
1112*4882a593Smuzhiyun 
1113*4882a593Smuzhiyun 	/*
1114*4882a593Smuzhiyun 	 * __collapse_huge_page_swapin always returns with mmap_lock locked.
1115*4882a593Smuzhiyun 	 * If it fails, we release mmap_lock and jump out_nolock.
1116*4882a593Smuzhiyun 	 * Continuing to collapse causes inconsistency.
1117*4882a593Smuzhiyun 	 */
1118*4882a593Smuzhiyun 	if (unmapped && !__collapse_huge_page_swapin(mm, vma, address,
1119*4882a593Smuzhiyun 						     pmd, referenced)) {
1120*4882a593Smuzhiyun 		mmap_read_unlock(mm);
1121*4882a593Smuzhiyun 		goto out_nolock;
1122*4882a593Smuzhiyun 	}
1123*4882a593Smuzhiyun 
1124*4882a593Smuzhiyun 	mmap_read_unlock(mm);
1125*4882a593Smuzhiyun 	/*
1126*4882a593Smuzhiyun 	 * Prevent all access to the pagetables, with the exception of
1127*4882a593Smuzhiyun 	 * gup_fast (handled later by the ptep_clear_flush) and the VM
1128*4882a593Smuzhiyun 	 * (handled by the anon_vma lock + PG_lock).
1129*4882a593Smuzhiyun 	 */
1130*4882a593Smuzhiyun 	mmap_write_lock(mm);
1131*4882a593Smuzhiyun 	result = hugepage_vma_revalidate(mm, address, &vma);
1132*4882a593Smuzhiyun 	if (result)
1133*4882a593Smuzhiyun 		goto out;
1134*4882a593Smuzhiyun 	/* check if the pmd is still valid */
1135*4882a593Smuzhiyun 	if (mm_find_pmd(mm, address) != pmd)
1136*4882a593Smuzhiyun 		goto out;
1137*4882a593Smuzhiyun 
1138*4882a593Smuzhiyun 	vm_write_begin(vma);
1139*4882a593Smuzhiyun 	anon_vma_lock_write(vma->anon_vma);
1140*4882a593Smuzhiyun 
1141*4882a593Smuzhiyun 	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, NULL, mm,
1142*4882a593Smuzhiyun 				address, address + HPAGE_PMD_SIZE);
1143*4882a593Smuzhiyun 	mmu_notifier_invalidate_range_start(&range);
1144*4882a593Smuzhiyun 
1145*4882a593Smuzhiyun 	pte = pte_offset_map(pmd, address);
1146*4882a593Smuzhiyun 	pte_ptl = pte_lockptr(mm, pmd);
1147*4882a593Smuzhiyun 
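	/*
	 * Lock order: take the pmd lock to clear the pmd, then the pte
	 * lock for the pte-level isolation below.  With split page table
	 * locks disabled both may resolve to the same mm-wide lock.
	 */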
1148*4882a593Smuzhiyun 	pmd_ptl = pmd_lock(mm, pmd); /* probably unnecessary */
1149*4882a593Smuzhiyun 	/*
1150*4882a593Smuzhiyun 	 * This removes any huge TLB entry from the CPU so we won't allow
1151*4882a593Smuzhiyun 	 * huge and small TLB entries for the same virtual address to
1152*4882a593Smuzhiyun 	 * avoid the risk of CPU bugs in that area.
1153*4882a593Smuzhiyun 	 *
1154*4882a593Smuzhiyun 	 * Parallel fast GUP is fine since fast GUP will back off when
1155*4882a593Smuzhiyun 	 * it detects PMD is changed.
1156*4882a593Smuzhiyun 	 */
1157*4882a593Smuzhiyun 	_pmd = pmdp_collapse_flush(vma, address, pmd);
1158*4882a593Smuzhiyun 	spin_unlock(pmd_ptl);
1159*4882a593Smuzhiyun 	mmu_notifier_invalidate_range_end(&range);
1160*4882a593Smuzhiyun 	tlb_remove_table_sync_one();
1161*4882a593Smuzhiyun 
1162*4882a593Smuzhiyun 	spin_lock(pte_ptl);
1163*4882a593Smuzhiyun 	isolated = __collapse_huge_page_isolate(vma, address, pte,
1164*4882a593Smuzhiyun 			&compound_pagelist);
1165*4882a593Smuzhiyun 	spin_unlock(pte_ptl);
1166*4882a593Smuzhiyun 
1167*4882a593Smuzhiyun 	if (unlikely(!isolated)) {
1168*4882a593Smuzhiyun 		pte_unmap(pte);
1169*4882a593Smuzhiyun 		spin_lock(pmd_ptl);
1170*4882a593Smuzhiyun 		BUG_ON(!pmd_none(*pmd));
1171*4882a593Smuzhiyun 		/*
1172*4882a593Smuzhiyun 		 * We can only use set_pmd_at when establishing
1173*4882a593Smuzhiyun 		 * hugepmds and never for establishing regular pmds that
1174*4882a593Smuzhiyun 		 * point to regular pagetables. Use pmd_populate for that.
1175*4882a593Smuzhiyun 		 */
1176*4882a593Smuzhiyun 		pmd_populate(mm, pmd, pmd_pgtable(_pmd));
1177*4882a593Smuzhiyun 		spin_unlock(pmd_ptl);
1178*4882a593Smuzhiyun 		anon_vma_unlock_write(vma->anon_vma);
1179*4882a593Smuzhiyun 		vm_write_end(vma);
1180*4882a593Smuzhiyun 		result = SCAN_FAIL;
1181*4882a593Smuzhiyun 		goto out;
1182*4882a593Smuzhiyun 	}
1183*4882a593Smuzhiyun 
1184*4882a593Smuzhiyun 	/*
1185*4882a593Smuzhiyun 	 * All pages are isolated and locked so anon_vma rmap
1186*4882a593Smuzhiyun 	 * can't run anymore.
1187*4882a593Smuzhiyun 	 */
1188*4882a593Smuzhiyun 	anon_vma_unlock_write(vma->anon_vma);
1189*4882a593Smuzhiyun 
1190*4882a593Smuzhiyun 	__collapse_huge_page_copy(pte, new_page, vma, address, pte_ptl,
1191*4882a593Smuzhiyun 			&compound_pagelist);
1192*4882a593Smuzhiyun 	pte_unmap(pte);
1193*4882a593Smuzhiyun 	__SetPageUptodate(new_page);
1194*4882a593Smuzhiyun 	pgtable = pmd_pgtable(_pmd);
1195*4882a593Smuzhiyun 
1196*4882a593Smuzhiyun 	_pmd = mk_huge_pmd(new_page, vma->vm_page_prot);
1197*4882a593Smuzhiyun 	_pmd = maybe_pmd_mkwrite(pmd_mkdirty(_pmd), vma);
1198*4882a593Smuzhiyun 
1199*4882a593Smuzhiyun 	/*
1200*4882a593Smuzhiyun 	 * spin_lock() below is not the equivalent of smp_wmb(), so
1201*4882a593Smuzhiyun 	 * this is needed to prevent the copy_huge_page writes from
1202*4882a593Smuzhiyun 	 * becoming visible after the set_pmd_at() write.
1203*4882a593Smuzhiyun 	 */
1204*4882a593Smuzhiyun 	smp_wmb();
1205*4882a593Smuzhiyun 
1206*4882a593Smuzhiyun 	spin_lock(pmd_ptl);
1207*4882a593Smuzhiyun 	BUG_ON(!pmd_none(*pmd));
1208*4882a593Smuzhiyun 	page_add_new_anon_rmap(new_page, vma, address, true);
1209*4882a593Smuzhiyun 	lru_cache_add_inactive_or_unevictable(new_page, vma);
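	/*
	 * Deposit the old (now empty) pte page table so that a later
	 * split of the huge pmd can withdraw it instead of allocating.
	 */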
1210*4882a593Smuzhiyun 	pgtable_trans_huge_deposit(mm, pmd, pgtable);
1211*4882a593Smuzhiyun 	set_pmd_at(mm, address, pmd, _pmd);
1212*4882a593Smuzhiyun 	update_mmu_cache_pmd(vma, address, pmd);
1213*4882a593Smuzhiyun 	spin_unlock(pmd_ptl);
1214*4882a593Smuzhiyun 	vm_write_end(vma);
1215*4882a593Smuzhiyun 
1216*4882a593Smuzhiyun 	*hpage = NULL;
1217*4882a593Smuzhiyun 
1218*4882a593Smuzhiyun 	khugepaged_pages_collapsed++;
1219*4882a593Smuzhiyun 	result = SCAN_SUCCEED;
1220*4882a593Smuzhiyun out_up_write:
1221*4882a593Smuzhiyun 	mmap_write_unlock(mm);
1222*4882a593Smuzhiyun out_nolock:
1223*4882a593Smuzhiyun 	if (!IS_ERR_OR_NULL(*hpage))
1224*4882a593Smuzhiyun 		mem_cgroup_uncharge(*hpage);
1225*4882a593Smuzhiyun 	trace_mm_collapse_huge_page(mm, isolated, result);
1226*4882a593Smuzhiyun 	return;
1227*4882a593Smuzhiyun out:
1228*4882a593Smuzhiyun 	goto out_up_write;
1229*4882a593Smuzhiyun }
1230*4882a593Smuzhiyun 
1231*4882a593Smuzhiyun static int khugepaged_scan_pmd(struct mm_struct *mm,
1232*4882a593Smuzhiyun 			       struct vm_area_struct *vma,
1233*4882a593Smuzhiyun 			       unsigned long address,
1234*4882a593Smuzhiyun 			       struct page **hpage)
1235*4882a593Smuzhiyun {
1236*4882a593Smuzhiyun 	pmd_t *pmd;
1237*4882a593Smuzhiyun 	pte_t *pte, *_pte;
1238*4882a593Smuzhiyun 	int ret = 0, result = 0, referenced = 0;
1239*4882a593Smuzhiyun 	int none_or_zero = 0, shared = 0;
1240*4882a593Smuzhiyun 	struct page *page = NULL;
1241*4882a593Smuzhiyun 	unsigned long _address;
1242*4882a593Smuzhiyun 	spinlock_t *ptl;
1243*4882a593Smuzhiyun 	int node = NUMA_NO_NODE, unmapped = 0;
1244*4882a593Smuzhiyun 	bool writable = false;
1245*4882a593Smuzhiyun 
1246*4882a593Smuzhiyun 	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
1247*4882a593Smuzhiyun 
1248*4882a593Smuzhiyun 	pmd = mm_find_pmd(mm, address);
1249*4882a593Smuzhiyun 	if (!pmd) {
1250*4882a593Smuzhiyun 		result = SCAN_PMD_NULL;
1251*4882a593Smuzhiyun 		goto out;
1252*4882a593Smuzhiyun 	}
1253*4882a593Smuzhiyun 
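	/*
	 * Scan the HPAGE_PMD_NR ptes under this pmd.  The max_ptes_swap,
	 * max_ptes_none and max_ptes_shared cutoffs below correspond to the
	 * tunables under /sys/kernel/mm/transparent_hugepage/khugepaged/.
	 */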
1254*4882a593Smuzhiyun 	memset(khugepaged_node_load, 0, sizeof(khugepaged_node_load));
1255*4882a593Smuzhiyun 	pte = pte_offset_map_lock(mm, pmd, address, &ptl);
1256*4882a593Smuzhiyun 	for (_address = address, _pte = pte; _pte < pte+HPAGE_PMD_NR;
1257*4882a593Smuzhiyun 	     _pte++, _address += PAGE_SIZE) {
1258*4882a593Smuzhiyun 		pte_t pteval = *_pte;
1259*4882a593Smuzhiyun 		if (is_swap_pte(pteval)) {
1260*4882a593Smuzhiyun 			if (++unmapped <= khugepaged_max_ptes_swap) {
1261*4882a593Smuzhiyun 				/*
1262*4882a593Smuzhiyun 				 * Always be strict with uffd-wp
1263*4882a593Smuzhiyun 				 * enabled swap entries.  Please see
1264*4882a593Smuzhiyun 				 * comment below for pte_uffd_wp().
1265*4882a593Smuzhiyun 				 */
1266*4882a593Smuzhiyun 				if (pte_swp_uffd_wp(pteval)) {
1267*4882a593Smuzhiyun 					result = SCAN_PTE_UFFD_WP;
1268*4882a593Smuzhiyun 					goto out_unmap;
1269*4882a593Smuzhiyun 				}
1270*4882a593Smuzhiyun 				continue;
1271*4882a593Smuzhiyun 			} else {
1272*4882a593Smuzhiyun 				result = SCAN_EXCEED_SWAP_PTE;
1273*4882a593Smuzhiyun 				goto out_unmap;
1274*4882a593Smuzhiyun 			}
1275*4882a593Smuzhiyun 		}
1276*4882a593Smuzhiyun 		if (pte_none(pteval) || is_zero_pfn(pte_pfn(pteval))) {
1277*4882a593Smuzhiyun 			if (!userfaultfd_armed(vma) &&
1278*4882a593Smuzhiyun 			    ++none_or_zero <= khugepaged_max_ptes_none) {
1279*4882a593Smuzhiyun 				continue;
1280*4882a593Smuzhiyun 			} else {
1281*4882a593Smuzhiyun 				result = SCAN_EXCEED_NONE_PTE;
1282*4882a593Smuzhiyun 				goto out_unmap;
1283*4882a593Smuzhiyun 			}
1284*4882a593Smuzhiyun 		}
1285*4882a593Smuzhiyun 		if (!pte_present(pteval)) {
1286*4882a593Smuzhiyun 			result = SCAN_PTE_NON_PRESENT;
1287*4882a593Smuzhiyun 			goto out_unmap;
1288*4882a593Smuzhiyun 		}
1289*4882a593Smuzhiyun 		if (pte_uffd_wp(pteval)) {
1290*4882a593Smuzhiyun 			/*
1291*4882a593Smuzhiyun 			 * Don't collapse the page if any of the small
1292*4882a593Smuzhiyun 			 * PTEs are armed with uffd write protection.
1293*4882a593Smuzhiyun 			 * Here we can also mark the new huge pmd as
1294*4882a593Smuzhiyun 			 * write protected if any of the small ones is
1295*4882a593Smuzhiyun 			 * marked but that could bring unknown
1296*4882a593Smuzhiyun 			 * userfault messages that fall outside of
1297*4882a593Smuzhiyun 			 * the registered range.  So, just be simple.
1298*4882a593Smuzhiyun 			 */
1299*4882a593Smuzhiyun 			result = SCAN_PTE_UFFD_WP;
1300*4882a593Smuzhiyun 			goto out_unmap;
1301*4882a593Smuzhiyun 		}
1302*4882a593Smuzhiyun 		if (pte_write(pteval))
1303*4882a593Smuzhiyun 			writable = true;
1304*4882a593Smuzhiyun 
1305*4882a593Smuzhiyun 		page = vm_normal_page(vma, _address, pteval);
1306*4882a593Smuzhiyun 		if (unlikely(!page)) {
1307*4882a593Smuzhiyun 			result = SCAN_PAGE_NULL;
1308*4882a593Smuzhiyun 			goto out_unmap;
1309*4882a593Smuzhiyun 		}
1310*4882a593Smuzhiyun 
1311*4882a593Smuzhiyun 		if (page_mapcount(page) > 1 &&
1312*4882a593Smuzhiyun 				++shared > khugepaged_max_ptes_shared) {
1313*4882a593Smuzhiyun 			result = SCAN_EXCEED_SHARED_PTE;
1314*4882a593Smuzhiyun 			goto out_unmap;
1315*4882a593Smuzhiyun 		}
1316*4882a593Smuzhiyun 
1317*4882a593Smuzhiyun 		page = compound_head(page);
1318*4882a593Smuzhiyun 
1319*4882a593Smuzhiyun 		/*
1320*4882a593Smuzhiyun 		 * Record which node the original page is from and save this
1321*4882a593Smuzhiyun 		 * information to khugepaged_node_load[].
1322*4882a593Smuzhiyun 		 * Khugepaged will allocate the hugepage from the node that
1323*4882a593Smuzhiyun 		 * has the max hit record.
1324*4882a593Smuzhiyun 		 */
1325*4882a593Smuzhiyun 		node = page_to_nid(page);
1326*4882a593Smuzhiyun 		if (khugepaged_scan_abort(node)) {
1327*4882a593Smuzhiyun 			result = SCAN_SCAN_ABORT;
1328*4882a593Smuzhiyun 			goto out_unmap;
1329*4882a593Smuzhiyun 		}
1330*4882a593Smuzhiyun 		khugepaged_node_load[node]++;
1331*4882a593Smuzhiyun 		if (!PageLRU(page)) {
1332*4882a593Smuzhiyun 			result = SCAN_PAGE_LRU;
1333*4882a593Smuzhiyun 			goto out_unmap;
1334*4882a593Smuzhiyun 		}
1335*4882a593Smuzhiyun 		if (PageLocked(page)) {
1336*4882a593Smuzhiyun 			result = SCAN_PAGE_LOCK;
1337*4882a593Smuzhiyun 			goto out_unmap;
1338*4882a593Smuzhiyun 		}
1339*4882a593Smuzhiyun 		if (!PageAnon(page)) {
1340*4882a593Smuzhiyun 			result = SCAN_PAGE_ANON;
1341*4882a593Smuzhiyun 			goto out_unmap;
1342*4882a593Smuzhiyun 		}
1343*4882a593Smuzhiyun 
1344*4882a593Smuzhiyun 		/*
1345*4882a593Smuzhiyun 		 * Check if the page has any GUP (or other external) pins.
1346*4882a593Smuzhiyun 		 *
1347*4882a593Smuzhiyun 		 * Here the check is racy: it may see total_mapcount > refcount
1348*4882a593Smuzhiyun 		 * in some cases.
1349*4882a593Smuzhiyun 		 * For example, one process with one forked child process.
1350*4882a593Smuzhiyun 		 * The parent has the PMD split due to MADV_DONTNEED, then
1351*4882a593Smuzhiyun 		 * the child is trying to unmap the whole PMD, but khugepaged
1352*4882a593Smuzhiyun 		 * may be scanning the parent between when the child clears
1353*4882a593Smuzhiyun 		 * the PageDoubleMap flag and when it decrements the mapcount.
1354*4882a593Smuzhiyun 		 * So khugepaged may see total_mapcount > refcount.
1355*4882a593Smuzhiyun 		 *
1356*4882a593Smuzhiyun 		 * But such a case is ephemeral; we can always retry the
1357*4882a593Smuzhiyun 		 * collapse later.  However, it may report a false positive if
1358*4882a593Smuzhiyun 		 * the page has excessive GUP pins (i.e. 512).  Anyway, the
1359*4882a593Smuzhiyun 		 * same check will be done again later, so the risk seems low.
1360*4882a593Smuzhiyun 		 */
1361*4882a593Smuzhiyun 		if (!is_refcount_suitable(page)) {
1362*4882a593Smuzhiyun 			result = SCAN_PAGE_COUNT;
1363*4882a593Smuzhiyun 			goto out_unmap;
1364*4882a593Smuzhiyun 		}
1365*4882a593Smuzhiyun 		if (pte_young(pteval) ||
1366*4882a593Smuzhiyun 		    page_is_young(page) || PageReferenced(page) ||
1367*4882a593Smuzhiyun 		    mmu_notifier_test_young(vma->vm_mm, address))
1368*4882a593Smuzhiyun 			referenced++;
1369*4882a593Smuzhiyun 	}
1370*4882a593Smuzhiyun 	if (!writable) {
1371*4882a593Smuzhiyun 		result = SCAN_PAGE_RO;
1372*4882a593Smuzhiyun 	} else if (!referenced || (unmapped && referenced < HPAGE_PMD_NR/2)) {
1373*4882a593Smuzhiyun 		result = SCAN_LACK_REFERENCED_PAGE;
1374*4882a593Smuzhiyun 	} else {
1375*4882a593Smuzhiyun 		result = SCAN_SUCCEED;
1376*4882a593Smuzhiyun 		ret = 1;
1377*4882a593Smuzhiyun 	}
1378*4882a593Smuzhiyun out_unmap:
1379*4882a593Smuzhiyun 	pte_unmap_unlock(pte, ptl);
1380*4882a593Smuzhiyun 	if (ret) {
1381*4882a593Smuzhiyun 		node = khugepaged_find_target_node();
1382*4882a593Smuzhiyun 		/* collapse_huge_page will return with the mmap_lock released */
1383*4882a593Smuzhiyun 		collapse_huge_page(mm, address, hpage, node,
1384*4882a593Smuzhiyun 				referenced, unmapped);
1385*4882a593Smuzhiyun 	}
1386*4882a593Smuzhiyun out:
1387*4882a593Smuzhiyun 	trace_mm_khugepaged_scan_pmd(mm, page, writable, referenced,
1388*4882a593Smuzhiyun 				     none_or_zero, result, unmapped);
1389*4882a593Smuzhiyun 	return ret;
1390*4882a593Smuzhiyun }
1391*4882a593Smuzhiyun 
1392*4882a593Smuzhiyun static void collect_mm_slot(struct mm_slot *mm_slot)
1393*4882a593Smuzhiyun {
1394*4882a593Smuzhiyun 	struct mm_struct *mm = mm_slot->mm;
1395*4882a593Smuzhiyun 
1396*4882a593Smuzhiyun 	lockdep_assert_held(&khugepaged_mm_lock);
1397*4882a593Smuzhiyun 
1398*4882a593Smuzhiyun 	if (khugepaged_test_exit(mm)) {
1399*4882a593Smuzhiyun 		/* free mm_slot */
1400*4882a593Smuzhiyun 		hash_del(&mm_slot->hash);
1401*4882a593Smuzhiyun 		list_del(&mm_slot->mm_node);
1402*4882a593Smuzhiyun 
1403*4882a593Smuzhiyun 		/*
1404*4882a593Smuzhiyun 		 * Not strictly needed because the mm exited already.
1405*4882a593Smuzhiyun 		 *
1406*4882a593Smuzhiyun 		 * clear_bit(MMF_VM_HUGEPAGE, &mm->flags);
1407*4882a593Smuzhiyun 		 */
1408*4882a593Smuzhiyun 
1409*4882a593Smuzhiyun 		/* khugepaged_mm_lock actually not necessary for the below */
1410*4882a593Smuzhiyun 		free_mm_slot(mm_slot);
1411*4882a593Smuzhiyun 		mmdrop(mm);
1412*4882a593Smuzhiyun 	}
1413*4882a593Smuzhiyun }
1414*4882a593Smuzhiyun 
1415*4882a593Smuzhiyun #ifdef CONFIG_SHMEM
1416*4882a593Smuzhiyun /*
1417*4882a593Smuzhiyun  * Notify khugepaged that the given addr of the mm is a pte-mapped THP. Then
1418*4882a593Smuzhiyun  * khugepaged should try to collapse the page table.
1419*4882a593Smuzhiyun  */
1420*4882a593Smuzhiyun static int khugepaged_add_pte_mapped_thp(struct mm_struct *mm,
1421*4882a593Smuzhiyun 					 unsigned long addr)
1422*4882a593Smuzhiyun {
1423*4882a593Smuzhiyun 	struct mm_slot *mm_slot;
1424*4882a593Smuzhiyun 
1425*4882a593Smuzhiyun 	VM_BUG_ON(addr & ~HPAGE_PMD_MASK);
1426*4882a593Smuzhiyun 
1427*4882a593Smuzhiyun 	spin_lock(&khugepaged_mm_lock);
1428*4882a593Smuzhiyun 	mm_slot = get_mm_slot(mm);
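	/*
	 * If the slot is gone or its array is already full, the address is
	 * silently dropped: the pte-mapped THP simply stays uncollapsed
	 * until it is noticed again.
	 */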
1429*4882a593Smuzhiyun 	if (likely(mm_slot && mm_slot->nr_pte_mapped_thp < MAX_PTE_MAPPED_THP))
1430*4882a593Smuzhiyun 		mm_slot->pte_mapped_thp[mm_slot->nr_pte_mapped_thp++] = addr;
1431*4882a593Smuzhiyun 	spin_unlock(&khugepaged_mm_lock);
1432*4882a593Smuzhiyun 	return 0;
1433*4882a593Smuzhiyun }
1434*4882a593Smuzhiyun 
1435*4882a593Smuzhiyun /**
1436*4882a593Smuzhiyun  * Try to collapse a pte-mapped THP for mm at address haddr.
1437*4882a593Smuzhiyun  *
1438*4882a593Smuzhiyun  * This function checks whether all the PTEs in the PMD are pointing to the
1439*4882a593Smuzhiyun  * right THP. If so, retract the page table so the THP can refault in
1440*4882a593Smuzhiyun  * as pmd-mapped.
1441*4882a593Smuzhiyun  */
1442*4882a593Smuzhiyun void collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr)
1443*4882a593Smuzhiyun {
1444*4882a593Smuzhiyun 	unsigned long haddr = addr & HPAGE_PMD_MASK;
1445*4882a593Smuzhiyun 	struct vm_area_struct *vma = find_vma(mm, haddr);
1446*4882a593Smuzhiyun 	struct page *hpage;
1447*4882a593Smuzhiyun 	pte_t *start_pte, *pte;
1448*4882a593Smuzhiyun 	pmd_t *pmd, _pmd;
1449*4882a593Smuzhiyun 	spinlock_t *ptl;
1450*4882a593Smuzhiyun 	int count = 0;
1451*4882a593Smuzhiyun 	int i;
1452*4882a593Smuzhiyun 	struct mmu_notifier_range range;
1453*4882a593Smuzhiyun 
1454*4882a593Smuzhiyun 	if (!vma || !vma->vm_file ||
1455*4882a593Smuzhiyun 	    vma->vm_start > haddr || vma->vm_end < haddr + HPAGE_PMD_SIZE)
1456*4882a593Smuzhiyun 		return;
1457*4882a593Smuzhiyun 
1458*4882a593Smuzhiyun 	/*
1459*4882a593Smuzhiyun 	 * This vm_flags may not have VM_HUGEPAGE if the page was not
1460*4882a593Smuzhiyun 	 * collapsed by this mm. But we can still collapse if the page is
1461*4882a593Smuzhiyun 	 * collapsed by this mm. But we can still collapse if the page is
1462*4882a593Smuzhiyun 	 * a valid THP. Add an extra VM_HUGEPAGE so hugepage_vma_check()
1463*4882a593Smuzhiyun 	 * will not fail the vma for missing VM_HUGEPAGE.
1464*4882a593Smuzhiyun 	if (!hugepage_vma_check(vma, vma->vm_flags | VM_HUGEPAGE))
1465*4882a593Smuzhiyun 		return;
1466*4882a593Smuzhiyun 
1467*4882a593Smuzhiyun 	/*
1468*4882a593Smuzhiyun 	 * Symmetry with retract_page_tables(): Exclude MAP_PRIVATE mappings
1469*4882a593Smuzhiyun 	 * that got written to. Without this, we'd have to also lock the
1470*4882a593Smuzhiyun 	 * anon_vma if one exists.
1471*4882a593Smuzhiyun 	 */
1472*4882a593Smuzhiyun 	if (vma->anon_vma)
1473*4882a593Smuzhiyun 		return;
1474*4882a593Smuzhiyun 
1475*4882a593Smuzhiyun 	hpage = find_lock_page(vma->vm_file->f_mapping,
1476*4882a593Smuzhiyun 			       linear_page_index(vma, haddr));
1477*4882a593Smuzhiyun 	if (!hpage)
1478*4882a593Smuzhiyun 		return;
1479*4882a593Smuzhiyun 
1480*4882a593Smuzhiyun 	if (!PageHead(hpage))
1481*4882a593Smuzhiyun 		goto drop_hpage;
1482*4882a593Smuzhiyun 
1483*4882a593Smuzhiyun 	pmd = mm_find_pmd(mm, haddr);
1484*4882a593Smuzhiyun 	if (!pmd)
1485*4882a593Smuzhiyun 		goto drop_hpage;
1486*4882a593Smuzhiyun 
1487*4882a593Smuzhiyun 	vm_write_begin(vma);
1488*4882a593Smuzhiyun 
1489*4882a593Smuzhiyun 	/*
1490*4882a593Smuzhiyun 	 * We need to lock the mapping so that from here on, only GUP-fast and
1491*4882a593Smuzhiyun 	 * hardware page walks can access the parts of the page tables that
1492*4882a593Smuzhiyun 	 * we're operating on.
1493*4882a593Smuzhiyun 	 */
1494*4882a593Smuzhiyun 	i_mmap_lock_write(vma->vm_file->f_mapping);
1495*4882a593Smuzhiyun 
1496*4882a593Smuzhiyun 	/*
1497*4882a593Smuzhiyun 	 * This spinlock should be unnecessary: Nobody else should be accessing
1498*4882a593Smuzhiyun 	 * the page tables under spinlock protection here, only
1499*4882a593Smuzhiyun 	 * lockless_pages_from_mm() and the hardware page walker can access page
1500*4882a593Smuzhiyun 	 * tables while all the high-level locks are held in write mode.
1501*4882a593Smuzhiyun 	 */
1502*4882a593Smuzhiyun 	start_pte = pte_offset_map_lock(mm, pmd, haddr, &ptl);
1503*4882a593Smuzhiyun 
1504*4882a593Smuzhiyun 	/* step 1: check all mapped PTEs are to the right huge page */
1505*4882a593Smuzhiyun 	for (i = 0, addr = haddr, pte = start_pte;
1506*4882a593Smuzhiyun 	     i < HPAGE_PMD_NR; i++, addr += PAGE_SIZE, pte++) {
1507*4882a593Smuzhiyun 		struct page *page;
1508*4882a593Smuzhiyun 
1509*4882a593Smuzhiyun 		/* empty pte, skip */
1510*4882a593Smuzhiyun 		if (pte_none(*pte))
1511*4882a593Smuzhiyun 			continue;
1512*4882a593Smuzhiyun 
1513*4882a593Smuzhiyun 		/* page swapped out, abort */
1514*4882a593Smuzhiyun 		if (!pte_present(*pte))
1515*4882a593Smuzhiyun 			goto abort;
1516*4882a593Smuzhiyun 
1517*4882a593Smuzhiyun 		page = vm_normal_page(vma, addr, *pte);
1518*4882a593Smuzhiyun 
1519*4882a593Smuzhiyun 		/*
1520*4882a593Smuzhiyun 		 * Note that uprobe, debugger, or MAP_PRIVATE may change the
1521*4882a593Smuzhiyun 		 * page table, but the new page will not be a subpage of hpage.
1522*4882a593Smuzhiyun 		 */
1523*4882a593Smuzhiyun 		if (hpage + i != page)
1524*4882a593Smuzhiyun 			goto abort;
1525*4882a593Smuzhiyun 		count++;
1526*4882a593Smuzhiyun 	}
1527*4882a593Smuzhiyun 
1528*4882a593Smuzhiyun 	/* step 2: adjust rmap */
1529*4882a593Smuzhiyun 	for (i = 0, addr = haddr, pte = start_pte;
1530*4882a593Smuzhiyun 	     i < HPAGE_PMD_NR; i++, addr += PAGE_SIZE, pte++) {
1531*4882a593Smuzhiyun 		struct page *page;
1532*4882a593Smuzhiyun 
1533*4882a593Smuzhiyun 		if (pte_none(*pte))
1534*4882a593Smuzhiyun 			continue;
1535*4882a593Smuzhiyun 		page = vm_normal_page(vma, addr, *pte);
1536*4882a593Smuzhiyun 		page_remove_rmap(page, false);
1537*4882a593Smuzhiyun 	}
1538*4882a593Smuzhiyun 
1539*4882a593Smuzhiyun 	pte_unmap_unlock(start_pte, ptl);
1540*4882a593Smuzhiyun 
1541*4882a593Smuzhiyun 	/* step 3: set proper refcount and mm_counters. */
1542*4882a593Smuzhiyun 	if (count) {
1543*4882a593Smuzhiyun 		page_ref_sub(hpage, count);
1544*4882a593Smuzhiyun 		add_mm_counter(vma->vm_mm, mm_counter_file(hpage), -count);
1545*4882a593Smuzhiyun 	}
1546*4882a593Smuzhiyun 
1547*4882a593Smuzhiyun 	/* step 4: collapse pmd */
1548*4882a593Smuzhiyun 	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, NULL, mm, haddr,
1549*4882a593Smuzhiyun 				haddr + HPAGE_PMD_SIZE);
1550*4882a593Smuzhiyun 	mmu_notifier_invalidate_range_start(&range);
1551*4882a593Smuzhiyun 	_pmd = pmdp_collapse_flush(vma, haddr, pmd);
1552*4882a593Smuzhiyun 	vm_write_end(vma);
1553*4882a593Smuzhiyun 	mm_dec_nr_ptes(mm);
1554*4882a593Smuzhiyun 	tlb_remove_table_sync_one();
1555*4882a593Smuzhiyun 	mmu_notifier_invalidate_range_end(&range);
1556*4882a593Smuzhiyun 	pte_free(mm, pmd_pgtable(_pmd));
1557*4882a593Smuzhiyun 
1558*4882a593Smuzhiyun 	i_mmap_unlock_write(vma->vm_file->f_mapping);
1559*4882a593Smuzhiyun 
1560*4882a593Smuzhiyun drop_hpage:
1561*4882a593Smuzhiyun 	unlock_page(hpage);
1562*4882a593Smuzhiyun 	put_page(hpage);
1563*4882a593Smuzhiyun 	return;
1564*4882a593Smuzhiyun 
1565*4882a593Smuzhiyun abort:
1566*4882a593Smuzhiyun 	pte_unmap_unlock(start_pte, ptl);
1567*4882a593Smuzhiyun 	vm_write_end(vma);
1568*4882a593Smuzhiyun 	i_mmap_unlock_write(vma->vm_file->f_mapping);
1569*4882a593Smuzhiyun 	goto drop_hpage;
1570*4882a593Smuzhiyun }
1571*4882a593Smuzhiyun 
1572*4882a593Smuzhiyun static int khugepaged_collapse_pte_mapped_thps(struct mm_slot *mm_slot)
1573*4882a593Smuzhiyun {
1574*4882a593Smuzhiyun 	struct mm_struct *mm = mm_slot->mm;
1575*4882a593Smuzhiyun 	int i;
1576*4882a593Smuzhiyun 
1577*4882a593Smuzhiyun 	if (likely(mm_slot->nr_pte_mapped_thp == 0))
1578*4882a593Smuzhiyun 		return 0;
1579*4882a593Smuzhiyun 
1580*4882a593Smuzhiyun 	if (!mmap_write_trylock(mm))
1581*4882a593Smuzhiyun 		return -EBUSY;
1582*4882a593Smuzhiyun 
1583*4882a593Smuzhiyun 	if (unlikely(khugepaged_test_exit(mm)))
1584*4882a593Smuzhiyun 		goto out;
1585*4882a593Smuzhiyun 
1586*4882a593Smuzhiyun 	for (i = 0; i < mm_slot->nr_pte_mapped_thp; i++)
1587*4882a593Smuzhiyun 		collapse_pte_mapped_thp(mm, mm_slot->pte_mapped_thp[i]);
1588*4882a593Smuzhiyun 
1589*4882a593Smuzhiyun out:
1590*4882a593Smuzhiyun 	mm_slot->nr_pte_mapped_thp = 0;
1591*4882a593Smuzhiyun 	mmap_write_unlock(mm);
1592*4882a593Smuzhiyun 	return 0;
1593*4882a593Smuzhiyun }
1594*4882a593Smuzhiyun 
1595*4882a593Smuzhiyun static void retract_page_tables(struct address_space *mapping, pgoff_t pgoff)
1596*4882a593Smuzhiyun {
1597*4882a593Smuzhiyun 	struct vm_area_struct *vma;
1598*4882a593Smuzhiyun 	struct mm_struct *mm;
1599*4882a593Smuzhiyun 	unsigned long addr;
1600*4882a593Smuzhiyun 	pmd_t *pmd, _pmd;
1601*4882a593Smuzhiyun 
1602*4882a593Smuzhiyun 	i_mmap_lock_write(mapping);
1603*4882a593Smuzhiyun 	vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) {
1604*4882a593Smuzhiyun 		/*
1605*4882a593Smuzhiyun 		 * Check vma->anon_vma to exclude MAP_PRIVATE mappings that
1606*4882a593Smuzhiyun 		 * got written to. These VMAs are likely not worth the cost of
1607*4882a593Smuzhiyun 		 * taking mmap_write_lock(mm), as the PMD-mapping is likely to
1608*4882a593Smuzhiyun 		 * be split later anyway.
1609*4882a593Smuzhiyun 		 *
1610*4882a593Smuzhiyun 		 * Note that the vma->anon_vma check is racy: it can be set up
1611*4882a593Smuzhiyun 		 * by the fault path after the check but before we take mmap_lock.
1612*4882a593Smuzhiyun 		 * But page lock would prevent establishing any new ptes of the
1613*4882a593Smuzhiyun 		 * page, so we are safe.
1614*4882a593Smuzhiyun 		 *
1615*4882a593Smuzhiyun 		 * An alternative would be drop the check, but check that page
1616*4882a593Smuzhiyun 		 * table is clear before calling pmdp_collapse_flush() under
1617*4882a593Smuzhiyun 		 * ptl. It has higher chance to recover THP for the VMA, but
1618*4882a593Smuzhiyun 		 * has higher cost too. It would also probably require locking
1619*4882a593Smuzhiyun 		 * the anon_vma.
1620*4882a593Smuzhiyun 		 */
1621*4882a593Smuzhiyun 		if (vma->anon_vma)
1622*4882a593Smuzhiyun 			continue;
1623*4882a593Smuzhiyun 		addr = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
1624*4882a593Smuzhiyun 		if (addr & ~HPAGE_PMD_MASK)
1625*4882a593Smuzhiyun 			continue;
1626*4882a593Smuzhiyun 		if (vma->vm_end < addr + HPAGE_PMD_SIZE)
1627*4882a593Smuzhiyun 			continue;
1628*4882a593Smuzhiyun 		mm = vma->vm_mm;
1629*4882a593Smuzhiyun 		pmd = mm_find_pmd(mm, addr);
1630*4882a593Smuzhiyun 		if (!pmd)
1631*4882a593Smuzhiyun 			continue;
1632*4882a593Smuzhiyun 		/*
1633*4882a593Smuzhiyun 		 * We need exclusive mmap_lock to retract page table.
1634*4882a593Smuzhiyun 		 *
1635*4882a593Smuzhiyun 		 * We use trylock due to lock inversion: we need to acquire
1636*4882a593Smuzhiyun 		 * mmap_lock while holding page lock. Fault path does it in
1637*4882a593Smuzhiyun 		 * reverse order. Trylock is a way to avoid deadlock.
1638*4882a593Smuzhiyun 		 */
1639*4882a593Smuzhiyun 		if (mmap_write_trylock(mm)) {
1640*4882a593Smuzhiyun 			if (!khugepaged_test_exit(mm)) {
1641*4882a593Smuzhiyun 				struct mmu_notifier_range range;
1642*4882a593Smuzhiyun 
1643*4882a593Smuzhiyun 				vm_write_begin(vma);
1644*4882a593Smuzhiyun 				mmu_notifier_range_init(&range,
1645*4882a593Smuzhiyun 							MMU_NOTIFY_CLEAR, 0,
1646*4882a593Smuzhiyun 							NULL, mm, addr,
1647*4882a593Smuzhiyun 							addr + HPAGE_PMD_SIZE);
1648*4882a593Smuzhiyun 				mmu_notifier_invalidate_range_start(&range);
1649*4882a593Smuzhiyun 				/* assume page table is clear */
1650*4882a593Smuzhiyun 				_pmd = pmdp_collapse_flush(vma, addr, pmd);
1651*4882a593Smuzhiyun 				vm_write_end(vma);
1652*4882a593Smuzhiyun 				mm_dec_nr_ptes(mm);
1653*4882a593Smuzhiyun 				tlb_remove_table_sync_one();
1654*4882a593Smuzhiyun 				pte_free(mm, pmd_pgtable(_pmd));
1655*4882a593Smuzhiyun 				mmu_notifier_invalidate_range_end(&range);
1656*4882a593Smuzhiyun 			}
1657*4882a593Smuzhiyun 			mmap_write_unlock(mm);
1658*4882a593Smuzhiyun 		} else {
1659*4882a593Smuzhiyun 			/* Try again later */
1660*4882a593Smuzhiyun 			khugepaged_add_pte_mapped_thp(mm, addr);
1661*4882a593Smuzhiyun 		}
1662*4882a593Smuzhiyun 	}
1663*4882a593Smuzhiyun 	i_mmap_unlock_write(mapping);
1664*4882a593Smuzhiyun }
1665*4882a593Smuzhiyun 
1666*4882a593Smuzhiyun /**
1667*4882a593Smuzhiyun  * collapse_file - collapse filemap/tmpfs/shmem pages into a huge one.
1668*4882a593Smuzhiyun  *
1669*4882a593Smuzhiyun  * Basic scheme is simple, details are more complex:
1670*4882a593Smuzhiyun  *  - allocate and lock a new huge page;
1671*4882a593Smuzhiyun  *  - scan page cache replacing old pages with the new one
1672*4882a593Smuzhiyun  *    + swap/gup in pages if necessary;
1673*4882a593Smuzhiyun  *    + fill in gaps;
1674*4882a593Smuzhiyun  *    + keep old pages around in case rollback is required;
1675*4882a593Smuzhiyun  *  - if replacing succeeds:
1676*4882a593Smuzhiyun  *    + copy data over;
1677*4882a593Smuzhiyun  *    + free old pages;
1678*4882a593Smuzhiyun  *    + unlock huge page;
1679*4882a593Smuzhiyun  *  - if replacing failed:
1680*4882a593Smuzhiyun  *    + put all pages back and unfreeze them;
1681*4882a593Smuzhiyun  *    + restore gaps in the page cache;
1682*4882a593Smuzhiyun  *    + unlock and free huge page;
1683*4882a593Smuzhiyun  */
1684*4882a593Smuzhiyun static void collapse_file(struct mm_struct *mm,
1685*4882a593Smuzhiyun 		struct file *file, pgoff_t start,
1686*4882a593Smuzhiyun 		struct page **hpage, int node)
1687*4882a593Smuzhiyun {
1688*4882a593Smuzhiyun 	struct address_space *mapping = file->f_mapping;
1689*4882a593Smuzhiyun 	gfp_t gfp;
1690*4882a593Smuzhiyun 	struct page *new_page;
1691*4882a593Smuzhiyun 	pgoff_t index, end = start + HPAGE_PMD_NR;
1692*4882a593Smuzhiyun 	LIST_HEAD(pagelist);
1693*4882a593Smuzhiyun 	XA_STATE_ORDER(xas, &mapping->i_pages, start, HPAGE_PMD_ORDER);
1694*4882a593Smuzhiyun 	int nr_none = 0, result = SCAN_SUCCEED;
1695*4882a593Smuzhiyun 	bool is_shmem = shmem_file(file);
1696*4882a593Smuzhiyun 
1697*4882a593Smuzhiyun 	VM_BUG_ON(!IS_ENABLED(CONFIG_READ_ONLY_THP_FOR_FS) && !is_shmem);
1698*4882a593Smuzhiyun 	VM_BUG_ON(start & (HPAGE_PMD_NR - 1));
1699*4882a593Smuzhiyun 
1700*4882a593Smuzhiyun 	/* Only allocate from the target node */
1701*4882a593Smuzhiyun 	gfp = alloc_hugepage_khugepaged_gfpmask() | __GFP_THISNODE;
1702*4882a593Smuzhiyun 
1703*4882a593Smuzhiyun 	new_page = khugepaged_alloc_page(hpage, gfp, node);
1704*4882a593Smuzhiyun 	if (!new_page) {
1705*4882a593Smuzhiyun 		result = SCAN_ALLOC_HUGE_PAGE_FAIL;
1706*4882a593Smuzhiyun 		goto out;
1707*4882a593Smuzhiyun 	}
1708*4882a593Smuzhiyun 
1709*4882a593Smuzhiyun 	if (unlikely(mem_cgroup_charge(new_page, mm, gfp))) {
1710*4882a593Smuzhiyun 		result = SCAN_CGROUP_CHARGE_FAIL;
1711*4882a593Smuzhiyun 		goto out;
1712*4882a593Smuzhiyun 	}
1713*4882a593Smuzhiyun 	count_memcg_page_event(new_page, THP_COLLAPSE_ALLOC);
1714*4882a593Smuzhiyun 
1715*4882a593Smuzhiyun 	/* This will be less messy when we use multi-index entries */
1716*4882a593Smuzhiyun 	do {
1717*4882a593Smuzhiyun 		xas_lock_irq(&xas);
1718*4882a593Smuzhiyun 		xas_create_range(&xas);
1719*4882a593Smuzhiyun 		if (!xas_error(&xas))
1720*4882a593Smuzhiyun 			break;
1721*4882a593Smuzhiyun 		xas_unlock_irq(&xas);
1722*4882a593Smuzhiyun 		if (!xas_nomem(&xas, GFP_KERNEL)) {
1723*4882a593Smuzhiyun 			result = SCAN_FAIL;
1724*4882a593Smuzhiyun 			goto out;
1725*4882a593Smuzhiyun 		}
1726*4882a593Smuzhiyun 	} while (1);
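	/*
	 * The loop above pre-populated the XArray nodes for the whole
	 * range, retrying the allocation with GFP_KERNEL outside the
	 * irq-disabled xa_lock on failure; on success it exits with the
	 * xa_lock still held.
	 */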
1727*4882a593Smuzhiyun 
1728*4882a593Smuzhiyun 	__SetPageLocked(new_page);
1729*4882a593Smuzhiyun 	if (is_shmem)
1730*4882a593Smuzhiyun 		__SetPageSwapBacked(new_page);
1731*4882a593Smuzhiyun 	new_page->index = start;
1732*4882a593Smuzhiyun 	new_page->mapping = mapping;
1733*4882a593Smuzhiyun 
1734*4882a593Smuzhiyun 	/*
1735*4882a593Smuzhiyun 	 * At this point the new_page is locked and not up-to-date.
1736*4882a593Smuzhiyun 	 * It's safe to insert it into the page cache, because nobody would
1737*4882a593Smuzhiyun 	 * be able to map it or use it in another way until we unlock it.
1738*4882a593Smuzhiyun 	 */
1739*4882a593Smuzhiyun 
1740*4882a593Smuzhiyun 	xas_set(&xas, start);
1741*4882a593Smuzhiyun 	for (index = start; index < end; index++) {
1742*4882a593Smuzhiyun 		struct page *page = xas_next(&xas);
1743*4882a593Smuzhiyun 
1744*4882a593Smuzhiyun 		VM_BUG_ON(index != xas.xa_index);
1745*4882a593Smuzhiyun 		if (is_shmem) {
1746*4882a593Smuzhiyun 			if (!page) {
1747*4882a593Smuzhiyun 				/*
1748*4882a593Smuzhiyun 				 * Stop if extent has been truncated or
1749*4882a593Smuzhiyun 				 * hole-punched, and is now completely
1750*4882a593Smuzhiyun 				 * empty.
1751*4882a593Smuzhiyun 				 */
1752*4882a593Smuzhiyun 				if (index == start) {
1753*4882a593Smuzhiyun 					if (!xas_next_entry(&xas, end - 1)) {
1754*4882a593Smuzhiyun 						result = SCAN_TRUNCATED;
1755*4882a593Smuzhiyun 						goto xa_locked;
1756*4882a593Smuzhiyun 					}
1757*4882a593Smuzhiyun 					xas_set(&xas, index);
1758*4882a593Smuzhiyun 				}
1759*4882a593Smuzhiyun 				if (!shmem_charge(mapping->host, 1)) {
1760*4882a593Smuzhiyun 					result = SCAN_FAIL;
1761*4882a593Smuzhiyun 					goto xa_locked;
1762*4882a593Smuzhiyun 				}
1763*4882a593Smuzhiyun 				xas_store(&xas, new_page);
1764*4882a593Smuzhiyun 				nr_none++;
1765*4882a593Smuzhiyun 				continue;
1766*4882a593Smuzhiyun 			}
1767*4882a593Smuzhiyun 
1768*4882a593Smuzhiyun 			if (xa_is_value(page) || !PageUptodate(page)) {
1769*4882a593Smuzhiyun 				xas_unlock_irq(&xas);
1770*4882a593Smuzhiyun 				/* swap in or instantiate fallocated page */
1771*4882a593Smuzhiyun 				if (shmem_getpage(mapping->host, index, &page,
1772*4882a593Smuzhiyun 						  SGP_NOHUGE)) {
1773*4882a593Smuzhiyun 					result = SCAN_FAIL;
1774*4882a593Smuzhiyun 					goto xa_unlocked;
1775*4882a593Smuzhiyun 				}
1776*4882a593Smuzhiyun 			} else if (trylock_page(page)) {
1777*4882a593Smuzhiyun 				get_page(page);
1778*4882a593Smuzhiyun 				xas_unlock_irq(&xas);
1779*4882a593Smuzhiyun 			} else {
1780*4882a593Smuzhiyun 				result = SCAN_PAGE_LOCK;
1781*4882a593Smuzhiyun 				goto xa_locked;
1782*4882a593Smuzhiyun 			}
1783*4882a593Smuzhiyun 		} else {	/* !is_shmem */
1784*4882a593Smuzhiyun 			if (!page || xa_is_value(page)) {
1785*4882a593Smuzhiyun 				xas_unlock_irq(&xas);
1786*4882a593Smuzhiyun 				page_cache_sync_readahead(mapping, &file->f_ra,
1787*4882a593Smuzhiyun 							  file, index,
1788*4882a593Smuzhiyun 							  end - index);
1789*4882a593Smuzhiyun 				/* drain pagevecs to help isolate_lru_page() */
1790*4882a593Smuzhiyun 				lru_add_drain();
1791*4882a593Smuzhiyun 				page = find_lock_page(mapping, index);
1792*4882a593Smuzhiyun 				if (unlikely(page == NULL)) {
1793*4882a593Smuzhiyun 					result = SCAN_FAIL;
1794*4882a593Smuzhiyun 					goto xa_unlocked;
1795*4882a593Smuzhiyun 				}
1796*4882a593Smuzhiyun 			} else if (PageDirty(page)) {
1797*4882a593Smuzhiyun 				/*
1798*4882a593Smuzhiyun 				 * khugepaged only works on read-only fd,
1799*4882a593Smuzhiyun 				 * so this page is dirty because it hasn't
1800*4882a593Smuzhiyun 				 * been flushed since first write. There
1801*4882a593Smuzhiyun 				 * won't be new dirty pages.
1802*4882a593Smuzhiyun 				 *
1803*4882a593Smuzhiyun 				 * Trigger async flush here and hope the
1804*4882a593Smuzhiyun 				 * writeback is done when khugepaged
1805*4882a593Smuzhiyun 				 * revisits this page.
1806*4882a593Smuzhiyun 				 *
1807*4882a593Smuzhiyun 				 * This is a one-off situation. We are not
1808*4882a593Smuzhiyun 				 * forcing writeback in a loop.
1809*4882a593Smuzhiyun 				 */
1810*4882a593Smuzhiyun 				xas_unlock_irq(&xas);
1811*4882a593Smuzhiyun 				filemap_flush(mapping);
1812*4882a593Smuzhiyun 				result = SCAN_FAIL;
1813*4882a593Smuzhiyun 				goto xa_unlocked;
1814*4882a593Smuzhiyun 			} else if (PageWriteback(page)) {
1815*4882a593Smuzhiyun 				xas_unlock_irq(&xas);
1816*4882a593Smuzhiyun 				result = SCAN_FAIL;
1817*4882a593Smuzhiyun 				goto xa_unlocked;
1818*4882a593Smuzhiyun 			} else if (trylock_page(page)) {
1819*4882a593Smuzhiyun 				get_page(page);
1820*4882a593Smuzhiyun 				xas_unlock_irq(&xas);
1821*4882a593Smuzhiyun 			} else {
1822*4882a593Smuzhiyun 				result = SCAN_PAGE_LOCK;
1823*4882a593Smuzhiyun 				goto xa_locked;
1824*4882a593Smuzhiyun 			}
1825*4882a593Smuzhiyun 		}
1826*4882a593Smuzhiyun 
1827*4882a593Smuzhiyun 		/*
1828*4882a593Smuzhiyun 		 * The page must be locked, so we can drop the i_pages lock
1829*4882a593Smuzhiyun 		 * without racing with truncate.
1830*4882a593Smuzhiyun 		 */
1831*4882a593Smuzhiyun 		VM_BUG_ON_PAGE(!PageLocked(page), page);
1832*4882a593Smuzhiyun 
1833*4882a593Smuzhiyun 		/* make sure the page is up to date */
1834*4882a593Smuzhiyun 		if (unlikely(!PageUptodate(page))) {
1835*4882a593Smuzhiyun 			result = SCAN_FAIL;
1836*4882a593Smuzhiyun 			goto out_unlock;
1837*4882a593Smuzhiyun 		}
1838*4882a593Smuzhiyun 
1839*4882a593Smuzhiyun 		/*
1840*4882a593Smuzhiyun 		 * If file was truncated then extended, or hole-punched, before
1841*4882a593Smuzhiyun 		 * we locked the first page, then a THP might be there already.
1842*4882a593Smuzhiyun 		 */
1843*4882a593Smuzhiyun 		if (PageTransCompound(page)) {
1844*4882a593Smuzhiyun 			result = SCAN_PAGE_COMPOUND;
1845*4882a593Smuzhiyun 			goto out_unlock;
1846*4882a593Smuzhiyun 		}
1847*4882a593Smuzhiyun 
1848*4882a593Smuzhiyun 		if (page_mapping(page) != mapping) {
1849*4882a593Smuzhiyun 			result = SCAN_TRUNCATED;
1850*4882a593Smuzhiyun 			goto out_unlock;
1851*4882a593Smuzhiyun 		}
1852*4882a593Smuzhiyun 
1853*4882a593Smuzhiyun 		if (!is_shmem && (PageDirty(page) ||
1854*4882a593Smuzhiyun 				  PageWriteback(page))) {
1855*4882a593Smuzhiyun 			/*
1856*4882a593Smuzhiyun 			 * khugepaged only works on read-only fd, so this
1857*4882a593Smuzhiyun 			 * page is dirty because it hasn't been flushed
1858*4882a593Smuzhiyun 			 * since first write.
1859*4882a593Smuzhiyun 			 */
1860*4882a593Smuzhiyun 			result = SCAN_FAIL;
1861*4882a593Smuzhiyun 			goto out_unlock;
1862*4882a593Smuzhiyun 		}
1863*4882a593Smuzhiyun 
1864*4882a593Smuzhiyun 		if (isolate_lru_page(page)) {
1865*4882a593Smuzhiyun 			result = SCAN_DEL_PAGE_LRU;
1866*4882a593Smuzhiyun 			goto out_unlock;
1867*4882a593Smuzhiyun 		}
1868*4882a593Smuzhiyun 
1869*4882a593Smuzhiyun 		if (page_has_private(page) &&
1870*4882a593Smuzhiyun 		    !try_to_release_page(page, GFP_KERNEL)) {
1871*4882a593Smuzhiyun 			result = SCAN_PAGE_HAS_PRIVATE;
1872*4882a593Smuzhiyun 			putback_lru_page(page);
1873*4882a593Smuzhiyun 			goto out_unlock;
1874*4882a593Smuzhiyun 		}
1875*4882a593Smuzhiyun 
1876*4882a593Smuzhiyun 		if (page_mapped(page))
1877*4882a593Smuzhiyun 			unmap_mapping_pages(mapping, index, 1, false);
1878*4882a593Smuzhiyun 
1879*4882a593Smuzhiyun 		xas_lock_irq(&xas);
1880*4882a593Smuzhiyun 		xas_set(&xas, index);
1881*4882a593Smuzhiyun 
1882*4882a593Smuzhiyun 		VM_BUG_ON_PAGE(page != xas_load(&xas), page);
1883*4882a593Smuzhiyun 		VM_BUG_ON_PAGE(page_mapped(page), page);
1884*4882a593Smuzhiyun 
1885*4882a593Smuzhiyun 		/*
1886*4882a593Smuzhiyun 		 * The page is expected to have page_count() == 3:
1887*4882a593Smuzhiyun 		 *  - we hold a pin on it;
1888*4882a593Smuzhiyun 		 *  - one reference from page cache;
1889*4882a593Smuzhiyun 		 *  - one from isolate_lru_page;
1890*4882a593Smuzhiyun 		 */
1891*4882a593Smuzhiyun 		if (!page_ref_freeze(page, 3)) {
1892*4882a593Smuzhiyun 			result = SCAN_PAGE_COUNT;
1893*4882a593Smuzhiyun 			xas_unlock_irq(&xas);
1894*4882a593Smuzhiyun 			putback_lru_page(page);
1895*4882a593Smuzhiyun 			goto out_unlock;
1896*4882a593Smuzhiyun 		}
1897*4882a593Smuzhiyun 
1898*4882a593Smuzhiyun 		/*
1899*4882a593Smuzhiyun 		 * Add the page to the list to be able to undo the collapse if
1900*4882a593Smuzhiyun 		 * something go wrong.
1901*4882a593Smuzhiyun 		 * something goes wrong.
1902*4882a593Smuzhiyun 		list_add_tail(&page->lru, &pagelist);
1903*4882a593Smuzhiyun 
1904*4882a593Smuzhiyun 		/* Finally, replace with the new page. */
1905*4882a593Smuzhiyun 		xas_store(&xas, new_page);
1906*4882a593Smuzhiyun 		continue;
1907*4882a593Smuzhiyun out_unlock:
1908*4882a593Smuzhiyun 		unlock_page(page);
1909*4882a593Smuzhiyun 		put_page(page);
1910*4882a593Smuzhiyun 		goto xa_unlocked;
1911*4882a593Smuzhiyun 	}
1912*4882a593Smuzhiyun 
1913*4882a593Smuzhiyun 	if (is_shmem)
1914*4882a593Smuzhiyun 		__inc_node_page_state(new_page, NR_SHMEM_THPS);
1915*4882a593Smuzhiyun 	else {
1916*4882a593Smuzhiyun 		__inc_node_page_state(new_page, NR_FILE_THPS);
1917*4882a593Smuzhiyun 		filemap_nr_thps_inc(mapping);
1918*4882a593Smuzhiyun 		/*
1919*4882a593Smuzhiyun 		 * Paired with smp_mb() in do_dentry_open() to ensure
1920*4882a593Smuzhiyun 		 * i_writecount is up to date and the update to nr_thps is
1921*4882a593Smuzhiyun 		 * visible. Ensures the page cache will be truncated if the
1922*4882a593Smuzhiyun 		 * file is opened writable.
1923*4882a593Smuzhiyun 		 */
1924*4882a593Smuzhiyun 		smp_mb();
1925*4882a593Smuzhiyun 		if (inode_is_open_for_write(mapping->host)) {
1926*4882a593Smuzhiyun 			result = SCAN_FAIL;
1927*4882a593Smuzhiyun 			__dec_node_page_state(new_page, NR_FILE_THPS);
1928*4882a593Smuzhiyun 			filemap_nr_thps_dec(mapping);
1929*4882a593Smuzhiyun 			goto xa_locked;
1930*4882a593Smuzhiyun 		}
1931*4882a593Smuzhiyun 	}
1932*4882a593Smuzhiyun 
1933*4882a593Smuzhiyun 	if (nr_none) {
1934*4882a593Smuzhiyun 		__mod_lruvec_page_state(new_page, NR_FILE_PAGES, nr_none);
1935*4882a593Smuzhiyun 		if (is_shmem)
1936*4882a593Smuzhiyun 			__mod_lruvec_page_state(new_page, NR_SHMEM, nr_none);
1937*4882a593Smuzhiyun 	}
1938*4882a593Smuzhiyun 
1939*4882a593Smuzhiyun xa_locked:
1940*4882a593Smuzhiyun 	xas_unlock_irq(&xas);
1941*4882a593Smuzhiyun xa_unlocked:
1942*4882a593Smuzhiyun 
1943*4882a593Smuzhiyun 	if (result == SCAN_SUCCEED) {
1944*4882a593Smuzhiyun 		struct page *page, *tmp;
1945*4882a593Smuzhiyun 
1946*4882a593Smuzhiyun 		/*
1947*4882a593Smuzhiyun 		 * Replacing the old pages with the new one has succeeded; now we
1948*4882a593Smuzhiyun 		 * need to copy the content and free the old pages.
1949*4882a593Smuzhiyun 		 */
1950*4882a593Smuzhiyun 		index = start;
1951*4882a593Smuzhiyun 		list_for_each_entry_safe(page, tmp, &pagelist, lru) {
1952*4882a593Smuzhiyun 			while (index < page->index) {
1953*4882a593Smuzhiyun 				clear_highpage(new_page + (index % HPAGE_PMD_NR));
1954*4882a593Smuzhiyun 				index++;
1955*4882a593Smuzhiyun 			}
1956*4882a593Smuzhiyun 			copy_highpage(new_page + (page->index % HPAGE_PMD_NR),
1957*4882a593Smuzhiyun 					page);
1958*4882a593Smuzhiyun 			list_del(&page->lru);
1959*4882a593Smuzhiyun 			page->mapping = NULL;
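			/*
			 * The old page is out of the page cache and off the
			 * LRU; unfreeze to our last remaining reference so
			 * that put_page() below can free it.
			 */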
1960*4882a593Smuzhiyun 			page_ref_unfreeze(page, 1);
1961*4882a593Smuzhiyun 			ClearPageActive(page);
1962*4882a593Smuzhiyun 			ClearPageUnevictable(page);
1963*4882a593Smuzhiyun 			unlock_page(page);
1964*4882a593Smuzhiyun 			put_page(page);
1965*4882a593Smuzhiyun 			index++;
1966*4882a593Smuzhiyun 		}
1967*4882a593Smuzhiyun 		while (index < end) {
1968*4882a593Smuzhiyun 			clear_highpage(new_page + (index % HPAGE_PMD_NR));
1969*4882a593Smuzhiyun 			index++;
1970*4882a593Smuzhiyun 		}
1971*4882a593Smuzhiyun 
1972*4882a593Smuzhiyun 		SetPageUptodate(new_page);
1973*4882a593Smuzhiyun 		page_ref_add(new_page, HPAGE_PMD_NR - 1);
1974*4882a593Smuzhiyun 		if (is_shmem)
1975*4882a593Smuzhiyun 			set_page_dirty(new_page);
1976*4882a593Smuzhiyun 		lru_cache_add(new_page);
1977*4882a593Smuzhiyun 
1978*4882a593Smuzhiyun 		/*
1979*4882a593Smuzhiyun 		 * Remove pte page tables, so we can re-fault the page as huge.
1980*4882a593Smuzhiyun 		 */
1981*4882a593Smuzhiyun 		retract_page_tables(mapping, start);
1982*4882a593Smuzhiyun 		*hpage = NULL;
1983*4882a593Smuzhiyun 
1984*4882a593Smuzhiyun 		khugepaged_pages_collapsed++;
1985*4882a593Smuzhiyun 	} else {
1986*4882a593Smuzhiyun 		struct page *page;
1987*4882a593Smuzhiyun 
1988*4882a593Smuzhiyun 		/* Something went wrong: roll back page cache changes */
1989*4882a593Smuzhiyun 		xas_lock_irq(&xas);
1990*4882a593Smuzhiyun 		mapping->nrpages -= nr_none;
1991*4882a593Smuzhiyun 
1992*4882a593Smuzhiyun 		if (is_shmem)
1993*4882a593Smuzhiyun 			shmem_uncharge(mapping->host, nr_none);
1994*4882a593Smuzhiyun 
1995*4882a593Smuzhiyun 		xas_set(&xas, start);
1996*4882a593Smuzhiyun 		xas_for_each(&xas, page, end - 1) {
1997*4882a593Smuzhiyun 			page = list_first_entry_or_null(&pagelist,
1998*4882a593Smuzhiyun 					struct page, lru);
1999*4882a593Smuzhiyun 			if (!page || xas.xa_index < page->index) {
2000*4882a593Smuzhiyun 				if (!nr_none)
2001*4882a593Smuzhiyun 					break;
2002*4882a593Smuzhiyun 				nr_none--;
2003*4882a593Smuzhiyun 				/* Put holes back where they were */
2004*4882a593Smuzhiyun 				xas_store(&xas, NULL);
2005*4882a593Smuzhiyun 				continue;
2006*4882a593Smuzhiyun 			}
2007*4882a593Smuzhiyun 
2008*4882a593Smuzhiyun 			VM_BUG_ON_PAGE(page->index != xas.xa_index, page);
2009*4882a593Smuzhiyun 
2010*4882a593Smuzhiyun 			/* Unfreeze the page. */
2011*4882a593Smuzhiyun 			list_del(&page->lru);
2012*4882a593Smuzhiyun 			page_ref_unfreeze(page, 2);
2013*4882a593Smuzhiyun 			xas_store(&xas, page);
2014*4882a593Smuzhiyun 			xas_pause(&xas);
2015*4882a593Smuzhiyun 			xas_unlock_irq(&xas);
2016*4882a593Smuzhiyun 			unlock_page(page);
2017*4882a593Smuzhiyun 			putback_lru_page(page);
2018*4882a593Smuzhiyun 			xas_lock_irq(&xas);
2019*4882a593Smuzhiyun 		}
2020*4882a593Smuzhiyun 		VM_BUG_ON(nr_none);
2021*4882a593Smuzhiyun 		xas_unlock_irq(&xas);
2022*4882a593Smuzhiyun 
2023*4882a593Smuzhiyun 		new_page->mapping = NULL;
2024*4882a593Smuzhiyun 	}
2025*4882a593Smuzhiyun 
2026*4882a593Smuzhiyun 	unlock_page(new_page);
2027*4882a593Smuzhiyun out:
2028*4882a593Smuzhiyun 	VM_BUG_ON(!list_empty(&pagelist));
2029*4882a593Smuzhiyun 	if (!IS_ERR_OR_NULL(*hpage))
2030*4882a593Smuzhiyun 		mem_cgroup_uncharge(*hpage);
2031*4882a593Smuzhiyun 	/* TODO: tracepoints */
2032*4882a593Smuzhiyun }
2033*4882a593Smuzhiyun 
2034*4882a593Smuzhiyun static void khugepaged_scan_file(struct mm_struct *mm,
2035*4882a593Smuzhiyun 		struct file *file, pgoff_t start, struct page **hpage)
2036*4882a593Smuzhiyun {
2037*4882a593Smuzhiyun 	struct page *page = NULL;
2038*4882a593Smuzhiyun 	struct address_space *mapping = file->f_mapping;
2039*4882a593Smuzhiyun 	XA_STATE(xas, &mapping->i_pages, start);
2040*4882a593Smuzhiyun 	int present, swap;
2041*4882a593Smuzhiyun 	int node = NUMA_NO_NODE;
2042*4882a593Smuzhiyun 	int result = SCAN_SUCCEED;
2043*4882a593Smuzhiyun 
2044*4882a593Smuzhiyun 	present = 0;
2045*4882a593Smuzhiyun 	swap = 0;
2046*4882a593Smuzhiyun 	memset(khugepaged_node_load, 0, sizeof(khugepaged_node_load));
2047*4882a593Smuzhiyun 	rcu_read_lock();
2048*4882a593Smuzhiyun 	xas_for_each(&xas, page, start + HPAGE_PMD_NR - 1) {
2049*4882a593Smuzhiyun 		if (xas_retry(&xas, page))
2050*4882a593Smuzhiyun 			continue;
2051*4882a593Smuzhiyun 
2052*4882a593Smuzhiyun 		if (xa_is_value(page)) {
2053*4882a593Smuzhiyun 			if (++swap > khugepaged_max_ptes_swap) {
2054*4882a593Smuzhiyun 				result = SCAN_EXCEED_SWAP_PTE;
2055*4882a593Smuzhiyun 				break;
2056*4882a593Smuzhiyun 			}
2057*4882a593Smuzhiyun 			continue;
2058*4882a593Smuzhiyun 		}
2059*4882a593Smuzhiyun 
2060*4882a593Smuzhiyun 		if (PageTransCompound(page)) {
2061*4882a593Smuzhiyun 			result = SCAN_PAGE_COMPOUND;
2062*4882a593Smuzhiyun 			break;
2063*4882a593Smuzhiyun 		}
2064*4882a593Smuzhiyun 
2065*4882a593Smuzhiyun 		node = page_to_nid(page);
2066*4882a593Smuzhiyun 		if (khugepaged_scan_abort(node)) {
2067*4882a593Smuzhiyun 			result = SCAN_SCAN_ABORT;
2068*4882a593Smuzhiyun 			break;
2069*4882a593Smuzhiyun 		}
2070*4882a593Smuzhiyun 		khugepaged_node_load[node]++;
2071*4882a593Smuzhiyun 
2072*4882a593Smuzhiyun 		if (!PageLRU(page)) {
2073*4882a593Smuzhiyun 			result = SCAN_PAGE_LRU;
2074*4882a593Smuzhiyun 			break;
2075*4882a593Smuzhiyun 		}
2076*4882a593Smuzhiyun 
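		/*
		 * Expected references: one per mapping, one from the page
		 * cache, and possibly one for private data (e.g. buffer
		 * heads).  Anything more means an extra pin such as GUP,
		 * so skip the collapse.
		 */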
2077*4882a593Smuzhiyun 		if (page_count(page) !=
2078*4882a593Smuzhiyun 		    1 + page_mapcount(page) + page_has_private(page)) {
2079*4882a593Smuzhiyun 			result = SCAN_PAGE_COUNT;
2080*4882a593Smuzhiyun 			break;
2081*4882a593Smuzhiyun 		}
2082*4882a593Smuzhiyun 
2083*4882a593Smuzhiyun 		/*
2084*4882a593Smuzhiyun 		 * We probably should check if the page is referenced here, but
2085*4882a593Smuzhiyun 		 * nobody would transfer pte_young() to PageReferenced() for us.
2086*4882a593Smuzhiyun 		 * And rmap walk here is just too costly...
2087*4882a593Smuzhiyun 		 */
2088*4882a593Smuzhiyun 
2089*4882a593Smuzhiyun 		present++;
2090*4882a593Smuzhiyun 
2091*4882a593Smuzhiyun 		if (need_resched()) {
2092*4882a593Smuzhiyun 			xas_pause(&xas);
2093*4882a593Smuzhiyun 			cond_resched_rcu();
2094*4882a593Smuzhiyun 		}
2095*4882a593Smuzhiyun 	}
2096*4882a593Smuzhiyun 	rcu_read_unlock();
2097*4882a593Smuzhiyun 
2098*4882a593Smuzhiyun 	if (result == SCAN_SUCCEED) {
2099*4882a593Smuzhiyun 		if (present < HPAGE_PMD_NR - khugepaged_max_ptes_none) {
2100*4882a593Smuzhiyun 			result = SCAN_EXCEED_NONE_PTE;
2101*4882a593Smuzhiyun 		} else {
2102*4882a593Smuzhiyun 			node = khugepaged_find_target_node();
2103*4882a593Smuzhiyun 			collapse_file(mm, file, start, hpage, node);
2104*4882a593Smuzhiyun 		}
2105*4882a593Smuzhiyun 	}
2106*4882a593Smuzhiyun 
2107*4882a593Smuzhiyun 	/* TODO: tracepoints */
2108*4882a593Smuzhiyun }
2109*4882a593Smuzhiyun #else
2110*4882a593Smuzhiyun static void khugepaged_scan_file(struct mm_struct *mm,
2111*4882a593Smuzhiyun 		struct file *file, pgoff_t start, struct page **hpage)
2112*4882a593Smuzhiyun {
2113*4882a593Smuzhiyun 	BUILD_BUG();
2114*4882a593Smuzhiyun }
2115*4882a593Smuzhiyun 
2116*4882a593Smuzhiyun static int khugepaged_collapse_pte_mapped_thps(struct mm_slot *mm_slot)
2117*4882a593Smuzhiyun {
2118*4882a593Smuzhiyun 	return 0;
2119*4882a593Smuzhiyun }
2120*4882a593Smuzhiyun #endif
2121*4882a593Smuzhiyun 
2122*4882a593Smuzhiyun static unsigned int khugepaged_scan_mm_slot(unsigned int pages,
2123*4882a593Smuzhiyun 					    struct page **hpage)
2124*4882a593Smuzhiyun 	__releases(&khugepaged_mm_lock)
2125*4882a593Smuzhiyun 	__acquires(&khugepaged_mm_lock)
2126*4882a593Smuzhiyun {
2127*4882a593Smuzhiyun 	struct mm_slot *mm_slot;
2128*4882a593Smuzhiyun 	struct mm_struct *mm;
2129*4882a593Smuzhiyun 	struct vm_area_struct *vma;
2130*4882a593Smuzhiyun 	int progress = 0;
2131*4882a593Smuzhiyun 
2132*4882a593Smuzhiyun 	VM_BUG_ON(!pages);
2133*4882a593Smuzhiyun 	lockdep_assert_held(&khugepaged_mm_lock);
2134*4882a593Smuzhiyun 
2135*4882a593Smuzhiyun 	if (khugepaged_scan.mm_slot)
2136*4882a593Smuzhiyun 		mm_slot = khugepaged_scan.mm_slot;
2137*4882a593Smuzhiyun 	else {
2138*4882a593Smuzhiyun 		mm_slot = list_entry(khugepaged_scan.mm_head.next,
2139*4882a593Smuzhiyun 				     struct mm_slot, mm_node);
2140*4882a593Smuzhiyun 		khugepaged_scan.address = 0;
2141*4882a593Smuzhiyun 		khugepaged_scan.mm_slot = mm_slot;
2142*4882a593Smuzhiyun 	}
2143*4882a593Smuzhiyun 	spin_unlock(&khugepaged_mm_lock);
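	/*
	 * First retry the pte-mapped THPs that retract_page_tables()
	 * queued earlier when it could not take the mmap_lock.
	 */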
2144*4882a593Smuzhiyun 	khugepaged_collapse_pte_mapped_thps(mm_slot);
2145*4882a593Smuzhiyun 
2146*4882a593Smuzhiyun 	mm = mm_slot->mm;
2147*4882a593Smuzhiyun 	/*
2148*4882a593Smuzhiyun 	 * Don't wait for semaphore (to avoid long wait times).  Just move to
2149*4882a593Smuzhiyun 	 * the next mm on the list.
2150*4882a593Smuzhiyun 	 */
2151*4882a593Smuzhiyun 	vma = NULL;
2152*4882a593Smuzhiyun 	if (unlikely(!mmap_read_trylock(mm)))
2153*4882a593Smuzhiyun 		goto breakouterloop_mmap_lock;
2154*4882a593Smuzhiyun 	if (likely(!khugepaged_test_exit(mm)))
2155*4882a593Smuzhiyun 		vma = find_vma(mm, khugepaged_scan.address);
2156*4882a593Smuzhiyun 
2157*4882a593Smuzhiyun 	progress++;
2158*4882a593Smuzhiyun 	for (; vma; vma = vma->vm_next) {
2159*4882a593Smuzhiyun 		unsigned long hstart, hend;
2160*4882a593Smuzhiyun 
2161*4882a593Smuzhiyun 		cond_resched();
2162*4882a593Smuzhiyun 		if (unlikely(khugepaged_test_exit(mm))) {
2163*4882a593Smuzhiyun 			progress++;
2164*4882a593Smuzhiyun 			break;
2165*4882a593Smuzhiyun 		}
2166*4882a593Smuzhiyun 		if (!hugepage_vma_check(vma, vma->vm_flags)) {
2167*4882a593Smuzhiyun skip:
2168*4882a593Smuzhiyun 			progress++;
2169*4882a593Smuzhiyun 			continue;
2170*4882a593Smuzhiyun 		}
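		/*
		 * Round the VMA start up and its end down to PMD boundaries;
		 * only the aligned range in between is a candidate for
		 * collapse.
		 */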
2171*4882a593Smuzhiyun 		hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
2172*4882a593Smuzhiyun 		hend = vma->vm_end & HPAGE_PMD_MASK;
2173*4882a593Smuzhiyun 		if (hstart >= hend)
2174*4882a593Smuzhiyun 			goto skip;
2175*4882a593Smuzhiyun 		if (khugepaged_scan.address > hend)
2176*4882a593Smuzhiyun 			goto skip;
2177*4882a593Smuzhiyun 		if (khugepaged_scan.address < hstart)
2178*4882a593Smuzhiyun 			khugepaged_scan.address = hstart;
2179*4882a593Smuzhiyun 		VM_BUG_ON(khugepaged_scan.address & ~HPAGE_PMD_MASK);
2180*4882a593Smuzhiyun 		if (shmem_file(vma->vm_file) && !shmem_huge_enabled(vma))
2181*4882a593Smuzhiyun 			goto skip;
2182*4882a593Smuzhiyun 
2183*4882a593Smuzhiyun 		while (khugepaged_scan.address < hend) {
2184*4882a593Smuzhiyun 			int ret;
2185*4882a593Smuzhiyun 			cond_resched();
2186*4882a593Smuzhiyun 			if (unlikely(khugepaged_test_exit(mm)))
2187*4882a593Smuzhiyun 				goto breakouterloop;
2188*4882a593Smuzhiyun 
2189*4882a593Smuzhiyun 			VM_BUG_ON(khugepaged_scan.address < hstart ||
2190*4882a593Smuzhiyun 				  khugepaged_scan.address + HPAGE_PMD_SIZE >
2191*4882a593Smuzhiyun 				  hend);
2192*4882a593Smuzhiyun 			if (IS_ENABLED(CONFIG_SHMEM) && vma->vm_file) {
2193*4882a593Smuzhiyun 				struct file *file = get_file(vma->vm_file);
2194*4882a593Smuzhiyun 				pgoff_t pgoff = linear_page_index(vma,
2195*4882a593Smuzhiyun 						khugepaged_scan.address);
2196*4882a593Smuzhiyun 
2197*4882a593Smuzhiyun 				mmap_read_unlock(mm);
2198*4882a593Smuzhiyun 				ret = 1;
2199*4882a593Smuzhiyun 				khugepaged_scan_file(mm, file, pgoff, hpage);
2200*4882a593Smuzhiyun 				fput(file);
2201*4882a593Smuzhiyun 			} else {
2202*4882a593Smuzhiyun 				ret = khugepaged_scan_pmd(mm, vma,
2203*4882a593Smuzhiyun 						khugepaged_scan.address,
2204*4882a593Smuzhiyun 						hpage);
2205*4882a593Smuzhiyun 			}
2206*4882a593Smuzhiyun 			/* move to next address */
2207*4882a593Smuzhiyun 			khugepaged_scan.address += HPAGE_PMD_SIZE;
2208*4882a593Smuzhiyun 			progress += HPAGE_PMD_NR;
2209*4882a593Smuzhiyun 			if (ret)
2210*4882a593Smuzhiyun 				/* we released mmap_lock so break loop */
2211*4882a593Smuzhiyun 				goto breakouterloop_mmap_lock;
2212*4882a593Smuzhiyun 			if (progress >= pages)
2213*4882a593Smuzhiyun 				goto breakouterloop;
2214*4882a593Smuzhiyun 		}
2215*4882a593Smuzhiyun 	}
2216*4882a593Smuzhiyun breakouterloop:
2217*4882a593Smuzhiyun 	mmap_read_unlock(mm); /* exit_mmap will destroy ptes after this */
2218*4882a593Smuzhiyun breakouterloop_mmap_lock:
2219*4882a593Smuzhiyun 
2220*4882a593Smuzhiyun 	spin_lock(&khugepaged_mm_lock);
2221*4882a593Smuzhiyun 	VM_BUG_ON(khugepaged_scan.mm_slot != mm_slot);
2222*4882a593Smuzhiyun 	/*
2223*4882a593Smuzhiyun 	 * Release the current mm_slot if this mm is about to die, or
2224*4882a593Smuzhiyun 	 * if we scanned all vmas of this mm.
2225*4882a593Smuzhiyun 	 */
2226*4882a593Smuzhiyun 	if (khugepaged_test_exit(mm) || !vma) {
2227*4882a593Smuzhiyun 		/*
2228*4882a593Smuzhiyun 		 * Make sure that if mm_users is reaching zero while
2229*4882a593Smuzhiyun 		 * khugepaged runs here, khugepaged_exit will find
2230*4882a593Smuzhiyun 		 * mm_slot not pointing to the exiting mm.
2231*4882a593Smuzhiyun 		 */
2232*4882a593Smuzhiyun 		if (mm_slot->mm_node.next != &khugepaged_scan.mm_head) {
2233*4882a593Smuzhiyun 			khugepaged_scan.mm_slot = list_entry(
2234*4882a593Smuzhiyun 				mm_slot->mm_node.next,
2235*4882a593Smuzhiyun 				struct mm_slot, mm_node);
2236*4882a593Smuzhiyun 			khugepaged_scan.address = 0;
2237*4882a593Smuzhiyun 		} else {
2238*4882a593Smuzhiyun 			khugepaged_scan.mm_slot = NULL;
2239*4882a593Smuzhiyun 			khugepaged_full_scans++;
2240*4882a593Smuzhiyun 		}
2241*4882a593Smuzhiyun 
2242*4882a593Smuzhiyun 		collect_mm_slot(mm_slot);
2243*4882a593Smuzhiyun 	}
2244*4882a593Smuzhiyun 
2245*4882a593Smuzhiyun 	return progress;
2246*4882a593Smuzhiyun }
2247*4882a593Smuzhiyun 
2248*4882a593Smuzhiyun static int khugepaged_has_work(void)
2249*4882a593Smuzhiyun {
2250*4882a593Smuzhiyun 	return !list_empty(&khugepaged_scan.mm_head) &&
2251*4882a593Smuzhiyun 		khugepaged_enabled();
2252*4882a593Smuzhiyun }
2253*4882a593Smuzhiyun 
2254*4882a593Smuzhiyun static int khugepaged_wait_event(void)
2255*4882a593Smuzhiyun {
2256*4882a593Smuzhiyun 	return !list_empty(&khugepaged_scan.mm_head) ||
2257*4882a593Smuzhiyun 		kthread_should_stop();
2258*4882a593Smuzhiyun }
2259*4882a593Smuzhiyun 
2260*4882a593Smuzhiyun static void khugepaged_do_scan(void)
2261*4882a593Smuzhiyun {
2262*4882a593Smuzhiyun 	struct page *hpage = NULL;
2263*4882a593Smuzhiyun 	unsigned int progress = 0, pass_through_head = 0;
2264*4882a593Smuzhiyun 	unsigned int pages = khugepaged_pages_to_scan;
2265*4882a593Smuzhiyun 	bool wait = true;
2266*4882a593Smuzhiyun 
2267*4882a593Smuzhiyun 	barrier(); /* write khugepaged_pages_to_scan to local stack */
2268*4882a593Smuzhiyun 
2269*4882a593Smuzhiyun 	lru_add_drain_all();
2270*4882a593Smuzhiyun 
2271*4882a593Smuzhiyun 	while (progress < pages) {
2272*4882a593Smuzhiyun 		if (!khugepaged_prealloc_page(&hpage, &wait))
2273*4882a593Smuzhiyun 			break;
2274*4882a593Smuzhiyun 
2275*4882a593Smuzhiyun 		cond_resched();
2276*4882a593Smuzhiyun 
2277*4882a593Smuzhiyun 		if (unlikely(kthread_should_stop() || try_to_freeze()))
2278*4882a593Smuzhiyun 			break;
2279*4882a593Smuzhiyun 
2280*4882a593Smuzhiyun 		spin_lock(&khugepaged_mm_lock);
2281*4882a593Smuzhiyun 		if (!khugepaged_scan.mm_slot)
2282*4882a593Smuzhiyun 			pass_through_head++;
2283*4882a593Smuzhiyun 		if (khugepaged_has_work() &&
2284*4882a593Smuzhiyun 		    pass_through_head < 2)
2285*4882a593Smuzhiyun 			progress += khugepaged_scan_mm_slot(pages - progress,
2286*4882a593Smuzhiyun 							    &hpage);
2287*4882a593Smuzhiyun 		else
2288*4882a593Smuzhiyun 			progress = pages;
2289*4882a593Smuzhiyun 		spin_unlock(&khugepaged_mm_lock);
2290*4882a593Smuzhiyun 	}
2291*4882a593Smuzhiyun 
2292*4882a593Smuzhiyun 	if (!IS_ERR_OR_NULL(hpage))
2293*4882a593Smuzhiyun 		put_page(hpage);
2294*4882a593Smuzhiyun }
2295*4882a593Smuzhiyun 
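/*
 * Condition for ending the timed sleep in khugepaged_wait_work():
 * the thread is being stopped, or the scan deadline
 * (khugepaged_sleep_expire) has passed.
 */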
static bool khugepaged_should_wakeup(void)
{
	return kthread_should_stop() ||
	       time_after_eq(jiffies, khugepaged_sleep_expire);
}

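/*
 * Sleep between scan passes. With work pending, sleep for
 * scan_sleep_millisecs (zero means rescan immediately); with nothing
 * queued, block until an mm is registered or the thread is stopped.
 */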
static void khugepaged_wait_work(void)
{
	if (khugepaged_has_work()) {
		const unsigned long scan_sleep_jiffies =
			msecs_to_jiffies(khugepaged_scan_sleep_millisecs);

		if (!scan_sleep_jiffies)
			return;

		khugepaged_sleep_expire = jiffies + scan_sleep_jiffies;
		wait_event_freezable_timeout(khugepaged_wait,
					     khugepaged_should_wakeup(),
					     scan_sleep_jiffies);
		return;
	}

	if (khugepaged_enabled())
		wait_event_freezable(khugepaged_wait, khugepaged_wait_event());
}

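/*
 * Main loop of the khugepaged kernel thread: alternate scan passes and
 * sleeps until kthread_stop(). On exit, detach the scan cursor from its
 * current mm_slot so no stale pointer is left behind.
 */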
static int khugepaged(void *none)
{
	struct mm_slot *mm_slot;

	set_freezable();
	set_user_nice(current, MAX_NICE);

	while (!kthread_should_stop()) {
		khugepaged_do_scan();
		khugepaged_wait_work();
	}

	spin_lock(&khugepaged_mm_lock);
	mm_slot = khugepaged_scan.mm_slot;
	khugepaged_scan.mm_slot = NULL;
	if (mm_slot)
		collect_mm_slot(mm_slot);
	spin_unlock(&khugepaged_mm_lock);
	return 0;
}

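/*
 * Raise min_free_kbytes so anti-fragmentation keeps working under THP
 * allocation pressure: per populated zone usable by GFP_USER, reserve
 * two free pageblocks plus MIGRATE_PCPTYPES^2 pageblocks of fallback
 * headroom, capped at 5% of lowmem.
 *
 * Rough worked example, assuming typical x86-64 values (4KiB pages,
 * 2MiB pageblocks so pageblock_nr_pages == 512, three populated zones,
 * MIGRATE_PCPTYPES == 3):
 *
 *	base     = 512 * 3 * 2     =  3072 pages
 *	headroom = 512 * 3 * 3 * 3 = 13824 pages
 *	total    = 16896 pages << (PAGE_SHIFT - 10) = 67584 kB (~66MiB),
 *	before the 5%-of-lowmem cap is applied.
 */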
static void set_recommended_min_free_kbytes(void)
{
	struct zone *zone;
	int nr_zones = 0;
	unsigned long recommended_min;

	for_each_populated_zone(zone) {
		/*
		 * We don't need to worry about fragmentation of
		 * ZONE_MOVABLE since it only has movable pages.
		 */
		if (zone_idx(zone) > gfp_zone(GFP_USER))
			continue;

		nr_zones++;
	}

	/* Ensure 2 pageblocks are free to assist fragmentation avoidance */
	recommended_min = pageblock_nr_pages * nr_zones * 2;

	/*
	 * Make sure that on average at least two pageblocks are almost free
	 * of another type, one for a migratetype to fall back to and a
	 * second to avoid subsequent fallbacks of other types. There are 3
	 * MIGRATE_TYPES we care about.
	 */
	recommended_min += pageblock_nr_pages * nr_zones *
			   MIGRATE_PCPTYPES * MIGRATE_PCPTYPES;

	/* never allow reserving more than 5% of the lowmem */
	recommended_min = min(recommended_min,
			      (unsigned long) nr_free_buffer_pages() / 20);
	recommended_min <<= (PAGE_SHIFT-10);

	if (recommended_min > min_free_kbytes) {
		if (user_min_free_kbytes >= 0)
			pr_info("raising min_free_kbytes from %d to %lu to help transparent hugepage allocations\n",
				min_free_kbytes, recommended_min);

		min_free_kbytes = recommended_min;
	}
	setup_per_zone_wmarks();
}

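/*
 * Bring the khugepaged thread in line with khugepaged_enabled(): spawn
 * it (and raise min_free_kbytes) when enabling, stop it when disabling.
 * Serialized by khugepaged_mutex.
 */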
int start_stop_khugepaged(void)
{
	int err = 0;

	mutex_lock(&khugepaged_mutex);
	if (khugepaged_enabled()) {
		if (!khugepaged_thread)
			khugepaged_thread = kthread_run(khugepaged, NULL,
							"khugepaged");
		if (IS_ERR(khugepaged_thread)) {
			pr_err("khugepaged: kthread_run(khugepaged) failed\n");
			err = PTR_ERR(khugepaged_thread);
			khugepaged_thread = NULL;
			goto fail;
		}

		if (!list_empty(&khugepaged_scan.mm_head))
			wake_up_interruptible(&khugepaged_wait);

		set_recommended_min_free_kbytes();
	} else if (khugepaged_thread) {
		kthread_stop(khugepaged_thread);
		khugepaged_thread = NULL;
	}
fail:
	mutex_unlock(&khugepaged_mutex);
	return err;
}

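/*
 * Re-apply the khugepaged watermark bump after the watermarks have been
 * recalculated elsewhere (e.g. a min_free_kbytes write), but only while
 * khugepaged is actually running.
 */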
void khugepaged_min_free_kbytes_update(void)
{
	mutex_lock(&khugepaged_mutex);
	if (khugepaged_enabled() && khugepaged_thread)
		set_recommended_min_free_kbytes();
	mutex_unlock(&khugepaged_mutex);
}