// SPDX-License-Identifier: GPL-2.0-only
/*
 * Generic hugetlb support.
 * (C) Nadia Yvette Chambers, April 2004
 */
#include <linux/list.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/seq_file.h>
#include <linux/sysctl.h>
#include <linux/highmem.h>
#include <linux/mmu_notifier.h>
#include <linux/nodemask.h>
#include <linux/pagemap.h>
#include <linux/mempolicy.h>
#include <linux/compiler.h>
#include <linux/cpuset.h>
#include <linux/mutex.h>
#include <linux/memblock.h>
#include <linux/sysfs.h>
#include <linux/slab.h>
#include <linux/sched/mm.h>
#include <linux/mmdebug.h>
#include <linux/sched/signal.h>
#include <linux/rmap.h>
#include <linux/string_helpers.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/jhash.h>
#include <linux/numa.h>
#include <linux/llist.h>
#include <linux/cma.h>

#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>

#include <linux/io.h>
#include <linux/hugetlb.h>
#include <linux/hugetlb_cgroup.h>
#include <linux/node.h>
#include <linux/page_owner.h>
#include "internal.h"

int hugetlb_max_hstate __read_mostly;
unsigned int default_hstate_idx;
struct hstate hstates[HUGE_MAX_HSTATE];

#ifdef CONFIG_CMA
static struct cma *hugetlb_cma[MAX_NUMNODES];
#endif
static unsigned long hugetlb_cma_size __initdata;

/*
 * Minimum page order among possible hugepage sizes, set to a proper value
 * at boot time.
 */
static unsigned int minimum_order __read_mostly = UINT_MAX;

__initdata LIST_HEAD(huge_boot_pages);

/* for command line parsing */
static struct hstate * __initdata parsed_hstate;
static unsigned long __initdata default_hstate_max_huge_pages;
static bool __initdata parsed_valid_hugepagesz = true;
static bool __initdata parsed_default_hugepagesz;

/*
 * Protects updates to hugepage_freelists, hugepage_activelist, nr_huge_pages,
 * free_huge_pages, and surplus_huge_pages.
 */
DEFINE_SPINLOCK(hugetlb_lock);

/*
 * Serializes faults on the same logical page.  This is used to
 * prevent spurious OOMs when the hugepage pool is fully utilized.
 */
static int num_fault_mutexes;
struct mutex *hugetlb_fault_mutex_table ____cacheline_aligned_in_smp;
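
/*
 * Illustrative sketch only (the actual hashing helper used by the fault
 * path lives elsewhere in this file): a fault on a given (mapping, index)
 * pair picks one mutex from the table, so faults on the same logical page
 * serialize while faults on different pages proceed in parallel.  The
 * variable names below are hypothetical:
 *
 *      unsigned long key[2] = { (unsigned long)mapping, idx };
 *      u32 hash = jhash2((u32 *)key, sizeof(key) / sizeof(u32), 0);
 *      mutex_lock(&hugetlb_fault_mutex_table[hash % num_fault_mutexes]);
 */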

static inline bool PageHugeFreed(struct page *head)
{
        return page_private(head + 4) == -1UL;
}

static inline void SetPageHugeFreed(struct page *head)
{
        set_page_private(head + 4, -1UL);
}

static inline void ClearPageHugeFreed(struct page *head)
{
        set_page_private(head + 4, 0);
}

/* Forward declaration */
static int hugetlb_acct_memory(struct hstate *h, long delta);

static inline void unlock_or_release_subpool(struct hugepage_subpool *spool)
{
        bool free = (spool->count == 0) && (spool->used_hpages == 0);

        spin_unlock(&spool->lock);

        /* If no pages are used, and no other handles to the subpool
         * remain, give up any reservations based on minimum size and
         * free the subpool */
        if (free) {
                if (spool->min_hpages != -1)
                        hugetlb_acct_memory(spool->hstate,
                                                -spool->min_hpages);
                kfree(spool);
        }
}

struct hugepage_subpool *hugepage_new_subpool(struct hstate *h, long max_hpages,
                                                long min_hpages)
{
        struct hugepage_subpool *spool;

        spool = kzalloc(sizeof(*spool), GFP_KERNEL);
        if (!spool)
                return NULL;

        spin_lock_init(&spool->lock);
        spool->count = 1;
        spool->max_hpages = max_hpages;
        spool->hstate = h;
        spool->min_hpages = min_hpages;

        if (min_hpages != -1 && hugetlb_acct_memory(h, min_hpages)) {
                kfree(spool);
                return NULL;
        }
        spool->rsv_hpages = min_hpages;

        return spool;
}

void hugepage_put_subpool(struct hugepage_subpool *spool)
{
        spin_lock(&spool->lock);
        BUG_ON(!spool->count);
        spool->count--;
        unlock_or_release_subpool(spool);
}

/*
 * Subpool accounting for allocating and reserving pages.
 * Return -ENOMEM if there are not enough resources to satisfy the
 * request.  Otherwise, return the number of pages by which the
 * global pools must be adjusted (upward).  The returned value may
 * only be different than the passed value (delta) in the case where
 * a subpool minimum size must be maintained.
 */
static long hugepage_subpool_get_pages(struct hugepage_subpool *spool,
                                       long delta)
{
        long ret = delta;

        if (!spool)
                return ret;

        spin_lock(&spool->lock);

        if (spool->max_hpages != -1) {          /* maximum size accounting */
                if ((spool->used_hpages + delta) <= spool->max_hpages)
                        spool->used_hpages += delta;
                else {
                        ret = -ENOMEM;
                        goto unlock_ret;
                }
        }

        /* minimum size accounting */
        if (spool->min_hpages != -1 && spool->rsv_hpages) {
                if (delta > spool->rsv_hpages) {
                        /*
                         * Asking for more reserves than those already taken on
                         * behalf of subpool.  Return difference.
                         */
                        ret = delta - spool->rsv_hpages;
                        spool->rsv_hpages = 0;
                } else {
                        ret = 0;        /* reserves already accounted for */
                        spool->rsv_hpages -= delta;
                }
        }

unlock_ret:
        spin_unlock(&spool->lock);
        return ret;
}
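
/*
 * Worked example of the minimum-size accounting above (illustrative
 * values): a subpool created with min_hpages = 10 starts with
 * rsv_hpages = 10.  A request for delta = 4 is satisfied entirely from
 * the reserve (rsv_hpages becomes 6, return 0, no global adjustment).
 * A later request for delta = 8 exceeds the remaining reserve, so the
 * global pools must grow by 8 - 6 = 2 (rsv_hpages becomes 0, return 2).
 */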

/*
 * Subpool accounting for freeing and unreserving pages.
 * Return the number of global page reservations that must be dropped.
 * The return value may only be different than the passed value (delta)
 * in the case where a subpool minimum size must be maintained.
 */
static long hugepage_subpool_put_pages(struct hugepage_subpool *spool,
                                       long delta)
{
        long ret = delta;

        if (!spool)
                return delta;

        spin_lock(&spool->lock);

        if (spool->max_hpages != -1)            /* maximum size accounting */
                spool->used_hpages -= delta;

        /* minimum size accounting */
        if (spool->min_hpages != -1 && spool->used_hpages < spool->min_hpages) {
                if (spool->rsv_hpages + delta <= spool->min_hpages)
                        ret = 0;
                else
                        ret = spool->rsv_hpages + delta - spool->min_hpages;

                spool->rsv_hpages += delta;
                if (spool->rsv_hpages > spool->min_hpages)
                        spool->rsv_hpages = spool->min_hpages;
        }

        /*
         * If hugetlbfs_put_super couldn't free spool due to an outstanding
         * quota reference, free it now.
         */
        unlock_or_release_subpool(spool);

        return ret;
}
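
/*
 * Worked example of the put path above (illustrative values): with
 * min_hpages = 10, rsv_hpages = 6 and used_hpages below the minimum,
 * freeing delta = 3 pages refills the reserve to 9 and returns 0 (no
 * global reservations dropped).  Freeing delta = 7 instead would
 * overshoot the minimum by 6 + 7 - 10 = 3, so 3 global reservations are
 * dropped and rsv_hpages is capped at min_hpages.
 */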

static inline struct hugepage_subpool *subpool_inode(struct inode *inode)
{
        return HUGETLBFS_SB(inode->i_sb)->spool;
}

static inline struct hugepage_subpool *subpool_vma(struct vm_area_struct *vma)
{
        return subpool_inode(file_inode(vma->vm_file));
}

/* Helper that removes a struct file_region from the resv_map cache and returns
 * it for use.
 */
static struct file_region *
get_file_region_entry_from_cache(struct resv_map *resv, long from, long to)
{
        struct file_region *nrg = NULL;

        VM_BUG_ON(resv->region_cache_count <= 0);

        resv->region_cache_count--;
        nrg = list_first_entry(&resv->region_cache, struct file_region, link);
        list_del(&nrg->link);

        nrg->from = from;
        nrg->to = to;

        return nrg;
}

static void copy_hugetlb_cgroup_uncharge_info(struct file_region *nrg,
                                              struct file_region *rg)
{
#ifdef CONFIG_CGROUP_HUGETLB
        nrg->reservation_counter = rg->reservation_counter;
        nrg->css = rg->css;
        if (rg->css)
                css_get(rg->css);
#endif
}

/* Helper that records hugetlb_cgroup uncharge info. */
static void record_hugetlb_cgroup_uncharge_info(struct hugetlb_cgroup *h_cg,
                                                struct hstate *h,
                                                struct resv_map *resv,
                                                struct file_region *nrg)
{
#ifdef CONFIG_CGROUP_HUGETLB
        if (h_cg) {
                nrg->reservation_counter =
                        &h_cg->rsvd_hugepage[hstate_index(h)];
                nrg->css = &h_cg->css;
                /*
                 * The caller will hold exactly one h_cg->css reference for the
                 * whole contiguous reservation region. But this area might be
                 * scattered when there are already some file_regions reside in
                 * it. As a result, many file_regions may share only one css
                 * reference. In order to ensure that one file_region must hold
                 * exactly one h_cg->css reference, we should do css_get for
                 * each file_region and leave the reference held by caller
                 * untouched.
                 */
                css_get(&h_cg->css);
                if (!resv->pages_per_hpage)
                        resv->pages_per_hpage = pages_per_huge_page(h);
                /* pages_per_hpage should be the same for all entries in
                 * a resv_map.
                 */
                VM_BUG_ON(resv->pages_per_hpage != pages_per_huge_page(h));
        } else {
                nrg->reservation_counter = NULL;
                nrg->css = NULL;
        }
#endif
}

static void put_uncharge_info(struct file_region *rg)
{
#ifdef CONFIG_CGROUP_HUGETLB
        if (rg->css)
                css_put(rg->css);
#endif
}

static bool has_same_uncharge_info(struct file_region *rg,
                                   struct file_region *org)
{
#ifdef CONFIG_CGROUP_HUGETLB
        return rg && org &&
               rg->reservation_counter == org->reservation_counter &&
               rg->css == org->css;

#else
        return true;
#endif
}

static void coalesce_file_region(struct resv_map *resv, struct file_region *rg)
{
        struct file_region *nrg = NULL, *prg = NULL;

        prg = list_prev_entry(rg, link);
        if (&prg->link != &resv->regions && prg->to == rg->from &&
            has_same_uncharge_info(prg, rg)) {
                prg->to = rg->to;

                list_del(&rg->link);
                put_uncharge_info(rg);
                kfree(rg);

                rg = prg;
        }

        nrg = list_next_entry(rg, link);
        if (&nrg->link != &resv->regions && nrg->from == rg->to &&
            has_same_uncharge_info(nrg, rg)) {
                nrg->from = rg->from;

                list_del(&rg->link);
                put_uncharge_info(rg);
                kfree(rg);
        }
}

/*
 * Must be called with resv->lock held.
 *
 * Calling this with regions_needed != NULL will count the number of pages
 * to be added but will not modify the linked list. And regions_needed will
 * indicate the number of file_regions needed in the cache to carry out the
 * add of the regions for this range.
 */
static long add_reservation_in_range(struct resv_map *resv, long f, long t,
                                     struct hugetlb_cgroup *h_cg,
                                     struct hstate *h, long *regions_needed)
{
        long add = 0;
        struct list_head *head = &resv->regions;
        long last_accounted_offset = f;
        struct file_region *rg = NULL, *trg = NULL, *nrg = NULL;

        if (regions_needed)
                *regions_needed = 0;

        /* In this loop, we essentially handle an entry for the range
         * [last_accounted_offset, rg->from), at every iteration, with some
         * bounds checking.
         */
        list_for_each_entry_safe(rg, trg, head, link) {
                /* Skip irrelevant regions that start before our range. */
                if (rg->from < f) {
                        /* If this region ends after the last accounted offset,
                         * then we need to update last_accounted_offset.
                         */
                        if (rg->to > last_accounted_offset)
                                last_accounted_offset = rg->to;
                        continue;
                }

                /* When we find a region that starts beyond our range, we've
                 * finished.
                 */
                if (rg->from > t)
                        break;

                /* Add an entry for last_accounted_offset -> rg->from, and
                 * update last_accounted_offset.
                 */
                if (rg->from > last_accounted_offset) {
                        add += rg->from - last_accounted_offset;
                        if (!regions_needed) {
                                nrg = get_file_region_entry_from_cache(
                                        resv, last_accounted_offset, rg->from);
                                record_hugetlb_cgroup_uncharge_info(h_cg, h,
                                                                    resv, nrg);
                                list_add(&nrg->link, rg->link.prev);
                                coalesce_file_region(resv, nrg);
                        } else
                                *regions_needed += 1;
                }

                last_accounted_offset = rg->to;
        }

        /* Handle the case where our range extends beyond
         * last_accounted_offset.
         */
        if (last_accounted_offset < t) {
                add += t - last_accounted_offset;
                if (!regions_needed) {
                        nrg = get_file_region_entry_from_cache(
                                resv, last_accounted_offset, t);
                        record_hugetlb_cgroup_uncharge_info(h_cg, h, resv, nrg);
                        list_add(&nrg->link, rg->link.prev);
                        coalesce_file_region(resv, nrg);
                } else
                        *regions_needed += 1;
        }

        VM_BUG_ON(add < 0);
        return add;
}
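
/*
 * Worked example (illustrative): with existing regions [3, 5) and
 * [8, 10) in the map, a call for the range [0, 12) accounts for the
 * gaps [0, 3), [5, 8) and [10, 12), so it returns 3 + 3 + 2 = 8.  With
 * regions_needed != NULL it would report 3 cache entries needed and
 * leave the list untouched; with regions_needed == NULL it inserts the
 * three entries and lets coalesce_file_region() merge adjacent ones.
 */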

/* Must be called with resv->lock acquired. Will drop lock to allocate entries.
 */
static int allocate_file_region_entries(struct resv_map *resv,
                                        int regions_needed)
        __must_hold(&resv->lock)
{
        struct list_head allocated_regions;
        int to_allocate = 0, i = 0;
        struct file_region *trg = NULL, *rg = NULL;

        VM_BUG_ON(regions_needed < 0);

        INIT_LIST_HEAD(&allocated_regions);

        /*
         * Check for sufficient descriptors in the cache to accommodate
         * the number of in progress add operations plus regions_needed.
         *
         * This is a while loop because when we drop the lock, some other call
         * to region_add or region_del may have consumed some region_entries,
         * so we keep looping here until we finally have enough entries for
         * (adds_in_progress + regions_needed).
         */
        while (resv->region_cache_count <
               (resv->adds_in_progress + regions_needed)) {
                to_allocate = resv->adds_in_progress + regions_needed -
                              resv->region_cache_count;

                /* At this point, we should have enough entries in the cache
                 * for all the existing adds_in_progress. We should only be
                 * needing to allocate for regions_needed.
                 */
                VM_BUG_ON(resv->region_cache_count < resv->adds_in_progress);

                spin_unlock(&resv->lock);
                for (i = 0; i < to_allocate; i++) {
                        trg = kmalloc(sizeof(*trg), GFP_KERNEL);
                        if (!trg)
                                goto out_of_memory;
                        list_add(&trg->link, &allocated_regions);
                }

                spin_lock(&resv->lock);

                list_splice(&allocated_regions, &resv->region_cache);
                resv->region_cache_count += to_allocate;
        }

        return 0;

out_of_memory:
        list_for_each_entry_safe(rg, trg, &allocated_regions, link) {
                list_del(&rg->link);
                kfree(rg);
        }
        return -ENOMEM;
}

/*
 * Add the huge page range represented by [f, t) to the reserve
 * map.  Regions will be taken from the cache to fill in this range.
 * Sufficient regions should exist in the cache due to the previous
 * call to region_chg with the same range, but in some cases the cache will not
 * have sufficient entries due to races with other code doing region_add or
 * region_del.  The extra needed entries will be allocated.
 *
 * regions_needed is the out value provided by a previous call to region_chg.
 *
 * Return the number of new huge pages added to the map.  This number is greater
 * than or equal to zero.  If file_region entries needed to be allocated for
 * this operation and we were not able to allocate, it returns -ENOMEM.
 * region_add of regions of length 1 never allocates file_regions and cannot
 * fail; region_chg will always allocate at least 1 entry and a region_add for
 * 1 page will only require at most 1 entry.
 */
static long region_add(struct resv_map *resv, long f, long t,
                       long in_regions_needed, struct hstate *h,
                       struct hugetlb_cgroup *h_cg)
{
        long add = 0, actual_regions_needed = 0;

        spin_lock(&resv->lock);
retry:

        /* Count how many regions are actually needed to execute this add. */
        add_reservation_in_range(resv, f, t, NULL, NULL,
                                 &actual_regions_needed);

        /*
         * Check for sufficient descriptors in the cache to accommodate
         * this add operation. Note that actual_regions_needed may be greater
         * than in_regions_needed, as the resv_map may have been modified since
         * the region_chg call. In this case, we need to make sure that we
         * allocate extra entries, such that we have enough for all the
         * existing adds_in_progress, plus the excess needed for this
         * operation.
         */
        if (actual_regions_needed > in_regions_needed &&
            resv->region_cache_count <
                    resv->adds_in_progress +
                            (actual_regions_needed - in_regions_needed)) {
                /* region_add operation of range 1 should never need to
                 * allocate file_region entries.
                 */
                VM_BUG_ON(t - f <= 1);

                if (allocate_file_region_entries(
                            resv, actual_regions_needed - in_regions_needed)) {
                        return -ENOMEM;
                }

                goto retry;
        }

        add = add_reservation_in_range(resv, f, t, h_cg, h, NULL);

        resv->adds_in_progress -= in_regions_needed;

        spin_unlock(&resv->lock);
        VM_BUG_ON(add < 0);
        return add;
}

/*
 * Examine the existing reserve map and determine how many
 * huge pages in the specified range [f, t) are NOT currently
 * represented.  This routine is called before a subsequent
 * call to region_add that will actually modify the reserve
 * map to add the specified range [f, t).  region_chg does
 * not change the number of huge pages represented by the
 * map.  A number of new file_region structures is added to the cache as a
 * placeholder, for the subsequent region_add call to use. At least 1
 * file_region structure is added.
 *
 * out_regions_needed is the number of regions added to the
 * resv->adds_in_progress.  This value needs to be provided to a follow up call
 * to region_add or region_abort for proper accounting.
 *
 * Returns the number of huge pages that need to be added to the existing
 * reservation map for the range [f, t).  This number is greater or equal to
 * zero.  -ENOMEM is returned if a new file_region structure or cache entry
 * is needed and can not be allocated.
 */
static long region_chg(struct resv_map *resv, long f, long t,
                       long *out_regions_needed)
{
        long chg = 0;

        spin_lock(&resv->lock);

        /* Count how many hugepages in this range are NOT represented. */
        chg = add_reservation_in_range(resv, f, t, NULL, NULL,
                                       out_regions_needed);

        if (*out_regions_needed == 0)
                *out_regions_needed = 1;

        if (allocate_file_region_entries(resv, *out_regions_needed))
                return -ENOMEM;

        resv->adds_in_progress += *out_regions_needed;

        spin_unlock(&resv->lock);
        return chg;
}

/*
 * Abort the in progress add operation.  The adds_in_progress field
 * of the resv_map keeps track of the operations in progress between
 * calls to region_chg and region_add.  Operations are sometimes
 * aborted after the call to region_chg.  In such cases, region_abort
 * is called to decrement the adds_in_progress counter. regions_needed
 * is the value returned by the region_chg call; it is used to decrement
 * the adds_in_progress counter.
 *
 * NOTE: The range arguments [f, t) are not needed or used in this
 * routine.  They are kept to make reading the calling code easier as
 * arguments will match the associated region_chg call.
 */
static void region_abort(struct resv_map *resv, long f, long t,
                         long regions_needed)
{
        spin_lock(&resv->lock);
        VM_BUG_ON(!resv->region_cache_count);
        resv->adds_in_progress -= regions_needed;
        spin_unlock(&resv->lock);
}
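
/*
 * Illustrative sketch of how the region_* primitives above are meant to
 * be paired by callers (variable names and the failure condition are
 * hypothetical; the real callers live elsewhere in this file):
 *
 *      long regions_needed, chg, add;
 *
 *      chg = region_chg(resv, f, t, &regions_needed);
 *      if (chg < 0)
 *              return chg;                     // -ENOMEM
 *      // ... charge cgroups / adjust global pools based on chg ...
 *      if (charge_or_pool_adjustment_failed) {
 *              region_abort(resv, f, t, regions_needed);
 *              return -ENOMEM;
 *      }
 *      add = region_add(resv, f, t, regions_needed, h, h_cg);
 */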

/*
 * Delete the specified range [f, t) from the reserve map.  If the
 * t parameter is LONG_MAX, this indicates that ALL regions after f
 * should be deleted.  Locate the regions which intersect [f, t)
 * and either trim, delete or split the existing regions.
 *
 * Returns the number of huge pages deleted from the reserve map.
 * In the normal case, the return value is zero or more.  In the
 * case where a region must be split, a new region descriptor must
 * be allocated.  If the allocation fails, -ENOMEM will be returned.
 * NOTE: If the parameter t == LONG_MAX, then we will never split
 * a region and possibly return -ENOMEM.  Callers specifying
 * t == LONG_MAX do not need to check for -ENOMEM error.
 */
static long region_del(struct resv_map *resv, long f, long t)
{
        struct list_head *head = &resv->regions;
        struct file_region *rg, *trg;
        struct file_region *nrg = NULL;
        long del = 0;

retry:
        spin_lock(&resv->lock);
        list_for_each_entry_safe(rg, trg, head, link) {
                /*
                 * Skip regions before the range to be deleted.  file_region
                 * ranges are normally of the form [from, to).  However, there
                 * may be a "placeholder" entry in the map which is of the form
                 * (from, to) with from == to.  Check for placeholder entries
                 * at the beginning of the range to be deleted.
                 */
                if (rg->to <= f && (rg->to != rg->from || rg->to != f))
                        continue;

                if (rg->from >= t)
                        break;

                if (f > rg->from && t < rg->to) { /* Must split region */
                        /*
                         * Check for an entry in the cache before dropping
                         * lock and attempting allocation.
                         */
                        if (!nrg &&
                            resv->region_cache_count > resv->adds_in_progress) {
                                nrg = list_first_entry(&resv->region_cache,
                                                       struct file_region,
                                                       link);
                                list_del(&nrg->link);
                                resv->region_cache_count--;
                        }

                        if (!nrg) {
                                spin_unlock(&resv->lock);
                                nrg = kmalloc(sizeof(*nrg), GFP_KERNEL);
                                if (!nrg)
                                        return -ENOMEM;
                                goto retry;
                        }

                        del += t - f;
                        hugetlb_cgroup_uncharge_file_region(
                                resv, rg, t - f, false);

                        /* New entry for end of split region */
                        nrg->from = t;
                        nrg->to = rg->to;

                        copy_hugetlb_cgroup_uncharge_info(nrg, rg);

                        INIT_LIST_HEAD(&nrg->link);

                        /* Original entry is trimmed */
                        rg->to = f;

                        list_add(&nrg->link, &rg->link);
                        nrg = NULL;
                        break;
                }

                if (f <= rg->from && t >= rg->to) { /* Remove entire region */
                        del += rg->to - rg->from;
                        hugetlb_cgroup_uncharge_file_region(resv, rg,
                                                            rg->to - rg->from, true);
                        list_del(&rg->link);
                        kfree(rg);
                        continue;
                }

                if (f <= rg->from) {    /* Trim beginning of region */
                        hugetlb_cgroup_uncharge_file_region(resv, rg,
                                                            t - rg->from, false);

                        del += t - rg->from;
                        rg->from = t;
                } else {                /* Trim end of region */
                        hugetlb_cgroup_uncharge_file_region(resv, rg,
                                                            rg->to - f, false);

                        del += rg->to - f;
                        rg->to = f;
                }
        }

        spin_unlock(&resv->lock);
        kfree(nrg);
        return del;
}
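
/*
 * Worked example of the split case above (illustrative): deleting
 * [3, 7) from a map holding the single region [0, 10) trims the
 * existing entry to [0, 3), inserts a new entry [7, 10) after it and
 * returns 4.  Only this case can require a fresh file_region
 * allocation, which is why t == LONG_MAX callers never see -ENOMEM.
 */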

/*
 * A rare out of memory error was encountered which prevented removal of
 * the reserve map region for a page.  The huge page itself was free'ed
 * and removed from the page cache.  This routine will adjust the subpool
 * usage count, and the global reserve count if needed.  By incrementing
 * these counts, the reserve map entry which could not be deleted will
 * appear as a "reserved" entry instead of simply dangling with incorrect
 * counts.
 */
void hugetlb_fix_reserve_counts(struct inode *inode)
{
        struct hugepage_subpool *spool = subpool_inode(inode);
        long rsv_adjust;
        bool reserved = false;

        rsv_adjust = hugepage_subpool_get_pages(spool, 1);
        if (rsv_adjust > 0) {
                struct hstate *h = hstate_inode(inode);

                if (!hugetlb_acct_memory(h, 1))
                        reserved = true;
        } else if (!rsv_adjust) {
                reserved = true;
        }

        if (!reserved)
                pr_warn("hugetlb: Huge Page Reserved count may go negative.\n");
}

/*
 * Count and return the number of huge pages in the reserve map
 * that intersect with the range [f, t).
 */
static long region_count(struct resv_map *resv, long f, long t)
{
        struct list_head *head = &resv->regions;
        struct file_region *rg;
        long chg = 0;

        spin_lock(&resv->lock);
        /* Locate each segment we overlap with, and count that overlap. */
        list_for_each_entry(rg, head, link) {
                long seg_from;
                long seg_to;

                if (rg->to <= f)
                        continue;
                if (rg->from >= t)
                        break;

                seg_from = max(rg->from, f);
                seg_to = min(rg->to, t);

                chg += seg_to - seg_from;
        }
        spin_unlock(&resv->lock);

        return chg;
}

/*
 * Convert the address within this vma to the page offset within
 * the mapping, in pagecache page units; huge pages here.
 */
static pgoff_t vma_hugecache_offset(struct hstate *h,
                        struct vm_area_struct *vma, unsigned long address)
{
        return ((address - vma->vm_start) >> huge_page_shift(h)) +
                        (vma->vm_pgoff >> huge_page_order(h));
}
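
/*
 * Worked example (illustrative, 2MB huge pages on a 4KB base page
 * size): huge_page_shift() is 21 and huge_page_order() is 9.  For a
 * fault 4MB past vm_start in a VMA with vm_pgoff = 512 (a 2MB file
 * offset expressed in base pages), the result is
 * (4MB >> 21) + (512 >> 9) = 2 + 1 = 3, i.e. the fourth huge page of
 * the file.
 */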

pgoff_t linear_hugepage_index(struct vm_area_struct *vma,
                                     unsigned long address)
{
        return vma_hugecache_offset(hstate_vma(vma), vma, address);
}
EXPORT_SYMBOL_GPL(linear_hugepage_index);

/*
 * Return the size of the pages allocated when backing a VMA. In the majority
 * of cases this will be the same size as used by the page table entries.
 */
unsigned long vma_kernel_pagesize(struct vm_area_struct *vma)
{
        if (vma->vm_ops && vma->vm_ops->pagesize)
                return vma->vm_ops->pagesize(vma);
        return PAGE_SIZE;
}
EXPORT_SYMBOL_GPL(vma_kernel_pagesize);

/*
 * Return the page size being used by the MMU to back a VMA. In the majority
 * of cases, the page size used by the kernel matches the MMU size. On
 * architectures where it differs, an architecture-specific 'strong'
 * version of this symbol is required.
 */
__weak unsigned long vma_mmu_pagesize(struct vm_area_struct *vma)
{
        return vma_kernel_pagesize(vma);
}

/*
 * Flags for MAP_PRIVATE reservations.  These are stored in the bottom
 * bits of the reservation map pointer, which are always clear due to
 * alignment.
 */
#define HPAGE_RESV_OWNER    (1UL << 0)
#define HPAGE_RESV_UNMAPPED (1UL << 1)
#define HPAGE_RESV_MASK (HPAGE_RESV_OWNER | HPAGE_RESV_UNMAPPED)
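
/*
 * Illustrative sketch of how these flags share vm_private_data with the
 * reservation map pointer: resv_map allocations are at least word
 * aligned, so the bottom two bits of the pointer are guaranteed clear
 * and can carry the flags, e.g.
 *
 *      vma->vm_private_data = (void *)((unsigned long)map | HPAGE_RESV_OWNER);
 *      map  = (struct resv_map *)(get_vma_private_data(vma) & ~HPAGE_RESV_MASK);
 *      owns = get_vma_private_data(vma) & HPAGE_RESV_OWNER;
 *
 * The helpers below (set_vma_resv_map(), set_vma_resv_flags(),
 * is_vma_resv_set()) implement this packing.
 */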

/*
 * These helpers are used to track how many pages are reserved for
 * faults in a MAP_PRIVATE mapping. Only the process that called mmap()
 * is guaranteed to have its future faults succeed.
 *
 * With the exception of reset_vma_resv_huge_pages() which is called at fork(),
 * the reserve counters are updated with the hugetlb_lock held.  It is safe
 * to reset the VMA at fork() time as it is not in use yet and there is no
 * chance of the global counters getting corrupted as a result of the values.
 *
 * The private mapping reservation is represented in a subtly different
 * manner to a shared mapping.  A shared mapping has a region map associated
 * with the underlying file, this region map represents the backing file
 * pages which have ever had a reservation assigned; this persists even
 * after the page is instantiated.  A private mapping has a region map
 * associated with the original mmap which is attached to all VMAs which
 * reference it, this region map represents those offsets which have consumed
 * reservation, i.e. where pages have been instantiated.
 */
static unsigned long get_vma_private_data(struct vm_area_struct *vma)
{
        return (unsigned long)vma->vm_private_data;
}

static void set_vma_private_data(struct vm_area_struct *vma,
                                                unsigned long value)
{
        vma->vm_private_data = (void *)value;
}

static void
resv_map_set_hugetlb_cgroup_uncharge_info(struct resv_map *resv_map,
                                          struct hugetlb_cgroup *h_cg,
                                          struct hstate *h)
{
#ifdef CONFIG_CGROUP_HUGETLB
        if (!h_cg || !h) {
                resv_map->reservation_counter = NULL;
                resv_map->pages_per_hpage = 0;
                resv_map->css = NULL;
        } else {
                resv_map->reservation_counter =
                        &h_cg->rsvd_hugepage[hstate_index(h)];
                resv_map->pages_per_hpage = pages_per_huge_page(h);
                resv_map->css = &h_cg->css;
        }
#endif
}

struct resv_map *resv_map_alloc(void)
{
        struct resv_map *resv_map = kmalloc(sizeof(*resv_map), GFP_KERNEL);
        struct file_region *rg = kmalloc(sizeof(*rg), GFP_KERNEL);

        if (!resv_map || !rg) {
                kfree(resv_map);
                kfree(rg);
                return NULL;
        }

        kref_init(&resv_map->refs);
        spin_lock_init(&resv_map->lock);
        INIT_LIST_HEAD(&resv_map->regions);

        resv_map->adds_in_progress = 0;
        /*
         * Initialize these to 0. On shared mappings, 0's here indicate these
         * fields don't do cgroup accounting. On private mappings, these will be
         * re-initialized to the proper values, to indicate that hugetlb cgroup
         * reservations are to be un-charged from here.
         */
        resv_map_set_hugetlb_cgroup_uncharge_info(resv_map, NULL, NULL);

        INIT_LIST_HEAD(&resv_map->region_cache);
        list_add(&rg->link, &resv_map->region_cache);
        resv_map->region_cache_count = 1;

        return resv_map;
}

void resv_map_release(struct kref *ref)
{
        struct resv_map *resv_map = container_of(ref, struct resv_map, refs);
        struct list_head *head = &resv_map->region_cache;
        struct file_region *rg, *trg;

        /* Clear out any active regions before we release the map. */
        region_del(resv_map, 0, LONG_MAX);

        /* ... and any entries left in the cache */
        list_for_each_entry_safe(rg, trg, head, link) {
                list_del(&rg->link);
                kfree(rg);
        }

        VM_BUG_ON(resv_map->adds_in_progress);

        kfree(resv_map);
}

static inline struct resv_map *inode_resv_map(struct inode *inode)
{
        /*
         * At inode evict time, i_mapping may not point to the original
         * address space within the inode.  This original address space
         * contains the pointer to the resv_map.  So, always use the
         * address space embedded within the inode.
         * The VERY common case is inode->mapping == &inode->i_data but,
         * this may not be true for device special inodes.
         */
        return (struct resv_map *)(&inode->i_data)->private_data;
}

static struct resv_map *vma_resv_map(struct vm_area_struct *vma)
{
        VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
        if (vma->vm_flags & VM_MAYSHARE) {
                struct address_space *mapping = vma->vm_file->f_mapping;
                struct inode *inode = mapping->host;

                return inode_resv_map(inode);

        } else {
                return (struct resv_map *)(get_vma_private_data(vma) &
                                                        ~HPAGE_RESV_MASK);
        }
}

static void set_vma_resv_map(struct vm_area_struct *vma, struct resv_map *map)
{
        VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
        VM_BUG_ON_VMA(vma->vm_flags & VM_MAYSHARE, vma);

        set_vma_private_data(vma, (get_vma_private_data(vma) &
                                HPAGE_RESV_MASK) | (unsigned long)map);
}

static void set_vma_resv_flags(struct vm_area_struct *vma, unsigned long flags)
{
        VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
        VM_BUG_ON_VMA(vma->vm_flags & VM_MAYSHARE, vma);

        set_vma_private_data(vma, get_vma_private_data(vma) | flags);
}

static int is_vma_resv_set(struct vm_area_struct *vma, unsigned long flag)
{
        VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);

        return (get_vma_private_data(vma) & flag) != 0;
}

/* Reset counters to 0 and clear all HPAGE_RESV_* flags */
void reset_vma_resv_huge_pages(struct vm_area_struct *vma)
{
        VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
        if (!(vma->vm_flags & VM_MAYSHARE))
                vma->vm_private_data = (void *)0;
}

/* Returns true if the VMA has associated reserve pages */
static bool vma_has_reserves(struct vm_area_struct *vma, long chg)
{
        if (vma->vm_flags & VM_NORESERVE) {
                /*
                 * This address is already reserved by another process
                 * (chg == 0), so we should decrement the reserved count.
                 * Without decrementing, the reserve count remains after
                 * releasing the inode, because this allocated page will go
                 * into the page cache and is regarded as coming from the
                 * reserved pool in the releasing step.  Currently, we
                 * don't have any other solution to deal with this situation
                 * properly, so add a work-around here.
                 */
                if (vma->vm_flags & VM_MAYSHARE && chg == 0)
                        return true;
                else
                        return false;
        }

        /* Shared mappings always use reserves */
        if (vma->vm_flags & VM_MAYSHARE) {
                /*
                 * We know VM_NORESERVE is not set.  Therefore, there SHOULD
                 * be a region map for all pages.  The only situation where
                 * there is no region map is if a hole was punched via
                 * fallocate.  In this case, there really are no reserves to
                 * use.  This situation is indicated if chg != 0.
                 */
                if (chg)
                        return false;
                else
                        return true;
        }

        /*
         * Only the process that called mmap() has reserves for
         * private mappings.
         */
        if (is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
                /*
                 * Like the shared case above, a hole punch or truncate
                 * could have been performed on the private mapping.
                 * Examine the value of chg to determine if reserves
                 * actually exist or were previously consumed.
                 * Very Subtle - The value of chg comes from a previous
                 * call to vma_needs_reserves().  The reserve map for
                 * private mappings has different (opposite) semantics
                 * than that of shared mappings.  vma_needs_reserves()
                 * has already taken this difference in semantics into
                 * account.  Therefore, the meaning of chg is the same
                 * as in the shared case above.  Code could easily be
                 * combined, but keeping it separate draws attention to
                 * subtle differences.
                 */
                if (chg)
                        return false;
                else
                        return true;
        }

        return false;
}
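
/*
 * Summary of the cases above (illustrative): VM_NORESERVE mappings only
 * report reserves for the shared, chg == 0 case (the work-around noted
 * above); VM_MAYSHARE mappings have reserves whenever chg == 0; private
 * mappings have reserves only for the HPAGE_RESV_OWNER task and, again,
 * only when chg == 0.  Everything else returns false.
 */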

static void enqueue_huge_page(struct hstate *h, struct page *page)
{
        int nid = page_to_nid(page);
        list_move(&page->lru, &h->hugepage_freelists[nid]);
        h->free_huge_pages++;
        h->free_huge_pages_node[nid]++;
        SetPageHugeFreed(page);
}

static struct page *dequeue_huge_page_node_exact(struct hstate *h, int nid)
{
        struct page *page;
        bool nocma = !!(current->flags & PF_MEMALLOC_NOCMA);

        list_for_each_entry(page, &h->hugepage_freelists[nid], lru) {
                if (nocma && is_migrate_cma_page(page))
                        continue;

                if (PageHWPoison(page))
                        continue;

                list_move(&page->lru, &h->hugepage_activelist);
                set_page_refcounted(page);
                ClearPageHugeFreed(page);
                h->free_huge_pages--;
                h->free_huge_pages_node[nid]--;
                return page;
        }

        return NULL;
}

static struct page *dequeue_huge_page_nodemask(struct hstate *h, gfp_t gfp_mask, int nid,
                                               nodemask_t *nmask)
{
        unsigned int cpuset_mems_cookie;
        struct zonelist *zonelist;
        struct zone *zone;
        struct zoneref *z;
        int node = NUMA_NO_NODE;

        zonelist = node_zonelist(nid, gfp_mask);

retry_cpuset:
        cpuset_mems_cookie = read_mems_allowed_begin();
        for_each_zone_zonelist_nodemask(zone, z, zonelist, gfp_zone(gfp_mask), nmask) {
                struct page *page;

                if (!cpuset_zone_allowed(zone, gfp_mask))
                        continue;
                /*
                 * no need to ask again on the same node. Pool is node rather than
                 * zone aware
                 */
                if (zone_to_nid(zone) == node)
                        continue;
                node = zone_to_nid(zone);

                page = dequeue_huge_page_node_exact(h, node);
                if (page)
                        return page;
        }
        if (unlikely(read_mems_allowed_retry(cpuset_mems_cookie)))
                goto retry_cpuset;

        return NULL;
}
1134*4882a593Smuzhiyun
1135*4882a593Smuzhiyun static struct page *dequeue_huge_page_vma(struct hstate *h,
1136*4882a593Smuzhiyun struct vm_area_struct *vma,
1137*4882a593Smuzhiyun unsigned long address, int avoid_reserve,
1138*4882a593Smuzhiyun long chg)
1139*4882a593Smuzhiyun {
1140*4882a593Smuzhiyun struct page *page;
1141*4882a593Smuzhiyun struct mempolicy *mpol;
1142*4882a593Smuzhiyun gfp_t gfp_mask;
1143*4882a593Smuzhiyun nodemask_t *nodemask;
1144*4882a593Smuzhiyun int nid;
1145*4882a593Smuzhiyun
1146*4882a593Smuzhiyun /*
1147*4882a593Smuzhiyun 	 * A child process with MAP_PRIVATE mappings created by its parent
1148*4882a593Smuzhiyun 	 * has no page reserves. This check ensures that reservations are
1149*4882a593Smuzhiyun 	 * not "stolen". The child may still get SIGKILLed.
1150*4882a593Smuzhiyun */
1151*4882a593Smuzhiyun if (!vma_has_reserves(vma, chg) &&
1152*4882a593Smuzhiyun h->free_huge_pages - h->resv_huge_pages == 0)
1153*4882a593Smuzhiyun goto err;
1154*4882a593Smuzhiyun
1155*4882a593Smuzhiyun /* If reserves cannot be used, ensure enough pages are in the pool */
1156*4882a593Smuzhiyun if (avoid_reserve && h->free_huge_pages - h->resv_huge_pages == 0)
1157*4882a593Smuzhiyun goto err;
1158*4882a593Smuzhiyun
1159*4882a593Smuzhiyun gfp_mask = htlb_alloc_mask(h);
1160*4882a593Smuzhiyun nid = huge_node(vma, address, gfp_mask, &mpol, &nodemask);
1161*4882a593Smuzhiyun page = dequeue_huge_page_nodemask(h, gfp_mask, nid, nodemask);
1162*4882a593Smuzhiyun if (page && !avoid_reserve && vma_has_reserves(vma, chg)) {
1163*4882a593Smuzhiyun SetPagePrivate(page);
1164*4882a593Smuzhiyun h->resv_huge_pages--;
1165*4882a593Smuzhiyun }
1166*4882a593Smuzhiyun
1167*4882a593Smuzhiyun mpol_cond_put(mpol);
1168*4882a593Smuzhiyun return page;
1169*4882a593Smuzhiyun
1170*4882a593Smuzhiyun err:
1171*4882a593Smuzhiyun return NULL;
1172*4882a593Smuzhiyun }
1173*4882a593Smuzhiyun
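/*
 * Decision summary for the dequeue above, using a hypothetical scenario:
 * a task faulting on a shared mapping whose reservation was already
 * consumed (chg != 0) while the pool has free_huge_pages == resv_huge_pages
 * takes the first 'goto err', because every remaining free page is spoken
 * for by other reservations. The caller (alloc_huge_page(), later in this
 * file) then typically falls back to allocating a fresh surplus page
 * instead of dequeueing one.
 */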
1174*4882a593Smuzhiyun /*
1175*4882a593Smuzhiyun * common helper functions for hstate_next_node_to_{alloc|free}.
1176*4882a593Smuzhiyun * We may have allocated or freed a huge page based on a different
1177*4882a593Smuzhiyun * nodes_allowed previously, so h->next_node_to_{alloc|free} might
1178*4882a593Smuzhiyun * be outside of *nodes_allowed. Ensure that we use an allowed
1179*4882a593Smuzhiyun * node for alloc or free.
1180*4882a593Smuzhiyun */
1181*4882a593Smuzhiyun static int next_node_allowed(int nid, nodemask_t *nodes_allowed)
1182*4882a593Smuzhiyun {
1183*4882a593Smuzhiyun nid = next_node_in(nid, *nodes_allowed);
1184*4882a593Smuzhiyun VM_BUG_ON(nid >= MAX_NUMNODES);
1185*4882a593Smuzhiyun
1186*4882a593Smuzhiyun return nid;
1187*4882a593Smuzhiyun }
1188*4882a593Smuzhiyun
1189*4882a593Smuzhiyun static int get_valid_node_allowed(int nid, nodemask_t *nodes_allowed)
1190*4882a593Smuzhiyun {
1191*4882a593Smuzhiyun if (!node_isset(nid, *nodes_allowed))
1192*4882a593Smuzhiyun nid = next_node_allowed(nid, nodes_allowed);
1193*4882a593Smuzhiyun return nid;
1194*4882a593Smuzhiyun }
1195*4882a593Smuzhiyun
1196*4882a593Smuzhiyun /*
1197*4882a593Smuzhiyun * returns the previously saved node ["this node"] from which to
1198*4882a593Smuzhiyun * allocate a persistent huge page for the pool and advance the
1199*4882a593Smuzhiyun * next node from which to allocate, handling wrap at end of node
1200*4882a593Smuzhiyun * mask.
1201*4882a593Smuzhiyun */
1202*4882a593Smuzhiyun static int hstate_next_node_to_alloc(struct hstate *h,
1203*4882a593Smuzhiyun nodemask_t *nodes_allowed)
1204*4882a593Smuzhiyun {
1205*4882a593Smuzhiyun int nid;
1206*4882a593Smuzhiyun
1207*4882a593Smuzhiyun VM_BUG_ON(!nodes_allowed);
1208*4882a593Smuzhiyun
1209*4882a593Smuzhiyun nid = get_valid_node_allowed(h->next_nid_to_alloc, nodes_allowed);
1210*4882a593Smuzhiyun h->next_nid_to_alloc = next_node_allowed(nid, nodes_allowed);
1211*4882a593Smuzhiyun
1212*4882a593Smuzhiyun return nid;
1213*4882a593Smuzhiyun }
1214*4882a593Smuzhiyun
1215*4882a593Smuzhiyun /*
1216*4882a593Smuzhiyun * helper for free_pool_huge_page() - return the previously saved
1217*4882a593Smuzhiyun * node ["this node"] from which to free a huge page. Advance the
1218*4882a593Smuzhiyun * next node id whether or not we find a free huge page to free so
1219*4882a593Smuzhiyun * that the next attempt to free addresses the next node.
1220*4882a593Smuzhiyun */
1221*4882a593Smuzhiyun static int hstate_next_node_to_free(struct hstate *h, nodemask_t *nodes_allowed)
1222*4882a593Smuzhiyun {
1223*4882a593Smuzhiyun int nid;
1224*4882a593Smuzhiyun
1225*4882a593Smuzhiyun VM_BUG_ON(!nodes_allowed);
1226*4882a593Smuzhiyun
1227*4882a593Smuzhiyun nid = get_valid_node_allowed(h->next_nid_to_free, nodes_allowed);
1228*4882a593Smuzhiyun h->next_nid_to_free = next_node_allowed(nid, nodes_allowed);
1229*4882a593Smuzhiyun
1230*4882a593Smuzhiyun return nid;
1231*4882a593Smuzhiyun }
1232*4882a593Smuzhiyun
1233*4882a593Smuzhiyun #define for_each_node_mask_to_alloc(hs, nr_nodes, node, mask) \
1234*4882a593Smuzhiyun for (nr_nodes = nodes_weight(*mask); \
1235*4882a593Smuzhiyun nr_nodes > 0 && \
1236*4882a593Smuzhiyun ((node = hstate_next_node_to_alloc(hs, mask)) || 1); \
1237*4882a593Smuzhiyun nr_nodes--)
1238*4882a593Smuzhiyun
1239*4882a593Smuzhiyun #define for_each_node_mask_to_free(hs, nr_nodes, node, mask) \
1240*4882a593Smuzhiyun for (nr_nodes = nodes_weight(*mask); \
1241*4882a593Smuzhiyun nr_nodes > 0 && \
1242*4882a593Smuzhiyun ((node = hstate_next_node_to_free(hs, mask)) || 1); \
1243*4882a593Smuzhiyun nr_nodes--)
1244*4882a593Smuzhiyun
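/*
 * Illustrative sketch (not part of the build) of how the round-robin
 * iterators above are typically used; the pattern mirrors
 * alloc_pool_huge_page() and free_pool_huge_page() later in this file.
 * try_one_node() is a hypothetical helper, shown only for illustration:
 *
 *	int nr_nodes, node;
 *
 *	for_each_node_mask_to_alloc(h, nr_nodes, node, &node_states[N_MEMORY]) {
 *		if (try_one_node(h, node))
 *			break;
 *	}
 *
 * 'node' starts at h->next_nid_to_alloc and wraps through the mask, so at
 * most nodes_weight(*mask) nodes are visited per call.
 */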
1245*4882a593Smuzhiyun #ifdef CONFIG_ARCH_HAS_GIGANTIC_PAGE
1246*4882a593Smuzhiyun static void destroy_compound_gigantic_page(struct page *page,
1247*4882a593Smuzhiyun unsigned int order)
1248*4882a593Smuzhiyun {
1249*4882a593Smuzhiyun int i;
1250*4882a593Smuzhiyun int nr_pages = 1 << order;
1251*4882a593Smuzhiyun struct page *p = page + 1;
1252*4882a593Smuzhiyun
1253*4882a593Smuzhiyun atomic_set(compound_mapcount_ptr(page), 0);
1254*4882a593Smuzhiyun atomic_set(compound_pincount_ptr(page), 0);
1255*4882a593Smuzhiyun
1256*4882a593Smuzhiyun for (i = 1; i < nr_pages; i++, p = mem_map_next(p, page, i)) {
1257*4882a593Smuzhiyun clear_compound_head(p);
1258*4882a593Smuzhiyun set_page_refcounted(p);
1259*4882a593Smuzhiyun }
1260*4882a593Smuzhiyun
1261*4882a593Smuzhiyun set_compound_order(page, 0);
1262*4882a593Smuzhiyun page[1].compound_nr = 0;
1263*4882a593Smuzhiyun __ClearPageHead(page);
1264*4882a593Smuzhiyun }
1265*4882a593Smuzhiyun
1266*4882a593Smuzhiyun static void free_gigantic_page(struct page *page, unsigned int order)
1267*4882a593Smuzhiyun {
1268*4882a593Smuzhiyun /*
1269*4882a593Smuzhiyun * If the page isn't allocated using the cma allocator,
1270*4882a593Smuzhiyun * cma_release() returns false.
1271*4882a593Smuzhiyun */
1272*4882a593Smuzhiyun #ifdef CONFIG_CMA
1273*4882a593Smuzhiyun if (cma_release(hugetlb_cma[page_to_nid(page)], page, 1 << order))
1274*4882a593Smuzhiyun return;
1275*4882a593Smuzhiyun #endif
1276*4882a593Smuzhiyun
1277*4882a593Smuzhiyun free_contig_range(page_to_pfn(page), 1 << order);
1278*4882a593Smuzhiyun }
1279*4882a593Smuzhiyun
1280*4882a593Smuzhiyun #ifdef CONFIG_CONTIG_ALLOC
1281*4882a593Smuzhiyun static struct page *alloc_gigantic_page(struct hstate *h, gfp_t gfp_mask,
1282*4882a593Smuzhiyun int nid, nodemask_t *nodemask)
1283*4882a593Smuzhiyun {
1284*4882a593Smuzhiyun unsigned long nr_pages = 1UL << huge_page_order(h);
1285*4882a593Smuzhiyun if (nid == NUMA_NO_NODE)
1286*4882a593Smuzhiyun nid = numa_mem_id();
1287*4882a593Smuzhiyun
1288*4882a593Smuzhiyun #ifdef CONFIG_CMA
1289*4882a593Smuzhiyun {
1290*4882a593Smuzhiyun struct page *page;
1291*4882a593Smuzhiyun int node;
1292*4882a593Smuzhiyun
1293*4882a593Smuzhiyun if (hugetlb_cma[nid]) {
1294*4882a593Smuzhiyun page = cma_alloc(hugetlb_cma[nid], nr_pages,
1295*4882a593Smuzhiyun huge_page_order(h),
1296*4882a593Smuzhiyun GFP_KERNEL | __GFP_NOWARN);
1297*4882a593Smuzhiyun if (page)
1298*4882a593Smuzhiyun return page;
1299*4882a593Smuzhiyun }
1300*4882a593Smuzhiyun
1301*4882a593Smuzhiyun if (!(gfp_mask & __GFP_THISNODE)) {
1302*4882a593Smuzhiyun for_each_node_mask(node, *nodemask) {
1303*4882a593Smuzhiyun if (node == nid || !hugetlb_cma[node])
1304*4882a593Smuzhiyun continue;
1305*4882a593Smuzhiyun
1306*4882a593Smuzhiyun page = cma_alloc(hugetlb_cma[node], nr_pages,
1307*4882a593Smuzhiyun huge_page_order(h),
1308*4882a593Smuzhiyun GFP_KERNEL | __GFP_NOWARN);
1309*4882a593Smuzhiyun if (page)
1310*4882a593Smuzhiyun return page;
1311*4882a593Smuzhiyun }
1312*4882a593Smuzhiyun }
1313*4882a593Smuzhiyun }
1314*4882a593Smuzhiyun #endif
1315*4882a593Smuzhiyun
1316*4882a593Smuzhiyun return alloc_contig_pages(nr_pages, gfp_mask, nid, nodemask);
1317*4882a593Smuzhiyun }
1318*4882a593Smuzhiyun
1319*4882a593Smuzhiyun #else /* !CONFIG_CONTIG_ALLOC */
1320*4882a593Smuzhiyun static struct page *alloc_gigantic_page(struct hstate *h, gfp_t gfp_mask,
1321*4882a593Smuzhiyun int nid, nodemask_t *nodemask)
1322*4882a593Smuzhiyun {
1323*4882a593Smuzhiyun return NULL;
1324*4882a593Smuzhiyun }
1325*4882a593Smuzhiyun #endif /* CONFIG_CONTIG_ALLOC */
1326*4882a593Smuzhiyun
1327*4882a593Smuzhiyun #else /* !CONFIG_ARCH_HAS_GIGANTIC_PAGE */
1328*4882a593Smuzhiyun static struct page *alloc_gigantic_page(struct hstate *h, gfp_t gfp_mask,
1329*4882a593Smuzhiyun int nid, nodemask_t *nodemask)
1330*4882a593Smuzhiyun {
1331*4882a593Smuzhiyun return NULL;
1332*4882a593Smuzhiyun }
1333*4882a593Smuzhiyun static inline void free_gigantic_page(struct page *page, unsigned int order) { }
1334*4882a593Smuzhiyun static inline void destroy_compound_gigantic_page(struct page *page,
1335*4882a593Smuzhiyun unsigned int order) { }
1336*4882a593Smuzhiyun #endif
1337*4882a593Smuzhiyun
1338*4882a593Smuzhiyun static void update_and_free_page(struct hstate *h, struct page *page)
1339*4882a593Smuzhiyun {
1340*4882a593Smuzhiyun int i;
1341*4882a593Smuzhiyun struct page *subpage = page;
1342*4882a593Smuzhiyun
1343*4882a593Smuzhiyun if (hstate_is_gigantic(h) && !gigantic_page_runtime_supported())
1344*4882a593Smuzhiyun return;
1345*4882a593Smuzhiyun
1346*4882a593Smuzhiyun h->nr_huge_pages--;
1347*4882a593Smuzhiyun h->nr_huge_pages_node[page_to_nid(page)]--;
1348*4882a593Smuzhiyun for (i = 0; i < pages_per_huge_page(h);
1349*4882a593Smuzhiyun i++, subpage = mem_map_next(subpage, page, i)) {
1350*4882a593Smuzhiyun subpage->flags &= ~(1 << PG_locked | 1 << PG_error |
1351*4882a593Smuzhiyun 1 << PG_referenced | 1 << PG_dirty |
1352*4882a593Smuzhiyun 1 << PG_active | 1 << PG_private |
1353*4882a593Smuzhiyun 1 << PG_writeback);
1354*4882a593Smuzhiyun }
1355*4882a593Smuzhiyun VM_BUG_ON_PAGE(hugetlb_cgroup_from_page(page), page);
1356*4882a593Smuzhiyun VM_BUG_ON_PAGE(hugetlb_cgroup_from_page_rsvd(page), page);
1357*4882a593Smuzhiyun set_compound_page_dtor(page, NULL_COMPOUND_DTOR);
1358*4882a593Smuzhiyun set_page_refcounted(page);
1359*4882a593Smuzhiyun if (hstate_is_gigantic(h)) {
1360*4882a593Smuzhiyun /*
1361*4882a593Smuzhiyun * Temporarily drop the hugetlb_lock, because
1362*4882a593Smuzhiyun * we might block in free_gigantic_page().
1363*4882a593Smuzhiyun */
1364*4882a593Smuzhiyun spin_unlock(&hugetlb_lock);
1365*4882a593Smuzhiyun destroy_compound_gigantic_page(page, huge_page_order(h));
1366*4882a593Smuzhiyun free_gigantic_page(page, huge_page_order(h));
1367*4882a593Smuzhiyun spin_lock(&hugetlb_lock);
1368*4882a593Smuzhiyun } else {
1369*4882a593Smuzhiyun __free_pages(page, huge_page_order(h));
1370*4882a593Smuzhiyun }
1371*4882a593Smuzhiyun }
1372*4882a593Smuzhiyun
1373*4882a593Smuzhiyun struct hstate *size_to_hstate(unsigned long size)
1374*4882a593Smuzhiyun {
1375*4882a593Smuzhiyun struct hstate *h;
1376*4882a593Smuzhiyun
1377*4882a593Smuzhiyun for_each_hstate(h) {
1378*4882a593Smuzhiyun if (huge_page_size(h) == size)
1379*4882a593Smuzhiyun return h;
1380*4882a593Smuzhiyun }
1381*4882a593Smuzhiyun return NULL;
1382*4882a593Smuzhiyun }
1383*4882a593Smuzhiyun
1384*4882a593Smuzhiyun /*
1385*4882a593Smuzhiyun * Test to determine whether the hugepage is "active/in-use" (i.e. being linked
1386*4882a593Smuzhiyun * to hstate->hugepage_activelist.)
1387*4882a593Smuzhiyun *
1388*4882a593Smuzhiyun * This function can be called for tail pages, but never returns true for them.
1389*4882a593Smuzhiyun */
1390*4882a593Smuzhiyun bool page_huge_active(struct page *page)
1391*4882a593Smuzhiyun {
1392*4882a593Smuzhiyun return PageHeadHuge(page) && PagePrivate(&page[1]);
1393*4882a593Smuzhiyun }
1394*4882a593Smuzhiyun
1395*4882a593Smuzhiyun /* never called for tail page */
1396*4882a593Smuzhiyun void set_page_huge_active(struct page *page)
1397*4882a593Smuzhiyun {
1398*4882a593Smuzhiyun VM_BUG_ON_PAGE(!PageHeadHuge(page), page);
1399*4882a593Smuzhiyun SetPagePrivate(&page[1]);
1400*4882a593Smuzhiyun }
1401*4882a593Smuzhiyun
1402*4882a593Smuzhiyun static void clear_page_huge_active(struct page *page)
1403*4882a593Smuzhiyun {
1404*4882a593Smuzhiyun VM_BUG_ON_PAGE(!PageHeadHuge(page), page);
1405*4882a593Smuzhiyun ClearPagePrivate(&page[1]);
1406*4882a593Smuzhiyun }
1407*4882a593Smuzhiyun
1408*4882a593Smuzhiyun /*
1409*4882a593Smuzhiyun * Internal hugetlb specific page flag. Do not use outside of the hugetlb
1410*4882a593Smuzhiyun * code
1411*4882a593Smuzhiyun */
1412*4882a593Smuzhiyun static inline bool PageHugeTemporary(struct page *page)
1413*4882a593Smuzhiyun {
1414*4882a593Smuzhiyun if (!PageHuge(page))
1415*4882a593Smuzhiyun return false;
1416*4882a593Smuzhiyun
1417*4882a593Smuzhiyun return (unsigned long)page[2].mapping == -1U;
1418*4882a593Smuzhiyun }
1419*4882a593Smuzhiyun
1420*4882a593Smuzhiyun static inline void SetPageHugeTemporary(struct page *page)
1421*4882a593Smuzhiyun {
1422*4882a593Smuzhiyun page[2].mapping = (void *)-1U;
1423*4882a593Smuzhiyun }
1424*4882a593Smuzhiyun
1425*4882a593Smuzhiyun static inline void ClearPageHugeTemporary(struct page *page)
1426*4882a593Smuzhiyun {
1427*4882a593Smuzhiyun page[2].mapping = NULL;
1428*4882a593Smuzhiyun }
1429*4882a593Smuzhiyun
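/*
 * Rough lifecycle sketch of the "temporary" marker above, based on the
 * code in this file: alloc_migrate_huge_page() (and the overcommit-race
 * path in alloc_surplus_huge_page()) sets it on pages that are not
 * accounted to the pool, and __free_huge_page() tests it so that such
 * pages are released straight back via update_and_free_page() instead of
 * being re-enqueued on a free list:
 *
 *	page = alloc_migrate_huge_page(h, gfp_mask, nid, nmask);
 *	...
 *	put_page(page);	// last reference: PageHugeTemporary() is true,
 *			// so the page is freed rather than pooled
 */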
1430*4882a593Smuzhiyun static void __free_huge_page(struct page *page)
1431*4882a593Smuzhiyun {
1432*4882a593Smuzhiyun /*
1433*4882a593Smuzhiyun * Can't pass hstate in here because it is called from the
1434*4882a593Smuzhiyun * compound page destructor.
1435*4882a593Smuzhiyun */
1436*4882a593Smuzhiyun struct hstate *h = page_hstate(page);
1437*4882a593Smuzhiyun int nid = page_to_nid(page);
1438*4882a593Smuzhiyun struct hugepage_subpool *spool =
1439*4882a593Smuzhiyun (struct hugepage_subpool *)page_private(page);
1440*4882a593Smuzhiyun bool restore_reserve;
1441*4882a593Smuzhiyun
1442*4882a593Smuzhiyun VM_BUG_ON_PAGE(page_count(page), page);
1443*4882a593Smuzhiyun VM_BUG_ON_PAGE(page_mapcount(page), page);
1444*4882a593Smuzhiyun
1445*4882a593Smuzhiyun set_page_private(page, 0);
1446*4882a593Smuzhiyun page->mapping = NULL;
1447*4882a593Smuzhiyun restore_reserve = PagePrivate(page);
1448*4882a593Smuzhiyun ClearPagePrivate(page);
1449*4882a593Smuzhiyun
1450*4882a593Smuzhiyun /*
1451*4882a593Smuzhiyun * If PagePrivate() was set on page, page allocation consumed a
1452*4882a593Smuzhiyun * reservation. If the page was associated with a subpool, there
1453*4882a593Smuzhiyun * would have been a page reserved in the subpool before allocation
1454*4882a593Smuzhiyun * via hugepage_subpool_get_pages(). Since we are 'restoring' the
1455*4882a593Smuzhiyun 	 * reservation, do not call hugepage_subpool_put_pages() as this will
1456*4882a593Smuzhiyun * remove the reserved page from the subpool.
1457*4882a593Smuzhiyun */
1458*4882a593Smuzhiyun if (!restore_reserve) {
1459*4882a593Smuzhiyun /*
1460*4882a593Smuzhiyun * A return code of zero implies that the subpool will be
1461*4882a593Smuzhiyun * under its minimum size if the reservation is not restored
1462*4882a593Smuzhiyun 		 * after the page is freed. Therefore, force the restore_reserve
1463*4882a593Smuzhiyun * operation.
1464*4882a593Smuzhiyun */
1465*4882a593Smuzhiyun if (hugepage_subpool_put_pages(spool, 1) == 0)
1466*4882a593Smuzhiyun restore_reserve = true;
1467*4882a593Smuzhiyun }
1468*4882a593Smuzhiyun
1469*4882a593Smuzhiyun spin_lock(&hugetlb_lock);
1470*4882a593Smuzhiyun clear_page_huge_active(page);
1471*4882a593Smuzhiyun hugetlb_cgroup_uncharge_page(hstate_index(h),
1472*4882a593Smuzhiyun pages_per_huge_page(h), page);
1473*4882a593Smuzhiyun hugetlb_cgroup_uncharge_page_rsvd(hstate_index(h),
1474*4882a593Smuzhiyun pages_per_huge_page(h), page);
1475*4882a593Smuzhiyun if (restore_reserve)
1476*4882a593Smuzhiyun h->resv_huge_pages++;
1477*4882a593Smuzhiyun
1478*4882a593Smuzhiyun if (PageHugeTemporary(page)) {
1479*4882a593Smuzhiyun list_del(&page->lru);
1480*4882a593Smuzhiyun ClearPageHugeTemporary(page);
1481*4882a593Smuzhiyun update_and_free_page(h, page);
1482*4882a593Smuzhiyun } else if (h->surplus_huge_pages_node[nid]) {
1483*4882a593Smuzhiyun /* remove the page from active list */
1484*4882a593Smuzhiyun list_del(&page->lru);
1485*4882a593Smuzhiyun update_and_free_page(h, page);
1486*4882a593Smuzhiyun h->surplus_huge_pages--;
1487*4882a593Smuzhiyun h->surplus_huge_pages_node[nid]--;
1488*4882a593Smuzhiyun } else {
1489*4882a593Smuzhiyun arch_clear_hugepage_flags(page);
1490*4882a593Smuzhiyun enqueue_huge_page(h, page);
1491*4882a593Smuzhiyun }
1492*4882a593Smuzhiyun spin_unlock(&hugetlb_lock);
1493*4882a593Smuzhiyun }
1494*4882a593Smuzhiyun
1495*4882a593Smuzhiyun /*
1496*4882a593Smuzhiyun * As free_huge_page() can be called from a non-task context, we have
1497*4882a593Smuzhiyun * to defer the actual freeing in a workqueue to prevent potential
1498*4882a593Smuzhiyun * hugetlb_lock deadlock.
1499*4882a593Smuzhiyun *
1500*4882a593Smuzhiyun * free_hpage_workfn() locklessly retrieves the linked list of pages to
1501*4882a593Smuzhiyun * be freed and frees them one-by-one. As the page->mapping pointer is
1502*4882a593Smuzhiyun * going to be cleared in __free_huge_page() anyway, it is reused as the
1503*4882a593Smuzhiyun * llist_node structure of a lockless linked list of huge pages to be freed.
1504*4882a593Smuzhiyun */
1505*4882a593Smuzhiyun static LLIST_HEAD(hpage_freelist);
1506*4882a593Smuzhiyun
1507*4882a593Smuzhiyun static void free_hpage_workfn(struct work_struct *work)
1508*4882a593Smuzhiyun {
1509*4882a593Smuzhiyun struct llist_node *node;
1510*4882a593Smuzhiyun struct page *page;
1511*4882a593Smuzhiyun
1512*4882a593Smuzhiyun node = llist_del_all(&hpage_freelist);
1513*4882a593Smuzhiyun
1514*4882a593Smuzhiyun while (node) {
1515*4882a593Smuzhiyun page = container_of((struct address_space **)node,
1516*4882a593Smuzhiyun struct page, mapping);
1517*4882a593Smuzhiyun node = node->next;
1518*4882a593Smuzhiyun __free_huge_page(page);
1519*4882a593Smuzhiyun }
1520*4882a593Smuzhiyun }
1521*4882a593Smuzhiyun static DECLARE_WORK(free_hpage_work, free_hpage_workfn);
1522*4882a593Smuzhiyun
1523*4882a593Smuzhiyun void free_huge_page(struct page *page)
1524*4882a593Smuzhiyun {
1525*4882a593Smuzhiyun /*
1526*4882a593Smuzhiyun * Defer freeing if in non-task context to avoid hugetlb_lock deadlock.
1527*4882a593Smuzhiyun */
1528*4882a593Smuzhiyun if (!in_task()) {
1529*4882a593Smuzhiyun /*
1530*4882a593Smuzhiyun 		 * Only call schedule_work() if hpage_freelist was previously
1531*4882a593Smuzhiyun 		 * empty. Otherwise, schedule_work() has already been called but
1532*4882a593Smuzhiyun 		 * the workfn hasn't retrieved the list yet.
1533*4882a593Smuzhiyun */
1534*4882a593Smuzhiyun if (llist_add((struct llist_node *)&page->mapping,
1535*4882a593Smuzhiyun &hpage_freelist))
1536*4882a593Smuzhiyun schedule_work(&free_hpage_work);
1537*4882a593Smuzhiyun return;
1538*4882a593Smuzhiyun }
1539*4882a593Smuzhiyun
1540*4882a593Smuzhiyun __free_huge_page(page);
1541*4882a593Smuzhiyun }
1542*4882a593Smuzhiyun
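/*
 * Illustrative call chain for the deferral above (the driver context is
 * hypothetical): a put_page() issued from softirq or interrupt context on
 * the last reference of a hugetlb page reaches free_huge_page() through
 * the compound page destructor. Because hugetlb_lock may already be held
 * by the interrupted task on this CPU, the page is queued instead of
 * being freed directly:
 *
 *	put_page(hpage);			// e.g. from softirq context
 *	  -> free_huge_page(hpage)		// !in_task()
 *	       -> llist_add(...) + schedule_work(&free_hpage_work)
 *	...
 *	free_hpage_workfn() -> __free_huge_page(hpage)	// workqueue context
 */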
1543*4882a593Smuzhiyun static void prep_new_huge_page(struct hstate *h, struct page *page, int nid)
1544*4882a593Smuzhiyun {
1545*4882a593Smuzhiyun INIT_LIST_HEAD(&page->lru);
1546*4882a593Smuzhiyun set_compound_page_dtor(page, HUGETLB_PAGE_DTOR);
1547*4882a593Smuzhiyun set_hugetlb_cgroup(page, NULL);
1548*4882a593Smuzhiyun set_hugetlb_cgroup_rsvd(page, NULL);
1549*4882a593Smuzhiyun spin_lock(&hugetlb_lock);
1550*4882a593Smuzhiyun h->nr_huge_pages++;
1551*4882a593Smuzhiyun h->nr_huge_pages_node[nid]++;
1552*4882a593Smuzhiyun ClearPageHugeFreed(page);
1553*4882a593Smuzhiyun spin_unlock(&hugetlb_lock);
1554*4882a593Smuzhiyun }
1555*4882a593Smuzhiyun
1556*4882a593Smuzhiyun static void prep_compound_gigantic_page(struct page *page, unsigned int order)
1557*4882a593Smuzhiyun {
1558*4882a593Smuzhiyun int i;
1559*4882a593Smuzhiyun int nr_pages = 1 << order;
1560*4882a593Smuzhiyun struct page *p = page + 1;
1561*4882a593Smuzhiyun
1562*4882a593Smuzhiyun /* we rely on prep_new_huge_page to set the destructor */
1563*4882a593Smuzhiyun set_compound_order(page, order);
1564*4882a593Smuzhiyun __ClearPageReserved(page);
1565*4882a593Smuzhiyun __SetPageHead(page);
1566*4882a593Smuzhiyun for (i = 1; i < nr_pages; i++, p = mem_map_next(p, page, i)) {
1567*4882a593Smuzhiyun /*
1568*4882a593Smuzhiyun * For gigantic hugepages allocated through bootmem at
1569*4882a593Smuzhiyun * boot, it's safer to be consistent with the not-gigantic
1570*4882a593Smuzhiyun * hugepages and clear the PG_reserved bit from all tail pages
1571*4882a593Smuzhiyun * too. Otherwise drivers using get_user_pages() to access tail
1572*4882a593Smuzhiyun * pages may get the reference counting wrong if they see
1573*4882a593Smuzhiyun * PG_reserved set on a tail page (despite the head page not
1574*4882a593Smuzhiyun * having PG_reserved set). Enforcing this consistency between
1575*4882a593Smuzhiyun * head and tail pages allows drivers to optimize away a check
1576*4882a593Smuzhiyun 		 * on the head page when they need to know if put_page() is needed
1577*4882a593Smuzhiyun * after get_user_pages().
1578*4882a593Smuzhiyun */
1579*4882a593Smuzhiyun __ClearPageReserved(p);
1580*4882a593Smuzhiyun set_page_count(p, 0);
1581*4882a593Smuzhiyun set_compound_head(p, page);
1582*4882a593Smuzhiyun }
1583*4882a593Smuzhiyun atomic_set(compound_mapcount_ptr(page), -1);
1584*4882a593Smuzhiyun atomic_set(compound_pincount_ptr(page), 0);
1585*4882a593Smuzhiyun }
1586*4882a593Smuzhiyun
1587*4882a593Smuzhiyun /*
1588*4882a593Smuzhiyun * PageHuge() only returns true for hugetlbfs pages, but not for normal or
1589*4882a593Smuzhiyun * transparent huge pages. See the PageTransHuge() documentation for more
1590*4882a593Smuzhiyun * details.
1591*4882a593Smuzhiyun */
1592*4882a593Smuzhiyun int PageHuge(struct page *page)
1593*4882a593Smuzhiyun {
1594*4882a593Smuzhiyun if (!PageCompound(page))
1595*4882a593Smuzhiyun return 0;
1596*4882a593Smuzhiyun
1597*4882a593Smuzhiyun page = compound_head(page);
1598*4882a593Smuzhiyun return page[1].compound_dtor == HUGETLB_PAGE_DTOR;
1599*4882a593Smuzhiyun }
1600*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(PageHuge);
1601*4882a593Smuzhiyun
1602*4882a593Smuzhiyun /*
1603*4882a593Smuzhiyun * PageHeadHuge() only returns true for hugetlbfs head page, but not for
1604*4882a593Smuzhiyun * normal or transparent huge pages.
1605*4882a593Smuzhiyun */
1606*4882a593Smuzhiyun int PageHeadHuge(struct page *page_head)
1607*4882a593Smuzhiyun {
1608*4882a593Smuzhiyun if (!PageHead(page_head))
1609*4882a593Smuzhiyun return 0;
1610*4882a593Smuzhiyun
1611*4882a593Smuzhiyun return page_head[1].compound_dtor == HUGETLB_PAGE_DTOR;
1612*4882a593Smuzhiyun }
1613*4882a593Smuzhiyun
1614*4882a593Smuzhiyun /*
1615*4882a593Smuzhiyun * Find and lock address space (mapping) in write mode.
1616*4882a593Smuzhiyun *
1617*4882a593Smuzhiyun * Upon entry, the page is locked which means that page_mapping() is
1618*4882a593Smuzhiyun  * stable. Due to locking order, we can only trylock_write. If we
1619*4882a593Smuzhiyun  * cannot get the lock, simply return NULL to the caller.
1620*4882a593Smuzhiyun */
1621*4882a593Smuzhiyun struct address_space *hugetlb_page_mapping_lock_write(struct page *hpage)
1622*4882a593Smuzhiyun {
1623*4882a593Smuzhiyun struct address_space *mapping = page_mapping(hpage);
1624*4882a593Smuzhiyun
1625*4882a593Smuzhiyun if (!mapping)
1626*4882a593Smuzhiyun return mapping;
1627*4882a593Smuzhiyun
1628*4882a593Smuzhiyun if (i_mmap_trylock_write(mapping))
1629*4882a593Smuzhiyun return mapping;
1630*4882a593Smuzhiyun
1631*4882a593Smuzhiyun return NULL;
1632*4882a593Smuzhiyun }
1633*4882a593Smuzhiyun
1634*4882a593Smuzhiyun pgoff_t hugetlb_basepage_index(struct page *page)
1635*4882a593Smuzhiyun {
1636*4882a593Smuzhiyun struct page *page_head = compound_head(page);
1637*4882a593Smuzhiyun pgoff_t index = page_index(page_head);
1638*4882a593Smuzhiyun unsigned long compound_idx;
1639*4882a593Smuzhiyun
1640*4882a593Smuzhiyun if (compound_order(page_head) >= MAX_ORDER)
1641*4882a593Smuzhiyun compound_idx = page_to_pfn(page) - page_to_pfn(page_head);
1642*4882a593Smuzhiyun else
1643*4882a593Smuzhiyun compound_idx = page - page_head;
1644*4882a593Smuzhiyun
1645*4882a593Smuzhiyun return (index << compound_order(page_head)) + compound_idx;
1646*4882a593Smuzhiyun }
1647*4882a593Smuzhiyun
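/*
 * Worked example for hugetlb_basepage_index(), using hypothetical numbers:
 * for a 2MB hugepage with 4KB base pages (compound_order == 9, i.e. 512
 * base pages) whose head page has page_index(page_head) == 3, the base
 * page at offset 17 within the hugepage yields
 *
 *	(3 << 9) + 17 == 1553
 *
 * i.e. its index into the file in PAGE_SIZE units. The pfn-based branch
 * is needed for gigantic pages (order >= MAX_ORDER), where the tail
 * struct pages are not guaranteed to be contiguous with the head in
 * memory, so plain pointer arithmetic cannot be used.
 */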
1648*4882a593Smuzhiyun static struct page *alloc_buddy_huge_page(struct hstate *h,
1649*4882a593Smuzhiyun gfp_t gfp_mask, int nid, nodemask_t *nmask,
1650*4882a593Smuzhiyun nodemask_t *node_alloc_noretry)
1651*4882a593Smuzhiyun {
1652*4882a593Smuzhiyun int order = huge_page_order(h);
1653*4882a593Smuzhiyun struct page *page;
1654*4882a593Smuzhiyun bool alloc_try_hard = true;
1655*4882a593Smuzhiyun
1656*4882a593Smuzhiyun /*
1657*4882a593Smuzhiyun * By default we always try hard to allocate the page with
1658*4882a593Smuzhiyun * __GFP_RETRY_MAYFAIL flag. However, if we are allocating pages in
1659*4882a593Smuzhiyun * a loop (to adjust global huge page counts) and previous allocation
1660*4882a593Smuzhiyun * failed, do not continue to try hard on the same node. Use the
1661*4882a593Smuzhiyun * node_alloc_noretry bitmap to manage this state information.
1662*4882a593Smuzhiyun */
1663*4882a593Smuzhiyun if (node_alloc_noretry && node_isset(nid, *node_alloc_noretry))
1664*4882a593Smuzhiyun alloc_try_hard = false;
1665*4882a593Smuzhiyun gfp_mask |= __GFP_COMP|__GFP_NOWARN;
1666*4882a593Smuzhiyun if (alloc_try_hard)
1667*4882a593Smuzhiyun gfp_mask |= __GFP_RETRY_MAYFAIL;
1668*4882a593Smuzhiyun if (nid == NUMA_NO_NODE)
1669*4882a593Smuzhiyun nid = numa_mem_id();
1670*4882a593Smuzhiyun page = __alloc_pages_nodemask(gfp_mask, order, nid, nmask);
1671*4882a593Smuzhiyun if (page)
1672*4882a593Smuzhiyun __count_vm_event(HTLB_BUDDY_PGALLOC);
1673*4882a593Smuzhiyun else
1674*4882a593Smuzhiyun __count_vm_event(HTLB_BUDDY_PGALLOC_FAIL);
1675*4882a593Smuzhiyun
1676*4882a593Smuzhiyun /*
1677*4882a593Smuzhiyun 	 * If we did not specify __GFP_RETRY_MAYFAIL, but still got a page,
1678*4882a593Smuzhiyun 	 * this indicates an overall state change. Clear the bit so that we resume
1679*4882a593Smuzhiyun * normal 'try hard' allocations.
1680*4882a593Smuzhiyun */
1681*4882a593Smuzhiyun if (node_alloc_noretry && page && !alloc_try_hard)
1682*4882a593Smuzhiyun node_clear(nid, *node_alloc_noretry);
1683*4882a593Smuzhiyun
1684*4882a593Smuzhiyun /*
1685*4882a593Smuzhiyun * If we tried hard to get a page but failed, set bit so that
1686*4882a593Smuzhiyun * subsequent attempts will not try as hard until there is an
1687*4882a593Smuzhiyun * overall state change.
1688*4882a593Smuzhiyun */
1689*4882a593Smuzhiyun if (node_alloc_noretry && !page && alloc_try_hard)
1690*4882a593Smuzhiyun node_set(nid, *node_alloc_noretry);
1691*4882a593Smuzhiyun
1692*4882a593Smuzhiyun return page;
1693*4882a593Smuzhiyun }
1694*4882a593Smuzhiyun
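/*
 * Sketch of the node_alloc_noretry bookkeeping implemented above, with a
 * hypothetical node numbering and assuming a caller that loops over nodes
 * while adjusting the pool size:
 *
 *	node 1: try-hard allocation fails
 *		-> node_set(1, *node_alloc_noretry)	// stop retrying there
 *	node 1: later allocation succeeds without trying hard
 *		-> node_clear(1, *node_alloc_noretry)	// pressure eased,
 *							// resume trying hard
 */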
1695*4882a593Smuzhiyun /*
1696*4882a593Smuzhiyun * Common helper to allocate a fresh hugetlb page. All specific allocators
1697*4882a593Smuzhiyun * should use this function to get new hugetlb pages
1698*4882a593Smuzhiyun */
1699*4882a593Smuzhiyun static struct page *alloc_fresh_huge_page(struct hstate *h,
1700*4882a593Smuzhiyun gfp_t gfp_mask, int nid, nodemask_t *nmask,
1701*4882a593Smuzhiyun nodemask_t *node_alloc_noretry)
1702*4882a593Smuzhiyun {
1703*4882a593Smuzhiyun struct page *page;
1704*4882a593Smuzhiyun
1705*4882a593Smuzhiyun if (hstate_is_gigantic(h))
1706*4882a593Smuzhiyun page = alloc_gigantic_page(h, gfp_mask, nid, nmask);
1707*4882a593Smuzhiyun else
1708*4882a593Smuzhiyun page = alloc_buddy_huge_page(h, gfp_mask,
1709*4882a593Smuzhiyun nid, nmask, node_alloc_noretry);
1710*4882a593Smuzhiyun if (!page)
1711*4882a593Smuzhiyun return NULL;
1712*4882a593Smuzhiyun
1713*4882a593Smuzhiyun if (hstate_is_gigantic(h))
1714*4882a593Smuzhiyun prep_compound_gigantic_page(page, huge_page_order(h));
1715*4882a593Smuzhiyun prep_new_huge_page(h, page, page_to_nid(page));
1716*4882a593Smuzhiyun
1717*4882a593Smuzhiyun return page;
1718*4882a593Smuzhiyun }
1719*4882a593Smuzhiyun
1720*4882a593Smuzhiyun /*
1721*4882a593Smuzhiyun * Allocates a fresh page to the hugetlb allocator pool in the node interleaved
1722*4882a593Smuzhiyun * manner.
1723*4882a593Smuzhiyun */
1724*4882a593Smuzhiyun static int alloc_pool_huge_page(struct hstate *h, nodemask_t *nodes_allowed,
1725*4882a593Smuzhiyun nodemask_t *node_alloc_noretry)
1726*4882a593Smuzhiyun {
1727*4882a593Smuzhiyun struct page *page;
1728*4882a593Smuzhiyun int nr_nodes, node;
1729*4882a593Smuzhiyun gfp_t gfp_mask = htlb_alloc_mask(h) | __GFP_THISNODE;
1730*4882a593Smuzhiyun
1731*4882a593Smuzhiyun for_each_node_mask_to_alloc(h, nr_nodes, node, nodes_allowed) {
1732*4882a593Smuzhiyun page = alloc_fresh_huge_page(h, gfp_mask, node, nodes_allowed,
1733*4882a593Smuzhiyun node_alloc_noretry);
1734*4882a593Smuzhiyun if (page)
1735*4882a593Smuzhiyun break;
1736*4882a593Smuzhiyun }
1737*4882a593Smuzhiyun
1738*4882a593Smuzhiyun if (!page)
1739*4882a593Smuzhiyun return 0;
1740*4882a593Smuzhiyun
1741*4882a593Smuzhiyun put_page(page); /* free it into the hugepage allocator */
1742*4882a593Smuzhiyun
1743*4882a593Smuzhiyun return 1;
1744*4882a593Smuzhiyun }
1745*4882a593Smuzhiyun
1746*4882a593Smuzhiyun /*
1747*4882a593Smuzhiyun * Free huge page from pool from next node to free.
1748*4882a593Smuzhiyun * Attempt to keep persistent huge pages more or less
1749*4882a593Smuzhiyun * balanced over allowed nodes.
1750*4882a593Smuzhiyun * Called with hugetlb_lock locked.
1751*4882a593Smuzhiyun */
1752*4882a593Smuzhiyun static int free_pool_huge_page(struct hstate *h, nodemask_t *nodes_allowed,
1753*4882a593Smuzhiyun bool acct_surplus)
1754*4882a593Smuzhiyun {
1755*4882a593Smuzhiyun int nr_nodes, node;
1756*4882a593Smuzhiyun int ret = 0;
1757*4882a593Smuzhiyun
1758*4882a593Smuzhiyun for_each_node_mask_to_free(h, nr_nodes, node, nodes_allowed) {
1759*4882a593Smuzhiyun /*
1760*4882a593Smuzhiyun * If we're returning unused surplus pages, only examine
1761*4882a593Smuzhiyun * nodes with surplus pages.
1762*4882a593Smuzhiyun */
1763*4882a593Smuzhiyun if ((!acct_surplus || h->surplus_huge_pages_node[node]) &&
1764*4882a593Smuzhiyun !list_empty(&h->hugepage_freelists[node])) {
1765*4882a593Smuzhiyun struct page *page =
1766*4882a593Smuzhiyun list_entry(h->hugepage_freelists[node].next,
1767*4882a593Smuzhiyun struct page, lru);
1768*4882a593Smuzhiyun list_del(&page->lru);
1769*4882a593Smuzhiyun h->free_huge_pages--;
1770*4882a593Smuzhiyun h->free_huge_pages_node[node]--;
1771*4882a593Smuzhiyun if (acct_surplus) {
1772*4882a593Smuzhiyun h->surplus_huge_pages--;
1773*4882a593Smuzhiyun h->surplus_huge_pages_node[node]--;
1774*4882a593Smuzhiyun }
1775*4882a593Smuzhiyun update_and_free_page(h, page);
1776*4882a593Smuzhiyun ret = 1;
1777*4882a593Smuzhiyun break;
1778*4882a593Smuzhiyun }
1779*4882a593Smuzhiyun }
1780*4882a593Smuzhiyun
1781*4882a593Smuzhiyun return ret;
1782*4882a593Smuzhiyun }
1783*4882a593Smuzhiyun
1784*4882a593Smuzhiyun /*
1785*4882a593Smuzhiyun * Dissolve a given free hugepage into free buddy pages. This function does
1786*4882a593Smuzhiyun * nothing for in-use hugepages and non-hugepages.
1787*4882a593Smuzhiyun * This function returns values like below:
1788*4882a593Smuzhiyun *
1789*4882a593Smuzhiyun  *  -EBUSY: failed to dissolve free hugepages or the hugepage is in-use
1790*4882a593Smuzhiyun * (allocated or reserved.)
1791*4882a593Smuzhiyun * 0: successfully dissolved free hugepages or the page is not a
1792*4882a593Smuzhiyun * hugepage (considered as already dissolved)
1793*4882a593Smuzhiyun */
1794*4882a593Smuzhiyun int dissolve_free_huge_page(struct page *page)
1795*4882a593Smuzhiyun {
1796*4882a593Smuzhiyun int rc = -EBUSY;
1797*4882a593Smuzhiyun
1798*4882a593Smuzhiyun retry:
1799*4882a593Smuzhiyun /* Not to disrupt normal path by vainly holding hugetlb_lock */
1800*4882a593Smuzhiyun if (!PageHuge(page))
1801*4882a593Smuzhiyun return 0;
1802*4882a593Smuzhiyun
1803*4882a593Smuzhiyun spin_lock(&hugetlb_lock);
1804*4882a593Smuzhiyun if (!PageHuge(page)) {
1805*4882a593Smuzhiyun rc = 0;
1806*4882a593Smuzhiyun goto out;
1807*4882a593Smuzhiyun }
1808*4882a593Smuzhiyun
1809*4882a593Smuzhiyun if (!page_count(page)) {
1810*4882a593Smuzhiyun struct page *head = compound_head(page);
1811*4882a593Smuzhiyun struct hstate *h = page_hstate(head);
1812*4882a593Smuzhiyun int nid = page_to_nid(head);
1813*4882a593Smuzhiyun if (h->free_huge_pages - h->resv_huge_pages == 0)
1814*4882a593Smuzhiyun goto out;
1815*4882a593Smuzhiyun
1816*4882a593Smuzhiyun /*
1817*4882a593Smuzhiyun * We should make sure that the page is already on the free list
1818*4882a593Smuzhiyun * when it is dissolved.
1819*4882a593Smuzhiyun */
1820*4882a593Smuzhiyun if (unlikely(!PageHugeFreed(head))) {
1821*4882a593Smuzhiyun spin_unlock(&hugetlb_lock);
1822*4882a593Smuzhiyun cond_resched();
1823*4882a593Smuzhiyun
1824*4882a593Smuzhiyun /*
1825*4882a593Smuzhiyun 			 * Theoretically, we should return -EBUSY when we
1826*4882a593Smuzhiyun 			 * encounter this race. In fact, we have a chance
1827*4882a593Smuzhiyun 			 * to successfully dissolve the page if we retry,
1828*4882a593Smuzhiyun 			 * because the race window is quite small. Seizing
1829*4882a593Smuzhiyun 			 * this opportunity is an optimization that increases
1830*4882a593Smuzhiyun 			 * the success rate of dissolving the page.
1831*4882a593Smuzhiyun */
1832*4882a593Smuzhiyun goto retry;
1833*4882a593Smuzhiyun }
1834*4882a593Smuzhiyun
1835*4882a593Smuzhiyun /*
1836*4882a593Smuzhiyun * Move PageHWPoison flag from head page to the raw error page,
1837*4882a593Smuzhiyun 		 * which makes all subpages other than the error page reusable.
1838*4882a593Smuzhiyun */
1839*4882a593Smuzhiyun if (PageHWPoison(head) && page != head) {
1840*4882a593Smuzhiyun SetPageHWPoison(page);
1841*4882a593Smuzhiyun ClearPageHWPoison(head);
1842*4882a593Smuzhiyun }
1843*4882a593Smuzhiyun list_del(&head->lru);
1844*4882a593Smuzhiyun h->free_huge_pages--;
1845*4882a593Smuzhiyun h->free_huge_pages_node[nid]--;
1846*4882a593Smuzhiyun h->max_huge_pages--;
1847*4882a593Smuzhiyun update_and_free_page(h, head);
1848*4882a593Smuzhiyun rc = 0;
1849*4882a593Smuzhiyun }
1850*4882a593Smuzhiyun out:
1851*4882a593Smuzhiyun spin_unlock(&hugetlb_lock);
1852*4882a593Smuzhiyun return rc;
1853*4882a593Smuzhiyun }
1854*4882a593Smuzhiyun
1855*4882a593Smuzhiyun /*
1856*4882a593Smuzhiyun * Dissolve free hugepages in a given pfn range. Used by memory hotplug to
1857*4882a593Smuzhiyun * make specified memory blocks removable from the system.
1858*4882a593Smuzhiyun * Note that this will dissolve a free gigantic hugepage completely, if any
1859*4882a593Smuzhiyun * part of it lies within the given range.
1860*4882a593Smuzhiyun * Also note that if dissolve_free_huge_page() returns with an error, all
1861*4882a593Smuzhiyun * free hugepages that were dissolved before that error are lost.
1862*4882a593Smuzhiyun */
1863*4882a593Smuzhiyun int dissolve_free_huge_pages(unsigned long start_pfn, unsigned long end_pfn)
1864*4882a593Smuzhiyun {
1865*4882a593Smuzhiyun unsigned long pfn;
1866*4882a593Smuzhiyun struct page *page;
1867*4882a593Smuzhiyun int rc = 0;
1868*4882a593Smuzhiyun
1869*4882a593Smuzhiyun if (!hugepages_supported())
1870*4882a593Smuzhiyun return rc;
1871*4882a593Smuzhiyun
1872*4882a593Smuzhiyun for (pfn = start_pfn; pfn < end_pfn; pfn += 1 << minimum_order) {
1873*4882a593Smuzhiyun page = pfn_to_page(pfn);
1874*4882a593Smuzhiyun rc = dissolve_free_huge_page(page);
1875*4882a593Smuzhiyun if (rc)
1876*4882a593Smuzhiyun break;
1877*4882a593Smuzhiyun }
1878*4882a593Smuzhiyun
1879*4882a593Smuzhiyun return rc;
1880*4882a593Smuzhiyun }
1881*4882a593Smuzhiyun
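/*
 * Worked example for the loop above, with hypothetical values: if the
 * smallest configured hugepage size is 2MB with 4KB base pages
 * (minimum_order == 9), the range is scanned in steps of 1 << 9 == 512
 * pfns. A pfn that is not part of any hugepage is reported as "already
 * dissolved" (rc == 0) by dissolve_free_huge_page(), while a pfn that
 * lands anywhere inside a free gigantic page dissolves that entire page,
 * as noted in the comment above.
 */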
1882*4882a593Smuzhiyun /*
1883*4882a593Smuzhiyun * Allocates a fresh surplus page from the page allocator.
1884*4882a593Smuzhiyun */
1885*4882a593Smuzhiyun static struct page *alloc_surplus_huge_page(struct hstate *h, gfp_t gfp_mask,
1886*4882a593Smuzhiyun int nid, nodemask_t *nmask)
1887*4882a593Smuzhiyun {
1888*4882a593Smuzhiyun struct page *page = NULL;
1889*4882a593Smuzhiyun
1890*4882a593Smuzhiyun if (hstate_is_gigantic(h))
1891*4882a593Smuzhiyun return NULL;
1892*4882a593Smuzhiyun
1893*4882a593Smuzhiyun spin_lock(&hugetlb_lock);
1894*4882a593Smuzhiyun if (h->surplus_huge_pages >= h->nr_overcommit_huge_pages)
1895*4882a593Smuzhiyun goto out_unlock;
1896*4882a593Smuzhiyun spin_unlock(&hugetlb_lock);
1897*4882a593Smuzhiyun
1898*4882a593Smuzhiyun page = alloc_fresh_huge_page(h, gfp_mask, nid, nmask, NULL);
1899*4882a593Smuzhiyun if (!page)
1900*4882a593Smuzhiyun return NULL;
1901*4882a593Smuzhiyun
1902*4882a593Smuzhiyun spin_lock(&hugetlb_lock);
1903*4882a593Smuzhiyun /*
1904*4882a593Smuzhiyun * We could have raced with the pool size change.
1905*4882a593Smuzhiyun * Double check that and simply deallocate the new page
1906*4882a593Smuzhiyun 	 * if we would end up overcommitting the surpluses. Abuse the
1907*4882a593Smuzhiyun 	 * temporary page to work around the nasty free_huge_page
1908*4882a593Smuzhiyun 	 * codeflow.
1909*4882a593Smuzhiyun */
1910*4882a593Smuzhiyun if (h->surplus_huge_pages >= h->nr_overcommit_huge_pages) {
1911*4882a593Smuzhiyun SetPageHugeTemporary(page);
1912*4882a593Smuzhiyun spin_unlock(&hugetlb_lock);
1913*4882a593Smuzhiyun put_page(page);
1914*4882a593Smuzhiyun return NULL;
1915*4882a593Smuzhiyun } else {
1916*4882a593Smuzhiyun h->surplus_huge_pages++;
1917*4882a593Smuzhiyun h->surplus_huge_pages_node[page_to_nid(page)]++;
1918*4882a593Smuzhiyun }
1919*4882a593Smuzhiyun
1920*4882a593Smuzhiyun out_unlock:
1921*4882a593Smuzhiyun spin_unlock(&hugetlb_lock);
1922*4882a593Smuzhiyun
1923*4882a593Smuzhiyun return page;
1924*4882a593Smuzhiyun }
1925*4882a593Smuzhiyun
1926*4882a593Smuzhiyun static struct page *alloc_migrate_huge_page(struct hstate *h, gfp_t gfp_mask,
1927*4882a593Smuzhiyun int nid, nodemask_t *nmask)
1928*4882a593Smuzhiyun {
1929*4882a593Smuzhiyun struct page *page;
1930*4882a593Smuzhiyun
1931*4882a593Smuzhiyun if (hstate_is_gigantic(h))
1932*4882a593Smuzhiyun return NULL;
1933*4882a593Smuzhiyun
1934*4882a593Smuzhiyun page = alloc_fresh_huge_page(h, gfp_mask, nid, nmask, NULL);
1935*4882a593Smuzhiyun if (!page)
1936*4882a593Smuzhiyun return NULL;
1937*4882a593Smuzhiyun
1938*4882a593Smuzhiyun /*
1939*4882a593Smuzhiyun * We do not account these pages as surplus because they are only
1940*4882a593Smuzhiyun * temporary and will be released properly on the last reference
1941*4882a593Smuzhiyun */
1942*4882a593Smuzhiyun SetPageHugeTemporary(page);
1943*4882a593Smuzhiyun
1944*4882a593Smuzhiyun return page;
1945*4882a593Smuzhiyun }
1946*4882a593Smuzhiyun
1947*4882a593Smuzhiyun /*
1948*4882a593Smuzhiyun * Use the VMA's mpolicy to allocate a huge page from the buddy.
1949*4882a593Smuzhiyun */
1950*4882a593Smuzhiyun static
1951*4882a593Smuzhiyun struct page *alloc_buddy_huge_page_with_mpol(struct hstate *h,
1952*4882a593Smuzhiyun struct vm_area_struct *vma, unsigned long addr)
1953*4882a593Smuzhiyun {
1954*4882a593Smuzhiyun struct page *page;
1955*4882a593Smuzhiyun struct mempolicy *mpol;
1956*4882a593Smuzhiyun gfp_t gfp_mask = htlb_alloc_mask(h);
1957*4882a593Smuzhiyun int nid;
1958*4882a593Smuzhiyun nodemask_t *nodemask;
1959*4882a593Smuzhiyun
1960*4882a593Smuzhiyun nid = huge_node(vma, addr, gfp_mask, &mpol, &nodemask);
1961*4882a593Smuzhiyun page = alloc_surplus_huge_page(h, gfp_mask, nid, nodemask);
1962*4882a593Smuzhiyun mpol_cond_put(mpol);
1963*4882a593Smuzhiyun
1964*4882a593Smuzhiyun return page;
1965*4882a593Smuzhiyun }
1966*4882a593Smuzhiyun
1967*4882a593Smuzhiyun /* page migration callback function */
1968*4882a593Smuzhiyun struct page *alloc_huge_page_nodemask(struct hstate *h, int preferred_nid,
1969*4882a593Smuzhiyun nodemask_t *nmask, gfp_t gfp_mask)
1970*4882a593Smuzhiyun {
1971*4882a593Smuzhiyun spin_lock(&hugetlb_lock);
1972*4882a593Smuzhiyun if (h->free_huge_pages - h->resv_huge_pages > 0) {
1973*4882a593Smuzhiyun struct page *page;
1974*4882a593Smuzhiyun
1975*4882a593Smuzhiyun page = dequeue_huge_page_nodemask(h, gfp_mask, preferred_nid, nmask);
1976*4882a593Smuzhiyun if (page) {
1977*4882a593Smuzhiyun spin_unlock(&hugetlb_lock);
1978*4882a593Smuzhiyun return page;
1979*4882a593Smuzhiyun }
1980*4882a593Smuzhiyun }
1981*4882a593Smuzhiyun spin_unlock(&hugetlb_lock);
1982*4882a593Smuzhiyun
1983*4882a593Smuzhiyun return alloc_migrate_huge_page(h, gfp_mask, preferred_nid, nmask);
1984*4882a593Smuzhiyun }
1985*4882a593Smuzhiyun
1986*4882a593Smuzhiyun /* mempolicy aware migration callback */
1987*4882a593Smuzhiyun struct page *alloc_huge_page_vma(struct hstate *h, struct vm_area_struct *vma,
1988*4882a593Smuzhiyun unsigned long address)
1989*4882a593Smuzhiyun {
1990*4882a593Smuzhiyun struct mempolicy *mpol;
1991*4882a593Smuzhiyun nodemask_t *nodemask;
1992*4882a593Smuzhiyun struct page *page;
1993*4882a593Smuzhiyun gfp_t gfp_mask;
1994*4882a593Smuzhiyun int node;
1995*4882a593Smuzhiyun
1996*4882a593Smuzhiyun gfp_mask = htlb_alloc_mask(h);
1997*4882a593Smuzhiyun node = huge_node(vma, address, gfp_mask, &mpol, &nodemask);
1998*4882a593Smuzhiyun page = alloc_huge_page_nodemask(h, node, nodemask, gfp_mask);
1999*4882a593Smuzhiyun mpol_cond_put(mpol);
2000*4882a593Smuzhiyun
2001*4882a593Smuzhiyun return page;
2002*4882a593Smuzhiyun }
2003*4882a593Smuzhiyun
2004*4882a593Smuzhiyun /*
2005*4882a593Smuzhiyun * Increase the hugetlb pool such that it can accommodate a reservation
2006*4882a593Smuzhiyun * of size 'delta'.
2007*4882a593Smuzhiyun */
2008*4882a593Smuzhiyun static int gather_surplus_pages(struct hstate *h, int delta)
2009*4882a593Smuzhiyun __must_hold(&hugetlb_lock)
2010*4882a593Smuzhiyun {
2011*4882a593Smuzhiyun struct list_head surplus_list;
2012*4882a593Smuzhiyun struct page *page, *tmp;
2013*4882a593Smuzhiyun int ret, i;
2014*4882a593Smuzhiyun int needed, allocated;
2015*4882a593Smuzhiyun bool alloc_ok = true;
2016*4882a593Smuzhiyun
2017*4882a593Smuzhiyun needed = (h->resv_huge_pages + delta) - h->free_huge_pages;
2018*4882a593Smuzhiyun if (needed <= 0) {
2019*4882a593Smuzhiyun h->resv_huge_pages += delta;
2020*4882a593Smuzhiyun return 0;
2021*4882a593Smuzhiyun }
2022*4882a593Smuzhiyun
2023*4882a593Smuzhiyun allocated = 0;
2024*4882a593Smuzhiyun INIT_LIST_HEAD(&surplus_list);
2025*4882a593Smuzhiyun
2026*4882a593Smuzhiyun ret = -ENOMEM;
2027*4882a593Smuzhiyun retry:
2028*4882a593Smuzhiyun spin_unlock(&hugetlb_lock);
2029*4882a593Smuzhiyun for (i = 0; i < needed; i++) {
2030*4882a593Smuzhiyun page = alloc_surplus_huge_page(h, htlb_alloc_mask(h),
2031*4882a593Smuzhiyun NUMA_NO_NODE, NULL);
2032*4882a593Smuzhiyun if (!page) {
2033*4882a593Smuzhiyun alloc_ok = false;
2034*4882a593Smuzhiyun break;
2035*4882a593Smuzhiyun }
2036*4882a593Smuzhiyun list_add(&page->lru, &surplus_list);
2037*4882a593Smuzhiyun cond_resched();
2038*4882a593Smuzhiyun }
2039*4882a593Smuzhiyun allocated += i;
2040*4882a593Smuzhiyun
2041*4882a593Smuzhiyun /*
2042*4882a593Smuzhiyun * After retaking hugetlb_lock, we need to recalculate 'needed'
2043*4882a593Smuzhiyun * because either resv_huge_pages or free_huge_pages may have changed.
2044*4882a593Smuzhiyun */
2045*4882a593Smuzhiyun spin_lock(&hugetlb_lock);
2046*4882a593Smuzhiyun needed = (h->resv_huge_pages + delta) -
2047*4882a593Smuzhiyun (h->free_huge_pages + allocated);
2048*4882a593Smuzhiyun if (needed > 0) {
2049*4882a593Smuzhiyun if (alloc_ok)
2050*4882a593Smuzhiyun goto retry;
2051*4882a593Smuzhiyun /*
2052*4882a593Smuzhiyun * We were not able to allocate enough pages to
2053*4882a593Smuzhiyun * satisfy the entire reservation so we free what
2054*4882a593Smuzhiyun * we've allocated so far.
2055*4882a593Smuzhiyun */
2056*4882a593Smuzhiyun goto free;
2057*4882a593Smuzhiyun }
2058*4882a593Smuzhiyun /*
2059*4882a593Smuzhiyun * The surplus_list now contains _at_least_ the number of extra pages
2060*4882a593Smuzhiyun * needed to accommodate the reservation. Add the appropriate number
2061*4882a593Smuzhiyun * of pages to the hugetlb pool and free the extras back to the buddy
2062*4882a593Smuzhiyun * allocator. Commit the entire reservation here to prevent another
2063*4882a593Smuzhiyun * process from stealing the pages as they are added to the pool but
2064*4882a593Smuzhiyun * before they are reserved.
2065*4882a593Smuzhiyun */
2066*4882a593Smuzhiyun needed += allocated;
2067*4882a593Smuzhiyun h->resv_huge_pages += delta;
2068*4882a593Smuzhiyun ret = 0;
2069*4882a593Smuzhiyun
2070*4882a593Smuzhiyun /* Free the needed pages to the hugetlb pool */
2071*4882a593Smuzhiyun list_for_each_entry_safe(page, tmp, &surplus_list, lru) {
2072*4882a593Smuzhiyun if ((--needed) < 0)
2073*4882a593Smuzhiyun break;
2074*4882a593Smuzhiyun /*
2075*4882a593Smuzhiyun * This page is now managed by the hugetlb allocator and has
2076*4882a593Smuzhiyun * no users -- drop the buddy allocator's reference.
2077*4882a593Smuzhiyun */
2078*4882a593Smuzhiyun put_page_testzero(page);
2079*4882a593Smuzhiyun VM_BUG_ON_PAGE(page_count(page), page);
2080*4882a593Smuzhiyun enqueue_huge_page(h, page);
2081*4882a593Smuzhiyun }
2082*4882a593Smuzhiyun free:
2083*4882a593Smuzhiyun spin_unlock(&hugetlb_lock);
2084*4882a593Smuzhiyun
2085*4882a593Smuzhiyun /* Free unnecessary surplus pages to the buddy allocator */
2086*4882a593Smuzhiyun list_for_each_entry_safe(page, tmp, &surplus_list, lru)
2087*4882a593Smuzhiyun put_page(page);
2088*4882a593Smuzhiyun spin_lock(&hugetlb_lock);
2089*4882a593Smuzhiyun
2090*4882a593Smuzhiyun return ret;
2091*4882a593Smuzhiyun }
2092*4882a593Smuzhiyun
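/*
 * Worked example for gather_surplus_pages(), with hypothetical counters:
 * suppose delta == 4, resv_huge_pages == 2 and free_huge_pages == 3 on
 * entry. Then needed == (2 + 4) - 3 == 3, so three surplus pages are
 * allocated with the lock dropped. If another task freed one page in the
 * meantime (free_huge_pages == 4), the recalculated needed is
 * (2 + 4) - (4 + 3) == -1, so no retry is required. The reservation is
 * committed (resv_huge_pages becomes 6), needed becomes -1 + 3 == 2, two
 * of the freshly allocated pages are enqueued into the pool and the
 * remaining one is handed back to the buddy allocator.
 */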
2093*4882a593Smuzhiyun /*
2094*4882a593Smuzhiyun * This routine has two main purposes:
2095*4882a593Smuzhiyun * 1) Decrement the reservation count (resv_huge_pages) by the value passed
2096*4882a593Smuzhiyun * in unused_resv_pages. This corresponds to the prior adjustments made
2097*4882a593Smuzhiyun * to the associated reservation map.
2098*4882a593Smuzhiyun * 2) Free any unused surplus pages that may have been allocated to satisfy
2099*4882a593Smuzhiyun * the reservation. As many as unused_resv_pages may be freed.
2100*4882a593Smuzhiyun *
2101*4882a593Smuzhiyun * Called with hugetlb_lock held. However, the lock could be dropped (and
2102*4882a593Smuzhiyun * reacquired) during calls to cond_resched_lock. Whenever dropping the lock,
2103*4882a593Smuzhiyun * we must make sure nobody else can claim pages we are in the process of
2104*4882a593Smuzhiyun  * freeing. Do this by ensuring resv_huge_pages is always greater than the
2105*4882a593Smuzhiyun * number of huge pages we plan to free when dropping the lock.
2106*4882a593Smuzhiyun */
2107*4882a593Smuzhiyun static void return_unused_surplus_pages(struct hstate *h,
2108*4882a593Smuzhiyun unsigned long unused_resv_pages)
2109*4882a593Smuzhiyun {
2110*4882a593Smuzhiyun unsigned long nr_pages;
2111*4882a593Smuzhiyun
2112*4882a593Smuzhiyun /* Cannot return gigantic pages currently */
2113*4882a593Smuzhiyun if (hstate_is_gigantic(h))
2114*4882a593Smuzhiyun goto out;
2115*4882a593Smuzhiyun
2116*4882a593Smuzhiyun /*
2117*4882a593Smuzhiyun * Part (or even all) of the reservation could have been backed
2118*4882a593Smuzhiyun * by pre-allocated pages. Only free surplus pages.
2119*4882a593Smuzhiyun */
2120*4882a593Smuzhiyun nr_pages = min(unused_resv_pages, h->surplus_huge_pages);
2121*4882a593Smuzhiyun
2122*4882a593Smuzhiyun /*
2123*4882a593Smuzhiyun * We want to release as many surplus pages as possible, spread
2124*4882a593Smuzhiyun * evenly across all nodes with memory. Iterate across these nodes
2125*4882a593Smuzhiyun * until we can no longer free unreserved surplus pages. This occurs
2126*4882a593Smuzhiyun * when the nodes with surplus pages have no free pages.
2127*4882a593Smuzhiyun * free_pool_huge_page() will balance the freed pages across the
2128*4882a593Smuzhiyun * on-line nodes with memory and will handle the hstate accounting.
2129*4882a593Smuzhiyun *
2130*4882a593Smuzhiyun * Note that we decrement resv_huge_pages as we free the pages. If
2131*4882a593Smuzhiyun * we drop the lock, resv_huge_pages will still be sufficiently large
2132*4882a593Smuzhiyun * to cover subsequent pages we may free.
2133*4882a593Smuzhiyun */
2134*4882a593Smuzhiyun while (nr_pages--) {
2135*4882a593Smuzhiyun h->resv_huge_pages--;
2136*4882a593Smuzhiyun unused_resv_pages--;
2137*4882a593Smuzhiyun if (!free_pool_huge_page(h, &node_states[N_MEMORY], 1))
2138*4882a593Smuzhiyun goto out;
2139*4882a593Smuzhiyun cond_resched_lock(&hugetlb_lock);
2140*4882a593Smuzhiyun }
2141*4882a593Smuzhiyun
2142*4882a593Smuzhiyun out:
2143*4882a593Smuzhiyun /* Fully uncommit the reservation */
2144*4882a593Smuzhiyun h->resv_huge_pages -= unused_resv_pages;
2145*4882a593Smuzhiyun }
2146*4882a593Smuzhiyun
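/*
 * Worked example for return_unused_surplus_pages(), with hypothetical
 * counters: if unused_resv_pages == 3 but surplus_huge_pages == 2, then
 * nr_pages == 2 and at most two surplus pages are freed back to the buddy
 * allocator; the final adjustment at 'out:' still uncommits the full
 * reservation of 3, and the third previously reserved page simply remains
 * in the pool as an ordinary free hugepage. Note that resv_huge_pages is
 * decremented one step ahead of each free, so concurrent allocators never
 * see reserved pages they could claim while the lock is dropped in
 * cond_resched_lock().
 */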
2147*4882a593Smuzhiyun
2148*4882a593Smuzhiyun /*
2149*4882a593Smuzhiyun * vma_needs_reservation, vma_commit_reservation and vma_end_reservation
2150*4882a593Smuzhiyun * are used by the huge page allocation routines to manage reservations.
2151*4882a593Smuzhiyun *
2152*4882a593Smuzhiyun * vma_needs_reservation is called to determine if the huge page at addr
2153*4882a593Smuzhiyun * within the vma has an associated reservation. If a reservation is
2154*4882a593Smuzhiyun * needed, the value 1 is returned. The caller is then responsible for
2155*4882a593Smuzhiyun * managing the global reservation and subpool usage counts. After
2156*4882a593Smuzhiyun * the huge page has been allocated, vma_commit_reservation is called
2157*4882a593Smuzhiyun * to add the page to the reservation map. If the page allocation fails,
2158*4882a593Smuzhiyun * the reservation must be ended instead of committed. vma_end_reservation
2159*4882a593Smuzhiyun * is called in such cases.
2160*4882a593Smuzhiyun *
2161*4882a593Smuzhiyun * In the normal case, vma_commit_reservation returns the same value
2162*4882a593Smuzhiyun * as the preceding vma_needs_reservation call. The only time this
2163*4882a593Smuzhiyun * is not the case is if a reserve map was changed between calls. It
2164*4882a593Smuzhiyun * is the responsibility of the caller to notice the difference and
2165*4882a593Smuzhiyun * take appropriate action.
2166*4882a593Smuzhiyun *
2167*4882a593Smuzhiyun * vma_add_reservation is used in error paths where a reservation must
2168*4882a593Smuzhiyun * be restored when a newly allocated huge page must be freed. It is
2169*4882a593Smuzhiyun * to be called after calling vma_needs_reservation to determine if a
2170*4882a593Smuzhiyun * reservation exists.
2171*4882a593Smuzhiyun */
2172*4882a593Smuzhiyun enum vma_resv_mode {
2173*4882a593Smuzhiyun VMA_NEEDS_RESV,
2174*4882a593Smuzhiyun VMA_COMMIT_RESV,
2175*4882a593Smuzhiyun VMA_END_RESV,
2176*4882a593Smuzhiyun VMA_ADD_RESV,
2177*4882a593Smuzhiyun };
2178*4882a593Smuzhiyun static long __vma_reservation_common(struct hstate *h,
2179*4882a593Smuzhiyun struct vm_area_struct *vma, unsigned long addr,
2180*4882a593Smuzhiyun enum vma_resv_mode mode)
2181*4882a593Smuzhiyun {
2182*4882a593Smuzhiyun struct resv_map *resv;
2183*4882a593Smuzhiyun pgoff_t idx;
2184*4882a593Smuzhiyun long ret;
2185*4882a593Smuzhiyun long dummy_out_regions_needed;
2186*4882a593Smuzhiyun
2187*4882a593Smuzhiyun resv = vma_resv_map(vma);
2188*4882a593Smuzhiyun if (!resv)
2189*4882a593Smuzhiyun return 1;
2190*4882a593Smuzhiyun
2191*4882a593Smuzhiyun idx = vma_hugecache_offset(h, vma, addr);
2192*4882a593Smuzhiyun switch (mode) {
2193*4882a593Smuzhiyun case VMA_NEEDS_RESV:
2194*4882a593Smuzhiyun ret = region_chg(resv, idx, idx + 1, &dummy_out_regions_needed);
2195*4882a593Smuzhiyun /* We assume that vma_reservation_* routines always operate on
2196*4882a593Smuzhiyun * 1 page, and that adding to resv map a 1 page entry can only
2197*4882a593Smuzhiyun * ever require 1 region.
2198*4882a593Smuzhiyun */
2199*4882a593Smuzhiyun VM_BUG_ON(dummy_out_regions_needed != 1);
2200*4882a593Smuzhiyun break;
2201*4882a593Smuzhiyun case VMA_COMMIT_RESV:
2202*4882a593Smuzhiyun ret = region_add(resv, idx, idx + 1, 1, NULL, NULL);
2203*4882a593Smuzhiyun /* region_add calls of range 1 should never fail. */
2204*4882a593Smuzhiyun VM_BUG_ON(ret < 0);
2205*4882a593Smuzhiyun break;
2206*4882a593Smuzhiyun case VMA_END_RESV:
2207*4882a593Smuzhiyun region_abort(resv, idx, idx + 1, 1);
2208*4882a593Smuzhiyun ret = 0;
2209*4882a593Smuzhiyun break;
2210*4882a593Smuzhiyun case VMA_ADD_RESV:
2211*4882a593Smuzhiyun if (vma->vm_flags & VM_MAYSHARE) {
2212*4882a593Smuzhiyun ret = region_add(resv, idx, idx + 1, 1, NULL, NULL);
2213*4882a593Smuzhiyun /* region_add calls of range 1 should never fail. */
2214*4882a593Smuzhiyun VM_BUG_ON(ret < 0);
2215*4882a593Smuzhiyun } else {
2216*4882a593Smuzhiyun region_abort(resv, idx, idx + 1, 1);
2217*4882a593Smuzhiyun ret = region_del(resv, idx, idx + 1);
2218*4882a593Smuzhiyun }
2219*4882a593Smuzhiyun break;
2220*4882a593Smuzhiyun default:
2221*4882a593Smuzhiyun BUG();
2222*4882a593Smuzhiyun }
2223*4882a593Smuzhiyun
2224*4882a593Smuzhiyun if (vma->vm_flags & VM_MAYSHARE)
2225*4882a593Smuzhiyun return ret;
2226*4882a593Smuzhiyun else if (is_vma_resv_set(vma, HPAGE_RESV_OWNER) && ret >= 0) {
2227*4882a593Smuzhiyun /*
2228*4882a593Smuzhiyun * In most cases, reserves always exist for private mappings.
2229*4882a593Smuzhiyun * However, the file associated with the mapping could have been
2230*4882a593Smuzhiyun * hole punched or truncated after reserves were consumed.
2231*4882a593Smuzhiyun * A subsequent fault on such a range will not use reserves.
2232*4882a593Smuzhiyun * Subtle - The reserve map for private mappings has the
2233*4882a593Smuzhiyun * opposite meaning than that of shared mappings. If NO
2234*4882a593Smuzhiyun * entry is in the reserve map, it means a reservation exists.
2235*4882a593Smuzhiyun * If an entry exists in the reserve map, it means the
2236*4882a593Smuzhiyun * reservation has already been consumed. As a result, the
2237*4882a593Smuzhiyun * return value of this routine is the opposite of the
2238*4882a593Smuzhiyun * value returned from reserve map manipulation routines above.
2239*4882a593Smuzhiyun */
2240*4882a593Smuzhiyun if (ret)
2241*4882a593Smuzhiyun return 0;
2242*4882a593Smuzhiyun else
2243*4882a593Smuzhiyun return 1;
2244*4882a593Smuzhiyun }
2245*4882a593Smuzhiyun else
2246*4882a593Smuzhiyun return ret < 0 ? ret : 0;
2247*4882a593Smuzhiyun }
2248*4882a593Smuzhiyun
2249*4882a593Smuzhiyun static long vma_needs_reservation(struct hstate *h,
2250*4882a593Smuzhiyun struct vm_area_struct *vma, unsigned long addr)
2251*4882a593Smuzhiyun {
2252*4882a593Smuzhiyun return __vma_reservation_common(h, vma, addr, VMA_NEEDS_RESV);
2253*4882a593Smuzhiyun }
2254*4882a593Smuzhiyun
2255*4882a593Smuzhiyun static long vma_commit_reservation(struct hstate *h,
2256*4882a593Smuzhiyun struct vm_area_struct *vma, unsigned long addr)
2257*4882a593Smuzhiyun {
2258*4882a593Smuzhiyun return __vma_reservation_common(h, vma, addr, VMA_COMMIT_RESV);
2259*4882a593Smuzhiyun }
2260*4882a593Smuzhiyun
2261*4882a593Smuzhiyun static void vma_end_reservation(struct hstate *h,
2262*4882a593Smuzhiyun struct vm_area_struct *vma, unsigned long addr)
2263*4882a593Smuzhiyun {
2264*4882a593Smuzhiyun (void)__vma_reservation_common(h, vma, addr, VMA_END_RESV);
2265*4882a593Smuzhiyun }
2266*4882a593Smuzhiyun
2267*4882a593Smuzhiyun static long vma_add_reservation(struct hstate *h,
2268*4882a593Smuzhiyun struct vm_area_struct *vma, unsigned long addr)
2269*4882a593Smuzhiyun {
2270*4882a593Smuzhiyun return __vma_reservation_common(h, vma, addr, VMA_ADD_RESV);
2271*4882a593Smuzhiyun }
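
/*
 * Illustrative sketch, not part of the original file: the intended pairing
 * of the wrappers above as seen from an allocation path such as
 * alloc_huge_page().  Locking, cgroup charging and subpool accounting are
 * omitted; "do_allocate_huge_page" is a hypothetical stand-in for the
 * actual allocation step.
 *
 *	needs = vma_needs_reservation(h, vma, addr);
 *	if (needs < 0)
 *		return ERR_PTR(-ENOMEM);
 *	page = do_allocate_huge_page(h, vma, addr);
 *	if (page)
 *		(void)vma_commit_reservation(h, vma, addr);
 *	else
 *		vma_end_reservation(h, vma, addr);
 */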
2272*4882a593Smuzhiyun
2273*4882a593Smuzhiyun /*
2274*4882a593Smuzhiyun * This routine is called to restore a reservation on error paths. In the
2275*4882a593Smuzhiyun * specific error paths, a huge page was allocated (via alloc_huge_page)
2276*4882a593Smuzhiyun * and is about to be freed. If a reservation for the page existed,
2277*4882a593Smuzhiyun * alloc_huge_page would have consumed the reservation and set PagePrivate
2278*4882a593Smuzhiyun * in the newly allocated page. When the page is freed via free_huge_page,
2279*4882a593Smuzhiyun * the global reservation count will be incremented if PagePrivate is set.
2280*4882a593Smuzhiyun * However, free_huge_page can not adjust the reserve map. Adjust the
2281*4882a593Smuzhiyun * reserve map here to be consistent with global reserve count adjustments
2282*4882a593Smuzhiyun * to be made by free_huge_page.
2283*4882a593Smuzhiyun */
2284*4882a593Smuzhiyun static void restore_reserve_on_error(struct hstate *h,
2285*4882a593Smuzhiyun struct vm_area_struct *vma, unsigned long address,
2286*4882a593Smuzhiyun struct page *page)
2287*4882a593Smuzhiyun {
2288*4882a593Smuzhiyun if (unlikely(PagePrivate(page))) {
2289*4882a593Smuzhiyun long rc = vma_needs_reservation(h, vma, address);
2290*4882a593Smuzhiyun
2291*4882a593Smuzhiyun if (unlikely(rc < 0)) {
2292*4882a593Smuzhiyun /*
2293*4882a593Smuzhiyun * Rare out of memory condition in reserve map
2294*4882a593Smuzhiyun * manipulation. Clear PagePrivate so that
2295*4882a593Smuzhiyun * global reserve count will not be incremented
2296*4882a593Smuzhiyun * by free_huge_page. This will make it appear
2297*4882a593Smuzhiyun * as though the reservation for this page was
2298*4882a593Smuzhiyun * consumed. This may prevent the task from
2299*4882a593Smuzhiyun * faulting in the page at a later time. This
2300*4882a593Smuzhiyun * is better than inconsistent global huge page
2301*4882a593Smuzhiyun * accounting of reserve counts.
2302*4882a593Smuzhiyun */
2303*4882a593Smuzhiyun ClearPagePrivate(page);
2304*4882a593Smuzhiyun } else if (rc) {
2305*4882a593Smuzhiyun rc = vma_add_reservation(h, vma, address);
2306*4882a593Smuzhiyun if (unlikely(rc < 0))
2307*4882a593Smuzhiyun /*
2308*4882a593Smuzhiyun * See above comment about rare out of
2309*4882a593Smuzhiyun * memory condition.
2310*4882a593Smuzhiyun */
2311*4882a593Smuzhiyun ClearPagePrivate(page);
2312*4882a593Smuzhiyun } else
2313*4882a593Smuzhiyun vma_end_reservation(h, vma, address);
2314*4882a593Smuzhiyun }
2315*4882a593Smuzhiyun }
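
/*
 * Illustrative sketch, not part of the original file: how an error path is
 * expected to pair restore_reserve_on_error() with freeing a page that was
 * just obtained from alloc_huge_page().  "map_new_page" is a hypothetical
 * stand-in for whatever step failed after the allocation.
 *
 *	page = alloc_huge_page(vma, address, 0);
 *	if (IS_ERR(page))
 *		return VM_FAULT_OOM;
 *	if (map_new_page(vma, address, page)) {
 *		restore_reserve_on_error(h, vma, address, page);
 *		put_page(page);
 *	}
 */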
2316*4882a593Smuzhiyun
2317*4882a593Smuzhiyun struct page *alloc_huge_page(struct vm_area_struct *vma,
2318*4882a593Smuzhiyun unsigned long addr, int avoid_reserve)
2319*4882a593Smuzhiyun {
2320*4882a593Smuzhiyun struct hugepage_subpool *spool = subpool_vma(vma);
2321*4882a593Smuzhiyun struct hstate *h = hstate_vma(vma);
2322*4882a593Smuzhiyun struct page *page;
2323*4882a593Smuzhiyun long map_chg, map_commit;
2324*4882a593Smuzhiyun long gbl_chg;
2325*4882a593Smuzhiyun int ret, idx;
2326*4882a593Smuzhiyun struct hugetlb_cgroup *h_cg;
2327*4882a593Smuzhiyun bool deferred_reserve;
2328*4882a593Smuzhiyun
2329*4882a593Smuzhiyun idx = hstate_index(h);
2330*4882a593Smuzhiyun /*
2331*4882a593Smuzhiyun * Examine the region/reserve map to determine if the process
2332*4882a593Smuzhiyun * has a reservation for the page to be allocated. A return
2333*4882a593Smuzhiyun * code of zero indicates a reservation exists (no change).
2334*4882a593Smuzhiyun */
2335*4882a593Smuzhiyun map_chg = gbl_chg = vma_needs_reservation(h, vma, addr);
2336*4882a593Smuzhiyun if (map_chg < 0)
2337*4882a593Smuzhiyun return ERR_PTR(-ENOMEM);
2338*4882a593Smuzhiyun
2339*4882a593Smuzhiyun /*
2340*4882a593Smuzhiyun * Processes that did not create the mapping will have no
2341*4882a593Smuzhiyun * reserves as indicated by the region/reserve map. Check
2342*4882a593Smuzhiyun * that the allocation will not exceed the subpool limit.
2343*4882a593Smuzhiyun * Allocations for MAP_NORESERVE mappings also need to be
2344*4882a593Smuzhiyun * checked against any subpool limit.
2345*4882a593Smuzhiyun */
2346*4882a593Smuzhiyun if (map_chg || avoid_reserve) {
2347*4882a593Smuzhiyun gbl_chg = hugepage_subpool_get_pages(spool, 1);
2348*4882a593Smuzhiyun if (gbl_chg < 0) {
2349*4882a593Smuzhiyun vma_end_reservation(h, vma, addr);
2350*4882a593Smuzhiyun return ERR_PTR(-ENOSPC);
2351*4882a593Smuzhiyun }
2352*4882a593Smuzhiyun
2353*4882a593Smuzhiyun /*
2354*4882a593Smuzhiyun * Even though there was no reservation in the region/reserve
2355*4882a593Smuzhiyun * map, there could be reservations associated with the
2356*4882a593Smuzhiyun * subpool that can be used. This would be indicated if the
2357*4882a593Smuzhiyun * return value of hugepage_subpool_get_pages() is zero.
2358*4882a593Smuzhiyun * However, if avoid_reserve is specified we still avoid even
2359*4882a593Smuzhiyun * the subpool reservations.
2360*4882a593Smuzhiyun */
2361*4882a593Smuzhiyun if (avoid_reserve)
2362*4882a593Smuzhiyun gbl_chg = 1;
2363*4882a593Smuzhiyun }
2364*4882a593Smuzhiyun
2365*4882a593Smuzhiyun /* If this allocation is not consuming a reservation, charge it now.
2366*4882a593Smuzhiyun */
2367*4882a593Smuzhiyun deferred_reserve = map_chg || avoid_reserve || !vma_resv_map(vma);
2368*4882a593Smuzhiyun if (deferred_reserve) {
2369*4882a593Smuzhiyun ret = hugetlb_cgroup_charge_cgroup_rsvd(
2370*4882a593Smuzhiyun idx, pages_per_huge_page(h), &h_cg);
2371*4882a593Smuzhiyun if (ret)
2372*4882a593Smuzhiyun goto out_subpool_put;
2373*4882a593Smuzhiyun }
2374*4882a593Smuzhiyun
2375*4882a593Smuzhiyun ret = hugetlb_cgroup_charge_cgroup(idx, pages_per_huge_page(h), &h_cg);
2376*4882a593Smuzhiyun if (ret)
2377*4882a593Smuzhiyun goto out_uncharge_cgroup_reservation;
2378*4882a593Smuzhiyun
2379*4882a593Smuzhiyun spin_lock(&hugetlb_lock);
2380*4882a593Smuzhiyun /*
2381*4882a593Smuzhiyun * gbl_chg is passed to indicate whether or not a page must be taken
2382*4882a593Smuzhiyun * from the global free pool (global change). gbl_chg == 0 indicates
2383*4882a593Smuzhiyun * a reservation exists for the allocation.
2384*4882a593Smuzhiyun */
2385*4882a593Smuzhiyun page = dequeue_huge_page_vma(h, vma, addr, avoid_reserve, gbl_chg);
2386*4882a593Smuzhiyun if (!page) {
2387*4882a593Smuzhiyun spin_unlock(&hugetlb_lock);
2388*4882a593Smuzhiyun page = alloc_buddy_huge_page_with_mpol(h, vma, addr);
2389*4882a593Smuzhiyun if (!page)
2390*4882a593Smuzhiyun goto out_uncharge_cgroup;
2391*4882a593Smuzhiyun spin_lock(&hugetlb_lock);
2392*4882a593Smuzhiyun if (!avoid_reserve && vma_has_reserves(vma, gbl_chg)) {
2393*4882a593Smuzhiyun SetPagePrivate(page);
2394*4882a593Smuzhiyun h->resv_huge_pages--;
2395*4882a593Smuzhiyun }
2396*4882a593Smuzhiyun list_add(&page->lru, &h->hugepage_activelist);
2397*4882a593Smuzhiyun /* Fall through */
2398*4882a593Smuzhiyun }
2399*4882a593Smuzhiyun hugetlb_cgroup_commit_charge(idx, pages_per_huge_page(h), h_cg, page);
2400*4882a593Smuzhiyun /* If allocation is not consuming a reservation, also store the
2401*4882a593Smuzhiyun * hugetlb_cgroup pointer on the page.
2402*4882a593Smuzhiyun */
2403*4882a593Smuzhiyun if (deferred_reserve) {
2404*4882a593Smuzhiyun hugetlb_cgroup_commit_charge_rsvd(idx, pages_per_huge_page(h),
2405*4882a593Smuzhiyun h_cg, page);
2406*4882a593Smuzhiyun }
2407*4882a593Smuzhiyun
2408*4882a593Smuzhiyun spin_unlock(&hugetlb_lock);
2409*4882a593Smuzhiyun
2410*4882a593Smuzhiyun set_page_private(page, (unsigned long)spool);
2411*4882a593Smuzhiyun
2412*4882a593Smuzhiyun map_commit = vma_commit_reservation(h, vma, addr);
2413*4882a593Smuzhiyun if (unlikely(map_chg > map_commit)) {
2414*4882a593Smuzhiyun /*
2415*4882a593Smuzhiyun * The page was added to the reservation map between
2416*4882a593Smuzhiyun * vma_needs_reservation and vma_commit_reservation.
2417*4882a593Smuzhiyun * This indicates a race with hugetlb_reserve_pages.
2418*4882a593Smuzhiyun * Adjust for the subpool count incremented above AND
2419*4882a593Smuzhiyun * in hugetlb_reserve_pages for the same page. Also,
2420*4882a593Smuzhiyun * the reservation count added in hugetlb_reserve_pages
2421*4882a593Smuzhiyun * no longer applies.
2422*4882a593Smuzhiyun */
2423*4882a593Smuzhiyun long rsv_adjust;
2424*4882a593Smuzhiyun
2425*4882a593Smuzhiyun rsv_adjust = hugepage_subpool_put_pages(spool, 1);
2426*4882a593Smuzhiyun hugetlb_acct_memory(h, -rsv_adjust);
2427*4882a593Smuzhiyun if (deferred_reserve)
2428*4882a593Smuzhiyun hugetlb_cgroup_uncharge_page_rsvd(hstate_index(h),
2429*4882a593Smuzhiyun pages_per_huge_page(h), page);
2430*4882a593Smuzhiyun }
2431*4882a593Smuzhiyun return page;
2432*4882a593Smuzhiyun
2433*4882a593Smuzhiyun out_uncharge_cgroup:
2434*4882a593Smuzhiyun hugetlb_cgroup_uncharge_cgroup(idx, pages_per_huge_page(h), h_cg);
2435*4882a593Smuzhiyun out_uncharge_cgroup_reservation:
2436*4882a593Smuzhiyun if (deferred_reserve)
2437*4882a593Smuzhiyun hugetlb_cgroup_uncharge_cgroup_rsvd(idx, pages_per_huge_page(h),
2438*4882a593Smuzhiyun h_cg);
2439*4882a593Smuzhiyun out_subpool_put:
2440*4882a593Smuzhiyun if (map_chg || avoid_reserve)
2441*4882a593Smuzhiyun hugepage_subpool_put_pages(spool, 1);
2442*4882a593Smuzhiyun vma_end_reservation(h, vma, addr);
2443*4882a593Smuzhiyun return ERR_PTR(-ENOSPC);
2444*4882a593Smuzhiyun }
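
/*
 * Illustrative sketch, not part of the original file: the two error codes
 * above are typically turned into fault results by callers in the fault
 * paths, e.g. via vmf_error(), which maps -ENOMEM to VM_FAULT_OOM and any
 * other error (such as the -ENOSPC returned when the pool and subpool are
 * exhausted) to VM_FAULT_SIGBUS:
 *
 *	page = alloc_huge_page(vma, haddr, 0);
 *	if (IS_ERR(page)) {
 *		ret = vmf_error(PTR_ERR(page));
 *		goto out;
 *	}
 */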
2445*4882a593Smuzhiyun
2446*4882a593Smuzhiyun int alloc_bootmem_huge_page(struct hstate *h)
2447*4882a593Smuzhiyun __attribute__ ((weak, alias("__alloc_bootmem_huge_page")));
2448*4882a593Smuzhiyun int __alloc_bootmem_huge_page(struct hstate *h)
2449*4882a593Smuzhiyun {
2450*4882a593Smuzhiyun struct huge_bootmem_page *m;
2451*4882a593Smuzhiyun int nr_nodes, node;
2452*4882a593Smuzhiyun
2453*4882a593Smuzhiyun for_each_node_mask_to_alloc(h, nr_nodes, node, &node_states[N_MEMORY]) {
2454*4882a593Smuzhiyun void *addr;
2455*4882a593Smuzhiyun
2456*4882a593Smuzhiyun addr = memblock_alloc_try_nid_raw(
2457*4882a593Smuzhiyun huge_page_size(h), huge_page_size(h),
2458*4882a593Smuzhiyun 0, MEMBLOCK_ALLOC_ACCESSIBLE, node);
2459*4882a593Smuzhiyun if (addr) {
2460*4882a593Smuzhiyun /*
2461*4882a593Smuzhiyun * Use the beginning of the huge page to store the
2462*4882a593Smuzhiyun * huge_bootmem_page struct (until gather_bootmem
2463*4882a593Smuzhiyun * puts them into the mem_map).
2464*4882a593Smuzhiyun */
2465*4882a593Smuzhiyun m = addr;
2466*4882a593Smuzhiyun goto found;
2467*4882a593Smuzhiyun }
2468*4882a593Smuzhiyun }
2469*4882a593Smuzhiyun return 0;
2470*4882a593Smuzhiyun
2471*4882a593Smuzhiyun found:
2472*4882a593Smuzhiyun BUG_ON(!IS_ALIGNED(virt_to_phys(m), huge_page_size(h)));
2473*4882a593Smuzhiyun /* Put them into a private list first because mem_map is not up yet */
2474*4882a593Smuzhiyun INIT_LIST_HEAD(&m->list);
2475*4882a593Smuzhiyun list_add(&m->list, &huge_boot_pages);
2476*4882a593Smuzhiyun m->hstate = h;
2477*4882a593Smuzhiyun return 1;
2478*4882a593Smuzhiyun }
2479*4882a593Smuzhiyun
2480*4882a593Smuzhiyun /*
2481*4882a593Smuzhiyun * Put bootmem huge pages into the standard lists after mem_map is up.
2482*4882a593Smuzhiyun * Note: This only applies to gigantic (order >= MAX_ORDER) pages.
2483*4882a593Smuzhiyun */
2484*4882a593Smuzhiyun static void __init gather_bootmem_prealloc(void)
2485*4882a593Smuzhiyun {
2486*4882a593Smuzhiyun struct huge_bootmem_page *m;
2487*4882a593Smuzhiyun
2488*4882a593Smuzhiyun list_for_each_entry(m, &huge_boot_pages, list) {
2489*4882a593Smuzhiyun struct page *page = virt_to_page(m);
2490*4882a593Smuzhiyun struct hstate *h = m->hstate;
2491*4882a593Smuzhiyun
2492*4882a593Smuzhiyun VM_BUG_ON(!hstate_is_gigantic(h));
2493*4882a593Smuzhiyun WARN_ON(page_count(page) != 1);
2494*4882a593Smuzhiyun prep_compound_gigantic_page(page, huge_page_order(h));
2495*4882a593Smuzhiyun WARN_ON(PageReserved(page));
2496*4882a593Smuzhiyun prep_new_huge_page(h, page, page_to_nid(page));
2497*4882a593Smuzhiyun put_page(page); /* free it into the hugepage allocator */
2498*4882a593Smuzhiyun
2499*4882a593Smuzhiyun /*
2500*4882a593Smuzhiyun * We need to restore the 'stolen' pages to totalram_pages
2501*4882a593Smuzhiyun * in order to fix confusing memory reports from free(1) and
2502*4882a593Smuzhiyun * other side-effects, like CommitLimit going negative.
2503*4882a593Smuzhiyun */
2504*4882a593Smuzhiyun adjust_managed_page_count(page, pages_per_huge_page(h));
2505*4882a593Smuzhiyun cond_resched();
2506*4882a593Smuzhiyun }
2507*4882a593Smuzhiyun }
2508*4882a593Smuzhiyun
2509*4882a593Smuzhiyun static void __init hugetlb_hstate_alloc_pages(struct hstate *h)
2510*4882a593Smuzhiyun {
2511*4882a593Smuzhiyun unsigned long i;
2512*4882a593Smuzhiyun nodemask_t *node_alloc_noretry;
2513*4882a593Smuzhiyun
2514*4882a593Smuzhiyun if (!hstate_is_gigantic(h)) {
2515*4882a593Smuzhiyun /*
2516*4882a593Smuzhiyun * Bit mask controlling how hard we retry per-node allocations.
2517*4882a593Smuzhiyun * Ignore errors as lower level routines can deal with
2518*4882a593Smuzhiyun * node_alloc_noretry == NULL. If this kmalloc fails at boot
2519*4882a593Smuzhiyun * time, we are likely in bigger trouble.
2520*4882a593Smuzhiyun */
2521*4882a593Smuzhiyun node_alloc_noretry = kmalloc(sizeof(*node_alloc_noretry),
2522*4882a593Smuzhiyun GFP_KERNEL);
2523*4882a593Smuzhiyun } else {
2524*4882a593Smuzhiyun /* allocations done at boot time */
2525*4882a593Smuzhiyun node_alloc_noretry = NULL;
2526*4882a593Smuzhiyun }
2527*4882a593Smuzhiyun
2528*4882a593Smuzhiyun /* bit mask controlling how hard we retry per-node allocations */
2529*4882a593Smuzhiyun if (node_alloc_noretry)
2530*4882a593Smuzhiyun nodes_clear(*node_alloc_noretry);
2531*4882a593Smuzhiyun
2532*4882a593Smuzhiyun for (i = 0; i < h->max_huge_pages; ++i) {
2533*4882a593Smuzhiyun if (hstate_is_gigantic(h)) {
2534*4882a593Smuzhiyun if (hugetlb_cma_size) {
2535*4882a593Smuzhiyun pr_warn_once("HugeTLB: hugetlb_cma is enabled, skip boot time allocation\n");
2536*4882a593Smuzhiyun goto free;
2537*4882a593Smuzhiyun }
2538*4882a593Smuzhiyun if (!alloc_bootmem_huge_page(h))
2539*4882a593Smuzhiyun break;
2540*4882a593Smuzhiyun } else if (!alloc_pool_huge_page(h,
2541*4882a593Smuzhiyun &node_states[N_MEMORY],
2542*4882a593Smuzhiyun node_alloc_noretry))
2543*4882a593Smuzhiyun break;
2544*4882a593Smuzhiyun cond_resched();
2545*4882a593Smuzhiyun }
2546*4882a593Smuzhiyun if (i < h->max_huge_pages) {
2547*4882a593Smuzhiyun char buf[32];
2548*4882a593Smuzhiyun
2549*4882a593Smuzhiyun string_get_size(huge_page_size(h), 1, STRING_UNITS_2, buf, 32);
2550*4882a593Smuzhiyun pr_warn("HugeTLB: allocating %lu of page size %s failed. Only allocated %lu hugepages.\n",
2551*4882a593Smuzhiyun h->max_huge_pages, buf, i);
2552*4882a593Smuzhiyun h->max_huge_pages = i;
2553*4882a593Smuzhiyun }
2554*4882a593Smuzhiyun free:
2555*4882a593Smuzhiyun kfree(node_alloc_noretry);
2556*4882a593Smuzhiyun }
2557*4882a593Smuzhiyun
2558*4882a593Smuzhiyun static void __init hugetlb_init_hstates(void)
2559*4882a593Smuzhiyun {
2560*4882a593Smuzhiyun struct hstate *h;
2561*4882a593Smuzhiyun
2562*4882a593Smuzhiyun for_each_hstate(h) {
2563*4882a593Smuzhiyun if (minimum_order > huge_page_order(h))
2564*4882a593Smuzhiyun minimum_order = huge_page_order(h);
2565*4882a593Smuzhiyun
2566*4882a593Smuzhiyun /* oversize hugepages were init'ed in early boot */
2567*4882a593Smuzhiyun if (!hstate_is_gigantic(h))
2568*4882a593Smuzhiyun hugetlb_hstate_alloc_pages(h);
2569*4882a593Smuzhiyun }
2570*4882a593Smuzhiyun VM_BUG_ON(minimum_order == UINT_MAX);
2571*4882a593Smuzhiyun }
2572*4882a593Smuzhiyun
2573*4882a593Smuzhiyun static void __init report_hugepages(void)
2574*4882a593Smuzhiyun {
2575*4882a593Smuzhiyun struct hstate *h;
2576*4882a593Smuzhiyun
2577*4882a593Smuzhiyun for_each_hstate(h) {
2578*4882a593Smuzhiyun char buf[32];
2579*4882a593Smuzhiyun
2580*4882a593Smuzhiyun string_get_size(huge_page_size(h), 1, STRING_UNITS_2, buf, 32);
2581*4882a593Smuzhiyun pr_info("HugeTLB registered %s page size, pre-allocated %ld pages\n",
2582*4882a593Smuzhiyun buf, h->free_huge_pages);
2583*4882a593Smuzhiyun }
2584*4882a593Smuzhiyun }
2585*4882a593Smuzhiyun
2586*4882a593Smuzhiyun #ifdef CONFIG_HIGHMEM
2587*4882a593Smuzhiyun static void try_to_free_low(struct hstate *h, unsigned long count,
2588*4882a593Smuzhiyun nodemask_t *nodes_allowed)
2589*4882a593Smuzhiyun {
2590*4882a593Smuzhiyun int i;
2591*4882a593Smuzhiyun
2592*4882a593Smuzhiyun if (hstate_is_gigantic(h))
2593*4882a593Smuzhiyun return;
2594*4882a593Smuzhiyun
2595*4882a593Smuzhiyun for_each_node_mask(i, *nodes_allowed) {
2596*4882a593Smuzhiyun struct page *page, *next;
2597*4882a593Smuzhiyun struct list_head *freel = &h->hugepage_freelists[i];
2598*4882a593Smuzhiyun list_for_each_entry_safe(page, next, freel, lru) {
2599*4882a593Smuzhiyun if (count >= h->nr_huge_pages)
2600*4882a593Smuzhiyun return;
2601*4882a593Smuzhiyun if (PageHighMem(page))
2602*4882a593Smuzhiyun continue;
2603*4882a593Smuzhiyun list_del(&page->lru);
2604*4882a593Smuzhiyun update_and_free_page(h, page);
2605*4882a593Smuzhiyun h->free_huge_pages--;
2606*4882a593Smuzhiyun h->free_huge_pages_node[page_to_nid(page)]--;
2607*4882a593Smuzhiyun }
2608*4882a593Smuzhiyun }
2609*4882a593Smuzhiyun }
2610*4882a593Smuzhiyun #else
2611*4882a593Smuzhiyun static inline void try_to_free_low(struct hstate *h, unsigned long count,
2612*4882a593Smuzhiyun nodemask_t *nodes_allowed)
2613*4882a593Smuzhiyun {
2614*4882a593Smuzhiyun }
2615*4882a593Smuzhiyun #endif
2616*4882a593Smuzhiyun
2617*4882a593Smuzhiyun /*
2618*4882a593Smuzhiyun * Increment or decrement surplus_huge_pages. Keep node-specific counters
2619*4882a593Smuzhiyun * balanced by operating on them in a round-robin fashion.
2620*4882a593Smuzhiyun * Returns 1 if an adjustment was made.
2621*4882a593Smuzhiyun */
2622*4882a593Smuzhiyun static int adjust_pool_surplus(struct hstate *h, nodemask_t *nodes_allowed,
2623*4882a593Smuzhiyun int delta)
2624*4882a593Smuzhiyun {
2625*4882a593Smuzhiyun int nr_nodes, node;
2626*4882a593Smuzhiyun
2627*4882a593Smuzhiyun VM_BUG_ON(delta != -1 && delta != 1);
2628*4882a593Smuzhiyun
2629*4882a593Smuzhiyun if (delta < 0) {
2630*4882a593Smuzhiyun for_each_node_mask_to_alloc(h, nr_nodes, node, nodes_allowed) {
2631*4882a593Smuzhiyun if (h->surplus_huge_pages_node[node])
2632*4882a593Smuzhiyun goto found;
2633*4882a593Smuzhiyun }
2634*4882a593Smuzhiyun } else {
2635*4882a593Smuzhiyun for_each_node_mask_to_free(h, nr_nodes, node, nodes_allowed) {
2636*4882a593Smuzhiyun if (h->surplus_huge_pages_node[node] <
2637*4882a593Smuzhiyun h->nr_huge_pages_node[node])
2638*4882a593Smuzhiyun goto found;
2639*4882a593Smuzhiyun }
2640*4882a593Smuzhiyun }
2641*4882a593Smuzhiyun return 0;
2642*4882a593Smuzhiyun
2643*4882a593Smuzhiyun found:
2644*4882a593Smuzhiyun h->surplus_huge_pages += delta;
2645*4882a593Smuzhiyun h->surplus_huge_pages_node[node] += delta;
2646*4882a593Smuzhiyun return 1;
2647*4882a593Smuzhiyun }
2648*4882a593Smuzhiyun
2649*4882a593Smuzhiyun #define persistent_huge_pages(h) (h->nr_huge_pages - h->surplus_huge_pages)
2650*4882a593Smuzhiyun static int set_max_huge_pages(struct hstate *h, unsigned long count, int nid,
2651*4882a593Smuzhiyun nodemask_t *nodes_allowed)
2652*4882a593Smuzhiyun {
2653*4882a593Smuzhiyun unsigned long min_count, ret;
2654*4882a593Smuzhiyun NODEMASK_ALLOC(nodemask_t, node_alloc_noretry, GFP_KERNEL);
2655*4882a593Smuzhiyun
2656*4882a593Smuzhiyun /*
2657*4882a593Smuzhiyun * Bit mask controlling how hard we retry per-node allocations.
2658*4882a593Smuzhiyun * If we can not allocate the bit mask, do not attempt to allocate
2659*4882a593Smuzhiyun * the requested huge pages.
2660*4882a593Smuzhiyun */
2661*4882a593Smuzhiyun if (node_alloc_noretry)
2662*4882a593Smuzhiyun nodes_clear(*node_alloc_noretry);
2663*4882a593Smuzhiyun else
2664*4882a593Smuzhiyun return -ENOMEM;
2665*4882a593Smuzhiyun
2666*4882a593Smuzhiyun spin_lock(&hugetlb_lock);
2667*4882a593Smuzhiyun
2668*4882a593Smuzhiyun /*
2669*4882a593Smuzhiyun * Check for a node specific request.
2670*4882a593Smuzhiyun * Changing node specific huge page count may require a corresponding
2671*4882a593Smuzhiyun * change to the global count. In any case, the passed node mask
2672*4882a593Smuzhiyun * (nodes_allowed) will restrict alloc/free to the specified node.
2673*4882a593Smuzhiyun */
2674*4882a593Smuzhiyun if (nid != NUMA_NO_NODE) {
2675*4882a593Smuzhiyun unsigned long old_count = count;
2676*4882a593Smuzhiyun
2677*4882a593Smuzhiyun count += h->nr_huge_pages - h->nr_huge_pages_node[nid];
2678*4882a593Smuzhiyun /*
2679*4882a593Smuzhiyun * User may have specified a large count value which caused the
2680*4882a593Smuzhiyun * above calculation to overflow. In this case, they wanted
2681*4882a593Smuzhiyun * to allocate as many huge pages as possible. Set count to
2682*4882a593Smuzhiyun * largest possible value to align with their intention.
2683*4882a593Smuzhiyun */
2684*4882a593Smuzhiyun if (count < old_count)
2685*4882a593Smuzhiyun count = ULONG_MAX;
2686*4882a593Smuzhiyun }
2687*4882a593Smuzhiyun
2688*4882a593Smuzhiyun /*
2689*4882a593Smuzhiyun * Gigantic pages runtime allocation depend on the capability for large
2690*4882a593Smuzhiyun * page range allocation.
2691*4882a593Smuzhiyun * If the system does not provide this feature, return an error when
2692*4882a593Smuzhiyun * the user tries to allocate gigantic pages but let the user free the
2693*4882a593Smuzhiyun * boottime allocated gigantic pages.
2694*4882a593Smuzhiyun */
2695*4882a593Smuzhiyun if (hstate_is_gigantic(h) && !IS_ENABLED(CONFIG_CONTIG_ALLOC)) {
2696*4882a593Smuzhiyun if (count > persistent_huge_pages(h)) {
2697*4882a593Smuzhiyun spin_unlock(&hugetlb_lock);
2698*4882a593Smuzhiyun NODEMASK_FREE(node_alloc_noretry);
2699*4882a593Smuzhiyun return -EINVAL;
2700*4882a593Smuzhiyun }
2701*4882a593Smuzhiyun /* Fall through to decrease pool */
2702*4882a593Smuzhiyun }
2703*4882a593Smuzhiyun
2704*4882a593Smuzhiyun /*
2705*4882a593Smuzhiyun * Increase the pool size
2706*4882a593Smuzhiyun * First take pages out of surplus state. Then make up the
2707*4882a593Smuzhiyun * remaining difference by allocating fresh huge pages.
2708*4882a593Smuzhiyun *
2709*4882a593Smuzhiyun * We might race with alloc_surplus_huge_page() here and be unable
2710*4882a593Smuzhiyun * to convert a surplus huge page to a normal huge page. That is
2711*4882a593Smuzhiyun * not critical, though, it just means the overall size of the
2712*4882a593Smuzhiyun * pool might be one hugepage larger than it needs to be, but
2713*4882a593Smuzhiyun * within all the constraints specified by the sysctls.
2714*4882a593Smuzhiyun */
2715*4882a593Smuzhiyun while (h->surplus_huge_pages && count > persistent_huge_pages(h)) {
2716*4882a593Smuzhiyun if (!adjust_pool_surplus(h, nodes_allowed, -1))
2717*4882a593Smuzhiyun break;
2718*4882a593Smuzhiyun }
2719*4882a593Smuzhiyun
2720*4882a593Smuzhiyun while (count > persistent_huge_pages(h)) {
2721*4882a593Smuzhiyun /*
2722*4882a593Smuzhiyun * If this allocation races such that we no longer need the
2723*4882a593Smuzhiyun * page, free_huge_page will handle it by freeing the page
2724*4882a593Smuzhiyun * and reducing the surplus.
2725*4882a593Smuzhiyun */
2726*4882a593Smuzhiyun spin_unlock(&hugetlb_lock);
2727*4882a593Smuzhiyun
2728*4882a593Smuzhiyun /* yield cpu to avoid soft lockup */
2729*4882a593Smuzhiyun cond_resched();
2730*4882a593Smuzhiyun
2731*4882a593Smuzhiyun ret = alloc_pool_huge_page(h, nodes_allowed,
2732*4882a593Smuzhiyun node_alloc_noretry);
2733*4882a593Smuzhiyun spin_lock(&hugetlb_lock);
2734*4882a593Smuzhiyun if (!ret)
2735*4882a593Smuzhiyun goto out;
2736*4882a593Smuzhiyun
2737*4882a593Smuzhiyun /* Bail for signals. Probably ctrl-c from user */
2738*4882a593Smuzhiyun if (signal_pending(current))
2739*4882a593Smuzhiyun goto out;
2740*4882a593Smuzhiyun }
2741*4882a593Smuzhiyun
2742*4882a593Smuzhiyun /*
2743*4882a593Smuzhiyun * Decrease the pool size
2744*4882a593Smuzhiyun * First return free pages to the buddy allocator (being careful
2745*4882a593Smuzhiyun * to keep enough around to satisfy reservations). Then place
2746*4882a593Smuzhiyun * pages into surplus state as needed so the pool will shrink
2747*4882a593Smuzhiyun * to the desired size as pages become free.
2748*4882a593Smuzhiyun *
2749*4882a593Smuzhiyun * By placing pages into the surplus state independent of the
2750*4882a593Smuzhiyun * overcommit value, we are allowing the surplus pool size to
2751*4882a593Smuzhiyun * exceed overcommit. There are few sane options here. Since
2752*4882a593Smuzhiyun * alloc_surplus_huge_page() is checking the global counter,
2753*4882a593Smuzhiyun * though, we'll note that we're not allowed to exceed surplus
2754*4882a593Smuzhiyun * and won't grow the pool anywhere else, not until one of the
2755*4882a593Smuzhiyun * sysctls is changed or the surplus pages go out of use.
2756*4882a593Smuzhiyun */
2757*4882a593Smuzhiyun min_count = h->resv_huge_pages + h->nr_huge_pages - h->free_huge_pages;
2758*4882a593Smuzhiyun min_count = max(count, min_count);
2759*4882a593Smuzhiyun try_to_free_low(h, min_count, nodes_allowed);
2760*4882a593Smuzhiyun while (min_count < persistent_huge_pages(h)) {
2761*4882a593Smuzhiyun if (!free_pool_huge_page(h, nodes_allowed, 0))
2762*4882a593Smuzhiyun break;
2763*4882a593Smuzhiyun cond_resched_lock(&hugetlb_lock);
2764*4882a593Smuzhiyun }
2765*4882a593Smuzhiyun while (count < persistent_huge_pages(h)) {
2766*4882a593Smuzhiyun if (!adjust_pool_surplus(h, nodes_allowed, 1))
2767*4882a593Smuzhiyun break;
2768*4882a593Smuzhiyun }
2769*4882a593Smuzhiyun out:
2770*4882a593Smuzhiyun h->max_huge_pages = persistent_huge_pages(h);
2771*4882a593Smuzhiyun spin_unlock(&hugetlb_lock);
2772*4882a593Smuzhiyun
2773*4882a593Smuzhiyun NODEMASK_FREE(node_alloc_noretry);
2774*4882a593Smuzhiyun
2775*4882a593Smuzhiyun return 0;
2776*4882a593Smuzhiyun }
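
/*
 * Illustrative note, not part of the original file: set_max_huge_pages() is
 * what ultimately services userspace writes to /proc/sys/vm/nr_hugepages
 * and to /sys/kernel/mm/hugepages/hugepages-<size>kB/nr_hugepages.  A
 * minimal userspace sketch (error handling omitted):
 *
 *	int fd = open("/proc/sys/vm/nr_hugepages", O_WRONLY);
 *	write(fd, "64", 2);
 *	close(fd);
 *
 * which asks for a pool of 64 default-sized huge pages.
 */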
2777*4882a593Smuzhiyun
2778*4882a593Smuzhiyun #define HSTATE_ATTR_RO(_name) \
2779*4882a593Smuzhiyun static struct kobj_attribute _name##_attr = __ATTR_RO(_name)
2780*4882a593Smuzhiyun
2781*4882a593Smuzhiyun #define HSTATE_ATTR(_name) \
2782*4882a593Smuzhiyun static struct kobj_attribute _name##_attr = \
2783*4882a593Smuzhiyun __ATTR(_name, 0644, _name##_show, _name##_store)
2784*4882a593Smuzhiyun
2785*4882a593Smuzhiyun static struct kobject *hugepages_kobj;
2786*4882a593Smuzhiyun static struct kobject *hstate_kobjs[HUGE_MAX_HSTATE];
2787*4882a593Smuzhiyun
2788*4882a593Smuzhiyun static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp);
2789*4882a593Smuzhiyun
2790*4882a593Smuzhiyun static struct hstate *kobj_to_hstate(struct kobject *kobj, int *nidp)
2791*4882a593Smuzhiyun {
2792*4882a593Smuzhiyun int i;
2793*4882a593Smuzhiyun
2794*4882a593Smuzhiyun for (i = 0; i < HUGE_MAX_HSTATE; i++)
2795*4882a593Smuzhiyun if (hstate_kobjs[i] == kobj) {
2796*4882a593Smuzhiyun if (nidp)
2797*4882a593Smuzhiyun *nidp = NUMA_NO_NODE;
2798*4882a593Smuzhiyun return &hstates[i];
2799*4882a593Smuzhiyun }
2800*4882a593Smuzhiyun
2801*4882a593Smuzhiyun return kobj_to_node_hstate(kobj, nidp);
2802*4882a593Smuzhiyun }
2803*4882a593Smuzhiyun
2804*4882a593Smuzhiyun static ssize_t nr_hugepages_show_common(struct kobject *kobj,
2805*4882a593Smuzhiyun struct kobj_attribute *attr, char *buf)
2806*4882a593Smuzhiyun {
2807*4882a593Smuzhiyun struct hstate *h;
2808*4882a593Smuzhiyun unsigned long nr_huge_pages;
2809*4882a593Smuzhiyun int nid;
2810*4882a593Smuzhiyun
2811*4882a593Smuzhiyun h = kobj_to_hstate(kobj, &nid);
2812*4882a593Smuzhiyun if (nid == NUMA_NO_NODE)
2813*4882a593Smuzhiyun nr_huge_pages = h->nr_huge_pages;
2814*4882a593Smuzhiyun else
2815*4882a593Smuzhiyun nr_huge_pages = h->nr_huge_pages_node[nid];
2816*4882a593Smuzhiyun
2817*4882a593Smuzhiyun return sprintf(buf, "%lu\n", nr_huge_pages);
2818*4882a593Smuzhiyun }
2819*4882a593Smuzhiyun
2820*4882a593Smuzhiyun static ssize_t __nr_hugepages_store_common(bool obey_mempolicy,
2821*4882a593Smuzhiyun struct hstate *h, int nid,
2822*4882a593Smuzhiyun unsigned long count, size_t len)
2823*4882a593Smuzhiyun {
2824*4882a593Smuzhiyun int err;
2825*4882a593Smuzhiyun nodemask_t nodes_allowed, *n_mask;
2826*4882a593Smuzhiyun
2827*4882a593Smuzhiyun if (hstate_is_gigantic(h) && !gigantic_page_runtime_supported())
2828*4882a593Smuzhiyun return -EINVAL;
2829*4882a593Smuzhiyun
2830*4882a593Smuzhiyun if (nid == NUMA_NO_NODE) {
2831*4882a593Smuzhiyun /*
2832*4882a593Smuzhiyun * global hstate attribute
2833*4882a593Smuzhiyun */
2834*4882a593Smuzhiyun if (!(obey_mempolicy &&
2835*4882a593Smuzhiyun init_nodemask_of_mempolicy(&nodes_allowed)))
2836*4882a593Smuzhiyun n_mask = &node_states[N_MEMORY];
2837*4882a593Smuzhiyun else
2838*4882a593Smuzhiyun n_mask = &nodes_allowed;
2839*4882a593Smuzhiyun } else {
2840*4882a593Smuzhiyun /*
2841*4882a593Smuzhiyun * Node specific request. count adjustment happens in
2842*4882a593Smuzhiyun * set_max_huge_pages() after acquiring hugetlb_lock.
2843*4882a593Smuzhiyun */
2844*4882a593Smuzhiyun init_nodemask_of_node(&nodes_allowed, nid);
2845*4882a593Smuzhiyun n_mask = &nodes_allowed;
2846*4882a593Smuzhiyun }
2847*4882a593Smuzhiyun
2848*4882a593Smuzhiyun err = set_max_huge_pages(h, count, nid, n_mask);
2849*4882a593Smuzhiyun
2850*4882a593Smuzhiyun return err ? err : len;
2851*4882a593Smuzhiyun }
2852*4882a593Smuzhiyun
2853*4882a593Smuzhiyun static ssize_t nr_hugepages_store_common(bool obey_mempolicy,
2854*4882a593Smuzhiyun struct kobject *kobj, const char *buf,
2855*4882a593Smuzhiyun size_t len)
2856*4882a593Smuzhiyun {
2857*4882a593Smuzhiyun struct hstate *h;
2858*4882a593Smuzhiyun unsigned long count;
2859*4882a593Smuzhiyun int nid;
2860*4882a593Smuzhiyun int err;
2861*4882a593Smuzhiyun
2862*4882a593Smuzhiyun err = kstrtoul(buf, 10, &count);
2863*4882a593Smuzhiyun if (err)
2864*4882a593Smuzhiyun return err;
2865*4882a593Smuzhiyun
2866*4882a593Smuzhiyun h = kobj_to_hstate(kobj, &nid);
2867*4882a593Smuzhiyun return __nr_hugepages_store_common(obey_mempolicy, h, nid, count, len);
2868*4882a593Smuzhiyun }
2869*4882a593Smuzhiyun
2870*4882a593Smuzhiyun static ssize_t nr_hugepages_show(struct kobject *kobj,
2871*4882a593Smuzhiyun struct kobj_attribute *attr, char *buf)
2872*4882a593Smuzhiyun {
2873*4882a593Smuzhiyun return nr_hugepages_show_common(kobj, attr, buf);
2874*4882a593Smuzhiyun }
2875*4882a593Smuzhiyun
2876*4882a593Smuzhiyun static ssize_t nr_hugepages_store(struct kobject *kobj,
2877*4882a593Smuzhiyun struct kobj_attribute *attr, const char *buf, size_t len)
2878*4882a593Smuzhiyun {
2879*4882a593Smuzhiyun return nr_hugepages_store_common(false, kobj, buf, len);
2880*4882a593Smuzhiyun }
2881*4882a593Smuzhiyun HSTATE_ATTR(nr_hugepages);
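
/*
 * Illustrative note, not part of the original file: HSTATE_ATTR() above
 * simply wires a _show/_store pair into a kobj_attribute, so
 * HSTATE_ATTR(nr_hugepages) is equivalent to:
 *
 *	static struct kobj_attribute nr_hugepages_attr =
 *		__ATTR(nr_hugepages, 0644, nr_hugepages_show, nr_hugepages_store);
 */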
2882*4882a593Smuzhiyun
2883*4882a593Smuzhiyun #ifdef CONFIG_NUMA
2884*4882a593Smuzhiyun
2885*4882a593Smuzhiyun /*
2886*4882a593Smuzhiyun * hstate attribute for optionally mempolicy-based constraint on persistent
2887*4882a593Smuzhiyun * huge page alloc/free.
2888*4882a593Smuzhiyun */
2889*4882a593Smuzhiyun static ssize_t nr_hugepages_mempolicy_show(struct kobject *kobj,
2890*4882a593Smuzhiyun struct kobj_attribute *attr, char *buf)
2891*4882a593Smuzhiyun {
2892*4882a593Smuzhiyun return nr_hugepages_show_common(kobj, attr, buf);
2893*4882a593Smuzhiyun }
2894*4882a593Smuzhiyun
2895*4882a593Smuzhiyun static ssize_t nr_hugepages_mempolicy_store(struct kobject *kobj,
2896*4882a593Smuzhiyun struct kobj_attribute *attr, const char *buf, size_t len)
2897*4882a593Smuzhiyun {
2898*4882a593Smuzhiyun return nr_hugepages_store_common(true, kobj, buf, len);
2899*4882a593Smuzhiyun }
2900*4882a593Smuzhiyun HSTATE_ATTR(nr_hugepages_mempolicy);
2901*4882a593Smuzhiyun #endif
2902*4882a593Smuzhiyun
2903*4882a593Smuzhiyun
2904*4882a593Smuzhiyun static ssize_t nr_overcommit_hugepages_show(struct kobject *kobj,
2905*4882a593Smuzhiyun struct kobj_attribute *attr, char *buf)
2906*4882a593Smuzhiyun {
2907*4882a593Smuzhiyun struct hstate *h = kobj_to_hstate(kobj, NULL);
2908*4882a593Smuzhiyun return sprintf(buf, "%lu\n", h->nr_overcommit_huge_pages);
2909*4882a593Smuzhiyun }
2910*4882a593Smuzhiyun
2911*4882a593Smuzhiyun static ssize_t nr_overcommit_hugepages_store(struct kobject *kobj,
2912*4882a593Smuzhiyun struct kobj_attribute *attr, const char *buf, size_t count)
2913*4882a593Smuzhiyun {
2914*4882a593Smuzhiyun int err;
2915*4882a593Smuzhiyun unsigned long input;
2916*4882a593Smuzhiyun struct hstate *h = kobj_to_hstate(kobj, NULL);
2917*4882a593Smuzhiyun
2918*4882a593Smuzhiyun if (hstate_is_gigantic(h))
2919*4882a593Smuzhiyun return -EINVAL;
2920*4882a593Smuzhiyun
2921*4882a593Smuzhiyun err = kstrtoul(buf, 10, &input);
2922*4882a593Smuzhiyun if (err)
2923*4882a593Smuzhiyun return err;
2924*4882a593Smuzhiyun
2925*4882a593Smuzhiyun spin_lock(&hugetlb_lock);
2926*4882a593Smuzhiyun h->nr_overcommit_huge_pages = input;
2927*4882a593Smuzhiyun spin_unlock(&hugetlb_lock);
2928*4882a593Smuzhiyun
2929*4882a593Smuzhiyun return count;
2930*4882a593Smuzhiyun }
2931*4882a593Smuzhiyun HSTATE_ATTR(nr_overcommit_hugepages);
2932*4882a593Smuzhiyun
2933*4882a593Smuzhiyun static ssize_t free_hugepages_show(struct kobject *kobj,
2934*4882a593Smuzhiyun struct kobj_attribute *attr, char *buf)
2935*4882a593Smuzhiyun {
2936*4882a593Smuzhiyun struct hstate *h;
2937*4882a593Smuzhiyun unsigned long free_huge_pages;
2938*4882a593Smuzhiyun int nid;
2939*4882a593Smuzhiyun
2940*4882a593Smuzhiyun h = kobj_to_hstate(kobj, &nid);
2941*4882a593Smuzhiyun if (nid == NUMA_NO_NODE)
2942*4882a593Smuzhiyun free_huge_pages = h->free_huge_pages;
2943*4882a593Smuzhiyun else
2944*4882a593Smuzhiyun free_huge_pages = h->free_huge_pages_node[nid];
2945*4882a593Smuzhiyun
2946*4882a593Smuzhiyun return sprintf(buf, "%lu\n", free_huge_pages);
2947*4882a593Smuzhiyun }
2948*4882a593Smuzhiyun HSTATE_ATTR_RO(free_hugepages);
2949*4882a593Smuzhiyun
2950*4882a593Smuzhiyun static ssize_t resv_hugepages_show(struct kobject *kobj,
2951*4882a593Smuzhiyun struct kobj_attribute *attr, char *buf)
2952*4882a593Smuzhiyun {
2953*4882a593Smuzhiyun struct hstate *h = kobj_to_hstate(kobj, NULL);
2954*4882a593Smuzhiyun return sprintf(buf, "%lu\n", h->resv_huge_pages);
2955*4882a593Smuzhiyun }
2956*4882a593Smuzhiyun HSTATE_ATTR_RO(resv_hugepages);
2957*4882a593Smuzhiyun
2958*4882a593Smuzhiyun static ssize_t surplus_hugepages_show(struct kobject *kobj,
2959*4882a593Smuzhiyun struct kobj_attribute *attr, char *buf)
2960*4882a593Smuzhiyun {
2961*4882a593Smuzhiyun struct hstate *h;
2962*4882a593Smuzhiyun unsigned long surplus_huge_pages;
2963*4882a593Smuzhiyun int nid;
2964*4882a593Smuzhiyun
2965*4882a593Smuzhiyun h = kobj_to_hstate(kobj, &nid);
2966*4882a593Smuzhiyun if (nid == NUMA_NO_NODE)
2967*4882a593Smuzhiyun surplus_huge_pages = h->surplus_huge_pages;
2968*4882a593Smuzhiyun else
2969*4882a593Smuzhiyun surplus_huge_pages = h->surplus_huge_pages_node[nid];
2970*4882a593Smuzhiyun
2971*4882a593Smuzhiyun return sprintf(buf, "%lu\n", surplus_huge_pages);
2972*4882a593Smuzhiyun }
2973*4882a593Smuzhiyun HSTATE_ATTR_RO(surplus_hugepages);
2974*4882a593Smuzhiyun
2975*4882a593Smuzhiyun static struct attribute *hstate_attrs[] = {
2976*4882a593Smuzhiyun &nr_hugepages_attr.attr,
2977*4882a593Smuzhiyun &nr_overcommit_hugepages_attr.attr,
2978*4882a593Smuzhiyun &free_hugepages_attr.attr,
2979*4882a593Smuzhiyun &resv_hugepages_attr.attr,
2980*4882a593Smuzhiyun &surplus_hugepages_attr.attr,
2981*4882a593Smuzhiyun #ifdef CONFIG_NUMA
2982*4882a593Smuzhiyun &nr_hugepages_mempolicy_attr.attr,
2983*4882a593Smuzhiyun #endif
2984*4882a593Smuzhiyun NULL,
2985*4882a593Smuzhiyun };
2986*4882a593Smuzhiyun
2987*4882a593Smuzhiyun static const struct attribute_group hstate_attr_group = {
2988*4882a593Smuzhiyun .attrs = hstate_attrs,
2989*4882a593Smuzhiyun };
2990*4882a593Smuzhiyun
2991*4882a593Smuzhiyun static int hugetlb_sysfs_add_hstate(struct hstate *h, struct kobject *parent,
2992*4882a593Smuzhiyun struct kobject **hstate_kobjs,
2993*4882a593Smuzhiyun const struct attribute_group *hstate_attr_group)
2994*4882a593Smuzhiyun {
2995*4882a593Smuzhiyun int retval;
2996*4882a593Smuzhiyun int hi = hstate_index(h);
2997*4882a593Smuzhiyun
2998*4882a593Smuzhiyun hstate_kobjs[hi] = kobject_create_and_add(h->name, parent);
2999*4882a593Smuzhiyun if (!hstate_kobjs[hi])
3000*4882a593Smuzhiyun return -ENOMEM;
3001*4882a593Smuzhiyun
3002*4882a593Smuzhiyun retval = sysfs_create_group(hstate_kobjs[hi], hstate_attr_group);
3003*4882a593Smuzhiyun if (retval) {
3004*4882a593Smuzhiyun kobject_put(hstate_kobjs[hi]);
3005*4882a593Smuzhiyun hstate_kobjs[hi] = NULL;
3006*4882a593Smuzhiyun }
3007*4882a593Smuzhiyun
3008*4882a593Smuzhiyun return retval;
3009*4882a593Smuzhiyun }
3010*4882a593Smuzhiyun
3011*4882a593Smuzhiyun static void __init hugetlb_sysfs_init(void)
3012*4882a593Smuzhiyun {
3013*4882a593Smuzhiyun struct hstate *h;
3014*4882a593Smuzhiyun int err;
3015*4882a593Smuzhiyun
3016*4882a593Smuzhiyun hugepages_kobj = kobject_create_and_add("hugepages", mm_kobj);
3017*4882a593Smuzhiyun if (!hugepages_kobj)
3018*4882a593Smuzhiyun return;
3019*4882a593Smuzhiyun
3020*4882a593Smuzhiyun for_each_hstate(h) {
3021*4882a593Smuzhiyun err = hugetlb_sysfs_add_hstate(h, hugepages_kobj,
3022*4882a593Smuzhiyun hstate_kobjs, &hstate_attr_group);
3023*4882a593Smuzhiyun if (err)
3024*4882a593Smuzhiyun pr_err("HugeTLB: Unable to add hstate %s", h->name);
3025*4882a593Smuzhiyun }
3026*4882a593Smuzhiyun }
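
/*
 * Illustrative note, not part of the original file: on a typical x86_64
 * system with 2 MiB and 1 GiB page sizes, the kobjects created above show
 * up as:
 *
 *	/sys/kernel/mm/hugepages/hugepages-2048kB/nr_hugepages
 *	/sys/kernel/mm/hugepages/hugepages-2048kB/free_hugepages
 *	/sys/kernel/mm/hugepages/hugepages-2048kB/nr_overcommit_hugepages
 *	/sys/kernel/mm/hugepages/hugepages-1048576kB/...
 */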
3027*4882a593Smuzhiyun
3028*4882a593Smuzhiyun #ifdef CONFIG_NUMA
3029*4882a593Smuzhiyun
3030*4882a593Smuzhiyun /*
3031*4882a593Smuzhiyun * node_hstate/s - associate per node hstate attributes, via their kobjects,
3032*4882a593Smuzhiyun * with node devices in node_devices[] using a parallel array. The array
3033*4882a593Smuzhiyun * index of a node device or _hstate == node id.
3034*4882a593Smuzhiyun * This is here to avoid any static dependency of the node device driver, in
3035*4882a593Smuzhiyun * the base kernel, on the hugetlb module.
3036*4882a593Smuzhiyun */
3037*4882a593Smuzhiyun struct node_hstate {
3038*4882a593Smuzhiyun struct kobject *hugepages_kobj;
3039*4882a593Smuzhiyun struct kobject *hstate_kobjs[HUGE_MAX_HSTATE];
3040*4882a593Smuzhiyun };
3041*4882a593Smuzhiyun static struct node_hstate node_hstates[MAX_NUMNODES];
3042*4882a593Smuzhiyun
3043*4882a593Smuzhiyun /*
3044*4882a593Smuzhiyun * A subset of global hstate attributes for node devices
3045*4882a593Smuzhiyun */
3046*4882a593Smuzhiyun static struct attribute *per_node_hstate_attrs[] = {
3047*4882a593Smuzhiyun &nr_hugepages_attr.attr,
3048*4882a593Smuzhiyun &free_hugepages_attr.attr,
3049*4882a593Smuzhiyun &surplus_hugepages_attr.attr,
3050*4882a593Smuzhiyun NULL,
3051*4882a593Smuzhiyun };
3052*4882a593Smuzhiyun
3053*4882a593Smuzhiyun static const struct attribute_group per_node_hstate_attr_group = {
3054*4882a593Smuzhiyun .attrs = per_node_hstate_attrs,
3055*4882a593Smuzhiyun };
3056*4882a593Smuzhiyun
3057*4882a593Smuzhiyun /*
3058*4882a593Smuzhiyun * kobj_to_node_hstate - lookup global hstate for node device hstate attr kobj.
3059*4882a593Smuzhiyun * Returns node id via non-NULL nidp.
3060*4882a593Smuzhiyun */
3061*4882a593Smuzhiyun static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp)
3062*4882a593Smuzhiyun {
3063*4882a593Smuzhiyun int nid;
3064*4882a593Smuzhiyun
3065*4882a593Smuzhiyun for (nid = 0; nid < nr_node_ids; nid++) {
3066*4882a593Smuzhiyun struct node_hstate *nhs = &node_hstates[nid];
3067*4882a593Smuzhiyun int i;
3068*4882a593Smuzhiyun for (i = 0; i < HUGE_MAX_HSTATE; i++)
3069*4882a593Smuzhiyun if (nhs->hstate_kobjs[i] == kobj) {
3070*4882a593Smuzhiyun if (nidp)
3071*4882a593Smuzhiyun *nidp = nid;
3072*4882a593Smuzhiyun return &hstates[i];
3073*4882a593Smuzhiyun }
3074*4882a593Smuzhiyun }
3075*4882a593Smuzhiyun
3076*4882a593Smuzhiyun BUG();
3077*4882a593Smuzhiyun return NULL;
3078*4882a593Smuzhiyun }
3079*4882a593Smuzhiyun
3080*4882a593Smuzhiyun /*
3081*4882a593Smuzhiyun * Unregister hstate attributes from a single node device.
3082*4882a593Smuzhiyun * No-op if no hstate attributes attached.
3083*4882a593Smuzhiyun */
3084*4882a593Smuzhiyun static void hugetlb_unregister_node(struct node *node)
3085*4882a593Smuzhiyun {
3086*4882a593Smuzhiyun struct hstate *h;
3087*4882a593Smuzhiyun struct node_hstate *nhs = &node_hstates[node->dev.id];
3088*4882a593Smuzhiyun
3089*4882a593Smuzhiyun if (!nhs->hugepages_kobj)
3090*4882a593Smuzhiyun return; /* no hstate attributes */
3091*4882a593Smuzhiyun
3092*4882a593Smuzhiyun for_each_hstate(h) {
3093*4882a593Smuzhiyun int idx = hstate_index(h);
3094*4882a593Smuzhiyun if (nhs->hstate_kobjs[idx]) {
3095*4882a593Smuzhiyun kobject_put(nhs->hstate_kobjs[idx]);
3096*4882a593Smuzhiyun nhs->hstate_kobjs[idx] = NULL;
3097*4882a593Smuzhiyun }
3098*4882a593Smuzhiyun }
3099*4882a593Smuzhiyun
3100*4882a593Smuzhiyun kobject_put(nhs->hugepages_kobj);
3101*4882a593Smuzhiyun nhs->hugepages_kobj = NULL;
3102*4882a593Smuzhiyun }
3103*4882a593Smuzhiyun
3104*4882a593Smuzhiyun
3105*4882a593Smuzhiyun /*
3106*4882a593Smuzhiyun * Register hstate attributes for a single node device.
3107*4882a593Smuzhiyun * No-op if attributes already registered.
3108*4882a593Smuzhiyun */
3109*4882a593Smuzhiyun static void hugetlb_register_node(struct node *node)
3110*4882a593Smuzhiyun {
3111*4882a593Smuzhiyun struct hstate *h;
3112*4882a593Smuzhiyun struct node_hstate *nhs = &node_hstates[node->dev.id];
3113*4882a593Smuzhiyun int err;
3114*4882a593Smuzhiyun
3115*4882a593Smuzhiyun if (nhs->hugepages_kobj)
3116*4882a593Smuzhiyun return; /* already allocated */
3117*4882a593Smuzhiyun
3118*4882a593Smuzhiyun nhs->hugepages_kobj = kobject_create_and_add("hugepages",
3119*4882a593Smuzhiyun &node->dev.kobj);
3120*4882a593Smuzhiyun if (!nhs->hugepages_kobj)
3121*4882a593Smuzhiyun return;
3122*4882a593Smuzhiyun
3123*4882a593Smuzhiyun for_each_hstate(h) {
3124*4882a593Smuzhiyun err = hugetlb_sysfs_add_hstate(h, nhs->hugepages_kobj,
3125*4882a593Smuzhiyun nhs->hstate_kobjs,
3126*4882a593Smuzhiyun &per_node_hstate_attr_group);
3127*4882a593Smuzhiyun if (err) {
3128*4882a593Smuzhiyun pr_err("HugeTLB: Unable to add hstate %s for node %d\n",
3129*4882a593Smuzhiyun h->name, node->dev.id);
3130*4882a593Smuzhiyun hugetlb_unregister_node(node);
3131*4882a593Smuzhiyun break;
3132*4882a593Smuzhiyun }
3133*4882a593Smuzhiyun }
3134*4882a593Smuzhiyun }
3135*4882a593Smuzhiyun
3136*4882a593Smuzhiyun /*
3137*4882a593Smuzhiyun * hugetlb init time: register hstate attributes for all registered node
3138*4882a593Smuzhiyun * devices of nodes that have memory. All on-line nodes should have
3139*4882a593Smuzhiyun * registered their associated device by this time.
3140*4882a593Smuzhiyun */
3141*4882a593Smuzhiyun static void __init hugetlb_register_all_nodes(void)
3142*4882a593Smuzhiyun {
3143*4882a593Smuzhiyun int nid;
3144*4882a593Smuzhiyun
3145*4882a593Smuzhiyun for_each_node_state(nid, N_MEMORY) {
3146*4882a593Smuzhiyun struct node *node = node_devices[nid];
3147*4882a593Smuzhiyun if (node->dev.id == nid)
3148*4882a593Smuzhiyun hugetlb_register_node(node);
3149*4882a593Smuzhiyun }
3150*4882a593Smuzhiyun
3151*4882a593Smuzhiyun /*
3152*4882a593Smuzhiyun * Let the node device driver know we're here so it can
3153*4882a593Smuzhiyun * [un]register hstate attributes on node hotplug.
3154*4882a593Smuzhiyun */
3155*4882a593Smuzhiyun register_hugetlbfs_with_node(hugetlb_register_node,
3156*4882a593Smuzhiyun hugetlb_unregister_node);
3157*4882a593Smuzhiyun }
3158*4882a593Smuzhiyun #else /* !CONFIG_NUMA */
3159*4882a593Smuzhiyun
3160*4882a593Smuzhiyun static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp)
3161*4882a593Smuzhiyun {
3162*4882a593Smuzhiyun BUG();
3163*4882a593Smuzhiyun if (nidp)
3164*4882a593Smuzhiyun *nidp = -1;
3165*4882a593Smuzhiyun return NULL;
3166*4882a593Smuzhiyun }
3167*4882a593Smuzhiyun
3168*4882a593Smuzhiyun static void hugetlb_register_all_nodes(void) { }
3169*4882a593Smuzhiyun
3170*4882a593Smuzhiyun #endif
3171*4882a593Smuzhiyun
3172*4882a593Smuzhiyun static int __init hugetlb_init(void)
3173*4882a593Smuzhiyun {
3174*4882a593Smuzhiyun int i;
3175*4882a593Smuzhiyun
3176*4882a593Smuzhiyun if (!hugepages_supported()) {
3177*4882a593Smuzhiyun if (hugetlb_max_hstate || default_hstate_max_huge_pages)
3178*4882a593Smuzhiyun pr_warn("HugeTLB: huge pages not supported, ignoring associated command-line parameters\n");
3179*4882a593Smuzhiyun return 0;
3180*4882a593Smuzhiyun }
3181*4882a593Smuzhiyun
3182*4882a593Smuzhiyun /*
3183*4882a593Smuzhiyun * Make sure HPAGE_SIZE (HUGETLB_PAGE_ORDER) hstate exists. Some
3184*4882a593Smuzhiyun * architectures depend on setup being done here.
3185*4882a593Smuzhiyun */
3186*4882a593Smuzhiyun hugetlb_add_hstate(HUGETLB_PAGE_ORDER);
3187*4882a593Smuzhiyun if (!parsed_default_hugepagesz) {
3188*4882a593Smuzhiyun /*
3189*4882a593Smuzhiyun * If we did not parse a default huge page size, set
3190*4882a593Smuzhiyun * default_hstate_idx to HPAGE_SIZE hstate. And, if the
3191*4882a593Smuzhiyun * number of huge pages for this default size was implicitly
3192*4882a593Smuzhiyun * specified, set that here as well.
3193*4882a593Smuzhiyun * Note that the implicit setting will overwrite an explicit
3194*4882a593Smuzhiyun * setting. A warning will be printed in this case.
3195*4882a593Smuzhiyun */
3196*4882a593Smuzhiyun default_hstate_idx = hstate_index(size_to_hstate(HPAGE_SIZE));
3197*4882a593Smuzhiyun if (default_hstate_max_huge_pages) {
3198*4882a593Smuzhiyun if (default_hstate.max_huge_pages) {
3199*4882a593Smuzhiyun char buf[32];
3200*4882a593Smuzhiyun
3201*4882a593Smuzhiyun string_get_size(huge_page_size(&default_hstate),
3202*4882a593Smuzhiyun 1, STRING_UNITS_2, buf, 32);
3203*4882a593Smuzhiyun pr_warn("HugeTLB: Ignoring hugepages=%lu associated with %s page size\n",
3204*4882a593Smuzhiyun default_hstate.max_huge_pages, buf);
3205*4882a593Smuzhiyun pr_warn("HugeTLB: Using hugepages=%lu for number of default huge pages\n",
3206*4882a593Smuzhiyun default_hstate_max_huge_pages);
3207*4882a593Smuzhiyun }
3208*4882a593Smuzhiyun default_hstate.max_huge_pages =
3209*4882a593Smuzhiyun default_hstate_max_huge_pages;
3210*4882a593Smuzhiyun }
3211*4882a593Smuzhiyun }
3212*4882a593Smuzhiyun
3213*4882a593Smuzhiyun hugetlb_cma_check();
3214*4882a593Smuzhiyun hugetlb_init_hstates();
3215*4882a593Smuzhiyun gather_bootmem_prealloc();
3216*4882a593Smuzhiyun report_hugepages();
3217*4882a593Smuzhiyun
3218*4882a593Smuzhiyun hugetlb_sysfs_init();
3219*4882a593Smuzhiyun hugetlb_register_all_nodes();
3220*4882a593Smuzhiyun hugetlb_cgroup_file_init();
3221*4882a593Smuzhiyun
3222*4882a593Smuzhiyun #ifdef CONFIG_SMP
3223*4882a593Smuzhiyun num_fault_mutexes = roundup_pow_of_two(8 * num_possible_cpus());
3224*4882a593Smuzhiyun #else
3225*4882a593Smuzhiyun num_fault_mutexes = 1;
3226*4882a593Smuzhiyun #endif
3227*4882a593Smuzhiyun hugetlb_fault_mutex_table =
3228*4882a593Smuzhiyun kmalloc_array(num_fault_mutexes, sizeof(struct mutex),
3229*4882a593Smuzhiyun GFP_KERNEL);
3230*4882a593Smuzhiyun BUG_ON(!hugetlb_fault_mutex_table);
3231*4882a593Smuzhiyun
3232*4882a593Smuzhiyun for (i = 0; i < num_fault_mutexes; i++)
3233*4882a593Smuzhiyun mutex_init(&hugetlb_fault_mutex_table[i]);
3234*4882a593Smuzhiyun return 0;
3235*4882a593Smuzhiyun }
3236*4882a593Smuzhiyun subsys_initcall(hugetlb_init);
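
/*
 * Illustrative note, not part of the original file: on SMP the fault mutex
 * table above scales with the possible CPU count, rounded up to a power of
 * two.  For example, 6 possible CPUs give roundup_pow_of_two(8 * 6) = 64
 * mutexes, and 16 possible CPUs give 128.
 */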
3237*4882a593Smuzhiyun
3238*4882a593Smuzhiyun /* Overwritten by architectures with more huge page sizes */
3239*4882a593Smuzhiyun bool __init __attribute((weak)) arch_hugetlb_valid_size(unsigned long size)
3240*4882a593Smuzhiyun {
3241*4882a593Smuzhiyun return size == HPAGE_SIZE;
3242*4882a593Smuzhiyun }
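
/*
 * Illustrative sketch, not part of the original file: an architecture that
 * supports more than one huge page size overrides the weak symbol above,
 * for example (hypothetical PMD/PUD based architecture):
 *
 *	bool __init arch_hugetlb_valid_size(unsigned long size)
 *	{
 *		return size == PMD_SIZE || size == PUD_SIZE;
 *	}
 */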
3243*4882a593Smuzhiyun
3244*4882a593Smuzhiyun void __init hugetlb_add_hstate(unsigned int order)
3245*4882a593Smuzhiyun {
3246*4882a593Smuzhiyun struct hstate *h;
3247*4882a593Smuzhiyun unsigned long i;
3248*4882a593Smuzhiyun
3249*4882a593Smuzhiyun if (size_to_hstate(PAGE_SIZE << order)) {
3250*4882a593Smuzhiyun return;
3251*4882a593Smuzhiyun }
3252*4882a593Smuzhiyun BUG_ON(hugetlb_max_hstate >= HUGE_MAX_HSTATE);
3253*4882a593Smuzhiyun BUG_ON(order == 0);
3254*4882a593Smuzhiyun h = &hstates[hugetlb_max_hstate++];
3255*4882a593Smuzhiyun h->order = order;
3256*4882a593Smuzhiyun h->mask = ~((1ULL << (order + PAGE_SHIFT)) - 1);
3257*4882a593Smuzhiyun h->nr_huge_pages = 0;
3258*4882a593Smuzhiyun h->free_huge_pages = 0;
3259*4882a593Smuzhiyun for (i = 0; i < MAX_NUMNODES; ++i)
3260*4882a593Smuzhiyun INIT_LIST_HEAD(&h->hugepage_freelists[i]);
3261*4882a593Smuzhiyun INIT_LIST_HEAD(&h->hugepage_activelist);
3262*4882a593Smuzhiyun h->next_nid_to_alloc = first_memory_node;
3263*4882a593Smuzhiyun h->next_nid_to_free = first_memory_node;
3264*4882a593Smuzhiyun snprintf(h->name, HSTATE_NAME_LEN, "hugepages-%lukB",
3265*4882a593Smuzhiyun huge_page_size(h)/1024);
3266*4882a593Smuzhiyun
3267*4882a593Smuzhiyun parsed_hstate = h;
3268*4882a593Smuzhiyun }
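
/*
 * Illustrative sketch, not part of the original file: besides the command
 * line paths below, architecture setup code may register its supported
 * sizes directly; a hypothetical arch initcall could look like:
 *
 *	static int __init my_arch_hugetlbpage_init(void)
 *	{
 *		hugetlb_add_hstate(PMD_SHIFT - PAGE_SHIFT);
 *		hugetlb_add_hstate(PUD_SHIFT - PAGE_SHIFT);
 *		return 0;
 *	}
 *	arch_initcall(my_arch_hugetlbpage_init);
 */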
3269*4882a593Smuzhiyun
3270*4882a593Smuzhiyun /*
3271*4882a593Smuzhiyun * hugepages command line processing
3272*4882a593Smuzhiyun * hugepages normally follows a valid hugepagesz or default_hugepagesz
3273*4882a593Smuzhiyun * specification. If not, ignore the hugepages value. hugepages can also
3274*4882a593Smuzhiyun * be the first huge page command line option in which case it implicitly
3275*4882a593Smuzhiyun * specifies the number of huge pages for the default size.
3276*4882a593Smuzhiyun */
3277*4882a593Smuzhiyun static int __init hugepages_setup(char *s)
3278*4882a593Smuzhiyun {
3279*4882a593Smuzhiyun unsigned long *mhp;
3280*4882a593Smuzhiyun static unsigned long *last_mhp;
3281*4882a593Smuzhiyun
3282*4882a593Smuzhiyun if (!parsed_valid_hugepagesz) {
3283*4882a593Smuzhiyun pr_warn("HugeTLB: hugepages=%s does not follow a valid hugepagesz, ignoring\n", s);
3284*4882a593Smuzhiyun parsed_valid_hugepagesz = true;
3285*4882a593Smuzhiyun return 0;
3286*4882a593Smuzhiyun }
3287*4882a593Smuzhiyun
3288*4882a593Smuzhiyun /*
3289*4882a593Smuzhiyun * !hugetlb_max_hstate means we haven't parsed a hugepagesz= parameter
3290*4882a593Smuzhiyun * yet, so this hugepages= parameter goes to the "default hstate".
3291*4882a593Smuzhiyun * Otherwise, it goes with the previously parsed hugepagesz or
3292*4882a593Smuzhiyun * default_hugepagesz.
3293*4882a593Smuzhiyun */
3294*4882a593Smuzhiyun else if (!hugetlb_max_hstate)
3295*4882a593Smuzhiyun mhp = &default_hstate_max_huge_pages;
3296*4882a593Smuzhiyun else
3297*4882a593Smuzhiyun mhp = &parsed_hstate->max_huge_pages;
3298*4882a593Smuzhiyun
3299*4882a593Smuzhiyun if (mhp == last_mhp) {
3300*4882a593Smuzhiyun pr_warn("HugeTLB: hugepages= specified twice without interleaving hugepagesz=, ignoring hugepages=%s\n", s);
3301*4882a593Smuzhiyun return 0;
3302*4882a593Smuzhiyun }
3303*4882a593Smuzhiyun
3304*4882a593Smuzhiyun if (sscanf(s, "%lu", mhp) <= 0)
3305*4882a593Smuzhiyun *mhp = 0;
3306*4882a593Smuzhiyun
3307*4882a593Smuzhiyun /*
3308*4882a593Smuzhiyun * Global state is always initialized later in hugetlb_init.
3309*4882a593Smuzhiyun * But we need to allocate gigantic hstates (order >= MAX_ORDER) here
3310*4882a593Smuzhiyun * early so they can still use the bootmem allocator.
3311*4882a593Smuzhiyun */
3312*4882a593Smuzhiyun if (hugetlb_max_hstate && parsed_hstate->order >= MAX_ORDER)
3313*4882a593Smuzhiyun hugetlb_hstate_alloc_pages(parsed_hstate);
3314*4882a593Smuzhiyun
3315*4882a593Smuzhiyun last_mhp = mhp;
3316*4882a593Smuzhiyun
3317*4882a593Smuzhiyun return 1;
3318*4882a593Smuzhiyun }
3319*4882a593Smuzhiyun __setup("hugepages=", hugepages_setup);
3320*4882a593Smuzhiyun
3321*4882a593Smuzhiyun /*
3322*4882a593Smuzhiyun * hugepagesz command line processing
3323*4882a593Smuzhiyun * A specific huge page size can only be specified once with hugepagesz.
3324*4882a593Smuzhiyun * hugepagesz is followed by hugepages on the command line. The global
3325*4882a593Smuzhiyun * variable 'parsed_valid_hugepagesz' is used to determine if prior
3326*4882a593Smuzhiyun * hugepagesz argument was valid.
3327*4882a593Smuzhiyun */
3328*4882a593Smuzhiyun static int __init hugepagesz_setup(char *s)
3329*4882a593Smuzhiyun {
3330*4882a593Smuzhiyun unsigned long size;
3331*4882a593Smuzhiyun struct hstate *h;
3332*4882a593Smuzhiyun
3333*4882a593Smuzhiyun parsed_valid_hugepagesz = false;
3334*4882a593Smuzhiyun size = (unsigned long)memparse(s, NULL);
3335*4882a593Smuzhiyun
3336*4882a593Smuzhiyun if (!arch_hugetlb_valid_size(size)) {
3337*4882a593Smuzhiyun pr_err("HugeTLB: unsupported hugepagesz=%s\n", s);
3338*4882a593Smuzhiyun return 0;
3339*4882a593Smuzhiyun }
3340*4882a593Smuzhiyun
3341*4882a593Smuzhiyun h = size_to_hstate(size);
3342*4882a593Smuzhiyun if (h) {
3343*4882a593Smuzhiyun /*
3344*4882a593Smuzhiyun * hstate for this size already exists. This is normally
3345*4882a593Smuzhiyun * an error, but is allowed if the existing hstate is the
3346*4882a593Smuzhiyun * default hstate. More specifically, it is only allowed if
3347*4882a593Smuzhiyun * the number of huge pages for the default hstate was not
3348*4882a593Smuzhiyun * previously specified.
3349*4882a593Smuzhiyun */
3350*4882a593Smuzhiyun if (!parsed_default_hugepagesz || h != &default_hstate ||
3351*4882a593Smuzhiyun default_hstate.max_huge_pages) {
3352*4882a593Smuzhiyun pr_warn("HugeTLB: hugepagesz=%s specified twice, ignoring\n", s);
3353*4882a593Smuzhiyun return 0;
3354*4882a593Smuzhiyun }
3355*4882a593Smuzhiyun
3356*4882a593Smuzhiyun /*
3357*4882a593Smuzhiyun * No need to call hugetlb_add_hstate() as hstate already
3358*4882a593Smuzhiyun * exists. But, do set parsed_hstate so that a following
3359*4882a593Smuzhiyun * hugepages= parameter will be applied to this hstate.
3360*4882a593Smuzhiyun */
3361*4882a593Smuzhiyun parsed_hstate = h;
3362*4882a593Smuzhiyun parsed_valid_hugepagesz = true;
3363*4882a593Smuzhiyun return 1;
3364*4882a593Smuzhiyun }
3365*4882a593Smuzhiyun
3366*4882a593Smuzhiyun hugetlb_add_hstate(ilog2(size) - PAGE_SHIFT);
3367*4882a593Smuzhiyun parsed_valid_hugepagesz = true;
3368*4882a593Smuzhiyun return 1;
3369*4882a593Smuzhiyun }
3370*4882a593Smuzhiyun __setup("hugepagesz=", hugepagesz_setup);
3371*4882a593Smuzhiyun
3372*4882a593Smuzhiyun /*
3373*4882a593Smuzhiyun * default_hugepagesz command line input
3374*4882a593Smuzhiyun * Only one instance of default_hugepagesz allowed on command line.
3375*4882a593Smuzhiyun */
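/*
 * A minimal usage sketch (again assuming the size is valid for the
 * architecture):
 *
 *   default_hugepagesz=1G hugepages=16
 *
 * makes 1GB the default huge page size; the hugepages= count that follows
 * (or one that preceded it, handled below) is applied to that size.
 */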
3376*4882a593Smuzhiyun static int __init default_hugepagesz_setup(char *s)
3377*4882a593Smuzhiyun {
3378*4882a593Smuzhiyun unsigned long size;
3379*4882a593Smuzhiyun
3380*4882a593Smuzhiyun parsed_valid_hugepagesz = false;
3381*4882a593Smuzhiyun if (parsed_default_hugepagesz) {
3382*4882a593Smuzhiyun pr_err("HugeTLB: default_hugepagesz previously specified, ignoring %s\n", s);
3383*4882a593Smuzhiyun return 0;
3384*4882a593Smuzhiyun }
3385*4882a593Smuzhiyun
3386*4882a593Smuzhiyun size = (unsigned long)memparse(s, NULL);
3387*4882a593Smuzhiyun
3388*4882a593Smuzhiyun if (!arch_hugetlb_valid_size(size)) {
3389*4882a593Smuzhiyun pr_err("HugeTLB: unsupported default_hugepagesz=%s\n", s);
3390*4882a593Smuzhiyun return 0;
3391*4882a593Smuzhiyun }
3392*4882a593Smuzhiyun
3393*4882a593Smuzhiyun hugetlb_add_hstate(ilog2(size) - PAGE_SHIFT);
3394*4882a593Smuzhiyun parsed_valid_hugepagesz = true;
3395*4882a593Smuzhiyun parsed_default_hugepagesz = true;
3396*4882a593Smuzhiyun default_hstate_idx = hstate_index(size_to_hstate(size));
3397*4882a593Smuzhiyun
3398*4882a593Smuzhiyun /*
3399*4882a593Smuzhiyun * The number of default huge pages (for this size) could have been
3400*4882a593Smuzhiyun * specified as the first hugetlb parameter: hugepages=X. If so,
3401*4882a593Smuzhiyun * then default_hstate_max_huge_pages is set. If the default huge
3402*4882a593Smuzhiyun * page size is gigantic (>= MAX_ORDER), then the pages must be
3403*4882a593Smuzhiyun * allocated here from bootmem allocator.
3404*4882a593Smuzhiyun */
3405*4882a593Smuzhiyun if (default_hstate_max_huge_pages) {
3406*4882a593Smuzhiyun default_hstate.max_huge_pages = default_hstate_max_huge_pages;
3407*4882a593Smuzhiyun if (hstate_is_gigantic(&default_hstate))
3408*4882a593Smuzhiyun hugetlb_hstate_alloc_pages(&default_hstate);
3409*4882a593Smuzhiyun default_hstate_max_huge_pages = 0;
3410*4882a593Smuzhiyun }
3411*4882a593Smuzhiyun
3412*4882a593Smuzhiyun return 1;
3413*4882a593Smuzhiyun }
3414*4882a593Smuzhiyun __setup("default_hugepagesz=", default_hugepagesz_setup);
3415*4882a593Smuzhiyun
3416*4882a593Smuzhiyun static unsigned int allowed_mems_nr(struct hstate *h)
3417*4882a593Smuzhiyun {
3418*4882a593Smuzhiyun int node;
3419*4882a593Smuzhiyun unsigned int nr = 0;
3420*4882a593Smuzhiyun nodemask_t *mpol_allowed;
3421*4882a593Smuzhiyun unsigned int *array = h->free_huge_pages_node;
3422*4882a593Smuzhiyun gfp_t gfp_mask = htlb_alloc_mask(h);
3423*4882a593Smuzhiyun
3424*4882a593Smuzhiyun mpol_allowed = policy_nodemask_current(gfp_mask);
3425*4882a593Smuzhiyun
3426*4882a593Smuzhiyun for_each_node_mask(node, cpuset_current_mems_allowed) {
3427*4882a593Smuzhiyun if (!mpol_allowed ||
3428*4882a593Smuzhiyun (mpol_allowed && node_isset(node, *mpol_allowed)))
3429*4882a593Smuzhiyun nr += array[node];
3430*4882a593Smuzhiyun }
3431*4882a593Smuzhiyun
3432*4882a593Smuzhiyun return nr;
3433*4882a593Smuzhiyun }
3434*4882a593Smuzhiyun
3435*4882a593Smuzhiyun #ifdef CONFIG_SYSCTL
3436*4882a593Smuzhiyun static int proc_hugetlb_doulongvec_minmax(struct ctl_table *table, int write,
3437*4882a593Smuzhiyun void *buffer, size_t *length,
3438*4882a593Smuzhiyun loff_t *ppos, unsigned long *out)
3439*4882a593Smuzhiyun {
3440*4882a593Smuzhiyun struct ctl_table dup_table;
3441*4882a593Smuzhiyun
3442*4882a593Smuzhiyun /*
3443*4882a593Smuzhiyun * In order to avoid races with __do_proc_doulongvec_minmax(), we
3444*4882a593Smuzhiyun * duplicate @table and alter the duplicate instead.
3445*4882a593Smuzhiyun */
3446*4882a593Smuzhiyun dup_table = *table;
3447*4882a593Smuzhiyun dup_table.data = out;
3448*4882a593Smuzhiyun
3449*4882a593Smuzhiyun return proc_doulongvec_minmax(&dup_table, write, buffer, length, ppos);
3450*4882a593Smuzhiyun }
3451*4882a593Smuzhiyun
3452*4882a593Smuzhiyun static int hugetlb_sysctl_handler_common(bool obey_mempolicy,
3453*4882a593Smuzhiyun struct ctl_table *table, int write,
3454*4882a593Smuzhiyun void *buffer, size_t *length, loff_t *ppos)
3455*4882a593Smuzhiyun {
3456*4882a593Smuzhiyun struct hstate *h = &default_hstate;
3457*4882a593Smuzhiyun unsigned long tmp = h->max_huge_pages;
3458*4882a593Smuzhiyun int ret;
3459*4882a593Smuzhiyun
3460*4882a593Smuzhiyun if (!hugepages_supported())
3461*4882a593Smuzhiyun return -EOPNOTSUPP;
3462*4882a593Smuzhiyun
3463*4882a593Smuzhiyun ret = proc_hugetlb_doulongvec_minmax(table, write, buffer, length, ppos,
3464*4882a593Smuzhiyun &tmp);
3465*4882a593Smuzhiyun if (ret)
3466*4882a593Smuzhiyun goto out;
3467*4882a593Smuzhiyun
3468*4882a593Smuzhiyun if (write)
3469*4882a593Smuzhiyun ret = __nr_hugepages_store_common(obey_mempolicy, h,
3470*4882a593Smuzhiyun NUMA_NO_NODE, tmp, *length);
3471*4882a593Smuzhiyun out:
3472*4882a593Smuzhiyun return ret;
3473*4882a593Smuzhiyun }
3474*4882a593Smuzhiyun
3475*4882a593Smuzhiyun int hugetlb_sysctl_handler(struct ctl_table *table, int write,
3476*4882a593Smuzhiyun void *buffer, size_t *length, loff_t *ppos)
3477*4882a593Smuzhiyun {
3478*4882a593Smuzhiyun
3479*4882a593Smuzhiyun return hugetlb_sysctl_handler_common(false, table, write,
3480*4882a593Smuzhiyun buffer, length, ppos);
3481*4882a593Smuzhiyun }
3482*4882a593Smuzhiyun
3483*4882a593Smuzhiyun #ifdef CONFIG_NUMA
3484*4882a593Smuzhiyun int hugetlb_mempolicy_sysctl_handler(struct ctl_table *table, int write,
3485*4882a593Smuzhiyun void *buffer, size_t *length, loff_t *ppos)
3486*4882a593Smuzhiyun {
3487*4882a593Smuzhiyun return hugetlb_sysctl_handler_common(true, table, write,
3488*4882a593Smuzhiyun buffer, length, ppos);
3489*4882a593Smuzhiyun }
3490*4882a593Smuzhiyun #endif /* CONFIG_NUMA */
3491*4882a593Smuzhiyun
3492*4882a593Smuzhiyun int hugetlb_overcommit_handler(struct ctl_table *table, int write,
3493*4882a593Smuzhiyun void *buffer, size_t *length, loff_t *ppos)
3494*4882a593Smuzhiyun {
3495*4882a593Smuzhiyun struct hstate *h = &default_hstate;
3496*4882a593Smuzhiyun unsigned long tmp;
3497*4882a593Smuzhiyun int ret;
3498*4882a593Smuzhiyun
3499*4882a593Smuzhiyun if (!hugepages_supported())
3500*4882a593Smuzhiyun return -EOPNOTSUPP;
3501*4882a593Smuzhiyun
3502*4882a593Smuzhiyun tmp = h->nr_overcommit_huge_pages;
3503*4882a593Smuzhiyun
3504*4882a593Smuzhiyun if (write && hstate_is_gigantic(h))
3505*4882a593Smuzhiyun return -EINVAL;
3506*4882a593Smuzhiyun
3507*4882a593Smuzhiyun ret = proc_hugetlb_doulongvec_minmax(table, write, buffer, length, ppos,
3508*4882a593Smuzhiyun &tmp);
3509*4882a593Smuzhiyun if (ret)
3510*4882a593Smuzhiyun goto out;
3511*4882a593Smuzhiyun
3512*4882a593Smuzhiyun if (write) {
3513*4882a593Smuzhiyun spin_lock(&hugetlb_lock);
3514*4882a593Smuzhiyun h->nr_overcommit_huge_pages = tmp;
3515*4882a593Smuzhiyun spin_unlock(&hugetlb_lock);
3516*4882a593Smuzhiyun }
3517*4882a593Smuzhiyun out:
3518*4882a593Smuzhiyun return ret;
3519*4882a593Smuzhiyun }
3520*4882a593Smuzhiyun
3521*4882a593Smuzhiyun #endif /* CONFIG_SYSCTL */
3522*4882a593Smuzhiyun
3523*4882a593Smuzhiyun void hugetlb_report_meminfo(struct seq_file *m)
3524*4882a593Smuzhiyun {
3525*4882a593Smuzhiyun struct hstate *h;
3526*4882a593Smuzhiyun unsigned long total = 0;
3527*4882a593Smuzhiyun
3528*4882a593Smuzhiyun if (!hugepages_supported())
3529*4882a593Smuzhiyun return;
3530*4882a593Smuzhiyun
3531*4882a593Smuzhiyun for_each_hstate(h) {
3532*4882a593Smuzhiyun unsigned long count = h->nr_huge_pages;
3533*4882a593Smuzhiyun
3534*4882a593Smuzhiyun total += (PAGE_SIZE << huge_page_order(h)) * count;
3535*4882a593Smuzhiyun
3536*4882a593Smuzhiyun if (h == &default_hstate)
3537*4882a593Smuzhiyun seq_printf(m,
3538*4882a593Smuzhiyun "HugePages_Total: %5lu\n"
3539*4882a593Smuzhiyun "HugePages_Free: %5lu\n"
3540*4882a593Smuzhiyun "HugePages_Rsvd: %5lu\n"
3541*4882a593Smuzhiyun "HugePages_Surp: %5lu\n"
3542*4882a593Smuzhiyun "Hugepagesize: %8lu kB\n",
3543*4882a593Smuzhiyun count,
3544*4882a593Smuzhiyun h->free_huge_pages,
3545*4882a593Smuzhiyun h->resv_huge_pages,
3546*4882a593Smuzhiyun h->surplus_huge_pages,
3547*4882a593Smuzhiyun (PAGE_SIZE << huge_page_order(h)) / 1024);
3548*4882a593Smuzhiyun }
3549*4882a593Smuzhiyun
3550*4882a593Smuzhiyun seq_printf(m, "Hugetlb: %8lu kB\n", total / 1024);
3551*4882a593Smuzhiyun }
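/*
 * Illustrative /proc/meminfo output produced by the code above (the numbers
 * are made up; field widths follow the format strings):
 *
 *   HugePages_Total:      64
 *   HugePages_Free:       60
 *   HugePages_Rsvd:        4
 *   HugePages_Surp:        0
 *   Hugepagesize:       2048 kB
 *   Hugetlb:          131072 kB
 */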
3552*4882a593Smuzhiyun
3553*4882a593Smuzhiyun int hugetlb_report_node_meminfo(char *buf, int len, int nid)
3554*4882a593Smuzhiyun {
3555*4882a593Smuzhiyun struct hstate *h = &default_hstate;
3556*4882a593Smuzhiyun
3557*4882a593Smuzhiyun if (!hugepages_supported())
3558*4882a593Smuzhiyun return 0;
3559*4882a593Smuzhiyun
3560*4882a593Smuzhiyun return sysfs_emit_at(buf, len,
3561*4882a593Smuzhiyun "Node %d HugePages_Total: %5u\n"
3562*4882a593Smuzhiyun "Node %d HugePages_Free: %5u\n"
3563*4882a593Smuzhiyun "Node %d HugePages_Surp: %5u\n",
3564*4882a593Smuzhiyun nid, h->nr_huge_pages_node[nid],
3565*4882a593Smuzhiyun nid, h->free_huge_pages_node[nid],
3566*4882a593Smuzhiyun nid, h->surplus_huge_pages_node[nid]);
3567*4882a593Smuzhiyun }
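/*
 * Illustrative per-node output emitted above, as seen in e.g.
 * /sys/devices/system/node/node0/meminfo (values are made up):
 *
 *   Node 0 HugePages_Total:    32
 *   Node 0 HugePages_Free:     30
 *   Node 0 HugePages_Surp:      0
 */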
3568*4882a593Smuzhiyun
3569*4882a593Smuzhiyun void hugetlb_show_meminfo(void)
3570*4882a593Smuzhiyun {
3571*4882a593Smuzhiyun struct hstate *h;
3572*4882a593Smuzhiyun int nid;
3573*4882a593Smuzhiyun
3574*4882a593Smuzhiyun if (!hugepages_supported())
3575*4882a593Smuzhiyun return;
3576*4882a593Smuzhiyun
3577*4882a593Smuzhiyun for_each_node_state(nid, N_MEMORY)
3578*4882a593Smuzhiyun for_each_hstate(h)
3579*4882a593Smuzhiyun pr_info("Node %d hugepages_total=%u hugepages_free=%u hugepages_surp=%u hugepages_size=%lukB\n",
3580*4882a593Smuzhiyun nid,
3581*4882a593Smuzhiyun h->nr_huge_pages_node[nid],
3582*4882a593Smuzhiyun h->free_huge_pages_node[nid],
3583*4882a593Smuzhiyun h->surplus_huge_pages_node[nid],
3584*4882a593Smuzhiyun 1UL << (huge_page_order(h) + PAGE_SHIFT - 10));
3585*4882a593Smuzhiyun }
3586*4882a593Smuzhiyun
3587*4882a593Smuzhiyun void hugetlb_report_usage(struct seq_file *m, struct mm_struct *mm)
3588*4882a593Smuzhiyun {
3589*4882a593Smuzhiyun seq_printf(m, "HugetlbPages:\t%8lu kB\n",
3590*4882a593Smuzhiyun atomic_long_read(&mm->hugetlb_usage) << (PAGE_SHIFT - 10));
3591*4882a593Smuzhiyun }
3592*4882a593Smuzhiyun
3593*4882a593Smuzhiyun /* Return the number of pages of memory we physically have, in PAGE_SIZE units. */
3594*4882a593Smuzhiyun unsigned long hugetlb_total_pages(void)
3595*4882a593Smuzhiyun {
3596*4882a593Smuzhiyun struct hstate *h;
3597*4882a593Smuzhiyun unsigned long nr_total_pages = 0;
3598*4882a593Smuzhiyun
3599*4882a593Smuzhiyun for_each_hstate(h)
3600*4882a593Smuzhiyun nr_total_pages += h->nr_huge_pages * pages_per_huge_page(h);
3601*4882a593Smuzhiyun return nr_total_pages;
3602*4882a593Smuzhiyun }
3603*4882a593Smuzhiyun
3604*4882a593Smuzhiyun static int hugetlb_acct_memory(struct hstate *h, long delta)
3605*4882a593Smuzhiyun {
3606*4882a593Smuzhiyun int ret = -ENOMEM;
3607*4882a593Smuzhiyun
3608*4882a593Smuzhiyun spin_lock(&hugetlb_lock);
3609*4882a593Smuzhiyun /*
3610*4882a593Smuzhiyun * When cpuset is configured, it breaks the strict hugetlb page
3611*4882a593Smuzhiyun * reservation as the accounting is done on a global variable. Such
3612*4882a593Smuzhiyun * reservation is completely rubbish in the presence of cpuset because
3613*4882a593Smuzhiyun * the reservation is not checked against page availability for the
3614*4882a593Smuzhiyun * current cpuset. Application can still potentially OOM'ed by kernel
3615*4882a593Smuzhiyun * with lack of free htlb page in cpuset that the task is in.
3616*4882a593Smuzhiyun * Attempting to enforce strict accounting with cpuset is almost
3617*4882a593Smuzhiyun * impossible (or too ugly) because cpuset is so fluid that a
3618*4882a593Smuzhiyun * task or memory node can be dynamically moved between cpusets.
3619*4882a593Smuzhiyun *
3620*4882a593Smuzhiyun * The change of semantics for shared hugetlb mapping with cpuset is
3621*4882a593Smuzhiyun * undesirable. However, in order to preserve some of the semantics,
3622*4882a593Smuzhiyun * we fall back to check against current free page availability as
3623*4882a593Smuzhiyun * a best attempt and hopefully to minimize the impact of changing
3624*4882a593Smuzhiyun * semantics that cpuset has.
3625*4882a593Smuzhiyun *
3626*4882a593Smuzhiyun * Apart from cpuset, we also have memory policy mechanism that
3627*4882a593Smuzhiyun * also determines from which node the kernel will allocate memory
3628*4882a593Smuzhiyun * in a NUMA system. So similar to cpuset, we also should consider
3629*4882a593Smuzhiyun * the memory policy of the current task. Similar to the description
3630*4882a593Smuzhiyun * above.
3631*4882a593Smuzhiyun */
3632*4882a593Smuzhiyun if (delta > 0) {
3633*4882a593Smuzhiyun if (gather_surplus_pages(h, delta) < 0)
3634*4882a593Smuzhiyun goto out;
3635*4882a593Smuzhiyun
3636*4882a593Smuzhiyun if (delta > allowed_mems_nr(h)) {
3637*4882a593Smuzhiyun return_unused_surplus_pages(h, delta);
3638*4882a593Smuzhiyun goto out;
3639*4882a593Smuzhiyun }
3640*4882a593Smuzhiyun }
3641*4882a593Smuzhiyun
3642*4882a593Smuzhiyun ret = 0;
3643*4882a593Smuzhiyun if (delta < 0)
3644*4882a593Smuzhiyun return_unused_surplus_pages(h, (unsigned long) -delta);
3645*4882a593Smuzhiyun
3646*4882a593Smuzhiyun out:
3647*4882a593Smuzhiyun spin_unlock(&hugetlb_lock);
3648*4882a593Smuzhiyun return ret;
3649*4882a593Smuzhiyun }
3650*4882a593Smuzhiyun
3651*4882a593Smuzhiyun static void hugetlb_vm_op_open(struct vm_area_struct *vma)
3652*4882a593Smuzhiyun {
3653*4882a593Smuzhiyun struct resv_map *resv = vma_resv_map(vma);
3654*4882a593Smuzhiyun
3655*4882a593Smuzhiyun /*
3656*4882a593Smuzhiyun * This new VMA should share its sibling's reservation map if present.
3657*4882a593Smuzhiyun * The VMA will only ever have a valid reservation map pointer where
3658*4882a593Smuzhiyun * it is being copied for another still existing VMA. As that VMA
3659*4882a593Smuzhiyun * has a reference to the reservation map it cannot disappear until
3660*4882a593Smuzhiyun * after this open call completes. It is therefore safe to take a
3661*4882a593Smuzhiyun * new reference here without additional locking.
3662*4882a593Smuzhiyun */
3663*4882a593Smuzhiyun if (resv && is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
3664*4882a593Smuzhiyun resv_map_dup_hugetlb_cgroup_uncharge_info(resv);
3665*4882a593Smuzhiyun kref_get(&resv->refs);
3666*4882a593Smuzhiyun }
3667*4882a593Smuzhiyun }
3668*4882a593Smuzhiyun
3669*4882a593Smuzhiyun static void hugetlb_vm_op_close(struct vm_area_struct *vma)
3670*4882a593Smuzhiyun {
3671*4882a593Smuzhiyun struct hstate *h = hstate_vma(vma);
3672*4882a593Smuzhiyun struct resv_map *resv = vma_resv_map(vma);
3673*4882a593Smuzhiyun struct hugepage_subpool *spool = subpool_vma(vma);
3674*4882a593Smuzhiyun unsigned long reserve, start, end;
3675*4882a593Smuzhiyun long gbl_reserve;
3676*4882a593Smuzhiyun
3677*4882a593Smuzhiyun if (!resv || !is_vma_resv_set(vma, HPAGE_RESV_OWNER))
3678*4882a593Smuzhiyun return;
3679*4882a593Smuzhiyun
3680*4882a593Smuzhiyun start = vma_hugecache_offset(h, vma, vma->vm_start);
3681*4882a593Smuzhiyun end = vma_hugecache_offset(h, vma, vma->vm_end);
3682*4882a593Smuzhiyun
3683*4882a593Smuzhiyun reserve = (end - start) - region_count(resv, start, end);
3684*4882a593Smuzhiyun hugetlb_cgroup_uncharge_counter(resv, start, end);
3685*4882a593Smuzhiyun if (reserve) {
3686*4882a593Smuzhiyun /*
3687*4882a593Smuzhiyun * Decrement reserve counts. The global reserve count may be
3688*4882a593Smuzhiyun * adjusted if the subpool has a minimum size.
3689*4882a593Smuzhiyun */
3690*4882a593Smuzhiyun gbl_reserve = hugepage_subpool_put_pages(spool, reserve);
3691*4882a593Smuzhiyun hugetlb_acct_memory(h, -gbl_reserve);
3692*4882a593Smuzhiyun }
3693*4882a593Smuzhiyun
3694*4882a593Smuzhiyun kref_put(&resv->refs, resv_map_release);
3695*4882a593Smuzhiyun }
3696*4882a593Smuzhiyun
3697*4882a593Smuzhiyun static int hugetlb_vm_op_split(struct vm_area_struct *vma, unsigned long addr)
3698*4882a593Smuzhiyun {
3699*4882a593Smuzhiyun if (addr & ~(huge_page_mask(hstate_vma(vma))))
3700*4882a593Smuzhiyun return -EINVAL;
3701*4882a593Smuzhiyun return 0;
3702*4882a593Smuzhiyun }
3703*4882a593Smuzhiyun
3704*4882a593Smuzhiyun static unsigned long hugetlb_vm_op_pagesize(struct vm_area_struct *vma)
3705*4882a593Smuzhiyun {
3706*4882a593Smuzhiyun struct hstate *hstate = hstate_vma(vma);
3707*4882a593Smuzhiyun
3708*4882a593Smuzhiyun return 1UL << huge_page_shift(hstate);
3709*4882a593Smuzhiyun }
3710*4882a593Smuzhiyun
3711*4882a593Smuzhiyun /*
3712*4882a593Smuzhiyun * We cannot handle pagefaults against hugetlb pages at all. They cause
3713*4882a593Smuzhiyun * handle_mm_fault() to try to instantiate regular-sized pages in the
3714*4882a593Smuzhiyun * hugepage VMA. do_page_fault() is supposed to trap this, so BUG if we get
3715*4882a593Smuzhiyun * this far.
3716*4882a593Smuzhiyun */
3717*4882a593Smuzhiyun static vm_fault_t hugetlb_vm_op_fault(struct vm_fault *vmf)
3718*4882a593Smuzhiyun {
3719*4882a593Smuzhiyun BUG();
3720*4882a593Smuzhiyun return 0;
3721*4882a593Smuzhiyun }
3722*4882a593Smuzhiyun
3723*4882a593Smuzhiyun /*
3724*4882a593Smuzhiyun * When a new function is introduced to vm_operations_struct and added
3725*4882a593Smuzhiyun * to hugetlb_vm_ops, please consider adding the function to shm_vm_ops.
3726*4882a593Smuzhiyun * This is because under System V memory model, mappings created via
3727*4882a593Smuzhiyun * shmget/shmat with "huge page" specified are backed by hugetlbfs files,
3728*4882a593Smuzhiyun * but their original vm_ops are overwritten with shm_vm_ops.
3729*4882a593Smuzhiyun */
3730*4882a593Smuzhiyun const struct vm_operations_struct hugetlb_vm_ops = {
3731*4882a593Smuzhiyun .fault = hugetlb_vm_op_fault,
3732*4882a593Smuzhiyun .open = hugetlb_vm_op_open,
3733*4882a593Smuzhiyun .close = hugetlb_vm_op_close,
3734*4882a593Smuzhiyun .split = hugetlb_vm_op_split,
3735*4882a593Smuzhiyun .pagesize = hugetlb_vm_op_pagesize,
3736*4882a593Smuzhiyun };
3737*4882a593Smuzhiyun
3738*4882a593Smuzhiyun static pte_t make_huge_pte(struct vm_area_struct *vma, struct page *page,
3739*4882a593Smuzhiyun int writable)
3740*4882a593Smuzhiyun {
3741*4882a593Smuzhiyun pte_t entry;
3742*4882a593Smuzhiyun
3743*4882a593Smuzhiyun if (writable) {
3744*4882a593Smuzhiyun entry = huge_pte_mkwrite(huge_pte_mkdirty(mk_huge_pte(page,
3745*4882a593Smuzhiyun vma->vm_page_prot)));
3746*4882a593Smuzhiyun } else {
3747*4882a593Smuzhiyun entry = huge_pte_wrprotect(mk_huge_pte(page,
3748*4882a593Smuzhiyun vma->vm_page_prot));
3749*4882a593Smuzhiyun }
3750*4882a593Smuzhiyun entry = pte_mkyoung(entry);
3751*4882a593Smuzhiyun entry = pte_mkhuge(entry);
3752*4882a593Smuzhiyun entry = arch_make_huge_pte(entry, vma, page, writable);
3753*4882a593Smuzhiyun
3754*4882a593Smuzhiyun return entry;
3755*4882a593Smuzhiyun }
3756*4882a593Smuzhiyun
3757*4882a593Smuzhiyun static void set_huge_ptep_writable(struct vm_area_struct *vma,
3758*4882a593Smuzhiyun unsigned long address, pte_t *ptep)
3759*4882a593Smuzhiyun {
3760*4882a593Smuzhiyun pte_t entry;
3761*4882a593Smuzhiyun
3762*4882a593Smuzhiyun entry = huge_pte_mkwrite(huge_pte_mkdirty(huge_ptep_get(ptep)));
3763*4882a593Smuzhiyun if (huge_ptep_set_access_flags(vma, address, ptep, entry, 1))
3764*4882a593Smuzhiyun update_mmu_cache(vma, address, ptep);
3765*4882a593Smuzhiyun }
3766*4882a593Smuzhiyun
3767*4882a593Smuzhiyun bool is_hugetlb_entry_migration(pte_t pte)
3768*4882a593Smuzhiyun {
3769*4882a593Smuzhiyun swp_entry_t swp;
3770*4882a593Smuzhiyun
3771*4882a593Smuzhiyun if (huge_pte_none(pte) || pte_present(pte))
3772*4882a593Smuzhiyun return false;
3773*4882a593Smuzhiyun swp = pte_to_swp_entry(pte);
3774*4882a593Smuzhiyun if (is_migration_entry(swp))
3775*4882a593Smuzhiyun return true;
3776*4882a593Smuzhiyun else
3777*4882a593Smuzhiyun return false;
3778*4882a593Smuzhiyun }
3779*4882a593Smuzhiyun
3780*4882a593Smuzhiyun static bool is_hugetlb_entry_hwpoisoned(pte_t pte)
3781*4882a593Smuzhiyun {
3782*4882a593Smuzhiyun swp_entry_t swp;
3783*4882a593Smuzhiyun
3784*4882a593Smuzhiyun if (huge_pte_none(pte) || pte_present(pte))
3785*4882a593Smuzhiyun return false;
3786*4882a593Smuzhiyun swp = pte_to_swp_entry(pte);
3787*4882a593Smuzhiyun if (is_hwpoison_entry(swp))
3788*4882a593Smuzhiyun return true;
3789*4882a593Smuzhiyun else
3790*4882a593Smuzhiyun return false;
3791*4882a593Smuzhiyun }
3792*4882a593Smuzhiyun
3793*4882a593Smuzhiyun int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
3794*4882a593Smuzhiyun struct vm_area_struct *vma)
3795*4882a593Smuzhiyun {
3796*4882a593Smuzhiyun pte_t *src_pte, *dst_pte, entry, dst_entry;
3797*4882a593Smuzhiyun struct page *ptepage;
3798*4882a593Smuzhiyun unsigned long addr;
3799*4882a593Smuzhiyun int cow;
3800*4882a593Smuzhiyun struct hstate *h = hstate_vma(vma);
3801*4882a593Smuzhiyun unsigned long sz = huge_page_size(h);
3802*4882a593Smuzhiyun struct address_space *mapping = vma->vm_file->f_mapping;
3803*4882a593Smuzhiyun struct mmu_notifier_range range;
3804*4882a593Smuzhiyun int ret = 0;
3805*4882a593Smuzhiyun
3806*4882a593Smuzhiyun cow = (vma->vm_flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE;
3807*4882a593Smuzhiyun
3808*4882a593Smuzhiyun if (cow) {
3809*4882a593Smuzhiyun mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, src,
3810*4882a593Smuzhiyun vma->vm_start,
3811*4882a593Smuzhiyun vma->vm_end);
3812*4882a593Smuzhiyun mmu_notifier_invalidate_range_start(&range);
3813*4882a593Smuzhiyun } else {
3814*4882a593Smuzhiyun /*
3815*4882a593Smuzhiyun * For shared mappings i_mmap_rwsem must be held to call
3816*4882a593Smuzhiyun * huge_pte_alloc, otherwise the returned ptep could go
3817*4882a593Smuzhiyun * away if part of a shared pmd and another thread calls
3818*4882a593Smuzhiyun * huge_pmd_unshare.
3819*4882a593Smuzhiyun */
3820*4882a593Smuzhiyun i_mmap_lock_read(mapping);
3821*4882a593Smuzhiyun }
3822*4882a593Smuzhiyun
3823*4882a593Smuzhiyun for (addr = vma->vm_start; addr < vma->vm_end; addr += sz) {
3824*4882a593Smuzhiyun spinlock_t *src_ptl, *dst_ptl;
3825*4882a593Smuzhiyun src_pte = huge_pte_offset(src, addr, sz);
3826*4882a593Smuzhiyun if (!src_pte)
3827*4882a593Smuzhiyun continue;
3828*4882a593Smuzhiyun dst_pte = huge_pte_alloc(dst, vma, addr, sz);
3829*4882a593Smuzhiyun if (!dst_pte) {
3830*4882a593Smuzhiyun ret = -ENOMEM;
3831*4882a593Smuzhiyun break;
3832*4882a593Smuzhiyun }
3833*4882a593Smuzhiyun
3834*4882a593Smuzhiyun /*
3835*4882a593Smuzhiyun * If the pagetables are shared don't copy or take references.
3836*4882a593Smuzhiyun * dst_pte == src_pte is the common case of src/dest sharing.
3837*4882a593Smuzhiyun *
3838*4882a593Smuzhiyun * However, src could have 'unshared' and dst shares with
3839*4882a593Smuzhiyun * another vma. If dst_pte !none, this implies sharing.
3840*4882a593Smuzhiyun * Check here before taking page table lock, and once again
3841*4882a593Smuzhiyun * after taking the lock below.
3842*4882a593Smuzhiyun */
3843*4882a593Smuzhiyun dst_entry = huge_ptep_get(dst_pte);
3844*4882a593Smuzhiyun if ((dst_pte == src_pte) || !huge_pte_none(dst_entry))
3845*4882a593Smuzhiyun continue;
3846*4882a593Smuzhiyun
3847*4882a593Smuzhiyun dst_ptl = huge_pte_lock(h, dst, dst_pte);
3848*4882a593Smuzhiyun src_ptl = huge_pte_lockptr(h, src, src_pte);
3849*4882a593Smuzhiyun spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
3850*4882a593Smuzhiyun entry = huge_ptep_get(src_pte);
3851*4882a593Smuzhiyun dst_entry = huge_ptep_get(dst_pte);
3852*4882a593Smuzhiyun if (huge_pte_none(entry) || !huge_pte_none(dst_entry)) {
3853*4882a593Smuzhiyun /*
3854*4882a593Smuzhiyun * Skip if src entry none. Also, skip in the
3855*4882a593Smuzhiyun * unlikely case dst entry !none as this implies
3856*4882a593Smuzhiyun * sharing with another vma.
3857*4882a593Smuzhiyun */
3858*4882a593Smuzhiyun ;
3859*4882a593Smuzhiyun } else if (unlikely(is_hugetlb_entry_migration(entry) ||
3860*4882a593Smuzhiyun is_hugetlb_entry_hwpoisoned(entry))) {
3861*4882a593Smuzhiyun swp_entry_t swp_entry = pte_to_swp_entry(entry);
3862*4882a593Smuzhiyun
3863*4882a593Smuzhiyun if (is_write_migration_entry(swp_entry) && cow) {
3864*4882a593Smuzhiyun /*
3865*4882a593Smuzhiyun * COW mappings require pages in both
3866*4882a593Smuzhiyun * parent and child to be set to read.
3867*4882a593Smuzhiyun */
3868*4882a593Smuzhiyun make_migration_entry_read(&swp_entry);
3869*4882a593Smuzhiyun entry = swp_entry_to_pte(swp_entry);
3870*4882a593Smuzhiyun set_huge_swap_pte_at(src, addr, src_pte,
3871*4882a593Smuzhiyun entry, sz);
3872*4882a593Smuzhiyun }
3873*4882a593Smuzhiyun set_huge_swap_pte_at(dst, addr, dst_pte, entry, sz);
3874*4882a593Smuzhiyun } else {
3875*4882a593Smuzhiyun if (cow) {
3876*4882a593Smuzhiyun /*
3877*4882a593Smuzhiyun * No need to notify as we are downgrading page
3878*4882a593Smuzhiyun * table protection not changing it to point
3879*4882a593Smuzhiyun * to a new page.
3880*4882a593Smuzhiyun *
3881*4882a593Smuzhiyun * See Documentation/vm/mmu_notifier.rst
3882*4882a593Smuzhiyun */
3883*4882a593Smuzhiyun huge_ptep_set_wrprotect(src, addr, src_pte);
3884*4882a593Smuzhiyun }
3885*4882a593Smuzhiyun entry = huge_ptep_get(src_pte);
3886*4882a593Smuzhiyun ptepage = pte_page(entry);
3887*4882a593Smuzhiyun get_page(ptepage);
3888*4882a593Smuzhiyun page_dup_rmap(ptepage, true);
3889*4882a593Smuzhiyun set_huge_pte_at(dst, addr, dst_pte, entry);
3890*4882a593Smuzhiyun hugetlb_count_add(pages_per_huge_page(h), dst);
3891*4882a593Smuzhiyun }
3892*4882a593Smuzhiyun spin_unlock(src_ptl);
3893*4882a593Smuzhiyun spin_unlock(dst_ptl);
3894*4882a593Smuzhiyun }
3895*4882a593Smuzhiyun
3896*4882a593Smuzhiyun if (cow)
3897*4882a593Smuzhiyun mmu_notifier_invalidate_range_end(&range);
3898*4882a593Smuzhiyun else
3899*4882a593Smuzhiyun i_mmap_unlock_read(mapping);
3900*4882a593Smuzhiyun
3901*4882a593Smuzhiyun return ret;
3902*4882a593Smuzhiyun }
3903*4882a593Smuzhiyun
3904*4882a593Smuzhiyun void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
3905*4882a593Smuzhiyun unsigned long start, unsigned long end,
3906*4882a593Smuzhiyun struct page *ref_page)
3907*4882a593Smuzhiyun {
3908*4882a593Smuzhiyun struct mm_struct *mm = vma->vm_mm;
3909*4882a593Smuzhiyun unsigned long address;
3910*4882a593Smuzhiyun pte_t *ptep;
3911*4882a593Smuzhiyun pte_t pte;
3912*4882a593Smuzhiyun spinlock_t *ptl;
3913*4882a593Smuzhiyun struct page *page;
3914*4882a593Smuzhiyun struct hstate *h = hstate_vma(vma);
3915*4882a593Smuzhiyun unsigned long sz = huge_page_size(h);
3916*4882a593Smuzhiyun struct mmu_notifier_range range;
3917*4882a593Smuzhiyun bool force_flush = false;
3918*4882a593Smuzhiyun
3919*4882a593Smuzhiyun WARN_ON(!is_vm_hugetlb_page(vma));
3920*4882a593Smuzhiyun BUG_ON(start & ~huge_page_mask(h));
3921*4882a593Smuzhiyun BUG_ON(end & ~huge_page_mask(h));
3922*4882a593Smuzhiyun
3923*4882a593Smuzhiyun /*
3924*4882a593Smuzhiyun * This is a hugetlb vma, all the pte entries should point
3925*4882a593Smuzhiyun * to huge page.
3926*4882a593Smuzhiyun */
3927*4882a593Smuzhiyun tlb_change_page_size(tlb, sz);
3928*4882a593Smuzhiyun tlb_start_vma(tlb, vma);
3929*4882a593Smuzhiyun
3930*4882a593Smuzhiyun /*
3931*4882a593Smuzhiyun * If sharing possible, alert mmu notifiers of worst case.
3932*4882a593Smuzhiyun */
3933*4882a593Smuzhiyun mmu_notifier_range_init(&range, MMU_NOTIFY_UNMAP, 0, vma, mm, start,
3934*4882a593Smuzhiyun end);
3935*4882a593Smuzhiyun adjust_range_if_pmd_sharing_possible(vma, &range.start, &range.end);
3936*4882a593Smuzhiyun mmu_notifier_invalidate_range_start(&range);
3937*4882a593Smuzhiyun address = start;
3938*4882a593Smuzhiyun for (; address < end; address += sz) {
3939*4882a593Smuzhiyun ptep = huge_pte_offset(mm, address, sz);
3940*4882a593Smuzhiyun if (!ptep)
3941*4882a593Smuzhiyun continue;
3942*4882a593Smuzhiyun
3943*4882a593Smuzhiyun ptl = huge_pte_lock(h, mm, ptep);
3944*4882a593Smuzhiyun if (huge_pmd_unshare(mm, vma, &address, ptep)) {
3945*4882a593Smuzhiyun spin_unlock(ptl);
3946*4882a593Smuzhiyun tlb_flush_pmd_range(tlb, address & PUD_MASK, PUD_SIZE);
3947*4882a593Smuzhiyun force_flush = true;
3948*4882a593Smuzhiyun continue;
3949*4882a593Smuzhiyun }
3950*4882a593Smuzhiyun
3951*4882a593Smuzhiyun pte = huge_ptep_get(ptep);
3952*4882a593Smuzhiyun if (huge_pte_none(pte)) {
3953*4882a593Smuzhiyun spin_unlock(ptl);
3954*4882a593Smuzhiyun continue;
3955*4882a593Smuzhiyun }
3956*4882a593Smuzhiyun
3957*4882a593Smuzhiyun /*
3958*4882a593Smuzhiyun * Migrating hugepage or HWPoisoned hugepage is already
3959*4882a593Smuzhiyun * unmapped and its refcount is dropped, so just clear pte here.
3960*4882a593Smuzhiyun */
3961*4882a593Smuzhiyun if (unlikely(!pte_present(pte))) {
3962*4882a593Smuzhiyun huge_pte_clear(mm, address, ptep, sz);
3963*4882a593Smuzhiyun spin_unlock(ptl);
3964*4882a593Smuzhiyun continue;
3965*4882a593Smuzhiyun }
3966*4882a593Smuzhiyun
3967*4882a593Smuzhiyun page = pte_page(pte);
3968*4882a593Smuzhiyun /*
3969*4882a593Smuzhiyun * If a reference page is supplied, it is because a specific
3970*4882a593Smuzhiyun * page is being unmapped, not a range. Ensure the page we
3971*4882a593Smuzhiyun * are about to unmap is the actual page of interest.
3972*4882a593Smuzhiyun */
3973*4882a593Smuzhiyun if (ref_page) {
3974*4882a593Smuzhiyun if (page != ref_page) {
3975*4882a593Smuzhiyun spin_unlock(ptl);
3976*4882a593Smuzhiyun continue;
3977*4882a593Smuzhiyun }
3978*4882a593Smuzhiyun /*
3979*4882a593Smuzhiyun * Mark the VMA as having unmapped its page so that
3980*4882a593Smuzhiyun * future faults in this VMA will fail rather than
3981*4882a593Smuzhiyun * looking like data was lost
3982*4882a593Smuzhiyun */
3983*4882a593Smuzhiyun set_vma_resv_flags(vma, HPAGE_RESV_UNMAPPED);
3984*4882a593Smuzhiyun }
3985*4882a593Smuzhiyun
3986*4882a593Smuzhiyun pte = huge_ptep_get_and_clear(mm, address, ptep);
3987*4882a593Smuzhiyun tlb_remove_huge_tlb_entry(h, tlb, ptep, address);
3988*4882a593Smuzhiyun if (huge_pte_dirty(pte))
3989*4882a593Smuzhiyun set_page_dirty(page);
3990*4882a593Smuzhiyun
3991*4882a593Smuzhiyun hugetlb_count_sub(pages_per_huge_page(h), mm);
3992*4882a593Smuzhiyun page_remove_rmap(page, true);
3993*4882a593Smuzhiyun
3994*4882a593Smuzhiyun spin_unlock(ptl);
3995*4882a593Smuzhiyun tlb_remove_page_size(tlb, page, huge_page_size(h));
3996*4882a593Smuzhiyun /*
3997*4882a593Smuzhiyun * Bail out after unmapping reference page if supplied
3998*4882a593Smuzhiyun */
3999*4882a593Smuzhiyun if (ref_page)
4000*4882a593Smuzhiyun break;
4001*4882a593Smuzhiyun }
4002*4882a593Smuzhiyun mmu_notifier_invalidate_range_end(&range);
4003*4882a593Smuzhiyun tlb_end_vma(tlb, vma);
4004*4882a593Smuzhiyun
4005*4882a593Smuzhiyun /*
4006*4882a593Smuzhiyun * If we unshared PMDs, the TLB flush was not recorded in mmu_gather. We
4007*4882a593Smuzhiyun * could defer the flush until now, since by holding i_mmap_rwsem we
4008*4882a593Smuzhiyun * guaranteed that the last reference would not be dropped. But we must
4009*4882a593Smuzhiyun * do the flushing before we return, as otherwise i_mmap_rwsem will be
4010*4882a593Smuzhiyun * dropped and the last reference to the shared PMDs page might be
4011*4882a593Smuzhiyun * dropped as well.
4012*4882a593Smuzhiyun *
4013*4882a593Smuzhiyun * In theory we could defer the freeing of the PMD pages as well, but
4014*4882a593Smuzhiyun * huge_pmd_unshare() relies on the exact page_count for the PMD page to
4015*4882a593Smuzhiyun * detect sharing, so we cannot defer the release of the page either.
4016*4882a593Smuzhiyun * Instead, do flush now.
4017*4882a593Smuzhiyun */
4018*4882a593Smuzhiyun if (force_flush)
4019*4882a593Smuzhiyun tlb_flush_mmu_tlbonly(tlb);
4020*4882a593Smuzhiyun }
4021*4882a593Smuzhiyun
4022*4882a593Smuzhiyun void __unmap_hugepage_range_final(struct mmu_gather *tlb,
4023*4882a593Smuzhiyun struct vm_area_struct *vma, unsigned long start,
4024*4882a593Smuzhiyun unsigned long end, struct page *ref_page)
4025*4882a593Smuzhiyun {
4026*4882a593Smuzhiyun __unmap_hugepage_range(tlb, vma, start, end, ref_page);
4027*4882a593Smuzhiyun
4028*4882a593Smuzhiyun /*
4029*4882a593Smuzhiyun * Clear this flag so that x86's huge_pmd_share page_table_shareable
4030*4882a593Smuzhiyun * test will fail on a vma being torn down, and not grab a page table
4031*4882a593Smuzhiyun * on its way out. We're lucky that the flag has such an appropriate
4032*4882a593Smuzhiyun * name, and can in fact be safely cleared here. We could clear it
4033*4882a593Smuzhiyun * before the __unmap_hugepage_range above, but all that's necessary
4034*4882a593Smuzhiyun * is to clear it before releasing the i_mmap_rwsem. This works
4035*4882a593Smuzhiyun * because in the context this is called, the VMA is about to be
4036*4882a593Smuzhiyun * destroyed and the i_mmap_rwsem is held.
4037*4882a593Smuzhiyun */
4038*4882a593Smuzhiyun vma->vm_flags &= ~VM_MAYSHARE;
4039*4882a593Smuzhiyun }
4040*4882a593Smuzhiyun
4041*4882a593Smuzhiyun void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
4042*4882a593Smuzhiyun unsigned long end, struct page *ref_page)
4043*4882a593Smuzhiyun {
4044*4882a593Smuzhiyun struct mm_struct *mm;
4045*4882a593Smuzhiyun struct mmu_gather tlb;
4046*4882a593Smuzhiyun unsigned long tlb_start = start;
4047*4882a593Smuzhiyun unsigned long tlb_end = end;
4048*4882a593Smuzhiyun
4049*4882a593Smuzhiyun /*
4050*4882a593Smuzhiyun * If shared PMDs were possibly used within this vma range, adjust
4051*4882a593Smuzhiyun * start/end for worst case tlb flushing.
4052*4882a593Smuzhiyun * Note that we can not be sure if PMDs are shared until we try to
4053*4882a593Smuzhiyun * unmap pages. However, we want to make sure TLB flushing covers
4054*4882a593Smuzhiyun * the largest possible range.
4055*4882a593Smuzhiyun */
4056*4882a593Smuzhiyun adjust_range_if_pmd_sharing_possible(vma, &tlb_start, &tlb_end);
4057*4882a593Smuzhiyun
4058*4882a593Smuzhiyun mm = vma->vm_mm;
4059*4882a593Smuzhiyun
4060*4882a593Smuzhiyun tlb_gather_mmu(&tlb, mm, tlb_start, tlb_end);
4061*4882a593Smuzhiyun __unmap_hugepage_range(&tlb, vma, start, end, ref_page);
4062*4882a593Smuzhiyun tlb_finish_mmu(&tlb, tlb_start, tlb_end);
4063*4882a593Smuzhiyun }
4064*4882a593Smuzhiyun
4065*4882a593Smuzhiyun /*
4066*4882a593Smuzhiyun * This is called when the original mapper is failing to COW a MAP_PRIVATE
4067*4882a593Smuzhiyun * mapping it owns the reserve page for. The intention is to unmap the page
4068*4882a593Smuzhiyun * from other VMAs and let the children be SIGKILLed if they are faulting the
4069*4882a593Smuzhiyun * same region.
4070*4882a593Smuzhiyun */
4071*4882a593Smuzhiyun static void unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
4072*4882a593Smuzhiyun struct page *page, unsigned long address)
4073*4882a593Smuzhiyun {
4074*4882a593Smuzhiyun struct hstate *h = hstate_vma(vma);
4075*4882a593Smuzhiyun struct vm_area_struct *iter_vma;
4076*4882a593Smuzhiyun struct address_space *mapping;
4077*4882a593Smuzhiyun pgoff_t pgoff;
4078*4882a593Smuzhiyun
4079*4882a593Smuzhiyun /*
4080*4882a593Smuzhiyun * vm_pgoff is in PAGE_SIZE units, hence the different calculation
4081*4882a593Smuzhiyun * from page cache lookup which is in HPAGE_SIZE units.
4082*4882a593Smuzhiyun */
4083*4882a593Smuzhiyun address = address & huge_page_mask(h);
4084*4882a593Smuzhiyun pgoff = ((address - vma->vm_start) >> PAGE_SHIFT) +
4085*4882a593Smuzhiyun vma->vm_pgoff;
4086*4882a593Smuzhiyun mapping = vma->vm_file->f_mapping;
4087*4882a593Smuzhiyun
4088*4882a593Smuzhiyun /*
4089*4882a593Smuzhiyun * Take the mapping lock for the duration of the table walk. As
4090*4882a593Smuzhiyun * this mapping should be shared between all the VMAs,
4091*4882a593Smuzhiyun * __unmap_hugepage_range() is called as the lock is already held
4092*4882a593Smuzhiyun * __unmap_hugepage_range() is called with the lock already held.
4093*4882a593Smuzhiyun i_mmap_lock_write(mapping);
4094*4882a593Smuzhiyun vma_interval_tree_foreach(iter_vma, &mapping->i_mmap, pgoff, pgoff) {
4095*4882a593Smuzhiyun /* Do not unmap the current VMA */
4096*4882a593Smuzhiyun if (iter_vma == vma)
4097*4882a593Smuzhiyun continue;
4098*4882a593Smuzhiyun
4099*4882a593Smuzhiyun /*
4100*4882a593Smuzhiyun * Shared VMAs have their own reserves and do not affect
4101*4882a593Smuzhiyun * MAP_PRIVATE accounting but it is possible that a shared
4102*4882a593Smuzhiyun * VMA is using the same page so check and skip such VMAs.
4103*4882a593Smuzhiyun */
4104*4882a593Smuzhiyun if (iter_vma->vm_flags & VM_MAYSHARE)
4105*4882a593Smuzhiyun continue;
4106*4882a593Smuzhiyun
4107*4882a593Smuzhiyun /*
4108*4882a593Smuzhiyun * Unmap the page from other VMAs without their own reserves.
4109*4882a593Smuzhiyun * They get marked to be SIGKILLed if they fault in these
4110*4882a593Smuzhiyun * areas. This is because a future no-page fault on this VMA
4111*4882a593Smuzhiyun * could insert a zeroed page instead of the data existing
4112*4882a593Smuzhiyun * from the time of fork. This would look like data corruption
4113*4882a593Smuzhiyun */
4114*4882a593Smuzhiyun if (!is_vma_resv_set(iter_vma, HPAGE_RESV_OWNER))
4115*4882a593Smuzhiyun unmap_hugepage_range(iter_vma, address,
4116*4882a593Smuzhiyun address + huge_page_size(h), page);
4117*4882a593Smuzhiyun }
4118*4882a593Smuzhiyun i_mmap_unlock_write(mapping);
4119*4882a593Smuzhiyun }
4120*4882a593Smuzhiyun
4121*4882a593Smuzhiyun /*
4122*4882a593Smuzhiyun * Hugetlb_cow() should be called with page lock of the original hugepage held.
4123*4882a593Smuzhiyun * Called with hugetlb_instantiation_mutex held and pte_page locked so we
4124*4882a593Smuzhiyun * cannot race with other handlers or page migration.
4125*4882a593Smuzhiyun * Keep the pte_same checks anyway to make transition from the mutex easier.
4126*4882a593Smuzhiyun */
4127*4882a593Smuzhiyun static vm_fault_t hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
4128*4882a593Smuzhiyun unsigned long address, pte_t *ptep,
4129*4882a593Smuzhiyun struct page *pagecache_page, spinlock_t *ptl)
4130*4882a593Smuzhiyun {
4131*4882a593Smuzhiyun pte_t pte;
4132*4882a593Smuzhiyun struct hstate *h = hstate_vma(vma);
4133*4882a593Smuzhiyun struct page *old_page, *new_page;
4134*4882a593Smuzhiyun int outside_reserve = 0;
4135*4882a593Smuzhiyun vm_fault_t ret = 0;
4136*4882a593Smuzhiyun unsigned long haddr = address & huge_page_mask(h);
4137*4882a593Smuzhiyun struct mmu_notifier_range range;
4138*4882a593Smuzhiyun
4139*4882a593Smuzhiyun pte = huge_ptep_get(ptep);
4140*4882a593Smuzhiyun old_page = pte_page(pte);
4141*4882a593Smuzhiyun
4142*4882a593Smuzhiyun retry_avoidcopy:
4143*4882a593Smuzhiyun /* If no-one else is actually using this page, avoid the copy
4144*4882a593Smuzhiyun * and just make the page writable */
4145*4882a593Smuzhiyun if (page_mapcount(old_page) == 1 && PageAnon(old_page)) {
4146*4882a593Smuzhiyun page_move_anon_rmap(old_page, vma);
4147*4882a593Smuzhiyun set_huge_ptep_writable(vma, haddr, ptep);
4148*4882a593Smuzhiyun return 0;
4149*4882a593Smuzhiyun }
4150*4882a593Smuzhiyun
4151*4882a593Smuzhiyun /*
4152*4882a593Smuzhiyun * If the process that created a MAP_PRIVATE mapping is about to
4153*4882a593Smuzhiyun * perform a COW due to a shared page count, attempt to satisfy
4154*4882a593Smuzhiyun * the allocation without using the existing reserves. The pagecache
4155*4882a593Smuzhiyun * page is used to determine if the reserve at this address was
4156*4882a593Smuzhiyun * consumed or not. If reserves were used, a partial faulted mapping
4157*4882a593Smuzhiyun * at the time of fork() could consume its reserves on COW instead
4158*4882a593Smuzhiyun * of the full address range.
4159*4882a593Smuzhiyun */
4160*4882a593Smuzhiyun if (is_vma_resv_set(vma, HPAGE_RESV_OWNER) &&
4161*4882a593Smuzhiyun old_page != pagecache_page)
4162*4882a593Smuzhiyun outside_reserve = 1;
4163*4882a593Smuzhiyun
4164*4882a593Smuzhiyun get_page(old_page);
4165*4882a593Smuzhiyun
4166*4882a593Smuzhiyun /*
4167*4882a593Smuzhiyun * Drop page table lock as buddy allocator may be called. It will
4168*4882a593Smuzhiyun * be acquired again before returning to the caller, as expected.
4169*4882a593Smuzhiyun */
4170*4882a593Smuzhiyun spin_unlock(ptl);
4171*4882a593Smuzhiyun new_page = alloc_huge_page(vma, haddr, outside_reserve);
4172*4882a593Smuzhiyun
4173*4882a593Smuzhiyun if (IS_ERR(new_page)) {
4174*4882a593Smuzhiyun /*
4175*4882a593Smuzhiyun * If a process owning a MAP_PRIVATE mapping fails to COW,
4176*4882a593Smuzhiyun * it is due to references held by a child and an insufficient
4177*4882a593Smuzhiyun * huge page pool. To guarantee the original mapper's
4178*4882a593Smuzhiyun * reliability, unmap the page from child processes. The child
4179*4882a593Smuzhiyun * may get SIGKILLed if it later faults.
4180*4882a593Smuzhiyun */
4181*4882a593Smuzhiyun if (outside_reserve) {
4182*4882a593Smuzhiyun struct address_space *mapping = vma->vm_file->f_mapping;
4183*4882a593Smuzhiyun pgoff_t idx;
4184*4882a593Smuzhiyun u32 hash;
4185*4882a593Smuzhiyun
4186*4882a593Smuzhiyun put_page(old_page);
4187*4882a593Smuzhiyun BUG_ON(huge_pte_none(pte));
4188*4882a593Smuzhiyun /*
4189*4882a593Smuzhiyun * Drop hugetlb_fault_mutex and i_mmap_rwsem before
4190*4882a593Smuzhiyun * unmapping. unmapping needs to hold i_mmap_rwsem
4191*4882a593Smuzhiyun * in write mode. Dropping i_mmap_rwsem in read mode
4192*4882a593Smuzhiyun * here is OK as COW mappings do not interact with
4193*4882a593Smuzhiyun * PMD sharing.
4194*4882a593Smuzhiyun *
4195*4882a593Smuzhiyun * Reacquire both after unmap operation.
4196*4882a593Smuzhiyun */
4197*4882a593Smuzhiyun idx = vma_hugecache_offset(h, vma, haddr);
4198*4882a593Smuzhiyun hash = hugetlb_fault_mutex_hash(mapping, idx);
4199*4882a593Smuzhiyun mutex_unlock(&hugetlb_fault_mutex_table[hash]);
4200*4882a593Smuzhiyun i_mmap_unlock_read(mapping);
4201*4882a593Smuzhiyun
4202*4882a593Smuzhiyun unmap_ref_private(mm, vma, old_page, haddr);
4203*4882a593Smuzhiyun
4204*4882a593Smuzhiyun i_mmap_lock_read(mapping);
4205*4882a593Smuzhiyun mutex_lock(&hugetlb_fault_mutex_table[hash]);
4206*4882a593Smuzhiyun spin_lock(ptl);
4207*4882a593Smuzhiyun ptep = huge_pte_offset(mm, haddr, huge_page_size(h));
4208*4882a593Smuzhiyun if (likely(ptep &&
4209*4882a593Smuzhiyun pte_same(huge_ptep_get(ptep), pte)))
4210*4882a593Smuzhiyun goto retry_avoidcopy;
4211*4882a593Smuzhiyun /*
4212*4882a593Smuzhiyun * race occurs while re-acquiring page table
4213*4882a593Smuzhiyun * lock, and our job is done.
4214*4882a593Smuzhiyun */
4215*4882a593Smuzhiyun return 0;
4216*4882a593Smuzhiyun }
4217*4882a593Smuzhiyun
4218*4882a593Smuzhiyun ret = vmf_error(PTR_ERR(new_page));
4219*4882a593Smuzhiyun goto out_release_old;
4220*4882a593Smuzhiyun }
4221*4882a593Smuzhiyun
4222*4882a593Smuzhiyun /*
4223*4882a593Smuzhiyun * When the original hugepage is shared one, it does not have
4224*4882a593Smuzhiyun * anon_vma prepared.
4225*4882a593Smuzhiyun */
4226*4882a593Smuzhiyun if (unlikely(anon_vma_prepare(vma))) {
4227*4882a593Smuzhiyun ret = VM_FAULT_OOM;
4228*4882a593Smuzhiyun goto out_release_all;
4229*4882a593Smuzhiyun }
4230*4882a593Smuzhiyun
4231*4882a593Smuzhiyun copy_user_huge_page(new_page, old_page, address, vma,
4232*4882a593Smuzhiyun pages_per_huge_page(h));
4233*4882a593Smuzhiyun __SetPageUptodate(new_page);
4234*4882a593Smuzhiyun
4235*4882a593Smuzhiyun mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, mm, haddr,
4236*4882a593Smuzhiyun haddr + huge_page_size(h));
4237*4882a593Smuzhiyun mmu_notifier_invalidate_range_start(&range);
4238*4882a593Smuzhiyun
4239*4882a593Smuzhiyun /*
4240*4882a593Smuzhiyun * Retake the page table lock to check for racing updates
4241*4882a593Smuzhiyun * before the page tables are altered
4242*4882a593Smuzhiyun */
4243*4882a593Smuzhiyun spin_lock(ptl);
4244*4882a593Smuzhiyun ptep = huge_pte_offset(mm, haddr, huge_page_size(h));
4245*4882a593Smuzhiyun if (likely(ptep && pte_same(huge_ptep_get(ptep), pte))) {
4246*4882a593Smuzhiyun ClearPagePrivate(new_page);
4247*4882a593Smuzhiyun
4248*4882a593Smuzhiyun /* Break COW */
4249*4882a593Smuzhiyun huge_ptep_clear_flush(vma, haddr, ptep);
4250*4882a593Smuzhiyun mmu_notifier_invalidate_range(mm, range.start, range.end);
4251*4882a593Smuzhiyun set_huge_pte_at(mm, haddr, ptep,
4252*4882a593Smuzhiyun make_huge_pte(vma, new_page, 1));
4253*4882a593Smuzhiyun page_remove_rmap(old_page, true);
4254*4882a593Smuzhiyun hugepage_add_new_anon_rmap(new_page, vma, haddr);
4255*4882a593Smuzhiyun set_page_huge_active(new_page);
4256*4882a593Smuzhiyun /* Make the old page be freed below */
4257*4882a593Smuzhiyun new_page = old_page;
4258*4882a593Smuzhiyun }
4259*4882a593Smuzhiyun spin_unlock(ptl);
4260*4882a593Smuzhiyun mmu_notifier_invalidate_range_end(&range);
4261*4882a593Smuzhiyun out_release_all:
4262*4882a593Smuzhiyun restore_reserve_on_error(h, vma, haddr, new_page);
4263*4882a593Smuzhiyun put_page(new_page);
4264*4882a593Smuzhiyun out_release_old:
4265*4882a593Smuzhiyun put_page(old_page);
4266*4882a593Smuzhiyun
4267*4882a593Smuzhiyun spin_lock(ptl); /* Caller expects lock to be held */
4268*4882a593Smuzhiyun return ret;
4269*4882a593Smuzhiyun }
4270*4882a593Smuzhiyun
4271*4882a593Smuzhiyun /* Return the pagecache page at a given address within a VMA */
4272*4882a593Smuzhiyun static struct page *hugetlbfs_pagecache_page(struct hstate *h,
4273*4882a593Smuzhiyun struct vm_area_struct *vma, unsigned long address)
4274*4882a593Smuzhiyun {
4275*4882a593Smuzhiyun struct address_space *mapping;
4276*4882a593Smuzhiyun pgoff_t idx;
4277*4882a593Smuzhiyun
4278*4882a593Smuzhiyun mapping = vma->vm_file->f_mapping;
4279*4882a593Smuzhiyun idx = vma_hugecache_offset(h, vma, address);
4280*4882a593Smuzhiyun
4281*4882a593Smuzhiyun return find_lock_page(mapping, idx);
4282*4882a593Smuzhiyun }
4283*4882a593Smuzhiyun
4284*4882a593Smuzhiyun /*
4285*4882a593Smuzhiyun * Return whether there is a pagecache page to back given address within VMA.
4286*4882a593Smuzhiyun * Caller follow_hugetlb_page() holds page_table_lock so we cannot lock_page.
4287*4882a593Smuzhiyun */
4288*4882a593Smuzhiyun static bool hugetlbfs_pagecache_present(struct hstate *h,
4289*4882a593Smuzhiyun struct vm_area_struct *vma, unsigned long address)
4290*4882a593Smuzhiyun {
4291*4882a593Smuzhiyun struct address_space *mapping;
4292*4882a593Smuzhiyun pgoff_t idx;
4293*4882a593Smuzhiyun struct page *page;
4294*4882a593Smuzhiyun
4295*4882a593Smuzhiyun mapping = vma->vm_file->f_mapping;
4296*4882a593Smuzhiyun idx = vma_hugecache_offset(h, vma, address);
4297*4882a593Smuzhiyun
4298*4882a593Smuzhiyun page = find_get_page(mapping, idx);
4299*4882a593Smuzhiyun if (page)
4300*4882a593Smuzhiyun put_page(page);
4301*4882a593Smuzhiyun return page != NULL;
4302*4882a593Smuzhiyun }
4303*4882a593Smuzhiyun
4304*4882a593Smuzhiyun int huge_add_to_page_cache(struct page *page, struct address_space *mapping,
4305*4882a593Smuzhiyun pgoff_t idx)
4306*4882a593Smuzhiyun {
4307*4882a593Smuzhiyun struct inode *inode = mapping->host;
4308*4882a593Smuzhiyun struct hstate *h = hstate_inode(inode);
4309*4882a593Smuzhiyun int err = add_to_page_cache(page, mapping, idx, GFP_KERNEL);
4310*4882a593Smuzhiyun
4311*4882a593Smuzhiyun if (err)
4312*4882a593Smuzhiyun return err;
4313*4882a593Smuzhiyun ClearPagePrivate(page);
4314*4882a593Smuzhiyun
4315*4882a593Smuzhiyun /*
4316*4882a593Smuzhiyun * set page dirty so that it will not be removed from cache/file
4317*4882a593Smuzhiyun * by non-hugetlbfs specific code paths.
4318*4882a593Smuzhiyun */
4319*4882a593Smuzhiyun set_page_dirty(page);
4320*4882a593Smuzhiyun
4321*4882a593Smuzhiyun spin_lock(&inode->i_lock);
4322*4882a593Smuzhiyun inode->i_blocks += blocks_per_huge_page(h);
4323*4882a593Smuzhiyun spin_unlock(&inode->i_lock);
4324*4882a593Smuzhiyun return 0;
4325*4882a593Smuzhiyun }
4326*4882a593Smuzhiyun
4327*4882a593Smuzhiyun static inline vm_fault_t hugetlb_handle_userfault(struct vm_area_struct *vma,
4328*4882a593Smuzhiyun struct address_space *mapping,
4329*4882a593Smuzhiyun pgoff_t idx,
4330*4882a593Smuzhiyun unsigned int flags,
4331*4882a593Smuzhiyun unsigned long haddr,
4332*4882a593Smuzhiyun unsigned long reason)
4333*4882a593Smuzhiyun {
4334*4882a593Smuzhiyun u32 hash = hugetlb_fault_mutex_hash(mapping, idx);
4335*4882a593Smuzhiyun struct vm_fault vmf = {
4336*4882a593Smuzhiyun .vma = vma,
4337*4882a593Smuzhiyun .address = haddr,
4338*4882a593Smuzhiyun .flags = flags,
4339*4882a593Smuzhiyun /*
4340*4882a593Smuzhiyun * Hard to debug if it ends up being
4341*4882a593Smuzhiyun * used by a callee that assumes
4342*4882a593Smuzhiyun * something about the other
4343*4882a593Smuzhiyun * uninitialized fields... same as in
4344*4882a593Smuzhiyun * memory.c
4345*4882a593Smuzhiyun */
4346*4882a593Smuzhiyun };
4347*4882a593Smuzhiyun
4348*4882a593Smuzhiyun /*
4349*4882a593Smuzhiyun * vma_lock and hugetlb_fault_mutex must be dropped
4350*4882a593Smuzhiyun * before handling userfault. Also mmap_lock will
4351*4882a593Smuzhiyun * be dropped during handling userfault, any vma
4352*4882a593Smuzhiyun * operation should be careful from here.
4353*4882a593Smuzhiyun */
4354*4882a593Smuzhiyun mutex_unlock(&hugetlb_fault_mutex_table[hash]);
4355*4882a593Smuzhiyun i_mmap_unlock_read(mapping);
4356*4882a593Smuzhiyun return handle_userfault(&vmf, VM_UFFD_MISSING);
4357*4882a593Smuzhiyun }
4358*4882a593Smuzhiyun
4359*4882a593Smuzhiyun static vm_fault_t hugetlb_no_page(struct mm_struct *mm,
4360*4882a593Smuzhiyun struct vm_area_struct *vma,
4361*4882a593Smuzhiyun struct address_space *mapping, pgoff_t idx,
4362*4882a593Smuzhiyun unsigned long address, pte_t *ptep, unsigned int flags)
4363*4882a593Smuzhiyun {
4364*4882a593Smuzhiyun struct hstate *h = hstate_vma(vma);
4365*4882a593Smuzhiyun vm_fault_t ret = VM_FAULT_SIGBUS;
4366*4882a593Smuzhiyun int anon_rmap = 0;
4367*4882a593Smuzhiyun unsigned long size;
4368*4882a593Smuzhiyun struct page *page;
4369*4882a593Smuzhiyun pte_t new_pte;
4370*4882a593Smuzhiyun spinlock_t *ptl;
4371*4882a593Smuzhiyun unsigned long haddr = address & huge_page_mask(h);
4372*4882a593Smuzhiyun bool new_page = false;
4373*4882a593Smuzhiyun u32 hash = hugetlb_fault_mutex_hash(mapping, idx);
4374*4882a593Smuzhiyun
4375*4882a593Smuzhiyun /*
4376*4882a593Smuzhiyun * Currently, we are forced to kill the process in the event the
4377*4882a593Smuzhiyun * original mapper has unmapped pages from the child due to a failed
4378*4882a593Smuzhiyun * COW. Warn that such a situation has occurred as it may not be obvious
4379*4882a593Smuzhiyun */
4380*4882a593Smuzhiyun if (is_vma_resv_set(vma, HPAGE_RESV_UNMAPPED)) {
4381*4882a593Smuzhiyun pr_warn_ratelimited("PID %d killed due to inadequate hugepage pool\n",
4382*4882a593Smuzhiyun current->pid);
4383*4882a593Smuzhiyun goto out;
4384*4882a593Smuzhiyun }
4385*4882a593Smuzhiyun
4386*4882a593Smuzhiyun /*
4387*4882a593Smuzhiyun * We can not race with truncation due to holding i_mmap_rwsem.
4388*4882a593Smuzhiyun * i_size is modified when holding i_mmap_rwsem, so check here
4389*4882a593Smuzhiyun * once for faults beyond end of file.
4390*4882a593Smuzhiyun */
4391*4882a593Smuzhiyun size = i_size_read(mapping->host) >> huge_page_shift(h);
4392*4882a593Smuzhiyun if (idx >= size)
4393*4882a593Smuzhiyun goto out;
4394*4882a593Smuzhiyun
4395*4882a593Smuzhiyun retry:
4396*4882a593Smuzhiyun page = find_lock_page(mapping, idx);
4397*4882a593Smuzhiyun if (!page) {
4398*4882a593Smuzhiyun /* Check for page in userfault range */
4399*4882a593Smuzhiyun if (userfaultfd_missing(vma)) {
4400*4882a593Smuzhiyun ret = hugetlb_handle_userfault(vma, mapping, idx,
4401*4882a593Smuzhiyun flags, haddr,
4402*4882a593Smuzhiyun VM_UFFD_MISSING);
4403*4882a593Smuzhiyun goto out;
4404*4882a593Smuzhiyun }
4405*4882a593Smuzhiyun
4406*4882a593Smuzhiyun page = alloc_huge_page(vma, haddr, 0);
4407*4882a593Smuzhiyun if (IS_ERR(page)) {
4408*4882a593Smuzhiyun /*
4409*4882a593Smuzhiyun * Returning error will result in faulting task being
4410*4882a593Smuzhiyun * sent SIGBUS. The hugetlb fault mutex prevents two
4411*4882a593Smuzhiyun * tasks from racing to fault in the same page which
4412*4882a593Smuzhiyun * could result in spurious "unable to allocate" errors.
4413*4882a593Smuzhiyun * Page migration does not take the fault mutex, but
4414*4882a593Smuzhiyun * does a clear then write of pte's under page table
4415*4882a593Smuzhiyun * lock. Page fault code could race with migration,
4416*4882a593Smuzhiyun * notice the clear pte and try to allocate a page
4417*4882a593Smuzhiyun * here. Before returning error, get ptl and make
4418*4882a593Smuzhiyun * sure there really is no pte entry.
4419*4882a593Smuzhiyun */
4420*4882a593Smuzhiyun ptl = huge_pte_lock(h, mm, ptep);
4421*4882a593Smuzhiyun if (!huge_pte_none(huge_ptep_get(ptep))) {
4422*4882a593Smuzhiyun ret = 0;
4423*4882a593Smuzhiyun spin_unlock(ptl);
4424*4882a593Smuzhiyun goto out;
4425*4882a593Smuzhiyun }
4426*4882a593Smuzhiyun spin_unlock(ptl);
4427*4882a593Smuzhiyun ret = vmf_error(PTR_ERR(page));
4428*4882a593Smuzhiyun goto out;
4429*4882a593Smuzhiyun }
4430*4882a593Smuzhiyun clear_huge_page(page, address, pages_per_huge_page(h));
4431*4882a593Smuzhiyun __SetPageUptodate(page);
4432*4882a593Smuzhiyun new_page = true;
4433*4882a593Smuzhiyun
4434*4882a593Smuzhiyun if (vma->vm_flags & VM_MAYSHARE) {
4435*4882a593Smuzhiyun int err = huge_add_to_page_cache(page, mapping, idx);
4436*4882a593Smuzhiyun if (err) {
4437*4882a593Smuzhiyun put_page(page);
4438*4882a593Smuzhiyun if (err == -EEXIST)
4439*4882a593Smuzhiyun goto retry;
4440*4882a593Smuzhiyun goto out;
4441*4882a593Smuzhiyun }
4442*4882a593Smuzhiyun } else {
4443*4882a593Smuzhiyun lock_page(page);
4444*4882a593Smuzhiyun if (unlikely(anon_vma_prepare(vma))) {
4445*4882a593Smuzhiyun ret = VM_FAULT_OOM;
4446*4882a593Smuzhiyun goto backout_unlocked;
4447*4882a593Smuzhiyun }
4448*4882a593Smuzhiyun anon_rmap = 1;
4449*4882a593Smuzhiyun }
4450*4882a593Smuzhiyun } else {
4451*4882a593Smuzhiyun /*
4452*4882a593Smuzhiyun * If a memory error occurs between mmap() and fault, some processes
4453*4882a593Smuzhiyun * don't have a hwpoisoned swap entry for the errored virtual address.
4454*4882a593Smuzhiyun * So we need to block the hugepage fault by checking the PG_hwpoison bit.
4455*4882a593Smuzhiyun */
4456*4882a593Smuzhiyun if (unlikely(PageHWPoison(page))) {
4457*4882a593Smuzhiyun ret = VM_FAULT_HWPOISON_LARGE |
4458*4882a593Smuzhiyun VM_FAULT_SET_HINDEX(hstate_index(h));
4459*4882a593Smuzhiyun goto backout_unlocked;
4460*4882a593Smuzhiyun }
4461*4882a593Smuzhiyun
4462*4882a593Smuzhiyun /* Check for page in userfault range. */
4463*4882a593Smuzhiyun if (userfaultfd_minor(vma)) {
4464*4882a593Smuzhiyun unlock_page(page);
4465*4882a593Smuzhiyun put_page(page);
4466*4882a593Smuzhiyun ret = hugetlb_handle_userfault(vma, mapping, idx,
4467*4882a593Smuzhiyun flags, haddr,
4468*4882a593Smuzhiyun VM_UFFD_MINOR);
4469*4882a593Smuzhiyun goto out;
4470*4882a593Smuzhiyun }
4471*4882a593Smuzhiyun }
4472*4882a593Smuzhiyun
4473*4882a593Smuzhiyun /*
4474*4882a593Smuzhiyun * If we are going to COW a private mapping later, we examine the
4475*4882a593Smuzhiyun * pending reservations for this page now. This will ensure that
4476*4882a593Smuzhiyun * any allocations necessary to record that reservation occur outside
4477*4882a593Smuzhiyun * the spinlock.
4478*4882a593Smuzhiyun */
4479*4882a593Smuzhiyun if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
4480*4882a593Smuzhiyun if (vma_needs_reservation(h, vma, haddr) < 0) {
4481*4882a593Smuzhiyun ret = VM_FAULT_OOM;
4482*4882a593Smuzhiyun goto backout_unlocked;
4483*4882a593Smuzhiyun }
4484*4882a593Smuzhiyun /* Just decrements count, does not deallocate */
4485*4882a593Smuzhiyun vma_end_reservation(h, vma, haddr);
4486*4882a593Smuzhiyun }
4487*4882a593Smuzhiyun
4488*4882a593Smuzhiyun ptl = huge_pte_lock(h, mm, ptep);
4489*4882a593Smuzhiyun ret = 0;
4490*4882a593Smuzhiyun if (!huge_pte_none(huge_ptep_get(ptep)))
4491*4882a593Smuzhiyun goto backout;
4492*4882a593Smuzhiyun
4493*4882a593Smuzhiyun if (anon_rmap) {
4494*4882a593Smuzhiyun ClearPagePrivate(page);
4495*4882a593Smuzhiyun hugepage_add_new_anon_rmap(page, vma, haddr);
4496*4882a593Smuzhiyun } else
4497*4882a593Smuzhiyun page_dup_rmap(page, true);
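	/*
	 * The pte is made writable only for shared writable mappings; a
	 * private writable mapping gets a read-only pte here so that the
	 * hugetlb_cow() call below services the first write.
	 */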
4498*4882a593Smuzhiyun new_pte = make_huge_pte(vma, page, ((vma->vm_flags & VM_WRITE)
4499*4882a593Smuzhiyun && (vma->vm_flags & VM_SHARED)));
4500*4882a593Smuzhiyun set_huge_pte_at(mm, haddr, ptep, new_pte);
4501*4882a593Smuzhiyun
4502*4882a593Smuzhiyun hugetlb_count_add(pages_per_huge_page(h), mm);
4503*4882a593Smuzhiyun if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
4504*4882a593Smuzhiyun /* Optimization, do the COW without a second fault */
4505*4882a593Smuzhiyun ret = hugetlb_cow(mm, vma, address, ptep, page, ptl);
4506*4882a593Smuzhiyun }
4507*4882a593Smuzhiyun
4508*4882a593Smuzhiyun spin_unlock(ptl);
4509*4882a593Smuzhiyun
4510*4882a593Smuzhiyun /*
4511*4882a593Smuzhiyun * Only make newly allocated pages active. Existing pages found
4512*4882a593Smuzhiyun * in the pagecache could be !page_huge_active() if they have been
4513*4882a593Smuzhiyun * isolated for migration.
4514*4882a593Smuzhiyun */
4515*4882a593Smuzhiyun if (new_page)
4516*4882a593Smuzhiyun set_page_huge_active(page);
4517*4882a593Smuzhiyun
4518*4882a593Smuzhiyun unlock_page(page);
4519*4882a593Smuzhiyun out:
4520*4882a593Smuzhiyun mutex_unlock(&hugetlb_fault_mutex_table[hash]);
4521*4882a593Smuzhiyun i_mmap_unlock_read(mapping);
4522*4882a593Smuzhiyun return ret;
4523*4882a593Smuzhiyun
4524*4882a593Smuzhiyun backout:
4525*4882a593Smuzhiyun spin_unlock(ptl);
4526*4882a593Smuzhiyun backout_unlocked:
4527*4882a593Smuzhiyun unlock_page(page);
4528*4882a593Smuzhiyun restore_reserve_on_error(h, vma, haddr, page);
4529*4882a593Smuzhiyun put_page(page);
4530*4882a593Smuzhiyun goto out;
4531*4882a593Smuzhiyun }
4532*4882a593Smuzhiyun
4533*4882a593Smuzhiyun #ifdef CONFIG_SMP
4534*4882a593Smuzhiyun u32 hugetlb_fault_mutex_hash(struct address_space *mapping, pgoff_t idx)
4535*4882a593Smuzhiyun {
4536*4882a593Smuzhiyun unsigned long key[2];
4537*4882a593Smuzhiyun u32 hash;
4538*4882a593Smuzhiyun
4539*4882a593Smuzhiyun key[0] = (unsigned long) mapping;
4540*4882a593Smuzhiyun key[1] = idx;
4541*4882a593Smuzhiyun
4542*4882a593Smuzhiyun hash = jhash2((u32 *)&key, sizeof(key)/(sizeof(u32)), 0);
4543*4882a593Smuzhiyun
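	/*
	 * Assuming num_fault_mutexes is a power of two (it is rounded up to
	 * one at init time), masking the jhash2 value with
	 * (num_fault_mutexes - 1) spreads (mapping, idx) pairs roughly
	 * uniformly across the mutexes.
	 */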
4544*4882a593Smuzhiyun return hash & (num_fault_mutexes - 1);
4545*4882a593Smuzhiyun }
4546*4882a593Smuzhiyun #else
4547*4882a593Smuzhiyun /*
4548*4882a593Smuzhiyun * For uniprocessor systems we always use a single mutex, so just
4549*4882a593Smuzhiyun * return 0 and avoid the hashing overhead.
4550*4882a593Smuzhiyun */
4551*4882a593Smuzhiyun u32 hugetlb_fault_mutex_hash(struct address_space *mapping, pgoff_t idx)
4552*4882a593Smuzhiyun {
4553*4882a593Smuzhiyun return 0;
4554*4882a593Smuzhiyun }
4555*4882a593Smuzhiyun #endif
4556*4882a593Smuzhiyun
4557*4882a593Smuzhiyun vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
4558*4882a593Smuzhiyun unsigned long address, unsigned int flags)
4559*4882a593Smuzhiyun {
4560*4882a593Smuzhiyun pte_t *ptep, entry;
4561*4882a593Smuzhiyun spinlock_t *ptl;
4562*4882a593Smuzhiyun vm_fault_t ret;
4563*4882a593Smuzhiyun u32 hash;
4564*4882a593Smuzhiyun pgoff_t idx;
4565*4882a593Smuzhiyun struct page *page = NULL;
4566*4882a593Smuzhiyun struct page *pagecache_page = NULL;
4567*4882a593Smuzhiyun struct hstate *h = hstate_vma(vma);
4568*4882a593Smuzhiyun struct address_space *mapping;
4569*4882a593Smuzhiyun int need_wait_lock = 0;
4570*4882a593Smuzhiyun unsigned long haddr = address & huge_page_mask(h);
4571*4882a593Smuzhiyun
4572*4882a593Smuzhiyun ptep = huge_pte_offset(mm, haddr, huge_page_size(h));
4573*4882a593Smuzhiyun if (ptep) {
4574*4882a593Smuzhiyun /*
4575*4882a593Smuzhiyun * Since we hold no locks, ptep could be stale. That is
4576*4882a593Smuzhiyun * OK as we are only making decisions based on content and
4577*4882a593Smuzhiyun * not actually modifying content here.
4578*4882a593Smuzhiyun */
4579*4882a593Smuzhiyun entry = huge_ptep_get(ptep);
4580*4882a593Smuzhiyun if (unlikely(is_hugetlb_entry_migration(entry))) {
4581*4882a593Smuzhiyun migration_entry_wait_huge(vma, mm, ptep);
4582*4882a593Smuzhiyun return 0;
4583*4882a593Smuzhiyun } else if (unlikely(is_hugetlb_entry_hwpoisoned(entry)))
4584*4882a593Smuzhiyun return VM_FAULT_HWPOISON_LARGE |
4585*4882a593Smuzhiyun VM_FAULT_SET_HINDEX(hstate_index(h));
4586*4882a593Smuzhiyun }
4587*4882a593Smuzhiyun
4588*4882a593Smuzhiyun /*
4589*4882a593Smuzhiyun * Acquire i_mmap_rwsem before calling huge_pte_alloc and hold
4590*4882a593Smuzhiyun * until finished with ptep. This serves two purposes:
4591*4882a593Smuzhiyun * 1) It prevents huge_pmd_unshare from being called elsewhere
4592*4882a593Smuzhiyun * and making the ptep no longer valid.
4593*4882a593Smuzhiyun * 2) It synchronizes us with i_size modifications during truncation.
4594*4882a593Smuzhiyun *
4595*4882a593Smuzhiyun * ptep could have already been assigned via huge_pte_offset. That
4596*4882a593Smuzhiyun * is OK, as huge_pte_alloc will return the same value unless
4597*4882a593Smuzhiyun * something has changed.
4598*4882a593Smuzhiyun */
4599*4882a593Smuzhiyun mapping = vma->vm_file->f_mapping;
4600*4882a593Smuzhiyun i_mmap_lock_read(mapping);
4601*4882a593Smuzhiyun ptep = huge_pte_alloc(mm, vma, haddr, huge_page_size(h));
4602*4882a593Smuzhiyun if (!ptep) {
4603*4882a593Smuzhiyun i_mmap_unlock_read(mapping);
4604*4882a593Smuzhiyun return VM_FAULT_OOM;
4605*4882a593Smuzhiyun }
4606*4882a593Smuzhiyun
4607*4882a593Smuzhiyun /*
4608*4882a593Smuzhiyun * Serialize hugepage allocation and instantiation, so that we don't
4609*4882a593Smuzhiyun * get spurious allocation failures if two CPUs race to instantiate
4610*4882a593Smuzhiyun * the same page in the page cache.
4611*4882a593Smuzhiyun */
4612*4882a593Smuzhiyun idx = vma_hugecache_offset(h, vma, haddr);
4613*4882a593Smuzhiyun hash = hugetlb_fault_mutex_hash(mapping, idx);
4614*4882a593Smuzhiyun mutex_lock(&hugetlb_fault_mutex_table[hash]);
4615*4882a593Smuzhiyun
4616*4882a593Smuzhiyun entry = huge_ptep_get(ptep);
4617*4882a593Smuzhiyun if (huge_pte_none(entry))
4618*4882a593Smuzhiyun /*
4619*4882a593Smuzhiyun * hugetlb_no_page will drop vma lock and hugetlb fault
4620*4882a593Smuzhiyun * mutex internally, so we can return immediately.
4621*4882a593Smuzhiyun */
4622*4882a593Smuzhiyun return hugetlb_no_page(mm, vma, mapping, idx, address, ptep, flags);
4623*4882a593Smuzhiyun
4624*4882a593Smuzhiyun ret = 0;
4625*4882a593Smuzhiyun
4626*4882a593Smuzhiyun /*
4627*4882a593Smuzhiyun * entry could be a migration/hwpoison entry at this point, so this
4628*4882a593Smuzhiyun * check prevents the kernel from proceeding below on the assumption
4629*4882a593Smuzhiyun * that we have an active hugepage in the pagecache. This goto expects
4630*4882a593Smuzhiyun * a second page fault, where the is_hugetlb_entry_(migration|hwpoisoned)
4631*4882a593Smuzhiyun * check will properly handle it.
4632*4882a593Smuzhiyun */
4633*4882a593Smuzhiyun if (!pte_present(entry))
4634*4882a593Smuzhiyun goto out_mutex;
4635*4882a593Smuzhiyun
4636*4882a593Smuzhiyun /*
4637*4882a593Smuzhiyun * If we are going to COW the mapping later, we examine the pending
4638*4882a593Smuzhiyun * reservations for this page now. This will ensure that any
4639*4882a593Smuzhiyun * allocations necessary to record that reservation occur outside the
4640*4882a593Smuzhiyun * spinlock. For private mappings, we also lookup the pagecache
4641*4882a593Smuzhiyun * page now as it is used to determine if a reservation has been
4642*4882a593Smuzhiyun * consumed.
4643*4882a593Smuzhiyun */
4644*4882a593Smuzhiyun if ((flags & FAULT_FLAG_WRITE) && !huge_pte_write(entry)) {
4645*4882a593Smuzhiyun if (vma_needs_reservation(h, vma, haddr) < 0) {
4646*4882a593Smuzhiyun ret = VM_FAULT_OOM;
4647*4882a593Smuzhiyun goto out_mutex;
4648*4882a593Smuzhiyun }
4649*4882a593Smuzhiyun /* Just decrements count, does not deallocate */
4650*4882a593Smuzhiyun vma_end_reservation(h, vma, haddr);
4651*4882a593Smuzhiyun
4652*4882a593Smuzhiyun if (!(vma->vm_flags & VM_MAYSHARE))
4653*4882a593Smuzhiyun pagecache_page = hugetlbfs_pagecache_page(h,
4654*4882a593Smuzhiyun vma, haddr);
4655*4882a593Smuzhiyun }
4656*4882a593Smuzhiyun
4657*4882a593Smuzhiyun ptl = huge_pte_lock(h, mm, ptep);
4658*4882a593Smuzhiyun
4659*4882a593Smuzhiyun /* Check for a racing update before calling hugetlb_cow */
4660*4882a593Smuzhiyun if (unlikely(!pte_same(entry, huge_ptep_get(ptep))))
4661*4882a593Smuzhiyun goto out_ptl;
4662*4882a593Smuzhiyun
4663*4882a593Smuzhiyun /*
4664*4882a593Smuzhiyun * hugetlb_cow() requires page locks of pte_page(entry) and
4665*4882a593Smuzhiyun * pagecache_page, so here we need to take the former one
4666*4882a593Smuzhiyun * when page != pagecache_page or !pagecache_page.
4667*4882a593Smuzhiyun */
4668*4882a593Smuzhiyun page = pte_page(entry);
4669*4882a593Smuzhiyun if (page != pagecache_page)
4670*4882a593Smuzhiyun if (!trylock_page(page)) {
4671*4882a593Smuzhiyun need_wait_lock = 1;
4672*4882a593Smuzhiyun goto out_ptl;
4673*4882a593Smuzhiyun }
4674*4882a593Smuzhiyun
4675*4882a593Smuzhiyun get_page(page);
4676*4882a593Smuzhiyun
4677*4882a593Smuzhiyun if (flags & FAULT_FLAG_WRITE) {
4678*4882a593Smuzhiyun if (!huge_pte_write(entry)) {
4679*4882a593Smuzhiyun ret = hugetlb_cow(mm, vma, address, ptep,
4680*4882a593Smuzhiyun pagecache_page, ptl);
4681*4882a593Smuzhiyun goto out_put_page;
4682*4882a593Smuzhiyun }
4683*4882a593Smuzhiyun entry = huge_pte_mkdirty(entry);
4684*4882a593Smuzhiyun }
4685*4882a593Smuzhiyun entry = pte_mkyoung(entry);
4686*4882a593Smuzhiyun if (huge_ptep_set_access_flags(vma, haddr, ptep, entry,
4687*4882a593Smuzhiyun flags & FAULT_FLAG_WRITE))
4688*4882a593Smuzhiyun update_mmu_cache(vma, haddr, ptep);
4689*4882a593Smuzhiyun out_put_page:
4690*4882a593Smuzhiyun if (page != pagecache_page)
4691*4882a593Smuzhiyun unlock_page(page);
4692*4882a593Smuzhiyun put_page(page);
4693*4882a593Smuzhiyun out_ptl:
4694*4882a593Smuzhiyun spin_unlock(ptl);
4695*4882a593Smuzhiyun
4696*4882a593Smuzhiyun if (pagecache_page) {
4697*4882a593Smuzhiyun unlock_page(pagecache_page);
4698*4882a593Smuzhiyun put_page(pagecache_page);
4699*4882a593Smuzhiyun }
4700*4882a593Smuzhiyun out_mutex:
4701*4882a593Smuzhiyun mutex_unlock(&hugetlb_fault_mutex_table[hash]);
4702*4882a593Smuzhiyun i_mmap_unlock_read(mapping);
4703*4882a593Smuzhiyun /*
4704*4882a593Smuzhiyun * Generally it's safe to hold a refcount while waiting for the page lock.
4705*4882a593Smuzhiyun * But here we only wait to defer the next page fault and avoid a busy
4706*4882a593Smuzhiyun * loop, and the page is not used after being unlocked before returning
4707*4882a593Smuzhiyun * from the current page fault. So we are safe from accessing a freed
4708*4882a593Smuzhiyun * page, even if we wait here without taking a refcount.
4709*4882a593Smuzhiyun */
4710*4882a593Smuzhiyun if (need_wait_lock)
4711*4882a593Smuzhiyun wait_on_page_locked(page);
4712*4882a593Smuzhiyun return ret;
4713*4882a593Smuzhiyun }
4714*4882a593Smuzhiyun
4715*4882a593Smuzhiyun #ifdef CONFIG_USERFAULTFD
4716*4882a593Smuzhiyun /*
4717*4882a593Smuzhiyun * Used by userfaultfd UFFDIO_COPY. Based on mcopy_atomic_pte with
4718*4882a593Smuzhiyun * modifications for huge pages.
4719*4882a593Smuzhiyun */
4720*4882a593Smuzhiyun int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm,
4721*4882a593Smuzhiyun pte_t *dst_pte,
4722*4882a593Smuzhiyun struct vm_area_struct *dst_vma,
4723*4882a593Smuzhiyun unsigned long dst_addr,
4724*4882a593Smuzhiyun unsigned long src_addr,
4725*4882a593Smuzhiyun enum mcopy_atomic_mode mode,
4726*4882a593Smuzhiyun struct page **pagep)
4727*4882a593Smuzhiyun {
4728*4882a593Smuzhiyun bool is_continue = (mode == MCOPY_ATOMIC_CONTINUE);
4729*4882a593Smuzhiyun struct address_space *mapping;
4730*4882a593Smuzhiyun pgoff_t idx;
4731*4882a593Smuzhiyun unsigned long size;
4732*4882a593Smuzhiyun int vm_shared = dst_vma->vm_flags & VM_SHARED;
4733*4882a593Smuzhiyun struct hstate *h = hstate_vma(dst_vma);
4734*4882a593Smuzhiyun pte_t _dst_pte;
4735*4882a593Smuzhiyun spinlock_t *ptl;
4736*4882a593Smuzhiyun int ret;
4737*4882a593Smuzhiyun struct page *page;
4738*4882a593Smuzhiyun int writable;
4739*4882a593Smuzhiyun
4740*4882a593Smuzhiyun mapping = dst_vma->vm_file->f_mapping;
4741*4882a593Smuzhiyun idx = vma_hugecache_offset(h, dst_vma, dst_addr);
4742*4882a593Smuzhiyun
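	/*
	 * Three cases follow: MCOPY_ATOMIC_CONTINUE reuses the page already
	 * in the page cache, a NULL *pagep means a fresh huge page must be
	 * allocated and filled from user space, and a non-NULL *pagep carries
	 * over a page from an earlier -ENOENT fallback copy done without
	 * mmap_lock.
	 */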
4743*4882a593Smuzhiyun if (is_continue) {
4744*4882a593Smuzhiyun ret = -EFAULT;
4745*4882a593Smuzhiyun page = find_lock_page(mapping, idx);
4746*4882a593Smuzhiyun if (!page)
4747*4882a593Smuzhiyun goto out;
4748*4882a593Smuzhiyun } else if (!*pagep) {
4749*4882a593Smuzhiyun /* If a page already exists, then it's UFFDIO_COPY for
4750*4882a593Smuzhiyun * a non-missing case. Return -EEXIST.
4751*4882a593Smuzhiyun */
4752*4882a593Smuzhiyun if (vm_shared &&
4753*4882a593Smuzhiyun hugetlbfs_pagecache_present(h, dst_vma, dst_addr)) {
4754*4882a593Smuzhiyun ret = -EEXIST;
4755*4882a593Smuzhiyun goto out;
4756*4882a593Smuzhiyun }
4757*4882a593Smuzhiyun
4758*4882a593Smuzhiyun page = alloc_huge_page(dst_vma, dst_addr, 0);
4759*4882a593Smuzhiyun if (IS_ERR(page)) {
4760*4882a593Smuzhiyun ret = -ENOMEM;
4761*4882a593Smuzhiyun goto out;
4762*4882a593Smuzhiyun }
4763*4882a593Smuzhiyun
4764*4882a593Smuzhiyun ret = copy_huge_page_from_user(page,
4765*4882a593Smuzhiyun (const void __user *) src_addr,
4766*4882a593Smuzhiyun pages_per_huge_page(h), false);
4767*4882a593Smuzhiyun
4768*4882a593Smuzhiyun /* fallback to copy_from_user outside mmap_lock */
4769*4882a593Smuzhiyun if (unlikely(ret)) {
4770*4882a593Smuzhiyun ret = -ENOENT;
4771*4882a593Smuzhiyun *pagep = page;
4772*4882a593Smuzhiyun /* don't free the page */
4773*4882a593Smuzhiyun goto out;
4774*4882a593Smuzhiyun }
4775*4882a593Smuzhiyun } else {
4776*4882a593Smuzhiyun page = *pagep;
4777*4882a593Smuzhiyun *pagep = NULL;
4778*4882a593Smuzhiyun }
4779*4882a593Smuzhiyun
4780*4882a593Smuzhiyun /*
4781*4882a593Smuzhiyun * The memory barrier inside __SetPageUptodate makes sure that
4782*4882a593Smuzhiyun * preceding stores to the page contents become visible before
4783*4882a593Smuzhiyun * the set_pte_at() write.
4784*4882a593Smuzhiyun */
4785*4882a593Smuzhiyun __SetPageUptodate(page);
4786*4882a593Smuzhiyun
4787*4882a593Smuzhiyun /* Add shared, newly allocated pages to the page cache. */
4788*4882a593Smuzhiyun if (vm_shared && !is_continue) {
4789*4882a593Smuzhiyun size = i_size_read(mapping->host) >> huge_page_shift(h);
4790*4882a593Smuzhiyun ret = -EFAULT;
4791*4882a593Smuzhiyun if (idx >= size)
4792*4882a593Smuzhiyun goto out_release_nounlock;
4793*4882a593Smuzhiyun
4794*4882a593Smuzhiyun /*
4795*4882a593Smuzhiyun * Serialization between remove_inode_hugepages() and
4796*4882a593Smuzhiyun * huge_add_to_page_cache() below happens through the
4797*4882a593Smuzhiyun * hugetlb_fault_mutex_table that here must be held by
4798*4882a593Smuzhiyun * the caller.
4799*4882a593Smuzhiyun */
4800*4882a593Smuzhiyun ret = huge_add_to_page_cache(page, mapping, idx);
4801*4882a593Smuzhiyun if (ret)
4802*4882a593Smuzhiyun goto out_release_nounlock;
4803*4882a593Smuzhiyun }
4804*4882a593Smuzhiyun
4805*4882a593Smuzhiyun ptl = huge_pte_lockptr(h, dst_mm, dst_pte);
4806*4882a593Smuzhiyun spin_lock(ptl);
4807*4882a593Smuzhiyun
4808*4882a593Smuzhiyun /*
4809*4882a593Smuzhiyun * Recheck the i_size after holding PT lock to make sure not
4810*4882a593Smuzhiyun * to leave any page mapped (as page_mapped()) beyond the end
4811*4882a593Smuzhiyun * of the i_size (remove_inode_hugepages() is strict about
4812*4882a593Smuzhiyun * enforcing that). If we bail out here, we'll also leave a
4813*4882a593Smuzhiyun * page in the radix tree in the vm_shared case beyond the end
4814*4882a593Smuzhiyun * of the i_size, but remove_inode_hugepages() will take care
4815*4882a593Smuzhiyun * of it as soon as we drop the hugetlb_fault_mutex_table.
4816*4882a593Smuzhiyun */
4817*4882a593Smuzhiyun size = i_size_read(mapping->host) >> huge_page_shift(h);
4818*4882a593Smuzhiyun ret = -EFAULT;
4819*4882a593Smuzhiyun if (idx >= size)
4820*4882a593Smuzhiyun goto out_release_unlock;
4821*4882a593Smuzhiyun
4822*4882a593Smuzhiyun ret = -EEXIST;
4823*4882a593Smuzhiyun if (!huge_pte_none(huge_ptep_get(dst_pte)))
4824*4882a593Smuzhiyun goto out_release_unlock;
4825*4882a593Smuzhiyun
4826*4882a593Smuzhiyun if (vm_shared) {
4827*4882a593Smuzhiyun page_dup_rmap(page, true);
4828*4882a593Smuzhiyun } else {
4829*4882a593Smuzhiyun ClearPagePrivate(page);
4830*4882a593Smuzhiyun hugepage_add_new_anon_rmap(page, dst_vma, dst_addr);
4831*4882a593Smuzhiyun }
4832*4882a593Smuzhiyun
4833*4882a593Smuzhiyun /* For CONTINUE on a non-shared VMA, don't set VM_WRITE for CoW. */
4834*4882a593Smuzhiyun if (is_continue && !vm_shared)
4835*4882a593Smuzhiyun writable = 0;
4836*4882a593Smuzhiyun else
4837*4882a593Smuzhiyun writable = dst_vma->vm_flags & VM_WRITE;
4838*4882a593Smuzhiyun
4839*4882a593Smuzhiyun _dst_pte = make_huge_pte(dst_vma, page, writable);
4840*4882a593Smuzhiyun if (writable)
4841*4882a593Smuzhiyun _dst_pte = huge_pte_mkdirty(_dst_pte);
4842*4882a593Smuzhiyun _dst_pte = pte_mkyoung(_dst_pte);
4843*4882a593Smuzhiyun
4844*4882a593Smuzhiyun set_huge_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte);
4845*4882a593Smuzhiyun
4846*4882a593Smuzhiyun (void)huge_ptep_set_access_flags(dst_vma, dst_addr, dst_pte, _dst_pte,
4847*4882a593Smuzhiyun dst_vma->vm_flags & VM_WRITE);
4848*4882a593Smuzhiyun hugetlb_count_add(pages_per_huge_page(h), dst_mm);
4849*4882a593Smuzhiyun
4850*4882a593Smuzhiyun /* No need to invalidate - it was non-present before */
4851*4882a593Smuzhiyun update_mmu_cache(dst_vma, dst_addr, dst_pte);
4852*4882a593Smuzhiyun
4853*4882a593Smuzhiyun spin_unlock(ptl);
4854*4882a593Smuzhiyun if (!is_continue)
4855*4882a593Smuzhiyun set_page_huge_active(page);
4856*4882a593Smuzhiyun if (vm_shared || is_continue)
4857*4882a593Smuzhiyun unlock_page(page);
4858*4882a593Smuzhiyun ret = 0;
4859*4882a593Smuzhiyun out:
4860*4882a593Smuzhiyun return ret;
4861*4882a593Smuzhiyun out_release_unlock:
4862*4882a593Smuzhiyun spin_unlock(ptl);
4863*4882a593Smuzhiyun if (vm_shared || is_continue)
4864*4882a593Smuzhiyun unlock_page(page);
4865*4882a593Smuzhiyun out_release_nounlock:
4866*4882a593Smuzhiyun put_page(page);
4867*4882a593Smuzhiyun goto out;
4868*4882a593Smuzhiyun }
4869*4882a593Smuzhiyun #endif /* CONFIG_USERFAULTFD */
4870*4882a593Smuzhiyun
4871*4882a593Smuzhiyun long follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
4872*4882a593Smuzhiyun struct page **pages, struct vm_area_struct **vmas,
4873*4882a593Smuzhiyun unsigned long *position, unsigned long *nr_pages,
4874*4882a593Smuzhiyun long i, unsigned int flags, int *locked)
4875*4882a593Smuzhiyun {
4876*4882a593Smuzhiyun unsigned long pfn_offset;
4877*4882a593Smuzhiyun unsigned long vaddr = *position;
4878*4882a593Smuzhiyun unsigned long remainder = *nr_pages;
4879*4882a593Smuzhiyun struct hstate *h = hstate_vma(vma);
4880*4882a593Smuzhiyun int err = -EFAULT;
4881*4882a593Smuzhiyun
4882*4882a593Smuzhiyun while (vaddr < vma->vm_end && remainder) {
4883*4882a593Smuzhiyun pte_t *pte;
4884*4882a593Smuzhiyun spinlock_t *ptl = NULL;
4885*4882a593Smuzhiyun int absent;
4886*4882a593Smuzhiyun struct page *page;
4887*4882a593Smuzhiyun
4888*4882a593Smuzhiyun /*
4889*4882a593Smuzhiyun * If we have a pending SIGKILL, don't keep faulting pages and
4890*4882a593Smuzhiyun * potentially allocating memory.
4891*4882a593Smuzhiyun */
4892*4882a593Smuzhiyun if (fatal_signal_pending(current)) {
4893*4882a593Smuzhiyun remainder = 0;
4894*4882a593Smuzhiyun break;
4895*4882a593Smuzhiyun }
4896*4882a593Smuzhiyun
4897*4882a593Smuzhiyun /*
4898*4882a593Smuzhiyun * Some archs (sparc64, sh*) have multiple pte_ts to
4899*4882a593Smuzhiyun * each hugepage. We have to make sure we get the
4900*4882a593Smuzhiyun * first, for the page indexing below to work.
4901*4882a593Smuzhiyun *
4902*4882a593Smuzhiyun * Note that page table lock is not held when pte is null.
4903*4882a593Smuzhiyun */
4904*4882a593Smuzhiyun pte = huge_pte_offset(mm, vaddr & huge_page_mask(h),
4905*4882a593Smuzhiyun huge_page_size(h));
4906*4882a593Smuzhiyun if (pte)
4907*4882a593Smuzhiyun ptl = huge_pte_lock(h, mm, pte);
4908*4882a593Smuzhiyun absent = !pte || huge_pte_none(huge_ptep_get(pte));
4909*4882a593Smuzhiyun
4910*4882a593Smuzhiyun /*
4911*4882a593Smuzhiyun * When coredumping, it suits get_dump_page if we just return
4912*4882a593Smuzhiyun * an error where there's an empty slot with no huge pagecache
4913*4882a593Smuzhiyun * to back it. This way, we avoid allocating a hugepage, and
4914*4882a593Smuzhiyun * the sparse dumpfile avoids allocating disk blocks, but its
4915*4882a593Smuzhiyun * huge holes still show up with zeroes where they need to be.
4916*4882a593Smuzhiyun */
4917*4882a593Smuzhiyun if (absent && (flags & FOLL_DUMP) &&
4918*4882a593Smuzhiyun !hugetlbfs_pagecache_present(h, vma, vaddr)) {
4919*4882a593Smuzhiyun if (pte)
4920*4882a593Smuzhiyun spin_unlock(ptl);
4921*4882a593Smuzhiyun remainder = 0;
4922*4882a593Smuzhiyun break;
4923*4882a593Smuzhiyun }
4924*4882a593Smuzhiyun
4925*4882a593Smuzhiyun /*
4926*4882a593Smuzhiyun * We need to call hugetlb_fault for both hugepages under migration
4927*4882a593Smuzhiyun * (in which case hugetlb_fault waits for the migration) and
4928*4882a593Smuzhiyun * hwpoisoned hugepages (in which case we need to prevent the
4929*4882a593Smuzhiyun * caller from accessing them). In order to do this, we use
4930*4882a593Smuzhiyun * is_swap_pte here instead of is_hugetlb_entry_migration and
4931*4882a593Smuzhiyun * is_hugetlb_entry_hwpoisoned. This is because it simply covers
4932*4882a593Smuzhiyun * both cases, and because we can't follow correct pages
4933*4882a593Smuzhiyun * directly from any kind of swap entries.
4934*4882a593Smuzhiyun */
4935*4882a593Smuzhiyun if (absent || is_swap_pte(huge_ptep_get(pte)) ||
4936*4882a593Smuzhiyun ((flags & FOLL_WRITE) &&
4937*4882a593Smuzhiyun !huge_pte_write(huge_ptep_get(pte)))) {
4938*4882a593Smuzhiyun vm_fault_t ret;
4939*4882a593Smuzhiyun unsigned int fault_flags = 0;
4940*4882a593Smuzhiyun
4941*4882a593Smuzhiyun if (pte)
4942*4882a593Smuzhiyun spin_unlock(ptl);
4943*4882a593Smuzhiyun if (flags & FOLL_WRITE)
4944*4882a593Smuzhiyun fault_flags |= FAULT_FLAG_WRITE;
4945*4882a593Smuzhiyun if (locked)
4946*4882a593Smuzhiyun fault_flags |= FAULT_FLAG_ALLOW_RETRY |
4947*4882a593Smuzhiyun FAULT_FLAG_KILLABLE;
4948*4882a593Smuzhiyun if (flags & FOLL_NOWAIT)
4949*4882a593Smuzhiyun fault_flags |= FAULT_FLAG_ALLOW_RETRY |
4950*4882a593Smuzhiyun FAULT_FLAG_RETRY_NOWAIT;
4951*4882a593Smuzhiyun if (flags & FOLL_TRIED) {
4952*4882a593Smuzhiyun /*
4953*4882a593Smuzhiyun * Note: FAULT_FLAG_ALLOW_RETRY and
4954*4882a593Smuzhiyun * FAULT_FLAG_TRIED can co-exist
4955*4882a593Smuzhiyun */
4956*4882a593Smuzhiyun fault_flags |= FAULT_FLAG_TRIED;
4957*4882a593Smuzhiyun }
4958*4882a593Smuzhiyun ret = hugetlb_fault(mm, vma, vaddr, fault_flags);
4959*4882a593Smuzhiyun if (ret & VM_FAULT_ERROR) {
4960*4882a593Smuzhiyun err = vm_fault_to_errno(ret, flags);
4961*4882a593Smuzhiyun remainder = 0;
4962*4882a593Smuzhiyun break;
4963*4882a593Smuzhiyun }
4964*4882a593Smuzhiyun if (ret & VM_FAULT_RETRY) {
4965*4882a593Smuzhiyun if (locked &&
4966*4882a593Smuzhiyun !(fault_flags & FAULT_FLAG_RETRY_NOWAIT))
4967*4882a593Smuzhiyun *locked = 0;
4968*4882a593Smuzhiyun *nr_pages = 0;
4969*4882a593Smuzhiyun /*
4970*4882a593Smuzhiyun * VM_FAULT_RETRY must not return an
4971*4882a593Smuzhiyun * error, it will return zero
4972*4882a593Smuzhiyun * instead.
4973*4882a593Smuzhiyun *
4974*4882a593Smuzhiyun * No need to update "position" as the
4975*4882a593Smuzhiyun * caller will not check it after
4976*4882a593Smuzhiyun * *nr_pages is set to 0.
4977*4882a593Smuzhiyun */
4978*4882a593Smuzhiyun return i;
4979*4882a593Smuzhiyun }
4980*4882a593Smuzhiyun continue;
4981*4882a593Smuzhiyun }
4982*4882a593Smuzhiyun
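		/*
		 * pfn_offset is the index of the base page within the huge
		 * page at which vaddr falls; pages[i] below is filled with
		 * that subpage rather than the head page.
		 */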
4983*4882a593Smuzhiyun pfn_offset = (vaddr & ~huge_page_mask(h)) >> PAGE_SHIFT;
4984*4882a593Smuzhiyun page = pte_page(huge_ptep_get(pte));
4985*4882a593Smuzhiyun
4986*4882a593Smuzhiyun /*
4987*4882a593Smuzhiyun * If subpage information is not requested, update counters
4988*4882a593Smuzhiyun * and skip the same_page loop below.
4989*4882a593Smuzhiyun */
4990*4882a593Smuzhiyun if (!pages && !vmas && !pfn_offset &&
4991*4882a593Smuzhiyun (vaddr + huge_page_size(h) < vma->vm_end) &&
4992*4882a593Smuzhiyun (remainder >= pages_per_huge_page(h))) {
4993*4882a593Smuzhiyun vaddr += huge_page_size(h);
4994*4882a593Smuzhiyun remainder -= pages_per_huge_page(h);
4995*4882a593Smuzhiyun i += pages_per_huge_page(h);
4996*4882a593Smuzhiyun spin_unlock(ptl);
4997*4882a593Smuzhiyun continue;
4998*4882a593Smuzhiyun }
4999*4882a593Smuzhiyun
5000*4882a593Smuzhiyun same_page:
5001*4882a593Smuzhiyun if (pages) {
5002*4882a593Smuzhiyun pages[i] = mem_map_offset(page, pfn_offset);
5003*4882a593Smuzhiyun /*
5004*4882a593Smuzhiyun * try_grab_page() should always succeed here, because:
5005*4882a593Smuzhiyun * a) we hold the ptl lock, and b) we've just checked
5006*4882a593Smuzhiyun * that the huge page is present in the page tables. If
5007*4882a593Smuzhiyun * the huge page is present, then the tail pages must
5008*4882a593Smuzhiyun * also be present. The ptl prevents the head page and
5009*4882a593Smuzhiyun * tail pages from being rearranged in any way. So this
5010*4882a593Smuzhiyun * page must be available at this point, unless the page
5011*4882a593Smuzhiyun * refcount overflowed:
5012*4882a593Smuzhiyun */
5013*4882a593Smuzhiyun if (WARN_ON_ONCE(!try_grab_page(pages[i], flags))) {
5014*4882a593Smuzhiyun spin_unlock(ptl);
5015*4882a593Smuzhiyun remainder = 0;
5016*4882a593Smuzhiyun err = -ENOMEM;
5017*4882a593Smuzhiyun break;
5018*4882a593Smuzhiyun }
5019*4882a593Smuzhiyun }
5020*4882a593Smuzhiyun
5021*4882a593Smuzhiyun if (vmas)
5022*4882a593Smuzhiyun vmas[i] = vma;
5023*4882a593Smuzhiyun
5024*4882a593Smuzhiyun vaddr += PAGE_SIZE;
5025*4882a593Smuzhiyun ++pfn_offset;
5026*4882a593Smuzhiyun --remainder;
5027*4882a593Smuzhiyun ++i;
5028*4882a593Smuzhiyun if (vaddr < vma->vm_end && remainder &&
5029*4882a593Smuzhiyun pfn_offset < pages_per_huge_page(h)) {
5030*4882a593Smuzhiyun /*
5031*4882a593Smuzhiyun * We use pfn_offset to avoid touching the pageframes
5032*4882a593Smuzhiyun * of this compound page.
5033*4882a593Smuzhiyun */
5034*4882a593Smuzhiyun goto same_page;
5035*4882a593Smuzhiyun }
5036*4882a593Smuzhiyun spin_unlock(ptl);
5037*4882a593Smuzhiyun }
5038*4882a593Smuzhiyun *nr_pages = remainder;
5039*4882a593Smuzhiyun /*
5040*4882a593Smuzhiyun * setting position is actually required only if remainder is
5041*4882a593Smuzhiyun * not zero, but it's faster not to add an "if (remainder)"
5042*4882a593Smuzhiyun * branch.
5043*4882a593Smuzhiyun */
5044*4882a593Smuzhiyun *position = vaddr;
5045*4882a593Smuzhiyun
5046*4882a593Smuzhiyun return i ? i : err;
5047*4882a593Smuzhiyun }
5048*4882a593Smuzhiyun
5049*4882a593Smuzhiyun unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
5050*4882a593Smuzhiyun unsigned long address, unsigned long end, pgprot_t newprot)
5051*4882a593Smuzhiyun {
5052*4882a593Smuzhiyun struct mm_struct *mm = vma->vm_mm;
5053*4882a593Smuzhiyun unsigned long start = address;
5054*4882a593Smuzhiyun pte_t *ptep;
5055*4882a593Smuzhiyun pte_t pte;
5056*4882a593Smuzhiyun struct hstate *h = hstate_vma(vma);
5057*4882a593Smuzhiyun unsigned long pages = 0;
5058*4882a593Smuzhiyun bool shared_pmd = false;
5059*4882a593Smuzhiyun struct mmu_notifier_range range;
5060*4882a593Smuzhiyun
5061*4882a593Smuzhiyun /*
5062*4882a593Smuzhiyun * In the case of shared PMDs, the area to flush could be beyond
5063*4882a593Smuzhiyun * start/end. Set range.start/range.end to cover the maximum possible
5064*4882a593Smuzhiyun * range if PMD sharing is possible.
5065*4882a593Smuzhiyun */
5066*4882a593Smuzhiyun mmu_notifier_range_init(&range, MMU_NOTIFY_PROTECTION_VMA,
5067*4882a593Smuzhiyun 0, vma, mm, start, end);
5068*4882a593Smuzhiyun adjust_range_if_pmd_sharing_possible(vma, &range.start, &range.end);
5069*4882a593Smuzhiyun
5070*4882a593Smuzhiyun BUG_ON(address >= end);
5071*4882a593Smuzhiyun flush_cache_range(vma, range.start, range.end);
5072*4882a593Smuzhiyun
5073*4882a593Smuzhiyun mmu_notifier_invalidate_range_start(&range);
5074*4882a593Smuzhiyun i_mmap_lock_write(vma->vm_file->f_mapping);
5075*4882a593Smuzhiyun for (; address < end; address += huge_page_size(h)) {
5076*4882a593Smuzhiyun spinlock_t *ptl;
5077*4882a593Smuzhiyun ptep = huge_pte_offset(mm, address, huge_page_size(h));
5078*4882a593Smuzhiyun if (!ptep)
5079*4882a593Smuzhiyun continue;
5080*4882a593Smuzhiyun ptl = huge_pte_lock(h, mm, ptep);
5081*4882a593Smuzhiyun if (huge_pmd_unshare(mm, vma, &address, ptep)) {
5082*4882a593Smuzhiyun pages++;
5083*4882a593Smuzhiyun spin_unlock(ptl);
5084*4882a593Smuzhiyun shared_pmd = true;
5085*4882a593Smuzhiyun continue;
5086*4882a593Smuzhiyun }
5087*4882a593Smuzhiyun pte = huge_ptep_get(ptep);
5088*4882a593Smuzhiyun if (unlikely(is_hugetlb_entry_hwpoisoned(pte))) {
5089*4882a593Smuzhiyun spin_unlock(ptl);
5090*4882a593Smuzhiyun continue;
5091*4882a593Smuzhiyun }
5092*4882a593Smuzhiyun if (unlikely(is_hugetlb_entry_migration(pte))) {
5093*4882a593Smuzhiyun swp_entry_t entry = pte_to_swp_entry(pte);
5094*4882a593Smuzhiyun
5095*4882a593Smuzhiyun if (is_write_migration_entry(entry)) {
5096*4882a593Smuzhiyun pte_t newpte;
5097*4882a593Smuzhiyun
5098*4882a593Smuzhiyun make_migration_entry_read(&entry);
5099*4882a593Smuzhiyun newpte = swp_entry_to_pte(entry);
5100*4882a593Smuzhiyun set_huge_swap_pte_at(mm, address, ptep,
5101*4882a593Smuzhiyun newpte, huge_page_size(h));
5102*4882a593Smuzhiyun pages++;
5103*4882a593Smuzhiyun }
5104*4882a593Smuzhiyun spin_unlock(ptl);
5105*4882a593Smuzhiyun continue;
5106*4882a593Smuzhiyun }
5107*4882a593Smuzhiyun if (!huge_pte_none(pte)) {
5108*4882a593Smuzhiyun pte_t old_pte;
5109*4882a593Smuzhiyun
5110*4882a593Smuzhiyun old_pte = huge_ptep_modify_prot_start(vma, address, ptep);
5111*4882a593Smuzhiyun pte = pte_mkhuge(huge_pte_modify(old_pte, newprot));
5112*4882a593Smuzhiyun pte = arch_make_huge_pte(pte, vma, NULL, 0);
5113*4882a593Smuzhiyun huge_ptep_modify_prot_commit(vma, address, ptep, old_pte, pte);
5114*4882a593Smuzhiyun pages++;
5115*4882a593Smuzhiyun }
5116*4882a593Smuzhiyun spin_unlock(ptl);
5117*4882a593Smuzhiyun }
5118*4882a593Smuzhiyun /*
5119*4882a593Smuzhiyun * Must flush TLB before releasing i_mmap_rwsem: x86's huge_pmd_unshare
5120*4882a593Smuzhiyun * may have cleared our pud entry and done put_page on the page table:
5121*4882a593Smuzhiyun * once we release i_mmap_rwsem, another task can do the final put_page
5122*4882a593Smuzhiyun * and that page table may be reused and filled with junk. If we actually
5123*4882a593Smuzhiyun * did unshare a page of pmds, flush the range corresponding to the pud.
5124*4882a593Smuzhiyun */
5125*4882a593Smuzhiyun if (shared_pmd)
5126*4882a593Smuzhiyun flush_hugetlb_tlb_range(vma, range.start, range.end);
5127*4882a593Smuzhiyun else
5128*4882a593Smuzhiyun flush_hugetlb_tlb_range(vma, start, end);
5129*4882a593Smuzhiyun /*
5130*4882a593Smuzhiyun * No need to call mmu_notifier_invalidate_range() we are downgrading
5131*4882a593Smuzhiyun * page table protection not changing it to point to a new page.
5132*4882a593Smuzhiyun *
5133*4882a593Smuzhiyun * See Documentation/vm/mmu_notifier.rst
5134*4882a593Smuzhiyun */
5135*4882a593Smuzhiyun i_mmap_unlock_write(vma->vm_file->f_mapping);
5136*4882a593Smuzhiyun mmu_notifier_invalidate_range_end(&range);
5137*4882a593Smuzhiyun
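	/* Report the number of base pages whose protection was changed. */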
5138*4882a593Smuzhiyun return pages << h->order;
5139*4882a593Smuzhiyun }
5140*4882a593Smuzhiyun
5141*4882a593Smuzhiyun int hugetlb_reserve_pages(struct inode *inode,
5142*4882a593Smuzhiyun long from, long to,
5143*4882a593Smuzhiyun struct vm_area_struct *vma,
5144*4882a593Smuzhiyun vm_flags_t vm_flags)
5145*4882a593Smuzhiyun {
5146*4882a593Smuzhiyun long ret, chg, add = -1;
5147*4882a593Smuzhiyun struct hstate *h = hstate_inode(inode);
5148*4882a593Smuzhiyun struct hugepage_subpool *spool = subpool_inode(inode);
5149*4882a593Smuzhiyun struct resv_map *resv_map;
5150*4882a593Smuzhiyun struct hugetlb_cgroup *h_cg = NULL;
5151*4882a593Smuzhiyun long gbl_reserve, regions_needed = 0;
5152*4882a593Smuzhiyun
5153*4882a593Smuzhiyun /* This should never happen */
5154*4882a593Smuzhiyun if (from > to) {
5155*4882a593Smuzhiyun VM_WARN(1, "%s called with a negative range\n", __func__);
5156*4882a593Smuzhiyun return -EINVAL;
5157*4882a593Smuzhiyun }
5158*4882a593Smuzhiyun
5159*4882a593Smuzhiyun /*
5160*4882a593Smuzhiyun * Only apply hugepage reservation if asked. At fault time, an
5161*4882a593Smuzhiyun * attempt will be made for VM_NORESERVE to allocate a page
5162*4882a593Smuzhiyun * without using reserves.
5163*4882a593Smuzhiyun */
5164*4882a593Smuzhiyun if (vm_flags & VM_NORESERVE)
5165*4882a593Smuzhiyun return 0;
5166*4882a593Smuzhiyun
5167*4882a593Smuzhiyun /*
5168*4882a593Smuzhiyun * Shared mappings base their reservation on the number of pages that
5169*4882a593Smuzhiyun * are already allocated on behalf of the file. Private mappings need
5170*4882a593Smuzhiyun * to reserve the full area even if read-only as mprotect() may be
5171*4882a593Smuzhiyun * called to make the mapping read-write. Assume !vma is a shm mapping
5172*4882a593Smuzhiyun */
5173*4882a593Smuzhiyun if (!vma || vma->vm_flags & VM_MAYSHARE) {
5174*4882a593Smuzhiyun /*
5175*4882a593Smuzhiyun * resv_map can not be NULL as hugetlb_reserve_pages is only
5176*4882a593Smuzhiyun * called for inodes for which resv_maps were created (see
5177*4882a593Smuzhiyun * hugetlbfs_get_inode).
5178*4882a593Smuzhiyun */
5179*4882a593Smuzhiyun resv_map = inode_resv_map(inode);
5180*4882a593Smuzhiyun
5181*4882a593Smuzhiyun chg = region_chg(resv_map, from, to, &regions_needed);
5182*4882a593Smuzhiyun
5183*4882a593Smuzhiyun } else {
5184*4882a593Smuzhiyun /* Private mapping. */
5185*4882a593Smuzhiyun resv_map = resv_map_alloc();
5186*4882a593Smuzhiyun if (!resv_map)
5187*4882a593Smuzhiyun return -ENOMEM;
5188*4882a593Smuzhiyun
5189*4882a593Smuzhiyun chg = to - from;
5190*4882a593Smuzhiyun
5191*4882a593Smuzhiyun set_vma_resv_map(vma, resv_map);
5192*4882a593Smuzhiyun set_vma_resv_flags(vma, HPAGE_RESV_OWNER);
5193*4882a593Smuzhiyun }
5194*4882a593Smuzhiyun
5195*4882a593Smuzhiyun if (chg < 0) {
5196*4882a593Smuzhiyun ret = chg;
5197*4882a593Smuzhiyun goto out_err;
5198*4882a593Smuzhiyun }
5199*4882a593Smuzhiyun
5200*4882a593Smuzhiyun ret = hugetlb_cgroup_charge_cgroup_rsvd(
5201*4882a593Smuzhiyun hstate_index(h), chg * pages_per_huge_page(h), &h_cg);
5202*4882a593Smuzhiyun
5203*4882a593Smuzhiyun if (ret < 0) {
5204*4882a593Smuzhiyun ret = -ENOMEM;
5205*4882a593Smuzhiyun goto out_err;
5206*4882a593Smuzhiyun }
5207*4882a593Smuzhiyun
5208*4882a593Smuzhiyun if (vma && !(vma->vm_flags & VM_MAYSHARE) && h_cg) {
5209*4882a593Smuzhiyun /* For private mappings, the hugetlb_cgroup uncharge info hangs
5210*4882a593Smuzhiyun * off the resv_map.
5211*4882a593Smuzhiyun */
5212*4882a593Smuzhiyun resv_map_set_hugetlb_cgroup_uncharge_info(resv_map, h_cg, h);
5213*4882a593Smuzhiyun }
5214*4882a593Smuzhiyun
5215*4882a593Smuzhiyun /*
5216*4882a593Smuzhiyun * There must be enough pages in the subpool for the mapping. If
5217*4882a593Smuzhiyun * the subpool has a minimum size, there may be some global
5218*4882a593Smuzhiyun * reservations already in place (gbl_reserve).
5219*4882a593Smuzhiyun */
5220*4882a593Smuzhiyun gbl_reserve = hugepage_subpool_get_pages(spool, chg);
5221*4882a593Smuzhiyun if (gbl_reserve < 0) {
5222*4882a593Smuzhiyun ret = -ENOSPC;
5223*4882a593Smuzhiyun goto out_uncharge_cgroup;
5224*4882a593Smuzhiyun }
5225*4882a593Smuzhiyun
5226*4882a593Smuzhiyun /*
5227*4882a593Smuzhiyun * Check that enough hugepages are available for the reservation.
5228*4882a593Smuzhiyun * Hand the pages back to the subpool if there are not enough.
5229*4882a593Smuzhiyun */
5230*4882a593Smuzhiyun ret = hugetlb_acct_memory(h, gbl_reserve);
5231*4882a593Smuzhiyun if (ret < 0) {
5232*4882a593Smuzhiyun goto out_put_pages;
5233*4882a593Smuzhiyun }
5234*4882a593Smuzhiyun
5235*4882a593Smuzhiyun /*
5236*4882a593Smuzhiyun * Account for the reservations made. Shared mappings record regions
5237*4882a593Smuzhiyun * that have reservations as they are shared by multiple VMAs.
5238*4882a593Smuzhiyun * When the last VMA disappears, the region map says how much
5239*4882a593Smuzhiyun * the reservation was and the page cache tells how much of
5240*4882a593Smuzhiyun * the reservation was consumed. Private mappings are per-VMA and
5241*4882a593Smuzhiyun * only the consumed reservations are tracked. When the VMA
5242*4882a593Smuzhiyun * disappears, the original reservation is the VMA size and the
5243*4882a593Smuzhiyun * consumed reservations are stored in the map. Hence, nothing
5244*4882a593Smuzhiyun * else has to be done for private mappings here
5245*4882a593Smuzhiyun */
5246*4882a593Smuzhiyun if (!vma || vma->vm_flags & VM_MAYSHARE) {
5247*4882a593Smuzhiyun add = region_add(resv_map, from, to, regions_needed, h, h_cg);
5248*4882a593Smuzhiyun
5249*4882a593Smuzhiyun if (unlikely(add < 0)) {
5250*4882a593Smuzhiyun hugetlb_acct_memory(h, -gbl_reserve);
5251*4882a593Smuzhiyun ret = add;
5252*4882a593Smuzhiyun goto out_put_pages;
5253*4882a593Smuzhiyun } else if (unlikely(chg > add)) {
5254*4882a593Smuzhiyun /*
5255*4882a593Smuzhiyun * pages in this range were added to the reserve
5256*4882a593Smuzhiyun * map between region_chg and region_add. This
5257*4882a593Smuzhiyun * indicates a race with alloc_huge_page. Adjust
5258*4882a593Smuzhiyun * the subpool and reserve counts modified above
5259*4882a593Smuzhiyun * based on the difference.
5260*4882a593Smuzhiyun */
5261*4882a593Smuzhiyun long rsv_adjust;
5262*4882a593Smuzhiyun
5263*4882a593Smuzhiyun /*
5264*4882a593Smuzhiyun * hugetlb_cgroup_uncharge_cgroup_rsvd() will put the
5265*4882a593Smuzhiyun * reference to h_cg->css. See comment below for detail.
5266*4882a593Smuzhiyun */
5267*4882a593Smuzhiyun hugetlb_cgroup_uncharge_cgroup_rsvd(
5268*4882a593Smuzhiyun hstate_index(h),
5269*4882a593Smuzhiyun (chg - add) * pages_per_huge_page(h), h_cg);
5270*4882a593Smuzhiyun
5271*4882a593Smuzhiyun rsv_adjust = hugepage_subpool_put_pages(spool,
5272*4882a593Smuzhiyun chg - add);
5273*4882a593Smuzhiyun hugetlb_acct_memory(h, -rsv_adjust);
5274*4882a593Smuzhiyun } else if (h_cg) {
5275*4882a593Smuzhiyun /*
5276*4882a593Smuzhiyun * The file_regions will hold their own reference to
5277*4882a593Smuzhiyun * h_cg->css. So we should release the reference held
5278*4882a593Smuzhiyun * via hugetlb_cgroup_charge_cgroup_rsvd() when we are
5279*4882a593Smuzhiyun * done.
5280*4882a593Smuzhiyun */
5281*4882a593Smuzhiyun hugetlb_cgroup_put_rsvd_cgroup(h_cg);
5282*4882a593Smuzhiyun }
5283*4882a593Smuzhiyun }
5284*4882a593Smuzhiyun return 0;
5285*4882a593Smuzhiyun out_put_pages:
5286*4882a593Smuzhiyun /* put back original number of pages, chg */
5287*4882a593Smuzhiyun (void)hugepage_subpool_put_pages(spool, chg);
5288*4882a593Smuzhiyun out_uncharge_cgroup:
5289*4882a593Smuzhiyun hugetlb_cgroup_uncharge_cgroup_rsvd(hstate_index(h),
5290*4882a593Smuzhiyun chg * pages_per_huge_page(h), h_cg);
5291*4882a593Smuzhiyun out_err:
5292*4882a593Smuzhiyun if (!vma || vma->vm_flags & VM_MAYSHARE)
5293*4882a593Smuzhiyun /* Only call region_abort if the region_chg succeeded but the
5294*4882a593Smuzhiyun * region_add failed or didn't run.
5295*4882a593Smuzhiyun */
5296*4882a593Smuzhiyun if (chg >= 0 && add < 0)
5297*4882a593Smuzhiyun region_abort(resv_map, from, to, regions_needed);
5298*4882a593Smuzhiyun if (vma && is_vma_resv_set(vma, HPAGE_RESV_OWNER))
5299*4882a593Smuzhiyun kref_put(&resv_map->refs, resv_map_release);
5300*4882a593Smuzhiyun return ret;
5301*4882a593Smuzhiyun }
5302*4882a593Smuzhiyun
5303*4882a593Smuzhiyun long hugetlb_unreserve_pages(struct inode *inode, long start, long end,
5304*4882a593Smuzhiyun long freed)
5305*4882a593Smuzhiyun {
5306*4882a593Smuzhiyun struct hstate *h = hstate_inode(inode);
5307*4882a593Smuzhiyun struct resv_map *resv_map = inode_resv_map(inode);
5308*4882a593Smuzhiyun long chg = 0;
5309*4882a593Smuzhiyun struct hugepage_subpool *spool = subpool_inode(inode);
5310*4882a593Smuzhiyun long gbl_reserve;
5311*4882a593Smuzhiyun
5312*4882a593Smuzhiyun /*
5313*4882a593Smuzhiyun * Since this routine can be called in the evict inode path for all
5314*4882a593Smuzhiyun * hugetlbfs inodes, resv_map could be NULL.
5315*4882a593Smuzhiyun */
5316*4882a593Smuzhiyun if (resv_map) {
5317*4882a593Smuzhiyun chg = region_del(resv_map, start, end);
5318*4882a593Smuzhiyun /*
5319*4882a593Smuzhiyun * region_del() can fail in the rare case where a region
5320*4882a593Smuzhiyun * must be split and another region descriptor cannot be
5321*4882a593Smuzhiyun * allocated. If end == LONG_MAX, it will not fail.
5322*4882a593Smuzhiyun */
5323*4882a593Smuzhiyun if (chg < 0)
5324*4882a593Smuzhiyun return chg;
5325*4882a593Smuzhiyun }
5326*4882a593Smuzhiyun
5327*4882a593Smuzhiyun spin_lock(&inode->i_lock);
5328*4882a593Smuzhiyun inode->i_blocks -= (blocks_per_huge_page(h) * freed);
5329*4882a593Smuzhiyun spin_unlock(&inode->i_lock);
5330*4882a593Smuzhiyun
5331*4882a593Smuzhiyun /*
5332*4882a593Smuzhiyun * If the subpool has a minimum size, the number of global
5333*4882a593Smuzhiyun * reservations to be released may be adjusted.
5334*4882a593Smuzhiyun */
5335*4882a593Smuzhiyun gbl_reserve = hugepage_subpool_put_pages(spool, (chg - freed));
5336*4882a593Smuzhiyun hugetlb_acct_memory(h, -gbl_reserve);
5337*4882a593Smuzhiyun
5338*4882a593Smuzhiyun return 0;
5339*4882a593Smuzhiyun }
5340*4882a593Smuzhiyun
5341*4882a593Smuzhiyun #ifdef CONFIG_ARCH_WANT_HUGE_PMD_SHARE
5342*4882a593Smuzhiyun static unsigned long page_table_shareable(struct vm_area_struct *svma,
5343*4882a593Smuzhiyun struct vm_area_struct *vma,
5344*4882a593Smuzhiyun unsigned long addr, pgoff_t idx)
5345*4882a593Smuzhiyun {
5346*4882a593Smuzhiyun unsigned long saddr = ((idx - svma->vm_pgoff) << PAGE_SHIFT) +
5347*4882a593Smuzhiyun svma->vm_start;
5348*4882a593Smuzhiyun unsigned long sbase = saddr & PUD_MASK;
5349*4882a593Smuzhiyun unsigned long s_end = sbase + PUD_SIZE;
5350*4882a593Smuzhiyun
5351*4882a593Smuzhiyun /* Allow segments to share if only one is marked locked */
5352*4882a593Smuzhiyun unsigned long vm_flags = vma->vm_flags & VM_LOCKED_CLEAR_MASK;
5353*4882a593Smuzhiyun unsigned long svm_flags = svma->vm_flags & VM_LOCKED_CLEAR_MASK;
5354*4882a593Smuzhiyun
5355*4882a593Smuzhiyun /*
5356*4882a593Smuzhiyun * match the virtual addresses, permissions and the alignment of the
5357*4882a593Smuzhiyun * page table page.
5358*4882a593Smuzhiyun */
5359*4882a593Smuzhiyun if (pmd_index(addr) != pmd_index(saddr) ||
5360*4882a593Smuzhiyun vm_flags != svm_flags ||
5361*4882a593Smuzhiyun sbase < svma->vm_start || svma->vm_end < s_end)
5362*4882a593Smuzhiyun return 0;
5363*4882a593Smuzhiyun
5364*4882a593Smuzhiyun return saddr;
5365*4882a593Smuzhiyun }
5366*4882a593Smuzhiyun
5367*4882a593Smuzhiyun static bool vma_shareable(struct vm_area_struct *vma, unsigned long addr)
5368*4882a593Smuzhiyun {
5369*4882a593Smuzhiyun unsigned long base = addr & PUD_MASK;
5370*4882a593Smuzhiyun unsigned long end = base + PUD_SIZE;
5371*4882a593Smuzhiyun
5372*4882a593Smuzhiyun /*
5373*4882a593Smuzhiyun * check on proper vm_flags and page table alignment
5374*4882a593Smuzhiyun */
5375*4882a593Smuzhiyun if (vma->vm_flags & VM_MAYSHARE && range_in_vma(vma, base, end))
5376*4882a593Smuzhiyun return true;
5377*4882a593Smuzhiyun return false;
5378*4882a593Smuzhiyun }
5379*4882a593Smuzhiyun
5380*4882a593Smuzhiyun bool want_pmd_share(struct vm_area_struct *vma, unsigned long addr)
5381*4882a593Smuzhiyun {
5382*4882a593Smuzhiyun #ifdef CONFIG_USERFAULTFD
5383*4882a593Smuzhiyun if (uffd_disable_huge_pmd_share(vma))
5384*4882a593Smuzhiyun return false;
5385*4882a593Smuzhiyun #endif
5386*4882a593Smuzhiyun return vma_shareable(vma, addr);
5387*4882a593Smuzhiyun }
5388*4882a593Smuzhiyun
5389*4882a593Smuzhiyun /*
5390*4882a593Smuzhiyun * Determine if start,end range within vma could be mapped by shared pmd.
5391*4882a593Smuzhiyun * If yes, adjust start and end to cover range associated with possible
5392*4882a593Smuzhiyun * shared pmd mappings.
5393*4882a593Smuzhiyun */
5394*4882a593Smuzhiyun void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma,
5395*4882a593Smuzhiyun unsigned long *start, unsigned long *end)
5396*4882a593Smuzhiyun {
5397*4882a593Smuzhiyun unsigned long v_start = ALIGN(vma->vm_start, PUD_SIZE),
5398*4882a593Smuzhiyun v_end = ALIGN_DOWN(vma->vm_end, PUD_SIZE);
5399*4882a593Smuzhiyun
5400*4882a593Smuzhiyun /*
5401*4882a593Smuzhiyun * The vma needs to span at least one aligned PUD size, and the start,end
5402*4882a593Smuzhiyun * range must be at least partially within it.
5403*4882a593Smuzhiyun */
5404*4882a593Smuzhiyun if (!(vma->vm_flags & VM_MAYSHARE) || !(v_end > v_start) ||
5405*4882a593Smuzhiyun (*end <= v_start) || (*start >= v_end))
5406*4882a593Smuzhiyun return;
5407*4882a593Smuzhiyun
5408*4882a593Smuzhiyun /* Extend the range to be PUD aligned for a worst case scenario */
5409*4882a593Smuzhiyun if (*start > v_start)
5410*4882a593Smuzhiyun *start = ALIGN_DOWN(*start, PUD_SIZE);
5411*4882a593Smuzhiyun
5412*4882a593Smuzhiyun if (*end < v_end)
5413*4882a593Smuzhiyun *end = ALIGN(*end, PUD_SIZE);
5414*4882a593Smuzhiyun }
5415*4882a593Smuzhiyun
5416*4882a593Smuzhiyun /*
5417*4882a593Smuzhiyun * Search for a shareable pmd page for hugetlb. In any case calls pmd_alloc()
5418*4882a593Smuzhiyun * and returns the corresponding pte. While this is not necessary for the
5419*4882a593Smuzhiyun * !shared pmd case because we can allocate the pmd later as well, it makes the
5420*4882a593Smuzhiyun * code much cleaner.
5421*4882a593Smuzhiyun *
5422*4882a593Smuzhiyun * This routine must be called with i_mmap_rwsem held in at least read mode if
5423*4882a593Smuzhiyun * sharing is possible. For hugetlbfs, this prevents removal of any page
5424*4882a593Smuzhiyun * table entries associated with the address space. This is important as we
5425*4882a593Smuzhiyun * are setting up sharing based on existing page table entries (mappings).
5426*4882a593Smuzhiyun *
5427*4882a593Smuzhiyun * NOTE: This routine is only called from huge_pte_alloc. Some callers of
5428*4882a593Smuzhiyun * huge_pte_alloc know that sharing is not possible and do not take
5429*4882a593Smuzhiyun * i_mmap_rwsem as a performance optimization. This is handled by the
5430*4882a593Smuzhiyun * if !vma_shareable check at the beginning of the routine. i_mmap_rwsem is
5431*4882a593Smuzhiyun * only required for subsequent processing.
5432*4882a593Smuzhiyun */
5433*4882a593Smuzhiyun pte_t *huge_pmd_share(struct mm_struct *mm, struct vm_area_struct *vma,
5434*4882a593Smuzhiyun unsigned long addr, pud_t *pud)
5435*4882a593Smuzhiyun {
5436*4882a593Smuzhiyun struct address_space *mapping = vma->vm_file->f_mapping;
5437*4882a593Smuzhiyun pgoff_t idx = ((addr - vma->vm_start) >> PAGE_SHIFT) +
5438*4882a593Smuzhiyun vma->vm_pgoff;
5439*4882a593Smuzhiyun struct vm_area_struct *svma;
5440*4882a593Smuzhiyun unsigned long saddr;
5441*4882a593Smuzhiyun pte_t *spte = NULL;
5442*4882a593Smuzhiyun pte_t *pte;
5443*4882a593Smuzhiyun spinlock_t *ptl;
5444*4882a593Smuzhiyun
5445*4882a593Smuzhiyun i_mmap_assert_locked(mapping);
5446*4882a593Smuzhiyun vma_interval_tree_foreach(svma, &mapping->i_mmap, idx, idx) {
5447*4882a593Smuzhiyun if (svma == vma)
5448*4882a593Smuzhiyun continue;
5449*4882a593Smuzhiyun
5450*4882a593Smuzhiyun saddr = page_table_shareable(svma, vma, addr, idx);
5451*4882a593Smuzhiyun if (saddr) {
5452*4882a593Smuzhiyun spte = huge_pte_offset(svma->vm_mm, saddr,
5453*4882a593Smuzhiyun vma_mmu_pagesize(svma));
5454*4882a593Smuzhiyun if (spte) {
5455*4882a593Smuzhiyun get_page(virt_to_page(spte));
5456*4882a593Smuzhiyun break;
5457*4882a593Smuzhiyun }
5458*4882a593Smuzhiyun }
5459*4882a593Smuzhiyun }
5460*4882a593Smuzhiyun
5461*4882a593Smuzhiyun if (!spte)
5462*4882a593Smuzhiyun goto out;
5463*4882a593Smuzhiyun
5464*4882a593Smuzhiyun ptl = huge_pte_lock(hstate_vma(vma), mm, spte);
5465*4882a593Smuzhiyun if (pud_none(*pud)) {
5466*4882a593Smuzhiyun pud_populate(mm, pud,
5467*4882a593Smuzhiyun (pmd_t *)((unsigned long)spte & PAGE_MASK));
5468*4882a593Smuzhiyun mm_inc_nr_pmds(mm);
5469*4882a593Smuzhiyun } else {
5470*4882a593Smuzhiyun put_page(virt_to_page(spte));
5471*4882a593Smuzhiyun }
5472*4882a593Smuzhiyun spin_unlock(ptl);
5473*4882a593Smuzhiyun out:
5474*4882a593Smuzhiyun pte = (pte_t *)pmd_alloc(mm, pud, addr);
5475*4882a593Smuzhiyun return pte;
5476*4882a593Smuzhiyun }
5477*4882a593Smuzhiyun
5478*4882a593Smuzhiyun /*
5479*4882a593Smuzhiyun * unmap huge page backed by shared pte.
5480*4882a593Smuzhiyun *
5481*4882a593Smuzhiyun * Hugetlb pte page is ref counted at the time of mapping. If pte is shared
5482*4882a593Smuzhiyun * indicated by page_count > 1, unmap is achieved by clearing pud and
5483*4882a593Smuzhiyun * decrementing the ref count. If count == 1, the pte page is not shared.
5484*4882a593Smuzhiyun *
5485*4882a593Smuzhiyun * Called with page table lock held and i_mmap_rwsem held in write mode.
5486*4882a593Smuzhiyun *
5487*4882a593Smuzhiyun * returns: 1 successfully unmapped a shared pte page
5488*4882a593Smuzhiyun * 0 the underlying pte page is not shared, or it is the last user
5489*4882a593Smuzhiyun */
5490*4882a593Smuzhiyun int huge_pmd_unshare(struct mm_struct *mm, struct vm_area_struct *vma,
5491*4882a593Smuzhiyun unsigned long *addr, pte_t *ptep)
5492*4882a593Smuzhiyun {
5493*4882a593Smuzhiyun pgd_t *pgd = pgd_offset(mm, *addr);
5494*4882a593Smuzhiyun p4d_t *p4d = p4d_offset(pgd, *addr);
5495*4882a593Smuzhiyun pud_t *pud = pud_offset(p4d, *addr);
5496*4882a593Smuzhiyun
5497*4882a593Smuzhiyun i_mmap_assert_write_locked(vma->vm_file->f_mapping);
5498*4882a593Smuzhiyun BUG_ON(page_count(virt_to_page(ptep)) == 0);
5499*4882a593Smuzhiyun if (page_count(virt_to_page(ptep)) == 1)
5500*4882a593Smuzhiyun return 0;
5501*4882a593Smuzhiyun
5502*4882a593Smuzhiyun pud_clear(pud);
5503*4882a593Smuzhiyun put_page(virt_to_page(ptep));
5504*4882a593Smuzhiyun mm_dec_nr_pmds(mm);
5505*4882a593Smuzhiyun /*
5506*4882a593Smuzhiyun * This update of passed address optimizes loops sequentially
5507*4882a593Smuzhiyun * processing addresses in increments of huge page size (PMD_SIZE
5508*4882a593Smuzhiyun * in this case). By clearing the pud, a PUD_SIZE area is unmapped.
5509*4882a593Smuzhiyun * Update address to the 'last page' in the cleared area so that
5510*4882a593Smuzhiyun * calling loop can move to first page past this area.
5511*4882a593Smuzhiyun */
5512*4882a593Smuzhiyun *addr |= PUD_SIZE - PMD_SIZE;
5513*4882a593Smuzhiyun return 1;
5514*4882a593Smuzhiyun }
5515*4882a593Smuzhiyun
5516*4882a593Smuzhiyun #else /* !CONFIG_ARCH_WANT_HUGE_PMD_SHARE */
5517*4882a593Smuzhiyun pte_t *huge_pmd_share(struct mm_struct *mm, struct vm_area_struct *vma,
5518*4882a593Smuzhiyun unsigned long addr, pud_t *pud)
5519*4882a593Smuzhiyun {
5520*4882a593Smuzhiyun return NULL;
5521*4882a593Smuzhiyun }
5522*4882a593Smuzhiyun
5523*4882a593Smuzhiyun int huge_pmd_unshare(struct mm_struct *mm, struct vm_area_struct *vma,
5524*4882a593Smuzhiyun unsigned long *addr, pte_t *ptep)
5525*4882a593Smuzhiyun {
5526*4882a593Smuzhiyun return 0;
5527*4882a593Smuzhiyun }
5528*4882a593Smuzhiyun
5529*4882a593Smuzhiyun void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma,
5530*4882a593Smuzhiyun unsigned long *start, unsigned long *end)
5531*4882a593Smuzhiyun {
5532*4882a593Smuzhiyun }
5533*4882a593Smuzhiyun
5534*4882a593Smuzhiyun bool want_pmd_share(struct vm_area_struct *vma, unsigned long addr)
5535*4882a593Smuzhiyun {
5536*4882a593Smuzhiyun return false;
5537*4882a593Smuzhiyun }
5538*4882a593Smuzhiyun #endif /* CONFIG_ARCH_WANT_HUGE_PMD_SHARE */
5539*4882a593Smuzhiyun
5540*4882a593Smuzhiyun #ifdef CONFIG_ARCH_WANT_GENERAL_HUGETLB
5541*4882a593Smuzhiyun pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
5542*4882a593Smuzhiyun unsigned long addr, unsigned long sz)
5543*4882a593Smuzhiyun {
5544*4882a593Smuzhiyun pgd_t *pgd;
5545*4882a593Smuzhiyun p4d_t *p4d;
5546*4882a593Smuzhiyun pud_t *pud;
5547*4882a593Smuzhiyun pte_t *pte = NULL;
5548*4882a593Smuzhiyun
5549*4882a593Smuzhiyun pgd = pgd_offset(mm, addr);
5550*4882a593Smuzhiyun p4d = p4d_alloc(mm, pgd, addr);
5551*4882a593Smuzhiyun if (!p4d)
5552*4882a593Smuzhiyun return NULL;
5553*4882a593Smuzhiyun pud = pud_alloc(mm, p4d, addr);
5554*4882a593Smuzhiyun if (pud) {
5555*4882a593Smuzhiyun if (sz == PUD_SIZE) {
5556*4882a593Smuzhiyun pte = (pte_t *)pud;
5557*4882a593Smuzhiyun } else {
5558*4882a593Smuzhiyun BUG_ON(sz != PMD_SIZE);
5559*4882a593Smuzhiyun if (want_pmd_share(vma, addr) && pud_none(*pud))
5560*4882a593Smuzhiyun pte = huge_pmd_share(mm, vma, addr, pud);
5561*4882a593Smuzhiyun else
5562*4882a593Smuzhiyun pte = (pte_t *)pmd_alloc(mm, pud, addr);
5563*4882a593Smuzhiyun }
5564*4882a593Smuzhiyun }
5565*4882a593Smuzhiyun BUG_ON(pte && pte_present(*pte) && !pte_huge(*pte));
5566*4882a593Smuzhiyun
5567*4882a593Smuzhiyun return pte;
5568*4882a593Smuzhiyun }
5569*4882a593Smuzhiyun
5570*4882a593Smuzhiyun /*
5571*4882a593Smuzhiyun * huge_pte_offset() - Walk the page table to resolve the hugepage
5572*4882a593Smuzhiyun * entry at address @addr
5573*4882a593Smuzhiyun *
5574*4882a593Smuzhiyun * Return: Pointer to page table entry (PUD or PMD) for
5575*4882a593Smuzhiyun * address @addr, or NULL if a !p*d_present() entry is encountered and the
5576*4882a593Smuzhiyun * size @sz doesn't match the hugepage size at this level of the page
5577*4882a593Smuzhiyun * table.
5578*4882a593Smuzhiyun */
5579*4882a593Smuzhiyun pte_t *huge_pte_offset(struct mm_struct *mm,
5580*4882a593Smuzhiyun unsigned long addr, unsigned long sz)
5581*4882a593Smuzhiyun {
5582*4882a593Smuzhiyun pgd_t *pgd;
5583*4882a593Smuzhiyun p4d_t *p4d;
5584*4882a593Smuzhiyun pud_t *pud;
5585*4882a593Smuzhiyun pmd_t *pmd;
5586*4882a593Smuzhiyun
5587*4882a593Smuzhiyun pgd = pgd_offset(mm, addr);
5588*4882a593Smuzhiyun if (!pgd_present(*pgd))
5589*4882a593Smuzhiyun return NULL;
5590*4882a593Smuzhiyun p4d = p4d_offset(pgd, addr);
5591*4882a593Smuzhiyun if (!p4d_present(*p4d))
5592*4882a593Smuzhiyun return NULL;
5593*4882a593Smuzhiyun
5594*4882a593Smuzhiyun pud = pud_offset(p4d, addr);
5595*4882a593Smuzhiyun if (sz == PUD_SIZE)
5596*4882a593Smuzhiyun /* must be pud huge, non-present or none */
5597*4882a593Smuzhiyun return (pte_t *)pud;
5598*4882a593Smuzhiyun if (!pud_present(*pud))
5599*4882a593Smuzhiyun return NULL;
5600*4882a593Smuzhiyun /* must have a valid entry and size to go further */
5601*4882a593Smuzhiyun
5602*4882a593Smuzhiyun pmd = pmd_offset(pud, addr);
5603*4882a593Smuzhiyun /* must be pmd huge, non-present or none */
5604*4882a593Smuzhiyun return (pte_t *)pmd;
5605*4882a593Smuzhiyun }
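/*
 * Illustrative sketch (editor's addition): a typical lookup using the helper
 * above. A non-NULL return may still point at a none or non-present entry
 * (e.g. a migration or hwpoison swap entry), so callers re-check the value
 * under the page table lock:
 *
 *	ptep = huge_pte_offset(mm, address, huge_page_size(h));
 *	if (ptep) {
 *		ptl = huge_pte_lock(h, mm, ptep);
 *		entry = huge_ptep_get(ptep);
 *		if (pte_present(entry))
 *			page = pte_page(entry);
 *		spin_unlock(ptl);
 *	}
 */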
5606*4882a593Smuzhiyun
5607*4882a593Smuzhiyun #endif /* CONFIG_ARCH_WANT_GENERAL_HUGETLB */
5608*4882a593Smuzhiyun
5609*4882a593Smuzhiyun /*
5610*4882a593Smuzhiyun * These functions are overridable if your architecture needs its own
5611*4882a593Smuzhiyun * behavior.
5612*4882a593Smuzhiyun */
5613*4882a593Smuzhiyun struct page * __weak
5614*4882a593Smuzhiyun follow_huge_addr(struct mm_struct *mm, unsigned long address,
5615*4882a593Smuzhiyun int write)
5616*4882a593Smuzhiyun {
5617*4882a593Smuzhiyun return ERR_PTR(-EINVAL);
5618*4882a593Smuzhiyun }
5619*4882a593Smuzhiyun
5620*4882a593Smuzhiyun struct page * __weak
5621*4882a593Smuzhiyun follow_huge_pd(struct vm_area_struct *vma,
5622*4882a593Smuzhiyun unsigned long address, hugepd_t hpd, int flags, int pdshift)
5623*4882a593Smuzhiyun {
5624*4882a593Smuzhiyun WARN(1, "hugepd follow called with no support for hugepage directory format\n");
5625*4882a593Smuzhiyun return NULL;
5626*4882a593Smuzhiyun }
5627*4882a593Smuzhiyun
5628*4882a593Smuzhiyun struct page * __weak
5629*4882a593Smuzhiyun follow_huge_pmd_pte(struct vm_area_struct *vma, unsigned long address, int flags)
5630*4882a593Smuzhiyun {
5631*4882a593Smuzhiyun struct hstate *h = hstate_vma(vma);
5632*4882a593Smuzhiyun struct mm_struct *mm = vma->vm_mm;
5633*4882a593Smuzhiyun struct page *page = NULL;
5634*4882a593Smuzhiyun spinlock_t *ptl;
5635*4882a593Smuzhiyun pte_t *ptep, pte;
5636*4882a593Smuzhiyun
5637*4882a593Smuzhiyun /* FOLL_GET and FOLL_PIN are mutually exclusive. */
5638*4882a593Smuzhiyun if (WARN_ON_ONCE((flags & (FOLL_PIN | FOLL_GET)) ==
5639*4882a593Smuzhiyun (FOLL_PIN | FOLL_GET)))
5640*4882a593Smuzhiyun return NULL;
5641*4882a593Smuzhiyun
5642*4882a593Smuzhiyun retry:
5643*4882a593Smuzhiyun ptep = huge_pte_offset(mm, address, huge_page_size(h));
5644*4882a593Smuzhiyun if (!ptep)
5645*4882a593Smuzhiyun return NULL;
5646*4882a593Smuzhiyun
5647*4882a593Smuzhiyun ptl = huge_pte_lock(h, mm, ptep);
5648*4882a593Smuzhiyun pte = huge_ptep_get(ptep);
5649*4882a593Smuzhiyun if (pte_present(pte)) {
5650*4882a593Smuzhiyun page = pte_page(pte) +
5651*4882a593Smuzhiyun ((address & ~huge_page_mask(h)) >> PAGE_SHIFT);
5652*4882a593Smuzhiyun /*
5653*4882a593Smuzhiyun * try_grab_page() should always succeed here, because: a) we
5654*4882a593Smuzhiyun * hold the pmd (ptl) lock, and b) we've just checked that the
5655*4882a593Smuzhiyun * huge pmd (head) page is present in the page tables. The ptl
5656*4882a593Smuzhiyun * prevents the head page and tail pages from being rearranged
5657*4882a593Smuzhiyun * in any way. So this page must be available at this point,
5658*4882a593Smuzhiyun * unless the page refcount overflowed:
5659*4882a593Smuzhiyun */
5660*4882a593Smuzhiyun if (WARN_ON_ONCE(!try_grab_page(page, flags))) {
5661*4882a593Smuzhiyun page = NULL;
5662*4882a593Smuzhiyun goto out;
5663*4882a593Smuzhiyun }
5664*4882a593Smuzhiyun } else {
5665*4882a593Smuzhiyun if (is_hugetlb_entry_migration(pte)) {
5666*4882a593Smuzhiyun spin_unlock(ptl);
5667*4882a593Smuzhiyun __migration_entry_wait(mm, ptep, ptl);
5668*4882a593Smuzhiyun goto retry;
5669*4882a593Smuzhiyun }
5670*4882a593Smuzhiyun /*
5671*4882a593Smuzhiyun * hwpoisoned entry is treated as no_page_table in
5672*4882a593Smuzhiyun * follow_page_mask().
5673*4882a593Smuzhiyun */
5674*4882a593Smuzhiyun }
5675*4882a593Smuzhiyun out:
5676*4882a593Smuzhiyun spin_unlock(ptl);
5677*4882a593Smuzhiyun return page;
5678*4882a593Smuzhiyun }
5679*4882a593Smuzhiyun
5680*4882a593Smuzhiyun struct page * __weak
5681*4882a593Smuzhiyun follow_huge_pud(struct mm_struct *mm, unsigned long address,
5682*4882a593Smuzhiyun pud_t *pud, int flags)
5683*4882a593Smuzhiyun {
5684*4882a593Smuzhiyun if (flags & (FOLL_GET | FOLL_PIN))
5685*4882a593Smuzhiyun return NULL;
5686*4882a593Smuzhiyun
5687*4882a593Smuzhiyun return pte_page(*(pte_t *)pud) + ((address & ~PUD_MASK) >> PAGE_SHIFT);
5688*4882a593Smuzhiyun }
5689*4882a593Smuzhiyun
5690*4882a593Smuzhiyun struct page * __weak
5691*4882a593Smuzhiyun follow_huge_pgd(struct mm_struct *mm, unsigned long address, pgd_t *pgd, int flags)
5692*4882a593Smuzhiyun {
5693*4882a593Smuzhiyun if (flags & (FOLL_GET | FOLL_PIN))
5694*4882a593Smuzhiyun return NULL;
5695*4882a593Smuzhiyun
5696*4882a593Smuzhiyun return pte_page(*(pte_t *)pgd) + ((address & ~PGDIR_MASK) >> PAGE_SHIFT);
5697*4882a593Smuzhiyun }
5698*4882a593Smuzhiyun
5699*4882a593Smuzhiyun bool isolate_huge_page(struct page *page, struct list_head *list)
5700*4882a593Smuzhiyun {
5701*4882a593Smuzhiyun bool ret = true;
5702*4882a593Smuzhiyun
5703*4882a593Smuzhiyun spin_lock(&hugetlb_lock);
5704*4882a593Smuzhiyun if (!PageHeadHuge(page) || !page_huge_active(page) ||
5705*4882a593Smuzhiyun !get_page_unless_zero(page)) {
5706*4882a593Smuzhiyun ret = false;
5707*4882a593Smuzhiyun goto unlock;
5708*4882a593Smuzhiyun }
5709*4882a593Smuzhiyun clear_page_huge_active(page);
5710*4882a593Smuzhiyun list_move_tail(&page->lru, list);
5711*4882a593Smuzhiyun unlock:
5712*4882a593Smuzhiyun spin_unlock(&hugetlb_lock);
5713*4882a593Smuzhiyun return ret;
5714*4882a593Smuzhiyun }
5715*4882a593Smuzhiyun
5716*4882a593Smuzhiyun void putback_active_hugepage(struct page *page)
5717*4882a593Smuzhiyun {
5718*4882a593Smuzhiyun VM_BUG_ON_PAGE(!PageHead(page), page);
5719*4882a593Smuzhiyun spin_lock(&hugetlb_lock);
5720*4882a593Smuzhiyun set_page_huge_active(page);
5721*4882a593Smuzhiyun list_move_tail(&page->lru, &(page_hstate(page))->hugepage_activelist);
5722*4882a593Smuzhiyun spin_unlock(&hugetlb_lock);
5723*4882a593Smuzhiyun put_page(page);
5724*4882a593Smuzhiyun }
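/*
 * Illustrative sketch (editor's addition): the usual pairing of
 * isolate_huge_page() and putback_active_hugepage() around migration.
 * 'new_page' stands for whatever allocation callback the caller uses;
 * putback_movable_pages() calls putback_active_hugepage() for hugetlb
 * pages that could not be migrated.
 *
 *	LIST_HEAD(pagelist);
 *
 *	if (isolate_huge_page(page, &pagelist)) {
 *		if (migrate_pages(&pagelist, new_page, NULL, 0,
 *				  MIGRATE_SYNC, MR_MEMORY_FAILURE))
 *			putback_movable_pages(&pagelist);
 *	}
 */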
5725*4882a593Smuzhiyun
5726*4882a593Smuzhiyun void move_hugetlb_state(struct page *oldpage, struct page *newpage, int reason)
5727*4882a593Smuzhiyun {
5728*4882a593Smuzhiyun struct hstate *h = page_hstate(oldpage);
5729*4882a593Smuzhiyun
5730*4882a593Smuzhiyun hugetlb_cgroup_migrate(oldpage, newpage);
5731*4882a593Smuzhiyun set_page_owner_migrate_reason(newpage, reason);
5732*4882a593Smuzhiyun
5733*4882a593Smuzhiyun /*
5734*4882a593Smuzhiyun * Transfer the temporary state of the new huge page. This is the
5735*4882a593Smuzhiyun * reverse of other transitions because the newpage is going to be
5736*4882a593Smuzhiyun * final while the old one will be freed, so the old page takes over
5737*4882a593Smuzhiyun * the temporary status.
5738*4882a593Smuzhiyun *
5739*4882a593Smuzhiyun * Also note that we have to transfer the per-node surplus state
5740*4882a593Smuzhiyun * here as well, otherwise the global surplus count will not match
5741*4882a593Smuzhiyun * the per-node counts.
5742*4882a593Smuzhiyun */
5743*4882a593Smuzhiyun if (PageHugeTemporary(newpage)) {
5744*4882a593Smuzhiyun int old_nid = page_to_nid(oldpage);
5745*4882a593Smuzhiyun int new_nid = page_to_nid(newpage);
5746*4882a593Smuzhiyun
5747*4882a593Smuzhiyun SetPageHugeTemporary(oldpage);
5748*4882a593Smuzhiyun ClearPageHugeTemporary(newpage);
5749*4882a593Smuzhiyun
5750*4882a593Smuzhiyun spin_lock(&hugetlb_lock);
5751*4882a593Smuzhiyun if (h->surplus_huge_pages_node[old_nid]) {
5752*4882a593Smuzhiyun h->surplus_huge_pages_node[old_nid]--;
5753*4882a593Smuzhiyun h->surplus_huge_pages_node[new_nid]++;
5754*4882a593Smuzhiyun }
5755*4882a593Smuzhiyun spin_unlock(&hugetlb_lock);
5756*4882a593Smuzhiyun }
5757*4882a593Smuzhiyun }
5758*4882a593Smuzhiyun
5759*4882a593Smuzhiyun /*
5760*4882a593Smuzhiyun * This function will unconditionally remove all the shared pmd pgtable entries
5761*4882a593Smuzhiyun * within the specific vma for a hugetlbfs memory range.
5762*4882a593Smuzhiyun */
5763*4882a593Smuzhiyun void hugetlb_unshare_all_pmds(struct vm_area_struct *vma)
5764*4882a593Smuzhiyun {
5765*4882a593Smuzhiyun struct hstate *h = hstate_vma(vma);
5766*4882a593Smuzhiyun unsigned long sz = huge_page_size(h);
5767*4882a593Smuzhiyun struct mm_struct *mm = vma->vm_mm;
5768*4882a593Smuzhiyun struct mmu_notifier_range range;
5769*4882a593Smuzhiyun unsigned long address, start, end;
5770*4882a593Smuzhiyun spinlock_t *ptl;
5771*4882a593Smuzhiyun pte_t *ptep;
5772*4882a593Smuzhiyun
5773*4882a593Smuzhiyun if (!(vma->vm_flags & VM_MAYSHARE))
5774*4882a593Smuzhiyun return;
5775*4882a593Smuzhiyun
5776*4882a593Smuzhiyun start = ALIGN(vma->vm_start, PUD_SIZE);
5777*4882a593Smuzhiyun end = ALIGN_DOWN(vma->vm_end, PUD_SIZE);
5778*4882a593Smuzhiyun
5779*4882a593Smuzhiyun if (start >= end)
5780*4882a593Smuzhiyun return;
5781*4882a593Smuzhiyun
5782*4882a593Smuzhiyun flush_cache_range(vma, start, end);
5783*4882a593Smuzhiyun /*
5784*4882a593Smuzhiyun * No need to call adjust_range_if_pmd_sharing_possible(), because
5785*4882a593Smuzhiyun * we have already done the PUD_SIZE alignment.
5786*4882a593Smuzhiyun */
5787*4882a593Smuzhiyun mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, mm,
5788*4882a593Smuzhiyun start, end);
5789*4882a593Smuzhiyun mmu_notifier_invalidate_range_start(&range);
5790*4882a593Smuzhiyun i_mmap_lock_write(vma->vm_file->f_mapping);
5791*4882a593Smuzhiyun for (address = start; address < end; address += PUD_SIZE) {
5792*4882a593Smuzhiyun unsigned long tmp = address;
5793*4882a593Smuzhiyun
5794*4882a593Smuzhiyun ptep = huge_pte_offset(mm, address, sz);
5795*4882a593Smuzhiyun if (!ptep)
5796*4882a593Smuzhiyun continue;
5797*4882a593Smuzhiyun ptl = huge_pte_lock(h, mm, ptep);
5798*4882a593Smuzhiyun /* We don't want 'address' to be changed */
5799*4882a593Smuzhiyun huge_pmd_unshare(mm, vma, &tmp, ptep);
5800*4882a593Smuzhiyun spin_unlock(ptl);
5801*4882a593Smuzhiyun }
5802*4882a593Smuzhiyun flush_hugetlb_tlb_range(vma, start, end);
5803*4882a593Smuzhiyun i_mmap_unlock_write(vma->vm_file->f_mapping);
5804*4882a593Smuzhiyun /*
5805*4882a593Smuzhiyun * No need to call mmu_notifier_invalidate_range(), see
5806*4882a593Smuzhiyun * Documentation/vm/mmu_notifier.rst.
5807*4882a593Smuzhiyun */
5808*4882a593Smuzhiyun mmu_notifier_invalidate_range_end(&range);
5809*4882a593Smuzhiyun }
5810*4882a593Smuzhiyun
5811*4882a593Smuzhiyun #ifdef CONFIG_CMA
5812*4882a593Smuzhiyun static bool cma_reserve_called __initdata;
5813*4882a593Smuzhiyun
5814*4882a593Smuzhiyun static int __init cmdline_parse_hugetlb_cma(char *p)
5815*4882a593Smuzhiyun {
5816*4882a593Smuzhiyun hugetlb_cma_size = memparse(p, &p);
5817*4882a593Smuzhiyun return 0;
5818*4882a593Smuzhiyun }
5819*4882a593Smuzhiyun
5820*4882a593Smuzhiyun early_param("hugetlb_cma", cmdline_parse_hugetlb_cma);
5821*4882a593Smuzhiyun
5822*4882a593Smuzhiyun void __init hugetlb_cma_reserve(int order)
5823*4882a593Smuzhiyun {
5824*4882a593Smuzhiyun unsigned long size, reserved, per_node;
5825*4882a593Smuzhiyun int nid;
5826*4882a593Smuzhiyun
5827*4882a593Smuzhiyun cma_reserve_called = true;
5828*4882a593Smuzhiyun
5829*4882a593Smuzhiyun if (!hugetlb_cma_size)
5830*4882a593Smuzhiyun return;
5831*4882a593Smuzhiyun
5832*4882a593Smuzhiyun if (hugetlb_cma_size < (PAGE_SIZE << order)) {
5833*4882a593Smuzhiyun pr_warn("hugetlb_cma: cma area should be at least %lu MiB\n",
5834*4882a593Smuzhiyun (PAGE_SIZE << order) / SZ_1M);
5835*4882a593Smuzhiyun return;
5836*4882a593Smuzhiyun }
5837*4882a593Smuzhiyun
5838*4882a593Smuzhiyun /*
5839*4882a593Smuzhiyun * If 3 GB area is requested on a machine with 4 numa nodes,
5840*4882a593Smuzhiyun * let's allocate 1 GB on first three nodes and ignore the last one.
5841*4882a593Smuzhiyun */
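	/*
	 * Worked example (editor's addition), assuming 1 GiB gigantic pages
	 * (PAGE_SIZE << order == 1 GiB): hugetlb_cma=3G on 4 online nodes
	 * gives per_node = DIV_ROUND_UP(3 GiB, 4) = 768 MiB, which the loop
	 * below rounds up to the 1 GiB gigantic page size. Nodes 0-2 each
	 * reserve 1 GiB and the loop stops once 3 GiB has been reserved, so
	 * node 3 gets nothing.
	 */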
5842*4882a593Smuzhiyun per_node = DIV_ROUND_UP(hugetlb_cma_size, nr_online_nodes);
5843*4882a593Smuzhiyun pr_info("hugetlb_cma: reserve %lu MiB, up to %lu MiB per node\n",
5844*4882a593Smuzhiyun hugetlb_cma_size / SZ_1M, per_node / SZ_1M);
5845*4882a593Smuzhiyun
5846*4882a593Smuzhiyun reserved = 0;
5847*4882a593Smuzhiyun for_each_node_state(nid, N_ONLINE) {
5848*4882a593Smuzhiyun int res;
5849*4882a593Smuzhiyun char name[CMA_MAX_NAME];
5850*4882a593Smuzhiyun
5851*4882a593Smuzhiyun size = min(per_node, hugetlb_cma_size - reserved);
5852*4882a593Smuzhiyun size = round_up(size, PAGE_SIZE << order);
5853*4882a593Smuzhiyun
5854*4882a593Smuzhiyun snprintf(name, sizeof(name), "hugetlb%d", nid);
5855*4882a593Smuzhiyun res = cma_declare_contiguous_nid(0, size, 0, PAGE_SIZE << order,
5856*4882a593Smuzhiyun 0, false, name,
5857*4882a593Smuzhiyun &hugetlb_cma[nid], nid);
5858*4882a593Smuzhiyun if (res) {
5859*4882a593Smuzhiyun pr_warn("hugetlb_cma: reservation failed: err %d, node %d",
5860*4882a593Smuzhiyun res, nid);
5861*4882a593Smuzhiyun continue;
5862*4882a593Smuzhiyun }
5863*4882a593Smuzhiyun
5864*4882a593Smuzhiyun reserved += size;
5865*4882a593Smuzhiyun pr_info("hugetlb_cma: reserved %lu MiB on node %d\n",
5866*4882a593Smuzhiyun size / SZ_1M, nid);
5867*4882a593Smuzhiyun
5868*4882a593Smuzhiyun if (reserved >= hugetlb_cma_size)
5869*4882a593Smuzhiyun break;
5870*4882a593Smuzhiyun }
5871*4882a593Smuzhiyun }
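/*
 * Usage note (editor's addition): architectures that want CMA-backed
 * gigantic pages call this early in their boot code, before the buddy
 * allocator is up, e.g.:
 *
 *	hugetlb_cma_reserve(PUD_SHIFT - PAGE_SHIFT);
 *
 * which reserves areas sized and aligned for PUD-sized huge pages.
 */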
5872*4882a593Smuzhiyun
5873*4882a593Smuzhiyun void __init hugetlb_cma_check(void)
5874*4882a593Smuzhiyun {
5875*4882a593Smuzhiyun if (!hugetlb_cma_size || cma_reserve_called)
5876*4882a593Smuzhiyun return;
5877*4882a593Smuzhiyun
5878*4882a593Smuzhiyun pr_warn("hugetlb_cma: the option isn't supported by current arch\n");
5879*4882a593Smuzhiyun }
5880*4882a593Smuzhiyun
5881*4882a593Smuzhiyun #endif /* CONFIG_CMA */