/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_HUGETLB_H
#define _LINUX_HUGETLB_H

#include <linux/mm_types.h>
#include <linux/mmdebug.h>
#include <linux/fs.h>
#include <linux/hugetlb_inline.h>
#include <linux/cgroup.h>
#include <linux/list.h>
#include <linux/kref.h>
#include <linux/pgtable.h>
#include <linux/gfp.h>
#include <linux/userfaultfd_k.h>

struct ctl_table;
struct user_struct;
struct mmu_gather;

#ifndef is_hugepd
typedef struct { unsigned long pd; } hugepd_t;
#define is_hugepd(hugepd) (0)
#define __hugepd(x) ((hugepd_t) { (x) })
#endif

#ifdef CONFIG_HUGETLB_PAGE

#include <linux/mempolicy.h>
#include <linux/shm.h>
#include <asm/tlbflush.h>

struct hugepage_subpool {
	spinlock_t lock;
	long count;
	long max_hpages;	/* Maximum huge pages or -1 if no maximum. */
	long used_hpages;	/* Used count against maximum, includes */
				/* both alloced and reserved pages. */
	struct hstate *hstate;
	long min_hpages;	/* Minimum huge pages or -1 if no minimum. */
	long rsv_hpages;	/* Pages reserved against global pool to */
				/* satisfy minimum size. */
};

struct resv_map {
	struct kref refs;
	spinlock_t lock;
	struct list_head regions;
	long adds_in_progress;
	struct list_head region_cache;
	long region_cache_count;
#ifdef CONFIG_CGROUP_HUGETLB
	/*
	 * On private mappings, the counter to uncharge reservations is stored
	 * here. If these fields are 0, then either the mapping is shared, or
	 * cgroup accounting is disabled for this resv_map.
	 */
	struct page_counter *reservation_counter;
	unsigned long pages_per_hpage;
	struct cgroup_subsys_state *css;
#endif
};

/*
 * Region tracking -- allows tracking of reservations and instantiated pages
 * across the pages in a mapping.
 *
 * The region data structures are embedded into a resv_map and protected
 * by a resv_map's lock. The set of regions within the resv_map represent
 * reservations for huge pages, or huge pages that have already been
 * instantiated within the map. The from and to elements are huge page
 * indices into the associated mapping. from indicates the starting index
 * of the region. to represents the first index past the end of the region.
 *
 * For example, a file region structure with from == 0 and to == 4 represents
 * four huge pages in a mapping. It is important to note that the to element
 * represents the first element past the end of the region. This is used in
 * arithmetic as 4(to) - 0(from) = 4 huge pages in the region.
 *
 * Interval notation of the form [from, to) will be used to indicate that
 * the endpoint from is inclusive and to is exclusive.
 */
struct file_region {
	struct list_head link;
	long from;
	long to;
#ifdef CONFIG_CGROUP_HUGETLB
	/*
	 * On shared mappings, each reserved region appears as a struct
	 * file_region in resv_map. These fields hold the info needed to
	 * uncharge each reservation.
	 */
	struct page_counter *reservation_counter;
	struct cgroup_subsys_state *css;
#endif
};

extern struct resv_map *resv_map_alloc(void);
void resv_map_release(struct kref *ref);

extern spinlock_t hugetlb_lock;
extern int hugetlb_max_hstate __read_mostly;
#define for_each_hstate(h) \
	for ((h) = hstates; (h) < &hstates[hugetlb_max_hstate]; (h)++)
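
/*
 * Illustrative sketch (not part of this header): walking every registered
 * hstate, e.g. to log per-size pool state. Assumes a racy, read-only report
 * that does not need hugetlb_lock:
 *
 *	struct hstate *h;
 *
 *	for_each_hstate(h)
 *		pr_info("%s: %lu free of %lu\n", h->name,
 *			h->free_huge_pages, h->nr_huge_pages);
 */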

struct hugepage_subpool *hugepage_new_subpool(struct hstate *h, long max_hpages,
						long min_hpages);
void hugepage_put_subpool(struct hugepage_subpool *spool);
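
/*
 * Illustrative sketch: a typical subpool lifetime, as used by a hugetlbfs
 * mount. "max_hpages" and "min_hpages" are caller-chosen limits; error
 * handling is elided.
 *
 *	spool = hugepage_new_subpool(h, max_hpages, min_hpages);
 *	if (!spool)
 *		return -ENOMEM;
 *	...
 *	hugepage_put_subpool(spool);	// releases the creating reference
 */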

void reset_vma_resv_huge_pages(struct vm_area_struct *vma);
int hugetlb_sysctl_handler(struct ctl_table *, int, void *, size_t *, loff_t *);
int hugetlb_overcommit_handler(struct ctl_table *, int, void *, size_t *,
		loff_t *);
int hugetlb_treat_movable_handler(struct ctl_table *, int, void *, size_t *,
		loff_t *);
int hugetlb_mempolicy_sysctl_handler(struct ctl_table *, int, void *, size_t *,
		loff_t *);

int copy_hugetlb_page_range(struct mm_struct *, struct mm_struct *, struct vm_area_struct *);
long follow_hugetlb_page(struct mm_struct *, struct vm_area_struct *,
			 struct page **, struct vm_area_struct **,
			 unsigned long *, unsigned long *, long, unsigned int,
			 int *);
void unmap_hugepage_range(struct vm_area_struct *,
			  unsigned long, unsigned long, struct page *);
void __unmap_hugepage_range_final(struct mmu_gather *tlb,
			  struct vm_area_struct *vma,
			  unsigned long start, unsigned long end,
			  struct page *ref_page);
void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
			    unsigned long start, unsigned long end,
			    struct page *ref_page);
void hugetlb_report_meminfo(struct seq_file *);
int hugetlb_report_node_meminfo(char *buf, int len, int nid);
void hugetlb_show_meminfo(void);
unsigned long hugetlb_total_pages(void);
vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
			unsigned long address, unsigned int flags);
#ifdef CONFIG_USERFAULTFD
int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm, pte_t *dst_pte,
				struct vm_area_struct *dst_vma,
				unsigned long dst_addr,
				unsigned long src_addr,
				enum mcopy_atomic_mode mode,
				struct page **pagep);
#endif /* CONFIG_USERFAULTFD */
int hugetlb_reserve_pages(struct inode *inode, long from, long to,
						struct vm_area_struct *vma,
						vm_flags_t vm_flags);
long hugetlb_unreserve_pages(struct inode *inode, long start, long end,
						long freed);
bool isolate_huge_page(struct page *page, struct list_head *list);
void putback_active_hugepage(struct page *page);
void move_hugetlb_state(struct page *oldpage, struct page *newpage, int reason);
void free_huge_page(struct page *page);
void hugetlb_fix_reserve_counts(struct inode *inode);
extern struct mutex *hugetlb_fault_mutex_table;
u32 hugetlb_fault_mutex_hash(struct address_space *mapping, pgoff_t idx);
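
/*
 * Illustrative sketch: the fault path serializes on a hashed mutex so that
 * concurrent faults on the same (mapping, index) pair do not race:
 *
 *	u32 hash = hugetlb_fault_mutex_hash(mapping, idx);
 *
 *	mutex_lock(&hugetlb_fault_mutex_table[hash]);
 *	...
 *	mutex_unlock(&hugetlb_fault_mutex_table[hash]);
 */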

pte_t *huge_pmd_share(struct mm_struct *mm, struct vm_area_struct *vma,
		      unsigned long addr, pud_t *pud);

struct address_space *hugetlb_page_mapping_lock_write(struct page *hpage);

extern int sysctl_hugetlb_shm_group;
extern struct list_head huge_boot_pages;

/* arch callbacks */

pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
			unsigned long addr, unsigned long sz);
pte_t *huge_pte_offset(struct mm_struct *mm,
		       unsigned long addr, unsigned long sz);
int huge_pmd_unshare(struct mm_struct *mm, struct vm_area_struct *vma,
				unsigned long *addr, pte_t *ptep);
void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma,
				unsigned long *start, unsigned long *end);
struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address,
			      int write);
struct page *follow_huge_pd(struct vm_area_struct *vma,
			    unsigned long address, hugepd_t hpd,
			    int flags, int pdshift);
struct page *follow_huge_pmd_pte(struct vm_area_struct *vma, unsigned long address,
				 int flags);
struct page *follow_huge_pud(struct mm_struct *mm, unsigned long address,
				pud_t *pud, int flags);
struct page *follow_huge_pgd(struct mm_struct *mm, unsigned long address,
			     pgd_t *pgd, int flags);

int pmd_huge(pmd_t pmd);
int pud_huge(pud_t pud);
unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
		unsigned long address, unsigned long end, pgprot_t newprot);

bool is_hugetlb_entry_migration(pte_t pte);
void hugetlb_unshare_all_pmds(struct vm_area_struct *vma);

#else /* !CONFIG_HUGETLB_PAGE */

static inline void reset_vma_resv_huge_pages(struct vm_area_struct *vma)
{
}

static inline unsigned long hugetlb_total_pages(void)
{
	return 0;
}

static inline struct address_space *hugetlb_page_mapping_lock_write(
							struct page *hpage)
{
	return NULL;
}

static inline int huge_pmd_unshare(struct mm_struct *mm,
					struct vm_area_struct *vma,
					unsigned long *addr, pte_t *ptep)
{
	return 0;
}

static inline void adjust_range_if_pmd_sharing_possible(
				struct vm_area_struct *vma,
				unsigned long *start, unsigned long *end)
{
}

static inline long follow_hugetlb_page(struct mm_struct *mm,
			struct vm_area_struct *vma, struct page **pages,
			struct vm_area_struct **vmas, unsigned long *position,
			unsigned long *nr_pages, long i, unsigned int flags,
			int *nonblocking)
{
	BUG();
	return 0;
}

static inline struct page *follow_huge_addr(struct mm_struct *mm,
					unsigned long address, int write)
{
	return ERR_PTR(-EINVAL);
}

static inline int copy_hugetlb_page_range(struct mm_struct *dst,
			struct mm_struct *src, struct vm_area_struct *vma)
{
	BUG();
	return 0;
}

static inline void hugetlb_report_meminfo(struct seq_file *m)
{
}

static inline int hugetlb_report_node_meminfo(char *buf, int len, int nid)
{
	return 0;
}

static inline void hugetlb_show_meminfo(void)
{
}

static inline struct page *follow_huge_pd(struct vm_area_struct *vma,
				unsigned long address, hugepd_t hpd, int flags,
				int pdshift)
{
	return NULL;
}

static inline struct page *follow_huge_pmd_pte(struct vm_area_struct *vma,
				unsigned long address, int flags)
{
	return NULL;
}

static inline struct page *follow_huge_pud(struct mm_struct *mm,
				unsigned long address, pud_t *pud, int flags)
{
	return NULL;
}

static inline struct page *follow_huge_pgd(struct mm_struct *mm,
				unsigned long address, pgd_t *pgd, int flags)
{
	return NULL;
}

static inline int prepare_hugepage_range(struct file *file,
				unsigned long addr, unsigned long len)
{
	return -EINVAL;
}

static inline int pmd_huge(pmd_t pmd)
{
	return 0;
}

static inline int pud_huge(pud_t pud)
{
	return 0;
}

static inline int is_hugepage_only_range(struct mm_struct *mm,
					unsigned long addr, unsigned long len)
{
	return 0;
}

static inline void hugetlb_free_pgd_range(struct mmu_gather *tlb,
				unsigned long addr, unsigned long end,
				unsigned long floor, unsigned long ceiling)
{
	BUG();
}

#ifdef CONFIG_USERFAULTFD
static inline int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm,
						pte_t *dst_pte,
						struct vm_area_struct *dst_vma,
						unsigned long dst_addr,
						unsigned long src_addr,
						enum mcopy_atomic_mode mode,
						struct page **pagep)
{
	BUG();
	return 0;
}
#endif /* CONFIG_USERFAULTFD */

static inline pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr,
					unsigned long sz)
{
	return NULL;
}

static inline bool isolate_huge_page(struct page *page, struct list_head *list)
{
	return false;
}

static inline void putback_active_hugepage(struct page *page)
{
}

static inline void move_hugetlb_state(struct page *oldpage,
					struct page *newpage, int reason)
{
}

static inline unsigned long hugetlb_change_protection(
			struct vm_area_struct *vma, unsigned long address,
			unsigned long end, pgprot_t newprot)
{
	return 0;
}

static inline void __unmap_hugepage_range_final(struct mmu_gather *tlb,
			struct vm_area_struct *vma, unsigned long start,
			unsigned long end, struct page *ref_page)
{
	BUG();
}

static inline void __unmap_hugepage_range(struct mmu_gather *tlb,
			struct vm_area_struct *vma, unsigned long start,
			unsigned long end, struct page *ref_page)
{
	BUG();
}

static inline vm_fault_t hugetlb_fault(struct mm_struct *mm,
			struct vm_area_struct *vma, unsigned long address,
			unsigned int flags)
{
	BUG();
	return 0;
}

static inline void hugetlb_unshare_all_pmds(struct vm_area_struct *vma) { }

#endif /* !CONFIG_HUGETLB_PAGE */
/*
 * hugepages at the page global directory. If an arch supports hugepages
 * at the pgd level, it needs to define this.
 */
#ifndef pgd_huge
#define pgd_huge(x)	0
#endif
#ifndef p4d_huge
#define p4d_huge(x)	0
#endif

#ifndef pgd_write
static inline int pgd_write(pgd_t pgd)
{
	BUG();
	return 0;
}
#endif

#define HUGETLB_ANON_FILE	"anon_hugepage"

enum {
	/*
	 * The file will be used as a shm file so shmfs accounting rules
	 * apply.
	 */
	HUGETLB_SHMFS_INODE = 1,
	/*
	 * The file is being created on the internal vfs mount and shmfs
	 * accounting rules do not apply.
	 */
	HUGETLB_ANONHUGE_INODE = 2,
};

#ifdef CONFIG_HUGETLBFS
struct hugetlbfs_sb_info {
	long	max_inodes;	/* inodes allowed */
	long	free_inodes;	/* inodes free */
	spinlock_t	stat_lock;
	struct hstate *hstate;
	struct hugepage_subpool *spool;
	kuid_t	uid;
	kgid_t	gid;
	umode_t mode;
};

static inline struct hugetlbfs_sb_info *HUGETLBFS_SB(struct super_block *sb)
{
	return sb->s_fs_info;
}

struct hugetlbfs_inode_info {
	struct shared_policy policy;
	struct inode vfs_inode;
	unsigned int seals;
};

static inline struct hugetlbfs_inode_info *HUGETLBFS_I(struct inode *inode)
{
	return container_of(inode, struct hugetlbfs_inode_info, vfs_inode);
}

extern const struct file_operations hugetlbfs_file_operations;
extern const struct vm_operations_struct hugetlb_vm_ops;
struct file *hugetlb_file_setup(const char *name, size_t size, vm_flags_t acct,
				struct user_struct **user, int creat_flags,
				int page_size_log);

static inline bool is_file_hugepages(struct file *file)
{
	if (file->f_op == &hugetlbfs_file_operations)
		return true;

	return is_file_shm_hugepages(file);
}
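
/*
 * Illustrative sketch: callers such as the mmap path use this check to
 * round a request up to the huge page size backing the file, e.g.:
 *
 *	if (is_file_hugepages(file))
 *		len = ALIGN(len, huge_page_size(hstate_file(file)));
 */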

static inline struct hstate *hstate_inode(struct inode *i)
{
	return HUGETLBFS_SB(i->i_sb)->hstate;
}
#else /* !CONFIG_HUGETLBFS */

#define is_file_hugepages(file)			false
static inline struct file *
hugetlb_file_setup(const char *name, size_t size, vm_flags_t acctflag,
		struct user_struct **user, int creat_flags,
		int page_size_log)
{
	return ERR_PTR(-ENOSYS);
}

static inline struct hstate *hstate_inode(struct inode *i)
{
	return NULL;
}
#endif /* !CONFIG_HUGETLBFS */

#ifdef HAVE_ARCH_HUGETLB_UNMAPPED_AREA
unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
					unsigned long len, unsigned long pgoff,
					unsigned long flags);
#endif /* HAVE_ARCH_HUGETLB_UNMAPPED_AREA */

#ifdef CONFIG_HUGETLB_PAGE

#define HSTATE_NAME_LEN 32
/* Defines one hugetlb page size */
struct hstate {
	int next_nid_to_alloc;
	int next_nid_to_free;
	unsigned int order;
	unsigned long mask;
	unsigned long max_huge_pages;
	unsigned long nr_huge_pages;
	unsigned long free_huge_pages;
	unsigned long resv_huge_pages;
	unsigned long surplus_huge_pages;
	unsigned long nr_overcommit_huge_pages;
	struct list_head hugepage_activelist;
	struct list_head hugepage_freelists[MAX_NUMNODES];
	unsigned int nr_huge_pages_node[MAX_NUMNODES];
	unsigned int free_huge_pages_node[MAX_NUMNODES];
	unsigned int surplus_huge_pages_node[MAX_NUMNODES];
#ifdef CONFIG_CGROUP_HUGETLB
	/* cgroup control files */
	struct cftype cgroup_files_dfl[7];
	struct cftype cgroup_files_legacy[9];
#endif
	char name[HSTATE_NAME_LEN];
};

struct huge_bootmem_page {
	struct list_head list;
	struct hstate *hstate;
};

struct page *alloc_huge_page(struct vm_area_struct *vma,
				unsigned long addr, int avoid_reserve);
struct page *alloc_huge_page_nodemask(struct hstate *h, int preferred_nid,
				nodemask_t *nmask, gfp_t gfp_mask);
struct page *alloc_huge_page_vma(struct hstate *h, struct vm_area_struct *vma,
				unsigned long address);
int huge_add_to_page_cache(struct page *page, struct address_space *mapping,
			pgoff_t idx);

/* arch callback */
int __init __alloc_bootmem_huge_page(struct hstate *h);
int __init alloc_bootmem_huge_page(struct hstate *h);

void __init hugetlb_add_hstate(unsigned order);
bool __init arch_hugetlb_valid_size(unsigned long size);
struct hstate *size_to_hstate(unsigned long size);

#ifndef HUGE_MAX_HSTATE
#define HUGE_MAX_HSTATE 1
#endif

extern struct hstate hstates[HUGE_MAX_HSTATE];
extern unsigned int default_hstate_idx;

#define default_hstate (hstates[default_hstate_idx])

static inline struct hstate *hstate_file(struct file *f)
{
	return hstate_inode(file_inode(f));
}

static inline struct hstate *hstate_sizelog(int page_size_log)
{
	if (!page_size_log)
		return &default_hstate;

	return size_to_hstate(1UL << page_size_log);
}
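
/*
 * Illustrative sketch: mmap(MAP_HUGETLB) encodes the desired page-size log2
 * in the high flag bits; a caller might recover the hstate with:
 *
 *	page_size_log = (flags >> MAP_HUGE_SHIFT) & MAP_HUGE_MASK;
 *	h = hstate_sizelog(page_size_log);
 *	if (!h)
 *		return -EINVAL;	// no pool registered for that size
 */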

static inline struct hstate *hstate_vma(struct vm_area_struct *vma)
{
	return hstate_file(vma->vm_file);
}

static inline unsigned long huge_page_size(struct hstate *h)
{
	return (unsigned long)PAGE_SIZE << h->order;
}

extern unsigned long vma_kernel_pagesize(struct vm_area_struct *vma);

extern unsigned long vma_mmu_pagesize(struct vm_area_struct *vma);

static inline unsigned long huge_page_mask(struct hstate *h)
{
	return h->mask;
}

static inline unsigned int huge_page_order(struct hstate *h)
{
	return h->order;
}

static inline unsigned huge_page_shift(struct hstate *h)
{
	return h->order + PAGE_SHIFT;
}

static inline bool hstate_is_gigantic(struct hstate *h)
{
	return huge_page_order(h) >= MAX_ORDER;
}

static inline unsigned int pages_per_huge_page(struct hstate *h)
{
	return 1 << h->order;
}

static inline unsigned int blocks_per_huge_page(struct hstate *h)
{
	return huge_page_size(h) / 512;
}
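
/*
 * Worked example (illustrative): with 4KiB base pages, a 2MiB hstate has
 * order 9, so huge_page_size() is 4KiB << 9 = 2MiB, pages_per_huge_page()
 * is 512, and blocks_per_huge_page() is 2MiB / 512B = 4096 sectors. A 1GiB
 * hstate (order 18) exceeds MAX_ORDER on common configs, so
 * hstate_is_gigantic() is true for it.
 */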

#include <asm/hugetlb.h>

#ifndef is_hugepage_only_range
static inline int is_hugepage_only_range(struct mm_struct *mm,
					unsigned long addr, unsigned long len)
{
	return 0;
}
#define is_hugepage_only_range is_hugepage_only_range
#endif

#ifndef arch_clear_hugepage_flags
static inline void arch_clear_hugepage_flags(struct page *page) { }
#define arch_clear_hugepage_flags arch_clear_hugepage_flags
#endif

#ifndef arch_make_huge_pte
static inline pte_t arch_make_huge_pte(pte_t entry, struct vm_area_struct *vma,
				       struct page *page, int writable)
{
	return entry;
}
#endif

static inline struct hstate *page_hstate(struct page *page)
{
	VM_BUG_ON_PAGE(!PageHuge(page), page);
	return size_to_hstate(page_size(page));
}

static inline unsigned hstate_index_to_shift(unsigned index)
{
	return hstates[index].order + PAGE_SHIFT;
}

static inline int hstate_index(struct hstate *h)
{
	return h - hstates;
}

extern int dissolve_free_huge_page(struct page *page);
extern int dissolve_free_huge_pages(unsigned long start_pfn,
				    unsigned long end_pfn);

#ifdef CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION
#ifndef arch_hugetlb_migration_supported
static inline bool arch_hugetlb_migration_supported(struct hstate *h)
{
	if ((huge_page_shift(h) == PMD_SHIFT) ||
		(huge_page_shift(h) == PUD_SHIFT) ||
			(huge_page_shift(h) == PGDIR_SHIFT))
		return true;
	else
		return false;
}
#endif
#else
static inline bool arch_hugetlb_migration_supported(struct hstate *h)
{
	return false;
}
#endif

static inline bool hugepage_migration_supported(struct hstate *h)
{
	return arch_hugetlb_migration_supported(h);
}

/*
 * Movability is a different check from migratability. It determines
 * whether a huge page should be placed in a movable zone. Movability
 * only matters if the huge page size supports migration in the first
 * place: there is no reason for a huge page to be movable if it is not
 * migratable to start with. The huge page size must also be large
 * enough to be placed in a movable zone and still feasible to migrate;
 * mere presence in a movable zone does not make migration feasible.
 *
 * So even though large huge page sizes like the gigantic ones are
 * migratable, they should not be movable, because it is not feasible
 * to migrate them out of a movable zone.
 */
static inline bool hugepage_movable_supported(struct hstate *h)
{
	if (!hugepage_migration_supported(h))
		return false;

	if (hstate_is_gigantic(h))
		return false;
	return true;
}

/* Movability of hugepages depends on migration support. */
static inline gfp_t htlb_alloc_mask(struct hstate *h)
{
	if (hugepage_movable_supported(h))
		return GFP_HIGHUSER_MOVABLE;
	else
		return GFP_HIGHUSER;
}

static inline gfp_t htlb_modify_alloc_mask(struct hstate *h, gfp_t gfp_mask)
{
	gfp_t modified_mask = htlb_alloc_mask(h);

	/* Some callers might want to enforce node */
	modified_mask |= (gfp_mask & __GFP_THISNODE);

	modified_mask |= (gfp_mask & __GFP_NOWARN);

	return modified_mask;
}
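
/*
 * Illustrative sketch: a migration caller pinning the allocation to one
 * node might build its mask like this (assumes "nid" is a valid node id):
 *
 *	gfp_t gfp = htlb_modify_alloc_mask(h, __GFP_THISNODE);
 *	struct page *page = alloc_huge_page_nodemask(h, nid, NULL, gfp);
 */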

static inline spinlock_t *huge_pte_lockptr(struct hstate *h,
					   struct mm_struct *mm, pte_t *pte)
{
	if (huge_page_size(h) == PMD_SIZE)
		return pmd_lockptr(mm, (pmd_t *) pte);
	VM_BUG_ON(huge_page_size(h) == PAGE_SIZE);
	return &mm->page_table_lock;
}

#ifndef hugepages_supported
/*
 * Some platforms decide whether they support huge pages at boot time.
 * Some of them, such as powerpc, set HPAGE_SHIFT to 0 when there is no
 * such support.
 */
#define hugepages_supported() (HPAGE_SHIFT != 0)
#endif

void hugetlb_report_usage(struct seq_file *m, struct mm_struct *mm);

static inline void hugetlb_count_init(struct mm_struct *mm)
{
	atomic_long_set(&mm->hugetlb_usage, 0);
}

static inline void hugetlb_count_add(long l, struct mm_struct *mm)
{
	atomic_long_add(l, &mm->hugetlb_usage);
}

static inline void hugetlb_count_sub(long l, struct mm_struct *mm)
{
	atomic_long_sub(l, &mm->hugetlb_usage);
}
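
/*
 * Illustrative sketch: the per-mm usage counter is kept in base pages, so
 * callers that instantiate or tear down a huge page adjust it by the
 * page's span, e.g.:
 *
 *	hugetlb_count_add(pages_per_huge_page(h), mm);	// on instantiate
 *	hugetlb_count_sub(pages_per_huge_page(h), mm);	// on teardown
 */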

#ifndef set_huge_swap_pte_at
static inline void set_huge_swap_pte_at(struct mm_struct *mm, unsigned long addr,
					pte_t *ptep, pte_t pte, unsigned long sz)
{
	set_huge_pte_at(mm, addr, ptep, pte);
}
#endif

#ifndef huge_ptep_modify_prot_start
#define huge_ptep_modify_prot_start huge_ptep_modify_prot_start
static inline pte_t huge_ptep_modify_prot_start(struct vm_area_struct *vma,
						unsigned long addr, pte_t *ptep)
{
	return huge_ptep_get_and_clear(vma->vm_mm, addr, ptep);
}
#endif

#ifndef huge_ptep_modify_prot_commit
#define huge_ptep_modify_prot_commit huge_ptep_modify_prot_commit
static inline void huge_ptep_modify_prot_commit(struct vm_area_struct *vma,
						unsigned long addr, pte_t *ptep,
						pte_t old_pte, pte_t pte)
{
	set_huge_pte_at(vma->vm_mm, addr, ptep, pte);
}
#endif

void set_page_huge_active(struct page *page);

#else	/* CONFIG_HUGETLB_PAGE */
struct hstate {};

static inline struct page *alloc_huge_page(struct vm_area_struct *vma,
					   unsigned long addr,
					   int avoid_reserve)
{
	return NULL;
}

static inline struct page *
alloc_huge_page_nodemask(struct hstate *h, int preferred_nid,
			nodemask_t *nmask, gfp_t gfp_mask)
{
	return NULL;
}

static inline struct page *alloc_huge_page_vma(struct hstate *h,
					       struct vm_area_struct *vma,
					       unsigned long address)
{
	return NULL;
}

static inline int __alloc_bootmem_huge_page(struct hstate *h)
{
	return 0;
}

static inline struct hstate *hstate_file(struct file *f)
{
	return NULL;
}

static inline struct hstate *hstate_sizelog(int page_size_log)
{
	return NULL;
}

static inline struct hstate *hstate_vma(struct vm_area_struct *vma)
{
	return NULL;
}

static inline struct hstate *page_hstate(struct page *page)
{
	return NULL;
}

static inline unsigned long huge_page_size(struct hstate *h)
{
	return PAGE_SIZE;
}

static inline unsigned long huge_page_mask(struct hstate *h)
{
	return PAGE_MASK;
}

static inline unsigned long vma_kernel_pagesize(struct vm_area_struct *vma)
{
	return PAGE_SIZE;
}

static inline unsigned long vma_mmu_pagesize(struct vm_area_struct *vma)
{
	return PAGE_SIZE;
}

static inline unsigned int huge_page_order(struct hstate *h)
{
	return 0;
}

static inline unsigned int huge_page_shift(struct hstate *h)
{
	return PAGE_SHIFT;
}

static inline bool hstate_is_gigantic(struct hstate *h)
{
	return false;
}

static inline unsigned int pages_per_huge_page(struct hstate *h)
{
	return 1;
}

static inline unsigned hstate_index_to_shift(unsigned index)
{
	return 0;
}

static inline int hstate_index(struct hstate *h)
{
	return 0;
}

static inline int dissolve_free_huge_page(struct page *page)
{
	return 0;
}

static inline int dissolve_free_huge_pages(unsigned long start_pfn,
					   unsigned long end_pfn)
{
	return 0;
}

static inline bool hugepage_migration_supported(struct hstate *h)
{
	return false;
}

static inline bool hugepage_movable_supported(struct hstate *h)
{
	return false;
}

static inline gfp_t htlb_alloc_mask(struct hstate *h)
{
	return 0;
}

static inline gfp_t htlb_modify_alloc_mask(struct hstate *h, gfp_t gfp_mask)
{
	return 0;
}

static inline spinlock_t *huge_pte_lockptr(struct hstate *h,
					   struct mm_struct *mm, pte_t *pte)
{
	return &mm->page_table_lock;
}

static inline void hugetlb_count_init(struct mm_struct *mm)
{
}

static inline void hugetlb_report_usage(struct seq_file *f, struct mm_struct *m)
{
}

static inline void hugetlb_count_sub(long l, struct mm_struct *mm)
{
}

static inline void set_huge_swap_pte_at(struct mm_struct *mm, unsigned long addr,
					pte_t *ptep, pte_t pte, unsigned long sz)
{
}
#endif	/* CONFIG_HUGETLB_PAGE */

static inline spinlock_t *huge_pte_lock(struct hstate *h,
					struct mm_struct *mm, pte_t *pte)
{
	spinlock_t *ptl;

	ptl = huge_pte_lockptr(h, mm, pte);
	spin_lock(ptl);
	return ptl;
}
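
/*
 * Illustrative sketch: pairing the helper with the matching unlock while
 * inspecting a huge PTE ("ptep" obtained from huge_pte_offset()):
 *
 *	ptl = huge_pte_lock(h, mm, ptep);
 *	entry = huge_ptep_get(ptep);
 *	...
 *	spin_unlock(ptl);
 */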

#if defined(CONFIG_HUGETLB_PAGE) && defined(CONFIG_CMA)
extern void __init hugetlb_cma_reserve(int order);
extern void __init hugetlb_cma_check(void);
#else
static inline __init void hugetlb_cma_reserve(int order)
{
}
static inline __init void hugetlb_cma_check(void)
{
}
#endif

bool want_pmd_share(struct vm_area_struct *vma, unsigned long addr);

#ifndef __HAVE_ARCH_FLUSH_HUGETLB_TLB_RANGE
/*
 * ARCHes with special requirements for evicting HUGETLB backing TLB entries can
 * implement this.
 */
#define flush_hugetlb_tlb_range(vma, addr, end)	flush_tlb_range(vma, addr, end)
#endif

#endif /* _LINUX_HUGETLB_H */