/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Macros for manipulating and testing page->flags
 */

#ifndef PAGE_FLAGS_H
#define PAGE_FLAGS_H

#include <linux/types.h>
#include <linux/bug.h>
#include <linux/mmdebug.h>
#ifndef __GENERATING_BOUNDS_H
#include <linux/mm_types.h>
#include <generated/bounds.h>
#endif /* !__GENERATING_BOUNDS_H */

/*
 * Various page->flags bits:
 *
 * PG_reserved is set for special pages. The "struct page" of such a page
 * should in general not be touched (e.g. set dirty) except by its owner.
 * Pages marked as PG_reserved include:
 * - Pages part of the kernel image (including vDSO) and similar (e.g. BIOS,
 *   initrd, HW tables)
 * - Pages reserved or allocated early during boot (before the page allocator
 *   was initialized). This includes (depending on the architecture) the
 *   initial vmemmap, initial page tables, crashkernel, elfcorehdr, and much
 *   much more. Once (if ever) freed, PG_reserved is cleared and they will
 *   be given to the page allocator.
 * - Pages falling into physical memory gaps - not IORESOURCE_SYSRAM. Trying
 *   to read/write these pages might end badly. Don't touch!
 * - The zero page(s)
 * - Pages not added to the page allocator when onlining a section because
 *   they were excluded via the online_page_callback() or because they are
 *   PG_hwpoison.
 * - Pages allocated in the context of kexec/kdump (loaded kernel image,
 *   control pages, vmcoreinfo)
 * - MMIO/DMA pages. Some architectures don't allow ioremapping pages that are
 *   not marked PG_reserved (as they might be in use by somebody else who does
 *   not respect the caching strategy).
 * - Pages part of an offline section (struct pages of offline sections should
 *   not be trusted as they will be initialized when first onlined).
 * - MCA pages on ia64
 * - Pages holding CPU notes for POWER Firmware Assisted Dump
 * - Device memory (e.g. PMEM, DAX, HMM)
 * Some PG_reserved pages will be excluded from the hibernation image.
 * PG_reserved does in general not hinder anybody from dumping or swapping
 * and is no longer required for remap_pfn_range(). ioremap might require it.
 * Consequently, PG_reserved for a page mapped into user space can indicate
 * the zero page, the vDSO, MMIO pages or device memory.
 *
 * The PG_private bitflag is set on pagecache pages if they contain filesystem
 * specific data (which is normally at page->private). It can also be used by
 * private allocations for their own purposes.
 *
 * During initiation of disk I/O, PG_locked is set. This bit is set before I/O
 * and cleared when writeback _starts_ or when read _completes_. PG_writeback
 * is set before writeback starts and cleared when it finishes.
 *
 * PG_locked also pins a page in pagecache, and blocks truncation of the file
 * while it is held.
 *
 * page_waitqueue(page) is a wait queue of all tasks waiting for the page
 * to become unlocked.
 *
 * PG_swapbacked is set when a page uses swap as its backing storage.  These
 * are usually PageAnon or shmem pages, but please note that even anonymous
 * pages might lose their PG_swapbacked flag when they can simply be dropped
 * (e.g. as a result of MADV_FREE).
 *
 * PG_uptodate tells whether the page's contents are valid.  When a read
 * completes, the page becomes uptodate, unless a disk I/O error happened.
 *
 * PG_referenced, PG_reclaim are used for page reclaim for anonymous and
 * file-backed pagecache (see mm/vmscan.c).
 *
 * PG_error is set to indicate that an I/O error occurred on this page.
 *
 * PG_arch_1 is an architecture specific page state bit.  The generic code
 * guarantees that this bit is cleared for a page when it first is entered into
 * the page cache.
 *
 * PG_hwpoison indicates that a page got corrupted in hardware and contains
 * data with incorrect ECC bits that triggered a machine check. Accessing is
 * not safe since it may cause another machine check. Don't touch!
 */

/*
 * Don't use the *_dontuse flags.  Use the macros.  Otherwise you'll break
 * locked- and dirty-page accounting.
 *
 * The page flags field is split into two parts, the main flags area
 * which extends from the low bits upwards, and the fields area which
 * extends from the high bits downwards.
 *
 *  | FIELD | ... | FLAGS |
 *  N-1           ^       0
 *               (NR_PAGEFLAGS)
 *
 * The fields area is reserved for fields mapping zone, node (for NUMA) and
 * SPARSEMEM section (for variants of SPARSEMEM that require section ids like
 * SPARSEMEM_EXTREME with !SPARSEMEM_VMEMMAP).
 */
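/*
 * Illustrative sketch (not part of the original header): the "fields" area is
 * consumed by helpers defined in <linux/mm.h>, which shift the upper bits of
 * page->flags back down.  Conceptually, for the zone index:
 *
 *	static inline enum zone_type example_page_zonenum(const struct page *page)
 *	{
 *		return (page->flags >> ZONES_PGSHIFT) & ZONES_MASK;
 *	}
 *
 * example_page_zonenum() is a hypothetical name used only for illustration;
 * the real helper is page_zonenum() in <linux/mm.h>.
 */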
enum pageflags {
	PG_locked,		/* Page is locked. Don't touch. */
	PG_referenced,
	PG_uptodate,
	PG_dirty,
	PG_lru,
	PG_active,
	PG_workingset,
	PG_waiters,		/* Page has waiters, check its waitqueue. Must be bit #7 and in the same byte as "PG_locked" */
	PG_error,
	PG_slab,
	PG_owner_priv_1,	/* Owner use. If pagecache, fs may use */
	PG_arch_1,
	PG_reserved,
	PG_private,		/* If pagecache, has fs-private data */
	PG_private_2,		/* If pagecache, has fs aux data */
	PG_writeback,		/* Page is under writeback */
	PG_head,		/* A head page */
	PG_mappedtodisk,	/* Has blocks allocated on-disk */
	PG_reclaim,		/* To be reclaimed asap */
	PG_swapbacked,		/* Page is backed by RAM/swap */
	PG_unevictable,		/* Page is "unevictable" */
#ifdef CONFIG_MMU
	PG_mlocked,		/* Page is vma mlocked */
#endif
#ifdef CONFIG_ARCH_USES_PG_UNCACHED
	PG_uncached,		/* Page has been mapped as uncached */
#endif
#ifdef CONFIG_MEMORY_FAILURE
	PG_hwpoison,		/* hardware poisoned page. Don't touch */
#endif
#if defined(CONFIG_PAGE_IDLE_FLAG) && defined(CONFIG_64BIT)
	PG_young,
	PG_idle,
#endif
#ifdef CONFIG_64BIT
	PG_arch_2,
#endif
#ifdef CONFIG_KASAN_HW_TAGS
	PG_skip_kasan_poison,
#endif
	__NR_PAGEFLAGS,

	/* Filesystems */
	PG_checked = PG_owner_priv_1,

	/* SwapBacked */
	PG_swapcache = PG_owner_priv_1,	/* Swap page: swp_entry_t in private */

	/* Two page bits are conscripted by FS-Cache to maintain local caching
	 * state.  These bits are set on pages belonging to the netfs's inodes
	 * when those inodes are being locally cached.
	 */
	PG_fscache = PG_private_2,	/* page backed by cache */

	/* XEN */
	/* Pinned in Xen as a read-only pagetable page. */
	PG_pinned = PG_owner_priv_1,
	/* Pinned as part of domain save (see xen_mm_pin_all()). */
	PG_savepinned = PG_dirty,
	/* Has a grant mapping of another (foreign) domain's page. */
	PG_foreign = PG_owner_priv_1,
	/* Remapped by swiotlb-xen. */
	PG_xen_remapped = PG_owner_priv_1,

	/* SLOB */
	PG_slob_free = PG_private,

	/* Compound pages. Stored in first tail page's flags */
	PG_double_map = PG_workingset,

	/* non-lru isolated movable page */
	PG_isolated = PG_reclaim,

	/* Only valid for buddy pages. Used to track pages that are reported */
	PG_reported = PG_uptodate,
};
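/*
 * Illustrative note (not part of the original header): several of the aliases
 * above reuse one bit for owners that can never coexist on the same page, so
 * users disambiguate with a companion flag.  A minimal sketch of the idea
 * behind PageSwapCache() further down in this file:
 *
 *	if (PageSwapBacked(page) && test_bit(PG_swapcache, &page->flags))
 *		;	// PG_owner_priv_1 means "swap cache" here, not PG_checked
 */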

#ifndef __GENERATING_BOUNDS_H

struct page;	/* forward declaration */

static inline struct page *compound_head(struct page *page)
{
	unsigned long head = READ_ONCE(page->compound_head);

	if (unlikely(head & 1))
		return (struct page *) (head - 1);
	return page;
}

static __always_inline int PageTail(struct page *page)
{
	return READ_ONCE(page->compound_head) & 1;
}

static __always_inline int PageCompound(struct page *page)
{
	return test_bit(PG_head, &page->flags) || PageTail(page);
}

#define	PAGE_POISON_PATTERN	-1l
static inline int PagePoisoned(const struct page *page)
{
	return page->flags == PAGE_POISON_PATTERN;
}

#ifdef CONFIG_DEBUG_VM
void page_init_poison(struct page *page, size_t size);
#else
static inline void page_init_poison(struct page *page, size_t size)
{
}
#endif

/*
 * Page flags policies wrt compound pages
 *
 * PF_POISONED_CHECK
 *     check if this struct page is poisoned/uninitialized
 *
 * PF_ANY:
 *     the page flag is relevant for small, head and tail pages.
 *
 * PF_HEAD:
 *     for compound pages, all operations related to the page flag are
 *     applied to the head page.
 *
 * PF_ONLY_HEAD:
 *     for compound pages, callers only ever operate on the head page.
 *
 * PF_NO_TAIL:
 *     modifications of the page flag must be done on small or head pages;
 *     checks can be done on tail pages too.
 *
 * PF_NO_COMPOUND:
 *     the page flag is not relevant for compound pages.
 *
 * PF_SECOND:
 *     the page flag is stored in the first tail page.
 */
#define PF_POISONED_CHECK(page) ({					\
		VM_BUG_ON_PGFLAGS(PagePoisoned(page), page);		\
		page; })
#define PF_ANY(page, enforce)	PF_POISONED_CHECK(page)
#define PF_HEAD(page, enforce)	PF_POISONED_CHECK(compound_head(page))
#define PF_ONLY_HEAD(page, enforce) ({					\
		VM_BUG_ON_PGFLAGS(PageTail(page), page);		\
		PF_POISONED_CHECK(page); })
#define PF_NO_TAIL(page, enforce) ({					\
		VM_BUG_ON_PGFLAGS(enforce && PageTail(page), page);	\
		PF_POISONED_CHECK(compound_head(page)); })
#define PF_NO_COMPOUND(page, enforce) ({				\
		VM_BUG_ON_PGFLAGS(enforce && PageCompound(page), page);	\
		PF_POISONED_CHECK(page); })
#define PF_SECOND(page, enforce) ({					\
		VM_BUG_ON_PGFLAGS(!PageHead(page), page);		\
		PF_POISONED_CHECK(&page[1]); })

/*
 * Macros to create function definitions for page flags
 */
#define TESTPAGEFLAG(uname, lname, policy)				\
static __always_inline int Page##uname(struct page *page)		\
	{ return test_bit(PG_##lname, &policy(page, 0)->flags); }

#define SETPAGEFLAG(uname, lname, policy)				\
static __always_inline void SetPage##uname(struct page *page)		\
	{ set_bit(PG_##lname, &policy(page, 1)->flags); }

#define CLEARPAGEFLAG(uname, lname, policy)				\
static __always_inline void ClearPage##uname(struct page *page)	\
	{ clear_bit(PG_##lname, &policy(page, 1)->flags); }

#define __SETPAGEFLAG(uname, lname, policy)				\
static __always_inline void __SetPage##uname(struct page *page)	\
	{ __set_bit(PG_##lname, &policy(page, 1)->flags); }

#define __CLEARPAGEFLAG(uname, lname, policy)				\
static __always_inline void __ClearPage##uname(struct page *page)	\
	{ __clear_bit(PG_##lname, &policy(page, 1)->flags); }

#define TESTSETFLAG(uname, lname, policy)				\
static __always_inline int TestSetPage##uname(struct page *page)	\
	{ return test_and_set_bit(PG_##lname, &policy(page, 1)->flags); }

#define TESTCLEARFLAG(uname, lname, policy)				\
static __always_inline int TestClearPage##uname(struct page *page)	\
	{ return test_and_clear_bit(PG_##lname, &policy(page, 1)->flags); }

#define PAGEFLAG(uname, lname, policy)					\
	TESTPAGEFLAG(uname, lname, policy)				\
	SETPAGEFLAG(uname, lname, policy)				\
	CLEARPAGEFLAG(uname, lname, policy)

#define __PAGEFLAG(uname, lname, policy)				\
	TESTPAGEFLAG(uname, lname, policy)				\
	__SETPAGEFLAG(uname, lname, policy)				\
	__CLEARPAGEFLAG(uname, lname, policy)

#define TESTSCFLAG(uname, lname, policy)				\
	TESTSETFLAG(uname, lname, policy)				\
	TESTCLEARFLAG(uname, lname, policy)

#define TESTPAGEFLAG_FALSE(uname)					\
static inline int Page##uname(const struct page *page) { return 0; }

#define SETPAGEFLAG_NOOP(uname)						\
static inline void SetPage##uname(struct page *page) {  }

#define CLEARPAGEFLAG_NOOP(uname)					\
static inline void ClearPage##uname(struct page *page) {  }

#define __CLEARPAGEFLAG_NOOP(uname)					\
static inline void __ClearPage##uname(struct page *page) {  }

#define TESTSETFLAG_FALSE(uname)					\
static inline int TestSetPage##uname(struct page *page) { return 0; }

#define TESTCLEARFLAG_FALSE(uname)					\
static inline int TestClearPage##uname(struct page *page) { return 0; }

#define PAGEFLAG_FALSE(uname) TESTPAGEFLAG_FALSE(uname)			\
	SETPAGEFLAG_NOOP(uname) CLEARPAGEFLAG_NOOP(uname)

#define TESTSCFLAG_FALSE(uname)						\
	TESTSETFLAG_FALSE(uname) TESTCLEARFLAG_FALSE(uname)
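/*
 * Illustrative expansion (not part of the original header): with the policy
 * machinery above, a single line such as
 *
 *	PAGEFLAG(Dirty, dirty, PF_HEAD)
 *
 * generates roughly the following accessors, all redirected to the head page
 * of a compound page (sketch only; the real expansion also carries the
 * PagePoisoned() debug check from PF_POISONED_CHECK()):
 *
 *	static __always_inline int PageDirty(struct page *page)
 *	{ return test_bit(PG_dirty, &compound_head(page)->flags); }
 *	static __always_inline void SetPageDirty(struct page *page)
 *	{ set_bit(PG_dirty, &compound_head(page)->flags); }
 *	static __always_inline void ClearPageDirty(struct page *page)
 *	{ clear_bit(PG_dirty, &compound_head(page)->flags); }
 */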

__PAGEFLAG(Locked, locked, PF_NO_TAIL)
PAGEFLAG(Waiters, waiters, PF_ONLY_HEAD) __CLEARPAGEFLAG(Waiters, waiters, PF_ONLY_HEAD)
PAGEFLAG(Error, error, PF_NO_TAIL) TESTCLEARFLAG(Error, error, PF_NO_TAIL)
PAGEFLAG(Referenced, referenced, PF_HEAD)
	TESTCLEARFLAG(Referenced, referenced, PF_HEAD)
	__SETPAGEFLAG(Referenced, referenced, PF_HEAD)
PAGEFLAG(Dirty, dirty, PF_HEAD) TESTSCFLAG(Dirty, dirty, PF_HEAD)
	__CLEARPAGEFLAG(Dirty, dirty, PF_HEAD)
PAGEFLAG(LRU, lru, PF_HEAD) __CLEARPAGEFLAG(LRU, lru, PF_HEAD)
PAGEFLAG(Active, active, PF_HEAD) __CLEARPAGEFLAG(Active, active, PF_HEAD)
	TESTCLEARFLAG(Active, active, PF_HEAD)
PAGEFLAG(Workingset, workingset, PF_HEAD)
	TESTCLEARFLAG(Workingset, workingset, PF_HEAD)
__PAGEFLAG(Slab, slab, PF_NO_TAIL)
__PAGEFLAG(SlobFree, slob_free, PF_NO_TAIL)
PAGEFLAG(Checked, checked, PF_NO_COMPOUND)	   /* Used by some filesystems */

/* Xen */
PAGEFLAG(Pinned, pinned, PF_NO_COMPOUND)
	TESTSCFLAG(Pinned, pinned, PF_NO_COMPOUND)
PAGEFLAG(SavePinned, savepinned, PF_NO_COMPOUND);
PAGEFLAG(Foreign, foreign, PF_NO_COMPOUND);
PAGEFLAG(XenRemapped, xen_remapped, PF_NO_COMPOUND)
	TESTCLEARFLAG(XenRemapped, xen_remapped, PF_NO_COMPOUND)

PAGEFLAG(Reserved, reserved, PF_NO_COMPOUND)
	__CLEARPAGEFLAG(Reserved, reserved, PF_NO_COMPOUND)
	__SETPAGEFLAG(Reserved, reserved, PF_NO_COMPOUND)
PAGEFLAG(SwapBacked, swapbacked, PF_NO_TAIL)
	__CLEARPAGEFLAG(SwapBacked, swapbacked, PF_NO_TAIL)
	__SETPAGEFLAG(SwapBacked, swapbacked, PF_NO_TAIL)

/*
 * Private page markings that may be used by the filesystem that owns the page
 * for its own purposes.
 * - PG_private and PG_private_2 cause releasepage() and co to be invoked
 */
PAGEFLAG(Private, private, PF_ANY) __SETPAGEFLAG(Private, private, PF_ANY)
	__CLEARPAGEFLAG(Private, private, PF_ANY)
PAGEFLAG(Private2, private_2, PF_ANY) TESTSCFLAG(Private2, private_2, PF_ANY)
PAGEFLAG(OwnerPriv1, owner_priv_1, PF_ANY)
	TESTCLEARFLAG(OwnerPriv1, owner_priv_1, PF_ANY)

/*
 * Only test-and-set exist for PG_writeback.  The unconditional operators are
 * risky: they bypass page accounting.
 */
TESTPAGEFLAG(Writeback, writeback, PF_NO_TAIL)
	TESTSCFLAG(Writeback, writeback, PF_NO_TAIL)
PAGEFLAG(MappedToDisk, mappedtodisk, PF_NO_TAIL)

/* PG_readahead is only used for reads; PG_reclaim is only for writes */
PAGEFLAG(Reclaim, reclaim, PF_NO_TAIL)
	TESTCLEARFLAG(Reclaim, reclaim, PF_NO_TAIL)
PAGEFLAG(Readahead, reclaim, PF_NO_COMPOUND)
	TESTCLEARFLAG(Readahead, reclaim, PF_NO_COMPOUND)

#ifdef CONFIG_HIGHMEM
/*
 * Must use a macro here due to header dependency issues. page_zone() is not
 * available at this point.
 */
#define PageHighMem(__p) is_highmem_idx(page_zonenum(__p))
#else
PAGEFLAG_FALSE(HighMem)
#endif

#ifdef CONFIG_SWAP
static __always_inline int PageSwapCache(struct page *page)
{
#ifdef CONFIG_THP_SWAP
	page = compound_head(page);
#endif
	return PageSwapBacked(page) && test_bit(PG_swapcache, &page->flags);
}
SETPAGEFLAG(SwapCache, swapcache, PF_NO_TAIL)
CLEARPAGEFLAG(SwapCache, swapcache, PF_NO_TAIL)
#else
PAGEFLAG_FALSE(SwapCache)
#endif

PAGEFLAG(Unevictable, unevictable, PF_HEAD)
	__CLEARPAGEFLAG(Unevictable, unevictable, PF_HEAD)
	TESTCLEARFLAG(Unevictable, unevictable, PF_HEAD)

#ifdef CONFIG_MMU
PAGEFLAG(Mlocked, mlocked, PF_NO_TAIL)
	__CLEARPAGEFLAG(Mlocked, mlocked, PF_NO_TAIL)
	TESTSCFLAG(Mlocked, mlocked, PF_NO_TAIL)
#else
PAGEFLAG_FALSE(Mlocked) __CLEARPAGEFLAG_NOOP(Mlocked)
	TESTSCFLAG_FALSE(Mlocked)
#endif

#ifdef CONFIG_ARCH_USES_PG_UNCACHED
PAGEFLAG(Uncached, uncached, PF_NO_COMPOUND)
#else
PAGEFLAG_FALSE(Uncached)
#endif

#ifdef CONFIG_MEMORY_FAILURE
PAGEFLAG(HWPoison, hwpoison, PF_ANY)
TESTSCFLAG(HWPoison, hwpoison, PF_ANY)
#define __PG_HWPOISON (1UL << PG_hwpoison)
extern bool take_page_off_buddy(struct page *page);
#else
PAGEFLAG_FALSE(HWPoison)
#define __PG_HWPOISON 0
#endif

#if defined(CONFIG_PAGE_IDLE_FLAG) && defined(CONFIG_64BIT)
TESTPAGEFLAG(Young, young, PF_ANY)
SETPAGEFLAG(Young, young, PF_ANY)
TESTCLEARFLAG(Young, young, PF_ANY)
PAGEFLAG(Idle, idle, PF_ANY)
#endif

#ifdef CONFIG_KASAN_HW_TAGS
PAGEFLAG(SkipKASanPoison, skip_kasan_poison, PF_HEAD)
#else
PAGEFLAG_FALSE(SkipKASanPoison)
#endif

/*
 * PageReported() is used to track reported free pages within the Buddy
 * allocator. We can use the non-atomic version of the test and set
 * operations as both should be shielded with the zone lock to prevent
 * any possible races on the setting or clearing of the bit.
 */
__PAGEFLAG(Reported, reported, PF_NO_COMPOUND)

/*
 * On an anonymous page mapped into a user virtual memory area,
 * page->mapping points to its anon_vma, not to a struct address_space;
 * with the PAGE_MAPPING_ANON bit set to distinguish it.  See rmap.h.
 *
 * On an anonymous page in a VM_MERGEABLE area, if CONFIG_KSM is enabled,
 * the PAGE_MAPPING_MOVABLE bit may be set along with the PAGE_MAPPING_ANON
 * bit; and then page->mapping points, not to an anon_vma, but to a private
 * structure which KSM associates with that merged page.  See ksm.h.
 *
 * PAGE_MAPPING_KSM without PAGE_MAPPING_ANON is used for non-lru movable
 * pages; in that case page->mapping points to a struct address_space.
 *
 * Please note that, confusingly, "page_mapping" refers to the inode
 * address_space which maps the page from disk; whereas "page_mapped"
 * refers to user virtual address space into which the page is mapped.
 */
#define PAGE_MAPPING_ANON	0x1
#define PAGE_MAPPING_MOVABLE	0x2
#define PAGE_MAPPING_KSM	(PAGE_MAPPING_ANON | PAGE_MAPPING_MOVABLE)
#define PAGE_MAPPING_FLAGS	(PAGE_MAPPING_ANON | PAGE_MAPPING_MOVABLE)
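/*
 * Illustrative sketch (not part of the original header): because the low two
 * bits of page->mapping are used as PAGE_MAPPING_* tags, consumers mask them
 * off before dereferencing the pointer.  A hypothetical helper, in the spirit
 * of the untagging done by the rmap/KSM code:
 *
 *	static inline void *example_untagged_mapping(struct page *page)
 *	{
 *		unsigned long mapping = (unsigned long)page->mapping;
 *
 *		return (void *)(mapping & ~PAGE_MAPPING_FLAGS);
 *	}
 */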

static __always_inline int PageMappingFlags(struct page *page)
{
	return ((unsigned long)page->mapping & PAGE_MAPPING_FLAGS) != 0;
}

static __always_inline int PageAnon(struct page *page)
{
	page = compound_head(page);
	return ((unsigned long)page->mapping & PAGE_MAPPING_ANON) != 0;
}

static __always_inline int __PageMovable(struct page *page)
{
	return ((unsigned long)page->mapping & PAGE_MAPPING_FLAGS) ==
				PAGE_MAPPING_MOVABLE;
}

#ifdef CONFIG_KSM
/*
 * A KSM page is one of those write-protected "shared pages" or "merged pages"
 * which KSM maps into multiple mms, wherever identical anonymous page content
 * is found in VM_MERGEABLE vmas.  It's a PageAnon page, pointing not to any
 * anon_vma, but to that page's node of the stable tree.
 */
static __always_inline int PageKsm(struct page *page)
{
	page = compound_head(page);
	return ((unsigned long)page->mapping & PAGE_MAPPING_FLAGS) ==
				PAGE_MAPPING_KSM;
}
#else
TESTPAGEFLAG_FALSE(Ksm)
#endif

u64 stable_page_flags(struct page *page);

static inline int PageUptodate(struct page *page)
{
	int ret;
	page = compound_head(page);
	ret = test_bit(PG_uptodate, &(page)->flags);
	/*
	 * Must ensure that the data we read out of the page is loaded
	 * _after_ we've loaded page->flags to check for PageUptodate.
	 * We can skip the barrier if the page is not uptodate, because
	 * we wouldn't be reading anything from it.
	 *
	 * See SetPageUptodate() for the other side of the story.
	 */
	if (ret)
		smp_rmb();

	return ret;
}

static __always_inline void __SetPageUptodate(struct page *page)
{
	VM_BUG_ON_PAGE(PageTail(page), page);
	smp_wmb();
	__set_bit(PG_uptodate, &page->flags);
}

static __always_inline void SetPageUptodate(struct page *page)
{
	VM_BUG_ON_PAGE(PageTail(page), page);
	/*
	 * Memory barrier must be issued before setting the PG_uptodate bit,
	 * so that all previous stores issued in order to bring the page
	 * uptodate are actually visible before PageUptodate becomes true.
	 */
	smp_wmb();
	set_bit(PG_uptodate, &page->flags);
}

CLEARPAGEFLAG(Uptodate, uptodate, PF_NO_TAIL)
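/*
 * Illustrative pairing (not part of the original header): the smp_wmb() in
 * SetPageUptodate() and the smp_rmb() in PageUptodate() form a producer /
 * consumer pair.  A hedged sketch of how a writer and a reader line up
 * (example_* function names are hypothetical):
 *
 *	// writer: fill the page, then publish it
 *	example_fill_page_contents(page);
 *	SetPageUptodate(page);			// smp_wmb() before the bit is set
 *
 *	// reader: once the bit is observed, the data is safe to read
 *	if (PageUptodate(page))			// smp_rmb() after the bit is seen
 *		example_consume_page_contents(page);
 */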

int test_clear_page_writeback(struct page *page);
int __test_set_page_writeback(struct page *page, bool keep_write);

#define test_set_page_writeback(page)			\
	__test_set_page_writeback(page, false)
#define test_set_page_writeback_keepwrite(page)	\
	__test_set_page_writeback(page, true)

static inline void set_page_writeback(struct page *page)
{
	test_set_page_writeback(page);
}

static inline void set_page_writeback_keepwrite(struct page *page)
{
	test_set_page_writeback_keepwrite(page);
}

__PAGEFLAG(Head, head, PF_ANY) CLEARPAGEFLAG(Head, head, PF_ANY)

static __always_inline void set_compound_head(struct page *page, struct page *head)
{
	WRITE_ONCE(page->compound_head, (unsigned long)head + 1);
}

static __always_inline void clear_compound_head(struct page *page)
{
	WRITE_ONCE(page->compound_head, 0);
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline void ClearPageCompound(struct page *page)
{
	BUG_ON(!PageHead(page));
	ClearPageHead(page);
}
#endif

#define PG_head_mask ((1UL << PG_head))

#ifdef CONFIG_HUGETLB_PAGE
int PageHuge(struct page *page);
int PageHeadHuge(struct page *page);
bool page_huge_active(struct page *page);
#else
TESTPAGEFLAG_FALSE(Huge)
TESTPAGEFLAG_FALSE(HeadHuge)

static inline bool page_huge_active(struct page *page)
{
	return 0;
}
#endif

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/*
 * PageHuge() only returns true for hugetlbfs pages, but not for
 * normal or transparent huge pages.
 *
 * PageTransHuge() returns true for both transparent huge and
 * hugetlbfs pages, but not normal pages. PageTransHuge() can only be
 * called in the core VM paths where hugetlbfs pages can't exist.
 */
static inline int PageTransHuge(struct page *page)
{
	VM_BUG_ON_PAGE(PageTail(page), page);
	return PageHead(page);
}

/*
 * PageTransCompound returns true for both transparent huge pages
 * and hugetlbfs pages, so it should only be called when it's known
 * that hugetlbfs pages aren't involved.
 */
static inline int PageTransCompound(struct page *page)
{
	return PageCompound(page);
}

/*
 * PageTransCompoundMap is the same as PageTransCompound, but it also
 * guarantees the primary MMU has the entire compound page mapped
 * through pmd_trans_huge, which in turn guarantees the secondary MMUs
 * can also map the entire compound page. This allows the secondary
 * MMUs to call get_user_pages() only once for each compound page and
 * to immediately map the entire compound page with a single secondary
 * MMU fault. If there will be a pmd split later, the secondary MMUs
 * will get an update through the MMU notifier invalidation through
 * split_huge_pmd().
 *
 * Unlike PageTransCompound, this is safe to be called only while
 * split_huge_pmd() cannot run from under us, like if protected by the
 * MMU notifier, otherwise it may result in page->_mapcount check false
 * positives.
 *
 * We have to treat page cache THP differently since every subpage of it
 * would get _mapcount inc'ed once it is PMD mapped.  But it may also be
 * PTE mapped in the current process, so compare the subpage's _mapcount
 * to compound_mapcount to filter out the PTE mapped case.
 */
static inline int PageTransCompoundMap(struct page *page)
{
	struct page *head;

	if (!PageTransCompound(page))
		return 0;

	if (PageAnon(page))
		return atomic_read(&page->_mapcount) < 0;

	head = compound_head(page);
	/* File THP is PMD mapped and not PTE mapped */
	return atomic_read(&page->_mapcount) ==
	       atomic_read(compound_mapcount_ptr(head));
}

/*
 * PageTransTail returns true for both transparent huge pages
 * and hugetlbfs pages, so it should only be called when it's known
 * that hugetlbfs pages aren't involved.
 */
static inline int PageTransTail(struct page *page)
{
	return PageTail(page);
}

/*
 * PageDoubleMap indicates that the compound page is mapped with PTEs as well
 * as PMDs.
 *
 * This is required for optimization of rmap operations for THP: we can postpone
 * per small page mapcount accounting (and its overhead from atomic operations)
 * until the first PMD split.
 *
 * For a PageDoubleMap page, ->_mapcount in all sub-pages is offset up by one.
 * This reference will go away with the last compound_mapcount.
 *
 * See also __split_huge_pmd_locked() and page_remove_anon_compound_rmap().
 */
PAGEFLAG(DoubleMap, double_map, PF_SECOND)
	TESTSCFLAG(DoubleMap, double_map, PF_SECOND)
#else
TESTPAGEFLAG_FALSE(TransHuge)
TESTPAGEFLAG_FALSE(TransCompound)
TESTPAGEFLAG_FALSE(TransCompoundMap)
TESTPAGEFLAG_FALSE(TransTail)
PAGEFLAG_FALSE(DoubleMap)
	TESTSCFLAG_FALSE(DoubleMap)
#endif

/*
 * For pages that are never mapped to userspace (and aren't PageSlab),
 * page_type may be used.  Because it is initialised to -1, we invert the
 * sense of the bit, so __SetPageFoo *clears* the bit used for PageFoo, and
 * __ClearPageFoo *sets* the bit used for PageFoo.  We reserve a few high and
 * low bits so that an underflow or overflow of page_mapcount() won't be
 * mistaken for a page type value.
 */

#define PAGE_TYPE_BASE	0xf0000000
/* Reserve		0x0000007f to catch underflows of page_mapcount */
#define PAGE_MAPCOUNT_RESERVE	-128
#define PG_buddy	0x00000080
#define PG_offline	0x00000100
#define PG_kmemcg	0x00000200
#define PG_table	0x00000400
#define PG_guard	0x00000800

#define PageType(page, flag)						\
	((page->page_type & (PAGE_TYPE_BASE | flag)) == PAGE_TYPE_BASE)

static inline int page_has_type(struct page *page)
{
	return (int)page->page_type < PAGE_MAPCOUNT_RESERVE;
}
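/*
 * Worked example (not part of the original header): page_type starts out as
 * -1 (0xffffffff).  __SetPageBuddy() clears PG_buddy, leaving 0xffffff7f.
 * PageType(page, PG_buddy) then evaluates
 *
 *	(0xffffff7f & (PAGE_TYPE_BASE | PG_buddy)) == PAGE_TYPE_BASE
 *	(0xffffff7f & 0xf0000080)                  == 0xf0000000	// true
 *
 * and page_has_type() sees (int)0xffffff7f == -129, which is below
 * PAGE_MAPCOUNT_RESERVE (-128), so the value cannot be confused with a
 * legitimate _mapcount, which shares this word of struct page.
 */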

#define PAGE_TYPE_OPS(uname, lname)					\
static __always_inline int Page##uname(struct page *page)		\
{									\
	return PageType(page, PG_##lname);				\
}									\
static __always_inline void __SetPage##uname(struct page *page)	\
{									\
	VM_BUG_ON_PAGE(!PageType(page, 0), page);			\
	page->page_type &= ~PG_##lname;					\
}									\
static __always_inline void __ClearPage##uname(struct page *page)	\
{									\
	VM_BUG_ON_PAGE(!Page##uname(page), page);			\
	page->page_type |= PG_##lname;					\
}

/*
 * PageBuddy() indicates that the page is free and in the buddy system
 * (see mm/page_alloc.c).
 */
PAGE_TYPE_OPS(Buddy, buddy)

/*
 * PageOffline() indicates that the page is logically offline although the
 * containing section is online. (e.g. inflated in a balloon driver or
 * not onlined when onlining the section).
 * The content of these pages is effectively stale. Such pages should not
 * be touched (read/write/dump/save) except by their owner.
 *
 * If a driver wants to allow offlining unmovable PageOffline() pages without
 * putting them back to the buddy, it can do so via the memory notifier by
 * decrementing the reference count in MEM_GOING_OFFLINE and incrementing the
 * reference count in MEM_CANCEL_OFFLINE. When offlining, the PageOffline()
 * pages (now with a reference count of zero) are treated like free pages,
 * allowing the containing memory block to get offlined. A driver that
 * relies on this feature is aware that re-onlining the memory block will
 * require the pages to be set PageOffline() again instead of being given to
 * the buddy via online_page_callback_t.
 */
PAGE_TYPE_OPS(Offline, offline)

/*
 * If kmemcg is enabled, the buddy allocator will set PageKmemcg() on
 * pages allocated with __GFP_ACCOUNT. It gets cleared on page free.
 */
PAGE_TYPE_OPS(Kmemcg, kmemcg)

/*
 * Marks pages in use as page tables.
 */
PAGE_TYPE_OPS(Table, table)

/*
 * Marks guardpages used with debug_pagealloc.
 */
PAGE_TYPE_OPS(Guard, guard)
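/*
 * Illustrative usage (not part of the original header): a page type is flipped
 * with the non-atomic __SetPageFoo()/__ClearPageFoo() helpers while the caller
 * is the sole owner of the page.  A minimal sketch for page table pages, in
 * the spirit of pgtable_pte_page_ctor()/dtor() in <linux/mm.h>, which also
 * handle the ptlock and statistics (example_* names are hypothetical):
 *
 *	static inline void example_mark_page_table(struct page *page)
 *	{
 *		__SetPageTable(page);	// page is exclusively owned here
 *	}
 *
 *	static inline void example_unmark_page_table(struct page *page)
 *	{
 *		__ClearPageTable(page);	// before handing the page back
 *	}
 */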

extern bool is_free_buddy_page(struct page *page);

PAGEFLAG(Isolated, isolated, PF_ANY);

/*
 * If network-based swap is enabled, sl*b must keep track of whether pages
 * were allocated from pfmemalloc reserves.
 */
static inline int PageSlabPfmemalloc(struct page *page)
{
	VM_BUG_ON_PAGE(!PageSlab(page), page);
	return PageActive(page);
}

static inline void SetPageSlabPfmemalloc(struct page *page)
{
	VM_BUG_ON_PAGE(!PageSlab(page), page);
	SetPageActive(page);
}

static inline void __ClearPageSlabPfmemalloc(struct page *page)
{
	VM_BUG_ON_PAGE(!PageSlab(page), page);
	__ClearPageActive(page);
}

static inline void ClearPageSlabPfmemalloc(struct page *page)
{
	VM_BUG_ON_PAGE(!PageSlab(page), page);
	ClearPageActive(page);
}

#ifdef CONFIG_MMU
#define __PG_MLOCKED		(1UL << PG_mlocked)
#else
#define __PG_MLOCKED		0
#endif

/*
 * Flags checked when a page is freed.  Pages being freed should not have
 * these flags set.  If they are, there is a problem.
 */
#define PAGE_FLAGS_CHECK_AT_FREE				\
	(1UL << PG_lru		| 1UL << PG_locked	|	\
	 1UL << PG_private	| 1UL << PG_private_2	|	\
	 1UL << PG_writeback	| 1UL << PG_reserved	|	\
	 1UL << PG_slab		| 1UL << PG_active 	|	\
	 1UL << PG_unevictable	| __PG_MLOCKED)
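/*
 * Illustrative check (not part of the original header): the free path in
 * mm/page_alloc.c compares page->flags against this mask, in the spirit of:
 *
 *	if (unlikely(page->flags & PAGE_FLAGS_CHECK_AT_FREE))
 *		;	// report a bad page instead of recycling it
 *
 * (sketch only; the real check also validates mapcount, mapping and refcount)
 */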

/*
 * Flags checked when a page is prepped for return by the page allocator.
 * Pages being prepped should not have these flags set.  If they are set,
 * there has been a kernel bug or struct page corruption.
 *
 * __PG_HWPOISON is exceptional because it needs to be kept beyond a page's
 * alloc-free cycle to prevent the page from being reused.
 */
#define PAGE_FLAGS_CHECK_AT_PREP	\
	(((1UL << NR_PAGEFLAGS) - 1) & ~__PG_HWPOISON)

#define PAGE_FLAGS_PRIVATE				\
	(1UL << PG_private | 1UL << PG_private_2)
/**
 * page_has_private - Determine if page has private stuff
 * @page: The page to be checked
 *
 * Determine if a page has private stuff, indicating that release routines
 * should be invoked upon it.
 */
static inline int page_has_private(struct page *page)
{
	return !!(page->flags & PAGE_FLAGS_PRIVATE);
}

#undef PF_ANY
#undef PF_HEAD
#undef PF_ONLY_HEAD
#undef PF_NO_TAIL
#undef PF_NO_COMPOUND
#undef PF_SECOND
#endif /* !__GENERATING_BOUNDS_H */

#endif	/* PAGE_FLAGS_H */