// SPDX-License-Identifier: GPL-2.0
#include <linux/debugfs.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/memblock.h>
#include <linux/stacktrace.h>
#include <linux/page_owner.h>
#include <linux/jump_label.h>
#include <linux/migrate.h>
#include <linux/stackdepot.h>
#include <linux/seq_file.h>
#include <linux/sched/clock.h>

#include "internal.h"

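/*
 * page_owner records the stack trace, timestamp, pid and gfp mask of the
 * allocation (and of the last free) of every page. It is built with
 * CONFIG_PAGE_OWNER and activated by booting with the "page_owner=on"
 * kernel parameter; the collected records are read back through the
 * <debugfs>/page_owner file created in pageowner_init() below.
 */
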
/*
 * TODO: teach PAGE_OWNER_STACK_DEPTH (__dump_page_owner and save_stack)
 * to use off-stack temporary storage
 */
#define PAGE_OWNER_STACK_DEPTH (16)

struct page_owner {
	unsigned short order;			/* allocation order */
	short last_migrate_reason;		/* MR_* reason, or -1 */
	gfp_t gfp_mask;				/* gfp flags of the allocation */
	depot_stack_handle_t handle;		/* allocation stack trace */
	depot_stack_handle_t free_handle;	/* last free stack trace */
	u64 ts_nsec;				/* allocation timestamp */
	u64 free_ts_nsec;			/* last free timestamp */
	pid_t pid;				/* pid of the allocating task */
};

bool page_owner_enabled;
DEFINE_STATIC_KEY_FALSE(page_owner_inited);

static depot_stack_handle_t dummy_handle;
static depot_stack_handle_t failure_handle;
static depot_stack_handle_t early_handle;
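
/*
 * Three fixed sentinel stack handles, registered once at init time:
 * dummy_handle stands in when save_stack() detects it has been entered
 * recursively from stackdepot's own allocation, failure_handle marks
 * records whose stack_depot_save() failed, and early_handle tags pages
 * that were already allocated before page_owner was initialized.
 */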

static void init_early_allocated_pages(void);

static int __init early_page_owner_param(char *buf)
{
	if (!buf)
		return -EINVAL;

	if (strcmp(buf, "on") == 0)
		page_owner_enabled = true;

	return 0;
}
early_param("page_owner", early_page_owner_param);

static bool need_page_owner(void)
{
	return page_owner_enabled;
}

static __always_inline depot_stack_handle_t create_dummy_stack(void)
{
	unsigned long entries[4];
	unsigned int nr_entries;

	nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 0);
	return stack_depot_save(entries, nr_entries, GFP_KERNEL);
}

static noinline void register_dummy_stack(void)
{
	dummy_handle = create_dummy_stack();
}

static noinline void register_failure_stack(void)
{
	failure_handle = create_dummy_stack();
}

static noinline void register_early_stack(void)
{
	early_handle = create_dummy_stack();
}

static void init_page_owner(void)
{
	if (!page_owner_enabled)
		return;

	register_dummy_stack();
	register_failure_stack();
	register_early_stack();
	static_branch_enable(&page_owner_inited);
	init_early_allocated_pages();
}

struct page_ext_operations page_owner_ops = {
	.size = sizeof(struct page_owner),
	.need = need_page_owner,
	.init = init_page_owner,
};
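
/*
 * The page_ext core reserves .size bytes of per-page storage for every
 * client whose .need callback returns true, and records where this
 * client's data starts in page_owner_ops.offset; get_page_owner() below
 * simply applies that offset to a page's struct page_ext.
 */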

struct page_owner *get_page_owner(struct page_ext *page_ext)
{
	return (void *)page_ext + page_owner_ops.offset;
}
EXPORT_SYMBOL_GPL(get_page_owner);

depot_stack_handle_t get_page_owner_handle(struct page_ext *page_ext, unsigned long pfn)
{
	struct page_owner *page_owner;
	depot_stack_handle_t handle;

	if (!page_owner_enabled)
		return 0;

	page_owner = get_page_owner(page_ext);

	/* skip handle for tail pages of higher order allocations */
	if (!IS_ALIGNED(pfn, 1 << page_owner->order))
		return 0;

	handle = READ_ONCE(page_owner->handle);
	return handle;
}
EXPORT_SYMBOL_GPL(get_page_owner_handle);

static inline bool check_recursive_alloc(unsigned long *entries,
					 unsigned int nr_entries,
					 unsigned long ip)
{
	unsigned int i;

	for (i = 0; i < nr_entries; i++) {
		if (entries[i] == ip)
			return true;
	}
	return false;
}

static noinline depot_stack_handle_t save_stack(gfp_t flags)
{
	unsigned long entries[PAGE_OWNER_STACK_DEPTH];
	depot_stack_handle_t handle;
	unsigned int nr_entries;

	nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 2);

	/*
	 * We need to check recursion here because our request to
	 * stackdepot could trigger memory allocation to save a new
	 * entry. That allocation would reach here and call
	 * stack_depot_save() again if we don't catch it. Since there
	 * would still not be enough memory in stackdepot, it would try
	 * to allocate memory again and loop forever.
	 */
	if (check_recursive_alloc(entries, nr_entries, _RET_IP_))
		return dummy_handle;

	handle = stack_depot_save(entries, nr_entries, flags);
	if (!handle)
		handle = failure_handle;

	return handle;
}

void __reset_page_owner(struct page *page, unsigned int order)
{
	int i;
	struct page_ext *page_ext;
	depot_stack_handle_t handle = 0;
	struct page_owner *page_owner;
	u64 free_ts_nsec = local_clock();

	handle = save_stack(GFP_NOWAIT | __GFP_NOWARN);

	page_ext = page_ext_get(page);
	if (unlikely(!page_ext))
		return;
	for (i = 0; i < (1 << order); i++) {
		__clear_bit(PAGE_EXT_OWNER_ALLOCATED, &page_ext->flags);
		page_owner = get_page_owner(page_ext);
		page_owner->free_handle = handle;
		page_owner->free_ts_nsec = free_ts_nsec;
		page_ext = page_ext_next(page_ext);
	}
	page_ext_put(page_ext);
}

static inline void __set_page_owner_handle(struct page *page,
	struct page_ext *page_ext, depot_stack_handle_t handle,
	unsigned int order, gfp_t gfp_mask)
{
	struct page_owner *page_owner;
	int i;

	for (i = 0; i < (1 << order); i++) {
		page_owner = get_page_owner(page_ext);
		page_owner->handle = handle;
		page_owner->order = order;
		page_owner->gfp_mask = gfp_mask;
		page_owner->last_migrate_reason = -1;
		page_owner->pid = current->pid;
		page_owner->ts_nsec = local_clock();
		__set_bit(PAGE_EXT_OWNER, &page_ext->flags);
		__set_bit(PAGE_EXT_OWNER_ALLOCATED, &page_ext->flags);

		page_ext = page_ext_next(page_ext);
	}
}

noinline void __set_page_owner(struct page *page, unsigned int order,
					gfp_t gfp_mask)
{
	struct page_ext *page_ext;
	depot_stack_handle_t handle;

	handle = save_stack(gfp_mask);

	page_ext = page_ext_get(page);
	if (unlikely(!page_ext))
		return;
	__set_page_owner_handle(page, page_ext, handle, order, gfp_mask);
	page_ext_put(page_ext);
}

void __set_page_owner_migrate_reason(struct page *page, int reason)
{
	struct page_ext *page_ext = page_ext_get(page);
	struct page_owner *page_owner;

	if (unlikely(!page_ext))
		return;

	page_owner = get_page_owner(page_ext);
	page_owner->last_migrate_reason = reason;
	page_ext_put(page_ext);
}

void __split_page_owner(struct page *page, unsigned int nr)
{
	int i;
	struct page_ext *page_ext = page_ext_get(page);
	struct page_owner *page_owner;

	if (unlikely(!page_ext))
		return;

	for (i = 0; i < nr; i++) {
		page_owner = get_page_owner(page_ext);
		page_owner->order = 0;
		page_ext = page_ext_next(page_ext);
	}
	page_ext_put(page_ext);
}

void __copy_page_owner(struct page *oldpage, struct page *newpage)
{
	struct page_ext *old_ext;
	struct page_ext *new_ext;
	struct page_owner *old_page_owner, *new_page_owner;

	old_ext = page_ext_get(oldpage);
	if (unlikely(!old_ext))
		return;

	new_ext = page_ext_get(newpage);
	if (unlikely(!new_ext)) {
		page_ext_put(old_ext);
		return;
	}

	old_page_owner = get_page_owner(old_ext);
	new_page_owner = get_page_owner(new_ext);
	new_page_owner->order = old_page_owner->order;
	new_page_owner->gfp_mask = old_page_owner->gfp_mask;
	new_page_owner->last_migrate_reason =
		old_page_owner->last_migrate_reason;
	new_page_owner->handle = old_page_owner->handle;
	new_page_owner->pid = old_page_owner->pid;
	new_page_owner->ts_nsec = old_page_owner->ts_nsec;
	new_page_owner->free_ts_nsec = old_page_owner->free_ts_nsec;

	/*
	 * We don't clear the bit on the oldpage as it's going to be freed
	 * after migration. Until then, the info can be useful in case of
	 * a bug, and the overall stats will be off a bit only temporarily.
	 * Also, migrate_misplaced_transhuge_page() can still fail the
	 * migration and then we want the oldpage to retain the info. But
	 * in that case we also don't need to explicitly clear the info from
	 * the new page, which will be freed.
	 */
	__set_bit(PAGE_EXT_OWNER, &new_ext->flags);
	__set_bit(PAGE_EXT_OWNER_ALLOCATED, &new_ext->flags);
	page_ext_put(new_ext);
	page_ext_put(old_ext);
}

void pagetypeinfo_showmixedcount_print(struct seq_file *m,
				       pg_data_t *pgdat, struct zone *zone)
{
	struct page *page;
	struct page_ext *page_ext;
	struct page_owner *page_owner;
	unsigned long pfn = zone->zone_start_pfn, block_end_pfn;
	unsigned long end_pfn = pfn + zone->spanned_pages;
	unsigned long count[MIGRATE_TYPES] = { 0, };
	int pageblock_mt, page_mt;
	int i;

	/* Scan block by block. First and last block may be incomplete */
	pfn = zone->zone_start_pfn;

	/*
	 * Walk the zone in pageblock_nr_pages steps. If a page block spans
	 * a zone boundary, it will be double counted between zones. This does
	 * not matter as the mixed block count will still be correct
	 */
	for (; pfn < end_pfn; ) {
		page = pfn_to_online_page(pfn);
		if (!page) {
			pfn = ALIGN(pfn + 1, MAX_ORDER_NR_PAGES);
			continue;
		}

		block_end_pfn = ALIGN(pfn + 1, pageblock_nr_pages);
		block_end_pfn = min(block_end_pfn, end_pfn);

		pageblock_mt = get_pageblock_migratetype(page);

		for (; pfn < block_end_pfn; pfn++) {
			if (!pfn_valid_within(pfn))
				continue;

			/* The pageblock is online, no need to recheck. */
			page = pfn_to_page(pfn);

			if (page_zone(page) != zone)
				continue;

			if (PageBuddy(page)) {
				unsigned long freepage_order;

				freepage_order = buddy_order_unsafe(page);
				if (freepage_order < MAX_ORDER)
					pfn += (1UL << freepage_order) - 1;
				continue;
			}

			if (PageReserved(page))
				continue;

			page_ext = page_ext_get(page);
			if (unlikely(!page_ext))
				continue;

			if (!test_bit(PAGE_EXT_OWNER_ALLOCATED, &page_ext->flags))
				goto ext_put_continue;

			page_owner = get_page_owner(page_ext);
			page_mt = gfp_migratetype(page_owner->gfp_mask);
			if (pageblock_mt != page_mt) {
				if (is_migrate_cma(pageblock_mt))
					count[MIGRATE_MOVABLE]++;
				else
					count[pageblock_mt]++;

				pfn = block_end_pfn;
				page_ext_put(page_ext);
				break;
			}
			pfn += (1UL << page_owner->order) - 1;
ext_put_continue:
			page_ext_put(page_ext);
		}
	}

	/* Print counts */
	seq_printf(m, "Node %d, zone %8s ", pgdat->node_id, zone->name);
	for (i = 0; i < MIGRATE_TYPES; i++)
		seq_printf(m, "%12lu ", count[i]);
	seq_putc(m, '\n');
}
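
/*
 * Each row emitted above has the form (one count column per migratetype):
 *
 *	Node <nid>, zone <name> <count0> <count1> ...
 *
 * where a pageblock is counted once, under the block's own migratetype,
 * as soon as it is found to hold a page that was allocated with a
 * different migratetype (CMA blocks are folded into MIGRATE_MOVABLE).
 */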

static ssize_t
print_page_owner(char __user *buf, size_t count, unsigned long pfn,
		struct page *page, struct page_owner *page_owner,
		depot_stack_handle_t handle)
{
	int ret, pageblock_mt, page_mt;
	unsigned long *entries;
	unsigned int nr_entries;
	char *kbuf;

	count = min_t(size_t, count, PAGE_SIZE);
	kbuf = kmalloc(count, GFP_KERNEL);
	if (!kbuf)
		return -ENOMEM;

	ret = snprintf(kbuf, count,
			"Page allocated via order %u, mask %#x(%pGg), pid %d, ts %llu ns, free_ts %llu ns\n",
			page_owner->order, page_owner->gfp_mask,
			&page_owner->gfp_mask, page_owner->pid,
			page_owner->ts_nsec, page_owner->free_ts_nsec);

	if (ret >= count)
		goto err;

	/* Print information relevant to grouping pages by mobility */
	pageblock_mt = get_pageblock_migratetype(page);
	page_mt = gfp_migratetype(page_owner->gfp_mask);
	ret += snprintf(kbuf + ret, count - ret,
			"PFN %lu type %s Block %lu type %s Flags %#lx(%pGp)\n",
			pfn,
			migratetype_names[page_mt],
			pfn >> pageblock_order,
			migratetype_names[pageblock_mt],
			page->flags, &page->flags);

	if (ret >= count)
		goto err;

	nr_entries = stack_depot_fetch(handle, &entries);
	ret += stack_trace_snprint(kbuf + ret, count - ret, entries, nr_entries, 0);
	if (ret >= count)
		goto err;

	if (page_owner->last_migrate_reason != -1) {
		ret += snprintf(kbuf + ret, count - ret,
			"Page has been migrated, last migrate reason: %s\n",
			migrate_reason_names[page_owner->last_migrate_reason]);
		if (ret >= count)
			goto err;
	}

	ret += snprintf(kbuf + ret, count - ret, "\n");
	if (ret >= count)
		goto err;

	if (copy_to_user(buf, kbuf, ret))
		ret = -EFAULT;

	kfree(kbuf);
	return ret;

err:
	kfree(kbuf);
	return -ENOMEM;
}
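
/*
 * A record produced by print_page_owner() is, schematically:
 *
 *	Page allocated via order N, mask ...(<gfp flags>), pid P, ts T ns, free_ts F ns
 *	PFN <pfn> type <mt> Block <block> type <mt> Flags ...(<page flags>)
 *	 <allocation stack trace, one frame per line>
 *	[Page has been migrated, last migrate reason: <reason>]
 *
 * followed by a blank line that separates it from the next record.
 */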

void __dump_page_owner(struct page *page)
{
	struct page_ext *page_ext = page_ext_get(page);
	struct page_owner *page_owner;
	depot_stack_handle_t handle;
	unsigned long *entries;
	unsigned int nr_entries;
	gfp_t gfp_mask;
	int mt;

	if (unlikely(!page_ext)) {
		pr_alert("There is no page extension available.\n");
		return;
	}

	page_owner = get_page_owner(page_ext);
	gfp_mask = page_owner->gfp_mask;
	mt = gfp_migratetype(gfp_mask);

	if (!test_bit(PAGE_EXT_OWNER, &page_ext->flags)) {
		pr_alert("page_owner info is not present (never set?)\n");
		page_ext_put(page_ext);
		return;
	}

	if (test_bit(PAGE_EXT_OWNER_ALLOCATED, &page_ext->flags))
		pr_alert("page_owner tracks the page as allocated\n");
	else
		pr_alert("page_owner tracks the page as freed\n");

	pr_alert("page last allocated via order %u, migratetype %s, gfp_mask %#x(%pGg), pid %d, ts %llu, free_ts %llu\n",
		 page_owner->order, migratetype_names[mt], gfp_mask, &gfp_mask,
		 page_owner->pid, page_owner->ts_nsec, page_owner->free_ts_nsec);

	handle = READ_ONCE(page_owner->handle);
	if (!handle) {
		pr_alert("page_owner allocation stack trace missing\n");
	} else {
		nr_entries = stack_depot_fetch(handle, &entries);
		stack_trace_print(entries, nr_entries, 0);
	}

	handle = READ_ONCE(page_owner->free_handle);
	if (!handle) {
		pr_alert("page_owner free stack trace missing\n");
	} else {
		nr_entries = stack_depot_fetch(handle, &entries);
		pr_alert("page last free stack trace:\n");
		stack_trace_print(entries, nr_entries, 0);
	}

	if (page_owner->last_migrate_reason != -1)
		pr_alert("page has been migrated, last migrate reason: %s\n",
			 migrate_reason_names[page_owner->last_migrate_reason]);
	page_ext_put(page_ext);
}

static ssize_t
read_page_owner(struct file *file, char __user *buf, size_t count, loff_t *ppos)
{
	unsigned long pfn;
	struct page *page;
	struct page_ext *page_ext;
	struct page_owner *page_owner;
	depot_stack_handle_t handle;

	if (!static_branch_unlikely(&page_owner_inited))
		return -EINVAL;

	page = NULL;
	pfn = min_low_pfn + *ppos;

	/* Find a valid PFN or the start of a MAX_ORDER_NR_PAGES area */
	while (!pfn_valid(pfn) && (pfn & (MAX_ORDER_NR_PAGES - 1)) != 0)
		pfn++;

	drain_all_pages(NULL);

	/* Find an allocated page */
	for (; pfn < max_pfn; pfn++) {
		/*
		 * This temporary copy of the page_owner data is required
		 * so that we don't trigger context switches, through
		 * copy_to_user() or a GFP_KERNEL allocation, while still
		 * holding the RCU lock taken by page_ext_get().
		 */
		struct page_owner page_owner_tmp;

		/*
		 * If the new page is in a new MAX_ORDER_NR_PAGES area,
		 * validate the area as existing, skip it if not
		 */
		if ((pfn & (MAX_ORDER_NR_PAGES - 1)) == 0 && !pfn_valid(pfn)) {
			pfn += MAX_ORDER_NR_PAGES - 1;
			continue;
		}

		/* Check for holes within a MAX_ORDER area */
		if (!pfn_valid_within(pfn))
			continue;

		page = pfn_to_page(pfn);
		if (PageBuddy(page)) {
			unsigned long freepage_order = buddy_order_unsafe(page);

			if (freepage_order < MAX_ORDER)
				pfn += (1UL << freepage_order) - 1;
			continue;
		}

		page_ext = page_ext_get(page);
		if (unlikely(!page_ext))
			continue;

		/*
		 * Some pages could be missed by concurrent allocation or free,
		 * because we don't hold the zone lock.
		 */
		if (!test_bit(PAGE_EXT_OWNER, &page_ext->flags))
			goto ext_put_continue;

		/*
		 * Although we do have the info about past allocation of free
		 * pages, it's not relevant for current memory usage.
		 */
		if (!test_bit(PAGE_EXT_OWNER_ALLOCATED, &page_ext->flags))
			goto ext_put_continue;

		page_owner = get_page_owner(page_ext);

		/*
		 * Don't print "tail" pages of high-order allocations as that
		 * would inflate the stats.
		 */
		if (!IS_ALIGNED(pfn, 1 << page_owner->order))
			goto ext_put_continue;

		/*
		 * Access to page_ext->handle isn't synchronous so we should
		 * be careful to access it.
		 */
		handle = READ_ONCE(page_owner->handle);
		if (!handle)
			goto ext_put_continue;

		/* Record the next PFN to read in the file offset */
		*ppos = (pfn - min_low_pfn) + 1;

		page_owner_tmp = *page_owner;
		page_ext_put(page_ext);
		return print_page_owner(buf, count, pfn, page,
				&page_owner_tmp, handle);
ext_put_continue:
		page_ext_put(page_ext);
	}

	return 0;
}
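
/*
 * In the usual workflow the whole file is drained in one go, e.g.:
 *
 *	cat /sys/kernel/debug/page_owner > page_owner_full.txt
 *
 * Each read() returns one record; *ppos carries the next PFN to scan
 * rather than a byte offset, so the file is not seekable in the
 * ordinary sense.
 */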

static void init_pages_in_zone(pg_data_t *pgdat, struct zone *zone)
{
	unsigned long pfn = zone->zone_start_pfn;
	unsigned long end_pfn = zone_end_pfn(zone);
	unsigned long count = 0;

	/*
	 * Walk the zone in pageblock_nr_pages steps. If a page block spans
	 * a zone boundary, it will be double counted between zones. This does
	 * not matter as the mixed block count will still be correct
	 */
	for (; pfn < end_pfn; ) {
		unsigned long block_end_pfn;

		if (!pfn_valid(pfn)) {
			pfn = ALIGN(pfn + 1, MAX_ORDER_NR_PAGES);
			continue;
		}

		block_end_pfn = ALIGN(pfn + 1, pageblock_nr_pages);
		block_end_pfn = min(block_end_pfn, end_pfn);

		for (; pfn < block_end_pfn; pfn++) {
			struct page *page;
			struct page_ext *page_ext;

			if (!pfn_valid_within(pfn))
				continue;

			page = pfn_to_page(pfn);

			if (page_zone(page) != zone)
				continue;

			/*
			 * To avoid having to grab zone->lock, be a little
			 * careful when reading buddy page order. The only
			 * danger is that we skip too much and potentially miss
			 * some early allocated pages, which is better than
			 * heavy lock contention.
			 */
			if (PageBuddy(page)) {
				unsigned long order = buddy_order_unsafe(page);

				if (order > 0 && order < MAX_ORDER)
					pfn += (1UL << order) - 1;
				continue;
			}

			if (PageReserved(page))
				continue;

			page_ext = page_ext_get(page);
			if (unlikely(!page_ext))
				continue;

			/* Maybe overlapping zone */
			if (test_bit(PAGE_EXT_OWNER, &page_ext->flags))
				goto ext_put_continue;

			/* Found early allocated page */
			__set_page_owner_handle(page, page_ext, early_handle,
						0, 0);
			count++;
ext_put_continue:
			page_ext_put(page_ext);
		}
		cond_resched();
	}

	pr_info("Node %d, zone %8s: page owner found early allocated %lu pages\n",
		pgdat->node_id, zone->name, count);
}

static void init_zones_in_node(pg_data_t *pgdat)
{
	struct zone *zone;
	struct zone *node_zones = pgdat->node_zones;

	for (zone = node_zones; zone - node_zones < MAX_NR_ZONES; ++zone) {
		if (!populated_zone(zone))
			continue;

		init_pages_in_zone(pgdat, zone);
	}
}

static void init_early_allocated_pages(void)
{
	pg_data_t *pgdat;

	for_each_online_pgdat(pgdat)
		init_zones_in_node(pgdat);
}

static const struct file_operations proc_page_owner_operations = {
	.read = read_page_owner,
};

static int __init pageowner_init(void)
{
	if (!static_branch_unlikely(&page_owner_inited)) {
		pr_info("page_owner is disabled\n");
		return 0;
	}

	debugfs_create_file("page_owner", 0400, NULL, NULL,
			    &proc_page_owner_operations);

	return 0;
}
late_initcall(pageowner_init)