// SPDX-License-Identifier: GPL-2.0-only
/*
 * Debug helper to dump the current kernel pagetables of the system
 * so that we can see what the various memory ranges are set to.
 *
 * Derived from x86 implementation:
 * (C) Copyright 2008 Intel Corporation
 *
 * Author: Arjan van de Ven <arjan@linux.intel.com>
 */
#include <linux/debugfs.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/seq_file.h>

#include <asm/domain.h>
#include <asm/fixmap.h>
#include <asm/memory.h>
#include <asm/ptdump.h>

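/*
 * Markers that label the major regions of the kernel virtual address
 * space in the dump output.  The vmalloc() entry is patched at init
 * time (see ptdump_initialize()) because VMALLOC_START is not a
 * compile-time constant on ARM.
 */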
static struct addr_marker address_markers[] = {
	{ MODULES_VADDR,	"Modules" },
	{ PAGE_OFFSET,		"Kernel Mapping" },
	{ 0,			"vmalloc() Area" },
	{ VMALLOC_END,		"vmalloc() End" },
	{ FIXADDR_START,	"Fixmap Area" },
	{ VECTORS_BASE,		"Vectors" },
	{ VECTORS_BASE + PAGE_SIZE * 2, "Vectors End" },
	{ -1,			NULL },
};

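/*
 * Printing helpers that become no-ops when there is no seq_file, so the
 * same walk code can back both the debugfs dump and the silent W+X scan
 * (ptdump_check_wx() runs with st->seq == NULL).
 */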
#define pt_dump_seq_printf(m, fmt, args...) \
({						\
	if (m)					\
		seq_printf(m, fmt, ##args);	\
})

#define pt_dump_seq_puts(m, fmt)	\
({						\
	if (m)					\
		seq_printf(m, fmt);	\
})

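/*
 * State carried across the page table walk: the current contiguous run
 * of identical mappings is described by start_address, level,
 * current_prot and current_domain; note_page() flushes the run whenever
 * any of these change.
 */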
struct pg_state {
	struct seq_file *seq;
	const struct addr_marker *marker;
	unsigned long start_address;
	unsigned level;
	u64 current_prot;
	bool check_wx;
	unsigned long wx_pages;
	const char *current_domain;
};

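/*
 * Decode table entry for one protection field: if (prot & mask) == val
 * the "set" string is printed, otherwise "clear" (or nothing when clear
 * is NULL).  ro_bit/nx_bit mark the entries used by the W+X check.
 */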
struct prot_bits {
	u64		mask;
	u64		val;
	const char	*set;
	const char	*clear;
	bool		ro_bit;
	bool		nx_bit;
};

static const struct prot_bits pte_bits[] = {
	{
		.mask	= L_PTE_USER,
		.val	= L_PTE_USER,
		.set	= "USR",
		.clear	= "   ",
	}, {
		.mask	= L_PTE_RDONLY,
		.val	= L_PTE_RDONLY,
		.set	= "ro",
		.clear	= "RW",
		.ro_bit	= true,
	}, {
		.mask	= L_PTE_XN,
		.val	= L_PTE_XN,
		.set	= "NX",
		.clear	= "x ",
		.nx_bit	= true,
	}, {
		.mask	= L_PTE_SHARED,
		.val	= L_PTE_SHARED,
		.set	= "SHD",
		.clear	= "   ",
	}, {
		.mask	= L_PTE_MT_MASK,
		.val	= L_PTE_MT_UNCACHED,
		.set	= "SO/UNCACHED",
	}, {
		.mask	= L_PTE_MT_MASK,
		.val	= L_PTE_MT_BUFFERABLE,
		.set	= "MEM/BUFFERABLE/WC",
	}, {
		.mask	= L_PTE_MT_MASK,
		.val	= L_PTE_MT_WRITETHROUGH,
		.set	= "MEM/CACHED/WT",
	}, {
		.mask	= L_PTE_MT_MASK,
		.val	= L_PTE_MT_WRITEBACK,
		.set	= "MEM/CACHED/WBRA",
#ifndef CONFIG_ARM_LPAE
	}, {
		.mask	= L_PTE_MT_MASK,
		.val	= L_PTE_MT_MINICACHE,
		.set	= "MEM/MINICACHE",
#endif
	}, {
		.mask	= L_PTE_MT_MASK,
		.val	= L_PTE_MT_WRITEALLOC,
		.set	= "MEM/CACHED/WBWA",
	}, {
		.mask	= L_PTE_MT_MASK,
		.val	= L_PTE_MT_DEV_SHARED,
		.set	= "DEV/SHARED",
#ifndef CONFIG_ARM_LPAE
	}, {
		.mask	= L_PTE_MT_MASK,
		.val	= L_PTE_MT_DEV_NONSHARED,
		.set	= "DEV/NONSHARED",
#endif
	}, {
		.mask	= L_PTE_MT_MASK,
		.val	= L_PTE_MT_DEV_WC,
		.set	= "DEV/WC",
	}, {
		.mask	= L_PTE_MT_MASK,
		.val	= L_PTE_MT_DEV_CACHED,
		.set	= "DEV/CACHED",
	},
};

static const struct prot_bits section_bits[] = {
#ifdef CONFIG_ARM_LPAE
	{
		.mask	= PMD_SECT_USER,
		.val	= PMD_SECT_USER,
		.set	= "USR",
	}, {
		.mask	= L_PMD_SECT_RDONLY | PMD_SECT_AP2,
		.val	= L_PMD_SECT_RDONLY | PMD_SECT_AP2,
		.set	= "ro",
		.clear	= "RW",
		.ro_bit	= true,
#elif __LINUX_ARM_ARCH__ >= 6
	{
		.mask	= PMD_SECT_APX | PMD_SECT_AP_READ | PMD_SECT_AP_WRITE,
		.val	= PMD_SECT_APX | PMD_SECT_AP_WRITE,
		.set	= "    ro",
		.ro_bit	= true,
	}, {
		.mask	= PMD_SECT_APX | PMD_SECT_AP_READ | PMD_SECT_AP_WRITE,
		.val	= PMD_SECT_AP_WRITE,
		.set	= "    RW",
	}, {
		.mask	= PMD_SECT_APX | PMD_SECT_AP_READ | PMD_SECT_AP_WRITE,
		.val	= PMD_SECT_AP_READ,
		.set	= "USR ro",
	}, {
		.mask	= PMD_SECT_APX | PMD_SECT_AP_READ | PMD_SECT_AP_WRITE,
		.val	= PMD_SECT_AP_READ | PMD_SECT_AP_WRITE,
		.set	= "USR RW",
#else /* ARMv4/ARMv5  */
	/* These are approximate */
	{
		.mask   = PMD_SECT_AP_READ | PMD_SECT_AP_WRITE,
		.val    = 0,
		.set    = "    ro",
		.ro_bit	= true,
	}, {
		.mask   = PMD_SECT_AP_READ | PMD_SECT_AP_WRITE,
		.val    = PMD_SECT_AP_WRITE,
		.set    = "    RW",
	}, {
		.mask   = PMD_SECT_AP_READ | PMD_SECT_AP_WRITE,
		.val    = PMD_SECT_AP_READ,
		.set    = "USR ro",
	}, {
		.mask   = PMD_SECT_AP_READ | PMD_SECT_AP_WRITE,
		.val    = PMD_SECT_AP_READ | PMD_SECT_AP_WRITE,
		.set    = "USR RW",
#endif
	}, {
		.mask	= PMD_SECT_XN,
		.val	= PMD_SECT_XN,
		.set	= "NX",
		.clear	= "x ",
		.nx_bit	= true,
	}, {
		.mask	= PMD_SECT_S,
		.val	= PMD_SECT_S,
		.set	= "SHD",
		.clear	= "   ",
	},
};

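/*
 * One entry per page table level, indexed by the level argument passed
 * to note_page().  Only the pmd (section) and pte levels carry
 * decodable protection bits on ARM; mask, ro_bit and nx_bit are filled
 * in by ptdump_initialize() from the per-level bits tables.
 */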
struct pg_level {
	const struct prot_bits *bits;
	size_t num;
	u64 mask;
	const struct prot_bits *ro_bit;
	const struct prot_bits *nx_bit;
};

static struct pg_level pg_level[] = {
	{
	}, { /* pgd */
	}, { /* p4d */
	}, { /* pud */
	}, { /* pmd */
		.bits	= section_bits,
		.num	= ARRAY_SIZE(section_bits),
	}, { /* pte */
		.bits	= pte_bits,
		.num	= ARRAY_SIZE(pte_bits),
	},
};

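/* Print the decoded attribute strings for the current run of mappings. */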
static void dump_prot(struct pg_state *st, const struct prot_bits *bits, size_t num)
{
	unsigned i;

	for (i = 0; i < num; i++, bits++) {
		const char *s;

		if ((st->current_prot & bits->mask) == bits->val)
			s = bits->set;
		else
			s = bits->clear;

		if (s)
			pt_dump_seq_printf(st->seq, " %s", s);
	}
}

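/*
 * During a W+X scan, warn (once) and count the pages of any run that is
 * both writable (ro bit clear) and executable (nx bit clear).
 */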
static void note_prot_wx(struct pg_state *st, unsigned long addr)
{
	if (!st->check_wx)
		return;
	if ((st->current_prot & pg_level[st->level].ro_bit->mask) ==
				pg_level[st->level].ro_bit->val)
		return;
	if ((st->current_prot & pg_level[st->level].nx_bit->mask) ==
				pg_level[st->level].nx_bit->val)
		return;

	WARN_ONCE(1, "arm/mm: Found insecure W+X mapping at address %pS\n",
			(void *)st->start_address);

	st->wx_pages += (addr - st->start_address) / PAGE_SIZE;
}

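/*
 * Called for every entry the walkers visit.  Identical consecutive
 * mappings are coalesced; when the protection, level, domain or marker
 * region changes, the finished range is printed as one line (address
 * range, human-readable size, domain and decoded attributes) and a new
 * range is started.
 */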
static void note_page(struct pg_state *st, unsigned long addr,
		      unsigned int level, u64 val, const char *domain)
{
	static const char units[] = "KMGTPE";
	u64 prot = val & pg_level[level].mask;

	if (!st->level) {
		st->level = level;
		st->current_prot = prot;
		st->current_domain = domain;
		pt_dump_seq_printf(st->seq, "---[ %s ]---\n", st->marker->name);
	} else if (prot != st->current_prot || level != st->level ||
		   domain != st->current_domain ||
		   addr >= st->marker[1].start_address) {
		const char *unit = units;
		unsigned long delta;

		if (st->current_prot) {
			note_prot_wx(st, addr);
			pt_dump_seq_printf(st->seq, "0x%08lx-0x%08lx   ",
				   st->start_address, addr);

			delta = (addr - st->start_address) >> 10;
			while (!(delta & 1023) && unit[1]) {
				delta >>= 10;
				unit++;
			}
			pt_dump_seq_printf(st->seq, "%9lu%c", delta, *unit);
			if (st->current_domain)
				pt_dump_seq_printf(st->seq, " %s",
							st->current_domain);
			if (pg_level[st->level].bits)
				dump_prot(st, pg_level[st->level].bits, pg_level[st->level].num);
			pt_dump_seq_printf(st->seq, "\n");
		}

		if (addr >= st->marker[1].start_address) {
			st->marker++;
			pt_dump_seq_printf(st->seq, "---[ %s ]---\n",
							st->marker->name);
		}
		st->start_address = addr;
		st->current_prot = prot;
		st->current_domain = domain;
		st->level = level;
	}
}

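/* Dump every PTE in one page table page (one note_page() call per page). */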
static void walk_pte(struct pg_state *st, pmd_t *pmd, unsigned long start,
		     const char *domain)
{
	pte_t *pte = pte_offset_kernel(pmd, 0);
	unsigned long addr;
	unsigned i;

	for (i = 0; i < PTRS_PER_PTE; i++, pte++) {
		addr = start + i * PAGE_SIZE;
		note_page(st, addr, 5, pte_val(*pte), domain);
	}
}

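/*
 * Return the ARM domain a first-level entry belongs to.  LPAE has no
 * domains, so NULL is returned there and no domain column is printed.
 */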
static const char *get_domain_name(pmd_t *pmd)
{
#ifndef CONFIG_ARM_LPAE
	switch (pmd_val(*pmd) & PMD_DOMAIN_MASK) {
	case PMD_DOMAIN(DOMAIN_KERNEL):
		return "KERNEL ";
	case PMD_DOMAIN(DOMAIN_USER):
		return "USER   ";
	case PMD_DOMAIN(DOMAIN_IO):
		return "IO     ";
	case PMD_DOMAIN(DOMAIN_VECTORS):
		return "VECTORS";
	default:
		return "unknown";
	}
#endif
	return NULL;
}

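/*
 * Walk the second level.  Section mappings are reported directly, page
 * table pointers are descended into.  On the classic (non-LPAE) format
 * one Linux pmd covers two hardware sections, so the odd section is
 * noted separately when it is itself a section mapping.
 */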
static void walk_pmd(struct pg_state *st, pud_t *pud, unsigned long start)
{
	pmd_t *pmd = pmd_offset(pud, 0);
	unsigned long addr;
	unsigned i;
	const char *domain;

	for (i = 0; i < PTRS_PER_PMD; i++, pmd++) {
		addr = start + i * PMD_SIZE;
		domain = get_domain_name(pmd);
		if (pmd_none(*pmd) || pmd_large(*pmd) || !pmd_present(*pmd))
			note_page(st, addr, 4, pmd_val(*pmd), domain);
		else
			walk_pte(st, pmd, addr, domain);

		if (SECTION_SIZE < PMD_SIZE && pmd_large(pmd[1])) {
			addr += SECTION_SIZE;
			pmd++;
			domain = get_domain_name(pmd);
			note_page(st, addr, 4, pmd_val(*pmd), domain);
		}
	}
}

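/*
 * The pud and p4d levels are folded on ARM, so these loops normally run
 * a single iteration and simply hand control down to the next level.
 */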
static void walk_pud(struct pg_state *st, p4d_t *p4d, unsigned long start)
{
	pud_t *pud = pud_offset(p4d, 0);
	unsigned long addr;
	unsigned i;

	for (i = 0; i < PTRS_PER_PUD; i++, pud++) {
		addr = start + i * PUD_SIZE;
		if (!pud_none(*pud)) {
			walk_pmd(st, pud, addr);
		} else {
			note_page(st, addr, 3, pud_val(*pud), NULL);
		}
	}
}

static void walk_p4d(struct pg_state *st, pgd_t *pgd, unsigned long start)
{
	p4d_t *p4d = p4d_offset(pgd, 0);
	unsigned long addr;
	unsigned i;

	for (i = 0; i < PTRS_PER_P4D; i++, p4d++) {
		addr = start + i * P4D_SIZE;
		if (!p4d_none(*p4d)) {
			walk_pud(st, p4d, addr);
		} else {
			note_page(st, addr, 2, p4d_val(*p4d), NULL);
		}
	}
}

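/* Top of the walk: iterate over every pgd entry of the given mm. */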
static void walk_pgd(struct pg_state *st, struct mm_struct *mm,
			unsigned long start)
{
	pgd_t *pgd = pgd_offset(mm, 0UL);
	unsigned i;
	unsigned long addr;

	for (i = 0; i < PTRS_PER_PGD; i++, pgd++) {
		addr = start + i * PGDIR_SIZE;
		if (!pgd_none(*pgd)) {
			walk_p4d(st, pgd, addr);
		} else {
			note_page(st, addr, 1, pgd_val(*pgd), NULL);
		}
	}
}

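/*
 * Entry point used by the ptdump debugfs code: dump the page tables
 * described by @info to the given seq_file.  The trailing note_page()
 * call flushes the last pending range.
 */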
void ptdump_walk_pgd(struct seq_file *m, struct ptdump_info *info)
{
	struct pg_state st = {
		.seq = m,
		.marker = info->markers,
		.check_wx = false,
	};

	walk_pgd(&st, info->mm, info->base_addr);
	note_page(&st, 0, 0, 0, NULL);
}

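/*
 * Precompute the per-level attribute mask, locate the ro/nx entries,
 * and patch the vmalloc marker with the runtime value of VMALLOC_START.
 */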
static void ptdump_initialize(void)
{
	unsigned i, j;

	for (i = 0; i < ARRAY_SIZE(pg_level); i++)
		if (pg_level[i].bits)
			for (j = 0; j < pg_level[i].num; j++) {
				pg_level[i].mask |= pg_level[i].bits[j].mask;
				if (pg_level[i].bits[j].ro_bit)
					pg_level[i].ro_bit = &pg_level[i].bits[j];
				if (pg_level[i].bits[j].nx_bit)
					pg_level[i].nx_bit = &pg_level[i].bits[j];
			}

	address_markers[2].start_address = VMALLOC_START;
}

static struct ptdump_info kernel_ptdump_info = {
	.mm = &init_mm,
	.markers = address_markers,
	.base_addr = 0,
};

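/*
 * Scan the whole kernel page table for mappings that are both writable
 * and executable, typically invoked via debug_checkwx() once rodata has
 * been marked read-only.  Runs with a NULL seq_file, so nothing is
 * printed except the final summary.
 */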
void ptdump_check_wx(void)
{
	struct pg_state st = {
		.seq = NULL,
		.marker = (struct addr_marker[]) {
			{ 0, NULL},
			{ -1, NULL},
		},
		.check_wx = true,
	};

	walk_pgd(&st, &init_mm, 0);
	note_page(&st, 0, 0, 0, NULL);
	if (st.wx_pages)
		pr_warn("Checked W+X mappings: FAILED, %lu W+X pages found\n",
			st.wx_pages);
	else
		pr_info("Checked W+X mappings: passed, no W+X pages found\n");
}

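/*
 * Late init: build the decode tables and register the debugfs file.
 * With debugfs mounted in its usual location, the dump can be read with:
 *
 *	# cat /sys/kernel/debug/kernel_page_tables
 */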
static int ptdump_init(void)
{
	ptdump_initialize();
	ptdump_debugfs_register(&kernel_ptdump_info, "kernel_page_tables");
	return 0;
}
__initcall(ptdump_init);