// SPDX-License-Identifier: GPL-2.0

#include <linux/pagewalk.h>
#include <linux/ptdump.h>
#include <linux/kasan.h>

#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
/*
 * This is an optimization for the KASAN=y case. Since all KASAN page
 * tables eventually point to the kasan_early_shadow_page, we can call
 * note_page() right away without walking through the lower-level page
 * tables. This saves us dozens of seconds (minutes for a 5-level config)
 * when checking for W+X mappings or reading the kernel_page_tables
 * debugfs file.
 */
static inline int note_kasan_page_table(struct mm_walk *walk,
					unsigned long addr)
{
	struct ptdump_state *st = walk->private;

	/* Report the shared shadow PTE at the PTE level (4). */
	st->note_page(st, addr, 4, pte_val(kasan_early_shadow_pte[0]));

	/* Skip the lower levels: they all alias the same shadow page. */
	walk->action = ACTION_CONTINUE;

	return 0;
}
#endif

static int ptdump_pgd_entry(pgd_t *pgd, unsigned long addr,
			    unsigned long next, struct mm_walk *walk)
{
	struct ptdump_state *st = walk->private;
	pgd_t val = READ_ONCE(*pgd);

#if CONFIG_PGTABLE_LEVELS > 4 && \
		(defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS))
	if (pgd_page(val) == virt_to_page(lm_alias(kasan_early_shadow_p4d)))
		return note_kasan_page_table(walk, addr);
#endif

	if (st->effective_prot)
		st->effective_prot(st, 0, pgd_val(val));

	if (pgd_leaf(val))
		st->note_page(st, addr, 0, pgd_val(val));

	return 0;
}

static int ptdump_p4d_entry(p4d_t *p4d, unsigned long addr,
			    unsigned long next, struct mm_walk *walk)
{
	struct ptdump_state *st = walk->private;
	p4d_t val = READ_ONCE(*p4d);

#if CONFIG_PGTABLE_LEVELS > 3 && \
		(defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS))
	if (p4d_page(val) == virt_to_page(lm_alias(kasan_early_shadow_pud)))
		return note_kasan_page_table(walk, addr);
#endif

	if (st->effective_prot)
		st->effective_prot(st, 1, p4d_val(val));

	if (p4d_leaf(val))
		st->note_page(st, addr, 1, p4d_val(val));

	return 0;
}

static int ptdump_pud_entry(pud_t *pud, unsigned long addr,
			    unsigned long next, struct mm_walk *walk)
{
	struct ptdump_state *st = walk->private;
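	/* Snapshot the entry once so the checks below see one consistent value. */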
	pud_t val = READ_ONCE(*pud);

#if CONFIG_PGTABLE_LEVELS > 2 && \
		(defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS))
	if (pud_page(val) == virt_to_page(lm_alias(kasan_early_shadow_pmd)))
		return note_kasan_page_table(walk, addr);
#endif

	if (st->effective_prot)
		st->effective_prot(st, 2, pud_val(val));

	if (pud_leaf(val))
		st->note_page(st, addr, 2, pud_val(val));

	return 0;
}

static int ptdump_pmd_entry(pmd_t *pmd, unsigned long addr,
			    unsigned long next, struct mm_walk *walk)
{
	struct ptdump_state *st = walk->private;
	pmd_t val = READ_ONCE(*pmd);

#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
	if (pmd_page(val) == virt_to_page(lm_alias(kasan_early_shadow_pte)))
		return note_kasan_page_table(walk, addr);
#endif

	if (st->effective_prot)
		st->effective_prot(st, 3, pmd_val(val));

	if (pmd_leaf(val))
		st->note_page(st, addr, 3, pmd_val(val));

	return 0;
}

static int ptdump_pte_entry(pte_t *pte, unsigned long addr,
			    unsigned long next, struct mm_walk *walk)
{
	struct ptdump_state *st = walk->private;
	pte_t val = ptep_get(pte);

	if (st->effective_prot)
		st->effective_prot(st, 4, pte_val(val));

	st->note_page(st, addr, 4, pte_val(val));

	return 0;
}

static int ptdump_hole(unsigned long addr, unsigned long next,
		       int depth, struct mm_walk *walk)
{
	struct ptdump_state *st = walk->private;

	st->note_page(st, addr, depth, 0);

	return 0;
}

static const struct mm_walk_ops ptdump_ops = {
	.pgd_entry	= ptdump_pgd_entry,
	.p4d_entry	= ptdump_p4d_entry,
	.pud_entry	= ptdump_pud_entry,
	.pmd_entry	= ptdump_pmd_entry,
	.pte_entry	= ptdump_pte_entry,
	.pte_hole	= ptdump_hole,
};
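/*
 * ptdump_walk_pgd - walk the given ranges of a page table and report every
 * entry (and hole) via st->note_page().
 *
 * The mmap write lock serializes the walk against concurrent updates of the
 * tables being dumped. st->range must be terminated by an entry with
 * start == end; @pgd may be NULL to walk mm->pgd. A final note_page() call
 * with level -1 lets the callback flush whatever region it is still
 * accumulating.
 */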
void ptdump_walk_pgd(struct ptdump_state *st, struct mm_struct *mm, pgd_t *pgd)
{
	const struct ptdump_range *range = st->range;

	mmap_write_lock(mm);
	while (range->start != range->end) {
		walk_page_range_novma(mm, range->start, range->end,
				      &ptdump_ops, pgd, st);
		range++;
	}
	mmap_write_unlock(mm);

	/* Flush out the last page */
	st->note_page(st, 0, -1, 0);
}
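/*
 * Illustrative only: a minimal sketch of an arch-side consumer, assuming the
 * note_page() signature used above. The names example_note_page, example_range
 * and example_st are hypothetical; real users (e.g. arch/arm64/mm/ptdump.c)
 * define their own state and printing logic.
 *
 *	static void example_note_page(struct ptdump_state *st, unsigned long addr,
 *				      int level, u64 val)
 *	{
 *		// level runs 0 (PGD) .. 4 (PTE); -1 is the final flush call.
 *		if (val && level >= 0)
 *			pr_info("0x%016lx: level %d entry 0x%llx\n",
 *				addr, level, val);
 *	}
 *
 *	static const struct ptdump_range example_range[] = {
 *		{PAGE_OFFSET, ~0UL},
 *		{0, 0},			// sentinel: start == end ends the walk
 *	};
 *
 *	static struct ptdump_state example_st = {
 *		.note_page	= example_note_page,
 *		.range		= example_range,
 *	};
 *
 *	ptdump_walk_pgd(&example_st, &init_mm, NULL);
 */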