/OK3568_Linux_fs/kernel/mm/
page_vma_mapped.c (matches in not_found() and map_pte()):

      10  static inline bool not_found(struct page_vma_mapped_walk *pvmw)
      12          page_vma_mapped_walk_done(pvmw);
      16  static bool map_pte(struct page_vma_mapped_walk *pvmw)
      18          pvmw->pte = pte_offset_map(pvmw->pmd, pvmw->address);
      19          if (!(pvmw->flags & PVMW_SYNC)) {
      20                  if (pvmw->flags & PVMW_MIGRATION) {
      21                          if (!is_swap_pte(*pvmw->pte))
      39          if (is_swap_pte(*pvmw->pte)) {
      43                  entry = pte_to_swp_entry(*pvmw->pte);
      46          } else if (!pte_present(*pvmw->pte))
      [all …]
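
For orientation, the caller-side contract that page_vma_mapped.c implements looks roughly like the sketch below. This is a minimal illustration, not code from this tree; the helper name walk_one_mapping() is hypothetical and error handling is elided.

    #include <linux/mm.h>
    #include <linux/rmap.h>

    /*
     * Hypothetical sketch: visit every place @page is mapped in @vma.
     * Each successful iteration returns with pvmw.ptl held for the
     * mapping that was found.
     */
    static void walk_one_mapping(struct page *page, struct vm_area_struct *vma,
                                 unsigned long address)
    {
            struct page_vma_mapped_walk pvmw = {
                    .page = page,
                    .vma = vma,
                    .address = address,
                    .flags = 0,     /* optionally PVMW_SYNC and/or PVMW_MIGRATION */
            };

            while (page_vma_mapped_walk(&pvmw)) {
                    if (pvmw.pte) {
                            /* mapped at PTE level: *pvmw.pte is the entry */
                    } else {
                            /* THP mapped at PMD level: pvmw.pmd is the entry */
                    }
            }
            /* on normal exhaustion the walker has already cleaned up */
    }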
|
rmap.c (matches in page_referenced_one() and page_mkclean_one()):

     784          struct page_vma_mapped_walk pvmw = {
     791          while (page_vma_mapped_walk(&pvmw)) {
     792                  address = pvmw.address;
     795                          page_vma_mapped_walk_done(&pvmw);
     800                  if (pvmw.pte) {
     801                          trace_android_vh_look_around(&pvmw, page, vma, &referenced);
     803                                                  pvmw.pte)) {
     817                                                  pvmw.pmd))
     918          struct page_vma_mapped_walk pvmw = {
     936          while (page_vma_mapped_walk(&pvmw)) {
     [all …]
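
The rmap.c matches follow the canonical referenced-bit pattern: walk every mapping of the page and test-and-clear the accessed bit at whichever level it is mapped. A simplified sketch; the hypothetical count_references() below omits the VM_LOCKED handling and the vendor look_around hook that the real page_referenced_one() has.

    #include <linux/rmap.h>
    #include <linux/mmu_notifier.h>

    /* Hypothetical sketch of the referenced-bit accounting loop. */
    static int count_references(struct page *page, struct vm_area_struct *vma,
                                unsigned long address)
    {
            struct page_vma_mapped_walk pvmw = {
                    .page = page,
                    .vma = vma,
                    .address = address,
            };
            int referenced = 0;

            while (page_vma_mapped_walk(&pvmw)) {
                    if (pvmw.pte) {
                            if (ptep_clear_flush_young_notify(vma, pvmw.address,
                                                              pvmw.pte))
                                    referenced++;
                    } else if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) {
                            if (pmdp_clear_flush_young_notify(vma, pvmw.address,
                                                              pvmw.pmd))
                                    referenced++;
                    }
            }
            return referenced;
    }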
|
page_idle.c (matches in page_idle_clear_pte_refs_one()):

      55          struct page_vma_mapped_walk pvmw = {
      62          while (page_vma_mapped_walk(&pvmw)) {
      63                  addr = pvmw.address;
      64                  if (pvmw.pte) {
      69                          if (ptep_clear_young_notify(vma, addr, pvmw.pte))
      72                          if (pmdp_clear_young_notify(vma, addr, pvmw.pmd))
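
A callback like page_idle_clear_pte_refs_one() does not find the VMAs itself; it is invoked once per mapping VMA by rmap_walk(). A minimal sketch of that wiring, with the hypothetical names my_rmap_one() and clear_refs_everywhere():

    #include <linux/rmap.h>

    /* Hypothetical per-VMA callback; the pvmw loop would live here. */
    static bool my_rmap_one(struct page *page, struct vm_area_struct *vma,
                            unsigned long addr, void *arg)
    {
            /* ... run a page_vma_mapped_walk() loop over this mapping ... */
            return true;            /* keep iterating over other VMAs */
    }

    /* Drive the callback across every VMA that maps @page. */
    static void clear_refs_everywhere(struct page *page)
    {
            struct rmap_walk_control rwc = {
                    .rmap_one = my_rmap_one,
                    .anon_lock = page_lock_anon_vma_read,
            };

            rmap_walk(page, &rwc);
    }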
|
ksm.c (matches in write_protect_page()):

    1038          struct page_vma_mapped_walk pvmw = {
    1046          pvmw.address = page_address_in_vma(page, vma);
    1047          if (pvmw.address == -EFAULT)
    1053                                  pvmw.address,
    1054                                  pvmw.address + PAGE_SIZE);
    1057          if (!page_vma_mapped_walk(&pvmw))
    1059          if (WARN_ONCE(!pvmw.pte, "Unexpected PMD mapping?"))
    1062          if (pte_write(*pvmw.pte) || pte_dirty(*pvmw.pte) ||
    1063              (pte_protnone(*pvmw.pte) && pte_savedwrite(*pvmw.pte)) ||
    1068                  flush_cache_page(vma, pvmw.address, page_to_pfn(page));
    [all …]
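
Once write_protect_page()'s walk has pinned a writable or dirty PTE, the mapping is downgraded in place. A sketch of that downgrade sequence, assuming the walk already succeeded; the helper name wrprotect_mapped_pte() is hypothetical, and the real function also guards against racing writers before committing.

    #include <linux/mm.h>
    #include <linux/rmap.h>
    #include <linux/mmu_notifier.h>
    #include <asm/cacheflush.h>

    /* Hypothetical sketch: replace the pinned PTE with a clean, read-only copy. */
    static void wrprotect_mapped_pte(struct vm_area_struct *vma,
                                     struct page_vma_mapped_walk *pvmw,
                                     struct page *page)
    {
            pte_t entry;

            flush_cache_page(vma, pvmw->address, page_to_pfn(page));
            entry = ptep_clear_flush(vma, pvmw->address, pvmw->pte);
            if (pte_dirty(entry))
                    set_page_dirty(page);   /* don't lose the dirty bit */
            entry = pte_mkclean(pte_wrprotect(entry));
            set_pte_at_notify(vma->vm_mm, pvmw->address, pvmw->pte, entry);
    }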
|
migrate.c (matches in remove_migration_pte()):

     186          struct page_vma_mapped_walk pvmw = {
     197          while (page_vma_mapped_walk(&pvmw)) {
     201                  new = page - pvmw.page->index +
     202                          linear_page_index(vma, pvmw.address);
     206                  if (!pvmw.pte) {
     208                          remove_migration_pmd(&pvmw, new);
     215                  if (pte_swp_soft_dirty(*pvmw.pte))
     221                  entry = pte_to_swp_entry(*pvmw.pte);
     224                  else if (pte_swp_uffd_wp(*pvmw.pte))
     230                          if (pte_swp_soft_dirty(*pvmw.pte))
     [all …]
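
remove_migration_pte() turns a migration swap entry back into a present PTE, carrying the soft-dirty and uffd-wp bits across the migration. A sketch of just that PTE reconstruction; the helper name rebuild_pte() is hypothetical, and the real loop also handles hugetlb and device-private pages.

    #include <linux/mm.h>
    #include <linux/swapops.h>

    /* Hypothetical sketch: rebuild a present PTE from a migration entry. */
    static pte_t rebuild_pte(struct vm_area_struct *vma, pte_t old,
                             struct page *new)
    {
            swp_entry_t entry = pte_to_swp_entry(old);
            pte_t pte = pte_mkold(mk_pte(new, READ_ONCE(vma->vm_page_prot)));

            if (pte_swp_soft_dirty(old))
                    pte = pte_mksoft_dirty(pte);
            if (is_write_migration_entry(entry))
                    pte = maybe_mkwrite(pte, vma);  /* restore write access */
            else if (pte_swp_uffd_wp(old))
                    pte = pte_mkuffd_wp(pte);
            return pte;
    }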
|
huge_memory.c (matches in set_pmd_migration_entry() and remove_migration_pmd()):

    2965  void set_pmd_migration_entry(struct page_vma_mapped_walk *pvmw,
    2968          struct vm_area_struct *vma = pvmw->vma;
    2970          unsigned long address = pvmw->address;
    2975          if (!(pvmw->pmd && !pvmw->pte))
    2979          pmdval = pmdp_invalidate(vma, address, pvmw->pmd);
    2986          set_pmd_at(mm, address, pvmw->pmd, pmdswp);
    2991  void remove_migration_pmd(struct page_vma_mapped_walk *pvmw, struct page *new)
    2993          struct vm_area_struct *vma = pvmw->vma;
    2995          unsigned long address = pvmw->address;
    3000          if (!(pvmw->pmd && !pvmw->pte))
    [all …]
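
set_pmd_migration_entry() does the PMD-level analogue: invalidate the live PMD and install a swap PMD encoding the page and its writability. A simplified sketch under the hypothetical name pmd_to_migration_entry(); the real function also updates rmap and drops the page reference.

    #include <linux/huge_mm.h>
    #include <linux/swapops.h>

    /* Hypothetical sketch: swap a live PMD for a migration entry. */
    static void pmd_to_migration_entry(struct vm_area_struct *vma,
                                       unsigned long address, pmd_t *pmdp,
                                       struct page *page)
    {
            pmd_t pmdval = pmdp_invalidate(vma, address, pmdp);
            swp_entry_t entry = make_migration_entry(page, pmd_write(pmdval));
            pmd_t pmdswp = swp_entry_to_pmd(entry);

            if (pmd_soft_dirty(pmdval))
                    pmdswp = pmd_swp_mksoft_dirty(pmdswp);
            set_pmd_at(vma->vm_mm, address, pmdp, pmdswp);
    }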
|
/OK3568_Linux_fs/kernel/mm/damon/
paddr.c (matches in __damon_pa_mkold() and __damon_pa_young()):

      22          struct page_vma_mapped_walk pvmw = {
      28          while (page_vma_mapped_walk(&pvmw)) {
      29                  addr = pvmw.address;
      30                  if (pvmw.pte)
      31                          damon_ptep_mkold(pvmw.pte, vma->vm_mm, addr);
      33                          damon_pmdp_mkold(pvmw.pmd, vma->vm_mm, addr);
      96          struct page_vma_mapped_walk pvmw = {
     104          while (page_vma_mapped_walk(&pvmw)) {
     105                  addr = pvmw.address;
     106                  if (pvmw.pte) {
     [all …]
|
/OK3568_Linux_fs/kernel/include/linux/
rmap.h (matches in page_vma_mapped_walk_done() and the walker declaration):

     237  static inline void page_vma_mapped_walk_done(struct page_vma_mapped_walk *pvmw)
     240          if (pvmw->pte && !PageHuge(pvmw->page))
     241                  pte_unmap(pvmw->pte);
     242          if (pvmw->ptl)
     243                  spin_unlock(pvmw->ptl);
     246  bool page_vma_mapped_walk(struct page_vma_mapped_walk *pvmw);
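
The header spells out the cleanup contract: page_vma_mapped_walk() releases the PTE map and ptl itself when it returns false, but a caller that stops mid-walk must call page_vma_mapped_walk_done(). A sketch of that early-exit shape; walk_until() and its stop predicate are hypothetical.

    #include <linux/rmap.h>

    /* Hypothetical helper: stop at the first mapping that satisfies @stop. */
    static bool walk_until(struct page_vma_mapped_walk *pvmw,
                           bool (*stop)(struct page_vma_mapped_walk *))
    {
            while (page_vma_mapped_walk(pvmw)) {
                    if (stop(pvmw)) {
                            /* bailing out mid-walk: drop the map and ptl ourselves */
                            page_vma_mapped_walk_done(pvmw);
                            return true;
                    }
            }
            /* walk exhausted: the walker has already cleaned up */
            return false;
    }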
|
swapops.h (extern declarations and static inline stubs for set_pmd_migration_entry() and remove_migration_pmd()):

     254  extern void set_pmd_migration_entry(struct page_vma_mapped_walk *pvmw,
     257  extern void remove_migration_pmd(struct page_vma_mapped_walk *pvmw,
     287  static inline void set_pmd_migration_entry(struct page_vma_mapped_walk *pvmw,
     293  static inline void remove_migration_pmd(struct page_vma_mapped_walk *pvmw,
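
The split between the extern declarations (lines 254/257) and the static inline versions (lines 287/293) is the usual config-stub arrangement. A sketch of what it presumably looks like, assuming the mainline CONFIG_ARCH_ENABLE_THP_MIGRATION guard and BUILD_BUG() stubs:

    #include <linux/bug.h>

    /* Sketch of the assumed header layout, not copied from this tree. */
    #ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
    extern void set_pmd_migration_entry(struct page_vma_mapped_walk *pvmw,
                                        struct page *page);
    extern void remove_migration_pmd(struct page_vma_mapped_walk *pvmw,
                                     struct page *new);
    #else
    static inline void set_pmd_migration_entry(struct page_vma_mapped_walk *pvmw,
                                               struct page *page)
    {
            BUILD_BUG();    /* callers must be compiled out in this config */
    }
    static inline void remove_migration_pmd(struct page_vma_mapped_walk *pvmw,
                                            struct page *new)
    {
            BUILD_BUG();
    }
    #endif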
|
/OK3568_Linux_fs/kernel/kernel/events/
uprobes.c (matches in __replace_page()):

     158          struct page_vma_mapped_walk pvmw = {
     180          if (!page_vma_mapped_walk(&pvmw))
     182          VM_BUG_ON_PAGE(addr != pvmw.address, old_page);
     197          flush_cache_page(vma, addr, pte_pfn(*pvmw.pte));
     198          ptep_clear_flush_notify(vma, addr, pvmw.pte);
     200          set_pte_at_notify(mm, addr, pvmw.pte,
     206          page_vma_mapped_walk_done(&pvmw);
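
__replace_page() uses the walker for a single expected hit rather than a loop: find the one PTE mapping old_page, flush it, and point it at new_page. A reduced sketch; replace_pte() is hypothetical, and the real function also transfers rmap/LRU state and brackets the update with mmu_notifier range calls.

    #include <linux/mm.h>
    #include <linux/rmap.h>
    #include <linux/mmu_notifier.h>
    #include <asm/cacheflush.h>

    /* Hypothetical sketch: rewrite the single PTE mapping @old_page at @addr. */
    static int replace_pte(struct vm_area_struct *vma, unsigned long addr,
                           struct page *old_page, struct page *new_page)
    {
            struct page_vma_mapped_walk pvmw = {
                    .page = compound_head(old_page),
                    .vma = vma,
                    .address = addr,
            };

            if (!page_vma_mapped_walk(&pvmw))
                    return -EAGAIN;         /* mapping disappeared under us */

            flush_cache_page(vma, addr, pte_pfn(*pvmw.pte));
            ptep_clear_flush_notify(vma, addr, pvmw.pte);
            set_pte_at_notify(vma->vm_mm, addr, pvmw.pte,
                              mk_pte(new_page, vma->vm_page_prot));

            page_vma_mapped_walk_done(&pvmw);
            return 0;
    }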
|
/OK3568_Linux_fs/kernel/include/trace/hooks/
mm.h (look_around vendor hook prototype):

     305          TP_PROTO(struct page_vma_mapped_walk *pvmw, struct page *page,
     307          TP_ARGS(pvmw, page, vma, referenced));
|