Lines matching refs: vma
126 static void seq_print_vma_name(struct seq_file *m, struct vm_area_struct *vma) in seq_print_vma_name() argument
128 const char __user *name = vma_get_anon_name(vma); in seq_print_vma_name()
129 struct mm_struct *mm = vma->vm_mm; in seq_print_vma_name()
181 struct vm_area_struct *vma; in m_start() local
208 vma = find_vma(mm, last_addr); in m_start()
209 if (vma) in m_start()
210 return vma; in m_start()
218 struct vm_area_struct *next, *vma = v; in m_next() local
220 if (vma == priv->tail_vma) in m_next()
222 else if (vma->vm_next) in m_next()
223 next = vma->vm_next; in m_next()
289 static int is_stack(struct vm_area_struct *vma) in is_stack() argument
296 return vma->vm_start <= vma->vm_mm->start_stack && in is_stack()
297 vma->vm_end >= vma->vm_mm->start_stack; in is_stack()
321 show_map_vma(struct seq_file *m, struct vm_area_struct *vma) in show_map_vma() argument
323 struct mm_struct *mm = vma->vm_mm; in show_map_vma()
324 struct file *file = vma->vm_file; in show_map_vma()
325 vm_flags_t flags = vma->vm_flags; in show_map_vma()
333 struct inode *inode = file_inode(vma->vm_file); in show_map_vma()
336 pgoff = ((loff_t)vma->vm_pgoff) << PAGE_SHIFT; in show_map_vma()
339 start = vma->vm_start; in show_map_vma()
340 end = vma->vm_end; in show_map_vma()
353 if (vma->vm_ops && vma->vm_ops->name) { in show_map_vma()
354 name = vma->vm_ops->name(vma); in show_map_vma()
359 name = arch_vma_name(vma); in show_map_vma()
366 if (vma->vm_start <= mm->brk && in show_map_vma()
367 vma->vm_end >= mm->start_brk) { in show_map_vma()
372 if (is_stack(vma)) { in show_map_vma()
377 if (vma_get_anon_name(vma)) { in show_map_vma()
379 seq_print_vma_name(m, vma); in show_map_vma()
548 walk->vma->vm_file->f_mapping, addr, end); in smaps_pte_hole()
560 struct vm_area_struct *vma = walk->vma; in smaps_pte_entry() local
561 bool locked = !!(vma->vm_flags & VM_LOCKED); in smaps_pte_entry()
566 page = vm_normal_page(vma, addr, *pte); in smaps_pte_entry()
592 page = xa_load(&vma->vm_file->f_mapping->i_pages, in smaps_pte_entry()
593 linear_page_index(vma, addr)); in smaps_pte_entry()
610 struct vm_area_struct *vma = walk->vma; in smaps_pmd_entry() local
611 bool locked = !!(vma->vm_flags & VM_LOCKED); in smaps_pmd_entry()
617 page = follow_trans_huge_pmd(vma, addr, pmd, FOLL_DUMP); in smaps_pmd_entry()
650 struct vm_area_struct *vma = walk->vma; in smaps_pte_range() local
654 ptl = pmd_trans_huge_lock(pmd, vma); in smaps_pte_range()
668 pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl); in smaps_pte_range()
677 static void show_smap_vma_flags(struct seq_file *m, struct vm_area_struct *vma) in show_smap_vma_flags() argument
748 if (vma->vm_flags & (1UL << i)) { in show_smap_vma_flags()
763 struct vm_area_struct *vma = walk->vma; in smaps_hugetlb_range() local
767 page = vm_normal_page(vma, addr, *pte); in smaps_hugetlb_range()
780 mss->shared_hugetlb += huge_page_size(hstate_vma(vma)); in smaps_hugetlb_range()
782 mss->private_hugetlb += huge_page_size(hstate_vma(vma)); in smaps_hugetlb_range()
807 static void smap_gather_stats(struct vm_area_struct *vma, in smap_gather_stats() argument
813 if (start >= vma->vm_end) in smap_gather_stats()
819 if (vma->vm_file && shmem_mapping(vma->vm_file->f_mapping)) { in smap_gather_stats()
830 unsigned long shmem_swapped = shmem_swap_usage(vma); in smap_gather_stats()
832 if (!start && (!shmem_swapped || (vma->vm_flags & VM_SHARED) || in smap_gather_stats()
833 !(vma->vm_flags & VM_WRITE))) { in smap_gather_stats()
843 walk_page_vma(vma, ops, mss); in smap_gather_stats()
845 walk_page_range(vma->vm_mm, start, vma->vm_end, ops, mss); in smap_gather_stats()
892 struct vm_area_struct *vma = v; in show_smap() local
897 smap_gather_stats(vma, &mss, 0); in show_smap()
899 show_map_vma(m, vma); in show_smap()
900 if (vma_get_anon_name(vma)) { in show_smap()
902 seq_print_vma_name(m, vma); in show_smap()
906 SEQ_PUT_DEC("Size: ", vma->vm_end - vma->vm_start); in show_smap()
907 SEQ_PUT_DEC(" kB\nKernelPageSize: ", vma_kernel_pagesize(vma)); in show_smap()
908 SEQ_PUT_DEC(" kB\nMMUPageSize: ", vma_mmu_pagesize(vma)); in show_smap()
914 transparent_hugepage_active(vma)); in show_smap()
917 seq_printf(m, "ProtectionKey: %8u\n", vma_pkey(vma)); in show_smap()
918 show_smap_vma_flags(m, vma); in show_smap()
928 struct vm_area_struct *vma; in show_smaps_rollup() local
950 for (vma = priv->mm->mmap; vma;) { in show_smaps_rollup()
951 smap_gather_stats(vma, &mss, 0); in show_smaps_rollup()
952 last_vma_end = vma->vm_end; in show_smaps_rollup()
1002 vma = find_vma(mm, last_vma_end - 1); in show_smaps_rollup()
1004 if (!vma) in show_smaps_rollup()
1008 if (vma->vm_start >= last_vma_end) in show_smaps_rollup()
1012 if (vma->vm_end > last_vma_end) in show_smaps_rollup()
1013 smap_gather_stats(vma, &mss, last_vma_end); in show_smaps_rollup()
1016 vma = vma->vm_next; in show_smaps_rollup()
1123 static inline bool pte_is_pinned(struct vm_area_struct *vma, unsigned long addr, pte_t pte) in pte_is_pinned() argument
1129 if (!is_cow_mapping(vma->vm_flags)) in pte_is_pinned()
1131 if (likely(!atomic_read(&vma->vm_mm->has_pinned))) in pte_is_pinned()
1133 page = vm_normal_page(vma, addr, pte); in pte_is_pinned()
1139 static inline void clear_soft_dirty(struct vm_area_struct *vma, in clear_soft_dirty() argument
1153 if (pte_is_pinned(vma, addr, ptent)) in clear_soft_dirty()
1155 old_pte = ptep_modify_prot_start(vma, addr, pte); in clear_soft_dirty()
1158 ptep_modify_prot_commit(vma, addr, pte, old_pte, ptent); in clear_soft_dirty()
1161 set_pte_at(vma->vm_mm, addr, pte, ptent); in clear_soft_dirty()
1165 static inline void clear_soft_dirty(struct vm_area_struct *vma, in clear_soft_dirty() argument
1172 static inline void clear_soft_dirty_pmd(struct vm_area_struct *vma, in clear_soft_dirty_pmd() argument
1179 old = pmdp_invalidate(vma, addr, pmdp); in clear_soft_dirty_pmd()
1188 set_pmd_at(vma->vm_mm, addr, pmdp, pmd); in clear_soft_dirty_pmd()
1191 set_pmd_at(vma->vm_mm, addr, pmdp, pmd); in clear_soft_dirty_pmd()
1195 static inline void clear_soft_dirty_pmd(struct vm_area_struct *vma, in clear_soft_dirty_pmd() argument
1205 struct vm_area_struct *vma = walk->vma; in clear_refs_pte_range() local
1210 ptl = pmd_trans_huge_lock(pmd, vma); in clear_refs_pte_range()
1213 clear_soft_dirty_pmd(vma, addr, pmd); in clear_refs_pte_range()
1223 pmdp_test_and_clear_young(vma, addr, pmd); in clear_refs_pte_range()
1234 pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl); in clear_refs_pte_range()
1239 clear_soft_dirty(vma, addr, pte); in clear_refs_pte_range()
1246 page = vm_normal_page(vma, addr, ptent); in clear_refs_pte_range()
1251 ptep_test_and_clear_young(vma, addr, pte); in clear_refs_pte_range()
1264 struct vm_area_struct *vma = walk->vma; in clear_refs_test_walk() local
1266 if (vma->vm_flags & VM_PFNMAP) in clear_refs_test_walk()
1275 if (cp->type == CLEAR_REFS_ANON && vma->vm_file) in clear_refs_test_walk()
1277 if (cp->type == CLEAR_REFS_MAPPED && !vma->vm_file) in clear_refs_test_walk()
1293 struct vm_area_struct *vma; in clear_refs_write() local
1334 for (vma = mm->mmap; vma; vma = vma->vm_next) { in clear_refs_write()
1335 if (!(vma->vm_flags & VM_SOFTDIRTY)) in clear_refs_write()
1337 vm_write_begin(vma); in clear_refs_write()
1338 WRITE_ONCE(vma->vm_flags, in clear_refs_write()
1339 vma->vm_flags & ~VM_SOFTDIRTY); in clear_refs_write()
1340 vma_set_page_prot(vma); in clear_refs_write()
1341 vm_write_end(vma); in clear_refs_write()
1417 struct vm_area_struct *vma = find_vma(walk->mm, addr); in pagemap_pte_hole() local
1422 if (vma) in pagemap_pte_hole()
1423 hole_end = min(end, vma->vm_start); in pagemap_pte_hole()
1433 if (!vma) in pagemap_pte_hole()
1437 if (vma->vm_flags & VM_SOFTDIRTY) in pagemap_pte_hole()
1439 for (; addr < min(end, vma->vm_end); addr += PAGE_SIZE) { in pagemap_pte_hole()
1450 struct vm_area_struct *vma, unsigned long addr, pte_t pte) in pte_to_pagemap_entry() argument
1460 page = vm_normal_page(vma, addr, pte); in pte_to_pagemap_entry()
1485 if (vma->vm_flags & VM_SOFTDIRTY) in pte_to_pagemap_entry()
1494 struct vm_area_struct *vma = walk->vma; in pagemap_pmd_range() local
1502 ptl = pmd_trans_huge_lock(pmdp, vma); in pagemap_pmd_range()
1508 if (vma->vm_flags & VM_SOFTDIRTY) in pagemap_pmd_range()
1573 pme = pte_to_pagemap_entry(pm, vma, addr, *pte); in pagemap_pmd_range()
1592 struct vm_area_struct *vma = walk->vma; in pagemap_hugetlb_range() local
1597 if (vma->vm_flags & VM_SOFTDIRTY) in pagemap_hugetlb_range()
1828 static struct page *can_gather_numa_stats(pte_t pte, struct vm_area_struct *vma, in can_gather_numa_stats() argument
1837 page = vm_normal_page(vma, addr, pte); in can_gather_numa_stats()
1853 struct vm_area_struct *vma, in can_gather_numa_stats_pmd() argument
1862 page = vm_normal_page_pmd(vma, addr, pmd); in can_gather_numa_stats_pmd()
1881 struct vm_area_struct *vma = walk->vma; in gather_pte_stats() local
1887 ptl = pmd_trans_huge_lock(pmd, vma); in gather_pte_stats()
1891 page = can_gather_numa_stats_pmd(*pmd, vma, addr); in gather_pte_stats()
1904 struct page *page = can_gather_numa_stats(*pte, vma, addr); in gather_pte_stats()
1954 struct vm_area_struct *vma = v; in show_numa_map() local
1956 struct file *file = vma->vm_file; in show_numa_map()
1957 struct mm_struct *mm = vma->vm_mm; in show_numa_map()
1968 pol = __get_vma_policy(vma, vma->vm_start); in show_numa_map()
1976 seq_printf(m, "%08lx %s", vma->vm_start, buffer); in show_numa_map()
1981 } else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) { in show_numa_map()
1983 } else if (is_stack(vma)) { in show_numa_map()
1987 if (is_vm_hugetlb_page(vma)) in show_numa_map()
1991 walk_page_vma(vma, &show_numa_ops, md); in show_numa_map()
2011 if (md->active < md->pages && !is_vm_hugetlb_page(vma)) in show_numa_map()
2021 seq_printf(m, " kernelpagesize_kB=%lu", vma_kernel_pagesize(vma) >> 10); in show_numa_map()