| /OK3568_Linux_fs/kernel/arch/arm64/include/asm/ |
| mman.h |
    38  static inline pgprot_t arch_vm_get_page_prot(unsigned long vm_flags)    in arch_vm_get_page_prot()  argument
    42  if (vm_flags & VM_ARM64_BTI)    in arch_vm_get_page_prot()
    55  if (vm_flags & VM_MTE)    in arch_vm_get_page_prot()
    60  #define arch_vm_get_page_prot(vm_flags) arch_vm_get_page_prot(vm_flags)    argument
    77  static inline bool arch_validate_flags(unsigned long vm_flags)    in arch_validate_flags()  argument
    83  return !(vm_flags & VM_MTE) || (vm_flags & VM_MTE_ALLOWED);    in arch_validate_flags()
    85  #define arch_validate_flags(vm_flags) arch_validate_flags(vm_flags)    argument
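The arm64 hook above folds VM_ARM64_BTI and VM_MTE into the page protection, and arch_validate_flags() refuses VM_MTE on mappings that do not carry VM_MTE_ALLOWED. Those VMA flags are requested from user space through arm64-specific mmap()/mprotect() protection bits; a minimal sketch, assuming an MTE-capable arm64 kernel, with the fallback PROT_MTE define labelled as an assumption:

    /* Sketch: request a Memory Tagging Extension mapping on arm64.
     * The kernel sets VM_MTE on the resulting VMA, and
     * arch_vm_get_page_prot() then adds the tagged-memory attribute. */
    #include <stdio.h>
    #include <sys/mman.h>

    #ifndef PROT_MTE
    #define PROT_MTE 0x20   /* arm64 uapi value; assumption if the libc header lacks it */
    #endif

    int main(void)
    {
        size_t len = 4096;
        void *p = mmap(NULL, len, PROT_READ | PROT_WRITE | PROT_MTE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (p == MAP_FAILED) {
            perror("mmap(PROT_MTE)");   /* expected on non-MTE hardware/kernels */
            return 1;
        }
        munmap(p, len);
        return 0;
    }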
|
| /OK3568_Linux_fs/kernel/arch/sparc/include/asm/ |
| mman.h |
    49  #define arch_vm_get_page_prot(vm_flags) sparc_vm_get_page_prot(vm_flags)    argument
    50  static inline pgprot_t sparc_vm_get_page_prot(unsigned long vm_flags)    in sparc_vm_get_page_prot()  argument
    52  return (vm_flags & VM_SPARC_ADI) ? __pgprot(_PAGE_MCD_4V) : __pgprot(0);    in sparc_vm_get_page_prot()
    63  #define arch_validate_flags(vm_flags) arch_validate_flags(vm_flags)    argument
    67  static inline bool arch_validate_flags(unsigned long vm_flags)    in arch_validate_flags()  argument
    73  if (vm_flags & VM_SPARC_ADI) {    in arch_validate_flags()
    78  if (vm_flags & (VM_PFNMAP | VM_MIXEDMAP))    in arch_validate_flags()
    89  if (vm_flags & VM_MERGEABLE)    in arch_validate_flags()
|
| /OK3568_Linux_fs/kernel/mm/ |
| mmap.c |
   110  pgprot_t vm_get_page_prot(unsigned long vm_flags)    in vm_get_page_prot()  argument
   112  pgprot_t ret = __pgprot(pgprot_val(protection_map[vm_flags &    in vm_get_page_prot()
   114  pgprot_val(arch_vm_get_page_prot(vm_flags)));    in vm_get_page_prot()
   120  static pgprot_t vm_pgprot_modify(pgprot_t oldprot, unsigned long vm_flags)    in vm_pgprot_modify()  argument
   122  return pgprot_modify(oldprot, vm_get_page_prot(vm_flags));    in vm_pgprot_modify()
   128  unsigned long vm_flags = vma->vm_flags;    in vma_set_page_prot()  local
   131  vm_page_prot = vm_pgprot_modify(vma->vm_page_prot, vm_flags);    in vma_set_page_prot()
   133  vm_flags &= ~VM_SHARED;    in vma_set_page_prot()
   134  vm_page_prot = vm_pgprot_modify(vm_page_prot, vm_flags);    in vma_set_page_prot()
   146  if (vma->vm_flags & VM_DENYWRITE)    in __remove_shared_vm_struct()
   [all …]
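vm_get_page_prot() at line 110 is the central translation from VMA flags to PTE protection: the low VM_READ|VM_WRITE|VM_EXEC|VM_SHARED bits index the global protection_map[] table, and the result is combined with whatever arch_vm_get_page_prot() contributes (pkey bits on x86 and powerpc, SAO on powerpc, BTI/MTE on arm64). A self-contained model of just that composition, with pgprot reduced to an unsigned long; every MODEL_* name is illustrative, not the kernel's:

    /* Simplified, stand-alone model of vm_get_page_prot(): base bits come
     * from a 16-entry table indexed by the four low vm_flags bits, and the
     * architecture's extra bits are OR'ed on top. */
    #include <stdio.h>

    #define MODEL_VM_READ   0x1UL
    #define MODEL_VM_WRITE  0x2UL
    #define MODEL_VM_EXEC   0x4UL
    #define MODEL_VM_SHARED 0x8UL

    static const unsigned long model_protection_map[16] = {
        /* index = vm_flags & (READ|WRITE|EXEC|SHARED); values are made up */
        [MODEL_VM_READ]                  = 0x01,
        [MODEL_VM_READ | MODEL_VM_WRITE] = 0x03,
        [MODEL_VM_READ | MODEL_VM_EXEC]  = 0x05,
        /* remaining combinations left zero for brevity */
    };

    /* Stand-in for arch_vm_get_page_prot(): arch-specific extra bits. */
    static unsigned long model_arch_page_prot(unsigned long vm_flags)
    {
        (void)vm_flags;
        return 0;   /* e.g. pkey or MTE bits on real architectures */
    }

    static unsigned long model_vm_get_page_prot(unsigned long vm_flags)
    {
        return model_protection_map[vm_flags & 0xf] |
               model_arch_page_prot(vm_flags);
    }

    int main(void)
    {
        printf("prot=%#lx\n",
               model_vm_get_page_prot(MODEL_VM_READ | MODEL_VM_WRITE));
        return 0;
    }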
|
| mremap.c |
   546  unsigned long vm_flags = vma->vm_flags;    in move_vma()  local
   570  MADV_UNMERGEABLE, &vm_flags);    in move_vma()
   620  if (vm_flags & VM_ACCOUNT) {    in move_vma()
   621  vma->vm_flags &= ~VM_ACCOUNT;    in move_vma()
   638  vm_stat_account(mm, vma->vm_flags, new_len >> PAGE_SHIFT);    in move_vma()
   641  if (unlikely(vma->vm_flags & VM_PFNMAP))    in move_vma()
   645  if (vm_flags & VM_ACCOUNT) {    in move_vma()
   647  vma->vm_flags |= VM_ACCOUNT;    in move_vma()
   664  vma->vm_flags &= VM_LOCKED_CLEAR_MASK;    in move_vma()
   676  if (vm_flags & VM_LOCKED) {    in move_vma()
   [all …]
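move_vma() snapshots the old VMA's vm_flags and carries them across the relocation: VM_ACCOUNT is cleared while the mapping is copied and restored afterwards (the hunks at lines 620 and 645), and VM_LOCKED is either re-established or stripped via VM_LOCKED_CLEAR_MASK. Any relocating mremap() can land here; a minimal user-space trigger, assuming glibc's mremap() wrapper:

    /* Sketch: grow a mapping with mremap(MREMAP_MAYMOVE), which may take
     * the move_vma() path above when in-place expansion is impossible. */
    #define _GNU_SOURCE
    #include <stdio.h>
    #include <sys/mman.h>

    int main(void)
    {
        size_t old_len = 4096, new_len = 8 * 4096;
        void *p = mmap(NULL, old_len, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (p == MAP_FAILED)
            return 1;

        void *q = mremap(p, old_len, new_len, MREMAP_MAYMOVE);
        if (q == MAP_FAILED) {
            perror("mremap");
            return 1;
        }
        printf("moved %p -> %p\n", p, q);   /* addresses differ if move_vma() ran */
        munmap(q, new_len);
        return 0;
    }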
|
| nommu.c |
   127  if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)))    in follow_pfn()
   155  pgprot_t prot, unsigned long vm_flags, int node,    in __vmalloc_node_range()  argument
   178  vma->vm_flags |= VM_USERMAP;    in __vmalloc_user_flags()
   540  if (region->vm_flags & VM_MAPPED_COPY)    in __put_nommu_region()
   921  unsigned long vm_flags;    in determine_vm_flags()  local
   923  vm_flags = calc_vm_prot_bits(prot, 0) | calc_vm_flag_bits(flags);    in determine_vm_flags()
   928  vm_flags |= VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;    in determine_vm_flags()
   930  vm_flags |= VM_MAYSHARE;    in determine_vm_flags()
   935  vm_flags |= VM_MAYSHARE | (capabilities & NOMMU_VMFLAGS);    in determine_vm_flags()
   937  vm_flags |= VM_SHARED;    in determine_vm_flags()
   [all …]
|
| userfaultfd.c |
    64  bool writable = dst_vma->vm_flags & VM_WRITE;    in mfill_atomic_install_pte()
    65  bool vm_shared = dst_vma->vm_flags & VM_SHARED;    in mfill_atomic_install_pte()
   286  int vm_alloc_shared = dst_vma->vm_flags & VM_SHARED;    in __mcopy_atomic_hugetlb()
   287  int vm_shared = dst_vma->vm_flags & VM_SHARED;    in __mcopy_atomic_hugetlb()
   337  vm_shared = dst_vma->vm_flags & VM_SHARED;    in __mcopy_atomic_hugetlb()
   512  if (!(dst_vma->vm_flags & VM_SHARED)) {    in mfill_atomic_pte()
   588  dst_vma->vm_flags & VM_SHARED))    in __mcopy_atomic()
   596  if (wp_copy && !(dst_vma->vm_flags & VM_UFFD_WP))    in __mcopy_atomic()
   617  if (!(dst_vma->vm_flags & VM_SHARED) &&    in __mcopy_atomic()
   757  if (!dst_vma || (dst_vma->vm_flags & VM_SHARED))    in mwriteprotect_range()
   [all …]
|
| mlock.c |
   455  WRITE_ONCE(vma->vm_flags, vma->vm_flags & VM_LOCKED_CLEAR_MASK);    in munlock_vma_pages_range()
   542  vm_flags_t old_flags = vma->vm_flags;    in mlock_fixup()
   544  if (newflags == vma->vm_flags || (vma->vm_flags & VM_SPECIAL) ||    in mlock_fixup()
   589  WRITE_ONCE(vma->vm_flags, newflags);    in mlock_fixup()
   622  vm_flags_t newflags = vma->vm_flags & VM_LOCKED_CLEAR_MASK;    in apply_vma_lock_flags()
   673  if (vma->vm_flags & VM_LOCKED) {    in count_mm_mlocked_page_nr()
   741  vm_flags_t vm_flags = VM_LOCKED;    in SYSCALL_DEFINE3()  local
   747  vm_flags |= VM_LOCKONFAULT;    in SYSCALL_DEFINE3()
   749  return do_mlock(start, len, vm_flags);    in SYSCALL_DEFINE3()
   804  newflags = vma->vm_flags & VM_LOCKED_CLEAR_MASK;    in apply_mlockall_flags()
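mlock_fixup() publishes the new lock state with WRITE_ONCE(), and the SYSCALL_DEFINE3 hunk at line 741 shows MLOCK_ONFAULT being turned into VM_LOCKONFAULT on top of VM_LOCKED. A user-space sketch that requests exactly that combination; the raw syscall and the fallback MLOCK_ONFAULT define are assumptions made to avoid depending on a recent libc wrapper:

    /* Sketch: lock a range on fault only -> VM_LOCKED | VM_LOCKONFAULT. */
    #include <stdio.h>
    #include <sys/mman.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    #ifndef MLOCK_ONFAULT
    #define MLOCK_ONFAULT 0x01   /* uapi value; defined here in case the headers lack it */
    #endif

    int main(void)
    {
        size_t len = 4096;
        void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (p == MAP_FAILED)
            return 1;

        /* Equivalent to mlock2(p, len, MLOCK_ONFAULT). */
        if (syscall(SYS_mlock2, p, len, MLOCK_ONFAULT) != 0)
            perror("mlock2");

        munlock(p, len);
        munmap(p, len);
        return 0;
    }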
|
| mprotect.c |
    68  if (prot_numa && !(vma->vm_flags & VM_SHARED) &&    in change_pte_range()
    96  if (is_cow_mapping(vma->vm_flags) &&    in change_pte_range()
   137  !(vma->vm_flags & VM_SOFTDIRTY))) {    in change_pte_range()
   403  unsigned long oldflags = vma->vm_flags;    in mprotect_fixup()
   421  (vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) &&    in mprotect_fixup()
   460  VM_WARN_ON((vma->vm_flags ^ newflags) & ~VM_SOFTDIRTY);    in mprotect_fixup()
   484  WRITE_ONCE(vma->vm_flags, newflags);    in mprotect_fixup()
   564  if (!(vma->vm_flags & VM_GROWSDOWN))    in do_mprotect_pkey()
   572  if (!(vma->vm_flags & VM_GROWSUP))    in do_mprotect_pkey()
   587  if (rier && (vma->vm_flags & VM_MAYEXEC))    in do_mprotect_pkey()
   [all …]
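mprotect_fixup() derives newflags from the caller's protection bits, warns if anything other than VM_SOFTDIRTY changed unexpectedly (line 460), and stores the result with WRITE_ONCE() before the page tables are rewritten. The user-visible trigger is an ordinary mprotect(); a minimal sketch:

    /* Sketch: drop write permission on a page, which ends up in
     * mprotect_fixup() updating vm_flags and vma->vm_page_prot. */
    #include <stdio.h>
    #include <sys/mman.h>

    int main(void)
    {
        size_t len = 4096;
        char *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (p == MAP_FAILED)
            return 1;

        p[0] = 42;                                  /* writable here */
        if (mprotect(p, len, PROT_READ) != 0)       /* VM_WRITE cleared */
            perror("mprotect");

        munmap(p, len);
        return 0;
    }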
|
| /OK3568_Linux_fs/kernel/include/trace/events/ |
| fs_dax.h |
    18  __field(unsigned long, vm_flags)
    31  __entry->vm_flags = vmf->vma->vm_flags;
    43  __entry->vm_flags & VM_SHARED ? "shared" : "private",
    70  __field(unsigned long, vm_flags)
    79  __entry->vm_flags = vmf->vma->vm_flags;
    89  __entry->vm_flags & VM_SHARED ? "shared" : "private",
   111  __field(unsigned long, vm_flags)
   122  __entry->vm_flags = vmf->vma->vm_flags;
   134  __entry->vm_flags & VM_SHARED ? "shared" : "private",
   158  __field(unsigned long, vm_flags)
   [all …]
|
| /OK3568_Linux_fs/kernel/arch/powerpc/include/asm/ |
| mman.h |
    27  static inline pgprot_t arch_vm_get_page_prot(unsigned long vm_flags)    in arch_vm_get_page_prot()  argument
    30  return (vm_flags & VM_SAO) ?    in arch_vm_get_page_prot()
    31  __pgprot(_PAGE_SAO | vmflag_to_pte_pkey_bits(vm_flags)) :    in arch_vm_get_page_prot()
    32  __pgprot(0 | vmflag_to_pte_pkey_bits(vm_flags));    in arch_vm_get_page_prot()
    34  return (vm_flags & VM_SAO) ? __pgprot(_PAGE_SAO) : __pgprot(0);    in arch_vm_get_page_prot()
    37  #define arch_vm_get_page_prot(vm_flags) arch_vm_get_page_prot(vm_flags)    argument
|
| /OK3568_Linux_fs/kernel/arch/powerpc/include/asm/book3s/64/ |
| hash-pkey.h |
     5  static inline u64 hash__vmflag_to_pte_pkey_bits(u64 vm_flags)    in hash__vmflag_to_pte_pkey_bits()  argument
     7  return (((vm_flags & VM_PKEY_BIT0) ? H_PTE_PKEY_BIT0 : 0x0UL) |    in hash__vmflag_to_pte_pkey_bits()
     8  ((vm_flags & VM_PKEY_BIT1) ? H_PTE_PKEY_BIT1 : 0x0UL) |    in hash__vmflag_to_pte_pkey_bits()
     9  ((vm_flags & VM_PKEY_BIT2) ? H_PTE_PKEY_BIT2 : 0x0UL) |    in hash__vmflag_to_pte_pkey_bits()
    10  ((vm_flags & VM_PKEY_BIT3) ? H_PTE_PKEY_BIT3 : 0x0UL) |    in hash__vmflag_to_pte_pkey_bits()
    11  ((vm_flags & VM_PKEY_BIT4) ? H_PTE_PKEY_BIT4 : 0x0UL));    in hash__vmflag_to_pte_pkey_bits()
|
| /OK3568_Linux_fs/kernel/arch/x86/include/uapi/asm/ |
| mman.h |
    16  #define arch_vm_get_page_prot(vm_flags) __pgprot( \    argument
    17  ((vm_flags) & VM_PKEY_BIT0 ? _PAGE_PKEY_BIT0 : 0) | \
    18  ((vm_flags) & VM_PKEY_BIT1 ? _PAGE_PKEY_BIT1 : 0) | \
    19  ((vm_flags) & VM_PKEY_BIT2 ? _PAGE_PKEY_BIT2 : 0) | \
    20  ((vm_flags) & VM_PKEY_BIT3 ? _PAGE_PKEY_BIT3 : 0))
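On x86 the hook is a pure macro: each VM_PKEY_BITn present in vm_flags is copied into the matching _PAGE_PKEY_BITn of the returned protection, so the key chosen at pkey_mprotect() time ends up in every PTE of the VMA. A user-space sketch, assuming protection-keys hardware and a libc that ships the pkey_alloc()/pkey_mprotect() wrappers (glibc 2.27 or later):

    /* Sketch: tag a mapping with a protection key; the key number is what
     * the macro above encodes into the PTE via VM_PKEY_BIT0..3. */
    #define _GNU_SOURCE
    #include <stdio.h>
    #include <sys/mman.h>

    int main(void)
    {
        int pkey = pkey_alloc(0, 0);        /* no initial access restrictions */
        if (pkey < 0) {
            perror("pkey_alloc");           /* expected without pkey support */
            return 1;
        }

        size_t len = 4096;
        void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (p == MAP_FAILED)
            return 1;

        if (pkey_mprotect(p, len, PROT_READ | PROT_WRITE, pkey) != 0)
            perror("pkey_mprotect");

        pkey_free(pkey);
        munmap(p, len);
        return 0;
    }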
|
| /OK3568_Linux_fs/kernel/include/linux/ |
| khugepaged.h |
    18  unsigned long vm_flags);
    57  unsigned long vm_flags)    in khugepaged_enter()  argument
    62  (khugepaged_req_madv() && (vm_flags & VM_HUGEPAGE))) &&    in khugepaged_enter()
    63  !(vm_flags & VM_NOHUGEPAGE) &&    in khugepaged_enter()
    78  unsigned long vm_flags)    in khugepaged_enter()  argument
    83  unsigned long vm_flags)    in khugepaged_enter_vma_merge()  argument
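khugepaged_enter() only registers the mm when the VMA is either under the "always" THP policy or carries VM_HUGEPAGE, and never when VM_NOHUGEPAGE is set; both bits are driven from user space through madvise(). A minimal sketch:

    /* Sketch: madvise(MADV_HUGEPAGE) sets VM_HUGEPAGE on the VMA, which is
     * exactly the bit khugepaged_enter() tests above; MADV_NOHUGEPAGE sets
     * VM_NOHUGEPAGE and opts the range out. */
    #include <stdio.h>
    #include <sys/mman.h>

    int main(void)
    {
        size_t len = 4 * 1024 * 1024;       /* a couple of PMD-sized units */
        void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (p == MAP_FAILED)
            return 1;

        if (madvise(p, len, MADV_HUGEPAGE) != 0)
            perror("madvise(MADV_HUGEPAGE)");   /* THP may be compiled out */

        munmap(p, len);
        return 0;
    }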
|
| userfaultfd_k.h |
    94  return vma->vm_flags & (VM_UFFD_WP | VM_UFFD_MINOR);    in uffd_disable_huge_pmd_share()
    99  return vma->vm_flags & VM_UFFD_MISSING;    in userfaultfd_missing()
   104  return vma->vm_flags & VM_UFFD_WP;    in userfaultfd_wp()
   109  return vma->vm_flags & VM_UFFD_MINOR;    in userfaultfd_minor()
   126  return vma->vm_flags & __VM_UFFD_FLAGS;    in userfaultfd_armed()
|
| huge_mm.h |
   137  unsigned long vm_flags)    in transhuge_vma_enabled()  argument
   140  if ((vm_flags & VM_NOHUGEPAGE) ||    in transhuge_vma_enabled()
   159  if (!transhuge_vma_enabled(vma, vma->vm_flags))    in __transparent_hugepage_enabled()
   173  return !!(vma->vm_flags & VM_HUGEPAGE);    in __transparent_hugepage_enabled()
   226  int hugepage_madvise(struct vm_area_struct *vma, unsigned long *vm_flags,
   377  unsigned long vm_flags)    in transhuge_vma_enabled()  argument
   421  unsigned long *vm_flags, int advice)    in hugepage_madvise()  argument
|
| /OK3568_Linux_fs/kernel/fs/ |
| userfaultfd.c |
   615  vma->vm_flags &= ~__VM_UFFD_FLAGS;    in userfaultfd_event_wait_completion()
   648  WRITE_ONCE(vma->vm_flags,    in dup_userfaultfd()
   649  vma->vm_flags & ~__VM_UFFD_FLAGS);    in dup_userfaultfd()
   731  vma->vm_flags &= ~__VM_UFFD_FLAGS;    in mremap_userfaultfd_prep()
   872  !!(vma->vm_flags & __VM_UFFD_FLAGS));    in userfaultfd_release()
   877  new_flags = vma->vm_flags & ~__VM_UFFD_FLAGS;    in userfaultfd_release()
   889  WRITE_ONCE(vma->vm_flags, new_flags);    in userfaultfd_release()
  1260  unsigned long vm_flags)    in vma_can_userfault()  argument
  1263  if (vm_flags & VM_UFFD_WP) {    in vma_can_userfault()
  1268  if (vm_flags & VM_UFFD_MINOR) {    in vma_can_userfault()
   [all …]
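fs/userfaultfd.c is where __VM_UFFD_FLAGS get attached to and stripped from vm_flags as ranges are registered, forked, remapped and released, and vma_can_userfault() at line 1260 gates which VMA types may carry VM_UFFD_WP or VM_UFFD_MINOR. Registration from user space looks roughly like the sketch below, assuming the userfaultfd(2) syscall is available and MISSING-mode tracking of anonymous memory:

    /* Sketch: create a userfaultfd and register a range for MISSING faults,
     * which sets VM_UFFD_MISSING in the VMA's vm_flags. */
    #include <fcntl.h>
    #include <linux/userfaultfd.h>
    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <sys/mman.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    int main(void)
    {
        int uffd = syscall(SYS_userfaultfd, O_CLOEXEC | O_NONBLOCK);
        if (uffd < 0) {
            perror("userfaultfd");
            return 1;
        }

        struct uffdio_api api = { .api = UFFD_API, .features = 0 };
        if (ioctl(uffd, UFFDIO_API, &api) != 0)
            return 1;

        size_t len = 4096;
        void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (p == MAP_FAILED)
            return 1;

        struct uffdio_register reg = {
            .range = { .start = (unsigned long)p, .len = len },
            .mode  = UFFDIO_REGISTER_MODE_MISSING,
        };
        if (ioctl(uffd, UFFDIO_REGISTER, &reg) != 0)
            perror("UFFDIO_REGISTER");

        /* A real monitor would now read fault events from uffd and resolve
         * them with UFFDIO_COPY / UFFDIO_ZEROPAGE. */
        close(uffd);
        munmap(p, len);
        return 0;
    }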
|
| /OK3568_Linux_fs/kernel/arch/nds32/mm/ |
| cacheflush.c |
    34  cpu_cache_wbinval_page(kaddr, vma->vm_flags & VM_EXEC);    in flush_icache_page()
    68  (vma->vm_flags & VM_EXEC)) {    in update_mmu_cache()
    72  cpu_cache_wbinval_page(kaddr, vma->vm_flags & VM_EXEC);    in update_mmu_cache()
   139  if (vma->vm_flags & VM_EXEC)    in flush_cache_range()
   146  cpu_cache_wbinval_page(start, vma->vm_flags & VM_EXEC);    in flush_cache_range()
   160  cpu_cache_wbinval_page(vto, vma->vm_flags & VM_EXEC);    in flush_cache_page()
   274  if (vma->vm_flags & VM_EXEC) {    in copy_to_user_page()
   310  if (vma->vm_flags & VM_EXEC)    in flush_anon_page()
|
| /OK3568_Linux_fs/kernel/drivers/gpu/arm/mali400/mali/linux/ |
| mali_memory.c |
   169  (unsigned int)(vma->vm_end - vma->vm_start), vma->vm_flags));    in mali_mmap()
   213  vma->vm_flags |= VM_IO;    in mali_mmap()
   214  vma->vm_flags |= VM_DONTCOPY;    in mali_mmap()
   215  vma->vm_flags |= VM_PFNMAP;    in mali_mmap()
   217  vma->vm_flags |= VM_RESERVED;    in mali_mmap()
   219  vma->vm_flags |= VM_DONTDUMP;    in mali_mmap()
   220  vma->vm_flags |= VM_DONTEXPAND;    in mali_mmap()
   232  if (!(vma->vm_flags & VM_WRITE)) {    in mali_mmap()
   235  vma->vm_flags |= VM_WRITE | VM_READ;    in mali_mmap()
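mali_mmap() shows the usual driver pattern: mark the VMA as device memory (VM_IO, VM_PFNMAP, VM_DONTCOPY, plus VM_DONTDUMP/VM_DONTEXPAND on newer kernels or VM_RESERVED on older ones) before inserting PFNs. A hypothetical kernel-side sketch of the same pattern for an imaginary character device; my_dev_phys/my_dev_size are placeholders, and the fragment is meant to be built as part of a module against a kernel of this tree's vintage, not taken as the Mali driver's actual code:

    /* Hypothetical character-device .mmap handler illustrating the flag
     * setup used by mali_mmap(); not the driver's real code. */
    #include <linux/errno.h>
    #include <linux/fs.h>
    #include <linux/mm.h>
    #include <linux/module.h>

    static phys_addr_t my_dev_phys;   /* placeholder: device aperture base */
    static size_t      my_dev_size;   /* placeholder: aperture size        */

    static int my_dev_mmap(struct file *filp, struct vm_area_struct *vma)
    {
        unsigned long size = vma->vm_end - vma->vm_start;

        if (size > my_dev_size)
            return -EINVAL;

        /* Device memory: PFN-mapped (no struct pages behind it), never
         * COW-duplicated, never dumped, never expanded by mremap(). */
        vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTCOPY |
                         VM_DONTEXPAND | VM_DONTDUMP;
        vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

        return remap_pfn_range(vma, vma->vm_start,
                               my_dev_phys >> PAGE_SHIFT,
                               size, vma->vm_page_prot);
    }

    static const struct file_operations my_dev_fops = {
        .owner = THIS_MODULE,
        .mmap  = my_dev_mmap,
    };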
|
| /OK3568_Linux_fs/kernel/arch/hexagon/mm/ |
| vm_fault.c |
    68  if (!(vma->vm_flags & VM_GROWSDOWN))    in do_page_fault()
    80  if (!(vma->vm_flags & VM_EXEC))    in do_page_fault()
    84  if (!(vma->vm_flags & VM_READ))    in do_page_fault()
    88  if (!(vma->vm_flags & VM_WRITE))    in do_page_fault()
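The hexagon fault handler, like the nios2, alpha and sparc ones further down, has the same shape: grow a VM_GROWSDOWN stack VMA if the address just misses it, then refuse the fault unless vm_flags grants the attempted access. A small self-contained model of only the permission check, with MODEL_* constants standing in for the kernel's VM_* bits:

    /* Stand-alone model of the access check in do_page_fault(): an
     * instruction fetch needs EXEC, a store needs WRITE, a load needs READ. */
    #include <stdbool.h>
    #include <stdio.h>

    #define MODEL_VM_READ  0x1u
    #define MODEL_VM_WRITE 0x2u
    #define MODEL_VM_EXEC  0x4u

    enum fault_kind { FAULT_READ, FAULT_WRITE, FAULT_EXEC };

    static bool access_permitted(unsigned int vm_flags, enum fault_kind kind)
    {
        switch (kind) {
        case FAULT_EXEC:  return vm_flags & MODEL_VM_EXEC;
        case FAULT_WRITE: return vm_flags & MODEL_VM_WRITE;
        case FAULT_READ:  return vm_flags & MODEL_VM_READ;
        }
        return false;
    }

    int main(void)
    {
        unsigned int ro_mapping = MODEL_VM_READ;
        printf("write to read-only VMA allowed? %d\n",
               access_permitted(ro_mapping, FAULT_WRITE));   /* prints 0 */
        return 0;
    }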
|
| /OK3568_Linux_fs/kernel/drivers/gpu/arm/mali400/ump/linux/ |
| ump_osk_low_level_mem.c |
   145  vma->vm_flags |= VM_IO;    in _ump_osk_mem_mapregion_init()
   147  vma->vm_flags |= VM_RESERVED;    in _ump_osk_mem_mapregion_init()
   149  vma->vm_flags |= VM_DONTDUMP;    in _ump_osk_mem_mapregion_init()
   150  vma->vm_flags |= VM_DONTEXPAND;    in _ump_osk_mem_mapregion_init()
   151  vma->vm_flags |= VM_PFNMAP;    in _ump_osk_mem_mapregion_init()
   209  (unsigned int)vma->vm_page_prot, vma->vm_flags, retval));    in _ump_osk_mem_mapregion_map()
|
| /OK3568_Linux_fs/kernel/arch/nios2/mm/ |
| cacheflush.c |
    87  if (!(mpnt->vm_flags & VM_MAYSHARE))    in flush_aliases()
   136  if (vma == NULL || (vma->vm_flags & VM_EXEC))    in flush_cache_range()
   156  if (vma->vm_flags & VM_EXEC)    in flush_cache_page()
   228  if (vma->vm_flags & VM_EXEC)    in update_mmu_cache()
   259  if (vma->vm_flags & VM_EXEC)    in copy_from_user_page()
   270  if (vma->vm_flags & VM_EXEC)    in copy_to_user_page()
|
| fault.c |
   101  if (!(vma->vm_flags & VM_GROWSDOWN))    in do_page_fault()
   118  if (!(vma->vm_flags & VM_EXEC))    in do_page_fault()
   122  if (!(vma->vm_flags & VM_READ))    in do_page_fault()
   126  if (!(vma->vm_flags & VM_WRITE))    in do_page_fault()
|
| /OK3568_Linux_fs/kernel/drivers/gpu/drm/vmwgfx/ |
| vmwgfx_ttm_glue.c |
    52  if ((vma->vm_flags & (VM_SHARED | VM_MAYWRITE)) != VM_MAYWRITE)    in vmw_mmap()
    53  vma->vm_flags = (vma->vm_flags & ~VM_MIXEDMAP) | VM_PFNMAP;    in vmw_mmap()
|
| /OK3568_Linux_fs/kernel/arch/alpha/mm/ |
| fault.c |
   128  if (!(vma->vm_flags & VM_GROWSDOWN))    in do_page_fault()
   138  if (!(vma->vm_flags & VM_EXEC))    in do_page_fault()
   142  if (!(vma->vm_flags & (VM_READ | VM_WRITE)))    in do_page_fault()
   145  if (!(vma->vm_flags & VM_WRITE))    in do_page_fault()
|
| /OK3568_Linux_fs/kernel/arch/sparc/mm/ |
| fault_32.c |
   208  if (!(vma->vm_flags & VM_GROWSDOWN))    in do_sparc_fault()
   219  if (!(vma->vm_flags & VM_WRITE))    in do_sparc_fault()
   223  if (!(vma->vm_flags & (VM_READ | VM_EXEC)))    in do_sparc_fault()
   386  if (!(vma->vm_flags & VM_GROWSDOWN))    in force_user_fault()
   393  if (!(vma->vm_flags & VM_WRITE))    in force_user_fault()
   397  if (!(vma->vm_flags & (VM_READ | VM_EXEC)))    in force_user_fault()
|