
Searched refs:vma (Results 1 – 25 of 1086) sorted by relevance


/OK3568_Linux_fs/kernel/drivers/gpu/drm/i915/
i915_vma.c
51 void i915_vma_free(struct i915_vma *vma) in i915_vma_free() argument
53 return kmem_cache_free(global.slab_vmas, vma); in i915_vma_free()
60 static void vma_print_allocator(struct i915_vma *vma, const char *reason) in vma_print_allocator() argument
66 if (!vma->node.stack) { in vma_print_allocator()
68 vma->node.start, vma->node.size, reason); in vma_print_allocator()
72 nr_entries = stack_depot_fetch(vma->node.stack, &entries); in vma_print_allocator()
75 vma->node.start, vma->node.size, reason, buf); in vma_print_allocator()
80 static void vma_print_allocator(struct i915_vma *vma, const char *reason) in vma_print_allocator() argument
108 struct i915_vma *vma; in vma_create() local
114 vma = i915_vma_alloc(); in vma_create()
[all …]
i915_vma.h
50 static inline bool i915_vma_is_active(const struct i915_vma *vma) in i915_vma_is_active() argument
52 return !i915_active_is_idle(&vma->active); in i915_vma_is_active()
55 int __must_check __i915_vma_move_to_active(struct i915_vma *vma,
57 int __must_check i915_vma_move_to_active(struct i915_vma *vma,
63 static inline bool i915_vma_is_ggtt(const struct i915_vma *vma) in i915_vma_is_ggtt() argument
65 return test_bit(I915_VMA_GGTT_BIT, __i915_vma_flags(vma)); in i915_vma_is_ggtt()
68 static inline bool i915_vma_has_ggtt_write(const struct i915_vma *vma) in i915_vma_has_ggtt_write() argument
70 return test_bit(I915_VMA_GGTT_WRITE_BIT, __i915_vma_flags(vma)); in i915_vma_has_ggtt_write()
73 static inline void i915_vma_set_ggtt_write(struct i915_vma *vma) in i915_vma_set_ggtt_write() argument
75 GEM_BUG_ON(!i915_vma_is_ggtt(vma)); in i915_vma_set_ggtt_write()
[all …]
i915_gem_evict.c
53 struct i915_vma *vma, in mark_free() argument
57 if (i915_vma_is_pinned(vma)) in mark_free()
60 list_add(&vma->evict_link, unwind); in mark_free()
61 return drm_mm_scan_add_block(scan, &vma->node); in mark_free()
96 struct i915_vma *vma, *next; in i915_gem_evict_something() local
130 list_for_each_entry_safe(vma, next, &vm->bound_list, vm_link) { in i915_gem_evict_something()
131 if (vma == active) { /* now seen this vma twice */ in i915_gem_evict_something()
153 if (active != ERR_PTR(-EAGAIN) && i915_vma_is_active(vma)) { in i915_gem_evict_something()
155 active = vma; in i915_gem_evict_something()
157 list_move_tail(&vma->vm_link, &vm->bound_list); in i915_gem_evict_something()
[all …]
/OK3568_Linux_fs/kernel/mm/
mmap.c
81 struct vm_area_struct *vma, struct vm_area_struct *prev,
126 void vma_set_page_prot(struct vm_area_struct *vma) in vma_set_page_prot() argument
128 unsigned long vm_flags = vma->vm_flags; in vma_set_page_prot()
131 vm_page_prot = vm_pgprot_modify(vma->vm_page_prot, vm_flags); in vma_set_page_prot()
132 if (vma_wants_writenotify(vma, vm_page_prot)) { in vma_set_page_prot()
137 WRITE_ONCE(vma->vm_page_prot, vm_page_prot); in vma_set_page_prot()
143 static void __remove_shared_vm_struct(struct vm_area_struct *vma, in __remove_shared_vm_struct() argument
146 if (vma->vm_flags & VM_DENYWRITE) in __remove_shared_vm_struct()
148 if (vma->vm_flags & VM_SHARED) in __remove_shared_vm_struct()
152 vma_interval_tree_remove(vma, &mapping->i_mmap); in __remove_shared_vm_struct()
[all …]
mremap.c
70 static pud_t *alloc_new_pud(struct mm_struct *mm, struct vm_area_struct *vma, in alloc_new_pud() argument
84 static pmd_t *alloc_new_pmd(struct mm_struct *mm, struct vm_area_struct *vma, in alloc_new_pmd() argument
90 pud = alloc_new_pud(mm, vma, addr); in alloc_new_pmd()
103 static void take_rmap_locks(struct vm_area_struct *vma) in take_rmap_locks() argument
105 if (vma->vm_file) in take_rmap_locks()
106 i_mmap_lock_write(vma->vm_file->f_mapping); in take_rmap_locks()
107 if (vma->anon_vma) in take_rmap_locks()
108 anon_vma_lock_write(vma->anon_vma); in take_rmap_locks()
111 static void drop_rmap_locks(struct vm_area_struct *vma) in drop_rmap_locks() argument
113 if (vma->anon_vma) in drop_rmap_locks()
[all …]
nommu.c
100 struct vm_area_struct *vma; in kobjsize() local
102 vma = find_vma(current->mm, (unsigned long)objp); in kobjsize()
103 if (vma) in kobjsize()
104 return vma->vm_end - vma->vm_start; in kobjsize()
124 int follow_pfn(struct vm_area_struct *vma, unsigned long address, in follow_pfn() argument
127 if (!(vma->vm_flags & (VM_IO | VM_PFNMAP))) in follow_pfn()
173 struct vm_area_struct *vma; in __vmalloc_user_flags() local
176 vma = find_vma(current->mm, (unsigned long)ret); in __vmalloc_user_flags()
177 if (vma) in __vmalloc_user_flags()
178 vma->vm_flags |= VM_USERMAP; in __vmalloc_user_flags()
[all …]
madvise.c
69 static long madvise_behavior(struct vm_area_struct *vma, in madvise_behavior() argument
73 struct mm_struct *mm = vma->vm_mm; in madvise_behavior()
76 unsigned long new_flags = vma->vm_flags; in madvise_behavior()
92 if (vma->vm_flags & VM_IO) { in madvise_behavior()
100 if (vma->vm_file || vma->vm_flags & VM_SHARED) { in madvise_behavior()
113 if (!is_vm_hugetlb_page(vma) && new_flags & VM_SPECIAL) { in madvise_behavior()
121 error = ksm_madvise(vma, start, end, behavior, &new_flags); in madvise_behavior()
127 error = hugepage_madvise(vma, &new_flags, behavior); in madvise_behavior()
133 if (new_flags == vma->vm_flags) { in madvise_behavior()
134 *prev = vma; in madvise_behavior()
[all …]
mprotect.c
38 static unsigned long change_pte_range(struct vm_area_struct *vma, pmd_t *pmd, in change_pte_range() argument
65 pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl); in change_pte_range()
68 if (prot_numa && !(vma->vm_flags & VM_SHARED) && in change_pte_range()
69 atomic_read(&vma->vm_mm->mm_users) == 1) in change_pte_range()
72 flush_tlb_batched_pending(vma->vm_mm); in change_pte_range()
91 page = vm_normal_page(vma, addr, oldpte); in change_pte_range()
96 if (is_cow_mapping(vma->vm_flags) && in change_pte_range()
116 oldpte = ptep_modify_prot_start(vma, addr, pte); in change_pte_range()
137 !(vma->vm_flags & VM_SOFTDIRTY))) { in change_pte_range()
140 ptep_modify_prot_commit(vma, addr, pte, oldpte, ptent); in change_pte_range()
[all …]
memory.c
420 void free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *vma, in free_pgtables() argument
423 while (vma) { in free_pgtables()
424 struct vm_area_struct *next = vma->vm_next; in free_pgtables()
425 unsigned long addr = vma->vm_start; in free_pgtables()
431 vm_write_begin(vma); in free_pgtables()
432 unlink_anon_vmas(vma); in free_pgtables()
433 vm_write_end(vma); in free_pgtables()
434 unlink_file_vma(vma); in free_pgtables()
436 if (is_vm_hugetlb_page(vma)) { in free_pgtables()
437 hugetlb_free_pgd_range(tlb, addr, vma->vm_end, in free_pgtables()
[all …]
mlock.c
381 struct vm_area_struct *vma, struct zone *zone, in __munlock_pagevec_fill() argument
392 pte = get_locked_pte(vma->vm_mm, start, &ptl); in __munlock_pagevec_fill()
405 page = vm_normal_page(vma, start, *pte); in __munlock_pagevec_fill()
451 void munlock_vma_pages_range(struct vm_area_struct *vma, in munlock_vma_pages_range() argument
454 vm_write_begin(vma); in munlock_vma_pages_range()
455 WRITE_ONCE(vma->vm_flags, vma->vm_flags & VM_LOCKED_CLEAR_MASK); in munlock_vma_pages_range()
456 vm_write_end(vma); in munlock_vma_pages_range()
473 page = follow_page(vma, start, FOLL_GET | FOLL_DUMP); in munlock_vma_pages_range()
512 start = __munlock_pagevec_fill(&pvec, vma, in munlock_vma_pages_range()
534 static int mlock_fixup(struct vm_area_struct *vma, struct vm_area_struct **prev, in mlock_fixup() argument
[all …]
huge_memory.c
66 static inline bool file_thp_enabled(struct vm_area_struct *vma) in file_thp_enabled() argument
68 return transhuge_vma_enabled(vma, vma->vm_flags) && vma->vm_file && in file_thp_enabled()
69 !inode_is_open_for_write(vma->vm_file->f_inode) && in file_thp_enabled()
70 (vma->vm_flags & VM_EXEC); in file_thp_enabled()
73 bool transparent_hugepage_active(struct vm_area_struct *vma) in transparent_hugepage_active() argument
76 unsigned long addr = (vma->vm_end & HPAGE_PMD_MASK) - HPAGE_PMD_SIZE; in transparent_hugepage_active()
78 if (!transhuge_vma_suitable(vma, addr)) in transparent_hugepage_active()
80 if (vma_is_anonymous(vma)) in transparent_hugepage_active()
81 return __transparent_hugepage_enabled(vma); in transparent_hugepage_active()
82 if (vma_is_shmem(vma)) in transparent_hugepage_active()
[all …]
/OK3568_Linux_fs/kernel/drivers/gpu/drm/
drm_vm.c
61 struct vm_area_struct *vma; member
65 static void drm_vm_open(struct vm_area_struct *vma);
66 static void drm_vm_close(struct vm_area_struct *vma);
69 struct vm_area_struct *vma) in drm_io_prot() argument
71 pgprot_t tmp = vm_get_page_prot(vma->vm_flags); in drm_io_prot()
83 if (efi_range_is_wc(vma->vm_start, vma->vm_end - in drm_io_prot()
84 vma->vm_start)) in drm_io_prot()
94 static pgprot_t drm_dma_prot(uint32_t map_type, struct vm_area_struct *vma) in drm_dma_prot() argument
96 pgprot_t tmp = vm_get_page_prot(vma->vm_flags); in drm_dma_prot()
117 struct vm_area_struct *vma = vmf->vma; in drm_vm_fault() local
[all …]
/OK3568_Linux_fs/kernel/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/
vmm.c
750 struct nvkm_vma *vma = kzalloc(sizeof(*vma), GFP_KERNEL); in nvkm_vma_new() local
751 if (vma) { in nvkm_vma_new()
752 vma->addr = addr; in nvkm_vma_new()
753 vma->size = size; in nvkm_vma_new()
754 vma->page = NVKM_VMA_PAGE_NONE; in nvkm_vma_new()
755 vma->refd = NVKM_VMA_PAGE_NONE; in nvkm_vma_new()
757 return vma; in nvkm_vma_new()
761 nvkm_vma_tail(struct nvkm_vma *vma, u64 tail) in nvkm_vma_tail() argument
765 BUG_ON(vma->size == tail); in nvkm_vma_tail()
767 if (!(new = nvkm_vma_new(vma->addr + (vma->size - tail), tail))) in nvkm_vma_tail()
[all …]
uvmm.c
116 struct nvkm_vma *vma; in nvkm_uvmm_mthd_unmap() local
126 vma = nvkm_vmm_node_search(vmm, addr); in nvkm_uvmm_mthd_unmap()
127 if (ret = -ENOENT, !vma || vma->addr != addr) { in nvkm_uvmm_mthd_unmap()
129 addr, vma ? vma->addr : ~0ULL); in nvkm_uvmm_mthd_unmap()
133 if (ret = -ENOENT, (!vma->user && !client->super) || vma->busy) { in nvkm_uvmm_mthd_unmap()
135 vma->user, !client->super, vma->busy); in nvkm_uvmm_mthd_unmap()
139 if (ret = -EINVAL, !vma->memory) { in nvkm_uvmm_mthd_unmap()
144 nvkm_vmm_unmap_locked(vmm, vma, false); in nvkm_uvmm_mthd_unmap()
160 struct nvkm_vma *vma; in nvkm_uvmm_mthd_map() local
179 if (ret = -ENOENT, !(vma = nvkm_vmm_node_search(vmm, addr))) { in nvkm_uvmm_mthd_map()
[all …]
/OK3568_Linux_fs/kernel/drivers/gpu/drm/nouveau/
nouveau_vmm.c
29 nouveau_vma_unmap(struct nouveau_vma *vma) in nouveau_vma_unmap() argument
31 if (vma->mem) { in nouveau_vma_unmap()
32 nvif_vmm_unmap(&vma->vmm->vmm, vma->addr); in nouveau_vma_unmap()
33 vma->mem = NULL; in nouveau_vma_unmap()
38 nouveau_vma_map(struct nouveau_vma *vma, struct nouveau_mem *mem) in nouveau_vma_map() argument
40 struct nvif_vma tmp = { .addr = vma->addr }; in nouveau_vma_map()
41 int ret = nouveau_mem_map(mem, &vma->vmm->vmm, &tmp); in nouveau_vma_map()
44 vma->mem = mem; in nouveau_vma_map()
51 struct nouveau_vma *vma; in nouveau_vma_find() local
53 list_for_each_entry(vma, &nvbo->vma_list, head) { in nouveau_vma_find()
[all …]
/OK3568_Linux_fs/kernel/include/linux/
userfaultfd_k.h
75 static inline bool is_mergeable_vm_userfaultfd_ctx(struct vm_area_struct *vma, in is_mergeable_vm_userfaultfd_ctx() argument
78 return vma->vm_userfaultfd_ctx.ctx == vm_ctx.ctx; in is_mergeable_vm_userfaultfd_ctx()
92 static inline bool uffd_disable_huge_pmd_share(struct vm_area_struct *vma) in uffd_disable_huge_pmd_share() argument
94 return vma->vm_flags & (VM_UFFD_WP | VM_UFFD_MINOR); in uffd_disable_huge_pmd_share()
97 static inline bool userfaultfd_missing(struct vm_area_struct *vma) in userfaultfd_missing() argument
99 return vma->vm_flags & VM_UFFD_MISSING; in userfaultfd_missing()
102 static inline bool userfaultfd_wp(struct vm_area_struct *vma) in userfaultfd_wp() argument
104 return vma->vm_flags & VM_UFFD_WP; in userfaultfd_wp()
107 static inline bool userfaultfd_minor(struct vm_area_struct *vma) in userfaultfd_minor() argument
109 return vma->vm_flags & VM_UFFD_MINOR; in userfaultfd_minor()
[all …]
huge_mm.h
17 struct vm_area_struct *vma);
28 struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
31 bool madvise_free_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
33 int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma, pmd_t *pmd,
35 int zap_huge_pud(struct mmu_gather *tlb, struct vm_area_struct *vma, pud_t *pud,
37 bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr,
39 int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd, unsigned long addr,
58 return vmf_insert_pfn_pmd_prot(vmf, pfn, vmf->vma->vm_page_prot, write); in vmf_insert_pfn_pmd()
77 return vmf_insert_pfn_pud_prot(vmf, pfn, vmf->vma->vm_page_prot, write); in vmf_insert_pfn_pud()
121 static inline bool transhuge_vma_suitable(struct vm_area_struct *vma, in transhuge_vma_suitable() argument
[all …]
/OK3568_Linux_fs/kernel/drivers/gpu/drm/i915/selftests/
i915_gem_gtt.c
354 struct i915_vma *vma; in close_object_list() local
356 vma = i915_vma_instance(obj, vm, NULL); in close_object_list()
357 if (!IS_ERR(vma)) in close_object_list()
358 ignored = i915_vma_unbind(vma); in close_object_list()
375 struct i915_vma *vma; in fill_hole() local
413 vma = i915_vma_instance(obj, vm, NULL); in fill_hole()
414 if (IS_ERR(vma)) in fill_hole()
423 err = i915_vma_pin(vma, 0, 0, offset | flags); in fill_hole()
430 if (!drm_mm_node_allocated(&vma->node) || in fill_hole()
431 i915_vma_misplaced(vma, 0, 0, offset | flags)) { in fill_hole()
[all …]
i915_vma.c
36 static bool assert_vma(struct i915_vma *vma, in assert_vma() argument
42 if (vma->vm != rcu_access_pointer(ctx->vm)) { in assert_vma()
47 if (vma->size != obj->base.size) { in assert_vma()
49 vma->size, obj->base.size); in assert_vma()
53 if (vma->ggtt_view.type != I915_GGTT_VIEW_NORMAL) { in assert_vma()
55 vma->ggtt_view.type); in assert_vma()
67 struct i915_vma *vma; in checked_vma_instance() local
70 vma = i915_vma_instance(obj, vm, view); in checked_vma_instance()
71 if (IS_ERR(vma)) in checked_vma_instance()
72 return vma; in checked_vma_instance()
[all …]
/OK3568_Linux_fs/kernel/drivers/gpu/arm/mali400/ump/linux/
ump_osk_low_level_mem.c
38 static void ump_vma_open(struct vm_area_struct *vma);
39 static void ump_vma_close(struct vm_area_struct *vma);
41 static int ump_cpu_page_fault_handler(struct vm_area_struct *vma, struct vm_fault *vmf);
43 static unsigned long ump_cpu_page_fault_handler(struct vm_area_struct *vma, unsigned long address);
61 static int ump_cpu_page_fault_handler(struct vm_area_struct *vma, struct vm_fault *vmf) in ump_cpu_page_fault_handler() argument
63 static unsigned long ump_cpu_page_fault_handler(struct vm_area_struct *vma, unsigned long address) in ump_cpu_page_fault_handler()
71 MSG_ERR(("VMA: 0x%08lx, virtual address: 0x%08lx\n", (unsigned long)vma, address)); in ump_cpu_page_fault_handler()
80 static void ump_vma_open(struct vm_area_struct *vma) in ump_vma_open() argument
85 vma_usage_tracker = (ump_vma_usage_tracker *)vma->vm_private_data; in ump_vma_open()
90 …A reference count incremented. VMA: 0x%08lx, reference count: %d\n", (unsigned long)vma, new_val)); in ump_vma_open()
[all …]
/OK3568_Linux_fs/kernel/drivers/gpu/drm/msm/
msm_gem_vma.c
42 struct msm_gem_vma *vma) in msm_gem_purge_vma() argument
44 unsigned size = vma->node.size << PAGE_SHIFT; in msm_gem_purge_vma()
47 if (WARN_ON(vma->inuse > 0)) in msm_gem_purge_vma()
51 if (!vma->mapped) in msm_gem_purge_vma()
55 aspace->mmu->funcs->unmap(aspace->mmu, vma->iova, size); in msm_gem_purge_vma()
57 vma->mapped = false; in msm_gem_purge_vma()
62 struct msm_gem_vma *vma) in msm_gem_unmap_vma() argument
64 if (!WARN_ON(!vma->iova)) in msm_gem_unmap_vma()
65 vma->inuse--; in msm_gem_unmap_vma()
70 struct msm_gem_vma *vma, int prot, in msm_gem_map_vma() argument
[all …]
/OK3568_Linux_fs/kernel/drivers/pci/
mmap.c
23 struct vm_area_struct *vma, in pci_mmap_page_range() argument
31 vma->vm_pgoff -= start >> PAGE_SHIFT; in pci_mmap_page_range()
32 return pci_mmap_resource_range(pdev, bar, vma, mmap_state, in pci_mmap_page_range()
44 struct vm_area_struct *vma, in pci_mmap_resource_range() argument
51 if (vma->vm_pgoff + vma_pages(vma) > size) in pci_mmap_resource_range()
55 vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot); in pci_mmap_resource_range()
57 vma->vm_page_prot = pgprot_device(vma->vm_page_prot); in pci_mmap_resource_range()
60 ret = pci_iobar_pfn(pdev, bar, vma); in pci_mmap_resource_range()
64 vma->vm_pgoff += (pci_resource_start(pdev, bar) >> PAGE_SHIFT); in pci_mmap_resource_range()
66 vma->vm_ops = &pci_phys_vm_ops; in pci_mmap_resource_range()
[all …]
/OK3568_Linux_fs/kernel/fs/proc/
task_mmu.c
126 static void seq_print_vma_name(struct seq_file *m, struct vm_area_struct *vma) in seq_print_vma_name() argument
128 const char __user *name = vma_get_anon_name(vma); in seq_print_vma_name()
129 struct mm_struct *mm = vma->vm_mm; in seq_print_vma_name()
181 struct vm_area_struct *vma; in m_start() local
208 vma = find_vma(mm, last_addr); in m_start()
209 if (vma) in m_start()
210 return vma; in m_start()
218 struct vm_area_struct *next, *vma = v; in m_next() local
220 if (vma == priv->tail_vma) in m_next()
222 else if (vma->vm_next) in m_next()
[all …]
/OK3568_Linux_fs/kernel/fs/
userfaultfd.c
227 struct vm_area_struct *vma, in userfaultfd_huge_must_wait() argument
238 ptep = huge_pte_offset(mm, address, vma_mmu_pagesize(vma)); in userfaultfd_huge_must_wait()
259 struct vm_area_struct *vma, in userfaultfd_huge_must_wait() argument
369 struct mm_struct *mm = vmf->vma->vm_mm; in handle_userfault()
396 ctx = vmf->vma->vm_userfaultfd_ctx.ctx; in handle_userfault()
505 if (!is_vm_hugetlb_page(vmf->vma)) in handle_userfault()
509 must_wait = userfaultfd_huge_must_wait(ctx, vmf->vma, in handle_userfault()
607 struct vm_area_struct *vma; in userfaultfd_event_wait_completion() local
612 for (vma = mm->mmap; vma; vma = vma->vm_next) in userfaultfd_event_wait_completion()
613 if (vma->vm_userfaultfd_ctx.ctx == release_new_ctx) { in userfaultfd_event_wait_completion()
[all …]
/OK3568_Linux_fs/kernel/arch/x86/entry/vdso/
vma.c
60 struct vm_area_struct *vma, struct vm_fault *vmf) in vdso_fault() argument
62 const struct vdso_image *image = vma->vm_mm->context.vdso_image; in vdso_fault()
117 static struct page *find_timens_vvar_page(struct vm_area_struct *vma) in find_timens_vvar_page() argument
119 if (likely(vma->vm_mm == current->mm)) in find_timens_vvar_page()
145 struct vm_area_struct *vma; in vdso_join_timens() local
149 for (vma = mm->mmap; vma; vma = vma->vm_next) { in vdso_join_timens()
150 unsigned long size = vma->vm_end - vma->vm_start; in vdso_join_timens()
152 if (vma_is_special_mapping(vma, &vvar_mapping)) in vdso_join_timens()
153 zap_page_range(vma, vma->vm_start, size); in vdso_join_timens()
160 static inline struct page *find_timens_vvar_page(struct vm_area_struct *vma) in find_timens_vvar_page() argument
[all …]
