Home
last modified time | relevance | path

Searched refs:vmf (Results 1 – 25 of 145) sorted by relevance

123456

/OK3568_Linux_fs/kernel/mm/
H A D memory.c 2627 static bool pte_spinlock(struct vm_fault *vmf) in pte_spinlock() argument
2635 if (!(vmf->flags & FAULT_FLAG_SPECULATIVE)) { in pte_spinlock()
2636 vmf->ptl = pte_lockptr(vmf->vma->vm_mm, vmf->pmd); in pte_spinlock()
2637 spin_lock(vmf->ptl); in pte_spinlock()
2642 if (vma_has_changed(vmf)) { in pte_spinlock()
2643 trace_spf_vma_changed(_RET_IP_, vmf->vma, vmf->address); in pte_spinlock()
2652 pmdval = READ_ONCE(*vmf->pmd); in pte_spinlock()
2653 if (!pmd_same(pmdval, vmf->orig_pmd)) { in pte_spinlock()
2654 trace_spf_pmd_changed(_RET_IP_, vmf->vma, vmf->address); in pte_spinlock()
2659 vmf->ptl = pte_lockptr(vmf->vma->vm_mm, vmf->pmd); in pte_spinlock()
[all …]
H A D huge_memory.c 581 static vm_fault_t __do_huge_pmd_anonymous_page(struct vm_fault *vmf, in __do_huge_pmd_anonymous_page() argument
584 struct vm_area_struct *vma = vmf->vma; in __do_huge_pmd_anonymous_page()
586 unsigned long haddr = vmf->address & HPAGE_PMD_MASK; in __do_huge_pmd_anonymous_page()
605 clear_huge_page(page, vmf->address, HPAGE_PMD_NR); in __do_huge_pmd_anonymous_page()
613 vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd); in __do_huge_pmd_anonymous_page()
614 if (unlikely(!pmd_none(*vmf->pmd))) { in __do_huge_pmd_anonymous_page()
627 spin_unlock(vmf->ptl); in __do_huge_pmd_anonymous_page()
630 ret2 = handle_userfault(vmf, VM_UFFD_MISSING); in __do_huge_pmd_anonymous_page()
639 pgtable_trans_huge_deposit(vma->vm_mm, vmf->pmd, pgtable); in __do_huge_pmd_anonymous_page()
640 set_pmd_at(vma->vm_mm, haddr, vmf->pmd, entry); in __do_huge_pmd_anonymous_page()
[all …]
H A D filemap.c 2584 static int lock_page_maybe_drop_mmap(struct vm_fault *vmf, struct page *page, in lock_page_maybe_drop_mmap() argument
2595 if (vmf->flags & FAULT_FLAG_RETRY_NOWAIT) in lock_page_maybe_drop_mmap()
2598 *fpin = maybe_unlock_mmap_for_io(vmf, *fpin); in lock_page_maybe_drop_mmap()
2599 if (vmf->flags & FAULT_FLAG_KILLABLE) { in lock_page_maybe_drop_mmap()
2608 mmap_read_unlock(vmf->vma->vm_mm); in lock_page_maybe_drop_mmap()
2624 static struct file *do_sync_mmap_readahead(struct vm_fault *vmf) in do_sync_mmap_readahead() argument
2626 struct file *file = vmf->vma->vm_file; in do_sync_mmap_readahead()
2629 DEFINE_READAHEAD(ractl, file, mapping, vmf->pgoff); in do_sync_mmap_readahead()
2634 if (vmf->vma->vm_flags & VM_RAND_READ) in do_sync_mmap_readahead()
2639 if (vmf->vma->vm_flags & VM_SEQ_READ) { in do_sync_mmap_readahead()
[all …]
H A D swap_state.c 655 struct vm_fault *vmf) in swap_cluster_readahead() argument
665 struct vm_area_struct *vma = vmf->vma; in swap_cluster_readahead()
666 unsigned long addr = vmf->address; in swap_cluster_readahead()
755 static void swap_ra_info(struct vm_fault *vmf, in swap_ra_info() argument
758 struct vm_area_struct *vma = vmf->vma; in swap_ra_info()
776 faddr = vmf->address; in swap_ra_info()
777 orig_pte = pte = pte_offset_map(vmf->pmd, faddr); in swap_ra_info()
838 struct vm_fault *vmf) in swap_vma_readahead() argument
841 struct vm_area_struct *vma = vmf->vma; in swap_vma_readahead()
849 swap_ra_info(vmf, &ra_info); in swap_vma_readahead()
[all …]
H A D internal.h 37 vm_fault_t do_swap_page(struct vm_fault *vmf);
44 static inline bool vma_has_changed(struct vm_fault *vmf) in vma_has_changed() argument
46 int ret = RB_EMPTY_NODE(&vmf->vma->vm_rb); in vma_has_changed()
47 unsigned int seq = READ_ONCE(vmf->vma->vm_sequence.sequence); in vma_has_changed()
55 return ret || seq != vmf->sequence; in vma_has_changed()
450 static inline struct file *maybe_unlock_mmap_for_io(struct vm_fault *vmf, in maybe_unlock_mmap_for_io() argument
453 int flags = vmf->flags; in maybe_unlock_mmap_for_io()
465 fpin = get_file(vmf->vma->vm_file); in maybe_unlock_mmap_for_io()
466 mmap_read_unlock(vmf->vma->vm_mm); in maybe_unlock_mmap_for_io()
/OK3568_Linux_fs/kernel/include/trace/events/
H A D fs_dax.h 11 TP_PROTO(struct inode *inode, struct vm_fault *vmf,
13 TP_ARGS(inode, vmf, max_pgoff, result),
29 __entry->vm_start = vmf->vma->vm_start;
30 __entry->vm_end = vmf->vma->vm_end;
31 __entry->vm_flags = vmf->vma->vm_flags;
32 __entry->address = vmf->address;
33 __entry->flags = vmf->flags;
34 __entry->pgoff = vmf->pgoff;
56 TP_PROTO(struct inode *inode, struct vm_fault *vmf, \
58 TP_ARGS(inode, vmf, max_pgoff, result))
[all …]
/OK3568_Linux_fs/kernel/drivers/dax/
H A D device.c 77 struct vm_fault *vmf, pfn_t *pfn) in __dev_dax_pte_fault() argument
83 if (check_vma(dev_dax, vmf->vma, __func__)) in __dev_dax_pte_fault()
95 phys = dax_pgoff_to_phys(dev_dax, vmf->pgoff, PAGE_SIZE); in __dev_dax_pte_fault()
97 dev_dbg(dev, "pgoff_to_phys(%#lx) failed\n", vmf->pgoff); in __dev_dax_pte_fault()
103 return vmf_insert_mixed(vmf->vma, vmf->address, *pfn); in __dev_dax_pte_fault()
107 struct vm_fault *vmf, pfn_t *pfn) in __dev_dax_pmd_fault() argument
109 unsigned long pmd_addr = vmf->address & PMD_MASK; in __dev_dax_pmd_fault()
115 if (check_vma(dev_dax, vmf->vma, __func__)) in __dev_dax_pmd_fault()
130 if (pmd_addr < vmf->vma->vm_start || in __dev_dax_pmd_fault()
131 (pmd_addr + PMD_SIZE) > vmf->vma->vm_end) in __dev_dax_pmd_fault()
[all …]
/OK3568_Linux_fs/kernel/fs/
H A D dax.c 750 struct address_space *mapping, struct vm_fault *vmf, in dax_insert_entry() argument
774 dax_associate_entry(new_entry, mapping, vmf->vma, vmf->address); in dax_insert_entry()
1060 struct vm_fault *vmf) in dax_load_hole() argument
1063 unsigned long vaddr = vmf->address; in dax_load_hole()
1067 *entry = dax_insert_entry(xas, mapping, vmf, *entry, pfn, in dax_load_hole()
1070 ret = vmf_insert_mixed(vmf->vma, vaddr, pfn); in dax_load_hole()
1071 trace_dax_load_hole(inode, vmf, ret); in dax_load_hole()
1265 static vm_fault_t dax_iomap_pte_fault(struct vm_fault *vmf, pfn_t *pfnp, in dax_iomap_pte_fault() argument
1268 struct vm_area_struct *vma = vmf->vma; in dax_iomap_pte_fault()
1270 XA_STATE(xas, &mapping->i_pages, vmf->pgoff); in dax_iomap_pte_fault()
[all …]
/OK3568_Linux_fs/kernel/drivers/gpu/drm/ttm/
H A D ttm_bo_vm.c 46 struct vm_fault *vmf) in ttm_bo_vm_fault_idle() argument
65 if (fault_flag_allow_retry_first(vmf->flags)) { in ttm_bo_vm_fault_idle()
67 if (vmf->flags & FAULT_FLAG_RETRY_NOWAIT) in ttm_bo_vm_fault_idle()
71 mmap_read_unlock(vmf->vma->vm_mm); in ttm_bo_vm_fault_idle()
129 struct vm_fault *vmf) in ttm_bo_vm_reserve() argument
143 if (fault_flag_allow_retry_first(vmf->flags)) { in ttm_bo_vm_reserve()
144 if (!(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) { in ttm_bo_vm_reserve()
146 mmap_read_unlock(vmf->vma->vm_mm); in ttm_bo_vm_reserve()
178 static vm_fault_t ttm_bo_vm_insert_huge(struct vm_fault *vmf, in ttm_bo_vm_insert_huge() argument
189 bool write = vmf->flags & FAULT_FLAG_WRITE; in ttm_bo_vm_insert_huge()
[all …]
/OK3568_Linux_fs/kernel/drivers/gpu/drm/vmwgfx/
H A D vmwgfx_page_dirty.c 393 vm_fault_t vmw_bo_vm_mkwrite(struct vm_fault *vmf) in vmw_bo_vm_mkwrite() argument
395 struct vm_area_struct *vma = vmf->vma; in vmw_bo_vm_mkwrite()
408 save_flags = vmf->flags; in vmw_bo_vm_mkwrite()
409 vmf->flags &= ~FAULT_FLAG_ALLOW_RETRY; in vmw_bo_vm_mkwrite()
410 ret = ttm_bo_vm_reserve(bo, vmf); in vmw_bo_vm_mkwrite()
411 vmf->flags = save_flags; in vmw_bo_vm_mkwrite()
415 page_offset = vmf->pgoff - drm_vma_node_start(&bo->base.vma_node); in vmw_bo_vm_mkwrite()
435 vm_fault_t vmw_bo_vm_fault(struct vm_fault *vmf) in vmw_bo_vm_fault() argument
437 struct vm_area_struct *vma = vmf->vma; in vmw_bo_vm_fault()
446 ret = ttm_bo_vm_reserve(bo, vmf); in vmw_bo_vm_fault()
[all …]
/OK3568_Linux_fs/kernel/drivers/video/fbdev/core/
H A D fb_defio.c 40 static vm_fault_t fb_deferred_io_fault(struct vm_fault *vmf) in fb_deferred_io_fault() argument
44 struct fb_info *info = vmf->vma->vm_private_data; in fb_deferred_io_fault()
46 offset = vmf->pgoff << PAGE_SHIFT; in fb_deferred_io_fault()
56 if (vmf->vma->vm_file) in fb_deferred_io_fault()
57 page->mapping = vmf->vma->vm_file->f_mapping; in fb_deferred_io_fault()
62 page->index = vmf->pgoff; in fb_deferred_io_fault()
64 vmf->page = page; in fb_deferred_io_fault()
93 static vm_fault_t fb_deferred_io_mkwrite(struct vm_fault *vmf) in fb_deferred_io_mkwrite() argument
95 struct page *page = vmf->page; in fb_deferred_io_mkwrite()
96 struct fb_info *info = vmf->vma->vm_private_data; in fb_deferred_io_mkwrite()
[all …]
/OK3568_Linux_fs/kernel/fs/ocfs2/
H A D mmap.c 33 static vm_fault_t ocfs2_fault(struct vm_fault *vmf) in ocfs2_fault() argument
35 struct vm_area_struct *vma = vmf->vma; in ocfs2_fault()
40 ret = filemap_fault(vmf); in ocfs2_fault()
44 vma, vmf->page, vmf->pgoff); in ocfs2_fault()
115 static vm_fault_t ocfs2_page_mkwrite(struct vm_fault *vmf) in ocfs2_page_mkwrite() argument
117 struct page *page = vmf->page; in ocfs2_page_mkwrite()
118 struct inode *inode = file_inode(vmf->vma->vm_file); in ocfs2_page_mkwrite()
146 ret = __ocfs2_page_mkwrite(vmf->vma->vm_file, di_bh, page); in ocfs2_page_mkwrite()
/OK3568_Linux_fs/kernel/include/linux/
H A D huge_mm.h 10 vm_fault_t do_huge_pmd_anonymous_page(struct vm_fault *vmf);
14 void huge_pmd_set_accessed(struct vm_fault *vmf, pmd_t orig_pmd);
20 void huge_pud_set_accessed(struct vm_fault *vmf, pud_t orig_pud);
22 static inline void huge_pud_set_accessed(struct vm_fault *vmf, pud_t orig_pud) in huge_pud_set_accessed() argument
27 vm_fault_t do_huge_pmd_wp_page(struct vm_fault *vmf, pmd_t orig_pmd);
41 vm_fault_t vmf_insert_pfn_pmd_prot(struct vm_fault *vmf, pfn_t pfn,
55 static inline vm_fault_t vmf_insert_pfn_pmd(struct vm_fault *vmf, pfn_t pfn, in vmf_insert_pfn_pmd() argument
58 return vmf_insert_pfn_pmd_prot(vmf, pfn, vmf->vma->vm_page_prot, write); in vmf_insert_pfn_pmd()
60 vm_fault_t vmf_insert_pfn_pud_prot(struct vm_fault *vmf, pfn_t pfn,
74 static inline vm_fault_t vmf_insert_pfn_pud(struct vm_fault *vmf, pfn_t pfn, in vmf_insert_pfn_pud() argument
[all …]
/OK3568_Linux_fs/kernel/arch/x86/entry/vdso/
H A D vma.c 60 struct vm_area_struct *vma, struct vm_fault *vmf) in vdso_fault() argument
64 if (!image || (vmf->pgoff << PAGE_SHIFT) >= image->size) in vdso_fault()
67 vmf->page = virt_to_page(image->data + (vmf->pgoff << PAGE_SHIFT)); in vdso_fault()
68 get_page(vmf->page); in vdso_fault()
167 struct vm_area_struct *vma, struct vm_fault *vmf) in vvar_fault() argument
176 sym_offset = (long)(vmf->pgoff << PAGE_SHIFT) + in vvar_fault()
211 addr = vmf->address + (image->sym_timens_page - sym_offset); in vvar_fault()
219 return vmf_insert_pfn(vma, vmf->address, pfn); in vvar_fault()
224 return vmf_insert_pfn_prot(vma, vmf->address, in vvar_fault()
232 return vmf_insert_pfn(vma, vmf->address, in vvar_fault()
[all …]
/OK3568_Linux_fs/kernel/drivers/gpu/drm/
H A D drm_vm.c 115 static vm_fault_t drm_vm_fault(struct vm_fault *vmf) in drm_vm_fault() argument
117 struct vm_area_struct *vma = vmf->vma; in drm_vm_fault()
144 resource_size_t offset = vmf->address - vma->vm_start; in drm_vm_fault()
174 vmf->page = page; in drm_vm_fault()
188 static vm_fault_t drm_vm_fault(struct vm_fault *vmf) in drm_vm_fault() argument
204 static vm_fault_t drm_vm_shm_fault(struct vm_fault *vmf) in drm_vm_shm_fault() argument
206 struct vm_area_struct *vma = vmf->vma; in drm_vm_shm_fault()
215 offset = vmf->address - vma->vm_start; in drm_vm_shm_fault()
221 vmf->page = page; in drm_vm_shm_fault()
304 static vm_fault_t drm_vm_dma_fault(struct vm_fault *vmf) in drm_vm_dma_fault() argument
[all …]
/OK3568_Linux_fs/kernel/fs/xfs/
H A D xfs_file.c 1251 struct vm_fault *vmf, in __xfs_filemap_fault() argument
1255 struct inode *inode = file_inode(vmf->vma->vm_file); in __xfs_filemap_fault()
1263 file_update_time(vmf->vma->vm_file); in __xfs_filemap_fault()
1270 ret = dax_iomap_fault(vmf, pe_size, &pfn, NULL, in __xfs_filemap_fault()
1271 (write_fault && !vmf->cow_page) ? in __xfs_filemap_fault()
1275 ret = dax_finish_sync_fault(vmf, pe_size, pfn); in __xfs_filemap_fault()
1278 ret = iomap_page_mkwrite(vmf, in __xfs_filemap_fault()
1281 ret = filemap_fault(vmf); in __xfs_filemap_fault()
1292 struct vm_fault *vmf) in xfs_is_write_fault() argument
1294 return (vmf->flags & FAULT_FLAG_WRITE) && in xfs_is_write_fault()
[all …]
/OK3568_Linux_fs/kernel/include/trace/hooks/
H A D mm.h 76 TP_PROTO(struct vm_fault *vmf, struct page **page, bool *retry),
77 TP_ARGS(vmf, page, retry));
79 TP_PROTO(struct vm_fault *vmf, struct page *page),
80 TP_ARGS(vmf, page));
197 TP_PROTO(struct vm_fault *vmf, unsigned long highest_memmap_pfn),
198 TP_ARGS(vmf, highest_memmap_pfn), 1);
200 TP_PROTO(struct vm_fault *vmf, unsigned long highest_memmap_pfn),
201 TP_ARGS(vmf, highest_memmap_pfn));
203 TP_PROTO(struct vm_fault *vmf, struct page *page),
204 TP_ARGS(vmf, page));
[all …]
/OK3568_Linux_fs/kernel/drivers/xen/
H A D privcmd-buf.c 117 static vm_fault_t privcmd_buf_vma_fault(struct vm_fault *vmf) in privcmd_buf_vma_fault() argument
120 vmf->vma, vmf->vma->vm_start, vmf->vma->vm_end, in privcmd_buf_vma_fault()
121 vmf->pgoff, (void *)vmf->address); in privcmd_buf_vma_fault()
/OK3568_Linux_fs/kernel/fs/ext2/
H A D file.c 91 static vm_fault_t ext2_dax_fault(struct vm_fault *vmf) in ext2_dax_fault() argument
93 struct inode *inode = file_inode(vmf->vma->vm_file); in ext2_dax_fault()
96 bool write = (vmf->flags & FAULT_FLAG_WRITE) && in ext2_dax_fault()
97 (vmf->vma->vm_flags & VM_SHARED); in ext2_dax_fault()
101 file_update_time(vmf->vma->vm_file); in ext2_dax_fault()
105 ret = dax_iomap_fault(vmf, PE_SIZE_PTE, NULL, NULL, &ext2_iomap_ops); in ext2_dax_fault()
/OK3568_Linux_fs/kernel/sound/usb/usx2y/
H A D usX2Yhwdep.c 21 static vm_fault_t snd_us428ctls_vm_fault(struct vm_fault *vmf) in snd_us428ctls_vm_fault() argument
28 vmf->vma->vm_start, in snd_us428ctls_vm_fault()
29 vmf->pgoff); in snd_us428ctls_vm_fault()
31 offset = vmf->pgoff << PAGE_SHIFT; in snd_us428ctls_vm_fault()
32 vaddr = (char *)((struct usx2ydev *)vmf->vma->vm_private_data)->us428ctls_sharedmem + offset; in snd_us428ctls_vm_fault()
35 vmf->page = page; in snd_us428ctls_vm_fault()
/OK3568_Linux_fs/kernel/arch/s390/kernel/
H A D vdso.c 45 struct vm_area_struct *vma, struct vm_fault *vmf) in vdso_fault() argument
53 if (vmf->pgoff >= vdso_pages) in vdso_fault()
56 vmf->page = vdso_pagelist[vmf->pgoff]; in vdso_fault()
57 get_page(vmf->page); in vdso_fault()
/OK3568_Linux_fs/kernel/drivers/char/
H A D mspec.c 137 mspec_fault(struct vm_fault *vmf) in mspec_fault() argument
141 pgoff_t index = vmf->pgoff; in mspec_fault()
142 struct vma_data *vdata = vmf->vma->vm_private_data; in mspec_fault()
164 return vmf_insert_pfn(vmf->vma, vmf->address, pfn); in mspec_fault()
/OK3568_Linux_fs/kernel/fs/nilfs2/
H A D file.c 45 static vm_fault_t nilfs_page_mkwrite(struct vm_fault *vmf) in nilfs_page_mkwrite() argument
47 struct vm_area_struct *vma = vmf->vma; in nilfs_page_mkwrite()
48 struct page *page = vmf->page; in nilfs_page_mkwrite()
99 ret = block_page_mkwrite(vma, vmf, nilfs_get_block); in nilfs_page_mkwrite()
/OK3568_Linux_fs/kernel/drivers/gpu/drm/gma500/
H A D gem.c 126 vm_fault_t psb_gem_fault(struct vm_fault *vmf) in psb_gem_fault() argument
128 struct vm_area_struct *vma = vmf->vma; in psb_gem_fault()
162 page_offset = (vmf->address - vma->vm_start) >> PAGE_SHIFT; in psb_gem_fault()
169 ret = vmf_insert_pfn(vma, vmf->address, pfn); in psb_gem_fault()
/OK3568_Linux_fs/kernel/drivers/misc/cxl/
H A D context.c 126 static vm_fault_t cxl_mmap_fault(struct vm_fault *vmf) in cxl_mmap_fault() argument
128 struct vm_area_struct *vma = vmf->vma; in cxl_mmap_fault()
133 offset = vmf->pgoff << PAGE_SHIFT; in cxl_mmap_fault()
136 __func__, ctx->pe, vmf->address, offset); in cxl_mmap_fault()
161 vmf->page = ctx->ff_page; in cxl_mmap_fault()
168 ret = vmf_insert_pfn(vma, vmf->address, (area + offset) >> PAGE_SHIFT); in cxl_mmap_fault()

123456