Lines matching refs: vma (legacy DRM mapping code, drm_vm.c)
61 struct vm_area_struct *vma; member
65 static void drm_vm_open(struct vm_area_struct *vma);
66 static void drm_vm_close(struct vm_area_struct *vma);
69 struct vm_area_struct *vma) in drm_io_prot() argument
71 pgprot_t tmp = vm_get_page_prot(vma->vm_flags); in drm_io_prot()
83 if (efi_range_is_wc(vma->vm_start, vma->vm_end - in drm_io_prot()
84 vma->vm_start)) in drm_io_prot()
94 static pgprot_t drm_dma_prot(uint32_t map_type, struct vm_area_struct *vma) in drm_dma_prot() argument
96 pgprot_t tmp = vm_get_page_prot(vma->vm_flags); in drm_dma_prot()
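
The two protection helpers referenced above, drm_io_prot() and drm_dma_prot(), both start from vm_get_page_prot(vma->vm_flags) and then adjust the caching attributes. A minimal sketch of that pattern, with a hypothetical example_io_prot() helper and the caching policy reduced to a single flag (architecture special cases such as the EFI write-combine check omitted):

    #include <linux/mm.h>

    /* Hypothetical helper: derive a page protection value from the VMA's
     * flags, then mark it uncached or write-combined for an I/O mapping. */
    static pgprot_t example_io_prot(struct vm_area_struct *vma, bool write_combine)
    {
        pgprot_t prot = vm_get_page_prot(vma->vm_flags);

        if (write_combine)
            prot = pgprot_writecombine(prot);   /* e.g. frame buffers */
        else
            prot = pgprot_noncached(prot);      /* e.g. MMIO registers */

        return prot;
    }
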
117 struct vm_area_struct *vma = vmf->vma; in drm_vm_fault() local
118 struct drm_file *priv = vma->vm_file->private_data; in drm_vm_fault()
133 if (drm_ht_find_item(&dev->map_hash, vma->vm_pgoff, &hash)) in drm_vm_fault()
144 resource_size_t offset = vmf->address - vma->vm_start; in drm_vm_fault()
206 struct vm_area_struct *vma = vmf->vma; in drm_vm_shm_fault() local
207 struct drm_local_map *map = vma->vm_private_data; in drm_vm_shm_fault()
215 offset = vmf->address - vma->vm_start; in drm_vm_shm_fault()
235 static void drm_vm_shm_close(struct vm_area_struct *vma) in drm_vm_shm_close() argument
237 struct drm_file *priv = vma->vm_file->private_data; in drm_vm_shm_close()
245 vma->vm_start, vma->vm_end - vma->vm_start); in drm_vm_shm_close()
247 map = vma->vm_private_data; in drm_vm_shm_close()
251 if (pt->vma->vm_private_data == map) in drm_vm_shm_close()
253 if (pt->vma == vma) { in drm_vm_shm_close()
306 struct vm_area_struct *vma = vmf->vma; in drm_vm_dma_fault() local
307 struct drm_file *priv = vma->vm_file->private_data; in drm_vm_dma_fault()
319 offset = vmf->address - vma->vm_start; in drm_vm_dma_fault()
341 struct vm_area_struct *vma = vmf->vma; in drm_vm_sg_fault() local
342 struct drm_local_map *map = vma->vm_private_data; in drm_vm_sg_fault()
343 struct drm_file *priv = vma->vm_file->private_data; in drm_vm_sg_fault()
356 offset = vmf->address - vma->vm_start; in drm_vm_sg_fault()
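
All four fault handlers listed above follow the same shape: recover the VMA from vmf->vma, turn vmf->address into an offset within the mapping, look up the backing page, and hand it back to the core. A minimal sketch of that pattern for a vmalloc()-backed mapping (hypothetical names, no locking):

    #include <linux/mm.h>
    #include <linux/vmalloc.h>

    static vm_fault_t example_shm_fault(struct vm_fault *vmf)
    {
        struct vm_area_struct *vma = vmf->vma;
        void *kva = vma->vm_private_data;   /* vmalloc()ed backing store */
        unsigned long offset;
        struct page *page;

        if (!kva)
            return VM_FAULT_SIGBUS;

        /* Offset of the faulting address within this mapping. */
        offset = vmf->address - vma->vm_start;
        page = vmalloc_to_page(kva + offset);
        if (!page)
            return VM_FAULT_SIGBUS;

        get_page(page);         /* take a reference before handing it back */
        vmf->page = page;
        return 0;
    }
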
395 struct vm_area_struct *vma) in drm_vm_open_locked() argument
400 vma->vm_start, vma->vm_end - vma->vm_start); in drm_vm_open_locked()
404 vma_entry->vma = vma; in drm_vm_open_locked()
410 static void drm_vm_open(struct vm_area_struct *vma) in drm_vm_open() argument
412 struct drm_file *priv = vma->vm_file->private_data; in drm_vm_open()
416 drm_vm_open_locked(dev, vma); in drm_vm_open()
421 struct vm_area_struct *vma) in drm_vm_close_locked() argument
426 vma->vm_start, vma->vm_end - vma->vm_start); in drm_vm_close_locked()
429 if (pt->vma == vma) { in drm_vm_close_locked()
445 static void drm_vm_close(struct vm_area_struct *vma) in drm_vm_close() argument
447 struct drm_file *priv = vma->vm_file->private_data; in drm_vm_close()
451 drm_vm_close_locked(dev, vma); in drm_vm_close()
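
The open/close pairs above keep the driver's private list of live mappings in step with the VMAs the kernel duplicates and tears down. A sketch of that bookkeeping, with hypothetical struct and list names and the device lock left out:

    #include <linux/list.h>
    #include <linux/slab.h>
    #include <linux/mm.h>

    struct example_vma_entry {
        struct list_head head;
        struct vm_area_struct *vma;
    };

    static LIST_HEAD(example_vmalist);

    static void example_vm_open(struct vm_area_struct *vma)
    {
        struct example_vma_entry *entry;

        entry = kmalloc(sizeof(*entry), GFP_KERNEL);
        if (!entry)
            return;

        entry->vma = vma;
        list_add(&entry->head, &example_vmalist);
    }

    static void example_vm_close(struct vm_area_struct *vma)
    {
        struct example_vma_entry *pt, *tmp;

        list_for_each_entry_safe(pt, tmp, &example_vmalist, head) {
            if (pt->vma == vma) {
                list_del(&pt->head);
                kfree(pt);
                break;
            }
        }
    }
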
465 static int drm_mmap_dma(struct file *filp, struct vm_area_struct *vma) in drm_mmap_dma() argument
470 unsigned long length = vma->vm_end - vma->vm_start; in drm_mmap_dma()
475 vma->vm_start, vma->vm_end, vma->vm_pgoff); in drm_mmap_dma()
484 vma->vm_flags &= ~(VM_WRITE | VM_MAYWRITE); in drm_mmap_dma()
486 pgprot_val(vma->vm_page_prot) &= ~_PAGE_RW; in drm_mmap_dma()
491 vma->vm_page_prot = in drm_mmap_dma()
494 (__pte(pgprot_val(vma->vm_page_prot))))); in drm_mmap_dma()
498 vma->vm_ops = &drm_vm_dma_ops; in drm_mmap_dma()
500 vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP; in drm_mmap_dma()
502 drm_vm_open_locked(dev, vma); in drm_mmap_dma()
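
drm_mmap_dma() as listed above checks the requested length, strips write permission for read-only buffers, installs the driver's vm_operations and pins the mapping flags. A condensed sketch of that flow, reusing the hypothetical helpers from the earlier sketches and keeping the legacy direct vm_flags update shown in the listing (newer kernels go through vm_flags_set()/vm_flags_clear()):

    #include <linux/fs.h>
    #include <linux/errno.h>
    #include <linux/mm.h>

    static const struct vm_operations_struct example_dma_vm_ops = {
        .fault = example_shm_fault,
        .open  = example_vm_open,
        .close = example_vm_close,
    };

    static int example_mmap_dma(struct file *filp, struct vm_area_struct *vma,
                                size_t buffer_size, bool readonly)
    {
        unsigned long length = vma->vm_end - vma->vm_start;

        if (length > buffer_size)
            return -EINVAL;

        if (readonly)
            vma->vm_flags &= ~(VM_WRITE | VM_MAYWRITE);

        vma->vm_ops = &example_dma_vm_ops;
        vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;

        example_vm_open(vma);   /* register this mapping in the vma list */
        return 0;
    }
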
528 static int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma) in drm_mmap_locked() argument
537 vma->vm_start, vma->vm_end, vma->vm_pgoff); in drm_mmap_locked()
546 if (!vma->vm_pgoff in drm_mmap_locked()
552 return drm_mmap_dma(filp, vma); in drm_mmap_locked()
554 if (drm_ht_find_item(&dev->map_hash, vma->vm_pgoff, &hash)) { in drm_mmap_locked()
564 if (map->size < vma->vm_end - vma->vm_start) in drm_mmap_locked()
568 vma->vm_flags &= ~(VM_WRITE | VM_MAYWRITE); in drm_mmap_locked()
570 pgprot_val(vma->vm_page_prot) &= ~_PAGE_RW; in drm_mmap_locked()
575 vma->vm_page_prot = in drm_mmap_locked()
578 (__pte(pgprot_val(vma->vm_page_prot))))); in drm_mmap_locked()
592 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); in drm_mmap_locked()
594 vma->vm_ops = &drm_vm_ops; in drm_mmap_locked()
602 vma->vm_page_prot = drm_io_prot(map, vma); in drm_mmap_locked()
603 if (io_remap_pfn_range(vma, vma->vm_start, in drm_mmap_locked()
605 vma->vm_end - vma->vm_start, in drm_mmap_locked()
606 vma->vm_page_prot)) in drm_mmap_locked()
611 vma->vm_start, vma->vm_end, (unsigned long long)(map->offset + offset)); in drm_mmap_locked()
613 vma->vm_ops = &drm_vm_ops; in drm_mmap_locked()
618 if (remap_pfn_range(vma, vma->vm_start, in drm_mmap_locked()
620 vma->vm_end - vma->vm_start, vma->vm_page_prot)) in drm_mmap_locked()
622 vma->vm_page_prot = drm_dma_prot(map->type, vma); in drm_mmap_locked()
625 vma->vm_ops = &drm_vm_shm_ops; in drm_mmap_locked()
626 vma->vm_private_data = (void *)map; in drm_mmap_locked()
629 vma->vm_ops = &drm_vm_sg_ops; in drm_mmap_locked()
630 vma->vm_private_data = (void *)map; in drm_mmap_locked()
631 vma->vm_page_prot = drm_dma_prot(map->type, vma); in drm_mmap_locked()
636 vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP; in drm_mmap_locked()
638 drm_vm_open_locked(dev, vma); in drm_mmap_locked()
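
The io_remap_pfn_range() branch visible in drm_mmap_locked() above maps a physical range straight into the VMA, using a protection value built the same way as in the drm_io_prot()-style sketch earlier. A reduced version of that branch, with hypothetical parameters and the map-offset lookup trimmed away:

    static int example_mmap_io(struct vm_area_struct *vma,
                               resource_size_t phys_base, bool write_combine)
    {
        unsigned long size = vma->vm_end - vma->vm_start;
        unsigned long pfn = phys_base >> PAGE_SHIFT;

        /* Protection derived from vm_flags plus the caching policy. */
        vma->vm_page_prot = example_io_prot(vma, write_combine);

        if (io_remap_pfn_range(vma, vma->vm_start, pfn, size,
                               vma->vm_page_prot))
            return -EAGAIN;

        return 0;
    }
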
642 int drm_legacy_mmap(struct file *filp, struct vm_area_struct *vma) in drm_legacy_mmap() argument
652 ret = drm_mmap_locked(filp, vma); in drm_legacy_mmap()
662 struct drm_vma_entry *vma, *vma_temp; in drm_legacy_vma_flush() local
665 list_for_each_entry_safe(vma, vma_temp, &dev->vmalist, head) { in drm_legacy_vma_flush()
666 list_del(&vma->head); in drm_legacy_vma_flush()
667 kfree(vma); in drm_legacy_vma_flush()
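
Finally, drm_legacy_vma_flush() walks the remembered VMA entries with list_for_each_entry_safe(), the idiom to use when the element being visited is deleted during iteration. Continuing the hypothetical list from the open/close sketch:

    static void example_vma_flush(void)
    {
        struct example_vma_entry *entry, *tmp;

        list_for_each_entry_safe(entry, tmp, &example_vmalist, head) {
            list_del(&entry->head);
            kfree(entry);
        }
    }
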