/*
 * \file drm_vm.c
 * Memory mapping for DRM
 *
 * \author Rickard E. (Rik) Faith <faith@valinux.com>
 * \author Gareth Hughes <gareth@valinux.com>
 */

/*
 * Created: Mon Jan 4 08:58:31 1999 by faith@valinux.com
 *
 * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/export.h>
#include <linux/pci.h>
#include <linux/seq_file.h>
#include <linux/vmalloc.h>
#include <linux/pgtable.h>

#if defined(__ia64__)
#include <linux/efi.h>
#include <linux/slab.h>
#endif
#include <linux/mem_encrypt.h>

#include <drm/drm_agpsupport.h>
#include <drm/drm_device.h>
#include <drm/drm_drv.h>
#include <drm/drm_file.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_print.h>

#include "drm_internal.h"
#include "drm_legacy.h"

struct drm_vma_entry {
	struct list_head head;
	struct vm_area_struct *vma;
	pid_t pid;
};

static void drm_vm_open(struct vm_area_struct *vma);
static void drm_vm_close(struct vm_area_struct *vma);

static pgprot_t drm_io_prot(struct drm_local_map *map,
			    struct vm_area_struct *vma)
{
	pgprot_t tmp = vm_get_page_prot(vma->vm_flags);

	/* We don't want graphics memory to be mapped encrypted */
	tmp = pgprot_decrypted(tmp);

#if defined(__i386__) || defined(__x86_64__) || defined(__powerpc__) || \
    defined(__mips__)
	if (map->type == _DRM_REGISTERS && !(map->flags & _DRM_WRITE_COMBINING))
		tmp = pgprot_noncached(tmp);
	else
		tmp = pgprot_writecombine(tmp);
#elif defined(__ia64__)
	if (efi_range_is_wc(vma->vm_start, vma->vm_end -
			    vma->vm_start))
		tmp = pgprot_writecombine(tmp);
	else
		tmp = pgprot_noncached(tmp);
#elif defined(__sparc__) || defined(__arm__)
	tmp = pgprot_noncached(tmp);
#endif
	return tmp;
}

static pgprot_t drm_dma_prot(uint32_t map_type, struct vm_area_struct *vma)
{
	pgprot_t tmp = vm_get_page_prot(vma->vm_flags);

#if defined(__powerpc__) && defined(CONFIG_NOT_COHERENT_CACHE)
	tmp = pgprot_noncached_wc(tmp);
#endif
	return tmp;
}
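
/*
 * Summary added for clarity: neither helper above modifies the VMA itself.
 * drm_mmap_locked() below assigns the returned pgprot_t to vma->vm_page_prot,
 * using drm_io_prot() for _DRM_REGISTERS/_DRM_FRAME_BUFFER maps and
 * drm_dma_prot() for _DRM_CONSISTENT/_DRM_SCATTER_GATHER maps, before the
 * pages are (re)mapped.
 */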

/*
 * \c fault method for AGP virtual memory.
 *
 * \param vma virtual memory area.
 * \param address access address.
 * \return pointer to the page structure.
 *
 * Find the right map and if it's AGP memory find the real physical page to
 * map, get the page, increment the use count and return it.
 */
#if IS_ENABLED(CONFIG_AGP)
static vm_fault_t drm_vm_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct drm_file *priv = vma->vm_file->private_data;
	struct drm_device *dev = priv->minor->dev;
	struct drm_local_map *map = NULL;
	struct drm_map_list *r_list;
	struct drm_hash_item *hash;

	/*
	 * Find the right map
	 */
	if (!dev->agp || !dev->agp->cant_use_aperture)
		goto vm_fault_error;

	if (drm_ht_find_item(&dev->map_hash, vma->vm_pgoff, &hash))
		goto vm_fault_error;

	r_list = drm_hash_entry(hash, struct drm_map_list, hash);
	map = r_list->map;

	if (map && map->type == _DRM_AGP) {
		/*
		 * Using vm_pgoff as a selector forces us to use this unusual
		 * addressing scheme.
		 */
		resource_size_t offset = vmf->address - vma->vm_start;
		resource_size_t baddr = map->offset + offset;
		struct drm_agp_mem *agpmem;
		struct page *page;

#ifdef __alpha__
		/*
		 * Adjust to a bus-relative address
		 */
		baddr -= dev->hose->mem_space->start;
#endif

		/*
		 * It's AGP memory - find the real physical page to map
		 */
		list_for_each_entry(agpmem, &dev->agp->memory, head) {
			if (agpmem->bound <= baddr &&
			    agpmem->bound + agpmem->pages * PAGE_SIZE > baddr)
				break;
		}

		if (&agpmem->head == &dev->agp->memory)
			goto vm_fault_error;

		/*
		 * Get the page, inc the use count, and return it
		 */
		offset = (baddr - agpmem->bound) >> PAGE_SHIFT;
		page = agpmem->memory->pages[offset];
		get_page(page);
		vmf->page = page;

		DRM_DEBUG
		    ("baddr = 0x%llx page = 0x%p, offset = 0x%llx, count=%d\n",
		     (unsigned long long)baddr,
		     agpmem->memory->pages[offset],
		     (unsigned long long)offset,
		     page_count(page));
		return 0;
	}
vm_fault_error:
	return VM_FAULT_SIGBUS;	/* Disallow mremap */
}
#else
static vm_fault_t drm_vm_fault(struct vm_fault *vmf)
{
	return VM_FAULT_SIGBUS;
}
#endif

/*
 * \c nopage method for shared virtual memory.
 *
 * \param vma virtual memory area.
 * \param address access address.
 * \return pointer to the page structure.
 *
 * Get the mapping, find the real physical page to map, get the page, and
 * return it.
 */
static vm_fault_t drm_vm_shm_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct drm_local_map *map = vma->vm_private_data;
	unsigned long offset;
	unsigned long i;
	struct page *page;

	if (!map)
		return VM_FAULT_SIGBUS;	/* Nothing allocated */

	offset = vmf->address - vma->vm_start;
	i = (unsigned long)map->handle + offset;
	page = vmalloc_to_page((void *)i);
	if (!page)
		return VM_FAULT_SIGBUS;
	get_page(page);
	vmf->page = page;

	DRM_DEBUG("shm_fault 0x%lx\n", offset);
	return 0;
}

/*
 * \c close method for shared virtual memory.
 *
 * \param vma virtual memory area.
 *
 * Deletes map information if we are the last
 * person to close a mapping and it's not in the global maplist.
 */
static void drm_vm_shm_close(struct vm_area_struct *vma)
{
	struct drm_file *priv = vma->vm_file->private_data;
	struct drm_device *dev = priv->minor->dev;
	struct drm_vma_entry *pt, *temp;
	struct drm_local_map *map;
	struct drm_map_list *r_list;
	int found_maps = 0;

	DRM_DEBUG("0x%08lx,0x%08lx\n",
		  vma->vm_start, vma->vm_end - vma->vm_start);

	map = vma->vm_private_data;

	mutex_lock(&dev->struct_mutex);
	list_for_each_entry_safe(pt, temp, &dev->vmalist, head) {
		if (pt->vma->vm_private_data == map)
			found_maps++;
		if (pt->vma == vma) {
			list_del(&pt->head);
			kfree(pt);
		}
	}

	/* We were the only map that was found */
	if (found_maps == 1 && map->flags & _DRM_REMOVABLE) {
		/* Check to see if we are in the maplist, if we are not, then
		 * we delete this mappings information.
		 */
		found_maps = 0;
		list_for_each_entry(r_list, &dev->maplist, head) {
			if (r_list->map == map)
				found_maps++;
		}

		if (!found_maps) {
			switch (map->type) {
			case _DRM_REGISTERS:
			case _DRM_FRAME_BUFFER:
				arch_phys_wc_del(map->mtrr);
				iounmap(map->handle);
				break;
			case _DRM_SHM:
				vfree(map->handle);
				break;
			case _DRM_AGP:
			case _DRM_SCATTER_GATHER:
				break;
			case _DRM_CONSISTENT:
				dma_free_coherent(&dev->pdev->dev,
						  map->size,
						  map->handle,
						  map->offset);
				break;
			}
			kfree(map);
		}
	}
	mutex_unlock(&dev->struct_mutex);
}

/*
 * \c fault method for DMA virtual memory.
 *
 * \param address access address.
 * \return pointer to the page structure.
 *
 * Determine the page number from the page offset and get it from drm_device_dma::pagelist.
 */
static vm_fault_t drm_vm_dma_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct drm_file *priv = vma->vm_file->private_data;
	struct drm_device *dev = priv->minor->dev;
	struct drm_device_dma *dma = dev->dma;
	unsigned long offset;
	unsigned long page_nr;
	struct page *page;

	if (!dma)
		return VM_FAULT_SIGBUS;	/* Error */
	if (!dma->pagelist)
		return VM_FAULT_SIGBUS;	/* Nothing allocated */

	offset = vmf->address - vma->vm_start;
					/* vm_[pg]off[set] should be 0 */
	page_nr = offset >> PAGE_SHIFT; /* page_nr could just be vmf->pgoff */
	page = virt_to_page((void *)dma->pagelist[page_nr]);

	get_page(page);
	vmf->page = page;

	DRM_DEBUG("dma_fault 0x%lx (page %lu)\n", offset, page_nr);
	return 0;
}

/*
 * \c fault method for scatter-gather virtual memory.
 *
 * \param address access address.
 * \return pointer to the page structure.
 *
 * Determine the map offset from the page offset and get it from drm_sg_mem::pagelist.
 */
static vm_fault_t drm_vm_sg_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct drm_local_map *map = vma->vm_private_data;
	struct drm_file *priv = vma->vm_file->private_data;
	struct drm_device *dev = priv->minor->dev;
	struct drm_sg_mem *entry = dev->sg;
	unsigned long offset;
	unsigned long map_offset;
	unsigned long page_offset;
	struct page *page;

	if (!entry)
		return VM_FAULT_SIGBUS;	/* Error */
	if (!entry->pagelist)
		return VM_FAULT_SIGBUS;	/* Nothing allocated */

	offset = vmf->address - vma->vm_start;
	map_offset = map->offset - (unsigned long)dev->sg->virtual;
	page_offset = (offset >> PAGE_SHIFT) + (map_offset >> PAGE_SHIFT);
	page = entry->pagelist[page_offset];
	get_page(page);
	vmf->page = page;

	return 0;
}

/** AGP virtual memory operations */
static const struct vm_operations_struct drm_vm_ops = {
	.fault = drm_vm_fault,
	.open = drm_vm_open,
	.close = drm_vm_close,
};

/** Shared virtual memory operations */
static const struct vm_operations_struct drm_vm_shm_ops = {
	.fault = drm_vm_shm_fault,
	.open = drm_vm_open,
	.close = drm_vm_shm_close,
};

/** DMA virtual memory operations */
static const struct vm_operations_struct drm_vm_dma_ops = {
	.fault = drm_vm_dma_fault,
	.open = drm_vm_open,
	.close = drm_vm_close,
};

/** Scatter-gather virtual memory operations */
static const struct vm_operations_struct drm_vm_sg_ops = {
	.fault = drm_vm_sg_fault,
	.open = drm_vm_open,
	.close = drm_vm_close,
};
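
/*
 * Which table gets installed is decided in drm_mmap_dma() and
 * drm_mmap_locked() below (summary added for clarity):
 *
 *	DMA buffer mapping (vm_pgoff == 0)	-> drm_vm_dma_ops
 *	_DRM_AGP (cant_use_aperture)		-> drm_vm_ops (fault-driven)
 *	_DRM_FRAME_BUFFER / _DRM_REGISTERS	-> drm_vm_ops (pages premapped)
 *	_DRM_SHM / _DRM_CONSISTENT		-> drm_vm_shm_ops
 *	_DRM_SCATTER_GATHER			-> drm_vm_sg_ops
 */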

static void drm_vm_open_locked(struct drm_device *dev,
			       struct vm_area_struct *vma)
{
	struct drm_vma_entry *vma_entry;

	DRM_DEBUG("0x%08lx,0x%08lx\n",
		  vma->vm_start, vma->vm_end - vma->vm_start);

	vma_entry = kmalloc(sizeof(*vma_entry), GFP_KERNEL);
	if (vma_entry) {
		vma_entry->vma = vma;
		vma_entry->pid = current->pid;
		list_add(&vma_entry->head, &dev->vmalist);
	}
}

static void drm_vm_open(struct vm_area_struct *vma)
{
	struct drm_file *priv = vma->vm_file->private_data;
	struct drm_device *dev = priv->minor->dev;

	mutex_lock(&dev->struct_mutex);
	drm_vm_open_locked(dev, vma);
	mutex_unlock(&dev->struct_mutex);
}

static void drm_vm_close_locked(struct drm_device *dev,
				struct vm_area_struct *vma)
{
	struct drm_vma_entry *pt, *temp;

	DRM_DEBUG("0x%08lx,0x%08lx\n",
		  vma->vm_start, vma->vm_end - vma->vm_start);

	list_for_each_entry_safe(pt, temp, &dev->vmalist, head) {
		if (pt->vma == vma) {
			list_del(&pt->head);
			kfree(pt);
			break;
		}
	}
}

/*
 * \c close method for all virtual memory types.
 *
 * \param vma virtual memory area.
 *
 * Search the \p vma private data entry in drm_device::vmalist, unlink it, and
 * free it.
 */
static void drm_vm_close(struct vm_area_struct *vma)
{
	struct drm_file *priv = vma->vm_file->private_data;
	struct drm_device *dev = priv->minor->dev;

	mutex_lock(&dev->struct_mutex);
	drm_vm_close_locked(dev, vma);
	mutex_unlock(&dev->struct_mutex);
}

/*
 * mmap DMA memory.
 *
 * \param file_priv DRM file private.
 * \param vma virtual memory area.
 * \return zero on success or a negative number on failure.
 *
 * Sets the virtual memory area operations structure to vm_dma_ops, the file
 * pointer, and calls vm_open().
 */
static int drm_mmap_dma(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *priv = filp->private_data;
	struct drm_device *dev;
	struct drm_device_dma *dma;
	unsigned long length = vma->vm_end - vma->vm_start;

	dev = priv->minor->dev;
	dma = dev->dma;
	DRM_DEBUG("start = 0x%lx, end = 0x%lx, page offset = 0x%lx\n",
		  vma->vm_start, vma->vm_end, vma->vm_pgoff);

	/* Length must match exact page count */
	if (!dma || (length >> PAGE_SHIFT) != dma->page_count) {
		return -EINVAL;
	}

	if (!capable(CAP_SYS_ADMIN) &&
	    (dma->flags & _DRM_DMA_USE_PCI_RO)) {
		vma->vm_flags &= ~(VM_WRITE | VM_MAYWRITE);
#if defined(__i386__) || defined(__x86_64__)
		pgprot_val(vma->vm_page_prot) &= ~_PAGE_RW;
#else
		/* Ye gads this is ugly.  With more thought
		   we could move this up higher and use
		   `protection_map' instead.  */
		vma->vm_page_prot =
		    __pgprot(pte_val
			     (pte_wrprotect
			      (__pte(pgprot_val(vma->vm_page_prot)))));
#endif
	}

	vma->vm_ops = &drm_vm_dma_ops;

	vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;

	drm_vm_open_locked(dev, vma);
	return 0;
}

static resource_size_t drm_core_get_reg_ofs(struct drm_device *dev)
{
#ifdef __alpha__
	return dev->hose->dense_mem_base;
#else
	return 0;
#endif
}

/*
 * mmap a DRM map.
 *
 * \param file_priv DRM file private.
 * \param vma virtual memory area.
 * \return zero on success or a negative number on failure.
 *
 * If the virtual memory area has no offset associated with it then it's a DMA
 * area, so drm_mmap_dma() is called. Otherwise the map is looked up in
 * drm_device::maplist, the restricted flag is checked, the virtual memory
 * operations are set according to the mapping type and the pages are remapped.
 * Finally the file pointer is set and vm_open() is called.
 */
static int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *priv = filp->private_data;
	struct drm_device *dev = priv->minor->dev;
	struct drm_local_map *map = NULL;
	resource_size_t offset = 0;
	struct drm_hash_item *hash;

	DRM_DEBUG("start = 0x%lx, end = 0x%lx, page offset = 0x%lx\n",
		  vma->vm_start, vma->vm_end, vma->vm_pgoff);

	if (!priv->authenticated)
		return -EACCES;

	/* We check for "dma". On Apple's UniNorth, it's valid to have
	 * the AGP mapped at physical address 0
	 * --BenH.
	 */
	if (!vma->vm_pgoff
#if IS_ENABLED(CONFIG_AGP)
	    && (!dev->agp
		|| dev->agp->agp_info.device->vendor != PCI_VENDOR_ID_APPLE)
#endif
	    )
		return drm_mmap_dma(filp, vma);

	if (drm_ht_find_item(&dev->map_hash, vma->vm_pgoff, &hash)) {
		DRM_ERROR("Could not find map\n");
		return -EINVAL;
	}

	map = drm_hash_entry(hash, struct drm_map_list, hash)->map;
	if (!map || ((map->flags & _DRM_RESTRICTED) && !capable(CAP_SYS_ADMIN)))
		return -EPERM;

	/* Check for valid size. */
	if (map->size < vma->vm_end - vma->vm_start)
		return -EINVAL;

	if (!capable(CAP_SYS_ADMIN) && (map->flags & _DRM_READ_ONLY)) {
		vma->vm_flags &= ~(VM_WRITE | VM_MAYWRITE);
#if defined(__i386__) || defined(__x86_64__)
		pgprot_val(vma->vm_page_prot) &= ~_PAGE_RW;
#else
		/* Ye gads this is ugly.  With more thought
		   we could move this up higher and use
		   `protection_map' instead.  */
		vma->vm_page_prot =
		    __pgprot(pte_val
			     (pte_wrprotect
			      (__pte(pgprot_val(vma->vm_page_prot)))));
#endif
	}

	switch (map->type) {
#if !defined(__arm__)
	case _DRM_AGP:
		if (dev->agp && dev->agp->cant_use_aperture) {
			/*
			 * On some platforms we can't talk to bus dma address from the CPU, so for
			 * memory of type DRM_AGP, we'll deal with sorting out the real physical
			 * pages and mappings in fault()
			 */
#if defined(__powerpc__)
			vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
#endif
			vma->vm_ops = &drm_vm_ops;
			break;
		}
		fallthrough;	/* to _DRM_FRAME_BUFFER... */
#endif
	case _DRM_FRAME_BUFFER:
	case _DRM_REGISTERS:
		offset = drm_core_get_reg_ofs(dev);
		vma->vm_page_prot = drm_io_prot(map, vma);
		if (io_remap_pfn_range(vma, vma->vm_start,
				       (map->offset + offset) >> PAGE_SHIFT,
				       vma->vm_end - vma->vm_start,
				       vma->vm_page_prot))
			return -EAGAIN;
		DRM_DEBUG("   Type = %d; start = 0x%lx, end = 0x%lx,"
			  " offset = 0x%llx\n",
			  map->type,
			  vma->vm_start, vma->vm_end, (unsigned long long)(map->offset + offset));

		vma->vm_ops = &drm_vm_ops;
		break;
	case _DRM_CONSISTENT:
		/* Consistent memory is really like shared memory. But
		 * it's allocated in a different way, so avoid fault */
		if (remap_pfn_range(vma, vma->vm_start,
				    page_to_pfn(virt_to_page(map->handle)),
				    vma->vm_end - vma->vm_start, vma->vm_page_prot))
			return -EAGAIN;
		vma->vm_page_prot = drm_dma_prot(map->type, vma);
		fallthrough;	/* to _DRM_SHM */
	case _DRM_SHM:
		vma->vm_ops = &drm_vm_shm_ops;
		vma->vm_private_data = (void *)map;
		break;
	case _DRM_SCATTER_GATHER:
		vma->vm_ops = &drm_vm_sg_ops;
		vma->vm_private_data = (void *)map;
		vma->vm_page_prot = drm_dma_prot(map->type, vma);
		break;
	default:
		return -EINVAL;	/* This should never happen. */
	}
	vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;

	drm_vm_open_locked(dev, vma);
	return 0;
}

int drm_legacy_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *priv = filp->private_data;
	struct drm_device *dev = priv->minor->dev;
	int ret;

	if (drm_dev_is_unplugged(dev))
		return -ENODEV;

	mutex_lock(&dev->struct_mutex);
	ret = drm_mmap_locked(filp, vma);
	mutex_unlock(&dev->struct_mutex);

	return ret;
}
EXPORT_SYMBOL(drm_legacy_mmap);
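
/*
 * Illustrative userspace sketch (hedged; not part of this file). A legacy
 * client typically adds or looks up a map, takes the token the kernel returns
 * in the map's handle field, and passes it as the mmap() offset; that token,
 * shifted by PAGE_SHIFT, is what arrives above as vma->vm_pgoff:
 *
 *	struct drm_map map = { .size = length, .type = _DRM_SHM, .flags = 0 };
 *	ioctl(fd, DRM_IOCTL_ADD_MAP, &map);
 *	ptr = mmap(NULL, map.size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *		   fd, (off_t)(unsigned long)map.handle);
 */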

#if IS_ENABLED(CONFIG_DRM_LEGACY)
void drm_legacy_vma_flush(struct drm_device *dev)
{
	struct drm_vma_entry *vma, *vma_temp;

	/* Clear vma list (only needed for legacy drivers) */
	list_for_each_entry_safe(vma, vma_temp, &dev->vmalist, head) {
		list_del(&vma->head);
		kfree(vma);
	}
}
#endif