// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2011 Texas Instruments Incorporated - http://www.ti.com/
 * Author: Rob Clark <rob.clark@linaro.org>
 */

#include <linux/dma-mapping.h>
#include <linux/seq_file.h>
#include <linux/shmem_fs.h>
#include <linux/spinlock.h>
#include <linux/pfn_t.h>

#include <drm/drm_prime.h>
#include <drm/drm_vma_manager.h>

#include "omap_drv.h"
#include "omap_dmm_tiler.h"

/*
 * GEM buffer object implementation.
 */

/* note: we use upper 8 bits of flags for driver-internal flags: */
#define OMAP_BO_MEM_DMA_API	0x01000000	/* memory allocated with the dma_alloc_* API */
#define OMAP_BO_MEM_SHMEM	0x02000000	/* memory allocated through shmem backing */
#define OMAP_BO_MEM_DMABUF	0x08000000	/* memory imported from a dmabuf */
struct omap_gem_object {
	struct drm_gem_object base;

	struct list_head mm_list;

	u32 flags;

	/** width/height for tiled formats (rounded up to slot boundaries) */
	u16 width, height;

	/** roll applied when mapping to DMM */
	u32 roll;

	/** protects dma_addr_cnt, block, pages, dma_addrs and vaddr */
	struct mutex lock;

	/**
	 * dma_addr contains the buffer DMA address. It is valid for
	 *
	 * - buffers allocated through the DMA mapping API (with the
	 *   OMAP_BO_MEM_DMA_API flag set)
	 *
	 * - buffers imported from dmabuf (with the OMAP_BO_MEM_DMABUF flag set)
	 *   if they are physically contiguous (when sgt->orig_nents == 1)
	 *
	 * - buffers mapped through the TILER when dma_addr_cnt is not zero, in
	 *   which case the DMA address points to the TILER aperture
	 *
	 * Physically contiguous buffers have their DMA address equal to the
	 * physical address as we don't remap those buffers through the TILER.
	 *
	 * Buffers mapped to the TILER have their DMA address pointing to the
	 * TILER aperture. As TILER mappings are refcounted (through
	 * dma_addr_cnt) the DMA address must be accessed through omap_gem_pin()
	 * to ensure that the mapping won't disappear unexpectedly. References
	 * must be released with omap_gem_unpin().
	 */
	dma_addr_t dma_addr;

	/**
	 * # of users of dma_addr
	 */
	refcount_t dma_addr_cnt;

	/**
	 * If the buffer has been imported from a dmabuf the OMAP_BO_MEM_DMABUF
	 * flag is set and the sgt field is valid.
	 */
	struct sg_table *sgt;

	/**
	 * tiler block used when buffer is remapped in DMM/TILER.
	 */
	struct tiler_block *block;

	/**
	 * Array of backing pages, if allocated. Note that pages are never
	 * allocated for buffers originally allocated from contiguous memory.
	 */
	struct page **pages;

	/** addresses corresponding to pages in above array */
	dma_addr_t *dma_addrs;

	/**
	 * Virtual address, if mapped.
	 */
	void *vaddr;
};

#define to_omap_bo(x) container_of(x, struct omap_gem_object, base)

/* To deal with userspace mmap'ings of 2d tiled buffers, which (a) are
 * not necessarily pinned in TILER all the time, and (b) when they are
 * they are not necessarily page aligned, we reserve one or more small
 * regions in each of the 2d containers to use as a user-GART where we
 * can create a second page-aligned mapping of parts of the buffer
 * being accessed from userspace.
 *
 * Note that we could optimize slightly when we know that multiple
 * tiler containers are backed by the same PAT.. but I'll leave that
 * for later..
 */
#define NUM_USERGART_ENTRIES 2
struct omap_drm_usergart_entry {
	struct tiler_block *block;	/* the reserved tiler block */
	dma_addr_t dma_addr;
	struct drm_gem_object *obj;	/* the current pinned obj */
	pgoff_t obj_pgoff;		/* page offset of obj currently
					   mapped in */
};

struct omap_drm_usergart {
	struct omap_drm_usergart_entry entry[NUM_USERGART_ENTRIES];
	int height;		/* height in rows */
	int height_shift;	/* ilog2(height in rows) */
	int slot_shift;		/* ilog2(width per slot) */
	int stride_pfn;		/* stride in pages */
	int last;		/* index of last used entry */
};
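
/*
 * Illustrative sketch of the usergart geometry (assumed example values, not
 * read from hardware): for an 8-bit TILER format whose slot is 64x64 bytes,
 * a usergart region would end up with roughly
 *
 *	height       = 64			(rows per slot)
 *	height_shift = ilog2(64) = 6
 *	slot_shift   = ilog2(64) = 6		(bytes per slot row)
 *	stride_pfn   = tiler_stride(fmt, 0) >> PAGE_SHIFT
 *
 * so one faulted slot spans 'height' pages of the mmap'ing, each stride_pfn
 * pfns apart in the TILER aperture. The real values are computed from the
 * tiler geometry at driver init time.
 */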

/* -----------------------------------------------------------------------------
 * Helpers
 */

/** get mmap offset */
u64 omap_gem_mmap_offset(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	int ret;
	size_t size;

	/* Make it mmapable */
	size = omap_gem_mmap_size(obj);
	ret = drm_gem_create_mmap_offset_size(obj, size);
	if (ret) {
		dev_err(dev->dev, "could not allocate mmap offset\n");
		return 0;
	}

	return drm_vma_node_offset_addr(&obj->vma_node);
}
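
/*
 * Usage sketch (illustrative, not part of the driver): userspace obtains this
 * fake offset, e.g. via the DRM_IOCTL_MODE_MAP_DUMB path that lands in
 * omap_gem_dumb_map_offset() below, and passes it to mmap() on the DRM fd:
 *
 *	u64 offset = omap_gem_mmap_offset(obj);
 *	// userspace side:
 *	// mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *	//	drm_fd, offset);
 */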

static bool omap_gem_is_contiguous(struct omap_gem_object *omap_obj)
{
	if (omap_obj->flags & OMAP_BO_MEM_DMA_API)
		return true;

	if ((omap_obj->flags & OMAP_BO_MEM_DMABUF) && omap_obj->sgt->nents == 1)
		return true;

	return false;
}

/* -----------------------------------------------------------------------------
 * Eviction
 */

static void omap_gem_evict_entry(struct drm_gem_object *obj,
		enum tiler_fmt fmt, struct omap_drm_usergart_entry *entry)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	struct omap_drm_private *priv = obj->dev->dev_private;
	int n = priv->usergart[fmt].height;
	size_t size = PAGE_SIZE * n;
	loff_t off = omap_gem_mmap_offset(obj) +
			(entry->obj_pgoff << PAGE_SHIFT);
	const int m = DIV_ROUND_UP(omap_obj->width << fmt, PAGE_SIZE);

	if (m > 1) {
		int i;
		/* if stride > PAGE_SIZE then sparse mapping: */
		for (i = n; i > 0; i--) {
			unmap_mapping_range(obj->dev->anon_inode->i_mapping,
					    off, PAGE_SIZE, 1);
			off += PAGE_SIZE * m;
		}
	} else {
		unmap_mapping_range(obj->dev->anon_inode->i_mapping,
				    off, size, 1);
	}

	entry->obj = NULL;
}

/* Evict a buffer from usergart, if it is mapped there */
static void omap_gem_evict(struct drm_gem_object *obj)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	struct omap_drm_private *priv = obj->dev->dev_private;

	if (omap_obj->flags & OMAP_BO_TILED_MASK) {
		enum tiler_fmt fmt = gem2fmt(omap_obj->flags);
		int i;

		for (i = 0; i < NUM_USERGART_ENTRIES; i++) {
			struct omap_drm_usergart_entry *entry =
				&priv->usergart[fmt].entry[i];

			if (entry->obj == obj)
				omap_gem_evict_entry(obj, fmt, entry);
		}
	}
}

/* -----------------------------------------------------------------------------
 * Page Management
 */

/*
 * Ensure backing pages are allocated. Must be called with the omap_obj.lock
 * held.
 */
static int omap_gem_attach_pages(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	struct page **pages;
	int npages = obj->size >> PAGE_SHIFT;
	int i, ret;
	dma_addr_t *addrs;

	lockdep_assert_held(&omap_obj->lock);

	/*
	 * If not using shmem (in which case backing pages don't need to be
	 * allocated) or if pages are already allocated we're done.
	 */
	if (!(omap_obj->flags & OMAP_BO_MEM_SHMEM) || omap_obj->pages)
		return 0;

	pages = drm_gem_get_pages(obj);
	if (IS_ERR(pages)) {
		dev_err(obj->dev->dev, "could not get pages: %ld\n", PTR_ERR(pages));
		return PTR_ERR(pages);
	}

	/* for non-cached buffers, ensure the new pages are clean because
	 * DSS, GPU, etc. are not cache coherent:
	 */
	if (omap_obj->flags & (OMAP_BO_WC|OMAP_BO_UNCACHED)) {
		addrs = kmalloc_array(npages, sizeof(*addrs), GFP_KERNEL);
		if (!addrs) {
			ret = -ENOMEM;
			goto free_pages;
		}

		for (i = 0; i < npages; i++) {
			addrs[i] = dma_map_page(dev->dev, pages[i],
					0, PAGE_SIZE, DMA_TO_DEVICE);

			if (dma_mapping_error(dev->dev, addrs[i])) {
				dev_warn(dev->dev,
					"%s: failed to map page\n", __func__);

				for (i = i - 1; i >= 0; --i) {
					dma_unmap_page(dev->dev, addrs[i],
						PAGE_SIZE, DMA_TO_DEVICE);
				}

				ret = -ENOMEM;
				goto free_addrs;
			}
		}
	} else {
		addrs = kcalloc(npages, sizeof(*addrs), GFP_KERNEL);
		if (!addrs) {
			ret = -ENOMEM;
			goto free_pages;
		}
	}

	omap_obj->dma_addrs = addrs;
	omap_obj->pages = pages;

	return 0;

free_addrs:
	kfree(addrs);
free_pages:
	drm_gem_put_pages(obj, pages, true, false);

	return ret;
}

/* Release backing pages. Must be called with the omap_obj.lock held. */
static void omap_gem_detach_pages(struct drm_gem_object *obj)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	unsigned int npages = obj->size >> PAGE_SHIFT;
	unsigned int i;

	lockdep_assert_held(&omap_obj->lock);

	for (i = 0; i < npages; i++) {
		if (omap_obj->dma_addrs[i])
			dma_unmap_page(obj->dev->dev, omap_obj->dma_addrs[i],
				       PAGE_SIZE, DMA_TO_DEVICE);
	}

	kfree(omap_obj->dma_addrs);
	omap_obj->dma_addrs = NULL;

	drm_gem_put_pages(obj, omap_obj->pages, true, false);
	omap_obj->pages = NULL;
}

/* get buffer flags */
u32 omap_gem_flags(struct drm_gem_object *obj)
{
	return to_omap_bo(obj)->flags;
}

/** get mmap size */
size_t omap_gem_mmap_size(struct drm_gem_object *obj)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	size_t size = obj->size;

	if (omap_obj->flags & OMAP_BO_TILED_MASK) {
		/* for tiled buffers, the virtual size has stride rounded up
		 * to 4kb.. (to hide the fact that row n+1 might start 16kb or
		 * 32kb later!). But we don't back the entire buffer with
		 * pages, only the valid picture part.. so need to adjust for
		 * this in the size used to mmap and generate mmap offset
		 */
		size = tiler_vsize(gem2fmt(omap_obj->flags),
				omap_obj->width, omap_obj->height);
	}

	return size;
}

/* -----------------------------------------------------------------------------
 * Fault Handling
 */

/* Normal handling for the case of faulting in non-tiled buffers */
static vm_fault_t omap_gem_fault_1d(struct drm_gem_object *obj,
		struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	unsigned long pfn;
	pgoff_t pgoff;

	/* We don't use vmf->pgoff since that has the fake offset: */
	pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;

	if (omap_obj->pages) {
		omap_gem_cpu_sync_page(obj, pgoff);
		pfn = page_to_pfn(omap_obj->pages[pgoff]);
	} else {
		BUG_ON(!omap_gem_is_contiguous(omap_obj));
		pfn = (omap_obj->dma_addr >> PAGE_SHIFT) + pgoff;
	}

	VERB("Inserting %p pfn %lx, pa %lx", (void *)vmf->address,
			pfn, pfn << PAGE_SHIFT);

	return vmf_insert_mixed(vma, vmf->address,
			__pfn_to_pfn_t(pfn, PFN_DEV));
}

/* Special handling for the case of faulting in 2d tiled buffers */
static vm_fault_t omap_gem_fault_2d(struct drm_gem_object *obj,
		struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	struct omap_drm_private *priv = obj->dev->dev_private;
	struct omap_drm_usergart_entry *entry;
	enum tiler_fmt fmt = gem2fmt(omap_obj->flags);
	struct page *pages[64];  /* XXX is this too much to have on stack? */
	unsigned long pfn;
	pgoff_t pgoff, base_pgoff;
	unsigned long vaddr;
	int i, err, slots;
	vm_fault_t ret = VM_FAULT_NOPAGE;

	/*
	 * Note the height of the slot is also equal to the number of pages
	 * that need to be mapped in to fill a 4kb wide CPU page. If the slot
	 * height is 64, then 64 pages fill a 4kb wide by 64 row region.
	 */
	const int n = priv->usergart[fmt].height;
	const int n_shift = priv->usergart[fmt].height_shift;

	/*
	 * If buffer width in bytes > PAGE_SIZE then the virtual stride is
	 * rounded up to the next multiple of PAGE_SIZE.. this needs to be
	 * taken into account in some of the math, so figure out virtual
	 * stride in pages
	 */
	const int m = DIV_ROUND_UP(omap_obj->width << fmt, PAGE_SIZE);

	/* We don't use vmf->pgoff since that has the fake offset: */
	pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;

	/*
	 * Actual address we start mapping at is rounded down to previous slot
	 * boundary in the y direction:
	 */
	base_pgoff = round_down(pgoff, m << n_shift);

	/* figure out buffer width in slots */
	slots = omap_obj->width >> priv->usergart[fmt].slot_shift;

	vaddr = vmf->address - ((pgoff - base_pgoff) << PAGE_SHIFT);

	entry = &priv->usergart[fmt].entry[priv->usergart[fmt].last];

	/* evict previous buffer using this usergart entry, if any: */
	if (entry->obj)
		omap_gem_evict_entry(entry->obj, fmt, entry);

	entry->obj = obj;
	entry->obj_pgoff = base_pgoff;

	/* now convert base_pgoff to phys offset from virt offset: */
	base_pgoff = (base_pgoff >> n_shift) * slots;

	/* for wider-than 4k.. figure out which part of the slot-row we want: */
	if (m > 1) {
		int off = pgoff % m;
		entry->obj_pgoff += off;
		base_pgoff /= m;
		slots = min(slots - (off << n_shift), n);
		base_pgoff += off << n_shift;
		vaddr += off << PAGE_SHIFT;
	}

	/*
	 * Map in pages. Beyond the valid pixel part of the buffer, we set
	 * pages[i] to NULL to get a dummy page mapped in.. if someone
	 * reads/writes it they will get random/undefined content, but at
	 * least it won't be corrupting whatever other random page used to
	 * be mapped in, or other undefined behavior.
	 */
	memcpy(pages, &omap_obj->pages[base_pgoff],
			sizeof(struct page *) * slots);
	memset(pages + slots, 0,
			sizeof(struct page *) * (n - slots));

	err = tiler_pin(entry->block, pages, ARRAY_SIZE(pages), 0, true);
	if (err) {
		ret = vmf_error(err);
		dev_err(obj->dev->dev, "failed to pin: %d\n", err);
		return ret;
	}

	pfn = entry->dma_addr >> PAGE_SHIFT;

	VERB("Inserting %p pfn %lx, pa %lx", (void *)vmf->address,
			pfn, pfn << PAGE_SHIFT);

	for (i = n; i > 0; i--) {
		ret = vmf_insert_mixed(vma,
			vaddr, __pfn_to_pfn_t(pfn, PFN_DEV));
		if (ret & VM_FAULT_ERROR)
			break;
		pfn += priv->usergart[fmt].stride_pfn;
		vaddr += PAGE_SIZE * m;
	}

	/* simple round-robin: */
	priv->usergart[fmt].last = (priv->usergart[fmt].last + 1)
				 % NUM_USERGART_ENTRIES;

	return ret;
}
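
/*
 * Worked example for the math above (illustrative numbers only): assume
 * n = 64 slot rows (n_shift = 6) and a buffer two pages wide, i.e. m = 2.
 * A fault at pgoff = 300 gives
 *
 *	base_pgoff = round_down(300, 2 << 6) = 256	(slot-row start)
 *	off        = 300 % 2 = 0			(left half of the row)
 *
 * so pages [256 .. 256 + slots) are pinned into the reserved tiler block and
 * 64 PTEs are inserted, one per slot row, stride_pfn apart in the aperture.
 */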

/**
 * omap_gem_fault - pagefault handler for GEM objects
 * @vmf: fault detail
 *
 * Invoked when a fault occurs on an mmap of a GEM managed area. GEM
 * does most of the work for us including the actual map/unmap calls
 * but we need to do the actual page work.
 *
 * The VMA was set up by GEM. In doing so it also ensured that the
 * vma->vm_private_data points to the GEM object that is backing this
 * mapping.
 */
vm_fault_t omap_gem_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct drm_gem_object *obj = vma->vm_private_data;
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int err;
	vm_fault_t ret;

	/* Make sure we don't parallel update on a fault, nor move or remove
	 * something from beneath our feet
	 */
	mutex_lock(&omap_obj->lock);

	/* if a shmem backed object, make sure we have pages attached now */
	err = omap_gem_attach_pages(obj);
	if (err) {
		ret = vmf_error(err);
		goto fail;
	}

	/* where should we do corresponding put_pages().. we are mapping
	 * the original page, rather than thru a GART, so we can't rely
	 * on eviction to trigger this. But munmap() or all mappings should
	 * probably trigger put_pages()?
	 */

	if (omap_obj->flags & OMAP_BO_TILED_MASK)
		ret = omap_gem_fault_2d(obj, vma, vmf);
	else
		ret = omap_gem_fault_1d(obj, vma, vmf);

fail:
	mutex_unlock(&omap_obj->lock);
	return ret;
}

/** We override mainly to fix up some of the vm mapping flags.. */
int omap_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	int ret;

	ret = drm_gem_mmap(filp, vma);
	if (ret) {
		DBG("mmap failed: %d", ret);
		return ret;
	}

	return omap_gem_mmap_obj(vma->vm_private_data, vma);
}

int omap_gem_mmap_obj(struct drm_gem_object *obj,
		struct vm_area_struct *vma)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);

	vma->vm_flags &= ~VM_PFNMAP;
	vma->vm_flags |= VM_MIXEDMAP;

	if (omap_obj->flags & OMAP_BO_WC) {
		vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
	} else if (omap_obj->flags & OMAP_BO_UNCACHED) {
		vma->vm_page_prot = pgprot_noncached(vm_get_page_prot(vma->vm_flags));
	} else {
		/*
		 * We do have some private objects, at least for scanout
		 * buffers on hardware without DMM/TILER. But these are
		 * allocated write-combine.
		 */
		if (WARN_ON(!obj->filp))
			return -EINVAL;

		/*
		 * Shunt off cached objs to shmem file so they have their own
		 * address_space (so unmap_mapping_range does what we want,
		 * in particular in the case of mmap'd dmabufs)
		 */
		fput(vma->vm_file);
		vma->vm_pgoff = 0;
		vma->vm_file = get_file(obj->filp);

		vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
	}

	return 0;
}

/* -----------------------------------------------------------------------------
 * Dumb Buffers
 */

/**
 * omap_gem_dumb_create - create a dumb buffer
 * @file: our client file
 * @dev: our device
 * @args: the requested arguments copied from userspace
 *
 * Allocate a buffer suitable for use for a frame buffer of the
 * form described by user space. Give userspace a handle by which
 * to reference it.
 */
int omap_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
		struct drm_mode_create_dumb *args)
{
	union omap_gem_size gsize;

	args->pitch = DIV_ROUND_UP(args->width * args->bpp, 8);

	args->size = PAGE_ALIGN(args->pitch * args->height);

	gsize = (union omap_gem_size){
		.bytes = args->size,
	};

	return omap_gem_new_handle(dev, file, gsize,
			OMAP_BO_SCANOUT | OMAP_BO_WC, &args->handle);
}
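
/*
 * Example of the pitch/size arithmetic above (illustrative values): a
 * 640x480 buffer at 32 bpp gives
 *
 *	pitch = DIV_ROUND_UP(640 * 32, 8) = 2560 bytes
 *	size  = PAGE_ALIGN(2560 * 480)    = 1228800 bytes (300 pages)
 */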

/**
 * omap_gem_dumb_map_offset - create an mmap offset for a dumb buffer
 * @file: our drm client file
 * @dev: drm device
 * @handle: GEM handle to the object (from dumb_create)
 * @offset: memory to return the fake mmap offset in
 *
 * Do the necessary setup to allow the mapping of the frame buffer
 * into user memory. We don't have to do much here at the moment.
 */
int omap_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
		u32 handle, u64 *offset)
{
	struct drm_gem_object *obj;
	int ret = 0;

	/* GEM does all our handle to object mapping */
	obj = drm_gem_object_lookup(file, handle);
	if (obj == NULL) {
		ret = -ENOENT;
		goto fail;
	}

	*offset = omap_gem_mmap_offset(obj);

	drm_gem_object_put(obj);

fail:
	return ret;
}

#ifdef CONFIG_DRM_FBDEV_EMULATION
/* Set scrolling position. This allows us to implement fast scrolling
 * for console.
 *
 * Call only from non-atomic contexts.
 */
int omap_gem_roll(struct drm_gem_object *obj, u32 roll)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	u32 npages = obj->size >> PAGE_SHIFT;
	int ret = 0;

	if (roll > npages) {
		dev_err(obj->dev->dev, "invalid roll: %d\n", roll);
		return -EINVAL;
	}

	omap_obj->roll = roll;

	mutex_lock(&omap_obj->lock);

	/* if we aren't mapped yet, we don't need to do anything */
	if (omap_obj->block) {
		ret = omap_gem_attach_pages(obj);
		if (ret)
			goto fail;

		ret = tiler_pin(omap_obj->block, omap_obj->pages, npages,
				roll, true);
		if (ret)
			dev_err(obj->dev->dev, "could not repin: %d\n", ret);
	}

fail:
	mutex_unlock(&omap_obj->lock);

	return ret;
}
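
/*
 * Usage sketch (illustrative): the roll is expressed in pages, so an fbdev
 * caller scrolling the console by whole lines would do something like
 *
 *	omap_gem_roll(fbdev_bo, scroll_rows * pitch_pages);
 *
 * where fbdev_bo, scroll_rows and pitch_pages are hypothetical caller
 * variables (pitch_pages being the line pitch in pages).
 */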
#endif

/* -----------------------------------------------------------------------------
 * Memory Management & DMA Sync
 */

/*
 * shmem buffers that are mapped cached are not coherent.
 *
 * We keep track of dirty pages using page faulting to perform cache management.
 * When a page is mapped to the CPU in read/write mode the device can't access
 * it and omap_obj->dma_addrs[i] is NULL. When a page is mapped to the device
 * the omap_obj->dma_addrs[i] is set to the DMA address, and the page is
 * unmapped from the CPU.
 */
static inline bool omap_gem_is_cached_coherent(struct drm_gem_object *obj)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);

	return !((omap_obj->flags & OMAP_BO_MEM_SHMEM) &&
		 ((omap_obj->flags & OMAP_BO_CACHE_MASK) == OMAP_BO_CACHED));
}
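
/*
 * The dma_addrs[] tracking described above behaves like a tiny per-page
 * state machine (sketch):
 *
 *	dma_addrs[i] == 0	CPU owns the page; a device sync maps it
 *	dma_addrs[i] != 0	device owns the page; a CPU fault hands it
 *				back via omap_gem_cpu_sync_page() below
 */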

/* Sync the buffer for CPU access.. note pages should already be
 * attached, ie. omap_gem_get_pages()
 */
void omap_gem_cpu_sync_page(struct drm_gem_object *obj, int pgoff)
{
	struct drm_device *dev = obj->dev;
	struct omap_gem_object *omap_obj = to_omap_bo(obj);

	if (omap_gem_is_cached_coherent(obj))
		return;

	if (omap_obj->dma_addrs[pgoff]) {
		dma_unmap_page(dev->dev, omap_obj->dma_addrs[pgoff],
				PAGE_SIZE, DMA_TO_DEVICE);
		omap_obj->dma_addrs[pgoff] = 0;
	}
}

/* sync the buffer for DMA access */
void omap_gem_dma_sync_buffer(struct drm_gem_object *obj,
		enum dma_data_direction dir)
{
	struct drm_device *dev = obj->dev;
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int i, npages = obj->size >> PAGE_SHIFT;
	struct page **pages = omap_obj->pages;
	bool dirty = false;

	if (omap_gem_is_cached_coherent(obj))
		return;

	for (i = 0; i < npages; i++) {
		if (!omap_obj->dma_addrs[i]) {
			dma_addr_t addr;

			addr = dma_map_page(dev->dev, pages[i], 0,
					PAGE_SIZE, dir);
			if (dma_mapping_error(dev->dev, addr)) {
				dev_warn(dev->dev, "%s: failed to map page\n",
					__func__);
				break;
			}

			dirty = true;
			omap_obj->dma_addrs[i] = addr;
		}
	}

	if (dirty) {
		unmap_mapping_range(obj->filp->f_mapping, 0,
				    omap_gem_mmap_size(obj), 1);
	}
}
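
/*
 * Usage sketch (illustrative): a caller about to start device DMA on a
 * cached shmem buffer would typically do
 *
 *	struct page **pages;
 *	int ret = omap_gem_get_pages(obj, &pages, true);
 *	if (ret)
 *		return ret;
 *	omap_gem_dma_sync_buffer(obj, DMA_TO_DEVICE);
 *	// ... kick off the DMA ...
 */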

/**
 * omap_gem_pin() - Pin a GEM object in memory
 * @obj: the GEM object
 * @dma_addr: the DMA address
 *
 * Pin the given GEM object in memory and fill the dma_addr pointer with the
 * object's DMA address. If the buffer is not physically contiguous it will be
 * remapped through the TILER to provide a contiguous view.
 *
 * Pins are reference-counted, calling this function multiple times is allowed
 * as long as the corresponding omap_gem_unpin() calls are balanced.
 *
 * Return 0 on success or a negative error code otherwise.
 */
int omap_gem_pin(struct drm_gem_object *obj, dma_addr_t *dma_addr)
{
	struct omap_drm_private *priv = obj->dev->dev_private;
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int ret = 0;

	mutex_lock(&omap_obj->lock);

	if (!omap_gem_is_contiguous(omap_obj) && priv->has_dmm) {
		if (refcount_read(&omap_obj->dma_addr_cnt) == 0) {
			u32 npages = obj->size >> PAGE_SHIFT;
			enum tiler_fmt fmt = gem2fmt(omap_obj->flags);
			struct tiler_block *block;

			BUG_ON(omap_obj->block);

			refcount_set(&omap_obj->dma_addr_cnt, 1);

			ret = omap_gem_attach_pages(obj);
			if (ret)
				goto fail;

			if (omap_obj->flags & OMAP_BO_TILED_MASK) {
				block = tiler_reserve_2d(fmt,
						omap_obj->width,
						omap_obj->height, 0);
			} else {
				block = tiler_reserve_1d(obj->size);
			}

			if (IS_ERR(block)) {
				ret = PTR_ERR(block);
				dev_err(obj->dev->dev,
					"could not remap: %d (%d)\n", ret, fmt);
				goto fail;
			}

			/* TODO: enable async refill.. */
			ret = tiler_pin(block, omap_obj->pages, npages,
					omap_obj->roll, true);
			if (ret) {
				tiler_release(block);
				dev_err(obj->dev->dev,
						"could not pin: %d\n", ret);
				goto fail;
			}

			omap_obj->dma_addr = tiler_ssptr(block);
			omap_obj->block = block;

			DBG("got dma address: %pad", &omap_obj->dma_addr);
		} else {
			refcount_inc(&omap_obj->dma_addr_cnt);
		}

		if (dma_addr)
			*dma_addr = omap_obj->dma_addr;
	} else if (omap_gem_is_contiguous(omap_obj)) {
		if (dma_addr)
			*dma_addr = omap_obj->dma_addr;
	} else {
		ret = -EINVAL;
		goto fail;
	}

fail:
	mutex_unlock(&omap_obj->lock);

	return ret;
}
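
/*
 * Usage sketch (illustrative): pins are balanced, so a scanout path would
 * look roughly like
 *
 *	dma_addr_t dma_addr;
 *	int ret = omap_gem_pin(obj, &dma_addr);
 *	if (ret)
 *		return ret;
 *	// ... program the display controller with dma_addr ...
 *	omap_gem_unpin(obj);	// when the buffer leaves the screen
 */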

/**
 * omap_gem_unpin_locked() - Unpin a GEM object from memory
 * @obj: the GEM object
 *
 * omap_gem_unpin() without locking.
 */
static void omap_gem_unpin_locked(struct drm_gem_object *obj)
{
	struct omap_drm_private *priv = obj->dev->dev_private;
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int ret;

	if (omap_gem_is_contiguous(omap_obj) || !priv->has_dmm)
		return;

	if (refcount_dec_and_test(&omap_obj->dma_addr_cnt)) {
		ret = tiler_unpin(omap_obj->block);
		if (ret) {
			dev_err(obj->dev->dev,
				"could not unpin pages: %d\n", ret);
		}
		ret = tiler_release(omap_obj->block);
		if (ret) {
			dev_err(obj->dev->dev,
				"could not release unmap: %d\n", ret);
		}
		omap_obj->dma_addr = 0;
		omap_obj->block = NULL;
	}
}

/**
 * omap_gem_unpin() - Unpin a GEM object from memory
 * @obj: the GEM object
 *
 * Unpin the given GEM object previously pinned with omap_gem_pin(). Pins are
 * reference-counted, the actual unpin will only be performed when the number
 * of calls to this function matches the number of calls to omap_gem_pin().
 */
void omap_gem_unpin(struct drm_gem_object *obj)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);

	mutex_lock(&omap_obj->lock);
	omap_gem_unpin_locked(obj);
	mutex_unlock(&omap_obj->lock);
}

/* Get rotated scanout address (only valid if already pinned), at the
 * specified orientation and x,y offset from top-left corner of buffer
 * (only valid for tiled 2d buffers)
 */
int omap_gem_rotated_dma_addr(struct drm_gem_object *obj, u32 orient,
		int x, int y, dma_addr_t *dma_addr)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int ret = -EINVAL;

	mutex_lock(&omap_obj->lock);

	if ((refcount_read(&omap_obj->dma_addr_cnt) > 0) && omap_obj->block &&
			(omap_obj->flags & OMAP_BO_TILED_MASK)) {
		*dma_addr = tiler_tsptr(omap_obj->block, orient, x, y);
		ret = 0;
	}

	mutex_unlock(&omap_obj->lock);

	return ret;
}

/* Get tiler stride for the buffer (only valid for 2d tiled buffers) */
int omap_gem_tiled_stride(struct drm_gem_object *obj, u32 orient)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int ret = -EINVAL;

	if (omap_obj->flags & OMAP_BO_TILED_MASK)
		ret = tiler_stride(gem2fmt(omap_obj->flags), orient);

	return ret;
}

/* if !remap, and we don't have pages backing, then fail, rather than
 * increasing the pin count (which we don't really do yet anyways,
 * because we don't support swapping pages back out). And 'remap'
 * might not be quite the right name, but I wanted to keep it working
 * similarly to omap_gem_pin(). Note though that mutex is not
 * acquired if !remap (because this can be called in atomic ctxt),
 * but probably omap_gem_unpin() should be changed to work in the
 * same way. If !remap, a matching omap_gem_put_pages() call is not
 * required (and should not be made).
 */
int omap_gem_get_pages(struct drm_gem_object *obj, struct page ***pages,
		bool remap)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int ret = 0;

	mutex_lock(&omap_obj->lock);

	if (remap) {
		ret = omap_gem_attach_pages(obj);
		if (ret)
			goto unlock;
	}

	if (!omap_obj->pages) {
		ret = -ENOMEM;
		goto unlock;
	}

	*pages = omap_obj->pages;

unlock:
	mutex_unlock(&omap_obj->lock);

	return ret;
}
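
/*
 * Usage sketch (illustrative): with remap = true the call may sleep and
 * allocate; with remap = false it only succeeds if pages are already
 * attached:
 *
 *	struct page **pages;
 *	if (omap_gem_get_pages(obj, &pages, false))	// probe, no alloc
 *		return -EBUSY;			// hypothetical caller policy
 */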

/* release pages when DMA no longer being performed */
int omap_gem_put_pages(struct drm_gem_object *obj)
{
	/* do something here if we dynamically attach/detach pages.. at
	 * least they would no longer need to be pinned if everyone has
	 * released the pages..
	 */
	return 0;
}

#ifdef CONFIG_DRM_FBDEV_EMULATION
/*
 * Get kernel virtual address for CPU access.. this more or less only
 * exists for omap_fbdev.
 */
void *omap_gem_vaddr(struct drm_gem_object *obj)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	void *vaddr;
	int ret;

	mutex_lock(&omap_obj->lock);

	if (!omap_obj->vaddr) {
		ret = omap_gem_attach_pages(obj);
		if (ret) {
			vaddr = ERR_PTR(ret);
			goto unlock;
		}

		omap_obj->vaddr = vmap(omap_obj->pages, obj->size >> PAGE_SHIFT,
				VM_MAP, pgprot_writecombine(PAGE_KERNEL));
	}

	vaddr = omap_obj->vaddr;

unlock:
	mutex_unlock(&omap_obj->lock);
	return vaddr;
}
#endif

/* -----------------------------------------------------------------------------
 * Power Management
 */

#ifdef CONFIG_PM
/* re-pin objects in DMM in resume path: */
int omap_gem_resume(struct drm_device *dev)
{
	struct omap_drm_private *priv = dev->dev_private;
	struct omap_gem_object *omap_obj;
	int ret = 0;

	mutex_lock(&priv->list_lock);
	list_for_each_entry(omap_obj, &priv->obj_list, mm_list) {
		if (omap_obj->block) {
			struct drm_gem_object *obj = &omap_obj->base;
			u32 npages = obj->size >> PAGE_SHIFT;

			WARN_ON(!omap_obj->pages);  /* this can't happen */
			ret = tiler_pin(omap_obj->block,
					omap_obj->pages, npages,
					omap_obj->roll, true);
			if (ret) {
				dev_err(dev->dev, "could not repin: %d\n", ret);
				goto done;
			}
		}
	}

done:
	mutex_unlock(&priv->list_lock);
	return ret;
}
#endif

/* -----------------------------------------------------------------------------
 * DebugFS
 */

#ifdef CONFIG_DEBUG_FS
void omap_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	u64 off;

	off = drm_vma_node_start(&obj->vma_node);

	mutex_lock(&omap_obj->lock);

	seq_printf(m, "%08x: %2d (%2d) %08llx %pad (%2d) %p %4d",
			omap_obj->flags, obj->name, kref_read(&obj->refcount),
			off, &omap_obj->dma_addr,
			refcount_read(&omap_obj->dma_addr_cnt),
			omap_obj->vaddr, omap_obj->roll);

	if (omap_obj->flags & OMAP_BO_TILED_MASK) {
		seq_printf(m, " %dx%d", omap_obj->width, omap_obj->height);
		if (omap_obj->block) {
			struct tcm_area *area = &omap_obj->block->area;
			seq_printf(m, " (%dx%d, %dx%d)",
					area->p0.x, area->p0.y,
					area->p1.x, area->p1.y);
		}
	} else {
		seq_printf(m, " %zu", obj->size);
	}

	mutex_unlock(&omap_obj->lock);

	seq_printf(m, "\n");
}

void omap_gem_describe_objects(struct list_head *list, struct seq_file *m)
{
	struct omap_gem_object *omap_obj;
	int count = 0;
	size_t size = 0;

	list_for_each_entry(omap_obj, list, mm_list) {
		struct drm_gem_object *obj = &omap_obj->base;
		seq_printf(m, "   ");
		omap_gem_describe(obj, m);
		count++;
		size += obj->size;
	}

	seq_printf(m, "Total %d objects, %zu bytes\n", count, size);
}
#endif

/* -----------------------------------------------------------------------------
 * Constructor & Destructor
 */

void omap_gem_free_object(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct omap_drm_private *priv = dev->dev_private;
	struct omap_gem_object *omap_obj = to_omap_bo(obj);

	omap_gem_evict(obj);

	mutex_lock(&priv->list_lock);
	list_del(&omap_obj->mm_list);
	mutex_unlock(&priv->list_lock);

	/*
	 * We own the sole reference to the object at this point, but to keep
	 * lockdep happy, we must still take the omap_obj_lock to call
	 * omap_gem_detach_pages(). This should hardly make any difference as
	 * there can't be any lock contention.
	 */
	mutex_lock(&omap_obj->lock);

	/* The object should not be pinned. */
	WARN_ON(refcount_read(&omap_obj->dma_addr_cnt) > 0);

	if (omap_obj->pages) {
		if (omap_obj->flags & OMAP_BO_MEM_DMABUF)
			kfree(omap_obj->pages);
		else
			omap_gem_detach_pages(obj);
	}

	if (omap_obj->flags & OMAP_BO_MEM_DMA_API) {
		dma_free_wc(dev->dev, obj->size, omap_obj->vaddr,
			    omap_obj->dma_addr);
	} else if (omap_obj->vaddr) {
		vunmap(omap_obj->vaddr);
	} else if (obj->import_attach) {
		drm_prime_gem_destroy(obj, omap_obj->sgt);
	}

	mutex_unlock(&omap_obj->lock);

	drm_gem_object_release(obj);

	mutex_destroy(&omap_obj->lock);

	kfree(omap_obj);
}

static bool omap_gem_validate_flags(struct drm_device *dev, u32 flags)
{
	struct omap_drm_private *priv = dev->dev_private;

	switch (flags & OMAP_BO_CACHE_MASK) {
	case OMAP_BO_CACHED:
	case OMAP_BO_WC:
	case OMAP_BO_CACHE_MASK:
		break;

	default:
		return false;
	}

	if (flags & OMAP_BO_TILED_MASK) {
		if (!priv->usergart)
			return false;

		switch (flags & OMAP_BO_TILED_MASK) {
		case OMAP_BO_TILED_8:
		case OMAP_BO_TILED_16:
		case OMAP_BO_TILED_32:
			break;

		default:
			return false;
		}
	}

	return true;
}
1171*4882a593Smuzhiyun
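/*
 * For example (illustrative): OMAP_BO_TILED_16 | OMAP_BO_WC is rejected on
 * devices without DMM support (priv->usergart == NULL), while plain
 * OMAP_BO_WC is accepted on any device.
 */
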
/* GEM buffer object constructor */
struct drm_gem_object *omap_gem_new(struct drm_device *dev,
		union omap_gem_size gsize, u32 flags)
{
	struct omap_drm_private *priv = dev->dev_private;
	struct omap_gem_object *omap_obj;
	struct drm_gem_object *obj;
	struct address_space *mapping;
	size_t size;
	int ret;

	if (!omap_gem_validate_flags(dev, flags))
		return NULL;

	/* Compute the memory and cache flags. */
	if (flags & OMAP_BO_TILED_MASK) {
		/*
		 * Tiled buffers are always backed by shmem pages. When they
		 * are scanned out, they are remapped into DMM/TILER.
		 */
		flags |= OMAP_BO_MEM_SHMEM;

		/*
		 * We currently don't allow cached buffers; some caching
		 * details still need to be handled better.
		 */
		flags &= ~(OMAP_BO_CACHED | OMAP_BO_WC | OMAP_BO_UNCACHED);
		flags |= tiler_get_cpu_cache_flags();
	} else if ((flags & OMAP_BO_SCANOUT) && !priv->has_dmm) {
		/*
		 * If we don't have DMM, we must allocate scanout buffers
		 * from contiguous DMA memory.
		 */
		flags |= OMAP_BO_MEM_DMA_API;
	} else if (!(flags & OMAP_BO_MEM_DMABUF)) {
		/*
		 * All other buffers not backed by dma_buf are shmem-backed.
		 */
		flags |= OMAP_BO_MEM_SHMEM;
	}

	/* Allocate and initialize the OMAP GEM object. */
	omap_obj = kzalloc(sizeof(*omap_obj), GFP_KERNEL);
	if (!omap_obj)
		return NULL;

	obj = &omap_obj->base;
	omap_obj->flags = flags;
	mutex_init(&omap_obj->lock);

	if (flags & OMAP_BO_TILED_MASK) {
		/*
		 * For tiled buffers align dimensions to slot boundaries and
		 * calculate size based on aligned dimensions.
		 */
		tiler_align(gem2fmt(flags), &gsize.tiled.width,
			    &gsize.tiled.height);

		size = tiler_size(gem2fmt(flags), gsize.tiled.width,
				  gsize.tiled.height);

		omap_obj->width = gsize.tiled.width;
		omap_obj->height = gsize.tiled.height;
	} else {
		size = PAGE_ALIGN(gsize.bytes);
	}

	/* Initialize the GEM object. */
	if (!(flags & OMAP_BO_MEM_SHMEM)) {
		drm_gem_private_object_init(dev, obj, size);
	} else {
		ret = drm_gem_object_init(dev, obj, size);
		if (ret)
			goto err_free;

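		/*
		 * Use GFP_USER | __GFP_DMA32 so the shmem pages backing the
		 * object come from 32-bit addressable memory, presumably
		 * because the display hardware cannot address pages above
		 * the 32-bit boundary (editorial note; rationale inferred).
		 */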
		mapping = obj->filp->f_mapping;
		mapping_set_gfp_mask(mapping, GFP_USER | __GFP_DMA32);
	}

	/* Allocate memory if needed. */
	if (flags & OMAP_BO_MEM_DMA_API) {
		omap_obj->vaddr = dma_alloc_wc(dev->dev, size,
					       &omap_obj->dma_addr,
					       GFP_KERNEL);
		if (!omap_obj->vaddr)
			goto err_release;
	}

	mutex_lock(&priv->list_lock);
	list_add(&omap_obj->mm_list, &priv->obj_list);
	mutex_unlock(&priv->list_lock);

	return obj;

err_release:
	drm_gem_object_release(obj);
err_free:
	kfree(omap_obj);
	return NULL;
}

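/*
 * Usage sketch (illustrative only): allocating a page-aligned,
 * write-combined scanout buffer with omap_gem_new(). The width/height
 * variables are hypothetical.
 *
 *	union omap_gem_size gsize = { .bytes = PAGE_ALIGN(width * height * 4) };
 *	struct drm_gem_object *bo;
 *
 *	bo = omap_gem_new(dev, gsize, OMAP_BO_SCANOUT | OMAP_BO_WC);
 *	if (!bo)
 *		return -ENOMEM;
 */
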
struct drm_gem_object *omap_gem_new_dmabuf(struct drm_device *dev, size_t size,
		struct sg_table *sgt)
{
	struct omap_drm_private *priv = dev->dev_private;
	struct omap_gem_object *omap_obj;
	struct drm_gem_object *obj;
	union omap_gem_size gsize;

	/* Without a DMM only physically contiguous buffers can be supported. */
	if (sgt->orig_nents != 1 && !priv->has_dmm)
		return ERR_PTR(-EINVAL);

	gsize.bytes = PAGE_ALIGN(size);
	obj = omap_gem_new(dev, gsize, OMAP_BO_MEM_DMABUF | OMAP_BO_WC);
	if (!obj)
		return ERR_PTR(-ENOMEM);

	omap_obj = to_omap_bo(obj);

	mutex_lock(&omap_obj->lock);

	omap_obj->sgt = sgt;

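	/*
	 * A single-entry sg_table means the buffer is physically contiguous,
	 * so its DMA address can be used directly. Otherwise (which requires
	 * DMM, as checked above) build a pages array so the buffer can later
	 * be pinned through the TILER.
	 */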
	if (sgt->orig_nents == 1) {
		omap_obj->dma_addr = sg_dma_address(sgt->sgl);
	} else {
		/* Create a pages list from the sgt. */
		struct page **pages;
		unsigned int npages;
		int ret;

		npages = DIV_ROUND_UP(size, PAGE_SIZE);
		pages = kcalloc(npages, sizeof(*pages), GFP_KERNEL);
		if (!pages) {
			/*
			 * Unlock before freeing: omap_gem_free_object()
			 * takes omap_obj->lock itself and frees the object,
			 * so unlocking after the call would both deadlock
			 * and touch freed memory.
			 */
			mutex_unlock(&omap_obj->lock);
			omap_gem_free_object(obj);
			return ERR_PTR(-ENOMEM);
		}

		omap_obj->pages = pages;
		ret = drm_prime_sg_to_page_addr_arrays(sgt, pages, NULL,
						       npages);
		if (ret) {
			mutex_unlock(&omap_obj->lock);
			omap_gem_free_object(obj);
			return ERR_PTR(-ENOMEM);
		}
	}

	mutex_unlock(&omap_obj->lock);
	return obj;
}

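/*
 * omap_gem_new_dmabuf() is normally reached through the PRIME import path.
 * A rough sketch (illustrative; the exact call chain depends on the
 * driver's prime import implementation):
 *
 *	sgt = dma_buf_map_attachment(attach, DMA_TO_DEVICE);
 *	obj = omap_gem_new_dmabuf(dev, dma_buf->size, sgt);
 */
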
/* Convenience method to construct a GEM buffer object and a userspace handle. */
int omap_gem_new_handle(struct drm_device *dev, struct drm_file *file,
		union omap_gem_size gsize, u32 flags, u32 *handle)
{
	struct drm_gem_object *obj;
	int ret;

	obj = omap_gem_new(dev, gsize, flags);
	if (!obj)
		return -ENOMEM;

	ret = drm_gem_handle_create(file, obj, handle);
	if (ret) {
		omap_gem_free_object(obj);
		return ret;
	}

	/* Drop the reference from allocation - the handle holds it now. */
	drm_gem_object_put(obj);

	return 0;
}

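/*
 * Typical caller (illustrative): the OMAP_GEM_NEW ioctl handler, roughly
 *
 *	struct drm_omap_gem_new *args = data;
 *
 *	return omap_gem_new_handle(dev, file_priv, args->size, args->flags,
 *				   &args->handle);
 */
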
/* -----------------------------------------------------------------------------
 * Init & Cleanup
 */

/* If DMM is available, set up the usergart regions used to map tiled buffers
 * to userspace.
 */
void omap_gem_init(struct drm_device *dev)
{
	struct omap_drm_private *priv = dev->dev_private;
	struct omap_drm_usergart *usergart;
	const enum tiler_fmt fmts[] = {
		TILFMT_8BIT, TILFMT_16BIT, TILFMT_32BIT
	};
	int i, j;

	if (!dmm_is_available()) {
		/* DMM only supported on OMAP4 and later, so this isn't fatal */
		dev_warn(dev->dev, "DMM not available, disabling DMM support\n");
		return;
	}

	usergart = kcalloc(ARRAY_SIZE(fmts), sizeof(*usergart), GFP_KERNEL);
	if (!usergart)
		return;

	/* reserve 4k aligned/wide regions for userspace mappings: */
	for (i = 0; i < ARRAY_SIZE(fmts); i++) {
		u16 h = 1, w = PAGE_SIZE >> i;

		tiler_align(fmts[i], &w, &h);
		/*
		 * Note: since each region is one 4 KiB page wide and has the
		 * minimum number of rows, the height ends up equal to the
		 * number of pages in the region.
		 */
		usergart[i].height = h;
		usergart[i].height_shift = ilog2(h);
		usergart[i].stride_pfn = tiler_stride(fmts[i], 0) >> PAGE_SHIFT;
		usergart[i].slot_shift = ilog2((PAGE_SIZE / h) >> i);
		for (j = 0; j < NUM_USERGART_ENTRIES; j++) {
			struct omap_drm_usergart_entry *entry;
			struct tiler_block *block;

			entry = &usergart[i].entry[j];
			block = tiler_reserve_2d(fmts[i], w, h, PAGE_SIZE);
			if (IS_ERR(block)) {
				dev_err(dev->dev,
					"reserve failed: %d, %d, %ld\n",
					i, j, PTR_ERR(block));
				return;
			}
			entry->dma_addr = tiler_ssptr(block);
			entry->block = block;

			DBG("%d:%d: %dx%d: dma_addr=%pad stride=%d", i, j, w, h,
			    &entry->dma_addr,
			    usergart[i].stride_pfn << PAGE_SHIFT);
		}
	}

	priv->usergart = usergart;
	priv->has_dmm = true;
}

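/*
 * Worked example of the shift math in omap_gem_init() above, assuming
 * 4 KiB pages and a hypothetical aligned height h == 2 for the 16-bit
 * format (i == 1): height_shift = ilog2(2) = 1, and
 * slot_shift = ilog2((4096 / 2) >> 1) = ilog2(1024) = 10.
 */
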
void omap_gem_deinit(struct drm_device *dev)
{
	struct omap_drm_private *priv = dev->dev_private;

	/* I believe we can rely on there being no more outstanding GEM
	 * objects which could depend on usergart/dmm at this point.
	 */
	kfree(priv->usergart);
}