// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 */

#include <linux/dma-map-ops.h>
#include <linux/spinlock.h>
#include <linux/shmem_fs.h>
#include <linux/dma-buf.h>
#include <linux/pfn_t.h>

#include <drm/drm_prime.h>

#include "msm_drv.h"
#include "msm_fence.h"
#include "msm_gem.h"
#include "msm_gpu.h"
#include "msm_mmu.h"

static void msm_gem_vunmap_locked(struct drm_gem_object *obj);

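/* Physical address of a VRAM-carveout backed object: */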
static dma_addr_t physaddr(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_drm_private *priv = obj->dev->dev_private;
	return (((dma_addr_t)msm_obj->vram_node->start) << PAGE_SHIFT) +
			priv->vram.paddr;
}

static bool use_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	return !msm_obj->vram_node;
}

/*
 * Cache sync.. this is a bit over-complicated, to fit the dma-mapping
 * API.  Really the GPU cache is out of scope here (it is handled on the
 * cmdstream) and all we need to do is invalidate newly allocated pages
 * before mapping them to the CPU as uncached/writecombine.
 *
 * On top of this, we have the added headache that, depending on
 * display generation, the display's iommu may be wired up to either
 * the toplevel drm device (mdss) or to the mdp sub-node, meaning
 * that here we either have dma-direct or iommu ops.
 *
 * Let this be a cautionary tale of abstraction gone wrong.
 */

static void sync_for_device(struct msm_gem_object *msm_obj)
{
	struct device *dev = msm_obj->base.dev->dev;

	dma_map_sgtable(dev, msm_obj->sgt, DMA_BIDIRECTIONAL, 0);
}

static void sync_for_cpu(struct msm_gem_object *msm_obj)
{
	struct device *dev = msm_obj->base.dev->dev;

	dma_unmap_sgtable(dev, msm_obj->sgt, DMA_BIDIRECTIONAL, 0);
}

/* allocate pages from VRAM carveout, used when no IOMMU: */
static struct page **get_pages_vram(struct drm_gem_object *obj, int npages)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_drm_private *priv = obj->dev->dev_private;
	dma_addr_t paddr;
	struct page **p;
	int ret, i;

	p = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
	if (!p)
		return ERR_PTR(-ENOMEM);

	spin_lock(&priv->vram.lock);
	ret = drm_mm_insert_node(&priv->vram.mm, msm_obj->vram_node, npages);
	spin_unlock(&priv->vram.lock);
	if (ret) {
		kvfree(p);
		return ERR_PTR(ret);
	}

	paddr = physaddr(obj);
	for (i = 0; i < npages; i++) {
		p[i] = phys_to_page(paddr);
		paddr += PAGE_SIZE;
	}

	return p;
}

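/* Get the backing pages for the object, allocating them (and building
 * the sg table) on first use:
 */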
static struct page **get_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	if (!msm_obj->pages) {
		struct drm_device *dev = obj->dev;
		struct page **p;
		int npages = obj->size >> PAGE_SHIFT;

		if (use_pages(obj))
			p = drm_gem_get_pages(obj);
		else
			p = get_pages_vram(obj, npages);

		if (IS_ERR(p)) {
			DRM_DEV_ERROR(dev->dev, "could not get pages: %ld\n",
					PTR_ERR(p));
			return p;
		}

		msm_obj->pages = p;

		msm_obj->sgt = drm_prime_pages_to_sg(obj->dev, p, npages);
		if (IS_ERR(msm_obj->sgt)) {
			void *ptr = ERR_CAST(msm_obj->sgt);

			DRM_DEV_ERROR(dev->dev, "failed to allocate sgt\n");
			msm_obj->sgt = NULL;
			return ptr;
		}

		/* For non-cached buffers, ensure the new pages are clean
		 * because display controller, GPU, etc. are not coherent:
		 */
		if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
			sync_for_device(msm_obj);
	}

	return msm_obj->pages;
}

static void put_pages_vram(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_drm_private *priv = obj->dev->dev_private;

	spin_lock(&priv->vram.lock);
	drm_mm_remove_node(msm_obj->vram_node);
	spin_unlock(&priv->vram.lock);

	kvfree(msm_obj->pages);
}

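/* Drop the sg table and release the backing pages (shmem or VRAM): */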
static void put_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	if (msm_obj->pages) {
		if (msm_obj->sgt) {
			/* For non-cached buffers, ensure the new
			 * pages are clean because display controller,
			 * GPU, etc. are not coherent:
			 */
			if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
				sync_for_cpu(msm_obj);

			sg_free_table(msm_obj->sgt);
			kfree(msm_obj->sgt);
		}

		if (use_pages(obj))
			drm_gem_put_pages(obj, msm_obj->pages, true, false);
		else
			put_pages_vram(obj);

		msm_obj->pages = NULL;
	}
}

struct page **msm_gem_get_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct page **p;

	mutex_lock(&msm_obj->lock);

	if (WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) {
		mutex_unlock(&msm_obj->lock);
		return ERR_PTR(-EBUSY);
	}

	p = get_pages(obj);
	mutex_unlock(&msm_obj->lock);
	return p;
}

void msm_gem_put_pages(struct drm_gem_object *obj)
{
	/* when we start tracking the pin count, then do something here */
}

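/* Set up the vma flags and page protection according to the buffer's
 * caching mode (WC, uncached or cached):
 */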
int msm_gem_mmap_obj(struct drm_gem_object *obj,
		struct vm_area_struct *vma)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	vma->vm_flags &= ~VM_PFNMAP;
	vma->vm_flags |= VM_MIXEDMAP;

	if (msm_obj->flags & MSM_BO_WC) {
		vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
	} else if (msm_obj->flags & MSM_BO_UNCACHED) {
		vma->vm_page_prot = pgprot_noncached(vm_get_page_prot(vma->vm_flags));
	} else {
		/*
		 * Shunt off cached objs to shmem file so they have their own
		 * address_space (so unmap_mapping_range does what we want,
		 * in particular in the case of mmap'd dmabufs)
		 */
		fput(vma->vm_file);
		get_file(obj->filp);
		vma->vm_pgoff = 0;
		vma->vm_file = obj->filp;

		vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
	}

	return 0;
}

int msm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	int ret;

	ret = drm_gem_mmap(filp, vma);
	if (ret) {
		DBG("mmap failed: %d", ret);
		return ret;
	}

	return msm_gem_mmap_obj(vma->vm_private_data, vma);
}

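/* Page fault handler: make sure the backing pages are attached and
 * insert the faulting page's pfn into the vma:
 */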
vm_fault_t msm_gem_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct drm_gem_object *obj = vma->vm_private_data;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct page **pages;
	unsigned long pfn;
	pgoff_t pgoff;
	int err;
	vm_fault_t ret;

	/*
	 * vm_ops.open/drm_gem_mmap_obj and close get and put
	 * a reference on obj.  So, we don't need to hold one here.
	 */
	err = mutex_lock_interruptible(&msm_obj->lock);
	if (err) {
		ret = VM_FAULT_NOPAGE;
		goto out;
	}

	if (WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) {
		mutex_unlock(&msm_obj->lock);
		return VM_FAULT_SIGBUS;
	}

	/* make sure we have pages attached now */
	pages = get_pages(obj);
	if (IS_ERR(pages)) {
		ret = vmf_error(PTR_ERR(pages));
		goto out_unlock;
	}

	/* We don't use vmf->pgoff since that has the fake offset: */
	pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;

	pfn = page_to_pfn(pages[pgoff]);

	VERB("Inserting %p pfn %lx, pa %lx", (void *)vmf->address,
			pfn, pfn << PAGE_SHIFT);

	ret = vmf_insert_mixed(vma, vmf->address, __pfn_to_pfn_t(pfn, PFN_DEV));
out_unlock:
	mutex_unlock(&msm_obj->lock);
out:
	return ret;
}

/** get mmap offset */
static uint64_t mmap_offset(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	int ret;

	WARN_ON(!mutex_is_locked(&msm_obj->lock));

	/* Make it mmapable */
	ret = drm_gem_create_mmap_offset(obj);

	if (ret) {
		DRM_DEV_ERROR(dev->dev, "could not allocate mmap offset\n");
		return 0;
	}

	return drm_vma_node_offset_addr(&obj->vma_node);
}

uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj)
{
	uint64_t offset;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	mutex_lock(&msm_obj->lock);
	offset = mmap_offset(obj);
	mutex_unlock(&msm_obj->lock);
	return offset;
}

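/* Allocate a new vma for @aspace and add it to the object's vma list.
 * Caller must hold msm_obj->lock:
 */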
static struct msm_gem_vma *add_vma(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;

	WARN_ON(!mutex_is_locked(&msm_obj->lock));

	vma = kzalloc(sizeof(*vma), GFP_KERNEL);
	if (!vma)
		return ERR_PTR(-ENOMEM);

	vma->aspace = aspace;

	list_add_tail(&vma->list, &msm_obj->vmas);

	return vma;
}

static struct msm_gem_vma *lookup_vma(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;

	WARN_ON(!mutex_is_locked(&msm_obj->lock));

	list_for_each_entry(vma, &msm_obj->vmas, list) {
		if (vma->aspace == aspace)
			return vma;
	}

	return NULL;
}

static void del_vma(struct msm_gem_vma *vma)
{
	if (!vma)
		return;

	list_del(&vma->list);
	kfree(vma);
}

/* Called with msm_obj->lock locked */
static void
put_iova(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma, *tmp;

	WARN_ON(!mutex_is_locked(&msm_obj->lock));

	list_for_each_entry_safe(vma, tmp, &msm_obj->vmas, list) {
		if (vma->aspace) {
			msm_gem_purge_vma(vma->aspace, vma);
			msm_gem_close_vma(vma->aspace, vma);
		}
		del_vma(vma);
	}
}

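/* Look up (or create and initialize) the vma for @aspace and return its
 * iova.  Caller must hold msm_obj->lock:
 */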
static int msm_gem_get_iova_locked(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace, uint64_t *iova,
		u64 range_start, u64 range_end)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;
	int ret = 0;

	WARN_ON(!mutex_is_locked(&msm_obj->lock));

	vma = lookup_vma(obj, aspace);

	if (!vma) {
		vma = add_vma(obj, aspace);
		if (IS_ERR(vma))
			return PTR_ERR(vma);

		ret = msm_gem_init_vma(aspace, vma, obj->size >> PAGE_SHIFT,
			range_start, range_end);
		if (ret) {
			del_vma(vma);
			return ret;
		}
	}

	*iova = vma->iova;
	return 0;
}

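/* Pin the backing pages and map them into @aspace, with protection bits
 * derived from the BO flags.  Caller must hold msm_obj->lock:
 */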
static int msm_gem_pin_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;
	struct page **pages;
	int prot = IOMMU_READ;

	if (!(msm_obj->flags & MSM_BO_GPU_READONLY))
		prot |= IOMMU_WRITE;

	if (msm_obj->flags & MSM_BO_MAP_PRIV)
		prot |= IOMMU_PRIV;

	WARN_ON(!mutex_is_locked(&msm_obj->lock));

	if (WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED))
		return -EBUSY;

	vma = lookup_vma(obj, aspace);
	if (WARN_ON(!vma))
		return -EINVAL;

	pages = get_pages(obj);
	if (IS_ERR(pages))
		return PTR_ERR(pages);

	return msm_gem_map_vma(aspace, vma, prot,
			msm_obj->sgt, obj->size >> PAGE_SHIFT);
}

/*
 * Get the iova and pin it.  Should have a matching put.
 * Limits the iova to the specified range (in pages).
 */
int msm_gem_get_and_pin_iova_range(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace, uint64_t *iova,
		u64 range_start, u64 range_end)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	u64 local;
	int ret;

	mutex_lock(&msm_obj->lock);

	ret = msm_gem_get_iova_locked(obj, aspace, &local,
		range_start, range_end);

	if (!ret)
		ret = msm_gem_pin_iova(obj, aspace);

	if (!ret)
		*iova = local;

	mutex_unlock(&msm_obj->lock);
	return ret;
}

/* Get the iova and pin it.  Should have a matching put. */
int msm_gem_get_and_pin_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace, uint64_t *iova)
{
	return msm_gem_get_and_pin_iova_range(obj, aspace, iova, 0, U64_MAX);
}

/*
 * Get an iova but don't pin it.  Doesn't need a put because iovas are
 * currently valid for the life of the object.
 */
int msm_gem_get_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace, uint64_t *iova)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	int ret;

	mutex_lock(&msm_obj->lock);
	ret = msm_gem_get_iova_locked(obj, aspace, iova, 0, U64_MAX);
	mutex_unlock(&msm_obj->lock);

	return ret;
}

/* Get the iova without taking a reference; used in places where you have
 * already done a 'msm_gem_get_and_pin_iova' or 'msm_gem_get_iova'.
 */
uint64_t msm_gem_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;

	mutex_lock(&msm_obj->lock);
	vma = lookup_vma(obj, aspace);
	mutex_unlock(&msm_obj->lock);
	WARN_ON(!vma);

	return vma ? vma->iova : 0;
}

/*
 * Unpin an iova by updating the reference counts.  The memory isn't
 * actually purged until something else (shrinker, mm_notifier, destroy,
 * etc) decides to get rid of it.
 */
void msm_gem_unpin_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;

	mutex_lock(&msm_obj->lock);
	vma = lookup_vma(obj, aspace);

	if (!WARN_ON(!vma))
		msm_gem_unmap_vma(aspace, vma);

	mutex_unlock(&msm_obj->lock);
}

int msm_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
		struct drm_mode_create_dumb *args)
{
	args->pitch = align_pitch(args->width, args->bpp);
	args->size = PAGE_ALIGN(args->pitch * args->height);
	return msm_gem_new_handle(dev, file, args->size,
			MSM_BO_SCANOUT | MSM_BO_WC, &args->handle, "dumb");
}

int msm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
		uint32_t handle, uint64_t *offset)
{
	struct drm_gem_object *obj;
	int ret = 0;

	/* GEM does all our handle to object mapping */
	obj = drm_gem_object_lookup(file, handle);
	if (obj == NULL) {
		ret = -ENOENT;
		goto fail;
	}

	*offset = msm_gem_mmap_offset(obj);

	drm_gem_object_put(obj);

fail:
	return ret;
}

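/* Map the object into the kernel address space (writecombine) and bump
 * vmap_count.  Fails if the object has been marked more purgeable than
 * @madv allows:
 */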
static void *get_vaddr(struct drm_gem_object *obj, unsigned madv)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	int ret = 0;

	if (obj->import_attach)
		return ERR_PTR(-ENODEV);

	mutex_lock(&msm_obj->lock);

	if (WARN_ON(msm_obj->madv > madv)) {
		DRM_DEV_ERROR(obj->dev->dev, "Invalid madv state: %u vs %u\n",
			msm_obj->madv, madv);
		mutex_unlock(&msm_obj->lock);
		return ERR_PTR(-EBUSY);
	}

	/* increment vmap_count *before* vmap() call, so shrinker can
	 * check vmap_count (is_vunmapable()) outside of msm_obj->lock.
	 * This guarantees that we won't try to msm_gem_vunmap() this
	 * same object from within the vmap() call (while we already
	 * hold msm_obj->lock)
	 */
	msm_obj->vmap_count++;

	if (!msm_obj->vaddr) {
		struct page **pages = get_pages(obj);

		if (IS_ERR(pages)) {
			ret = PTR_ERR(pages);
			goto fail;
		}
		msm_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT,
				VM_MAP, pgprot_writecombine(PAGE_KERNEL));
		if (msm_obj->vaddr == NULL) {
			ret = -ENOMEM;
			goto fail;
		}
	}

	mutex_unlock(&msm_obj->lock);
	return msm_obj->vaddr;

fail:
	msm_obj->vmap_count--;
	mutex_unlock(&msm_obj->lock);
	return ERR_PTR(ret);
}

void *msm_gem_get_vaddr(struct drm_gem_object *obj)
{
	return get_vaddr(obj, MSM_MADV_WILLNEED);
}

/*
 * Don't use this!  It is for the very special case of dumping
 * submits from GPU hangs or faults, where the bo may already
 * be MSM_MADV_DONTNEED, but we know the buffer is still on the
 * active list.
 */
void *msm_gem_get_vaddr_active(struct drm_gem_object *obj)
{
	return get_vaddr(obj, __MSM_MADV_PURGED);
}

void msm_gem_put_vaddr(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	mutex_lock(&msm_obj->lock);
	WARN_ON(msm_obj->vmap_count < 1);
	msm_obj->vmap_count--;
	mutex_unlock(&msm_obj->lock);
}

/* Update madvise status, returns true if not purged, else
 * false or -errno.
 */
int msm_gem_madvise(struct drm_gem_object *obj, unsigned madv)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	mutex_lock(&msm_obj->lock);

	WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));

	if (msm_obj->madv != __MSM_MADV_PURGED)
		msm_obj->madv = madv;

	madv = msm_obj->madv;

	mutex_unlock(&msm_obj->lock);

	return (madv != __MSM_MADV_PURGED);
}

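/* Purge a DONTNEED object: drop its iovas and kernel mapping, release
 * the backing pages, and truncate the shmem file so the memory goes
 * back to the system:
 */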
void msm_gem_purge(struct drm_gem_object *obj, enum msm_gem_lock subclass)
{
	struct drm_device *dev = obj->dev;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
	WARN_ON(!is_purgeable(msm_obj));
	WARN_ON(obj->import_attach);

	mutex_lock_nested(&msm_obj->lock, subclass);

	put_iova(obj);

	msm_gem_vunmap_locked(obj);

	put_pages(obj);

	msm_obj->madv = __MSM_MADV_PURGED;

	drm_vma_node_unmap(&obj->vma_node, dev->anon_inode->i_mapping);
	drm_gem_free_mmap_offset(obj);

	/* Our goal here is to return as much of the memory as
	 * possible back to the system as we are called from OOM.
	 * To do this we must instruct the shmfs to drop all of its
	 * backing pages, *now*.
	 */
	shmem_truncate_range(file_inode(obj->filp), 0, (loff_t)-1);

	invalidate_mapping_pages(file_inode(obj->filp)->i_mapping,
			0, (loff_t)-1);

	mutex_unlock(&msm_obj->lock);
}

static void msm_gem_vunmap_locked(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	WARN_ON(!mutex_is_locked(&msm_obj->lock));

	if (!msm_obj->vaddr || WARN_ON(!is_vunmapable(msm_obj)))
		return;

	vunmap(msm_obj->vaddr);
	msm_obj->vaddr = NULL;
}

void msm_gem_vunmap(struct drm_gem_object *obj, enum msm_gem_lock subclass)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	mutex_lock_nested(&msm_obj->lock, subclass);
	msm_gem_vunmap_locked(obj);
	mutex_unlock(&msm_obj->lock);
}

/* must be called before _move_to_active().. */
int msm_gem_sync_object(struct drm_gem_object *obj,
		struct msm_fence_context *fctx, bool exclusive)
{
	struct dma_resv_list *fobj;
	struct dma_fence *fence;
	int i, ret;

	fobj = dma_resv_get_list(obj->resv);
	if (!fobj || (fobj->shared_count == 0)) {
		fence = dma_resv_get_excl(obj->resv);
		/* don't need to wait on our own fences, since ring is fifo */
		if (fence && (fence->context != fctx->context)) {
			ret = dma_fence_wait(fence, true);
			if (ret)
				return ret;
		}
	}

	if (!exclusive || !fobj)
		return 0;

	for (i = 0; i < fobj->shared_count; i++) {
		fence = rcu_dereference_protected(fobj->shared[i],
						dma_resv_held(obj->resv));
		if (fence->context != fctx->context) {
			ret = dma_fence_wait(fence, true);
			if (ret)
				return ret;
		}
	}

	return 0;
}

void msm_gem_active_get(struct drm_gem_object *obj, struct msm_gpu *gpu)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));
	WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED);

	if (!atomic_fetch_inc(&msm_obj->active_count)) {
		msm_obj->gpu = gpu;
		list_del_init(&msm_obj->mm_list);
		list_add_tail(&msm_obj->mm_list, &gpu->active_list);
	}
}

void msm_gem_active_put(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_drm_private *priv = obj->dev->dev_private;

	WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));

	if (!atomic_dec_return(&msm_obj->active_count)) {
		msm_obj->gpu = NULL;
		list_del_init(&msm_obj->mm_list);
		list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
	}
}

int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout)
{
	bool write = !!(op & MSM_PREP_WRITE);
	unsigned long remain =
		op & MSM_PREP_NOSYNC ? 0 : timeout_to_jiffies(timeout);
	long ret;

	ret = dma_resv_wait_timeout_rcu(obj->resv, write,
			true, remain);
	if (ret == 0)
		return remain == 0 ? -EBUSY : -ETIMEDOUT;
	else if (ret < 0)
		return ret;

	/* TODO cache maintenance */

	return 0;
}

int msm_gem_cpu_fini(struct drm_gem_object *obj)
{
	/* TODO cache maintenance */
	return 0;
}

#ifdef CONFIG_DEBUG_FS
static void describe_fence(struct dma_fence *fence, const char *type,
		struct seq_file *m)
{
	if (!dma_fence_is_signaled(fence))
		seq_printf(m, "\t%9s: %s %s seq %llu\n", type,
				fence->ops->get_driver_name(fence),
				fence->ops->get_timeline_name(fence),
				fence->seqno);
}

void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct dma_resv *robj = obj->resv;
	struct dma_resv_list *fobj;
	struct dma_fence *fence;
	struct msm_gem_vma *vma;
	uint64_t off = drm_vma_node_start(&obj->vma_node);
	const char *madv;

	mutex_lock(&msm_obj->lock);

	switch (msm_obj->madv) {
	case __MSM_MADV_PURGED:
		madv = " purged";
		break;
	case MSM_MADV_DONTNEED:
		madv = " purgeable";
		break;
	case MSM_MADV_WILLNEED:
	default:
		madv = "";
		break;
	}

	seq_printf(m, "%08x: %c %2d (%2d) %08llx %p",
			msm_obj->flags, is_active(msm_obj) ? 'A' : 'I',
			obj->name, kref_read(&obj->refcount),
			off, msm_obj->vaddr);

	seq_printf(m, " %08zu %9s %-32s\n", obj->size, madv, msm_obj->name);

	if (!list_empty(&msm_obj->vmas)) {

		seq_puts(m, "      vmas:");

		list_for_each_entry(vma, &msm_obj->vmas, list) {
			const char *name, *comm;

			if (vma->aspace) {
				struct msm_gem_address_space *aspace = vma->aspace;
				struct task_struct *task =
					get_pid_task(aspace->pid, PIDTYPE_PID);
				if (task) {
					comm = kstrdup(task->comm, GFP_KERNEL);
					put_task_struct(task);
				} else {
					comm = NULL;
				}
				name = aspace->name;
			} else {
				name = comm = NULL;
			}
			seq_printf(m, " [%s%s%s: aspace=%p, %08llx,%s,inuse=%d]",
				name, comm ? ":" : "", comm ? comm : "",
				vma->aspace, vma->iova,
				vma->mapped ? "mapped" : "unmapped",
				vma->inuse);
			kfree(comm);
		}

		seq_puts(m, "\n");
	}

	rcu_read_lock();
	fobj = rcu_dereference(robj->fence);
	if (fobj) {
		unsigned int i, shared_count = fobj->shared_count;

		for (i = 0; i < shared_count; i++) {
			fence = rcu_dereference(fobj->shared[i]);
			describe_fence(fence, "Shared", m);
		}
	}

	fence = rcu_dereference(robj->fence_excl);
	if (fence)
		describe_fence(fence, "Exclusive", m);
	rcu_read_unlock();

	mutex_unlock(&msm_obj->lock);
}

void msm_gem_describe_objects(struct list_head *list, struct seq_file *m)
{
	struct msm_gem_object *msm_obj;
	int count = 0;
	size_t size = 0;

	seq_puts(m, "   flags       id ref  offset   kaddr            size     madv      name\n");
	list_for_each_entry(msm_obj, list, mm_list) {
		struct drm_gem_object *obj = &msm_obj->base;

		seq_puts(m, "   ");
		msm_gem_describe(obj, m);
		count++;
		size += obj->size;
	}

	seq_printf(m, "Total %d objects, %zu bytes\n", count, size);
}
#endif

/* don't call directly!  Use drm_gem_object_put_locked() and friends */
void msm_gem_free_object(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct drm_device *dev = obj->dev;
	struct msm_drm_private *priv = dev->dev_private;

	if (llist_add(&msm_obj->freed, &priv->free_list))
		queue_work(priv->wq, &priv->free_work);
}

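/* Actually release the object; runs from the free worker with
 * dev->struct_mutex held:
 */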
static void free_object(struct msm_gem_object *msm_obj)
{
	struct drm_gem_object *obj = &msm_obj->base;
	struct drm_device *dev = obj->dev;

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	/* object should not be on active list: */
	WARN_ON(is_active(msm_obj));

	list_del(&msm_obj->mm_list);

	mutex_lock(&msm_obj->lock);

	put_iova(obj);

	if (obj->import_attach) {
		WARN_ON(msm_obj->vaddr);

		/* Don't drop the pages for imported dmabuf, as they are not
		 * ours, just free the array we allocated:
		 */
		if (msm_obj->pages)
			kvfree(msm_obj->pages);

		drm_prime_gem_destroy(obj, msm_obj->sgt);
	} else {
		msm_gem_vunmap_locked(obj);
		put_pages(obj);
	}

	drm_gem_object_release(obj);

	mutex_unlock(&msm_obj->lock);
	kfree(msm_obj);
}

void msm_gem_free_work(struct work_struct *work)
{
	struct msm_drm_private *priv =
		container_of(work, struct msm_drm_private, free_work);
	struct drm_device *dev = priv->dev;
	struct llist_node *freed;
	struct msm_gem_object *msm_obj, *next;

	while ((freed = llist_del_all(&priv->free_list))) {

		mutex_lock(&dev->struct_mutex);

		llist_for_each_entry_safe(msm_obj, next,
					  freed, freed)
			free_object(msm_obj);

		mutex_unlock(&dev->struct_mutex);

		if (need_resched())
			break;
	}
}

/* convenience method to construct a GEM buffer object, and userspace handle */
int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file,
		uint32_t size, uint32_t flags, uint32_t *handle,
		char *name)
{
	struct drm_gem_object *obj;
	int ret;

	obj = msm_gem_new(dev, size, flags);

	if (IS_ERR(obj))
		return PTR_ERR(obj);

	if (name)
		msm_gem_object_set_name(obj, "%s", name);

	ret = drm_gem_handle_create(file, obj, handle);

	/* drop reference from allocate - handle holds it now */
	drm_gem_object_put(obj);

	return ret;
}

static int msm_gem_new_impl(struct drm_device *dev,
		uint32_t size, uint32_t flags,
		struct drm_gem_object **obj)
{
	struct msm_gem_object *msm_obj;

	switch (flags & MSM_BO_CACHE_MASK) {
	case MSM_BO_UNCACHED:
	case MSM_BO_CACHED:
	case MSM_BO_WC:
		break;
	default:
		DRM_DEV_ERROR(dev->dev, "invalid cache flag: %x\n",
				(flags & MSM_BO_CACHE_MASK));
		return -EINVAL;
	}

	msm_obj = kzalloc(sizeof(*msm_obj), GFP_KERNEL);
	if (!msm_obj)
		return -ENOMEM;

	mutex_init(&msm_obj->lock);

	msm_obj->flags = flags;
	msm_obj->madv = MSM_MADV_WILLNEED;

	INIT_LIST_HEAD(&msm_obj->submit_entry);
	INIT_LIST_HEAD(&msm_obj->vmas);

	*obj = &msm_obj->base;

	return 0;
}

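/* Common allocation path.  Without an MMU (or for stolen/scanout
 * buffers when a VRAM carveout is present) the object is backed by
 * carveout memory with a pre-populated vma; otherwise it is shmem
 * backed:
 */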
static struct drm_gem_object *_msm_gem_new(struct drm_device *dev,
		uint32_t size, uint32_t flags, bool struct_mutex_locked)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_gem_object *msm_obj;
	struct drm_gem_object *obj = NULL;
	bool use_vram = false;
	int ret;

	size = PAGE_ALIGN(size);

	if (!msm_use_mmu(dev))
		use_vram = true;
	else if ((flags & (MSM_BO_STOLEN | MSM_BO_SCANOUT)) && priv->vram.size)
		use_vram = true;

	if (WARN_ON(use_vram && !priv->vram.size))
		return ERR_PTR(-EINVAL);

	/* Disallow zero sized objects as they make the underlying
	 * infrastructure grumpy
	 */
	if (size == 0)
		return ERR_PTR(-EINVAL);

	ret = msm_gem_new_impl(dev, size, flags, &obj);
	if (ret)
		return ERR_PTR(ret);

	msm_obj = to_msm_bo(obj);

	if (use_vram) {
		struct msm_gem_vma *vma;
		struct page **pages;

		mutex_lock(&msm_obj->lock);

		vma = add_vma(obj, NULL);
		mutex_unlock(&msm_obj->lock);
		if (IS_ERR(vma)) {
			ret = PTR_ERR(vma);
			goto fail;
		}

		to_msm_bo(obj)->vram_node = &vma->node;

		drm_gem_private_object_init(dev, obj, size);

		pages = get_pages(obj);
		if (IS_ERR(pages)) {
			ret = PTR_ERR(pages);
			goto fail;
		}

		vma->iova = physaddr(obj);
	} else {
		ret = drm_gem_object_init(dev, obj, size);
		if (ret)
			goto fail;
		/*
		 * Our buffers are kept pinned, so allocating them from the
		 * MOVABLE zone is a really bad idea, and conflicts with CMA.
		 * See comments above new_inode() why this is required _and_
		 * expected if you're going to pin these pages.
		 */
		mapping_set_gfp_mask(obj->filp->f_mapping, GFP_HIGHUSER);
	}

	if (struct_mutex_locked) {
		WARN_ON(!mutex_is_locked(&dev->struct_mutex));
		list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
	} else {
		mutex_lock(&dev->struct_mutex);
		list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
		mutex_unlock(&dev->struct_mutex);
	}

	return obj;

fail:
	drm_gem_object_put(obj);
	return ERR_PTR(ret);
}

struct drm_gem_object *msm_gem_new_locked(struct drm_device *dev,
		uint32_t size, uint32_t flags)
{
	return _msm_gem_new(dev, size, flags, true);
}

struct drm_gem_object *msm_gem_new(struct drm_device *dev,
		uint32_t size, uint32_t flags)
{
	return _msm_gem_new(dev, size, flags, false);
}

struct drm_gem_object *msm_gem_import(struct drm_device *dev,
		struct dma_buf *dmabuf, struct sg_table *sgt)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_gem_object *msm_obj;
	struct drm_gem_object *obj;
	uint32_t size;
	int ret, npages;

	/* if we don't have IOMMU, don't bother pretending we can import: */
	if (!msm_use_mmu(dev)) {
		DRM_DEV_ERROR(dev->dev, "cannot import without IOMMU\n");
		return ERR_PTR(-EINVAL);
	}

	size = PAGE_ALIGN(dmabuf->size);

	ret = msm_gem_new_impl(dev, size, MSM_BO_WC, &obj);
	if (ret)
		return ERR_PTR(ret);

	drm_gem_private_object_init(dev, obj, size);

	npages = size / PAGE_SIZE;

	msm_obj = to_msm_bo(obj);
	mutex_lock(&msm_obj->lock);
	msm_obj->sgt = sgt;
	msm_obj->pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
	if (!msm_obj->pages) {
		mutex_unlock(&msm_obj->lock);
		ret = -ENOMEM;
		goto fail;
	}

	ret = drm_prime_sg_to_page_addr_arrays(sgt, msm_obj->pages, NULL, npages);
	if (ret) {
		mutex_unlock(&msm_obj->lock);
		goto fail;
	}

	mutex_unlock(&msm_obj->lock);

	mutex_lock(&dev->struct_mutex);
	list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
	mutex_unlock(&dev->struct_mutex);

	return obj;

fail:
	drm_gem_object_put(obj);
	return ERR_PTR(ret);
}

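/* Allocate a kernel-owned buffer, pin it into @aspace (if an iova is
 * requested), and return its kernel mapping:
 */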
static void *_msm_gem_kernel_new(struct drm_device *dev, uint32_t size,
		uint32_t flags, struct msm_gem_address_space *aspace,
		struct drm_gem_object **bo, uint64_t *iova, bool locked)
{
	void *vaddr;
	struct drm_gem_object *obj = _msm_gem_new(dev, size, flags, locked);
	int ret;

	if (IS_ERR(obj))
		return ERR_CAST(obj);

	if (iova) {
		ret = msm_gem_get_and_pin_iova(obj, aspace, iova);
		if (ret)
			goto err;
	}

	vaddr = msm_gem_get_vaddr(obj);
	if (IS_ERR(vaddr)) {
		msm_gem_unpin_iova(obj, aspace);
		ret = PTR_ERR(vaddr);
		goto err;
	}

	if (bo)
		*bo = obj;

	return vaddr;
err:
	if (locked)
		drm_gem_object_put_locked(obj);
	else
		drm_gem_object_put(obj);

	return ERR_PTR(ret);
}

void *msm_gem_kernel_new(struct drm_device *dev, uint32_t size,
		uint32_t flags, struct msm_gem_address_space *aspace,
		struct drm_gem_object **bo, uint64_t *iova)
{
	return _msm_gem_kernel_new(dev, size, flags, aspace, bo, iova, false);
}

void *msm_gem_kernel_new_locked(struct drm_device *dev, uint32_t size,
		uint32_t flags, struct msm_gem_address_space *aspace,
		struct drm_gem_object **bo, uint64_t *iova)
{
	return _msm_gem_kernel_new(dev, size, flags, aspace, bo, iova, true);
}

void msm_gem_kernel_put(struct drm_gem_object *bo,
		struct msm_gem_address_space *aspace, bool locked)
{
	if (IS_ERR_OR_NULL(bo))
		return;

	msm_gem_put_vaddr(bo);
	msm_gem_unpin_iova(bo, aspace);

	if (locked)
		drm_gem_object_put_locked(bo);
	else
		drm_gem_object_put(bo);
}

void msm_gem_object_set_name(struct drm_gem_object *bo, const char *fmt, ...)
{
	struct msm_gem_object *msm_obj = to_msm_bo(bo);
	va_list ap;

	if (!fmt)
		return;

	va_start(ap, fmt);
	vsnprintf(msm_obj->name, sizeof(msm_obj->name), fmt, ap);
	va_end(ap);
}