1*4882a593Smuzhiyun // SPDX-License-Identifier: GPL-2.0-only
2*4882a593Smuzhiyun /*
3*4882a593Smuzhiyun * Copyright (C) 2016 Red Hat
4*4882a593Smuzhiyun * Author: Rob Clark <robdclark@gmail.com>
5*4882a593Smuzhiyun */
6*4882a593Smuzhiyun
7*4882a593Smuzhiyun #include "msm_drv.h"
8*4882a593Smuzhiyun #include "msm_gem.h"
9*4882a593Smuzhiyun #include "msm_mmu.h"
10*4882a593Smuzhiyun
11*4882a593Smuzhiyun static void
msm_gem_address_space_destroy(struct kref * kref)12*4882a593Smuzhiyun msm_gem_address_space_destroy(struct kref *kref)
13*4882a593Smuzhiyun {
14*4882a593Smuzhiyun struct msm_gem_address_space *aspace = container_of(kref,
15*4882a593Smuzhiyun struct msm_gem_address_space, kref);
16*4882a593Smuzhiyun
17*4882a593Smuzhiyun drm_mm_takedown(&aspace->mm);
18*4882a593Smuzhiyun if (aspace->mmu)
19*4882a593Smuzhiyun aspace->mmu->funcs->destroy(aspace->mmu);
20*4882a593Smuzhiyun put_pid(aspace->pid);
21*4882a593Smuzhiyun kfree(aspace);
22*4882a593Smuzhiyun }
23*4882a593Smuzhiyun
24*4882a593Smuzhiyun
msm_gem_address_space_put(struct msm_gem_address_space * aspace)25*4882a593Smuzhiyun void msm_gem_address_space_put(struct msm_gem_address_space *aspace)
26*4882a593Smuzhiyun {
27*4882a593Smuzhiyun if (aspace)
28*4882a593Smuzhiyun kref_put(&aspace->kref, msm_gem_address_space_destroy);
29*4882a593Smuzhiyun }
30*4882a593Smuzhiyun
31*4882a593Smuzhiyun struct msm_gem_address_space *
msm_gem_address_space_get(struct msm_gem_address_space * aspace)32*4882a593Smuzhiyun msm_gem_address_space_get(struct msm_gem_address_space *aspace)
33*4882a593Smuzhiyun {
34*4882a593Smuzhiyun if (!IS_ERR_OR_NULL(aspace))
35*4882a593Smuzhiyun kref_get(&aspace->kref);
36*4882a593Smuzhiyun
37*4882a593Smuzhiyun return aspace;
38*4882a593Smuzhiyun }
39*4882a593Smuzhiyun
/* Actually unmap memory for the vma */
void msm_gem_purge_vma(struct msm_gem_address_space *aspace,
		struct msm_gem_vma *vma)
{
	/* node.size is in pages (see msm_gem_init_vma); convert to bytes
	 * for the MMU unmap.
	 * NOTE(review): node.size is a 64-bit field while 'size' is
	 * unsigned -- this could truncate for very large mappings; confirm
	 * the maximum vma size before relying on it.
	 */
	unsigned size = vma->node.size << PAGE_SHIFT;

	/* Print a message if we try to purge a vma in use */
	if (WARN_ON(vma->inuse > 0))
		return;

	/* Don't do anything if the memory isn't mapped */
	if (!vma->mapped)
		return;

	/* aspace->mmu may be NULL; in that case there is nothing to unmap */
	if (aspace->mmu)
		aspace->mmu->funcs->unmap(aspace->mmu, vma->iova, size);

	vma->mapped = false;
}
59*4882a593Smuzhiyun
60*4882a593Smuzhiyun /* Remove reference counts for the mapping */
msm_gem_unmap_vma(struct msm_gem_address_space * aspace,struct msm_gem_vma * vma)61*4882a593Smuzhiyun void msm_gem_unmap_vma(struct msm_gem_address_space *aspace,
62*4882a593Smuzhiyun struct msm_gem_vma *vma)
63*4882a593Smuzhiyun {
64*4882a593Smuzhiyun if (!WARN_ON(!vma->iova))
65*4882a593Smuzhiyun vma->inuse--;
66*4882a593Smuzhiyun }
67*4882a593Smuzhiyun
/*
 * Map the vma's backing pages into the address space.  Returns 0 on
 * success (including the already-mapped case) or a negative error code
 * from the MMU map callback.
 */
int
msm_gem_map_vma(struct msm_gem_address_space *aspace,
		struct msm_gem_vma *vma, int prot,
		struct sg_table *sgt, int npages)
{
	unsigned size = npages << PAGE_SHIFT;
	int ret = 0;

	/* The iova must have been allocated first (msm_gem_init_vma). */
	if (WARN_ON(!vma->iova))
		return -EINVAL;

	/* Increase the usage counter */
	vma->inuse++;

	/* Count the use even when already mapped -- each successful call is
	 * balanced by one msm_gem_unmap_vma(). */
	if (vma->mapped)
		return 0;

	vma->mapped = true;

	/* aspace (or its mmu) may be NULL; then the flag flip is all we do */
	if (aspace && aspace->mmu)
		ret = aspace->mmu->funcs->map(aspace->mmu, vma->iova, sgt,
				size, prot);

	/* On failure roll back both the mapped flag and the use count */
	if (ret) {
		vma->mapped = false;
		vma->inuse--;
	}

	return ret;
}
98*4882a593Smuzhiyun
/* Close an iova.  Warn if it is still in use */
void msm_gem_close_vma(struct msm_gem_address_space *aspace,
		struct msm_gem_vma *vma)
{
	/* Refuse to release an iova that is still mapped or in use. */
	if (WARN_ON(vma->inuse > 0 || vma->mapped))
		return;

	/* aspace->lock serializes access to the drm_mm allocator */
	spin_lock(&aspace->lock);
	if (vma->iova)
		drm_mm_remove_node(&vma->node);
	spin_unlock(&aspace->lock);

	vma->iova = 0;

	/* Drop the reference taken in msm_gem_init_vma() */
	msm_gem_address_space_put(aspace);
}
115*4882a593Smuzhiyun
/* Initialize a new vma and allocate an iova for it */
int msm_gem_init_vma(struct msm_gem_address_space *aspace,
		struct msm_gem_vma *vma, int npages,
		u64 range_start, u64 range_end)
{
	int ret;

	/* A non-zero iova means this vma was already initialized. */
	if (WARN_ON(vma->iova))
		return -EBUSY;

	/* The drm_mm allocator is managed in page units (node.start is
	 * shifted by PAGE_SHIFT below), so range_start/range_end are
	 * presumably page numbers too -- verify against callers. */
	spin_lock(&aspace->lock);
	ret = drm_mm_insert_node_in_range(&aspace->mm, &vma->node, npages, 0,
		0, range_start, range_end, 0);
	spin_unlock(&aspace->lock);

	if (ret)
		return ret;

	vma->iova = vma->node.start << PAGE_SHIFT;
	vma->mapped = false;

	/* Keep the address space alive while this vma holds an iova;
	 * the reference is dropped in msm_gem_close_vma(). */
	kref_get(&aspace->kref);

	return 0;
}
141*4882a593Smuzhiyun
142*4882a593Smuzhiyun struct msm_gem_address_space *
msm_gem_address_space_create(struct msm_mmu * mmu,const char * name,u64 va_start,u64 size)143*4882a593Smuzhiyun msm_gem_address_space_create(struct msm_mmu *mmu, const char *name,
144*4882a593Smuzhiyun u64 va_start, u64 size)
145*4882a593Smuzhiyun {
146*4882a593Smuzhiyun struct msm_gem_address_space *aspace;
147*4882a593Smuzhiyun
148*4882a593Smuzhiyun if (IS_ERR(mmu))
149*4882a593Smuzhiyun return ERR_CAST(mmu);
150*4882a593Smuzhiyun
151*4882a593Smuzhiyun aspace = kzalloc(sizeof(*aspace), GFP_KERNEL);
152*4882a593Smuzhiyun if (!aspace)
153*4882a593Smuzhiyun return ERR_PTR(-ENOMEM);
154*4882a593Smuzhiyun
155*4882a593Smuzhiyun spin_lock_init(&aspace->lock);
156*4882a593Smuzhiyun aspace->name = name;
157*4882a593Smuzhiyun aspace->mmu = mmu;
158*4882a593Smuzhiyun
159*4882a593Smuzhiyun drm_mm_init(&aspace->mm, va_start >> PAGE_SHIFT, size >> PAGE_SHIFT);
160*4882a593Smuzhiyun
161*4882a593Smuzhiyun kref_init(&aspace->kref);
162*4882a593Smuzhiyun
163*4882a593Smuzhiyun return aspace;
164*4882a593Smuzhiyun }
165