// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2015-2018 Etnaviv Project
 */

#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

#include "common.xml.h"
#include "etnaviv_cmdbuf.h"
#include "etnaviv_drv.h"
#include "etnaviv_gem.h"
#include "etnaviv_gpu.h"
#include "etnaviv_mmu.h"

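/*
 * Unmap a range from the page tables one 4K page at a time, stopping
 * early if the backend fails to unmap a page.
 */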
static void etnaviv_context_unmap(struct etnaviv_iommu_context *context,
				  unsigned long iova, size_t size)
{
	size_t unmapped_page, unmapped = 0;
	size_t pgsize = SZ_4K;

	if (!IS_ALIGNED(iova | size, pgsize)) {
		pr_err("unaligned: iova 0x%lx size 0x%zx min_pagesz 0x%zx\n",
		       iova, size, pgsize);
		return;
	}

	while (unmapped < size) {
		unmapped_page = context->global->ops->unmap(context, iova,
							    pgsize);
		if (!unmapped_page)
			break;

		iova += unmapped_page;
		unmapped += unmapped_page;
	}
}

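/*
 * Map a physically contiguous range into the page tables one 4K page
 * at a time. If any page fails to map, the partial mapping is unrolled.
 */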
static int etnaviv_context_map(struct etnaviv_iommu_context *context,
			       unsigned long iova, phys_addr_t paddr,
			       size_t size, int prot)
{
	unsigned long orig_iova = iova;
	size_t pgsize = SZ_4K;
	size_t orig_size = size;
	int ret = 0;

	if (!IS_ALIGNED(iova | paddr | size, pgsize)) {
		pr_err("unaligned: iova 0x%lx pa %pa size 0x%zx min_pagesz 0x%zx\n",
		       iova, &paddr, size, pgsize);
		return -EINVAL;
	}

	while (size) {
		ret = context->global->ops->map(context, iova, paddr, pgsize,
						prot);
		if (ret)
			break;

		iova += pgsize;
		paddr += pgsize;
		size -= pgsize;
	}

	/* unroll mapping in case something went wrong */
	if (ret)
		etnaviv_context_unmap(context, orig_iova, orig_size - size);

	return ret;
}

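/*
 * Map each DMA segment of a scatterlist into the GPU address space at
 * consecutive addresses starting at iova. On failure, the range mapped
 * so far is torn down again.
 */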
static int etnaviv_iommu_map(struct etnaviv_iommu_context *context, u32 iova,
			     struct sg_table *sgt, unsigned len, int prot)
{
	struct scatterlist *sg;
	unsigned int da = iova;
	unsigned int i;
	int ret;

	if (!context || !sgt)
		return -EINVAL;

	for_each_sgtable_dma_sg(sgt, sg, i) {
		u32 pa = sg_dma_address(sg) - sg->offset;
		size_t bytes = sg_dma_len(sg) + sg->offset;

		VERB("map[%d]: %08x %08x(%zx)", i, iova, pa, bytes);

		ret = etnaviv_context_map(context, da, pa, bytes, prot);
		if (ret)
			goto fail;

		da += bytes;
	}

	return 0;

fail:
	etnaviv_context_unmap(context, iova, da - iova);
	return ret;
}

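/*
 * Tear down the mapping of each DMA segment of a scatterlist from the
 * GPU address space.
 */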
static void etnaviv_iommu_unmap(struct etnaviv_iommu_context *context, u32 iova,
				struct sg_table *sgt, unsigned len)
{
	struct scatterlist *sg;
	unsigned int da = iova;
	int i;

	for_each_sgtable_dma_sg(sgt, sg, i) {
		size_t bytes = sg_dma_len(sg) + sg->offset;

		etnaviv_context_unmap(context, da, bytes);

		VERB("unmap[%d]: %08x(%zx)", i, iova, bytes);

		BUG_ON(!PAGE_ALIGNED(bytes));

		da += bytes;
	}
}

static void etnaviv_iommu_remove_mapping(struct etnaviv_iommu_context *context,
	struct etnaviv_vram_mapping *mapping)
{
	struct etnaviv_gem_object *etnaviv_obj = mapping->object;

	etnaviv_iommu_unmap(context, mapping->vram_node.start,
			    etnaviv_obj->sgt, etnaviv_obj->base.size);
	drm_mm_remove_node(&mapping->vram_node);
}

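/*
 * Allocate GPU address space for a mapping of the given size. If the
 * address space is fragmented, unpinned mappings are evicted via a
 * drm_mm scan and the allocation is retried in eviction mode.
 */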
static int etnaviv_iommu_find_iova(struct etnaviv_iommu_context *context,
				   struct drm_mm_node *node, size_t size)
{
	struct etnaviv_vram_mapping *free = NULL;
	enum drm_mm_insert_mode mode = DRM_MM_INSERT_LOW;
	int ret;

	lockdep_assert_held(&context->lock);

	while (1) {
		struct etnaviv_vram_mapping *m, *n;
		struct drm_mm_scan scan;
		struct list_head list;
		bool found;

		ret = drm_mm_insert_node_in_range(&context->mm, node,
						  size, 0, 0, 0, U64_MAX, mode);
		if (ret != -ENOSPC)
			break;

		/* Try to retire some entries */
		drm_mm_scan_init(&scan, &context->mm, size, 0, 0, mode);

		found = 0;
		INIT_LIST_HEAD(&list);
		list_for_each_entry(free, &context->mappings, mmu_node) {
			/* If this vram node has not been used, skip this. */
			if (!free->vram_node.mm)
				continue;

			/*
			 * If the iova is pinned, then it's in-use,
			 * so we must keep its mapping.
			 */
			if (free->use)
				continue;

			list_add(&free->scan_node, &list);
			if (drm_mm_scan_add_block(&scan, &free->vram_node)) {
				found = true;
				break;
			}
		}

		if (!found) {
			/* Nothing found, clean up and fail */
			list_for_each_entry_safe(m, n, &list, scan_node)
				BUG_ON(drm_mm_scan_remove_block(&scan, &m->vram_node));
			break;
		}

		/*
		 * drm_mm does not allow any other operations while
		 * scanning, so we have to remove all blocks first.
		 * If drm_mm_scan_remove_block() returns false, we
		 * can leave the block pinned.
		 */
		list_for_each_entry_safe(m, n, &list, scan_node)
			if (!drm_mm_scan_remove_block(&scan, &m->vram_node))
				list_del_init(&m->scan_node);

		/*
		 * Unmap the blocks which need to be reaped from the MMU.
		 * Clear the mmu pointer to prevent the mapping_get finding
		 * this mapping.
		 */
		list_for_each_entry_safe(m, n, &list, scan_node) {
			etnaviv_iommu_remove_mapping(context, m);
			etnaviv_iommu_context_put(m->context);
			m->context = NULL;
			list_del_init(&m->mmu_node);
			list_del_init(&m->scan_node);
		}

		mode = DRM_MM_INSERT_EVICT;

		/*
		 * We removed enough mappings so that the new allocation will
		 * succeed, retry the allocation one more time.
		 */
	}

	return ret;
}

static int etnaviv_iommu_insert_exact(struct etnaviv_iommu_context *context,
		   struct drm_mm_node *node, size_t size, u64 va)
{
	return drm_mm_insert_node_in_range(&context->mm, node, size, 0, 0, va,
					   va + size, DRM_MM_INSERT_LOWEST);
}

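/*
 * Map a GEM object into the GPU address space, either at the exact
 * address requested by the caller (va) or at the first suitable hole
 * found in the context.
 */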
int etnaviv_iommu_map_gem(struct etnaviv_iommu_context *context,
	struct etnaviv_gem_object *etnaviv_obj, u32 memory_base,
	struct etnaviv_vram_mapping *mapping, u64 va)
{
	struct sg_table *sgt = etnaviv_obj->sgt;
	struct drm_mm_node *node;
	int ret;

	lockdep_assert_held(&etnaviv_obj->lock);

	mutex_lock(&context->lock);

	/* v1 MMU can optimize single entry (contiguous) scatterlists */
	if (context->global->version == ETNAVIV_IOMMU_V1 &&
	    sgt->nents == 1 && !(etnaviv_obj->flags & ETNA_BO_FORCE_MMU)) {
		u32 iova;

		iova = sg_dma_address(sgt->sgl) - memory_base;
		if (iova < 0x80000000 - sg_dma_len(sgt->sgl)) {
			mapping->iova = iova;
			list_add_tail(&mapping->mmu_node, &context->mappings);
			ret = 0;
			goto unlock;
		}
	}

	node = &mapping->vram_node;

	if (va)
		ret = etnaviv_iommu_insert_exact(context, node,
						 etnaviv_obj->base.size, va);
	else
		ret = etnaviv_iommu_find_iova(context, node,
					      etnaviv_obj->base.size);
	if (ret < 0)
		goto unlock;

	mapping->iova = node->start;
	ret = etnaviv_iommu_map(context, node->start, sgt, etnaviv_obj->base.size,
				ETNAVIV_PROT_READ | ETNAVIV_PROT_WRITE);

	if (ret < 0) {
		drm_mm_remove_node(node);
		goto unlock;
	}

	list_add_tail(&mapping->mmu_node, &context->mappings);
	context->flush_seq++;
unlock:
	mutex_unlock(&context->lock);

	return ret;
}

void etnaviv_iommu_unmap_gem(struct etnaviv_iommu_context *context,
			     struct etnaviv_vram_mapping *mapping)
{
	WARN_ON(mapping->use);

	mutex_lock(&context->lock);

	/* Bail if the mapping has been reaped by another thread */
	if (!mapping->context) {
		mutex_unlock(&context->lock);
		return;
	}

	/* If the vram node is on the mm, unmap and remove the node */
	if (mapping->vram_node.mm == &context->mm)
		etnaviv_iommu_remove_mapping(context, mapping);

	list_del(&mapping->mmu_node);
	context->flush_seq++;
	mutex_unlock(&context->lock);
}

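/*
 * kref release function: unmap the cmdbuf suballocator from this
 * context and hand the context back to the MMU backend for freeing.
 */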
static void etnaviv_iommu_context_free(struct kref *kref)
{
	struct etnaviv_iommu_context *context =
		container_of(kref, struct etnaviv_iommu_context, refcount);

	etnaviv_cmdbuf_suballoc_unmap(context, &context->cmdbuf_mapping);

	context->global->ops->free(context);
}

void etnaviv_iommu_context_put(struct etnaviv_iommu_context *context)
{
	kref_put(&context->refcount, etnaviv_iommu_context_free);
}

struct etnaviv_iommu_context *
etnaviv_iommu_context_init(struct etnaviv_iommu_global *global,
			   struct etnaviv_cmdbuf_suballoc *suballoc)
{
	struct etnaviv_iommu_context *ctx;
	int ret;

	if (global->version == ETNAVIV_IOMMU_V1)
		ctx = etnaviv_iommuv1_context_alloc(global);
	else
		ctx = etnaviv_iommuv2_context_alloc(global);

	if (!ctx)
		return NULL;

	ret = etnaviv_cmdbuf_suballoc_map(suballoc, ctx, &ctx->cmdbuf_mapping,
					  global->memory_base);
	if (ret)
		goto out_free;

	if (global->version == ETNAVIV_IOMMU_V1 &&
	    ctx->cmdbuf_mapping.iova > 0x80000000) {
		dev_err(global->dev,
			"command buffer outside valid memory window\n");
		goto out_unmap;
	}

	return ctx;

out_unmap:
	etnaviv_cmdbuf_suballoc_unmap(ctx, &ctx->cmdbuf_mapping);
out_free:
	global->ops->free(ctx);
	return NULL;
}

void etnaviv_iommu_restore(struct etnaviv_gpu *gpu,
			   struct etnaviv_iommu_context *context)
{
	context->global->ops->restore(gpu, context);
}

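/*
 * Map the cmdbuf suballocator region into the GPU address space and
 * refcount the mapping, so concurrent users share a single entry.
 */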
int etnaviv_iommu_get_suballoc_va(struct etnaviv_iommu_context *context,
				  struct etnaviv_vram_mapping *mapping,
				  u32 memory_base, dma_addr_t paddr,
				  size_t size)
{
	mutex_lock(&context->lock);

	if (mapping->use > 0) {
		mapping->use++;
		mutex_unlock(&context->lock);
		return 0;
	}

	/*
	 * For MMUv1 we don't add the suballoc region to the pagetables, as
	 * those GPUs can only work with cmdbufs accessed through the linear
	 * window. Instead we manufacture a mapping to make it look uniform
	 * to the upper layers.
	 */
	if (context->global->version == ETNAVIV_IOMMU_V1) {
		mapping->iova = paddr - memory_base;
	} else {
		struct drm_mm_node *node = &mapping->vram_node;
		int ret;

		ret = etnaviv_iommu_find_iova(context, node, size);
		if (ret < 0) {
			mutex_unlock(&context->lock);
			return ret;
		}

		mapping->iova = node->start;
		ret = etnaviv_context_map(context, node->start, paddr, size,
					  ETNAVIV_PROT_READ);
		if (ret < 0) {
			drm_mm_remove_node(node);
			mutex_unlock(&context->lock);
			return ret;
		}

		context->flush_seq++;
	}

	list_add_tail(&mapping->mmu_node, &context->mappings);
	mapping->use = 1;

	mutex_unlock(&context->lock);

	return 0;
}

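/*
 * Drop a reference on the suballocator mapping and tear it down once
 * unused. MMUv1 mappings were only manufactured from the linear
 * window, so there are no page table entries to remove.
 */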
void etnaviv_iommu_put_suballoc_va(struct etnaviv_iommu_context *context,
		  struct etnaviv_vram_mapping *mapping)
{
	struct drm_mm_node *node = &mapping->vram_node;

	mutex_lock(&context->lock);
	mapping->use--;

	if (mapping->use > 0 || context->global->version == ETNAVIV_IOMMU_V1) {
		mutex_unlock(&context->lock);
		return;
	}

	etnaviv_context_unmap(context, node->start, node->size);
	drm_mm_remove_node(node);
	mutex_unlock(&context->lock);
}

size_t etnaviv_iommu_dump_size(struct etnaviv_iommu_context *context)
{
	return context->global->ops->dump_size(context);
}

void etnaviv_iommu_dump(struct etnaviv_iommu_context *context, void *buf)
{
	context->global->ops->dump(context, buf);
}

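/*
 * Set up the global MMU state shared between all GPU cores of a device:
 * detect the MMU version, allocate the shared bad page and, for MMUv2,
 * the PTA, and pick the matching backend ops.
 */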
int etnaviv_iommu_global_init(struct etnaviv_gpu *gpu)
{
	enum etnaviv_iommu_version version = ETNAVIV_IOMMU_V1;
	struct etnaviv_drm_private *priv = gpu->drm->dev_private;
	struct etnaviv_iommu_global *global;
	struct device *dev = gpu->drm->dev;

	if (gpu->identity.minor_features1 & chipMinorFeatures1_MMU_VERSION)
		version = ETNAVIV_IOMMU_V2;

	if (priv->mmu_global) {
		if (priv->mmu_global->version != version) {
			dev_err(gpu->dev,
				"MMU version doesn't match global version\n");
			return -ENXIO;
		}

		priv->mmu_global->use++;
		return 0;
	}

	global = kzalloc(sizeof(*global), GFP_KERNEL);
	if (!global)
		return -ENOMEM;

	global->bad_page_cpu = dma_alloc_wc(dev, SZ_4K, &global->bad_page_dma,
					    GFP_KERNEL);
	if (!global->bad_page_cpu)
		goto free_global;

	memset32(global->bad_page_cpu, 0xdead55aa, SZ_4K / sizeof(u32));

	if (version == ETNAVIV_IOMMU_V2) {
		global->v2.pta_cpu = dma_alloc_wc(dev, ETNAVIV_PTA_SIZE,
						  &global->v2.pta_dma, GFP_KERNEL);
		if (!global->v2.pta_cpu)
			goto free_bad_page;
	}

	global->dev = dev;
	global->version = version;
	global->use = 1;
	mutex_init(&global->lock);

	if (version == ETNAVIV_IOMMU_V1)
		global->ops = &etnaviv_iommuv1_ops;
	else
		global->ops = &etnaviv_iommuv2_ops;

	priv->mmu_global = global;

	return 0;

free_bad_page:
	dma_free_wc(dev, SZ_4K, global->bad_page_cpu, global->bad_page_dma);
free_global:
	kfree(global);

	return -ENOMEM;
}

void etnaviv_iommu_global_fini(struct etnaviv_gpu *gpu)
{
	struct etnaviv_drm_private *priv = gpu->drm->dev_private;
	struct etnaviv_iommu_global *global = priv->mmu_global;

	if (--global->use > 0)
		return;

	if (global->v2.pta_cpu)
		dma_free_wc(global->dev, ETNAVIV_PTA_SIZE,
			    global->v2.pta_cpu, global->v2.pta_dma);

	if (global->bad_page_cpu)
		dma_free_wc(global->dev, SZ_4K,
			    global->bad_page_cpu, global->bad_page_dma);

	mutex_destroy(&global->lock);
	kfree(global);

	priv->mmu_global = NULL;
}