// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2014-2018 Etnaviv Project
 */

#include <linux/bitops.h>
#include <linux/dma-mapping.h>
#include <linux/platform_device.h>
#include <linux/sizes.h>
#include <linux/slab.h>

#include "etnaviv_gpu.h"
#include "etnaviv_mmu.h"
#include "state_hi.xml.h"

#define PT_SIZE		SZ_2M
#define PT_ENTRIES	(PT_SIZE / sizeof(u32))

#define GPU_MEM_START	0x80000000

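/*
 * The MMUv1 page table is a single flat array of 32-bit entries, one per
 * 4 KiB page: 2 MiB worth of entries cover the 2 GiB GPU address window
 * starting at GPU_MEM_START. The context below holds both the CPU mapping
 * and the DMA address of that table.
 */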
struct etnaviv_iommuv1_context {
	struct etnaviv_iommu_context base;
	u32 *pgtable_cpu;
	dma_addr_t pgtable_dma;
};

static struct etnaviv_iommuv1_context *
to_v1_context(struct etnaviv_iommu_context *context)
{
	return container_of(context, struct etnaviv_iommuv1_context, base);
}

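/*
 * Tear down the shared context: release the GPU VA manager, free the page
 * table and clear the global shared_context pointer, so a later allocation
 * starts from scratch.
 */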
static void etnaviv_iommuv1_free(struct etnaviv_iommu_context *context)
{
	struct etnaviv_iommuv1_context *v1_context = to_v1_context(context);

	drm_mm_takedown(&context->mm);

	dma_free_wc(context->global->dev, PT_SIZE, v1_context->pgtable_cpu,
		    v1_context->pgtable_dma);

	context->global->v1.shared_context = NULL;

	kfree(v1_context);
}

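/*
 * MMUv1 only knows fixed 4 KiB pages, so a mapping is a single table entry:
 * the index is derived from the IOVA's offset into the GPU address window.
 */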
static int etnaviv_iommuv1_map(struct etnaviv_iommu_context *context,
			       unsigned long iova, phys_addr_t paddr,
			       size_t size, int prot)
{
	struct etnaviv_iommuv1_context *v1_context = to_v1_context(context);
	unsigned int index = (iova - GPU_MEM_START) / SZ_4K;

	if (size != SZ_4K)
		return -EINVAL;

	v1_context->pgtable_cpu[index] = paddr;

	return 0;
}

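/*
 * Unmap doesn't clear the entry; it points it back at the bad page, so any
 * stray GPU access hits a harmless scratch page instead of whatever memory
 * was mapped there before.
 */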
static size_t etnaviv_iommuv1_unmap(struct etnaviv_iommu_context *context,
				    unsigned long iova, size_t size)
{
	struct etnaviv_iommuv1_context *v1_context = to_v1_context(context);
	unsigned int index = (iova - GPU_MEM_START) / SZ_4K;

	if (size != SZ_4K)
		return -EINVAL;

	v1_context->pgtable_cpu[index] = context->global->bad_page_dma;

	return SZ_4K;
}

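/* Dump support is a straight snapshot of the whole page table. */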
static size_t etnaviv_iommuv1_dump_size(struct etnaviv_iommu_context *context)
{
	return PT_SIZE;
}

static void etnaviv_iommuv1_dump(struct etnaviv_iommu_context *context,
				 void *buf)
{
	struct etnaviv_iommuv1_context *v1_context = to_v1_context(context);

	memcpy(buf, v1_context->pgtable_cpu, PT_SIZE);
}

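/*
 * Attach the context to the GPU: every engine (RA/FE/TX/PEZ/PE) is given
 * the common memory base address and its own copy of the page table pointer.
 */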
static void etnaviv_iommuv1_restore(struct etnaviv_gpu *gpu,
				    struct etnaviv_iommu_context *context)
{
	struct etnaviv_iommuv1_context *v1_context = to_v1_context(context);
	u32 pgtable;

	if (gpu->mmu_context)
		etnaviv_iommu_context_put(gpu->mmu_context);
	gpu->mmu_context = etnaviv_iommu_context_get(context);

	/* set base addresses */
	gpu_write(gpu, VIVS_MC_MEMORY_BASE_ADDR_RA, context->global->memory_base);
	gpu_write(gpu, VIVS_MC_MEMORY_BASE_ADDR_FE, context->global->memory_base);
	gpu_write(gpu, VIVS_MC_MEMORY_BASE_ADDR_TX, context->global->memory_base);
	gpu_write(gpu, VIVS_MC_MEMORY_BASE_ADDR_PEZ, context->global->memory_base);
	gpu_write(gpu, VIVS_MC_MEMORY_BASE_ADDR_PE, context->global->memory_base);

	/* set page table address in MC */
	pgtable = (u32)v1_context->pgtable_dma;

	gpu_write(gpu, VIVS_MC_MMU_FE_PAGE_TABLE, pgtable);
	gpu_write(gpu, VIVS_MC_MMU_TX_PAGE_TABLE, pgtable);
	gpu_write(gpu, VIVS_MC_MMU_PE_PAGE_TABLE, pgtable);
	gpu_write(gpu, VIVS_MC_MMU_PEZ_PAGE_TABLE, pgtable);
	gpu_write(gpu, VIVS_MC_MMU_RA_PAGE_TABLE, pgtable);
}

const struct etnaviv_iommu_ops etnaviv_iommuv1_ops = {
	.free = etnaviv_iommuv1_free,
	.map = etnaviv_iommuv1_map,
	.unmap = etnaviv_iommuv1_unmap,
	.dump_size = etnaviv_iommuv1_dump_size,
	.dump = etnaviv_iommuv1_dump,
	.restore = etnaviv_iommuv1_restore,
};

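/*
 * Allocate the MMUv1 context, or take a reference on the already existing
 * shared one (see the comment below on why there can only be one).
 */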
struct etnaviv_iommu_context *
etnaviv_iommuv1_context_alloc(struct etnaviv_iommu_global *global)
{
	struct etnaviv_iommuv1_context *v1_context;
	struct etnaviv_iommu_context *context;

	mutex_lock(&global->lock);

	/*
	 * MMUv1 does not support switching between different contexts without
	 * a stop the world operation, so we only support a single shared
	 * context with this version.
	 */
	if (global->v1.shared_context) {
		context = global->v1.shared_context;
		etnaviv_iommu_context_get(context);
		mutex_unlock(&global->lock);
		return context;
	}

	v1_context = kzalloc(sizeof(*v1_context), GFP_KERNEL);
	if (!v1_context) {
		mutex_unlock(&global->lock);
		return NULL;
	}

	v1_context->pgtable_cpu = dma_alloc_wc(global->dev, PT_SIZE,
					       &v1_context->pgtable_dma,
					       GFP_KERNEL);
	if (!v1_context->pgtable_cpu)
		goto out_free;

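	/* Start with every entry pointing at the bad page, matching unmap(). */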
	memset32(v1_context->pgtable_cpu, global->bad_page_dma, PT_ENTRIES);

	context = &v1_context->base;
	context->global = global;
	kref_init(&context->refcount);
	mutex_init(&context->lock);
	INIT_LIST_HEAD(&context->mappings);
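	/* The GPU VA allocator covers the full 2 GiB window behind the table. */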
	drm_mm_init(&context->mm, GPU_MEM_START, PT_ENTRIES * SZ_4K);
	context->global->v1.shared_context = context;

	mutex_unlock(&global->lock);

	return context;

out_free:
	mutex_unlock(&global->lock);
	kfree(v1_context);
	return NULL;
}