xref: /OK3568_Linux_fs/kernel/drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2016-2018 Etnaviv Project
 */

#include <linux/bitops.h>
#include <linux/dma-mapping.h>
#include <linux/platform_device.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include "etnaviv_cmdbuf.h"
#include "etnaviv_gpu.h"
#include "etnaviv_mmu.h"
#include "state.xml.h"
#include "state_hi.xml.h"

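/*
 * MMUv2 uses a two-level page table: bits [31:22] of a GPU virtual
 * address index the master TLB (MTLB), bits [21:12] index the slave
 * TLB (STLB) and the low 12 bits are the offset into the 4K page.
 */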
#define MMUv2_PTE_PRESENT		BIT(0)
#define MMUv2_PTE_EXCEPTION		BIT(1)
#define MMUv2_PTE_WRITEABLE		BIT(2)

#define MMUv2_MTLB_MASK			0xffc00000
#define MMUv2_MTLB_SHIFT		22
#define MMUv2_STLB_MASK			0x003ff000
#define MMUv2_STLB_SHIFT		12

#define MMUv2_MAX_STLB_ENTRIES		1024

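/*
 * Per-context state. The id is this context's slot in the global page
 * table array (PTA), through which the MMU finds the master TLB when
 * running in secure mode.
 */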
struct etnaviv_iommuv2_context {
	struct etnaviv_iommu_context base;
	unsigned short id;
	/* M(aster) TLB aka first level pagetable */
	u32 *mtlb_cpu;
	dma_addr_t mtlb_dma;
	/* S(lave) TLB aka second level pagetable */
	u32 *stlb_cpu[MMUv2_MAX_STLB_ENTRIES];
	dma_addr_t stlb_dma[MMUv2_MAX_STLB_ENTRIES];
};

static struct etnaviv_iommuv2_context *
to_v2_context(struct etnaviv_iommu_context *context)
{
	return container_of(context, struct etnaviv_iommuv2_context, base);
}

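/*
 * Tear down a context: drain the address space manager, free every
 * allocated second-level table as well as the master TLB, and release
 * the context's slot in the global PTA allocator.
 */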
static void etnaviv_iommuv2_free(struct etnaviv_iommu_context *context)
{
	struct etnaviv_iommuv2_context *v2_context = to_v2_context(context);
	int i;

	drm_mm_takedown(&context->mm);

	for (i = 0; i < MMUv2_MAX_STLB_ENTRIES; i++) {
		if (v2_context->stlb_cpu[i])
			dma_free_wc(context->global->dev, SZ_4K,
				    v2_context->stlb_cpu[i],
				    v2_context->stlb_dma[i]);
	}

	dma_free_wc(context->global->dev, SZ_4K, v2_context->mtlb_cpu,
		    v2_context->mtlb_dma);

	clear_bit(v2_context->id, context->global->v2.pta_alloc);

	vfree(v2_context);
}
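
/*
 * Allocate the second-level table for one MTLB slot on first use,
 * initialize it with exception PTEs and link it into the master TLB.
 */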
static int
etnaviv_iommuv2_ensure_stlb(struct etnaviv_iommuv2_context *v2_context,
			    int stlb)
{
	if (v2_context->stlb_cpu[stlb])
		return 0;

	v2_context->stlb_cpu[stlb] =
			dma_alloc_wc(v2_context->base.global->dev, SZ_4K,
				     &v2_context->stlb_dma[stlb],
				     GFP_KERNEL);

	if (!v2_context->stlb_cpu[stlb])
		return -ENOMEM;

	memset32(v2_context->stlb_cpu[stlb], MMUv2_PTE_EXCEPTION,
		 SZ_4K / sizeof(u32));

	v2_context->mtlb_cpu[stlb] =
			v2_context->stlb_dma[stlb] | MMUv2_PTE_PRESENT;

	return 0;
}

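/*
 * Map a single 4K page: build the PTE, including physical address bits
 * [39:32] when CONFIG_PHYS_ADDR_T_64BIT is enabled, make sure the
 * second-level table for this address exists and write the entry.
 */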
static int etnaviv_iommuv2_map(struct etnaviv_iommu_context *context,
			       unsigned long iova, phys_addr_t paddr,
			       size_t size, int prot)
{
	struct etnaviv_iommuv2_context *v2_context = to_v2_context(context);
	int mtlb_entry, stlb_entry, ret;
	u32 entry = lower_32_bits(paddr) | MMUv2_PTE_PRESENT;

	if (size != SZ_4K)
		return -EINVAL;

	if (IS_ENABLED(CONFIG_PHYS_ADDR_T_64BIT))
		entry |= (upper_32_bits(paddr) & 0xff) << 4;

	if (prot & ETNAVIV_PROT_WRITE)
		entry |= MMUv2_PTE_WRITEABLE;

	mtlb_entry = (iova & MMUv2_MTLB_MASK) >> MMUv2_MTLB_SHIFT;
	stlb_entry = (iova & MMUv2_STLB_MASK) >> MMUv2_STLB_SHIFT;

	ret = etnaviv_iommuv2_ensure_stlb(v2_context, mtlb_entry);
	if (ret)
		return ret;

	v2_context->stlb_cpu[mtlb_entry][stlb_entry] = entry;

	return 0;
}

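/* Unmap a single 4K page by restoring the exception PTE. */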
static size_t etnaviv_iommuv2_unmap(struct etnaviv_iommu_context *context,
				    unsigned long iova, size_t size)
{
	struct etnaviv_iommuv2_context *etnaviv_domain = to_v2_context(context);
	int mtlb_entry, stlb_entry;

	if (size != SZ_4K)
		return -EINVAL;

	mtlb_entry = (iova & MMUv2_MTLB_MASK) >> MMUv2_MTLB_SHIFT;
	stlb_entry = (iova & MMUv2_STLB_MASK) >> MMUv2_STLB_SHIFT;

	etnaviv_domain->stlb_cpu[mtlb_entry][stlb_entry] = MMUv2_PTE_EXCEPTION;

	return SZ_4K;
}

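/*
 * A page table dump contains the master TLB plus one page for each
 * second-level table that is currently present.
 */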
static size_t etnaviv_iommuv2_dump_size(struct etnaviv_iommu_context *context)
{
	struct etnaviv_iommuv2_context *v2_context = to_v2_context(context);
	size_t dump_size = SZ_4K;
	int i;

	for (i = 0; i < MMUv2_MAX_STLB_ENTRIES; i++)
		if (v2_context->mtlb_cpu[i] & MMUv2_PTE_PRESENT)
			dump_size += SZ_4K;

	return dump_size;
}

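/* Copy the master TLB and every present second-level table into buf. */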
static void etnaviv_iommuv2_dump(struct etnaviv_iommu_context *context, void *buf)
{
	struct etnaviv_iommuv2_context *v2_context = to_v2_context(context);
	int i;

	memcpy(buf, v2_context->mtlb_cpu, SZ_4K);
	buf += SZ_4K;
	for (i = 0; i < MMUv2_MAX_STLB_ENTRIES; i++)
		if (v2_context->mtlb_cpu[i] & MMUv2_PTE_PRESENT) {
			memcpy(buf, v2_context->stlb_cpu[i], SZ_4K);
			buf += SZ_4K;
		}
}

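/*
 * Program the MMU in non-secure mode: point it at this context's master
 * TLB via a command stream executed by the front end, then set the
 * enable bit.
 */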
static void etnaviv_iommuv2_restore_nonsec(struct etnaviv_gpu *gpu,
	struct etnaviv_iommu_context *context)
{
	struct etnaviv_iommuv2_context *v2_context = to_v2_context(context);
	u16 prefetch;

	/* If the MMU is already enabled the state is still there. */
	if (gpu_read(gpu, VIVS_MMUv2_CONTROL) & VIVS_MMUv2_CONTROL_ENABLE)
		return;

	if (gpu->mmu_context)
		etnaviv_iommu_context_put(gpu->mmu_context);
	gpu->mmu_context = etnaviv_iommu_context_get(context);

	prefetch = etnaviv_buffer_config_mmuv2(gpu,
				(u32)v2_context->mtlb_dma,
				(u32)context->global->bad_page_dma);
	etnaviv_gpu_start_fe(gpu, (u32)etnaviv_cmdbuf_get_pa(&gpu->buffer),
			     prefetch);
	etnaviv_gpu_wait_idle(gpu, 100);

	gpu_write(gpu, VIVS_MMUv2_CONTROL, VIVS_MMUv2_CONTROL_ENABLE);
}

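/*
 * Program the MMU in secure mode: configure the global PTA and the safe
 * (fault fallback) addresses, publish this context's master TLB in its
 * PTA slot, trigger a PTA load through the front end and finally enable
 * the MMU.
 */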
static void etnaviv_iommuv2_restore_sec(struct etnaviv_gpu *gpu,
	struct etnaviv_iommu_context *context)
{
	struct etnaviv_iommuv2_context *v2_context = to_v2_context(context);
	u16 prefetch;

	/* If the MMU is already enabled the state is still there. */
	if (gpu_read(gpu, VIVS_MMUv2_SEC_CONTROL) & VIVS_MMUv2_SEC_CONTROL_ENABLE)
		return;

	if (gpu->mmu_context)
		etnaviv_iommu_context_put(gpu->mmu_context);
	gpu->mmu_context = etnaviv_iommu_context_get(context);

	gpu_write(gpu, VIVS_MMUv2_PTA_ADDRESS_LOW,
		  lower_32_bits(context->global->v2.pta_dma));
	gpu_write(gpu, VIVS_MMUv2_PTA_ADDRESS_HIGH,
		  upper_32_bits(context->global->v2.pta_dma));
	gpu_write(gpu, VIVS_MMUv2_PTA_CONTROL, VIVS_MMUv2_PTA_CONTROL_ENABLE);

	gpu_write(gpu, VIVS_MMUv2_NONSEC_SAFE_ADDR_LOW,
		  lower_32_bits(context->global->bad_page_dma));
	gpu_write(gpu, VIVS_MMUv2_SEC_SAFE_ADDR_LOW,
		  lower_32_bits(context->global->bad_page_dma));
	gpu_write(gpu, VIVS_MMUv2_SAFE_ADDRESS_CONFIG,
		  VIVS_MMUv2_SAFE_ADDRESS_CONFIG_NON_SEC_SAFE_ADDR_HIGH(
		  upper_32_bits(context->global->bad_page_dma)) |
		  VIVS_MMUv2_SAFE_ADDRESS_CONFIG_SEC_SAFE_ADDR_HIGH(
		  upper_32_bits(context->global->bad_page_dma)));

	context->global->v2.pta_cpu[v2_context->id] = v2_context->mtlb_dma |
				VIVS_MMUv2_CONFIGURATION_MODE_MODE4_K;

	/* trigger a PTA load through the FE */
	prefetch = etnaviv_buffer_config_pta(gpu, v2_context->id);
	etnaviv_gpu_start_fe(gpu, (u32)etnaviv_cmdbuf_get_pa(&gpu->buffer),
			     prefetch);
	etnaviv_gpu_wait_idle(gpu, 100);

	gpu_write(gpu, VIVS_MMUv2_SEC_CONTROL, VIVS_MMUv2_SEC_CONTROL_ENABLE);
}

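/* Expose per-context MMU parameters to code outside this file. */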
u32 etnaviv_iommuv2_get_mtlb_addr(struct etnaviv_iommu_context *context)
{
	struct etnaviv_iommuv2_context *v2_context = to_v2_context(context);

	return v2_context->mtlb_dma;
}

unsigned short etnaviv_iommuv2_get_pta_id(struct etnaviv_iommu_context *context)
{
	struct etnaviv_iommuv2_context *v2_context = to_v2_context(context);

	return v2_context->id;
}
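
/* Pick the restore path matching the GPU's security mode. */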
static void etnaviv_iommuv2_restore(struct etnaviv_gpu *gpu,
				    struct etnaviv_iommu_context *context)
{
	switch (gpu->sec_mode) {
	case ETNA_SEC_NONE:
		etnaviv_iommuv2_restore_nonsec(gpu, context);
		break;
	case ETNA_SEC_KERNEL:
		etnaviv_iommuv2_restore_sec(gpu, context);
		break;
	default:
		WARN(1, "unhandled GPU security mode\n");
		break;
	}
}

const struct etnaviv_iommu_ops etnaviv_iommuv2_ops = {
	.free = etnaviv_iommuv2_free,
	.map = etnaviv_iommuv2_map,
	.unmap = etnaviv_iommuv2_unmap,
	.dump_size = etnaviv_iommuv2_dump_size,
	.dump = etnaviv_iommuv2_dump,
	.restore = etnaviv_iommuv2_restore,
};

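/*
 * Create a new MMUv2 context: reserve a PTA slot under the global lock,
 * allocate the master TLB with all entries marked as exceptions and set
 * up an address space manager covering GPU addresses from SZ_4K up to
 * 4G.
 */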
struct etnaviv_iommu_context *
etnaviv_iommuv2_context_alloc(struct etnaviv_iommu_global *global)
{
	struct etnaviv_iommuv2_context *v2_context;
	struct etnaviv_iommu_context *context;

	v2_context = vzalloc(sizeof(*v2_context));
	if (!v2_context)
		return NULL;

	mutex_lock(&global->lock);
	v2_context->id = find_first_zero_bit(global->v2.pta_alloc,
					     ETNAVIV_PTA_ENTRIES);
	if (v2_context->id < ETNAVIV_PTA_ENTRIES) {
		set_bit(v2_context->id, global->v2.pta_alloc);
	} else {
		mutex_unlock(&global->lock);
		goto out_free;
	}
	mutex_unlock(&global->lock);

	v2_context->mtlb_cpu = dma_alloc_wc(global->dev, SZ_4K,
					    &v2_context->mtlb_dma, GFP_KERNEL);
	if (!v2_context->mtlb_cpu)
		goto out_free_id;

	memset32(v2_context->mtlb_cpu, MMUv2_PTE_EXCEPTION,
		 MMUv2_MAX_STLB_ENTRIES);

	global->v2.pta_cpu[v2_context->id] = v2_context->mtlb_dma;

	context = &v2_context->base;
	context->global = global;
	kref_init(&context->refcount);
	mutex_init(&context->lock);
	INIT_LIST_HEAD(&context->mappings);
	drm_mm_init(&context->mm, SZ_4K, (u64)SZ_1G * 4 - SZ_4K);

	return context;

out_free_id:
	clear_bit(v2_context->id, global->v2.pta_alloc);
out_free:
	vfree(v2_context);
	return NULL;
}