/*
 *
 * (C) COPYRIGHT 2010-2017 ARM Limited. All rights reserved.
 *
 * This program is free software and is provided to you under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation, and any use by you of this program is subject to the terms
 * of such GNU licence.
 *
 * A copy of the licence is included with the program, and can also be obtained
 * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
 * Boston, MA  02110-1301, USA.
 *
 */

/*
 * Base kernel context APIs
 */

#include <mali_kbase.h>
#include <mali_midg_regmap.h>
#include <mali_kbase_mem_linux.h>
#include <mali_kbase_dma_fence.h>
#include <mali_kbase_ctx_sched.h>

/**
 * kbase_create_context() - Create a kernel base context.
 * @kbdev: Kbase device
 * @is_compat: Force creation of a 32-bit context
 *
 * Allocate and initialize a kernel base context.
 *
 * Return: new kbase context on success, or NULL on failure
 */
struct kbase_context *
kbase_create_context(struct kbase_device *kbdev, bool is_compat)
{
	struct kbase_context *kctx;
	int err;

	KBASE_DEBUG_ASSERT(kbdev != NULL);

	/* Zero-initialized, as a lot of code assumes it is zeroed out on create */
	kctx = vzalloc(sizeof(*kctx));

	if (!kctx)
		goto out;

	/* creating a context is considered a disjoint event */
	kbase_disjoint_event(kbdev);

	kctx->kbdev = kbdev;
	kctx->as_nr = KBASEP_AS_NR_INVALID;
	atomic_set(&kctx->refcount, 0);
	if (is_compat)
		kbase_ctx_flag_set(kctx, KCTX_COMPAT);
#ifdef CONFIG_MALI_TRACE_TIMELINE
	kctx->timeline.owner_tgid = task_tgid_nr(current);
#endif
	atomic_set(&kctx->setup_complete, 0);
	atomic_set(&kctx->setup_in_progress, 0);
	spin_lock_init(&kctx->mm_update_lock);
	kctx->process_mm = NULL;
	atomic_set(&kctx->nonmapped_pages, 0);
	kctx->slots_pullable = 0;
	kctx->tgid = current->tgid;
	kctx->pid = current->pid;

	err = kbase_mem_pool_init(&kctx->mem_pool,
			kbdev->mem_pool_max_size_default,
			kctx->kbdev, &kbdev->mem_pool);
	if (err)
		goto free_kctx;

	err = kbase_mem_evictable_init(kctx);
	if (err)
		goto free_pool;

	atomic_set(&kctx->used_pages, 0);

	err = kbase_jd_init(kctx);
	if (err)
		goto deinit_evictable;

	err = kbasep_js_kctx_init(kctx);
	if (err)
		goto free_jd;	/* safe to call kbasep_js_kctx_term in this case */

	err = kbase_event_init(kctx);
	if (err)
		goto free_jd;

	atomic_set(&kctx->drain_pending, 0);

	mutex_init(&kctx->reg_lock);

	INIT_LIST_HEAD(&kctx->waiting_soft_jobs);
	spin_lock_init(&kctx->waiting_soft_jobs_lock);
#ifdef CONFIG_KDS
	INIT_LIST_HEAD(&kctx->waiting_kds_resource);
#endif
	err = kbase_dma_fence_init(kctx);
	if (err)
		goto free_event;

	err = kbase_mmu_init(kctx);
	if (err)
		goto term_dma_fence;

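	/*
	 * Grow the pool up front so the bottom-level PGD allocation below
	 * can be satisfied, then retry if it still fails: the grown pages
	 * may be reclaimed again (e.g. by the pool shrinker under memory
	 * pressure) between the grow and the allocation, leaving
	 * kctx->pgd NULL.
	 */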
	do {
		err = kbase_mem_pool_grow(&kctx->mem_pool,
				MIDGARD_MMU_BOTTOMLEVEL);
		if (err)
			goto pgd_no_mem;

		mutex_lock(&kctx->mmu_lock);
		kctx->pgd = kbase_mmu_alloc_pgd(kctx);
		mutex_unlock(&kctx->mmu_lock);
	} while (!kctx->pgd);

	kctx->aliasing_sink_page = kbase_mem_alloc_page(kctx->kbdev);
	if (!kctx->aliasing_sink_page)
		goto no_sink_page;

	init_waitqueue_head(&kctx->event_queue);

	kctx->cookies = KBASE_COOKIE_MASK;

	/* Make sure page 0 is not used... */
	err = kbase_region_tracker_init(kctx);
	if (err)
		goto no_region_tracker;

	err = kbase_sticky_resource_init(kctx);
	if (err)
		goto no_sticky;

	err = kbase_jit_init(kctx);
	if (err)
		goto no_jit;
#ifdef CONFIG_GPU_TRACEPOINTS
	atomic_set(&kctx->jctx.work_id, 0);
#endif
#ifdef CONFIG_MALI_TRACE_TIMELINE
	atomic_set(&kctx->timeline.jd_atoms_in_flight, 0);
#endif

	kctx->id = atomic_add_return(1, &(kbdev->ctx_num)) - 1;

	mutex_init(&kctx->vinstr_cli_lock);

	timer_setup(&kctx->soft_job_timeout,
		    kbasep_soft_job_timeout_worker,
		    0);

	return kctx;

no_jit:
	kbase_gpu_vm_lock(kctx);
	kbase_sticky_resource_term(kctx);
	kbase_gpu_vm_unlock(kctx);
no_sticky:
	kbase_region_tracker_term(kctx);
no_region_tracker:
	kbase_mem_pool_free(&kctx->mem_pool, kctx->aliasing_sink_page, false);
no_sink_page:
	/* VM lock needed for the call to kbase_mmu_free_pgd */
	kbase_gpu_vm_lock(kctx);
	kbase_mmu_free_pgd(kctx);
	kbase_gpu_vm_unlock(kctx);
pgd_no_mem:
	kbase_mmu_term(kctx);
term_dma_fence:
	kbase_dma_fence_term(kctx);
free_event:
	kbase_event_cleanup(kctx);
free_jd:
	/* Safe to call even when init didn't run (kctx was sufficiently zeroed) */
	kbasep_js_kctx_term(kctx);
	kbase_jd_exit(kctx);
deinit_evictable:
	kbase_mem_evictable_deinit(kctx);
free_pool:
	kbase_mem_pool_term(&kctx->mem_pool);
free_kctx:
	vfree(kctx);
out:
	return NULL;
}
KBASE_EXPORT_SYMBOL(kbase_create_context);
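
/*
 * Usage sketch (illustrative only, not from this file): a hypothetical
 * caller creating and later destroying a context, following the
 * NULL-on-failure contract documented above.
 *
 *	struct kbase_context *kctx = kbase_create_context(kbdev, false);
 *
 *	if (!kctx)
 *		return -ENOMEM;	// nothing to clean up; create freed everything
 *	// ... use kctx, then balance with:
 *	kbase_destroy_context(kctx);
 */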

static void kbase_reg_pending_dtor(struct kbase_va_region *reg)
{
	dev_dbg(reg->kctx->kbdev->dev, "Freeing pending unmapped region\n");
	kbase_mem_phy_alloc_put(reg->cpu_alloc);
	kbase_mem_phy_alloc_put(reg->gpu_alloc);
	kfree(reg);
}

/**
 * kbase_destroy_context - Destroy a kernel base context.
 * @kctx: Context to destroy
 *
 * Calls kbase_destroy_os_context() to free OS-specific structures.
 * Will release all outstanding regions.
 */
void kbase_destroy_context(struct kbase_context *kctx)
{
	struct kbase_device *kbdev;
	int pages;
	unsigned long pending_regions_to_clean;
	unsigned long flags;

	KBASE_DEBUG_ASSERT(NULL != kctx);

	kbdev = kctx->kbdev;
	KBASE_DEBUG_ASSERT(NULL != kbdev);

	KBASE_TRACE_ADD(kbdev, CORE_CTX_DESTROY, kctx, NULL, 0u, 0u);

	/* Ensure the core is powered up for the destroy process.
	 * A suspend won't happen here, because we're in a syscall from a
	 * userspace thread.
	 */
	kbase_pm_context_active(kbdev);

	kbase_jd_zap_context(kctx);

#ifdef CONFIG_DEBUG_FS
	/* Remove the rest of the debugfs entries here, as we want to keep
	 * the atom debugfs interface alive until all atoms have completed.
	 * This is useful for debugging hung contexts.
	 */
	debugfs_remove_recursive(kctx->kctx_dentry);
#endif

	kbase_event_cleanup(kctx);

	/*
	 * JIT must be terminated before the code below, as it must be called
	 * without the region lock being held.
	 * The code above ensures no new JIT allocations can be made by the
	 * time we get to this point of context tear-down.
	 */
	kbase_jit_term(kctx);

	kbase_gpu_vm_lock(kctx);

	kbase_sticky_resource_term(kctx);

	/* MMU is disabled as part of scheduling out the context */
	kbase_mmu_free_pgd(kctx);

	/* drop the aliasing sink page now that it can't be mapped anymore */
	kbase_mem_pool_free(&kctx->mem_pool, kctx->aliasing_sink_page, false);

	/* free pending region setups */
	pending_regions_to_clean = (~kctx->cookies) & KBASE_COOKIE_MASK;
	while (pending_regions_to_clean) {
		unsigned int cookie = __ffs(pending_regions_to_clean);

		BUG_ON(!kctx->pending_regions[cookie]);

		kbase_reg_pending_dtor(kctx->pending_regions[cookie]);

		kctx->pending_regions[cookie] = NULL;
		pending_regions_to_clean &= ~(1UL << cookie);
	}

	kbase_region_tracker_term(kctx);
	kbase_gpu_vm_unlock(kctx);

	/* Safe to call even when init didn't run (kctx was sufficiently zeroed) */
	kbasep_js_kctx_term(kctx);

	kbase_jd_exit(kctx);

	kbase_pm_context_idle(kbdev);

	kbase_dma_fence_term(kctx);

	mutex_lock(&kbdev->mmu_hw_mutex);
	spin_lock_irqsave(&kctx->kbdev->hwaccess_lock, flags);
	kbase_ctx_sched_remove_ctx(kctx);
	spin_unlock_irqrestore(&kctx->kbdev->hwaccess_lock, flags);
	mutex_unlock(&kbdev->mmu_hw_mutex);

	kbase_mmu_term(kctx);

	pages = atomic_read(&kctx->used_pages);
	if (pages != 0)
		dev_warn(kbdev->dev, "%s: %d pages in use!\n", __func__, pages);

	kbase_mem_evictable_deinit(kctx);
	kbase_mem_pool_term(&kctx->mem_pool);
	WARN_ON(atomic_read(&kctx->nonmapped_pages) != 0);

	vfree(kctx);
}
KBASE_EXPORT_SYMBOL(kbase_destroy_context);
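
/*
 * Worked example (for illustration): the pending-region cleanup above walks
 * a bitmask in which a *cleared* bit in kctx->cookies marks a cookie in use.
 * Assuming an 8-bit mask of 0xff (the real KBASE_COOKIE_MASK is defined in
 * the kbase headers) and cookies == 0xfa, the loop visits cookies 0 and 2:
 *
 *	~0xfa & 0xff = 0x05	// bits 0 and 2 are pending
 *	__ffs(0x05) = 0		// clean cookie 0, mask becomes 0x04
 *	__ffs(0x04) = 2		// clean cookie 2, mask becomes 0x00
 */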

/**
 * kbase_context_set_create_flags - Set creation flags on a context
 * @kctx: Kbase context
 * @flags: Flags to set
 *
 * Return: 0 on success, -EINVAL if @flags contains unsupported bits
 */
int kbase_context_set_create_flags(struct kbase_context *kctx, u32 flags)
{
	int err = 0;
	struct kbasep_js_kctx_info *js_kctx_info;
	unsigned long irq_flags;

	KBASE_DEBUG_ASSERT(NULL != kctx);

	js_kctx_info = &kctx->jctx.sched_info;

	/* Validate flags */
	if (flags != (flags & BASE_CONTEXT_CREATE_KERNEL_FLAGS)) {
		err = -EINVAL;
		goto out;
	}

	mutex_lock(&js_kctx_info->ctx.jsctx_mutex);
	spin_lock_irqsave(&kctx->kbdev->hwaccess_lock, irq_flags);

	/* Translate the flags */
	if ((flags & BASE_CONTEXT_SYSTEM_MONITOR_SUBMIT_DISABLED) == 0)
		kbase_ctx_flag_clear(kctx, KCTX_SUBMIT_DISABLED);

	/* Latch the initial attributes into the Job Scheduler */
	kbasep_js_ctx_attr_set_initial_attrs(kctx->kbdev, kctx);

	spin_unlock_irqrestore(&kctx->kbdev->hwaccess_lock, irq_flags);
	mutex_unlock(&js_kctx_info->ctx.jsctx_mutex);
out:
	return err;
}
KBASE_EXPORT_SYMBOL(kbase_context_set_create_flags);
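
/*
 * Usage sketch (illustrative only; user_flags is a hypothetical value read
 * from a userspace setup request): any bit outside
 * BASE_CONTEXT_CREATE_KERNEL_FLAGS makes the call fail with -EINVAL.
 *
 *	struct kbase_context *kctx = kbase_create_context(kbdev, false);
 *
 *	if (kctx && kbase_context_set_create_flags(kctx, user_flags))
 *		dev_dbg(kbdev->dev, "unsupported context flags 0x%x\n",
 *			user_flags);
 */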