// SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note
/*
 *
 * (C) COPYRIGHT 2010-2016, 2018-2021 ARM Limited. All rights reserved.
 *
 * This program is free software and is provided to you under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation, and any use by you of this program is subject to the terms
 * of such GNU license.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, you can access it online at
 * http://www.gnu.org/licenses/gpl-2.0.html.
 *
 */

#include <mali_kbase.h>
#include <mali_kbase_debug.h>
#include <tl/mali_kbase_tracepoints.h>
#include <mali_linux_trace.h>

kbase_event_process(struct kbase_context * kctx,struct kbase_jd_atom * katom)27*4882a593Smuzhiyun static struct base_jd_udata kbase_event_process(struct kbase_context *kctx, struct kbase_jd_atom *katom)
28*4882a593Smuzhiyun {
29*4882a593Smuzhiyun 	struct base_jd_udata data;
30*4882a593Smuzhiyun 	struct kbase_device *kbdev;
31*4882a593Smuzhiyun 
32*4882a593Smuzhiyun 	lockdep_assert_held(&kctx->jctx.lock);
33*4882a593Smuzhiyun 
34*4882a593Smuzhiyun 	KBASE_DEBUG_ASSERT(kctx != NULL);
35*4882a593Smuzhiyun 	KBASE_DEBUG_ASSERT(katom != NULL);
36*4882a593Smuzhiyun 	KBASE_DEBUG_ASSERT(katom->status == KBASE_JD_ATOM_STATE_COMPLETED);
37*4882a593Smuzhiyun 
38*4882a593Smuzhiyun 	kbdev = kctx->kbdev;
39*4882a593Smuzhiyun 	data = katom->udata;
40*4882a593Smuzhiyun 
41*4882a593Smuzhiyun 	KBASE_TLSTREAM_TL_NRET_ATOM_CTX(kbdev, katom, kctx);
42*4882a593Smuzhiyun 	KBASE_TLSTREAM_TL_DEL_ATOM(kbdev, katom);
43*4882a593Smuzhiyun 
44*4882a593Smuzhiyun 	katom->status = KBASE_JD_ATOM_STATE_UNUSED;
45*4882a593Smuzhiyun 	dev_dbg(kbdev->dev, "Atom %pK status to unused\n", (void *)katom);
46*4882a593Smuzhiyun 	wake_up(&katom->completed);
47*4882a593Smuzhiyun 
48*4882a593Smuzhiyun 	return data;
49*4882a593Smuzhiyun }
50*4882a593Smuzhiyun 
kbase_event_dequeue(struct kbase_context * ctx,struct base_jd_event_v2 * uevent)51*4882a593Smuzhiyun int kbase_event_dequeue(struct kbase_context *ctx, struct base_jd_event_v2 *uevent)
52*4882a593Smuzhiyun {
53*4882a593Smuzhiyun 	struct kbase_jd_atom *atom;
54*4882a593Smuzhiyun 
55*4882a593Smuzhiyun 	KBASE_DEBUG_ASSERT(ctx);
56*4882a593Smuzhiyun 
57*4882a593Smuzhiyun 	mutex_lock(&ctx->event_mutex);
58*4882a593Smuzhiyun 
59*4882a593Smuzhiyun 	if (list_empty(&ctx->event_list)) {
60*4882a593Smuzhiyun 		if (!atomic_read(&ctx->event_closed)) {
61*4882a593Smuzhiyun 			mutex_unlock(&ctx->event_mutex);
62*4882a593Smuzhiyun 			return -1;
63*4882a593Smuzhiyun 		}
64*4882a593Smuzhiyun 
65*4882a593Smuzhiyun 		/* generate the BASE_JD_EVENT_DRV_TERMINATED message on the fly */
66*4882a593Smuzhiyun 		mutex_unlock(&ctx->event_mutex);
67*4882a593Smuzhiyun 		uevent->event_code = BASE_JD_EVENT_DRV_TERMINATED;
68*4882a593Smuzhiyun 		memset(&uevent->udata, 0, sizeof(uevent->udata));
69*4882a593Smuzhiyun 		dev_dbg(ctx->kbdev->dev,
70*4882a593Smuzhiyun 				"event system closed, returning BASE_JD_EVENT_DRV_TERMINATED(0x%X)\n",
71*4882a593Smuzhiyun 				BASE_JD_EVENT_DRV_TERMINATED);
72*4882a593Smuzhiyun 		return 0;
73*4882a593Smuzhiyun 	}
74*4882a593Smuzhiyun 
75*4882a593Smuzhiyun 	/* normal event processing */
76*4882a593Smuzhiyun 	atomic_dec(&ctx->event_count);
77*4882a593Smuzhiyun 	atom = list_entry(ctx->event_list.next, struct kbase_jd_atom, dep_item[0]);
78*4882a593Smuzhiyun 	list_del(ctx->event_list.next);
79*4882a593Smuzhiyun 
80*4882a593Smuzhiyun 	mutex_unlock(&ctx->event_mutex);
81*4882a593Smuzhiyun 
82*4882a593Smuzhiyun 	dev_dbg(ctx->kbdev->dev, "event dequeuing %pK\n", (void *)atom);
83*4882a593Smuzhiyun 	uevent->event_code = atom->event_code;
84*4882a593Smuzhiyun 
85*4882a593Smuzhiyun 	uevent->atom_number = (atom - ctx->jctx.atoms);
86*4882a593Smuzhiyun 
87*4882a593Smuzhiyun 	if (atom->core_req & BASE_JD_REQ_EXTERNAL_RESOURCES)
88*4882a593Smuzhiyun 		kbase_jd_free_external_resources(atom);
89*4882a593Smuzhiyun 
90*4882a593Smuzhiyun 	mutex_lock(&ctx->jctx.lock);
91*4882a593Smuzhiyun 	uevent->udata = kbase_event_process(ctx, atom);
92*4882a593Smuzhiyun 	mutex_unlock(&ctx->jctx.lock);
93*4882a593Smuzhiyun 
94*4882a593Smuzhiyun 	return 0;
95*4882a593Smuzhiyun }
96*4882a593Smuzhiyun 
97*4882a593Smuzhiyun KBASE_EXPORT_TEST_API(kbase_event_dequeue);
98*4882a593Smuzhiyun 
99*4882a593Smuzhiyun /**
100*4882a593Smuzhiyun  * kbase_event_process_noreport_worker - Worker for processing atoms that do not
101*4882a593Smuzhiyun  *                                       return an event but do have external
102*4882a593Smuzhiyun  *                                       resources
103*4882a593Smuzhiyun  * @data:  Work structure
104*4882a593Smuzhiyun  */
kbase_event_process_noreport_worker(struct work_struct * data)105*4882a593Smuzhiyun static void kbase_event_process_noreport_worker(struct work_struct *data)
106*4882a593Smuzhiyun {
107*4882a593Smuzhiyun 	struct kbase_jd_atom *katom = container_of(data, struct kbase_jd_atom,
108*4882a593Smuzhiyun 			work);
109*4882a593Smuzhiyun 	struct kbase_context *kctx = katom->kctx;
110*4882a593Smuzhiyun 
111*4882a593Smuzhiyun 	if (katom->core_req & BASE_JD_REQ_EXTERNAL_RESOURCES)
112*4882a593Smuzhiyun 		kbase_jd_free_external_resources(katom);
113*4882a593Smuzhiyun 
114*4882a593Smuzhiyun 	mutex_lock(&kctx->jctx.lock);
115*4882a593Smuzhiyun 	kbase_event_process(kctx, katom);
116*4882a593Smuzhiyun 	mutex_unlock(&kctx->jctx.lock);
117*4882a593Smuzhiyun }
118*4882a593Smuzhiyun 
119*4882a593Smuzhiyun /**
120*4882a593Smuzhiyun  * kbase_event_process_noreport - Process atoms that do not return an event
121*4882a593Smuzhiyun  * @kctx:  Context pointer
122*4882a593Smuzhiyun  * @katom: Atom to be processed
123*4882a593Smuzhiyun  *
124*4882a593Smuzhiyun  * Atoms that do not have external resources will be processed immediately.
125*4882a593Smuzhiyun  * Atoms that do have external resources will be processed on a workqueue, in
126*4882a593Smuzhiyun  * order to avoid locking issues.
127*4882a593Smuzhiyun  */
kbase_event_process_noreport(struct kbase_context * kctx,struct kbase_jd_atom * katom)128*4882a593Smuzhiyun static void kbase_event_process_noreport(struct kbase_context *kctx,
129*4882a593Smuzhiyun 		struct kbase_jd_atom *katom)
130*4882a593Smuzhiyun {
131*4882a593Smuzhiyun 	if (katom->core_req & BASE_JD_REQ_EXTERNAL_RESOURCES) {
132*4882a593Smuzhiyun 		INIT_WORK(&katom->work, kbase_event_process_noreport_worker);
133*4882a593Smuzhiyun 		queue_work(kctx->event_workq, &katom->work);
134*4882a593Smuzhiyun 	} else {
135*4882a593Smuzhiyun 		kbase_event_process(kctx, katom);
136*4882a593Smuzhiyun 	}
137*4882a593Smuzhiyun }
138*4882a593Smuzhiyun 
139*4882a593Smuzhiyun /**
140*4882a593Smuzhiyun  * kbase_event_coalesce - Move pending events to the main event list
141*4882a593Smuzhiyun  * @kctx:  Context pointer
142*4882a593Smuzhiyun  *
143*4882a593Smuzhiyun  * kctx->event_list and kctx->event_coalesce_count must be protected
144*4882a593Smuzhiyun  * by a lock unless this is the last thread using them
145*4882a593Smuzhiyun  * (and we're about to terminate the lock).
146*4882a593Smuzhiyun  *
147*4882a593Smuzhiyun  * Return: The number of pending events moved to the main event list
148*4882a593Smuzhiyun  */
kbase_event_coalesce(struct kbase_context * kctx)149*4882a593Smuzhiyun static int kbase_event_coalesce(struct kbase_context *kctx)
150*4882a593Smuzhiyun {
151*4882a593Smuzhiyun 	const int event_count = kctx->event_coalesce_count;
152*4882a593Smuzhiyun 
153*4882a593Smuzhiyun 	/* Join the list of pending events onto the tail of the main list
154*4882a593Smuzhiyun 	 * and reset it
155*4882a593Smuzhiyun 	 */
156*4882a593Smuzhiyun 	list_splice_tail_init(&kctx->event_coalesce_list, &kctx->event_list);
157*4882a593Smuzhiyun 	kctx->event_coalesce_count = 0;
158*4882a593Smuzhiyun 
159*4882a593Smuzhiyun 	/* Return the number of events moved */
160*4882a593Smuzhiyun 	return event_count;
161*4882a593Smuzhiyun }
162*4882a593Smuzhiyun 
kbase_event_post(struct kbase_context * ctx,struct kbase_jd_atom * atom)163*4882a593Smuzhiyun void kbase_event_post(struct kbase_context *ctx, struct kbase_jd_atom *atom)
164*4882a593Smuzhiyun {
165*4882a593Smuzhiyun 	struct kbase_device *kbdev = ctx->kbdev;
166*4882a593Smuzhiyun 
167*4882a593Smuzhiyun 	dev_dbg(kbdev->dev, "Posting event for atom %pK\n", (void *)atom);
168*4882a593Smuzhiyun 
169*4882a593Smuzhiyun 	if (WARN_ON(atom->status != KBASE_JD_ATOM_STATE_COMPLETED)) {
170*4882a593Smuzhiyun 		dev_warn(kbdev->dev,
171*4882a593Smuzhiyun 				"%s: Atom %d (%pK) not completed (status %d)\n",
172*4882a593Smuzhiyun 				__func__,
173*4882a593Smuzhiyun 				kbase_jd_atom_id(atom->kctx, atom),
174*4882a593Smuzhiyun 				atom->kctx,
175*4882a593Smuzhiyun 				atom->status);
176*4882a593Smuzhiyun 		return;
177*4882a593Smuzhiyun 	}
178*4882a593Smuzhiyun 
179*4882a593Smuzhiyun 	if (atom->core_req & BASE_JD_REQ_EVENT_ONLY_ON_FAILURE) {
180*4882a593Smuzhiyun 		if (atom->event_code == BASE_JD_EVENT_DONE) {
181*4882a593Smuzhiyun 			dev_dbg(kbdev->dev, "Suppressing event (atom done)\n");
182*4882a593Smuzhiyun 			kbase_event_process_noreport(ctx, atom);
183*4882a593Smuzhiyun 			return;
184*4882a593Smuzhiyun 		}
185*4882a593Smuzhiyun 	}
186*4882a593Smuzhiyun 
187*4882a593Smuzhiyun 	if (atom->core_req & BASEP_JD_REQ_EVENT_NEVER) {
188*4882a593Smuzhiyun 		dev_dbg(kbdev->dev, "Suppressing event (never)\n");
189*4882a593Smuzhiyun 		kbase_event_process_noreport(ctx, atom);
190*4882a593Smuzhiyun 		return;
191*4882a593Smuzhiyun 	}
192*4882a593Smuzhiyun 	KBASE_TLSTREAM_TL_ATTRIB_ATOM_STATE(kbdev, atom, TL_ATOM_STATE_POSTED);
193*4882a593Smuzhiyun 	if (atom->core_req & BASE_JD_REQ_EVENT_COALESCE) {
194*4882a593Smuzhiyun 		/* Don't report the event until other event(s) have completed */
195*4882a593Smuzhiyun 		dev_dbg(kbdev->dev, "Deferring event (coalesced)\n");
196*4882a593Smuzhiyun 		mutex_lock(&ctx->event_mutex);
197*4882a593Smuzhiyun 		list_add_tail(&atom->dep_item[0], &ctx->event_coalesce_list);
198*4882a593Smuzhiyun 		++ctx->event_coalesce_count;
199*4882a593Smuzhiyun 		mutex_unlock(&ctx->event_mutex);
200*4882a593Smuzhiyun 	} else {
201*4882a593Smuzhiyun 		/* Report the event and any pending events now */
202*4882a593Smuzhiyun 		int event_count = 1;
203*4882a593Smuzhiyun 
204*4882a593Smuzhiyun 		mutex_lock(&ctx->event_mutex);
205*4882a593Smuzhiyun 		event_count += kbase_event_coalesce(ctx);
206*4882a593Smuzhiyun 		list_add_tail(&atom->dep_item[0], &ctx->event_list);
207*4882a593Smuzhiyun 		atomic_add(event_count, &ctx->event_count);
208*4882a593Smuzhiyun 		mutex_unlock(&ctx->event_mutex);
209*4882a593Smuzhiyun 		dev_dbg(kbdev->dev, "Reporting %d events\n", event_count);
210*4882a593Smuzhiyun 
211*4882a593Smuzhiyun 		kbase_event_wakeup(ctx);
212*4882a593Smuzhiyun 
213*4882a593Smuzhiyun 		/* Post-completion latency */
214*4882a593Smuzhiyun 		trace_sysgraph(SGR_POST, ctx->id,
215*4882a593Smuzhiyun 					kbase_jd_atom_id(ctx, atom));
216*4882a593Smuzhiyun 	}
217*4882a593Smuzhiyun }
218*4882a593Smuzhiyun KBASE_EXPORT_TEST_API(kbase_event_post);
219*4882a593Smuzhiyun 
/* Mark the context's event system as closed (under event_mutex), then wake
 * any waiters so they observe the closed state; kbase_event_dequeue() will
 * subsequently synthesise BASE_JD_EVENT_DRV_TERMINATED for an empty queue.
 */
void kbase_event_close(struct kbase_context *kctx)
{
	mutex_lock(&kctx->event_mutex);
	atomic_set(&kctx->event_closed, true);
	mutex_unlock(&kctx->event_mutex);
	kbase_event_wakeup(kctx);
}

kbase_event_init(struct kbase_context * kctx)228*4882a593Smuzhiyun int kbase_event_init(struct kbase_context *kctx)
229*4882a593Smuzhiyun {
230*4882a593Smuzhiyun 	KBASE_DEBUG_ASSERT(kctx);
231*4882a593Smuzhiyun 
232*4882a593Smuzhiyun 	INIT_LIST_HEAD(&kctx->event_list);
233*4882a593Smuzhiyun 	INIT_LIST_HEAD(&kctx->event_coalesce_list);
234*4882a593Smuzhiyun 	mutex_init(&kctx->event_mutex);
235*4882a593Smuzhiyun 	kctx->event_coalesce_count = 0;
236*4882a593Smuzhiyun 	kctx->event_workq = alloc_workqueue("kbase_event", WQ_MEM_RECLAIM, 1);
237*4882a593Smuzhiyun 
238*4882a593Smuzhiyun 	if (kctx->event_workq == NULL)
239*4882a593Smuzhiyun 		return -EINVAL;
240*4882a593Smuzhiyun 
241*4882a593Smuzhiyun 	return 0;
242*4882a593Smuzhiyun }
243*4882a593Smuzhiyun 
244*4882a593Smuzhiyun KBASE_EXPORT_TEST_API(kbase_event_init);
245*4882a593Smuzhiyun 
kbase_event_cleanup(struct kbase_context * kctx)246*4882a593Smuzhiyun void kbase_event_cleanup(struct kbase_context *kctx)
247*4882a593Smuzhiyun {
248*4882a593Smuzhiyun 	int event_count;
249*4882a593Smuzhiyun 
250*4882a593Smuzhiyun 	KBASE_DEBUG_ASSERT(kctx);
251*4882a593Smuzhiyun 	KBASE_DEBUG_ASSERT(kctx->event_workq);
252*4882a593Smuzhiyun 
253*4882a593Smuzhiyun 	flush_workqueue(kctx->event_workq);
254*4882a593Smuzhiyun 	destroy_workqueue(kctx->event_workq);
255*4882a593Smuzhiyun 
256*4882a593Smuzhiyun 	/* We use kbase_event_dequeue to remove the remaining events as that
257*4882a593Smuzhiyun 	 * deals with all the cleanup needed for the atoms.
258*4882a593Smuzhiyun 	 *
259*4882a593Smuzhiyun 	 * Note: use of kctx->event_list without a lock is safe because this must be the last
260*4882a593Smuzhiyun 	 * thread using it (because we're about to terminate the lock)
261*4882a593Smuzhiyun 	 */
262*4882a593Smuzhiyun 	event_count = kbase_event_coalesce(kctx);
263*4882a593Smuzhiyun 	atomic_add(event_count, &kctx->event_count);
264*4882a593Smuzhiyun 
265*4882a593Smuzhiyun 	while (!list_empty(&kctx->event_list)) {
266*4882a593Smuzhiyun 		struct base_jd_event_v2 event;
267*4882a593Smuzhiyun 
268*4882a593Smuzhiyun 		kbase_event_dequeue(kctx, &event);
269*4882a593Smuzhiyun 	}
270*4882a593Smuzhiyun }
271*4882a593Smuzhiyun 
272*4882a593Smuzhiyun KBASE_EXPORT_TEST_API(kbase_event_cleanup);
273