xref: /OK3568_Linux_fs/kernel/drivers/gpu/arm/bifrost/csf/mali_kbase_csf_event.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
// SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note
/*
 *
 * (C) COPYRIGHT 2021-2022 ARM Limited. All rights reserved.
 *
 * This program is free software and is provided to you under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation, and any use by you of this program is subject to the terms
 * of such GNU license.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, you can access it online at
 * http://www.gnu.org/licenses/gpl-2.0.html.
 *
 */
#include <mali_kbase.h>
#include "mali_kbase_csf_event.h"

/**
 * struct kbase_csf_event_cb - CSF event callback.
 *
 * @link:      Link to the rest of the list.
 * @kctx:      Pointer to the Kbase context this event belongs to.
 * @callback:  Callback function to call when a CSF event is signalled.
 * @param:     Parameter to pass to the callback function.
 *
 * This structure belongs to the list of events which is part of a Kbase
 * context, and describes a callback function with a custom parameter to pass
 * to it when a CSF event is signalled.
 */
struct kbase_csf_event_cb {
	struct list_head link;
	struct kbase_context *kctx;
	kbase_csf_event_callback *callback;
	void *param;
};

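/*
 * Illustrative sketch (not part of the original file): a minimal callback with
 * the shape expected by the registration API below. Judging from the delivery
 * loop in kbase_csf_event_signal(), a callback receives the param it was
 * registered with and returns a kbase_csf_event_callback_action. The
 * example_csf_event_cb name and the one_shot parameter are made up, and
 * KBASE_CSF_EVENT_CALLBACK_KEEP is assumed to be the counterpart of the
 * KBASE_CSF_EVENT_CALLBACK_REMOVE value handled in that loop.
 */
static enum kbase_csf_event_callback_action __maybe_unused
example_csf_event_cb(void *param)
{
	bool *one_shot = param;

	/* Re-evaluate whatever this waiter is blocked on here, then decide
	 * whether to stay registered for further CSF events.
	 */
	return *one_shot ? KBASE_CSF_EVENT_CALLBACK_REMOVE :
			   KBASE_CSF_EVENT_CALLBACK_KEEP;
}
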
int kbase_csf_event_wait_add(struct kbase_context *kctx,
			     kbase_csf_event_callback *callback, void *param)
{
	int err = -ENOMEM;
	struct kbase_csf_event_cb *event_cb =
		kzalloc(sizeof(struct kbase_csf_event_cb), GFP_KERNEL);

	if (event_cb) {
		unsigned long flags;

		event_cb->kctx = kctx;
		event_cb->callback = callback;
		event_cb->param = param;

		spin_lock_irqsave(&kctx->csf.event.lock, flags);
		list_add_tail(&event_cb->link, &kctx->csf.event.callback_list);
		dev_dbg(kctx->kbdev->dev,
			"Added event handler %pK with param %pK\n", event_cb,
			event_cb->param);
		spin_unlock_irqrestore(&kctx->csf.event.lock, flags);

		err = 0;
	}

	return err;
}

void kbase_csf_event_wait_remove(struct kbase_context *kctx,
		kbase_csf_event_callback *callback, void *param)
{
	struct kbase_csf_event_cb *event_cb;
	unsigned long flags;

	spin_lock_irqsave(&kctx->csf.event.lock, flags);

	list_for_each_entry(event_cb, &kctx->csf.event.callback_list, link) {
		if ((event_cb->callback == callback) && (event_cb->param == param)) {
			list_del(&event_cb->link);
			dev_dbg(kctx->kbdev->dev,
				"Removed event handler %pK with param %pK\n",
				event_cb, event_cb->param);
			kfree(event_cb);
			break;
		}
	}
	spin_unlock_irqrestore(&kctx->csf.event.lock, flags);
}

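/*
 * Illustrative sketch (not part of the original file): registering the example
 * callback above for a context and unregistering it later with the same
 * callback/param pair. The example_use_event_waiter name and the one_shot flag
 * are made up for illustration.
 */
static int __maybe_unused example_use_event_waiter(struct kbase_context *kctx,
						   bool *one_shot)
{
	int err = kbase_csf_event_wait_add(kctx, example_csf_event_cb, one_shot);

	if (err)
		return err; /* -ENOMEM: callback node could not be allocated */

	/* ... the callback now runs on every kbase_csf_event_signal() ... */

	/* Unregister with the same callback/param pair used above. */
	kbase_csf_event_wait_remove(kctx, example_csf_event_cb, one_shot);

	return 0;
}
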
static void sync_update_notify_gpu(struct kbase_context *kctx)
{
	bool can_notify_gpu;
	unsigned long flags;

	spin_lock_irqsave(&kctx->kbdev->hwaccess_lock, flags);
	can_notify_gpu = kctx->kbdev->pm.backend.gpu_powered;
#ifdef KBASE_PM_RUNTIME
	if (kctx->kbdev->pm.backend.gpu_sleep_mode_active)
		can_notify_gpu = false;
#endif

	if (can_notify_gpu) {
		kbase_csf_ring_doorbell(kctx->kbdev, CSF_KERNEL_DOORBELL_NR);
		KBASE_KTRACE_ADD(kctx->kbdev, CSF_SYNC_UPDATE_NOTIFY_GPU_EVENT, kctx, 0u);
	}

	spin_unlock_irqrestore(&kctx->kbdev->hwaccess_lock, flags);
}

void kbase_csf_event_signal(struct kbase_context *kctx, bool notify_gpu)
{
	struct kbase_csf_event_cb *event_cb, *next_event_cb;
	unsigned long flags;

	dev_dbg(kctx->kbdev->dev,
		"Signal event (%s GPU notify) for context %pK\n",
		notify_gpu ? "with" : "without", (void *)kctx);

	/* First set the event count and wake up the event thread.
	 */
	atomic_set(&kctx->event_count, 1);
	kbase_event_wakeup(kctx);

	/* Signal the CSF firmware. This is to ensure that pending command
	 * stream synch object wait operations are re-evaluated.
	 * A write to GLB_DOORBELL would suffice, as the spec says that all
	 * pending synch object wait operations are re-evaluated on a write to
	 * any CS_DOORBELL/GLB_DOORBELL register.
	 */
	if (notify_gpu)
		sync_update_notify_gpu(kctx);

	/* Now invoke the callbacks registered on the backend side.
	 * Allow item removal inside the loop, if requested by the callback.
	 */
	spin_lock_irqsave(&kctx->csf.event.lock, flags);

	list_for_each_entry_safe(
		event_cb, next_event_cb, &kctx->csf.event.callback_list, link) {
		enum kbase_csf_event_callback_action action;

		dev_dbg(kctx->kbdev->dev,
			"Calling event handler %pK with param %pK\n",
			(void *)event_cb, event_cb->param);
		action = event_cb->callback(event_cb->param);
		if (action == KBASE_CSF_EVENT_CALLBACK_REMOVE) {
			list_del(&event_cb->link);
			kfree(event_cb);
		}
	}

	spin_unlock_irqrestore(&kctx->csf.event.lock, flags);
}

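/*
 * Illustrative sketch (not part of the original file): signalling CSF events
 * after a synchronisation object has been updated. The function name and the
 * updated_from_cpu flag are made up; passing true additionally requests the
 * global doorbell ring described in the comment above, while false only wakes
 * the CPU-side waiters and registered callbacks.
 */
static void __maybe_unused example_signal_sync_update(struct kbase_context *kctx,
						      bool updated_from_cpu)
{
	/* Firmware only needs a doorbell ring when a sync object that command
	 * streams may be waiting on was changed by the CPU.
	 */
	kbase_csf_event_signal(kctx, updated_from_cpu);
}
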
void kbase_csf_event_term(struct kbase_context *kctx)
{
	struct kbase_csf_event_cb *event_cb, *next_event_cb;
	unsigned long flags;

	spin_lock_irqsave(&kctx->csf.event.lock, flags);

	list_for_each_entry_safe(
		event_cb, next_event_cb, &kctx->csf.event.callback_list, link) {
		list_del(&event_cb->link);
		dev_warn(kctx->kbdev->dev,
			"Removed event handler %pK with param %pK\n",
			(void *)event_cb, event_cb->param);
		kfree(event_cb);
	}

	WARN(!list_empty(&kctx->csf.event.error_list),
	     "Error list not empty for ctx %d_%d\n", kctx->tgid, kctx->id);

	spin_unlock_irqrestore(&kctx->csf.event.lock, flags);
}

void kbase_csf_event_init(struct kbase_context *const kctx)
{
	INIT_LIST_HEAD(&kctx->csf.event.callback_list);
	INIT_LIST_HEAD(&kctx->csf.event.error_list);
	spin_lock_init(&kctx->csf.event.lock);
}

void kbase_csf_event_remove_error(struct kbase_context *kctx,
				  struct kbase_csf_notification *error)
{
	unsigned long flags;

	spin_lock_irqsave(&kctx->csf.event.lock, flags);
	list_del_init(&error->link);
	spin_unlock_irqrestore(&kctx->csf.event.lock, flags);
}

bool kbase_csf_event_read_error(struct kbase_context *kctx,
				struct base_csf_notification *event_data)
{
	struct kbase_csf_notification *error_data = NULL;
	unsigned long flags;

	spin_lock_irqsave(&kctx->csf.event.lock, flags);
	if (likely(!list_empty(&kctx->csf.event.error_list))) {
		error_data = list_first_entry(&kctx->csf.event.error_list,
			struct kbase_csf_notification, link);
		list_del_init(&error_data->link);
		*event_data = error_data->data;
		dev_dbg(kctx->kbdev->dev, "Dequeued error %pK in context %pK\n",
			(void *)error_data, (void *)kctx);
	}
	spin_unlock_irqrestore(&kctx->csf.event.lock, flags);
	return !!error_data;
}

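/*
 * Illustrative sketch (not part of the original file): draining every pending
 * error notification for a context, for example from a read/ioctl handler.
 * The example_drain_errors name is made up; the loop relies only on
 * kbase_csf_event_read_error() returning false once the error list is empty.
 */
static void __maybe_unused example_drain_errors(struct kbase_context *kctx)
{
	struct base_csf_notification event_data;

	while (kbase_csf_event_read_error(kctx, &event_data)) {
		/* Hypothetical consumer: forward the dequeued notification to
		 * userspace, log it, etc.
		 */
		dev_dbg(kctx->kbdev->dev, "Consumed error of type %d\n",
			event_data.type);
	}
}
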
void kbase_csf_event_add_error(struct kbase_context *const kctx,
			struct kbase_csf_notification *const error,
			struct base_csf_notification const *const data)
{
	unsigned long flags;

	if (WARN_ON(!kctx))
		return;

	if (WARN_ON(!error))
		return;

	if (WARN_ON(!data))
		return;

	spin_lock_irqsave(&kctx->csf.event.lock, flags);
	if (list_empty(&error->link)) {
		error->data = *data;
		list_add_tail(&error->link, &kctx->csf.event.error_list);
		dev_dbg(kctx->kbdev->dev,
			"Added error %pK of type %d in context %pK\n",
			(void *)error, data->type, (void *)kctx);
	} else {
		dev_dbg(kctx->kbdev->dev, "Error %pK of type %d already pending in context %pK",
			(void *)error, error->data.type, (void *)kctx);
	}
	spin_unlock_irqrestore(&kctx->csf.event.lock, flags);
}

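/*
 * Illustrative sketch (not part of the original file): queueing an error and
 * waking CPU-side waiters so they can read it. The example_report_error name
 * is made up; 'error' is assumed to be a pre-allocated kbase_csf_notification
 * whose link was initialised (e.g. with INIT_LIST_HEAD) when its owning object
 * was created, which is what the list_empty() check above relies on.
 */
static void __maybe_unused example_report_error(struct kbase_context *kctx,
						struct kbase_csf_notification *error,
						struct base_csf_notification const *data)
{
	kbase_csf_event_add_error(kctx, error, data);

	/* No GPU notification is needed just to publish an error to the CPU. */
	kbase_csf_event_signal(kctx, false);
}
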
bool kbase_csf_event_error_pending(struct kbase_context *kctx)
{
	bool error_pending = false;
	unsigned long flags;

	/* Withhold the error event if a dump on fault is ongoing.
	 * This prevents Userspace from taking error recovery actions
	 * (which could potentially affect the state that is being dumped).
	 * The event handling thread will eventually notice the error event.
	 */
	if (unlikely(!kbase_debug_csf_fault_dump_complete(kctx->kbdev)))
		return false;

	spin_lock_irqsave(&kctx->csf.event.lock, flags);
	error_pending = !list_empty(&kctx->csf.event.error_list);

	dev_dbg(kctx->kbdev->dev, "%s error is pending in context %pK\n",
		error_pending ? "An" : "No", (void *)kctx);

	spin_unlock_irqrestore(&kctx->csf.event.lock, flags);

	return error_pending;
}
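
/*
 * Illustrative sketch (not part of the original file): a poll-style check that
 * combines the per-context event count with the pending-error state, similar
 * to what a file-operations poll handler might do. The function name is made
 * up; kctx->event_count is the counter set by kbase_csf_event_signal() above.
 */
static bool __maybe_unused example_event_or_error_pending(struct kbase_context *kctx)
{
	if (atomic_read(&kctx->event_count))
		return true;

	return kbase_csf_event_error_pending(kctx);
}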