xref: /OK3568_Linux_fs/kernel/drivers/gpu/arm/bifrost/hwcnt/backend/mali_kbase_hwcnt_backend_jm_watchdog.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
// SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note
/*
 *
 * (C) COPYRIGHT 2021-2022 ARM Limited. All rights reserved.
 *
 * This program is free software and is provided to you under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation, and any use by you of this program is subject to the terms
 * of such GNU license.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, you can access it online at
 * http://www.gnu.org/licenses/gpl-2.0.html.
 *
 */

#include <mali_kbase.h>

#include <hwcnt/mali_kbase_hwcnt_gpu.h>
#include <hwcnt/mali_kbase_hwcnt_types.h>

#include <hwcnt/backend/mali_kbase_hwcnt_backend.h>
#include <hwcnt/backend/mali_kbase_hwcnt_backend_jm_watchdog.h>
#include <hwcnt/mali_kbase_hwcnt_watchdog_if.h>
#if IS_ENABLED(CONFIG_MALI_IS_FPGA) && !IS_ENABLED(CONFIG_MALI_BIFROST_NO_MALI)
/* Backend watchdog timer interval in milliseconds: 18 seconds. */
static const u32 hwcnt_backend_watchdog_timer_interval_ms = 18000;
#else
/* Backend watchdog timer interval in milliseconds: 1 second. */
static const u32 hwcnt_backend_watchdog_timer_interval_ms = 1000;
#endif /* IS_FPGA && !NO_MALI */

/*
 * IDLE_BUFFER_EMPTY -> USER_DUMPING_BUFFER_EMPTY     on dump_request.
 * IDLE_BUFFER_EMPTY -> TIMER_DUMPING                 after
 *                                                    hwcnt_backend_watchdog_timer_interval_ms
 *                                                    milliseconds, if no dump_request has been
 *                                                    called in the meantime.
 * IDLE_BUFFER_FULL  -> USER_DUMPING_BUFFER_FULL      on dump_request.
 * IDLE_BUFFER_FULL  -> TIMER_DUMPING                 after
 *                                                    hwcnt_backend_watchdog_timer_interval_ms
 *                                                    milliseconds, if no dump_request has been
 *                                                    called in the meantime.
 * IDLE_BUFFER_FULL -> IDLE_BUFFER_EMPTY              on dump_disable, upon discarding undumped
 *                                                    counter values since the last dump_get.
 * IDLE_BUFFER_EMPTY -> BUFFER_CLEARING               on dump_clear, before calling job manager
 *                                                    backend dump_clear.
 * IDLE_BUFFER_FULL  -> BUFFER_CLEARING               on dump_clear, before calling job manager
 *                                                    backend dump_clear.
 * USER_DUMPING_BUFFER_EMPTY -> BUFFER_CLEARING       on dump_clear, before calling job manager
 *                                                    backend dump_clear.
 * USER_DUMPING_BUFFER_FULL  -> BUFFER_CLEARING       on dump_clear, before calling job manager
 *                                                    backend dump_clear.
 * BUFFER_CLEARING -> IDLE_BUFFER_EMPTY               on dump_clear, upon job manager backend
 *                                                    dump_clear completion.
 * TIMER_DUMPING -> IDLE_BUFFER_FULL                  on timer's callback completion.
 * TIMER_DUMPING -> TIMER_DUMPING_USER_CLEAR          on dump_clear, notifies the callback thread
 *                                                    that there is no need to dump the buffer
 *                                                    anymore, and that the client will proceed
 *                                                    to clear the buffer.
 * TIMER_DUMPING_USER_CLEAR -> IDLE_BUFFER_EMPTY      on timer's callback completion, when a user
 *                                                    requested a dump_clear.
 * TIMER_DUMPING -> TIMER_DUMPING_USER_REQUESTED      on dump_request, when a client performs a
 *                                                    dump request while the timer is dumping (the
 *                                                    timer will perform the dump and (once
 *                                                    completed) the client will retrieve the value
 *                                                    from the buffer).
 * TIMER_DUMPING_USER_REQUESTED -> IDLE_BUFFER_EMPTY  on dump_get, when a timer completed and the
 *                                                    user reads the periodic dump buffer.
 * Any -> ERROR                                       if the job manager backend returns an error
 *                                                    (of any kind).
 * USER_DUMPING_BUFFER_EMPTY -> IDLE_BUFFER_EMPTY     on dump_get (performs get, ignores the
 *                                                    periodic dump buffer and returns).
 * USER_DUMPING_BUFFER_FULL  -> IDLE_BUFFER_EMPTY     on dump_get (performs get, accumulates with
 *                                                    the periodic dump buffer and returns).
 */

/**
 * enum backend_watchdog_state - State used to synchronize timer callbacks with the main thread.
 * @HWCNT_JM_WD_ERROR: Received an error from a job manager backend call.
 * @HWCNT_JM_WD_IDLE_BUFFER_EMPTY: Initial state. Watchdog timer enabled, periodic dump buffer is
 *                                 empty.
 * @HWCNT_JM_WD_IDLE_BUFFER_FULL: Watchdog timer enabled, periodic dump buffer is full.
 * @HWCNT_JM_WD_BUFFER_CLEARING: The client is performing a dump clear. A concurrent timer callback
 *                               thread should just ignore it and reschedule another callback in
 *                               hwcnt_backend_watchdog_timer_interval_ms milliseconds.
 * @HWCNT_JM_WD_TIMER_DUMPING: The timer ran out. The callback is performing a periodic dump.
 * @HWCNT_JM_WD_TIMER_DUMPING_USER_REQUESTED: While the timer is performing a periodic dump, the
 *                                            user requested a dump.
 * @HWCNT_JM_WD_TIMER_DUMPING_USER_CLEAR: While the timer is performing a dump, the user requested
 *                                        a dump_clear. The timer has to complete the periodic
 *                                        dump and clear the buffer (internal and job manager
 *                                        backend).
 * @HWCNT_JM_WD_USER_DUMPING_BUFFER_EMPTY: From an IDLE state, the user requested a dump. The
 *                                         periodic dump buffer is empty.
 * @HWCNT_JM_WD_USER_DUMPING_BUFFER_FULL: From an IDLE state, the user requested a dump. The
 *                                        periodic dump buffer is full.
 *
 * While the state machine is in a HWCNT_JM_WD_TIMER_DUMPING* state, only the timer callback
 * thread is allowed to call the job manager backend layer.
 */
enum backend_watchdog_state {
	HWCNT_JM_WD_ERROR,
	HWCNT_JM_WD_IDLE_BUFFER_EMPTY,
	HWCNT_JM_WD_IDLE_BUFFER_FULL,
	HWCNT_JM_WD_BUFFER_CLEARING,
	HWCNT_JM_WD_TIMER_DUMPING,
	HWCNT_JM_WD_TIMER_DUMPING_USER_REQUESTED,
	HWCNT_JM_WD_TIMER_DUMPING_USER_CLEAR,
	HWCNT_JM_WD_USER_DUMPING_BUFFER_EMPTY,
	HWCNT_JM_WD_USER_DUMPING_BUFFER_FULL,
};

/**
 * enum wd_init_state - State machine for initialization / termination of the backend resources.
 * @HWCNT_JM_WD_INIT_START: Initial state, nothing has been initialized yet.
 * @HWCNT_JM_WD_INIT_BACKEND: Next step is to initialize the job manager backend instance
 *                            (alias of HWCNT_JM_WD_INIT_START).
 * @HWCNT_JM_WD_INIT_ENABLE_MAP: Next step is to allocate the watchdog enable map.
 * @HWCNT_JM_WD_INIT_DUMP_BUFFER: Next step is to allocate the watchdog dump buffer.
 * @HWCNT_JM_WD_INIT_END: All resources have been initialized.
 *
 * Initialization advances through these states in order; on failure, termination walks back from
 * the current state and releases only the resources that were actually set up.
 */
enum wd_init_state {
	HWCNT_JM_WD_INIT_START,
	HWCNT_JM_WD_INIT_BACKEND = HWCNT_JM_WD_INIT_START,
	HWCNT_JM_WD_INIT_ENABLE_MAP,
	HWCNT_JM_WD_INIT_DUMP_BUFFER,
	HWCNT_JM_WD_INIT_END
};

/**
 * struct kbase_hwcnt_backend_jm_watchdog_info - Immutable information used to initialize an
 *                                               instance of the job manager watchdog backend.
 * @jm_backend_iface: Hardware counter backend interface. This module extends
 *                    this interface with a watchdog that performs regular
 *                    dumps. The interface this module provides is compatible
 *                    with the wrapped backend interface.
 * @dump_watchdog_iface: Dump watchdog interface, used to periodically dump the
 *                       hardware counters in case no reads are requested within
 *                       a certain time, to avoid saturating the hardware
 *                       counter buffers.
 */
struct kbase_hwcnt_backend_jm_watchdog_info {
	struct kbase_hwcnt_backend_interface *jm_backend_iface;
	struct kbase_hwcnt_watchdog_interface *dump_watchdog_iface;
};

/**
 * struct kbase_hwcnt_backend_jm_watchdog - An instance of the job manager watchdog backend.
 * @info: Immutable information used to create the job manager watchdog backend.
 * @jm_backend: Job manager's backend internal state. To be passed as argument during parent calls.
 * @timeout_ms: Time period in milliseconds for hardware counters dumping.
 * @wd_dump_buffer: Used to store periodic dumps done by a timer callback function. Contents are
 *                  valid in state %HWCNT_JM_WD_TIMER_DUMPING_USER_REQUESTED,
 *                  %HWCNT_JM_WD_IDLE_BUFFER_FULL or %HWCNT_JM_WD_USER_DUMPING_BUFFER_FULL.
 * @wd_enable_map: Watchdog backend internal buffer mask, initialized during dump_enable copying
 *                 the enable_map passed as argument.
 * @wd_dump_timestamp: Holds the dumping timestamp for potential future client dump_request, filled
 *                     during watchdog timer dumps.
 * @watchdog_complete: Used for synchronization between watchdog dumper thread and client calls.
 * @locked: Members protected from concurrent access by different threads.
 * @locked.watchdog_lock: Lock used to access fields within this struct (that require mutual
 *                        exclusion).
 * @locked.is_enabled: If true then the wrapped job manager hardware counter backend and the
 *                     watchdog timer are both enabled. If false then both are disabled (or soon
 *                     will be). Races between enable and disable have undefined behavior.
 * @locked.state: State used to synchronize timer callbacks with the main thread.
 */
struct kbase_hwcnt_backend_jm_watchdog {
	const struct kbase_hwcnt_backend_jm_watchdog_info *info;
	struct kbase_hwcnt_backend *jm_backend;
	u32 timeout_ms;
	struct kbase_hwcnt_dump_buffer wd_dump_buffer;
	struct kbase_hwcnt_enable_map wd_enable_map;
	u64 wd_dump_timestamp;
	struct completion watchdog_complete;
	struct {
		spinlock_t watchdog_lock;
		bool is_enabled;
		enum backend_watchdog_state state;
	} locked;
};

/* Timer callback function */
static void kbasep_hwcnt_backend_jm_watchdog_timer_callback(void *backend)
{
	struct kbase_hwcnt_backend_jm_watchdog *wd_backend = backend;
	unsigned long flags;
	bool wd_accumulate;

	spin_lock_irqsave(&wd_backend->locked.watchdog_lock, flags);

	if (!wd_backend->locked.is_enabled || wd_backend->locked.state == HWCNT_JM_WD_ERROR) {
		spin_unlock_irqrestore(&wd_backend->locked.watchdog_lock, flags);
		return;
	}

	if (!(wd_backend->locked.state == HWCNT_JM_WD_IDLE_BUFFER_EMPTY ||
	      wd_backend->locked.state == HWCNT_JM_WD_IDLE_BUFFER_FULL)) {
		/* Resetting the timer. Calling modify on a disabled timer enables it. */
		wd_backend->info->dump_watchdog_iface->modify(
			wd_backend->info->dump_watchdog_iface->timer, wd_backend->timeout_ms);
		spin_unlock_irqrestore(&wd_backend->locked.watchdog_lock, flags);
		return;
	}
	/* Start performing the dump */

	/* If there has been a previous timeout, use accumulating dump_get(),
	 * otherwise use non-accumulating to overwrite the buffer.
	 */
	wd_accumulate = (wd_backend->locked.state == HWCNT_JM_WD_IDLE_BUFFER_FULL);

	wd_backend->locked.state = HWCNT_JM_WD_TIMER_DUMPING;

	spin_unlock_irqrestore(&wd_backend->locked.watchdog_lock, flags);

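	/* Perform a complete dump sequence on the wrapped job manager backend: request a dump,
	 * wait for it to finish, then read it into the watchdog dump buffer. If any step fails,
	 * the state machine is moved to the error state below.
	 */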
	if (wd_backend->info->jm_backend_iface->dump_request(wd_backend->jm_backend,
							     &wd_backend->wd_dump_timestamp) ||
	    wd_backend->info->jm_backend_iface->dump_wait(wd_backend->jm_backend) ||
	    wd_backend->info->jm_backend_iface->dump_get(
		    wd_backend->jm_backend, &wd_backend->wd_dump_buffer, &wd_backend->wd_enable_map,
		    wd_accumulate)) {
		spin_lock_irqsave(&wd_backend->locked.watchdog_lock, flags);
		WARN_ON(wd_backend->locked.state != HWCNT_JM_WD_TIMER_DUMPING &&
			wd_backend->locked.state != HWCNT_JM_WD_TIMER_DUMPING_USER_CLEAR &&
			wd_backend->locked.state != HWCNT_JM_WD_TIMER_DUMPING_USER_REQUESTED);
		wd_backend->locked.state = HWCNT_JM_WD_ERROR;
		spin_unlock_irqrestore(&wd_backend->locked.watchdog_lock, flags);
		/* Unblock user if it's waiting. */
		complete_all(&wd_backend->watchdog_complete);
		return;
	}

	spin_lock_irqsave(&wd_backend->locked.watchdog_lock, flags);
	WARN_ON(wd_backend->locked.state != HWCNT_JM_WD_TIMER_DUMPING &&
		wd_backend->locked.state != HWCNT_JM_WD_TIMER_DUMPING_USER_CLEAR &&
		wd_backend->locked.state != HWCNT_JM_WD_TIMER_DUMPING_USER_REQUESTED);

	if (wd_backend->locked.state == HWCNT_JM_WD_TIMER_DUMPING) {
		/* If there is no user request/clear, transition to HWCNT_JM_WD_IDLE_BUFFER_FULL
		 * to indicate the timer dump is done and the buffer is full. If the state changed
		 * to HWCNT_JM_WD_TIMER_DUMPING_USER_REQUESTED or
		 * HWCNT_JM_WD_TIMER_DUMPING_USER_CLEAR then the user will move the state
		 * machine to the next state.
		 */
		wd_backend->locked.state = HWCNT_JM_WD_IDLE_BUFFER_FULL;
	}
	if (wd_backend->locked.state != HWCNT_JM_WD_ERROR && wd_backend->locked.is_enabled) {
		/* Reset the timer to schedule another callback. Calling modify on a
		 * disabled timer enables it.
		 * The spin lock needs to be held in case the client calls dump_enable.
		 */
		wd_backend->info->dump_watchdog_iface->modify(
			wd_backend->info->dump_watchdog_iface->timer, wd_backend->timeout_ms);
	}
	spin_unlock_irqrestore(&wd_backend->locked.watchdog_lock, flags);

	/* Unblock user if it's waiting. */
	complete_all(&wd_backend->watchdog_complete);
}

/* helper methods, info structure creation and destruction */

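/**
 * kbasep_hwcnt_backend_jm_watchdog_info_create() - Create an info structure wrapping the job
 *                                                  manager backend and watchdog timer interfaces.
 * @backend_iface: Non-NULL pointer to the job manager backend interface to wrap.
 * @watchdog_iface: Non-NULL pointer to the watchdog timer interface.
 *
 * Return: Pointer to the newly allocated info structure, or NULL on allocation failure.
 */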
static struct kbase_hwcnt_backend_jm_watchdog_info *
kbasep_hwcnt_backend_jm_watchdog_info_create(struct kbase_hwcnt_backend_interface *backend_iface,
					     struct kbase_hwcnt_watchdog_interface *watchdog_iface)
{
	struct kbase_hwcnt_backend_jm_watchdog_info *const info =
		kmalloc(sizeof(*info), GFP_KERNEL);

	if (!info)
		return NULL;

	*info = (struct kbase_hwcnt_backend_jm_watchdog_info){ .jm_backend_iface = backend_iface,
							       .dump_watchdog_iface =
								       watchdog_iface };

	return info;
}

/****** kbase_hwcnt_backend_interface implementation *******/

/* Job manager watchdog backend, implementation of kbase_hwcnt_backend_metadata_fn */
static const struct kbase_hwcnt_metadata *
kbasep_hwcnt_backend_jm_watchdog_metadata(const struct kbase_hwcnt_backend_info *info)
{
	const struct kbase_hwcnt_backend_jm_watchdog_info *wd_info = (void *)info;

	if (WARN_ON(!info))
		return NULL;

	return wd_info->jm_backend_iface->metadata(wd_info->jm_backend_iface->info);
}

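/**
 * kbasep_hwcnt_backend_jm_watchdog_term_partial() - Release the resources of a partially
 *                                                   initialized watchdog backend.
 * @wd_backend: Watchdog backend being terminated, or NULL.
 * @state: Initialization state reached so far; only resources set up before this state are freed.
 */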
static void
kbasep_hwcnt_backend_jm_watchdog_term_partial(struct kbase_hwcnt_backend_jm_watchdog *wd_backend,
					      enum wd_init_state state)
{
	if (!wd_backend)
		return;

	WARN_ON(state > HWCNT_JM_WD_INIT_END);

	while (state-- > HWCNT_JM_WD_INIT_START) {
		switch (state) {
		case HWCNT_JM_WD_INIT_BACKEND:
			wd_backend->info->jm_backend_iface->term(wd_backend->jm_backend);
			break;
		case HWCNT_JM_WD_INIT_ENABLE_MAP:
			kbase_hwcnt_enable_map_free(&wd_backend->wd_enable_map);
			break;
		case HWCNT_JM_WD_INIT_DUMP_BUFFER:
			kbase_hwcnt_dump_buffer_free(&wd_backend->wd_dump_buffer);
			break;
		case HWCNT_JM_WD_INIT_END:
			break;
		}
	}

	kfree(wd_backend);
}

/* Job manager watchdog backend, implementation of kbase_hwcnt_backend_term_fn.
 * Calling term does *not* destroy the interface.
 */
static void kbasep_hwcnt_backend_jm_watchdog_term(struct kbase_hwcnt_backend *backend)
{
	struct kbase_hwcnt_backend_jm_watchdog *wd_backend =
		(struct kbase_hwcnt_backend_jm_watchdog *)backend;

	if (!backend)
		return;

	/* disable timer thread to avoid concurrent access to shared resources */
	wd_backend->info->dump_watchdog_iface->disable(
		wd_backend->info->dump_watchdog_iface->timer);

	kbasep_hwcnt_backend_jm_watchdog_term_partial(wd_backend, HWCNT_JM_WD_INIT_END);
}

/* Job manager watchdog backend, implementation of kbase_hwcnt_backend_init_fn */
static int kbasep_hwcnt_backend_jm_watchdog_init(const struct kbase_hwcnt_backend_info *info,
						 struct kbase_hwcnt_backend **out_backend)
{
	int errcode = 0;
	struct kbase_hwcnt_backend_jm_watchdog *wd_backend = NULL;
	struct kbase_hwcnt_backend_jm_watchdog_info *const wd_info = (void *)info;
	const struct kbase_hwcnt_backend_info *jm_info;
	const struct kbase_hwcnt_metadata *metadata;
	enum wd_init_state state = HWCNT_JM_WD_INIT_START;

	if (WARN_ON(!info) || WARN_ON(!out_backend))
		return -EINVAL;

	jm_info = wd_info->jm_backend_iface->info;
	metadata = wd_info->jm_backend_iface->metadata(wd_info->jm_backend_iface->info);

	wd_backend = kmalloc(sizeof(*wd_backend), GFP_KERNEL);
	if (!wd_backend) {
		*out_backend = NULL;
		return -ENOMEM;
	}

	*wd_backend = (struct kbase_hwcnt_backend_jm_watchdog){
		.info = wd_info,
		.timeout_ms = hwcnt_backend_watchdog_timer_interval_ms,
		.locked = { .state = HWCNT_JM_WD_IDLE_BUFFER_EMPTY, .is_enabled = false }
	};

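	/* Set up each resource in wd_init_state order. On the first failure the loop stops and
	 * term_partial() below releases only what has already been initialized.
	 */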
	while (state < HWCNT_JM_WD_INIT_END && !errcode) {
		switch (state) {
		case HWCNT_JM_WD_INIT_BACKEND:
			errcode = wd_info->jm_backend_iface->init(jm_info, &wd_backend->jm_backend);
			break;
		case HWCNT_JM_WD_INIT_ENABLE_MAP:
			errcode =
				kbase_hwcnt_enable_map_alloc(metadata, &wd_backend->wd_enable_map);
			break;
		case HWCNT_JM_WD_INIT_DUMP_BUFFER:
			errcode = kbase_hwcnt_dump_buffer_alloc(metadata,
								&wd_backend->wd_dump_buffer);
			break;
		case HWCNT_JM_WD_INIT_END:
			break;
		}
		if (!errcode)
			state++;
	}

	if (errcode) {
		kbasep_hwcnt_backend_jm_watchdog_term_partial(wd_backend, state);
		*out_backend = NULL;
		return errcode;
	}

	WARN_ON(state != HWCNT_JM_WD_INIT_END);

	spin_lock_init(&wd_backend->locked.watchdog_lock);
	init_completion(&wd_backend->watchdog_complete);

	*out_backend = (struct kbase_hwcnt_backend *)wd_backend;
	return 0;
}

/* Job manager watchdog backend, implementation of timestamp_ns */
static u64 kbasep_hwcnt_backend_jm_watchdog_timestamp_ns(struct kbase_hwcnt_backend *backend)
{
	struct kbase_hwcnt_backend_jm_watchdog *const wd_backend = (void *)backend;

	return wd_backend->info->jm_backend_iface->timestamp_ns(wd_backend->jm_backend);
}

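/* Common helper for dump_enable and dump_enable_nolock: copies the enable map, enables the
 * wrapped job manager backend through the given enabler function, then starts the watchdog
 * timer. If the timer cannot be enabled, the job manager backend is disabled again.
 */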
static int kbasep_hwcnt_backend_jm_watchdog_dump_enable_common(
	struct kbase_hwcnt_backend_jm_watchdog *wd_backend,
	const struct kbase_hwcnt_enable_map *enable_map, kbase_hwcnt_backend_dump_enable_fn enabler)
{
	int errcode = -EPERM;
	unsigned long flags;

	if (WARN_ON(!wd_backend) || WARN_ON(!enable_map))
		return -EINVAL;

	spin_lock_irqsave(&wd_backend->locked.watchdog_lock, flags);

	/* If the backend is already enabled, return an error */
	if (wd_backend->locked.is_enabled) {
		spin_unlock_irqrestore(&wd_backend->locked.watchdog_lock, flags);
		return -EPERM;
	}

	spin_unlock_irqrestore(&wd_backend->locked.watchdog_lock, flags);

	/* Copy the enable map into our watchdog backend copy, for future usage */
	kbase_hwcnt_enable_map_copy(&wd_backend->wd_enable_map, enable_map);

	errcode = enabler(wd_backend->jm_backend, enable_map);
	if (!errcode) {
		/* Enable the dump watchdog */
		errcode = wd_backend->info->dump_watchdog_iface->enable(
			wd_backend->info->dump_watchdog_iface->timer, wd_backend->timeout_ms,
			kbasep_hwcnt_backend_jm_watchdog_timer_callback, wd_backend);
		if (!errcode) {
			spin_lock_irqsave(&wd_backend->locked.watchdog_lock, flags);
			WARN_ON(wd_backend->locked.is_enabled);
			wd_backend->locked.is_enabled = true;
			spin_unlock_irqrestore(&wd_backend->locked.watchdog_lock, flags);
		} else {
			/* Revert the job manager backend back to disabled */
			wd_backend->info->jm_backend_iface->dump_disable(wd_backend->jm_backend);
		}
	}

	return errcode;
}

/* Job manager watchdog backend, implementation of dump_enable */
static int
kbasep_hwcnt_backend_jm_watchdog_dump_enable(struct kbase_hwcnt_backend *backend,
					     const struct kbase_hwcnt_enable_map *enable_map)
{
	struct kbase_hwcnt_backend_jm_watchdog *const wd_backend = (void *)backend;

	return kbasep_hwcnt_backend_jm_watchdog_dump_enable_common(
		wd_backend, enable_map, wd_backend->info->jm_backend_iface->dump_enable);
}

/* Job manager watchdog backend, implementation of dump_enable_nolock */
static int
kbasep_hwcnt_backend_jm_watchdog_dump_enable_nolock(struct kbase_hwcnt_backend *backend,
						    const struct kbase_hwcnt_enable_map *enable_map)
{
	struct kbase_hwcnt_backend_jm_watchdog *const wd_backend = (void *)backend;

	return kbasep_hwcnt_backend_jm_watchdog_dump_enable_common(
		wd_backend, enable_map, wd_backend->info->jm_backend_iface->dump_enable_nolock);
}

/* Job manager watchdog backend, implementation of dump_disable */
static void kbasep_hwcnt_backend_jm_watchdog_dump_disable(struct kbase_hwcnt_backend *backend)
{
	struct kbase_hwcnt_backend_jm_watchdog *const wd_backend = (void *)backend;
	unsigned long flags;

	if (WARN_ON(!backend))
		return;

	spin_lock_irqsave(&wd_backend->locked.watchdog_lock, flags);
	if (!wd_backend->locked.is_enabled) {
		spin_unlock_irqrestore(&wd_backend->locked.watchdog_lock, flags);
		return;
	}

	wd_backend->locked.is_enabled = false;

	/* Discard undumped counter values since the last dump_get. */
	if (wd_backend->locked.state == HWCNT_JM_WD_IDLE_BUFFER_FULL)
		wd_backend->locked.state = HWCNT_JM_WD_IDLE_BUFFER_EMPTY;

	spin_unlock_irqrestore(&wd_backend->locked.watchdog_lock, flags);

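	/* Stop the watchdog timer first so no further callbacks reach the job manager backend,
	 * then disable the wrapped backend itself.
	 */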
	wd_backend->info->dump_watchdog_iface->disable(
		wd_backend->info->dump_watchdog_iface->timer);

	wd_backend->info->jm_backend_iface->dump_disable(wd_backend->jm_backend);
}

/* Job manager watchdog backend, implementation of dump_clear */
static int kbasep_hwcnt_backend_jm_watchdog_dump_clear(struct kbase_hwcnt_backend *backend)
{
	int errcode = -EPERM;
	bool clear_wd_wait_completion = false;
	unsigned long flags;
	struct kbase_hwcnt_backend_jm_watchdog *const wd_backend = (void *)backend;

	if (WARN_ON(!backend))
		return -EINVAL;

	spin_lock_irqsave(&wd_backend->locked.watchdog_lock, flags);
	if (!wd_backend->locked.is_enabled) {
		spin_unlock_irqrestore(&wd_backend->locked.watchdog_lock, flags);
		return -EPERM;
	}

	switch (wd_backend->locked.state) {
	case HWCNT_JM_WD_IDLE_BUFFER_FULL:
	case HWCNT_JM_WD_USER_DUMPING_BUFFER_FULL:
	case HWCNT_JM_WD_IDLE_BUFFER_EMPTY:
	case HWCNT_JM_WD_USER_DUMPING_BUFFER_EMPTY:
		wd_backend->locked.state = HWCNT_JM_WD_BUFFER_CLEARING;
		errcode = 0;
		break;
	case HWCNT_JM_WD_TIMER_DUMPING:
		/* The timer has issued a dump request; once it completes, the job manager
		 * backend buffer will be zero.
		 */
		clear_wd_wait_completion = true;
		/* This thread will have to wait for the callback to terminate and then call a
		 * dump_clear on the job manager backend. We change the state to
		 * HWCNT_JM_WD_TIMER_DUMPING_USER_CLEAR to notify the callback thread there is
		 * no more need to dump the buffer (since we will clear it right after anyway).
		 * We set up a wait queue to synchronize with the callback.
		 */
		reinit_completion(&wd_backend->watchdog_complete);
		wd_backend->locked.state = HWCNT_JM_WD_TIMER_DUMPING_USER_CLEAR;
		errcode = 0;
		break;
	default:
		errcode = -EPERM;
		break;
	}
	spin_unlock_irqrestore(&wd_backend->locked.watchdog_lock, flags);

	if (!errcode) {
		if (clear_wd_wait_completion) {
			/* Waiting for the callback to finish */
			wait_for_completion(&wd_backend->watchdog_complete);
		}

		/* Clearing the job manager backend buffer */
		errcode = wd_backend->info->jm_backend_iface->dump_clear(wd_backend->jm_backend);

		spin_lock_irqsave(&wd_backend->locked.watchdog_lock, flags);

		WARN_ON(wd_backend->locked.state != HWCNT_JM_WD_TIMER_DUMPING_USER_CLEAR &&
			wd_backend->locked.state != HWCNT_JM_WD_BUFFER_CLEARING &&
			wd_backend->locked.state != HWCNT_JM_WD_ERROR);

		WARN_ON(!wd_backend->locked.is_enabled);

		if (!errcode && wd_backend->locked.state != HWCNT_JM_WD_ERROR) {
			/* Setting the internal buffer state to EMPTY */
			wd_backend->locked.state = HWCNT_JM_WD_IDLE_BUFFER_EMPTY;
			/* Resetting the timer. Calling modify on a disabled timer
			 * enables it.
			 */
			wd_backend->info->dump_watchdog_iface->modify(
				wd_backend->info->dump_watchdog_iface->timer,
				wd_backend->timeout_ms);
		} else {
			wd_backend->locked.state = HWCNT_JM_WD_ERROR;
			errcode = -EPERM;
		}

		spin_unlock_irqrestore(&wd_backend->locked.watchdog_lock, flags);
	}

	return errcode;
}

/* Job manager watchdog backend, implementation of dump_request */
static int kbasep_hwcnt_backend_jm_watchdog_dump_request(struct kbase_hwcnt_backend *backend,
							 u64 *dump_time_ns)
{
	bool call_dump_request = false;
	int errcode = 0;
	unsigned long flags;
	struct kbase_hwcnt_backend_jm_watchdog *const wd_backend = (void *)backend;

	if (WARN_ON(!backend) || WARN_ON(!dump_time_ns))
		return -EINVAL;

	spin_lock_irqsave(&wd_backend->locked.watchdog_lock, flags);

	if (!wd_backend->locked.is_enabled) {
		spin_unlock_irqrestore(&wd_backend->locked.watchdog_lock, flags);
		return -EPERM;
	}

	switch (wd_backend->locked.state) {
	case HWCNT_JM_WD_IDLE_BUFFER_EMPTY:
		/* Progressing the state to avoid callbacks running while calling the job manager
		 * backend
		 */
		wd_backend->locked.state = HWCNT_JM_WD_USER_DUMPING_BUFFER_EMPTY;
		call_dump_request = true;
		break;
	case HWCNT_JM_WD_IDLE_BUFFER_FULL:
		wd_backend->locked.state = HWCNT_JM_WD_USER_DUMPING_BUFFER_FULL;
		call_dump_request = true;
		break;
	case HWCNT_JM_WD_TIMER_DUMPING:
		/* Report the timestamp of the dump already requested by the timer callback */
		*dump_time_ns = wd_backend->wd_dump_timestamp;
		/* On the next client call (dump_wait) the thread will have to wait for the
		 * callback to finish the dumping.
		 * We set up a wait queue to synchronize with the callback.
		 */
		reinit_completion(&wd_backend->watchdog_complete);
		wd_backend->locked.state = HWCNT_JM_WD_TIMER_DUMPING_USER_REQUESTED;
		break;
	default:
		errcode = -EPERM;
		break;
	}
	spin_unlock_irqrestore(&wd_backend->locked.watchdog_lock, flags);

	if (call_dump_request) {
		errcode = wd_backend->info->jm_backend_iface->dump_request(wd_backend->jm_backend,
									   dump_time_ns);
		if (!errcode) {
			/* Resetting the timer. Calling modify on a disabled timer enables it */
			wd_backend->info->dump_watchdog_iface->modify(
				wd_backend->info->dump_watchdog_iface->timer,
				wd_backend->timeout_ms);
		} else {
			spin_lock_irqsave(&wd_backend->locked.watchdog_lock, flags);
			WARN_ON(!wd_backend->locked.is_enabled);
			wd_backend->locked.state = HWCNT_JM_WD_ERROR;
			spin_unlock_irqrestore(&wd_backend->locked.watchdog_lock, flags);
		}
	}

	return errcode;
}

/* Job manager watchdog backend, implementation of dump_wait */
static int kbasep_hwcnt_backend_jm_watchdog_dump_wait(struct kbase_hwcnt_backend *backend)
{
	int errcode = -EPERM;
	bool wait_for_auto_dump = false, wait_for_user_dump = false;
	struct kbase_hwcnt_backend_jm_watchdog *const wd_backend = (void *)backend;
	unsigned long flags;

	if (WARN_ON(!backend))
		return -EINVAL;

	spin_lock_irqsave(&wd_backend->locked.watchdog_lock, flags);
	if (!wd_backend->locked.is_enabled) {
		spin_unlock_irqrestore(&wd_backend->locked.watchdog_lock, flags);
		return -EPERM;
	}

	switch (wd_backend->locked.state) {
	case HWCNT_JM_WD_TIMER_DUMPING_USER_REQUESTED:
		wait_for_auto_dump = true;
		errcode = 0;
		break;
	case HWCNT_JM_WD_USER_DUMPING_BUFFER_EMPTY:
	case HWCNT_JM_WD_USER_DUMPING_BUFFER_FULL:
		wait_for_user_dump = true;
		errcode = 0;
		break;
	default:
		errcode = -EPERM;
		break;
	}
	spin_unlock_irqrestore(&wd_backend->locked.watchdog_lock, flags);

	if (wait_for_auto_dump) {
		wait_for_completion(&wd_backend->watchdog_complete);
	} else if (wait_for_user_dump) {
		errcode = wd_backend->info->jm_backend_iface->dump_wait(wd_backend->jm_backend);
		if (errcode) {
			spin_lock_irqsave(&wd_backend->locked.watchdog_lock, flags);
			WARN_ON(!wd_backend->locked.is_enabled);
			wd_backend->locked.state = HWCNT_JM_WD_ERROR;
			spin_unlock_irqrestore(&wd_backend->locked.watchdog_lock, flags);
		}
	}

	return errcode;
}

/* Job manager watchdog backend, implementation of dump_get */
static int kbasep_hwcnt_backend_jm_watchdog_dump_get(
	struct kbase_hwcnt_backend *backend, struct kbase_hwcnt_dump_buffer *dump_buffer,
	const struct kbase_hwcnt_enable_map *enable_map, bool accumulate)
{
	bool call_dump_get = false;
	struct kbase_hwcnt_backend_jm_watchdog *const wd_backend = (void *)backend;
	unsigned long flags;
	int errcode = 0;

	if (WARN_ON(!backend) || WARN_ON(!dump_buffer) || WARN_ON(!enable_map))
		return -EINVAL;

	/* The resultant contents of the dump buffer are only well defined if a prior
	 * call to dump_wait returned successfully, and a new dump has not yet been
	 * requested by a call to dump_request.
	 */

	spin_lock_irqsave(&wd_backend->locked.watchdog_lock, flags);

	switch (wd_backend->locked.state) {
	case HWCNT_JM_WD_TIMER_DUMPING_USER_REQUESTED:
		/* We assume dump_wait has been called and completed successfully */
		if (accumulate)
			kbase_hwcnt_dump_buffer_accumulate(dump_buffer, &wd_backend->wd_dump_buffer,
							   enable_map);
		else
			kbase_hwcnt_dump_buffer_copy(dump_buffer, &wd_backend->wd_dump_buffer,
						     enable_map);

		/* Use the state to indicate that the buffer is now empty */
		wd_backend->locked.state = HWCNT_JM_WD_IDLE_BUFFER_EMPTY;
		break;
	case HWCNT_JM_WD_USER_DUMPING_BUFFER_FULL:
		/* Accumulate or copy watchdog data to the user buffer first so that dump_get can
		 * set the header correctly
		 */
		if (accumulate)
			kbase_hwcnt_dump_buffer_accumulate(dump_buffer, &wd_backend->wd_dump_buffer,
							   enable_map);
		else
			kbase_hwcnt_dump_buffer_copy(dump_buffer, &wd_backend->wd_dump_buffer,
						     enable_map);

		/* Accumulate backend data into the user buffer on top of the watchdog data */
		accumulate = true;
		call_dump_get = true;
		break;
	case HWCNT_JM_WD_USER_DUMPING_BUFFER_EMPTY:
		call_dump_get = true;
		break;
	default:
		errcode = -EPERM;
		break;
	}

	spin_unlock_irqrestore(&wd_backend->locked.watchdog_lock, flags);

	if (call_dump_get && !errcode) {
		/* We just dump the job manager backend into the user buffer, following the
		 * accumulate flag
		 */
		errcode = wd_backend->info->jm_backend_iface->dump_get(
			wd_backend->jm_backend, dump_buffer, enable_map, accumulate);

		spin_lock_irqsave(&wd_backend->locked.watchdog_lock, flags);

		WARN_ON(wd_backend->locked.state != HWCNT_JM_WD_USER_DUMPING_BUFFER_EMPTY &&
			wd_backend->locked.state != HWCNT_JM_WD_USER_DUMPING_BUFFER_FULL &&
			wd_backend->locked.state != HWCNT_JM_WD_TIMER_DUMPING_USER_REQUESTED);

		if (!errcode)
			wd_backend->locked.state = HWCNT_JM_WD_IDLE_BUFFER_EMPTY;
		else
			wd_backend->locked.state = HWCNT_JM_WD_ERROR;

		spin_unlock_irqrestore(&wd_backend->locked.watchdog_lock, flags);
	}

	return errcode;
}

/* exposed methods */

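/*
 * Illustrative usage sketch (not part of this file): a caller is expected to create the wrapped
 * interface from an existing job manager backend interface and a watchdog timer interface, then
 * use it exactly like any other kbase_hwcnt_backend_interface. The variable names below are
 * hypothetical.
 *
 *	struct kbase_hwcnt_backend_interface wd_iface;
 *	struct kbase_hwcnt_backend *backend;
 *	int err;
 *
 *	err = kbase_hwcnt_backend_jm_watchdog_create(&jm_iface, &watchdog_iface, &wd_iface);
 *	if (!err)
 *		err = wd_iface.init(wd_iface.info, &backend);
 *	...
 *	wd_iface.term(backend);
 *	kbase_hwcnt_backend_jm_watchdog_destroy(&wd_iface);
 */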
int kbase_hwcnt_backend_jm_watchdog_create(struct kbase_hwcnt_backend_interface *backend_iface,
					   struct kbase_hwcnt_watchdog_interface *watchdog_iface,
					   struct kbase_hwcnt_backend_interface *out_iface)
{
	struct kbase_hwcnt_backend_jm_watchdog_info *info = NULL;

	if (WARN_ON(!backend_iface) || WARN_ON(!watchdog_iface) || WARN_ON(!out_iface))
		return -EINVAL;

	info = kbasep_hwcnt_backend_jm_watchdog_info_create(backend_iface, watchdog_iface);
	if (!info)
		return -ENOMEM;

	/* Register the watchdog backend methods on the output interface and link it to the info
	 * object, so that the callbacks above can access the info object later on.
	 */
	*out_iface = (struct kbase_hwcnt_backend_interface){
		.info = (void *)info,
		.metadata = kbasep_hwcnt_backend_jm_watchdog_metadata,
		.init = kbasep_hwcnt_backend_jm_watchdog_init,
		.term = kbasep_hwcnt_backend_jm_watchdog_term,
		.timestamp_ns = kbasep_hwcnt_backend_jm_watchdog_timestamp_ns,
		.dump_enable = kbasep_hwcnt_backend_jm_watchdog_dump_enable,
		.dump_enable_nolock = kbasep_hwcnt_backend_jm_watchdog_dump_enable_nolock,
		.dump_disable = kbasep_hwcnt_backend_jm_watchdog_dump_disable,
		.dump_clear = kbasep_hwcnt_backend_jm_watchdog_dump_clear,
		.dump_request = kbasep_hwcnt_backend_jm_watchdog_dump_request,
		.dump_wait = kbasep_hwcnt_backend_jm_watchdog_dump_wait,
		.dump_get = kbasep_hwcnt_backend_jm_watchdog_dump_get
	};

	return 0;
}

void kbase_hwcnt_backend_jm_watchdog_destroy(struct kbase_hwcnt_backend_interface *iface)
{
	if (!iface || !iface->info)
		return;

	kfree((struct kbase_hwcnt_backend_jm_watchdog_info *)iface->info);

	/* Blanking the watchdog backend interface */
	memset(iface, 0, sizeof(*iface));
}