// SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note
/*
 *
 * (C) COPYRIGHT 2019-2022 ARM Limited. All rights reserved.
 *
 * This program is free software and is provided to you under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation, and any use by you of this program is subject to the terms
 * of such GNU license.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, you can access it online at
 * http://www.gnu.org/licenses/gpl-2.0.html.
 *
 */

/**
 * DOC: Mali arbiter power manager state machine and APIs
 */

#include <mali_kbase.h>
#include <mali_kbase_pm.h>
#include <backend/gpu/mali_kbase_irq_internal.h>
#include <backend/gpu/mali_kbase_pm_internal.h>
#include <tl/mali_kbase_tracepoints.h>
#include <mali_kbase_gpuprops.h>

/* A dmesg warning will occur if the GPU is not granted
 * after the following time (in milliseconds) has elapsed.
 */
#define GPU_REQUEST_TIMEOUT 1000
#define KHZ_TO_HZ 1000

#define MAX_L2_SLICES_MASK 0xFF

/* Maximum time in ms before deferring probe in case the
 * GPU_GRANTED message is not received
 */
static int gpu_req_timeout = 1;
module_param(gpu_req_timeout, int, 0644);
MODULE_PARM_DESC(gpu_req_timeout,
	"On a virtualized platform, if the GPU is not granted within this time (ms) kbase will defer the probe");

static void kbase_arbiter_pm_vm_wait_gpu_assignment(struct kbase_device *kbdev);
static inline bool kbase_arbiter_pm_vm_gpu_assigned_lockheld(
		struct kbase_device *kbdev);

/**
 * kbase_arbiter_pm_vm_state_str() - Helper function to get the string
 * representation of the kbase VM state (for debugging).
 * @state: kbase VM state
 *
 * Return: string representation of enum kbase_vm_state
 */
static inline const char *kbase_arbiter_pm_vm_state_str(
		enum kbase_vm_state state)
{
	switch (state) {
	case KBASE_VM_STATE_INITIALIZING:
		return "KBASE_VM_STATE_INITIALIZING";
	case KBASE_VM_STATE_INITIALIZING_WITH_GPU:
		return "KBASE_VM_STATE_INITIALIZING_WITH_GPU";
	case KBASE_VM_STATE_SUSPENDED:
		return "KBASE_VM_STATE_SUSPENDED";
	case KBASE_VM_STATE_STOPPED:
		return "KBASE_VM_STATE_STOPPED";
	case KBASE_VM_STATE_STOPPED_GPU_REQUESTED:
		return "KBASE_VM_STATE_STOPPED_GPU_REQUESTED";
	case KBASE_VM_STATE_STARTING:
		return "KBASE_VM_STATE_STARTING";
	case KBASE_VM_STATE_IDLE:
		return "KBASE_VM_STATE_IDLE";
	case KBASE_VM_STATE_ACTIVE:
		return "KBASE_VM_STATE_ACTIVE";
	case KBASE_VM_STATE_STOPPING_IDLE:
		return "KBASE_VM_STATE_STOPPING_IDLE";
	case KBASE_VM_STATE_STOPPING_ACTIVE:
		return "KBASE_VM_STATE_STOPPING_ACTIVE";
	case KBASE_VM_STATE_SUSPEND_PENDING:
		return "KBASE_VM_STATE_SUSPEND_PENDING";
	case KBASE_VM_STATE_SUSPEND_WAIT_FOR_GRANT:
		return "KBASE_VM_STATE_SUSPEND_WAIT_FOR_GRANT";
	default:
		KBASE_DEBUG_ASSERT(false);
		return "[UnknownState]";
	}
}

/**
 * kbase_arbiter_pm_vm_event_str() - Helper function to get the string
 * representation of the kbase VM event (for debugging).
 * @evt: kbase VM event
 *
 * Return: string representation of enum kbase_arbif_evt
 */
static inline const char *kbase_arbiter_pm_vm_event_str(
		enum kbase_arbif_evt evt)
{
	switch (evt) {
	case KBASE_VM_GPU_INITIALIZED_EVT:
		return "KBASE_VM_GPU_INITIALIZED_EVT";
	case KBASE_VM_GPU_STOP_EVT:
		return "KBASE_VM_GPU_STOP_EVT";
	case KBASE_VM_GPU_GRANTED_EVT:
		return "KBASE_VM_GPU_GRANTED_EVT";
	case KBASE_VM_GPU_LOST_EVT:
		return "KBASE_VM_GPU_LOST_EVT";
	case KBASE_VM_OS_SUSPEND_EVENT:
		return "KBASE_VM_OS_SUSPEND_EVENT";
	case KBASE_VM_OS_RESUME_EVENT:
		return "KBASE_VM_OS_RESUME_EVENT";
	case KBASE_VM_GPU_IDLE_EVENT:
		return "KBASE_VM_GPU_IDLE_EVENT";
	case KBASE_VM_REF_EVENT:
		return "KBASE_VM_REF_EVENT";
	default:
		KBASE_DEBUG_ASSERT(false);
		return "[UnknownEvent]";
	}
}

/**
 * kbase_arbiter_pm_vm_set_state() - Sets a new kbase VM state
 * @kbdev: The kbase device structure for the device (must be a valid pointer)
 * @new_state: kbase VM new state
 *
 * This function sets the new state for the VM
 */
static void kbase_arbiter_pm_vm_set_state(struct kbase_device *kbdev,
		enum kbase_vm_state new_state)
{
	struct kbase_arbiter_vm_state *arb_vm_state = kbdev->pm.arb_vm_state;

	dev_dbg(kbdev->dev, "VM set_state %s -> %s",
		kbase_arbiter_pm_vm_state_str(arb_vm_state->vm_state),
		kbase_arbiter_pm_vm_state_str(new_state));

	lockdep_assert_held(&arb_vm_state->vm_state_lock);
	arb_vm_state->vm_state = new_state;
	if (new_state != KBASE_VM_STATE_INITIALIZING_WITH_GPU &&
	    new_state != KBASE_VM_STATE_INITIALIZING)
		KBASE_KTRACE_ADD(kbdev, ARB_VM_STATE, NULL, new_state);
	wake_up(&arb_vm_state->vm_state_wait);
}

/**
 * kbase_arbiter_pm_suspend_wq() - Driver suspend work handler.
 * @data: work item
 *
 * Suspends the driver when the VM is in the SUSPEND_PENDING,
 * STOPPING_IDLE or STOPPING_ACTIVE state.
 */
static void kbase_arbiter_pm_suspend_wq(struct work_struct *data)
{
	struct kbase_arbiter_vm_state *arb_vm_state = container_of(data,
			struct kbase_arbiter_vm_state,
			vm_suspend_work);
	struct kbase_device *kbdev = arb_vm_state->kbdev;

	mutex_lock(&arb_vm_state->vm_state_lock);
	dev_dbg(kbdev->dev, ">%s\n", __func__);
	if (arb_vm_state->vm_state == KBASE_VM_STATE_STOPPING_IDLE ||
	    arb_vm_state->vm_state == KBASE_VM_STATE_STOPPING_ACTIVE ||
	    arb_vm_state->vm_state == KBASE_VM_STATE_SUSPEND_PENDING) {
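		/* Drop the state lock across the driver suspend so the PM
		 * code can interact with this state machine without
		 * deadlocking (inferred from the lock handling here).
		 */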
		mutex_unlock(&arb_vm_state->vm_state_lock);
		dev_dbg(kbdev->dev, ">kbase_pm_driver_suspend\n");
		kbase_pm_driver_suspend(kbdev);
		dev_dbg(kbdev->dev, "<kbase_pm_driver_suspend\n");
		mutex_lock(&arb_vm_state->vm_state_lock);
	}
	mutex_unlock(&arb_vm_state->vm_state_lock);
	dev_dbg(kbdev->dev, "<%s\n", __func__);
}

/**
 * kbase_arbiter_pm_resume_wq() - Driver resume work handler.
 * @data: work item
 *
 * Resumes the driver when the VM is in the STARTING state; if it is in
 * the STOPPING_ACTIVE state instead, completes the stop handling.
 */
static void kbase_arbiter_pm_resume_wq(struct work_struct *data)
{
	struct kbase_arbiter_vm_state *arb_vm_state = container_of(data,
			struct kbase_arbiter_vm_state,
			vm_resume_work);
	struct kbase_device *kbdev = arb_vm_state->kbdev;

	mutex_lock(&arb_vm_state->vm_state_lock);
	dev_dbg(kbdev->dev, ">%s\n", __func__);
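	/* vm_arb_starting marks the window in which this worker is running;
	 * a GPU_STOP arriving in this window queues the suspend work rather
	 * than stopping inline (see the STARTING case in
	 * kbase_arbiter_pm_vm_gpu_stop()).
	 */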
	arb_vm_state->vm_arb_starting = true;
	if (arb_vm_state->vm_state == KBASE_VM_STATE_STARTING) {
		mutex_unlock(&arb_vm_state->vm_state_lock);
		dev_dbg(kbdev->dev, ">kbase_pm_driver_resume\n");
		kbase_pm_driver_resume(kbdev, true);
		dev_dbg(kbdev->dev, "<kbase_pm_driver_resume\n");
		mutex_lock(&arb_vm_state->vm_state_lock);
	} else if (arb_vm_state->vm_state == KBASE_VM_STATE_STOPPING_ACTIVE) {
		kbase_arbiter_pm_vm_stopped(kbdev);
	}
	arb_vm_state->vm_arb_starting = false;
	mutex_unlock(&arb_vm_state->vm_state_lock);
	KBASE_TLSTREAM_TL_ARBITER_STARTED(kbdev, kbdev);
	dev_dbg(kbdev->dev, "<%s\n", __func__);
}

/**
 * request_timer_callback() - Issue warning on request timer expiration
 * @timer: Request hr timer data
 *
 * Called when the Arbiter takes too long to grant the GPU after a
 * request has been made. Issues a warning in dmesg.
 *
 * Return: Always returns HRTIMER_NORESTART
 */
static enum hrtimer_restart request_timer_callback(struct hrtimer *timer)
{
	struct kbase_arbiter_vm_state *arb_vm_state = container_of(timer,
			struct kbase_arbiter_vm_state, vm_request_timer);

	KBASE_DEBUG_ASSERT(arb_vm_state);
	KBASE_DEBUG_ASSERT(arb_vm_state->kbdev);

	dev_warn(arb_vm_state->kbdev->dev,
		 "Still waiting for GPU to be granted from Arbiter after %d ms\n",
		 GPU_REQUEST_TIMEOUT);
	return HRTIMER_NORESTART;
}

/**
 * start_request_timer() - Start a timer after requesting GPU
 * @kbdev: The kbase device structure for the device (must be a valid pointer)
 *
 * Start a timer to track when kbase is waiting for the GPU from the
 * Arbiter. If the timer expires before the GPU is granted, a warning
 * will be issued in dmesg.
 */
static void start_request_timer(struct kbase_device *kbdev)
{
	struct kbase_arbiter_vm_state *arb_vm_state = kbdev->pm.arb_vm_state;

	hrtimer_start(&arb_vm_state->vm_request_timer,
		      HR_TIMER_DELAY_MSEC(GPU_REQUEST_TIMEOUT),
		      HRTIMER_MODE_REL);
}

/**
 * cancel_request_timer() - Stop the request timer
 * @kbdev: The kbase device structure for the device (must be a valid pointer)
 *
 * Stops the request timer once GPU has been granted. Safe to call
 * even if timer is no longer running.
 */
static void cancel_request_timer(struct kbase_device *kbdev)
{
	struct kbase_arbiter_vm_state *arb_vm_state = kbdev->pm.arb_vm_state;

	hrtimer_cancel(&arb_vm_state->vm_request_timer);
}

/**
 * kbase_arbiter_pm_early_init() - Initialize the arbiter for VM
 *                                 paravirtualized use.
 * @kbdev: The kbase device structure for the device (must be a valid pointer)
 *
 * Initializes the arbiter and other required resources at runtime and
 * requests the GPU for the VM for the first time.
 *
 * Return: 0 if success, or a Linux error code
 */
int kbase_arbiter_pm_early_init(struct kbase_device *kbdev)
{
	int err;
	struct kbase_arbiter_vm_state *arb_vm_state = NULL;

	arb_vm_state = kmalloc(sizeof(struct kbase_arbiter_vm_state),
			       GFP_KERNEL);
	if (arb_vm_state == NULL)
		return -ENOMEM;

	arb_vm_state->kbdev = kbdev;
	arb_vm_state->vm_state = KBASE_VM_STATE_INITIALIZING;

	mutex_init(&arb_vm_state->vm_state_lock);
	init_waitqueue_head(&arb_vm_state->vm_state_wait);
	arb_vm_state->vm_arb_wq = alloc_ordered_workqueue("kbase_vm_arb_wq",
							  WQ_HIGHPRI);
	if (!arb_vm_state->vm_arb_wq) {
		dev_err(kbdev->dev, "Failed to allocate vm_arb workqueue\n");
		kfree(arb_vm_state);
		return -ENOMEM;
	}
	INIT_WORK(&arb_vm_state->vm_suspend_work, kbase_arbiter_pm_suspend_wq);
	INIT_WORK(&arb_vm_state->vm_resume_work, kbase_arbiter_pm_resume_wq);
	arb_vm_state->vm_arb_starting = false;
	atomic_set(&kbdev->pm.gpu_users_waiting, 0);
	hrtimer_init(&arb_vm_state->vm_request_timer, CLOCK_MONOTONIC,
		     HRTIMER_MODE_REL);
	arb_vm_state->vm_request_timer.function = request_timer_callback;
	kbdev->pm.arb_vm_state = arb_vm_state;

	err = kbase_arbif_init(kbdev);
	if (err) {
		dev_err(kbdev->dev, "Failed to initialise arbif module\n");
		goto arbif_init_fail;
	}

	if (kbdev->arb.arb_if) {
		kbase_arbif_gpu_request(kbdev);
		dev_dbg(kbdev->dev, "Waiting for initial GPU assignment...\n");

		err = wait_event_timeout(arb_vm_state->vm_state_wait,
			arb_vm_state->vm_state ==
					KBASE_VM_STATE_INITIALIZING_WITH_GPU,
			msecs_to_jiffies(gpu_req_timeout));

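		/* wait_event_timeout() returns 0 if the timeout elapsed
		 * before the condition became true, i.e. no GPU_GRANTED
		 * was received in time.
		 */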
		if (!err) {
			dev_dbg(kbdev->dev,
				"Kbase probe deferred after waiting %d ms to receive GPU_GRANTED\n",
				gpu_req_timeout);

			err = -ENODEV;
			goto arbif_timeout;
		}

		dev_dbg(kbdev->dev,
			"Waiting for initial GPU assignment - done\n");
	}
	return 0;

arbif_timeout:
	kbase_arbiter_pm_early_term(kbdev);
	return err;

arbif_init_fail:
	destroy_workqueue(arb_vm_state->vm_arb_wq);
	kfree(arb_vm_state);
	kbdev->pm.arb_vm_state = NULL;
	return err;
}

/**
 * kbase_arbiter_pm_early_term() - Shutdown arbiter and free resources
 * @kbdev: The kbase device structure for the device (must be a valid pointer)
 *
 * Clean up all the resources
 */
void kbase_arbiter_pm_early_term(struct kbase_device *kbdev)
{
	struct kbase_arbiter_vm_state *arb_vm_state = kbdev->pm.arb_vm_state;

	cancel_request_timer(kbdev);
	mutex_lock(&arb_vm_state->vm_state_lock);
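	/* States beyond STOPPED_GPU_REQUESTED are those in which the VM
	 * still holds the GPU, so report it stopped before tearing down;
	 * note that this relies on the ordering of enum kbase_vm_state.
	 */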
	if (arb_vm_state->vm_state > KBASE_VM_STATE_STOPPED_GPU_REQUESTED) {
		kbase_pm_set_gpu_lost(kbdev, false);
		kbase_arbif_gpu_stopped(kbdev, false);
	}
	mutex_unlock(&arb_vm_state->vm_state_lock);
	destroy_workqueue(arb_vm_state->vm_arb_wq);
	kbase_arbif_destroy(kbdev);
	arb_vm_state->vm_arb_wq = NULL;
	kfree(kbdev->pm.arb_vm_state);
	kbdev->pm.arb_vm_state = NULL;
}

/**
 * kbase_arbiter_pm_release_interrupts() - Release the GPU interrupts
 * @kbdev: The kbase device structure for the device (must be a valid pointer)
 *
 * Releases the interrupts and sets the interrupts_installed flag to false
 */
void kbase_arbiter_pm_release_interrupts(struct kbase_device *kbdev)
{
	struct kbase_arbiter_vm_state *arb_vm_state = kbdev->pm.arb_vm_state;

	mutex_lock(&arb_vm_state->vm_state_lock);
	if (arb_vm_state->interrupts_installed) {
		arb_vm_state->interrupts_installed = false;
		kbase_release_interrupts(kbdev);
	}
	mutex_unlock(&arb_vm_state->vm_state_lock);
}

/**
 * kbase_arbiter_pm_install_interrupts() - Install the GPU interrupts
 * @kbdev: The kbase device structure for the device (must be a valid pointer)
 *
 * Installs the interrupts and sets the interrupts_installed flag to true.
 *
 * Return: 0 if success, or a Linux error code
 */
int kbase_arbiter_pm_install_interrupts(struct kbase_device *kbdev)
{
	struct kbase_arbiter_vm_state *arb_vm_state = kbdev->pm.arb_vm_state;
	int err;

	mutex_lock(&arb_vm_state->vm_state_lock);
	arb_vm_state->interrupts_installed = true;
	err = kbase_install_interrupts(kbdev);
	mutex_unlock(&arb_vm_state->vm_state_lock);
	return err;
}

/**
 * kbase_arbiter_pm_vm_stopped() - Handle stop state for the VM
 * @kbdev: The kbase device structure for the device (must be a valid pointer)
 *
 * Handles a stop state for the VM
 */
void kbase_arbiter_pm_vm_stopped(struct kbase_device *kbdev)
{
	bool request_gpu = false;
	struct kbase_arbiter_vm_state *arb_vm_state = kbdev->pm.arb_vm_state;

	lockdep_assert_held(&arb_vm_state->vm_state_lock);

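	/* If clients are already waiting for the GPU, upgrade an idle stop
	 * to an active stop so that a new GPU request is sent to the
	 * Arbiter as soon as the stop completes (see the STOPPING_ACTIVE
	 * case below).
	 */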
	if (atomic_read(&kbdev->pm.gpu_users_waiting) > 0 &&
	    arb_vm_state->vm_state == KBASE_VM_STATE_STOPPING_IDLE)
		kbase_arbiter_pm_vm_set_state(kbdev,
					      KBASE_VM_STATE_STOPPING_ACTIVE);

	dev_dbg(kbdev->dev, "%s %s\n", __func__,
		kbase_arbiter_pm_vm_state_str(arb_vm_state->vm_state));

	if (arb_vm_state->interrupts_installed) {
		arb_vm_state->interrupts_installed = false;
		kbase_release_interrupts(kbdev);
	}

	switch (arb_vm_state->vm_state) {
	case KBASE_VM_STATE_STOPPING_ACTIVE:
		request_gpu = true;
		kbase_arbiter_pm_vm_set_state(kbdev,
					      KBASE_VM_STATE_STOPPED_GPU_REQUESTED);
		break;
	case KBASE_VM_STATE_STOPPING_IDLE:
		kbase_arbiter_pm_vm_set_state(kbdev, KBASE_VM_STATE_STOPPED);
		break;
	case KBASE_VM_STATE_SUSPEND_PENDING:
		kbase_arbiter_pm_vm_set_state(kbdev, KBASE_VM_STATE_SUSPENDED);
		break;
	default:
		dev_warn(kbdev->dev, "unexpected pm_stop VM state %u",
			 arb_vm_state->vm_state);
		break;
	}

	kbase_pm_set_gpu_lost(kbdev, false);
	kbase_arbif_gpu_stopped(kbdev, request_gpu);
	if (request_gpu)
		start_request_timer(kbdev);
}

void kbase_arbiter_set_max_config(struct kbase_device *kbdev,
				  uint32_t max_l2_slices,
				  uint32_t max_core_mask)
{
	struct kbase_arbiter_vm_state *arb_vm_state;
	struct max_config_props max_config;

	if (!kbdev)
		return;

	/* Mask the max_l2_slices as it is stored as 8 bits into kbase */
	max_config.l2_slices = max_l2_slices & MAX_L2_SLICES_MASK;
	max_config.core_mask = max_core_mask;
	arb_vm_state = kbdev->pm.arb_vm_state;

	mutex_lock(&arb_vm_state->vm_state_lock);
	/* Just set the max_props in kbase during initialization. */
	if (arb_vm_state->vm_state == KBASE_VM_STATE_INITIALIZING)
		kbase_gpuprops_set_max_config(kbdev, &max_config);
	else
		dev_dbg(kbdev->dev, "Unexpected max_config on VM state %s",
			kbase_arbiter_pm_vm_state_str(arb_vm_state->vm_state));

	mutex_unlock(&arb_vm_state->vm_state_lock);
}

int kbase_arbiter_pm_gpu_assigned(struct kbase_device *kbdev)
{
	struct kbase_arbiter_vm_state *arb_vm_state;
	int result = -EINVAL;

	if (!kbdev)
		return result;

	/* First check the GPU_LOST state */
	kbase_pm_lock(kbdev);
	if (kbase_pm_is_gpu_lost(kbdev)) {
		kbase_pm_unlock(kbdev);
		return 0;
	}
	kbase_pm_unlock(kbdev);

	/* Then the arbitration state machine */
	arb_vm_state = kbdev->pm.arb_vm_state;

	mutex_lock(&arb_vm_state->vm_state_lock);
	switch (arb_vm_state->vm_state) {
	case KBASE_VM_STATE_INITIALIZING:
	case KBASE_VM_STATE_SUSPENDED:
	case KBASE_VM_STATE_STOPPED:
	case KBASE_VM_STATE_STOPPED_GPU_REQUESTED:
	case KBASE_VM_STATE_SUSPEND_WAIT_FOR_GRANT:
		result = 0;
		break;
	default:
		result = 1;
		break;
	}
	mutex_unlock(&arb_vm_state->vm_state_lock);

	return result;
}

/**
 * kbase_arbiter_pm_vm_gpu_start() - Handles the start state of the VM
 * @kbdev: The kbase device structure for the device (must be a valid pointer)
 *
 * Handles the start state of the VM
 */
static void kbase_arbiter_pm_vm_gpu_start(struct kbase_device *kbdev)
{
	struct kbase_arbiter_vm_state *arb_vm_state = kbdev->pm.arb_vm_state;
	bool freq_updated = false;

	lockdep_assert_held(&arb_vm_state->vm_state_lock);
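	/* Latch and clear any pending frequency update: a GPU_GRANTED that
	 * merely carries a new frequency can legitimately arrive in states
	 * that would otherwise trigger the warning in the default case
	 * below.
	 */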
	mutex_lock(&kbdev->arb.arb_freq.arb_freq_lock);
	if (kbdev->arb.arb_freq.freq_updated) {
		kbdev->arb.arb_freq.freq_updated = false;
		freq_updated = true;
	}
	mutex_unlock(&kbdev->arb.arb_freq.arb_freq_lock);

	cancel_request_timer(kbdev);
	switch (arb_vm_state->vm_state) {
	case KBASE_VM_STATE_INITIALIZING:
		kbase_arbiter_pm_vm_set_state(kbdev,
					      KBASE_VM_STATE_INITIALIZING_WITH_GPU);
		break;
	case KBASE_VM_STATE_STOPPED_GPU_REQUESTED:
		kbase_arbiter_pm_vm_set_state(kbdev, KBASE_VM_STATE_STARTING);
		arb_vm_state->interrupts_installed = true;
		kbase_install_interrupts(kbdev);
		/*
		 * GPU GRANTED received while in stop can be a result of a
		 * repartitioning.
		 */
		kbase_gpuprops_req_curr_config_update(kbdev);
		/* curr_config will be updated while resuming the PM. */
		queue_work(arb_vm_state->vm_arb_wq,
			   &arb_vm_state->vm_resume_work);
		break;
	case KBASE_VM_STATE_SUSPEND_WAIT_FOR_GRANT:
		kbase_pm_set_gpu_lost(kbdev, false);
		kbase_arbif_gpu_stopped(kbdev, false);
		kbase_arbiter_pm_vm_set_state(kbdev, KBASE_VM_STATE_SUSPENDED);
		break;
	default:
		/*
		 * GPU_GRANTED can be received when there is a frequency update.
		 * Only show a warning if received in an unexpected state
		 * without a frequency update.
		 */
		if (!freq_updated)
			dev_warn(kbdev->dev,
				 "GPU_GRANTED when not expected - state %s\n",
				 kbase_arbiter_pm_vm_state_str(
					arb_vm_state->vm_state));
		break;
	}
}

/**
 * kbase_arbiter_pm_vm_gpu_stop() - Handles the stop state of the VM
 * @kbdev: The kbase device structure for the device (must be a valid pointer)
 *
 * Handles the stop state of the VM
 */
static void kbase_arbiter_pm_vm_gpu_stop(struct kbase_device *kbdev)
{
	struct kbase_arbiter_vm_state *arb_vm_state = kbdev->pm.arb_vm_state;

	lockdep_assert_held(&arb_vm_state->vm_state_lock);
	if (arb_vm_state->vm_state == KBASE_VM_STATE_INITIALIZING_WITH_GPU) {
		mutex_unlock(&arb_vm_state->vm_state_lock);
		kbase_arbiter_pm_vm_wait_gpu_assignment(kbdev);
		mutex_lock(&arb_vm_state->vm_state_lock);
	}

	switch (arb_vm_state->vm_state) {
	case KBASE_VM_STATE_IDLE:
		kbase_arbiter_pm_vm_set_state(kbdev,
					      KBASE_VM_STATE_STOPPING_IDLE);
		queue_work(arb_vm_state->vm_arb_wq,
			   &arb_vm_state->vm_suspend_work);
		break;
	case KBASE_VM_STATE_ACTIVE:
		kbase_arbiter_pm_vm_set_state(kbdev,
					      KBASE_VM_STATE_STOPPING_ACTIVE);
		queue_work(arb_vm_state->vm_arb_wq,
			   &arb_vm_state->vm_suspend_work);
		break;
	case KBASE_VM_STATE_STARTING:
		dev_dbg(kbdev->dev, "Got GPU_STOP event while STARTING.");
		kbase_arbiter_pm_vm_set_state(kbdev,
					      KBASE_VM_STATE_STOPPING_ACTIVE);
		if (arb_vm_state->vm_arb_starting)
			queue_work(arb_vm_state->vm_arb_wq,
				   &arb_vm_state->vm_suspend_work);
		break;
	case KBASE_VM_STATE_SUSPEND_PENDING:
		/* Suspend finishes with a stop so nothing else to do */
		break;
	default:
		dev_warn(kbdev->dev, "GPU_STOP when not expected - state %s\n",
			 kbase_arbiter_pm_vm_state_str(arb_vm_state->vm_state));
		break;
	}
}

/**
 * kbase_gpu_lost() - Handle a GPU lost event
 * @kbdev: The kbase device structure for the device (must be a valid pointer)
 *
 * On a GPU lost event, moves the VM to a stopped state and triggers the
 * GPU lost handling in the rest of the driver where required.
 */
static void kbase_gpu_lost(struct kbase_device *kbdev)
{
	struct kbase_arbiter_vm_state *arb_vm_state = kbdev->pm.arb_vm_state;
	bool handle_gpu_lost = false;

	lockdep_assert_held(&arb_vm_state->vm_state_lock);

	switch (arb_vm_state->vm_state) {
	case KBASE_VM_STATE_STARTING:
	case KBASE_VM_STATE_ACTIVE:
	case KBASE_VM_STATE_IDLE:
		dev_warn(kbdev->dev, "GPU lost in state %s",
			 kbase_arbiter_pm_vm_state_str(arb_vm_state->vm_state));
		kbase_arbiter_pm_vm_gpu_stop(kbdev);
		handle_gpu_lost = true;
		break;
	case KBASE_VM_STATE_STOPPING_IDLE:
	case KBASE_VM_STATE_STOPPING_ACTIVE:
	case KBASE_VM_STATE_SUSPEND_PENDING:
		dev_dbg(kbdev->dev, "GPU lost while stopping");
		handle_gpu_lost = true;
		break;
	case KBASE_VM_STATE_SUSPENDED:
	case KBASE_VM_STATE_STOPPED:
	case KBASE_VM_STATE_STOPPED_GPU_REQUESTED:
		dev_dbg(kbdev->dev, "GPU lost while already stopped");
		break;
	case KBASE_VM_STATE_SUSPEND_WAIT_FOR_GRANT:
		dev_dbg(kbdev->dev, "GPU lost while waiting to suspend");
		kbase_arbiter_pm_vm_set_state(kbdev, KBASE_VM_STATE_SUSPENDED);
		break;
	default:
		break;
	}
	if (handle_gpu_lost) {
		/* Releasing the VM state lock here is safe because
		 * we are guaranteed to be in either STOPPING_IDLE,
		 * STOPPING_ACTIVE or SUSPEND_PENDING at this point.
		 * The only transitions that are valid from here are to
		 * STOPPED, STOPPED_GPU_REQUESTED or SUSPENDED which can
		 * only happen at the completion of the GPU lost handling.
		 */
		mutex_unlock(&arb_vm_state->vm_state_lock);
		kbase_pm_handle_gpu_lost(kbdev);
		mutex_lock(&arb_vm_state->vm_state_lock);
	}
}

/**
 * kbase_arbiter_pm_vm_os_suspend_ready_state() - Checks if the VM is ready
 * to be moved to the suspended state.
 * @kbdev: The kbase device structure for the device (must be a valid pointer)
 *
 * Return: true if it is ready to be suspended, else false.
 */
static inline bool kbase_arbiter_pm_vm_os_suspend_ready_state(
		struct kbase_device *kbdev)
{
	switch (kbdev->pm.arb_vm_state->vm_state) {
	case KBASE_VM_STATE_SUSPENDED:
	case KBASE_VM_STATE_STOPPED:
	case KBASE_VM_STATE_IDLE:
	case KBASE_VM_STATE_ACTIVE:
		return true;
	default:
		return false;
	}
}

/**
 * kbase_arbiter_pm_vm_os_prepare_suspend() - Prepare the OS suspend,
 * waiting until the VM reaches a state that can be suspended
 * @kbdev: The kbase device structure for the device (must be a valid pointer)
 *
 * Prepares the OS to enter the suspend state, blocking until the VM is in
 * a stable state, which may require waiting for an outstanding GPU grant
 * from the Arbiter.
 */
static void kbase_arbiter_pm_vm_os_prepare_suspend(struct kbase_device *kbdev)
{
	struct kbase_arbiter_vm_state *arb_vm_state = kbdev->pm.arb_vm_state;
	enum kbase_vm_state prev_state;

	lockdep_assert_held(&arb_vm_state->vm_state_lock);
	if (kbdev->arb.arb_if) {
		if (kbdev->pm.arb_vm_state->vm_state ==
					KBASE_VM_STATE_SUSPENDED)
			return;
	}
	/* Block suspend OS function until we are in a stable state
	 * with vm_state_lock
	 */
	while (!kbase_arbiter_pm_vm_os_suspend_ready_state(kbdev)) {
		prev_state = arb_vm_state->vm_state;
		switch (arb_vm_state->vm_state) {
		case KBASE_VM_STATE_STOPPING_ACTIVE:
		case KBASE_VM_STATE_STOPPING_IDLE:
			kbase_arbiter_pm_vm_set_state(kbdev,
				KBASE_VM_STATE_SUSPEND_PENDING);
			break;
		case KBASE_VM_STATE_STOPPED_GPU_REQUESTED:
			kbase_arbiter_pm_vm_set_state(kbdev,
				KBASE_VM_STATE_SUSPEND_WAIT_FOR_GRANT);
			break;
		case KBASE_VM_STATE_STARTING:
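			/* If the resume worker is not actually running
			 * (vm_arb_starting is false), it is safe to move
			 * straight to SUSPEND_PENDING and report the VM as
			 * stopped here; otherwise wait below for the state
			 * to move on (assumption: the in-flight resume, or
			 * a subsequent event, changes the state).
			 */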
			if (!arb_vm_state->vm_arb_starting) {
				kbase_arbiter_pm_vm_set_state(kbdev,
					KBASE_VM_STATE_SUSPEND_PENDING);
				kbase_arbiter_pm_vm_stopped(kbdev);
			}
			break;
		default:
			break;
		}
		mutex_unlock(&arb_vm_state->vm_state_lock);
		wait_event(arb_vm_state->vm_state_wait,
			   arb_vm_state->vm_state != prev_state);
		mutex_lock(&arb_vm_state->vm_state_lock);
	}

	switch (arb_vm_state->vm_state) {
	case KBASE_VM_STATE_STOPPED:
		kbase_arbiter_pm_vm_set_state(kbdev,
					      KBASE_VM_STATE_SUSPENDED);
		break;
	case KBASE_VM_STATE_IDLE:
	case KBASE_VM_STATE_ACTIVE:
		kbase_arbiter_pm_vm_set_state(kbdev,
					      KBASE_VM_STATE_SUSPEND_PENDING);
		mutex_unlock(&arb_vm_state->vm_state_lock);
		/* Ensure resume has completed fully before starting suspend */
		flush_work(&arb_vm_state->vm_resume_work);
		kbase_pm_driver_suspend(kbdev);
		mutex_lock(&arb_vm_state->vm_state_lock);
		break;
	case KBASE_VM_STATE_SUSPENDED:
		break;
	default:
		KBASE_DEBUG_ASSERT_MSG(false, "Unexpected state to suspend");
		break;
	}
}

/**
 * kbase_arbiter_pm_vm_os_resume() - Resume the OS once the grant message
 * is received from the arbiter
 * @kbdev: The kbase device structure for the device (must be a valid pointer)
 *
 * Resumes the OS, requesting the GPU and then waiting until the GRANT
 * message has been received asynchronously from the Arbiter and the
 * resume has fully completed.
 */
static void kbase_arbiter_pm_vm_os_resume(struct kbase_device *kbdev)
{
	struct kbase_arbiter_vm_state *arb_vm_state = kbdev->pm.arb_vm_state;

	lockdep_assert_held(&arb_vm_state->vm_state_lock);
	KBASE_DEBUG_ASSERT_MSG(arb_vm_state->vm_state ==
			       KBASE_VM_STATE_SUSPENDED,
			       "Unexpected state to resume");

	kbase_arbiter_pm_vm_set_state(kbdev,
				      KBASE_VM_STATE_STOPPED_GPU_REQUESTED);
	kbase_arbif_gpu_request(kbdev);
	start_request_timer(kbdev);

	/* Release lock and block resume OS function until we have
	 * asynchronously received the GRANT message from the Arbiter and
	 * fully resumed
	 */
	mutex_unlock(&arb_vm_state->vm_state_lock);
	kbase_arbiter_pm_vm_wait_gpu_assignment(kbdev);
	flush_work(&arb_vm_state->vm_resume_work);
	mutex_lock(&arb_vm_state->vm_state_lock);
}

/**
 * kbase_arbiter_pm_vm_event() - Dispatch VM event to the state machine.
 * @kbdev: The kbase device structure for the device (must be a valid pointer)
 * @evt: VM event
 *
 * The state machine function. Receives events and transitions states
 * according to the event received and the current state
 */
void kbase_arbiter_pm_vm_event(struct kbase_device *kbdev,
			       enum kbase_arbif_evt evt)
{
	struct kbase_arbiter_vm_state *arb_vm_state = kbdev->pm.arb_vm_state;

	if (!kbdev->arb.arb_if)
		return;

	mutex_lock(&arb_vm_state->vm_state_lock);
	dev_dbg(kbdev->dev, "%s %s\n", __func__,
		kbase_arbiter_pm_vm_event_str(evt));
	if (arb_vm_state->vm_state != KBASE_VM_STATE_INITIALIZING_WITH_GPU &&
	    arb_vm_state->vm_state != KBASE_VM_STATE_INITIALIZING)
		KBASE_KTRACE_ADD(kbdev, ARB_VM_EVT, NULL, evt);
	switch (evt) {
	case KBASE_VM_GPU_GRANTED_EVT:
		kbase_arbiter_pm_vm_gpu_start(kbdev);
		break;
	case KBASE_VM_GPU_STOP_EVT:
		kbase_arbiter_pm_vm_gpu_stop(kbdev);
		break;
	case KBASE_VM_GPU_LOST_EVT:
		dev_dbg(kbdev->dev, "KBASE_ARBIF_GPU_LOST_EVT!");
		kbase_gpu_lost(kbdev);
		break;
	case KBASE_VM_OS_SUSPEND_EVENT:
		kbase_arbiter_pm_vm_os_prepare_suspend(kbdev);
		break;
	case KBASE_VM_OS_RESUME_EVENT:
		kbase_arbiter_pm_vm_os_resume(kbdev);
		break;
	case KBASE_VM_GPU_IDLE_EVENT:
		switch (arb_vm_state->vm_state) {
		case KBASE_VM_STATE_ACTIVE:
			kbase_arbiter_pm_vm_set_state(kbdev,
						      KBASE_VM_STATE_IDLE);
			kbase_arbif_gpu_idle(kbdev);
			break;
		default:
			break;
		}
		break;

	case KBASE_VM_REF_EVENT:
		switch (arb_vm_state->vm_state) {
		case KBASE_VM_STATE_STARTING:
		case KBASE_VM_STATE_IDLE:
			kbase_arbiter_pm_vm_set_state(kbdev,
						      KBASE_VM_STATE_ACTIVE);
			kbase_arbif_gpu_active(kbdev);
			break;
		case KBASE_VM_STATE_STOPPING_IDLE:
			kbase_arbiter_pm_vm_set_state(kbdev,
						      KBASE_VM_STATE_STOPPING_ACTIVE);
			break;
		default:
			break;
		}
		break;

	case KBASE_VM_GPU_INITIALIZED_EVT:
		switch (arb_vm_state->vm_state) {
		case KBASE_VM_STATE_INITIALIZING_WITH_GPU:
			lockdep_assert_held(&kbdev->pm.lock);
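			/* The initial state is chosen from the PM active
			 * count: if a PM reference is already held the VM
			 * goes straight to ACTIVE, otherwise it starts IDLE
			 * and the Arbiter is told so.
			 */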
			if (kbdev->pm.active_count > 0) {
				kbase_arbiter_pm_vm_set_state(kbdev,
					KBASE_VM_STATE_ACTIVE);
				kbase_arbif_gpu_active(kbdev);
			} else {
				kbase_arbiter_pm_vm_set_state(kbdev,
					KBASE_VM_STATE_IDLE);
				kbase_arbif_gpu_idle(kbdev);
			}
			break;
		default:
			break;
		}
		break;

	default:
		dev_alert(kbdev->dev, "Got Unknown Event!");
		break;
	}
	mutex_unlock(&arb_vm_state->vm_state_lock);
}

KBASE_EXPORT_TEST_API(kbase_arbiter_pm_vm_event);

/**
 * kbase_arbiter_pm_vm_wait_gpu_assignment() - VM wait for a GPU assignment.
 * @kbdev: The kbase device structure for the device (must be a valid pointer)
 *
 * VM waits for a GPU assignment.
 */
static void kbase_arbiter_pm_vm_wait_gpu_assignment(struct kbase_device *kbdev)
{
	struct kbase_arbiter_vm_state *arb_vm_state = kbdev->pm.arb_vm_state;

	dev_dbg(kbdev->dev, "Waiting for GPU assignment...\n");
	wait_event(arb_vm_state->vm_state_wait,
		   arb_vm_state->vm_state == KBASE_VM_STATE_IDLE ||
		   arb_vm_state->vm_state == KBASE_VM_STATE_ACTIVE);
	dev_dbg(kbdev->dev, "Waiting for GPU assignment - done\n");
}

/**
 * kbase_arbiter_pm_vm_gpu_assigned_lockheld() - Check if the GPU is assigned
 * to the VM (VM state lock held)
 * @kbdev: The kbase device structure for the device (must be a valid pointer)
 *
 * Checks if the GPU is currently assigned to the VM. The caller must hold
 * the VM state lock.
 *
 * Return: true if GPU is assigned, else false.
 */
static inline bool kbase_arbiter_pm_vm_gpu_assigned_lockheld(
		struct kbase_device *kbdev)
{
	struct kbase_arbiter_vm_state *arb_vm_state = kbdev->pm.arb_vm_state;

	lockdep_assert_held(&arb_vm_state->vm_state_lock);
	return (arb_vm_state->vm_state == KBASE_VM_STATE_IDLE ||
		arb_vm_state->vm_state == KBASE_VM_STATE_ACTIVE);
}

/**
 * kbase_arbiter_pm_ctx_active_handle_suspend() - Handle suspend operation for
 * arbitration mode
 * @kbdev: The kbase device structure for the device (must be a valid pointer)
 * @suspend_handler: The handler code for how to handle a suspend
 *                   that might occur
 *
 * This function handles a suspend event from the driver,
 * communicating with the arbiter and waiting synchronously for the GPU
 * to be granted again depending on the VM state.
 *
 * Return: 0 on success, or 1 if the suspend handler is not possible.
 */
int kbase_arbiter_pm_ctx_active_handle_suspend(struct kbase_device *kbdev,
		enum kbase_pm_suspend_handler suspend_handler)
{
	struct kbase_arbiter_vm_state *arb_vm_state = kbdev->pm.arb_vm_state;
	int res = 0;

	if (kbdev->arb.arb_if) {
		mutex_lock(&arb_vm_state->vm_state_lock);
		while (!kbase_arbiter_pm_vm_gpu_assigned_lockheld(kbdev)) {
			/* Update VM state since we have GPU work to do */
			if (arb_vm_state->vm_state ==
					KBASE_VM_STATE_STOPPING_IDLE)
				kbase_arbiter_pm_vm_set_state(kbdev,
					KBASE_VM_STATE_STOPPING_ACTIVE);
			else if (arb_vm_state->vm_state ==
					KBASE_VM_STATE_STOPPED) {
				kbase_arbiter_pm_vm_set_state(kbdev,
					KBASE_VM_STATE_STOPPED_GPU_REQUESTED);
				kbase_arbif_gpu_request(kbdev);
				start_request_timer(kbdev);
			} else if (arb_vm_state->vm_state ==
					KBASE_VM_STATE_INITIALIZING_WITH_GPU)
				break;

			if (suspend_handler !=
				KBASE_PM_SUSPEND_HANDLER_NOT_POSSIBLE) {

				/* In case of GPU lost, even if
				 * active_count > 0, we no longer have GPU
				 * access
				 */
				if (kbase_pm_is_gpu_lost(kbdev))
					res = 1;

				switch (suspend_handler) {
				case KBASE_PM_SUSPEND_HANDLER_DONT_INCREASE:
					res = 1;
					break;
				case KBASE_PM_SUSPEND_HANDLER_DONT_REACTIVATE:
					if (kbdev->pm.active_count == 0)
						res = 1;
					break;
				case KBASE_PM_SUSPEND_HANDLER_VM_GPU_GRANTED:
					break;
				default:
					WARN(1, "Unknown suspend_handler\n");
					res = 1;
					break;
				}
				break;
			}

			/* Need to synchronously wait for GPU assignment */
			atomic_inc(&kbdev->pm.gpu_users_waiting);
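			/* Both the VM state lock and the PM lock are
			 * released before blocking, presumably so Arbiter
			 * events and the resume worker can make progress
			 * while this thread waits (inferred from the lock
			 * handling here).
			 */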
			mutex_unlock(&arb_vm_state->vm_state_lock);
			kbase_pm_unlock(kbdev);
			kbase_arbiter_pm_vm_wait_gpu_assignment(kbdev);
			kbase_pm_lock(kbdev);
			mutex_lock(&arb_vm_state->vm_state_lock);
			atomic_dec(&kbdev->pm.gpu_users_waiting);
		}
		mutex_unlock(&arb_vm_state->vm_state_lock);
	}
	return res;
}

/**
 * kbase_arbiter_pm_update_gpu_freq() - Updates the GPU clock frequency
 * received from the arbiter.
 * @arb_freq: Pointer to structure holding GPU clock frequency data
 * @freq: New frequency value in KHz
 */
void kbase_arbiter_pm_update_gpu_freq(struct kbase_arbiter_freq *arb_freq,
				      uint32_t freq)
{
	struct kbase_gpu_clk_notifier_data ndata;

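	/* Only propagate a genuine change; freq_updated additionally lets
	 * the state machine tolerate a GPU_GRANTED caused purely by a
	 * frequency update (see kbase_arbiter_pm_vm_gpu_start()).
	 */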
	mutex_lock(&arb_freq->arb_freq_lock);
	if (arb_freq->arb_freq != freq) {
		ndata.new_rate = (unsigned long)freq * KHZ_TO_HZ;
		ndata.old_rate = (unsigned long)arb_freq->arb_freq * KHZ_TO_HZ;
		ndata.gpu_clk_handle = arb_freq;
		arb_freq->arb_freq = freq;
		arb_freq->freq_updated = true;
		if (arb_freq->nb)
			arb_freq->nb->notifier_call(arb_freq->nb,
						    POST_RATE_CHANGE, &ndata);
	}

	mutex_unlock(&arb_freq->arb_freq_lock);
}

/**
 * get_arb_gpu_clk() - Enumerate a GPU clock on the given index
 * @kbdev: kbase_device pointer
 * @index: GPU clock index
 *
 * Return: Pointer to structure holding GPU clock frequency data reported from
 * arbiter, only index 0 is valid.
 */
static void *get_arb_gpu_clk(struct kbase_device *kbdev,
			     unsigned int index)
{
	if (index == 0)
		return &kbdev->arb.arb_freq;
	return NULL;
}

/**
 * get_arb_gpu_clk_rate() - Get the current rate of GPU clock frequency value
 * @kbdev: kbase_device pointer
 * @gpu_clk_handle: Handle unique to the enumerated GPU clock
 *
 * Return: The GPU clock frequency value saved when gpu is granted from arbiter
 */
static unsigned long get_arb_gpu_clk_rate(struct kbase_device *kbdev,
					  void *gpu_clk_handle)
{
	uint32_t freq;
	struct kbase_arbiter_freq *arb_dev_freq =
		(struct kbase_arbiter_freq *)gpu_clk_handle;

	mutex_lock(&arb_dev_freq->arb_freq_lock);
	/* Convert from KHz to Hz */
	freq = arb_dev_freq->arb_freq * KHZ_TO_HZ;
	mutex_unlock(&arb_dev_freq->arb_freq_lock);
	return freq;
}

/**
 * arb_gpu_clk_notifier_register() - Register a clock rate change notifier.
 * @kbdev: kbase_device pointer
 * @gpu_clk_handle: Handle unique to the enumerated GPU clock
 * @nb: notifier block containing the callback function pointer
 *
 * This function registers a callback function that is invoked whenever the
 * frequency of the clock corresponding to @gpu_clk_handle changes.
 *
 * Return: 0 on success, negative error code otherwise.
 */
static int arb_gpu_clk_notifier_register(struct kbase_device *kbdev,
		void *gpu_clk_handle, struct notifier_block *nb)
{
	int ret = 0;
	struct kbase_arbiter_freq *arb_dev_freq =
		(struct kbase_arbiter_freq *)gpu_clk_handle;

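	/* Only a single notifier per clock is supported: a second
	 * registration fails with -EBUSY until the first one has been
	 * unregistered.
	 */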
	if (!arb_dev_freq->nb)
		arb_dev_freq->nb = nb;
	else
		ret = -EBUSY;

	return ret;
}

/**
 * arb_gpu_clk_notifier_unregister() - Unregister clock rate change notifier
 * @kbdev: kbase_device pointer
 * @gpu_clk_handle: Handle unique to the enumerated GPU clock
 * @nb: notifier block containing the callback function pointer
 *
 * This function pointer is used to unregister a callback function that
 * was previously registered to get notified of a frequency change of the
 * clock corresponding to @gpu_clk_handle.
 */
static void arb_gpu_clk_notifier_unregister(struct kbase_device *kbdev,
		void *gpu_clk_handle, struct notifier_block *nb)
{
	struct kbase_arbiter_freq *arb_dev_freq =
		(struct kbase_arbiter_freq *)gpu_clk_handle;

	if (arb_dev_freq->nb == nb) {
		arb_dev_freq->nb = NULL;
	} else {
		dev_err(kbdev->dev, "%s - notifier did not match\n",
			__func__);
	}
}

struct kbase_clk_rate_trace_op_conf arb_clk_rate_trace_ops = {
	.get_gpu_clk_rate = get_arb_gpu_clk_rate,
	.enumerate_gpu_clk = get_arb_gpu_clk,
	.gpu_clk_notifier_register = arb_gpu_clk_notifier_register,
	.gpu_clk_notifier_unregister = arb_gpu_clk_notifier_unregister
};