// SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note
/*
 *
 * (C) COPYRIGHT 2019-2022 ARM Limited. All rights reserved.
 *
 * This program is free software and is provided to you under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation, and any use by you of this program is subject to the terms
 * of such GNU license.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, you can access it online at
 * http://www.gnu.org/licenses/gpl-2.0.html.
 *
 */

/**
 * DOC: Mali arbiter interface APIs to share GPU between Virtual Machines
 */
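
/*
 * Rough sketch of the call flow between kbase and the arbiter, inferred from
 * the functions in this file only; the exact ordering is driven by the
 * arbiter PM state machine behind kbase_arbiter_pm_vm_event():
 *
 *   kbase_arbif_init()        - resolve "arbiter_if" and register callbacks
 *   kbase_arbif_gpu_request() - ask the arbiter for the GPU
 *   on_gpu_granted()          - the arbiter granted the GPU to this VM
 *   kbase_arbif_gpu_active() / kbase_arbif_gpu_idle()
 *                             - report VM activity while the GPU is held
 *   on_gpu_stop()             - the arbiter asks for the GPU back
 *   kbase_arbif_gpu_stopped() - confirm the GPU has been stopped
 *   on_gpu_lost()             - the arbiter reports the GPU was lost
 *   kbase_arbif_destroy()     - unregister and drop references
 */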

#include <mali_kbase.h>
#include "mali_kbase_arbif.h"
#include <tl/mali_kbase_tracepoints.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include "linux/mali_arbiter_interface.h"

/* Arbiter interface version against which this module was implemented */
#define MALI_REQUIRED_KBASE_ARBITER_INTERFACE_VERSION 5
#if MALI_REQUIRED_KBASE_ARBITER_INTERFACE_VERSION != \
		MALI_ARBITER_INTERFACE_VERSION
#error "Unsupported Mali Arbiter interface version."
#endif

/**
 * on_max_config() - Handle max_config info from the arbiter
 * @dev: arbiter interface device handle
 * @max_l2_slices: maximum number of L2 slices
 * @max_core_mask: maximum shader core mask
 *
 * Callback function used to pass the max_config info from the arbiter to the
 * kbase device.
 */
static void on_max_config(struct device *dev, uint32_t max_l2_slices,
			  uint32_t max_core_mask)
{
	struct kbase_device *kbdev;

	if (!dev) {
		pr_err("%s(): dev is NULL", __func__);
		return;
	}

	kbdev = dev_get_drvdata(dev);
	if (!kbdev) {
		dev_err(dev, "%s(): kbdev is NULL", __func__);
		return;
	}

	if (!max_l2_slices || !max_core_mask) {
		dev_dbg(dev,
			"%s(): max_config ignored as one of the fields is zero",
			__func__);
		return;
	}

	/* set the max config info in the kbase device */
	kbase_arbiter_set_max_config(kbdev, max_l2_slices, max_core_mask);
}

/**
 * on_update_freq() - Updates GPU clock frequency
 * @dev: arbiter interface device handle
 * @freq: GPU clock frequency value reported from arbiter
 *
 * Callback function used to update the GPU clock frequency with a
 * new value from the arbiter.
 */
static void on_update_freq(struct device *dev, uint32_t freq)
{
	struct kbase_device *kbdev;

	if (!dev) {
		pr_err("%s(): dev is NULL", __func__);
		return;
	}

	kbdev = dev_get_drvdata(dev);
	if (!kbdev) {
		dev_err(dev, "%s(): kbdev is NULL", __func__);
		return;
	}

	kbase_arbiter_pm_update_gpu_freq(&kbdev->arb.arb_freq, freq);
}

/**
 * on_gpu_stop() - Sends KBASE_VM_GPU_STOP_EVT event on VM stop
 * @dev: arbiter interface device handle
 *
 * Callback function used to signal a GPU STOP event from the arbiter interface.
 */
static void on_gpu_stop(struct device *dev)
{
	struct kbase_device *kbdev;

	if (!dev) {
		pr_err("%s(): dev is NULL", __func__);
		return;
	}

	kbdev = dev_get_drvdata(dev);
	if (!kbdev) {
		dev_err(dev, "%s(): kbdev is NULL", __func__);
		return;
	}

	KBASE_TLSTREAM_TL_ARBITER_STOP_REQUESTED(kbdev, kbdev);
	kbase_arbiter_pm_vm_event(kbdev, KBASE_VM_GPU_STOP_EVT);
}

/**
 * on_gpu_granted() - Sends KBASE_VM_GPU_GRANTED_EVT event on GPU granted
 * @dev: arbiter interface device handle
 *
 * Callback function used to signal a GPU GRANT event from the arbiter interface.
 */
static void on_gpu_granted(struct device *dev)
{
	struct kbase_device *kbdev;

	if (!dev) {
		pr_err("%s(): dev is NULL", __func__);
		return;
	}

	kbdev = dev_get_drvdata(dev);
	if (!kbdev) {
		dev_err(dev, "%s(): kbdev is NULL", __func__);
		return;
	}

	KBASE_TLSTREAM_TL_ARBITER_GRANTED(kbdev, kbdev);
	kbase_arbiter_pm_vm_event(kbdev, KBASE_VM_GPU_GRANTED_EVT);
}

/**
 * on_gpu_lost() - Sends KBASE_VM_GPU_LOST_EVT event on GPU lost
 * @dev: arbiter interface device handle
 *
 * Callback function used to signal a GPU LOST event from the arbiter interface.
 */
static void on_gpu_lost(struct device *dev)
{
	struct kbase_device *kbdev;

	if (!dev) {
		pr_err("%s(): dev is NULL", __func__);
		return;
	}

	kbdev = dev_get_drvdata(dev);
	if (!kbdev) {
		dev_err(dev, "%s(): kbdev is NULL", __func__);
		return;
	}

	kbase_arbiter_pm_vm_event(kbdev, KBASE_VM_GPU_LOST_EVT);
}

/**
 * kbase_arbif_init() - Kbase Arbiter interface initialisation.
 * @kbdev: The kbase device structure for the device (must be a valid pointer)
 *
 * Initialise the Kbase Arbiter interface and assign callback functions.
 *
 * Return:
 * * 0 - the interface was initialized or was not specified in the device
 *       tree.
 * * -EFAULT - the interface was specified but failed to initialize.
 * * -EPROBE_DEFER - module dependencies are not yet available.
 */
int kbase_arbif_init(struct kbase_device *kbdev)
{
#if IS_ENABLED(CONFIG_OF)
	struct arbiter_if_arb_vm_ops ops;
	struct arbiter_if_dev *arb_if;
	struct device_node *arbiter_if_node;
	struct platform_device *pdev;
	int err;

	dev_dbg(kbdev->dev, "%s\n", __func__);

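	/*
	 * The arbiter is found through the "arbiter_if" phandle parsed below.
	 * A hypothetical device-tree fragment (node names and compatible
	 * string are illustrative only, not taken from this driver) could
	 * look like:
	 *
	 *	arbiter: arbiter {
	 *		compatible = "vendor,gpu-arbiter";
	 *	};
	 *
	 *	gpu {
	 *		arbiter_if = <&arbiter>;
	 *	};
	 */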
	arbiter_if_node = of_parse_phandle(kbdev->dev->of_node,
			"arbiter_if", 0);
	if (!arbiter_if_node) {
		dev_dbg(kbdev->dev, "No arbiter_if in Device Tree\n");
		/* no arbiter interface defined in device tree */
		kbdev->arb.arb_dev = NULL;
		kbdev->arb.arb_if = NULL;
		return 0;
	}

	pdev = of_find_device_by_node(arbiter_if_node);
	if (!pdev) {
		dev_err(kbdev->dev, "Failed to find arbiter_if device\n");
		return -EPROBE_DEFER;
	}

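	/*
	 * of_find_device_by_node() took a reference on the platform device,
	 * and try_module_get() below pins the arbiter driver module while its
	 * callbacks are in use. The matching put_device()/module_put() calls
	 * are made on the error paths below and in kbase_arbif_destroy().
	 */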
	if (!pdev->dev.driver || !try_module_get(pdev->dev.driver->owner)) {
		dev_err(kbdev->dev, "arbiter_if driver not available\n");
		put_device(&pdev->dev);
		return -EPROBE_DEFER;
	}
	kbdev->arb.arb_dev = &pdev->dev;
	arb_if = platform_get_drvdata(pdev);
	if (!arb_if) {
		dev_err(kbdev->dev, "arbiter_if driver not ready\n");
		module_put(pdev->dev.driver->owner);
		put_device(&pdev->dev);
		return -EPROBE_DEFER;
	}

	kbdev->arb.arb_if = arb_if;
	ops.arb_vm_gpu_stop = on_gpu_stop;
	ops.arb_vm_gpu_granted = on_gpu_granted;
	ops.arb_vm_gpu_lost = on_gpu_lost;
	ops.arb_vm_max_config = on_max_config;
	ops.arb_vm_update_freq = on_update_freq;

	kbdev->arb.arb_freq.arb_freq = 0;
	kbdev->arb.arb_freq.freq_updated = false;
	mutex_init(&kbdev->arb.arb_freq.arb_freq_lock);

	/* register kbase arbiter_if callbacks */
	if (arb_if->vm_ops.vm_arb_register_dev) {
		err = arb_if->vm_ops.vm_arb_register_dev(arb_if,
			kbdev->dev, &ops);
		if (err) {
			dev_err(&pdev->dev, "Failed to register with arbiter\n");
			module_put(pdev->dev.driver->owner);
			put_device(&pdev->dev);
			if (err != -EPROBE_DEFER)
				err = -EFAULT;
			return err;
		}
	}

#else /* CONFIG_OF */
	dev_dbg(kbdev->dev, "No arbiter without Device Tree support\n");
	kbdev->arb.arb_dev = NULL;
	kbdev->arb.arb_if = NULL;
#endif
	return 0;
}

/**
 * kbase_arbif_destroy() - De-init Kbase arbiter interface
 * @kbdev: The kbase device structure for the device (must be a valid pointer)
 *
 * De-initialise the Kbase arbiter interface.
 */
void kbase_arbif_destroy(struct kbase_device *kbdev)
{
	struct arbiter_if_dev *arb_if = kbdev->arb.arb_if;

	if (arb_if && arb_if->vm_ops.vm_arb_unregister_dev) {
		dev_dbg(kbdev->dev, "%s\n", __func__);
		arb_if->vm_ops.vm_arb_unregister_dev(kbdev->arb.arb_if);
	}
	kbdev->arb.arb_if = NULL;
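	/* Drop the device and module references taken in kbase_arbif_init() */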
	if (kbdev->arb.arb_dev) {
		module_put(kbdev->arb.arb_dev->driver->owner);
		put_device(kbdev->arb.arb_dev);
	}
	kbdev->arb.arb_dev = NULL;
}

/**
 * kbase_arbif_get_max_config() - Request max config info
 * @kbdev: The kbase device structure for the device (must be a valid pointer)
 *
 * Asks the arbiter, via the arbiter interface, for the max config info.
 */
void kbase_arbif_get_max_config(struct kbase_device *kbdev)
{
	struct arbiter_if_dev *arb_if = kbdev->arb.arb_if;

	if (arb_if && arb_if->vm_ops.vm_arb_get_max_config) {
		dev_dbg(kbdev->dev, "%s\n", __func__);
		arb_if->vm_ops.vm_arb_get_max_config(arb_if);
	}
}

/**
 * kbase_arbif_gpu_request() - Request the GPU from the arbiter
 * @kbdev: The kbase device structure for the device (must be a valid pointer)
 *
 * Asks the arbiter, via the arbiter interface, to grant the GPU to this VM.
 */
void kbase_arbif_gpu_request(struct kbase_device *kbdev)
{
	struct arbiter_if_dev *arb_if = kbdev->arb.arb_if;

	if (arb_if && arb_if->vm_ops.vm_arb_gpu_request) {
		dev_dbg(kbdev->dev, "%s\n", __func__);
		KBASE_TLSTREAM_TL_ARBITER_REQUESTED(kbdev, kbdev);
		arb_if->vm_ops.vm_arb_gpu_request(arb_if);
	}
}

/**
 * kbase_arbif_gpu_stopped() - Send a GPU stopped message to the arbiter
 * @kbdev: The kbase device structure for the device (must be a valid pointer)
 * @gpu_required: set if the VM still needs the GPU; the stop message then
 *                also acts as a new GPU request
 *
 * Informs the arbiter that this VM has stopped using the GPU.
 */
void kbase_arbif_gpu_stopped(struct kbase_device *kbdev, u8 gpu_required)
{
	struct arbiter_if_dev *arb_if = kbdev->arb.arb_if;

	if (arb_if && arb_if->vm_ops.vm_arb_gpu_stopped) {
		dev_dbg(kbdev->dev, "%s\n", __func__);
		KBASE_TLSTREAM_TL_ARBITER_STOPPED(kbdev, kbdev);
		if (gpu_required)
			KBASE_TLSTREAM_TL_ARBITER_REQUESTED(kbdev, kbdev);
		arb_if->vm_ops.vm_arb_gpu_stopped(arb_if, gpu_required);
	}
}

/**
 * kbase_arbif_gpu_active() - Send a GPU_ACTIVE message to the arbiter
 * @kbdev: The kbase device structure for the device (must be a valid pointer)
 *
 * Informs the arbiter that the VM is active.
 */
void kbase_arbif_gpu_active(struct kbase_device *kbdev)
{
	struct arbiter_if_dev *arb_if = kbdev->arb.arb_if;

	if (arb_if && arb_if->vm_ops.vm_arb_gpu_active) {
		dev_dbg(kbdev->dev, "%s\n", __func__);
		arb_if->vm_ops.vm_arb_gpu_active(arb_if);
	}
}

/**
 * kbase_arbif_gpu_idle() - Inform the arbiter that the VM has gone idle
 * @kbdev: The kbase device structure for the device (must be a valid pointer)
 *
 * Informs the arbiter that the VM is idle.
 */
void kbase_arbif_gpu_idle(struct kbase_device *kbdev)
{
	struct arbiter_if_dev *arb_if = kbdev->arb.arb_if;

	if (arb_if && arb_if->vm_ops.vm_arb_gpu_idle) {
		dev_dbg(kbdev->dev, "vm_arb_gpu_idle\n");
		arb_if->vm_ops.vm_arb_gpu_idle(arb_if);
	}
}