// SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note
/*
 *
 * (C) COPYRIGHT 2014-2023 ARM Limited. All rights reserved.
 *
 * This program is free software and is provided to you under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation, and any use by you of this program is subject to the terms
 * of such GNU license.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, you can access it online at
 * http://www.gnu.org/licenses/gpl-2.0.html.
 *
 */

#include <mali_kbase.h>
#include <mali_kbase_hwaccess_time.h>
#if MALI_USE_CSF
#include <asm/arch_timer.h>
#include <linux/gcd.h>
#include <csf/mali_kbase_csf_timeout.h>
#endif
#include <device/mali_kbase_device.h>
#include <backend/gpu/mali_kbase_pm_internal.h>
#include <mali_kbase_config_defaults.h>

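/* Descriptive kernel-doc added for clarity; the behaviour below is inferred
 * from the function body itself.
 */
/**
 * kbase_backend_get_gpu_time_norequest() - Sample GPU and CPU timestamps
 *                                          without requesting the GPU cycle
 *                                          counter.
 *
 * @kbdev:         Kbase device.
 * @cycle_counter: Output for the GPU cycle counter value, or NULL.
 * @system_time:   Output for the GPU TIMESTAMP register value, or NULL.
 * @ts:            Output for the CPU monotonic raw time, or NULL.
 *
 * The caller is responsible for ensuring the cycle counter is already
 * enabled; see kbase_backend_get_gpu_time() for a variant that requests
 * and releases it around the sample.
 */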
void kbase_backend_get_gpu_time_norequest(struct kbase_device *kbdev,
					  u64 *cycle_counter,
					  u64 *system_time,
					  struct timespec64 *ts)
{
	u32 hi1, hi2;

	if (cycle_counter)
		*cycle_counter = kbase_backend_get_cycle_cnt(kbdev);

	if (system_time) {
		/* Read hi, lo, hi: if the high word changed between the two
		 * reads, the low word wrapped mid-sample, so retry until both
		 * high reads agree and the u64 is coherent.
		 */
		do {
			hi1 = kbase_reg_read(kbdev,
					     GPU_CONTROL_REG(TIMESTAMP_HI));
			*system_time = kbase_reg_read(kbdev,
					     GPU_CONTROL_REG(TIMESTAMP_LO));
			hi2 = kbase_reg_read(kbdev,
					     GPU_CONTROL_REG(TIMESTAMP_HI));
		} while (hi1 != hi2);
		*system_time |= (((u64)hi1) << 32);
	}

	/* Record the CPU's idea of current time */
	if (ts != NULL)
#if (KERNEL_VERSION(4, 17, 0) > LINUX_VERSION_CODE)
		*ts = ktime_to_timespec64(ktime_get_raw());
#else
		ktime_get_raw_ts64(ts);
#endif
}

#if !MALI_USE_CSF
/**
 * timedwait_cycle_count_active() - Timed wait till CYCLE_COUNT_ACTIVE is active
 *
 * @kbdev: Kbase device
 *
 * Return: true if CYCLE_COUNT_ACTIVE is active within the timeout.
 */
static bool timedwait_cycle_count_active(struct kbase_device *kbdev)
{
#if IS_ENABLED(CONFIG_MALI_BIFROST_NO_MALI)
	return true;
#else
	bool success = false;
	const unsigned int timeout = 100;
	const unsigned long remaining = jiffies + msecs_to_jiffies(timeout);

	/* Busy-poll GPU_STATUS until the bit is seen or the timeout expires */
	while (time_is_after_jiffies(remaining)) {
		if ((kbase_reg_read(kbdev, GPU_CONTROL_REG(GPU_STATUS)) &
		     GPU_STATUS_CYCLE_COUNT_ACTIVE)) {
			success = true;
			break;
		}
	}
	return success;
#endif
}
#endif

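/* Descriptive kernel-doc added for clarity; grounded in the body below. */
/**
 * kbase_backend_get_gpu_time() - Sample GPU and CPU timestamps.
 *
 * @kbdev:         Kbase device.
 * @cycle_counter: Output for the GPU cycle counter value, or NULL.
 * @system_time:   Output for the GPU TIMESTAMP register value, or NULL.
 * @ts:            Output for the CPU monotonic raw time, or NULL.
 *
 * On Job Manager GPUs this requests the cycle counter before sampling
 * (warning once if the L2 is not powered or the counter does not become
 * active in time) and releases it afterwards; on CSF GPUs it samples
 * directly.
 */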
void kbase_backend_get_gpu_time(struct kbase_device *kbdev, u64 *cycle_counter,
				u64 *system_time, struct timespec64 *ts)
{
#if !MALI_USE_CSF
	kbase_pm_request_gpu_cycle_counter(kbdev);
	WARN_ONCE(kbdev->pm.backend.l2_state != KBASE_L2_ON,
		  "L2 not powered up");
	WARN_ONCE((!timedwait_cycle_count_active(kbdev)),
		  "Timed out on CYCLE_COUNT_ACTIVE");
#endif
	kbase_backend_get_gpu_time_norequest(kbdev, cycle_counter, system_time,
					     ts);
#if !MALI_USE_CSF
	kbase_pm_release_gpu_cycle_counter(kbdev);
#endif
}

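/* Descriptive kernel-doc added for clarity; grounded in the body below. */
/**
 * kbase_get_timeout_ms() - Scale a timeout expressed in GPU cycles to
 *                          milliseconds.
 *
 * @kbdev:    Kbase device.
 * @selector: Which timeout to look up.
 *
 * The cycle count for the selected timeout is divided by the lowest known
 * GPU frequency (falling back to a reference frequency if that is not yet
 * initialized), and the result is capped at UINT_MAX milliseconds.
 *
 * Return: Timeout in milliseconds.
 */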
unsigned int kbase_get_timeout_ms(struct kbase_device *kbdev,
				  enum kbase_timeout_selector selector)
{
	/* Timeout calculation:
	 * a frequency in kHz is a count of cycles per millisecond, so
	 * dividing a number of cycles by a frequency in kHz directly gives a
	 * value in milliseconds. Scale nr_cycles by 1e3 first to get the
	 * result in microseconds, or by 1e6 for nanoseconds.
	 */
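	/* Illustrative example (values chosen for this comment, not taken
	 * from the driver): 500000 cycles at freq_khz = 100000 (100 MHz)
	 * gives 500000 / 100000 = 5 ms.
	 */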

	u64 timeout, nr_cycles = 0;
	u64 freq_khz;

	/* Only for debug messages, safe default in case it's mis-maintained */
	const char *selector_str = "(unknown)";

	if (!kbdev->lowest_gpu_freq_khz) {
		dev_dbg(kbdev->dev,
			"Lowest frequency uninitialized! Using reference frequency for scaling");
		freq_khz = DEFAULT_REF_TIMEOUT_FREQ_KHZ;
	} else {
		freq_khz = kbdev->lowest_gpu_freq_khz;
	}

	switch (selector) {
	case MMU_AS_INACTIVE_WAIT_TIMEOUT:
		selector_str = "MMU_AS_INACTIVE_WAIT_TIMEOUT";
		nr_cycles = MMU_AS_INACTIVE_WAIT_TIMEOUT_CYCLES;
		break;
	case KBASE_TIMEOUT_SELECTOR_COUNT:
	default:
#if !MALI_USE_CSF
		WARN(1, "Invalid timeout selector used! Using default value");
		nr_cycles = JM_DEFAULT_TIMEOUT_CYCLES;
		break;
	case JM_DEFAULT_JS_FREE_TIMEOUT:
		selector_str = "JM_DEFAULT_JS_FREE_TIMEOUT";
		nr_cycles = JM_DEFAULT_JS_FREE_TIMEOUT_CYCLES;
		break;
#else
		/* Use Firmware timeout if invalid selection */
		WARN(1,
		     "Invalid timeout selector used! Using CSF Firmware timeout");
		fallthrough;
	case CSF_FIRMWARE_TIMEOUT:
		selector_str = "CSF_FIRMWARE_TIMEOUT";
		/* Any FW timeout cannot be longer than the FW ping interval,
		 * after which the firmware_aliveness_monitor will be triggered
		 * and may restart the GPU if the FW is unresponsive.
		 */
		nr_cycles = min(CSF_FIRMWARE_PING_TIMEOUT_CYCLES, CSF_FIRMWARE_TIMEOUT_CYCLES);

		if (nr_cycles == CSF_FIRMWARE_PING_TIMEOUT_CYCLES)
			dev_warn(kbdev->dev, "Capping %s to CSF_FIRMWARE_PING_TIMEOUT\n",
				 selector_str);
		break;
	case CSF_PM_TIMEOUT:
		selector_str = "CSF_PM_TIMEOUT";
		nr_cycles = CSF_PM_TIMEOUT_CYCLES;
		break;
	case CSF_GPU_RESET_TIMEOUT:
		selector_str = "CSF_GPU_RESET_TIMEOUT";
		nr_cycles = CSF_GPU_RESET_TIMEOUT_CYCLES;
		break;
	case CSF_CSG_SUSPEND_TIMEOUT:
		selector_str = "CSF_CSG_SUSPEND_TIMEOUT";
		nr_cycles = CSF_CSG_SUSPEND_TIMEOUT_CYCLES;
		break;
	case CSF_FIRMWARE_BOOT_TIMEOUT:
		selector_str = "CSF_FIRMWARE_BOOT_TIMEOUT";
		nr_cycles = CSF_FIRMWARE_BOOT_TIMEOUT_CYCLES;
		break;
	case CSF_FIRMWARE_PING_TIMEOUT:
		selector_str = "CSF_FIRMWARE_PING_TIMEOUT";
		nr_cycles = CSF_FIRMWARE_PING_TIMEOUT_CYCLES;
		break;
	case CSF_SCHED_PROTM_PROGRESS_TIMEOUT:
		selector_str = "CSF_SCHED_PROTM_PROGRESS_TIMEOUT";
		nr_cycles = kbase_csf_timeout_get(kbdev);
		break;
#endif
	}

	timeout = div_u64(nr_cycles, freq_khz);
	if (WARN(timeout > UINT_MAX,
		 "Capping excessive timeout %llums for %s at freq %llukHz to UINT_MAX ms",
		 (unsigned long long)timeout, selector_str, (unsigned long long)freq_khz))
		timeout = UINT_MAX;
	return (unsigned int)timeout;
}
KBASE_EXPORT_TEST_API(kbase_get_timeout_ms);

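/* Descriptive kernel-doc added for clarity; grounded in the body below. */
/**
 * kbase_backend_get_cycle_cnt() - Read the 64-bit GPU cycle counter.
 *
 * @kbdev: Kbase device.
 *
 * Uses the same hi/lo/hi register sequence as the timestamp read above to
 * assemble a coherent 64-bit value from two 32-bit registers.
 *
 * Return: Current value of the GPU cycle counter.
 */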
u64 kbase_backend_get_cycle_cnt(struct kbase_device *kbdev)
{
	u32 hi1, hi2, lo;

	/* Read hi, lo, hi to ensure a coherent u64 */
	do {
		hi1 = kbase_reg_read(kbdev,
				     GPU_CONTROL_REG(CYCLE_COUNT_HI));
		lo = kbase_reg_read(kbdev,
				    GPU_CONTROL_REG(CYCLE_COUNT_LO));
		hi2 = kbase_reg_read(kbdev,
				     GPU_CONTROL_REG(CYCLE_COUNT_HI));
	} while (hi1 != hi2);

	return lo | (((u64)hi1) << 32);
}

#if MALI_USE_CSF
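/* Descriptive kernel-doc added for clarity; grounded in the body below and
 * in kbase_backend_time_init().
 */
/**
 * kbase_backend_time_convert_gpu_to_cpu() - Convert a GPU timestamp to CPU
 *                                           monotonic raw nanoseconds.
 *
 * @kbdev:  Kbase device.
 * @gpu_ts: GPU timestamp, in ticks of the architected counter.
 *
 * Applies the linear mapping established by kbase_backend_time_init():
 * cpu_ns = gpu_ts * multiplier / divisor + offset.
 *
 * Return: CPU timestamp in nanoseconds, or 0 if @kbdev is NULL.
 */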
u64 __maybe_unused kbase_backend_time_convert_gpu_to_cpu(struct kbase_device *kbdev, u64 gpu_ts)
{
	if (WARN_ON(!kbdev))
		return 0;

	return div64_u64(gpu_ts * kbdev->backend_time.multiplier, kbdev->backend_time.divisor) +
	       kbdev->backend_time.offset;
}

/**
 * get_cpu_gpu_time() - Get current CPU and GPU timestamps.
 *
 * @kbdev:     Kbase device.
 * @cpu_ts:    Output CPU timestamp in nanoseconds, or NULL.
 * @gpu_ts:    Output GPU timestamp.
 * @gpu_cycle: Output GPU cycle counts.
 */
static void get_cpu_gpu_time(struct kbase_device *kbdev, u64 *cpu_ts, u64 *gpu_ts, u64 *gpu_cycle)
{
	struct timespec64 ts;

	kbase_backend_get_gpu_time(kbdev, gpu_cycle, gpu_ts, &ts);

	if (cpu_ts)
		*cpu_ts = ts.tv_sec * NSEC_PER_SEC + ts.tv_nsec;
}
#endif

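/* Descriptive kernel-doc added for clarity; grounded in the body below. */
/**
 * kbase_backend_time_init() - Initialize the GPU-to-CPU timestamp mapping.
 *
 * @kbdev: Kbase device.
 *
 * On CSF GPUs, reduces the NSEC_PER_SEC / counter-frequency ratio by its
 * GCD to keep the multiplier and divisor small, then records the offset
 * between the current CPU time and the converted GPU timestamp.
 * Illustrative example (not a value taken from real hardware): a 25 MHz
 * counter gives gcd(1000000000, 25000000) = 25000000, so multiplier = 40
 * and divisor = 1.
 *
 * Return: 0 on success, or -EINVAL if the counter frequency or the derived
 * divisor is zero.
 */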
int kbase_backend_time_init(struct kbase_device *kbdev)
{
#if MALI_USE_CSF
	u64 cpu_ts = 0;
	u64 gpu_ts = 0;
	u64 freq;
	u64 common_factor;

	get_cpu_gpu_time(kbdev, &cpu_ts, &gpu_ts, NULL);
	freq = arch_timer_get_cntfrq();

	if (!freq) {
		dev_warn(kbdev->dev, "arch_timer_get_cntfrq() is zero!");
		return -EINVAL;
	}

	common_factor = gcd(NSEC_PER_SEC, freq);

	kbdev->backend_time.multiplier = div64_u64(NSEC_PER_SEC, common_factor);
	kbdev->backend_time.divisor = div64_u64(freq, common_factor);

	if (!kbdev->backend_time.divisor) {
		dev_warn(kbdev->dev, "CPU to GPU divisor is zero!");
		return -EINVAL;
	}

	kbdev->backend_time.offset = cpu_ts - div64_u64(gpu_ts * kbdev->backend_time.multiplier,
							kbdev->backend_time.divisor);
#endif

	return 0;
}