// SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note
/*
 *
 * (C) COPYRIGHT 2019-2023 ARM Limited. All rights reserved.
 *
 * This program is free software and is provided to you under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation, and any use by you of this program is subject to the terms
 * of such GNU license.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, you can access it online at
 * http://www.gnu.org/licenses/gpl-2.0.html.
 *
 */

#include "mali_kbase_timeline_priv.h"
#include "mali_kbase_tlstream.h"
#include "mali_kbase_tracepoints.h"
#include "mali_kbase_timeline.h"

#include <device/mali_kbase_device.h>

#include <linux/poll.h>
#include <linux/version_compat_defs.h>
#include <linux/anon_inodes.h>

/* Explicitly include epoll header for old kernels. Not required from 4.16. */
#if KERNEL_VERSION(4, 16, 0) > LINUX_VERSION_CODE
#include <uapi/linux/eventpoll.h>
#endif

static int kbase_unprivileged_global_profiling;

/**
 * kbase_unprivileged_global_profiling_set - set permissions for unprivileged processes
 *
 * @val: String containing value to set. Only strings representing positive
 *       integers are accepted as valid; any non-positive integer (including 0)
 *       is rejected.
 * @kp:  Module parameter associated with this method.
 *
 * This method can only be used to enable permissions for unprivileged
 * processes if they are disabled: for this reason, the only accepted values
 * are strings representing positive integers. Since it is impossible to
 * disable permissions once they have been set, any non-positive integer
 * (including 0) is rejected.
 *
 * Return: 0 on success, otherwise error code.
 */
static int kbase_unprivileged_global_profiling_set(const char *val, const struct kernel_param *kp)
{
	int new_val;
	int ret = kstrtoint(val, 0, &new_val);

	if (ret == 0) {
		if (new_val < 1)
			return -EINVAL;

		kbase_unprivileged_global_profiling = 1;
	}

	return ret;
}

static const struct kernel_param_ops kbase_global_unprivileged_profiling_ops = {
	.get = param_get_int,
	.set = kbase_unprivileged_global_profiling_set,
};

module_param_cb(kbase_unprivileged_global_profiling, &kbase_global_unprivileged_profiling_ops,
		&kbase_unprivileged_global_profiling, 0600);
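
/*
 * Illustrative use only (not part of the driver): assuming the module is
 * named "mali_kbase" (the name may differ per integration), a root user
 * could enable unprivileged profiling at runtime with:
 *
 *	echo 1 > /sys/module/mali_kbase/parameters/kbase_unprivileged_global_profiling
 *
 * or at load time with:
 *
 *	modprobe mali_kbase kbase_unprivileged_global_profiling=1
 *
 * Writing 0 or a negative value fails with -EINVAL, so the setting is
 * one-way until the module is reloaded.
 */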

/* The timeline stream file operations functions. */
static ssize_t kbasep_timeline_io_read(struct file *filp, char __user *buffer,
				       size_t size, loff_t *f_pos);
static __poll_t kbasep_timeline_io_poll(struct file *filp, poll_table *wait);
static int kbasep_timeline_io_release(struct inode *inode, struct file *filp);
static int kbasep_timeline_io_fsync(struct file *filp, loff_t start, loff_t end,
				    int datasync);

static bool timeline_is_permitted(void)
{
#if KERNEL_VERSION(5, 8, 0) <= LINUX_VERSION_CODE
	return kbase_unprivileged_global_profiling || perfmon_capable();
#else
	return kbase_unprivileged_global_profiling || capable(CAP_SYS_ADMIN);
#endif
}

/**
 * kbasep_timeline_io_packet_pending - check timeline streams for pending
 *                                     packets
 *
 * @timeline:     Timeline instance
 * @ready_stream: Pointer to variable where stream will be placed
 * @rb_idx_raw:   Pointer to variable where read buffer index will be placed
 *
 * This function checks all streams for pending packets and stops as soon as
 * it finds a packet ready to be submitted to user space. The variables
 * pointed to by the parameters are updated to identify the ready stream and
 * its read buffer index.
 *
 * Return: non-zero if any of the timeline streams has at least one packet
 *         ready
 */
static int
kbasep_timeline_io_packet_pending(struct kbase_timeline *timeline,
				  struct kbase_tlstream **ready_stream,
				  unsigned int *rb_idx_raw)
{
	enum tl_stream_type i;

	KBASE_DEBUG_ASSERT(ready_stream);
	KBASE_DEBUG_ASSERT(rb_idx_raw);

	for (i = (enum tl_stream_type)0; i < TL_STREAM_TYPE_COUNT; ++i) {
		struct kbase_tlstream *stream = &timeline->streams[i];
		*rb_idx_raw = atomic_read(&stream->rbi);
		/* Read buffer index may be updated by writer in case of
		 * overflow. Read and write buffer indexes must be
		 * loaded in correct order.
		 */
		smp_rmb();
		if (atomic_read(&stream->wbi) != *rb_idx_raw) {
			*ready_stream = stream;
			return 1;
		}
	}

	return 0;
}
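
/*
 * Note on the smp_rmb() above: rbi must be loaded before wbi so that a
 * writer-side overflow (which advances rbi) is not missed. This read
 * barrier is assumed to pair with an smp_wmb() on the writer side in
 * mali_kbase_tlstream.c; a sketch of that counterpart (names illustrative):
 *
 *	// writer: publish packet data before advancing the write index
 *	memcpy(stream->buffer[wb_idx].data, packet, packet_size);
 *	smp_wmb();
 *	atomic_inc(&stream->wbi);
 *
 * A reader that observes the new wbi is then guaranteed to also observe
 * the packet contents.
 */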

/**
 * kbasep_timeline_has_header_data() - check timeline headers for pending
 *                                     packets
 *
 * @timeline: Timeline instance
 *
 * Return: non-zero if any of the timeline headers has at least one packet
 *         ready.
 */
static int kbasep_timeline_has_header_data(struct kbase_timeline *timeline)
{
	return timeline->obj_header_btc || timeline->aux_header_btc
#if MALI_USE_CSF
	       || timeline->csf_tl_reader.tl_header.btc
#endif
		;
}

/**
 * copy_stream_header() - copy timeline stream header.
 *
 * @buffer:   Pointer to the buffer provided by user.
 * @size:     Maximum amount of data that can be stored in the buffer.
 * @copy_len: Pointer to the number of bytes that have already been copied
 *            within this read system call.
 * @hdr:      Pointer to the stream header.
 * @hdr_size: Header size.
 * @hdr_btc:  Pointer to the remaining number of bytes to copy.
 *
 * Return: 0 on success, -1 otherwise.
 */
static inline int copy_stream_header(char __user *buffer, size_t size,
				     ssize_t *copy_len, const char *hdr,
				     size_t hdr_size, size_t *hdr_btc)
{
	const size_t offset = hdr_size - *hdr_btc;
	const size_t copy_size = MIN(size - *copy_len, *hdr_btc);

	if (!*hdr_btc)
		return 0;

	if (WARN_ON(*hdr_btc > hdr_size))
		return -1;

	if (copy_to_user(&buffer[*copy_len], &hdr[offset], copy_size))
		return -1;

	*hdr_btc -= copy_size;
	*copy_len += copy_size;

	return 0;
}
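
/*
 * Worked example of the arithmetic above (illustrative numbers): with
 * hdr_size = 40 and *hdr_btc = 24, 16 header bytes have already been sent,
 * so offset = 40 - 24 = 16. If the user buffer has size = 100 and
 * *copy_len = 90, only MIN(100 - 90, 24) = 10 bytes are copied; *hdr_btc
 * drops to 14 and the remainder is sent by a later read() call.
 */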

/**
 * kbasep_timeline_copy_headers - copy timeline headers to the user
 *
 * @timeline: Timeline instance
 * @buffer:   Pointer to the buffer provided by user
 * @size:     Maximum amount of data that can be stored in the buffer
 * @copy_len: Pointer to the number of bytes that have already been copied
 *            within this read system call.
 *
 * This helper function checks if the timeline headers have not yet been
 * sent to the user, and if so, sends them. copy_len is updated accordingly.
 *
 * Return: 0 on success, -1 if copy_to_user has failed.
 */
static inline int kbasep_timeline_copy_headers(struct kbase_timeline *timeline,
					       char __user *buffer, size_t size,
					       ssize_t *copy_len)
{
	if (copy_stream_header(buffer, size, copy_len, obj_desc_header,
			       obj_desc_header_size, &timeline->obj_header_btc))
		return -1;

	if (copy_stream_header(buffer, size, copy_len, aux_desc_header,
			       aux_desc_header_size, &timeline->aux_header_btc))
		return -1;
#if MALI_USE_CSF
	if (copy_stream_header(buffer, size, copy_len,
			       timeline->csf_tl_reader.tl_header.data,
			       timeline->csf_tl_reader.tl_header.size,
			       &timeline->csf_tl_reader.tl_header.btc))
		return -1;
#endif
	return 0;
}

/**
 * kbasep_timeline_io_read - copy data from streams to buffer provided by user
 *
 * @filp:   Pointer to file structure
 * @buffer: Pointer to the buffer provided by user
 * @size:   Maximum amount of data that can be stored in the buffer
 * @f_pos:  Pointer to file offset (unused)
 *
 * Return: number of bytes stored in the buffer
 */
static ssize_t kbasep_timeline_io_read(struct file *filp, char __user *buffer,
				       size_t size, loff_t *f_pos)
{
	ssize_t copy_len = 0;
	struct kbase_timeline *timeline;

	KBASE_DEBUG_ASSERT(filp);
	KBASE_DEBUG_ASSERT(f_pos);

	if (WARN_ON(!filp->private_data))
		return -EFAULT;

	timeline = (struct kbase_timeline *)filp->private_data;

	if (!buffer)
		return -EINVAL;

	if (*f_pos < 0)
		return -EINVAL;

	mutex_lock(&timeline->reader_lock);

	while (copy_len < size) {
		struct kbase_tlstream *stream = NULL;
		unsigned int rb_idx_raw = 0;
		unsigned int wb_idx_raw;
		unsigned int rb_idx;
		size_t rb_size;

		if (kbasep_timeline_copy_headers(timeline, buffer, size,
						 &copy_len)) {
			copy_len = -EFAULT;
			break;
		}

		/* If we have already read some packets and there is no
		 * packet pending, then return to the user.
		 * If we don't have any data yet, wait for a packet to be
		 * submitted.
		 */
		if (copy_len > 0) {
			if (!kbasep_timeline_io_packet_pending(
				    timeline, &stream, &rb_idx_raw))
				break;
		} else {
			if (wait_event_interruptible(
				    timeline->event_queue,
				    kbasep_timeline_io_packet_pending(
					    timeline, &stream, &rb_idx_raw))) {
				copy_len = -ERESTARTSYS;
				break;
			}
		}

		if (WARN_ON(!stream)) {
			copy_len = -EFAULT;
			break;
		}

		/* Check if this packet fits into the user buffer.
		 * If so, copy its content.
		 */
		rb_idx = rb_idx_raw % PACKET_COUNT;
		rb_size = atomic_read(&stream->buffer[rb_idx].size);
		if (rb_size > size - copy_len)
			break;
		if (copy_to_user(&buffer[copy_len], stream->buffer[rb_idx].data,
				 rb_size)) {
			copy_len = -EFAULT;
			break;
		}

		/* If the distance between the read and write buffer indexes
		 * becomes PACKET_COUNT or more, then an overflow happened and
		 * we must discard the packet that has just been copied to the
		 * user, as the writer has overwritten it in the meantime.
		 */
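		/*
		 * Illustrative example (PACKET_COUNT is assumed to be 16
		 * here; the real value comes from the tlstream headers):
		 * if rb_idx_raw = 3 and the writer has advanced wbi to 21,
		 * then 21 - 3 = 18 >= 16, so packet 3 was overwritten while
		 * we copied it. The read index is then resynchronised to
		 * 21 - 16 + 1 = 6, the oldest packet still valid.
		 */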
		smp_rmb();
		wb_idx_raw = atomic_read(&stream->wbi);

		if (wb_idx_raw - rb_idx_raw < PACKET_COUNT) {
			copy_len += rb_size;
			atomic_inc(&stream->rbi);
#if MALI_UNIT_TEST
			atomic_add(rb_size, &timeline->bytes_collected);
#endif /* MALI_UNIT_TEST */

		} else {
			const unsigned int new_rb_idx_raw =
				wb_idx_raw - PACKET_COUNT + 1;
			/* Adjust read buffer index to the next valid buffer */
			atomic_set(&stream->rbi, new_rb_idx_raw);
		}
	}

	mutex_unlock(&timeline->reader_lock);

	return copy_len;
}

/**
 * kbasep_timeline_io_poll - poll timeline stream for packets
 * @filp: Pointer to file structure
 * @wait: Pointer to poll table
 *
 * Return: EPOLLIN | EPOLLRDNORM if data can be read without blocking,
 *         otherwise zero, or EPOLLHUP | EPOLLERR on error.
 */
static __poll_t kbasep_timeline_io_poll(struct file *filp, poll_table *wait)
{
	struct kbase_tlstream *stream;
	unsigned int rb_idx;
	struct kbase_timeline *timeline;

	KBASE_DEBUG_ASSERT(filp);
	KBASE_DEBUG_ASSERT(wait);

	if (WARN_ON(!filp->private_data))
		return EPOLLHUP | EPOLLERR;

	timeline = (struct kbase_timeline *)filp->private_data;

	/* If there are header bytes to copy, read will not block */
	if (kbasep_timeline_has_header_data(timeline))
		return EPOLLIN | EPOLLRDNORM;

	poll_wait(filp, &timeline->event_queue, wait);
	if (kbasep_timeline_io_packet_pending(timeline, &stream, &rb_idx))
		return EPOLLIN | EPOLLRDNORM;

	return (__poll_t)0;
}

int kbase_timeline_io_acquire(struct kbase_device *kbdev, u32 flags)
{
	/* The timeline stream file operations structure. */
	static const struct file_operations kbasep_tlstream_fops = {
		.owner = THIS_MODULE,
		.release = kbasep_timeline_io_release,
		.read = kbasep_timeline_io_read,
		.poll = kbasep_timeline_io_poll,
		.fsync = kbasep_timeline_io_fsync,
	};
	int err;

	if (!timeline_is_permitted())
		return -EPERM;

	if (WARN_ON(!kbdev) || (flags & ~BASE_TLSTREAM_FLAGS_MASK))
		return -EINVAL;

	err = kbase_timeline_acquire(kbdev, flags);
	if (err)
		return err;

	err = anon_inode_getfd("[mali_tlstream]", &kbasep_tlstream_fops, kbdev->timeline,
			       O_RDONLY | O_CLOEXEC);
	if (err < 0)
		kbase_timeline_release(kbdev->timeline);

	return err;
}
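
/*
 * Sketch of how user space is expected to consume the returned fd. The
 * ioctl name KBASE_IOCTL_TLSTREAM_ACQUIRE and the device node path are
 * assumptions based on the wider kbase UAPI, shown for illustration only:
 *
 *	struct kbase_ioctl_tlstream_acquire in = { .flags = 0 };
 *	int dev_fd = open("/dev/mali0", O_RDWR);
 *	int tl_fd = ioctl(dev_fd, KBASE_IOCTL_TLSTREAM_ACQUIRE, &in);
 *	struct pollfd pfd = { .fd = tl_fd, .events = POLLIN };
 *
 *	while (poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLIN)) {
 *		char buf[4096];
 *		ssize_t n = read(tl_fd, buf, sizeof(buf));
 *		if (n <= 0)
 *			break;
 *		// parse timeline packets from buf[0..n)
 *	}
 *	close(tl_fd);
 */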

#if IS_ENABLED(CONFIG_DEBUG_FS)
static int kbasep_timeline_io_open(struct inode *in, struct file *file)
{
	struct kbase_device *const kbdev = in->i_private;

	if (WARN_ON(!kbdev))
		return -EFAULT;

	file->private_data = kbdev->timeline;
	return kbase_timeline_acquire(kbdev, BASE_TLSTREAM_FLAGS_MASK &
					     ~BASE_TLSTREAM_JOB_DUMPING_ENABLED);
}

void kbase_timeline_io_debugfs_init(struct kbase_device *const kbdev)
{
	static const struct file_operations kbasep_tlstream_debugfs_fops = {
		.owner = THIS_MODULE,
		.open = kbasep_timeline_io_open,
		.release = kbasep_timeline_io_release,
		.read = kbasep_timeline_io_read,
		.poll = kbasep_timeline_io_poll,
		.fsync = kbasep_timeline_io_fsync,
	};
	struct dentry *file;

	if (WARN_ON(!kbdev) || WARN_ON(IS_ERR_OR_NULL(kbdev->mali_debugfs_directory)))
		return;

	file = debugfs_create_file("tlstream", 0400, kbdev->mali_debugfs_directory, kbdev,
				   &kbasep_tlstream_debugfs_fops);

	if (IS_ERR_OR_NULL(file))
		dev_warn(kbdev->dev, "Unable to create timeline debugfs entry");
}
#else
/*
 * Stub function for when debugfs is disabled
 */
void kbase_timeline_io_debugfs_init(struct kbase_device *const kbdev)
{
}
#endif

/**
 * kbasep_timeline_io_release - release timeline stream descriptor
 * @inode: Pointer to inode structure
 * @filp:  Pointer to file structure
 *
 * Return: always returns zero
 */
static int kbasep_timeline_io_release(struct inode *inode, struct file *filp)
{
	CSTD_UNUSED(inode);

	kbase_timeline_release(filp->private_data);
	return 0;
}

static int kbasep_timeline_io_fsync(struct file *filp, loff_t start, loff_t end,
				    int datasync)
{
	CSTD_UNUSED(start);
	CSTD_UNUSED(end);
	CSTD_UNUSED(datasync);

	return kbase_timeline_streams_flush(filp->private_data);
}