// SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note
/*
 *
 * (C) COPYRIGHT 2011-2022 ARM Limited. All rights reserved.
 *
 * This program is free software and is provided to you under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation, and any use by you of this program is subject to the terms
 * of such GNU license.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, you can access it online at
 * http://www.gnu.org/licenses/gpl-2.0.html.
 *
 */

#include "mali_kbase_vinstr.h"
#include "hwcnt/mali_kbase_hwcnt_virtualizer.h"
#include "hwcnt/mali_kbase_hwcnt_types.h"
#include <uapi/gpu/arm/bifrost/mali_kbase_hwcnt_reader.h>
#include "hwcnt/mali_kbase_hwcnt_gpu.h"
#include "hwcnt/mali_kbase_hwcnt_gpu_narrow.h"
#include <uapi/gpu/arm/bifrost/mali_kbase_ioctl.h>
#include "mali_malisw.h"
#include "mali_kbase_debug.h"

#include <linux/anon_inodes.h>
#include <linux/fcntl.h>
#include <linux/fs.h>
#include <linux/hrtimer.h>
#include <linux/log2.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/poll.h>
#include <linux/slab.h>
#include <linux/version_compat_defs.h>
#include <linux/workqueue.h>

/* Explicitly include epoll header for old kernels. Not required from 4.16. */
#if KERNEL_VERSION(4, 16, 0) > LINUX_VERSION_CODE
#include <uapi/linux/eventpoll.h>
#endif

/* Hwcnt reader API version */
#define HWCNT_READER_API 1

/* The minimum allowed interval between dumps (equivalent to 10 kHz) */
#define DUMP_INTERVAL_MIN_NS (100 * NSEC_PER_USEC)

/* The maximum allowed buffers per client */
#define MAX_BUFFER_COUNT 32

/**
 * struct kbase_vinstr_context - IOCTL interface for userspace hardware
 *                               counters.
 * @hvirt:         Hardware counter virtualizer used by vinstr.
 * @metadata:      Hardware counter metadata provided by virtualizer.
 * @metadata_user: API compatible hardware counter metadata provided by vinstr.
 *                 For compatibility with the user driver interface, this
 *                 contains a narrowed version of the HWCNT metadata limited
 *                 to 64 entries per block of 32 bits each.
 * @lock:          Lock protecting all vinstr state.
 * @suspend_count: Suspend reference count. If non-zero, timer and worker are
 *                 prevented from being re-scheduled.
 * @client_count:  Number of vinstr clients.
 * @clients:       List of vinstr clients.
 * @dump_timer:    Timer that enqueues dump_work to a workqueue.
 * @dump_work:     Worker for performing periodic counter dumps.
 */
struct kbase_vinstr_context {
	struct kbase_hwcnt_virtualizer *hvirt;
	const struct kbase_hwcnt_metadata *metadata;
	const struct kbase_hwcnt_metadata_narrow *metadata_user;
	struct mutex lock;
	size_t suspend_count;
	size_t client_count;
	struct list_head clients;
	struct hrtimer dump_timer;
	struct work_struct dump_work;
};

/**
 * struct kbase_vinstr_client - A vinstr client attached to a vinstr context.
 * @vctx:              Vinstr context client is attached to.
 * @hvcli:             Hardware counter virtualizer client.
 * @node:              Node used to attach this client to the client list in
 *                     the vinstr context.
 * @dump_interval_ns:  Interval between periodic dumps. If 0, not a periodic
 *                     client.
 * @next_dump_time_ns: Time in ns when this client's next periodic dump must
 *                     occur. If 0, not a periodic client.
 * @enable_map:        Counters enable map.
 * @tmp_buf:           Temporary buffer to use before handing dump to client.
 * @dump_bufs:         Array of narrow dump buffers allocated by this client.
 * @dump_bufs_meta:    Metadata of hwcnt reader client buffers.
 * @meta_idx:          Index of metadata being accessed by userspace.
 * @read_idx:          Index of buffer read by userspace.
 * @write_idx:         Index of buffer being written by dump worker.
 * @waitq:             Client's notification queue.
 */
struct kbase_vinstr_client {
	struct kbase_vinstr_context *vctx;
	struct kbase_hwcnt_virtualizer_client *hvcli;
	struct list_head node;
	u64 next_dump_time_ns;
	u32 dump_interval_ns;
	struct kbase_hwcnt_enable_map enable_map;
	struct kbase_hwcnt_dump_buffer tmp_buf;
	struct kbase_hwcnt_dump_buffer_narrow_array dump_bufs;
	struct kbase_hwcnt_reader_metadata *dump_bufs_meta;
	atomic_t meta_idx;
	atomic_t read_idx;
	atomic_t write_idx;
	wait_queue_head_t waitq;
};
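
/* Note on the index scheme above: write_idx, meta_idx and read_idx only
 * ever increase, so each dump buffer moves through three states. The dump
 * worker fills a buffer and advances write_idx, the GET_BUFFER ioctl hands
 * its metadata to userspace and advances meta_idx, and the PUT_BUFFER ioctl
 * returns it to the pool and advances read_idx. At all times
 * read_idx <= meta_idx <= write_idx.
 */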

static __poll_t kbasep_vinstr_hwcnt_reader_poll(struct file *filp, poll_table *wait);

static long kbasep_vinstr_hwcnt_reader_ioctl(
	struct file *filp,
	unsigned int cmd,
	unsigned long arg);

static int kbasep_vinstr_hwcnt_reader_mmap(
	struct file *filp,
	struct vm_area_struct *vma);

static int kbasep_vinstr_hwcnt_reader_release(
	struct inode *inode,
	struct file *filp);

/* Vinstr client file operations */
static const struct file_operations vinstr_client_fops = {
	.owner = THIS_MODULE,
	.poll = kbasep_vinstr_hwcnt_reader_poll,
	.unlocked_ioctl = kbasep_vinstr_hwcnt_reader_ioctl,
	.compat_ioctl = kbasep_vinstr_hwcnt_reader_ioctl,
	.mmap = kbasep_vinstr_hwcnt_reader_mmap,
	.release = kbasep_vinstr_hwcnt_reader_release,
};

/**
 * kbasep_vinstr_timestamp_ns() - Get the current time in nanoseconds.
 *
 * Return: Current time in nanoseconds.
 */
static u64 kbasep_vinstr_timestamp_ns(void)
{
	return ktime_get_raw_ns();
}

/**
 * kbasep_vinstr_next_dump_time_ns() - Calculate the next periodic dump time.
 * @cur_ts_ns: Current time in nanoseconds.
 * @interval:  Interval between dumps in nanoseconds.
 *
 * Return: 0 if interval is 0 (i.e. a non-periodic client), or the next dump
 *         time that occurs after cur_ts_ns.
 */
static u64 kbasep_vinstr_next_dump_time_ns(u64 cur_ts_ns, u32 interval)
{
	/* Non-periodic client */
	if (interval == 0)
		return 0;

	/*
	 * Return the next interval after the current time relative to t=0.
	 * This means multiple clients with the same period will synchronise,
	 * regardless of when they were started, allowing the worker to be
	 * scheduled less frequently.
	 */
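	/* Worked example (illustrative numbers): with interval = 1000000 ns
	 * (1 ms) and cur_ts_ns = 2300000 ns, do_div() reduces cur_ts_ns to
	 * the quotient 2, so the next dump time is (2 + 1) * 1000000 =
	 * 3000000 ns, the first 1 ms boundary after the current time.
	 */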
	do_div(cur_ts_ns, interval);
	return (cur_ts_ns + 1) * interval;
}

/**
 * kbasep_vinstr_client_dump() - Perform a dump for a client.
 * @vcli:     Non-NULL pointer to a vinstr client.
 * @event_id: Event type that triggered the dump.
 *
 * Return: 0 on success, else error code.
 */
static int kbasep_vinstr_client_dump(
	struct kbase_vinstr_client *vcli,
	enum base_hwcnt_reader_event event_id)
{
	int errcode;
	u64 ts_start_ns;
	u64 ts_end_ns;
	unsigned int write_idx;
	unsigned int read_idx;
	struct kbase_hwcnt_dump_buffer *tmp_buf;
	struct kbase_hwcnt_dump_buffer_narrow *dump_buf;
	struct kbase_hwcnt_reader_metadata *meta;
	u8 clk_cnt;

	WARN_ON(!vcli);
	lockdep_assert_held(&vcli->vctx->lock);

	write_idx = atomic_read(&vcli->write_idx);
	read_idx = atomic_read(&vcli->read_idx);

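	/* The indices increase monotonically, so their unsigned difference
	 * (well-defined even on wraparound) is the number of dumps produced
	 * but not yet read back by userspace.
	 */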
	/* Check if there is a place to copy HWC block into. */
	if (write_idx - read_idx == vcli->dump_bufs.buf_cnt)
		return -EBUSY;
	write_idx %= vcli->dump_bufs.buf_cnt;

	dump_buf = &vcli->dump_bufs.bufs[write_idx];
	meta = &vcli->dump_bufs_meta[write_idx];
	tmp_buf = &vcli->tmp_buf;

	errcode = kbase_hwcnt_virtualizer_client_dump(
		vcli->hvcli, &ts_start_ns, &ts_end_ns, tmp_buf);
	if (errcode)
		return errcode;

	/* Patch the dump buf headers, to hide the counters that other hwcnt
	 * clients are using.
	 */
	kbase_hwcnt_gpu_patch_dump_headers(tmp_buf, &vcli->enable_map);

	/* Copy the temp buffer to the userspace visible buffer. The strict
	 * variant will explicitly zero any non-enabled counters to ensure
	 * nothing except exactly what the user asked for is made visible.
	 *
	 * A narrow copy is required since virtualizer has a bigger buffer
	 * but user only needs part of it.
	 */
	kbase_hwcnt_dump_buffer_copy_strict_narrow(dump_buf, tmp_buf,
						   &vcli->enable_map);

	clk_cnt = vcli->vctx->metadata->clk_cnt;

	meta->timestamp = ts_end_ns;
	meta->event_id = event_id;
	meta->buffer_idx = write_idx;
	meta->cycles.top = (clk_cnt > 0) ? dump_buf->clk_cnt_buf[0] : 0;
	meta->cycles.shader_cores =
		(clk_cnt > 1) ? dump_buf->clk_cnt_buf[1] : 0;

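	/* The barrier below pairs with the userspace reader: all writes to
	 * the dump buffer and its metadata above must be observable before
	 * the write_idx increment that publishes them.
	 */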
	/* Notify client. Make sure all changes to memory are visible. */
	wmb();
	atomic_inc(&vcli->write_idx);
	wake_up_interruptible(&vcli->waitq);
	return 0;
}

/**
 * kbasep_vinstr_client_clear() - Reset all the client's counters to zero.
 * @vcli: Non-NULL pointer to a vinstr client.
 *
 * Return: 0 on success, else error code.
 */
static int kbasep_vinstr_client_clear(struct kbase_vinstr_client *vcli)
{
	u64 ts_start_ns;
	u64 ts_end_ns;

	WARN_ON(!vcli);
	lockdep_assert_held(&vcli->vctx->lock);

	/* A virtualizer dump with a NULL buffer will just clear the virtualizer
	 * client's buffer.
	 */
	return kbase_hwcnt_virtualizer_client_dump(
		vcli->hvcli, &ts_start_ns, &ts_end_ns, NULL);
}

/**
 * kbasep_vinstr_reschedule_worker() - Update next dump times for all periodic
 *                                     vinstr clients, then reschedule the dump
 *                                     worker appropriately.
 * @vctx: Non-NULL pointer to the vinstr context.
 *
 * If there are no periodic clients, then the dump worker will not be
 * rescheduled. Else, the dump worker will be rescheduled for the next periodic
 * client dump.
 */
static void kbasep_vinstr_reschedule_worker(struct kbase_vinstr_context *vctx)
{
	u64 cur_ts_ns;
	u64 earliest_next_ns = U64_MAX;
	struct kbase_vinstr_client *pos;

	WARN_ON(!vctx);
	lockdep_assert_held(&vctx->lock);

	cur_ts_ns = kbasep_vinstr_timestamp_ns();

	/*
	 * Update each client's next dump time, and find the earliest next
	 * dump time if any of the clients have a non-zero interval.
	 */
	list_for_each_entry(pos, &vctx->clients, node) {
		const u64 cli_next_ns =
			kbasep_vinstr_next_dump_time_ns(
				cur_ts_ns, pos->dump_interval_ns);

		/* Non-zero next dump time implies a periodic client */
		if ((cli_next_ns != 0) && (cli_next_ns < earliest_next_ns))
			earliest_next_ns = cli_next_ns;

		pos->next_dump_time_ns = cli_next_ns;
	}

	/* Cancel the timer if it is already pending */
	hrtimer_cancel(&vctx->dump_timer);

	/* Start the timer if there are periodic clients and vinstr is not
	 * suspended.
	 */
	if ((earliest_next_ns != U64_MAX) &&
	    (vctx->suspend_count == 0) &&
	    !WARN_ON(earliest_next_ns < cur_ts_ns))
		hrtimer_start(
			&vctx->dump_timer,
			ns_to_ktime(earliest_next_ns - cur_ts_ns),
			HRTIMER_MODE_REL);
}

/**
 * kbasep_vinstr_dump_worker() - Dump worker that dumps all periodic clients
 *                               that need to be dumped, then reschedules
 *                               itself.
 * @work: Work structure.
 */
static void kbasep_vinstr_dump_worker(struct work_struct *work)
{
	struct kbase_vinstr_context *vctx =
		container_of(work, struct kbase_vinstr_context, dump_work);
	struct kbase_vinstr_client *pos;
	u64 cur_time_ns;

	mutex_lock(&vctx->lock);

	cur_time_ns = kbasep_vinstr_timestamp_ns();

	/* Dump all periodic clients whose next dump time is before the current
	 * time.
	 */
	list_for_each_entry(pos, &vctx->clients, node) {
		if ((pos->next_dump_time_ns != 0) &&
		    (pos->next_dump_time_ns < cur_time_ns))
			kbasep_vinstr_client_dump(
				pos, BASE_HWCNT_READER_EVENT_PERIODIC);
	}

	/* Update the next dump times of all periodic clients, then reschedule
	 * this worker at the earliest next dump time.
	 */
	kbasep_vinstr_reschedule_worker(vctx);

	mutex_unlock(&vctx->lock);
}

/**
 * kbasep_vinstr_dump_timer() - Dump timer that schedules the dump worker for
 *                              execution as soon as possible.
 * @timer: Timer structure.
 *
 * Return: HRTIMER_NORESTART always.
 */
static enum hrtimer_restart kbasep_vinstr_dump_timer(struct hrtimer *timer)
{
	struct kbase_vinstr_context *vctx =
		container_of(timer, struct kbase_vinstr_context, dump_timer);

	/* We don't need to check vctx->suspend_count here, as the suspend
	 * function will ensure that any worker enqueued here is immediately
	 * cancelled, and the worker itself won't reschedule this timer if
	 * suspend_count != 0.
	 */
	kbase_hwcnt_virtualizer_queue_work(vctx->hvirt, &vctx->dump_work);
	return HRTIMER_NORESTART;
}

/**
 * kbasep_vinstr_client_destroy() - Destroy a vinstr client.
 * @vcli: vinstr client. Must not be attached to a vinstr context.
 */
static void kbasep_vinstr_client_destroy(struct kbase_vinstr_client *vcli)
{
	if (!vcli)
		return;

	kbase_hwcnt_virtualizer_client_destroy(vcli->hvcli);
	kfree(vcli->dump_bufs_meta);
	kbase_hwcnt_dump_buffer_narrow_array_free(&vcli->dump_bufs);
	kbase_hwcnt_dump_buffer_free(&vcli->tmp_buf);
	kbase_hwcnt_enable_map_free(&vcli->enable_map);
	kfree(vcli);
}

/**
 * kbasep_vinstr_client_create() - Create a vinstr client. Does not attach to
 *                                 the vinstr context.
 * @vctx:     Non-NULL pointer to vinstr context.
 * @setup:    Non-NULL pointer to hardware counter ioctl setup structure.
 *            setup->buffer_count must not be 0 and must be a power of 2.
 * @out_vcli: Non-NULL pointer to where created client will be stored on
 *            success.
 *
 * Return: 0 on success, else error code.
 */
static int kbasep_vinstr_client_create(
	struct kbase_vinstr_context *vctx,
	struct kbase_ioctl_hwcnt_reader_setup *setup,
	struct kbase_vinstr_client **out_vcli)
{
	int errcode;
	struct kbase_vinstr_client *vcli;
	struct kbase_hwcnt_physical_enable_map phys_em;

	WARN_ON(!vctx);
	WARN_ON(!setup);
	WARN_ON(setup->buffer_count == 0);
	WARN_ON(!is_power_of_2(setup->buffer_count));

	vcli = kzalloc(sizeof(*vcli), GFP_KERNEL);
	if (!vcli)
		return -ENOMEM;

	vcli->vctx = vctx;

	errcode = kbase_hwcnt_enable_map_alloc(
		vctx->metadata, &vcli->enable_map);
	if (errcode)
		goto error;

	phys_em.fe_bm = setup->fe_bm;
	phys_em.shader_bm = setup->shader_bm;
	phys_em.tiler_bm = setup->tiler_bm;
	phys_em.mmu_l2_bm = setup->mmu_l2_bm;
	kbase_hwcnt_gpu_enable_map_from_physical(&vcli->enable_map, &phys_em);

	/* Use virtualizer's metadata to alloc tmp buffer which interacts with
	 * the HWC virtualizer.
	 */
	errcode = kbase_hwcnt_dump_buffer_alloc(vctx->metadata, &vcli->tmp_buf);
	if (errcode)
		goto error;

	/* Enable all the available clk_enable_map. */
	vcli->enable_map.clk_enable_map = (1ull << vctx->metadata->clk_cnt) - 1;
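	/* e.g. clk_cnt == 2 yields the mask 0b11, enabling cycle counting
	 * on both clock domains.
	 */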

	/* Use vinstr's narrowed metadata to alloc narrow dump buffers which
	 * interact with clients.
	 */
	errcode = kbase_hwcnt_dump_buffer_narrow_array_alloc(
		vctx->metadata_user, setup->buffer_count, &vcli->dump_bufs);
	if (errcode)
		goto error;

	errcode = -ENOMEM;
	vcli->dump_bufs_meta = kmalloc_array(
		setup->buffer_count, sizeof(*vcli->dump_bufs_meta), GFP_KERNEL);
	if (!vcli->dump_bufs_meta)
		goto error;

	errcode = kbase_hwcnt_virtualizer_client_create(
		vctx->hvirt, &vcli->enable_map, &vcli->hvcli);
	if (errcode)
		goto error;

	init_waitqueue_head(&vcli->waitq);

	*out_vcli = vcli;
	return 0;
error:
	kbasep_vinstr_client_destroy(vcli);
	return errcode;
}

int kbase_vinstr_init(
	struct kbase_hwcnt_virtualizer *hvirt,
	struct kbase_vinstr_context **out_vctx)
{
	int errcode;
	struct kbase_vinstr_context *vctx;
	const struct kbase_hwcnt_metadata *metadata;

	if (!hvirt || !out_vctx)
		return -EINVAL;

	metadata = kbase_hwcnt_virtualizer_metadata(hvirt);
	if (!metadata)
		return -EINVAL;

	vctx = kzalloc(sizeof(*vctx), GFP_KERNEL);
	if (!vctx)
		return -ENOMEM;

	vctx->hvirt = hvirt;
	vctx->metadata = metadata;
	errcode = kbase_hwcnt_gpu_metadata_narrow_create(&vctx->metadata_user,
							 metadata);
	if (errcode)
		goto err_metadata_create;

	mutex_init(&vctx->lock);
	INIT_LIST_HEAD(&vctx->clients);
	hrtimer_init(&vctx->dump_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	vctx->dump_timer.function = kbasep_vinstr_dump_timer;
	INIT_WORK(&vctx->dump_work, kbasep_vinstr_dump_worker);

	*out_vctx = vctx;
	return 0;

err_metadata_create:
	kfree(vctx);

	return errcode;
}

void kbase_vinstr_term(struct kbase_vinstr_context *vctx)
{
	if (!vctx)
		return;

	/* Non-zero client count implies client leak */
	if (WARN_ON(vctx->client_count != 0)) {
		struct kbase_vinstr_client *pos, *n;

		list_for_each_entry_safe(pos, n, &vctx->clients, node) {
			list_del(&pos->node);
			vctx->client_count--;
			kbasep_vinstr_client_destroy(pos);
		}
	}

	cancel_work_sync(&vctx->dump_work);
	kbase_hwcnt_gpu_metadata_narrow_destroy(vctx->metadata_user);

	WARN_ON(vctx->client_count != 0);
	kfree(vctx);
}

void kbase_vinstr_suspend(struct kbase_vinstr_context *vctx)
{
	if (WARN_ON(!vctx))
		return;

	mutex_lock(&vctx->lock);

	if (!WARN_ON(vctx->suspend_count == SIZE_MAX))
		vctx->suspend_count++;

	mutex_unlock(&vctx->lock);

	/* Always sync cancel the timer and then the worker, regardless of the
	 * new suspend count.
	 *
	 * This ensures concurrent calls to kbase_vinstr_suspend() always block
	 * until vinstr is fully suspended.
	 *
	 * The timer is cancelled before the worker, as the timer
	 * unconditionally re-enqueues the worker, but the worker checks the
	 * suspend_count that we just incremented before rescheduling the timer.
	 *
	 * Therefore if we cancel the worker first, the timer might re-enqueue
	 * the worker before we cancel the timer, but the opposite is not
	 * possible.
	 */
	hrtimer_cancel(&vctx->dump_timer);
	cancel_work_sync(&vctx->dump_work);
}

void kbase_vinstr_resume(struct kbase_vinstr_context *vctx)
{
	if (WARN_ON(!vctx))
		return;

	mutex_lock(&vctx->lock);

	if (!WARN_ON(vctx->suspend_count == 0)) {
		vctx->suspend_count--;

		/* Last resume, so re-enqueue the worker if we have any periodic
		 * clients.
		 */
		if (vctx->suspend_count == 0) {
			struct kbase_vinstr_client *pos;
			bool has_periodic_clients = false;

			list_for_each_entry(pos, &vctx->clients, node) {
				if (pos->dump_interval_ns != 0) {
					has_periodic_clients = true;
					break;
				}
			}

			if (has_periodic_clients)
				kbase_hwcnt_virtualizer_queue_work(
					vctx->hvirt, &vctx->dump_work);
		}
	}

	mutex_unlock(&vctx->lock);
}

int kbase_vinstr_hwcnt_reader_setup(
	struct kbase_vinstr_context *vctx,
	struct kbase_ioctl_hwcnt_reader_setup *setup)
{
	int errcode;
	int fd;
	struct kbase_vinstr_client *vcli = NULL;

	if (!vctx || !setup ||
	    (setup->buffer_count == 0) ||
	    (setup->buffer_count > MAX_BUFFER_COUNT) ||
	    !is_power_of_2(setup->buffer_count))
		return -EINVAL;

	errcode = kbasep_vinstr_client_create(vctx, setup, &vcli);
	if (errcode)
		goto error;

	/* Add the new client. No need to reschedule worker, as not periodic */
	mutex_lock(&vctx->lock);

	vctx->client_count++;
	list_add(&vcli->node, &vctx->clients);

	mutex_unlock(&vctx->lock);

	/* Expose to user-space only once the client is fully initialized */
	errcode = anon_inode_getfd(
		"[mali_vinstr_desc]",
		&vinstr_client_fops,
		vcli,
		O_RDONLY | O_CLOEXEC);
	if (errcode < 0)
		goto client_installed_error;

	fd = errcode;

	return fd;

client_installed_error:
	mutex_lock(&vctx->lock);

	vctx->client_count--;
	list_del(&vcli->node);

	mutex_unlock(&vctx->lock);
error:
	kbasep_vinstr_client_destroy(vcli);
	return errcode;
}
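
/* Illustrative userspace flow for the fd returned above (not code in this
 * file): mmap() the fd to map the dump buffers, issue
 * KBASE_HWCNT_READER_SET_INTERVAL to start periodic dumping (or
 * KBASE_HWCNT_READER_DUMP for a manual dump), poll() for EPOLLIN, then claim
 * each ready buffer with KBASE_HWCNT_READER_GET_BUFFER and return it with
 * KBASE_HWCNT_READER_PUT_BUFFER once consumed.
 */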

/**
 * kbasep_vinstr_hwcnt_reader_buffer_ready() - Check if client has ready
 *                                             buffers.
 * @cli: Non-NULL pointer to vinstr client.
 *
 * Return: Non-zero if client has at least one dumping buffer filled that was
 *         not notified to user yet.
 */
static int kbasep_vinstr_hwcnt_reader_buffer_ready(
	struct kbase_vinstr_client *cli)
{
	WARN_ON(!cli);
	return atomic_read(&cli->write_idx) != atomic_read(&cli->meta_idx);
}

/**
 * kbasep_vinstr_hwcnt_reader_ioctl_dump() - Dump ioctl command.
 * @cli: Non-NULL pointer to vinstr client.
 *
 * Return: 0 on success, else error code.
 */
static long kbasep_vinstr_hwcnt_reader_ioctl_dump(
	struct kbase_vinstr_client *cli)
{
	int errcode;

	mutex_lock(&cli->vctx->lock);

	errcode = kbasep_vinstr_client_dump(
		cli, BASE_HWCNT_READER_EVENT_MANUAL);

	mutex_unlock(&cli->vctx->lock);
	return errcode;
}

/**
 * kbasep_vinstr_hwcnt_reader_ioctl_clear() - Clear ioctl command.
 * @cli: Non-NULL pointer to vinstr client.
 *
 * Return: 0 on success, else error code.
 */
static long kbasep_vinstr_hwcnt_reader_ioctl_clear(
	struct kbase_vinstr_client *cli)
{
	int errcode;

	mutex_lock(&cli->vctx->lock);

	errcode = kbasep_vinstr_client_clear(cli);

	mutex_unlock(&cli->vctx->lock);
	return errcode;
}

/**
 * kbasep_vinstr_hwcnt_reader_ioctl_get_buffer() - Get buffer ioctl command.
 * @cli:    Non-NULL pointer to vinstr client.
 * @buffer: Non-NULL pointer to userspace buffer.
 * @size:   Size of buffer.
 *
 * Return: 0 on success, else error code.
 */
static long kbasep_vinstr_hwcnt_reader_ioctl_get_buffer(
	struct kbase_vinstr_client *cli,
	void __user *buffer,
	size_t size)
{
	unsigned int meta_idx = atomic_read(&cli->meta_idx);
	unsigned int idx = meta_idx % cli->dump_bufs.buf_cnt;

	struct kbase_hwcnt_reader_metadata *meta = &cli->dump_bufs_meta[idx];
	const size_t meta_size = sizeof(struct kbase_hwcnt_reader_metadata);
	const size_t min_size = min(size, meta_size);

	/* Metadata sanity check. */
	WARN_ON(idx != meta->buffer_idx);

	/* Check if there is any buffer available. */
	if (unlikely(atomic_read(&cli->write_idx) == meta_idx))
		return -EAGAIN;

	/* Check if previously taken buffer was put back. */
	if (unlikely(atomic_read(&cli->read_idx) != meta_idx))
		return -EBUSY;

	/* Clear user buffer to zero. */
	if (unlikely(meta_size < size && clear_user(buffer, size)))
		return -EFAULT;

	/* Copy next available buffer's metadata to user. */
	if (unlikely(copy_to_user(buffer, meta, min_size)))
		return -EFAULT;

	/* Compare exchange meta idx to protect against concurrent getters */
	if (meta_idx != atomic_cmpxchg(&cli->meta_idx, meta_idx, meta_idx + 1))
		return -EBUSY;

	return 0;
}

/**
 * kbasep_vinstr_hwcnt_reader_ioctl_put_buffer() - Put buffer ioctl command.
 * @cli:    Non-NULL pointer to vinstr client.
 * @buffer: Non-NULL pointer to userspace buffer.
 * @size:   Size of buffer.
 *
 * Return: 0 on success, else error code.
 */
static long kbasep_vinstr_hwcnt_reader_ioctl_put_buffer(
	struct kbase_vinstr_client *cli,
	void __user *buffer,
	size_t size)
{
	unsigned int read_idx = atomic_read(&cli->read_idx);
	unsigned int idx = read_idx % cli->dump_bufs.buf_cnt;

	struct kbase_hwcnt_reader_metadata *meta;
	const size_t meta_size = sizeof(struct kbase_hwcnt_reader_metadata);
	const size_t max_size = max(size, meta_size);
	int ret = 0;
	u8 stack_kbuf[64];
	u8 *kbuf = NULL;
	size_t i;

	/* Check if any buffer was taken. */
	if (unlikely(atomic_read(&cli->meta_idx) == read_idx))
		return -EPERM;

	if (likely(max_size <= sizeof(stack_kbuf))) {
		/* Use stack buffer when the size is small enough. */
		if (unlikely(meta_size > size))
			memset(stack_kbuf, 0, sizeof(stack_kbuf));
		kbuf = stack_kbuf;
	} else {
		kbuf = kzalloc(max_size, GFP_KERNEL);
		if (unlikely(!kbuf))
			return -ENOMEM;
	}

	/*
	 * Copy user buffer to zero cleared kernel buffer which has enough
	 * space for both user buffer and kernel metadata.
	 */
	if (unlikely(copy_from_user(kbuf, buffer, size))) {
		ret = -EFAULT;
		goto out;
	}

	/*
	 * Make sure any "extra" data passed from userspace is zero.
	 * It's meaningful only in case meta_size < size.
	 */
	for (i = meta_size; i < size; i++) {
		/* Check if user data beyond meta size is zero. */
		if (unlikely(kbuf[i] != 0)) {
			ret = -EINVAL;
			goto out;
		}
	}

	/* Check if correct buffer is put back. */
	meta = (struct kbase_hwcnt_reader_metadata *)kbuf;
	if (unlikely(idx != meta->buffer_idx)) {
		ret = -EINVAL;
		goto out;
	}

	/* Compare exchange read idx to protect against concurrent putters */
	if (read_idx !=
	    atomic_cmpxchg(&cli->read_idx, read_idx, read_idx + 1)) {
		ret = -EPERM;
		goto out;
	}

out:
	if (unlikely(kbuf != stack_kbuf))
		kfree(kbuf);
	return ret;
}

/**
 * kbasep_vinstr_hwcnt_reader_ioctl_set_interval() - Set interval ioctl command.
 * @cli:      Non-NULL pointer to vinstr client.
 * @interval: Periodic dumping interval (disable periodic dumping if 0).
 *
 * Return: 0 always.
 */
static long kbasep_vinstr_hwcnt_reader_ioctl_set_interval(
	struct kbase_vinstr_client *cli,
	u32 interval)
{
	mutex_lock(&cli->vctx->lock);

	if ((interval != 0) && (interval < DUMP_INTERVAL_MIN_NS))
		interval = DUMP_INTERVAL_MIN_NS;
	/* Update the interval, and put in a dummy next dump time */
	cli->dump_interval_ns = interval;
	cli->next_dump_time_ns = 0;

	/*
	 * If it's a periodic client, kick off the worker early to do a proper
	 * timer reschedule. Return value is ignored, as we don't care if the
	 * worker is already queued.
	 */
	if ((interval != 0) && (cli->vctx->suspend_count == 0))
		kbase_hwcnt_virtualizer_queue_work(cli->vctx->hvirt,
						   &cli->vctx->dump_work);

	mutex_unlock(&cli->vctx->lock);

	return 0;
}

/**
 * kbasep_vinstr_hwcnt_reader_ioctl_enable_event() - Enable event ioctl command.
 * @cli:      Non-NULL pointer to vinstr client.
 * @event_id: ID of event to enable.
 *
 * Return: 0 always.
 */
static long kbasep_vinstr_hwcnt_reader_ioctl_enable_event(
	struct kbase_vinstr_client *cli,
	enum base_hwcnt_reader_event event_id)
{
	/* No-op, as events aren't supported */
	return 0;
}

/**
 * kbasep_vinstr_hwcnt_reader_ioctl_disable_event() - Disable event ioctl
 *                                                    command.
 * @cli:      Non-NULL pointer to vinstr client.
 * @event_id: ID of event to disable.
 *
 * Return: 0 always.
 */
static long kbasep_vinstr_hwcnt_reader_ioctl_disable_event(
	struct kbase_vinstr_client *cli,
	enum base_hwcnt_reader_event event_id)
{
	/* No-op, as events aren't supported */
	return 0;
}

/**
 * kbasep_vinstr_hwcnt_reader_ioctl_get_hwver() - Get HW version ioctl command.
 * @cli:   Non-NULL pointer to vinstr client.
 * @hwver: Non-NULL pointer to user buffer where HW version will be stored.
 *
 * Return: 0 on success, else error code.
 */
static long kbasep_vinstr_hwcnt_reader_ioctl_get_hwver(
	struct kbase_vinstr_client *cli,
	u32 __user *hwver)
{
	u32 ver = 5;
	const enum kbase_hwcnt_gpu_group_type type =
		kbase_hwcnt_metadata_group_type(cli->vctx->metadata, 0);

	if (WARN_ON(type != KBASE_HWCNT_GPU_GROUP_TYPE_V5))
		return -EINVAL;

	return put_user(ver, hwver);
}

/**
 * kbasep_vinstr_hwcnt_reader_ioctl_get_api_version() - get API version ioctl
 *                                                      command.
 * @cli:  The non-NULL pointer to the client
 * @arg:  Command's argument.
 * @size: Size of arg.
 *
 * Return: 0 on success, else error code.
 */
static long kbasep_vinstr_hwcnt_reader_ioctl_get_api_version(
	struct kbase_vinstr_client *cli, unsigned long arg, size_t size)
{
	long ret = -EINVAL;

	if (size == sizeof(u32)) {
		ret = put_user(HWCNT_READER_API, (u32 __user *)arg);
	} else if (size == sizeof(struct kbase_hwcnt_reader_api_version)) {
		u8 clk_cnt = cli->vctx->metadata->clk_cnt;
		unsigned long bytes = 0;
		struct kbase_hwcnt_reader_api_version api_version = {
			.version = HWCNT_READER_API,
			.features = KBASE_HWCNT_READER_API_VERSION_NO_FEATURE,
		};

		if (clk_cnt > 0)
			api_version.features |=
				KBASE_HWCNT_READER_API_VERSION_FEATURE_CYCLES_TOP;
		if (clk_cnt > 1)
			api_version.features |=
				KBASE_HWCNT_READER_API_VERSION_FEATURE_CYCLES_SHADER_CORES;

		bytes = copy_to_user(
			(void __user *)arg, &api_version, sizeof(api_version));

		/* copy_to_user returns zero in case of success.
		 * If it fails, it returns the number of bytes that could NOT
		 * be copied.
		 */
		if (bytes == 0)
			ret = 0;
		else
			ret = -EFAULT;
	}
	return ret;
}

/**
 * kbasep_vinstr_hwcnt_reader_ioctl() - hwcnt reader's ioctl.
 * @filp: Non-NULL pointer to file structure.
 * @cmd:  User command.
 * @arg:  Command's argument.
 *
 * Return: 0 on success, else error code.
 */
static long kbasep_vinstr_hwcnt_reader_ioctl(
	struct file *filp,
	unsigned int cmd,
	unsigned long arg)
{
	long rcode;
	struct kbase_vinstr_client *cli;

	if (!filp || (_IOC_TYPE(cmd) != KBASE_HWCNT_READER))
		return -EINVAL;

	cli = filp->private_data;
	if (!cli)
		return -EINVAL;

	switch (_IOC_NR(cmd)) {
	case _IOC_NR(KBASE_HWCNT_READER_GET_API_VERSION):
		rcode = kbasep_vinstr_hwcnt_reader_ioctl_get_api_version(
			cli, arg, _IOC_SIZE(cmd));
		break;
	case _IOC_NR(KBASE_HWCNT_READER_GET_HWVER):
		rcode = kbasep_vinstr_hwcnt_reader_ioctl_get_hwver(
			cli, (u32 __user *)arg);
		break;
	case _IOC_NR(KBASE_HWCNT_READER_GET_BUFFER_SIZE):
		rcode = put_user((u32)cli->vctx->metadata_user->dump_buf_bytes,
				 (u32 __user *)arg);
		break;
	case _IOC_NR(KBASE_HWCNT_READER_DUMP):
		rcode = kbasep_vinstr_hwcnt_reader_ioctl_dump(cli);
		break;
	case _IOC_NR(KBASE_HWCNT_READER_CLEAR):
		rcode = kbasep_vinstr_hwcnt_reader_ioctl_clear(cli);
		break;
	case _IOC_NR(KBASE_HWCNT_READER_GET_BUFFER):
		rcode = kbasep_vinstr_hwcnt_reader_ioctl_get_buffer(
			cli, (void __user *)arg, _IOC_SIZE(cmd));
		break;
	case _IOC_NR(KBASE_HWCNT_READER_PUT_BUFFER):
		rcode = kbasep_vinstr_hwcnt_reader_ioctl_put_buffer(
			cli, (void __user *)arg, _IOC_SIZE(cmd));
		break;
	case _IOC_NR(KBASE_HWCNT_READER_SET_INTERVAL):
		rcode = kbasep_vinstr_hwcnt_reader_ioctl_set_interval(
			cli, (u32)arg);
		break;
	case _IOC_NR(KBASE_HWCNT_READER_ENABLE_EVENT):
		rcode = kbasep_vinstr_hwcnt_reader_ioctl_enable_event(
			cli, (enum base_hwcnt_reader_event)arg);
		break;
	case _IOC_NR(KBASE_HWCNT_READER_DISABLE_EVENT):
		rcode = kbasep_vinstr_hwcnt_reader_ioctl_disable_event(
			cli, (enum base_hwcnt_reader_event)arg);
		break;
	default:
		pr_warn("Unknown HWCNT ioctl 0x%x nr:%d\n", cmd, _IOC_NR(cmd));
		rcode = -EINVAL;
		break;
	}

	return rcode;
}

/**
 * kbasep_vinstr_hwcnt_reader_poll() - hwcnt reader's poll.
 * @filp: Non-NULL pointer to file structure.
 * @wait: Non-NULL pointer to poll table.
 *
 * Return: EPOLLIN | EPOLLRDNORM if data can be read without blocking, 0 if
 *         data cannot be read without blocking, else EPOLLHUP | EPOLLERR.
 */
static __poll_t kbasep_vinstr_hwcnt_reader_poll(struct file *filp, poll_table *wait)
{
	struct kbase_vinstr_client *cli;

	if (!filp || !wait)
		return EPOLLHUP | EPOLLERR;

	cli = filp->private_data;
	if (!cli)
		return EPOLLHUP | EPOLLERR;

	poll_wait(filp, &cli->waitq, wait);
	if (kbasep_vinstr_hwcnt_reader_buffer_ready(cli))
		return EPOLLIN | EPOLLRDNORM;

	return (__poll_t)0;
}

/**
 * kbasep_vinstr_hwcnt_reader_mmap() - hwcnt reader's mmap.
 * @filp: Non-NULL pointer to file structure.
 * @vma:  Non-NULL pointer to vma structure.
 *
 * Return: 0 on success, else error code.
 */
static int kbasep_vinstr_hwcnt_reader_mmap(
	struct file *filp,
	struct vm_area_struct *vma)
{
	struct kbase_vinstr_client *cli;
	unsigned long vm_size, size, addr, pfn, offset;

	if (!filp || !vma)
		return -EINVAL;

	cli = filp->private_data;
	if (!cli)
		return -EINVAL;

	vm_size = vma->vm_end - vma->vm_start;

	/* The mapping is allowed to span the entirety of the page allocation,
	 * not just the chunk where the dump buffers are allocated.
	 * This accommodates the corner case where the combined size of the
	 * dump buffers is smaller than a single page.
	 * This does not pose a security risk as the pages are zeroed on
	 * allocation, and anything out of bounds of the dump buffers is never
	 * written to.
	 */
	size = (1ull << cli->dump_bufs.page_order) * PAGE_SIZE;

	if (vma->vm_pgoff > (size >> PAGE_SHIFT))
		return -EINVAL;

	offset = vma->vm_pgoff << PAGE_SHIFT;
	if (vm_size > size - offset)
		return -EINVAL;

	addr = __pa(cli->dump_bufs.page_addr + offset);
	pfn = addr >> PAGE_SHIFT;

	return remap_pfn_range(
		vma, vma->vm_start, pfn, vm_size, vma->vm_page_prot);
}

/**
 * kbasep_vinstr_hwcnt_reader_release() - hwcnt reader's release.
 * @inode: Non-NULL pointer to inode structure.
 * @filp:  Non-NULL pointer to file structure.
 *
 * Return: 0 always.
 */
static int kbasep_vinstr_hwcnt_reader_release(struct inode *inode,
					      struct file *filp)
{
	struct kbase_vinstr_client *vcli = filp->private_data;

	mutex_lock(&vcli->vctx->lock);

	vcli->vctx->client_count--;
	list_del(&vcli->node);

	mutex_unlock(&vcli->vctx->lock);

	kbasep_vinstr_client_destroy(vcli);

	return 0;
}