/*
 *
 * (C) COPYRIGHT 2012-2016 ARM Limited. All rights reserved.
 *
 * This program is free software and is provided to you under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation, and any use by you of this program is subject to the terms
 * of such GNU licence.
 *
 * A copy of the licence is included with the program, and can also be obtained
 * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
 * Boston, MA 02110-1301, USA.
 *
 */

#include <mali_kbase.h>
#include <mali_kbase_jm.h>
#include <mali_kbase_hwaccess_jm.h>

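/* CREATE_TRACE_POINTS must be defined in exactly one compilation unit before
 * the trace header is included, so that the tracepoint definitions (not just
 * their declarations) are emitted into this file.
 */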
#define CREATE_TRACE_POINTS

#ifdef CONFIG_MALI_TRACE_TIMELINE
#include "mali_timeline.h"

#include <linux/debugfs.h>
#include <linux/seq_file.h>

EXPORT_TRACEPOINT_SYMBOL_GPL(mali_timeline_atoms_in_flight);
EXPORT_TRACEPOINT_SYMBOL_GPL(mali_timeline_atom);
EXPORT_TRACEPOINT_SYMBOL_GPL(mali_timeline_gpu_slot_active);
EXPORT_TRACEPOINT_SYMBOL_GPL(mali_timeline_gpu_slot_action);
EXPORT_TRACEPOINT_SYMBOL_GPL(mali_timeline_gpu_power_active);
EXPORT_TRACEPOINT_SYMBOL_GPL(mali_timeline_l2_power_active);
EXPORT_TRACEPOINT_SYMBOL_GPL(mali_timeline_pm_event);
EXPORT_TRACEPOINT_SYMBOL_GPL(mali_timeline_slot_atom);
EXPORT_TRACEPOINT_SYMBOL_GPL(mali_timeline_pm_checktrans);
EXPORT_TRACEPOINT_SYMBOL_GPL(mali_timeline_context_active);

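/* Descriptor for one timeline trace code. The table below is generated by
 * expanding the KBASE_TIMELINE_TRACE_CODE() entries listed in
 * mali_kbase_trace_timeline_defs.h, giving one string tuple per trace code.
 */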
struct kbase_trace_timeline_desc {
	char *enum_str;
	char *desc;
	char *format;
	char *format_desc;
};

static struct kbase_trace_timeline_desc kbase_trace_timeline_desc_table[] = {
	#define KBASE_TIMELINE_TRACE_CODE(enum_val, desc, format, format_desc) { #enum_val, desc, format, format_desc }
	#include "mali_kbase_trace_timeline_defs.h"
	#undef KBASE_TIMELINE_TRACE_CODE
};

#define KBASE_NR_TRACE_CODES ARRAY_SIZE(kbase_trace_timeline_desc_table)

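/* seq_file iterator over kbase_trace_timeline_desc_table: each entry is shown
 * as a single "enum#description#format#format_description" line.
 */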
static void *kbasep_trace_timeline_seq_start(struct seq_file *s, loff_t *pos)
{
	if (*pos >= KBASE_NR_TRACE_CODES)
		return NULL;

	return &kbase_trace_timeline_desc_table[*pos];
}

static void kbasep_trace_timeline_seq_stop(struct seq_file *s, void *data)
{
}

static void *kbasep_trace_timeline_seq_next(struct seq_file *s, void *data, loff_t *pos)
{
	(*pos)++;

	if (*pos == KBASE_NR_TRACE_CODES)
		return NULL;

	return &kbase_trace_timeline_desc_table[*pos];
}

static int kbasep_trace_timeline_seq_show(struct seq_file *s, void *data)
{
	struct kbase_trace_timeline_desc *trace_desc = data;

	seq_printf(s, "%s#%s#%s#%s\n", trace_desc->enum_str, trace_desc->desc, trace_desc->format, trace_desc->format_desc);
	return 0;
}

static const struct seq_operations kbasep_trace_timeline_seq_ops = {
	.start = kbasep_trace_timeline_seq_start,
	.next = kbasep_trace_timeline_seq_next,
	.stop = kbasep_trace_timeline_seq_stop,
	.show = kbasep_trace_timeline_seq_show,
};

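/* debugfs open handler: hand the file straight over to the seq_file iterator above. */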
static int kbasep_trace_timeline_debugfs_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &kbasep_trace_timeline_seq_ops);
}

static const struct file_operations kbasep_trace_timeline_debugfs_fops = {
	.open = kbasep_trace_timeline_debugfs_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
};

#ifdef CONFIG_DEBUG_FS

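/* Expose the trace code table as a read-only "mali_timeline_defs" entry in the
 * device's debugfs directory.
 */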
void kbasep_trace_timeline_debugfs_init(struct kbase_device *kbdev)
{
	debugfs_create_file("mali_timeline_defs",
			S_IRUGO, kbdev->mali_debugfs_directory, NULL,
			&kbasep_trace_timeline_debugfs_fops);
}

#endif /* CONFIG_DEBUG_FS */

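/* Trace the submission of an atom to job slot js. If the slot already has an
 * atom submitted, the new atom is traced as entering JS_NEXT; otherwise it is
 * traced as starting in JS_HEAD. Caller must hold hwaccess_lock.
 */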
void kbase_timeline_job_slot_submit(struct kbase_device *kbdev, struct kbase_context *kctx,
				struct kbase_jd_atom *katom, int js)
{
	lockdep_assert_held(&kbdev->hwaccess_lock);

	if (kbdev->timeline.slot_atoms_submitted[js] > 0) {
		KBASE_TIMELINE_JOB_START_NEXT(kctx, js, 1);
	} else {
		base_atom_id atom_number = kbase_jd_atom_id(kctx, katom);

		KBASE_TIMELINE_JOB_START_HEAD(kctx, js, 1);
		KBASE_TIMELINE_JOB_START(kctx, js, atom_number);
	}
	++kbdev->timeline.slot_atoms_submitted[js];

	KBASE_TIMELINE_ATOMS_SUBMITTED(kctx, js, kbdev->timeline.slot_atoms_submitted[js]);
}

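/* Trace the completion of an atom on job slot js. An atom evicted from JS_NEXT
 * simply clears the NEXT marker; an atom that finished in JS_HEAD emits a stop
 * event and, if another atom is still submitted on the slot, that atom is
 * traced moving from JS_NEXT into JS_HEAD. Caller must hold hwaccess_lock.
 */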
void kbase_timeline_job_slot_done(struct kbase_device *kbdev, struct kbase_context *kctx,
				struct kbase_jd_atom *katom, int js,
				kbasep_js_atom_done_code done_code)
{
	lockdep_assert_held(&kbdev->hwaccess_lock);

	if (done_code & KBASE_JS_ATOM_DONE_EVICTED_FROM_NEXT) {
		KBASE_TIMELINE_JOB_START_NEXT(kctx, js, 0);
	} else {
		/* Job finished in JS_HEAD */
		base_atom_id atom_number = kbase_jd_atom_id(kctx, katom);

		KBASE_TIMELINE_JOB_START_HEAD(kctx, js, 0);
		KBASE_TIMELINE_JOB_STOP(kctx, js, atom_number);

		/* see if we need to trace the job in JS_NEXT moving to JS_HEAD */
		if (kbase_backend_nr_atoms_submitted(kbdev, js)) {
			struct kbase_jd_atom *next_katom;
			struct kbase_context *next_kctx;

			/* Peek the next atom - note that the atom in JS_HEAD will already
			 * have been dequeued */
			next_katom = kbase_backend_inspect_head(kbdev, js);
			WARN_ON(!next_katom);
			next_kctx = next_katom->kctx;
			KBASE_TIMELINE_JOB_START_NEXT(next_kctx, js, 0);
			KBASE_TIMELINE_JOB_START_HEAD(next_kctx, js, 1);
			KBASE_TIMELINE_JOB_START(next_kctx, js, kbase_jd_atom_id(next_kctx, next_katom));
		}
	}

	--kbdev->timeline.slot_atoms_submitted[js];

	KBASE_TIMELINE_ATOMS_SUBMITTED(kctx, js, kbdev->timeline.slot_atoms_submitted[js]);
}

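/* Trace a PM event being sent. A non-zero UID is allocated (or an existing
 * producer's UID reused) so the matching handle event can be correlated with
 * it; losing the cmpxchg race against another producer reports a UID of 0.
 */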
void kbase_timeline_pm_send_event(struct kbase_device *kbdev, enum kbase_timeline_pm_event event_sent)
{
	int uid = 0;
	int old_uid;

	/* If a producer already exists for the event, try to use their UID (multiple-producers) */
	uid = atomic_read(&kbdev->timeline.pm_event_uid[event_sent]);
	old_uid = uid;

	/* Get a new non-zero UID if we don't have one yet */
	while (!uid)
		uid = atomic_inc_return(&kbdev->timeline.pm_event_uid_counter);

	/* Try to use this UID */
	if (old_uid != atomic_cmpxchg(&kbdev->timeline.pm_event_uid[event_sent], old_uid, uid))
		/* If it changed, raced with another producer: we've lost this UID */
		uid = 0;

	KBASE_TIMELINE_PM_SEND_EVENT(kbdev, event_sent, uid);
}

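/* Trace the handling of a PM event, but only if a send was actually recorded
 * for it (i.e. the stored UID is non-zero).
 */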
void kbase_timeline_pm_check_handle_event(struct kbase_device *kbdev, enum kbase_timeline_pm_event event)
{
	int uid = atomic_read(&kbdev->timeline.pm_event_uid[event]);

	if (uid != 0) {
		if (uid != atomic_cmpxchg(&kbdev->timeline.pm_event_uid[event], uid, 0))
			/* If it changed, raced with another consumer: we've lost this UID */
			uid = 0;

		KBASE_TIMELINE_PM_HANDLE_EVENT(kbdev, event, uid);
	}
}

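/* Trace the handling of a PM event unconditionally, consuming the stored UID
 * (or reporting 0 if another consumer got there first).
 */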
void kbase_timeline_pm_handle_event(struct kbase_device *kbdev, enum kbase_timeline_pm_event event)
{
	int uid = atomic_read(&kbdev->timeline.pm_event_uid[event]);

	if (uid != atomic_cmpxchg(&kbdev->timeline.pm_event_uid[event], uid, 0))
		/* If it changed, raced with another consumer: we've lost this UID */
		uid = 0;

	KBASE_TIMELINE_PM_HANDLE_EVENT(kbdev, event, uid);
}

void kbase_timeline_pm_l2_transition_start(struct kbase_device *kbdev)
{
	lockdep_assert_held(&kbdev->hwaccess_lock);
	/* Simply log the start of the transition */
	kbdev->timeline.l2_transitioning = true;
	KBASE_TIMELINE_POWERING_L2(kbdev);
}

void kbase_timeline_pm_l2_transition_done(struct kbase_device *kbdev)
{
	lockdep_assert_held(&kbdev->hwaccess_lock);
	/* Simply log the end of the transition */
	if (kbdev->timeline.l2_transitioning) {
		kbdev->timeline.l2_transitioning = false;
		KBASE_TIMELINE_POWERED_L2(kbdev);
	}
}

#endif /* CONFIG_MALI_TRACE_TIMELINE */