// SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note
/*
 *
 * (C) COPYRIGHT 2022 ARM Limited. All rights reserved.
 *
 * This program is free software and is provided to you under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation, and any use by you of this program is subject to the terms
 * of such GNU license.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, you can access it online at
 * http://www.gnu.org/licenses/gpl-2.0.html.
 *
 */
21
22 #include <mali_kbase.h>
23
24 #if IS_ENABLED(CONFIG_DEBUG_FS)
25
26 /**
27 * kbasep_fault_occurred - Check if fault occurred.
28 *
29 * @kbdev: Device pointer
30 *
31 * Return: true if a fault occurred.
32 */
kbasep_fault_occurred(struct kbase_device * kbdev)33 static bool kbasep_fault_occurred(struct kbase_device *kbdev)
34 {
35 unsigned long flags;
36 bool ret;
37
38 spin_lock_irqsave(&kbdev->csf.dof.lock, flags);
39 ret = (kbdev->csf.dof.error_code != DF_NO_ERROR);
40 spin_unlock_irqrestore(&kbdev->csf.dof.lock, flags);
41
42 return ret;
43 }
44
kbase_debug_csf_fault_wait_completion(struct kbase_device * kbdev)45 void kbase_debug_csf_fault_wait_completion(struct kbase_device *kbdev)
46 {
47 if (likely(!kbase_debug_csf_fault_dump_enabled(kbdev))) {
48 dev_dbg(kbdev->dev, "No userspace client for dumping exists");
49 return;
50 }
51
52 wait_event(kbdev->csf.dof.dump_wait_wq, kbase_debug_csf_fault_dump_complete(kbdev));
53 }
54 KBASE_EXPORT_TEST_API(kbase_debug_csf_fault_wait_completion);
55
56 /**
57 * kbase_debug_csf_fault_wakeup - Wake up a waiting user space client.
58 *
59 * @kbdev: Kbase device
60 */
kbase_debug_csf_fault_wakeup(struct kbase_device * kbdev)61 static void kbase_debug_csf_fault_wakeup(struct kbase_device *kbdev)
62 {
63 wake_up_interruptible(&kbdev->csf.dof.fault_wait_wq);
64 }
65
kbase_debug_csf_fault_notify(struct kbase_device * kbdev,struct kbase_context * kctx,enum dumpfault_error_type error)66 bool kbase_debug_csf_fault_notify(struct kbase_device *kbdev,
67 struct kbase_context *kctx, enum dumpfault_error_type error)
68 {
69 unsigned long flags;
70
71 if (likely(!kbase_debug_csf_fault_dump_enabled(kbdev)))
72 return false;
73
74 if (WARN_ON(error == DF_NO_ERROR))
75 return false;
76
77 if (kctx && kbase_ctx_flag(kctx, KCTX_DYING)) {
78 dev_info(kbdev->dev, "kctx %d_%d is dying when error %d is reported",
79 kctx->tgid, kctx->id, error);
80 kctx = NULL;
81 }
82
83 spin_lock_irqsave(&kbdev->csf.dof.lock, flags);
84
85 /* Only one fault at a time can be processed */
86 if (kbdev->csf.dof.error_code) {
87 dev_info(kbdev->dev, "skip this fault as there's a pending fault");
88 goto unlock;
89 }
90
91 kbdev->csf.dof.kctx_tgid = kctx ? kctx->tgid : 0;
92 kbdev->csf.dof.kctx_id = kctx ? kctx->id : 0;
93 kbdev->csf.dof.error_code = error;
94 kbase_debug_csf_fault_wakeup(kbdev);
95
96 unlock:
97 spin_unlock_irqrestore(&kbdev->csf.dof.lock, flags);
98 return true;
99 }
100
debug_csf_fault_read(struct file * file,char __user * buffer,size_t size,loff_t * f_pos)101 static ssize_t debug_csf_fault_read(struct file *file, char __user *buffer, size_t size,
102 loff_t *f_pos)
103 {
104 #define BUF_SIZE 64
105 struct kbase_device *kbdev;
106 unsigned long flags;
107 int count;
108 char buf[BUF_SIZE];
109 u32 tgid, ctx_id;
110 enum dumpfault_error_type error_code;
111
112 if (unlikely(!file)) {
113 pr_warn("%s: file is NULL", __func__);
114 return -EINVAL;
115 }
116
117 kbdev = file->private_data;
118 if (unlikely(!buffer)) {
119 dev_warn(kbdev->dev, "%s: buffer is NULL", __func__);
120 return -EINVAL;
121 }
122
123 if (unlikely(*f_pos < 0)) {
124 dev_warn(kbdev->dev, "%s: f_pos is negative", __func__);
125 return -EINVAL;
126 }
127
128 if (size < sizeof(buf)) {
129 dev_warn(kbdev->dev, "%s: buffer is too small", __func__);
130 return -EINVAL;
131 }
132
133 if (wait_event_interruptible(kbdev->csf.dof.fault_wait_wq, kbasep_fault_occurred(kbdev)))
134 return -ERESTARTSYS;
135
136 spin_lock_irqsave(&kbdev->csf.dof.lock, flags);
137 tgid = kbdev->csf.dof.kctx_tgid;
138 ctx_id = kbdev->csf.dof.kctx_id;
139 error_code = kbdev->csf.dof.error_code;
140 BUILD_BUG_ON(sizeof(buf) < (sizeof(tgid) + sizeof(ctx_id) + sizeof(error_code)));
141 count = scnprintf(buf, sizeof(buf), "%u_%u_%u\n", tgid, ctx_id, error_code);
142 spin_unlock_irqrestore(&kbdev->csf.dof.lock, flags);
143
144 dev_info(kbdev->dev, "debug csf fault info read");
145 return simple_read_from_buffer(buffer, size, f_pos, buf, count);
146 }
147
static int debug_csf_fault_open(struct inode *in, struct file *file)
{
	struct kbase_device *kbdev;

	if (unlikely(!in)) {
		pr_warn("%s: inode is NULL", __func__);
		return -EINVAL;
	}

	kbdev = in->i_private;
	if (unlikely(!file)) {
		dev_warn(kbdev->dev, "%s: file is NULL", __func__);
		return -EINVAL;
	}

	/* Claim the single-client slot atomically: cmpxchg returns the old
	 * value, so 1 means another client already has the file open.
	 */
	if (atomic_cmpxchg(&kbdev->csf.dof.enabled, 0, 1) == 1) {
		dev_warn(kbdev->dev, "Only one client is allowed for dump on fault");
		return -EBUSY;
	}

	dev_info(kbdev->dev, "debug csf fault file open");

	/* Stash i_private into file->private_data for read/write handlers. */
	return simple_open(in, file);
}
172
debug_csf_fault_write(struct file * file,const char __user * ubuf,size_t count,loff_t * ppos)173 static ssize_t debug_csf_fault_write(struct file *file, const char __user *ubuf, size_t count,
174 loff_t *ppos)
175 {
176 struct kbase_device *kbdev;
177 unsigned long flags;
178
179 if (unlikely(!file)) {
180 pr_warn("%s: file is NULL", __func__);
181 return -EINVAL;
182 }
183
184 kbdev = file->private_data;
185 spin_lock_irqsave(&kbdev->csf.dof.lock, flags);
186 kbdev->csf.dof.error_code = DF_NO_ERROR;
187 kbdev->csf.dof.kctx_tgid = 0;
188 kbdev->csf.dof.kctx_id = 0;
189 dev_info(kbdev->dev, "debug csf fault dump complete");
190 spin_unlock_irqrestore(&kbdev->csf.dof.lock, flags);
191
192 /* User space finished the dump.
193 * Wake up blocked kernel threads to proceed.
194 */
195 wake_up(&kbdev->csf.dof.dump_wait_wq);
196
197 return count;
198 }
199
debug_csf_fault_release(struct inode * in,struct file * file)200 static int debug_csf_fault_release(struct inode *in, struct file *file)
201 {
202 struct kbase_device *kbdev;
203 unsigned long flags;
204
205 if (unlikely(!in)) {
206 pr_warn("%s: inode is NULL", __func__);
207 return -EINVAL;
208 }
209
210 kbdev = in->i_private;
211 spin_lock_irqsave(&kbdev->csf.dof.lock, flags);
212 kbdev->csf.dof.kctx_tgid = 0;
213 kbdev->csf.dof.kctx_id = 0;
214 kbdev->csf.dof.error_code = DF_NO_ERROR;
215 spin_unlock_irqrestore(&kbdev->csf.dof.lock, flags);
216
217 atomic_set(&kbdev->csf.dof.enabled, 0);
218 dev_info(kbdev->dev, "debug csf fault file close");
219
220 /* User space closed the debugfs file.
221 * Wake up blocked kernel threads to resume.
222 */
223 wake_up(&kbdev->csf.dof.dump_wait_wq);
224
225 return 0;
226 }
227
228 static const struct file_operations kbasep_debug_csf_fault_fops = {
229 .owner = THIS_MODULE,
230 .open = debug_csf_fault_open,
231 .read = debug_csf_fault_read,
232 .write = debug_csf_fault_write,
233 .llseek = default_llseek,
234 .release = debug_csf_fault_release,
235 };
236
kbase_debug_csf_fault_debugfs_init(struct kbase_device * kbdev)237 void kbase_debug_csf_fault_debugfs_init(struct kbase_device *kbdev)
238 {
239 const char *fname = "csf_fault";
240
241 if (unlikely(!kbdev)) {
242 pr_warn("%s: kbdev is NULL", __func__);
243 return;
244 }
245
246 debugfs_create_file(fname, 0600, kbdev->mali_debugfs_directory, kbdev,
247 &kbasep_debug_csf_fault_fops);
248 }
249
kbase_debug_csf_fault_init(struct kbase_device * kbdev)250 int kbase_debug_csf_fault_init(struct kbase_device *kbdev)
251 {
252 if (unlikely(!kbdev)) {
253 pr_warn("%s: kbdev is NULL", __func__);
254 return -EINVAL;
255 }
256
257 init_waitqueue_head(&(kbdev->csf.dof.fault_wait_wq));
258 init_waitqueue_head(&(kbdev->csf.dof.dump_wait_wq));
259 spin_lock_init(&kbdev->csf.dof.lock);
260 kbdev->csf.dof.kctx_tgid = 0;
261 kbdev->csf.dof.kctx_id = 0;
262 kbdev->csf.dof.error_code = DF_NO_ERROR;
263 atomic_set(&kbdev->csf.dof.enabled, 0);
264
265 return 0;
266 }
267
void kbase_debug_csf_fault_term(struct kbase_device *kbdev)
{
	/* Intentionally empty: init allocates no resources that need freeing.
	 * NOTE(review): kept as a symmetric counterpart to
	 * kbase_debug_csf_fault_init() — presumably callers invoke it
	 * unconditionally during device teardown.
	 */
}
271 #endif /* CONFIG_DEBUG_FS */
272