// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
//
// This file is provided under a dual BSD/GPLv2 license. When using or
// redistributing this file, you may do so under either license.
//
// Copyright(c) 2018 Intel Corporation. All rights reserved.
//
// Author: Liam Girdwood <liam.r.girdwood@linux.intel.com>
//

#include <linux/debugfs.h>
#include <linux/sched/signal.h>
#include "sof-priv.h"
#include "ops.h"

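/*
 * Return the number of trace bytes that can be read from local position
 * @pos without blocking. The host DMA buffer is circular: if the firmware
 * write pointer (host_offset) has wrapped behind @pos, only the data up to
 * the end of the buffer is reported for this pass.
 */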
static size_t sof_trace_avail(struct snd_sof_dev *sdev,
			      loff_t pos, size_t buffer_size)
{
	loff_t host_offset = READ_ONCE(sdev->host_offset);

	/*
	 * If the host offset is less than the local pos, the write pointer of
	 * the host DMA buffer has wrapped. Output the trace data at the end
	 * of the host DMA buffer first.
	 */
	if (host_offset < pos)
		return buffer_size - pos;

	/* If there is available trace data now, it is unnecessary to wait. */
	if (host_offset > pos)
		return host_offset - pos;

	return 0;
}

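/*
 * Block until trace data is available, tracing has been stopped and drained,
 * or the caller is interrupted by a signal. Returns the number of bytes that
 * can be read at @pos, or 0 on end-of-trace.
 */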
static size_t sof_wait_trace_avail(struct snd_sof_dev *sdev,
				   loff_t pos, size_t buffer_size)
{
	wait_queue_entry_t wait;
	size_t ret = sof_trace_avail(sdev, pos, buffer_size);

	/* data immediately available */
	if (ret)
		return ret;

	if (!sdev->dtrace_is_enabled && sdev->dtrace_draining) {
		/*
		 * tracing has ended and all traces have been
		 * read by client, return EOF
		 */
		sdev->dtrace_draining = false;
		return 0;
	}

	/* wait for available trace data from FW */
	init_waitqueue_entry(&wait, current);
	set_current_state(TASK_INTERRUPTIBLE);
	add_wait_queue(&sdev->trace_sleep, &wait);

	if (!signal_pending(current)) {
		/* set timeout to max value, no error code */
		schedule_timeout(MAX_SCHEDULE_TIMEOUT);
	}
	remove_wait_queue(&sdev->trace_sleep, &wait);

	return sof_trace_avail(sdev, pos, buffer_size);
}

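/*
 * debugfs read handler for the "trace" entry. The file position is folded
 * into the circular DMA buffer, the call blocks until trace data is
 * available and then copies at most one contiguous chunk to user space.
 */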
static ssize_t sof_dfsentry_trace_read(struct file *file, char __user *buffer,
				       size_t count, loff_t *ppos)
{
	struct snd_sof_dfsentry *dfse = file->private_data;
	struct snd_sof_dev *sdev = dfse->sdev;
	unsigned long rem;
	loff_t lpos = *ppos;
	size_t avail, buffer_size = dfse->size;
	u64 lpos_64;

	/* make sure we know about any failures on the DSP side */
	sdev->dtrace_error = false;

	/* check pos and count */
	if (lpos < 0)
		return -EINVAL;
	if (!count)
		return 0;

	/* check for buffer wrap and count overflow */
	lpos_64 = lpos;
	lpos = do_div(lpos_64, buffer_size);

	/* min() not used to avoid sparse warnings */
	if (count > buffer_size - lpos)
		count = buffer_size - lpos;

	/* get available count based on current host offset */
	avail = sof_wait_trace_avail(sdev, lpos, buffer_size);
	if (sdev->dtrace_error) {
		dev_err(sdev->dev, "error: trace IO error\n");
		return -EIO;
	}

	/* make sure count is <= avail */
	count = avail > count ? count : avail;

	/* copy available trace data to debugfs */
	rem = copy_to_user(buffer, ((u8 *)(dfse->buf) + lpos), count);
	if (rem)
		return -EFAULT;

	/* move debugfs reading position */
	*ppos += count;

	return count;
}

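/*
 * When the trace file is closed while tracing is disabled, reset the host
 * DMA offset so that the next open does not see duplicate trace data.
 */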
static int sof_dfsentry_trace_release(struct inode *inode, struct file *file)
{
	struct snd_sof_dfsentry *dfse = inode->i_private;
	struct snd_sof_dev *sdev = dfse->sdev;

	/* avoid duplicate traces at next open */
	if (!sdev->dtrace_is_enabled)
		sdev->host_offset = 0;

	return 0;
}

static const struct file_operations sof_dfs_trace_fops = {
	.open = simple_open,
	.read = sof_dfsentry_trace_read,
	.llseek = default_llseek,
	.release = sof_dfsentry_trace_release,
};

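/*
 * Expose the trace DMA buffer to user space as a read-only debugfs file
 * named "trace" under the SOF debugfs root.
 */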
static int trace_debugfs_create(struct snd_sof_dev *sdev)
{
	struct snd_sof_dfsentry *dfse;

	if (!sdev)
		return -EINVAL;

	dfse = devm_kzalloc(sdev->dev, sizeof(*dfse), GFP_KERNEL);
	if (!dfse)
		return -ENOMEM;

	dfse->type = SOF_DFSENTRY_TYPE_BUF;
	dfse->buf = sdev->dmatb.area;
	dfse->size = sdev->dmatb.bytes;
	dfse->sdev = sdev;

	debugfs_create_file("trace", 0444, sdev->debugfs_root, dfse,
			    &sof_dfs_trace_fops);

	return 0;
}

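/*
 * Configure and start DMA tracing in the firmware: set up the host DMA
 * stream, send the trace buffer parameters over IPC (using the extended
 * format with a timestamp on ABI 3.7.0 and newer), then trigger the trace
 * stream.
 */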
int snd_sof_init_trace_ipc(struct snd_sof_dev *sdev)
{
	struct sof_ipc_fw_ready *ready = &sdev->fw_ready;
	struct sof_ipc_fw_version *v = &ready->version;
	struct sof_ipc_dma_trace_params_ext params;
	struct sof_ipc_reply ipc_reply;
	int ret;

	if (!sdev->dtrace_is_supported)
		return 0;

	if (sdev->dtrace_is_enabled || !sdev->dma_trace_pages)
		return -EINVAL;

	/* set IPC parameters */
	params.hdr.cmd = SOF_IPC_GLB_TRACE_MSG;
	/* PARAMS_EXT is only supported from ABI 3.7.0 onwards */
	if (v->abi_version >= SOF_ABI_VER(3, 7, 0)) {
		params.hdr.size = sizeof(struct sof_ipc_dma_trace_params_ext);
		params.hdr.cmd |= SOF_IPC_TRACE_DMA_PARAMS_EXT;
		params.timestamp_ns = ktime_get(); /* in nanoseconds */
	} else {
		params.hdr.size = sizeof(struct sof_ipc_dma_trace_params);
		params.hdr.cmd |= SOF_IPC_TRACE_DMA_PARAMS;
	}
	params.buffer.phy_addr = sdev->dmatp.addr;
	params.buffer.size = sdev->dmatb.bytes;
	params.buffer.pages = sdev->dma_trace_pages;
	params.stream_tag = 0;

	sdev->host_offset = 0;
	sdev->dtrace_draining = false;

	ret = snd_sof_dma_trace_init(sdev, &params.stream_tag);
	if (ret < 0) {
		dev_err(sdev->dev,
			"error: snd_sof_dma_trace_init failed: %d\n", ret);
		return ret;
	}
	dev_dbg(sdev->dev, "stream_tag: %d\n", params.stream_tag);

	/* send IPC to the DSP */
	ret = sof_ipc_tx_message(sdev->ipc,
				 params.hdr.cmd, &params, sizeof(params),
				 &ipc_reply, sizeof(ipc_reply));
	if (ret < 0) {
		dev_err(sdev->dev,
			"error: can't set params for DMA for trace %d\n", ret);
		goto trace_release;
	}

	ret = snd_sof_dma_trace_trigger(sdev, SNDRV_PCM_TRIGGER_START);
	if (ret < 0) {
		dev_err(sdev->dev,
			"error: snd_sof_dma_trace_trigger: start: %d\n", ret);
		goto trace_release;
	}

	sdev->dtrace_is_enabled = true;

	return 0;

trace_release:
	snd_sof_dma_trace_release(sdev);
	return ret;
}

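/*
 * Allocate the trace page table and DMA buffer, build the compressed page
 * table for the firmware, create the debugfs entry on first boot, and then
 * enable tracing via snd_sof_init_trace_ipc().
 */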
int snd_sof_init_trace(struct snd_sof_dev *sdev)
{
	int ret;

	if (!sdev->dtrace_is_supported)
		return 0;

	/* set false before starting initialization */
	sdev->dtrace_is_enabled = false;

	/* allocate trace page table buffer */
	ret = snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, sdev->dev,
				  PAGE_SIZE, &sdev->dmatp);
	if (ret < 0) {
		dev_err(sdev->dev,
			"error: can't alloc page table for trace %d\n", ret);
		return ret;
	}

	/* allocate trace data buffer */
	ret = snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV_SG, sdev->dev,
				  DMA_BUF_SIZE_FOR_TRACE, &sdev->dmatb);
	if (ret < 0) {
		dev_err(sdev->dev,
			"error: can't alloc buffer for trace %d\n", ret);
		goto page_err;
	}

	/* create compressed page table for audio firmware */
	ret = snd_sof_create_page_table(sdev->dev, &sdev->dmatb,
					sdev->dmatp.area, sdev->dmatb.bytes);
	if (ret < 0)
		goto table_err;

	sdev->dma_trace_pages = ret;
	dev_dbg(sdev->dev, "dma_trace_pages: %d\n", sdev->dma_trace_pages);

	if (sdev->first_boot) {
		ret = trace_debugfs_create(sdev);
		if (ret < 0)
			goto table_err;
	}

	init_waitqueue_head(&sdev->trace_sleep);

	ret = snd_sof_init_trace_ipc(sdev);
	if (ret < 0)
		goto table_err;

	return 0;
table_err:
	sdev->dma_trace_pages = 0;
	snd_dma_free_pages(&sdev->dmatb);
page_err:
	snd_dma_free_pages(&sdev->dmatp);
	return ret;
}
EXPORT_SYMBOL(snd_sof_init_trace);

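/*
 * IPC position update from the firmware: record the new host write offset,
 * wake up any readers blocked in sof_wait_trace_avail(), and report trace
 * buffer overflows.
 */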
int snd_sof_trace_update_pos(struct snd_sof_dev *sdev,
			     struct sof_ipc_dma_trace_posn *posn)
{
	if (!sdev->dtrace_is_supported)
		return 0;

	if (sdev->dtrace_is_enabled && sdev->host_offset != posn->host_offset) {
		sdev->host_offset = posn->host_offset;
		wake_up(&sdev->trace_sleep);
	}

	if (posn->overflow != 0)
		dev_err(sdev->dev,
			"error: DSP trace buffer overflow %u bytes. Total messages %d\n",
			posn->overflow, posn->messages);

	return 0;
}

/* an error has occurred within the DSP that prevents further trace */
void snd_sof_trace_notify_for_error(struct snd_sof_dev *sdev)
{
	if (!sdev->dtrace_is_supported)
		return;

	if (sdev->dtrace_is_enabled) {
		dev_err(sdev->dev, "error: waking up any trace sleepers\n");
		sdev->dtrace_error = true;
		wake_up(&sdev->trace_sleep);
	}
}
EXPORT_SYMBOL(snd_sof_trace_notify_for_error);

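/*
 * Stop the trace DMA stream and release its resources, then mark the trace
 * as draining and wake any blocked readers so they can consume what is left
 * in the buffer before hitting EOF.
 */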
void snd_sof_release_trace(struct snd_sof_dev *sdev)
{
	int ret;

	if (!sdev->dtrace_is_supported || !sdev->dtrace_is_enabled)
		return;

	ret = snd_sof_dma_trace_trigger(sdev, SNDRV_PCM_TRIGGER_STOP);
	if (ret < 0)
		dev_err(sdev->dev,
			"error: snd_sof_dma_trace_trigger: stop: %d\n", ret);

	ret = snd_sof_dma_trace_release(sdev);
	if (ret < 0)
		dev_err(sdev->dev,
			"error: snd_sof_dma_trace_release failed: %d\n", ret);

	sdev->dtrace_is_enabled = false;
	sdev->dtrace_draining = true;
	wake_up(&sdev->trace_sleep);
}
EXPORT_SYMBOL(snd_sof_release_trace);

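/*
 * Release tracing and free the trace DMA buffer and page table allocated in
 * snd_sof_init_trace().
 */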
void snd_sof_free_trace(struct snd_sof_dev *sdev)
{
	if (!sdev->dtrace_is_supported)
		return;

	snd_sof_release_trace(sdev);

	if (sdev->dma_trace_pages) {
		snd_dma_free_pages(&sdev->dmatb);
		snd_dma_free_pages(&sdev->dmatp);
		sdev->dma_trace_pages = 0;
	}
}
EXPORT_SYMBOL(snd_sof_free_trace);