// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
//
// This file is provided under a dual BSD/GPLv2 license.  When using or
// redistributing this file, you may do so under either license.
//
// Copyright(c) 2018 Intel Corporation. All rights reserved.
//
// Author: Liam Girdwood <liam.r.girdwood@linux.intel.com>
//

#include <linux/debugfs.h>
#include <linux/sched/signal.h>
#include "sof-priv.h"
#include "ops.h"

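/*
 * Return the number of trace bytes that can be read at @pos without
 * blocking, based on the last write position reported by the DSP.
 */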
static size_t sof_trace_avail(struct snd_sof_dev *sdev,
			      loff_t pos, size_t buffer_size)
{
	loff_t host_offset = READ_ONCE(sdev->host_offset);

	/*
	 * If the host offset is less than the local pos, the write pointer
	 * of the host DMA buffer has wrapped. Output the trace data at the
	 * end of the host DMA buffer first.
	 */
	if (host_offset < pos)
		return buffer_size - pos;

	/* If trace data is already available, there is no need to wait. */
	if (host_offset > pos)
		return host_offset - pos;

	return 0;
}

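/*
 * Block until trace data is available at @pos, the trace session ends,
 * or the reading task is interrupted by a signal.
 */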
static size_t sof_wait_trace_avail(struct snd_sof_dev *sdev,
				   loff_t pos, size_t buffer_size)
{
	wait_queue_entry_t wait;
	size_t ret = sof_trace_avail(sdev, pos, buffer_size);

	/* data immediately available */
	if (ret)
		return ret;

	if (!sdev->dtrace_is_enabled && sdev->dtrace_draining) {
		/*
		 * tracing has ended and all traces have been
		 * read by the client, return EOF
		 */
		sdev->dtrace_draining = false;
		return 0;
	}

	/* wait for available trace data from FW */
	init_waitqueue_entry(&wait, current);
	set_current_state(TASK_INTERRUPTIBLE);
	add_wait_queue(&sdev->trace_sleep, &wait);

	if (!signal_pending(current)) {
		/* set timeout to max value, no error code */
		schedule_timeout(MAX_SCHEDULE_TIMEOUT);
	}
	remove_wait_queue(&sdev->trace_sleep, &wait);

	return sof_trace_avail(sdev, pos, buffer_size);
}

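/* debugfs read(): copy any available DMA trace data to user space */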
static ssize_t sof_dfsentry_trace_read(struct file *file, char __user *buffer,
				       size_t count, loff_t *ppos)
{
	struct snd_sof_dfsentry *dfse = file->private_data;
	struct snd_sof_dev *sdev = dfse->sdev;
	unsigned long rem;
	loff_t lpos = *ppos;
	size_t avail, buffer_size = dfse->size;
	u64 lpos_64;

	/* make sure we know about any failures on the DSP side */
	sdev->dtrace_error = false;

	/* check pos and count */
	if (lpos < 0)
		return -EINVAL;
	if (!count)
		return 0;

	/* check for buffer wrap and count overflow */
	lpos_64 = lpos;
	lpos = do_div(lpos_64, buffer_size);

	if (count > buffer_size - lpos) /* min() not used to avoid sparse warnings */
		count = buffer_size - lpos;

	/* get available count based on current host offset */
	avail = sof_wait_trace_avail(sdev, lpos, buffer_size);
	if (sdev->dtrace_error) {
		dev_err(sdev->dev, "error: trace IO error\n");
		return -EIO;
	}

	/* make sure count is <= avail */
	count = avail > count ? count : avail;

	/* copy available trace data to user space */
	rem = copy_to_user(buffer, ((u8 *)(dfse->buf) + lpos), count);
	if (rem)
		return -EFAULT;

	/* move the debugfs reading position */
	*ppos += count;

	return count;
}

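/* debugfs release(): reset the host offset so a later open does not replay old traces */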
static int sof_dfsentry_trace_release(struct inode *inode, struct file *file)
{
	struct snd_sof_dfsentry *dfse = inode->i_private;
	struct snd_sof_dev *sdev = dfse->sdev;

	/* avoid duplicate traces at next open */
	if (!sdev->dtrace_is_enabled)
		sdev->host_offset = 0;

	return 0;
}

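/* file operations for the "trace" debugfs entry */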
static const struct file_operations sof_dfs_trace_fops = {
	.open = simple_open,
	.read = sof_dfsentry_trace_read,
	.llseek = default_llseek,
	.release = sof_dfsentry_trace_release,
};

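/* expose the DMA trace buffer as a read-only "trace" file under debugfs */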
static int trace_debugfs_create(struct snd_sof_dev *sdev)
{
	struct snd_sof_dfsentry *dfse;

	if (!sdev)
		return -EINVAL;

	dfse = devm_kzalloc(sdev->dev, sizeof(*dfse), GFP_KERNEL);
	if (!dfse)
		return -ENOMEM;

	dfse->type = SOF_DFSENTRY_TYPE_BUF;
	dfse->buf = sdev->dmatb.area;
	dfse->size = sdev->dmatb.bytes;
	dfse->sdev = sdev;

	debugfs_create_file("trace", 0444, sdev->debugfs_root, dfse,
			    &sof_dfs_trace_fops);

	return 0;
}

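/*
 * Send the DMA trace parameters to the DSP over IPC and start the trace
 * stream. The trace buffers must already have been allocated by
 * snd_sof_init_trace().
 */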
int snd_sof_init_trace_ipc(struct snd_sof_dev *sdev)
{
	struct sof_ipc_fw_ready *ready = &sdev->fw_ready;
	struct sof_ipc_fw_version *v = &ready->version;
	struct sof_ipc_dma_trace_params_ext params;
	struct sof_ipc_reply ipc_reply;
	int ret;

	if (sdev->dtrace_is_enabled || !sdev->dma_trace_pages)
		return -EINVAL;

	/* set IPC parameters */
	params.hdr.cmd = SOF_IPC_GLB_TRACE_MSG;
	/* PARAMS_EXT is only supported from ABI 3.7.0 onwards */
	if (v->abi_version >= SOF_ABI_VER(3, 7, 0)) {
		params.hdr.size = sizeof(struct sof_ipc_dma_trace_params_ext);
		params.hdr.cmd |= SOF_IPC_TRACE_DMA_PARAMS_EXT;
		params.timestamp_ns = ktime_get(); /* in nanoseconds */
	} else {
		params.hdr.size = sizeof(struct sof_ipc_dma_trace_params);
		params.hdr.cmd |= SOF_IPC_TRACE_DMA_PARAMS;
	}
	params.buffer.phy_addr = sdev->dmatp.addr;
	params.buffer.size = sdev->dmatb.bytes;
	params.buffer.pages = sdev->dma_trace_pages;
	params.stream_tag = 0;

	sdev->host_offset = 0;
	sdev->dtrace_draining = false;

	ret = snd_sof_dma_trace_init(sdev, &params.stream_tag);
	if (ret < 0) {
		dev_err(sdev->dev,
			"error: fail in snd_sof_dma_trace_init %d\n", ret);
		return ret;
	}
	dev_dbg(sdev->dev, "stream_tag: %d\n", params.stream_tag);

	/* send IPC to the DSP */
	ret = sof_ipc_tx_message(sdev->ipc,
				 params.hdr.cmd, &params, sizeof(params),
				 &ipc_reply, sizeof(ipc_reply));
	if (ret < 0) {
		dev_err(sdev->dev,
			"error: can't set params for DMA for trace %d\n", ret);
		goto trace_release;
	}

	ret = snd_sof_dma_trace_trigger(sdev, SNDRV_PCM_TRIGGER_START);
	if (ret < 0) {
		dev_err(sdev->dev,
			"error: snd_sof_dma_trace_trigger: start: %d\n", ret);
		goto trace_release;
	}

	sdev->dtrace_is_enabled = true;

	return 0;

trace_release:
	snd_sof_dma_trace_release(sdev);
	return ret;
}

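/*
 * Allocate the trace page table and data buffers, create the debugfs
 * entry on first boot and start tracing via snd_sof_init_trace_ipc().
 */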
int snd_sof_init_trace(struct snd_sof_dev *sdev)
{
	int ret;

	/* set false before starting initialization */
	sdev->dtrace_is_enabled = false;

	/* allocate trace page table buffer */
	ret = snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, sdev->dev,
				  PAGE_SIZE, &sdev->dmatp);
	if (ret < 0) {
		dev_err(sdev->dev,
			"error: can't alloc page table for trace %d\n", ret);
		return ret;
	}

	/* allocate trace data buffer */
	ret = snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV_SG, sdev->dev,
				  DMA_BUF_SIZE_FOR_TRACE, &sdev->dmatb);
	if (ret < 0) {
		dev_err(sdev->dev,
			"error: can't alloc buffer for trace %d\n", ret);
		goto page_err;
	}

	/* create compressed page table for audio firmware */
	ret = snd_sof_create_page_table(sdev, &sdev->dmatb, sdev->dmatp.area,
					sdev->dmatb.bytes);
	if (ret < 0)
		goto table_err;

	sdev->dma_trace_pages = ret;
	dev_dbg(sdev->dev, "dma_trace_pages: %d\n", sdev->dma_trace_pages);

	if (sdev->first_boot) {
		ret = trace_debugfs_create(sdev);
		if (ret < 0)
			goto table_err;
	}

	init_waitqueue_head(&sdev->trace_sleep);

	ret = snd_sof_init_trace_ipc(sdev);
	if (ret < 0)
		goto table_err;

	return 0;
table_err:
	sdev->dma_trace_pages = 0;
	snd_dma_free_pages(&sdev->dmatb);
page_err:
	snd_dma_free_pages(&sdev->dmatp);
	return ret;
}
EXPORT_SYMBOL(snd_sof_init_trace);

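/*
 * Called when the DSP reports a new trace buffer write position: record
 * it, wake up any waiting readers and log buffer overflows.
 */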
int snd_sof_trace_update_pos(struct snd_sof_dev *sdev,
			     struct sof_ipc_dma_trace_posn *posn)
{
	if (sdev->dtrace_is_enabled && sdev->host_offset != posn->host_offset) {
		sdev->host_offset = posn->host_offset;
		wake_up(&sdev->trace_sleep);
	}

	if (posn->overflow != 0)
		dev_err(sdev->dev,
			"error: DSP trace buffer overflow %u bytes. Total messages %d\n",
			posn->overflow, posn->messages);

	return 0;
}

/* an error has occurred within the DSP that prevents further trace */
void snd_sof_trace_notify_for_error(struct snd_sof_dev *sdev)
{
	if (sdev->dtrace_is_enabled) {
		dev_err(sdev->dev, "error: waking up any trace sleepers\n");
		sdev->dtrace_error = true;
		wake_up(&sdev->trace_sleep);
	}
}
EXPORT_SYMBOL(snd_sof_trace_notify_for_error);

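/*
 * Stop and release the DMA trace stream, then wake any sleeping readers
 * so they can drain the remaining data and see EOF.
 */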
void snd_sof_release_trace(struct snd_sof_dev *sdev)
{
	int ret;

	if (!sdev->dtrace_is_enabled)
		return;

	ret = snd_sof_dma_trace_trigger(sdev, SNDRV_PCM_TRIGGER_STOP);
	if (ret < 0)
		dev_err(sdev->dev,
			"error: snd_sof_dma_trace_trigger: stop: %d\n", ret);

	ret = snd_sof_dma_trace_release(sdev);
	if (ret < 0)
		dev_err(sdev->dev,
			"error: fail in snd_sof_dma_trace_release %d\n", ret);

	sdev->dtrace_is_enabled = false;
	sdev->dtrace_draining = true;
	wake_up(&sdev->trace_sleep);
}
EXPORT_SYMBOL(snd_sof_release_trace);

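/* stop tracing and free the DMA trace buffers */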
void snd_sof_free_trace(struct snd_sof_dev *sdev)
{
	snd_sof_release_trace(sdev);

	if (sdev->dma_trace_pages) {
		snd_dma_free_pages(&sdev->dmatb);
		snd_dma_free_pages(&sdev->dmatp);
		sdev->dma_trace_pages = 0;
	}
}
EXPORT_SYMBOL(snd_sof_free_trace);