// SPDX-License-Identifier: (GPL-2.0 OR MIT)
//
// Copyright (c) 2018 BayLibre, SAS.
// Author: Jerome Brunet <jbrunet@baylibre.com>

#include <linux/clk.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/module.h>
#include <linux/regmap.h>
#include <linux/reset.h>
#include <sound/pcm_params.h>
#include <sound/soc.h>
#include <sound/soc-dai.h>

#include "axg-fifo.h"

/*
 * This file implements the platform operations common to the playback and
 * capture frontend DAIs. The logic behind these two types of fifo is very
 * similar but some differences exist.
 * These differences are handled in the respective DAI drivers.
 */

static struct snd_pcm_hardware axg_fifo_hw = {
	.info = (SNDRV_PCM_INFO_INTERLEAVED |
		 SNDRV_PCM_INFO_MMAP |
		 SNDRV_PCM_INFO_MMAP_VALID |
		 SNDRV_PCM_INFO_BLOCK_TRANSFER |
		 SNDRV_PCM_INFO_PAUSE),

	.formats = AXG_FIFO_FORMATS,
	.rate_min = 5512,
	.rate_max = 192000,
	.channels_min = 1,
	.channels_max = AXG_FIFO_CH_MAX,
	.period_bytes_min = AXG_FIFO_MIN_DEPTH,
	.period_bytes_max = UINT_MAX,
	.periods_min = 2,
	.periods_max = UINT_MAX,

	/* No real justification for this */
	.buffer_bytes_max = 1 * 1024 * 1024,
};

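/*
 * Helpers to get from the pcm substream back to the frontend cpu DAI,
 * the fifo driver data attached to it and the underlying device.
 */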
static struct snd_soc_dai *axg_fifo_dai(struct snd_pcm_substream *ss)
{
	struct snd_soc_pcm_runtime *rtd = ss->private_data;

	return rtd->cpu_dai;
}

static struct axg_fifo *axg_fifo_data(struct snd_pcm_substream *ss)
{
	struct snd_soc_dai *dai = axg_fifo_dai(ss);

	return snd_soc_dai_get_drvdata(dai);
}

static struct device *axg_fifo_dev(struct snd_pcm_substream *ss)
{
	struct snd_soc_dai *dai = axg_fifo_dai(ss);

	return dai->dev;
}

static void __dma_enable(struct axg_fifo *fifo, bool enable)
{
	regmap_update_bits(fifo->map, FIFO_CTRL0, CTRL0_DMA_EN,
			   enable ? CTRL0_DMA_EN : 0);
}

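/*
 * Start/stop only toggles the DMA enable bit; the fifo pointers are left
 * untouched, which is presumably what lets pause/release resume where the
 * stream stopped.
 */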
static int axg_fifo_pcm_trigger(struct snd_pcm_substream *ss, int cmd)
{
	struct axg_fifo *fifo = axg_fifo_data(ss);

	switch (cmd) {
	case SNDRV_PCM_TRIGGER_START:
	case SNDRV_PCM_TRIGGER_RESUME:
	case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
		__dma_enable(fifo, true);
		break;
	case SNDRV_PCM_TRIGGER_SUSPEND:
	case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
	case SNDRV_PCM_TRIGGER_STOP:
		__dma_enable(fifo, false);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

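/*
 * STATUS2 is set up in open() to report the current memory pointer of the
 * fifo, so the position is simply its offset within the DMA buffer.
 */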
static snd_pcm_uframes_t axg_fifo_pcm_pointer(struct snd_pcm_substream *ss)
{
	struct axg_fifo *fifo = axg_fifo_data(ss);
	struct snd_pcm_runtime *runtime = ss->runtime;
	unsigned int addr;

	regmap_read(fifo->map, FIFO_STATUS2, &addr);

	return bytes_to_frames(runtime, addr - (unsigned int)runtime->dma_addr);
}

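/*
 * hw_params programs the DMA buffer boundaries, the interrupt period
 * (expressed in fifo bursts) and the fifo request threshold.
 */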
static int axg_fifo_pcm_hw_params(struct snd_pcm_substream *ss,
				  struct snd_pcm_hw_params *params)
{
	struct snd_pcm_runtime *runtime = ss->runtime;
	struct axg_fifo *fifo = axg_fifo_data(ss);
	unsigned int burst_num, period, threshold;
	dma_addr_t end_ptr;
	int ret;

	period = params_period_bytes(params);

	ret = snd_pcm_lib_malloc_pages(ss, params_buffer_bytes(params));
	if (ret < 0)
		return ret;

	/* Setup dma memory pointers */
	end_ptr = runtime->dma_addr + runtime->dma_bytes - AXG_FIFO_BURST;
	regmap_write(fifo->map, FIFO_START_ADDR, runtime->dma_addr);
	regmap_write(fifo->map, FIFO_FINISH_ADDR, end_ptr);

	/* Setup interrupt periodicity */
	burst_num = period / AXG_FIFO_BURST;
	regmap_write(fifo->map, FIFO_INT_ADDR, burst_num);

	/*
	 * Start the fifo request on the smallest of the following:
	 * - Half the fifo size
	 * - Half the period size
	 */
	threshold = min(period / 2,
			(unsigned int)AXG_FIFO_MIN_DEPTH / 2);

	/*
	 * With the threshold in bytes, register value is:
	 * V = (threshold / burst) - 1
	 */
	threshold /= AXG_FIFO_BURST;
	regmap_field_write(fifo->field_threshold,
			   threshold ? threshold - 1 : 0);

	/* Enable block count irq */
	regmap_update_bits(fifo->map, FIFO_CTRL0,
			   CTRL0_INT_EN(FIFO_INT_COUNT_REPEAT),
			   CTRL0_INT_EN(FIFO_INT_COUNT_REPEAT));

	return 0;
}

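/*
 * On top of the common setup, the G12A type of fifo needs its initial
 * DMA pointer programmed explicitly.
 */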
static int g12a_fifo_pcm_hw_params(struct snd_pcm_substream *ss,
				   struct snd_pcm_hw_params *params)
{
	struct axg_fifo *fifo = axg_fifo_data(ss);
	struct snd_pcm_runtime *runtime = ss->runtime;
	int ret;

	ret = axg_fifo_pcm_hw_params(ss, params);
	if (ret)
		return ret;

	/* Set the initial memory address of the DMA */
	regmap_write(fifo->map, FIFO_INIT_ADDR, runtime->dma_addr);

	return 0;
}

static int axg_fifo_pcm_hw_free(struct snd_pcm_substream *ss)
{
	struct axg_fifo *fifo = axg_fifo_data(ss);

	/* Disable the block count irq */
	regmap_update_bits(fifo->map, FIFO_CTRL0,
			   CTRL0_INT_EN(FIFO_INT_COUNT_REPEAT), 0);

	return snd_pcm_lib_free_pages(ss);
}

static void axg_fifo_ack_irq(struct axg_fifo *fifo, u8 mask)
{
	regmap_update_bits(fifo->map, FIFO_CTRL1,
			   CTRL1_INT_CLR(FIFO_INT_MASK),
			   CTRL1_INT_CLR(mask));

	/* The clear bits must also be cleared back to 0 */
	regmap_update_bits(fifo->map, FIFO_CTRL1,
			   CTRL1_INT_CLR(FIFO_INT_MASK),
			   0);
}

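/*
 * On each block count interrupt, notify ALSA that a period has elapsed.
 * IRQ_RETVAL() reports IRQ_HANDLED if any status bit was raised and
 * IRQ_NONE otherwise.
 */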
static irqreturn_t axg_fifo_pcm_irq_block(int irq, void *dev_id)
{
	struct snd_pcm_substream *ss = dev_id;
	struct axg_fifo *fifo = axg_fifo_data(ss);
	unsigned int status;

	regmap_read(fifo->map, FIFO_STATUS1, &status);

	status = STATUS1_INT_STS(status) & FIFO_INT_MASK;
	if (status & FIFO_INT_COUNT_REPEAT)
		snd_pcm_period_elapsed(ss);
	else
		dev_dbg(axg_fifo_dev(ss), "unexpected irq - STS 0x%02x\n",
			status);

	/* Ack irqs */
	axg_fifo_ack_irq(fifo, status);

	return IRQ_RETVAL(status);
}

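/*
 * open() applies the runtime constraints, requests the irq, enables the
 * peripheral clock and takes the memory arbiter out of reset. DMA and
 * interrupts remain disabled until hw_params() has programmed the fifo.
 */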
static int axg_fifo_pcm_open(struct snd_pcm_substream *ss)
{
	struct axg_fifo *fifo = axg_fifo_data(ss);
	struct device *dev = axg_fifo_dev(ss);
	int ret;

	snd_soc_set_runtime_hwparams(ss, &axg_fifo_hw);

	/*
	 * Make sure the buffer and period sizes are a multiple of the fifo
	 * minimum depth
	 */
	ret = snd_pcm_hw_constraint_step(ss->runtime, 0,
					 SNDRV_PCM_HW_PARAM_BUFFER_BYTES,
					 AXG_FIFO_MIN_DEPTH);
	if (ret)
		return ret;

	ret = snd_pcm_hw_constraint_step(ss->runtime, 0,
					 SNDRV_PCM_HW_PARAM_PERIOD_BYTES,
					 AXG_FIFO_MIN_DEPTH);
	if (ret)
		return ret;

	ret = request_irq(fifo->irq, axg_fifo_pcm_irq_block, 0,
			  dev_name(dev), ss);
	if (ret)
		return ret;

	/* Enable pclk to access registers and clock the fifo ip */
	ret = clk_prepare_enable(fifo->pclk);
	if (ret)
		goto free_irq;

	/* Setup status2 so it reports the memory pointer */
	regmap_update_bits(fifo->map, FIFO_CTRL1,
			   CTRL1_STATUS2_SEL_MASK,
			   CTRL1_STATUS2_SEL(STATUS2_SEL_DDR_READ));

	/* Make sure the dma is initially disabled */
	__dma_enable(fifo, false);

	/* Disable irqs until params are ready */
	regmap_update_bits(fifo->map, FIFO_CTRL0,
			   CTRL0_INT_EN(FIFO_INT_MASK), 0);

	/* Clear any pending interrupt */
	axg_fifo_ack_irq(fifo, FIFO_INT_MASK);

	/* Take the memory arbiter out of reset */
	ret = reset_control_deassert(fifo->arb);
	if (ret)
		goto free_clk;

	return 0;

free_clk:
	clk_disable_unprepare(fifo->pclk);
free_irq:
	free_irq(fifo->irq, ss);
	return ret;
}

static int axg_fifo_pcm_close(struct snd_pcm_substream *ss)
{
	struct axg_fifo *fifo = axg_fifo_data(ss);
	int ret;

	/* Put the memory arbiter back in reset */
	ret = reset_control_assert(fifo->arb);

	/* Disable fifo ip and register access */
	clk_disable_unprepare(fifo->pclk);

	/* Free the irq */
	free_irq(fifo->irq, ss);

	return ret;
}

const struct snd_pcm_ops axg_fifo_pcm_ops = {
	.open		= axg_fifo_pcm_open,
	.close		= axg_fifo_pcm_close,
	.ioctl		= snd_pcm_lib_ioctl,
	.hw_params	= axg_fifo_pcm_hw_params,
	.hw_free	= axg_fifo_pcm_hw_free,
	.pointer	= axg_fifo_pcm_pointer,
	.trigger	= axg_fifo_pcm_trigger,
};
EXPORT_SYMBOL_GPL(axg_fifo_pcm_ops);

const struct snd_pcm_ops g12a_fifo_pcm_ops = {
	.open		= axg_fifo_pcm_open,
	.close		= axg_fifo_pcm_close,
	.ioctl		= snd_pcm_lib_ioctl,
	.hw_params	= g12a_fifo_pcm_hw_params,
	.hw_free	= axg_fifo_pcm_hw_free,
	.pointer	= axg_fifo_pcm_pointer,
	.trigger	= axg_fifo_pcm_trigger,
};
EXPORT_SYMBOL_GPL(g12a_fifo_pcm_ops);

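/*
 * Preallocate the maximum buffer size for the requested stream direction,
 * so hw_params() should never need more than what is reserved here.
 */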
int axg_fifo_pcm_new(struct snd_soc_pcm_runtime *rtd, unsigned int type)
{
	struct snd_card *card = rtd->card->snd_card;
	size_t size = axg_fifo_hw.buffer_bytes_max;

	snd_pcm_lib_preallocate_pages(rtd->pcm->streams[type].substream,
				      SNDRV_DMA_TYPE_DEV, card->dev,
				      size, size);
	return 0;
}
EXPORT_SYMBOL_GPL(axg_fifo_pcm_new);

static const struct regmap_config axg_fifo_regmap_cfg = {
	.reg_bits	= 32,
	.val_bits	= 32,
	.reg_stride	= 4,
	.max_register	= FIFO_CTRL2,
};

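/*
 * Common probe: map the fifo registers, get the pclk, the arbiter reset
 * line and the irq, then register the component described by the platform
 * specific match data.
 */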
int axg_fifo_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	const struct axg_fifo_match_data *data;
	struct axg_fifo *fifo;
	void __iomem *regs;

	data = of_device_get_match_data(dev);
	if (!data) {
		dev_err(dev, "failed to match device\n");
		return -ENODEV;
	}

	fifo = devm_kzalloc(dev, sizeof(*fifo), GFP_KERNEL);
	if (!fifo)
		return -ENOMEM;
	platform_set_drvdata(pdev, fifo);

	regs = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(regs))
		return PTR_ERR(regs);

	fifo->map = devm_regmap_init_mmio(dev, regs, &axg_fifo_regmap_cfg);
	if (IS_ERR(fifo->map)) {
		dev_err(dev, "failed to init regmap: %ld\n",
			PTR_ERR(fifo->map));
		return PTR_ERR(fifo->map);
	}

	fifo->pclk = devm_clk_get(dev, NULL);
	if (IS_ERR(fifo->pclk)) {
		if (PTR_ERR(fifo->pclk) != -EPROBE_DEFER)
			dev_err(dev, "failed to get pclk: %ld\n",
				PTR_ERR(fifo->pclk));
		return PTR_ERR(fifo->pclk);
	}

	fifo->arb = devm_reset_control_get_exclusive(dev, NULL);
	if (IS_ERR(fifo->arb)) {
		if (PTR_ERR(fifo->arb) != -EPROBE_DEFER)
			dev_err(dev, "failed to get arb reset: %ld\n",
				PTR_ERR(fifo->arb));
		return PTR_ERR(fifo->arb);
	}

	fifo->irq = of_irq_get(dev->of_node, 0);
	if (fifo->irq <= 0) {
		dev_err(dev, "failed to get irq: %d\n", fifo->irq);
		return fifo->irq;
	}

	fifo->field_threshold =
		devm_regmap_field_alloc(dev, fifo->map, data->field_threshold);
	if (IS_ERR(fifo->field_threshold))
		return PTR_ERR(fifo->field_threshold);

	return devm_snd_soc_register_component(dev, data->component_drv,
					       data->dai_drv, 1);
}
EXPORT_SYMBOL_GPL(axg_fifo_probe);

MODULE_DESCRIPTION("Amlogic AXG/G12A fifo driver");
MODULE_AUTHOR("Jerome Brunet <jbrunet@baylibre.com>");
MODULE_LICENSE("GPL v2");