/*
 * Intel SST Firmware Loader
 *
 * Copyright (C) 2013, Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/firmware.h>
#include <linux/export.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/pci.h>
#include <linux/acpi.h>

/* supported DMA engine drivers */
#include <linux/dma/dw.h>

#include <asm/page.h>
#include <asm/pgtable.h>

#include "sst-dsp.h"
#include "sst-dsp-priv.h"

#define SST_DMA_RESOURCES		2
#define SST_DSP_DMA_MAX_BURST		0x3
#define SST_HSW_BLOCK_ANY		0xffffffff

#define SST_HSW_MASK_DMA_ADDR_DSP	0xfff00000

struct sst_dma {
	struct sst_dsp *sst;

	struct dw_dma_chip *chip;

	struct dma_async_tx_descriptor *desc;
	struct dma_chan *ch;
};

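/*
 * Copy a buffer into DSP memory using only whole 32-bit MMIO writes.
 * The aligned bulk of the buffer goes through __iowrite32_copy(); any
 * trailing 1-3 bytes are packed little-endian into one final 32-bit
 * word so the hardware never sees a sub-word access.
 */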
static inline void sst_memcpy32(volatile void __iomem *dest, void *src,
	u32 bytes)
{
	u32 tmp = 0;
	int i, m, n;
	const u8 *src_byte = src;

	m = bytes / 4;
	n = bytes % 4;

	/* __iowrite32_copy takes a count of 32-bit words, so divide by 4 */
	__iowrite32_copy((void *)dest, src, m);

	if (n) {
		for (i = 0; i < n; i++)
			tmp |= (u32)*(src_byte + m * 4 + i) << (i * 8);
		__iowrite32_copy((void *)(dest + m * 4), &tmp, 1);
	}
}

static void sst_dma_transfer_complete(void *arg)
{
	struct sst_dsp *sst = (struct sst_dsp *)arg;

	dev_dbg(sst->dev, "DMA: callback\n");
}

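/*
 * Perform a blocking memcpy between host memory and the DSP over the
 * general purpose DMA engine. The transfer is submitted via
 * device_prep_dma_memcpy() and then polled to completion with
 * dma_wait_for_async_tx(), so callers may sleep.
 */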
static int sst_dsp_dma_copy(struct sst_dsp *sst, dma_addr_t dest_addr,
	dma_addr_t src_addr, size_t size)
{
	struct dma_async_tx_descriptor *desc;
	struct sst_dma *dma = sst->dma;

	if (dma->ch == NULL) {
		dev_err(sst->dev, "error: no DMA channel\n");
		return -ENODEV;
	}

	dev_dbg(sst->dev, "DMA: src: 0x%lx dest 0x%lx size %zu\n",
		(unsigned long)src_addr, (unsigned long)dest_addr, size);

	desc = dma->ch->device->device_prep_dma_memcpy(dma->ch, dest_addr,
		src_addr, size, DMA_CTRL_ACK);
	if (!desc) {
		dev_err(sst->dev, "error: DMA prep memcpy failed\n");
		return -EINVAL;
	}

	desc->callback = sst_dma_transfer_complete;
	desc->callback_param = sst;

	desc->tx_submit(desc);
	dma_wait_for_async_tx(desc);

	return 0;
}

/* copy to DSP */
int sst_dsp_dma_copyto(struct sst_dsp *sst, dma_addr_t dest_addr,
	dma_addr_t src_addr, size_t size)
{
	return sst_dsp_dma_copy(sst, dest_addr | SST_HSW_MASK_DMA_ADDR_DSP,
			src_addr, size);
}
EXPORT_SYMBOL_GPL(sst_dsp_dma_copyto);

/* copy from DSP */
int sst_dsp_dma_copyfrom(struct sst_dsp *sst, dma_addr_t dest_addr,
	dma_addr_t src_addr, size_t size)
{
	return sst_dsp_dma_copy(sst, dest_addr,
			src_addr | SST_HSW_MASK_DMA_ADDR_DSP, size);
}
EXPORT_SYMBOL_GPL(sst_dsp_dma_copyfrom);

/* remove module from memory - callers hold locks */
static void block_list_remove(struct sst_dsp *dsp,
	struct list_head *block_list)
{
	struct sst_mem_block *block, *tmp;
	int err;

	/* disable each block */
	list_for_each_entry(block, block_list, module_list) {

		if (block->ops && block->ops->disable) {
			err = block->ops->disable(block);
			if (err < 0)
				dev_err(dsp->dev,
					"error: can't disable block %d:%d\n",
					block->type, block->index);
		}
	}

	/* mark each block as free */
	list_for_each_entry_safe(block, tmp, block_list, module_list) {
		list_del(&block->module_list);
		list_move(&block->list, &dsp->free_block_list);
		dev_dbg(dsp->dev, "block freed %d:%d at offset 0x%x\n",
			block->type, block->index, block->offset);
	}
}

/* prepare the memory block to receive data from host - callers hold locks */
static int block_list_prepare(struct sst_dsp *dsp,
	struct list_head *block_list)
{
	struct sst_mem_block *block;
	int ret = 0;

	/* enable each block so that it's ready for data */
	list_for_each_entry(block, block_list, module_list) {

		if (block->ops && block->ops->enable && !block->users) {
			ret = block->ops->enable(block);
			if (ret < 0) {
				dev_err(dsp->dev,
					"error: can't enable block %d:%d\n",
					block->type, block->index);
				goto err;
			}
		}
	}
	return ret;

err:
	list_for_each_entry(block, block_list, module_list) {
		if (block->ops && block->ops->disable)
			block->ops->disable(block);
	}
	return ret;
}

static struct dw_dma_chip *dw_probe(struct device *dev, struct resource *mem,
	int irq)
{
	struct dw_dma_chip *chip;
	int err;

	chip = devm_kzalloc(dev, sizeof(*chip), GFP_KERNEL);
	if (!chip)
		return ERR_PTR(-ENOMEM);

	chip->irq = irq;
	chip->regs = devm_ioremap_resource(dev, mem);
	if (IS_ERR(chip->regs))
		return ERR_CAST(chip->regs);

	err = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(31));
	if (err)
		return ERR_PTR(err);

	chip->dev = dev;

	err = dw_dma_probe(chip);
	if (err)
		return ERR_PTR(err);

	return chip;
}

static void dw_remove(struct dw_dma_chip *chip)
{
	dw_dma_remove(chip);
}

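/*
 * Channel filter for dma_request_channel(): accept only channels that
 * belong to the DMA engine instance registered for this DSP, matched
 * by comparing the dmaengine device against dsp->dma_dev.
 */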
static bool dma_chan_filter(struct dma_chan *chan, void *param)
{
	struct sst_dsp *dsp = (struct sst_dsp *)param;

	return chan->device->dev == dsp->dma_dev;
}

int sst_dsp_dma_get_channel(struct sst_dsp *dsp, int chan_id)
{
	struct sst_dma *dma = dsp->dma;
	struct dma_slave_config slave;
	dma_cap_mask_t mask;
	int ret;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);
	dma_cap_set(DMA_MEMCPY, mask);

	dma->ch = dma_request_channel(mask, dma_chan_filter, dsp);
	if (dma->ch == NULL) {
		dev_err(dsp->dev, "error: DMA request channel failed\n");
		return -EIO;
	}

	memset(&slave, 0, sizeof(slave));
	slave.direction = DMA_MEM_TO_DEV;
	slave.src_addr_width =
		slave.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	slave.src_maxburst = slave.dst_maxburst = SST_DSP_DMA_MAX_BURST;

	ret = dmaengine_slave_config(dma->ch, &slave);
	if (ret) {
		dev_err(dsp->dev, "error: unable to set DMA slave config %d\n",
			ret);
		dma_release_channel(dma->ch);
		dma->ch = NULL;
	}

	return ret;
}
EXPORT_SYMBOL_GPL(sst_dsp_dma_get_channel);

void sst_dsp_dma_put_channel(struct sst_dsp *dsp)
{
	struct sst_dma *dma = dsp->dma;

	if (!dma->ch)
		return;

	dma_release_channel(dma->ch);
	dma->ch = NULL;
}
EXPORT_SYMBOL_GPL(sst_dsp_dma_put_channel);

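/*
 * Create and register the DMA engine used for firmware download. The
 * DesignWare DMA controller lives inside the ADSP MMIO window, so its
 * register resource is computed from the LPE base address plus the
 * platform data offsets before it is handed to dw_probe().
 */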
int sst_dma_new(struct sst_dsp *sst)
{
	struct sst_pdata *sst_pdata = sst->pdata;
	struct sst_dma *dma;
	struct resource mem;
	int ret = 0;

	if (sst->pdata->resindex_dma_base == -1)
		/* DMA is not used, return and squelch error messages */
		return 0;

	/*
	 * Configure the correct platform data for whatever DMA engine
	 * is attached to the ADSP IP.
	 */
	switch (sst->pdata->dma_engine) {
	case SST_DMA_TYPE_DW:
		break;
	default:
		dev_err(sst->dev, "error: invalid DMA engine %d\n",
			sst->pdata->dma_engine);
		return -EINVAL;
	}

	dma = devm_kzalloc(sst->dev, sizeof(struct sst_dma), GFP_KERNEL);
	if (!dma)
		return -ENOMEM;

	dma->sst = sst;

	memset(&mem, 0, sizeof(mem));

	mem.start = sst->addr.lpe_base + sst_pdata->dma_base;
	mem.end = sst->addr.lpe_base + sst_pdata->dma_base +
		  sst_pdata->dma_size - 1;
	mem.flags = IORESOURCE_MEM;

	/* now register the DMA engine device */
	dma->chip = dw_probe(sst->dma_dev, &mem, sst_pdata->irq);
	if (IS_ERR(dma->chip)) {
		dev_err(sst->dev, "error: DMA device register failed\n");
		ret = PTR_ERR(dma->chip);
		goto err_dma_dev;
	}

	sst->dma = dma;
	sst->fw_use_dma = true;
	return 0;

err_dma_dev:
	devm_kfree(sst->dev, dma);
	return ret;
}
EXPORT_SYMBOL(sst_dma_new);

void sst_dma_free(struct sst_dma *dma)
{
	if (dma == NULL)
		return;

	if (dma->ch)
		dma_release_channel(dma->ch);

	if (dma->chip)
		dw_remove(dma->chip);
}
EXPORT_SYMBOL(sst_dma_free);

/* create new generic firmware object */
struct sst_fw *sst_fw_new(struct sst_dsp *dsp,
	const struct firmware *fw, void *private)
{
	struct sst_fw *sst_fw;
	int err;

	if (!dsp->ops->parse_fw)
		return NULL;

	sst_fw = kzalloc(sizeof(*sst_fw), GFP_KERNEL);
	if (sst_fw == NULL)
		return NULL;

	sst_fw->dsp = dsp;
	sst_fw->private = private;
	sst_fw->size = fw->size;

	/* allocate DMA buffer to store FW data */
	sst_fw->dma_buf = dma_alloc_coherent(dsp->dma_dev, sst_fw->size,
				&sst_fw->dmable_fw_paddr, GFP_DMA | GFP_KERNEL);
	if (!sst_fw->dma_buf) {
		dev_err(dsp->dev, "error: DMA alloc failed\n");
		kfree(sst_fw);
		return NULL;
	}

	/* copy FW data to DMA-able memory */
	memcpy((void *)sst_fw->dma_buf, (void *)fw->data, fw->size);

	if (dsp->fw_use_dma) {
		err = sst_dsp_dma_get_channel(dsp, 0);
		if (err < 0)
			goto chan_err;
	}

	/* call core specific FW parser to load FW data into DSP */
	err = dsp->ops->parse_fw(sst_fw);
	if (err < 0) {
		dev_err(dsp->dev, "error: parse fw failed %d\n", err);
		goto parse_err;
	}

	if (dsp->fw_use_dma)
		sst_dsp_dma_put_channel(dsp);

	mutex_lock(&dsp->mutex);
	list_add(&sst_fw->list, &dsp->fw_list);
	mutex_unlock(&dsp->mutex);

	return sst_fw;

parse_err:
	if (dsp->fw_use_dma)
		sst_dsp_dma_put_channel(dsp);
chan_err:
	dma_free_coherent(dsp->dma_dev, sst_fw->size,
				sst_fw->dma_buf,
				sst_fw->dmable_fw_paddr);
	sst_fw->dma_buf = NULL;
	kfree(sst_fw);
	return NULL;
}
EXPORT_SYMBOL_GPL(sst_fw_new);
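
/*
 * Illustrative usage sketch. The firmware name and error handling here
 * are hypothetical; real callers live in the core-specific drivers
 * that implement ops->parse_fw:
 *
 *	const struct firmware *fw;
 *	struct sst_fw *sst_fw;
 *
 *	if (request_firmware(&fw, "intel/sst-fw.bin", dsp->dev))
 *		return -ENODEV;
 *	sst_fw = sst_fw_new(dsp, fw, NULL);
 *	release_firmware(fw);
 *	if (!sst_fw)
 *		return -ENOMEM;
 */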

int sst_fw_reload(struct sst_fw *sst_fw)
{
	struct sst_dsp *dsp = sst_fw->dsp;
	int ret;

	dev_dbg(dsp->dev, "reloading firmware\n");

	/* call core specific FW parser to load FW data into DSP */
	ret = dsp->ops->parse_fw(sst_fw);
	if (ret < 0)
		dev_err(dsp->dev, "error: parse fw failed %d\n", ret);

	return ret;
}
EXPORT_SYMBOL_GPL(sst_fw_reload);

void sst_fw_unload(struct sst_fw *sst_fw)
{
	struct sst_dsp *dsp = sst_fw->dsp;
	struct sst_module *module, *mtmp;
	struct sst_module_runtime *runtime, *rtmp;

	dev_dbg(dsp->dev, "unloading firmware\n");

	mutex_lock(&dsp->mutex);

	/* check module by module */
	list_for_each_entry_safe(module, mtmp, &dsp->module_list, list) {
		if (module->sst_fw == sst_fw) {

			/* remove runtime modules */
			list_for_each_entry_safe(runtime, rtmp,
					&module->runtime_list, list) {

				block_list_remove(dsp, &runtime->block_list);
				list_del(&runtime->list);
				kfree(runtime);
			}

			/* now remove the module */
			block_list_remove(dsp, &module->block_list);
			list_del(&module->list);
			kfree(module);
		}
	}

	/* remove all scratch blocks */
	block_list_remove(dsp, &dsp->scratch_block_list);

	mutex_unlock(&dsp->mutex);
}
EXPORT_SYMBOL_GPL(sst_fw_unload);

/* free single firmware object */
void sst_fw_free(struct sst_fw *sst_fw)
{
	struct sst_dsp *dsp = sst_fw->dsp;

	mutex_lock(&dsp->mutex);
	list_del(&sst_fw->list);
	mutex_unlock(&dsp->mutex);

	if (sst_fw->dma_buf)
		dma_free_coherent(dsp->dma_dev, sst_fw->size, sst_fw->dma_buf,
			sst_fw->dmable_fw_paddr);
	kfree(sst_fw);
}
EXPORT_SYMBOL_GPL(sst_fw_free);

/* free all firmware objects */
void sst_fw_free_all(struct sst_dsp *dsp)
{
	struct sst_fw *sst_fw, *t;

	mutex_lock(&dsp->mutex);
	list_for_each_entry_safe(sst_fw, t, &dsp->fw_list, list) {

		list_del(&sst_fw->list);
		/* free with the same device the buffer was allocated from */
		dma_free_coherent(dsp->dma_dev, sst_fw->size, sst_fw->dma_buf,
			sst_fw->dmable_fw_paddr);
		kfree(sst_fw);
	}
	mutex_unlock(&dsp->mutex);
}
EXPORT_SYMBOL_GPL(sst_fw_free_all);

/* create a new SST generic module from FW template */
struct sst_module *sst_module_new(struct sst_fw *sst_fw,
	struct sst_module_template *template, void *private)
{
	struct sst_dsp *dsp = sst_fw->dsp;
	struct sst_module *sst_module;

	sst_module = kzalloc(sizeof(*sst_module), GFP_KERNEL);
	if (sst_module == NULL)
		return NULL;

	sst_module->id = template->id;
	sst_module->dsp = dsp;
	sst_module->sst_fw = sst_fw;
	sst_module->scratch_size = template->scratch_size;
	sst_module->persistent_size = template->persistent_size;
	sst_module->entry = template->entry;
	sst_module->state = SST_MODULE_STATE_UNLOADED;

	INIT_LIST_HEAD(&sst_module->block_list);
	INIT_LIST_HEAD(&sst_module->runtime_list);

	mutex_lock(&dsp->mutex);
	list_add(&sst_module->list, &dsp->module_list);
	mutex_unlock(&dsp->mutex);

	return sst_module;
}
EXPORT_SYMBOL_GPL(sst_module_new);

/* free firmware module and remove from available list */
void sst_module_free(struct sst_module *sst_module)
{
	struct sst_dsp *dsp = sst_module->dsp;

	mutex_lock(&dsp->mutex);
	list_del(&sst_module->list);
	mutex_unlock(&dsp->mutex);

	kfree(sst_module);
}
EXPORT_SYMBOL_GPL(sst_module_free);

struct sst_module_runtime *sst_module_runtime_new(struct sst_module *module,
	int id, void *private)
{
	struct sst_dsp *dsp = module->dsp;
	struct sst_module_runtime *runtime;

	runtime = kzalloc(sizeof(*runtime), GFP_KERNEL);
	if (runtime == NULL)
		return NULL;

	runtime->id = id;
	runtime->dsp = dsp;
	runtime->module = module;
	INIT_LIST_HEAD(&runtime->block_list);

	mutex_lock(&dsp->mutex);
	list_add(&runtime->list, &module->runtime_list);
	mutex_unlock(&dsp->mutex);

	return runtime;
}
EXPORT_SYMBOL_GPL(sst_module_runtime_new);

void sst_module_runtime_free(struct sst_module_runtime *runtime)
{
	struct sst_dsp *dsp = runtime->dsp;

	mutex_lock(&dsp->mutex);
	list_del(&runtime->list);
	mutex_unlock(&dsp->mutex);

	kfree(runtime);
}
EXPORT_SYMBOL_GPL(sst_module_runtime_free);

static struct sst_mem_block *find_block(struct sst_dsp *dsp,
	struct sst_block_allocator *ba)
{
	struct sst_mem_block *block;

	list_for_each_entry(block, &dsp->free_block_list, list) {
		if (block->type == ba->type && block->offset == ba->offset)
			return block;
	}

	return NULL;
}

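/*
 * Allocate a physically contiguous run of blocks. Starting from
 * ba->offset, each block is looked up by exact offset with find_block()
 * and moved to a temporary list; if any block in the run is missing,
 * the whole allocation is rolled back onto the free list and -ENOMEM
 * is returned.
 */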
/* block allocations must start on a block boundary */
static int block_alloc_contiguous(struct sst_dsp *dsp,
	struct sst_block_allocator *ba, struct list_head *block_list)
{
	struct list_head tmp = LIST_HEAD_INIT(tmp);
	struct sst_mem_block *block;
	u32 block_start = SST_HSW_BLOCK_ANY;
	int size = ba->size, offset = ba->offset;

	while (ba->size > 0) {

		block = find_block(dsp, ba);
		if (!block) {
			list_splice(&tmp, &dsp->free_block_list);

			ba->size = size;
			ba->offset = offset;
			return -ENOMEM;
		}

		list_move_tail(&block->list, &tmp);
		ba->offset += block->size;
		ba->size -= block->size;
	}
	ba->size = size;
	ba->offset = offset;

	list_for_each_entry(block, &tmp, list) {

		if (block->offset < block_start)
			block_start = block->offset;

		list_add(&block->module_list, block_list);

		dev_dbg(dsp->dev, "block allocated %d:%d at offset 0x%x\n",
			block->type, block->index, block->offset);
	}

	list_splice(&tmp, &dsp->used_block_list);
	return 0;
}

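/*
 * Allocation strategy: first try to satisfy the request from a single
 * free block of the right type; only if the request is larger than any
 * single block, fall back to building a contiguous run of blocks with
 * block_alloc_contiguous().
 */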
/* allocate first free DSP blocks for data - callers hold locks */
static int block_alloc(struct sst_dsp *dsp, struct sst_block_allocator *ba,
	struct list_head *block_list)
{
	struct sst_mem_block *block, *tmp;
	int ret = 0;

	if (ba->size == 0)
		return 0;

	/* find first free whole block that can hold the module */
	list_for_each_entry_safe(block, tmp, &dsp->free_block_list, list) {

		/* ignore blocks with wrong type */
		if (block->type != ba->type)
			continue;

		if (ba->size > block->size)
			continue;

		ba->offset = block->offset;
		block->bytes_used = ba->size % block->size;
		list_add(&block->module_list, block_list);
		list_move(&block->list, &dsp->used_block_list);
		dev_dbg(dsp->dev, "block allocated %d:%d at offset 0x%x\n",
			block->type, block->index, block->offset);
		return 0;
	}

	/* then find free multiple blocks that can hold the module */
	list_for_each_entry_safe(block, tmp, &dsp->free_block_list, list) {

		/* ignore blocks with wrong type */
		if (block->type != ba->type)
			continue;

		/* does the request span more than one block? */
		if (ba->size > block->size) {

			/* align ba to block boundary */
			ba->offset = block->offset;

			ret = block_alloc_contiguous(dsp, ba, block_list);
			if (ret == 0)
				return ret;
		}
	}

	/* not enough free block space */
	return -ENOMEM;
}

int sst_alloc_blocks(struct sst_dsp *dsp, struct sst_block_allocator *ba,
	struct list_head *block_list)
{
	int ret;

	dev_dbg(dsp->dev, "block request 0x%x bytes at offset 0x%x type %d\n",
		ba->size, ba->offset, ba->type);

	mutex_lock(&dsp->mutex);

	ret = block_alloc(dsp, ba, block_list);
	if (ret < 0) {
		dev_err(dsp->dev, "error: can't alloc blocks %d\n", ret);
		goto out;
	}

	/* prepare DSP blocks for module usage */
	ret = block_list_prepare(dsp, block_list);
	if (ret < 0)
		dev_err(dsp->dev, "error: prepare failed\n");

out:
	mutex_unlock(&dsp->mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(sst_alloc_blocks);

int sst_free_blocks(struct sst_dsp *dsp, struct list_head *block_list)
{
	mutex_lock(&dsp->mutex);
	block_list_remove(dsp, block_list);
	mutex_unlock(&dsp->mutex);
	return 0;
}
EXPORT_SYMBOL_GPL(sst_free_blocks);

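/*
 * Fixed-address allocation handles three cases: the section is already
 * fully covered by blocks the module owns, the section starts inside
 * an owned block and spills past its end, or the section must be
 * claimed entirely from the free list. Spill-over is completed with
 * block_alloc_contiguous() from the end of the partially covering
 * block.
 */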
/* allocate memory blocks for static module addresses - callers hold locks */
static int block_alloc_fixed(struct sst_dsp *dsp, struct sst_block_allocator *ba,
	struct list_head *block_list)
{
	struct sst_mem_block *block, *tmp;
	struct sst_block_allocator ba_tmp = *ba;
	u32 end = ba->offset + ba->size, block_end;
	int err;

	/* only IRAM/DRAM blocks are managed */
	if (ba->type != SST_MEM_IRAM && ba->type != SST_MEM_DRAM)
		return 0;

	/* are blocks already attached to this module? */
	list_for_each_entry_safe(block, tmp, block_list, module_list) {

		/* ignore blocks with wrong type */
		if (block->type != ba->type)
			continue;

		block_end = block->offset + block->size;

		/* find block that holds section */
		if (ba->offset >= block->offset && end <= block_end)
			return 0;

		/* does the section span beyond this block? */
		if (ba->offset >= block->offset && ba->offset < block_end) {

			/* align ba to block boundary */
			ba_tmp.size -= block_end - ba->offset;
			ba_tmp.offset = block_end;
			err = block_alloc_contiguous(dsp, &ba_tmp, block_list);
			if (err < 0)
				return -ENOMEM;

			/* module already owns blocks */
			return 0;
		}
	}

	/* find first free blocks that can hold section in free list */
	list_for_each_entry_safe(block, tmp, &dsp->free_block_list, list) {
		block_end = block->offset + block->size;

		/* ignore blocks with wrong type */
		if (block->type != ba->type)
			continue;

		/* find block that holds section */
		if (ba->offset >= block->offset && end <= block_end) {

			/* add block */
			list_move(&block->list, &dsp->used_block_list);
			list_add(&block->module_list, block_list);
			dev_dbg(dsp->dev, "block allocated %d:%d at offset 0x%x\n",
				block->type, block->index, block->offset);
			return 0;
		}

		/* does the section span beyond this block? */
		if (ba->offset >= block->offset && ba->offset < block_end) {

			/* add block */
			list_move(&block->list, &dsp->used_block_list);
			list_add(&block->module_list, block_list);
			/* align ba to block boundary */
			ba_tmp.size -= block_end - ba->offset;
			ba_tmp.offset = block_end;

			err = block_alloc_contiguous(dsp, &ba_tmp, block_list);
			if (err < 0)
				return -ENOMEM;

			return 0;
		}
	}

	return -ENOMEM;
}

/* Load fixed module data into DSP memory blocks */
int sst_module_alloc_blocks(struct sst_module *module)
{
	struct sst_dsp *dsp = module->dsp;
	struct sst_fw *sst_fw = module->sst_fw;
	struct sst_block_allocator ba;
	int ret;

	memset(&ba, 0, sizeof(ba));
	ba.size = module->size;
	ba.type = module->type;
	ba.offset = module->offset;

	dev_dbg(dsp->dev, "block request 0x%x bytes at offset 0x%x type %d\n",
		ba.size, ba.offset, ba.type);

	mutex_lock(&dsp->mutex);

	/* alloc blocks that include this section */
	ret = block_alloc_fixed(dsp, &ba, &module->block_list);
	if (ret < 0) {
		dev_err(dsp->dev,
			"error: no free blocks for section at offset 0x%x size 0x%x\n",
			module->offset, module->size);
		mutex_unlock(&dsp->mutex);
		return -ENOMEM;
	}

	/* prepare DSP blocks for module copy */
	ret = block_list_prepare(dsp, &module->block_list);
	if (ret < 0) {
		dev_err(dsp->dev, "error: fw module prepare failed\n");
		goto err;
	}

	/* copy partial module data to blocks */
	if (dsp->fw_use_dma) {
		ret = sst_dsp_dma_copyto(dsp,
			dsp->addr.lpe_base + module->offset,
			sst_fw->dmable_fw_paddr + module->data_offset,
			module->size);
		if (ret < 0) {
			dev_err(dsp->dev, "error: module copy failed\n");
			goto err;
		}
	} else
		sst_memcpy32(dsp->addr.lpe + module->offset, module->data,
			module->size);

	mutex_unlock(&dsp->mutex);
	return ret;

err:
	block_list_remove(dsp, &module->block_list);
	mutex_unlock(&dsp->mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(sst_module_alloc_blocks);

/* Unload entire module from DSP memory */
int sst_module_free_blocks(struct sst_module *module)
{
	struct sst_dsp *dsp = module->dsp;

	mutex_lock(&dsp->mutex);
	block_list_remove(dsp, &module->block_list);
	mutex_unlock(&dsp->mutex);
	return 0;
}
EXPORT_SYMBOL_GPL(sst_module_free_blocks);

int sst_module_runtime_alloc_blocks(struct sst_module_runtime *runtime,
	int offset)
{
	struct sst_dsp *dsp = runtime->dsp;
	struct sst_module *module = runtime->module;
	struct sst_block_allocator ba;
	int ret;

	if (module->persistent_size == 0)
		return 0;

	memset(&ba, 0, sizeof(ba));
	ba.size = module->persistent_size;
	ba.type = SST_MEM_DRAM;

	mutex_lock(&dsp->mutex);

	/* do we need to allocate at a fixed address? */
	if (offset != 0) {

		ba.offset = offset;

		dev_dbg(dsp->dev, "persistent fixed block request 0x%x bytes type %d offset 0x%x\n",
			ba.size, ba.type, ba.offset);

		/* alloc blocks that include this section */
		ret = block_alloc_fixed(dsp, &ba, &runtime->block_list);

	} else {
		dev_dbg(dsp->dev, "persistent block request 0x%x bytes type %d\n",
			ba.size, ba.type);

		/* alloc blocks that include this section */
		ret = block_alloc(dsp, &ba, &runtime->block_list);
	}
	if (ret < 0) {
		dev_err(dsp->dev,
			"error: no free blocks for runtime module size 0x%x\n",
			module->persistent_size);
		mutex_unlock(&dsp->mutex);
		return -ENOMEM;
	}
	runtime->persistent_offset = ba.offset;

	/* prepare DSP blocks for module copy */
	ret = block_list_prepare(dsp, &runtime->block_list);
	if (ret < 0) {
		dev_err(dsp->dev, "error: runtime block prepare failed\n");
		goto err;
	}

	mutex_unlock(&dsp->mutex);
	return ret;

err:
	/* release the runtime's blocks, not the parent module's */
	block_list_remove(dsp, &runtime->block_list);
	mutex_unlock(&dsp->mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(sst_module_runtime_alloc_blocks);

int sst_module_runtime_free_blocks(struct sst_module_runtime *runtime)
{
	struct sst_dsp *dsp = runtime->dsp;

	mutex_lock(&dsp->mutex);
	block_list_remove(dsp, &runtime->block_list);
	mutex_unlock(&dsp->mutex);
	return 0;
}
EXPORT_SYMBOL_GPL(sst_module_runtime_free_blocks);

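/*
 * Runtime context save/restore: on suspend the module's persistent DSP
 * memory is copied into a coherent DMA buffer allocated here, and on
 * resume it is copied back and the buffer is freed. The copy uses the
 * DMA engine when available and falls back to sst_memcpy32() otherwise.
 */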
int sst_module_runtime_save(struct sst_module_runtime *runtime,
	struct sst_module_runtime_context *context)
{
	struct sst_dsp *dsp = runtime->dsp;
	struct sst_module *module = runtime->module;
	int ret = 0;

	dev_dbg(dsp->dev, "saving runtime %d memory at 0x%x size 0x%x\n",
		runtime->id, runtime->persistent_offset,
		module->persistent_size);

	context->buffer = dma_alloc_coherent(dsp->dma_dev,
		module->persistent_size,
		&context->dma_buffer, GFP_DMA | GFP_KERNEL);
	if (!context->buffer) {
		dev_err(dsp->dev, "error: DMA context alloc failed\n");
		return -ENOMEM;
	}

	mutex_lock(&dsp->mutex);

	if (dsp->fw_use_dma) {

		ret = sst_dsp_dma_get_channel(dsp, 0);
		if (ret < 0)
			goto err;

		ret = sst_dsp_dma_copyfrom(dsp, context->dma_buffer,
			dsp->addr.lpe_base + runtime->persistent_offset,
			module->persistent_size);
		sst_dsp_dma_put_channel(dsp);
		if (ret < 0) {
			dev_err(dsp->dev, "error: context copy failed\n");
			goto err;
		}
	} else
		sst_memcpy32(context->buffer, dsp->addr.lpe +
			runtime->persistent_offset,
			module->persistent_size);

err:
	mutex_unlock(&dsp->mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(sst_module_runtime_save);

int sst_module_runtime_restore(struct sst_module_runtime *runtime,
	struct sst_module_runtime_context *context)
{
	struct sst_dsp *dsp = runtime->dsp;
	struct sst_module *module = runtime->module;
	int ret = 0;

	dev_dbg(dsp->dev, "restoring runtime %d memory at 0x%x size 0x%x\n",
		runtime->id, runtime->persistent_offset,
		module->persistent_size);

	mutex_lock(&dsp->mutex);

	if (!context->buffer) {
		dev_info(dsp->dev, "no context buffer to restore\n");
		goto err;
	}

	if (dsp->fw_use_dma) {

		ret = sst_dsp_dma_get_channel(dsp, 0);
		if (ret < 0)
			goto err;

		ret = sst_dsp_dma_copyto(dsp,
			dsp->addr.lpe_base + runtime->persistent_offset,
			context->dma_buffer, module->persistent_size);
		sst_dsp_dma_put_channel(dsp);
		if (ret < 0) {
			dev_err(dsp->dev, "error: module copy failed\n");
			goto err;
		}
	} else
		sst_memcpy32(dsp->addr.lpe + runtime->persistent_offset,
			context->buffer, module->persistent_size);

	dma_free_coherent(dsp->dma_dev, module->persistent_size,
				context->buffer, context->dma_buffer);
	context->buffer = NULL;

err:
	mutex_unlock(&dsp->mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(sst_module_runtime_restore);

/* register a DSP memory block for use with FW based modules */
struct sst_mem_block *sst_mem_block_register(struct sst_dsp *dsp, u32 offset,
	u32 size, enum sst_mem_type type, const struct sst_block_ops *ops,
	u32 index, void *private)
{
	struct sst_mem_block *block;

	block = kzalloc(sizeof(*block), GFP_KERNEL);
	if (block == NULL)
		return NULL;

	block->offset = offset;
	block->size = size;
	block->index = index;
	block->type = type;
	block->dsp = dsp;
	block->private = private;
	block->ops = ops;

	mutex_lock(&dsp->mutex);
	list_add(&block->list, &dsp->free_block_list);
	mutex_unlock(&dsp->mutex);

	return block;
}
EXPORT_SYMBOL_GPL(sst_mem_block_register);
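
/*
 * Illustrative registration sketch. The offset, size and ops table
 * here are hypothetical; platform drivers register their real
 * IRAM/DRAM layout at probe time:
 *
 *	struct sst_mem_block *blk;
 *
 *	blk = sst_mem_block_register(dsp, 0x0, 0x2000, SST_MEM_IRAM,
 *				     &my_block_ops, 0, NULL);
 *	if (!blk)
 *		return -ENOMEM;
 */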

/* unregister all DSP memory blocks */
void sst_mem_block_unregister_all(struct sst_dsp *dsp)
{
	struct sst_mem_block *block, *tmp;

	mutex_lock(&dsp->mutex);

	/* unregister used blocks */
	list_for_each_entry_safe(block, tmp, &dsp->used_block_list, list) {
		list_del(&block->list);
		kfree(block);
	}

	/* unregister free blocks */
	list_for_each_entry_safe(block, tmp, &dsp->free_block_list, list) {
		list_del(&block->list);
		kfree(block);
	}

	mutex_unlock(&dsp->mutex);
}
EXPORT_SYMBOL_GPL(sst_mem_block_unregister_all);

/* allocate scratch buffer blocks */
int sst_block_alloc_scratch(struct sst_dsp *dsp)
{
	struct sst_module *module;
	struct sst_block_allocator ba;
	int ret;

	mutex_lock(&dsp->mutex);

	/* calculate required scratch size */
	dsp->scratch_size = 0;
	list_for_each_entry(module, &dsp->module_list, list) {
		dev_dbg(dsp->dev, "module %d scratch req 0x%x bytes\n",
			module->id, module->scratch_size);
		if (dsp->scratch_size < module->scratch_size)
			dsp->scratch_size = module->scratch_size;
	}

	dev_dbg(dsp->dev, "scratch buffer required is 0x%x bytes\n",
		dsp->scratch_size);

	if (dsp->scratch_size == 0) {
		dev_info(dsp->dev, "no modules need scratch buffer\n");
		mutex_unlock(&dsp->mutex);
		return 0;
	}

	/* allocate blocks for module scratch buffers */
	dev_dbg(dsp->dev, "allocating scratch blocks\n");

	ba.size = dsp->scratch_size;
	ba.type = SST_MEM_DRAM;

	/* do we need to allocate at a fixed offset? */
	if (dsp->scratch_offset != 0) {

		/* set the offset before it is logged and used */
		ba.offset = dsp->scratch_offset;

		dev_dbg(dsp->dev, "block request 0x%x bytes type %d at 0x%x\n",
			ba.size, ba.type, ba.offset);

		/* alloc blocks that include this section */
		ret = block_alloc_fixed(dsp, &ba, &dsp->scratch_block_list);

	} else {
		dev_dbg(dsp->dev, "block request 0x%x bytes type %d\n",
			ba.size, ba.type);

		ba.offset = 0;
		ret = block_alloc(dsp, &ba, &dsp->scratch_block_list);
	}
	if (ret < 0) {
		dev_err(dsp->dev, "error: can't alloc scratch blocks\n");
		mutex_unlock(&dsp->mutex);
		return ret;
	}

	ret = block_list_prepare(dsp, &dsp->scratch_block_list);
	if (ret < 0) {
		dev_err(dsp->dev, "error: scratch block prepare failed\n");
		mutex_unlock(&dsp->mutex);
		return ret;
	}

	/* assign the same scratch offset to each module */
	dsp->scratch_offset = ba.offset;
	mutex_unlock(&dsp->mutex);
	return dsp->scratch_size;
}
EXPORT_SYMBOL_GPL(sst_block_alloc_scratch);

/* free all scratch blocks */
void sst_block_free_scratch(struct sst_dsp *dsp)
{
	mutex_lock(&dsp->mutex);
	block_list_remove(dsp, &dsp->scratch_block_list);
	mutex_unlock(&dsp->mutex);
}
EXPORT_SYMBOL_GPL(sst_block_free_scratch);

/* get a module from its unique ID */
struct sst_module *sst_module_get_from_id(struct sst_dsp *dsp, u32 id)
{
	struct sst_module *module;

	mutex_lock(&dsp->mutex);

	list_for_each_entry(module, &dsp->module_list, list) {
		if (module->id == id) {
			mutex_unlock(&dsp->mutex);
			return module;
		}
	}

	mutex_unlock(&dsp->mutex);
	return NULL;
}
EXPORT_SYMBOL_GPL(sst_module_get_from_id);

struct sst_module_runtime *sst_module_runtime_get_from_id(
	struct sst_module *module, u32 id)
{
	struct sst_module_runtime *runtime;
	struct sst_dsp *dsp = module->dsp;

	mutex_lock(&dsp->mutex);

	list_for_each_entry(runtime, &module->runtime_list, list) {
		if (runtime->id == id) {
			mutex_unlock(&dsp->mutex);
			return runtime;
		}
	}

	mutex_unlock(&dsp->mutex);
	return NULL;
}
EXPORT_SYMBOL_GPL(sst_module_runtime_get_from_id);

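/*
 * Example: with an IRAM host offset of 0x80000 and a DSP-view IRAM
 * base of 0x0 (the values here are hypothetical), a block at host
 * offset 0x81000 maps to 0x81000 - 0x80000 + 0x0 = 0x1000 in DSP
 * address space.
 */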
/* returns block address in DSP address space */
u32 sst_dsp_get_offset(struct sst_dsp *dsp, u32 offset,
	enum sst_mem_type type)
{
	switch (type) {
	case SST_MEM_IRAM:
		return offset - dsp->addr.iram_offset +
			dsp->addr.dsp_iram_offset;
	case SST_MEM_DRAM:
		return offset - dsp->addr.dram_offset +
			dsp->addr.dsp_dram_offset;
	default:
		return 0;
	}
}
EXPORT_SYMBOL_GPL(sst_dsp_get_offset);

struct sst_dsp *sst_dsp_new(struct device *dev,
	struct sst_dsp_device *sst_dev, struct sst_pdata *pdata)
{
	struct sst_dsp *sst;
	int err;

	dev_dbg(dev, "initialising audio DSP id 0x%x\n", pdata->id);

	sst = devm_kzalloc(dev, sizeof(*sst), GFP_KERNEL);
	if (sst == NULL)
		return NULL;

	spin_lock_init(&sst->spinlock);
	mutex_init(&sst->mutex);
	sst->dev = dev;
	sst->dma_dev = pdata->dma_dev;
	sst->thread_context = sst_dev->thread_context;
	sst->sst_dev = sst_dev;
	sst->id = pdata->id;
	sst->irq = pdata->irq;
	sst->ops = sst_dev->ops;
	sst->pdata = pdata;
	INIT_LIST_HEAD(&sst->used_block_list);
	INIT_LIST_HEAD(&sst->free_block_list);
	INIT_LIST_HEAD(&sst->module_list);
	INIT_LIST_HEAD(&sst->fw_list);
	INIT_LIST_HEAD(&sst->scratch_block_list);

	/* Initialise SST Audio DSP */
	if (sst->ops->init) {
		err = sst->ops->init(sst, pdata);
		if (err < 0)
			return NULL;
	}

	/* Register the ISR */
	err = request_threaded_irq(sst->irq, sst->ops->irq_handler,
		sst_dev->thread, IRQF_SHARED, "AudioDSP", sst);
	if (err)
		goto irq_err;

	err = sst_dma_new(sst);
	if (err) {
		dev_err(dev, "sst_dma_new failed %d\n", err);
		goto dma_err;
	}

	return sst;

dma_err:
	free_irq(sst->irq, sst);
irq_err:
	if (sst->ops->free)
		sst->ops->free(sst);

	return NULL;
}
EXPORT_SYMBOL_GPL(sst_dsp_new);

void sst_dsp_free(struct sst_dsp *sst)
{
	free_irq(sst->irq, sst);
	if (sst->ops->free)
		sst->ops->free(sst);

	sst_dma_free(sst->dma);
}
EXPORT_SYMBOL_GPL(sst_dsp_free);

MODULE_DESCRIPTION("Intel SST Firmware Loader");
MODULE_LICENSE("GPL v2");