/*
 * Intel SST Firmware Loader
 *
 * Copyright (C) 2013, Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/firmware.h>
#include <linux/export.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/pci.h>

#include <asm/page.h>
#include <asm/pgtable.h>

#include "sst-dsp.h"
#include "sst-dsp-priv.h"

static void block_module_remove(struct sst_module *module);

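/* copy data into DSP MMIO space - only 32 bit accesses are supported */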
static void sst_memcpy32(volatile void __iomem *dest, void *src, u32 bytes)
{
	u32 i;

	/* copy one 32 bit word at a time as 64 bit access is not supported */
	for (i = 0; i < bytes; i += 4)
		memcpy_toio(dest + i, src + i, 4);
}

/* create new generic firmware object */
struct sst_fw *sst_fw_new(struct sst_dsp *dsp,
	const struct firmware *fw, void *private)
{
	struct sst_fw *sst_fw;
	int err;

	if (!dsp->ops->parse_fw)
		return NULL;

	sst_fw = kzalloc(sizeof(*sst_fw), GFP_KERNEL);
	if (sst_fw == NULL)
		return NULL;

	sst_fw->dsp = dsp;
	sst_fw->private = private;
	sst_fw->size = fw->size;

	/* allocate DMA buffer to store FW data */
	sst_fw->dma_buf = dma_alloc_coherent(dsp->dma_dev, sst_fw->size,
				&sst_fw->dmable_fw_paddr, GFP_DMA | GFP_KERNEL);
	if (!sst_fw->dma_buf) {
		dev_err(dsp->dev, "error: DMA alloc failed\n");
		kfree(sst_fw);
		return NULL;
	}

	/* copy FW data to DMA-able memory */
	memcpy((void *)sst_fw->dma_buf, (void *)fw->data, fw->size);

	/* call core specific FW parser to load FW data into DSP */
	err = dsp->ops->parse_fw(sst_fw);
	if (err < 0) {
		dev_err(dsp->dev, "error: parse fw failed %d\n", err);
		goto parse_err;
	}

	mutex_lock(&dsp->mutex);
	list_add(&sst_fw->list, &dsp->fw_list);
	mutex_unlock(&dsp->mutex);

	return sst_fw;

parse_err:
	/* free on the same device the buffer was allocated from */
	dma_free_coherent(dsp->dma_dev, sst_fw->size,
				sst_fw->dma_buf,
				sst_fw->dmable_fw_paddr);
	kfree(sst_fw);
	return NULL;
}
EXPORT_SYMBOL_GPL(sst_fw_new);

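/* re-parse the cached firmware image and reload it into the DSP */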
int sst_fw_reload(struct sst_fw *sst_fw)
{
	struct sst_dsp *dsp = sst_fw->dsp;
	int ret;

	dev_dbg(dsp->dev, "reloading firmware\n");

	/* call core specific FW parser to load FW data into DSP */
	ret = dsp->ops->parse_fw(sst_fw);
	if (ret < 0)
		dev_err(dsp->dev, "error: parse fw failed %d\n", ret);

	return ret;
}
EXPORT_SYMBOL_GPL(sst_fw_reload);

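/* unload firmware - free every module that was created from this image */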
void sst_fw_unload(struct sst_fw *sst_fw)
{
	struct sst_dsp *dsp = sst_fw->dsp;
	struct sst_module *module, *tmp;

	dev_dbg(dsp->dev, "unloading firmware\n");

	mutex_lock(&dsp->mutex);
	list_for_each_entry_safe(module, tmp, &dsp->module_list, list) {
		if (module->sst_fw == sst_fw) {
			block_module_remove(module);
			list_del(&module->list);
			kfree(module);
		}
	}

	mutex_unlock(&dsp->mutex);
}
EXPORT_SYMBOL_GPL(sst_fw_unload);

/* free single firmware object */
void sst_fw_free(struct sst_fw *sst_fw)
{
	struct sst_dsp *dsp = sst_fw->dsp;

	mutex_lock(&dsp->mutex);
	list_del(&sst_fw->list);
	mutex_unlock(&dsp->mutex);

	dma_free_coherent(dsp->dma_dev, sst_fw->size, sst_fw->dma_buf,
			sst_fw->dmable_fw_paddr);
	kfree(sst_fw);
}
EXPORT_SYMBOL_GPL(sst_fw_free);

/* free all firmware objects */
void sst_fw_free_all(struct sst_dsp *dsp)
{
	struct sst_fw *sst_fw, *t;

	mutex_lock(&dsp->mutex);
	list_for_each_entry_safe(sst_fw, t, &dsp->fw_list, list) {

		list_del(&sst_fw->list);
		dma_free_coherent(dsp->dma_dev, sst_fw->size, sst_fw->dma_buf,
			sst_fw->dmable_fw_paddr);
		kfree(sst_fw);
	}
	mutex_unlock(&dsp->mutex);
}
EXPORT_SYMBOL_GPL(sst_fw_free_all);

/* create a new SST generic module from FW template */
struct sst_module *sst_module_new(struct sst_fw *sst_fw,
	struct sst_module_template *template, void *private)
{
	struct sst_dsp *dsp = sst_fw->dsp;
	struct sst_module *sst_module;

	sst_module = kzalloc(sizeof(*sst_module), GFP_KERNEL);
	if (sst_module == NULL)
		return NULL;

	sst_module->id = template->id;
	sst_module->dsp = dsp;
	sst_module->sst_fw = sst_fw;

	/* copy the scratch (s) and persistent (p) data descriptors */
	memcpy(&sst_module->s, &template->s, sizeof(struct sst_module_data));
	memcpy(&sst_module->p, &template->p, sizeof(struct sst_module_data));

	INIT_LIST_HEAD(&sst_module->block_list);

	mutex_lock(&dsp->mutex);
	list_add(&sst_module->list, &dsp->module_list);
	mutex_unlock(&dsp->mutex);

	return sst_module;
}
EXPORT_SYMBOL_GPL(sst_module_new);

/* free firmware module and remove from available list */
void sst_module_free(struct sst_module *sst_module)
{
	struct sst_dsp *dsp = sst_module->dsp;

	mutex_lock(&dsp->mutex);
	list_del(&sst_module->list);
	mutex_unlock(&dsp->mutex);

	kfree(sst_module);
}
EXPORT_SYMBOL_GPL(sst_module_free);

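/* find a free block of the given type at the given offset - callers hold locks */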
static struct sst_mem_block *find_block(struct sst_dsp *dsp, int type,
	u32 offset)
{
	struct sst_mem_block *block;

	list_for_each_entry(block, &dsp->free_block_list, list) {
		if (block->type == type && block->offset == offset)
			return block;
	}

	return NULL;
}

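/* claim a run of contiguous free blocks starting at offset - callers hold locks */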
static int block_alloc_contiguous(struct sst_module *module,
	struct sst_module_data *data, u32 offset, int size)
{
	struct list_head tmp = LIST_HEAD_INIT(tmp);
	struct sst_dsp *dsp = module->dsp;
	struct sst_mem_block *block;

	while (size > 0) {
		block = find_block(dsp, data->type, offset);
		if (!block) {
			list_splice(&tmp, &dsp->free_block_list);
			return -ENOMEM;
		}

		list_move_tail(&block->list, &tmp);
		offset += block->size;
		size -= block->size;
	}

	list_for_each_entry(block, &tmp, list)
		list_add(&block->module_list, &module->block_list);

	list_splice(&tmp, &dsp->used_block_list);
	return 0;
}

/* allocate free DSP blocks for module data - callers hold locks */
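/*
 * Allocation is done in two passes: first try a single free block large
 * enough for the whole of the module data, then fall back to a run of
 * contiguous smaller blocks starting at a free block's offset.
 */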
static int block_alloc(struct sst_module *module,
	struct sst_module_data *data)
{
	struct sst_dsp *dsp = module->dsp;
	struct sst_mem_block *block, *tmp;
	int ret = 0;

	if (data->size == 0)
		return 0;

	/* find first free whole block that can hold the module */
	list_for_each_entry_safe(block, tmp, &dsp->free_block_list, list) {

		/* ignore blocks with wrong type */
		if (block->type != data->type)
			continue;

		if (data->size > block->size)
			continue;

		data->offset = block->offset;
		block->data_type = data->data_type;
		/* data->size <= block->size here, so the block holds data->size bytes */
		block->bytes_used = data->size;
		list_add(&block->module_list, &module->block_list);
		list_move(&block->list, &dsp->used_block_list);
		dev_dbg(dsp->dev, " *module %d added block %d:%d\n",
			module->id, block->type, block->index);
		return 0;
	}

	/* then find free multiple blocks that can hold the module */
	list_for_each_entry_safe(block, tmp, &dsp->free_block_list, list) {

		/* ignore blocks with wrong type */
		if (block->type != data->type)
			continue;

		/* does the module span more than one block? */
		if (data->size > block->size) {
			ret = block_alloc_contiguous(module, data,
				block->offset, data->size);
			if (ret == 0)
				return ret;
		}
	}

	/* not enough free block space */
	return -ENOMEM;
}

/* remove module from memory - callers hold locks */
static void block_module_remove(struct sst_module *module)
{
	struct sst_mem_block *block, *tmp;
	struct sst_dsp *dsp = module->dsp;
	int err;

	/* disable each block */
	list_for_each_entry(block, &module->block_list, module_list) {

		if (block->ops && block->ops->disable) {
			err = block->ops->disable(block);
			if (err < 0)
				dev_err(dsp->dev,
					"error: can't disable block %d:%d\n",
					block->type, block->index);
		}
	}

	/* mark each block as free */
	list_for_each_entry_safe(block, tmp, &module->block_list, module_list) {
		list_del(&block->module_list);
		list_move(&block->list, &dsp->free_block_list);
	}
}

/* prepare the memory block to receive data from host - callers hold locks */
static int block_module_prepare(struct sst_module *module)
{
	struct sst_mem_block *block;
	int ret = 0;

	/* enable each block so that it's ready for module P/S data */
	list_for_each_entry(block, &module->block_list, module_list) {

		if (block->ops && block->ops->enable) {
			ret = block->ops->enable(block);
			if (ret < 0) {
				dev_err(module->dsp->dev,
					"error: can't enable block %d:%d\n",
					block->type, block->index);
				goto err;
			}
		}
	}
	return ret;

err:
	/* disable any blocks that were enabled before the failure */
	list_for_each_entry(block, &module->block_list, module_list) {
		if (block->ops && block->ops->disable)
			block->ops->disable(block);
	}
	return ret;
}

/* allocate memory blocks for static module addresses - callers hold locks */
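/*
 * Fixed sections must land at data->offset. Check blocks already owned by
 * the module first, then the free list, extending the allocation with
 * contiguous blocks when the section spans more than one block.
 */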
static int block_alloc_fixed(struct sst_module *module,
	struct sst_module_data *data)
{
	struct sst_dsp *dsp = module->dsp;
	struct sst_mem_block *block, *tmp;
	u32 end = data->offset + data->size, block_end;
	int err;

	/* only IRAM/DRAM blocks are managed */
	if (data->type != SST_MEM_IRAM && data->type != SST_MEM_DRAM)
		return 0;

	/* are blocks already attached to this module? */
	list_for_each_entry_safe(block, tmp, &module->block_list, module_list) {

		/* force compacting mem blocks of the same data_type */
		if (block->data_type != data->data_type)
			continue;

		block_end = block->offset + block->size;

		/* find block that holds section */
		if (data->offset >= block->offset && end < block_end)
			return 0;

		/* does the section span more than one block? */
		if (data->offset >= block->offset && data->offset < block_end) {

			err = block_alloc_contiguous(module, data,
				block->offset + block->size,
				data->size - block->size);
			if (err < 0)
				return -ENOMEM;

			/* module already owns blocks */
			return 0;
		}
	}

	/* find first free blocks that can hold section in free list */
	list_for_each_entry_safe(block, tmp, &dsp->free_block_list, list) {
		block_end = block->offset + block->size;

		/* find block that holds section */
		if (data->offset >= block->offset && end < block_end) {

			/* add block */
			block->data_type = data->data_type;
			list_move(&block->list, &dsp->used_block_list);
			list_add(&block->module_list, &module->block_list);
			return 0;
		}

		/* does the section span more than one block? */
		if (data->offset >= block->offset && data->offset < block_end) {

			err = block_alloc_contiguous(module, data,
				block->offset, data->size);
			if (err < 0)
				return -ENOMEM;

			return 0;
		}
	}

	return -ENOMEM;
}

/* Load fixed module data into DSP memory blocks */
int sst_module_insert_fixed_block(struct sst_module *module,
	struct sst_module_data *data)
{
	struct sst_dsp *dsp = module->dsp;
	int ret;

	mutex_lock(&dsp->mutex);

	/* alloc blocks that include this section */
	ret = block_alloc_fixed(module, data);
	if (ret < 0) {
		dev_err(dsp->dev,
			"error: no free blocks for section at offset 0x%x size 0x%x\n",
			data->offset, data->size);
		mutex_unlock(&dsp->mutex);
		return -ENOMEM;
	}

	/* prepare DSP blocks for module copy */
	ret = block_module_prepare(module);
	if (ret < 0) {
		dev_err(dsp->dev, "error: fw module prepare failed\n");
		goto err;
	}

	/* copy partial module data to blocks */
	sst_memcpy32(dsp->addr.lpe + data->offset, data->data, data->size);

	mutex_unlock(&dsp->mutex);
	return ret;

err:
	block_module_remove(module);
	mutex_unlock(&dsp->mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(sst_module_insert_fixed_block);

/* Unload entire module from DSP memory */
int sst_block_module_remove(struct sst_module *module)
{
	struct sst_dsp *dsp = module->dsp;

	mutex_lock(&dsp->mutex);
	block_module_remove(module);
	mutex_unlock(&dsp->mutex);
	return 0;
}
EXPORT_SYMBOL_GPL(sst_block_module_remove);

/* register a DSP memory block for use with FW based modules */
struct sst_mem_block *sst_mem_block_register(struct sst_dsp *dsp, u32 offset,
	u32 size, enum sst_mem_type type, struct sst_block_ops *ops, u32 index,
	void *private)
{
	struct sst_mem_block *block;

	block = kzalloc(sizeof(*block), GFP_KERNEL);
	if (block == NULL)
		return NULL;

	block->offset = offset;
	block->size = size;
	block->index = index;
	block->type = type;
	block->dsp = dsp;
	block->private = private;
	block->ops = ops;

	mutex_lock(&dsp->mutex);
	list_add(&block->list, &dsp->free_block_list);
	mutex_unlock(&dsp->mutex);

	return block;
}
EXPORT_SYMBOL_GPL(sst_mem_block_register);

/* unregister all DSP memory blocks */
void sst_mem_block_unregister_all(struct sst_dsp *dsp)
{
	struct sst_mem_block *block, *tmp;

	mutex_lock(&dsp->mutex);

	/* unregister used blocks */
	list_for_each_entry_safe(block, tmp, &dsp->used_block_list, list) {
		list_del(&block->list);
		kfree(block);
	}

	/* unregister free blocks */
	list_for_each_entry_safe(block, tmp, &dsp->free_block_list, list) {
		list_del(&block->list);
		kfree(block);
	}

	mutex_unlock(&dsp->mutex);
}
EXPORT_SYMBOL_GPL(sst_mem_block_unregister_all);

/* allocate scratch buffer blocks */
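/*
 * The scratch buffer is shared: it is sized to the largest per-module
 * scratch requirement and every module is then given the same offset
 * into it.
 */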
struct sst_module *sst_mem_block_alloc_scratch(struct sst_dsp *dsp)
{
	struct sst_module *sst_module, *scratch;
	struct sst_mem_block *block, *tmp;
	u32 block_size;
	int ret = 0;

	scratch = kzalloc(sizeof(struct sst_module), GFP_KERNEL);
	if (scratch == NULL)
		return NULL;

	mutex_lock(&dsp->mutex);

	/* calculate required scratch size */
	list_for_each_entry(sst_module, &dsp->module_list, list) {
		if (scratch->s.size < sst_module->s.size)
			scratch->s.size = sst_module->s.size;
	}

	dev_dbg(dsp->dev, "scratch buffer required is %d bytes\n",
		scratch->s.size);

	/* init scratch module */
	scratch->dsp = dsp;
	scratch->s.type = SST_MEM_DRAM;
	scratch->s.data_type = SST_DATA_S;
	INIT_LIST_HEAD(&scratch->block_list);

	/* check free blocks before looking at used blocks for space */
	if (!list_empty(&dsp->free_block_list))
		block = list_first_entry(&dsp->free_block_list,
			struct sst_mem_block, list);
	else
		block = list_first_entry(&dsp->used_block_list,
			struct sst_mem_block, list);
	block_size = block->size;

	/* allocate blocks for module scratch buffers */
	dev_dbg(dsp->dev, "allocating scratch blocks\n");
	ret = block_alloc(scratch, &scratch->s);
	if (ret < 0) {
		dev_err(dsp->dev, "error: can't alloc scratch blocks\n");
		goto err;
	}

	/* assign the same offset of scratch to each module */
	list_for_each_entry(sst_module, &dsp->module_list, list)
		sst_module->s.offset = scratch->s.offset;

	mutex_unlock(&dsp->mutex);
	return scratch;

err:
	list_for_each_entry_safe(block, tmp, &scratch->block_list, module_list)
		list_del(&block->module_list);
	mutex_unlock(&dsp->mutex);
	return NULL;
}
EXPORT_SYMBOL_GPL(sst_mem_block_alloc_scratch);

/* free all scratch blocks */
void sst_mem_block_free_scratch(struct sst_dsp *dsp,
	struct sst_module *scratch)
{
	struct sst_mem_block *block, *tmp;

	mutex_lock(&dsp->mutex);

	list_for_each_entry_safe(block, tmp, &scratch->block_list, module_list)
		list_del(&block->module_list);

	mutex_unlock(&dsp->mutex);
}
EXPORT_SYMBOL_GPL(sst_mem_block_free_scratch);

/* get a module from its unique ID */
struct sst_module *sst_module_get_from_id(struct sst_dsp *dsp, u32 id)
{
	struct sst_module *module;

	mutex_lock(&dsp->mutex);

	list_for_each_entry(module, &dsp->module_list, list) {
		if (module->id == id) {
			mutex_unlock(&dsp->mutex);
			return module;
		}
	}

	mutex_unlock(&dsp->mutex);
	return NULL;
}
EXPORT_SYMBOL_GPL(sst_module_get_from_id);