1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Intel SST Firmware Loader
4  *
5  * Copyright (C) 2013, Intel Corporation. All rights reserved.
6  */
7 
8 #include <linux/kernel.h>
9 #include <linux/slab.h>
10 #include <linux/sched.h>
11 #include <linux/firmware.h>
12 #include <linux/export.h>
13 #include <linux/module.h>
14 #include <linux/platform_device.h>
15 #include <linux/dma-mapping.h>
16 #include <linux/dmaengine.h>
17 #include <linux/pci.h>
18 #include <linux/acpi.h>
19 
20 /* supported DMA engine drivers */
21 #include <linux/dma/dw.h>
22 
23 #include <asm/page.h>
24 #include <asm/pgtable.h>
25 
26 #include "sst-dsp.h"
27 #include "sst-dsp-priv.h"
28 
29 #define SST_DMA_RESOURCES	2
30 #define SST_DSP_DMA_MAX_BURST	0x3
31 #define SST_HSW_BLOCK_ANY	0xffffffff
32 
33 #define SST_HSW_MASK_DMA_ADDR_DSP 0xfff00000
34 
35 struct sst_dma {
36 	struct sst_dsp *sst;
37 
38 	struct dw_dma_chip *chip;
39 
40 	struct dma_async_tx_descriptor *desc;
41 	struct dma_chan *ch;
42 };
43 
44 static inline void sst_memcpy32(volatile void __iomem *dest, void *src, u32 bytes)
45 {
46 	u32 tmp = 0;
47 	int i, m, n;
48 	const u8 *src_byte = src;
49 
50 	m = bytes / 4;
51 	n = bytes % 4;
52 
53 	/* __iowrite32_copy uses a 32-bit word count, so divide the byte count by 4 */
54 	__iowrite32_copy((void *)dest, src, m);
55 
56 	if (n) {
57 		for (i = 0; i < n; i++)
58 			tmp |= (u32)*(src_byte + m * 4 + i) << (i * 8);
59 		__iowrite32_copy((void *)(dest + m * 4), &tmp, 1);
60 	}
61 
62 }
63 
64 static void sst_dma_transfer_complete(void *arg)
65 {
66 	struct sst_dsp *sst = (struct sst_dsp *)arg;
67 
68 	dev_dbg(sst->dev, "DMA: callback\n");
69 }
70 
71 static int sst_dsp_dma_copy(struct sst_dsp *sst, dma_addr_t dest_addr,
72 	dma_addr_t src_addr, size_t size)
73 {
74 	struct dma_async_tx_descriptor *desc;
75 	struct sst_dma *dma = sst->dma;
76 
77 	if (dma->ch == NULL) {
78 		dev_err(sst->dev, "error: no DMA channel\n");
79 		return -ENODEV;
80 	}
81 
82 	dev_dbg(sst->dev, "DMA: src: 0x%lx dest 0x%lx size %zu\n",
83 		(unsigned long)src_addr, (unsigned long)dest_addr, size);
84 
85 	desc = dma->ch->device->device_prep_dma_memcpy(dma->ch, dest_addr,
86 		src_addr, size, DMA_CTRL_ACK);
87 	if (!desc) {
88 		dev_err(sst->dev, "error: dma prep memcpy failed\n");
89 		return -EINVAL;
90 	}
91 
92 	desc->callback = sst_dma_transfer_complete;
93 	desc->callback_param = sst;
94 
95 	desc->tx_submit(desc);
96 	dma_wait_for_async_tx(desc);
97 
98 	return 0;
99 }
100 
101 /* copy to DSP */
102 int sst_dsp_dma_copyto(struct sst_dsp *sst, dma_addr_t dest_addr,
103 	dma_addr_t src_addr, size_t size)
104 {
105 	return sst_dsp_dma_copy(sst, dest_addr | SST_HSW_MASK_DMA_ADDR_DSP,
106 			src_addr, size);
107 }
108 EXPORT_SYMBOL_GPL(sst_dsp_dma_copyto);
109 
110 /* copy from DSP */
111 int sst_dsp_dma_copyfrom(struct sst_dsp *sst, dma_addr_t dest_addr,
112 	dma_addr_t src_addr, size_t size)
113 {
114 	return sst_dsp_dma_copy(sst, dest_addr,
115 		src_addr | SST_HSW_MASK_DMA_ADDR_DSP, size);
116 }
117 EXPORT_SYMBOL_GPL(sst_dsp_dma_copyfrom);
118 
119 /* remove module from memory - callers hold locks */
120 static void block_list_remove(struct sst_dsp *dsp,
121 	struct list_head *block_list)
122 {
123 	struct sst_mem_block *block, *tmp;
124 	int err;
125 
126 	/* disable each block  */
127 	list_for_each_entry(block, block_list, module_list) {
128 
129 		if (block->ops && block->ops->disable) {
130 			err = block->ops->disable(block);
131 			if (err < 0)
132 				dev_err(dsp->dev,
133 					"error: can't disable block %d:%d\n",
134 					block->type, block->index);
135 		}
136 	}
137 
138 	/* mark each block as free */
139 	list_for_each_entry_safe(block, tmp, block_list, module_list) {
140 		list_del(&block->module_list);
141 		list_move(&block->list, &dsp->free_block_list);
142 		dev_dbg(dsp->dev, "block freed %d:%d at offset 0x%x\n",
143 			block->type, block->index, block->offset);
144 	}
145 }
146 
147 /* prepare the memory block to receive data from host - callers hold locks */
148 static int block_list_prepare(struct sst_dsp *dsp,
149 	struct list_head *block_list)
150 {
151 	struct sst_mem_block *block;
152 	int ret = 0;
153 
154 	/* enable each block so that it's ready for data */
155 	list_for_each_entry(block, block_list, module_list) {
156 
157 		if (block->ops && block->ops->enable && !block->users) {
158 			ret = block->ops->enable(block);
159 			if (ret < 0) {
160 				dev_err(dsp->dev,
161 					"error: can't enable block %d:%d\n",
162 					block->type, block->index);
163 				goto err;
164 			}
165 		}
166 	}
167 	return ret;
168 
169 err:
170 	list_for_each_entry(block, block_list, module_list) {
171 		if (block->ops && block->ops->disable)
172 			block->ops->disable(block);
173 	}
174 	return ret;
175 }
176 
177 static struct dw_dma_chip *dw_probe(struct device *dev, struct resource *mem,
178 	int irq)
179 {
180 	struct dw_dma_chip *chip;
181 	int err;
182 
183 	chip = devm_kzalloc(dev, sizeof(*chip), GFP_KERNEL);
184 	if (!chip)
185 		return ERR_PTR(-ENOMEM);
186 
187 	chip->irq = irq;
188 	chip->regs = devm_ioremap_resource(dev, mem);
189 	if (IS_ERR(chip->regs))
190 		return ERR_CAST(chip->regs);
191 
192 	err = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(31));
193 	if (err)
194 		return ERR_PTR(err);
195 
196 	chip->dev = dev;
197 
198 	err = dw_dma_probe(chip);
199 	if (err)
200 		return ERR_PTR(err);
201 
202 	return chip;
203 }
204 
205 static void dw_remove(struct dw_dma_chip *chip)
206 {
207 	dw_dma_remove(chip);
208 }
209 
210 static bool dma_chan_filter(struct dma_chan *chan, void *param)
211 {
212 	struct sst_dsp *dsp = (struct sst_dsp *)param;
213 
214 	return chan->device->dev == dsp->dma_dev;
215 }
216 
217 int sst_dsp_dma_get_channel(struct sst_dsp *dsp, int chan_id)
218 {
219 	struct sst_dma *dma = dsp->dma;
220 	struct dma_slave_config slave;
221 	dma_cap_mask_t mask;
222 	int ret;
223 
224 	dma_cap_zero(mask);
225 	dma_cap_set(DMA_SLAVE, mask);
226 	dma_cap_set(DMA_MEMCPY, mask);
227 
228 	dma->ch = dma_request_channel(mask, dma_chan_filter, dsp);
229 	if (dma->ch == NULL) {
230 		dev_err(dsp->dev, "error: DMA request channel failed\n");
231 		return -EIO;
232 	}
233 
234 	memset(&slave, 0, sizeof(slave));
235 	slave.direction = DMA_MEM_TO_DEV;
236 	slave.src_addr_width =
237 		slave.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
238 	slave.src_maxburst = slave.dst_maxburst = SST_DSP_DMA_MAX_BURST;
239 
240 	ret = dmaengine_slave_config(dma->ch, &slave);
241 	if (ret) {
242 		dev_err(dsp->dev, "error: unable to set DMA slave config %d\n",
243 			ret);
244 		dma_release_channel(dma->ch);
245 		dma->ch = NULL;
246 	}
247 
248 	return ret;
249 }
250 EXPORT_SYMBOL_GPL(sst_dsp_dma_get_channel);
251 
252 void sst_dsp_dma_put_channel(struct sst_dsp *dsp)
253 {
254 	struct sst_dma *dma = dsp->dma;
255 
256 	if (!dma->ch)
257 		return;
258 
259 	dma_release_channel(dma->ch);
260 	dma->ch = NULL;
261 }
262 EXPORT_SYMBOL_GPL(sst_dsp_dma_put_channel);
263 
264 static int sst_dma_new(struct sst_dsp *sst)
265 {
266 	struct sst_pdata *sst_pdata = sst->pdata;
267 	struct sst_dma *dma;
268 	struct resource mem;
269 	int ret = 0;
270 
271 	if (sst->pdata->resindex_dma_base == -1)
272 		/* DMA is not used, return and squelch error messages */
273 		return 0;
274 
275 	/* configure the correct platform data for whatever DMA engine
276 	 * is attached to the ADSP IP. */
277 	switch (sst->pdata->dma_engine) {
278 	case SST_DMA_TYPE_DW:
279 		break;
280 	default:
281 		dev_err(sst->dev, "error: invalid DMA engine %d\n",
282 			sst->pdata->dma_engine);
283 		return -EINVAL;
284 	}
285 
286 	dma = devm_kzalloc(sst->dev, sizeof(struct sst_dma), GFP_KERNEL);
287 	if (!dma)
288 		return -ENOMEM;
289 
290 	dma->sst = sst;
291 
292 	memset(&mem, 0, sizeof(mem));
293 
294 	mem.start = sst->addr.lpe_base + sst_pdata->dma_base;
295 	mem.end   = sst->addr.lpe_base + sst_pdata->dma_base + sst_pdata->dma_size - 1;
296 	mem.flags = IORESOURCE_MEM;
297 
298 	/* now register DMA engine device */
299 	dma->chip = dw_probe(sst->dma_dev, &mem, sst_pdata->irq);
300 	if (IS_ERR(dma->chip)) {
301 		dev_err(sst->dev, "error: DMA device register failed\n");
302 		ret = PTR_ERR(dma->chip);
303 		goto err_dma_dev;
304 	}
305 
306 	sst->dma = dma;
307 	sst->fw_use_dma = true;
308 	return 0;
309 
310 err_dma_dev:
311 	devm_kfree(sst->dev, dma);
312 	return ret;
313 }
314 
315 static void sst_dma_free(struct sst_dma *dma)
316 {
317 
318 	if (dma == NULL)
319 		return;
320 
321 	if (dma->ch)
322 		dma_release_channel(dma->ch);
323 
324 	if (dma->chip)
325 		dw_remove(dma->chip);
326 
327 }
328 
329 /* create new generic firmware object */
330 struct sst_fw *sst_fw_new(struct sst_dsp *dsp,
331 	const struct firmware *fw, void *private)
332 {
333 	struct sst_fw *sst_fw;
334 	int err;
335 
336 	if (!dsp->ops->parse_fw)
337 		return NULL;
338 
339 	sst_fw = kzalloc(sizeof(*sst_fw), GFP_KERNEL);
340 	if (sst_fw == NULL)
341 		return NULL;
342 
343 	sst_fw->dsp = dsp;
344 	sst_fw->private = private;
345 	sst_fw->size = fw->size;
346 
347 	/* allocate DMA buffer to store FW data */
348 	sst_fw->dma_buf = dma_alloc_coherent(dsp->dma_dev, sst_fw->size,
349 				&sst_fw->dmable_fw_paddr, GFP_KERNEL);
350 	if (!sst_fw->dma_buf) {
351 		dev_err(dsp->dev, "error: DMA alloc failed\n");
352 		kfree(sst_fw);
353 		return NULL;
354 	}
355 
356 	/* copy FW data to DMA-able memory */
357 	memcpy((void *)sst_fw->dma_buf, (void *)fw->data, fw->size);
358 
359 	if (dsp->fw_use_dma) {
360 		err = sst_dsp_dma_get_channel(dsp, 0);
361 		if (err < 0)
362 			goto chan_err;
363 	}
364 
365 	/* call core specific FW parser to load FW data into DSP */
366 	err = dsp->ops->parse_fw(sst_fw);
367 	if (err < 0) {
368 		dev_err(dsp->dev, "error: parse fw failed %d\n", err);
369 		goto parse_err;
370 	}
371 
372 	if (dsp->fw_use_dma)
373 		sst_dsp_dma_put_channel(dsp);
374 
375 	mutex_lock(&dsp->mutex);
376 	list_add(&sst_fw->list, &dsp->fw_list);
377 	mutex_unlock(&dsp->mutex);
378 
379 	return sst_fw;
380 
381 parse_err:
382 	if (dsp->fw_use_dma)
383 		sst_dsp_dma_put_channel(dsp);
384 chan_err:
385 	dma_free_coherent(dsp->dma_dev, sst_fw->size,
386 				sst_fw->dma_buf,
387 				sst_fw->dmable_fw_paddr);
388 	sst_fw->dma_buf = NULL;
389 	kfree(sst_fw);
390 	return NULL;
391 }
392 EXPORT_SYMBOL_GPL(sst_fw_new);
393 
394 int sst_fw_reload(struct sst_fw *sst_fw)
395 {
396 	struct sst_dsp *dsp = sst_fw->dsp;
397 	int ret;
398 
399 	dev_dbg(dsp->dev, "reloading firmware\n");
400 
401 	/* call core specific FW parser to load FW data into DSP */
402 	ret = dsp->ops->parse_fw(sst_fw);
403 	if (ret < 0)
404 		dev_err(dsp->dev, "error: parse fw failed %d\n", ret);
405 
406 	return ret;
407 }
408 EXPORT_SYMBOL_GPL(sst_fw_reload);
409 
410 void sst_fw_unload(struct sst_fw *sst_fw)
411 {
412 	struct sst_dsp *dsp = sst_fw->dsp;
413 	struct sst_module *module, *mtmp;
414 	struct sst_module_runtime *runtime, *rtmp;
415 
416 	dev_dbg(dsp->dev, "unloading firmware\n");
417 
418 	mutex_lock(&dsp->mutex);
419 
420 	/* check module by module */
421 	list_for_each_entry_safe(module, mtmp, &dsp->module_list, list) {
422 		if (module->sst_fw == sst_fw) {
423 
424 			/* remove runtime modules */
425 			list_for_each_entry_safe(runtime, rtmp, &module->runtime_list, list) {
426 
427 				block_list_remove(dsp, &runtime->block_list);
428 				list_del(&runtime->list);
429 				kfree(runtime);
430 			}
431 
432 			/* now remove the module */
433 			block_list_remove(dsp, &module->block_list);
434 			list_del(&module->list);
435 			kfree(module);
436 		}
437 	}
438 
439 	/* remove all scratch blocks */
440 	block_list_remove(dsp, &dsp->scratch_block_list);
441 
442 	mutex_unlock(&dsp->mutex);
443 }
444 EXPORT_SYMBOL_GPL(sst_fw_unload);
445 
446 /* free single firmware object */
447 void sst_fw_free(struct sst_fw *sst_fw)
448 {
449 	struct sst_dsp *dsp = sst_fw->dsp;
450 
451 	mutex_lock(&dsp->mutex);
452 	list_del(&sst_fw->list);
453 	mutex_unlock(&dsp->mutex);
454 
455 	if (sst_fw->dma_buf)
456 		dma_free_coherent(dsp->dma_dev, sst_fw->size, sst_fw->dma_buf,
457 			sst_fw->dmable_fw_paddr);
458 	kfree(sst_fw);
459 }
460 EXPORT_SYMBOL_GPL(sst_fw_free);
461 
462 /* free all firmware objects */
463 void sst_fw_free_all(struct sst_dsp *dsp)
464 {
465 	struct sst_fw *sst_fw, *t;
466 
467 	mutex_lock(&dsp->mutex);
468 	list_for_each_entry_safe(sst_fw, t, &dsp->fw_list, list) {
469 
470 		list_del(&sst_fw->list);
471 		dma_free_coherent(dsp->dev, sst_fw->size, sst_fw->dma_buf,
472 			sst_fw->dmable_fw_paddr);
473 		kfree(sst_fw);
474 	}
475 	mutex_unlock(&dsp->mutex);
476 }
477 EXPORT_SYMBOL_GPL(sst_fw_free_all);
478 
479 /* create a new SST generic module from FW template */
480 struct sst_module *sst_module_new(struct sst_fw *sst_fw,
481 	struct sst_module_template *template, void *private)
482 {
483 	struct sst_dsp *dsp = sst_fw->dsp;
484 	struct sst_module *sst_module;
485 
486 	sst_module = kzalloc(sizeof(*sst_module), GFP_KERNEL);
487 	if (sst_module == NULL)
488 		return NULL;
489 
490 	sst_module->id = template->id;
491 	sst_module->dsp = dsp;
492 	sst_module->sst_fw = sst_fw;
493 	sst_module->scratch_size = template->scratch_size;
494 	sst_module->persistent_size = template->persistent_size;
495 	sst_module->entry = template->entry;
496 	sst_module->state = SST_MODULE_STATE_UNLOADED;
497 
498 	INIT_LIST_HEAD(&sst_module->block_list);
499 	INIT_LIST_HEAD(&sst_module->runtime_list);
500 
501 	mutex_lock(&dsp->mutex);
502 	list_add(&sst_module->list, &dsp->module_list);
503 	mutex_unlock(&dsp->mutex);
504 
505 	return sst_module;
506 }
507 EXPORT_SYMBOL_GPL(sst_module_new);
508 
509 /* free firmware module and remove from available list */
510 void sst_module_free(struct sst_module *sst_module)
511 {
512 	struct sst_dsp *dsp = sst_module->dsp;
513 
514 	mutex_lock(&dsp->mutex);
515 	list_del(&sst_module->list);
516 	mutex_unlock(&dsp->mutex);
517 
518 	kfree(sst_module);
519 }
520 EXPORT_SYMBOL_GPL(sst_module_free);
521 
522 struct sst_module_runtime *sst_module_runtime_new(struct sst_module *module,
523 	int id, void *private)
524 {
525 	struct sst_dsp *dsp = module->dsp;
526 	struct sst_module_runtime *runtime;
527 
528 	runtime = kzalloc(sizeof(*runtime), GFP_KERNEL);
529 	if (runtime == NULL)
530 		return NULL;
531 
532 	runtime->id = id;
533 	runtime->dsp = dsp;
534 	runtime->module = module;
535 	INIT_LIST_HEAD(&runtime->block_list);
536 
537 	mutex_lock(&dsp->mutex);
538 	list_add(&runtime->list, &module->runtime_list);
539 	mutex_unlock(&dsp->mutex);
540 
541 	return runtime;
542 }
543 EXPORT_SYMBOL_GPL(sst_module_runtime_new);
544 
545 void sst_module_runtime_free(struct sst_module_runtime *runtime)
546 {
547 	struct sst_dsp *dsp = runtime->dsp;
548 
549 	mutex_lock(&dsp->mutex);
550 	list_del(&runtime->list);
551 	mutex_unlock(&dsp->mutex);
552 
553 	kfree(runtime);
554 }
555 EXPORT_SYMBOL_GPL(sst_module_runtime_free);
556 
557 static struct sst_mem_block *find_block(struct sst_dsp *dsp,
558 	struct sst_block_allocator *ba)
559 {
560 	struct sst_mem_block *block;
561 
562 	list_for_each_entry(block, &dsp->free_block_list, list) {
563 		if (block->type == ba->type && block->offset == ba->offset)
564 			return block;
565 	}
566 
567 	return NULL;
568 }
569 
570 /* Block allocator must be on block boundary */
571 static int block_alloc_contiguous(struct sst_dsp *dsp,
572 	struct sst_block_allocator *ba, struct list_head *block_list)
573 {
574 	struct list_head tmp = LIST_HEAD_INIT(tmp);
575 	struct sst_mem_block *block;
576 	u32 block_start = SST_HSW_BLOCK_ANY;
577 	int size = ba->size, offset = ba->offset;
578 
579 	while (ba->size > 0) {
580 
581 		block = find_block(dsp, ba);
582 		if (!block) {
583 			list_splice(&tmp, &dsp->free_block_list);
584 
585 			ba->size = size;
586 			ba->offset = offset;
587 			return -ENOMEM;
588 		}
589 
590 		list_move_tail(&block->list, &tmp);
591 		ba->offset += block->size;
592 		ba->size -= block->size;
593 	}
594 	ba->size = size;
595 	ba->offset = offset;
596 
597 	list_for_each_entry(block, &tmp, list) {
598 
599 		if (block->offset < block_start)
600 			block_start = block->offset;
601 
602 		list_add(&block->module_list, block_list);
603 
604 		dev_dbg(dsp->dev, "block allocated %d:%d at offset 0x%x\n",
605 			block->type, block->index, block->offset);
606 	}
607 
608 	list_splice(&tmp, &dsp->used_block_list);
609 	return 0;
610 }
611 
612 /* allocate first free DSP blocks for data - callers hold locks */
613 static int block_alloc(struct sst_dsp *dsp, struct sst_block_allocator *ba,
614 	struct list_head *block_list)
615 {
616 	struct sst_mem_block *block, *tmp;
617 	int ret = 0;
618 
619 	if (ba->size == 0)
620 		return 0;
621 
622 	/* find first free whole blocks that can hold module */
623 	list_for_each_entry_safe(block, tmp, &dsp->free_block_list, list) {
624 
625 		/* ignore blocks with wrong type */
626 		if (block->type != ba->type)
627 			continue;
628 
629 		if (ba->size > block->size)
630 			continue;
631 
632 		ba->offset = block->offset;
633 		block->bytes_used = ba->size % block->size;
634 		list_add(&block->module_list, block_list);
635 		list_move(&block->list, &dsp->used_block_list);
636 		dev_dbg(dsp->dev, "block allocated %d:%d at offset 0x%x\n",
637 			block->type, block->index, block->offset);
638 		return 0;
639 	}
640 
641 	/* then find free multiple blocks that can hold module */
642 	list_for_each_entry_safe(block, tmp, &dsp->free_block_list, list) {
643 
644 		/* ignore blocks with wrong type */
645 		if (block->type != ba->type)
646 			continue;
647 
648 		/* do we span > 1 blocks */
649 		if (ba->size > block->size) {
650 
651 			/* align ba to block boundary */
652 			ba->offset = block->offset;
653 
654 			ret = block_alloc_contiguous(dsp, ba, block_list);
655 			if (ret == 0)
656 				return ret;
657 
658 		}
659 	}
660 
661 	/* not enough free block space */
662 	return -ENOMEM;
663 }
664 
665 int sst_alloc_blocks(struct sst_dsp *dsp, struct sst_block_allocator *ba,
666 	struct list_head *block_list)
667 {
668 	int ret;
669 
670 	dev_dbg(dsp->dev, "block request 0x%x bytes at offset 0x%x type %d\n",
671 		ba->size, ba->offset, ba->type);
672 
673 	mutex_lock(&dsp->mutex);
674 
675 	ret = block_alloc(dsp, ba, block_list);
676 	if (ret < 0) {
677 		dev_err(dsp->dev, "error: can't alloc blocks %d\n", ret);
678 		goto out;
679 	}
680 
681 	/* prepare DSP blocks for module usage */
682 	ret = block_list_prepare(dsp, block_list);
683 	if (ret < 0)
684 		dev_err(dsp->dev, "error: prepare failed\n");
685 
686 out:
687 	mutex_unlock(&dsp->mutex);
688 	return ret;
689 }
690 EXPORT_SYMBOL_GPL(sst_alloc_blocks);
691 
692 int sst_free_blocks(struct sst_dsp *dsp, struct list_head *block_list)
693 {
694 	mutex_lock(&dsp->mutex);
695 	block_list_remove(dsp, block_list);
696 	mutex_unlock(&dsp->mutex);
697 	return 0;
698 }
699 EXPORT_SYMBOL_GPL(sst_free_blocks);
700 
701 /* allocate memory blocks for static module addresses - callers hold locks */
702 static int block_alloc_fixed(struct sst_dsp *dsp, struct sst_block_allocator *ba,
703 	struct list_head *block_list)
704 {
705 	struct sst_mem_block *block, *tmp;
706 	struct sst_block_allocator ba_tmp = *ba;
707 	u32 end = ba->offset + ba->size, block_end;
708 	int err;
709 
710 	/* only IRAM/DRAM blocks are managed */
711 	if (ba->type != SST_MEM_IRAM && ba->type != SST_MEM_DRAM)
712 		return 0;
713 
714 	/* are blocks already attached to this module */
715 	list_for_each_entry_safe(block, tmp, block_list, module_list) {
716 
717 		/* ignore blocks with wrong type */
718 		if (block->type != ba->type)
719 			continue;
720 
721 		block_end = block->offset + block->size;
722 
723 		/* find block that holds section */
724 		if (ba->offset >= block->offset && end <= block_end)
725 			return 0;
726 
727 		/* does block span more than 1 section */
728 		if (ba->offset >= block->offset && ba->offset < block_end) {
729 
730 			/* align ba to block boundary */
731 			ba_tmp.size -= block_end - ba->offset;
732 			ba_tmp.offset = block_end;
733 			err = block_alloc_contiguous(dsp, &ba_tmp, block_list);
734 			if (err < 0)
735 				return -ENOMEM;
736 
737 			/* module already owns blocks */
738 			return 0;
739 		}
740 	}
741 
742 	/* find first free blocks that can hold section in free list */
743 	list_for_each_entry_safe(block, tmp, &dsp->free_block_list, list) {
744 		block_end = block->offset + block->size;
745 
746 		/* ignore blocks with wrong type */
747 		if (block->type != ba->type)
748 			continue;
749 
750 		/* find block that holds section */
751 		if (ba->offset >= block->offset && end <= block_end) {
752 
753 			/* add block */
754 			list_move(&block->list, &dsp->used_block_list);
755 			list_add(&block->module_list, block_list);
756 			dev_dbg(dsp->dev, "block allocated %d:%d at offset 0x%x\n",
757 				block->type, block->index, block->offset);
758 			return 0;
759 		}
760 
761 		/* does block span more than 1 section */
762 		if (ba->offset >= block->offset && ba->offset < block_end) {
763 
764 			/* add block */
765 			list_move(&block->list, &dsp->used_block_list);
766 			list_add(&block->module_list, block_list);
767 			/* align ba to block boundary */
768 			ba_tmp.size -= block_end - ba->offset;
769 			ba_tmp.offset = block_end;
770 
771 			err = block_alloc_contiguous(dsp, &ba_tmp, block_list);
772 			if (err < 0)
773 				return -ENOMEM;
774 
775 			return 0;
776 		}
777 	}
778 
779 	return -ENOMEM;
780 }
781 
782 /* Load fixed module data into DSP memory blocks */
783 int sst_module_alloc_blocks(struct sst_module *module)
784 {
785 	struct sst_dsp *dsp = module->dsp;
786 	struct sst_fw *sst_fw = module->sst_fw;
787 	struct sst_block_allocator ba;
788 	int ret;
789 
790 	memset(&ba, 0, sizeof(ba));
791 	ba.size = module->size;
792 	ba.type = module->type;
793 	ba.offset = module->offset;
794 
795 	dev_dbg(dsp->dev, "block request 0x%x bytes at offset 0x%x type %d\n",
796 		ba.size, ba.offset, ba.type);
797 
798 	mutex_lock(&dsp->mutex);
799 
800 	/* alloc blocks that includes this section */
801 	ret = block_alloc_fixed(dsp, &ba, &module->block_list);
802 	if (ret < 0) {
803 		dev_err(dsp->dev,
804 			"error: no free blocks for section at offset 0x%x size 0x%x\n",
805 			module->offset, module->size);
806 		mutex_unlock(&dsp->mutex);
807 		return -ENOMEM;
808 	}
809 
810 	/* prepare DSP blocks for module copy */
811 	ret = block_list_prepare(dsp, &module->block_list);
812 	if (ret < 0) {
813 		dev_err(dsp->dev, "error: fw module prepare failed\n");
814 		goto err;
815 	}
816 
817 	/* copy partial module data to blocks */
818 	if (dsp->fw_use_dma) {
819 		ret = sst_dsp_dma_copyto(dsp,
820 			dsp->addr.lpe_base + module->offset,
821 			sst_fw->dmable_fw_paddr + module->data_offset,
822 			module->size);
823 		if (ret < 0) {
824 			dev_err(dsp->dev, "error: module copy failed\n");
825 			goto err;
826 		}
827 	} else
828 		sst_memcpy32(dsp->addr.lpe + module->offset, module->data,
829 			module->size);
830 
831 	mutex_unlock(&dsp->mutex);
832 	return ret;
833 
834 err:
835 	block_list_remove(dsp, &module->block_list);
836 	mutex_unlock(&dsp->mutex);
837 	return ret;
838 }
839 EXPORT_SYMBOL_GPL(sst_module_alloc_blocks);
840 
841 /* Unload entire module from DSP memory */
842 int sst_module_free_blocks(struct sst_module *module)
843 {
844 	struct sst_dsp *dsp = module->dsp;
845 
846 	mutex_lock(&dsp->mutex);
847 	block_list_remove(dsp, &module->block_list);
848 	mutex_unlock(&dsp->mutex);
849 	return 0;
850 }
851 EXPORT_SYMBOL_GPL(sst_module_free_blocks);
852 
853 int sst_module_runtime_alloc_blocks(struct sst_module_runtime *runtime,
854 	int offset)
855 {
856 	struct sst_dsp *dsp = runtime->dsp;
857 	struct sst_module *module = runtime->module;
858 	struct sst_block_allocator ba;
859 	int ret;
860 
861 	if (module->persistent_size == 0)
862 		return 0;
863 
864 	memset(&ba, 0, sizeof(ba));
865 	ba.size = module->persistent_size;
866 	ba.type = SST_MEM_DRAM;
867 
868 	mutex_lock(&dsp->mutex);
869 
870 	/* do we need to allocate at a fixed address? */
871 	if (offset != 0) {
872 
873 		ba.offset = offset;
874 
875 		dev_dbg(dsp->dev, "persistent fixed block request 0x%x bytes type %d offset 0x%x\n",
876 			ba.size, ba.type, ba.offset);
877 
878 		/* alloc blocks that includes this section */
879 		ret = block_alloc_fixed(dsp, &ba, &runtime->block_list);
880 
881 	} else {
882 		dev_dbg(dsp->dev, "persistent block request 0x%x bytes type %d\n",
883 			ba.size, ba.type);
884 
885 		/* alloc blocks that includes this section */
886 		ret = block_alloc(dsp, &ba, &runtime->block_list);
887 	}
888 	if (ret < 0) {
889 		dev_err(dsp->dev,
890 		"error: no free blocks for runtime module size 0x%x\n",
891 			module->persistent_size);
892 		mutex_unlock(&dsp->mutex);
893 		return -ENOMEM;
894 	}
895 	runtime->persistent_offset = ba.offset;
896 
897 	/* prepare DSP blocks for module copy */
898 	ret = block_list_prepare(dsp, &runtime->block_list);
899 	if (ret < 0) {
900 		dev_err(dsp->dev, "error: runtime block prepare failed\n");
901 		goto err;
902 	}
903 
904 	mutex_unlock(&dsp->mutex);
905 	return ret;
906 
907 err:
908 	block_list_remove(dsp, &module->block_list);
909 	mutex_unlock(&dsp->mutex);
910 	return ret;
911 }
912 EXPORT_SYMBOL_GPL(sst_module_runtime_alloc_blocks);
913 
914 int sst_module_runtime_free_blocks(struct sst_module_runtime *runtime)
915 {
916 	struct sst_dsp *dsp = runtime->dsp;
917 
918 	mutex_lock(&dsp->mutex);
919 	block_list_remove(dsp, &runtime->block_list);
920 	mutex_unlock(&dsp->mutex);
921 	return 0;
922 }
923 EXPORT_SYMBOL_GPL(sst_module_runtime_free_blocks);
924 
925 int sst_module_runtime_save(struct sst_module_runtime *runtime,
926 	struct sst_module_runtime_context *context)
927 {
928 	struct sst_dsp *dsp = runtime->dsp;
929 	struct sst_module *module = runtime->module;
930 	int ret = 0;
931 
932 	dev_dbg(dsp->dev, "saving runtime %d memory at 0x%x size 0x%x\n",
933 		runtime->id, runtime->persistent_offset,
934 		module->persistent_size);
935 
936 	context->buffer = dma_alloc_coherent(dsp->dma_dev,
937 		module->persistent_size,
938 		&context->dma_buffer, GFP_DMA | GFP_KERNEL);
939 	if (!context->buffer) {
940 		dev_err(dsp->dev, "error: DMA context alloc failed\n");
941 		return -ENOMEM;
942 	}
943 
944 	mutex_lock(&dsp->mutex);
945 
946 	if (dsp->fw_use_dma) {
947 
948 		ret = sst_dsp_dma_get_channel(dsp, 0);
949 		if (ret < 0)
950 			goto err;
951 
952 		ret = sst_dsp_dma_copyfrom(dsp, context->dma_buffer,
953 			dsp->addr.lpe_base + runtime->persistent_offset,
954 			module->persistent_size);
955 		sst_dsp_dma_put_channel(dsp);
956 		if (ret < 0) {
957 			dev_err(dsp->dev, "error: context copy failed\n");
958 			goto err;
959 		}
960 	} else
961 		sst_memcpy32(context->buffer, dsp->addr.lpe +
962 			runtime->persistent_offset,
963 			module->persistent_size);
964 
965 err:
966 	mutex_unlock(&dsp->mutex);
967 	return ret;
968 }
969 EXPORT_SYMBOL_GPL(sst_module_runtime_save);
970 
971 int sst_module_runtime_restore(struct sst_module_runtime *runtime,
972 	struct sst_module_runtime_context *context)
973 {
974 	struct sst_dsp *dsp = runtime->dsp;
975 	struct sst_module *module = runtime->module;
976 	int ret = 0;
977 
978 	dev_dbg(dsp->dev, "restoring runtime %d memory at 0x%x size 0x%x\n",
979 		runtime->id, runtime->persistent_offset,
980 		module->persistent_size);
981 
982 	mutex_lock(&dsp->mutex);
983 
984 	if (!context->buffer) {
985 		dev_info(dsp->dev, "no context buffer to restore\n");
986 		goto err;
987 	}
988 
989 	if (dsp->fw_use_dma) {
990 
991 		ret = sst_dsp_dma_get_channel(dsp, 0);
992 		if (ret < 0)
993 			goto err;
994 
995 		ret = sst_dsp_dma_copyto(dsp,
996 			dsp->addr.lpe_base + runtime->persistent_offset,
997 			context->dma_buffer, module->persistent_size);
998 		sst_dsp_dma_put_channel(dsp);
999 		if (ret < 0) {
1000 			dev_err(dsp->dev, "error: module copy failed\n");
1001 			goto err;
1002 		}
1003 	} else
1004 		sst_memcpy32(dsp->addr.lpe + runtime->persistent_offset,
1005 			context->buffer, module->persistent_size);
1006 
1007 	dma_free_coherent(dsp->dma_dev, module->persistent_size,
1008 				context->buffer, context->dma_buffer);
1009 	context->buffer = NULL;
1010 
1011 err:
1012 	mutex_unlock(&dsp->mutex);
1013 	return ret;
1014 }
1015 EXPORT_SYMBOL_GPL(sst_module_runtime_restore);
1016 
1017 /* register a DSP memory block for use with FW based modules */
1018 struct sst_mem_block *sst_mem_block_register(struct sst_dsp *dsp, u32 offset,
1019 	u32 size, enum sst_mem_type type, const struct sst_block_ops *ops,
1020 	u32 index, void *private)
1021 {
1022 	struct sst_mem_block *block;
1023 
1024 	block = kzalloc(sizeof(*block), GFP_KERNEL);
1025 	if (block == NULL)
1026 		return NULL;
1027 
1028 	block->offset = offset;
1029 	block->size = size;
1030 	block->index = index;
1031 	block->type = type;
1032 	block->dsp = dsp;
1033 	block->private = private;
1034 	block->ops = ops;
1035 
1036 	mutex_lock(&dsp->mutex);
1037 	list_add(&block->list, &dsp->free_block_list);
1038 	mutex_unlock(&dsp->mutex);
1039 
1040 	return block;
1041 }
1042 EXPORT_SYMBOL_GPL(sst_mem_block_register);
1043 
1044 /* unregister all DSP memory blocks */
1045 void sst_mem_block_unregister_all(struct sst_dsp *dsp)
1046 {
1047 	struct sst_mem_block *block, *tmp;
1048 
1049 	mutex_lock(&dsp->mutex);
1050 
1051 	/* unregister used blocks */
1052 	list_for_each_entry_safe(block, tmp, &dsp->used_block_list, list) {
1053 		list_del(&block->list);
1054 		kfree(block);
1055 	}
1056 
1057 	/* unregister free blocks */
1058 	list_for_each_entry_safe(block, tmp, &dsp->free_block_list, list) {
1059 		list_del(&block->list);
1060 		kfree(block);
1061 	}
1062 
1063 	mutex_unlock(&dsp->mutex);
1064 }
1065 EXPORT_SYMBOL_GPL(sst_mem_block_unregister_all);
1066 
1067 /* allocate scratch buffer blocks */
1068 int sst_block_alloc_scratch(struct sst_dsp *dsp)
1069 {
1070 	struct sst_module *module;
1071 	struct sst_block_allocator ba;
1072 	int ret;
1073 
1074 	mutex_lock(&dsp->mutex);
1075 
1076 	/* calculate required scratch size */
1077 	dsp->scratch_size = 0;
1078 	list_for_each_entry(module, &dsp->module_list, list) {
1079 		dev_dbg(dsp->dev, "module %d scratch req 0x%x bytes\n",
1080 			module->id, module->scratch_size);
1081 		if (dsp->scratch_size < module->scratch_size)
1082 			dsp->scratch_size = module->scratch_size;
1083 	}
1084 
1085 	dev_dbg(dsp->dev, "scratch buffer required is 0x%x bytes\n",
1086 		dsp->scratch_size);
1087 
1088 	if (dsp->scratch_size == 0) {
1089 		dev_info(dsp->dev, "no modules need scratch buffer\n");
1090 		mutex_unlock(&dsp->mutex);
1091 		return 0;
1092 	}
1093 
1094 	/* allocate blocks for module scratch buffers */
1095 	dev_dbg(dsp->dev, "allocating scratch blocks\n");
1096 
1097 	ba.size = dsp->scratch_size;
1098 	ba.type = SST_MEM_DRAM;
1099 
1100 	/* do we need to allocate at fixed offset */
1101 	if (dsp->scratch_offset != 0) {
1102 
1103 		ba.offset = dsp->scratch_offset;
1104 
1105 		dev_dbg(dsp->dev, "block request 0x%x bytes type %d at 0x%x\n",
1106 			ba.size, ba.type, ba.offset);
1107 
1108 		/* alloc blocks that includes this section */
1109 		ret = block_alloc_fixed(dsp, &ba, &dsp->scratch_block_list);
1110 
1111 	} else {
1112 		dev_dbg(dsp->dev, "block request 0x%x bytes type %d\n",
1113 			ba.size, ba.type);
1114 
1115 		ba.offset = 0;
1116 		ret = block_alloc(dsp, &ba, &dsp->scratch_block_list);
1117 	}
1118 	if (ret < 0) {
1119 		dev_err(dsp->dev, "error: can't alloc scratch blocks\n");
1120 		mutex_unlock(&dsp->mutex);
1121 		return ret;
1122 	}
1123 
1124 	ret = block_list_prepare(dsp, &dsp->scratch_block_list);
1125 	if (ret < 0) {
1126 		dev_err(dsp->dev, "error: scratch block prepare failed\n");
1127 		mutex_unlock(&dsp->mutex);
1128 		return ret;
1129 	}
1130 
1131 	/* assign the same offset of scratch to each module */
1132 	dsp->scratch_offset = ba.offset;
1133 	mutex_unlock(&dsp->mutex);
1134 	return dsp->scratch_size;
1135 }
1136 EXPORT_SYMBOL_GPL(sst_block_alloc_scratch);
1137 
1138 /* free all scratch blocks */
1139 void sst_block_free_scratch(struct sst_dsp *dsp)
1140 {
1141 	mutex_lock(&dsp->mutex);
1142 	block_list_remove(dsp, &dsp->scratch_block_list);
1143 	mutex_unlock(&dsp->mutex);
1144 }
1145 EXPORT_SYMBOL_GPL(sst_block_free_scratch);
1146 
1147 /* get a module from its unique ID */
1148 struct sst_module *sst_module_get_from_id(struct sst_dsp *dsp, u32 id)
1149 {
1150 	struct sst_module *module;
1151 
1152 	mutex_lock(&dsp->mutex);
1153 
1154 	list_for_each_entry(module, &dsp->module_list, list) {
1155 		if (module->id == id) {
1156 			mutex_unlock(&dsp->mutex);
1157 			return module;
1158 		}
1159 	}
1160 
1161 	mutex_unlock(&dsp->mutex);
1162 	return NULL;
1163 }
1164 EXPORT_SYMBOL_GPL(sst_module_get_from_id);
1165 
1166 struct sst_module_runtime *sst_module_runtime_get_from_id(
1167 	struct sst_module *module, u32 id)
1168 {
1169 	struct sst_module_runtime *runtime;
1170 	struct sst_dsp *dsp = module->dsp;
1171 
1172 	mutex_lock(&dsp->mutex);
1173 
1174 	list_for_each_entry(runtime, &module->runtime_list, list) {
1175 		if (runtime->id == id) {
1176 			mutex_unlock(&dsp->mutex);
1177 			return runtime;
1178 		}
1179 	}
1180 
1181 	mutex_unlock(&dsp->mutex);
1182 	return NULL;
1183 }
1184 EXPORT_SYMBOL_GPL(sst_module_runtime_get_from_id);
1185 
1186 /* returns block address in DSP address space */
1187 u32 sst_dsp_get_offset(struct sst_dsp *dsp, u32 offset,
1188 	enum sst_mem_type type)
1189 {
1190 	switch (type) {
1191 	case SST_MEM_IRAM:
1192 		return offset - dsp->addr.iram_offset +
1193 			dsp->addr.dsp_iram_offset;
1194 	case SST_MEM_DRAM:
1195 		return offset - dsp->addr.dram_offset +
1196 			dsp->addr.dsp_dram_offset;
1197 	default:
1198 		return 0;
1199 	}
1200 }
1201 EXPORT_SYMBOL_GPL(sst_dsp_get_offset);
1202 
1203 struct sst_dsp *sst_dsp_new(struct device *dev,
1204 	struct sst_dsp_device *sst_dev, struct sst_pdata *pdata)
1205 {
1206 	struct sst_dsp *sst;
1207 	int err;
1208 
1209 	dev_dbg(dev, "initialising audio DSP id 0x%x\n", pdata->id);
1210 
1211 	sst = devm_kzalloc(dev, sizeof(*sst), GFP_KERNEL);
1212 	if (sst == NULL)
1213 		return NULL;
1214 
1215 	spin_lock_init(&sst->spinlock);
1216 	mutex_init(&sst->mutex);
1217 	sst->dev = dev;
1218 	sst->dma_dev = pdata->dma_dev;
1219 	sst->thread_context = sst_dev->thread_context;
1220 	sst->sst_dev = sst_dev;
1221 	sst->id = pdata->id;
1222 	sst->irq = pdata->irq;
1223 	sst->ops = sst_dev->ops;
1224 	sst->pdata = pdata;
1225 	INIT_LIST_HEAD(&sst->used_block_list);
1226 	INIT_LIST_HEAD(&sst->free_block_list);
1227 	INIT_LIST_HEAD(&sst->module_list);
1228 	INIT_LIST_HEAD(&sst->fw_list);
1229 	INIT_LIST_HEAD(&sst->scratch_block_list);
1230 
1231 	/* Initialise SST Audio DSP */
1232 	if (sst->ops->init) {
1233 		err = sst->ops->init(sst, pdata);
1234 		if (err < 0)
1235 			return NULL;
1236 	}
1237 
1238 	/* Register the ISR */
1239 	err = request_threaded_irq(sst->irq, sst->ops->irq_handler,
1240 		sst_dev->thread, IRQF_SHARED, "AudioDSP", sst);
1241 	if (err)
1242 		goto irq_err;
1243 
1244 	err = sst_dma_new(sst);
1245 	if (err)  {
1246 		dev_err(dev, "sst_dma_new failed %d\n", err);
1247 		goto dma_err;
1248 	}
1249 
1250 	return sst;
1251 
1252 dma_err:
1253 	free_irq(sst->irq, sst);
1254 irq_err:
1255 	if (sst->ops->free)
1256 		sst->ops->free(sst);
1257 
1258 	return NULL;
1259 }
1260 EXPORT_SYMBOL_GPL(sst_dsp_new);
1261 
1262 void sst_dsp_free(struct sst_dsp *sst)
1263 {
1264 	free_irq(sst->irq, sst);
1265 	if (sst->ops->free)
1266 		sst->ops->free(sst);
1267 
1268 	sst_dma_free(sst->dma);
1269 }
1270 EXPORT_SYMBOL_GPL(sst_dsp_free);
1271 
1272 MODULE_DESCRIPTION("Intel SST Firmware Loader");
1273 MODULE_LICENSE("GPL v2");
1274