// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
//
// This file is provided under a dual BSD/GPLv2 license.  When using or
// redistributing this file, you may do so under either license.
//
// Copyright(c) 2018 Intel Corporation. All rights reserved.
//
// Authors: Liam Girdwood <liam.r.girdwood@linux.intel.com>
//	    Ranjani Sridharan <ranjani.sridharan@linux.intel.com>
//	    Rander Wang <rander.wang@intel.com>
//	    Keyon Jie <yang.jie@linux.intel.com>
//

/*
 * Hardware interface for HDA DSP code loader
 */

#include <linux/firmware.h>
#include <sound/hdaudio_ext.h>
#include <sound/hda_register.h>
#include <sound/sof.h>
#include "ext_manifest.h"
#include "../ops.h"
#include "hda.h"

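/*
 * ROM init/boot is retried up to HDA_FW_BOOT_ATTEMPTS times, and
 * HDA_CL_STREAM_FORMAT is the fixed stream format value programmed for the
 * code-loader DMA streams.
 */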
#define HDA_FW_BOOT_ATTEMPTS	3
#define HDA_CL_STREAM_FORMAT 0x40

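/*
 * Allocate a host DMA stream and SG buffer for code loading. The capture
 * direction is only used for the ICCMAX stream; for playback, SPIB is enabled
 * and programmed with the transfer size.
 */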
static struct hdac_ext_stream *cl_stream_prepare(struct snd_sof_dev *sdev, unsigned int format,
						 unsigned int size, struct snd_dma_buffer *dmab,
						 int direction)
{
	struct hdac_ext_stream *dsp_stream;
	struct hdac_stream *hstream;
	struct pci_dev *pci = to_pci_dev(sdev->dev);
	int ret;

	dsp_stream = hda_dsp_stream_get(sdev, direction, 0);

	if (!dsp_stream) {
		dev_err(sdev->dev, "error: no stream available\n");
		return ERR_PTR(-ENODEV);
	}
	hstream = &dsp_stream->hstream;
	hstream->substream = NULL;

	/* allocate DMA buffer */
	ret = snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV_SG, &pci->dev, size, dmab);
	if (ret < 0) {
		dev_err(sdev->dev, "error: memory alloc failed: %d\n", ret);
		goto out_put;
	}

	hstream->period_bytes = 0; /* initialize period_bytes */
	hstream->format_val = format;
	hstream->bufsize = size;

	if (direction == SNDRV_PCM_STREAM_CAPTURE) {
		ret = hda_dsp_iccmax_stream_hw_params(sdev, dsp_stream, dmab, NULL);
		if (ret < 0) {
			dev_err(sdev->dev, "error: iccmax stream prepare failed: %d\n", ret);
			goto out_free;
		}
	} else {
		ret = hda_dsp_stream_hw_params(sdev, dsp_stream, dmab, NULL);
		if (ret < 0) {
			dev_err(sdev->dev, "error: hdac prepare failed: %d\n", ret);
			goto out_free;
		}
		hda_dsp_stream_spib_config(sdev, dsp_stream, HDA_DSP_SPIB_ENABLE, size);
	}

	return dsp_stream;

out_free:
	snd_dma_free_pages(dmab);
out_put:
	hda_dsp_stream_put(sdev, direction, hstream->stream_tag);
	return ERR_PTR(ret);
}

/*
 * The first boot sequence has some extra steps:
 * power on all host managed cores, unstall/run only the boot core to boot the
 * DSP, then power down all non-boot cores (if any) that were powered on.
 */
static int cl_dsp_init(struct snd_sof_dev *sdev, int stream_tag)
{
	struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
	const struct sof_intel_dsp_desc *chip = hda->desc;
	unsigned int status;
	u32 flags;
	int ret;
	int i;

	/* step 1: power up the host managed cores */
	ret = snd_sof_dsp_core_power_up(sdev, chip->host_managed_cores_mask);
	if (ret < 0) {
		if (hda->boot_iteration == HDA_FW_BOOT_ATTEMPTS)
			dev_err(sdev->dev, "error: dsp core 0/1 power up failed\n");
		goto err;
	}

	/* DSP is powered up, set all SSPs to slave mode */
	for (i = 0; i < chip->ssp_count; i++) {
		snd_sof_dsp_update_bits_unlocked(sdev, HDA_DSP_BAR,
						 chip->ssp_base_offset
						 + i * SSP_DEV_MEM_SIZE
						 + SSP_SSC1_OFFSET,
						 SSP_SET_SLAVE,
						 SSP_SET_SLAVE);
	}

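	/*
	 * Note: the purge request below also encodes the (zero-based)
	 * code-loader stream tag, telling the ROM which host DMA stream
	 * will carry the firmware image.
	 */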
	/* step 2: purge FW request */
	snd_sof_dsp_write(sdev, HDA_DSP_BAR, chip->ipc_req,
			  chip->ipc_req_mask | (HDA_DSP_IPC_PURGE_FW |
			  ((stream_tag - 1) << 9)));

	/* step 3: unset core 0 reset state & unstall/run core 0 */
	ret = hda_dsp_core_run(sdev, chip->init_core_mask);
	if (ret < 0) {
		if (hda->boot_iteration == HDA_FW_BOOT_ATTEMPTS)
			dev_err(sdev->dev,
				"error: dsp core start failed %d\n", ret);
		ret = -EIO;
		goto err;
	}

	/* step 4: wait for IPC DONE bit from ROM */
	ret = snd_sof_dsp_read_poll_timeout(sdev, HDA_DSP_BAR,
					    chip->ipc_ack, status,
					    ((status & chip->ipc_ack_mask)
						    == chip->ipc_ack_mask),
					    HDA_DSP_REG_POLL_INTERVAL_US,
					    HDA_DSP_INIT_TIMEOUT_US);

	if (ret < 0) {
		if (hda->boot_iteration == HDA_FW_BOOT_ATTEMPTS)
			dev_err(sdev->dev,
				"error: %s: timeout for HIPCIE done\n",
				__func__);
		goto err;
	}

	/* set DONE bit to clear the reply IPC message */
	snd_sof_dsp_update_bits_forced(sdev, HDA_DSP_BAR,
				       chip->ipc_ack,
				       chip->ipc_ack_mask,
				       chip->ipc_ack_mask);

	/* step 5: power down cores that are no longer needed */
	ret = snd_sof_dsp_core_power_down(sdev, chip->host_managed_cores_mask &
					  ~(chip->init_core_mask));
	if (ret < 0) {
		if (hda->boot_iteration == HDA_FW_BOOT_ATTEMPTS)
			dev_err(sdev->dev,
				"error: dsp core x power down failed\n");
		goto err;
	}

	/* step 6: enable IPC interrupts */
	hda_dsp_ipc_int_enable(sdev);

	/* step 7: wait for ROM init */
	ret = snd_sof_dsp_read_poll_timeout(sdev, HDA_DSP_BAR,
					chip->rom_status_reg, status,
					((status & HDA_DSP_ROM_STS_MASK)
						== HDA_DSP_ROM_INIT),
					HDA_DSP_REG_POLL_INTERVAL_US,
					chip->rom_init_timeout *
					USEC_PER_MSEC);
	if (!ret)
		return 0;

	if (hda->boot_iteration == HDA_FW_BOOT_ATTEMPTS)
		dev_err(sdev->dev,
			"%s: timeout with rom_status_reg (%#x) read\n",
			__func__, chip->rom_status_reg);

err:
	flags = SOF_DBG_DUMP_REGS | SOF_DBG_DUMP_PCI | SOF_DBG_DUMP_MBOX;

	/* force error log level after max boot attempts */
	if (hda->boot_iteration == HDA_FW_BOOT_ATTEMPTS)
		flags |= SOF_DBG_DUMP_FORCE_ERR_LEVEL;

	hda_dsp_dump(sdev, flags);
	snd_sof_dsp_core_power_down(sdev, chip->host_managed_cores_mask);

	return ret;
}

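/*
 * Code-loader stream trigger: START arms the stream interrupt and kicks the
 * DMA directly; every other command falls through to the generic HDA stream
 * trigger.
 */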
static int cl_trigger(struct snd_sof_dev *sdev,
		      struct hdac_ext_stream *stream, int cmd)
{
	struct hdac_stream *hstream = &stream->hstream;
	int sd_offset = SOF_STREAM_SD_OFFSET(hstream);

	/* the code loader is a special case that reuses stream ops */
	switch (cmd) {
	case SNDRV_PCM_TRIGGER_START:
		snd_sof_dsp_update_bits(sdev, HDA_DSP_HDA_BAR, SOF_HDA_INTCTL,
					1 << hstream->index,
					1 << hstream->index);

		snd_sof_dsp_update_bits(sdev, HDA_DSP_HDA_BAR,
					sd_offset,
					SOF_HDA_SD_CTL_DMA_START |
					SOF_HDA_CL_DMA_SD_INT_MASK,
					SOF_HDA_SD_CTL_DMA_START |
					SOF_HDA_CL_DMA_SD_INT_MASK);

		hstream->running = true;
		return 0;
	default:
		return hda_dsp_stream_trigger(sdev, stream, cmd);
	}
}

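/*
 * Release the code-loader stream: stop or disarm the DMA, return the stream
 * to the pool, clear the BDL registers and free the DMA buffer.
 */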
static int cl_cleanup(struct snd_sof_dev *sdev, struct snd_dma_buffer *dmab,
		      struct hdac_ext_stream *stream)
{
	struct hdac_stream *hstream = &stream->hstream;
	int sd_offset = SOF_STREAM_SD_OFFSET(hstream);
	int ret = 0;

	if (hstream->direction == SNDRV_PCM_STREAM_PLAYBACK)
		ret = hda_dsp_stream_spib_config(sdev, stream, HDA_DSP_SPIB_DISABLE, 0);
	else
		snd_sof_dsp_update_bits(sdev, HDA_DSP_HDA_BAR, sd_offset,
					SOF_HDA_SD_CTL_DMA_START, 0);

	hda_dsp_stream_put(sdev, hstream->direction, hstream->stream_tag);
	hstream->running = 0;
	hstream->substream = NULL;

	/* reset BDL address */
	snd_sof_dsp_write(sdev, HDA_DSP_HDA_BAR,
			  sd_offset + SOF_HDA_ADSP_REG_CL_SD_BDLPL, 0);
	snd_sof_dsp_write(sdev, HDA_DSP_HDA_BAR,
			  sd_offset + SOF_HDA_ADSP_REG_CL_SD_BDLPU, 0);

	snd_sof_dsp_write(sdev, HDA_DSP_HDA_BAR, sd_offset, 0);
	snd_dma_free_pages(dmab);
	dmab->area = NULL;
	hstream->bufsize = 0;
	hstream->format_val = 0;

	return ret;
}

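/*
 * Transfer the firmware image: start the code-loader DMA and poll the ROM
 * status register until the downloaded firmware reports FW_ENTERED, then stop
 * the DMA. A DMA stop failure is only reported if the transfer itself
 * succeeded.
 */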
static int cl_copy_fw(struct snd_sof_dev *sdev, struct hdac_ext_stream *stream)
{
	struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
	const struct sof_intel_dsp_desc *chip = hda->desc;
	unsigned int reg;
	int ret, status;

	ret = cl_trigger(sdev, stream, SNDRV_PCM_TRIGGER_START);
	if (ret < 0) {
		dev_err(sdev->dev, "error: DMA trigger start failed\n");
		return ret;
	}

	status = snd_sof_dsp_read_poll_timeout(sdev, HDA_DSP_BAR,
					chip->rom_status_reg, reg,
					((reg & HDA_DSP_ROM_STS_MASK)
						== HDA_DSP_ROM_FW_ENTERED),
					HDA_DSP_REG_POLL_INTERVAL_US,
					HDA_DSP_BASEFW_TIMEOUT_US);

	/*
	 * even in case of errors we still need to stop the DMAs,
	 * but we return the initial error should the DMA stop also fail
	 */

	if (status < 0) {
		dev_err(sdev->dev,
			"%s: timeout with rom_status_reg (%#x) read\n",
			__func__, chip->rom_status_reg);
	}

	ret = cl_trigger(sdev, stream, SNDRV_PCM_TRIGGER_STOP);
	if (ret < 0) {
		dev_err(sdev->dev, "error: DMA trigger stop failed\n");
		if (!status)
			status = ret;
	}

	return status;
}

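/*
 * ICCMAX boot flow: wraps the regular firmware boot with an extra capture
 * stream (prepared before boot, cleaned up after) and saves/restores the
 * vendor-specific LTRP guardband value around the download.
 */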
int hda_dsp_cl_boot_firmware_iccmax(struct snd_sof_dev *sdev)
{
	struct snd_sof_pdata *plat_data = sdev->pdata;
	struct hdac_ext_stream *iccmax_stream;
	struct hdac_bus *bus = sof_to_bus(sdev);
	struct firmware stripped_firmware;
	int ret, ret1;
	u8 original_gb;

	/* save the original LTRP guardband value */
	original_gb = snd_hdac_chip_readb(bus, VS_LTRP) & HDA_VS_INTEL_LTRP_GB_MASK;

	if (plat_data->fw->size <= plat_data->fw_offset) {
		dev_err(sdev->dev, "error: firmware size must be greater than firmware offset\n");
		return -EINVAL;
	}

	stripped_firmware.size = plat_data->fw->size - plat_data->fw_offset;

	/* prepare capture stream for ICCMAX */
	iccmax_stream = cl_stream_prepare(sdev, HDA_CL_STREAM_FORMAT, stripped_firmware.size,
					  &sdev->dmab_bdl, SNDRV_PCM_STREAM_CAPTURE);
	if (IS_ERR(iccmax_stream)) {
		dev_err(sdev->dev, "error: dma prepare for ICCMAX stream failed\n");
		return PTR_ERR(iccmax_stream);
	}

	ret = hda_dsp_cl_boot_firmware(sdev);

	/*
	 * Perform iccmax stream cleanup. This should be done even if firmware loading fails.
	 * If the cleanup also fails, we return the initial error
	 */
	ret1 = cl_cleanup(sdev, &sdev->dmab_bdl, iccmax_stream);
	if (ret1 < 0) {
		dev_err(sdev->dev, "error: ICCMAX stream cleanup failed\n");

		/* set return value to indicate cleanup failure */
		if (!ret)
			ret = ret1;
	}

	/* restore the original guardband value after FW boot */
	snd_hdac_chip_updateb(bus, VS_LTRP, HDA_VS_INTEL_LTRP_GB_MASK, original_gb);

	return ret;
}

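/*
 * Boot the base firmware: copy the stripped image into the code-loader DMA
 * buffer, run the ROM init sequence (retrying up to HDA_FW_BOOT_ATTEMPTS
 * times), then transfer the firmware. On success the init core mask is
 * returned so the caller knows which core(s) are up.
 */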
int hda_dsp_cl_boot_firmware(struct snd_sof_dev *sdev)
{
	struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
	struct snd_sof_pdata *plat_data = sdev->pdata;
	const struct sof_dev_desc *desc = plat_data->desc;
	const struct sof_intel_dsp_desc *chip_info;
	struct hdac_ext_stream *stream;
	struct firmware stripped_firmware;
	int ret, ret1, i;

	chip_info = desc->chip_info;

	if (plat_data->fw->size <= plat_data->fw_offset) {
		dev_err(sdev->dev, "error: firmware size must be greater than firmware offset\n");
		return -EINVAL;
	}

	stripped_firmware.data = plat_data->fw->data + plat_data->fw_offset;
	stripped_firmware.size = plat_data->fw->size - plat_data->fw_offset;

	/* init for booting wait */
	init_waitqueue_head(&sdev->boot_wait);

	/* prepare DMA for code loader stream */
	stream = cl_stream_prepare(sdev, HDA_CL_STREAM_FORMAT, stripped_firmware.size,
				   &sdev->dmab, SNDRV_PCM_STREAM_PLAYBACK);
	if (IS_ERR(stream)) {
		dev_err(sdev->dev, "error: dma prepare for fw loading failed\n");
		return PTR_ERR(stream);
	}

	memcpy(sdev->dmab.area, stripped_firmware.data,
	       stripped_firmware.size);

	/* try ROM init a few times before giving up */
	for (i = 0; i < HDA_FW_BOOT_ATTEMPTS; i++) {
		dev_dbg(sdev->dev,
			"Attempting iteration %d of Core En/ROM load...\n", i);

		hda->boot_iteration = i + 1;
		ret = cl_dsp_init(sdev, stream->hstream.stream_tag);

		/* don't retry anymore if successful */
		if (!ret)
			break;
	}

	if (i == HDA_FW_BOOT_ATTEMPTS) {
		dev_err(sdev->dev, "error: dsp init failed after %d attempts with err: %d\n",
			i, ret);
		goto cleanup;
	}

	/*
	 * When a SoundWire link is in clock stop state, a Slave
	 * device may trigger in-band wakes for events such as jack
	 * insertion or acoustic event detection. This event will lead
	 * to a WAKEEN interrupt, handled by the PCI device and routed
	 * to PME if the PCI device is in D3. The resume function in
	 * the audio PCI driver will be invoked by ACPI for the PME event
	 * and will initialize the device and process the WAKEEN interrupt.
	 *
	 * The WAKEEN interrupt should be processed ASAP to prevent an
	 * interrupt flood, otherwise other interrupts, such as IPC,
	 * cannot work normally. The WAKEEN is handled after the ROM
	 * is initialized successfully, which ensures power rails are
	 * enabled before accessing the SoundWire SHIM registers.
	 */
	if (!sdev->first_boot)
		hda_sdw_process_wakeen(sdev);

	/*
	 * at this point DSP ROM has been initialized and
	 * should be ready for code loading and firmware boot
	 */
	ret = cl_copy_fw(sdev, stream);
	if (!ret) {
		dev_dbg(sdev->dev, "Firmware download successful, booting...\n");
	} else {
		hda_dsp_dump(sdev, SOF_DBG_DUMP_REGS | SOF_DBG_DUMP_PCI | SOF_DBG_DUMP_MBOX |
			     SOF_DBG_DUMP_FORCE_ERR_LEVEL);
		dev_err(sdev->dev, "error: load fw failed ret: %d\n", ret);
	}

cleanup:
	/*
	 * Perform codeloader stream cleanup.
	 * This should be done even if firmware loading fails.
	 * If the cleanup also fails, we return the initial error
	 */
	ret1 = cl_cleanup(sdev, &sdev->dmab, stream);
	if (ret1 < 0) {
		dev_err(sdev->dev, "error: Code loader DSP cleanup failed\n");

		/* set return value to indicate cleanup failure */
		if (!ret)
			ret = ret1;
	}

	/*
	 * return primary core id if both fw copy
	 * and stream clean up are successful
	 */
	if (!ret)
		return chip_info->init_core_mask;

	/* disable DSP */
	snd_sof_dsp_update_bits(sdev, HDA_DSP_PP_BAR,
				SOF_HDA_REG_PP_PPCTL,
				SOF_HDA_PPCTL_GPROCEN, 0);
	return ret;
}


/* pre fw run operations */
int hda_dsp_pre_fw_run(struct snd_sof_dev *sdev)
{
	/* disable clock gating and power gating */
	return hda_dsp_ctrl_clock_power_gating(sdev, false);
}

/* post fw run operations */
int hda_dsp_post_fw_run(struct snd_sof_dev *sdev)
{
	int ret;

	if (sdev->first_boot) {
		ret = hda_sdw_startup(sdev);
		if (ret < 0) {
			dev_err(sdev->dev,
				"error: could not startup SoundWire links\n");
			return ret;
		}
	}

	hda_sdw_int_enable(sdev, true);

	/* re-enable clock gating and power gating */
	return hda_dsp_ctrl_clock_power_gating(sdev, true);
}

/*
 * post fw run operations for ICL.
 * Core 3 will be powered up and kept in stall when HPRO is enabled.
 */
int hda_dsp_post_fw_run_icl(struct snd_sof_dev *sdev)
{
	struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
	int ret;

	if (sdev->first_boot) {
		ret = hda_sdw_startup(sdev);
		if (ret < 0) {
			dev_err(sdev->dev,
				"error: could not startup SoundWire links\n");
			return ret;
		}
	}

	hda_sdw_int_enable(sdev, true);

	/*
	 * The recommended HW programming sequence for ICL is to
	 * power up core 3 and keep it in stall if HPRO is enabled.
	 * Major difference between ICL and TGL: on ICL core 3 is managed by
	 * the host, whereas on TGL it is handled by the firmware.
	 */
	if (!hda->clk_config_lpro) {
		ret = snd_sof_dsp_core_power_up(sdev, BIT(3));
		if (ret < 0) {
			dev_err(sdev->dev, "error: dsp core power up failed on core 3\n");
			return ret;
		}

		snd_sof_dsp_stall(sdev, BIT(3));
	}

	/* re-enable clock gating and power gating */
	return hda_dsp_ctrl_clock_power_gating(sdev, true);
}

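/*
 * Parse the cAVS config data element from the firmware's extended manifest.
 * Only the LPRO/HPRO clock selection is consumed here; the mailbox size
 * tokens are recognised but ignored.
 */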
int hda_dsp_ext_man_get_cavs_config_data(struct snd_sof_dev *sdev,
					 const struct sof_ext_man_elem_header *hdr)
{
	const struct sof_ext_man_cavs_config_data *config_data =
		container_of(hdr, struct sof_ext_man_cavs_config_data, hdr);
	struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
	int i, elem_num;

	/* calculate total number of config data elements */
	elem_num = (hdr->size - sizeof(struct sof_ext_man_elem_header))
		   / sizeof(struct sof_config_elem);
	if (elem_num <= 0) {
		dev_err(sdev->dev, "cavs config data is inconsistent: %d\n", elem_num);
		return -EINVAL;
	}

	for (i = 0; i < elem_num; i++)
		switch (config_data->elems[i].token) {
		case SOF_EXT_MAN_CAVS_CONFIG_EMPTY:
			/* skip empty token */
			break;
		case SOF_EXT_MAN_CAVS_CONFIG_CAVS_LPRO:
			hda->clk_config_lpro = config_data->elems[i].value;
			dev_dbg(sdev->dev, "FW clock config: %s\n",
				hda->clk_config_lpro ? "LPRO" : "HPRO");
			break;
		case SOF_EXT_MAN_CAVS_CONFIG_OUTBOX_SIZE:
		case SOF_EXT_MAN_CAVS_CONFIG_INBOX_SIZE:
			/* these elements are defined but not used yet; no warning required */
			break;
		default:
			dev_info(sdev->dev, "unsupported token type: %d\n",
				 config_data->elems[i].token);
		}

	return 0;
}

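/*
 * Stall (without powering down) the requested cores; ICL-specific since core 3
 * is host managed there, as noted in hda_dsp_post_fw_run_icl() above.
 */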
int hda_dsp_core_stall_icl(struct snd_sof_dev *sdev, unsigned int core_mask)
{
	struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
	const struct sof_intel_dsp_desc *chip = hda->desc;

	/* make sure core_mask is within the host managed cores */
	core_mask &= chip->host_managed_cores_mask;
	if (!core_mask) {
		dev_err(sdev->dev, "error: core_mask is not in host managed cores\n");
		return -EINVAL;
	}

	/* stall core */
	snd_sof_dsp_update_bits_unlocked(sdev, HDA_DSP_BAR,
					 HDA_DSP_REG_ADSPCS,
					 HDA_DSP_ADSPCS_CSTALL_MASK(core_mask),
					 HDA_DSP_ADSPCS_CSTALL_MASK(core_mask));

	return 0;
}