1 /*
2 *
3 * Implementation of the primary ALSA driver code base for Intel HD Audio.
4 *
5 * Copyright(c) 2004 Intel Corporation. All rights reserved.
6 *
7 * Copyright (c) 2004 Takashi Iwai <tiwai@suse.de>
8 * PeiSen Hou <pshou@realtek.com.tw>
9 *
10 * This program is free software; you can redistribute it and/or modify it
11 * under the terms of the GNU General Public License as published by the Free
12 * Software Foundation; either version 2 of the License, or (at your option)
13 * any later version.
14 *
15 * This program is distributed in the hope that it will be useful, but WITHOUT
16 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
17 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
18 * more details.
19 *
20 *
21 */
22
23 #include <linux/clocksource.h>
24 #include <linux/delay.h>
25 #include <linux/interrupt.h>
26 #include <linux/kernel.h>
27 #include <linux/module.h>
28 #include <linux/pm_runtime.h>
29 #include <linux/slab.h>
30 #include <linux/reboot.h>
31 #include <sound/core.h>
32 #include <sound/initval.h>
33 #include "hda_priv.h"
34 #include "hda_controller.h"
35
36 #define CREATE_TRACE_POINTS
37 #include "hda_intel_trace.h"
38
39 /* DSP lock helpers */
40 #ifdef CONFIG_SND_HDA_DSP_LOADER
41 #define dsp_lock_init(dev) mutex_init(&(dev)->dsp_mutex)
42 #define dsp_lock(dev) mutex_lock(&(dev)->dsp_mutex)
43 #define dsp_unlock(dev) mutex_unlock(&(dev)->dsp_mutex)
44 #define dsp_is_locked(dev) ((dev)->locked)
45 #else
46 #define dsp_lock_init(dev) do {} while (0)
47 #define dsp_lock(dev) do {} while (0)
48 #define dsp_unlock(dev) do {} while (0)
49 #define dsp_is_locked(dev) 0
50 #endif
51
52 /*
53 * AZX stream operations.
54 */
55
56 /* start a stream */
57 static void azx_stream_start(struct azx *chip, struct azx_dev *azx_dev)
58 {
59 /*
60 * Before stream start, initialize parameter
61 */
62 azx_dev->insufficient = 1;
63
64 /* enable SIE */
65 azx_writel(chip, INTCTL,
66 azx_readl(chip, INTCTL) | (1 << azx_dev->index));
67 /* set DMA start and interrupt mask */
68 azx_sd_writeb(chip, azx_dev, SD_CTL,
69 azx_sd_readb(chip, azx_dev, SD_CTL) |
70 SD_CTL_DMA_START | SD_INT_MASK);
71 }
72
73 /* stop DMA */
74 static void azx_stream_clear(struct azx *chip, struct azx_dev *azx_dev)
75 {
76 azx_sd_writeb(chip, azx_dev, SD_CTL,
77 azx_sd_readb(chip, azx_dev, SD_CTL) &
78 ~(SD_CTL_DMA_START | SD_INT_MASK));
79 azx_sd_writeb(chip, azx_dev, SD_STS, SD_INT_MASK); /* to be sure */
80 }
81
82 /* stop a stream */
83 void azx_stream_stop(struct azx *chip, struct azx_dev *azx_dev)
84 {
85 azx_stream_clear(chip, azx_dev);
86 /* disable SIE */
87 azx_writel(chip, INTCTL,
88 azx_readl(chip, INTCTL) & ~(1 << azx_dev->index));
89 }
90 EXPORT_SYMBOL_GPL(azx_stream_stop);
91
92 /* reset stream */
93 static void azx_stream_reset(struct azx *chip, struct azx_dev *azx_dev)
94 {
95 unsigned char val;
96 int timeout;
97
98 azx_stream_clear(chip, azx_dev);
99
100 azx_sd_writeb(chip, azx_dev, SD_CTL,
101 azx_sd_readb(chip, azx_dev, SD_CTL) |
102 SD_CTL_STREAM_RESET);
103 udelay(3);
104 timeout = 300;
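/* wait until the hardware acknowledges that the stream entered reset */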
105 while (!((val = azx_sd_readb(chip, azx_dev, SD_CTL)) &
106 SD_CTL_STREAM_RESET) && --timeout)
107 ;
108 val &= ~SD_CTL_STREAM_RESET;
109 azx_sd_writeb(chip, azx_dev, SD_CTL, val);
110 udelay(3);
111
112 timeout = 300;
113 /* waiting for hardware to report that the stream is out of reset */
114 while (((val = azx_sd_readb(chip, azx_dev, SD_CTL)) &
115 SD_CTL_STREAM_RESET) && --timeout)
116 ;
117
118 /* reset first position - may not be synced with hw at this time */
119 *azx_dev->posbuf = 0;
120 }
121
122 /*
123 * set up the SD for streaming
124 */
125 static int azx_setup_controller(struct azx *chip, struct azx_dev *azx_dev)
126 {
127 unsigned int val;
128 /* make sure the run bit is zero for SD */
129 azx_stream_clear(chip, azx_dev);
130 /* program the stream_tag */
131 val = azx_sd_readl(chip, azx_dev, SD_CTL);
132 val = (val & ~SD_CTL_STREAM_TAG_MASK) |
133 (azx_dev->stream_tag << SD_CTL_STREAM_TAG_SHIFT);
134 if (!azx_snoop(chip))
135 val |= SD_CTL_TRAFFIC_PRIO;
136 azx_sd_writel(chip, azx_dev, SD_CTL, val);
137
138 /* program the length of samples in cyclic buffer */
139 azx_sd_writel(chip, azx_dev, SD_CBL, azx_dev->bufsize);
140
141 /* program the stream format */
142 /* this value needs to be the same as the format programmed to the codec */
143 azx_sd_writew(chip, azx_dev, SD_FORMAT, azx_dev->format_val);
144
145 /* program the stream LVI (last valid index) of the BDL */
146 azx_sd_writew(chip, azx_dev, SD_LVI, azx_dev->frags - 1);
147
148 /* program the BDL address */
149 /* lower BDL address */
150 azx_sd_writel(chip, azx_dev, SD_BDLPL, (u32)azx_dev->bdl.addr);
151 /* upper BDL address */
152 azx_sd_writel(chip, azx_dev, SD_BDLPU,
153 upper_32_bits(azx_dev->bdl.addr));
154
155 /* enable the position buffer */
156 if (chip->get_position[0] != azx_get_pos_lpib ||
157 chip->get_position[1] != azx_get_pos_lpib) {
158 if (!(azx_readl(chip, DPLBASE) & AZX_DPLBASE_ENABLE))
159 azx_writel(chip, DPLBASE,
160 (u32)chip->posbuf.addr | AZX_DPLBASE_ENABLE);
161 }
162
163 /* set the interrupt enable bits in the descriptor control register */
164 azx_sd_writel(chip, azx_dev, SD_CTL,
165 azx_sd_readl(chip, azx_dev, SD_CTL) | SD_INT_MASK);
166
167 return 0;
168 }
169
170 /* assign a stream for the PCM */
171 static inline struct azx_dev *
172 azx_assign_device(struct azx *chip, struct snd_pcm_substream *substream)
173 {
174 int dev, i, nums;
175 struct azx_dev *res = NULL;
176 /* make a non-zero unique key for the substream */
177 int key = (substream->pcm->device << 16) | (substream->number << 2) |
178 (substream->stream + 1);
179
180 if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
181 dev = chip->playback_index_offset;
182 nums = chip->playback_streams;
183 } else {
184 dev = chip->capture_index_offset;
185 nums = chip->capture_streams;
186 }
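/* prefer the stream that previously served this substream (matched by
 * the key above); otherwise fall back to any unused stream, or the last
 * unused one when AZX_DCAPS_REVERSE_ASSIGN is set
 */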
187 for (i = 0; i < nums; i++, dev++) {
188 struct azx_dev *azx_dev = &chip->azx_dev[dev];
189 dsp_lock(azx_dev);
190 if (!azx_dev->opened && !dsp_is_locked(azx_dev)) {
191 if (azx_dev->assigned_key == key) {
192 azx_dev->opened = 1;
193 azx_dev->assigned_key = key;
194 dsp_unlock(azx_dev);
195 return azx_dev;
196 }
197 if (!res ||
198 (chip->driver_caps & AZX_DCAPS_REVERSE_ASSIGN))
199 res = azx_dev;
200 }
201 dsp_unlock(azx_dev);
202 }
203 if (res) {
204 dsp_lock(res);
205 res->opened = 1;
206 res->assigned_key = key;
207 dsp_unlock(res);
208 }
209 return res;
210 }
211
212 /* release the assigned stream */
213 static inline void azx_release_device(struct azx_dev *azx_dev)
214 {
215 azx_dev->opened = 0;
216 }
217
218 static cycle_t azx_cc_read(const struct cyclecounter *cc)
219 {
220 struct azx_dev *azx_dev = container_of(cc, struct azx_dev, azx_cc);
221 struct snd_pcm_substream *substream = azx_dev->substream;
222 struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
223 struct azx *chip = apcm->chip;
224
225 return azx_readl(chip, WALLCLK);
226 }
227
228 static void azx_timecounter_init(struct snd_pcm_substream *substream,
229 bool force, cycle_t last)
230 {
231 struct azx_dev *azx_dev = get_azx_dev(substream);
232 struct timecounter *tc = &azx_dev->azx_tc;
233 struct cyclecounter *cc = &azx_dev->azx_cc;
234 u64 nsec;
235
236 cc->read = azx_cc_read;
237 cc->mask = CLOCKSOURCE_MASK(32);
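/* WALLCLK is a free-running 32-bit counter clocked at 24 MHz */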
238
239 /*
240 * Converting from 24 MHz to ns means applying a 125/3 factor.
241 * To avoid any saturation issues in intermediate operations,
242 * the 125 factor is applied first. The division is applied
243 * last after reading the timecounter value.
244 * Applying the 1/3 factor as part of the multiplication
245 * requires at least 20 bits for a decent precision, however
246 * overflows occur after about 4 hours or less, not an option.
247 */
248
249 cc->mult = 125; /* saturation after 195 years */
250 cc->shift = 0;
251
252 nsec = 0; /* audio time is elapsed time since trigger */
253 timecounter_init(tc, cc, nsec);
254 if (force)
255 /*
256 * force timecounter to use predefined value,
257 * used for synchronized starts
258 */
259 tc->cycle_last = last;
260 }
261
262 static u64 azx_adjust_codec_delay(struct snd_pcm_substream *substream,
263 u64 nsec)
264 {
265 struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
266 struct hda_pcm_stream *hinfo = apcm->hinfo[substream->stream];
267 u64 codec_frames, codec_nsecs;
268
269 if (!hinfo->ops.get_delay)
270 return nsec;
271
272 codec_frames = hinfo->ops.get_delay(hinfo, apcm->codec, substream);
273 codec_nsecs = div_u64(codec_frames * 1000000000LL,
274 substream->runtime->rate);
275
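/* capture: the codec delay adds to the elapsed time;
 * playback: queued samples have not left the codec yet, so subtract
 */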
276 if (substream->stream == SNDRV_PCM_STREAM_CAPTURE)
277 return nsec + codec_nsecs;
278
279 return (nsec > codec_nsecs) ? nsec - codec_nsecs : 0;
280 }
281
282 /*
283 * set up a BDL entry
284 */
285 static int setup_bdle(struct azx *chip,
286 struct snd_dma_buffer *dmab,
287 struct azx_dev *azx_dev, u32 **bdlp,
288 int ofs, int size, int with_ioc)
289 {
290 u32 *bdl = *bdlp;
291
292 while (size > 0) {
293 dma_addr_t addr;
294 int chunk;
295
296 if (azx_dev->frags >= AZX_MAX_BDL_ENTRIES)
297 return -EINVAL;
298
299 addr = snd_sgbuf_get_addr(dmab, ofs);
300 /* program the address field of the BDL entry */
301 bdl[0] = cpu_to_le32((u32)addr);
302 bdl[1] = cpu_to_le32(upper_32_bits(addr));
303 /* program the size field of the BDL entry */
304 chunk = snd_sgbuf_get_chunk_size(dmab, ofs, size);
305 /* one BDLE cannot cross 4K boundary on CTHDA chips */
306 if (chip->driver_caps & AZX_DCAPS_4K_BDLE_BOUNDARY) {
307 u32 remain = 0x1000 - (ofs & 0xfff);
308 if (chunk > remain)
309 chunk = remain;
310 }
311 bdl[2] = cpu_to_le32(chunk);
312 /* program the IOC to enable interrupt
313 * only when the whole fragment is processed
314 */
315 size -= chunk;
316 bdl[3] = (size || !with_ioc) ? 0 : cpu_to_le32(0x01);
317 bdl += 4;
318 azx_dev->frags++;
319 ofs += chunk;
320 }
321 *bdlp = bdl;
322 return ofs;
323 }
324
325 /*
326 * set up BDL entries
327 */
328 static int azx_setup_periods(struct azx *chip,
329 struct snd_pcm_substream *substream,
330 struct azx_dev *azx_dev)
331 {
332 u32 *bdl;
333 int i, ofs, periods, period_bytes;
334 int pos_adj = 0;
335
336 /* reset BDL address */
337 azx_sd_writel(chip, azx_dev, SD_BDLPL, 0);
338 azx_sd_writel(chip, azx_dev, SD_BDLPU, 0);
339
340 period_bytes = azx_dev->period_bytes;
341 periods = azx_dev->bufsize / period_bytes;
342
343 /* program the initial BDL entries */
344 bdl = (u32 *)azx_dev->bdl.area;
345 ofs = 0;
346 azx_dev->frags = 0;
347
348 if (chip->bdl_pos_adj)
349 pos_adj = chip->bdl_pos_adj[chip->dev_index];
350 if (!azx_dev->no_period_wakeup && pos_adj > 0) {
351 struct snd_pcm_runtime *runtime = substream->runtime;
352 int pos_align = pos_adj;
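/* scale the 48kHz-based adjustment to the stream rate, round it up to
 * a multiple of the original value, and carve out a leading BDL entry
 * of that many bytes
 */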
353 pos_adj = (pos_adj * runtime->rate + 47999) / 48000;
354 if (!pos_adj)
355 pos_adj = pos_align;
356 else
357 pos_adj = ((pos_adj + pos_align - 1) / pos_align) *
358 pos_align;
359 pos_adj = frames_to_bytes(runtime, pos_adj);
360 if (pos_adj >= period_bytes) {
361 dev_warn(chip->card->dev, "Too big adjustment %d\n",
362 pos_adj);
363 pos_adj = 0;
364 } else {
365 ofs = setup_bdle(chip, snd_pcm_get_dma_buf(substream),
366 azx_dev,
367 &bdl, ofs, pos_adj, true);
368 if (ofs < 0)
369 goto error;
370 }
371 } else
372 pos_adj = 0;
373
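/* if a leading adjustment entry was inserted above, shorten the last
 * period by pos_adj so the total still matches the buffer size
 */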
374 for (i = 0; i < periods; i++) {
375 if (i == periods - 1 && pos_adj)
376 ofs = setup_bdle(chip, snd_pcm_get_dma_buf(substream),
377 azx_dev, &bdl, ofs,
378 period_bytes - pos_adj, 0);
379 else
380 ofs = setup_bdle(chip, snd_pcm_get_dma_buf(substream),
381 azx_dev, &bdl, ofs,
382 period_bytes,
383 !azx_dev->no_period_wakeup);
384 if (ofs < 0)
385 goto error;
386 }
387 return 0;
388
389 error:
390 dev_err(chip->card->dev, "Too many BDL entries: buffer=%d, period=%d\n",
391 azx_dev->bufsize, period_bytes);
392 return -EINVAL;
393 }
394
395 /*
396 * PCM ops
397 */
398
399 static int azx_pcm_close(struct snd_pcm_substream *substream)
400 {
401 struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
402 struct hda_pcm_stream *hinfo = apcm->hinfo[substream->stream];
403 struct azx *chip = apcm->chip;
404 struct azx_dev *azx_dev = get_azx_dev(substream);
405 unsigned long flags;
406
407 mutex_lock(&chip->open_mutex);
408 spin_lock_irqsave(&chip->reg_lock, flags);
409 azx_dev->substream = NULL;
410 azx_dev->running = 0;
411 spin_unlock_irqrestore(&chip->reg_lock, flags);
412 azx_release_device(azx_dev);
413 hinfo->ops.close(hinfo, apcm->codec, substream);
414 snd_hda_power_down(apcm->codec);
415 mutex_unlock(&chip->open_mutex);
416 return 0;
417 }
418
419 static int azx_pcm_hw_params(struct snd_pcm_substream *substream,
420 struct snd_pcm_hw_params *hw_params)
421 {
422 struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
423 struct azx *chip = apcm->chip;
424 int ret;
425
426 dsp_lock(get_azx_dev(substream));
427 if (dsp_is_locked(get_azx_dev(substream))) {
428 ret = -EBUSY;
429 goto unlock;
430 }
431
432 ret = chip->ops->substream_alloc_pages(chip, substream,
433 params_buffer_bytes(hw_params));
434 unlock:
435 dsp_unlock(get_azx_dev(substream));
436 return ret;
437 }
438
439 static int azx_pcm_hw_free(struct snd_pcm_substream *substream)
440 {
441 struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
442 struct azx_dev *azx_dev = get_azx_dev(substream);
443 struct azx *chip = apcm->chip;
444 struct hda_pcm_stream *hinfo = apcm->hinfo[substream->stream];
445 int err;
446
447 /* reset BDL address */
448 dsp_lock(azx_dev);
449 if (!dsp_is_locked(azx_dev)) {
450 azx_sd_writel(chip, azx_dev, SD_BDLPL, 0);
451 azx_sd_writel(chip, azx_dev, SD_BDLPU, 0);
452 azx_sd_writel(chip, azx_dev, SD_CTL, 0);
453 azx_dev->bufsize = 0;
454 azx_dev->period_bytes = 0;
455 azx_dev->format_val = 0;
456 }
457
458 snd_hda_codec_cleanup(apcm->codec, hinfo, substream);
459
460 err = chip->ops->substream_free_pages(chip, substream);
461 azx_dev->prepared = 0;
462 dsp_unlock(azx_dev);
463 return err;
464 }
465
466 static int azx_pcm_prepare(struct snd_pcm_substream *substream)
467 {
468 struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
469 struct azx *chip = apcm->chip;
470 struct azx_dev *azx_dev = get_azx_dev(substream);
471 struct hda_pcm_stream *hinfo = apcm->hinfo[substream->stream];
472 struct snd_pcm_runtime *runtime = substream->runtime;
473 unsigned int bufsize, period_bytes, format_val, stream_tag;
474 int err;
475 struct hda_spdif_out *spdif =
476 snd_hda_spdif_out_of_nid(apcm->codec, hinfo->nid);
477 unsigned short ctls = spdif ? spdif->ctls : 0;
478
479 dsp_lock(azx_dev);
480 if (dsp_is_locked(azx_dev)) {
481 err = -EBUSY;
482 goto unlock;
483 }
484
485 azx_stream_reset(chip, azx_dev);
486 format_val = snd_hda_calc_stream_format(apcm->codec,
487 runtime->rate,
488 runtime->channels,
489 runtime->format,
490 hinfo->maxbps,
491 ctls);
492 if (!format_val) {
493 dev_err(chip->card->dev,
494 "invalid format_val, rate=%d, ch=%d, format=%d\n",
495 runtime->rate, runtime->channels, runtime->format);
496 err = -EINVAL;
497 goto unlock;
498 }
499
500 bufsize = snd_pcm_lib_buffer_bytes(substream);
501 period_bytes = snd_pcm_lib_period_bytes(substream);
502
503 dev_dbg(chip->card->dev, "azx_pcm_prepare: bufsize=0x%x, format=0x%x\n",
504 bufsize, format_val);
505
506 if (bufsize != azx_dev->bufsize ||
507 period_bytes != azx_dev->period_bytes ||
508 format_val != azx_dev->format_val ||
509 runtime->no_period_wakeup != azx_dev->no_period_wakeup) {
510 azx_dev->bufsize = bufsize;
511 azx_dev->period_bytes = period_bytes;
512 azx_dev->format_val = format_val;
513 azx_dev->no_period_wakeup = runtime->no_period_wakeup;
514 err = azx_setup_periods(chip, substream, azx_dev);
515 if (err < 0)
516 goto unlock;
517 }
518
519 /* when LPIB delay correction gives a small negative value,
520 * we ignore it; currently set the threshold statically to
521 * 64 frames
522 */
523 if (runtime->period_size > 64)
524 azx_dev->delay_negative_threshold = -frames_to_bytes(runtime, 64);
525 else
526 azx_dev->delay_negative_threshold = 0;
527
528 /* wallclk has 24Mhz clock source */
529 azx_dev->period_wallclk = (((runtime->period_size * 24000) /
530 runtime->rate) * 1000);
531 azx_setup_controller(chip, azx_dev);
532 if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
533 azx_dev->fifo_size =
534 azx_sd_readw(chip, azx_dev, SD_FIFOSIZE) + 1;
535 else
536 azx_dev->fifo_size = 0;
537
538 stream_tag = azx_dev->stream_tag;
539 /* CA-IBG chips need the playback stream starting from 1 */
540 if ((chip->driver_caps & AZX_DCAPS_CTX_WORKAROUND) &&
541 stream_tag > chip->capture_streams)
542 stream_tag -= chip->capture_streams;
543 err = snd_hda_codec_prepare(apcm->codec, hinfo, stream_tag,
544 azx_dev->format_val, substream);
545
546 unlock:
547 if (!err)
548 azx_dev->prepared = 1;
549 dsp_unlock(azx_dev);
550 return err;
551 }
552
553 static int azx_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
554 {
555 struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
556 struct azx *chip = apcm->chip;
557 struct azx_dev *azx_dev;
558 struct snd_pcm_substream *s;
559 int rstart = 0, start, nsync = 0, sbits = 0;
560 int nwait, timeout;
561
562 azx_dev = get_azx_dev(substream);
563 trace_azx_pcm_trigger(chip, azx_dev, cmd);
564
565 if (dsp_is_locked(azx_dev) || !azx_dev->prepared)
566 return -EPIPE;
567
568 switch (cmd) {
569 case SNDRV_PCM_TRIGGER_START:
570 rstart = 1;
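/* fall through */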
571 case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
572 case SNDRV_PCM_TRIGGER_RESUME:
573 start = 1;
574 break;
575 case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
576 case SNDRV_PCM_TRIGGER_SUSPEND:
577 case SNDRV_PCM_TRIGGER_STOP:
578 start = 0;
579 break;
580 default:
581 return -EINVAL;
582 }
583
584 snd_pcm_group_for_each_entry(s, substream) {
585 if (s->pcm->card != substream->pcm->card)
586 continue;
587 azx_dev = get_azx_dev(s);
588 sbits |= 1 << azx_dev->index;
589 nsync++;
590 snd_pcm_trigger_done(s, substream);
591 }
592
593 spin_lock(&chip->reg_lock);
594
595 /* first, set SYNC bits of corresponding streams */
596 if (chip->driver_caps & AZX_DCAPS_OLD_SSYNC)
597 azx_writel(chip, OLD_SSYNC,
598 azx_readl(chip, OLD_SSYNC) | sbits);
599 else
600 azx_writel(chip, SSYNC, azx_readl(chip, SSYNC) | sbits);
601
602 snd_pcm_group_for_each_entry(s, substream) {
603 if (s->pcm->card != substream->pcm->card)
604 continue;
605 azx_dev = get_azx_dev(s);
606 if (start) {
607 azx_dev->start_wallclk = azx_readl(chip, WALLCLK);
608 if (!rstart)
609 azx_dev->start_wallclk -=
610 azx_dev->period_wallclk;
611 azx_stream_start(chip, azx_dev);
612 } else {
613 azx_stream_stop(chip, azx_dev);
614 }
615 azx_dev->running = start;
616 }
617 spin_unlock(&chip->reg_lock);
618 if (start) {
619 /* wait until all FIFOs get ready */
620 for (timeout = 5000; timeout; timeout--) {
621 nwait = 0;
622 snd_pcm_group_for_each_entry(s, substream) {
623 if (s->pcm->card != substream->pcm->card)
624 continue;
625 azx_dev = get_azx_dev(s);
626 if (!(azx_sd_readb(chip, azx_dev, SD_STS) &
627 SD_STS_FIFO_READY))
628 nwait++;
629 }
630 if (!nwait)
631 break;
632 cpu_relax();
633 }
634 } else {
635 /* wait until all RUN bits are cleared */
636 for (timeout = 5000; timeout; timeout--) {
637 nwait = 0;
638 snd_pcm_group_for_each_entry(s, substream) {
639 if (s->pcm->card != substream->pcm->card)
640 continue;
641 azx_dev = get_azx_dev(s);
642 if (azx_sd_readb(chip, azx_dev, SD_CTL) &
643 SD_CTL_DMA_START)
644 nwait++;
645 }
646 if (!nwait)
647 break;
648 cpu_relax();
649 }
650 }
651 spin_lock(&chip->reg_lock);
652 /* reset SYNC bits */
653 if (chip->driver_caps & AZX_DCAPS_OLD_SSYNC)
654 azx_writel(chip, OLD_SSYNC,
655 azx_readl(chip, OLD_SSYNC) & ~sbits);
656 else
657 azx_writel(chip, SSYNC, azx_readl(chip, SSYNC) & ~sbits);
658 if (start) {
659 azx_timecounter_init(substream, 0, 0);
660 if (nsync > 1) {
661 cycle_t cycle_last;
662
663 /* same start cycle for master and group */
664 azx_dev = get_azx_dev(substream);
665 cycle_last = azx_dev->azx_tc.cycle_last;
666
667 snd_pcm_group_for_each_entry(s, substream) {
668 if (s->pcm->card != substream->pcm->card)
669 continue;
670 azx_timecounter_init(s, 1, cycle_last);
671 }
672 }
673 }
674 spin_unlock(&chip->reg_lock);
675 return 0;
676 }
677
678 unsigned int azx_get_pos_lpib(struct azx *chip, struct azx_dev *azx_dev)
679 {
680 return azx_sd_readl(chip, azx_dev, SD_LPIB);
681 }
682 EXPORT_SYMBOL_GPL(azx_get_pos_lpib);
683
684 unsigned int azx_get_pos_posbuf(struct azx *chip, struct azx_dev *azx_dev)
685 {
686 return le32_to_cpu(*azx_dev->posbuf);
687 }
688 EXPORT_SYMBOL_GPL(azx_get_pos_posbuf);
689
690 unsigned int azx_get_position(struct azx *chip,
691 struct azx_dev *azx_dev)
692 {
693 struct snd_pcm_substream *substream = azx_dev->substream;
694 unsigned int pos;
695 int stream = substream->stream;
696 int delay = 0;
697
698 if (chip->get_position[stream])
699 pos = chip->get_position[stream](chip, azx_dev);
700 else /* use the position buffer as default */
701 pos = azx_get_pos_posbuf(chip, azx_dev);
702
703 if (pos >= azx_dev->bufsize)
704 pos = 0;
705
706 if (substream->runtime) {
707 struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
708 struct hda_pcm_stream *hinfo = apcm->hinfo[stream];
709
710 if (chip->get_delay[stream])
711 delay += chip->get_delay[stream](chip, azx_dev, pos);
712 if (hinfo->ops.get_delay)
713 delay += hinfo->ops.get_delay(hinfo, apcm->codec,
714 substream);
715 substream->runtime->delay = delay;
716 }
717
718 trace_azx_get_position(chip, azx_dev, pos, delay);
719 return pos;
720 }
721 EXPORT_SYMBOL_GPL(azx_get_position);
722
723 static snd_pcm_uframes_t azx_pcm_pointer(struct snd_pcm_substream *substream)
724 {
725 struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
726 struct azx *chip = apcm->chip;
727 struct azx_dev *azx_dev = get_azx_dev(substream);
728 return bytes_to_frames(substream->runtime,
729 azx_get_position(chip, azx_dev));
730 }
731
732 static int azx_get_wallclock_tstamp(struct snd_pcm_substream *substream,
733 struct timespec *ts)
734 {
735 struct azx_dev *azx_dev = get_azx_dev(substream);
736 u64 nsec;
737
738 nsec = timecounter_read(&azx_dev->azx_tc);
739 nsec = div_u64(nsec, 3); /* can be optimized */
740 nsec = azx_adjust_codec_delay(substream, nsec);
741
742 *ts = ns_to_timespec(nsec);
743
744 return 0;
745 }
746
747 static struct snd_pcm_hardware azx_pcm_hw = {
748 .info = (SNDRV_PCM_INFO_MMAP |
749 SNDRV_PCM_INFO_INTERLEAVED |
750 SNDRV_PCM_INFO_BLOCK_TRANSFER |
751 SNDRV_PCM_INFO_MMAP_VALID |
752 /* No full-resume yet implemented */
753 /* SNDRV_PCM_INFO_RESUME |*/
754 SNDRV_PCM_INFO_PAUSE |
755 SNDRV_PCM_INFO_SYNC_START |
756 SNDRV_PCM_INFO_HAS_WALL_CLOCK |
757 SNDRV_PCM_INFO_NO_PERIOD_WAKEUP),
758 .formats = SNDRV_PCM_FMTBIT_S16_LE,
759 .rates = SNDRV_PCM_RATE_48000,
760 .rate_min = 48000,
761 .rate_max = 48000,
762 .channels_min = 2,
763 .channels_max = 2,
764 .buffer_bytes_max = AZX_MAX_BUF_SIZE,
765 .period_bytes_min = 128,
766 .period_bytes_max = AZX_MAX_BUF_SIZE / 2,
767 .periods_min = 2,
768 .periods_max = AZX_MAX_FRAG,
769 .fifo_size = 0,
770 };
771
772 static int azx_pcm_open(struct snd_pcm_substream *substream)
773 {
774 struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
775 struct hda_pcm_stream *hinfo = apcm->hinfo[substream->stream];
776 struct azx *chip = apcm->chip;
777 struct azx_dev *azx_dev;
778 struct snd_pcm_runtime *runtime = substream->runtime;
779 unsigned long flags;
780 int err;
781 int buff_step;
782
783 mutex_lock(&chip->open_mutex);
784 azx_dev = azx_assign_device(chip, substream);
785 if (azx_dev == NULL) {
786 mutex_unlock(&chip->open_mutex);
787 return -EBUSY;
788 }
789 runtime->hw = azx_pcm_hw;
790 runtime->hw.channels_min = hinfo->channels_min;
791 runtime->hw.channels_max = hinfo->channels_max;
792 runtime->hw.formats = hinfo->formats;
793 runtime->hw.rates = hinfo->rates;
794 snd_pcm_limit_hw_rates(runtime);
795 snd_pcm_hw_constraint_integer(runtime, SNDRV_PCM_HW_PARAM_PERIODS);
796
797 /* avoid wrap-around with wall-clock */
798 snd_pcm_hw_constraint_minmax(runtime, SNDRV_PCM_HW_PARAM_BUFFER_TIME,
799 20,
800 178000000);
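/* ~178 s: the 32-bit wall clock at 24 MHz wraps after about 178.9 seconds */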
801
802 if (chip->align_buffer_size)
803 /* constrain buffer sizes to be multiple of 128
804 bytes. This is more efficient in terms of memory
805 access but isn't required by the HDA spec and
806 prevents users from specifying exact period/buffer
807 sizes. For example for 44.1kHz, a period size set
808 to 20ms will be rounded to 19.59ms. */
809 buff_step = 128;
810 else
811 /* Don't enforce steps on buffer sizes, still need to
812 be multiple of 4 bytes (HDA spec). Tested on Intel
813 HDA controllers, may not work on all devices where
814 option needs to be disabled */
815 buff_step = 4;
816
817 snd_pcm_hw_constraint_step(runtime, 0, SNDRV_PCM_HW_PARAM_BUFFER_BYTES,
818 buff_step);
819 snd_pcm_hw_constraint_step(runtime, 0, SNDRV_PCM_HW_PARAM_PERIOD_BYTES,
820 buff_step);
821 snd_hda_power_up_d3wait(apcm->codec);
822 err = hinfo->ops.open(hinfo, apcm->codec, substream);
823 if (err < 0) {
824 azx_release_device(azx_dev);
825 snd_hda_power_down(apcm->codec);
826 mutex_unlock(&chip->open_mutex);
827 return err;
828 }
829 snd_pcm_limit_hw_rates(runtime);
830 /* sanity check */
831 if (snd_BUG_ON(!runtime->hw.channels_min) ||
832 snd_BUG_ON(!runtime->hw.channels_max) ||
833 snd_BUG_ON(!runtime->hw.formats) ||
834 snd_BUG_ON(!runtime->hw.rates)) {
835 azx_release_device(azx_dev);
836 hinfo->ops.close(hinfo, apcm->codec, substream);
837 snd_hda_power_down(apcm->codec);
838 mutex_unlock(&chip->open_mutex);
839 return -EINVAL;
840 }
841
842 /* disable WALLCLOCK timestamps for capture streams
843 until we figure out how to handle digital inputs */
844 if (substream->stream == SNDRV_PCM_STREAM_CAPTURE)
845 runtime->hw.info &= ~SNDRV_PCM_INFO_HAS_WALL_CLOCK;
846
847 spin_lock_irqsave(&chip->reg_lock, flags);
848 azx_dev->substream = substream;
849 azx_dev->running = 0;
850 spin_unlock_irqrestore(&chip->reg_lock, flags);
851
852 runtime->private_data = azx_dev;
853 snd_pcm_set_sync(substream);
854 mutex_unlock(&chip->open_mutex);
855 return 0;
856 }
857
858 static int azx_pcm_mmap(struct snd_pcm_substream *substream,
859 struct vm_area_struct *area)
860 {
861 struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
862 struct azx *chip = apcm->chip;
863 if (chip->ops->pcm_mmap_prepare)
864 chip->ops->pcm_mmap_prepare(substream, area);
865 return snd_pcm_lib_default_mmap(substream, area);
866 }
867
868 static struct snd_pcm_ops azx_pcm_ops = {
869 .open = azx_pcm_open,
870 .close = azx_pcm_close,
871 .ioctl = snd_pcm_lib_ioctl,
872 .hw_params = azx_pcm_hw_params,
873 .hw_free = azx_pcm_hw_free,
874 .prepare = azx_pcm_prepare,
875 .trigger = azx_pcm_trigger,
876 .pointer = azx_pcm_pointer,
877 .wall_clock = azx_get_wallclock_tstamp,
878 .mmap = azx_pcm_mmap,
879 .page = snd_pcm_sgbuf_ops_page,
880 };
881
882 static void azx_pcm_free(struct snd_pcm *pcm)
883 {
884 struct azx_pcm *apcm = pcm->private_data;
885 if (apcm) {
886 list_del(&apcm->list);
887 kfree(apcm);
888 }
889 }
890
891 #define MAX_PREALLOC_SIZE (32 * 1024 * 1024)
892
893 static int azx_attach_pcm_stream(struct hda_bus *bus, struct hda_codec *codec,
894 struct hda_pcm *cpcm)
895 {
896 struct azx *chip = bus->private_data;
897 struct snd_pcm *pcm;
898 struct azx_pcm *apcm;
899 int pcm_dev = cpcm->device;
900 unsigned int size;
901 int s, err;
902
903 list_for_each_entry(apcm, &chip->pcm_list, list) {
904 if (apcm->pcm->device == pcm_dev) {
905 dev_err(chip->card->dev, "PCM %d already exists\n",
906 pcm_dev);
907 return -EBUSY;
908 }
909 }
910 err = snd_pcm_new(chip->card, cpcm->name, pcm_dev,
911 cpcm->stream[SNDRV_PCM_STREAM_PLAYBACK].substreams,
912 cpcm->stream[SNDRV_PCM_STREAM_CAPTURE].substreams,
913 &pcm);
914 if (err < 0)
915 return err;
916 strlcpy(pcm->name, cpcm->name, sizeof(pcm->name));
917 apcm = kzalloc(sizeof(*apcm), GFP_KERNEL);
918 if (apcm == NULL)
919 return -ENOMEM;
920 apcm->chip = chip;
921 apcm->pcm = pcm;
922 apcm->codec = codec;
923 pcm->private_data = apcm;
924 pcm->private_free = azx_pcm_free;
925 if (cpcm->pcm_type == HDA_PCM_TYPE_MODEM)
926 pcm->dev_class = SNDRV_PCM_CLASS_MODEM;
927 list_add_tail(&apcm->list, &chip->pcm_list);
928 cpcm->pcm = pcm;
929 for (s = 0; s < 2; s++) {
930 apcm->hinfo[s] = &cpcm->stream[s];
931 if (cpcm->stream[s].substreams)
932 snd_pcm_set_ops(pcm, s, &azx_pcm_ops);
933 }
934 /* buffer pre-allocation */
935 size = CONFIG_SND_HDA_PREALLOC_SIZE * 1024;
936 if (size > MAX_PREALLOC_SIZE)
937 size = MAX_PREALLOC_SIZE;
938 snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV_SG,
939 chip->card->dev,
940 size, MAX_PREALLOC_SIZE);
941 /* link to codec */
942 pcm->dev = &codec->dev;
943 return 0;
944 }
945
946 /*
947 * CORB / RIRB interface
948 */
949 static int azx_alloc_cmd_io(struct azx *chip)
950 {
951 int err;
952
953 /* single page (at least 4096 bytes) must suffice for both ring buffers */
954 err = chip->ops->dma_alloc_pages(chip, SNDRV_DMA_TYPE_DEV,
955 PAGE_SIZE, &chip->rb);
956 if (err < 0)
957 dev_err(chip->card->dev, "cannot allocate CORB/RIRB\n");
958 return err;
959 }
960
961 static void azx_init_cmd_io(struct azx *chip)
962 {
963 int timeout;
964
965 spin_lock_irq(&chip->reg_lock);
966 /* CORB set up */
967 chip->corb.addr = chip->rb.addr;
968 chip->corb.buf = (u32 *)chip->rb.area;
969 azx_writel(chip, CORBLBASE, (u32)chip->corb.addr);
970 azx_writel(chip, CORBUBASE, upper_32_bits(chip->corb.addr));
971
972 /* set the corb size to 256 entries (ULI controllers require this to be set explicitly) */
973 azx_writeb(chip, CORBSIZE, 0x02);
974 /* set the corb write pointer to 0 */
975 azx_writew(chip, CORBWP, 0);
976
977 /* reset the corb hw read pointer */
978 azx_writew(chip, CORBRP, AZX_CORBRP_RST);
979 if (!(chip->driver_caps & AZX_DCAPS_CORBRP_SELF_CLEAR)) {
980 for (timeout = 1000; timeout > 0; timeout--) {
981 if ((azx_readw(chip, CORBRP) & AZX_CORBRP_RST) == AZX_CORBRP_RST)
982 break;
983 udelay(1);
984 }
985 if (timeout <= 0)
986 dev_err(chip->card->dev, "CORB reset timeout#1, CORBRP = %d\n",
987 azx_readw(chip, CORBRP));
988
989 azx_writew(chip, CORBRP, 0);
990 for (timeout = 1000; timeout > 0; timeout--) {
991 if (azx_readw(chip, CORBRP) == 0)
992 break;
993 udelay(1);
994 }
995 if (timeout <= 0)
996 dev_err(chip->card->dev, "CORB reset timeout#2, CORBRP = %d\n",
997 azx_readw(chip, CORBRP));
998 }
999
1000 /* enable corb dma */
1001 azx_writeb(chip, CORBCTL, AZX_CORBCTL_RUN);
1002
1003 /* RIRB set up */
1004 chip->rirb.addr = chip->rb.addr + 2048;
1005 chip->rirb.buf = (u32 *)(chip->rb.area + 2048);
1006 chip->rirb.wp = chip->rirb.rp = 0;
1007 memset(chip->rirb.cmds, 0, sizeof(chip->rirb.cmds));
1008 azx_writel(chip, RIRBLBASE, (u32)chip->rirb.addr);
1009 azx_writel(chip, RIRBUBASE, upper_32_bits(chip->rirb.addr));
1010
1011 /* set the rirb size to 256 entries (ULI controllers require this to be set explicitly) */
1012 azx_writeb(chip, RIRBSIZE, 0x02);
1013 /* reset the rirb hw write pointer */
1014 azx_writew(chip, RIRBWP, AZX_RIRBWP_RST);
1015 /* set N=1, get RIRB response interrupt for new entry */
1016 if (chip->driver_caps & AZX_DCAPS_CTX_WORKAROUND)
1017 azx_writew(chip, RINTCNT, 0xc0);
1018 else
1019 azx_writew(chip, RINTCNT, 1);
1020 /* enable rirb dma and response irq */
1021 azx_writeb(chip, RIRBCTL, AZX_RBCTL_DMA_EN | AZX_RBCTL_IRQ_EN);
1022 spin_unlock_irq(&chip->reg_lock);
1023 }
1024
1025 static void azx_free_cmd_io(struct azx *chip)
1026 {
1027 spin_lock_irq(&chip->reg_lock);
1028 /* disable ringbuffer DMAs */
1029 azx_writeb(chip, RIRBCTL, 0);
1030 azx_writeb(chip, CORBCTL, 0);
1031 spin_unlock_irq(&chip->reg_lock);
1032 }
1033
1034 static unsigned int azx_command_addr(u32 cmd)
1035 {
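/* the codec address is encoded in the upper four bits of the verb */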
1036 unsigned int addr = cmd >> 28;
1037
1038 if (addr >= AZX_MAX_CODECS) {
1039 snd_BUG();
1040 addr = 0;
1041 }
1042
1043 return addr;
1044 }
1045
1046 /* send a command */
1047 static int azx_corb_send_cmd(struct hda_bus *bus, u32 val)
1048 {
1049 struct azx *chip = bus->private_data;
1050 unsigned int addr = azx_command_addr(val);
1051 unsigned int wp, rp;
1052
1053 spin_lock_irq(&chip->reg_lock);
1054
1055 /* add command to corb */
1056 wp = azx_readw(chip, CORBWP);
1057 if (wp == 0xffff) {
1058 /* something wrong, controller likely turned to D3 */
1059 spin_unlock_irq(&chip->reg_lock);
1060 return -EIO;
1061 }
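/* advance the write pointer to the next CORB entry, wrapping around the ring */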
1062 wp++;
1063 wp %= AZX_MAX_CORB_ENTRIES;
1064
1065 rp = azx_readw(chip, CORBRP);
1066 if (wp == rp) {
1067 /* oops, it's full */
1068 spin_unlock_irq(&chip->reg_lock);
1069 return -EAGAIN;
1070 }
1071
1072 chip->rirb.cmds[addr]++;
1073 chip->corb.buf[wp] = cpu_to_le32(val);
1074 azx_writew(chip, CORBWP, wp);
1075
1076 spin_unlock_irq(&chip->reg_lock);
1077
1078 return 0;
1079 }
1080
1081 #define AZX_RIRB_EX_UNSOL_EV (1<<4)
1082
1083 /* retrieve RIRB entry - called from interrupt handler */
1084 static void azx_update_rirb(struct azx *chip)
1085 {
1086 unsigned int rp, wp;
1087 unsigned int addr;
1088 u32 res, res_ex;
1089
1090 wp = azx_readw(chip, RIRBWP);
1091 if (wp == 0xffff) {
1092 /* something wrong, controller likely turned to D3 */
1093 return;
1094 }
1095
1096 if (wp == chip->rirb.wp)
1097 return;
1098 chip->rirb.wp = wp;
1099
1100 while (chip->rirb.rp != wp) {
1101 chip->rirb.rp++;
1102 chip->rirb.rp %= AZX_MAX_RIRB_ENTRIES;
1103
1104 rp = chip->rirb.rp << 1; /* an RIRB entry is 8-bytes */
1105 res_ex = le32_to_cpu(chip->rirb.buf[rp + 1]);
1106 res = le32_to_cpu(chip->rirb.buf[rp]);
1107 addr = res_ex & 0xf;
1108 if ((addr >= AZX_MAX_CODECS) || !(chip->codec_mask & (1 << addr))) {
1109 dev_err(chip->card->dev, "spurious response %#x:%#x, rp = %d, wp = %d",
1110 res, res_ex,
1111 chip->rirb.rp, wp);
1112 snd_BUG();
1113 } else if (res_ex & AZX_RIRB_EX_UNSOL_EV)
1114 snd_hda_queue_unsol_event(chip->bus, res, res_ex);
1115 else if (chip->rirb.cmds[addr]) {
1116 chip->rirb.res[addr] = res;
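/* make sure the stored response is visible before the command counter is decremented */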
1117 smp_wmb();
1118 chip->rirb.cmds[addr]--;
1119 } else if (printk_ratelimit()) {
1120 dev_err(chip->card->dev, "spurious response %#x:%#x, last cmd=%#08x\n",
1121 res, res_ex,
1122 chip->last_cmd[addr]);
1123 }
1124 }
1125 }
1126
1127 /* receive a response */
1128 static unsigned int azx_rirb_get_response(struct hda_bus *bus,
1129 unsigned int addr)
1130 {
1131 struct azx *chip = bus->private_data;
1132 unsigned long timeout;
1133 unsigned long loopcounter;
1134 int do_poll = 0;
1135
1136 again:
1137 timeout = jiffies + msecs_to_jiffies(1000);
1138
1139 for (loopcounter = 0;; loopcounter++) {
1140 if (chip->polling_mode || do_poll) {
1141 spin_lock_irq(&chip->reg_lock);
1142 azx_update_rirb(chip);
1143 spin_unlock_irq(&chip->reg_lock);
1144 }
1145 if (!chip->rirb.cmds[addr]) {
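/* paired with the smp_wmb() in azx_update_rirb() */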
1146 smp_rmb();
1147 bus->rirb_error = 0;
1148
1149 if (!do_poll)
1150 chip->poll_count = 0;
1151 return chip->rirb.res[addr]; /* the last value */
1152 }
1153 if (time_after(jiffies, timeout))
1154 break;
1155 if (bus->needs_damn_long_delay || loopcounter > 3000)
1156 msleep(2); /* temporary workaround */
1157 else {
1158 udelay(10);
1159 cond_resched();
1160 }
1161 }
1162
1163 if (bus->no_response_fallback)
1164 return -1;
1165
1166 if (!chip->polling_mode && chip->poll_count < 2) {
1167 dev_dbg(chip->card->dev,
1168 "azx_get_response timeout, polling the codec once: last cmd=0x%08x\n",
1169 chip->last_cmd[addr]);
1170 do_poll = 1;
1171 chip->poll_count++;
1172 goto again;
1173 }
1174
1175
1176 if (!chip->polling_mode) {
1177 dev_warn(chip->card->dev,
1178 "azx_get_response timeout, switching to polling mode: last cmd=0x%08x\n",
1179 chip->last_cmd[addr]);
1180 chip->polling_mode = 1;
1181 goto again;
1182 }
1183
1184 if (chip->msi) {
1185 dev_warn(chip->card->dev,
1186 "No response from codec, disabling MSI: last cmd=0x%08x\n",
1187 chip->last_cmd[addr]);
1188 if (chip->ops->disable_msi_reset_irq &&
1189 chip->ops->disable_msi_reset_irq(chip) < 0) {
1190 bus->rirb_error = 1;
1191 return -1;
1192 }
1193 goto again;
1194 }
1195
1196 if (chip->probing) {
1197 /* If this critical timeout happens during the codec probing
1198 * phase, this is likely an access to a non-existing codec
1199 * slot. Better to return an error and reset the system.
1200 */
1201 return -1;
1202 }
1203
1204 /* a fatal communication error; need either to reset or to fallback
1205 * to the single_cmd mode
1206 */
1207 bus->rirb_error = 1;
1208 if (bus->allow_bus_reset && !bus->response_reset && !bus->in_reset) {
1209 bus->response_reset = 1;
1210 return -1; /* give a chance to retry */
1211 }
1212
1213 dev_err(chip->card->dev,
1214 "azx_get_response timeout, switching to single_cmd mode: last cmd=0x%08x\n",
1215 chip->last_cmd[addr]);
1216 chip->single_cmd = 1;
1217 bus->response_reset = 0;
1218 /* release CORB/RIRB */
1219 azx_free_cmd_io(chip);
1220 /* disable unsolicited responses */
1221 azx_writel(chip, GCTL, azx_readl(chip, GCTL) & ~AZX_GCTL_UNSOL);
1222 return -1;
1223 }
1224
1225 /*
1226 * Use the single immediate command instead of CORB/RIRB for simplicity
1227 *
1228 * Note: according to Intel, this is not the preferred use. The command was
1229 * intended for the BIOS only, and may get confused with unsolicited
1230 * responses. So, we shouldn't use it for normal operation from the
1231 * driver.
1232 * The code is left here, however, for debugging/testing purposes.
1233 */
1234
1235 /* receive a response */
1236 static int azx_single_wait_for_response(struct azx *chip, unsigned int addr)
1237 {
1238 int timeout = 50;
1239
1240 while (timeout--) {
1241 /* check IRV (immediate result valid) bit */
1242 if (azx_readw(chip, IRS) & AZX_IRS_VALID) {
1243 /* reuse rirb.res as the response return value */
1244 chip->rirb.res[addr] = azx_readl(chip, IR);
1245 return 0;
1246 }
1247 udelay(1);
1248 }
1249 if (printk_ratelimit())
1250 dev_dbg(chip->card->dev, "get_response timeout: IRS=0x%x\n",
1251 azx_readw(chip, IRS));
1252 chip->rirb.res[addr] = -1;
1253 return -EIO;
1254 }
1255
1256 /* send a command */
1257 static int azx_single_send_cmd(struct hda_bus *bus, u32 val)
1258 {
1259 struct azx *chip = bus->private_data;
1260 unsigned int addr = azx_command_addr(val);
1261 int timeout = 50;
1262
1263 bus->rirb_error = 0;
1264 while (timeout--) {
1265 /* check ICB busy bit */
1266 if (!((azx_readw(chip, IRS) & AZX_IRS_BUSY))) {
1267 /* Clear IRV valid bit */
1268 azx_writew(chip, IRS, azx_readw(chip, IRS) |
1269 AZX_IRS_VALID);
1270 azx_writel(chip, IC, val);
1271 azx_writew(chip, IRS, azx_readw(chip, IRS) |
1272 AZX_IRS_BUSY);
1273 return azx_single_wait_for_response(chip, addr);
1274 }
1275 udelay(1);
1276 }
1277 if (printk_ratelimit())
1278 dev_dbg(chip->card->dev,
1279 "send_cmd timeout: IRS=0x%x, val=0x%x\n",
1280 azx_readw(chip, IRS), val);
1281 return -EIO;
1282 }
1283
1284 /* receive a response */
1285 static unsigned int azx_single_get_response(struct hda_bus *bus,
1286 unsigned int addr)
1287 {
1288 struct azx *chip = bus->private_data;
1289 return chip->rirb.res[addr];
1290 }
1291
1292 /*
1293 * The below are the main callbacks from hda_codec.
1294 *
1295 * They are just the skeleton to call sub-callbacks according to the
1296 * current setting of chip->single_cmd.
1297 */
1298
1299 /* send a command */
1300 static int azx_send_cmd(struct hda_bus *bus, unsigned int val)
1301 {
1302 struct azx *chip = bus->private_data;
1303
1304 if (chip->disabled)
1305 return 0;
1306 chip->last_cmd[azx_command_addr(val)] = val;
1307 if (chip->single_cmd)
1308 return azx_single_send_cmd(bus, val);
1309 else
1310 return azx_corb_send_cmd(bus, val);
1311 }
1312
1313 /* get a response */
1314 static unsigned int azx_get_response(struct hda_bus *bus,
1315 unsigned int addr)
1316 {
1317 struct azx *chip = bus->private_data;
1318 if (chip->disabled)
1319 return 0;
1320 if (chip->single_cmd)
1321 return azx_single_get_response(bus, addr);
1322 else
1323 return azx_rirb_get_response(bus, addr);
1324 }
1325
1326 #ifdef CONFIG_SND_HDA_DSP_LOADER
1327 /*
1328 * DSP loading code (e.g. for CA0132)
1329 */
1330
1331 /* use the first stream for loading DSP */
1332 static struct azx_dev *
1333 azx_get_dsp_loader_dev(struct azx *chip)
1334 {
1335 return &chip->azx_dev[chip->playback_index_offset];
1336 }
1337
1338 static int azx_load_dsp_prepare(struct hda_bus *bus, unsigned int format,
1339 unsigned int byte_size,
1340 struct snd_dma_buffer *bufp)
1341 {
1342 u32 *bdl;
1343 struct azx *chip = bus->private_data;
1344 struct azx_dev *azx_dev;
1345 int err;
1346
1347 azx_dev = azx_get_dsp_loader_dev(chip);
1348
1349 dsp_lock(azx_dev);
1350 spin_lock_irq(&chip->reg_lock);
1351 if (azx_dev->running || azx_dev->locked) {
1352 spin_unlock_irq(&chip->reg_lock);
1353 err = -EBUSY;
1354 goto unlock;
1355 }
1356 azx_dev->prepared = 0;
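/* save the stream state so it can be restored after DSP loading */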
1357 chip->saved_azx_dev = *azx_dev;
1358 azx_dev->locked = 1;
1359 spin_unlock_irq(&chip->reg_lock);
1360
1361 err = chip->ops->dma_alloc_pages(chip, SNDRV_DMA_TYPE_DEV_SG,
1362 byte_size, bufp);
1363 if (err < 0)
1364 goto err_alloc;
1365
1366 azx_dev->bufsize = byte_size;
1367 azx_dev->period_bytes = byte_size;
1368 azx_dev->format_val = format;
1369
1370 azx_stream_reset(chip, azx_dev);
1371
1372 /* reset BDL address */
1373 azx_sd_writel(chip, azx_dev, SD_BDLPL, 0);
1374 azx_sd_writel(chip, azx_dev, SD_BDLPU, 0);
1375
1376 azx_dev->frags = 0;
1377 bdl = (u32 *)azx_dev->bdl.area;
1378 err = setup_bdle(chip, bufp, azx_dev, &bdl, 0, byte_size, 0);
1379 if (err < 0)
1380 goto error;
1381
1382 azx_setup_controller(chip, azx_dev);
1383 dsp_unlock(azx_dev);
1384 return azx_dev->stream_tag;
1385
1386 error:
1387 chip->ops->dma_free_pages(chip, bufp);
1388 err_alloc:
1389 spin_lock_irq(&chip->reg_lock);
1390 if (azx_dev->opened)
1391 *azx_dev = chip->saved_azx_dev;
1392 azx_dev->locked = 0;
1393 spin_unlock_irq(&chip->reg_lock);
1394 unlock:
1395 dsp_unlock(azx_dev);
1396 return err;
1397 }
1398
1399 static void azx_load_dsp_trigger(struct hda_bus *bus, bool start)
1400 {
1401 struct azx *chip = bus->private_data;
1402 struct azx_dev *azx_dev = azx_get_dsp_loader_dev(chip);
1403
1404 if (start)
1405 azx_stream_start(chip, azx_dev);
1406 else
1407 azx_stream_stop(chip, azx_dev);
1408 azx_dev->running = start;
1409 }
1410
1411 static void azx_load_dsp_cleanup(struct hda_bus *bus,
1412 struct snd_dma_buffer *dmab)
1413 {
1414 struct azx *chip = bus->private_data;
1415 struct azx_dev *azx_dev = azx_get_dsp_loader_dev(chip);
1416
1417 if (!dmab->area || !azx_dev->locked)
1418 return;
1419
1420 dsp_lock(azx_dev);
1421 /* reset BDL address */
1422 azx_sd_writel(chip, azx_dev, SD_BDLPL, 0);
1423 azx_sd_writel(chip, azx_dev, SD_BDLPU, 0);
1424 azx_sd_writel(chip, azx_dev, SD_CTL, 0);
1425 azx_dev->bufsize = 0;
1426 azx_dev->period_bytes = 0;
1427 azx_dev->format_val = 0;
1428
1429 chip->ops->dma_free_pages(chip, dmab);
1430 dmab->area = NULL;
1431
1432 spin_lock_irq(&chip->reg_lock);
1433 if (azx_dev->opened)
1434 *azx_dev = chip->saved_azx_dev;
1435 azx_dev->locked = 0;
1436 spin_unlock_irq(&chip->reg_lock);
1437 dsp_unlock(azx_dev);
1438 }
1439 #endif /* CONFIG_SND_HDA_DSP_LOADER */
1440
1441 int azx_alloc_stream_pages(struct azx *chip)
1442 {
1443 int i, err;
1444 struct snd_card *card = chip->card;
1445
1446 for (i = 0; i < chip->num_streams; i++) {
1447 dsp_lock_init(&chip->azx_dev[i]);
1448 /* allocate memory for the BDL for each stream */
1449 err = chip->ops->dma_alloc_pages(chip, SNDRV_DMA_TYPE_DEV,
1450 BDL_SIZE,
1451 &chip->azx_dev[i].bdl);
1452 if (err < 0) {
1453 dev_err(card->dev, "cannot allocate BDL\n");
1454 return -ENOMEM;
1455 }
1456 }
1457 /* allocate memory for the position buffer */
1458 err = chip->ops->dma_alloc_pages(chip, SNDRV_DMA_TYPE_DEV,
1459 chip->num_streams * 8, &chip->posbuf);
1460 if (err < 0) {
1461 dev_err(card->dev, "cannot allocate posbuf\n");
1462 return -ENOMEM;
1463 }
1464
1465 /* allocate CORB/RIRB */
1466 err = azx_alloc_cmd_io(chip);
1467 if (err < 0)
1468 return err;
1469 return 0;
1470 }
1471 EXPORT_SYMBOL_GPL(azx_alloc_stream_pages);
1472
1473 void azx_free_stream_pages(struct azx *chip)
1474 {
1475 int i;
1476 if (chip->azx_dev) {
1477 for (i = 0; i < chip->num_streams; i++)
1478 if (chip->azx_dev[i].bdl.area)
1479 chip->ops->dma_free_pages(
1480 chip, &chip->azx_dev[i].bdl);
1481 }
1482 if (chip->rb.area)
1483 chip->ops->dma_free_pages(chip, &chip->rb);
1484 if (chip->posbuf.area)
1485 chip->ops->dma_free_pages(chip, &chip->posbuf);
1486 }
1487 EXPORT_SYMBOL_GPL(azx_free_stream_pages);
1488
1489 /*
1490 * Lowlevel interface
1491 */
1492
1493 /* enter link reset */
1494 void azx_enter_link_reset(struct azx *chip)
1495 {
1496 unsigned long timeout;
1497
1498 /* reset controller */
1499 azx_writel(chip, GCTL, azx_readl(chip, GCTL) & ~AZX_GCTL_RESET);
1500
1501 timeout = jiffies + msecs_to_jiffies(100);
1502 while ((azx_readb(chip, GCTL) & AZX_GCTL_RESET) &&
1503 time_before(jiffies, timeout))
1504 usleep_range(500, 1000);
1505 }
1506 EXPORT_SYMBOL_GPL(azx_enter_link_reset);
1507
1508 /* exit link reset */
1509 static void azx_exit_link_reset(struct azx *chip)
1510 {
1511 unsigned long timeout;
1512
1513 azx_writeb(chip, GCTL, azx_readb(chip, GCTL) | AZX_GCTL_RESET);
1514
1515 timeout = jiffies + msecs_to_jiffies(100);
1516 while (!azx_readb(chip, GCTL) &&
1517 time_before(jiffies, timeout))
1518 usleep_range(500, 1000);
1519 }
1520
1521 /* reset codec link */
1522 static int azx_reset(struct azx *chip, bool full_reset)
1523 {
1524 if (!full_reset)
1525 goto __skip;
1526
1527 /* clear STATESTS */
1528 azx_writew(chip, STATESTS, STATESTS_INT_MASK);
1529
1530 /* reset controller */
1531 azx_enter_link_reset(chip);
1532
1533 /* delay for >= 100us for codec PLL to settle per spec
1534 * Rev 0.9 section 5.5.1
1535 */
1536 usleep_range(500, 1000);
1537
1538 /* Bring controller out of reset */
1539 azx_exit_link_reset(chip);
1540
1541 /* Brent Chartrand said to wait >= 540us for codecs to initialize */
1542 usleep_range(1000, 1200);
1543
1544 __skip:
1545 /* check to see if controller is ready */
1546 if (!azx_readb(chip, GCTL)) {
1547 dev_dbg(chip->card->dev, "azx_reset: controller not ready!\n");
1548 return -EBUSY;
1549 }
1550
1551 /* Accept unsolicited responses */
1552 if (!chip->single_cmd)
1553 azx_writel(chip, GCTL, azx_readl(chip, GCTL) |
1554 AZX_GCTL_UNSOL);
1555
1556 /* detect codecs */
1557 if (!chip->codec_mask) {
1558 chip->codec_mask = azx_readw(chip, STATESTS);
1559 dev_dbg(chip->card->dev, "codec_mask = 0x%x\n",
1560 chip->codec_mask);
1561 }
1562
1563 return 0;
1564 }
1565
1566 /* enable interrupts */
1567 static void azx_int_enable(struct azx *chip)
1568 {
1569 /* enable controller CIE and GIE */
1570 azx_writel(chip, INTCTL, azx_readl(chip, INTCTL) |
1571 AZX_INT_CTRL_EN | AZX_INT_GLOBAL_EN);
1572 }
1573
1574 /* disable interrupts */
1575 static void azx_int_disable(struct azx *chip)
1576 {
1577 int i;
1578
1579 /* disable interrupts in stream descriptor */
1580 for (i = 0; i < chip->num_streams; i++) {
1581 struct azx_dev *azx_dev = &chip->azx_dev[i];
1582 azx_sd_writeb(chip, azx_dev, SD_CTL,
1583 azx_sd_readb(chip, azx_dev, SD_CTL) &
1584 ~SD_INT_MASK);
1585 }
1586
1587 /* disable SIE for all streams */
1588 azx_writeb(chip, INTCTL, 0);
1589
1590 /* disable controller CIE and GIE */
1591 azx_writel(chip, INTCTL, azx_readl(chip, INTCTL) &
1592 ~(AZX_INT_CTRL_EN | AZX_INT_GLOBAL_EN));
1593 }
1594
1595 /* clear interrupts */
1596 static void azx_int_clear(struct azx *chip)
1597 {
1598 int i;
1599
1600 /* clear stream status */
1601 for (i = 0; i < chip->num_streams; i++) {
1602 struct azx_dev *azx_dev = &chip->azx_dev[i];
1603 azx_sd_writeb(chip, azx_dev, SD_STS, SD_INT_MASK);
1604 }
1605
1606 /* clear STATESTS */
1607 azx_writew(chip, STATESTS, STATESTS_INT_MASK);
1608
1609 /* clear rirb status */
1610 azx_writeb(chip, RIRBSTS, RIRB_INT_MASK);
1611
1612 /* clear int status */
1613 azx_writel(chip, INTSTS, AZX_INT_CTRL_EN | AZX_INT_ALL_STREAM);
1614 }
1615
1616 /*
1617 * reset and start the controller registers
1618 */
1619 void azx_init_chip(struct azx *chip, bool full_reset)
1620 {
1621 if (chip->initialized)
1622 return;
1623
1624 /* reset controller */
1625 azx_reset(chip, full_reset);
1626
1627 /* initialize interrupts */
1628 azx_int_clear(chip);
1629 azx_int_enable(chip);
1630
1631 /* initialize the codec command I/O */
1632 if (!chip->single_cmd)
1633 azx_init_cmd_io(chip);
1634
1635 /* program the position buffer */
1636 azx_writel(chip, DPLBASE, (u32)chip->posbuf.addr);
1637 azx_writel(chip, DPUBASE, upper_32_bits(chip->posbuf.addr));
1638
1639 chip->initialized = 1;
1640 }
1641 EXPORT_SYMBOL_GPL(azx_init_chip);
1642
1643 void azx_stop_chip(struct azx *chip)
1644 {
1645 if (!chip->initialized)
1646 return;
1647
1648 /* disable interrupts */
1649 azx_int_disable(chip);
1650 azx_int_clear(chip);
1651
1652 /* disable CORB/RIRB */
1653 azx_free_cmd_io(chip);
1654
1655 /* disable position buffer */
1656 azx_writel(chip, DPLBASE, 0);
1657 azx_writel(chip, DPUBASE, 0);
1658
1659 chip->initialized = 0;
1660 }
1661 EXPORT_SYMBOL_GPL(azx_stop_chip);
1662
1663 /*
1664 * interrupt handler
1665 */
1666 irqreturn_t azx_interrupt(int irq, void *dev_id)
1667 {
1668 struct azx *chip = dev_id;
1669 struct azx_dev *azx_dev;
1670 u32 status;
1671 u8 sd_status;
1672 int i;
1673
1674 #ifdef CONFIG_PM_RUNTIME
1675 if (chip->driver_caps & AZX_DCAPS_PM_RUNTIME)
1676 if (!pm_runtime_active(chip->card->dev))
1677 return IRQ_NONE;
1678 #endif
1679
1680 spin_lock(&chip->reg_lock);
1681
1682 if (chip->disabled) {
1683 spin_unlock(&chip->reg_lock);
1684 return IRQ_NONE;
1685 }
1686
1687 status = azx_readl(chip, INTSTS);
1688 if (status == 0 || status == 0xffffffff) {
1689 spin_unlock(&chip->reg_lock);
1690 return IRQ_NONE;
1691 }
1692
1693 for (i = 0; i < chip->num_streams; i++) {
1694 azx_dev = &chip->azx_dev[i];
1695 if (status & azx_dev->sd_int_sta_mask) {
1696 sd_status = azx_sd_readb(chip, azx_dev, SD_STS);
1697 azx_sd_writeb(chip, azx_dev, SD_STS, SD_INT_MASK);
1698 if (!azx_dev->substream || !azx_dev->running ||
1699 !(sd_status & SD_INT_COMPLETE))
1700 continue;
1701 /* check whether this IRQ is really acceptable */
1702 if (!chip->ops->position_check ||
1703 chip->ops->position_check(chip, azx_dev)) {
1704 spin_unlock(&chip->reg_lock);
1705 snd_pcm_period_elapsed(azx_dev->substream);
1706 spin_lock(&chip->reg_lock);
1707 }
1708 }
1709 }
1710
1711 /* clear rirb int */
1712 status = azx_readb(chip, RIRBSTS);
1713 if (status & RIRB_INT_MASK) {
1714 if (status & RIRB_INT_RESPONSE) {
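/* chips with AZX_DCAPS_RIRB_PRE_DELAY apparently need a short delay before the new RIRB entries become readable */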
1715 if (chip->driver_caps & AZX_DCAPS_RIRB_PRE_DELAY)
1716 udelay(80);
1717 azx_update_rirb(chip);
1718 }
1719 azx_writeb(chip, RIRBSTS, RIRB_INT_MASK);
1720 }
1721
1722 spin_unlock(&chip->reg_lock);
1723
1724 return IRQ_HANDLED;
1725 }
1726 EXPORT_SYMBOL_GPL(azx_interrupt);
1727
1728 /*
1729 * Codec interface
1730 */
1731
1732 /*
1733 * Probe the given codec address
1734 */
1735 static int probe_codec(struct azx *chip, int addr)
1736 {
1737 unsigned int cmd = (addr << 28) | (AC_NODE_ROOT << 20) |
1738 (AC_VERB_PARAMETERS << 8) | AC_PAR_VENDOR_ID;
1739 unsigned int res;
1740
1741 mutex_lock(&chip->bus->cmd_mutex);
1742 chip->probing = 1;
1743 azx_send_cmd(chip->bus, cmd);
1744 res = azx_get_response(chip->bus, addr);
1745 chip->probing = 0;
1746 mutex_unlock(&chip->bus->cmd_mutex);
1747 if (res == -1)
1748 return -EIO;
1749 dev_dbg(chip->card->dev, "codec #%d probed OK\n", addr);
1750 return 0;
1751 }
1752
1753 static void azx_bus_reset(struct hda_bus *bus)
1754 {
1755 struct azx *chip = bus->private_data;
1756
1757 bus->in_reset = 1;
1758 azx_stop_chip(chip);
1759 azx_init_chip(chip, true);
1760 #ifdef CONFIG_PM
1761 if (chip->initialized) {
1762 struct azx_pcm *p;
1763 list_for_each_entry(p, &chip->pcm_list, list)
1764 snd_pcm_suspend_all(p->pcm);
1765 snd_hda_suspend(chip->bus);
1766 snd_hda_resume(chip->bus);
1767 }
1768 #endif
1769 bus->in_reset = 0;
1770 }
1771
1772 #ifdef CONFIG_PM
1773 /* power-up/down the controller */
1774 static void azx_power_notify(struct hda_bus *bus, bool power_up)
1775 {
1776 struct azx *chip = bus->private_data;
1777
1778 if (!(chip->driver_caps & AZX_DCAPS_PM_RUNTIME))
1779 return;
1780
1781 if (power_up)
1782 pm_runtime_get_sync(chip->card->dev);
1783 else
1784 pm_runtime_put_sync(chip->card->dev);
1785 }
1786 #endif
1787
1788 static int get_jackpoll_interval(struct azx *chip)
1789 {
1790 int i;
1791 unsigned int j;
1792
1793 if (!chip->jackpoll_ms)
1794 return 0;
1795
1796 i = chip->jackpoll_ms[chip->dev_index];
1797 if (i == 0)
1798 return 0;
1799 if (i < 50 || i > 60000)
1800 j = 0;
1801 else
1802 j = msecs_to_jiffies(i);
1803 if (j == 0)
1804 dev_warn(chip->card->dev,
1805 "jackpoll_ms value out of range: %d\n", i);
1806 return j;
1807 }
1808
1809 /* Codec initialization */
1810 int azx_codec_create(struct azx *chip, const char *model,
1811 unsigned int max_slots,
1812 int *power_save_to)
1813 {
1814 struct hda_bus_template bus_temp;
1815 int c, codecs, err;
1816
1817 memset(&bus_temp, 0, sizeof(bus_temp));
1818 bus_temp.private_data = chip;
1819 bus_temp.modelname = model;
1820 bus_temp.pci = chip->pci;
1821 bus_temp.ops.command = azx_send_cmd;
1822 bus_temp.ops.get_response = azx_get_response;
1823 bus_temp.ops.attach_pcm = azx_attach_pcm_stream;
1824 bus_temp.ops.bus_reset = azx_bus_reset;
1825 #ifdef CONFIG_PM
1826 bus_temp.power_save = power_save_to;
1827 bus_temp.ops.pm_notify = azx_power_notify;
1828 #endif
1829 #ifdef CONFIG_SND_HDA_DSP_LOADER
1830 bus_temp.ops.load_dsp_prepare = azx_load_dsp_prepare;
1831 bus_temp.ops.load_dsp_trigger = azx_load_dsp_trigger;
1832 bus_temp.ops.load_dsp_cleanup = azx_load_dsp_cleanup;
1833 #endif
1834
1835 err = snd_hda_bus_new(chip->card, &bus_temp, &chip->bus);
1836 if (err < 0)
1837 return err;
1838
1839 if (chip->driver_caps & AZX_DCAPS_RIRB_DELAY) {
1840 dev_dbg(chip->card->dev, "Enable delay in RIRB handling\n");
1841 chip->bus->needs_damn_long_delay = 1;
1842 }
1843
1844 codecs = 0;
1845 if (!max_slots)
1846 max_slots = AZX_DEFAULT_CODECS;
1847
1848 /* First try to probe all given codec slots */
1849 for (c = 0; c < max_slots; c++) {
1850 if ((chip->codec_mask & (1 << c)) & chip->codec_probe_mask) {
1851 if (probe_codec(chip, c) < 0) {
1852 /* Some BIOSen give you wrong codec addresses
1853 * that don't exist
1854 */
1855 dev_warn(chip->card->dev,
1856 "Codec #%d probe error; disabling it...\n", c);
1857 chip->codec_mask &= ~(1 << c);
1858 /* Worse, accessing a non-existing
1859 * codec often screws up the controller chip,
1860 * and disturbs further communication.
1861 * Thus if an error occurs during probing,
1862 * it's better to reset the controller chip
1863 * to get back to a sane state.
1864 */
1865 azx_stop_chip(chip);
1866 azx_init_chip(chip, true);
1867 }
1868 }
1869 }
1870
1871 /* AMD chipsets often cause communication stalls upon certain
1872 * sequences such as pin detection. It seems that forcing the synced
1873 * access works around the stall. Grrr...
1874 */
1875 if (chip->driver_caps & AZX_DCAPS_SYNC_WRITE) {
1876 dev_dbg(chip->card->dev, "Enable sync_write for stable communication\n");
1877 chip->bus->sync_write = 1;
1878 chip->bus->allow_bus_reset = 1;
1879 }
1880
1881 /* Then create codec instances */
1882 for (c = 0; c < max_slots; c++) {
1883 if ((chip->codec_mask & (1 << c)) & chip->codec_probe_mask) {
1884 struct hda_codec *codec;
1885 err = snd_hda_codec_new(chip->bus, c, &codec);
1886 if (err < 0)
1887 continue;
1888 codec->jackpoll_interval = get_jackpoll_interval(chip);
1889 codec->beep_mode = chip->beep_mode;
1890 codecs++;
1891 }
1892 }
1893 if (!codecs) {
1894 dev_err(chip->card->dev, "no codecs initialized\n");
1895 return -ENXIO;
1896 }
1897 return 0;
1898 }
1899 EXPORT_SYMBOL_GPL(azx_codec_create);
1900
1901 /* configure each codec instance */
1902 int azx_codec_configure(struct azx *chip)
1903 {
1904 struct hda_codec *codec;
1905 list_for_each_entry(codec, &chip->bus->codec_list, list) {
1906 snd_hda_codec_configure(codec);
1907 }
1908 return 0;
1909 }
1910 EXPORT_SYMBOL_GPL(azx_codec_configure);
1911
1912 /* mixer creation - all stuff is implemented in hda module */
1913 int azx_mixer_create(struct azx *chip)
1914 {
1915 return snd_hda_build_controls(chip->bus);
1916 }
1917 EXPORT_SYMBOL_GPL(azx_mixer_create);
1918
1919
1920 /* initialize SD streams */
1921 int azx_init_stream(struct azx *chip)
1922 {
1923 int i;
1924
1925 /* initialize each stream (aka device):
1926 * assign the position buffer pointer, SD register base,
1927 * interrupt mask and stream tag to each stream
1928 */
1929 for (i = 0; i < chip->num_streams; i++) {
1930 struct azx_dev *azx_dev = &chip->azx_dev[i];
1931 azx_dev->posbuf = (u32 __iomem *)(chip->posbuf.area + i * 8);
1932 /* offset: SDI0=0x80, SDI1=0xa0, ... SDO3=0x160 */
1933 azx_dev->sd_addr = chip->remap_addr + (0x20 * i + 0x80);
1934 /* int mask: SDI0=0x01, SDI1=0x02, ... SDO3=0x80 */
1935 azx_dev->sd_int_sta_mask = 1 << i;
1936 /* stream tag: must be non-zero and unique */
1937 azx_dev->index = i;
1938 azx_dev->stream_tag = i + 1;
1939 }
1940
1941 return 0;
1942 }
1943 EXPORT_SYMBOL_GPL(azx_init_stream);
1944
1945 /*
1946 * reboot notifier for hang-up problem at power-down
1947 */
1948 static int azx_halt(struct notifier_block *nb, unsigned long event, void *buf)
1949 {
1950 struct azx *chip = container_of(nb, struct azx, reboot_notifier);
1951 snd_hda_bus_reboot_notify(chip->bus);
1952 azx_stop_chip(chip);
1953 return NOTIFY_OK;
1954 }
1955
1956 void azx_notifier_register(struct azx *chip)
1957 {
1958 chip->reboot_notifier.notifier_call = azx_halt;
1959 register_reboot_notifier(&chip->reboot_notifier);
1960 }
1961 EXPORT_SYMBOL_GPL(azx_notifier_register);
1962
1963 void azx_notifier_unregister(struct azx *chip)
1964 {
1965 if (chip->reboot_notifier.notifier_call)
1966 unregister_reboot_notifier(&chip->reboot_notifier);
1967 }
1968 EXPORT_SYMBOL_GPL(azx_notifier_unregister);
1969
1970 MODULE_LICENSE("GPL");
1971 MODULE_DESCRIPTION("Common HDA driver functions");
1972