1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  *  Digital Audio (PCM) abstract layer
4  *  Copyright (c) by Jaroslav Kysela <perex@perex.cz>
5  */
6 
7 #include <linux/compat.h>
8 #include <linux/mm.h>
9 #include <linux/module.h>
10 #include <linux/file.h>
11 #include <linux/slab.h>
12 #include <linux/sched/signal.h>
13 #include <linux/time.h>
14 #include <linux/pm_qos.h>
15 #include <linux/io.h>
16 #include <linux/dma-mapping.h>
17 #include <linux/vmalloc.h>
18 #include <sound/core.h>
19 #include <sound/control.h>
20 #include <sound/info.h>
21 #include <sound/pcm.h>
22 #include <sound/pcm_params.h>
23 #include <sound/timer.h>
24 #include <sound/minors.h>
25 #include <linux/uio.h>
26 #include <linux/delay.h>
27 
28 #include "pcm_local.h"
29 
30 #ifdef CONFIG_SND_DEBUG
31 #define CREATE_TRACE_POINTS
32 #include "pcm_param_trace.h"
33 #else
34 #define trace_hw_mask_param_enabled()		0
35 #define trace_hw_interval_param_enabled()	0
36 #define trace_hw_mask_param(substream, type, index, prev, curr)
37 #define trace_hw_interval_param(substream, type, index, prev, curr)
38 #endif
39 
40 /*
41  *  Compatibility
42  */
43 
44 struct snd_pcm_hw_params_old {
45 	unsigned int flags;
46 	unsigned int masks[SNDRV_PCM_HW_PARAM_SUBFORMAT -
47 			   SNDRV_PCM_HW_PARAM_ACCESS + 1];
48 	struct snd_interval intervals[SNDRV_PCM_HW_PARAM_TICK_TIME -
49 					SNDRV_PCM_HW_PARAM_SAMPLE_BITS + 1];
50 	unsigned int rmask;
51 	unsigned int cmask;
52 	unsigned int info;
53 	unsigned int msbits;
54 	unsigned int rate_num;
55 	unsigned int rate_den;
56 	snd_pcm_uframes_t fifo_size;
57 	unsigned char reserved[64];
58 };
59 
60 #ifdef CONFIG_SND_SUPPORT_OLD_API
61 #define SNDRV_PCM_IOCTL_HW_REFINE_OLD _IOWR('A', 0x10, struct snd_pcm_hw_params_old)
62 #define SNDRV_PCM_IOCTL_HW_PARAMS_OLD _IOWR('A', 0x11, struct snd_pcm_hw_params_old)
63 
64 static int snd_pcm_hw_refine_old_user(struct snd_pcm_substream *substream,
65 				      struct snd_pcm_hw_params_old __user * _oparams);
66 static int snd_pcm_hw_params_old_user(struct snd_pcm_substream *substream,
67 				      struct snd_pcm_hw_params_old __user * _oparams);
68 #endif
69 static int snd_pcm_open(struct file *file, struct snd_pcm *pcm, int stream);
70 
71 /*
72  *
73  */
74 
75 static DECLARE_RWSEM(snd_pcm_link_rwsem);
76 
77 void snd_pcm_group_init(struct snd_pcm_group *group)
78 {
79 	spin_lock_init(&group->lock);
80 	mutex_init(&group->mutex);
81 	INIT_LIST_HEAD(&group->substreams);
82 	refcount_set(&group->refs, 1);
83 }
84 
85 /* define group lock helpers */
86 #define DEFINE_PCM_GROUP_LOCK(action, mutex_action) \
87 static void snd_pcm_group_ ## action(struct snd_pcm_group *group, bool nonatomic) \
88 { \
89 	if (nonatomic) \
90 		mutex_ ## mutex_action(&group->mutex); \
91 	else \
92 		spin_ ## action(&group->lock); \
93 }
94 
95 DEFINE_PCM_GROUP_LOCK(lock, lock);
96 DEFINE_PCM_GROUP_LOCK(unlock, unlock);
97 DEFINE_PCM_GROUP_LOCK(lock_irq, lock);
98 DEFINE_PCM_GROUP_LOCK(unlock_irq, unlock);
99 
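/*
 * For reference, DEFINE_PCM_GROUP_LOCK(lock, lock) above expands to:
 *
 *	static void snd_pcm_group_lock(struct snd_pcm_group *group,
 *				       bool nonatomic)
 *	{
 *		if (nonatomic)
 *			mutex_lock(&group->mutex);
 *		else
 *			spin_lock(&group->lock);
 *	}
 *
 * i.e. each generated helper picks the group mutex or spinlock according
 * to the nonatomic flag of the PCM device.
 */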
100 /**
101  * snd_pcm_stream_lock - Lock the PCM stream
102  * @substream: PCM substream
103  *
104  * This locks the PCM stream's spinlock or mutex depending on the nonatomic
105  * flag of the given substream.  This also takes the global link rw lock
106  * (or rw sem), too, for avoiding the race with linked streams.
107  */
108 void snd_pcm_stream_lock(struct snd_pcm_substream *substream)
109 {
110 	snd_pcm_group_lock(&substream->self_group, substream->pcm->nonatomic);
111 }
112 EXPORT_SYMBOL_GPL(snd_pcm_stream_lock);
113 
114 /**
115  * snd_pcm_stream_unlock - Unlock the PCM stream
116  * @substream: PCM substream
117  *
118  * This unlocks the PCM stream that has been locked via snd_pcm_stream_lock().
119  */
120 void snd_pcm_stream_unlock(struct snd_pcm_substream *substream)
121 {
122 	snd_pcm_group_unlock(&substream->self_group, substream->pcm->nonatomic);
123 }
124 EXPORT_SYMBOL_GPL(snd_pcm_stream_unlock);
125 
126 /**
127  * snd_pcm_stream_lock_irq - Lock the PCM stream
128  * @substream: PCM substream
129  *
130  * This locks the PCM stream like snd_pcm_stream_lock() and disables the local
131  * IRQ (only when nonatomic is false).  In nonatomic case, this is identical
132  * as snd_pcm_stream_lock().
133  */
134 void snd_pcm_stream_lock_irq(struct snd_pcm_substream *substream)
135 {
136 	snd_pcm_group_lock_irq(&substream->self_group,
137 			       substream->pcm->nonatomic);
138 }
139 EXPORT_SYMBOL_GPL(snd_pcm_stream_lock_irq);
140 
141 static void snd_pcm_stream_lock_nested(struct snd_pcm_substream *substream)
142 {
143 	struct snd_pcm_group *group = &substream->self_group;
144 
145 	if (substream->pcm->nonatomic)
146 		mutex_lock_nested(&group->mutex, SINGLE_DEPTH_NESTING);
147 	else
148 		spin_lock_nested(&group->lock, SINGLE_DEPTH_NESTING);
149 }
150 
151 /**
152  * snd_pcm_stream_unlock_irq - Unlock the PCM stream
153  * @substream: PCM substream
154  *
155  * This is a counter-part of snd_pcm_stream_lock_irq().
156  */
157 void snd_pcm_stream_unlock_irq(struct snd_pcm_substream *substream)
158 {
159 	snd_pcm_group_unlock_irq(&substream->self_group,
160 				 substream->pcm->nonatomic);
161 }
162 EXPORT_SYMBOL_GPL(snd_pcm_stream_unlock_irq);
163 
164 unsigned long _snd_pcm_stream_lock_irqsave(struct snd_pcm_substream *substream)
165 {
166 	unsigned long flags = 0;
167 	if (substream->pcm->nonatomic)
168 		mutex_lock(&substream->self_group.mutex);
169 	else
170 		spin_lock_irqsave(&substream->self_group.lock, flags);
171 	return flags;
172 }
173 EXPORT_SYMBOL_GPL(_snd_pcm_stream_lock_irqsave);
174 
175 /**
176  * snd_pcm_stream_unlock_irqrestore - Unlock the PCM stream
177  * @substream: PCM substream
178  * @flags: irq flags
179  *
180  * This is a counter-part of snd_pcm_stream_lock_irqsave().
181  */
182 void snd_pcm_stream_unlock_irqrestore(struct snd_pcm_substream *substream,
183 				      unsigned long flags)
184 {
185 	if (substream->pcm->nonatomic)
186 		mutex_unlock(&substream->self_group.mutex);
187 	else
188 		spin_unlock_irqrestore(&substream->self_group.lock, flags);
189 }
190 EXPORT_SYMBOL_GPL(snd_pcm_stream_unlock_irqrestore);
191 
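/*
 * A minimal usage sketch of the irqsave variants from driver context (the
 * substream pointer is assumed valid and owned by the caller):
 *
 *	snd_pcm_uframes_t pos = 0;
 *	unsigned long flags;
 *
 *	snd_pcm_stream_lock_irqsave(substream, flags);
 *	if (snd_pcm_running(substream))
 *		pos = substream->runtime->status->hw_ptr;
 *	snd_pcm_stream_unlock_irqrestore(substream, flags);
 *
 * Helpers such as snd_pcm_period_elapsed() take the stream lock themselves,
 * so they must not be called while this lock is already held.
 */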
192 /* Run PCM ioctl ops */
193 static int snd_pcm_ops_ioctl(struct snd_pcm_substream *substream,
194 			     unsigned cmd, void *arg)
195 {
196 	if (substream->ops->ioctl)
197 		return substream->ops->ioctl(substream, cmd, arg);
198 	else
199 		return snd_pcm_lib_ioctl(substream, cmd, arg);
200 }
201 
202 int snd_pcm_info(struct snd_pcm_substream *substream, struct snd_pcm_info *info)
203 {
204 	struct snd_pcm *pcm = substream->pcm;
205 	struct snd_pcm_str *pstr = substream->pstr;
206 
207 	memset(info, 0, sizeof(*info));
208 	info->card = pcm->card->number;
209 	info->device = pcm->device;
210 	info->stream = substream->stream;
211 	info->subdevice = substream->number;
212 	strlcpy(info->id, pcm->id, sizeof(info->id));
213 	strlcpy(info->name, pcm->name, sizeof(info->name));
214 	info->dev_class = pcm->dev_class;
215 	info->dev_subclass = pcm->dev_subclass;
216 	info->subdevices_count = pstr->substream_count;
217 	info->subdevices_avail = pstr->substream_count - pstr->substream_opened;
218 	strlcpy(info->subname, substream->name, sizeof(info->subname));
219 
220 	return 0;
221 }
222 
223 int snd_pcm_info_user(struct snd_pcm_substream *substream,
224 		      struct snd_pcm_info __user * _info)
225 {
226 	struct snd_pcm_info *info;
227 	int err;
228 
229 	info = kmalloc(sizeof(*info), GFP_KERNEL);
230 	if (! info)
231 		return -ENOMEM;
232 	err = snd_pcm_info(substream, info);
233 	if (err >= 0) {
234 		if (copy_to_user(_info, info, sizeof(*info)))
235 			err = -EFAULT;
236 	}
237 	kfree(info);
238 	return err;
239 }
240 
241 /* macro for simplified cast */
242 #define PARAM_MASK_BIT(b)	(1U << (__force int)(b))
243 
244 static bool hw_support_mmap(struct snd_pcm_substream *substream)
245 {
246 	if (!(substream->runtime->hw.info & SNDRV_PCM_INFO_MMAP))
247 		return false;
248 
249 	if (substream->ops->mmap || substream->ops->page)
250 		return true;
251 
252 	switch (substream->dma_buffer.dev.type) {
253 	case SNDRV_DMA_TYPE_UNKNOWN:
254 		/* we can't know the device, so just assume that the driver does
255 		 * everything right
256 		 */
257 		return true;
258 	case SNDRV_DMA_TYPE_CONTINUOUS:
259 	case SNDRV_DMA_TYPE_VMALLOC:
260 		return true;
261 	default:
262 		return dma_can_mmap(substream->dma_buffer.dev.dev);
263 	}
264 }
265 
266 static int constrain_mask_params(struct snd_pcm_substream *substream,
267 				 struct snd_pcm_hw_params *params)
268 {
269 	struct snd_pcm_hw_constraints *constrs =
270 					&substream->runtime->hw_constraints;
271 	struct snd_mask *m;
272 	unsigned int k;
273 	struct snd_mask old_mask;
274 	int changed;
275 
276 	for (k = SNDRV_PCM_HW_PARAM_FIRST_MASK; k <= SNDRV_PCM_HW_PARAM_LAST_MASK; k++) {
277 		m = hw_param_mask(params, k);
278 		if (snd_mask_empty(m))
279 			return -EINVAL;
280 
281 		/* Skip this parameter if the caller did not request to change it. */
282 		if (!(params->rmask & PARAM_MASK_BIT(k)))
283 			continue;
284 
285 		if (trace_hw_mask_param_enabled())
286 			old_mask = *m;
287 
288 		changed = snd_mask_refine(m, constrs_mask(constrs, k));
289 		if (changed < 0)
290 			return changed;
291 		if (changed == 0)
292 			continue;
293 
294 		/* Set corresponding flag so that the caller gets it. */
295 		trace_hw_mask_param(substream, k, 0, &old_mask, m);
296 		params->cmask |= PARAM_MASK_BIT(k);
297 	}
298 
299 	return 0;
300 }
301 
302 static int constrain_interval_params(struct snd_pcm_substream *substream,
303 				     struct snd_pcm_hw_params *params)
304 {
305 	struct snd_pcm_hw_constraints *constrs =
306 					&substream->runtime->hw_constraints;
307 	struct snd_interval *i;
308 	unsigned int k;
309 	struct snd_interval old_interval;
310 	int changed;
311 
312 	for (k = SNDRV_PCM_HW_PARAM_FIRST_INTERVAL; k <= SNDRV_PCM_HW_PARAM_LAST_INTERVAL; k++) {
313 		i = hw_param_interval(params, k);
314 		if (snd_interval_empty(i))
315 			return -EINVAL;
316 
317 		/* Skip this parameter if the caller did not request to change it. */
318 		if (!(params->rmask & PARAM_MASK_BIT(k)))
319 			continue;
320 
321 		if (trace_hw_interval_param_enabled())
322 			old_interval = *i;
323 
324 		changed = snd_interval_refine(i, constrs_interval(constrs, k));
325 		if (changed < 0)
326 			return changed;
327 		if (changed == 0)
328 			continue;
329 
330 		/* Set corresponding flag so that the caller gets it. */
331 		trace_hw_interval_param(substream, k, 0, &old_interval, i);
332 		params->cmask |= PARAM_MASK_BIT(k);
333 	}
334 
335 	return 0;
336 }
337 
338 static int constrain_params_by_rules(struct snd_pcm_substream *substream,
339 				     struct snd_pcm_hw_params *params)
340 {
341 	struct snd_pcm_hw_constraints *constrs =
342 					&substream->runtime->hw_constraints;
343 	unsigned int k;
344 	unsigned int *rstamps;
345 	unsigned int vstamps[SNDRV_PCM_HW_PARAM_LAST_INTERVAL + 1];
346 	unsigned int stamp;
347 	struct snd_pcm_hw_rule *r;
348 	unsigned int d;
349 	struct snd_mask old_mask;
350 	struct snd_interval old_interval;
351 	bool again;
352 	int changed, err = 0;
353 
354 	/*
355 	 * Each application of a rule has its own sequence number.
356 	 *
357 	 * Each member of the 'rstamps' array represents the sequence number
358 	 * of the most recent application of the corresponding rule.
359 	 */
360 	rstamps = kcalloc(constrs->rules_num, sizeof(unsigned int), GFP_KERNEL);
361 	if (!rstamps)
362 		return -ENOMEM;
363 
364 	/*
365 	 * Each member of the 'vstamps' array represents the sequence number
366 	 * of the most recent rule application in which the corresponding
367 	 * parameter was changed.
368 	 *
369 	 * In the initial state, the elements corresponding to parameters
370 	 * requested by the caller are 1; for unrequested parameters the
371 	 * members stay 0 so that those parameters are never changed.
372 	 */
373 	for (k = 0; k <= SNDRV_PCM_HW_PARAM_LAST_INTERVAL; k++)
374 		vstamps[k] = (params->rmask & PARAM_MASK_BIT(k)) ? 1 : 0;
375 
376 	/* Due to the above design, actual sequence number starts at 2. */
377 	stamp = 2;
378 retry:
379 	/* Apply all rules in order. */
380 	again = false;
381 	for (k = 0; k < constrs->rules_num; k++) {
382 		r = &constrs->rules[k];
383 
384 		/*
385 		 * Check the condition bits of this rule.  When the rule has
386 		 * condition bits, it is processed only if the same bits are
387 		 * set in params->flags.  SNDRV_PCM_HW_PARAMS_NO_PERIOD_WAKEUP
388 		 * is an example of such a condition bit.
389 		 */
390 		if (r->cond && !(r->cond & params->flags))
391 			continue;
392 
393 		/*
394 		 * The 'deps' array holds at most three dependencies on
395 		 * SNDRV_PCM_HW_PARAM_XXXs for this rule; the fourth
396 		 * member of the array is a sentinel and must be a
397 		 * negative value.
398 		 *
399 		 * The rule is processed in this pass only when one of its
400 		 * dependent parameters was changed by an earlier application
401 		 * of another rule.
402 		 */
403 		for (d = 0; r->deps[d] >= 0; d++) {
404 			if (vstamps[r->deps[d]] > rstamps[k])
405 				break;
406 		}
407 		if (r->deps[d] < 0)
408 			continue;
409 
410 		if (trace_hw_mask_param_enabled()) {
411 			if (hw_is_mask(r->var))
412 				old_mask = *hw_param_mask(params, r->var);
413 		}
414 		if (trace_hw_interval_param_enabled()) {
415 			if (hw_is_interval(r->var))
416 				old_interval = *hw_param_interval(params, r->var);
417 		}
418 
419 		changed = r->func(params, r);
420 		if (changed < 0) {
421 			err = changed;
422 			goto out;
423 		}
424 
425 		/*
426 		 * When the parameter is changed, notify the caller via the
427 		 * corresponding bit in cmask, then prepare for the next
428 		 * iteration.
429 		 */
430 		if (changed && r->var >= 0) {
431 			if (hw_is_mask(r->var)) {
432 				trace_hw_mask_param(substream, r->var,
433 					k + 1, &old_mask,
434 					hw_param_mask(params, r->var));
435 			}
436 			if (hw_is_interval(r->var)) {
437 				trace_hw_interval_param(substream, r->var,
438 					k + 1, &old_interval,
439 					hw_param_interval(params, r->var));
440 			}
441 
442 			params->cmask |= PARAM_MASK_BIT(r->var);
443 			vstamps[r->var] = stamp;
444 			again = true;
445 		}
446 
447 		rstamps[k] = stamp++;
448 	}
449 
450 	/* Iterate to evaluate all rules till no parameters are changed. */
451 	if (again)
452 		goto retry;
453 
454  out:
455 	kfree(rstamps);
456 	return err;
457 }
458 
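/*
 * The rules iterated above are typically registered from a driver's .open
 * callback via snd_pcm_hw_rule_add().  A minimal sketch (the foo_* names are
 * hypothetical) that caps the rate interval once more than two channels have
 * been requested:
 *
 *	static int foo_rate_by_channels(struct snd_pcm_hw_params *params,
 *					struct snd_pcm_hw_rule *rule)
 *	{
 *		struct snd_interval *rate =
 *			hw_param_interval(params, SNDRV_PCM_HW_PARAM_RATE);
 *		const struct snd_interval *ch =
 *			hw_param_interval_c(params, SNDRV_PCM_HW_PARAM_CHANNELS);
 *		struct snd_interval limit = { .min = 8000, .max = 48000 };
 *
 *		if (ch->min > 2)
 *			return snd_interval_refine(rate, &limit);
 *		return 0;
 *	}
 *
 *	err = snd_pcm_hw_rule_add(substream->runtime, 0,
 *				  SNDRV_PCM_HW_PARAM_RATE,
 *				  foo_rate_by_channels, NULL,
 *				  SNDRV_PCM_HW_PARAM_CHANNELS, -1);
 *
 * The callback follows the convention used in this file: negative on error,
 * positive when the parameter space was narrowed, zero when unchanged.
 */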
459 static int fixup_unreferenced_params(struct snd_pcm_substream *substream,
460 				     struct snd_pcm_hw_params *params)
461 {
462 	const struct snd_interval *i;
463 	const struct snd_mask *m;
464 	int err;
465 
466 	if (!params->msbits) {
467 		i = hw_param_interval_c(params, SNDRV_PCM_HW_PARAM_SAMPLE_BITS);
468 		if (snd_interval_single(i))
469 			params->msbits = snd_interval_value(i);
470 	}
471 
472 	if (!params->rate_den) {
473 		i = hw_param_interval_c(params, SNDRV_PCM_HW_PARAM_RATE);
474 		if (snd_interval_single(i)) {
475 			params->rate_num = snd_interval_value(i);
476 			params->rate_den = 1;
477 		}
478 	}
479 
480 	if (!params->fifo_size) {
481 		m = hw_param_mask_c(params, SNDRV_PCM_HW_PARAM_FORMAT);
482 		i = hw_param_interval_c(params, SNDRV_PCM_HW_PARAM_CHANNELS);
483 		if (snd_mask_single(m) && snd_interval_single(i)) {
484 			err = snd_pcm_ops_ioctl(substream,
485 						SNDRV_PCM_IOCTL1_FIFO_SIZE,
486 						params);
487 			if (err < 0)
488 				return err;
489 		}
490 	}
491 
492 	if (!params->info) {
493 		params->info = substream->runtime->hw.info;
494 		params->info &= ~(SNDRV_PCM_INFO_FIFO_IN_FRAMES |
495 				  SNDRV_PCM_INFO_DRAIN_TRIGGER);
496 		if (!hw_support_mmap(substream))
497 			params->info &= ~(SNDRV_PCM_INFO_MMAP |
498 					  SNDRV_PCM_INFO_MMAP_VALID);
499 	}
500 
501 	return 0;
502 }
503 
504 int snd_pcm_hw_refine(struct snd_pcm_substream *substream,
505 		      struct snd_pcm_hw_params *params)
506 {
507 	int err;
508 
509 	params->info = 0;
510 	params->fifo_size = 0;
511 	if (params->rmask & PARAM_MASK_BIT(SNDRV_PCM_HW_PARAM_SAMPLE_BITS))
512 		params->msbits = 0;
513 	if (params->rmask & PARAM_MASK_BIT(SNDRV_PCM_HW_PARAM_RATE)) {
514 		params->rate_num = 0;
515 		params->rate_den = 0;
516 	}
517 
518 	err = constrain_mask_params(substream, params);
519 	if (err < 0)
520 		return err;
521 
522 	err = constrain_interval_params(substream, params);
523 	if (err < 0)
524 		return err;
525 
526 	err = constrain_params_by_rules(substream, params);
527 	if (err < 0)
528 		return err;
529 
530 	params->rmask = 0;
531 
532 	return 0;
533 }
534 EXPORT_SYMBOL(snd_pcm_hw_refine);
535 
536 static int snd_pcm_hw_refine_user(struct snd_pcm_substream *substream,
537 				  struct snd_pcm_hw_params __user * _params)
538 {
539 	struct snd_pcm_hw_params *params;
540 	int err;
541 
542 	params = memdup_user(_params, sizeof(*params));
543 	if (IS_ERR(params))
544 		return PTR_ERR(params);
545 
546 	err = snd_pcm_hw_refine(substream, params);
547 	if (err < 0)
548 		goto end;
549 
550 	err = fixup_unreferenced_params(substream, params);
551 	if (err < 0)
552 		goto end;
553 
554 	if (copy_to_user(_params, params, sizeof(*params)))
555 		err = -EFAULT;
556 end:
557 	kfree(params);
558 	return err;
559 }
560 
561 static int period_to_usecs(struct snd_pcm_runtime *runtime)
562 {
563 	int usecs;
564 
565 	if (! runtime->rate)
566 		return -1; /* invalid */
567 
568 	/* take 75% of period time as the deadline */
569 	usecs = (750000 / runtime->rate) * runtime->period_size;
570 	usecs += ((750000 % runtime->rate) * runtime->period_size) /
571 		runtime->rate;
572 
573 	return usecs;
574 }
575 
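/*
 * Worked example: with rate = 48000 Hz and period_size = 1024 frames the
 * period time is about 21.3 ms, and the computed deadline is
 * (750000 / 48000) * 1024 + ((750000 % 48000) * 1024) / 48000
 * = 15 * 1024 + 640 = 16000 us, i.e. 75% of the period time, which
 * snd_pcm_hw_params() then passes to the CPU latency QoS request.
 */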
576 static void snd_pcm_set_state(struct snd_pcm_substream *substream,
577 			      snd_pcm_state_t state)
578 {
579 	snd_pcm_stream_lock_irq(substream);
580 	if (substream->runtime->status->state != SNDRV_PCM_STATE_DISCONNECTED)
581 		substream->runtime->status->state = state;
582 	snd_pcm_stream_unlock_irq(substream);
583 }
584 
585 static inline void snd_pcm_timer_notify(struct snd_pcm_substream *substream,
586 					int event)
587 {
588 #ifdef CONFIG_SND_PCM_TIMER
589 	if (substream->timer)
590 		snd_timer_notify(substream->timer, event,
591 					&substream->runtime->trigger_tstamp);
592 #endif
593 }
594 
595 void snd_pcm_sync_stop(struct snd_pcm_substream *substream, bool sync_irq)
596 {
597 	if (substream->runtime && substream->runtime->stop_operating) {
598 		substream->runtime->stop_operating = false;
599 		if (substream->ops && substream->ops->sync_stop)
600 			substream->ops->sync_stop(substream);
601 		else if (sync_irq && substream->pcm->card->sync_irq > 0)
602 			synchronize_irq(substream->pcm->card->sync_irq);
603 	}
604 }
605 
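/*
 * Drivers that defer stop handling (e.g. to a work item queued from the
 * trigger callback) can provide a .sync_stop op so that it gets called here.
 * A sketch, with all foo_* names hypothetical:
 *
 *	static int foo_sync_stop(struct snd_pcm_substream *substream)
 *	{
 *		struct foo_chip *chip = snd_pcm_substream_chip(substream);
 *
 *		cancel_work_sync(&chip->stop_work);
 *		return 0;
 *	}
 *
 * Without such a callback, the code above falls back to synchronize_irq()
 * on the card's registered sync_irq.
 */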
606 /**
607  * snd_pcm_hw_params_choose - choose a configuration defined by @params
608  * @pcm: PCM substream instance
609  * @params: the hw_params instance
610  *
611  * Choose one configuration from the configuration space defined by @params.
612  * The chosen configuration is obtained by fixing the parameters in this order:
613  * first access, first format, first subformat, min channels,
614  * min rate, min period time, max buffer size, min tick time
615  *
616  * Return: Zero if successful, or a negative error code on failure.
617  */
618 static int snd_pcm_hw_params_choose(struct snd_pcm_substream *pcm,
619 				    struct snd_pcm_hw_params *params)
620 {
621 	static const int vars[] = {
622 		SNDRV_PCM_HW_PARAM_ACCESS,
623 		SNDRV_PCM_HW_PARAM_FORMAT,
624 		SNDRV_PCM_HW_PARAM_SUBFORMAT,
625 		SNDRV_PCM_HW_PARAM_CHANNELS,
626 		SNDRV_PCM_HW_PARAM_RATE,
627 		SNDRV_PCM_HW_PARAM_PERIOD_TIME,
628 		SNDRV_PCM_HW_PARAM_BUFFER_SIZE,
629 		SNDRV_PCM_HW_PARAM_TICK_TIME,
630 		-1
631 	};
632 	const int *v;
633 	struct snd_mask old_mask;
634 	struct snd_interval old_interval;
635 	int changed;
636 
637 	for (v = vars; *v != -1; v++) {
638 		/* Keep old parameter to trace. */
639 		if (trace_hw_mask_param_enabled()) {
640 			if (hw_is_mask(*v))
641 				old_mask = *hw_param_mask(params, *v);
642 		}
643 		if (trace_hw_interval_param_enabled()) {
644 			if (hw_is_interval(*v))
645 				old_interval = *hw_param_interval(params, *v);
646 		}
647 		if (*v != SNDRV_PCM_HW_PARAM_BUFFER_SIZE)
648 			changed = snd_pcm_hw_param_first(pcm, params, *v, NULL);
649 		else
650 			changed = snd_pcm_hw_param_last(pcm, params, *v, NULL);
651 		if (changed < 0)
652 			return changed;
653 		if (changed == 0)
654 			continue;
655 
656 		/* Trace the changed parameter. */
657 		if (hw_is_mask(*v)) {
658 			trace_hw_mask_param(pcm, *v, 0, &old_mask,
659 					    hw_param_mask(params, *v));
660 		}
661 		if (hw_is_interval(*v)) {
662 			trace_hw_interval_param(pcm, *v, 0, &old_interval,
663 						hw_param_interval(params, *v));
664 		}
665 	}
666 
667 	return 0;
668 }
669 
670 #if IS_ENABLED(CONFIG_SND_PCM_OSS)
671 #define is_oss_stream(substream)	((substream)->oss.oss)
672 #else
673 #define is_oss_stream(substream)	false
674 #endif
675 
676 static int snd_pcm_hw_params(struct snd_pcm_substream *substream,
677 			     struct snd_pcm_hw_params *params)
678 {
679 	struct snd_pcm_runtime *runtime;
680 	int err = 0, usecs;
681 	unsigned int bits;
682 	snd_pcm_uframes_t frames;
683 
684 	if (PCM_RUNTIME_CHECK(substream))
685 		return -ENXIO;
686 	runtime = substream->runtime;
687 	mutex_lock(&runtime->buffer_mutex);
688 	snd_pcm_stream_lock_irq(substream);
689 	switch (runtime->status->state) {
690 	case SNDRV_PCM_STATE_OPEN:
691 	case SNDRV_PCM_STATE_SETUP:
692 	case SNDRV_PCM_STATE_PREPARED:
693 		if (!is_oss_stream(substream) &&
694 		    atomic_read(&substream->mmap_count))
695 			err = -EBADFD;
696 		break;
697 	default:
698 		err = -EBADFD;
699 		break;
700 	}
701 	snd_pcm_stream_unlock_irq(substream);
702 	if (err)
703 		goto unlock;
704 
705 	snd_pcm_sync_stop(substream, true);
706 
707 	params->rmask = ~0U;
708 	err = snd_pcm_hw_refine(substream, params);
709 	if (err < 0)
710 		goto _error;
711 
712 	err = snd_pcm_hw_params_choose(substream, params);
713 	if (err < 0)
714 		goto _error;
715 
716 	err = fixup_unreferenced_params(substream, params);
717 	if (err < 0)
718 		goto _error;
719 
720 	if (substream->managed_buffer_alloc) {
721 		err = snd_pcm_lib_malloc_pages(substream,
722 					       params_buffer_bytes(params));
723 		if (err < 0)
724 			goto _error;
725 		runtime->buffer_changed = err > 0;
726 	}
727 
728 	if (substream->ops->hw_params != NULL) {
729 		err = substream->ops->hw_params(substream, params);
730 		if (err < 0)
731 			goto _error;
732 	}
733 
734 	runtime->access = params_access(params);
735 	runtime->format = params_format(params);
736 	runtime->subformat = params_subformat(params);
737 	runtime->channels = params_channels(params);
738 	runtime->rate = params_rate(params);
739 	runtime->period_size = params_period_size(params);
740 	runtime->periods = params_periods(params);
741 	runtime->buffer_size = params_buffer_size(params);
742 	runtime->info = params->info;
743 	runtime->rate_num = params->rate_num;
744 	runtime->rate_den = params->rate_den;
745 	runtime->no_period_wakeup =
746 			(params->info & SNDRV_PCM_INFO_NO_PERIOD_WAKEUP) &&
747 			(params->flags & SNDRV_PCM_HW_PARAMS_NO_PERIOD_WAKEUP);
748 
749 	bits = snd_pcm_format_physical_width(runtime->format);
750 	runtime->sample_bits = bits;
751 	bits *= runtime->channels;
752 	runtime->frame_bits = bits;
753 	frames = 1;
754 	while (bits % 8 != 0) {
755 		bits *= 2;
756 		frames *= 2;
757 	}
758 	runtime->byte_align = bits / 8;
759 	runtime->min_align = frames;
760 
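	/*
	 * Example: for a 4-bit-per-sample format such as IMA ADPCM in mono,
	 * frame_bits is 4, so the loop above yields byte_align = 1 and
	 * min_align = 2 frames; for byte-aligned formats like S16_LE the
	 * loop never runs and min_align stays 1.
	 */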
761 	/* Default sw params */
762 	runtime->tstamp_mode = SNDRV_PCM_TSTAMP_NONE;
763 	runtime->period_step = 1;
764 	runtime->control->avail_min = runtime->period_size;
765 	runtime->start_threshold = 1;
766 	runtime->stop_threshold = runtime->buffer_size;
767 	runtime->silence_threshold = 0;
768 	runtime->silence_size = 0;
769 	runtime->boundary = runtime->buffer_size;
770 	while (runtime->boundary * 2 <= LONG_MAX - runtime->buffer_size)
771 		runtime->boundary *= 2;
772 
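	/*
	 * 'boundary' is the wrap-around point for appl_ptr/hw_ptr arithmetic:
	 * the largest power-of-two multiple of the buffer size that still fits
	 * safely in a long, so that position differences never overflow.
	 */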
773 	/* clear the buffer for avoiding possible kernel info leaks */
774 	if (runtime->dma_area && !substream->ops->copy_user) {
775 		size_t size = runtime->dma_bytes;
776 
777 		if (runtime->info & SNDRV_PCM_INFO_MMAP)
778 			size = PAGE_ALIGN(size);
779 		memset(runtime->dma_area, 0, size);
780 	}
781 
782 	snd_pcm_timer_resolution_change(substream);
783 	snd_pcm_set_state(substream, SNDRV_PCM_STATE_SETUP);
784 
785 	if (cpu_latency_qos_request_active(&substream->latency_pm_qos_req))
786 		cpu_latency_qos_remove_request(&substream->latency_pm_qos_req);
787 	if ((usecs = period_to_usecs(runtime)) >= 0)
788 		cpu_latency_qos_add_request(&substream->latency_pm_qos_req,
789 					    usecs);
790 	err = 0;
791  _error:
792 	if (err) {
793 		/* hardware might be unusable from this time,
794 		 * so we force application to retry to set
795 		 * the correct hardware parameter settings
796 		 */
797 		snd_pcm_set_state(substream, SNDRV_PCM_STATE_OPEN);
798 		if (substream->ops->hw_free != NULL)
799 			substream->ops->hw_free(substream);
800 		if (substream->managed_buffer_alloc)
801 			snd_pcm_lib_free_pages(substream);
802 	}
803  unlock:
804 	mutex_unlock(&runtime->buffer_mutex);
805 	return err;
806 }
807 
808 static int snd_pcm_hw_params_user(struct snd_pcm_substream *substream,
809 				  struct snd_pcm_hw_params __user * _params)
810 {
811 	struct snd_pcm_hw_params *params;
812 	int err;
813 
814 	params = memdup_user(_params, sizeof(*params));
815 	if (IS_ERR(params))
816 		return PTR_ERR(params);
817 
818 	err = snd_pcm_hw_params(substream, params);
819 	if (err < 0)
820 		goto end;
821 
822 	if (copy_to_user(_params, params, sizeof(*params)))
823 		err = -EFAULT;
824 end:
825 	kfree(params);
826 	return err;
827 }
828 
829 static int do_hw_free(struct snd_pcm_substream *substream)
830 {
831 	int result = 0;
832 
833 	snd_pcm_sync_stop(substream, true);
834 	if (substream->ops->hw_free)
835 		result = substream->ops->hw_free(substream);
836 	if (substream->managed_buffer_alloc)
837 		snd_pcm_lib_free_pages(substream);
838 	return result;
839 }
840 
841 static int snd_pcm_hw_free(struct snd_pcm_substream *substream)
842 {
843 	struct snd_pcm_runtime *runtime;
844 	int result = 0;
845 
846 	if (PCM_RUNTIME_CHECK(substream))
847 		return -ENXIO;
848 	runtime = substream->runtime;
849 	mutex_lock(&runtime->buffer_mutex);
850 	snd_pcm_stream_lock_irq(substream);
851 	switch (runtime->status->state) {
852 	case SNDRV_PCM_STATE_SETUP:
853 	case SNDRV_PCM_STATE_PREPARED:
854 		if (atomic_read(&substream->mmap_count))
855 			result = -EBADFD;
856 		break;
857 	default:
858 		result = -EBADFD;
859 		break;
860 	}
861 	snd_pcm_stream_unlock_irq(substream);
862 	if (result)
863 		goto unlock;
864 	result = do_hw_free(substream);
865 	snd_pcm_set_state(substream, SNDRV_PCM_STATE_OPEN);
866 	cpu_latency_qos_remove_request(&substream->latency_pm_qos_req);
867  unlock:
868 	mutex_unlock(&runtime->buffer_mutex);
869 	return result;
870 }
871 
872 static int snd_pcm_sw_params(struct snd_pcm_substream *substream,
873 			     struct snd_pcm_sw_params *params)
874 {
875 	struct snd_pcm_runtime *runtime;
876 	int err;
877 
878 	if (PCM_RUNTIME_CHECK(substream))
879 		return -ENXIO;
880 	runtime = substream->runtime;
881 	snd_pcm_stream_lock_irq(substream);
882 	if (runtime->status->state == SNDRV_PCM_STATE_OPEN) {
883 		snd_pcm_stream_unlock_irq(substream);
884 		return -EBADFD;
885 	}
886 	snd_pcm_stream_unlock_irq(substream);
887 
888 	if (params->tstamp_mode < 0 ||
889 	    params->tstamp_mode > SNDRV_PCM_TSTAMP_LAST)
890 		return -EINVAL;
891 	if (params->proto >= SNDRV_PROTOCOL_VERSION(2, 0, 12) &&
892 	    params->tstamp_type > SNDRV_PCM_TSTAMP_TYPE_LAST)
893 		return -EINVAL;
894 	if (params->avail_min == 0)
895 		return -EINVAL;
896 	if (params->silence_size >= runtime->boundary) {
897 		if (params->silence_threshold != 0)
898 			return -EINVAL;
899 	} else {
900 		if (params->silence_size > params->silence_threshold)
901 			return -EINVAL;
902 		if (params->silence_threshold > runtime->buffer_size)
903 			return -EINVAL;
904 	}
905 	err = 0;
906 	snd_pcm_stream_lock_irq(substream);
907 	runtime->tstamp_mode = params->tstamp_mode;
908 	if (params->proto >= SNDRV_PROTOCOL_VERSION(2, 0, 12))
909 		runtime->tstamp_type = params->tstamp_type;
910 	runtime->period_step = params->period_step;
911 	runtime->control->avail_min = params->avail_min;
912 	runtime->start_threshold = params->start_threshold;
913 	runtime->stop_threshold = params->stop_threshold;
914 	runtime->silence_threshold = params->silence_threshold;
915 	runtime->silence_size = params->silence_size;
916         params->boundary = runtime->boundary;
917 	if (snd_pcm_running(substream)) {
918 		if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK &&
919 		    runtime->silence_size > 0)
920 			snd_pcm_playback_silence(substream, ULONG_MAX);
921 		err = snd_pcm_update_state(substream, runtime);
922 	}
923 	snd_pcm_stream_unlock_irq(substream);
924 	return err;
925 }
926 
927 static int snd_pcm_sw_params_user(struct snd_pcm_substream *substream,
928 				  struct snd_pcm_sw_params __user * _params)
929 {
930 	struct snd_pcm_sw_params params;
931 	int err;
932 	if (copy_from_user(&params, _params, sizeof(params)))
933 		return -EFAULT;
934 	err = snd_pcm_sw_params(substream, &params);
935 	if (copy_to_user(_params, &params, sizeof(params)))
936 		return -EFAULT;
937 	return err;
938 }
939 
940 static inline snd_pcm_uframes_t
941 snd_pcm_calc_delay(struct snd_pcm_substream *substream)
942 {
943 	snd_pcm_uframes_t delay;
944 
945 	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
946 		delay = snd_pcm_playback_hw_avail(substream->runtime);
947 	else
948 		delay = snd_pcm_capture_avail(substream->runtime);
949 	return delay + substream->runtime->delay;
950 }
951 
952 int snd_pcm_status64(struct snd_pcm_substream *substream,
953 		     struct snd_pcm_status64 *status)
954 {
955 	struct snd_pcm_runtime *runtime = substream->runtime;
956 
957 	snd_pcm_stream_lock_irq(substream);
958 
959 	snd_pcm_unpack_audio_tstamp_config(status->audio_tstamp_data,
960 					&runtime->audio_tstamp_config);
961 
962 	/* backwards compatible behavior */
963 	if (runtime->audio_tstamp_config.type_requested ==
964 		SNDRV_PCM_AUDIO_TSTAMP_TYPE_COMPAT) {
965 		if (runtime->hw.info & SNDRV_PCM_INFO_HAS_WALL_CLOCK)
966 			runtime->audio_tstamp_config.type_requested =
967 				SNDRV_PCM_AUDIO_TSTAMP_TYPE_LINK;
968 		else
969 			runtime->audio_tstamp_config.type_requested =
970 				SNDRV_PCM_AUDIO_TSTAMP_TYPE_DEFAULT;
971 		runtime->audio_tstamp_report.valid = 0;
972 	} else
973 		runtime->audio_tstamp_report.valid = 1;
974 
975 	status->state = runtime->status->state;
976 	status->suspended_state = runtime->status->suspended_state;
977 	if (status->state == SNDRV_PCM_STATE_OPEN)
978 		goto _end;
979 	status->trigger_tstamp_sec = runtime->trigger_tstamp.tv_sec;
980 	status->trigger_tstamp_nsec = runtime->trigger_tstamp.tv_nsec;
981 	if (snd_pcm_running(substream)) {
982 		snd_pcm_update_hw_ptr(substream);
983 		if (runtime->tstamp_mode == SNDRV_PCM_TSTAMP_ENABLE) {
984 			status->tstamp_sec = runtime->status->tstamp.tv_sec;
985 			status->tstamp_nsec =
986 				runtime->status->tstamp.tv_nsec;
987 			status->driver_tstamp_sec =
988 				runtime->driver_tstamp.tv_sec;
989 			status->driver_tstamp_nsec =
990 				runtime->driver_tstamp.tv_nsec;
991 			status->audio_tstamp_sec =
992 				runtime->status->audio_tstamp.tv_sec;
993 			status->audio_tstamp_nsec =
994 				runtime->status->audio_tstamp.tv_nsec;
995 			if (runtime->audio_tstamp_report.valid == 1)
996 				/* backwards compatibility, no report provided in COMPAT mode */
997 				snd_pcm_pack_audio_tstamp_report(&status->audio_tstamp_data,
998 								&status->audio_tstamp_accuracy,
999 								&runtime->audio_tstamp_report);
1000 
1001 			goto _tstamp_end;
1002 		}
1003 	} else {
1004 		/* get tstamp only in fallback mode and only if enabled */
1005 		if (runtime->tstamp_mode == SNDRV_PCM_TSTAMP_ENABLE) {
1006 			struct timespec64 tstamp;
1007 
1008 			snd_pcm_gettime(runtime, &tstamp);
1009 			status->tstamp_sec = tstamp.tv_sec;
1010 			status->tstamp_nsec = tstamp.tv_nsec;
1011 		}
1012 	}
1013  _tstamp_end:
1014 	status->appl_ptr = runtime->control->appl_ptr;
1015 	status->hw_ptr = runtime->status->hw_ptr;
1016 	status->avail = snd_pcm_avail(substream);
1017 	status->delay = snd_pcm_running(substream) ?
1018 		snd_pcm_calc_delay(substream) : 0;
1019 	status->avail_max = runtime->avail_max;
1020 	status->overrange = runtime->overrange;
1021 	runtime->avail_max = 0;
1022 	runtime->overrange = 0;
1023  _end:
1024  	snd_pcm_stream_unlock_irq(substream);
1025 	return 0;
1026 }
1027 
1028 static int snd_pcm_status_user64(struct snd_pcm_substream *substream,
1029 				 struct snd_pcm_status64 __user * _status,
1030 				 bool ext)
1031 {
1032 	struct snd_pcm_status64 status;
1033 	int res;
1034 
1035 	memset(&status, 0, sizeof(status));
1036 	/*
1037 	 * with extension, parameters are read/write,
1038 	 * get audio_tstamp_data from user,
1039 	 * ignore rest of status structure
1040 	 */
1041 	if (ext && get_user(status.audio_tstamp_data,
1042 				(u32 __user *)(&_status->audio_tstamp_data)))
1043 		return -EFAULT;
1044 	res = snd_pcm_status64(substream, &status);
1045 	if (res < 0)
1046 		return res;
1047 	if (copy_to_user(_status, &status, sizeof(status)))
1048 		return -EFAULT;
1049 	return 0;
1050 }
1051 
1052 static int snd_pcm_status_user32(struct snd_pcm_substream *substream,
1053 				 struct snd_pcm_status32 __user * _status,
1054 				 bool ext)
1055 {
1056 	struct snd_pcm_status64 status64;
1057 	struct snd_pcm_status32 status32;
1058 	int res;
1059 
1060 	memset(&status64, 0, sizeof(status64));
1061 	memset(&status32, 0, sizeof(status32));
1062 	/*
1063 	 * with extension, parameters are read/write,
1064 	 * get audio_tstamp_data from user,
1065 	 * ignore rest of status structure
1066 	 */
1067 	if (ext && get_user(status64.audio_tstamp_data,
1068 			    (u32 __user *)(&_status->audio_tstamp_data)))
1069 		return -EFAULT;
1070 	res = snd_pcm_status64(substream, &status64);
1071 	if (res < 0)
1072 		return res;
1073 
1074 	status32 = (struct snd_pcm_status32) {
1075 		.state = status64.state,
1076 		.trigger_tstamp_sec = status64.trigger_tstamp_sec,
1077 		.trigger_tstamp_nsec = status64.trigger_tstamp_nsec,
1078 		.tstamp_sec = status64.tstamp_sec,
1079 		.tstamp_nsec = status64.tstamp_nsec,
1080 		.appl_ptr = status64.appl_ptr,
1081 		.hw_ptr = status64.hw_ptr,
1082 		.delay = status64.delay,
1083 		.avail = status64.avail,
1084 		.avail_max = status64.avail_max,
1085 		.overrange = status64.overrange,
1086 		.suspended_state = status64.suspended_state,
1087 		.audio_tstamp_data = status64.audio_tstamp_data,
1088 		.audio_tstamp_sec = status64.audio_tstamp_sec,
1089 		.audio_tstamp_nsec = status64.audio_tstamp_nsec,
1090 		.driver_tstamp_sec = status64.driver_tstamp_sec,
1091 		.driver_tstamp_nsec = status64.driver_tstamp_nsec,
1092 		.audio_tstamp_accuracy = status64.audio_tstamp_accuracy,
1093 	};
1094 
1095 	if (copy_to_user(_status, &status32, sizeof(status32)))
1096 		return -EFAULT;
1097 
1098 	return 0;
1099 }
1100 
1101 static int snd_pcm_channel_info(struct snd_pcm_substream *substream,
1102 				struct snd_pcm_channel_info * info)
1103 {
1104 	struct snd_pcm_runtime *runtime;
1105 	unsigned int channel;
1106 
1107 	channel = info->channel;
1108 	runtime = substream->runtime;
1109 	snd_pcm_stream_lock_irq(substream);
1110 	if (runtime->status->state == SNDRV_PCM_STATE_OPEN) {
1111 		snd_pcm_stream_unlock_irq(substream);
1112 		return -EBADFD;
1113 	}
1114 	snd_pcm_stream_unlock_irq(substream);
1115 	if (channel >= runtime->channels)
1116 		return -EINVAL;
1117 	memset(info, 0, sizeof(*info));
1118 	info->channel = channel;
1119 	return snd_pcm_ops_ioctl(substream, SNDRV_PCM_IOCTL1_CHANNEL_INFO, info);
1120 }
1121 
1122 static int snd_pcm_channel_info_user(struct snd_pcm_substream *substream,
1123 				     struct snd_pcm_channel_info __user * _info)
1124 {
1125 	struct snd_pcm_channel_info info;
1126 	int res;
1127 
1128 	if (copy_from_user(&info, _info, sizeof(info)))
1129 		return -EFAULT;
1130 	res = snd_pcm_channel_info(substream, &info);
1131 	if (res < 0)
1132 		return res;
1133 	if (copy_to_user(_info, &info, sizeof(info)))
1134 		return -EFAULT;
1135 	return 0;
1136 }
1137 
1138 static void snd_pcm_trigger_tstamp(struct snd_pcm_substream *substream)
1139 {
1140 	struct snd_pcm_runtime *runtime = substream->runtime;
1141 	if (runtime->trigger_master == NULL)
1142 		return;
1143 	if (runtime->trigger_master == substream) {
1144 		if (!runtime->trigger_tstamp_latched)
1145 			snd_pcm_gettime(runtime, &runtime->trigger_tstamp);
1146 	} else {
1147 		snd_pcm_trigger_tstamp(runtime->trigger_master);
1148 		runtime->trigger_tstamp = runtime->trigger_master->runtime->trigger_tstamp;
1149 	}
1150 	runtime->trigger_master = NULL;
1151 }
1152 
1153 #define ACTION_ARG_IGNORE	(__force snd_pcm_state_t)0
1154 
1155 struct action_ops {
1156 	int (*pre_action)(struct snd_pcm_substream *substream,
1157 			  snd_pcm_state_t state);
1158 	int (*do_action)(struct snd_pcm_substream *substream,
1159 			 snd_pcm_state_t state);
1160 	void (*undo_action)(struct snd_pcm_substream *substream,
1161 			    snd_pcm_state_t state);
1162 	void (*post_action)(struct snd_pcm_substream *substream,
1163 			    snd_pcm_state_t state);
1164 };
1165 
1166 /*
1167  *  this function is the core for handling linked streams
1168  *  Note: the stream state might change even on failure
1169  *  Note2: call with calling stream lock + link lock
1170  */
1171 static int snd_pcm_action_group(const struct action_ops *ops,
1172 				struct snd_pcm_substream *substream,
1173 				snd_pcm_state_t state,
1174 				bool stream_lock)
1175 {
1176 	struct snd_pcm_substream *s = NULL;
1177 	struct snd_pcm_substream *s1;
1178 	int res = 0, depth = 1;
1179 
1180 	snd_pcm_group_for_each_entry(s, substream) {
1181 		if (s != substream) {
1182 			if (!stream_lock)
1183 				mutex_lock_nested(&s->runtime->buffer_mutex, depth);
1184 			else if (s->pcm->nonatomic)
1185 				mutex_lock_nested(&s->self_group.mutex, depth);
1186 			else
1187 				spin_lock_nested(&s->self_group.lock, depth);
1188 			depth++;
1189 		}
1190 		res = ops->pre_action(s, state);
1191 		if (res < 0)
1192 			goto _unlock;
1193 	}
1194 	snd_pcm_group_for_each_entry(s, substream) {
1195 		res = ops->do_action(s, state);
1196 		if (res < 0) {
1197 			if (ops->undo_action) {
1198 				snd_pcm_group_for_each_entry(s1, substream) {
1199 					if (s1 == s) /* failed stream */
1200 						break;
1201 					ops->undo_action(s1, state);
1202 				}
1203 			}
1204 			s = NULL; /* unlock all */
1205 			goto _unlock;
1206 		}
1207 	}
1208 	snd_pcm_group_for_each_entry(s, substream) {
1209 		ops->post_action(s, state);
1210 	}
1211  _unlock:
1212 	/* unlock streams */
1213 	snd_pcm_group_for_each_entry(s1, substream) {
1214 		if (s1 != substream) {
1215 			if (!stream_lock)
1216 				mutex_unlock(&s1->runtime->buffer_mutex);
1217 			else if (s1->pcm->nonatomic)
1218 				mutex_unlock(&s1->self_group.mutex);
1219 			else
1220 				spin_unlock(&s1->self_group.lock);
1221 		}
1222 		if (s1 == s)	/* end */
1223 			break;
1224 	}
1225 	return res;
1226 }
1227 
1228 /*
1229  *  Note: call with stream lock
1230  */
1231 static int snd_pcm_action_single(const struct action_ops *ops,
1232 				 struct snd_pcm_substream *substream,
1233 				 snd_pcm_state_t state)
1234 {
1235 	int res;
1236 
1237 	res = ops->pre_action(substream, state);
1238 	if (res < 0)
1239 		return res;
1240 	res = ops->do_action(substream, state);
1241 	if (res == 0)
1242 		ops->post_action(substream, state);
1243 	else if (ops->undo_action)
1244 		ops->undo_action(substream, state);
1245 	return res;
1246 }
1247 
1248 static void snd_pcm_group_assign(struct snd_pcm_substream *substream,
1249 				 struct snd_pcm_group *new_group)
1250 {
1251 	substream->group = new_group;
1252 	list_move(&substream->link_list, &new_group->substreams);
1253 }
1254 
1255 /*
1256  * Unref and unlock the group, but keep the stream lock;
1257  * when the group becomes empty and no longer referred, destroy itself
1258  */
1259 static void snd_pcm_group_unref(struct snd_pcm_group *group,
1260 				struct snd_pcm_substream *substream)
1261 {
1262 	bool do_free;
1263 
1264 	if (!group)
1265 		return;
1266 	do_free = refcount_dec_and_test(&group->refs);
1267 	snd_pcm_group_unlock(group, substream->pcm->nonatomic);
1268 	if (do_free)
1269 		kfree(group);
1270 }
1271 
1272 /*
1273  * Lock the group inside a stream lock and reference it;
1274  * return the locked group object, or NULL if not linked
1275  */
1276 static struct snd_pcm_group *
1277 snd_pcm_stream_group_ref(struct snd_pcm_substream *substream)
1278 {
1279 	bool nonatomic = substream->pcm->nonatomic;
1280 	struct snd_pcm_group *group;
1281 	bool trylock;
1282 
1283 	for (;;) {
1284 		if (!snd_pcm_stream_linked(substream))
1285 			return NULL;
1286 		group = substream->group;
1287 		/* block freeing the group object */
1288 		refcount_inc(&group->refs);
1289 
1290 		trylock = nonatomic ? mutex_trylock(&group->mutex) :
1291 			spin_trylock(&group->lock);
1292 		if (trylock)
1293 			break; /* OK */
1294 
1295 		/* re-lock for avoiding ABBA deadlock */
1296 		snd_pcm_stream_unlock(substream);
1297 		snd_pcm_group_lock(group, nonatomic);
1298 		snd_pcm_stream_lock(substream);
1299 
1300 		/* check the group again; the above opens a small race window */
1301 		if (substream->group == group)
1302 			break; /* OK */
1303 		/* group changed, try again */
1304 		snd_pcm_group_unref(group, substream);
1305 	}
1306 	return group;
1307 }
1308 
1309 /*
1310  *  Note: call with stream lock
1311  */
1312 static int snd_pcm_action(const struct action_ops *ops,
1313 			  struct snd_pcm_substream *substream,
1314 			  snd_pcm_state_t state)
1315 {
1316 	struct snd_pcm_group *group;
1317 	int res;
1318 
1319 	group = snd_pcm_stream_group_ref(substream);
1320 	if (group)
1321 		res = snd_pcm_action_group(ops, substream, state, true);
1322 	else
1323 		res = snd_pcm_action_single(ops, substream, state);
1324 	snd_pcm_group_unref(group, substream);
1325 	return res;
1326 }
1327 
1328 /*
1329  *  Note: don't use any locks before
1330  */
1331 static int snd_pcm_action_lock_irq(const struct action_ops *ops,
1332 				   struct snd_pcm_substream *substream,
1333 				   snd_pcm_state_t state)
1334 {
1335 	int res;
1336 
1337 	snd_pcm_stream_lock_irq(substream);
1338 	res = snd_pcm_action(ops, substream, state);
1339 	snd_pcm_stream_unlock_irq(substream);
1340 	return res;
1341 }
1342 
1343 /*
1344  */
1345 static int snd_pcm_action_nonatomic(const struct action_ops *ops,
1346 				    struct snd_pcm_substream *substream,
1347 				    snd_pcm_state_t state)
1348 {
1349 	int res;
1350 
1351 	/* Guarantee the group members won't change during non-atomic action */
1352 	down_read(&snd_pcm_link_rwsem);
1353 	mutex_lock(&substream->runtime->buffer_mutex);
1354 	if (snd_pcm_stream_linked(substream))
1355 		res = snd_pcm_action_group(ops, substream, state, false);
1356 	else
1357 		res = snd_pcm_action_single(ops, substream, state);
1358 	mutex_unlock(&substream->runtime->buffer_mutex);
1359 	up_read(&snd_pcm_link_rwsem);
1360 	return res;
1361 }
1362 
1363 /*
1364  * start callbacks
1365  */
1366 static int snd_pcm_pre_start(struct snd_pcm_substream *substream,
1367 			     snd_pcm_state_t state)
1368 {
1369 	struct snd_pcm_runtime *runtime = substream->runtime;
1370 	if (runtime->status->state != SNDRV_PCM_STATE_PREPARED)
1371 		return -EBADFD;
1372 	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK &&
1373 	    !snd_pcm_playback_data(substream))
1374 		return -EPIPE;
1375 	runtime->trigger_tstamp_latched = false;
1376 	runtime->trigger_master = substream;
1377 	return 0;
1378 }
1379 
1380 static int snd_pcm_do_start(struct snd_pcm_substream *substream,
1381 			    snd_pcm_state_t state)
1382 {
1383 	if (substream->runtime->trigger_master != substream)
1384 		return 0;
1385 	return substream->ops->trigger(substream, SNDRV_PCM_TRIGGER_START);
1386 }
1387 
1388 static void snd_pcm_undo_start(struct snd_pcm_substream *substream,
1389 			       snd_pcm_state_t state)
1390 {
1391 	if (substream->runtime->trigger_master == substream)
1392 		substream->ops->trigger(substream, SNDRV_PCM_TRIGGER_STOP);
1393 }
1394 
1395 static void snd_pcm_post_start(struct snd_pcm_substream *substream,
1396 			       snd_pcm_state_t state)
1397 {
1398 	struct snd_pcm_runtime *runtime = substream->runtime;
1399 	snd_pcm_trigger_tstamp(substream);
1400 	runtime->hw_ptr_jiffies = jiffies;
1401 	runtime->hw_ptr_buffer_jiffies = (runtime->buffer_size * HZ) /
1402 							    runtime->rate;
1403 	runtime->status->state = state;
1404 	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK &&
1405 	    runtime->silence_size > 0)
1406 		snd_pcm_playback_silence(substream, ULONG_MAX);
1407 	snd_pcm_timer_notify(substream, SNDRV_TIMER_EVENT_MSTART);
1408 }
1409 
1410 static const struct action_ops snd_pcm_action_start = {
1411 	.pre_action = snd_pcm_pre_start,
1412 	.do_action = snd_pcm_do_start,
1413 	.undo_action = snd_pcm_undo_start,
1414 	.post_action = snd_pcm_post_start
1415 };
1416 
1417 /**
1418  * snd_pcm_start - start all linked streams
1419  * @substream: the PCM substream instance
1420  *
1421  * Return: Zero if successful, or a negative error code.
1422  * The stream lock must be acquired before calling this function.
1423  */
1424 int snd_pcm_start(struct snd_pcm_substream *substream)
1425 {
1426 	return snd_pcm_action(&snd_pcm_action_start, substream,
1427 			      SNDRV_PCM_STATE_RUNNING);
1428 }
1429 
1430 /* take the stream lock and start the streams */
1431 static int snd_pcm_start_lock_irq(struct snd_pcm_substream *substream)
1432 {
1433 	return snd_pcm_action_lock_irq(&snd_pcm_action_start, substream,
1434 				       SNDRV_PCM_STATE_RUNNING);
1435 }
1436 
1437 /*
1438  * stop callbacks
1439  */
1440 static int snd_pcm_pre_stop(struct snd_pcm_substream *substream,
1441 			    snd_pcm_state_t state)
1442 {
1443 	struct snd_pcm_runtime *runtime = substream->runtime;
1444 	if (runtime->status->state == SNDRV_PCM_STATE_OPEN)
1445 		return -EBADFD;
1446 	runtime->trigger_master = substream;
1447 	return 0;
1448 }
1449 
1450 static int snd_pcm_do_stop(struct snd_pcm_substream *substream,
1451 			   snd_pcm_state_t state)
1452 {
1453 	if (substream->runtime->trigger_master == substream &&
1454 	    snd_pcm_running(substream)) {
1455 		substream->ops->trigger(substream, SNDRV_PCM_TRIGGER_STOP);
1456 		substream->runtime->stop_operating = true;
1457 	}
1458 	return 0; /* unconditionally stop all substreams */
1459 }
1460 
1461 static void snd_pcm_post_stop(struct snd_pcm_substream *substream,
1462 			      snd_pcm_state_t state)
1463 {
1464 	struct snd_pcm_runtime *runtime = substream->runtime;
1465 	if (runtime->status->state != state) {
1466 		snd_pcm_trigger_tstamp(substream);
1467 		runtime->status->state = state;
1468 		snd_pcm_timer_notify(substream, SNDRV_TIMER_EVENT_MSTOP);
1469 	}
1470 	wake_up(&runtime->sleep);
1471 	wake_up(&runtime->tsleep);
1472 }
1473 
1474 static const struct action_ops snd_pcm_action_stop = {
1475 	.pre_action = snd_pcm_pre_stop,
1476 	.do_action = snd_pcm_do_stop,
1477 	.post_action = snd_pcm_post_stop
1478 };
1479 
1480 /**
1481  * snd_pcm_stop - try to stop all running streams in the substream group
1482  * @substream: the PCM substream instance
1483  * @state: PCM state after stopping the stream
1484  *
1485  * The state of each stream is then changed to the given state unconditionally.
1486  *
1487  * Return: Zero if successful, or a negative error code.
1488  */
1489 int snd_pcm_stop(struct snd_pcm_substream *substream, snd_pcm_state_t state)
1490 {
1491 	return snd_pcm_action(&snd_pcm_action_stop, substream, state);
1492 }
1493 EXPORT_SYMBOL(snd_pcm_stop);
1494 
1495 /**
1496  * snd_pcm_drain_done - stop the DMA only when the given stream is playback
1497  * @substream: the PCM substream
1498  *
1499  * After stopping, the state is changed to SETUP.
1500  * Unlike snd_pcm_stop(), this affects only the given stream.
1501  *
1502  * Return: Zero if successful, or a negative error code.
1503  */
1504 int snd_pcm_drain_done(struct snd_pcm_substream *substream)
1505 {
1506 	return snd_pcm_action_single(&snd_pcm_action_stop, substream,
1507 				     SNDRV_PCM_STATE_SETUP);
1508 }
1509 
1510 /**
1511  * snd_pcm_stop_xrun - stop the running streams as XRUN
1512  * @substream: the PCM substream instance
1513  *
1514  * This stops the given running substream (and all linked substreams) as XRUN.
1515  * Unlike snd_pcm_stop(), this function takes the substream lock by itself.
1516  *
1517  * Return: Zero if successful, or a negative error code.
1518  */
1519 int snd_pcm_stop_xrun(struct snd_pcm_substream *substream)
1520 {
1521 	unsigned long flags;
1522 
1523 	snd_pcm_stream_lock_irqsave(substream, flags);
1524 	if (substream->runtime && snd_pcm_running(substream))
1525 		__snd_pcm_xrun(substream);
1526 	snd_pcm_stream_unlock_irqrestore(substream, flags);
1527 	return 0;
1528 }
1529 EXPORT_SYMBOL_GPL(snd_pcm_stop_xrun);
1530 
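/*
 * Typical caller: a driver's interrupt handler or error path that detects a
 * FIFO overrun/underrun outside the normal pointer update flow, e.g. (sketch,
 * foo_* names hypothetical):
 *
 *	if (foo_read_status(chip) & FOO_FIFO_ERROR)
 *		snd_pcm_stop_xrun(chip->substream);
 *
 * The caller must not hold the stream lock, since it is taken here.
 */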
1531 /*
1532  * pause callbacks: pass boolean (to start pause or resume) as state argument
1533  */
1534 #define pause_pushed(state)	(__force bool)(state)
1535 
1536 static int snd_pcm_pre_pause(struct snd_pcm_substream *substream,
1537 			     snd_pcm_state_t state)
1538 {
1539 	struct snd_pcm_runtime *runtime = substream->runtime;
1540 	if (!(runtime->info & SNDRV_PCM_INFO_PAUSE))
1541 		return -ENOSYS;
1542 	if (pause_pushed(state)) {
1543 		if (runtime->status->state != SNDRV_PCM_STATE_RUNNING)
1544 			return -EBADFD;
1545 	} else if (runtime->status->state != SNDRV_PCM_STATE_PAUSED)
1546 		return -EBADFD;
1547 	runtime->trigger_master = substream;
1548 	return 0;
1549 }
1550 
1551 static int snd_pcm_do_pause(struct snd_pcm_substream *substream,
1552 			    snd_pcm_state_t state)
1553 {
1554 	if (substream->runtime->trigger_master != substream)
1555 		return 0;
1556 	/* some drivers might use hw_ptr to recover from the pause -
1557 	   update the hw_ptr now */
1558 	if (pause_pushed(state))
1559 		snd_pcm_update_hw_ptr(substream);
1560 	/* The jiffies check in snd_pcm_update_hw_ptr*() is based on a delta
1561 	 * from the current jiffies; pushing hw_ptr_jiffies far into the past
1562 	 * makes that delta large enough to effectively skip the check once.
1563 	 */
1564 	substream->runtime->hw_ptr_jiffies = jiffies - HZ * 1000;
1565 	return substream->ops->trigger(substream,
1566 				       pause_pushed(state) ?
1567 				       SNDRV_PCM_TRIGGER_PAUSE_PUSH :
1568 				       SNDRV_PCM_TRIGGER_PAUSE_RELEASE);
1569 }
1570 
1571 static void snd_pcm_undo_pause(struct snd_pcm_substream *substream,
1572 			       snd_pcm_state_t state)
1573 {
1574 	if (substream->runtime->trigger_master == substream)
1575 		substream->ops->trigger(substream,
1576 					pause_pushed(state) ?
1577 					SNDRV_PCM_TRIGGER_PAUSE_RELEASE :
1578 					SNDRV_PCM_TRIGGER_PAUSE_PUSH);
1579 }
1580 
1581 static void snd_pcm_post_pause(struct snd_pcm_substream *substream,
1582 			       snd_pcm_state_t state)
1583 {
1584 	struct snd_pcm_runtime *runtime = substream->runtime;
1585 	snd_pcm_trigger_tstamp(substream);
1586 	if (pause_pushed(state)) {
1587 		runtime->status->state = SNDRV_PCM_STATE_PAUSED;
1588 		snd_pcm_timer_notify(substream, SNDRV_TIMER_EVENT_MPAUSE);
1589 		wake_up(&runtime->sleep);
1590 		wake_up(&runtime->tsleep);
1591 	} else {
1592 		runtime->status->state = SNDRV_PCM_STATE_RUNNING;
1593 		snd_pcm_timer_notify(substream, SNDRV_TIMER_EVENT_MCONTINUE);
1594 	}
1595 }
1596 
1597 static const struct action_ops snd_pcm_action_pause = {
1598 	.pre_action = snd_pcm_pre_pause,
1599 	.do_action = snd_pcm_do_pause,
1600 	.undo_action = snd_pcm_undo_pause,
1601 	.post_action = snd_pcm_post_pause
1602 };
1603 
1604 /*
1605  * Push/release the pause for all linked streams.
1606  */
1607 static int snd_pcm_pause(struct snd_pcm_substream *substream, bool push)
1608 {
1609 	return snd_pcm_action(&snd_pcm_action_pause, substream,
1610 			      (__force snd_pcm_state_t)push);
1611 }
1612 
1613 static int snd_pcm_pause_lock_irq(struct snd_pcm_substream *substream,
1614 				  bool push)
1615 {
1616 	return snd_pcm_action_lock_irq(&snd_pcm_action_pause, substream,
1617 				       (__force snd_pcm_state_t)push);
1618 }
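
/*
 * Editorial example (not part of the original source): the pause action
 * above reaches the driver as SNDRV_PCM_TRIGGER_PAUSE_PUSH/RELEASE in its
 * .trigger callback.  A minimal sketch of how a hypothetical driver might
 * handle it; foo_chip, foo_dma_start() and foo_dma_pause() are assumed
 * names, not a real API.
 */
#if 0	/* illustrative sketch only */
static int foo_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
{
	struct foo_chip *chip = snd_pcm_substream_chip(substream);

	switch (cmd) {
	case SNDRV_PCM_TRIGGER_START:
	case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
		foo_dma_start(chip);	/* (re)start the DMA engine */
		return 0;
	case SNDRV_PCM_TRIGGER_STOP:
	case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
		foo_dma_pause(chip);	/* halt DMA but keep the position */
		return 0;
	default:
		return -EINVAL;
	}
}
#endif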
1619 
1620 #ifdef CONFIG_PM
1621 /* suspend callback: state argument ignored */
1622 
1623 static int snd_pcm_pre_suspend(struct snd_pcm_substream *substream,
1624 			       snd_pcm_state_t state)
1625 {
1626 	struct snd_pcm_runtime *runtime = substream->runtime;
1627 	switch (runtime->status->state) {
1628 	case SNDRV_PCM_STATE_SUSPENDED:
1629 		return -EBUSY;
1630 	/* unresumable PCM state; return -EBUSY for skipping suspend */
1631 	case SNDRV_PCM_STATE_OPEN:
1632 	case SNDRV_PCM_STATE_SETUP:
1633 	case SNDRV_PCM_STATE_DISCONNECTED:
1634 		return -EBUSY;
1635 	}
1636 	runtime->trigger_master = substream;
1637 	return 0;
1638 }
1639 
1640 static int snd_pcm_do_suspend(struct snd_pcm_substream *substream,
1641 			      snd_pcm_state_t state)
1642 {
1643 	struct snd_pcm_runtime *runtime = substream->runtime;
1644 	if (runtime->trigger_master != substream)
1645 		return 0;
1646 	if (!snd_pcm_running(substream))
1647 		return 0;
1648 	substream->ops->trigger(substream, SNDRV_PCM_TRIGGER_SUSPEND);
1649 	runtime->stop_operating = true;
1650 	return 0; /* suspend unconditionally */
1651 }
1652 
1653 static void snd_pcm_post_suspend(struct snd_pcm_substream *substream,
1654 				 snd_pcm_state_t state)
1655 {
1656 	struct snd_pcm_runtime *runtime = substream->runtime;
1657 	snd_pcm_trigger_tstamp(substream);
1658 	runtime->status->suspended_state = runtime->status->state;
1659 	runtime->status->state = SNDRV_PCM_STATE_SUSPENDED;
1660 	snd_pcm_timer_notify(substream, SNDRV_TIMER_EVENT_MSUSPEND);
1661 	wake_up(&runtime->sleep);
1662 	wake_up(&runtime->tsleep);
1663 }
1664 
1665 static const struct action_ops snd_pcm_action_suspend = {
1666 	.pre_action = snd_pcm_pre_suspend,
1667 	.do_action = snd_pcm_do_suspend,
1668 	.post_action = snd_pcm_post_suspend
1669 };
1670 
1671 /*
1672  * snd_pcm_suspend - trigger SUSPEND to all linked streams
1673  * @substream: the PCM substream
1674  *
1675  * After this call, all streams are changed to SUSPENDED state.
1676  *
1677  * Return: Zero if successful, or a negative error code.
1678  */
1679 static int snd_pcm_suspend(struct snd_pcm_substream *substream)
1680 {
1681 	int err;
1682 	unsigned long flags;
1683 
1684 	snd_pcm_stream_lock_irqsave(substream, flags);
1685 	err = snd_pcm_action(&snd_pcm_action_suspend, substream,
1686 			     ACTION_ARG_IGNORE);
1687 	snd_pcm_stream_unlock_irqrestore(substream, flags);
1688 	return err;
1689 }
1690 
1691 /**
1692  * snd_pcm_suspend_all - trigger SUSPEND to all substreams in the given pcm
1693  * @pcm: the PCM instance
1694  *
1695  * After this call, all streams are changed to SUSPENDED state.
1696  *
1697  * Return: Zero if successful (or @pcm is %NULL), or a negative error code.
1698  */
1699 int snd_pcm_suspend_all(struct snd_pcm *pcm)
1700 {
1701 	struct snd_pcm_substream *substream;
1702 	int stream, err = 0;
1703 
1704 	if (!pcm)
1705 		return 0;
1706 
1707 	for (stream = 0; stream < 2; stream++) {
1708 		for (substream = pcm->streams[stream].substream;
1709 		     substream; substream = substream->next) {
1710 			/* FIXME: the open/close code should lock this as well */
1711 			if (substream->runtime == NULL)
1712 				continue;
1713 
1714 			/*
1715 			 * Skip BE dai link PCM's that are internal and may
1716 			 * not have their substream ops set.
1717 			 */
1718 			if (!substream->ops)
1719 				continue;
1720 
1721 			err = snd_pcm_suspend(substream);
1722 			if (err < 0 && err != -EBUSY)
1723 				return err;
1724 		}
1725 	}
1726 
1727 	for (stream = 0; stream < 2; stream++)
1728 		for (substream = pcm->streams[stream].substream;
1729 		     substream; substream = substream->next)
1730 			snd_pcm_sync_stop(substream, false);
1731 
1732 	return 0;
1733 }
1734 EXPORT_SYMBOL(snd_pcm_suspend_all);
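
/*
 * Editorial example (not part of the original source): drivers have
 * traditionally called snd_pcm_suspend_all() from their system-suspend
 * callback so that every substream reaches the SUSPENDED state before the
 * hardware is powered down.  A minimal sketch, assuming a hypothetical
 * foo_chip that owns a single snd_pcm instance.
 */
#if 0	/* illustrative sketch only */
static int foo_suspend(struct device *dev)
{
	struct foo_chip *chip = dev_get_drvdata(dev);
	int err;

	err = snd_pcm_suspend_all(chip->pcm);	/* suspend all substreams */
	if (err < 0)
		return err;
	/* ... save registers and power down the hardware here ... */
	return 0;
}
#endif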
1735 
1736 /* resume callbacks: state argument ignored */
1737 
1738 static int snd_pcm_pre_resume(struct snd_pcm_substream *substream,
1739 			      snd_pcm_state_t state)
1740 {
1741 	struct snd_pcm_runtime *runtime = substream->runtime;
1742 	if (!(runtime->info & SNDRV_PCM_INFO_RESUME))
1743 		return -ENOSYS;
1744 	runtime->trigger_master = substream;
1745 	return 0;
1746 }
1747 
1748 static int snd_pcm_do_resume(struct snd_pcm_substream *substream,
1749 			     snd_pcm_state_t state)
1750 {
1751 	struct snd_pcm_runtime *runtime = substream->runtime;
1752 	if (runtime->trigger_master != substream)
1753 		return 0;
1754 	/* DMA not running previously? */
1755 	if (runtime->status->suspended_state != SNDRV_PCM_STATE_RUNNING &&
1756 	    (runtime->status->suspended_state != SNDRV_PCM_STATE_DRAINING ||
1757 	     substream->stream != SNDRV_PCM_STREAM_PLAYBACK))
1758 		return 0;
1759 	return substream->ops->trigger(substream, SNDRV_PCM_TRIGGER_RESUME);
1760 }
1761 
1762 static void snd_pcm_undo_resume(struct snd_pcm_substream *substream,
1763 				snd_pcm_state_t state)
1764 {
1765 	if (substream->runtime->trigger_master == substream &&
1766 	    snd_pcm_running(substream))
1767 		substream->ops->trigger(substream, SNDRV_PCM_TRIGGER_SUSPEND);
1768 }
1769 
1770 static void snd_pcm_post_resume(struct snd_pcm_substream *substream,
1771 				snd_pcm_state_t state)
1772 {
1773 	struct snd_pcm_runtime *runtime = substream->runtime;
1774 	snd_pcm_trigger_tstamp(substream);
1775 	runtime->status->state = runtime->status->suspended_state;
1776 	snd_pcm_timer_notify(substream, SNDRV_TIMER_EVENT_MRESUME);
1777 }
1778 
1779 static const struct action_ops snd_pcm_action_resume = {
1780 	.pre_action = snd_pcm_pre_resume,
1781 	.do_action = snd_pcm_do_resume,
1782 	.undo_action = snd_pcm_undo_resume,
1783 	.post_action = snd_pcm_post_resume
1784 };
1785 
1786 static int snd_pcm_resume(struct snd_pcm_substream *substream)
1787 {
1788 	return snd_pcm_action_lock_irq(&snd_pcm_action_resume, substream,
1789 				       ACTION_ARG_IGNORE);
1790 }
1791 
1792 #else
1793 
1794 static int snd_pcm_resume(struct snd_pcm_substream *substream)
1795 {
1796 	return -ENOSYS;
1797 }
1798 
1799 #endif /* CONFIG_PM */
1800 
1801 /*
1802  * xrun ioctl
1803  *
1804  * Change the RUNNING stream(s) to XRUN state.
1805  */
1806 static int snd_pcm_xrun(struct snd_pcm_substream *substream)
1807 {
1808 	struct snd_pcm_runtime *runtime = substream->runtime;
1809 	int result;
1810 
1811 	snd_pcm_stream_lock_irq(substream);
1812 	switch (runtime->status->state) {
1813 	case SNDRV_PCM_STATE_XRUN:
1814 		result = 0;	/* already there */
1815 		break;
1816 	case SNDRV_PCM_STATE_RUNNING:
1817 		__snd_pcm_xrun(substream);
1818 		result = 0;
1819 		break;
1820 	default:
1821 		result = -EBADFD;
1822 	}
1823 	snd_pcm_stream_unlock_irq(substream);
1824 	return result;
1825 }
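
/*
 * Editorial example (not part of the original source): the xrun ioctl above
 * can be issued directly from user space to force a RUNNING stream into the
 * XRUN state (useful for debugging/testing).  A hedged user-space sketch,
 * assuming the UAPI header is available as <sound/asound.h>.
 */
#if 0	/* illustrative sketch only; user-space code */
#include <sys/ioctl.h>
#include <sound/asound.h>

static int force_xrun(int pcm_fd)
{
	/* 0 on success; -1 with errno (e.g. EBADFD) if not RUNNING or XRUN */
	return ioctl(pcm_fd, SNDRV_PCM_IOCTL_XRUN);
}
#endif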
1826 
1827 /*
1828  * reset ioctl
1829  */
1830 /* reset callbacks:  state argument ignored */
1831 static int snd_pcm_pre_reset(struct snd_pcm_substream *substream,
1832 			     snd_pcm_state_t state)
1833 {
1834 	struct snd_pcm_runtime *runtime = substream->runtime;
1835 	switch (runtime->status->state) {
1836 	case SNDRV_PCM_STATE_RUNNING:
1837 	case SNDRV_PCM_STATE_PREPARED:
1838 	case SNDRV_PCM_STATE_PAUSED:
1839 	case SNDRV_PCM_STATE_SUSPENDED:
1840 		return 0;
1841 	default:
1842 		return -EBADFD;
1843 	}
1844 }
1845 
1846 static int snd_pcm_do_reset(struct snd_pcm_substream *substream,
1847 			    snd_pcm_state_t state)
1848 {
1849 	struct snd_pcm_runtime *runtime = substream->runtime;
1850 	int err = snd_pcm_ops_ioctl(substream, SNDRV_PCM_IOCTL1_RESET, NULL);
1851 	if (err < 0)
1852 		return err;
1853 	runtime->hw_ptr_base = 0;
1854 	runtime->hw_ptr_interrupt = runtime->status->hw_ptr -
1855 		runtime->status->hw_ptr % runtime->period_size;
1856 	runtime->silence_start = runtime->status->hw_ptr;
1857 	runtime->silence_filled = 0;
1858 	return 0;
1859 }
1860 
1861 static void snd_pcm_post_reset(struct snd_pcm_substream *substream,
1862 			       snd_pcm_state_t state)
1863 {
1864 	struct snd_pcm_runtime *runtime = substream->runtime;
1865 	runtime->control->appl_ptr = runtime->status->hw_ptr;
1866 	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK &&
1867 	    runtime->silence_size > 0)
1868 		snd_pcm_playback_silence(substream, ULONG_MAX);
1869 }
1870 
1871 static const struct action_ops snd_pcm_action_reset = {
1872 	.pre_action = snd_pcm_pre_reset,
1873 	.do_action = snd_pcm_do_reset,
1874 	.post_action = snd_pcm_post_reset
1875 };
1876 
1877 static int snd_pcm_reset(struct snd_pcm_substream *substream)
1878 {
1879 	return snd_pcm_action_nonatomic(&snd_pcm_action_reset, substream,
1880 					ACTION_ARG_IGNORE);
1881 }
1882 
1883 /*
1884  * prepare ioctl
1885  */
1886 /* pass f_flags as state argument */
1887 static int snd_pcm_pre_prepare(struct snd_pcm_substream *substream,
1888 			       snd_pcm_state_t state)
1889 {
1890 	struct snd_pcm_runtime *runtime = substream->runtime;
1891 	int f_flags = (__force int)state;
1892 
1893 	if (runtime->status->state == SNDRV_PCM_STATE_OPEN ||
1894 	    runtime->status->state == SNDRV_PCM_STATE_DISCONNECTED)
1895 		return -EBADFD;
1896 	if (snd_pcm_running(substream))
1897 		return -EBUSY;
1898 	substream->f_flags = f_flags;
1899 	return 0;
1900 }
1901 
1902 static int snd_pcm_do_prepare(struct snd_pcm_substream *substream,
1903 			      snd_pcm_state_t state)
1904 {
1905 	int err;
1906 	snd_pcm_sync_stop(substream, true);
1907 	err = substream->ops->prepare(substream);
1908 	if (err < 0)
1909 		return err;
1910 	return snd_pcm_do_reset(substream, state);
1911 }
1912 
1913 static void snd_pcm_post_prepare(struct snd_pcm_substream *substream,
1914 				 snd_pcm_state_t state)
1915 {
1916 	struct snd_pcm_runtime *runtime = substream->runtime;
1917 	runtime->control->appl_ptr = runtime->status->hw_ptr;
1918 	snd_pcm_set_state(substream, SNDRV_PCM_STATE_PREPARED);
1919 }
1920 
1921 static const struct action_ops snd_pcm_action_prepare = {
1922 	.pre_action = snd_pcm_pre_prepare,
1923 	.do_action = snd_pcm_do_prepare,
1924 	.post_action = snd_pcm_post_prepare
1925 };
1926 
1927 /**
1928  * snd_pcm_prepare - prepare the PCM substream to be triggerable
1929  * @substream: the PCM substream instance
1930  * @file: file to refer f_flags
1931  *
1932  * Return: Zero if successful, or a negative error code.
1933  */
1934 static int snd_pcm_prepare(struct snd_pcm_substream *substream,
1935 			   struct file *file)
1936 {
1937 	int f_flags;
1938 
1939 	if (file)
1940 		f_flags = file->f_flags;
1941 	else
1942 		f_flags = substream->f_flags;
1943 
1944 	snd_pcm_stream_lock_irq(substream);
1945 	switch (substream->runtime->status->state) {
1946 	case SNDRV_PCM_STATE_PAUSED:
1947 		snd_pcm_pause(substream, false);
1948 		fallthrough;
1949 	case SNDRV_PCM_STATE_SUSPENDED:
1950 		snd_pcm_stop(substream, SNDRV_PCM_STATE_SETUP);
1951 		break;
1952 	}
1953 	snd_pcm_stream_unlock_irq(substream);
1954 
1955 	return snd_pcm_action_nonatomic(&snd_pcm_action_prepare,
1956 					substream,
1957 					(__force snd_pcm_state_t)f_flags);
1958 }
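
/*
 * Editorial example (not part of the original source): user space typically
 * reaches snd_pcm_prepare() when recovering from an xrun -- after a transfer
 * fails with EPIPE, re-preparing brings the stream back to the PREPARED
 * state so it can be restarted.  A hedged user-space sketch using the raw
 * ioctl.
 */
#if 0	/* illustrative sketch only; user-space code */
#include <errno.h>
#include <sys/ioctl.h>
#include <sound/asound.h>

static int recover_from_xrun(int pcm_fd)
{
	/* SNDRV_PCM_IOCTL_PREPARE leaves XRUN and re-runs the prepare chain */
	if (ioctl(pcm_fd, SNDRV_PCM_IOCTL_PREPARE) < 0)
		return -errno;
	return 0;
}
#endif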
1959 
1960 /*
1961  * drain ioctl
1962  */
1963 
1964 /* drain init callbacks: state argument ignored */
1965 static int snd_pcm_pre_drain_init(struct snd_pcm_substream *substream,
1966 				  snd_pcm_state_t state)
1967 {
1968 	struct snd_pcm_runtime *runtime = substream->runtime;
1969 	switch (runtime->status->state) {
1970 	case SNDRV_PCM_STATE_OPEN:
1971 	case SNDRV_PCM_STATE_DISCONNECTED:
1972 	case SNDRV_PCM_STATE_SUSPENDED:
1973 		return -EBADFD;
1974 	}
1975 	runtime->trigger_master = substream;
1976 	return 0;
1977 }
1978 
1979 static int snd_pcm_do_drain_init(struct snd_pcm_substream *substream,
1980 				 snd_pcm_state_t state)
1981 {
1982 	struct snd_pcm_runtime *runtime = substream->runtime;
1983 	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
1984 		switch (runtime->status->state) {
1985 		case SNDRV_PCM_STATE_PREPARED:
1986 			/* start playback stream if possible */
1987 			if (!snd_pcm_playback_empty(substream)) {
1988 				snd_pcm_do_start(substream, SNDRV_PCM_STATE_DRAINING);
1989 				snd_pcm_post_start(substream, SNDRV_PCM_STATE_DRAINING);
1990 			} else {
1991 				runtime->status->state = SNDRV_PCM_STATE_SETUP;
1992 			}
1993 			break;
1994 		case SNDRV_PCM_STATE_RUNNING:
1995 			runtime->status->state = SNDRV_PCM_STATE_DRAINING;
1996 			break;
1997 		case SNDRV_PCM_STATE_XRUN:
1998 			runtime->status->state = SNDRV_PCM_STATE_SETUP;
1999 			break;
2000 		default:
2001 			break;
2002 		}
2003 	} else {
2004 		/* stop running stream */
2005 		if (runtime->status->state == SNDRV_PCM_STATE_RUNNING) {
2006 			snd_pcm_state_t new_state;
2007 
2008 			new_state = snd_pcm_capture_avail(runtime) > 0 ?
2009 				SNDRV_PCM_STATE_DRAINING : SNDRV_PCM_STATE_SETUP;
2010 			snd_pcm_do_stop(substream, new_state);
2011 			snd_pcm_post_stop(substream, new_state);
2012 		}
2013 	}
2014 
2015 	if (runtime->status->state == SNDRV_PCM_STATE_DRAINING &&
2016 	    runtime->trigger_master == substream &&
2017 	    (runtime->hw.info & SNDRV_PCM_INFO_DRAIN_TRIGGER))
2018 		return substream->ops->trigger(substream,
2019 					       SNDRV_PCM_TRIGGER_DRAIN);
2020 
2021 	return 0;
2022 }
2023 
2024 static void snd_pcm_post_drain_init(struct snd_pcm_substream *substream,
2025 				    snd_pcm_state_t state)
2026 {
2027 }
2028 
2029 static const struct action_ops snd_pcm_action_drain_init = {
2030 	.pre_action = snd_pcm_pre_drain_init,
2031 	.do_action = snd_pcm_do_drain_init,
2032 	.post_action = snd_pcm_post_drain_init
2033 };
2034 
2035 /*
2036  * Drain the stream(s).
2037  * When the substream is linked, sync until the draining of all playback streams
2038  * is finished.
2039  * After this call, all streams are supposed to be in either the SETUP
2040  * state or, for capture only, the DRAINING state.
2041  */
2042 static int snd_pcm_drain(struct snd_pcm_substream *substream,
2043 			 struct file *file)
2044 {
2045 	struct snd_card *card;
2046 	struct snd_pcm_runtime *runtime;
2047 	struct snd_pcm_substream *s;
2048 	struct snd_pcm_group *group;
2049 	wait_queue_entry_t wait;
2050 	int result = 0;
2051 	int nonblock = 0;
2052 
2053 	card = substream->pcm->card;
2054 	runtime = substream->runtime;
2055 
2056 	if (runtime->status->state == SNDRV_PCM_STATE_OPEN)
2057 		return -EBADFD;
2058 
2059 	if (file) {
2060 		if (file->f_flags & O_NONBLOCK)
2061 			nonblock = 1;
2062 	} else if (substream->f_flags & O_NONBLOCK)
2063 		nonblock = 1;
2064 
2065 	snd_pcm_stream_lock_irq(substream);
2066 	/* resume pause */
2067 	if (runtime->status->state == SNDRV_PCM_STATE_PAUSED)
2068 		snd_pcm_pause(substream, false);
2069 
2070 	/* pre-start/stop - all running streams are changed to DRAINING state */
2071 	result = snd_pcm_action(&snd_pcm_action_drain_init, substream,
2072 				ACTION_ARG_IGNORE);
2073 	if (result < 0)
2074 		goto unlock;
2075 	/* in non-blocking, we don't wait in ioctl but let caller poll */
2076 	if (nonblock) {
2077 		result = -EAGAIN;
2078 		goto unlock;
2079 	}
2080 
2081 	for (;;) {
2082 		long tout;
2083 		struct snd_pcm_runtime *to_check;
2084 		if (signal_pending(current)) {
2085 			result = -ERESTARTSYS;
2086 			break;
2087 		}
2088 		/* find a substream to drain */
2089 		to_check = NULL;
2090 		group = snd_pcm_stream_group_ref(substream);
2091 		snd_pcm_group_for_each_entry(s, substream) {
2092 			if (s->stream != SNDRV_PCM_STREAM_PLAYBACK)
2093 				continue;
2094 			runtime = s->runtime;
2095 			if (runtime->status->state == SNDRV_PCM_STATE_DRAINING) {
2096 				to_check = runtime;
2097 				break;
2098 			}
2099 		}
2100 		snd_pcm_group_unref(group, substream);
2101 		if (!to_check)
2102 			break; /* all drained */
2103 		init_waitqueue_entry(&wait, current);
2104 		set_current_state(TASK_INTERRUPTIBLE);
2105 		add_wait_queue(&to_check->sleep, &wait);
2106 		snd_pcm_stream_unlock_irq(substream);
2107 		if (runtime->no_period_wakeup)
2108 			tout = MAX_SCHEDULE_TIMEOUT;
2109 		else {
2110 			tout = 10;
2111 			if (runtime->rate) {
2112 				long t = runtime->period_size * 2 / runtime->rate;
2113 				tout = max(t, tout);
2114 			}
2115 			tout = msecs_to_jiffies(tout * 1000);
2116 		}
2117 		tout = schedule_timeout(tout);
2118 
2119 		snd_pcm_stream_lock_irq(substream);
2120 		group = snd_pcm_stream_group_ref(substream);
2121 		snd_pcm_group_for_each_entry(s, substream) {
2122 			if (s->runtime == to_check) {
2123 				remove_wait_queue(&to_check->sleep, &wait);
2124 				break;
2125 			}
2126 		}
2127 		snd_pcm_group_unref(group, substream);
2128 
2129 		if (card->shutdown) {
2130 			result = -ENODEV;
2131 			break;
2132 		}
2133 		if (tout == 0) {
2134 			if (substream->runtime->status->state == SNDRV_PCM_STATE_SUSPENDED)
2135 				result = -ESTRPIPE;
2136 			else {
2137 				dev_dbg(substream->pcm->card->dev,
2138 					"playback drain error (DMA or IRQ trouble?)\n");
2139 				snd_pcm_stop(substream, SNDRV_PCM_STATE_SETUP);
2140 				result = -EIO;
2141 			}
2142 			break;
2143 		}
2144 	}
2145 
2146  unlock:
2147 	snd_pcm_stream_unlock_irq(substream);
2148 
2149 	return result;
2150 }
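
/*
 * Editorial note (not part of the original source): worked example of the
 * wait timeout computed above.  With period_size = 1024 frames at
 * rate = 48000 Hz, 2 * 1024 / 48000 truncates to 0 s, so tout stays at the
 * 10 s floor and becomes msecs_to_jiffies(10 * 1000).  Only periods longer
 * than about 5 s of audio raise the timeout above that floor.
 */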
2151 
2152 /*
2153  * drop ioctl
2154  *
2155  * Immediately put all linked substreams into SETUP state.
2156  */
2157 static int snd_pcm_drop(struct snd_pcm_substream *substream)
2158 {
2159 	struct snd_pcm_runtime *runtime;
2160 	int result = 0;
2161 
2162 	if (PCM_RUNTIME_CHECK(substream))
2163 		return -ENXIO;
2164 	runtime = substream->runtime;
2165 
2166 	if (runtime->status->state == SNDRV_PCM_STATE_OPEN ||
2167 	    runtime->status->state == SNDRV_PCM_STATE_DISCONNECTED)
2168 		return -EBADFD;
2169 
2170 	snd_pcm_stream_lock_irq(substream);
2171 	/* resume pause */
2172 	if (runtime->status->state == SNDRV_PCM_STATE_PAUSED)
2173 		snd_pcm_pause(substream, false);
2174 
2175 	snd_pcm_stop(substream, SNDRV_PCM_STATE_SETUP);
2176 	/* runtime->control->appl_ptr = runtime->status->hw_ptr; */
2177 	snd_pcm_stream_unlock_irq(substream);
2178 
2179 	return result;
2180 }
2181 
2182 
2183 static bool is_pcm_file(struct file *file)
2184 {
2185 	struct inode *inode = file_inode(file);
2186 	struct snd_pcm *pcm;
2187 	unsigned int minor;
2188 
2189 	if (!S_ISCHR(inode->i_mode) || imajor(inode) != snd_major)
2190 		return false;
2191 	minor = iminor(inode);
2192 	pcm = snd_lookup_minor_data(minor, SNDRV_DEVICE_TYPE_PCM_PLAYBACK);
2193 	if (!pcm)
2194 		pcm = snd_lookup_minor_data(minor, SNDRV_DEVICE_TYPE_PCM_CAPTURE);
2195 	if (!pcm)
2196 		return false;
2197 	snd_card_unref(pcm->card);
2198 	return true;
2199 }
2200 
2201 /*
2202  * PCM link handling
2203  */
2204 static int snd_pcm_link(struct snd_pcm_substream *substream, int fd)
2205 {
2206 	int res = 0;
2207 	struct snd_pcm_file *pcm_file;
2208 	struct snd_pcm_substream *substream1;
2209 	struct snd_pcm_group *group, *target_group;
2210 	bool nonatomic = substream->pcm->nonatomic;
2211 	struct fd f = fdget(fd);
2212 
2213 	if (!f.file)
2214 		return -EBADFD;
2215 	if (!is_pcm_file(f.file)) {
2216 		res = -EBADFD;
2217 		goto _badf;
2218 	}
2219 	pcm_file = f.file->private_data;
2220 	substream1 = pcm_file->substream;
2221 
2222 	if (substream == substream1) {
2223 		res = -EINVAL;
2224 		goto _badf;
2225 	}
2226 
2227 	group = kzalloc(sizeof(*group), GFP_KERNEL);
2228 	if (!group) {
2229 		res = -ENOMEM;
2230 		goto _nolock;
2231 	}
2232 	snd_pcm_group_init(group);
2233 
2234 	down_write(&snd_pcm_link_rwsem);
2235 	if (substream->runtime->status->state == SNDRV_PCM_STATE_OPEN ||
2236 	    substream->runtime->status->state != substream1->runtime->status->state ||
2237 	    substream->pcm->nonatomic != substream1->pcm->nonatomic) {
2238 		res = -EBADFD;
2239 		goto _end;
2240 	}
2241 	if (snd_pcm_stream_linked(substream1)) {
2242 		res = -EALREADY;
2243 		goto _end;
2244 	}
2245 
2246 	snd_pcm_stream_lock_irq(substream);
2247 	if (!snd_pcm_stream_linked(substream)) {
2248 		snd_pcm_group_assign(substream, group);
2249 		group = NULL; /* assigned, don't free this one below */
2250 	}
2251 	target_group = substream->group;
2252 	snd_pcm_stream_unlock_irq(substream);
2253 
2254 	snd_pcm_group_lock_irq(target_group, nonatomic);
2255 	snd_pcm_stream_lock_nested(substream1);
2256 	snd_pcm_group_assign(substream1, target_group);
2257 	refcount_inc(&target_group->refs);
2258 	snd_pcm_stream_unlock(substream1);
2259 	snd_pcm_group_unlock_irq(target_group, nonatomic);
2260  _end:
2261 	up_write(&snd_pcm_link_rwsem);
2262  _nolock:
2263 	kfree(group);
2264  _badf:
2265 	fdput(f);
2266 	return res;
2267 }
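
/*
 * Editorial example (not part of the original source): SNDRV_PCM_IOCTL_LINK
 * takes the file descriptor of the other PCM device as its argument, so two
 * already-open streams can afterwards be started/stopped as one group
 * (alsa-lib wraps this as snd_pcm_link()).  A hedged user-space sketch.
 */
#if 0	/* illustrative sketch only; user-space code */
#include <sys/ioctl.h>
#include <sound/asound.h>

static int link_streams(int playback_fd, int capture_fd)
{
	/* both substreams must be in the same (non-OPEN) state */
	return ioctl(playback_fd, SNDRV_PCM_IOCTL_LINK, capture_fd);
}

static int unlink_stream(int pcm_fd)
{
	return ioctl(pcm_fd, SNDRV_PCM_IOCTL_UNLINK);
}
#endif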
2268 
2269 static void relink_to_local(struct snd_pcm_substream *substream)
2270 {
2271 	snd_pcm_stream_lock_nested(substream);
2272 	snd_pcm_group_assign(substream, &substream->self_group);
2273 	snd_pcm_stream_unlock(substream);
2274 }
2275 
2276 static int snd_pcm_unlink(struct snd_pcm_substream *substream)
2277 {
2278 	struct snd_pcm_group *group;
2279 	bool nonatomic = substream->pcm->nonatomic;
2280 	bool do_free = false;
2281 	int res = 0;
2282 
2283 	down_write(&snd_pcm_link_rwsem);
2284 
2285 	if (!snd_pcm_stream_linked(substream)) {
2286 		res = -EALREADY;
2287 		goto _end;
2288 	}
2289 
2290 	group = substream->group;
2291 	snd_pcm_group_lock_irq(group, nonatomic);
2292 
2293 	relink_to_local(substream);
2294 	refcount_dec(&group->refs);
2295 
2296 	/* detach the last stream, too */
2297 	if (list_is_singular(&group->substreams)) {
2298 		relink_to_local(list_first_entry(&group->substreams,
2299 						 struct snd_pcm_substream,
2300 						 link_list));
2301 		do_free = refcount_dec_and_test(&group->refs);
2302 	}
2303 
2304 	snd_pcm_group_unlock_irq(group, nonatomic);
2305 	if (do_free)
2306 		kfree(group);
2307 
2308  _end:
2309 	up_write(&snd_pcm_link_rwsem);
2310 	return res;
2311 }
2312 
2313 /*
2314  * hw configurator
2315  */
2316 static int snd_pcm_hw_rule_mul(struct snd_pcm_hw_params *params,
2317 			       struct snd_pcm_hw_rule *rule)
2318 {
2319 	struct snd_interval t;
2320 	snd_interval_mul(hw_param_interval_c(params, rule->deps[0]),
2321 		     hw_param_interval_c(params, rule->deps[1]), &t);
2322 	return snd_interval_refine(hw_param_interval(params, rule->var), &t);
2323 }
2324 
2325 static int snd_pcm_hw_rule_div(struct snd_pcm_hw_params *params,
2326 			       struct snd_pcm_hw_rule *rule)
2327 {
2328 	struct snd_interval t;
2329 	snd_interval_div(hw_param_interval_c(params, rule->deps[0]),
2330 		     hw_param_interval_c(params, rule->deps[1]), &t);
2331 	return snd_interval_refine(hw_param_interval(params, rule->var), &t);
2332 }
2333 
2334 static int snd_pcm_hw_rule_muldivk(struct snd_pcm_hw_params *params,
2335 				   struct snd_pcm_hw_rule *rule)
2336 {
2337 	struct snd_interval t;
2338 	snd_interval_muldivk(hw_param_interval_c(params, rule->deps[0]),
2339 			 hw_param_interval_c(params, rule->deps[1]),
2340 			 (unsigned long) rule->private, &t);
2341 	return snd_interval_refine(hw_param_interval(params, rule->var), &t);
2342 }
2343 
2344 static int snd_pcm_hw_rule_mulkdiv(struct snd_pcm_hw_params *params,
2345 				   struct snd_pcm_hw_rule *rule)
2346 {
2347 	struct snd_interval t;
2348 	snd_interval_mulkdiv(hw_param_interval_c(params, rule->deps[0]),
2349 			 (unsigned long) rule->private,
2350 			 hw_param_interval_c(params, rule->deps[1]), &t);
2351 	return snd_interval_refine(hw_param_interval(params, rule->var), &t);
2352 }
2353 
2354 static int snd_pcm_hw_rule_format(struct snd_pcm_hw_params *params,
2355 				  struct snd_pcm_hw_rule *rule)
2356 {
2357 	snd_pcm_format_t k;
2358 	const struct snd_interval *i =
2359 				hw_param_interval_c(params, rule->deps[0]);
2360 	struct snd_mask m;
2361 	struct snd_mask *mask = hw_param_mask(params, SNDRV_PCM_HW_PARAM_FORMAT);
2362 	snd_mask_any(&m);
2363 	pcm_for_each_format(k) {
2364 		int bits;
2365 		if (!snd_mask_test_format(mask, k))
2366 			continue;
2367 		bits = snd_pcm_format_physical_width(k);
2368 		if (bits <= 0)
2369 			continue; /* ignore invalid formats */
2370 		if ((unsigned)bits < i->min || (unsigned)bits > i->max)
2371 			snd_mask_reset(&m, (__force unsigned)k);
2372 	}
2373 	return snd_mask_refine(mask, &m);
2374 }
2375 
2376 static int snd_pcm_hw_rule_sample_bits(struct snd_pcm_hw_params *params,
2377 				       struct snd_pcm_hw_rule *rule)
2378 {
2379 	struct snd_interval t;
2380 	snd_pcm_format_t k;
2381 
2382 	t.min = UINT_MAX;
2383 	t.max = 0;
2384 	t.openmin = 0;
2385 	t.openmax = 0;
2386 	pcm_for_each_format(k) {
2387 		int bits;
2388 		if (!snd_mask_test_format(hw_param_mask(params, SNDRV_PCM_HW_PARAM_FORMAT), k))
2389 			continue;
2390 		bits = snd_pcm_format_physical_width(k);
2391 		if (bits <= 0)
2392 			continue; /* ignore invalid formats */
2393 		if (t.min > (unsigned)bits)
2394 			t.min = bits;
2395 		if (t.max < (unsigned)bits)
2396 			t.max = bits;
2397 	}
2398 	t.integer = 1;
2399 	return snd_interval_refine(hw_param_interval(params, rule->var), &t);
2400 }
2401 
2402 #if SNDRV_PCM_RATE_5512 != 1 << 0 || SNDRV_PCM_RATE_192000 != 1 << 12
2403 #error "Change this table"
2404 #endif
2405 
2406 static const unsigned int rates[] = {
2407 	5512, 8000, 11025, 16000, 22050, 32000, 44100,
2408 	48000, 64000, 88200, 96000, 176400, 192000, 352800, 384000
2409 };
2410 
2411 const struct snd_pcm_hw_constraint_list snd_pcm_known_rates = {
2412 	.count = ARRAY_SIZE(rates),
2413 	.list = rates,
2414 };
2415 
2416 static int snd_pcm_hw_rule_rate(struct snd_pcm_hw_params *params,
2417 				struct snd_pcm_hw_rule *rule)
2418 {
2419 	struct snd_pcm_hardware *hw = rule->private;
2420 	return snd_interval_list(hw_param_interval(params, rule->var),
2421 				 snd_pcm_known_rates.count,
2422 				 snd_pcm_known_rates.list, hw->rates);
2423 }
2424 
2425 static int snd_pcm_hw_rule_buffer_bytes_max(struct snd_pcm_hw_params *params,
2426 					    struct snd_pcm_hw_rule *rule)
2427 {
2428 	struct snd_interval t;
2429 	struct snd_pcm_substream *substream = rule->private;
2430 	t.min = 0;
2431 	t.max = substream->buffer_bytes_max;
2432 	t.openmin = 0;
2433 	t.openmax = 0;
2434 	t.integer = 1;
2435 	return snd_interval_refine(hw_param_interval(params, rule->var), &t);
2436 }
2437 
2438 static int snd_pcm_hw_constraints_init(struct snd_pcm_substream *substream)
2439 {
2440 	struct snd_pcm_runtime *runtime = substream->runtime;
2441 	struct snd_pcm_hw_constraints *constrs = &runtime->hw_constraints;
2442 	int k, err;
2443 
2444 	for (k = SNDRV_PCM_HW_PARAM_FIRST_MASK; k <= SNDRV_PCM_HW_PARAM_LAST_MASK; k++) {
2445 		snd_mask_any(constrs_mask(constrs, k));
2446 	}
2447 
2448 	for (k = SNDRV_PCM_HW_PARAM_FIRST_INTERVAL; k <= SNDRV_PCM_HW_PARAM_LAST_INTERVAL; k++) {
2449 		snd_interval_any(constrs_interval(constrs, k));
2450 	}
2451 
2452 	snd_interval_setinteger(constrs_interval(constrs, SNDRV_PCM_HW_PARAM_CHANNELS));
2453 	snd_interval_setinteger(constrs_interval(constrs, SNDRV_PCM_HW_PARAM_BUFFER_SIZE));
2454 	snd_interval_setinteger(constrs_interval(constrs, SNDRV_PCM_HW_PARAM_BUFFER_BYTES));
2455 	snd_interval_setinteger(constrs_interval(constrs, SNDRV_PCM_HW_PARAM_SAMPLE_BITS));
2456 	snd_interval_setinteger(constrs_interval(constrs, SNDRV_PCM_HW_PARAM_FRAME_BITS));
2457 
2458 	err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_FORMAT,
2459 				   snd_pcm_hw_rule_format, NULL,
2460 				   SNDRV_PCM_HW_PARAM_SAMPLE_BITS, -1);
2461 	if (err < 0)
2462 		return err;
2463 	err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_SAMPLE_BITS,
2464 				  snd_pcm_hw_rule_sample_bits, NULL,
2465 				  SNDRV_PCM_HW_PARAM_FORMAT,
2466 				  SNDRV_PCM_HW_PARAM_SAMPLE_BITS, -1);
2467 	if (err < 0)
2468 		return err;
2469 	err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_SAMPLE_BITS,
2470 				  snd_pcm_hw_rule_div, NULL,
2471 				  SNDRV_PCM_HW_PARAM_FRAME_BITS, SNDRV_PCM_HW_PARAM_CHANNELS, -1);
2472 	if (err < 0)
2473 		return err;
2474 	err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_FRAME_BITS,
2475 				  snd_pcm_hw_rule_mul, NULL,
2476 				  SNDRV_PCM_HW_PARAM_SAMPLE_BITS, SNDRV_PCM_HW_PARAM_CHANNELS, -1);
2477 	if (err < 0)
2478 		return err;
2479 	err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_FRAME_BITS,
2480 				  snd_pcm_hw_rule_mulkdiv, (void*) 8,
2481 				  SNDRV_PCM_HW_PARAM_PERIOD_BYTES, SNDRV_PCM_HW_PARAM_PERIOD_SIZE, -1);
2482 	if (err < 0)
2483 		return err;
2484 	err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_FRAME_BITS,
2485 				  snd_pcm_hw_rule_mulkdiv, (void*) 8,
2486 				  SNDRV_PCM_HW_PARAM_BUFFER_BYTES, SNDRV_PCM_HW_PARAM_BUFFER_SIZE, -1);
2487 	if (err < 0)
2488 		return err;
2489 	err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_CHANNELS,
2490 				  snd_pcm_hw_rule_div, NULL,
2491 				  SNDRV_PCM_HW_PARAM_FRAME_BITS, SNDRV_PCM_HW_PARAM_SAMPLE_BITS, -1);
2492 	if (err < 0)
2493 		return err;
2494 	err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_RATE,
2495 				  snd_pcm_hw_rule_mulkdiv, (void*) 1000000,
2496 				  SNDRV_PCM_HW_PARAM_PERIOD_SIZE, SNDRV_PCM_HW_PARAM_PERIOD_TIME, -1);
2497 	if (err < 0)
2498 		return err;
2499 	err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_RATE,
2500 				  snd_pcm_hw_rule_mulkdiv, (void*) 1000000,
2501 				  SNDRV_PCM_HW_PARAM_BUFFER_SIZE, SNDRV_PCM_HW_PARAM_BUFFER_TIME, -1);
2502 	if (err < 0)
2503 		return err;
2504 	err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_PERIODS,
2505 				  snd_pcm_hw_rule_div, NULL,
2506 				  SNDRV_PCM_HW_PARAM_BUFFER_SIZE, SNDRV_PCM_HW_PARAM_PERIOD_SIZE, -1);
2507 	if (err < 0)
2508 		return err;
2509 	err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_PERIOD_SIZE,
2510 				  snd_pcm_hw_rule_div, NULL,
2511 				  SNDRV_PCM_HW_PARAM_BUFFER_SIZE, SNDRV_PCM_HW_PARAM_PERIODS, -1);
2512 	if (err < 0)
2513 		return err;
2514 	err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_PERIOD_SIZE,
2515 				  snd_pcm_hw_rule_mulkdiv, (void*) 8,
2516 				  SNDRV_PCM_HW_PARAM_PERIOD_BYTES, SNDRV_PCM_HW_PARAM_FRAME_BITS, -1);
2517 	if (err < 0)
2518 		return err;
2519 	err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_PERIOD_SIZE,
2520 				  snd_pcm_hw_rule_muldivk, (void*) 1000000,
2521 				  SNDRV_PCM_HW_PARAM_PERIOD_TIME, SNDRV_PCM_HW_PARAM_RATE, -1);
2522 	if (err < 0)
2523 		return err;
2524 	err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_BUFFER_SIZE,
2525 				  snd_pcm_hw_rule_mul, NULL,
2526 				  SNDRV_PCM_HW_PARAM_PERIOD_SIZE, SNDRV_PCM_HW_PARAM_PERIODS, -1);
2527 	if (err < 0)
2528 		return err;
2529 	err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_BUFFER_SIZE,
2530 				  snd_pcm_hw_rule_mulkdiv, (void*) 8,
2531 				  SNDRV_PCM_HW_PARAM_BUFFER_BYTES, SNDRV_PCM_HW_PARAM_FRAME_BITS, -1);
2532 	if (err < 0)
2533 		return err;
2534 	err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_BUFFER_SIZE,
2535 				  snd_pcm_hw_rule_muldivk, (void*) 1000000,
2536 				  SNDRV_PCM_HW_PARAM_BUFFER_TIME, SNDRV_PCM_HW_PARAM_RATE, -1);
2537 	if (err < 0)
2538 		return err;
2539 	err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_PERIOD_BYTES,
2540 				  snd_pcm_hw_rule_muldivk, (void*) 8,
2541 				  SNDRV_PCM_HW_PARAM_PERIOD_SIZE, SNDRV_PCM_HW_PARAM_FRAME_BITS, -1);
2542 	if (err < 0)
2543 		return err;
2544 	err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_BUFFER_BYTES,
2545 				  snd_pcm_hw_rule_muldivk, (void*) 8,
2546 				  SNDRV_PCM_HW_PARAM_BUFFER_SIZE, SNDRV_PCM_HW_PARAM_FRAME_BITS, -1);
2547 	if (err < 0)
2548 		return err;
2549 	err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_PERIOD_TIME,
2550 				  snd_pcm_hw_rule_mulkdiv, (void*) 1000000,
2551 				  SNDRV_PCM_HW_PARAM_PERIOD_SIZE, SNDRV_PCM_HW_PARAM_RATE, -1);
2552 	if (err < 0)
2553 		return err;
2554 	err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_BUFFER_TIME,
2555 				  snd_pcm_hw_rule_mulkdiv, (void*) 1000000,
2556 				  SNDRV_PCM_HW_PARAM_BUFFER_SIZE, SNDRV_PCM_HW_PARAM_RATE, -1);
2557 	if (err < 0)
2558 		return err;
2559 	return 0;
2560 }
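
/*
 * Editorial example (not part of the original source): drivers can register
 * additional constraints with the same snd_pcm_hw_rule_add() helper used
 * above, usually from their .open callback.  A hedged sketch of a
 * hypothetical rule that limits the stream to 2 channels once the rate
 * interval lies above 96 kHz; the foo_* names are assumptions.
 */
#if 0	/* illustrative sketch only */
static int foo_rule_channels_by_rate(struct snd_pcm_hw_params *params,
				     struct snd_pcm_hw_rule *rule)
{
	const struct snd_interval *rate =
		hw_param_interval_c(params, SNDRV_PCM_HW_PARAM_RATE);
	struct snd_interval t = { .min = 1, .max = 2, .integer = 1 };

	if (rate->min <= 96000)
		return 0;	/* nothing to constrain */
	return snd_interval_refine(hw_param_interval(params, rule->var), &t);
}

static int foo_pcm_open(struct snd_pcm_substream *substream)
{
	return snd_pcm_hw_rule_add(substream->runtime, 0,
				   SNDRV_PCM_HW_PARAM_CHANNELS,
				   foo_rule_channels_by_rate, NULL,
				   SNDRV_PCM_HW_PARAM_RATE, -1);
}
#endif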
2561 
2562 static int snd_pcm_hw_constraints_complete(struct snd_pcm_substream *substream)
2563 {
2564 	struct snd_pcm_runtime *runtime = substream->runtime;
2565 	struct snd_pcm_hardware *hw = &runtime->hw;
2566 	int err;
2567 	unsigned int mask = 0;
2568 
2569 	if (hw->info & SNDRV_PCM_INFO_INTERLEAVED)
2570 		mask |= PARAM_MASK_BIT(SNDRV_PCM_ACCESS_RW_INTERLEAVED);
2571 	if (hw->info & SNDRV_PCM_INFO_NONINTERLEAVED)
2572 		mask |= PARAM_MASK_BIT(SNDRV_PCM_ACCESS_RW_NONINTERLEAVED);
2573 	if (hw_support_mmap(substream)) {
2574 		if (hw->info & SNDRV_PCM_INFO_INTERLEAVED)
2575 			mask |= PARAM_MASK_BIT(SNDRV_PCM_ACCESS_MMAP_INTERLEAVED);
2576 		if (hw->info & SNDRV_PCM_INFO_NONINTERLEAVED)
2577 			mask |= PARAM_MASK_BIT(SNDRV_PCM_ACCESS_MMAP_NONINTERLEAVED);
2578 		if (hw->info & SNDRV_PCM_INFO_COMPLEX)
2579 			mask |= PARAM_MASK_BIT(SNDRV_PCM_ACCESS_MMAP_COMPLEX);
2580 	}
2581 	err = snd_pcm_hw_constraint_mask(runtime, SNDRV_PCM_HW_PARAM_ACCESS, mask);
2582 	if (err < 0)
2583 		return err;
2584 
2585 	err = snd_pcm_hw_constraint_mask64(runtime, SNDRV_PCM_HW_PARAM_FORMAT, hw->formats);
2586 	if (err < 0)
2587 		return err;
2588 
2589 	err = snd_pcm_hw_constraint_mask(runtime, SNDRV_PCM_HW_PARAM_SUBFORMAT,
2590 					 PARAM_MASK_BIT(SNDRV_PCM_SUBFORMAT_STD));
2591 	if (err < 0)
2592 		return err;
2593 
2594 	err = snd_pcm_hw_constraint_minmax(runtime, SNDRV_PCM_HW_PARAM_CHANNELS,
2595 					   hw->channels_min, hw->channels_max);
2596 	if (err < 0)
2597 		return err;
2598 
2599 	err = snd_pcm_hw_constraint_minmax(runtime, SNDRV_PCM_HW_PARAM_RATE,
2600 					   hw->rate_min, hw->rate_max);
2601 	if (err < 0)
2602 		return err;
2603 
2604 	err = snd_pcm_hw_constraint_minmax(runtime, SNDRV_PCM_HW_PARAM_PERIOD_BYTES,
2605 					   hw->period_bytes_min, hw->period_bytes_max);
2606 	if (err < 0)
2607 		return err;
2608 
2609 	err = snd_pcm_hw_constraint_minmax(runtime, SNDRV_PCM_HW_PARAM_PERIODS,
2610 					   hw->periods_min, hw->periods_max);
2611 	if (err < 0)
2612 		return err;
2613 
2614 	err = snd_pcm_hw_constraint_minmax(runtime, SNDRV_PCM_HW_PARAM_BUFFER_BYTES,
2615 					   hw->period_bytes_min, hw->buffer_bytes_max);
2616 	if (err < 0)
2617 		return err;
2618 
2619 	err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_BUFFER_BYTES,
2620 				  snd_pcm_hw_rule_buffer_bytes_max, substream,
2621 				  SNDRV_PCM_HW_PARAM_BUFFER_BYTES, -1);
2622 	if (err < 0)
2623 		return err;
2624 
2625 	/* FIXME: remove */
2626 	if (runtime->dma_bytes) {
2627 		err = snd_pcm_hw_constraint_minmax(runtime, SNDRV_PCM_HW_PARAM_BUFFER_BYTES, 0, runtime->dma_bytes);
2628 		if (err < 0)
2629 			return err;
2630 	}
2631 
2632 	if (!(hw->rates & (SNDRV_PCM_RATE_KNOT | SNDRV_PCM_RATE_CONTINUOUS))) {
2633 		err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_RATE,
2634 					  snd_pcm_hw_rule_rate, hw,
2635 					  SNDRV_PCM_HW_PARAM_RATE, -1);
2636 		if (err < 0)
2637 			return err;
2638 	}
2639 
2640 	/* FIXME: this belongs to the lowlevel driver */
2641 	snd_pcm_hw_constraint_integer(runtime, SNDRV_PCM_HW_PARAM_PERIOD_SIZE);
2642 
2643 	return 0;
2644 }
2645 
2646 static void pcm_release_private(struct snd_pcm_substream *substream)
2647 {
2648 	if (snd_pcm_stream_linked(substream))
2649 		snd_pcm_unlink(substream);
2650 }
2651 
2652 void snd_pcm_release_substream(struct snd_pcm_substream *substream)
2653 {
2654 	substream->ref_count--;
2655 	if (substream->ref_count > 0)
2656 		return;
2657 
2658 	snd_pcm_drop(substream);
2659 	if (substream->hw_opened) {
2660 		if (substream->runtime->status->state != SNDRV_PCM_STATE_OPEN)
2661 			do_hw_free(substream);
2662 		substream->ops->close(substream);
2663 		substream->hw_opened = 0;
2664 	}
2665 	if (cpu_latency_qos_request_active(&substream->latency_pm_qos_req))
2666 		cpu_latency_qos_remove_request(&substream->latency_pm_qos_req);
2667 	if (substream->pcm_release) {
2668 		substream->pcm_release(substream);
2669 		substream->pcm_release = NULL;
2670 	}
2671 	snd_pcm_detach_substream(substream);
2672 }
2673 EXPORT_SYMBOL(snd_pcm_release_substream);
2674 
2675 int snd_pcm_open_substream(struct snd_pcm *pcm, int stream,
2676 			   struct file *file,
2677 			   struct snd_pcm_substream **rsubstream)
2678 {
2679 	struct snd_pcm_substream *substream;
2680 	int err;
2681 
2682 	err = snd_pcm_attach_substream(pcm, stream, file, &substream);
2683 	if (err < 0)
2684 		return err;
2685 	if (substream->ref_count > 1) {
2686 		*rsubstream = substream;
2687 		return 0;
2688 	}
2689 
2690 	err = snd_pcm_hw_constraints_init(substream);
2691 	if (err < 0) {
2692 		pcm_dbg(pcm, "snd_pcm_hw_constraints_init failed\n");
2693 		goto error;
2694 	}
2695 
2696 	if ((err = substream->ops->open(substream)) < 0)
2697 		goto error;
2698 
2699 	substream->hw_opened = 1;
2700 
2701 	err = snd_pcm_hw_constraints_complete(substream);
2702 	if (err < 0) {
2703 		pcm_dbg(pcm, "snd_pcm_hw_constraints_complete failed\n");
2704 		goto error;
2705 	}
2706 
2707 	*rsubstream = substream;
2708 	return 0;
2709 
2710  error:
2711 	snd_pcm_release_substream(substream);
2712 	return err;
2713 }
2714 EXPORT_SYMBOL(snd_pcm_open_substream);
2715 
2716 static int snd_pcm_open_file(struct file *file,
2717 			     struct snd_pcm *pcm,
2718 			     int stream)
2719 {
2720 	struct snd_pcm_file *pcm_file;
2721 	struct snd_pcm_substream *substream;
2722 	int err;
2723 
2724 	err = snd_pcm_open_substream(pcm, stream, file, &substream);
2725 	if (err < 0)
2726 		return err;
2727 
2728 	pcm_file = kzalloc(sizeof(*pcm_file), GFP_KERNEL);
2729 	if (pcm_file == NULL) {
2730 		snd_pcm_release_substream(substream);
2731 		return -ENOMEM;
2732 	}
2733 	pcm_file->substream = substream;
2734 	if (substream->ref_count == 1)
2735 		substream->pcm_release = pcm_release_private;
2736 	file->private_data = pcm_file;
2737 
2738 	return 0;
2739 }
2740 
2741 static int snd_pcm_playback_open(struct inode *inode, struct file *file)
2742 {
2743 	struct snd_pcm *pcm;
2744 	int err = nonseekable_open(inode, file);
2745 	if (err < 0)
2746 		return err;
2747 	pcm = snd_lookup_minor_data(iminor(inode),
2748 				    SNDRV_DEVICE_TYPE_PCM_PLAYBACK);
2749 	err = snd_pcm_open(file, pcm, SNDRV_PCM_STREAM_PLAYBACK);
2750 	if (pcm)
2751 		snd_card_unref(pcm->card);
2752 	return err;
2753 }
2754 
2755 static int snd_pcm_capture_open(struct inode *inode, struct file *file)
2756 {
2757 	struct snd_pcm *pcm;
2758 	int err = nonseekable_open(inode, file);
2759 	if (err < 0)
2760 		return err;
2761 	pcm = snd_lookup_minor_data(iminor(inode),
2762 				    SNDRV_DEVICE_TYPE_PCM_CAPTURE);
2763 	err = snd_pcm_open(file, pcm, SNDRV_PCM_STREAM_CAPTURE);
2764 	if (pcm)
2765 		snd_card_unref(pcm->card);
2766 	return err;
2767 }
2768 
2769 static int snd_pcm_open(struct file *file, struct snd_pcm *pcm, int stream)
2770 {
2771 	int err;
2772 	wait_queue_entry_t wait;
2773 
2774 	if (pcm == NULL) {
2775 		err = -ENODEV;
2776 		goto __error1;
2777 	}
2778 	err = snd_card_file_add(pcm->card, file);
2779 	if (err < 0)
2780 		goto __error1;
2781 	if (!try_module_get(pcm->card->module)) {
2782 		err = -EFAULT;
2783 		goto __error2;
2784 	}
2785 	init_waitqueue_entry(&wait, current);
2786 	add_wait_queue(&pcm->open_wait, &wait);
2787 	mutex_lock(&pcm->open_mutex);
2788 	while (1) {
2789 		err = snd_pcm_open_file(file, pcm, stream);
2790 		if (err >= 0)
2791 			break;
2792 		if (err == -EAGAIN) {
2793 			if (file->f_flags & O_NONBLOCK) {
2794 				err = -EBUSY;
2795 				break;
2796 			}
2797 		} else
2798 			break;
2799 		set_current_state(TASK_INTERRUPTIBLE);
2800 		mutex_unlock(&pcm->open_mutex);
2801 		schedule();
2802 		mutex_lock(&pcm->open_mutex);
2803 		if (pcm->card->shutdown) {
2804 			err = -ENODEV;
2805 			break;
2806 		}
2807 		if (signal_pending(current)) {
2808 			err = -ERESTARTSYS;
2809 			break;
2810 		}
2811 	}
2812 	remove_wait_queue(&pcm->open_wait, &wait);
2813 	mutex_unlock(&pcm->open_mutex);
2814 	if (err < 0)
2815 		goto __error;
2816 	return err;
2817 
2818  __error:
2819 	module_put(pcm->card->module);
2820  __error2:
2821 	snd_card_file_remove(pcm->card, file);
2822  __error1:
2823 	return err;
2824 }
2825 
2826 static int snd_pcm_release(struct inode *inode, struct file *file)
2827 {
2828 	struct snd_pcm *pcm;
2829 	struct snd_pcm_substream *substream;
2830 	struct snd_pcm_file *pcm_file;
2831 
2832 	pcm_file = file->private_data;
2833 	substream = pcm_file->substream;
2834 	if (snd_BUG_ON(!substream))
2835 		return -ENXIO;
2836 	pcm = substream->pcm;
2837 	mutex_lock(&pcm->open_mutex);
2838 	snd_pcm_release_substream(substream);
2839 	kfree(pcm_file);
2840 	mutex_unlock(&pcm->open_mutex);
2841 	wake_up(&pcm->open_wait);
2842 	module_put(pcm->card->module);
2843 	snd_card_file_remove(pcm->card, file);
2844 	return 0;
2845 }
2846 
2847 /* check and update PCM state; return 0 or a negative error
2848  * call this inside PCM lock
2849  */
2850 static int do_pcm_hwsync(struct snd_pcm_substream *substream)
2851 {
2852 	switch (substream->runtime->status->state) {
2853 	case SNDRV_PCM_STATE_DRAINING:
2854 		if (substream->stream == SNDRV_PCM_STREAM_CAPTURE)
2855 			return -EBADFD;
2856 		fallthrough;
2857 	case SNDRV_PCM_STATE_RUNNING:
2858 		return snd_pcm_update_hw_ptr(substream);
2859 	case SNDRV_PCM_STATE_PREPARED:
2860 	case SNDRV_PCM_STATE_PAUSED:
2861 		return 0;
2862 	case SNDRV_PCM_STATE_SUSPENDED:
2863 		return -ESTRPIPE;
2864 	case SNDRV_PCM_STATE_XRUN:
2865 		return -EPIPE;
2866 	default:
2867 		return -EBADFD;
2868 	}
2869 }
2870 
2871 /* increase the appl_ptr; returns the processed frames or a negative error */
2872 static snd_pcm_sframes_t forward_appl_ptr(struct snd_pcm_substream *substream,
2873 					  snd_pcm_uframes_t frames,
2874 					  snd_pcm_sframes_t avail)
2875 {
2876 	struct snd_pcm_runtime *runtime = substream->runtime;
2877 	snd_pcm_sframes_t appl_ptr;
2878 	int ret;
2879 
2880 	if (avail <= 0)
2881 		return 0;
2882 	if (frames > (snd_pcm_uframes_t)avail)
2883 		frames = avail;
2884 	appl_ptr = runtime->control->appl_ptr + frames;
2885 	if (appl_ptr >= (snd_pcm_sframes_t)runtime->boundary)
2886 		appl_ptr -= runtime->boundary;
2887 	ret = pcm_lib_apply_appl_ptr(substream, appl_ptr);
2888 	return ret < 0 ? ret : frames;
2889 }
2890 
2891 /* decrease the appl_ptr; returns the processed frames or zero for error */
2892 static snd_pcm_sframes_t rewind_appl_ptr(struct snd_pcm_substream *substream,
2893 					 snd_pcm_uframes_t frames,
2894 					 snd_pcm_sframes_t avail)
2895 {
2896 	struct snd_pcm_runtime *runtime = substream->runtime;
2897 	snd_pcm_sframes_t appl_ptr;
2898 	int ret;
2899 
2900 	if (avail <= 0)
2901 		return 0;
2902 	if (frames > (snd_pcm_uframes_t)avail)
2903 		frames = avail;
2904 	appl_ptr = runtime->control->appl_ptr - frames;
2905 	if (appl_ptr < 0)
2906 		appl_ptr += runtime->boundary;
2907 	ret = pcm_lib_apply_appl_ptr(substream, appl_ptr);
2908 	/* NOTE: we return zero for errors because PulseAudio gets depressed
2909 	 * upon receiving an error from rewind ioctl and stops processing
2910 	 * any longer.  Returning zero means that no rewind is done, so
2911 	 * it's not absolutely wrong to answer like that.
2912 	 */
2913 	return ret < 0 ? 0 : frames;
2914 }
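
/*
 * Editorial note (not part of the original source): the appl_ptr arithmetic
 * in the two helpers above wraps at runtime->boundary, not at buffer_size.
 * For example, with boundary = 0x40000000 and appl_ptr = 100, rewinding 300
 * frames (avail permitting) gives 100 - 300 + 0x40000000 = 0x3fffff38,
 * i.e. boundary - 200.
 */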
2915 
2916 static snd_pcm_sframes_t snd_pcm_rewind(struct snd_pcm_substream *substream,
2917 					snd_pcm_uframes_t frames)
2918 {
2919 	snd_pcm_sframes_t ret;
2920 
2921 	if (frames == 0)
2922 		return 0;
2923 
2924 	snd_pcm_stream_lock_irq(substream);
2925 	ret = do_pcm_hwsync(substream);
2926 	if (!ret)
2927 		ret = rewind_appl_ptr(substream, frames,
2928 				      snd_pcm_hw_avail(substream));
2929 	snd_pcm_stream_unlock_irq(substream);
2930 	return ret;
2931 }
2932 
2933 static snd_pcm_sframes_t snd_pcm_forward(struct snd_pcm_substream *substream,
2934 					 snd_pcm_uframes_t frames)
2935 {
2936 	snd_pcm_sframes_t ret;
2937 
2938 	if (frames == 0)
2939 		return 0;
2940 
2941 	snd_pcm_stream_lock_irq(substream);
2942 	ret = do_pcm_hwsync(substream);
2943 	if (!ret)
2944 		ret = forward_appl_ptr(substream, frames,
2945 				       snd_pcm_avail(substream));
2946 	snd_pcm_stream_unlock_irq(substream);
2947 	return ret;
2948 }
2949 
2950 static int snd_pcm_hwsync(struct snd_pcm_substream *substream)
2951 {
2952 	int err;
2953 
2954 	snd_pcm_stream_lock_irq(substream);
2955 	err = do_pcm_hwsync(substream);
2956 	snd_pcm_stream_unlock_irq(substream);
2957 	return err;
2958 }
2959 
2960 static int snd_pcm_delay(struct snd_pcm_substream *substream,
2961 			 snd_pcm_sframes_t *delay)
2962 {
2963 	int err;
2964 	snd_pcm_sframes_t n = 0;
2965 
2966 	snd_pcm_stream_lock_irq(substream);
2967 	err = do_pcm_hwsync(substream);
2968 	if (!err)
2969 		n = snd_pcm_calc_delay(substream);
2970 	snd_pcm_stream_unlock_irq(substream);
2971 	if (!err)
2972 		*delay = n;
2973 	return err;
2974 }
2975 
2976 static int snd_pcm_sync_ptr(struct snd_pcm_substream *substream,
2977 			    struct snd_pcm_sync_ptr __user *_sync_ptr)
2978 {
2979 	struct snd_pcm_runtime *runtime = substream->runtime;
2980 	struct snd_pcm_sync_ptr sync_ptr;
2981 	volatile struct snd_pcm_mmap_status *status;
2982 	volatile struct snd_pcm_mmap_control *control;
2983 	int err;
2984 
2985 	memset(&sync_ptr, 0, sizeof(sync_ptr));
2986 	if (get_user(sync_ptr.flags, (unsigned __user *)&(_sync_ptr->flags)))
2987 		return -EFAULT;
2988 	if (copy_from_user(&sync_ptr.c.control, &(_sync_ptr->c.control), sizeof(struct snd_pcm_mmap_control)))
2989 		return -EFAULT;
2990 	status = runtime->status;
2991 	control = runtime->control;
2992 	if (sync_ptr.flags & SNDRV_PCM_SYNC_PTR_HWSYNC) {
2993 		err = snd_pcm_hwsync(substream);
2994 		if (err < 0)
2995 			return err;
2996 	}
2997 	snd_pcm_stream_lock_irq(substream);
2998 	if (!(sync_ptr.flags & SNDRV_PCM_SYNC_PTR_APPL)) {
2999 		err = pcm_lib_apply_appl_ptr(substream,
3000 					     sync_ptr.c.control.appl_ptr);
3001 		if (err < 0) {
3002 			snd_pcm_stream_unlock_irq(substream);
3003 			return err;
3004 		}
3005 	} else {
3006 		sync_ptr.c.control.appl_ptr = control->appl_ptr;
3007 	}
3008 	if (!(sync_ptr.flags & SNDRV_PCM_SYNC_PTR_AVAIL_MIN))
3009 		control->avail_min = sync_ptr.c.control.avail_min;
3010 	else
3011 		sync_ptr.c.control.avail_min = control->avail_min;
3012 	sync_ptr.s.status.state = status->state;
3013 	sync_ptr.s.status.hw_ptr = status->hw_ptr;
3014 	sync_ptr.s.status.tstamp = status->tstamp;
3015 	sync_ptr.s.status.suspended_state = status->suspended_state;
3016 	sync_ptr.s.status.audio_tstamp = status->audio_tstamp;
3017 	snd_pcm_stream_unlock_irq(substream);
3018 	if (copy_to_user(_sync_ptr, &sync_ptr, sizeof(sync_ptr)))
3019 		return -EFAULT;
3020 	return 0;
3021 }
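
/*
 * Editorial example (not part of the original source): user space that does
 * not mmap the status/control pages can exchange pointers through
 * SNDRV_PCM_IOCTL_SYNC_PTR.  A set flag bit means "report the kernel value
 * for this field instead of applying mine".  A hedged sketch that reads the
 * current hw_ptr without modifying appl_ptr or avail_min.
 */
#if 0	/* illustrative sketch only; user-space code */
#include <string.h>
#include <sys/ioctl.h>
#include <sound/asound.h>

static long query_hw_ptr(int pcm_fd)
{
	struct snd_pcm_sync_ptr sp;

	memset(&sp, 0, sizeof(sp));
	sp.flags = SNDRV_PCM_SYNC_PTR_HWSYNC |	/* refresh hw_ptr first */
		   SNDRV_PCM_SYNC_PTR_APPL |	/* don't apply my appl_ptr */
		   SNDRV_PCM_SYNC_PTR_AVAIL_MIN;/* don't apply my avail_min */
	if (ioctl(pcm_fd, SNDRV_PCM_IOCTL_SYNC_PTR, &sp) < 0)
		return -1;
	return (long)sp.s.status.hw_ptr;
}
#endif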
3022 
3023 struct snd_pcm_mmap_status32 {
3024 	snd_pcm_state_t state;
3025 	s32 pad1;
3026 	u32 hw_ptr;
3027 	s32 tstamp_sec;
3028 	s32 tstamp_nsec;
3029 	snd_pcm_state_t suspended_state;
3030 	s32 audio_tstamp_sec;
3031 	s32 audio_tstamp_nsec;
3032 } __attribute__((packed));
3033 
3034 struct snd_pcm_mmap_control32 {
3035 	u32 appl_ptr;
3036 	u32 avail_min;
3037 };
3038 
3039 struct snd_pcm_sync_ptr32 {
3040 	u32 flags;
3041 	union {
3042 		struct snd_pcm_mmap_status32 status;
3043 		unsigned char reserved[64];
3044 	} s;
3045 	union {
3046 		struct snd_pcm_mmap_control32 control;
3047 		unsigned char reserved[64];
3048 	} c;
3049 } __attribute__((packed));
3050 
3051 /* recalculate the boundary within 32 bits */
3052 static snd_pcm_uframes_t recalculate_boundary(struct snd_pcm_runtime *runtime)
3053 {
3054 	snd_pcm_uframes_t boundary;
3055 
3056 	if (!runtime->buffer_size)
3057 		return 0;
3058 	boundary = runtime->buffer_size;
3059 	while (boundary * 2 <= 0x7fffffffUL - runtime->buffer_size)
3060 		boundary *= 2;
3061 	return boundary;
3062 }
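
/*
 * Editorial note (not part of the original source): e.g. for
 * buffer_size = 4096 the loop above doubles the boundary up to 0x40000000
 * (2^30); doubling once more would exceed 0x7fffffff - 4096, so the 32-bit
 * compat boundary ends up at 0x40000000 frames.
 */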
3063 
3064 static int snd_pcm_ioctl_sync_ptr_compat(struct snd_pcm_substream *substream,
3065 					 struct snd_pcm_sync_ptr32 __user *src)
3066 {
3067 	struct snd_pcm_runtime *runtime = substream->runtime;
3068 	volatile struct snd_pcm_mmap_status *status;
3069 	volatile struct snd_pcm_mmap_control *control;
3070 	u32 sflags;
3071 	struct snd_pcm_mmap_control scontrol;
3072 	struct snd_pcm_mmap_status sstatus;
3073 	snd_pcm_uframes_t boundary;
3074 	int err;
3075 
3076 	if (snd_BUG_ON(!runtime))
3077 		return -EINVAL;
3078 
3079 	if (get_user(sflags, &src->flags) ||
3080 	    get_user(scontrol.appl_ptr, &src->c.control.appl_ptr) ||
3081 	    get_user(scontrol.avail_min, &src->c.control.avail_min))
3082 		return -EFAULT;
3083 	if (sflags & SNDRV_PCM_SYNC_PTR_HWSYNC) {
3084 		err = snd_pcm_hwsync(substream);
3085 		if (err < 0)
3086 			return err;
3087 	}
3088 	status = runtime->status;
3089 	control = runtime->control;
3090 	boundary = recalculate_boundary(runtime);
3091 	if (!boundary)
3092 		boundary = 0x7fffffff;
3093 	snd_pcm_stream_lock_irq(substream);
3094 	/* FIXME: we should consider the boundary for the sync from app */
3095 	if (!(sflags & SNDRV_PCM_SYNC_PTR_APPL)) {
3096 		err = pcm_lib_apply_appl_ptr(substream,
3097 				scontrol.appl_ptr);
3098 		if (err < 0) {
3099 			snd_pcm_stream_unlock_irq(substream);
3100 			return err;
3101 		}
3102 	} else
3103 		scontrol.appl_ptr = control->appl_ptr % boundary;
3104 	if (!(sflags & SNDRV_PCM_SYNC_PTR_AVAIL_MIN))
3105 		control->avail_min = scontrol.avail_min;
3106 	else
3107 		scontrol.avail_min = control->avail_min;
3108 	sstatus.state = status->state;
3109 	sstatus.hw_ptr = status->hw_ptr % boundary;
3110 	sstatus.tstamp = status->tstamp;
3111 	sstatus.suspended_state = status->suspended_state;
3112 	sstatus.audio_tstamp = status->audio_tstamp;
3113 	snd_pcm_stream_unlock_irq(substream);
3114 	if (put_user(sstatus.state, &src->s.status.state) ||
3115 	    put_user(sstatus.hw_ptr, &src->s.status.hw_ptr) ||
3116 	    put_user(sstatus.tstamp.tv_sec, &src->s.status.tstamp_sec) ||
3117 	    put_user(sstatus.tstamp.tv_nsec, &src->s.status.tstamp_nsec) ||
3118 	    put_user(sstatus.suspended_state, &src->s.status.suspended_state) ||
3119 	    put_user(sstatus.audio_tstamp.tv_sec, &src->s.status.audio_tstamp_sec) ||
3120 	    put_user(sstatus.audio_tstamp.tv_nsec, &src->s.status.audio_tstamp_nsec) ||
3121 	    put_user(scontrol.appl_ptr, &src->c.control.appl_ptr) ||
3122 	    put_user(scontrol.avail_min, &src->c.control.avail_min))
3123 		return -EFAULT;
3124 
3125 	return 0;
3126 }
3127 #define __SNDRV_PCM_IOCTL_SYNC_PTR32 _IOWR('A', 0x23, struct snd_pcm_sync_ptr32)
3128 
3129 static int snd_pcm_tstamp(struct snd_pcm_substream *substream, int __user *_arg)
3130 {
3131 	struct snd_pcm_runtime *runtime = substream->runtime;
3132 	int arg;
3133 
3134 	if (get_user(arg, _arg))
3135 		return -EFAULT;
3136 	if (arg < 0 || arg > SNDRV_PCM_TSTAMP_TYPE_LAST)
3137 		return -EINVAL;
3138 	runtime->tstamp_type = arg;
3139 	return 0;
3140 }
3141 
3142 static int snd_pcm_xferi_frames_ioctl(struct snd_pcm_substream *substream,
3143 				      struct snd_xferi __user *_xferi)
3144 {
3145 	struct snd_xferi xferi;
3146 	struct snd_pcm_runtime *runtime = substream->runtime;
3147 	snd_pcm_sframes_t result;
3148 
3149 	if (runtime->status->state == SNDRV_PCM_STATE_OPEN)
3150 		return -EBADFD;
3151 	if (put_user(0, &_xferi->result))
3152 		return -EFAULT;
3153 	if (copy_from_user(&xferi, _xferi, sizeof(xferi)))
3154 		return -EFAULT;
3155 	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
3156 		result = snd_pcm_lib_write(substream, xferi.buf, xferi.frames);
3157 	else
3158 		result = snd_pcm_lib_read(substream, xferi.buf, xferi.frames);
3159 	if (put_user(result, &_xferi->result))
3160 		return -EFAULT;
3161 	return result < 0 ? result : 0;
3162 }
3163 
3164 static int snd_pcm_xfern_frames_ioctl(struct snd_pcm_substream *substream,
3165 				      struct snd_xfern __user *_xfern)
3166 {
3167 	struct snd_xfern xfern;
3168 	struct snd_pcm_runtime *runtime = substream->runtime;
3169 	void *bufs;
3170 	snd_pcm_sframes_t result;
3171 
3172 	if (runtime->status->state == SNDRV_PCM_STATE_OPEN)
3173 		return -EBADFD;
3174 	if (runtime->channels > 128)
3175 		return -EINVAL;
3176 	if (put_user(0, &_xfern->result))
3177 		return -EFAULT;
3178 	if (copy_from_user(&xfern, _xfern, sizeof(xfern)))
3179 		return -EFAULT;
3180 
3181 	bufs = memdup_user(xfern.bufs, sizeof(void *) * runtime->channels);
3182 	if (IS_ERR(bufs))
3183 		return PTR_ERR(bufs);
3184 	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
3185 		result = snd_pcm_lib_writev(substream, bufs, xfern.frames);
3186 	else
3187 		result = snd_pcm_lib_readv(substream, bufs, xfern.frames);
3188 	kfree(bufs);
3189 	if (put_user(result, &_xfern->result))
3190 		return -EFAULT;
3191 	return result < 0 ? result : 0;
3192 }
3193 
3194 static int snd_pcm_rewind_ioctl(struct snd_pcm_substream *substream,
3195 				snd_pcm_uframes_t __user *_frames)
3196 {
3197 	snd_pcm_uframes_t frames;
3198 	snd_pcm_sframes_t result;
3199 
3200 	if (get_user(frames, _frames))
3201 		return -EFAULT;
3202 	if (put_user(0, _frames))
3203 		return -EFAULT;
3204 	result = snd_pcm_rewind(substream, frames);
3205 	if (put_user(result, _frames))
3206 		return -EFAULT;
3207 	return result < 0 ? result : 0;
3208 }
3209 
snd_pcm_forward_ioctl(struct snd_pcm_substream * substream,snd_pcm_uframes_t __user * _frames)3210 static int snd_pcm_forward_ioctl(struct snd_pcm_substream *substream,
3211 				 snd_pcm_uframes_t __user *_frames)
3212 {
3213 	snd_pcm_uframes_t frames;
3214 	snd_pcm_sframes_t result;
3215 
3216 	if (get_user(frames, _frames))
3217 		return -EFAULT;
3218 	if (put_user(0, _frames))
3219 		return -EFAULT;
3220 	result = snd_pcm_forward(substream, frames);
3221 	if (put_user(result, _frames))
3222 		return -EFAULT;
3223 	return result < 0 ? result : 0;
3224 }
3225 
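/*
 * Illustrative sketch (not part of the original source): the REWIND and
 * FORWARD ioctls above take a frame count in place and write back the
 * number of frames actually moved, which may be less than requested.
 * "pcm_fd" is hypothetical.
 *
 *	snd_pcm_uframes_t frames = 256;
 *
 *	if (ioctl(pcm_fd, SNDRV_PCM_IOCTL_REWIND, &frames) == 0)
 *		printf("rewound %lu frames\n", (unsigned long)frames);
 */
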
static int snd_pcm_common_ioctl(struct file *file,
				 struct snd_pcm_substream *substream,
				 unsigned int cmd, void __user *arg)
{
	struct snd_pcm_file *pcm_file = file->private_data;
	int res;

	if (PCM_RUNTIME_CHECK(substream))
		return -ENXIO;

	res = snd_power_wait(substream->pcm->card, SNDRV_CTL_POWER_D0);
	if (res < 0)
		return res;

	switch (cmd) {
	case SNDRV_PCM_IOCTL_PVERSION:
		return put_user(SNDRV_PCM_VERSION, (int __user *)arg) ? -EFAULT : 0;
	case SNDRV_PCM_IOCTL_INFO:
		return snd_pcm_info_user(substream, arg);
	case SNDRV_PCM_IOCTL_TSTAMP:	/* just for compatibility */
		return 0;
	case SNDRV_PCM_IOCTL_TTSTAMP:
		return snd_pcm_tstamp(substream, arg);
	case SNDRV_PCM_IOCTL_USER_PVERSION:
		if (get_user(pcm_file->user_pversion,
			     (unsigned int __user *)arg))
			return -EFAULT;
		return 0;
	case SNDRV_PCM_IOCTL_HW_REFINE:
		return snd_pcm_hw_refine_user(substream, arg);
	case SNDRV_PCM_IOCTL_HW_PARAMS:
		return snd_pcm_hw_params_user(substream, arg);
	case SNDRV_PCM_IOCTL_HW_FREE:
		return snd_pcm_hw_free(substream);
	case SNDRV_PCM_IOCTL_SW_PARAMS:
		return snd_pcm_sw_params_user(substream, arg);
	case SNDRV_PCM_IOCTL_STATUS32:
		return snd_pcm_status_user32(substream, arg, false);
	case SNDRV_PCM_IOCTL_STATUS_EXT32:
		return snd_pcm_status_user32(substream, arg, true);
	case SNDRV_PCM_IOCTL_STATUS64:
		return snd_pcm_status_user64(substream, arg, false);
	case SNDRV_PCM_IOCTL_STATUS_EXT64:
		return snd_pcm_status_user64(substream, arg, true);
	case SNDRV_PCM_IOCTL_CHANNEL_INFO:
		return snd_pcm_channel_info_user(substream, arg);
	case SNDRV_PCM_IOCTL_PREPARE:
		return snd_pcm_prepare(substream, file);
	case SNDRV_PCM_IOCTL_RESET:
		return snd_pcm_reset(substream);
	case SNDRV_PCM_IOCTL_START:
		return snd_pcm_start_lock_irq(substream);
	case SNDRV_PCM_IOCTL_LINK:
		return snd_pcm_link(substream, (int)(unsigned long) arg);
	case SNDRV_PCM_IOCTL_UNLINK:
		return snd_pcm_unlink(substream);
	case SNDRV_PCM_IOCTL_RESUME:
		return snd_pcm_resume(substream);
	case SNDRV_PCM_IOCTL_XRUN:
		return snd_pcm_xrun(substream);
	case SNDRV_PCM_IOCTL_HWSYNC:
		return snd_pcm_hwsync(substream);
	case SNDRV_PCM_IOCTL_DELAY:
	{
		snd_pcm_sframes_t delay;
		snd_pcm_sframes_t __user *res = arg;
		int err;

		err = snd_pcm_delay(substream, &delay);
		if (err)
			return err;
		if (put_user(delay, res))
			return -EFAULT;
		return 0;
	}
	case __SNDRV_PCM_IOCTL_SYNC_PTR32:
		return snd_pcm_ioctl_sync_ptr_compat(substream, arg);
	case __SNDRV_PCM_IOCTL_SYNC_PTR64:
		return snd_pcm_sync_ptr(substream, arg);
#ifdef CONFIG_SND_SUPPORT_OLD_API
	case SNDRV_PCM_IOCTL_HW_REFINE_OLD:
		return snd_pcm_hw_refine_old_user(substream, arg);
	case SNDRV_PCM_IOCTL_HW_PARAMS_OLD:
		return snd_pcm_hw_params_old_user(substream, arg);
#endif
	case SNDRV_PCM_IOCTL_DRAIN:
		return snd_pcm_drain(substream, file);
	case SNDRV_PCM_IOCTL_DROP:
		return snd_pcm_drop(substream);
	case SNDRV_PCM_IOCTL_PAUSE:
		return snd_pcm_pause_lock_irq(substream, (unsigned long)arg);
	case SNDRV_PCM_IOCTL_WRITEI_FRAMES:
	case SNDRV_PCM_IOCTL_READI_FRAMES:
		return snd_pcm_xferi_frames_ioctl(substream, arg);
	case SNDRV_PCM_IOCTL_WRITEN_FRAMES:
	case SNDRV_PCM_IOCTL_READN_FRAMES:
		return snd_pcm_xfern_frames_ioctl(substream, arg);
	case SNDRV_PCM_IOCTL_REWIND:
		return snd_pcm_rewind_ioctl(substream, arg);
	case SNDRV_PCM_IOCTL_FORWARD:
		return snd_pcm_forward_ioctl(substream, arg);
	}
	pcm_dbg(substream->pcm, "unknown ioctl = 0x%x\n", cmd);
	return -ENOTTY;
}

static long snd_pcm_ioctl(struct file *file, unsigned int cmd,
			  unsigned long arg)
{
	struct snd_pcm_file *pcm_file;

	pcm_file = file->private_data;

	if (((cmd >> 8) & 0xff) != 'A')
		return -ENOTTY;

	return snd_pcm_common_ioctl(file, pcm_file->substream, cmd,
				     (void __user *)arg);
}

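/*
 * Illustrative note (not part of the original source): the open-coded
 * magic check above is equivalent to testing the _IOC_TYPE() field of
 * the command, since all PCM ioctls are defined with the 'A' magic,
 * e.g. _IOWR('A', 0x23, struct snd_pcm_sync_ptr32) further up.
 *
 *	if (_IOC_TYPE(cmd) != 'A')	// same test, assuming the usual
 *		return -ENOTTY;		// 8-bit type field at bit 8
 */
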
/**
 * snd_pcm_kernel_ioctl - Execute a PCM ioctl in kernel space
 * @substream: PCM substream
 * @cmd: IOCTL cmd
 * @arg: IOCTL argument
 *
 * The function is provided primarily for the OSS layer and USB gadget
 * drivers, and it accepts only a limited set of ioctls (hw_params,
 * sw_params, prepare, start, drain, drop, delay, forward).
 *
 * Return: zero if successful, or a negative error code
 */
int snd_pcm_kernel_ioctl(struct snd_pcm_substream *substream,
			 unsigned int cmd, void *arg)
{
	snd_pcm_uframes_t *frames = arg;
	snd_pcm_sframes_t result;

	switch (cmd) {
	case SNDRV_PCM_IOCTL_FORWARD:
	{
		/* provided only for OSS; capture-only and no value returned */
		if (substream->stream != SNDRV_PCM_STREAM_CAPTURE)
			return -EINVAL;
		result = snd_pcm_forward(substream, *frames);
		return result < 0 ? result : 0;
	}
	case SNDRV_PCM_IOCTL_HW_PARAMS:
		return snd_pcm_hw_params(substream, arg);
	case SNDRV_PCM_IOCTL_SW_PARAMS:
		return snd_pcm_sw_params(substream, arg);
	case SNDRV_PCM_IOCTL_PREPARE:
		return snd_pcm_prepare(substream, NULL);
	case SNDRV_PCM_IOCTL_START:
		return snd_pcm_start_lock_irq(substream);
	case SNDRV_PCM_IOCTL_DRAIN:
		return snd_pcm_drain(substream, NULL);
	case SNDRV_PCM_IOCTL_DROP:
		return snd_pcm_drop(substream);
	case SNDRV_PCM_IOCTL_DELAY:
		return snd_pcm_delay(substream, frames);
	default:
		return -EINVAL;
	}
}
EXPORT_SYMBOL(snd_pcm_kernel_ioctl);

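/*
 * Illustrative sketch (not part of the original source): how an in-kernel
 * user such as the OSS emulation layer might call the helper above to
 * stop and re-prepare a substream.  "substream" is assumed to be a valid,
 * already opened struct snd_pcm_substream.
 *
 *	int err;
 *
 *	err = snd_pcm_kernel_ioctl(substream, SNDRV_PCM_IOCTL_DROP, NULL);
 *	if (!err)
 *		err = snd_pcm_kernel_ioctl(substream,
 *					   SNDRV_PCM_IOCTL_PREPARE, NULL);
 */
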
static ssize_t snd_pcm_read(struct file *file, char __user *buf, size_t count,
			    loff_t * offset)
{
	struct snd_pcm_file *pcm_file;
	struct snd_pcm_substream *substream;
	struct snd_pcm_runtime *runtime;
	snd_pcm_sframes_t result;

	pcm_file = file->private_data;
	substream = pcm_file->substream;
	if (PCM_RUNTIME_CHECK(substream))
		return -ENXIO;
	runtime = substream->runtime;
	if (runtime->status->state == SNDRV_PCM_STATE_OPEN)
		return -EBADFD;
	if (!frame_aligned(runtime, count))
		return -EINVAL;
	count = bytes_to_frames(runtime, count);
	result = snd_pcm_lib_read(substream, buf, count);
	if (result > 0)
		result = frames_to_bytes(runtime, result);
	return result;
}

static ssize_t snd_pcm_write(struct file *file, const char __user *buf,
			     size_t count, loff_t * offset)
{
	struct snd_pcm_file *pcm_file;
	struct snd_pcm_substream *substream;
	struct snd_pcm_runtime *runtime;
	snd_pcm_sframes_t result;

	pcm_file = file->private_data;
	substream = pcm_file->substream;
	if (PCM_RUNTIME_CHECK(substream))
		return -ENXIO;
	runtime = substream->runtime;
	if (runtime->status->state == SNDRV_PCM_STATE_OPEN)
		return -EBADFD;
	if (!frame_aligned(runtime, count))
		return -EINVAL;
	count = bytes_to_frames(runtime, count);
	result = snd_pcm_lib_write(substream, buf, count);
	if (result > 0)
		result = frames_to_bytes(runtime, result);
	return result;
}

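/*
 * Illustrative sketch (not part of the original source): plain read(2)/
 * write(2) on the PCM device must pass a byte count that is a multiple
 * of the frame size, since the handlers above reject unaligned counts
 * with -EINVAL.  For 2-channel 16-bit data the frame size is 4 bytes;
 * "pcm_fd" and "buf" are hypothetical.
 *
 *	size_t frame_bytes = 2 * sizeof(short);	// channels * sample size
 *	size_t chunk = 1024 * frame_bytes;	// 1024 frames per write
 *	ssize_t written = write(pcm_fd, buf, chunk);
 */
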
static ssize_t snd_pcm_readv(struct kiocb *iocb, struct iov_iter *to)
{
	struct snd_pcm_file *pcm_file;
	struct snd_pcm_substream *substream;
	struct snd_pcm_runtime *runtime;
	snd_pcm_sframes_t result;
	unsigned long i;
	void __user **bufs;
	snd_pcm_uframes_t frames;

	pcm_file = iocb->ki_filp->private_data;
	substream = pcm_file->substream;
	if (PCM_RUNTIME_CHECK(substream))
		return -ENXIO;
	runtime = substream->runtime;
	if (runtime->status->state == SNDRV_PCM_STATE_OPEN)
		return -EBADFD;
	if (!iter_is_iovec(to))
		return -EINVAL;
	if (to->nr_segs > 1024 || to->nr_segs != runtime->channels)
		return -EINVAL;
	if (!frame_aligned(runtime, to->iov->iov_len))
		return -EINVAL;
	frames = bytes_to_samples(runtime, to->iov->iov_len);
	bufs = kmalloc_array(to->nr_segs, sizeof(void *), GFP_KERNEL);
	if (bufs == NULL)
		return -ENOMEM;
	for (i = 0; i < to->nr_segs; ++i)
		bufs[i] = to->iov[i].iov_base;
	result = snd_pcm_lib_readv(substream, bufs, frames);
	if (result > 0)
		result = frames_to_bytes(runtime, result);
	kfree(bufs);
	return result;
}

static ssize_t snd_pcm_writev(struct kiocb *iocb, struct iov_iter *from)
{
	struct snd_pcm_file *pcm_file;
	struct snd_pcm_substream *substream;
	struct snd_pcm_runtime *runtime;
	snd_pcm_sframes_t result;
	unsigned long i;
	void __user **bufs;
	snd_pcm_uframes_t frames;

	pcm_file = iocb->ki_filp->private_data;
	substream = pcm_file->substream;
	if (PCM_RUNTIME_CHECK(substream))
		return -ENXIO;
	runtime = substream->runtime;
	if (runtime->status->state == SNDRV_PCM_STATE_OPEN)
		return -EBADFD;
	if (!iter_is_iovec(from))
		return -EINVAL;
	if (from->nr_segs > 128 || from->nr_segs != runtime->channels ||
	    !frame_aligned(runtime, from->iov->iov_len))
		return -EINVAL;
	frames = bytes_to_samples(runtime, from->iov->iov_len);
	bufs = kmalloc_array(from->nr_segs, sizeof(void *), GFP_KERNEL);
	if (bufs == NULL)
		return -ENOMEM;
	for (i = 0; i < from->nr_segs; ++i)
		bufs[i] = from->iov[i].iov_base;
	result = snd_pcm_lib_writev(substream, bufs, frames);
	if (result > 0)
		result = frames_to_bytes(runtime, result);
	kfree(bufs);
	return result;
}

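/*
 * Illustrative sketch (not part of the original source): for
 * non-interleaved access, writev(2) expects exactly one iovec per
 * channel, each covering the same, frame-aligned number of bytes, as
 * enforced by snd_pcm_writev() above.  A stereo example with
 * hypothetical "left" and "right" sample buffers:
 *
 *	struct iovec vec[2] = {
 *		{ .iov_base = left,  .iov_len = bytes_per_channel },
 *		{ .iov_base = right, .iov_len = bytes_per_channel },
 *	};
 *
 *	ssize_t written = writev(pcm_fd, vec, 2);
 */
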
static __poll_t snd_pcm_poll(struct file *file, poll_table *wait)
{
	struct snd_pcm_file *pcm_file;
	struct snd_pcm_substream *substream;
	struct snd_pcm_runtime *runtime;
	__poll_t mask, ok;
	snd_pcm_uframes_t avail;

	pcm_file = file->private_data;

	substream = pcm_file->substream;
	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
		ok = EPOLLOUT | EPOLLWRNORM;
	else
		ok = EPOLLIN | EPOLLRDNORM;
	if (PCM_RUNTIME_CHECK(substream))
		return ok | EPOLLERR;

	runtime = substream->runtime;
	poll_wait(file, &runtime->sleep, wait);

	mask = 0;
	snd_pcm_stream_lock_irq(substream);
	avail = snd_pcm_avail(substream);
	switch (runtime->status->state) {
	case SNDRV_PCM_STATE_RUNNING:
	case SNDRV_PCM_STATE_PREPARED:
	case SNDRV_PCM_STATE_PAUSED:
		if (avail >= runtime->control->avail_min)
			mask = ok;
		break;
	case SNDRV_PCM_STATE_DRAINING:
		if (substream->stream == SNDRV_PCM_STREAM_CAPTURE) {
			mask = ok;
			if (!avail)
				mask |= EPOLLERR;
		}
		break;
	default:
		mask = ok | EPOLLERR;
		break;
	}
	snd_pcm_stream_unlock_irq(substream);
	return mask;
}

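/*
 * Illustrative sketch (not part of the original source): the poll handler
 * above reports the stream as ready once at least avail_min frames can be
 * transferred, so a playback loop can simply block in poll(2) before each
 * write.  "pcm_fd" is hypothetical.
 *
 *	struct pollfd pfd = { .fd = pcm_fd, .events = POLLOUT };
 *
 *	if (poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLERR))
 *		;	// XRUN, suspend etc.: recover before writing
 */
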
/*
 * mmap support
 */

/*
 * Only on coherent architectures can we mmap the status and control
 * records for efficient data transfer.  On the others, user space has to
 * fall back to the HWSYNC ioctl instead.
 */
#if defined(CONFIG_X86) || defined(CONFIG_PPC) || defined(CONFIG_ALPHA)
/*
 * mmap status record
 */
static vm_fault_t snd_pcm_mmap_status_fault(struct vm_fault *vmf)
{
	struct snd_pcm_substream *substream = vmf->vma->vm_private_data;
	struct snd_pcm_runtime *runtime;

	if (substream == NULL)
		return VM_FAULT_SIGBUS;
	runtime = substream->runtime;
	vmf->page = virt_to_page(runtime->status);
	get_page(vmf->page);
	return 0;
}

static const struct vm_operations_struct snd_pcm_vm_ops_status =
{
	.fault =	snd_pcm_mmap_status_fault,
};

static int snd_pcm_mmap_status(struct snd_pcm_substream *substream, struct file *file,
			       struct vm_area_struct *area)
{
	long size;
	if (!(area->vm_flags & VM_READ))
		return -EINVAL;
	size = area->vm_end - area->vm_start;
	if (size != PAGE_ALIGN(sizeof(struct snd_pcm_mmap_status)))
		return -EINVAL;
	area->vm_ops = &snd_pcm_vm_ops_status;
	area->vm_private_data = substream;
	area->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
	return 0;
}

/*
 * mmap control record
 */
static vm_fault_t snd_pcm_mmap_control_fault(struct vm_fault *vmf)
{
	struct snd_pcm_substream *substream = vmf->vma->vm_private_data;
	struct snd_pcm_runtime *runtime;

	if (substream == NULL)
		return VM_FAULT_SIGBUS;
	runtime = substream->runtime;
	vmf->page = virt_to_page(runtime->control);
	get_page(vmf->page);
	return 0;
}

static const struct vm_operations_struct snd_pcm_vm_ops_control =
{
	.fault =	snd_pcm_mmap_control_fault,
};

static int snd_pcm_mmap_control(struct snd_pcm_substream *substream, struct file *file,
				struct vm_area_struct *area)
{
	long size;
	if (!(area->vm_flags & VM_READ))
		return -EINVAL;
	size = area->vm_end - area->vm_start;
	if (size != PAGE_ALIGN(sizeof(struct snd_pcm_mmap_control)))
		return -EINVAL;
	area->vm_ops = &snd_pcm_vm_ops_control;
	area->vm_private_data = substream;
	area->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
	return 0;
}

static bool pcm_status_mmap_allowed(struct snd_pcm_file *pcm_file)
{
	/* See pcm_control_mmap_allowed() below.
	 * Since older alsa-lib requires both status and control mmaps to be
	 * coupled, we have to disable the status mmap for old alsa-lib, too.
	 */
	if (pcm_file->user_pversion < SNDRV_PROTOCOL_VERSION(2, 0, 14) &&
	    (pcm_file->substream->runtime->hw.info & SNDRV_PCM_INFO_SYNC_APPLPTR))
		return false;
	return true;
}

static bool pcm_control_mmap_allowed(struct snd_pcm_file *pcm_file)
{
	if (pcm_file->no_compat_mmap)
		return false;
	/* Disallow the control mmap when SYNC_APPLPTR flag is set;
	 * this forces user space to fall back to snd_pcm_sync_ptr(),
	 * which effectively ensures the manual update of appl_ptr.
	 */
	if (pcm_file->substream->runtime->hw.info & SNDRV_PCM_INFO_SYNC_APPLPTR)
		return false;
	return true;
}

#else /* ! coherent mmap */
/*
 * don't support mmap for status and control records.
 */
#define pcm_status_mmap_allowed(pcm_file)	false
#define pcm_control_mmap_allowed(pcm_file)	false

static int snd_pcm_mmap_status(struct snd_pcm_substream *substream, struct file *file,
			       struct vm_area_struct *area)
{
	return -ENXIO;
}
static int snd_pcm_mmap_control(struct snd_pcm_substream *substream, struct file *file,
				struct vm_area_struct *area)
{
	return -ENXIO;
}
#endif /* coherent mmap */

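/*
 * Illustrative sketch (not part of the original source): on the coherent
 * architectures above, user space can map the status record read-only at
 * the fixed offset from <sound/asound.h> (typically one page) and read
 * hw_ptr directly instead of issuing ioctls.  "pcm_fd" is hypothetical.
 *
 *	struct snd_pcm_mmap_status *status =
 *		mmap(NULL, sysconf(_SC_PAGESIZE), PROT_READ, MAP_SHARED,
 *		     pcm_fd, SNDRV_PCM_MMAP_OFFSET_STATUS);
 *
 *	snd_pcm_uframes_t hw = status->hw_ptr;
 */
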
static inline struct page *
snd_pcm_default_page_ops(struct snd_pcm_substream *substream, unsigned long ofs)
{
	void *vaddr = substream->runtime->dma_area + ofs;

	switch (substream->dma_buffer.dev.type) {
#ifdef CONFIG_SND_DMA_SGBUF
	case SNDRV_DMA_TYPE_DEV_SG:
	case SNDRV_DMA_TYPE_DEV_UC_SG:
		return snd_pcm_sgbuf_ops_page(substream, ofs);
#endif /* CONFIG_SND_DMA_SGBUF */
	case SNDRV_DMA_TYPE_VMALLOC:
		return vmalloc_to_page(vaddr);
	default:
		return virt_to_page(vaddr);
	}
}

/*
 * fault callback for mmapping a RAM page
 */
static vm_fault_t snd_pcm_mmap_data_fault(struct vm_fault *vmf)
{
	struct snd_pcm_substream *substream = vmf->vma->vm_private_data;
	struct snd_pcm_runtime *runtime;
	unsigned long offset;
	struct page * page;
	size_t dma_bytes;

	if (substream == NULL)
		return VM_FAULT_SIGBUS;
	runtime = substream->runtime;
	offset = vmf->pgoff << PAGE_SHIFT;
	dma_bytes = PAGE_ALIGN(runtime->dma_bytes);
	if (offset > dma_bytes - PAGE_SIZE)
		return VM_FAULT_SIGBUS;
	if (substream->ops->page)
		page = substream->ops->page(substream, offset);
	else
		page = snd_pcm_default_page_ops(substream, offset);
	if (!page)
		return VM_FAULT_SIGBUS;
	get_page(page);
	vmf->page = page;
	return 0;
}

static const struct vm_operations_struct snd_pcm_vm_ops_data = {
	.open =		snd_pcm_mmap_data_open,
	.close =	snd_pcm_mmap_data_close,
};

static const struct vm_operations_struct snd_pcm_vm_ops_data_fault = {
	.open =		snd_pcm_mmap_data_open,
	.close =	snd_pcm_mmap_data_close,
	.fault =	snd_pcm_mmap_data_fault,
};

/*
 * mmap the DMA buffer on RAM
 */

/**
 * snd_pcm_lib_default_mmap - Default PCM data mmap function
 * @substream: PCM substream
 * @area: VMA
 *
 * This is the default mmap handler for PCM data.  When mmap pcm_ops is NULL,
 * this function is invoked implicitly.
 */
int snd_pcm_lib_default_mmap(struct snd_pcm_substream *substream,
			     struct vm_area_struct *area)
{
	area->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
#ifdef CONFIG_GENERIC_ALLOCATOR
	if (substream->dma_buffer.dev.type == SNDRV_DMA_TYPE_DEV_IRAM) {
		area->vm_page_prot = pgprot_writecombine(area->vm_page_prot);
		return remap_pfn_range(area, area->vm_start,
				substream->dma_buffer.addr >> PAGE_SHIFT,
				area->vm_end - area->vm_start, area->vm_page_prot);
	}
#endif /* CONFIG_GENERIC_ALLOCATOR */
	if (IS_ENABLED(CONFIG_HAS_DMA) && !substream->ops->page &&
	    (substream->dma_buffer.dev.type == SNDRV_DMA_TYPE_DEV ||
	     substream->dma_buffer.dev.type == SNDRV_DMA_TYPE_DEV_UC))
		return dma_mmap_coherent(substream->dma_buffer.dev.dev,
					 area,
					 substream->runtime->dma_area,
					 substream->runtime->dma_addr,
					 substream->runtime->dma_bytes);
	/* mmap with fault handler */
	area->vm_ops = &snd_pcm_vm_ops_data_fault;
	return 0;
}
EXPORT_SYMBOL_GPL(snd_pcm_lib_default_mmap);

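/*
 * Illustrative sketch (not part of the original source): a driver that
 * needs custom page protection can still reuse the default handler above
 * from its own mmap pcm_ops; the surrounding names are hypothetical.
 *
 *	static int foo_pcm_mmap(struct snd_pcm_substream *substream,
 *				struct vm_area_struct *vma)
 *	{
 *		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
 *		return snd_pcm_lib_default_mmap(substream, vma);
 *	}
 *
 *	static const struct snd_pcm_ops foo_pcm_ops = {
 *		...
 *		.mmap = foo_pcm_mmap,	// or leave NULL for the default
 *	};
 */
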
/*
 * mmap the DMA buffer on I/O memory area
 */
#if SNDRV_PCM_INFO_MMAP_IOMEM
/**
 * snd_pcm_lib_mmap_iomem - Default PCM data mmap function for I/O mem
 * @substream: PCM substream
 * @area: VMA
 *
 * When your hardware uses iomapped pages as the hardware buffer and you
 * want to mmap it, pass this function as the mmap pcm_ops.  Note that
 * this is supposed to work only on limited architectures.
 */
int snd_pcm_lib_mmap_iomem(struct snd_pcm_substream *substream,
			   struct vm_area_struct *area)
{
	struct snd_pcm_runtime *runtime = substream->runtime;

	area->vm_page_prot = pgprot_noncached(area->vm_page_prot);
	return vm_iomap_memory(area, runtime->dma_addr, runtime->dma_bytes);
}
EXPORT_SYMBOL(snd_pcm_lib_mmap_iomem);
#endif /* SNDRV_PCM_INFO_MMAP_IOMEM */

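/*
 * Illustrative sketch (not part of the original source): a driver with an
 * iomapped hardware buffer would advertise SNDRV_PCM_INFO_MMAP_IOMEM in
 * its snd_pcm_hardware.info mask and plug the helper above into its
 * pcm_ops; "bar_pcm_ops" is hypothetical.
 *
 *	static const struct snd_pcm_ops bar_pcm_ops = {
 *		...
 *		.mmap = snd_pcm_lib_mmap_iomem,
 *	};
 */
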
/*
 * mmap DMA buffer
 */
int snd_pcm_mmap_data(struct snd_pcm_substream *substream, struct file *file,
		      struct vm_area_struct *area)
{
	struct snd_pcm_runtime *runtime;
	long size;
	unsigned long offset;
	size_t dma_bytes;
	int err;

	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
		if (!(area->vm_flags & (VM_WRITE|VM_READ)))
			return -EINVAL;
	} else {
		if (!(area->vm_flags & VM_READ))
			return -EINVAL;
	}
	runtime = substream->runtime;
	if (runtime->status->state == SNDRV_PCM_STATE_OPEN)
		return -EBADFD;
	if (!(runtime->info & SNDRV_PCM_INFO_MMAP))
		return -ENXIO;
	if (runtime->access == SNDRV_PCM_ACCESS_RW_INTERLEAVED ||
	    runtime->access == SNDRV_PCM_ACCESS_RW_NONINTERLEAVED)
		return -EINVAL;
	size = area->vm_end - area->vm_start;
	offset = area->vm_pgoff << PAGE_SHIFT;
	dma_bytes = PAGE_ALIGN(runtime->dma_bytes);
	if ((size_t)size > dma_bytes)
		return -EINVAL;
	if (offset > dma_bytes - size)
		return -EINVAL;

	area->vm_ops = &snd_pcm_vm_ops_data;
	area->vm_private_data = substream;
	if (substream->ops->mmap)
		err = substream->ops->mmap(substream, area);
	else
		err = snd_pcm_lib_default_mmap(substream, area);
	if (!err)
		atomic_inc(&substream->mmap_count);
	return err;
}
EXPORT_SYMBOL(snd_pcm_mmap_data);

static int snd_pcm_mmap(struct file *file, struct vm_area_struct *area)
{
	struct snd_pcm_file * pcm_file;
	struct snd_pcm_substream *substream;
	unsigned long offset;

	pcm_file = file->private_data;
	substream = pcm_file->substream;
	if (PCM_RUNTIME_CHECK(substream))
		return -ENXIO;

	offset = area->vm_pgoff << PAGE_SHIFT;
	switch (offset) {
	case SNDRV_PCM_MMAP_OFFSET_STATUS_OLD:
		if (pcm_file->no_compat_mmap || !IS_ENABLED(CONFIG_64BIT))
			return -ENXIO;
		fallthrough;
	case SNDRV_PCM_MMAP_OFFSET_STATUS_NEW:
		if (!pcm_status_mmap_allowed(pcm_file))
			return -ENXIO;
		return snd_pcm_mmap_status(substream, file, area);
	case SNDRV_PCM_MMAP_OFFSET_CONTROL_OLD:
		if (pcm_file->no_compat_mmap || !IS_ENABLED(CONFIG_64BIT))
			return -ENXIO;
		fallthrough;
	case SNDRV_PCM_MMAP_OFFSET_CONTROL_NEW:
		if (!pcm_control_mmap_allowed(pcm_file))
			return -ENXIO;
		return snd_pcm_mmap_control(substream, file, area);
	default:
		return snd_pcm_mmap_data(substream, file, area);
	}
	return 0;
}

static int snd_pcm_fasync(int fd, struct file * file, int on)
{
	struct snd_pcm_file * pcm_file;
	struct snd_pcm_substream *substream;
	struct snd_pcm_runtime *runtime;

	pcm_file = file->private_data;
	substream = pcm_file->substream;
	if (PCM_RUNTIME_CHECK(substream))
		return -ENXIO;
	runtime = substream->runtime;
	return fasync_helper(fd, file, on, &runtime->fasync);
}

/*
 * ioctl32 compat
 */
#ifdef CONFIG_COMPAT
#include "pcm_compat.c"
#else
#define snd_pcm_ioctl_compat	NULL
#endif

/*
 *  Helpers slated for removal, kept only for binary compatibility
 */

#ifdef CONFIG_SND_SUPPORT_OLD_API
#define __OLD_TO_NEW_MASK(x) ((x&7)|((x&0x07fffff8)<<5))
#define __NEW_TO_OLD_MASK(x) ((x&7)|((x&0xffffff00)>>5))

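/*
 * Illustrative note (not part of the original source): the old ABI packed
 * the mask and interval parameters contiguously, while the new ABI leaves
 * a gap of five indices between them, so the macros keep bits 0-2
 * (ACCESS/FORMAT/SUBFORMAT) in place and shift the remaining bits by 5.
 * For example, the old SAMPLE_BITS bit 3 becomes the new bit 8:
 *
 *	__OLD_TO_NEW_MASK(1 << 3) == 1 << 8
 *	__NEW_TO_OLD_MASK(1 << 8) == 1 << 3
 */
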
static void snd_pcm_hw_convert_from_old_params(struct snd_pcm_hw_params *params,
					       struct snd_pcm_hw_params_old *oparams)
{
	unsigned int i;

	memset(params, 0, sizeof(*params));
	params->flags = oparams->flags;
	for (i = 0; i < ARRAY_SIZE(oparams->masks); i++)
		params->masks[i].bits[0] = oparams->masks[i];
	memcpy(params->intervals, oparams->intervals, sizeof(oparams->intervals));
	params->rmask = __OLD_TO_NEW_MASK(oparams->rmask);
	params->cmask = __OLD_TO_NEW_MASK(oparams->cmask);
	params->info = oparams->info;
	params->msbits = oparams->msbits;
	params->rate_num = oparams->rate_num;
	params->rate_den = oparams->rate_den;
	params->fifo_size = oparams->fifo_size;
}

static void snd_pcm_hw_convert_to_old_params(struct snd_pcm_hw_params_old *oparams,
					     struct snd_pcm_hw_params *params)
{
	unsigned int i;

	memset(oparams, 0, sizeof(*oparams));
	oparams->flags = params->flags;
	for (i = 0; i < ARRAY_SIZE(oparams->masks); i++)
		oparams->masks[i] = params->masks[i].bits[0];
	memcpy(oparams->intervals, params->intervals, sizeof(oparams->intervals));
	oparams->rmask = __NEW_TO_OLD_MASK(params->rmask);
	oparams->cmask = __NEW_TO_OLD_MASK(params->cmask);
	oparams->info = params->info;
	oparams->msbits = params->msbits;
	oparams->rate_num = params->rate_num;
	oparams->rate_den = params->rate_den;
	oparams->fifo_size = params->fifo_size;
}

static int snd_pcm_hw_refine_old_user(struct snd_pcm_substream *substream,
				      struct snd_pcm_hw_params_old __user * _oparams)
{
	struct snd_pcm_hw_params *params;
	struct snd_pcm_hw_params_old *oparams = NULL;
	int err;

	params = kmalloc(sizeof(*params), GFP_KERNEL);
	if (!params)
		return -ENOMEM;

	oparams = memdup_user(_oparams, sizeof(*oparams));
	if (IS_ERR(oparams)) {
		err = PTR_ERR(oparams);
		goto out;
	}
	snd_pcm_hw_convert_from_old_params(params, oparams);
	err = snd_pcm_hw_refine(substream, params);
	if (err < 0)
		goto out_old;

	err = fixup_unreferenced_params(substream, params);
	if (err < 0)
		goto out_old;

	snd_pcm_hw_convert_to_old_params(oparams, params);
	if (copy_to_user(_oparams, oparams, sizeof(*oparams)))
		err = -EFAULT;
out_old:
	kfree(oparams);
out:
	kfree(params);
	return err;
}

static int snd_pcm_hw_params_old_user(struct snd_pcm_substream *substream,
				      struct snd_pcm_hw_params_old __user * _oparams)
{
	struct snd_pcm_hw_params *params;
	struct snd_pcm_hw_params_old *oparams = NULL;
	int err;

	params = kmalloc(sizeof(*params), GFP_KERNEL);
	if (!params)
		return -ENOMEM;

	oparams = memdup_user(_oparams, sizeof(*oparams));
	if (IS_ERR(oparams)) {
		err = PTR_ERR(oparams);
		goto out;
	}

	snd_pcm_hw_convert_from_old_params(params, oparams);
	err = snd_pcm_hw_params(substream, params);
	if (err < 0)
		goto out_old;

	snd_pcm_hw_convert_to_old_params(oparams, params);
	if (copy_to_user(_oparams, oparams, sizeof(*oparams)))
		err = -EFAULT;
out_old:
	kfree(oparams);
out:
	kfree(params);
	return err;
}
#endif /* CONFIG_SND_SUPPORT_OLD_API */

#ifndef CONFIG_MMU
static unsigned long snd_pcm_get_unmapped_area(struct file *file,
					       unsigned long addr,
					       unsigned long len,
					       unsigned long pgoff,
					       unsigned long flags)
{
	struct snd_pcm_file *pcm_file = file->private_data;
	struct snd_pcm_substream *substream = pcm_file->substream;
	struct snd_pcm_runtime *runtime = substream->runtime;
	unsigned long offset = pgoff << PAGE_SHIFT;

	switch (offset) {
	case SNDRV_PCM_MMAP_OFFSET_STATUS_NEW:
		return (unsigned long)runtime->status;
	case SNDRV_PCM_MMAP_OFFSET_CONTROL_NEW:
		return (unsigned long)runtime->control;
	default:
		return (unsigned long)runtime->dma_area + offset;
	}
}
#else
# define snd_pcm_get_unmapped_area NULL
#endif

/*
 *  Register section
 */

const struct file_operations snd_pcm_f_ops[2] = {
	{
		.owner =		THIS_MODULE,
		.write =		snd_pcm_write,
		.write_iter =		snd_pcm_writev,
		.open =			snd_pcm_playback_open,
		.release =		snd_pcm_release,
		.llseek =		no_llseek,
		.poll =			snd_pcm_poll,
		.unlocked_ioctl =	snd_pcm_ioctl,
		.compat_ioctl = 	snd_pcm_ioctl_compat,
		.mmap =			snd_pcm_mmap,
		.fasync =		snd_pcm_fasync,
		.get_unmapped_area =	snd_pcm_get_unmapped_area,
	},
	{
		.owner =		THIS_MODULE,
		.read =			snd_pcm_read,
		.read_iter =		snd_pcm_readv,
		.open =			snd_pcm_capture_open,
		.release =		snd_pcm_release,
		.llseek =		no_llseek,
		.poll =			snd_pcm_poll,
		.unlocked_ioctl =	snd_pcm_ioctl,
		.compat_ioctl = 	snd_pcm_ioctl_compat,
		.mmap =			snd_pcm_mmap,
		.fasync =		snd_pcm_fasync,
		.get_unmapped_area =	snd_pcm_get_unmapped_area,
	}
};