1 /***
2 This file is part of PulseAudio.
3
4 Copyright 2004-2006 Lennart Poettering
5
6 PulseAudio is free software; you can redistribute it and/or modify
7 it under the terms of the GNU Lesser General Public License as published
8 by the Free Software Foundation; either version 2.1 of the License,
9 or (at your option) any later version.
10
11 PulseAudio is distributed in the hope that it will be useful, but
12 WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 General Public License for more details.
15
16 You should have received a copy of the GNU Lesser General Public License
17 along with PulseAudio; if not, see <http://www.gnu.org/licenses/>.
18 ***/
19
20 #ifdef HAVE_CONFIG_H
21 #include <config.h>
22 #endif
23
24 #include <stdio.h>
25 #include <stdlib.h>
26 #include <string.h>
27
28 #include <pulse/xmalloc.h>
29
30 #include <pulsecore/log.h>
31 #include <pulsecore/mcalign.h>
32 #include <pulsecore/macro.h>
33 #include <pulsecore/flist.h>
34
35 #include "memblockq.h"
36
37 /* #define MEMBLOCKQ_DEBUG */
38
/* One node of the queue's doubly linked list, holding a single chunk of
 * audio data together with its absolute position in the byte stream. */
struct list_item {
    struct list_item *next, *prev;
    int64_t index;     /* absolute byte index of this chunk in the stream */
    pa_memchunk chunk; /* the referenced audio data */
};
44
/* Lock-free free list used to recycle list_item allocations. */
PA_STATIC_FLIST_DECLARE(list_items, 0, pa_xfree);
46
/* A queue of memory chunks indexed by a 64 bit byte-stream position.
 * Read and write pointers move independently; gaps between queued
 * chunks are played back as silence (see pa_memblockq_peek()). */
struct pa_memblockq {
    struct list_item *blocks, *blocks_tail;         /* chunk list, ordered by index */
    struct list_item *current_read, *current_write; /* cached cursors, see fix_current_read()/fix_current_write() */
    unsigned n_blocks;                              /* number of list items currently queued */
    size_t maxlength, tlength, base, prebuf, minreq, maxrewind; /* buffer metrics in bytes; base is the frame size */
    int64_t read_index, write_index;                /* absolute stream positions */
    bool in_prebuf;                                 /* true while data is held back until prebuf bytes are queued */
    pa_memchunk silence;                            /* chunk returned for holes in the stream; may be unset */
    pa_mcalign *mcalign;                            /* frame aligner used by pa_memblockq_push_align() */
    int64_t missing, requested;                     /* client request accounting, see write_index_changed() */
    char *name;                                     /* used for debug logging only */
    pa_sample_spec sample_spec;
};
60
pa_memblockq_new(const char * name,int64_t idx,size_t maxlength,size_t tlength,const pa_sample_spec * sample_spec,size_t prebuf,size_t minreq,size_t maxrewind,pa_memchunk * silence)61 pa_memblockq* pa_memblockq_new(
62 const char *name,
63 int64_t idx,
64 size_t maxlength,
65 size_t tlength,
66 const pa_sample_spec *sample_spec,
67 size_t prebuf,
68 size_t minreq,
69 size_t maxrewind,
70 pa_memchunk *silence) {
71
72 pa_memblockq* bq;
73
74 pa_assert(sample_spec);
75 pa_assert(name);
76
77 bq = pa_xnew0(pa_memblockq, 1);
78 bq->name = pa_xstrdup(name);
79
80 bq->sample_spec = *sample_spec;
81 bq->base = pa_frame_size(sample_spec);
82 bq->read_index = bq->write_index = idx;
83
84 pa_log_debug("memblockq requested: maxlength=%lu, tlength=%lu, base=%lu, prebuf=%lu, minreq=%lu maxrewind=%lu",
85 (unsigned long) maxlength, (unsigned long) tlength, (unsigned long) bq->base, (unsigned long) prebuf, (unsigned long) minreq, (unsigned long) maxrewind);
86
87 bq->in_prebuf = true;
88
89 pa_memblockq_set_maxlength(bq, maxlength);
90 pa_memblockq_set_tlength(bq, tlength);
91 pa_memblockq_set_minreq(bq, minreq);
92 pa_memblockq_set_prebuf(bq, prebuf);
93 pa_memblockq_set_maxrewind(bq, maxrewind);
94
95 pa_log_debug("memblockq sanitized: maxlength=%lu, tlength=%lu, base=%lu, prebuf=%lu, minreq=%lu maxrewind=%lu",
96 (unsigned long) bq->maxlength, (unsigned long) bq->tlength, (unsigned long) bq->base, (unsigned long) bq->prebuf, (unsigned long) bq->minreq, (unsigned long) bq->maxrewind);
97
98 if (silence) {
99 bq->silence = *silence;
100 pa_memblock_ref(bq->silence.memblock);
101 }
102
103 bq->mcalign = pa_mcalign_new(bq->base);
104
105 return bq;
106 }
107
/* Release a queue created with pa_memblockq_new(), including all queued
 * chunks, the aligner and the silence chunk. */
void pa_memblockq_free(pa_memblockq* bq) {
    pa_assert(bq);

    /* Drop every queued chunk first. */
    pa_memblockq_silence(bq);

    if (bq->mcalign)
        pa_mcalign_free(bq->mcalign);

    if (bq->silence.memblock)
        pa_memblock_unref(bq->silence.memblock);

    pa_xfree(bq->name);
    pa_xfree(bq);
}
122
/* Reposition the cached read cursor so that it points at the block
 * containing read_index, or at the first block to the right of it.
 * Starting from the cached position makes this cheap when the index
 * only moved a little since the last call. */
static void fix_current_read(pa_memblockq *bq) {
    pa_assert(bq);

    if (PA_UNLIKELY(!bq->blocks)) {
        bq->current_read = NULL;
        return;
    }

    if (PA_UNLIKELY(!bq->current_read))
        bq->current_read = bq->blocks;

    /* Scan left */
    while (PA_UNLIKELY(bq->current_read->index > bq->read_index))

        if (bq->current_read->prev)
            bq->current_read = bq->current_read->prev;
        else
            break;

    /* Scan right */
    while (PA_LIKELY(bq->current_read != NULL) && PA_UNLIKELY(bq->current_read->index + (int64_t) bq->current_read->chunk.length <= bq->read_index))
        bq->current_read = bq->current_read->next;

    /* At this point current_read will either point at or left of the
       next block to play. It may be NULL in case everything in
       the queue was already played */
}
150
/* Reposition the cached write cursor so that it points at the block
 * containing write_index, or at the first block to the left of it.
 * Mirror image of fix_current_read(), scanning from the tail. */
static void fix_current_write(pa_memblockq *bq) {
    pa_assert(bq);

    if (PA_UNLIKELY(!bq->blocks)) {
        bq->current_write = NULL;
        return;
    }

    if (PA_UNLIKELY(!bq->current_write))
        bq->current_write = bq->blocks_tail;

    /* Scan right */
    while (PA_UNLIKELY(bq->current_write->index + (int64_t) bq->current_write->chunk.length <= bq->write_index))

        if (bq->current_write->next)
            bq->current_write = bq->current_write->next;
        else
            break;

    /* Scan left */
    while (PA_LIKELY(bq->current_write != NULL) && PA_UNLIKELY(bq->current_write->index > bq->write_index))
        bq->current_write = bq->current_write->prev;

    /* At this point current_write will either point at or right of
       the next block to write data to. It may be NULL in case
       everything in the queue is still to be played */
}
178
/* Unlink one node from the queue, fix up the head/tail pointers and the
 * cached cursors, release the referenced memblock and recycle the node
 * via the free list. */
static void drop_block(pa_memblockq *bq, struct list_item *q) {
    pa_assert(bq);
    pa_assert(q);

    pa_assert(bq->n_blocks >= 1);

    if (q->prev)
        q->prev->next = q->next;
    else {
        pa_assert(bq->blocks == q);
        bq->blocks = q->next;
    }

    if (q->next)
        q->next->prev = q->prev;
    else {
        pa_assert(bq->blocks_tail == q);
        bq->blocks_tail = q->prev;
    }

    /* Keep the cached cursors valid: move them off the dying node. */
    if (bq->current_write == q)
        bq->current_write = q->prev;

    if (bq->current_read == q)
        bq->current_read = q->next;

    pa_memblock_unref(q->chunk.memblock);

    /* Recycle the node; fall back to freeing if the free list is full. */
    if (pa_flist_push(PA_STATIC_FLIST_GET(list_items), q) < 0)
        pa_xfree(q);

    bq->n_blocks--;
}
212
/* Prune blocks at the head of the queue that lie entirely before the
 * rewind horizon (read_index - maxrewind) and can never be needed
 * again. */
static void drop_backlog(pa_memblockq *bq) {
    int64_t boundary;

    pa_assert(bq);

    boundary = bq->read_index - (int64_t) bq->maxrewind;

    for (;;) {
        struct list_item *head = bq->blocks;

        if (!head || head->index + (int64_t) head->chunk.length > boundary)
            break;

        drop_block(bq, head);
    }
}
222
/* Check whether l bytes may be written at the current write index
 * without the queue exceeding maxlength. Writing into the gap before
 * the read index, or overwriting already queued data, never counts
 * against the limit. */
static bool can_push(pa_memblockq *bq, size_t l) {
    int64_t end;

    pa_assert(bq);

    if (bq->read_index > bq->write_index) {
        int64_t d = bq->read_index - bq->write_index;

        /* The part of the write that merely fills the gap up to the
         * read index is always permitted. */
        if ((int64_t) l > d)
            l -= (size_t) d;
        else
            return true;
    }

    end = bq->blocks_tail ? bq->blocks_tail->index + (int64_t) bq->blocks_tail->chunk.length : bq->write_index;

    /* Make sure that the list doesn't get too long */
    if (bq->write_index + (int64_t) l > end)
        if (bq->write_index + (int64_t) l - bq->read_index > (int64_t) bq->maxlength)
            return false;

    return true;
}
246
/* Book-keep a movement of the write index. If account is true the delta
 * is charged against the data we previously requested from the client,
 * otherwise against the missing counter. */
static void write_index_changed(pa_memblockq *bq, int64_t old_write_index, bool account) {
    int64_t delta;

    pa_assert(bq);

    delta = bq->write_index - old_write_index;

    /* Charge the movement against one of the two accounting counters. */
    *(account ? &bq->requested : &bq->missing) -= delta;

#ifdef MEMBLOCKQ_DEBUG
    pa_log_debug("[%s] pushed/seeked %lli: requested counter at %lli, account=%i", bq->name, (long long) delta, (long long) bq->requested, account);
#endif
}
263
/* Book-keep a movement of the read index: whatever was consumed is now
 * missing from the target fill level. */
static void read_index_changed(pa_memblockq *bq, int64_t old_read_index) {
    int64_t delta;

    pa_assert(bq);

    delta = bq->read_index - old_read_index;

    bq->missing += delta;

#ifdef MEMBLOCKQ_DEBUG
    pa_log_debug("[%s] popped %lli: missing counter at %lli", bq->name, (long long) delta, (long long) bq->missing);
#endif
}
276
/* Insert a chunk at the current write position. Queued entries that the
 * new data overlaps are truncated, split or dropped as necessary so the
 * list stays ordered and non-overlapping. The chunk must be frame
 * aligned. An additional reference on its memblock is taken; the caller
 * keeps ownership of *uchunk. Returns 0 on success, -1 if the queue
 * would exceed maxlength. */
int pa_memblockq_push(pa_memblockq* bq, const pa_memchunk *uchunk) {
    struct list_item *q, *n;
    pa_memchunk chunk;
    int64_t old;

    pa_assert(bq);
    pa_assert(uchunk);
    pa_assert(uchunk->memblock);
    pa_assert(uchunk->length > 0);
    pa_assert(uchunk->index + uchunk->length <= pa_memblock_get_length(uchunk->memblock));

    pa_assert(uchunk->length % bq->base == 0);
    pa_assert(uchunk->index % bq->base == 0);

    if (!can_push(bq, uchunk->length))
        return -1;

    old = bq->write_index;
    chunk = *uchunk;

    fix_current_write(bq);
    q = bq->current_write;

    /* First we advance the q pointer right of where we want to
     * write to */

    if (q) {
        while (bq->write_index + (int64_t) chunk.length > q->index)
            if (q->next)
                q = q->next;
            else
                break;
    }

    if (!q)
        q = bq->blocks_tail;

    /* We go from back to front to look for the right place to add
     * this new entry. Drop data we will overwrite on the way */

    while (q) {

        if (bq->write_index >= q->index + (int64_t) q->chunk.length)
            /* We found the entry where we need to place the new entry immediately after */
            break;
        else if (bq->write_index + (int64_t) chunk.length <= q->index) {
            /* This entry isn't touched at all, let's skip it */
            q = q->prev;
        } else if (bq->write_index <= q->index &&
                   bq->write_index + (int64_t) chunk.length >= q->index + (int64_t) q->chunk.length) {

            /* This entry is fully replaced by the new entry, so let's drop it */

            struct list_item *p;
            p = q;
            q = q->prev;
            drop_block(bq, p);
        } else if (bq->write_index >= q->index) {
            /* The write index points into this memblock, so let's
             * truncate or split it */

            if (bq->write_index + (int64_t) chunk.length < q->index + (int64_t) q->chunk.length) {

                /* We need to save the end of this memchunk */
                struct list_item *p;
                size_t d;

                /* Create a new list entry for the end of the memchunk */
                if (!(p = pa_flist_pop(PA_STATIC_FLIST_GET(list_items))))
                    p = pa_xnew(struct list_item, 1);

                p->chunk = q->chunk;
                pa_memblock_ref(p->chunk.memblock);

                /* Calculate offset */
                d = (size_t) (bq->write_index + (int64_t) chunk.length - q->index);
                pa_assert(d > 0);

                /* Drop it from the new entry */
                p->index = q->index + (int64_t) d;
                p->chunk.length -= d;

                /* Add it to the list */
                p->prev = q;
                if ((p->next = q->next))
                    q->next->prev = p;
                else
                    bq->blocks_tail = p;
                q->next = p;

                bq->n_blocks++;
            }

            /* Truncate the chunk */
            if (!(q->chunk.length = (size_t) (bq->write_index - q->index))) {
                /* The truncation left nothing of this entry, drop it */
                struct list_item *p;
                p = q;
                q = q->prev;
                drop_block(bq, p);
            }

            /* We had to truncate this block, hence we're now at the right position */
            break;
        } else {
            size_t d;

            pa_assert(bq->write_index + (int64_t)chunk.length > q->index &&
                      bq->write_index + (int64_t)chunk.length < q->index + (int64_t)q->chunk.length &&
                      bq->write_index < q->index);

            /* The job overwrites the current entry at the end, so let's drop the beginning of this entry */

            d = (size_t) (bq->write_index + (int64_t) chunk.length - q->index);
            q->index += (int64_t) d;
            q->chunk.index += d;
            q->chunk.length -= d;

            q = q->prev;
        }
    }

    if (q) {
        pa_assert(bq->write_index >= q->index + (int64_t)q->chunk.length);
        pa_assert(!q->next || (bq->write_index + (int64_t)chunk.length <= q->next->index));

        /* Try to merge memory blocks */

        if (q->chunk.memblock == chunk.memblock &&
            q->chunk.index + q->chunk.length == chunk.index &&
            bq->write_index == q->index + (int64_t) q->chunk.length) {

            /* The new data continues the previous entry in the same
             * memblock, so just extend it instead of queuing a node */
            q->chunk.length += chunk.length;
            bq->write_index += (int64_t) chunk.length;
            goto finish;
        }
    } else
        pa_assert(!bq->blocks || (bq->write_index + (int64_t)chunk.length <= bq->blocks->index));

    /* Allocate a fresh node (recycled if possible) and link it in after q */
    if (!(n = pa_flist_pop(PA_STATIC_FLIST_GET(list_items))))
        n = pa_xnew(struct list_item, 1);

    n->chunk = chunk;
    pa_memblock_ref(n->chunk.memblock);
    n->index = bq->write_index;
    bq->write_index += (int64_t) n->chunk.length;

    n->next = q ? q->next : bq->blocks;
    n->prev = q;

    if (n->next)
        n->next->prev = n;
    else
        bq->blocks_tail = n;

    if (n->prev)
        n->prev->next = n;
    else
        bq->blocks = n;

    bq->n_blocks++;

finish:

    write_index_changed(bq, old, true);
    return 0;
}
443
pa_memblockq_prebuf_active(pa_memblockq * bq)444 bool pa_memblockq_prebuf_active(pa_memblockq *bq) {
445 pa_assert(bq);
446
447 if (bq->in_prebuf)
448 return pa_memblockq_get_length(bq) < bq->prebuf;
449 else
450 return bq->prebuf > 0 && bq->read_index >= bq->write_index;
451 }
452
update_prebuf(pa_memblockq * bq)453 static bool update_prebuf(pa_memblockq *bq) {
454 pa_assert(bq);
455
456 if (bq->in_prebuf) {
457
458 if (pa_memblockq_get_length(bq) < bq->prebuf)
459 return true;
460
461 bq->in_prebuf = false;
462 return false;
463 } else {
464
465 if (bq->prebuf > 0 && bq->read_index >= bq->write_index) {
466 bq->in_prebuf = true;
467 return true;
468 }
469
470 return false;
471 }
472 }
473
/* Return (without consuming) the data at the current read position. If
 * there is a hole at the read pointer, the configured silence chunk is
 * returned instead; lacking one, a memblock-less chunk carrying only a
 * length is returned. Returns -1 while prebuffering, or when the queue
 * is empty and no silence length can be reported. On success the caller
 * owns one reference on chunk->memblock (if set). */
int pa_memblockq_peek(pa_memblockq* bq, pa_memchunk *chunk) {
    int64_t d;
    pa_assert(bq);
    pa_assert(chunk);

    /* We need to pre-buffer */
    if (update_prebuf(bq))
        return -1;

    fix_current_read(bq);

    /* Do we need to spit out silence? */
    if (!bq->current_read || bq->current_read->index > bq->read_index) {
        size_t length;

        /* How much silence shall we return? */
        if (bq->current_read)
            length = (size_t) (bq->current_read->index - bq->read_index);
        else if (bq->write_index > bq->read_index)
            length = (size_t) (bq->write_index - bq->read_index);
        else
            length = 0;

        /* We need to return silence, since no data is yet available */
        if (bq->silence.memblock) {
            *chunk = bq->silence;
            pa_memblock_ref(chunk->memblock);

            /* Clip the silence to the size of the hole, if bounded */
            if (length > 0 && length < chunk->length)
                chunk->length = length;

        } else {

            /* If the memblockq is empty, return -1, otherwise return
             * the time to sleep */
            if (length <= 0)
                return -1;

            chunk->memblock = NULL;
            chunk->length = length;
        }

        chunk->index = 0;
        return 0;
    }

    /* Ok, let's pass real data to the caller */
    *chunk = bq->current_read->chunk;
    pa_memblock_ref(chunk->memblock);

    /* Skip the part of the chunk that lies before the read pointer */
    pa_assert(bq->read_index >= bq->current_read->index);
    d = bq->read_index - bq->current_read->index;
    chunk->index += (size_t) d;
    chunk->length -= (size_t) d;

    return 0;
}
531
/* Like pa_memblockq_peek(), but always returns a chunk of exactly
 * block_size bytes, concatenating queued data and filling holes with
 * the silence chunk (which therefore must be set). If the first queued
 * chunk already covers block_size bytes it is returned directly without
 * copying; otherwise a fresh memblock is assembled. */
int pa_memblockq_peek_fixed_size(pa_memblockq *bq, size_t block_size, pa_memchunk *chunk) {
    pa_mempool *pool;
    pa_memchunk tchunk, rchunk;
    int64_t ri;
    struct list_item *item;

    pa_assert(bq);
    pa_assert(block_size > 0);
    pa_assert(chunk);
    pa_assert(bq->silence.memblock);

    if (pa_memblockq_peek(bq, &tchunk) < 0)
        return -1;

    /* Fast path: the peeked chunk is already large enough */
    if (tchunk.length >= block_size) {
        *chunk = tchunk;
        chunk->length = block_size;
        return 0;
    }

    /* Slow path: assemble block_size bytes into a new memblock */
    pool = pa_memblock_get_pool(tchunk.memblock);
    rchunk.memblock = pa_memblock_new(pool, block_size);
    rchunk.index = 0;
    rchunk.length = tchunk.length;
    pa_mempool_unref(pool), pool = NULL;

    pa_memchunk_memcpy(&rchunk, &tchunk);
    pa_memblock_unref(tchunk.memblock);

    rchunk.index += tchunk.length;

    /* We don't need to call fix_current_read() here, since
     * pa_memblock_peek() already did that */
    item = bq->current_read;
    ri = bq->read_index + tchunk.length;

    while (rchunk.index < block_size) {

        if (!item || item->index > ri) {
            /* Do we need to append silence? */
            tchunk = bq->silence;

            if (item)
                tchunk.length = PA_MIN(tchunk.length, (size_t) (item->index - ri));

        } else {
            int64_t d;

            /* We can append real data! */
            tchunk = item->chunk;

            d = ri - item->index;
            tchunk.index += (size_t) d;
            tchunk.length -= (size_t) d;

            /* Go to next item for the next iteration */
            item = item->next;
        }

        rchunk.length = tchunk.length = PA_MIN(tchunk.length, block_size - rchunk.index);
        pa_memchunk_memcpy(&rchunk, &tchunk);

        rchunk.index += rchunk.length;
        ri += rchunk.length;
    }

    rchunk.index = 0;
    rchunk.length = block_size;

    *chunk = rchunk;
    return 0;
}
604
/* Advance the read pointer by length bytes (a multiple of the frame
 * size), chunk by chunk, stopping early if prebuffering kicks in. Also
 * prunes blocks that fall behind the rewind horizon. */
void pa_memblockq_drop(pa_memblockq *bq, size_t length) {
    int64_t old;
    pa_assert(bq);
    pa_assert(length % bq->base == 0);

    old = bq->read_index;

    while (length > 0) {

        /* Do not drop any data when we are in prebuffering mode */
        if (update_prebuf(bq))
            break;

        fix_current_read(bq);

        if (bq->current_read) {
            int64_t p, d;

            /* We go through this piece by piece to make sure we don't
             * drop more than allowed by prebuf */

            p = bq->current_read->index + (int64_t) bq->current_read->chunk.length;
            pa_assert(p >= bq->read_index);
            d = p - bq->read_index;

            if (d > (int64_t) length)
                d = (int64_t) length;

            bq->read_index += d;
            length -= (size_t) d;

        } else {

            /* The list is empty, there's nothing we could drop */
            bq->read_index += (int64_t) length;
            break;
        }
    }

    drop_backlog(bq);
    read_index_changed(bq, old);
}
647
/* Move the read pointer backwards by length bytes (a multiple of the
 * frame size). This is kind of the inverse of pa_memblockq_drop(). */
void pa_memblockq_rewind(pa_memblockq *bq, size_t length) {
    int64_t previous;

    pa_assert(bq);
    pa_assert(length % bq->base == 0);

    previous = bq->read_index;
    bq->read_index -= (int64_t) length;

    read_index_changed(bq, previous);
}
661
pa_memblockq_is_readable(pa_memblockq * bq)662 bool pa_memblockq_is_readable(pa_memblockq *bq) {
663 pa_assert(bq);
664
665 if (pa_memblockq_prebuf_active(bq))
666 return false;
667
668 if (pa_memblockq_get_length(bq) <= 0)
669 return false;
670
671 return true;
672 }
673
pa_memblockq_get_length(pa_memblockq * bq)674 size_t pa_memblockq_get_length(pa_memblockq *bq) {
675 pa_assert(bq);
676
677 if (bq->write_index <= bq->read_index)
678 return 0;
679
680 return (size_t) (bq->write_index - bq->read_index);
681 }
682
/* Move the write pointer according to the seek mode and byte offset.
 * Data behind the rewind horizon is pruned afterwards, and the
 * requested/missing accounting is updated (see write_index_changed()). */
void pa_memblockq_seek(pa_memblockq *bq, int64_t offset, pa_seek_mode_t seek, bool account) {
    int64_t old;
    pa_assert(bq);

    old = bq->write_index;

    switch (seek) {
        case PA_SEEK_RELATIVE:
            bq->write_index += offset;
            break;
        case PA_SEEK_ABSOLUTE:
            bq->write_index = offset;
            break;
        case PA_SEEK_RELATIVE_ON_READ:
            bq->write_index = bq->read_index + offset;
            break;
        case PA_SEEK_RELATIVE_END:
            /* Relative to the end of the queued data, or to the read
             * index if nothing is queued */
            bq->write_index = (bq->blocks_tail ? bq->blocks_tail->index + (int64_t) bq->blocks_tail->chunk.length : bq->read_index) + offset;
            break;
        default:
            pa_assert_not_reached();
    }

    drop_backlog(bq);
    write_index_changed(bq, old, account);
}
709
/* Empty the queue by pulling the write pointer back to the read
 * pointer, re-arming prebuffering and updating the accounting. */
void pa_memblockq_flush_write(pa_memblockq *bq, bool account) {
    int64_t previous;

    pa_assert(bq);

    pa_memblockq_silence(bq);

    previous = bq->write_index;
    bq->write_index = bq->read_index;

    pa_memblockq_prebuf_force(bq);
    write_index_changed(bq, previous, account);
}
722
/* Empty the queue by advancing the read pointer to the write pointer,
 * re-arming prebuffering and updating the accounting. */
void pa_memblockq_flush_read(pa_memblockq *bq) {
    int64_t previous;

    pa_assert(bq);

    pa_memblockq_silence(bq);

    previous = bq->read_index;
    bq->read_index = bq->write_index;

    pa_memblockq_prebuf_force(bq);
    read_index_changed(bq, previous);
}
735
/* Return the configured target fill level in bytes. */
size_t pa_memblockq_get_tlength(pa_memblockq *bq) {
    pa_assert(bq);

    return bq->tlength;
}
741
/* Return the configured minimum request size in bytes. */
size_t pa_memblockq_get_minreq(pa_memblockq *bq) {
    pa_assert(bq);

    return bq->minreq;
}
747
/* Return the configured maximum rewind size in bytes. */
size_t pa_memblockq_get_maxrewind(pa_memblockq *bq) {
    pa_assert(bq);

    return bq->maxrewind;
}
753
/* Return the current absolute read index. */
int64_t pa_memblockq_get_read_index(pa_memblockq *bq) {
    pa_assert(bq);

    return bq->read_index;
}
759
/* Return the current absolute write index. */
int64_t pa_memblockq_get_write_index(pa_memblockq *bq) {
    pa_assert(bq);

    return bq->write_index;
}
765
/* Like pa_memblockq_push(), but accepts chunks that are not frame
 * aligned: data is run through the queue's pa_mcalign first and pushed
 * in whole-frame pieces. Returns 0 on success, -1 if the data does not
 * fit (the aligner is flushed in that case). */
int pa_memblockq_push_align(pa_memblockq* bq, const pa_memchunk *chunk) {
    pa_memchunk aligned;

    pa_assert(bq);
    pa_assert(chunk);

    /* With one-byte frames everything is trivially aligned. */
    if (bq->base == 1)
        return pa_memblockq_push(bq, chunk);

    if (!can_push(bq, pa_mcalign_csize(bq->mcalign, chunk->length)))
        return -1;

    pa_mcalign_push(bq->mcalign, chunk);

    while (pa_mcalign_pop(bq->mcalign, &aligned) >= 0) {
        int r = pa_memblockq_push(bq, &aligned);
        pa_memblock_unref(aligned.memblock);

        if (r < 0) {
            pa_mcalign_flush(bq->mcalign);
            return -1;
        }
    }

    return 0;
}
793
/* Leave prebuffering mode immediately, regardless of the fill level. */
void pa_memblockq_prebuf_disable(pa_memblockq *bq) {
    pa_assert(bq);

    bq->in_prebuf = false;
}
799
/* Re-enter prebuffering mode, but only if prebuffering is enabled at
 * all (prebuf > 0). */
void pa_memblockq_prebuf_force(pa_memblockq *bq) {
    pa_assert(bq);

    if (bq->prebuf > 0)
        bq->in_prebuf = true;
}
806
/* Return the configured maximum queue length in bytes. */
size_t pa_memblockq_get_maxlength(pa_memblockq *bq) {
    pa_assert(bq);

    return bq->maxlength;
}
812
/* Return the configured prebuffer threshold in bytes. */
size_t pa_memblockq_get_prebuf(pa_memblockq *bq) {
    pa_assert(bq);

    return bq->prebuf;
}
818
/* Return how many bytes should be requested from the client now, moving
 * that amount from the 'missing' to the 'requested' counter. Deficits
 * smaller than minreq are suppressed, unless prebuffering is active
 * (then every byte counts towards leaving prebuf). */
size_t pa_memblockq_pop_missing(pa_memblockq *bq) {
    size_t l;

    pa_assert(bq);

#ifdef MEMBLOCKQ_DEBUG
    pa_log_debug("[%s] pop: %lli", bq->name, (long long) bq->missing);
#endif

    if (bq->missing <= 0)
        return 0;

    /* Suppress requests below minreq while not prebuffering */
    if (((size_t) bq->missing < bq->minreq) &&
        !pa_memblockq_prebuf_active(bq))
        return 0;

    l = (size_t) bq->missing;

    bq->requested += bq->missing;
    bq->missing = 0;

#ifdef MEMBLOCKQ_DEBUG
    pa_log_debug("[%s] sent %lli: request counter is at %lli", bq->name, (long long) l, (long long) bq->requested);
#endif

    return l;
}
846
/* Set the hard upper limit of the queue, rounded up to a whole number
 * of frames and at least one frame. The target length is re-clamped
 * against the new limit. */
void pa_memblockq_set_maxlength(pa_memblockq *bq, size_t maxlength) {
    pa_assert(bq);

    /* Round up to the frame size, enforcing a one-frame minimum. */
    bq->maxlength = ((maxlength + bq->base - 1) / bq->base) * bq->base;
    if (bq->maxlength < bq->base)
        bq->maxlength = bq->base;

    if (bq->tlength > bq->maxlength)
        pa_memblockq_set_tlength(bq, bq->maxlength);
}
858
/* Set the target fill level, rounded up to a whole number of frames and
 * clamped to maxlength; 0 or (size_t) -1 selects maxlength. minreq and
 * prebuf are re-clamped against the new value, and the missing counter
 * absorbs the difference so exactly the additional data is requested. */
void pa_memblockq_set_tlength(pa_memblockq *bq, size_t tlength) {
    size_t old_tlength;
    pa_assert(bq);

    if (tlength <= 0 || tlength == (size_t) -1)
        tlength = bq->maxlength;

    old_tlength = bq->tlength;
    bq->tlength = ((tlength+bq->base-1)/bq->base)*bq->base;

    if (bq->tlength > bq->maxlength)
        bq->tlength = bq->maxlength;

    if (bq->minreq > bq->tlength)
        pa_memblockq_set_minreq(bq, bq->tlength);

    /* prebuf may never exceed tlength + base - minreq */
    if (bq->prebuf > bq->tlength+bq->base-bq->minreq)
        pa_memblockq_set_prebuf(bq, bq->tlength+bq->base-bq->minreq);

    bq->missing += (int64_t) bq->tlength - (int64_t) old_tlength;
}
880
/* Set the minimum request size, rounded down to a whole number of
 * frames and clamped into [base, tlength]. prebuf is re-clamped against
 * the new value. */
void pa_memblockq_set_minreq(pa_memblockq *bq, size_t minreq) {
    pa_assert(bq);

    /* Round down to the frame size, then clamp. */
    bq->minreq = (minreq / bq->base) * bq->base;

    if (bq->minreq > bq->tlength)
        bq->minreq = bq->tlength;

    if (bq->minreq < bq->base)
        bq->minreq = bq->base;

    /* prebuf may never exceed tlength + base - minreq */
    if (bq->prebuf > bq->tlength + bq->base - bq->minreq)
        pa_memblockq_set_prebuf(bq, bq->tlength + bq->base - bq->minreq);
}
895
/* Set the prebuffer threshold, rounded up to a whole number of frames;
 * (size_t) -1 selects the maximum tlength + base - minreq, which is
 * also the upper clamp. If the queue already holds enough data (or
 * prebuffering ends up disabled), prebuf mode is left immediately. */
void pa_memblockq_set_prebuf(pa_memblockq *bq, size_t prebuf) {
    pa_assert(bq);

    if (prebuf == (size_t) -1)
        prebuf = bq->tlength+bq->base-bq->minreq;

    bq->prebuf = ((prebuf+bq->base-1)/bq->base)*bq->base;

    if (prebuf > 0 && bq->prebuf < bq->base)
        bq->prebuf = bq->base;

    if (bq->prebuf > bq->tlength+bq->base-bq->minreq)
        bq->prebuf = bq->tlength+bq->base-bq->minreq;

    if (bq->prebuf <= 0 || pa_memblockq_get_length(bq) >= bq->prebuf)
        bq->in_prebuf = false;
}
913
/* Set how far the read pointer may be rewound, rounded down to a whole
 * number of frames. */
void pa_memblockq_set_maxrewind(pa_memblockq *bq, size_t maxrewind) {
    pa_assert(bq);

    bq->maxrewind = (maxrewind / bq->base) * bq->base;
}
919
/* Apply client buffer attributes to the queue. Uses the same setter
 * order as pa_memblockq_new(), since each setter clamps against the
 * values established by the previous ones. */
void pa_memblockq_apply_attr(pa_memblockq *bq, const pa_buffer_attr *a) {
    pa_assert(bq);
    pa_assert(a);

    pa_memblockq_set_maxlength(bq, a->maxlength);
    pa_memblockq_set_tlength(bq, a->tlength);
    pa_memblockq_set_minreq(bq, a->minreq);
    pa_memblockq_set_prebuf(bq, a->prebuf);
}
929
/* Fill a pa_buffer_attr with the queue's current (sanitized) metrics. */
void pa_memblockq_get_attr(pa_memblockq *bq, pa_buffer_attr *a) {
    pa_assert(bq);
    pa_assert(a);

    a->maxlength = (uint32_t) pa_memblockq_get_maxlength(bq);
    a->tlength = (uint32_t) pa_memblockq_get_tlength(bq);
    a->prebuf = (uint32_t) pa_memblockq_get_prebuf(bq);
    a->minreq = (uint32_t) pa_memblockq_get_minreq(bq);
}
939
/* Move everything available in 'source' into 'bq'. Real data chunks are
 * pushed (frame aligned); memblock-less chunks (holes reported by
 * pa_memblockq_peek()) are forwarded as a relative seek. Returns 0 once
 * the source runs dry, -1 if pushing into 'bq' failed. */
int pa_memblockq_splice(pa_memblockq *bq, pa_memblockq *source) {

    pa_assert(bq);
    pa_assert(source);

    pa_memblockq_prebuf_disable(bq);

    for (;;) {
        pa_memchunk chunk;

        /* Source exhausted: we are done */
        if (pa_memblockq_peek(source, &chunk) < 0)
            return 0;

        pa_assert(chunk.length > 0);

        if (chunk.memblock) {

            if (pa_memblockq_push_align(bq, &chunk) < 0) {
                pa_memblock_unref(chunk.memblock);
                return -1;
            }

            pa_memblock_unref(chunk.memblock);
        } else
            /* A hole: just advance the write pointer */
            pa_memblockq_seek(bq, (int64_t) chunk.length, PA_SEEK_RELATIVE, true);

        pa_memblockq_drop(bq, chunk.length);
    }
}
969
/* Mark all chunks from the read position onwards as needed soon (see
 * pa_memchunk_will_need()). */
void pa_memblockq_willneed(pa_memblockq *bq) {
    struct list_item *i;

    pa_assert(bq);

    fix_current_read(bq);

    for (i = bq->current_read; i; i = i->next)
        pa_memchunk_will_need(&i->chunk);
}
980
/* Replace the silence chunk played for holes in the stream. Passing
 * NULL clears it; otherwise a reference on the chunk's memblock is
 * taken. */
void pa_memblockq_set_silence(pa_memblockq *bq, pa_memchunk *silence) {
    pa_assert(bq);

    /* Release the previous silence chunk, if any. */
    if (bq->silence.memblock)
        pa_memblock_unref(bq->silence.memblock);

    if (!silence) {
        pa_memchunk_reset(&bq->silence);
        return;
    }

    bq->silence = *silence;
    pa_memblock_ref(bq->silence.memblock);
}
993
/* Return whether no chunks are queued at all. */
bool pa_memblockq_is_empty(pa_memblockq *bq) {
    pa_assert(bq);

    return !bq->blocks;
}
999
/* Drop every queued chunk. The indices are left untouched, so the whole
 * queued range will subsequently read back as silence. */
void pa_memblockq_silence(pa_memblockq *bq) {
    pa_assert(bq);

    while (bq->blocks)
        drop_block(bq, bq->blocks);

    pa_assert(bq->n_blocks == 0);
}
1008
/* Return the number of chunks currently queued. */
unsigned pa_memblockq_get_nblocks(pa_memblockq *bq) {
    pa_assert(bq);

    return bq->n_blocks;
}
1014
/* Return the queue's frame size in bytes. */
size_t pa_memblockq_get_base(pa_memblockq *bq) {
    pa_assert(bq);

    return bq->base;
}
1020